Dataset schema (column name, type, observed value-length range or class count):

    nwo                 stringlengths    5 - 106
    sha                 stringlengths    40 - 40
    path                stringlengths    4 - 174
    language            stringclasses    1 value
    identifier          stringlengths    1 - 140
    parameters          stringlengths    0 - 87.7k
    argument_list       stringclasses    1 value
    return_statement    stringlengths    0 - 426k
    docstring           stringlengths    0 - 64.3k
    docstring_summary   stringlengths    0 - 26.3k
    docstring_tokens    list
    function            stringlengths    18 - 4.83M
    function_tokens     list
    url                 stringlengths    83 - 304
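Each record below lists its non-empty fields in the order of the columns above. As a quick orientation, here is a minimal sketch (an assumption, not part of the dump) of how rows with this schema could be loaded and filtered with the Hugging Face datasets library; the local file name functions.jsonl is hypothetical.

# Minimal sketch: load rows shaped like the schema above and keep only
# documented functions. The file name "functions.jsonl" is hypothetical.
from datasets import load_dataset

ds = load_dataset("json", data_files="functions.jsonl", split="train")

# Keep rows that actually carry a docstring (many rows have an empty one).
documented = ds.filter(lambda row: bool(row["docstring"]))

for row in documented.select(range(3)):
    print(row["nwo"], row["identifier"], row["url"])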
CentOS-PaaS-SIG/linchpin
a449385a4823a43e6f336de42b874ac7877d8b3e
linchpin/cli/context.py
python
LinchpinCliContext.inventory_path
(self)
return self.get_cfg('lp', 'inventory_path', default)
getter function for inventory_path
getter function for inventory_path
[ "getter", "function", "for", "inventory_path" ]
def inventory_path(self):
    """
    getter function for inventory_path
    """

    default = self._get_latest_file(self.inventory_folder)
    return self.get_cfg('lp', 'inventory_path', default)
[ "def", "inventory_path", "(", "self", ")", ":", "default", "=", "self", ".", "_get_latest_file", "(", "self", ".", "inventory_folder", ")", "return", "self", ".", "get_cfg", "(", "'lp'", ",", "'inventory_path'", ",", "default", ")" ]
https://github.com/CentOS-PaaS-SIG/linchpin/blob/a449385a4823a43e6f336de42b874ac7877d8b3e/linchpin/cli/context.py#L149-L154
Diaoul/subliminal
e25589dbcc5b2455bf9f0b49cf2083bb0eae449f
subliminal/utils.py
python
hash_shooter
(video_path)
return ';'.join(filehash)
Compute a hash using Shooter's algorithm :param string video_path: path of the video :return: the hash :rtype: string
Compute a hash using Shooter's algorithm
[ "Compute", "a", "hash", "using", "Shooter", "s", "algorithm" ]
def hash_shooter(video_path):
    """Compute a hash using Shooter's algorithm

    :param string video_path: path of the video
    :return: the hash
    :rtype: string

    """
    filesize = os.path.getsize(video_path)
    readsize = 4096
    if os.path.getsize(video_path) < readsize * 2:
        return None
    offsets = (readsize, filesize // 3 * 2, filesize // 3, filesize - readsize * 2)
    filehash = []
    with open(video_path, 'rb') as f:
        for offset in offsets:
            f.seek(offset)
            filehash.append(hashlib.md5(f.read(readsize)).hexdigest())
    return ';'.join(filehash)
[ "def", "hash_shooter", "(", "video_path", ")", ":", "filesize", "=", "os", ".", "path", ".", "getsize", "(", "video_path", ")", "readsize", "=", "4096", "if", "os", ".", "path", ".", "getsize", "(", "video_path", ")", "<", "readsize", "*", "2", ":", ...
https://github.com/Diaoul/subliminal/blob/e25589dbcc5b2455bf9f0b49cf2083bb0eae449f/subliminal/utils.py#L83-L101
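The function in the record above is part of subliminal's public utilities, so a usage sketch only needs the package installed; the video path below is a placeholder.

# Usage sketch for the record above (assumes subliminal is installed;
# the path is a placeholder, and None is returned for files smaller than 8 KiB).
from subliminal.utils import hash_shooter

digest = hash_shooter("/path/to/movie.mkv")
print(digest)  # four MD5 hex digests joined by ';'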
alejandroautalan/pygubu
29a46966900fdb621bce9b615746719949e7c68e
pygubu/widgets/dialog.py
python
Dialog.set_title
(self, title)
Sets the dialog title
Sets the dialog title
[ "Sets", "the", "dialog", "title" ]
def set_title(self, title):
    """Sets the dialog title"""
    if self.toplevel:
        self.toplevel.title(title)
[ "def", "set_title", "(", "self", ",", "title", ")", ":", "if", "self", ".", "toplevel", ":", "self", ".", "toplevel", ".", "title", "(", "title", ")" ]
https://github.com/alejandroautalan/pygubu/blob/29a46966900fdb621bce9b615746719949e7c68e/pygubu/widgets/dialog.py#L98-L101
pyscf/pyscf
0adfb464333f5ceee07b664f291d4084801bae64
pyscf/pbc/scf/hf.py
python
get_t
(cell, kpt=np.zeros(3))
return cell.pbc_intor('int1e_kin', hermi=1, kpts=kpt)
Get the kinetic energy AO matrix.
Get the kinetic energy AO matrix.
[ "Get", "the", "kinetic", "energy", "AO", "matrix", "." ]
def get_t(cell, kpt=np.zeros(3)):
    '''Get the kinetic energy AO matrix.
    '''
    return cell.pbc_intor('int1e_kin', hermi=1, kpts=kpt)
[ "def", "get_t", "(", "cell", ",", "kpt", "=", "np", ".", "zeros", "(", "3", ")", ")", ":", "return", "cell", ".", "pbc_intor", "(", "'int1e_kin'", ",", "hermi", "=", "1", ",", "kpts", "=", "kpt", ")" ]
https://github.com/pyscf/pyscf/blob/0adfb464333f5ceee07b664f291d4084801bae64/pyscf/pbc/scf/hf.py#L88-L91
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/_pyio.py
python
BufferedRandom.readinto
(self, b)
return BufferedReader.readinto(self, b)
[]
def readinto(self, b):
    self.flush()
    return BufferedReader.readinto(self, b)
[ "def", "readinto", "(", "self", ",", "b", ")", ":", "self", ".", "flush", "(", ")", "return", "BufferedReader", ".", "readinto", "(", "self", ",", "b", ")" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/_pyio.py#L1446-L1448
ifwe/digsby
f5fe00244744aa131e07f09348d10563f3d8fa99
digsby/src/oscar/OscarBuddies.py
python
OscarBuddy.guess_client
(self)
Guess a buddy's client based on capabilities.
Guess a buddy's client based on capabilities.
[ "Guess", "a", "buddy", "s", "client", "based", "on", "capabilities", "." ]
def guess_client(self):
    '''
    Guess a buddy's client based on capabilities.
    '''
    # Hopefully this combination will separate the official ICQ clients
    # from others
    caps = set(self.pretty_caps)
    might_be_icq = False
    if caps.issuperset(icq6_caps):
        might_be_icq = True

    if any(x.startswith('Miranda') for x in caps):
        if caps.issuperset(('mtn', 'icq_xtraz')):
            return 'miranda-icq'
        else:
            return 'miranda-aim'
    elif caps.issuperset(meebo_caps):
        return 'purple'
    elif caps.issuperset(icq7_caps):
        return 'icq7'
    elif any('QIP 2005' in x for x in caps):
        return 'qip-2005'
    elif caps.issuperset(aim5_caps):
        return 'aim59'
    elif 'digsby' in caps:
        return 'digsby'
    elif caps.issuperset(aim6_caps) or 'aim6_unknown1' in caps:
        return 'aim60'
    elif caps.issuperset(ichat_caps):
        return 'ichat'
    elif caps.issuperset(pidgin_caps):
        return 'purple'
    elif self.mobile or self.sms:
        return 'mobile'
    elif caps.issuperset(ebuddy_caps):
        if might_be_icq:
            return 'icq6'
        else:
            return 'ebuddy'
    else:
        if might_be_icq:
            return 'icq6'
        else:
            return 'unknown'
[ "def", "guess_client", "(", "self", ")", ":", "# Hopefully this combination will seperate the official ICQ clients", "# from others", "caps", "=", "set", "(", "self", ".", "pretty_caps", ")", "might_be_icq", "=", "False", "if", "caps", ".", "issuperset", "(", "icq6_ca...
https://github.com/ifwe/digsby/blob/f5fe00244744aa131e07f09348d10563f3d8fa99/digsby/src/oscar/OscarBuddies.py#L639-L684
wmliang/pe-afl
4036d2f41da20ff12ecac43a076de5d60ce68bd9
lighthouse/lighthouse/util/disassembler/api.py
python
DisassemblerAPI.warning
(self, text)
Display a warning dialog box with the given text.
Display a warning dialog box with the given text.
[ "Display", "a", "warning", "dialog", "box", "with", "the", "given", "text", "." ]
def warning(self, text):
    """
    Display a warning dialog box with the given text.
    """
    pass
[ "def", "warning", "(", "self", ",", "text", ")", ":", "pass" ]
https://github.com/wmliang/pe-afl/blob/4036d2f41da20ff12ecac43a076de5d60ce68bd9/lighthouse/lighthouse/util/disassembler/api.py#L191-L195
matrix-org/synapse
8e57584a5859a9002759963eb546d523d2498a01
synapse/http/proxyagent.py
python
http_proxy_endpoint
( proxy: Optional[bytes], reactor: IReactorCore, tls_options_factory: Optional[IPolicyForHTTPS], **kwargs, )
return proxy_endpoint, credentials
Parses an http proxy setting and returns an endpoint for the proxy Args: proxy: the proxy setting in the form: [scheme://][<username>:<password>@]<host>[:<port>] This currently supports http:// and https:// proxies. A hostname without scheme is assumed to be http. reactor: reactor to be used to connect to the proxy tls_options_factory: the TLS options to use when connecting through a https proxy kwargs: other args to be passed to HostnameEndpoint Returns: a tuple of endpoint to use to connect to the proxy, or None ProxyCredentials or if no credentials were found, or None Raise: ValueError if proxy has no hostname or unsupported scheme. RuntimeError if no tls_options_factory is given for a https connection
Parses an http proxy setting and returns an endpoint for the proxy
[ "Parses", "an", "http", "proxy", "setting", "and", "returns", "an", "endpoint", "for", "the", "proxy" ]
def http_proxy_endpoint(
    proxy: Optional[bytes],
    reactor: IReactorCore,
    tls_options_factory: Optional[IPolicyForHTTPS],
    **kwargs,
) -> Tuple[Optional[IStreamClientEndpoint], Optional[ProxyCredentials]]:
    """Parses an http proxy setting and returns an endpoint for the proxy

    Args:
        proxy: the proxy setting in the form: [scheme://][<username>:<password>@]<host>[:<port>]
            This currently supports http:// and https:// proxies.
            A hostname without scheme is assumed to be http.

        reactor: reactor to be used to connect to the proxy

        tls_options_factory: the TLS options to use when connecting through a https proxy

        kwargs: other args to be passed to HostnameEndpoint

    Returns:
        a tuple of
            endpoint to use to connect to the proxy, or None
            ProxyCredentials or if no credentials were found, or None

    Raise:
        ValueError if proxy has no hostname or unsupported scheme.
        RuntimeError if no tls_options_factory is given for a https connection
    """
    if proxy is None:
        return None, None

    # Note: urlsplit/urlparse cannot be used here as that does not work (for Python
    # 3.9+) on scheme-less proxies, e.g. host:port.
    scheme, host, port, credentials = parse_proxy(proxy)

    proxy_endpoint = HostnameEndpoint(reactor, host, port, **kwargs)
    if scheme == b"https":
        if tls_options_factory:
            tls_options = tls_options_factory.creatorForNetloc(host, port)
            proxy_endpoint = wrapClientTLS(tls_options, proxy_endpoint)
        else:
            raise RuntimeError(
                f"No TLS options for a https connection via proxy {proxy!s}"
            )

    return proxy_endpoint, credentials
[ "def", "http_proxy_endpoint", "(", "proxy", ":", "Optional", "[", "bytes", "]", ",", "reactor", ":", "IReactorCore", ",", "tls_options_factory", ":", "Optional", "[", "IPolicyForHTTPS", "]", ",", "*", "*", "kwargs", ",", ")", "->", "Tuple", "[", "Optional", ...
https://github.com/matrix-org/synapse/blob/8e57584a5859a9002759963eb546d523d2498a01/synapse/http/proxyagent.py#L244-L290
rembo10/headphones
b3199605be1ebc83a7a8feab6b1e99b64014187c
lib/html5lib/trie/py.py
python
Trie.__iter__
(self)
return iter(self._data)
[]
def __iter__(self):
    return iter(self._data)
[ "def", "__iter__", "(", "self", ")", ":", "return", "iter", "(", "self", ".", "_data", ")" ]
https://github.com/rembo10/headphones/blob/b3199605be1ebc83a7a8feab6b1e99b64014187c/lib/html5lib/trie/py.py#L25-L26
tanghaibao/goatools
647e9dd833695f688cd16c2f9ea18f1692e5c6bc
goatools/cli/gosubdag_plot.py
python
PlotCli._get_relationships
(kws_all, relationship_in_godag)
Return value for GoSubDag arg, relationships.
Return value for GoSubDag arg, relationships.
[ "Return", "value", "for", "GoSubDag", "arg", "relationships", "." ]
def _get_relationships(kws_all, relationship_in_godag):
    """Return value for GoSubDag arg, relationships."""
    if not relationship_in_godag:
        return None
    if 'relationship' in kws_all:
        return RELATIONSHIP_SET
    if 'relationships' not in kws_all:
        return None
    relationships_arg = kws_all['relationships']
    if isinstance(relationships_arg, str):
        relationships = set(kws_all['relationships'].split(','))
        chk_relationships(relationships)
        return relationships
    if relationships_arg:
        return True
[ "def", "_get_relationships", "(", "kws_all", ",", "relationship_in_godag", ")", ":", "if", "not", "relationship_in_godag", ":", "return", "None", "if", "'relationship'", "in", "kws_all", ":", "return", "RELATIONSHIP_SET", "if", "'relationships'", "not", "in", "kws_a...
https://github.com/tanghaibao/goatools/blob/647e9dd833695f688cd16c2f9ea18f1692e5c6bc/goatools/cli/gosubdag_plot.py#L336-L350
brython-dev/brython
9cba5fb7f43a9b52fff13e89b403e02a1dfaa5f3
www/src/Lib/ntpath.py
python
relpath
(path, start=None)
Return a relative version of a path
Return a relative version of a path
[ "Return", "a", "relative", "version", "of", "a", "path" ]
def relpath(path, start=None):
    """Return a relative version of a path"""
    path = os.fspath(path)
    if isinstance(path, bytes):
        sep = b'\\'
        curdir = b'.'
        pardir = b'..'
    else:
        sep = '\\'
        curdir = '.'
        pardir = '..'

    if start is None:
        start = curdir

    if not path:
        raise ValueError("no path specified")

    start = os.fspath(start)
    try:
        start_abs = abspath(normpath(start))
        path_abs = abspath(normpath(path))
        start_drive, start_rest = splitdrive(start_abs)
        path_drive, path_rest = splitdrive(path_abs)
        if normcase(start_drive) != normcase(path_drive):
            raise ValueError("path is on mount %r, start on mount %r" % (
                path_drive, start_drive))

        start_list = [x for x in start_rest.split(sep) if x]
        path_list = [x for x in path_rest.split(sep) if x]
        # Work out how much of the filepath is shared by start and path.
        i = 0
        for e1, e2 in zip(start_list, path_list):
            if normcase(e1) != normcase(e2):
                break
            i += 1

        rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
        if not rel_list:
            return curdir
        return join(*rel_list)
    except (TypeError, ValueError, AttributeError, BytesWarning, DeprecationWarning):
        genericpath._check_arg_types('relpath', path, start)
        raise
[ "def", "relpath", "(", "path", ",", "start", "=", "None", ")", ":", "path", "=", "os", ".", "fspath", "(", "path", ")", "if", "isinstance", "(", "path", ",", "bytes", ")", ":", "sep", "=", "b'\\\\'", "curdir", "=", "b'.'", "pardir", "=", "b'..'", ...
https://github.com/brython-dev/brython/blob/9cba5fb7f43a9b52fff13e89b403e02a1dfaa5f3/www/src/Lib/ntpath.py#L693-L736
microsoft/ptvsd
99c8513921021d2cc7cd82e132b65c644c256768
src/ptvsd/_vendored/pydevd/pydevd_attach_to_process/winappdbg/crash.py
python
Crash.getNotes
(self)
return self.notes
Get the list of notes of this crash event. @rtype: list( str ) @return: List of notes.
Get the list of notes of this crash event.
[ "Get", "the", "list", "of", "notes", "of", "this", "crash", "event", "." ]
def getNotes(self):
    """
    Get the list of notes of this crash event.

    @rtype:  list( str )
    @return: List of notes.
    """
    return self.notes
[ "def", "getNotes", "(", "self", ")", ":", "return", "self", ".", "notes" ]
https://github.com/microsoft/ptvsd/blob/99c8513921021d2cc7cd82e132b65c644c256768/src/ptvsd/_vendored/pydevd/pydevd_attach_to_process/winappdbg/crash.py#L1099-L1106
Anaconda-Platform/anaconda-project
df5ec33c12591e6512436d38d36c6132fa2e9618
anaconda_project/internal/conda_api.py
python
_contains_conda_meta
(path)
return os.path.isdir(conda_meta)
[]
def _contains_conda_meta(path):
    conda_meta = os.path.join(path, "conda-meta")
    return os.path.isdir(conda_meta)
[ "def", "_contains_conda_meta", "(", "path", ")", ":", "conda_meta", "=", "os", ".", "path", ".", "join", "(", "path", ",", "\"conda-meta\"", ")", "return", "os", ".", "path", ".", "isdir", "(", "conda_meta", ")" ]
https://github.com/Anaconda-Platform/anaconda-project/blob/df5ec33c12591e6512436d38d36c6132fa2e9618/anaconda_project/internal/conda_api.py#L408-L410
ideoforms/pylive
87a4f2a384668b6f3e9475a6bc3c3d63b4734f5b
live/set.py
python
Set.get_track_names
(self, offset=None, count=None)
Return all track names. If offset and count are given, return names within this range.
Return all track names. If offset and count are given, return names within this range.
[ "Return", "all", "track", "names", ".", "If", "offset", "and", "count", "are", "given", "return", "names", "within", "this", "range", "." ]
def get_track_names(self, offset=None, count=None):
    """ Return all track names. If offset and count are given,
    return names within this range. """
    if count is None:
        rv = self.live.query("/live/name/track")
        rv = [ rv[a] for a in range(1, len(rv), 2) ]
        return rv
    else:
        # /live/name/trackblock does not return indices, just names.
        rv = self.live.query("/live/name/trackblock", offset, count)
        return rv
[ "def", "get_track_names", "(", "self", ",", "offset", "=", "None", ",", "count", "=", "None", ")", ":", "if", "count", "is", "None", ":", "rv", "=", "self", ".", "live", ".", "query", "(", "\"/live/name/track\"", ")", "rv", "=", "[", "rv", "[", "a"...
https://github.com/ideoforms/pylive/blob/87a4f2a384668b6f3e9475a6bc3c3d63b4734f5b/live/set.py#L375-L385
emposha/Shell-Detector
5ac8ab2bf514bea737ddff16a75d85d887478f85
shelldetect.py
python
ShellDetector.fingerprint
(self, _filename, _content)
[]
def fingerprint(self, _filename, _content):
    for _regex, shellname in self._get_precomputed_fingerprints():
        _match = _regex.findall(base64.b64encode(_content))
        if _match:
            self._badfiles.append([_filename])
            _regex_shell = re.compile('^(.+?)\[(.+?)\]\[(.+?)\]\[(.+?)\]')
            _match_shell = list(_regex_shell.findall(shellname)[0])
            _shell_note = ''
            if _match_shell[2] == 1:
                _shell_note = 'please note it`s a malicious file not a shell'
            elif _match_shell[2] == 2:
                _shell_note = 'please note potentially dangerous file (legit file but may be used by hackers)'
            _shellflag = _match_shell[0] + '(' + _match_shell[3] + ')'
            self.alert(' Fingerprint: Positive, it`s a ' + str(_shellflag) + ' ' + _shell_note, 'red')
[ "def", "fingerprint", "(", "self", ",", "_filename", ",", "_content", ")", ":", "for", "_regex", ",", "shellname", "in", "self", ".", "_get_precomputed_fingerprints", "(", ")", ":", "_match", "=", "_regex", ".", "findall", "(", "base64", ".", "b64encode", ...
https://github.com/emposha/Shell-Detector/blob/5ac8ab2bf514bea737ddff16a75d85d887478f85/shelldetect.py#L195-L208
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/parallel/map_reduce.py
python
RESetMapReduce._get_stats
(self)
r""" Gather the communication statistics at the end of a run. EXAMPLES:: sage: from sage.parallel.map_reduce import RESetMPExample sage: S = RESetMPExample(maxl=6) sage: S.run() # indirect doctest 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
r""" Gather the communication statistics at the end of a run.
[ "r", "Gather", "the", "communication", "statistics", "at", "the", "end", "of", "a", "run", "." ]
def _get_stats(self):
    r"""
    Gather the communication statistics at the end of a run.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetMPExample
        sage: S = RESetMPExample(maxl=6)
        sage: S.run()  # indirect doctest
        720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
    """
    res = []
    for i in range(self._nprocess):
        res.append(tuple(self._workers[i]._stats))
    self._stats = res
[ "def", "_get_stats", "(", "self", ")", ":", "res", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "_nprocess", ")", ":", "res", ".", "append", "(", "tuple", "(", "self", ".", "_workers", "[", "i", "]", ".", "_stats", ")", ")", "sel...
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/parallel/map_reduce.py#L1487-L1501
matrix-org/synapse
8e57584a5859a9002759963eb546d523d2498a01
synapse/groups/groups_server.py
python
GroupsServerWorkerHandler.get_group_profile
( self, group_id: str, requester_user_id: str )
Get the group profile as seen by requester_user_id
Get the group profile as seen by requester_user_id
[ "Get", "the", "group", "profile", "as", "seen", "by", "requester_user_id" ]
async def get_group_profile(
    self, group_id: str, requester_user_id: str
) -> JsonDict:
    """Get the group profile as seen by requester_user_id"""
    await self.check_group_is_ours(group_id, requester_user_id)

    group = await self.store.get_group(group_id)

    if group:
        cols = [
            "name",
            "short_description",
            "long_description",
            "avatar_url",
            "is_public",
        ]
        group_description = {key: group[key] for key in cols}
        group_description["is_openly_joinable"] = group["join_policy"] == "open"

        return group_description
    else:
        raise SynapseError(404, "Unknown group")
[ "async", "def", "get_group_profile", "(", "self", ",", "group_id", ":", "str", ",", "requester_user_id", ":", "str", ")", "->", "JsonDict", ":", "await", "self", ".", "check_group_is_ours", "(", "group_id", ",", "requester_user_id", ")", "group", "=", "await",...
https://github.com/matrix-org/synapse/blob/8e57584a5859a9002759963eb546d523d2498a01/synapse/groups/groups_server.py#L214-L236
gem/oq-engine
1bdb88f3914e390abcbd285600bfd39477aae47c
openquake/hazardlib/source/base.py
python
ParametricSeismicSource.modify_set_msr
(self, new_msr)
Updates the MSR originally assigned to the source :param new_msr: An instance of the :class:`openquake.hazardlib.scalerel.BaseMSR`
Updates the MSR originally assigned to the source
[ "Updates", "the", "MSR", "originally", "assigned", "to", "the", "source" ]
def modify_set_msr(self, new_msr):
    """
    Updates the MSR originally assigned to the source

    :param new_msr:
        An instance of the :class:`openquake.hazardlib.scalerel.BaseMSR`
    """
    self.magnitude_scaling_relationship = new_msr
[ "def", "modify_set_msr", "(", "self", ",", "new_msr", ")", ":", "self", ".", "magnitude_scaling_relationship", "=", "new_msr" ]
https://github.com/gem/oq-engine/blob/1bdb88f3914e390abcbd285600bfd39477aae47c/openquake/hazardlib/source/base.py#L363-L370
CedricGuillemet/Imogen
ee417b42747ed5b46cb11b02ef0c3630000085b3
bin/Lib/email/headerregistry.py
python
HeaderRegistry.map_to_type
(self, name, cls)
Register cls as the specialized class for handling "name" headers.
Register cls as the specialized class for handling "name" headers.
[ "Register", "cls", "as", "the", "specialized", "class", "for", "handling", "name", "headers", "." ]
def map_to_type(self, name, cls):
    """Register cls as the specialized class for handling "name" headers.

    """
    self.registry[name.lower()] = cls
[ "def", "map_to_type", "(", "self", ",", "name", ",", "cls", ")", ":", "self", ".", "registry", "[", "name", ".", "lower", "(", ")", "]", "=", "cls" ]
https://github.com/CedricGuillemet/Imogen/blob/ee417b42747ed5b46cb11b02ef0c3630000085b3/bin/Lib/email/headerregistry.py#L569-L573
mstamy2/PyPDF2
18a2627adac13124d4122c8b92aaa863ccfb8c29
PyPDF2/pdf.py
python
PdfFileWriter.removeText
(self, ignoreByteStringObject=False)
Removes images from this output. :param bool ignoreByteStringObject: optional parameter to ignore ByteString Objects.
Removes images from this output.
[ "Removes", "images", "from", "this", "output", "." ]
def removeText(self, ignoreByteStringObject=False):
    """
    Removes images from this output.

    :param bool ignoreByteStringObject: optional parameter
        to ignore ByteString Objects.
    """
    pages = self.getObject(self._pages)['/Kids']
    for j in range(len(pages)):
        page = pages[j]
        pageRef = self.getObject(page)
        content = pageRef['/Contents'].getObject()
        if not isinstance(content, ContentStream):
            content = ContentStream(content, pageRef)
        for operands, operator in content.operations:
            if operator == b_('Tj'):
                text = operands[0]
                if not ignoreByteStringObject:
                    if isinstance(text, TextStringObject):
                        operands[0] = TextStringObject()
                else:
                    if isinstance(text, TextStringObject) or \
                            isinstance(text, ByteStringObject):
                        operands[0] = TextStringObject()
            elif operator == b_("'"):
                text = operands[0]
                if not ignoreByteStringObject:
                    if isinstance(text, TextStringObject):
                        operands[0] = TextStringObject()
                else:
                    if isinstance(text, TextStringObject) or \
                            isinstance(text, ByteStringObject):
                        operands[0] = TextStringObject()
            elif operator == b_('"'):
                text = operands[2]
                if not ignoreByteStringObject:
                    if isinstance(text, TextStringObject):
                        operands[2] = TextStringObject()
                else:
                    if isinstance(text, TextStringObject) or \
                            isinstance(text, ByteStringObject):
                        operands[2] = TextStringObject()
            elif operator == b_("TJ"):
                for i in range(len(operands[0])):
                    if not ignoreByteStringObject:
                        if isinstance(operands[0][i], TextStringObject):
                            operands[0][i] = TextStringObject()
                    else:
                        if isinstance(operands[0][i], TextStringObject) or \
                                isinstance(operands[0][i], ByteStringObject):
                            operands[0][i] = TextStringObject()

        pageRef.__setitem__(NameObject('/Contents'), content)
[ "def", "removeText", "(", "self", ",", "ignoreByteStringObject", "=", "False", ")", ":", "pages", "=", "self", ".", "getObject", "(", "self", ".", "_pages", ")", "[", "'/Kids'", "]", "for", "j", "in", "range", "(", "len", "(", "pages", ")", ")", ":",...
https://github.com/mstamy2/PyPDF2/blob/18a2627adac13124d4122c8b92aaa863ccfb8c29/PyPDF2/pdf.py#L845-L897
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/tkinter/__init__.py
python
Text.edit_redo
(self)
return self.edit("redo")
Redo the last undone edit When the undo option is true, reapplies the last undone edits provided no other edits were done since then. Generates an error when the redo stack is empty. Does nothing when the undo option is false.
Redo the last undone edit
[ "Redo", "the", "last", "undone", "edit" ]
def edit_redo(self):
    """Redo the last undone edit

    When the undo option is true, reapplies the last undone edits
    provided no other edits were done since then. Generates an error
    when the redo stack is empty. Does nothing when the undo option
    is false.
    """
    return self.edit("redo")
[ "def", "edit_redo", "(", "self", ")", ":", "return", "self", ".", "edit", "(", "\"redo\"", ")" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/tkinter/__init__.py#L3211-L3219
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
src/transformers/models/hubert/modeling_hubert.py
python
HubertEncoderLayer.forward
(self, hidden_states, attention_mask=None, output_attentions=False)
return outputs
[]
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
    attn_residual = hidden_states
    hidden_states, attn_weights, _ = self.attention(
        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
    )
    hidden_states = self.dropout(hidden_states)
    hidden_states = attn_residual + hidden_states

    hidden_states = self.layer_norm(hidden_states)
    hidden_states = hidden_states + self.feed_forward(hidden_states)
    hidden_states = self.final_layer_norm(hidden_states)

    outputs = (hidden_states,)

    if output_attentions:
        outputs += (attn_weights,)

    return outputs
[ "def", "forward", "(", "self", ",", "hidden_states", ",", "attention_mask", "=", "None", ",", "output_attentions", "=", "False", ")", ":", "attn_residual", "=", "hidden_states", "hidden_states", ",", "attn_weights", ",", "_", "=", "self", ".", "attention", "("...
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/models/hubert/modeling_hubert.py#L554-L571
IronLanguages/main
a949455434b1fda8c783289e897e78a9a0caabb5
External.LCA_RESTRICTED/Languages/IronPython/repackage/pip/pip/_vendor/distlib/_backport/sysconfig.py
python
parse_config_h
(fp, vars=None)
return vars
Parse a config.h-style file. A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary.
Parse a config.h-style file.
[ "Parse", "a", "config", ".", "h", "-", "style", "file", "." ]
def parse_config_h(fp, vars=None):
    """Parse a config.h-style file.

    A dictionary containing name/value pairs is returned.  If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.
    """
    if vars is None:
        vars = {}
    define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
    undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")

    while True:
        line = fp.readline()
        if not line:
            break
        m = define_rx.match(line)
        if m:
            n, v = m.group(1, 2)
            try:
                v = int(v)
            except ValueError:
                pass
            vars[n] = v
        else:
            m = undef_rx.match(line)
            if m:
                vars[m.group(1)] = 0
    return vars
[ "def", "parse_config_h", "(", "fp", ",", "vars", "=", "None", ")", ":", "if", "vars", "is", "None", ":", "vars", "=", "{", "}", "define_rx", "=", "re", ".", "compile", "(", "\"#define ([A-Z][A-Za-z0-9_]+) (.*)\\n\"", ")", "undef_rx", "=", "re", ".", "com...
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/IronPython/repackage/pip/pip/_vendor/distlib/_backport/sysconfig.py#L388-L416
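The record above is distlib's backport of a standard-library helper; the same parser exists as sysconfig.parse_config_h, so a small usage sketch can run against the interpreter's own config.h on CPython builds that ship one.

# Usage sketch: parse the running interpreter's config.h with the stdlib
# equivalent of the backported function in the record above.
import sysconfig

with open(sysconfig.get_config_h_filename()) as fp:
    config_vars = sysconfig.parse_config_h(fp)

print(config_vars.get("SIZEOF_LONG"))  # e.g. 8 on most 64-bit builds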
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
custom/inddex/reports/r2b_gaps_detail.py
python
GapsDetailsData.rows
(self)
[]
def rows(self):
    for row in sorted(self._gaps_data,
                      key=lambda row: (row['food_name'], row['gap_type'], row['gap_code'])):
        yield format_row([row[header] for header in self.headers])
[ "def", "rows", "(", "self", ")", ":", "for", "row", "in", "sorted", "(", "self", ".", "_gaps_data", ",", "key", "=", "lambda", "row", ":", "(", "row", "[", "'food_name'", "]", ",", "row", "[", "'gap_type'", "]", ",", "row", "[", "'gap_code'", "]", ...
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/custom/inddex/reports/r2b_gaps_detail.py#L135-L137
ales-tsurko/cells
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
packaging/macos/python/lib/python3.7/posixpath.py
python
expanduser
(path)
return (userhome + path[i:]) or root
Expand ~ and ~user constructions. If user or $HOME is unknown, do nothing.
Expand ~ and ~user constructions. If user or $HOME is unknown, do nothing.
[ "Expand", "~", "and", "~user", "constructions", ".", "If", "user", "or", "$HOME", "is", "unknown", "do", "nothing", "." ]
def expanduser(path):
    """Expand ~ and ~user constructions.  If user or $HOME is unknown,
    do nothing."""
    path = os.fspath(path)
    if isinstance(path, bytes):
        tilde = b'~'
    else:
        tilde = '~'
    if not path.startswith(tilde):
        return path
    sep = _get_sep(path)
    i = path.find(sep, 1)
    if i < 0:
        i = len(path)
    if i == 1:
        if 'HOME' not in os.environ:
            import pwd
            try:
                userhome = pwd.getpwuid(os.getuid()).pw_dir
            except KeyError:
                # bpo-10496: if the current user identifier doesn't exist in the
                # password database, return the path unchanged
                return path
        else:
            userhome = os.environ['HOME']
    else:
        import pwd
        name = path[1:i]
        if isinstance(name, bytes):
            name = str(name, 'ASCII')
        try:
            pwent = pwd.getpwnam(name)
        except KeyError:
            # bpo-10496: if the user name from the path doesn't exist in the
            # password database, return the path unchanged
            return path
        userhome = pwent.pw_dir
    if isinstance(path, bytes):
        userhome = os.fsencode(userhome)
        root = b'/'
    else:
        root = '/'
    userhome = userhome.rstrip(root)
    return (userhome + path[i:]) or root
[ "def", "expanduser", "(", "path", ")", ":", "path", "=", "os", ".", "fspath", "(", "path", ")", "if", "isinstance", "(", "path", ",", "bytes", ")", ":", "tilde", "=", "b'~'", "else", ":", "tilde", "=", "'~'", "if", "not", "path", ".", "startswith",...
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/posixpath.py#L232-L275
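The record above is the POSIX implementation behind os.path.expanduser, so the public call is enough for a quick check; the example home directory in the comment is illustrative only.

# Usage sketch for the public entry point wrapping the record above.
import os.path

print(os.path.expanduser("~/projects"))   # e.g. /home/alice/projects
print(os.path.expanduser("~nosuchuser"))  # unknown user: returned unchanged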
anson0910/CNN_face_detection
62cf7ea5a737b758095a9b7e7a07760cb4f306df
face_net_surgery/list_full_conv_blobs_ranges.py
python
cal_face_12c
(net_12_cal, caffe_img, rectangles)
return result
:param caffe_image: image in caffe style to detect faces :param rectangles: rectangles in form [x11, y11, x12, y12, confidence, current_scale] :return: rectangles after calibration
:param caffe_image: image in caffe style to detect faces :param rectangles: rectangles in form [x11, y11, x12, y12, confidence, current_scale] :return: rectangles after calibration
[ ":", "param", "caffe_image", ":", "image", "in", "caffe", "style", "to", "detect", "faces", ":", "param", "rectangles", ":", "rectangles", "in", "form", "[", "x11", "y11", "x12", "y12", "confidence", "current_scale", "]", ":", "return", ":", "rectangles", ...
def cal_face_12c(net_12_cal, caffe_img, rectangles):
    '''
    :param caffe_image: image in caffe style to detect faces
    :param rectangles:  rectangles in form [x11, y11, x12, y12, confidence, current_scale]
    :return:            rectangles after calibration
    '''
    height, width, channels = caffe_img.shape
    result = []
    all_cropped_caffe_img = []

    for cur_rectangle in rectangles:
        original_x1 = cur_rectangle[0]
        original_y1 = cur_rectangle[1]
        original_x2 = cur_rectangle[2]
        original_y2 = cur_rectangle[3]

        cropped_caffe_img = caffe_img[original_y1:original_y2, original_x1:original_x2]  # crop image
        all_cropped_caffe_img.append(cropped_caffe_img)

    if len(all_cropped_caffe_img) == 0:
        return []

    output_all = net_12_cal.predict(all_cropped_caffe_img)  # predict through caffe

    for k, v in net_12_cal.blobs.items():
        if k == 'data':
            if v.data.max() > net12cal_range[0]:
                net12cal_range[0] = v.data.max()
            if v.data.min() < net12cal_range[1]:
                net12cal_range[1] = v.data.min()
        elif k == 'conv1':
            if v.data.max() > net12cal_range[2]:
                net12cal_range[2] = v.data.max()
            if v.data.min() < net12cal_range[3]:
                net12cal_range[3] = v.data.min()
        elif k == 'pool1':
            if v.data.max() > net12cal_range[4]:
                net12cal_range[4] = v.data.max()
            if v.data.min() < net12cal_range[5]:
                net12cal_range[5] = v.data.min()
        elif k == 'fc2':
            if v.data.max() > net12cal_range[6]:
                net12cal_range[6] = v.data.max()
            if v.data.min() < net12cal_range[7]:
                net12cal_range[7] = v.data.min()
        elif k == 'fc3':
            if v.data.max() > net12cal_range[8]:
                net12cal_range[8] = v.data.max()
            if v.data.min() < net12cal_range[9]:
                net12cal_range[9] = v.data.min()
        elif k == 'prob':
            if v.data.max() > net12cal_range[10]:
                net12cal_range[10] = v.data.max()
            if v.data.min() < net12cal_range[11]:
                net12cal_range[11] = v.data.min()

    for cur_rect in range(len(rectangles)):
        cur_rectangle = rectangles[cur_rect]
        output = output_all[cur_rect]
        prediction = output[0]  # (44, 1) ndarray

        threshold = 0.1
        indices = np.nonzero(prediction > threshold)[0]  # ndarray of indices where prediction is larger than threshold

        number_of_cals = len(indices)  # number of calibrations larger than threshold

        if number_of_cals == 0:  # if no calibration is needed, check next rectangle
            result.append(cur_rectangle)
            continue

        original_x1 = cur_rectangle[0]
        original_y1 = cur_rectangle[1]
        original_x2 = cur_rectangle[2]
        original_y2 = cur_rectangle[3]
        original_w = original_x2 - original_x1
        original_h = original_y2 - original_y1

        total_s_change = 0
        total_x_change = 0
        total_y_change = 0

        for current_cal in range(number_of_cals):  # accumulate changes, and calculate average
            cal_label = int(indices[current_cal])  # should be number in 0~44
            if (cal_label >= 0) and (cal_label <= 8):  # decide s change
                total_s_change += 0.83
            elif (cal_label >= 9) and (cal_label <= 17):
                total_s_change += 0.91
            elif (cal_label >= 18) and (cal_label <= 26):
                total_s_change += 1.0
            elif (cal_label >= 27) and (cal_label <= 35):
                total_s_change += 1.10
            else:
                total_s_change += 1.21

            if cal_label % 9 <= 2:  # decide x change
                total_x_change += -0.17
            elif (cal_label % 9 >= 6) and (cal_label % 9 <= 8):  # ignore case when 3<=x<=5, since adding 0 doesn't change
                total_x_change += 0.17

            if cal_label % 3 == 0:  # decide y change
                total_y_change += -0.17
            elif cal_label % 3 == 2:  # ignore case when 1, since adding 0 doesn't change
                total_y_change += 0.17

        s_change = total_s_change / number_of_cals  # calculate average
        x_change = total_x_change / number_of_cals
        y_change = total_y_change / number_of_cals

        cur_result = cur_rectangle  # inherit format and last two attributes from original rectangle
        cur_result[0] = int(max(0, original_x1 - original_w * x_change / s_change))
        cur_result[1] = int(max(0, original_y1 - original_h * y_change / s_change))
        cur_result[2] = int(min(width, cur_result[0] + original_w / s_change))
        cur_result[3] = int(min(height, cur_result[1] + original_h / s_change))

        result.append(cur_result)

    result = sorted(result, key=itemgetter(4), reverse=True)  # sort rectangles according to confidence
    # reverse, so that it ranks from large to small
    return result
[ "def", "cal_face_12c", "(", "net_12_cal", ",", "caffe_img", ",", "rectangles", ")", ":", "height", ",", "width", ",", "channels", "=", "caffe_img", ".", "shape", "result", "=", "[", "]", "all_cropped_caffe_img", "=", "[", "]", "for", "cur_rectangle", "in", ...
https://github.com/anson0910/CNN_face_detection/blob/62cf7ea5a737b758095a9b7e7a07760cb4f306df/face_net_surgery/list_full_conv_blobs_ranges.py#L262-L381
markmckinnon/Autopsy-Plugins
99f8a485bda437eb0508f35fcf843c303bd367c4
Executable Programs For Plugins/export_jl_ad.exe/JL_App_Ids.py
python
JL_App_Ids.__init__
(self)
Initializes the database file object.
Initializes the database file object.
[ "Initializes", "the", "database", "file", "object", "." ]
def __init__(self):
    """Initializes the database file object."""
    super(JL_App_Ids, self).__init__()
    self._connection = None
    self._cursor = None
    self.filename = 'Jump_List_App_Ids.db3'
    self.read_only = None
[ "def", "__init__", "(", "self", ")", ":", "super", "(", "JL_App_Ids", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "_connection", "=", "None", "self", ".", "_cursor", "=", "None", "self", ".", "filename", "=", "'Jump_List_App_Ids.db3'", "sel...
https://github.com/markmckinnon/Autopsy-Plugins/blob/99f8a485bda437eb0508f35fcf843c303bd367c4/Executable Programs For Plugins/export_jl_ad.exe/JL_App_Ids.py#L8-L14
pyansys/pymapdl
c07291fc062b359abf0e92b95a92d753a95ef3d7
ansys/mapdl/core/_commands/post1_/path_operations.py
python
PathOperations.paresu
(self, lab="", fname="", ext="", **kwargs)
return self.run(command, **kwargs)
Restores previously saved paths from a file. APDL Command: PARESU Parameters ---------- lab Read operation: S - Saves only selected paths. ALL - Read all paths from the selected file (default). Pname - Saves the named path (from the PSEL command). fname File name and directory path (248 characters maximum, including the characters needed for the directory path). An unspecified directory path defaults to the working directory; in this case, you can use all 248 characters for the file name. The file name defaults to Jobname. ext Filename extension (eight-character maximum). The extension defaults to PATH if Fname is blank. Notes ----- This command removes all paths from virtual memory and then reads path data from a file written with the PASAVE command. All paths on the file will be restored. All paths currently in memory will be deleted.
Restores previously saved paths from a file.
[ "Restores", "previously", "saved", "paths", "from", "a", "file", "." ]
def paresu(self, lab="", fname="", ext="", **kwargs):
    """Restores previously saved paths from a file.

    APDL Command: PARESU

    Parameters
    ----------
    lab
        Read operation:

        S - Saves only selected paths.

        ALL - Read all paths from the selected file (default).

        Pname - Saves the named path (from the PSEL command).

    fname
        File name and directory path (248 characters maximum, including
        the characters needed for the directory path).  An unspecified
        directory path defaults to the working directory; in this case,
        you can use all 248 characters for the file name.

        The file name defaults to Jobname.

    ext
        Filename extension (eight-character maximum).  The extension
        defaults to PATH if Fname is blank.

    Notes
    -----
    This command removes all paths from virtual memory and then reads
    path data from a file written with the PASAVE command.  All paths
    on the file will be restored.  All paths currently in memory will
    be deleted.
    """
    command = f"PARESU,{lab},{fname},{ext}"
    return self.run(command, **kwargs)
[ "def", "paresu", "(", "self", ",", "lab", "=", "\"\"", ",", "fname", "=", "\"\"", ",", "ext", "=", "\"\"", ",", "*", "*", "kwargs", ")", ":", "command", "=", "f\"PARESU,{lab},{fname},{ext}\"", "return", "self", ".", "run", "(", "command", ",", "*", "...
https://github.com/pyansys/pymapdl/blob/c07291fc062b359abf0e92b95a92d753a95ef3d7/ansys/mapdl/core/_commands/post1_/path_operations.py#L94-L131
ProgVal/Limnoria
181e34baf90a8cabc281e8349da6e36e1e558608
plugins/Channel/plugin.py
python
Channel.op
(self, irc, msg, args, channel, nicks)
[<channel>] [<nick> ...] If you have the #channel,op capability, this will give all the <nick>s you provide ops. If you don't provide any <nick>s, this will op you. <channel> is only necessary if the message isn't sent in the channel itself.
[<channel>] [<nick> ...]
[ "[", "<channel", ">", "]", "[", "<nick", ">", "...", "]" ]
def op(self, irc, msg, args, channel, nicks):
    """[<channel>] [<nick> ...]

    If you have the #channel,op capability, this will give all the
    <nick>s you provide ops.  If you don't provide any <nick>s, this
    will op you. <channel> is only necessary if the message isn't sent
    in the channel itself.
    """
    if not nicks:
        nicks = [msg.nick]
    def f(L):
        return ircmsgs.ops(channel, L)
    self._sendMsgs(irc, nicks, f)
[ "def", "op", "(", "self", ",", "irc", ",", "msg", ",", "args", ",", "channel", ",", "nicks", ")", ":", "if", "not", "nicks", ":", "nicks", "=", "[", "msg", ".", "nick", "]", "def", "f", "(", "L", ")", ":", "return", "ircmsgs", ".", "ops", "("...
https://github.com/ProgVal/Limnoria/blob/181e34baf90a8cabc281e8349da6e36e1e558608/plugins/Channel/plugin.py#L159-L171
ales-tsurko/cells
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
packaging/macos/python/lib/python3.7/asyncio/sslproto.py
python
_SSLProtocolTransport.pause_reading
(self)
Pause the receiving end. No data will be passed to the protocol's data_received() method until resume_reading() is called.
Pause the receiving end.
[ "Pause", "the", "receiving", "end", "." ]
def pause_reading(self):
    """Pause the receiving end.

    No data will be passed to the protocol's data_received()
    method until resume_reading() is called.
    """
    self._ssl_protocol._transport.pause_reading()
[ "def", "pause_reading", "(", "self", ")", ":", "self", ".", "_ssl_protocol", ".", "_transport", ".", "pause_reading", "(", ")" ]
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/asyncio/sslproto.py#L331-L337
gkrizek/bash-lambda-layer
703b0ade8174022d44779d823172ab7ac33a5505
bin/docutils/nodes.py
python
Element.update_basic_atts
(self, dict_)
Update basic attributes ('ids', 'names', 'classes', 'dupnames', but not 'source') from node or dictionary `dict_`.
Update basic attributes ('ids', 'names', 'classes', 'dupnames', but not 'source') from node or dictionary `dict_`.
[ "Update", "basic", "attributes", "(", "ids", "names", "classes", "dupnames", "but", "not", "source", ")", "from", "node", "or", "dictionary", "dict_", "." ]
def update_basic_atts(self, dict_):
    """
    Update basic attributes ('ids', 'names', 'classes',
    'dupnames', but not 'source') from node or dictionary `dict_`.
    """
    if isinstance(dict_, Node):
        dict_ = dict_.attributes
    for att in self.basic_attributes:
        self.append_attr_list(att, dict_.get(att, []))
[ "def", "update_basic_atts", "(", "self", ",", "dict_", ")", ":", "if", "isinstance", "(", "dict_", ",", "Node", ")", ":", "dict_", "=", "dict_", ".", "attributes", "for", "att", "in", "self", ".", "basic_attributes", ":", "self", ".", "append_attr_list", ...
https://github.com/gkrizek/bash-lambda-layer/blob/703b0ade8174022d44779d823172ab7ac33a5505/bin/docutils/nodes.py#L697-L705
Theano/Theano
8fd9203edfeecebced9344b0c70193be292a9ade
theano/gof/unify.py
python
unify_walk
(fv, v, U)
return U.merge(v, fv)
Both variables are unified.
Both variables are unified.
[ "Both", "variables", "are", "unified", "." ]
def unify_walk(fv, v, U):
    """
    Both variables are unified.
    """
    v = U[v]
    return U.merge(v, fv)
[ "def", "unify_walk", "(", "fv", ",", "v", ",", "U", ")", ":", "v", "=", "U", "[", "v", "]", "return", "U", ".", "merge", "(", "v", ",", "fv", ")" ]
https://github.com/Theano/Theano/blob/8fd9203edfeecebced9344b0c70193be292a9ade/theano/gof/unify.py#L278-L284
aliles/begins
23bc0d802198831375f4286a7e22377a1a1bf952
docs/examples/multiple_subcommands.py
python
quest
(answer)
What is your quest?
What is your quest?
[ "What", "is", "your", "quest?" ]
def quest(answer):
    "What is your quest?"
    print(answer)
[ "def", "quest", "(", "answer", ")", ":", "print", "(", "answer", ")" ]
https://github.com/aliles/begins/blob/23bc0d802198831375f4286a7e22377a1a1bf952/docs/examples/multiple_subcommands.py#L10-L12
djblets/djblets
0496e1ec49e43d43d776768c9fc5b6f8af56ec2c
djblets/extensions/settings.py
python
ExtensionSettings.set
(self, key, value)
Set a setting's value. This is equivalent to setting the value through standard dictionary attribute storage. Args: key (unicode): The key to set. value (object): The value for the setting.
Set a setting's value.
[ "Set", "a", "setting", "s", "value", "." ]
def set(self, key, value):
    """Set a setting's value.

    This is equivalent to setting the value through standard dictionary
    attribute storage.

    Args:
        key (unicode):
            The key to set.

        value (object):
            The value for the setting.
    """
    self[key] = value
[ "def", "set", "(", "self", ",", "key", ",", "value", ")", ":", "self", "[", "key", "]", "=", "value" ]
https://github.com/djblets/djblets/blob/0496e1ec49e43d43d776768c9fc5b6f8af56ec2c/djblets/extensions/settings.py#L136-L149
wistbean/learn_python3_spider
73c873f4845f4385f097e5057407d03dd37a117b
stackoverflow/venv/lib/python3.6/site-packages/redis/client.py
python
Redis.hlen
(self, name)
return self.execute_command('HLEN', name)
Return the number of elements in hash ``name``
Return the number of elements in hash ``name``
[ "Return", "the", "number", "of", "elements", "in", "hash", "name" ]
def hlen(self, name):
    "Return the number of elements in hash ``name``"
    return self.execute_command('HLEN', name)
[ "def", "hlen", "(", "self", ",", "name", ")", ":", "return", "self", ".", "execute_command", "(", "'HLEN'", ",", "name", ")" ]
https://github.com/wistbean/learn_python3_spider/blob/73c873f4845f4385f097e5057407d03dd37a117b/stackoverflow/venv/lib/python3.6/site-packages/redis/client.py#L2733-L2735
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/google/appengine/ext/mapreduce/model.py
python
CountersMap.to_json
(self)
return {"counters": self.counters}
Serializes all the data in this map into json form. Returns: json-compatible data representation.
Serializes all the data in this map into json form.
[ "Serializes", "all", "the", "data", "in", "this", "map", "into", "json", "form", "." ]
def to_json(self):
    """Serializes all the data in this map into json form.

    Returns:
      json-compatible data representation.
    """
    return {"counters": self.counters}
[ "def", "to_json", "(", "self", ")", ":", "return", "{", "\"counters\"", ":", "self", ".", "counters", "}" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/appengine/ext/mapreduce/model.py#L328-L334
marl/jams
edea1c4e4d5751cb10ca4379802fd898dc025d01
jams/core.py
python
Annotation.to_html
(self, max_rows=None)
return out
Render this annotation list in HTML Returns ------- rendered : str An HTML table containing this annotation's data.
Render this annotation list in HTML
[ "Render", "this", "annotation", "list", "in", "HTML" ]
def to_html(self, max_rows=None):
    '''Render this annotation list in HTML

    Returns
    -------
    rendered : str
        An HTML table containing this annotation's data.
    '''
    n = len(self.data)

    div_id = _get_divid(self)

    out = r''' <div class="panel panel-default">
        <div class="panel-heading" role="tab" id="heading-{0}">
            <button
                type="button"
                data-toggle="collapse"
                data-parent="#accordion"
                href="#{0}"
                aria-expanded="false"
                class="collapsed btn btn-info btn-block"
                aria-controls="{0}">
                {1:s}
                <span class="badge pull-right">{2:d}</span>
            </button>
        </div>'''.format(div_id, self.namespace, n)

    out += r''' <div id="{0}" class="panel-collapse collapse" role="tabpanel"
            aria-labelledby="heading-{0}">
            <div class="panel-body">'''.format(div_id)

    out += r'''<div class="pull-right">
                {}
            </div>'''.format(self.annotation_metadata._repr_html_())
    out += r'''<div class="pull-right clearfix">
                {}
            </div>'''.format(self.sandbox._repr_html_())

    # -- Annotation content starts here
    out += r'''<div><table border="1" class="dataframe">
        <thead>
            <tr style="text-align: right;">
                <th></th>
                <th>time</th>
                <th>duration</th>
                <th>value</th>
                <th>confidence</th>
            </tr>
        </thead>'''.format(self.namespace, n)

    out += r'''<tbody>'''

    if max_rows is None or n <= max_rows:
        out += self._fmt_rows(0, n)
    else:
        out += self._fmt_rows(0, max_rows//2)
        out += r'''<tr>
                <th>...</th>
                <td>...</td>
                <td>...</td>
                <td>...</td>
                <td>...</td>
            </tr>'''
        out += self._fmt_rows(n-max_rows//2, n)

    out += r'''</tbody>'''

    out += r'''</table></div>'''

    out += r'''</div></div></div>'''
    return out
[ "def", "to_html", "(", "self", ",", "max_rows", "=", "None", ")", ":", "n", "=", "len", "(", "self", ".", "data", ")", "div_id", "=", "_get_divid", "(", "self", ")", "out", "=", "r''' <div class=\"panel panel-default\">\n <div class=\"panel...
https://github.com/marl/jams/blob/edea1c4e4d5751cb10ca4379802fd898dc025d01/jams/core.py#L1168-L1238
pwndbg/pwndbg
136b3b6a80d94f494dcb00a614af1c24ca706700
pwndbg/arguments.py
python
argument
(n, abi=None)
return int(pwndbg.memory.poi(pwndbg.typeinfo.ppvoid, sp))
Returns the nth argument, as if $pc were a 'call' or 'bl' type instruction. Works only for ABIs that use registers for arguments.
Returns the nth argument, as if $pc were a 'call' or 'bl' type instruction. Works only for ABIs that use registers for arguments.
[ "Returns", "the", "nth", "argument", "as", "if", "$pc", "were", "a", "call", "or", "bl", "type", "instruction", ".", "Works", "only", "for", "ABIs", "that", "use", "registers", "for", "arguments", "." ]
def argument(n, abi=None):
    """
    Returns the nth argument, as if $pc were a 'call' or 'bl' type
    instruction.
    Works only for ABIs that use registers for arguments.
    """
    abi = abi or pwndbg.abi.ABI.default()
    regs = abi.register_arguments
    if n < len(regs):
        return getattr(pwndbg.regs, regs[n])

    n -= len(regs)
    sp = pwndbg.regs.sp + (n * pwndbg.arch.ptrsize)

    return int(pwndbg.memory.poi(pwndbg.typeinfo.ppvoid, sp))
[ "def", "argument", "(", "n", ",", "abi", "=", "None", ")", ":", "abi", "=", "abi", "or", "pwndbg", ".", "abi", ".", "ABI", ".", "default", "(", ")", "regs", "=", "abi", ".", "register_arguments", "if", "n", "<", "len", "(", "regs", ")", ":", "r...
https://github.com/pwndbg/pwndbg/blob/136b3b6a80d94f494dcb00a614af1c24ca706700/pwndbg/arguments.py#L168-L184
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/aws/notify.py
python
AWSSNS.async_send_message
(self, message="", **kwargs)
Send notification to specified SNS ARN.
Send notification to specified SNS ARN.
[ "Send", "notification", "to", "specified", "SNS", "ARN", "." ]
async def async_send_message(self, message="", **kwargs):
    """Send notification to specified SNS ARN."""
    if not kwargs.get(ATTR_TARGET):
        _LOGGER.error("At least one target is required")
        return

    message_attributes = {
        k: {"StringValue": json.dumps(v), "DataType": "String"}
        for k, v in kwargs.items()
        if v is not None
    }
    subject = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)

    async with self.session.create_client(
        self.service, **self.aws_config
    ) as client:
        tasks = []
        for target in kwargs.get(ATTR_TARGET, []):
            tasks.append(
                client.publish(
                    TargetArn=target,
                    Message=message,
                    Subject=subject,
                    MessageAttributes=message_attributes,
                )
            )

        if tasks:
            await asyncio.gather(*tasks)
[ "async", "def", "async_send_message", "(", "self", ",", "message", "=", "\"\"", ",", "*", "*", "kwargs", ")", ":", "if", "not", "kwargs", ".", "get", "(", "ATTR_TARGET", ")", ":", "_LOGGER", ".", "error", "(", "\"At least one target is required\"", ")", "r...
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/aws/notify.py#L163-L191
spulec/moto
a688c0032596a7dfef122b69a08f2bec3be2e481
moto/elbv2/models.py
python
FakeLoadBalancer.delete
(self, region)
Not exposed as part of the ELB API - used for CloudFormation.
Not exposed as part of the ELB API - used for CloudFormation.
[ "Not", "exposed", "as", "part", "of", "the", "ELB", "API", "-", "used", "for", "CloudFormation", "." ]
def delete(self, region):
    """Not exposed as part of the ELB API - used for CloudFormation."""
    elbv2_backends[region].delete_load_balancer(self.arn)
[ "def", "delete", "(", "self", ",", "region", ")", ":", "elbv2_backends", "[", "region", "]", ".", "delete_load_balancer", "(", "self", ".", "arn", ")" ]
https://github.com/spulec/moto/blob/a688c0032596a7dfef122b69a08f2bec3be2e481/moto/elbv2/models.py#L487-L489
googleads/google-ads-python
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
google/ads/googleads/v7/services/services/keyword_plan_ad_group_keyword_service/client.py
python
KeywordPlanAdGroupKeywordServiceClient.keyword_plan_ad_group_keyword_path
( customer_id: str, keyword_plan_ad_group_keyword_id: str, )
return "customers/{customer_id}/keywordPlanAdGroupKeywords/{keyword_plan_ad_group_keyword_id}".format( customer_id=customer_id, keyword_plan_ad_group_keyword_id=keyword_plan_ad_group_keyword_id, )
Return a fully-qualified keyword_plan_ad_group_keyword string.
Return a fully-qualified keyword_plan_ad_group_keyword string.
[ "Return", "a", "fully", "-", "qualified", "keyword_plan_ad_group_keyword", "string", "." ]
def keyword_plan_ad_group_keyword_path(
    customer_id: str, keyword_plan_ad_group_keyword_id: str,
) -> str:
    """Return a fully-qualified keyword_plan_ad_group_keyword string."""
    return "customers/{customer_id}/keywordPlanAdGroupKeywords/{keyword_plan_ad_group_keyword_id}".format(
        customer_id=customer_id,
        keyword_plan_ad_group_keyword_id=keyword_plan_ad_group_keyword_id,
    )
[ "def", "keyword_plan_ad_group_keyword_path", "(", "customer_id", ":", "str", ",", "keyword_plan_ad_group_keyword_id", ":", "str", ",", ")", "->", "str", ":", "return", "\"customers/{customer_id}/keywordPlanAdGroupKeywords/{keyword_plan_ad_group_keyword_id}\"", ".", "format", "(...
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v7/services/services/keyword_plan_ad_group_keyword_service/client.py#L195-L202
metabrainz/picard
535bf8c7d9363ffc7abb3f69418ec11823c38118
picard/pluginmanager.py
python
PluginManager._install_plugin_file
(self, path, update=False)
[]
def _install_plugin_file(self, path, update=False):
    dst = os.path.join(self.plugins_directory, os.path.basename(path))
    if update:
        dst += _UPDATE_SUFFIX
        if os.path.isfile(dst):
            os.remove(dst)
    shutil.copy2(path, dst)
    log.debug("Plugin (file) saved to %r", dst)
[ "def", "_install_plugin_file", "(", "self", ",", "path", ",", "update", "=", "False", ")", ":", "dst", "=", "os", ".", "path", ".", "join", "(", "self", ".", "plugins_directory", ",", "os", ".", "path", ".", "basename", "(", "path", ")", ")", "if", ...
https://github.com/metabrainz/picard/blob/535bf8c7d9363ffc7abb3f69418ec11823c38118/picard/pluginmanager.py#L360-L367
somewacko/deconvfaces
9e9620c52cb1de37e6ae3f52387791ee21dbda67
faces/generate.py
python
GenParser.pose_vector
(self, value, params)
return self.constrain(vec, params['constrained'], params['ps_scale'], params['ps_min'], params['ps_max'])
Create a pose vector for a provided value.
Create a pose vector for a provided value.
[ "Create", "an", "pose", "vector", "for", "a", "provided", "value", "." ]
def pose_vector(self, value, params):
    """ Create a pose vector for a provided value.
    """
    if isinstance(value, str):
        if '+' not in value:
            raise RuntimeError("Pose '{}' not understood".format(value))
        try:
            values = [int(x) for x in value.split('+')]
        except:
            raise RuntimeError("Pose '{}' not understood".format(value))
    elif isinstance(value, int):
        values = [value]
    else:
        raise RuntimeError("Pose '{}' not understood".format(value))

    vec = np.zeros((NUM_YALE_POSES,))
    for val in values:
        if val < 0 or NUM_YALE_POSES <= val:
            raise RuntimeError("Pose '{}' invalid".format(val))
        vec[val] += 1.0

    return self.constrain(vec, params['constrained'],
        params['ps_scale'], params['ps_min'], params['ps_max'])
[ "def", "pose_vector", "(", "self", ",", "value", ",", "params", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "if", "'+'", "not", "in", "value", ":", "raise", "RuntimeError", "(", "\"Pose '{}' not understood\"", ".", "format", "(", "v...
https://github.com/somewacko/deconvfaces/blob/9e9620c52cb1de37e6ae3f52387791ee21dbda67/faces/generate.py#L416-L439
yuxiaokui/Intranet-Penetration
f57678a204840c83cbf3308e3470ae56c5ff514b
proxy/XX-Net/code/default/python27/1.0/lib/noarch/sortedcontainers/sortedset.py
python
SortedSet.symmetric_difference
(self, that)
return new_set
Return a new set with elements in either *self* or *that* but not both.
Return a new set with elements in either *self* or *that* but not both.
[ "Return", "a", "new", "set", "with", "elements", "in", "either", "*", "self", "*", "or", "*", "that", "*", "but", "not", "both", "." ]
def symmetric_difference(self, that):
    """
    Return a new set with elements in either *self* or *that* but not both.
    """
    diff = self._set.symmetric_difference(that)
    new_set = self.__class__(key=self._key, load=self._load, _set=diff)
    return new_set
[ "def", "symmetric_difference", "(", "self", ",", "that", ")", ":", "diff", "=", "self", ".", "_set", ".", "symmetric_difference", "(", "that", ")", "new_set", "=", "self", ".", "__class__", "(", "key", "=", "self", ".", "_key", ",", "load", "=", "self"...
https://github.com/yuxiaokui/Intranet-Penetration/blob/f57678a204840c83cbf3308e3470ae56c5ff514b/proxy/XX-Net/code/default/python27/1.0/lib/noarch/sortedcontainers/sortedset.py#L230-L236
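The record above comes from an older vendored copy of sortedcontainers (note the load= keyword); the public behaviour is unchanged in current releases, so a small usage sketch only needs the package installed.

# Usage sketch: symmetric difference keeps elements present in exactly one set.
from sortedcontainers import SortedSet

a = SortedSet([1, 2, 3])
b = a.symmetric_difference([3, 4, 5])
print(list(b))  # [1, 2, 4, 5]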
Kkevsterrr/backdoorme
f9755ca6cec600335e681752e7a1c5c617bb5a39
backdoors/shell/__pupy/pupy/packages/windows/x86/psutil/_pssunos.py
python
net_connections
(kind, _pid=-1)
return list(ret)
Return socket connections. If pid == -1 return system-wide connections (as opposed to connections opened by one process only). Only INET sockets are returned (UNIX are not).
Return socket connections. If pid == -1 return system-wide connections (as opposed to connections opened by one process only). Only INET sockets are returned (UNIX are not).
[ "Return", "socket", "connections", ".", "If", "pid", "==", "-", "1", "return", "system", "-", "wide", "connections", "(", "as", "opposed", "to", "connections", "opened", "by", "one", "process", "only", ")", ".", "Only", "INET", "sockets", "are", "returned"...
def net_connections(kind, _pid=-1): """Return socket connections. If pid == -1 return system-wide connections (as opposed to connections opened by one process only). Only INET sockets are returned (UNIX are not). """ cmap = _common.conn_tmap.copy() if _pid == -1: cmap.pop('unix', 0) if kind not in cmap: raise ValueError("invalid %r kind argument; choose between %s" % (kind, ', '.join([repr(x) for x in cmap]))) families, types = _common.conn_tmap[kind] rawlist = cext.net_connections(_pid, families, types) ret = set() for item in rawlist: fd, fam, type_, laddr, raddr, status, pid = item if fam not in families: continue if type_ not in types: continue status = TCP_STATUSES[status] fam = sockfam_to_enum(fam) type_ = socktype_to_enum(type_) if _pid == -1: nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid) else: nt = _common.pconn(fd, fam, type_, laddr, raddr, status) ret.add(nt) return list(ret)
[ "def", "net_connections", "(", "kind", ",", "_pid", "=", "-", "1", ")", ":", "cmap", "=", "_common", ".", "conn_tmap", ".", "copy", "(", ")", "if", "_pid", "==", "-", "1", ":", "cmap", ".", "pop", "(", "'unix'", ",", "0", ")", "if", "kind", "no...
https://github.com/Kkevsterrr/backdoorme/blob/f9755ca6cec600335e681752e7a1c5c617bb5a39/backdoors/shell/__pupy/pupy/packages/windows/x86/psutil/_pssunos.py#L206-L234
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
examples/research_projects/longform-qa/eli5_utils.py
python
eval_qa_s2s_epoch
(model, dataset, tokenizer, args)
[]
def eval_qa_s2s_epoch(model, dataset, tokenizer, args): model.eval() # make iterator train_sampler = SequentialSampler(dataset) model_collate_fn = functools.partial( make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() with torch.no_grad(): for step, batch_inputs in enumerate(epoch_iterator): pre_loss = model(**batch_inputs)[0] loss = pre_loss.sum() / pre_loss.shape[0] loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0: print( "{:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) print( "Total \t L: {:.3f} \t -- {:.3f}".format( loc_loss / loc_steps, time() - st_time, ) )
[ "def", "eval_qa_s2s_epoch", "(", "model", ",", "dataset", ",", "tokenizer", ",", "args", ")", ":", "model", ".", "eval", "(", ")", "# make iterator", "train_sampler", "=", "SequentialSampler", "(", "dataset", ")", "model_collate_fn", "=", "functools", ".", "pa...
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/examples/research_projects/longform-qa/eli5_utils.py#L459-L492
bsmali4/xssfork
515b45dfb0edb9263da544ad91fc1cb5f410bfd1
thirdparty/requests/cookies.py
python
RequestsCookieJar.__getstate__
(self)
return state
Unlike a normal CookieJar, this class is pickleable.
Unlike a normal CookieJar, this class is pickleable.
[ "Unlike", "a", "normal", "CookieJar", "this", "class", "is", "pickleable", "." ]
def __getstate__(self): """Unlike a normal CookieJar, this class is pickleable.""" state = self.__dict__.copy() # remove the unpickleable RLock object state.pop('_cookies_lock') return state
[ "def", "__getstate__", "(", "self", ")", ":", "state", "=", "self", ".", "__dict__", ".", "copy", "(", ")", "# remove the unpickleable RLock object", "state", ".", "pop", "(", "'_cookies_lock'", ")", "return", "state" ]
https://github.com/bsmali4/xssfork/blob/515b45dfb0edb9263da544ad91fc1cb5f410bfd1/thirdparty/requests/cookies.py#L333-L338
FSecureLABS/Jandroid
e31d0dab58a2bfd6ed8e0a387172b8bd7c893436
gui/lib/png.py
python
Writer.write_passes
(self, outfile, rows, packed=False)
return i+1
Write a PNG image to the output file. Most users are expected to find the :meth:`write` or :meth:`write_array` method more convenient. The rows should be given to this method in the order that they appear in the output file. For straightlaced images, this is the usual top to bottom ordering, but for interlaced images the rows should have already been interlaced before passing them to this function. `rows` should be an iterable that yields each row. When `packed` is ``False`` the rows should be in boxed row flat pixel format; when `packed` is ``True`` each row should be a packed sequence of bytes.
Write a PNG image to the output file.
[ "Write", "a", "PNG", "image", "to", "the", "output", "file", "." ]
def write_passes(self, outfile, rows, packed=False): """ Write a PNG image to the output file. Most users are expected to find the :meth:`write` or :meth:`write_array` method more convenient. The rows should be given to this method in the order that they appear in the output file. For straightlaced images, this is the usual top to bottom ordering, but for interlaced images the rows should have already been interlaced before passing them to this function. `rows` should be an iterable that yields each row. When `packed` is ``False`` the rows should be in boxed row flat pixel format; when `packed` is ``True`` each row should be a packed sequence of bytes. """ # http://www.w3.org/TR/PNG/#5PNG-file-signature outfile.write(_signature) # http://www.w3.org/TR/PNG/#11IHDR write_chunk(outfile, b'IHDR', struct.pack("!2I5B", self.width, self.height, self.bitdepth, self.color_type, 0, 0, self.interlace)) # See :chunk:order # http://www.w3.org/TR/PNG/#11gAMA if self.gamma is not None: write_chunk(outfile, b'gAMA', struct.pack("!L", int(round(self.gamma*1e5)))) # See :chunk:order # http://www.w3.org/TR/PNG/#11sBIT if self.rescale: write_chunk(outfile, b'sBIT', struct.pack('%dB' % self.planes, *[self.rescale[0]]*self.planes)) # :chunk:order: Without a palette (PLTE chunk), ordering is # relatively relaxed. With one, gAMA chunk must precede PLTE # chunk which must precede tRNS and bKGD. # See http://www.w3.org/TR/PNG/#5ChunkOrdering if self.palette: p,t = self.make_palette() write_chunk(outfile, b'PLTE', p) if t: # tRNS chunk is optional. Only needed if palette entries # have alpha. write_chunk(outfile, b'tRNS', t) # http://www.w3.org/TR/PNG/#11tRNS if self.transparent is not None: if self.greyscale: write_chunk(outfile, b'tRNS', struct.pack("!1H", *self.transparent)) else: write_chunk(outfile, b'tRNS', struct.pack("!3H", *self.transparent)) # http://www.w3.org/TR/PNG/#11bKGD if self.background is not None: if self.greyscale: write_chunk(outfile, b'bKGD', struct.pack("!1H", *self.background)) else: write_chunk(outfile, b'bKGD', struct.pack("!3H", *self.background)) # http://www.w3.org/TR/PNG/#11pHYs if self.x_pixels_per_unit is not None and self.y_pixels_per_unit is not None: tup = (self.x_pixels_per_unit, self.y_pixels_per_unit, int(self.unit_is_meter)) write_chunk(outfile, b'pHYs', struct.pack("!LLB",*tup)) # http://www.w3.org/TR/PNG/#11IDAT if self.compression is not None: compressor = zlib.compressobj(self.compression) else: compressor = zlib.compressobj() # Choose an extend function based on the bitdepth. The extend # function packs/decomposes the pixel values into bytes and # stuffs them onto the data array. data = array(str('B')) if self.bitdepth == 8 or packed: extend = data.extend elif self.bitdepth == 16: # Decompose into bytes def extend(sl): fmt = '!%dH' % len(sl) data.extend(array(str('B'), struct.pack(fmt, *sl))) else: # Pack into bytes assert self.bitdepth < 8 # samples per byte spb = int(8/self.bitdepth) def extend(sl): a = array(str('B'), sl) # Adding padding bytes so we can group into a whole # number of spb-tuples. l = float(len(a)) extra = math.ceil(l / float(spb))*spb - l a.extend([0]*int(extra)) # Pack into bytes l = group(a, spb) l = [reduce(lambda x,y: (x << self.bitdepth) + y, e) for e in l] data.extend(l) if self.rescale: oldextend = extend factor = \ float(2**self.rescale[1]-1) / float(2**self.rescale[0]-1) def extend(sl): oldextend([int(round(factor*x)) for x in sl]) # Build the first row, testing mostly to see if we need to # changed the extend function to cope with NumPy integer types # (they cause our ordinary definition of extend to fail, so we # wrap it). See # http://code.google.com/p/pypng/issues/detail?id=44 enumrows = enumerate(rows) del rows # First row's filter type. data.append(0) # :todo: Certain exceptions in the call to ``.next()`` or the # following try would indicate no row data supplied. # Should catch. i,row = next(enumrows) try: # If this fails... extend(row) except: # ... try a version that converts the values to int first. # Not only does this work for the (slightly broken) NumPy # types, there are probably lots of other, unknown, "nearly" # int types it works for. def wrapmapint(f): return lambda sl: f([int(x) for x in sl]) extend = wrapmapint(extend) del wrapmapint extend(row) for i,row in enumrows: # Add "None" filter type. Currently, it's essential that # this filter type be used for every scanline as we do not # mark the first row of a reduced pass image; that means we # could accidentally compute the wrong filtered scanline if # we used "up", "average", or "paeth" on such a line. data.append(0) extend(row) if len(data) > self.chunk_limit: compressed = compressor.compress(tostring(data)) if len(compressed): write_chunk(outfile, b'IDAT', compressed) # Because of our very witty definition of ``extend``, # above, we must re-use the same ``data`` object. Hence # we use ``del`` to empty this one, rather than create a # fresh one (which would be my natural FP instinct). del data[:] if len(data): compressed = compressor.compress(tostring(data)) else: compressed = b'' flushed = compressor.flush() if len(compressed) or len(flushed): write_chunk(outfile, b'IDAT', compressed + flushed) # http://www.w3.org/TR/PNG/#11IEND write_chunk(outfile, b'IEND') return i+1
[ "def", "write_passes", "(", "self", ",", "outfile", ",", "rows", ",", "packed", "=", "False", ")", ":", "# http://www.w3.org/TR/PNG/#5PNG-file-signature", "outfile", ".", "write", "(", "_signature", ")", "# http://www.w3.org/TR/PNG/#11IHDR", "write_chunk", "(", "outfi...
https://github.com/FSecureLABS/Jandroid/blob/e31d0dab58a2bfd6ed8e0a387172b8bd7c893436/gui/lib/png.py#L626-L797
log2timeline/plaso
fe2e316b8c76a0141760c0f2f181d84acb83abc2
plaso/parsers/winfirewall.py
python
WinFirewallParser._ParseLogLine
(self, parser_mediator, structure)
Parse a single log line and produce an event object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file.
Parse a single log line and produce an event object.
[ "Parse", "a", "single", "log", "line", "and", "produce", "an", "event", "object", "." ]
def _ParseLogLine(self, parser_mediator, structure): """Parse a single log line and produce an event object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. """ # Ensure time_elements_tuple is not a pyparsing.ParseResults otherwise # copy.deepcopy() of the dfDateTime object will fail on Python 3.8 with: # "TypeError: 'str' object is not callable" due to pyparsing.ParseResults # overriding __getattr__ with a function that returns an empty string when # named token does not exist. time_elements_structure = structure.get('date_time', None) try: year, month, day_of_month, hours, minutes, seconds = ( time_elements_structure) date_time = dfdatetime_time_elements.TimeElements(time_elements_tuple=( year, month, day_of_month, hours, minutes, seconds)) date_time.is_local_time = True except (TypeError, ValueError): parser_mediator.ProduceExtractionWarning( 'invalid date time value: {0!s}'.format(time_elements_structure)) return event_data = WinFirewallEventData() event_data.action = self._GetValueFromStructure(structure, 'action') event_data.dest_ip = self._GetValueFromStructure(structure, 'dest_ip') event_data.dest_port = self._GetValueFromStructure(structure, 'dest_port') event_data.flags = self._GetValueFromStructure(structure, 'flags') event_data.icmp_code = self._GetValueFromStructure(structure, 'icmp_code') event_data.icmp_type = self._GetValueFromStructure(structure, 'icmp_type') event_data.info = self._GetValueFromStructure(structure, 'info') event_data.path = self._GetValueFromStructure(structure, 'path') event_data.protocol = self._GetValueFromStructure(structure, 'protocol') event_data.size = self._GetValueFromStructure(structure, 'size') event_data.source_ip = self._GetValueFromStructure(structure, 'source_ip') event_data.source_port = self._GetValueFromStructure( structure, 'source_port') event_data.tcp_ack = self._GetValueFromStructure(structure, 'tcp_ack') event_data.tcp_seq = self._GetValueFromStructure(structure, 'tcp_seq') event_data.tcp_win = self._GetValueFromStructure(structure, 'tcp_win') if self._use_local_timezone: time_zone = parser_mediator.timezone else: time_zone = pytz.UTC event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN, time_zone=time_zone) parser_mediator.ProduceEventWithEventData(event, event_data)
[ "def", "_ParseLogLine", "(", "self", ",", "parser_mediator", ",", "structure", ")", ":", "# Ensure time_elements_tuple is not a pyparsing.ParseResults otherwise", "# copy.deepcopy() of the dfDateTime object will fail on Python 3.8 with:", "# \"TypeError: 'str' object is not callable\" due to...
https://github.com/log2timeline/plaso/blob/fe2e316b8c76a0141760c0f2f181d84acb83abc2/plaso/parsers/winfirewall.py#L137-L191
ioflo/ioflo
177ac656d7c4ff801aebb0d8b401db365a5248ce
ioflo/aio/uxd/uxding.py
python
SocketUxdNb.send
(self, data, da)
return result
Perform non blocking send on socket. data is string in python2 and bytes in python3 da is destination address tuple (destHost, destPort)
Perform non blocking send on socket.
[ "Perform", "non", "blocking", "send", "on", "socket", "." ]
def send(self, data, da): """Perform non blocking send on socket. data is string in python2 and bytes in python3 da is destination address tuple (destHost, destPort) """ try: result = self.ss.sendto(data, da) #result is number of bytes sent except socket.error as ex: emsg = "socket.error = {0}: sending from {1} to {2}\n".format(ex, self.ha, da) console.profuse(emsg) result = 0 raise if console._verbosity >= console.Wordage.profuse: try: load = data[:result].decode("UTF-8") except UnicodeDecodeError as ex: load = "0x{0}".format(hexlify(data[:result]).decode("ASCII")) cmsg = ("Server at {0}, sent {1} bytes to {2}:\n------------\n" "{3}\n\n".format(self.ha, result, da, load)) console.profuse(cmsg) if self.wlog: self.wlog.writeTx(da, data) return result
[ "def", "send", "(", "self", ",", "data", ",", "da", ")", ":", "try", ":", "result", "=", "self", ".", "ss", ".", "sendto", "(", "data", ",", "da", ")", "#result is number of bytes sent", "except", "socket", ".", "error", "as", "ex", ":", "emsg", "=",...
https://github.com/ioflo/ioflo/blob/177ac656d7c4ff801aebb0d8b401db365a5248ce/ioflo/aio/uxd/uxding.py#L155-L181
cloudera/hue
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
desktop/core/ext-py/billiard-3.5.0.5/billiard/managers.py
python
dispatch
(c, id, methodname, args=(), kwds={})
Send a message to manager using connection `c` and return response
Send a message to manager using connection `c` and return response
[ "Send", "a", "message", "to", "manager", "using", "connection", "c", "and", "return", "response" ]
def dispatch(c, id, methodname, args=(), kwds={}): ''' Send a message to manager using connection `c` and return response ''' c.send((id, methodname, args, kwds)) kind, result = c.recv() if kind == '#RETURN': return result raise convert_to_error(kind, result)
[ "def", "dispatch", "(", "c", ",", "id", ",", "methodname", ",", "args", "=", "(", ")", ",", "kwds", "=", "{", "}", ")", ":", "c", ".", "send", "(", "(", "id", ",", "methodname", ",", "args", ",", "kwds", ")", ")", "kind", ",", "result", "=", ...
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/billiard-3.5.0.5/billiard/managers.py#L87-L95
pascanur/GroundHog
3691cbf2cd4e46aaae5d7a986b0ac172016ce729
groundhog/layers/rec_layers.py
python
RecurrentMultiLayer.step_fprop
(self, state_below, mask=None, dpmask=None, state_before=None, init_state=None, use_noise=True, no_noise_bias=False)
return rval
Constructs the computational graph of a single step of the recurrent layer. :type state_below: theano variable :param state_below: the input to the layer :type mask: None or theano variable :param mask: mask describing the length of each sequence in a minibatch :type state_before: theano variable :param state_before: the previous value of the hidden state of the layer :type use_noise: bool :param use_noise: flag saying if weight noise should be used in computing the output of this layer :type no_noise_bias: bool :param no_noise_bias: flag saying if weight noise should be added to the bias as well
Constructs the computational graph of a single step of the recurrent layer.
[ "Constructs", "the", "computational", "graph", "of", "a", "single", "step", "of", "the", "recurrent", "layer", "." ]
def step_fprop(self, state_below, mask=None, dpmask=None, state_before=None, init_state=None, use_noise=True, no_noise_bias=False): """ Constructs the computational graph of a single step of the recurrent layer. :type state_below: theano variable :param state_below: the input to the layer :type mask: None or theano variable :param mask: mask describing the length of each sequence in a minibatch :type state_before: theano variable :param state_before: the previous value of the hidden state of the layer :type use_noise: bool :param use_noise: flag saying if weight noise should be used in computing the output of this layer :type no_noise_bias: bool :param no_noise_bias: flag saying if weight noise should be added to the bias as well """ rval = [] if self.weight_noise and use_noise and self.noise_params: W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)] if not no_noise_bias: b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hss)] else: b_hhs = self.b_hhs else: W_hhs = self.W_hhs b_hhs = self.b_hhs preactiv = TT.dot(state_before, W_hhs[0]) +state_below preactiv = TT.cast(preactiv,theano.config.floatX) h = self.activation[0](preactiv) if self.activ_noise and use_noise: h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype) if self.dropout < 1.: if use_noise: if h.ndim == 2: h = h * dpmask[:,:h.shape[1]] dpidx = h.shape[1] else: h = h * dpmask[:h.shape[0]] dpidx = h.shape[0] else: h = h * self.dropout rval +=[h] for dx in xrange(1, self.n_layers): preactiv = TT.dot(h, W_hhs[dx]) + b_hhs[dx-1] h = self.activation[dx](preactiv) if self.activ_noise and use_noise: h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype) if self.dropout < 1.: if use_noise: if h.ndim == 2: h = h * dpmask[:,dpidx:dpidx+h.shape[1]] dpidx = dpidx + h.shape[1] else: h = h * dpmask[dpidx:dpidx+h.shape[0]] dpidx = dpidx + h.shape[0] else: h = h * self.dropout rval += [h] if mask is not None: if h.ndim ==2 and mask.ndim==1: mask = mask.dimshuffle(0,'x') h = mask * h + (1-mask) * state_before rval[-1] = h return rval
[ "def", "step_fprop", "(", "self", ",", "state_below", ",", "mask", "=", "None", ",", "dpmask", "=", "None", ",", "state_before", "=", "None", ",", "init_state", "=", "None", ",", "use_noise", "=", "True", ",", "no_noise_bias", "=", "False", ")", ":", "...
https://github.com/pascanur/GroundHog/blob/3691cbf2cd4e46aaae5d7a986b0ac172016ce729/groundhog/layers/rec_layers.py#L190-L270
lisa-lab/pylearn2
af81e5c362f0df4df85c3e54e23b2adeec026055
pylearn2/datasets/csv_dataset.py
python
CSVDataset._load_data
(self)
return X, y
Loads the data from a CSV file (ending with a '.csv' filename). Returns ------- X : object The features of the dataset. y : object, optional The target variable of the model.
Loads the data from a CSV file (ending with a '.csv' filename).
[ "Loads", "the", "data", "from", "a", "CSV", "file", "(", "ending", "with", "a", ".", "csv", "filename", ")", "." ]
def _load_data(self): """ Loads the data from a CSV file (ending with a '.csv' filename). Returns ------- X : object The features of the dataset. y : object, optional The target variable of the model. """ assert self.path.endswith('.csv') if self.expect_headers: data = np.loadtxt(self.path, delimiter=self.delimiter, skiprows=1) else: data = np.loadtxt(self.path, delimiter=self.delimiter) def take_subset(X, y): """ Takes a subset of the dataset if the start_fraction, stop_fraction or start/stop parameter of the class is set. Parameters ---------- X : object The features of the dataset. y : object, optional The target variable of the model. Returns ------- X : object The subset of the features of the dataset. y : object, optional The subset of the target variable of the model. """ if self.start_fraction is not None: n = X.shape[0] subset_end = int(self.start_fraction * n) X = X[0:subset_end, :] y = y[0:subset_end] elif self.end_fraction is not None: n = X.shape[0] subset_start = int((1 - self.end_fraction) * n) X = X[subset_start:, ] y = y[subset_start:] elif self.start is not None: X = X[self.start:self.stop, ] if y is not None: y = y[self.start:self.stop] return X, y if self.expect_labels: y = data[:, 0:self.num_outputs] X = data[:, self.num_outputs:] y = y.reshape((y.shape[0], self.num_outputs)) else: X = data y = None X, y = take_subset(X, y) return X, y
[ "def", "_load_data", "(", "self", ")", ":", "assert", "self", ".", "path", ".", "endswith", "(", "'.csv'", ")", "if", "self", ".", "expect_headers", ":", "data", "=", "np", ".", "loadtxt", "(", "self", ".", "path", ",", "delimiter", "=", "self", ".",...
https://github.com/lisa-lab/pylearn2/blob/af81e5c362f0df4df85c3e54e23b2adeec026055/pylearn2/datasets/csv_dataset.py#L131-L199
pixelogik/NearPy
1b534b864d320d875508e95cd2b76b6d8c07a90b
nearpy/storage/storage.py
python
Storage.get_all_bucket_keys
(self, hash_name)
Returns all bucket keys for the given hash as iterable of strings
Returns all bucket keys for the given hash as iterable of strings
[ "Returns", "all", "bucket", "keys", "for", "the", "given", "hash", "as", "iterable", "of", "strings" ]
def get_all_bucket_keys(self, hash_name): """ Returns all bucket keys for the given hash as iterable of strings """ raise NotImplementedError
[ "def", "get_all_bucket_keys", "(", "self", ",", "hash_name", ")", ":", "raise", "NotImplementedError" ]
https://github.com/pixelogik/NearPy/blob/1b534b864d320d875508e95cd2b76b6d8c07a90b/nearpy/storage/storage.py#L40-L44
softlayer/softlayer-python
cdef7d63c66413197a9a97b0414de9f95887a82a
SoftLayer/managers/network.py
python
NetworkManager.get_cancel_failure_reasons
(self, identifier)
return self.vlan.getCancelFailureReasons(id=identifier)
get the reasons why we cannot cancel the VLAN. :param integer identifier: the instance ID
get the reasons why we cannot cancel the VLAN.
[ "get", "the", "reasons", "why", "we", "cannot", "cancel", "the", "VLAN", "." ]
def get_cancel_failure_reasons(self, identifier): """get the reasons why we cannot cancel the VLAN. :param integer identifier: the instance ID """ return self.vlan.getCancelFailureReasons(id=identifier)
[ "def", "get_cancel_failure_reasons", "(", "self", ",", "identifier", ")", ":", "return", "self", ".", "vlan", ".", "getCancelFailureReasons", "(", "id", "=", "identifier", ")" ]
https://github.com/softlayer/softlayer-python/blob/cdef7d63c66413197a9a97b0414de9f95887a82a/SoftLayer/managers/network.py#L761-L766
volatilityfoundation/volatility3
168b0d0b053ab97a7cb096ef2048795cc54d885f
volatility3/framework/plugins/timeliner.py
python
Timeliner.get_usable_plugins
(cls, selected_list: List[str] = None)
return [plugin_class for plugin_class in plugin_list if filter_func(plugin_class.__name__, selected_list)]
[]
def get_usable_plugins(cls, selected_list: List[str] = None) -> List[Type]: # Initialize for the run plugin_list = list(framework.class_subclasses(TimeLinerInterface)) # Get the filter from the configuration def passthrough(name: str, selected: List[str]) -> bool: return True filter_func = passthrough if selected_list: def filter_plugins(name: str, selected: List[str]) -> bool: return any([s in name for s in selected]) filter_func = filter_plugins else: selected_list = [] return [plugin_class for plugin_class in plugin_list if filter_func(plugin_class.__name__, selected_list)]
[ "def", "get_usable_plugins", "(", "cls", ",", "selected_list", ":", "List", "[", "str", "]", "=", "None", ")", "->", "List", "[", "Type", "]", ":", "# Initialize for the run", "plugin_list", "=", "list", "(", "framework", ".", "class_subclasses", "(", "TimeL...
https://github.com/volatilityfoundation/volatility3/blob/168b0d0b053ab97a7cb096ef2048795cc54d885f/volatility3/framework/plugins/timeliner.py#L54-L72
aws-samples/aws-kube-codesuite
ab4e5ce45416b83bffb947ab8d234df5437f4fca
src/kubernetes/client/models/apps_v1beta1_deployment_status.py
python
AppsV1beta1DeploymentStatus.updated_replicas
(self)
return self._updated_replicas
Gets the updated_replicas of this AppsV1beta1DeploymentStatus. Total number of non-terminated pods targeted by this deployment that have the desired template spec. :return: The updated_replicas of this AppsV1beta1DeploymentStatus. :rtype: int
Gets the updated_replicas of this AppsV1beta1DeploymentStatus. Total number of non-terminated pods targeted by this deployment that have the desired template spec.
[ "Gets", "the", "updated_replicas", "of", "this", "AppsV1beta1DeploymentStatus", ".", "Total", "number", "of", "non", "-", "terminated", "pods", "targeted", "by", "this", "deployment", "that", "have", "the", "desired", "template", "spec", "." ]
def updated_replicas(self): """ Gets the updated_replicas of this AppsV1beta1DeploymentStatus. Total number of non-terminated pods targeted by this deployment that have the desired template spec. :return: The updated_replicas of this AppsV1beta1DeploymentStatus. :rtype: int """ return self._updated_replicas
[ "def", "updated_replicas", "(", "self", ")", ":", "return", "self", ".", "_updated_replicas" ]
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/kubernetes/client/models/apps_v1beta1_deployment_status.py#L226-L234
openstack/nova
b49b7663e1c3073917d5844b81d38db8e86d05c4
nova/compute/manager.py
python
ComputeManager._heal_instance_info_cache
(self, context)
Called periodically. On every call, try to update the info_cache's network information for another instance by calling to the network manager. This is implemented by keeping a cache of uuids of instances that live on this host. On each call, we pop one off of a list, pull the DB record, and try the call to the network API. If anything errors don't fail, as it's possible the instance has been deleted, etc.
Called periodically. On every call, try to update the info_cache's network information for another instance by calling to the network manager.
[ "Called", "periodically", ".", "On", "every", "call", "try", "to", "update", "the", "info_cache", "s", "network", "information", "for", "another", "instance", "by", "calling", "to", "the", "network", "manager", "." ]
def _heal_instance_info_cache(self, context): """Called periodically. On every call, try to update the info_cache's network information for another instance by calling to the network manager. This is implemented by keeping a cache of uuids of instances that live on this host. On each call, we pop one off of a list, pull the DB record, and try the call to the network API. If anything errors don't fail, as it's possible the instance has been deleted, etc. """ heal_interval = CONF.heal_instance_info_cache_interval if not heal_interval: return instance_uuids = getattr(self, '_instance_uuids_to_heal', []) instance = None LOG.debug('Starting heal instance info cache') if not instance_uuids: # The list of instances to heal is empty so rebuild it LOG.debug('Rebuilding the list of instances to heal') db_instances = objects.InstanceList.get_by_host( context, self.host, expected_attrs=[], use_slave=True) for inst in db_instances: # We don't want to refresh the cache for instances # which are building or deleting so don't put them # in the list. If they are building they will get # added to the list next time we build it. if (inst.vm_state == vm_states.BUILDING): LOG.debug('Skipping network cache update for instance ' 'because it is Building.', instance=inst) continue if (inst.task_state == task_states.DELETING): LOG.debug('Skipping network cache update for instance ' 'because it is being deleted.', instance=inst) continue if not instance: # Save the first one we find so we don't # have to get it again instance = inst else: instance_uuids.append(inst['uuid']) self._instance_uuids_to_heal = instance_uuids else: # Find the next valid instance on the list while instance_uuids: try: inst = objects.Instance.get_by_uuid( context, instance_uuids.pop(0), expected_attrs=['system_metadata', 'info_cache', 'flavor'], use_slave=True) except exception.InstanceNotFound: # Instance is gone. Try to grab another. continue # Check the instance hasn't been migrated if inst.host != self.host: LOG.debug('Skipping network cache update for instance ' 'because it has been migrated to another ' 'host.', instance=inst) # Check the instance isn't being deleting elif inst.task_state == task_states.DELETING: LOG.debug('Skipping network cache update for instance ' 'because it is being deleted.', instance=inst) else: instance = inst break if instance: # We have an instance now to refresh try: # Fix potential mismatch in port binding if evacuation failed # after reassigning the port binding to the dest host but # before the instance host is changed. # Do this only when instance has no pending task. if instance.task_state is None and \ self._require_nw_info_update(context, instance): LOG.info("Updating ports in neutron", instance=instance) self.network_api.setup_instance_network_on_host( context, instance, self.host) # Call to network API to get instance info.. this will # force an update to the instance's info_cache self.network_api.get_instance_nw_info( context, instance, force_refresh=True) LOG.debug('Updated the network info_cache for instance', instance=instance) except exception.InstanceNotFound: # Instance is gone. LOG.debug('Instance no longer exists. Unable to refresh', instance=instance) return except exception.InstanceInfoCacheNotFound: # InstanceInfoCache is gone. LOG.debug('InstanceInfoCache no longer exists. ' 'Unable to refresh', instance=instance) except Exception: LOG.error('An error occurred while refreshing the network ' 'cache.', instance=instance, exc_info=True) else: LOG.debug("Didn't find any instances for network info cache " "update.")
[ "def", "_heal_instance_info_cache", "(", "self", ",", "context", ")", ":", "heal_interval", "=", "CONF", ".", "heal_instance_info_cache_interval", "if", "not", "heal_interval", ":", "return", "instance_uuids", "=", "getattr", "(", "self", ",", "'_instance_uuids_to_hea...
https://github.com/openstack/nova/blob/b49b7663e1c3073917d5844b81d38db8e86d05c4/nova/compute/manager.py#L9340-L9445
klmitch/turnstile
8fe9a359b45e505d3192ab193ecf9be177ab1a17
turnstile/tools.py
python
turnstile_command
(conf_file, command, arguments=[], channel=None, debug=False)
Issue a command to all running control daemons. :param conf_file: Name of the configuration file. :param command: The command to execute. Note that 'ping' is handled specially; in particular, the "channel" parameter is implied. (A random value will be used for the channel to listen on.) :param arguments: A list of arguments for the command. Note that the colon character (':') cannot be used. :param channel: If not None, specifies the name of a message channel to listen for responses on. Will wait indefinitely; to terminate the listening loop, use the keyboard interrupt sequence. :param debug: If True, debugging messages are emitted while sending the command.
Issue a command to all running control daemons.
[ "Issue", "a", "command", "to", "all", "running", "control", "daemons", "." ]
def turnstile_command(conf_file, command, arguments=[], channel=None, debug=False): """ Issue a command to all running control daemons. :param conf_file: Name of the configuration file. :param command: The command to execute. Note that 'ping' is handled specially; in particular, the "channel" parameter is implied. (A random value will be used for the channel to listen on.) :param arguments: A list of arguments for the command. Note that the colon character (':') cannot be used. :param channel: If not None, specifies the name of a message channel to listen for responses on. Will wait indefinitely; to terminate the listening loop, use the keyboard interrupt sequence. :param debug: If True, debugging messages are emitted while sending the command. """ # Connect to the database... conf = config.Config(conf_file=conf_file) db = conf.get_database() control_channel = conf['control'].get('channel', 'control') # Now, set up the command command = command.lower() ts_conv = False if command == 'ping': # We handle 'ping' specially; first, figure out the channel if arguments: channel = arguments[0] else: channel = str(uuid.uuid4()) arguments = [channel] # Next, add on a timestamp if len(arguments) < 2: arguments.append(time.time()) ts_conv = True # Limit the argument list length arguments = arguments[:2] # OK, the command is all set up. Let us now send the command... if debug: cmd = [command] + arguments print >>sys.stderr, ("Issuing command: %s" % ' '.join(cmd)) database.command(db, control_channel, command, *arguments) # Were we asked to listen on a channel? if not channel: return # OK, let's subscribe to the channel... pubsub = db.pubsub() pubsub.subscribe(channel) # Now we listen... try: count = 0 for msg in pubsub.listen(): # Make sure the message is one we're interested in if debug: formatted = pprint.pformat(msg) print >>sys.stderr, "Received message: %s" % formatted if (msg['type'] not in ('pmessage', 'message') or msg['channel'] != channel): continue count += 1 # Figure out the response response = msg['data'].split(':') # If this is a 'pong' and ts_conv is true, add an RTT to # the response if ts_conv and response[0] == 'pong': try: rtt = (time.time() - float(response[2])) * 100 response.append('(RTT %.2fms)' % rtt) except Exception: # IndexError or ValueError, probably; ignore it pass # Print out the response print "Response % 5d: %s" % (count, ' '.join(response)) except KeyboardInterrupt: # We want to break out of the loop, but not return any error # to the caller... pass
[ "def", "turnstile_command", "(", "conf_file", ",", "command", ",", "arguments", "=", "[", "]", ",", "channel", "=", "None", ",", "debug", "=", "False", ")", ":", "# Connect to the database...", "conf", "=", "config", ".", "Config", "(", "conf_file", "=", "...
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L712-L803
chaoss/grimoirelab-perceval
ba19bfd5e40bffdd422ca8e68526326b47f97491
perceval/backend.py
python
fetch
(backend_class, backend_args, category, filter_classified=False, manager=None)
Fetch items using the given backend. Generator to get items using the given backend class. When an archive manager is given, this function will store the fetched items in an `Archive`. If an exception is raised, this archive will be removed to avoid corrupted archives. The parameters needed to initialize the `backend` class and get the items are given using `backend_args` dict parameter. :param backend_class: backend class to fetch items :param backend_args: dict of arguments needed to fetch the items :param category: category of the items to retrieve. If None, it will use the default backend category :param filter_classified: remove classified fields from the resulting items :param manager: archive manager needed to store the items :returns: a generator of items
Fetch items using the given backend.
[ "Fetch", "items", "using", "the", "given", "backend", "." ]
def fetch(backend_class, backend_args, category, filter_classified=False, manager=None): """Fetch items using the given backend. Generator to get items using the given backend class. When an archive manager is given, this function will store the fetched items in an `Archive`. If an exception is raised, this archive will be removed to avoid corrupted archives. The parameters needed to initialize the `backend` class and get the items are given using `backend_args` dict parameter. :param backend_class: backend class to fetch items :param backend_args: dict of arguments needed to fetch the items :param category: category of the items to retrieve. If None, it will use the default backend category :param filter_classified: remove classified fields from the resulting items :param manager: archive manager needed to store the items :returns: a generator of items """ init_args = find_signature_parameters(backend_class.__init__, backend_args) archive = manager.create_archive() if manager else None init_args['archive'] = archive backend = backend_class(**init_args) if category: backend_args['category'] = category if filter_classified: backend_args['filter_classified'] = filter_classified fetch_args = find_signature_parameters(backend.fetch, backend_args) items = backend.fetch(**fetch_args) try: for item in items: yield item except Exception as e: if manager: archive_path = archive.archive_path manager.remove_archive(archive_path) raise e
[ "def", "fetch", "(", "backend_class", ",", "backend_args", ",", "category", ",", "filter_classified", "=", "False", ",", "manager", "=", "None", ")", ":", "init_args", "=", "find_signature_parameters", "(", "backend_class", ".", "__init__", ",", "backend_args", ...
https://github.com/chaoss/grimoirelab-perceval/blob/ba19bfd5e40bffdd422ca8e68526326b47f97491/perceval/backend.py#L1096-L1140
douban/graph-index
cc60ac25edf167efb768ee9b93eaf4b9523671e5
bottle.py
python
html_quote
(string)
return '"%s"' % html_escape(string).replace('\n','%#10;')\ .replace('\r','&#13;').replace('\t','&#9;')
Escape and quote a string to be used as an HTTP attribute.
Escape and quote a string to be used as an HTTP attribute.
[ "Escape", "and", "quote", "a", "string", "to", "be", "used", "as", "an", "HTTP", "attribute", "." ]
def html_quote(string): ''' Escape and quote a string to be used as an HTTP attribute.''' return '"%s"' % html_escape(string).replace('\n','%#10;')\ .replace('\r','&#13;').replace('\t','&#9;')
[ "def", "html_quote", "(", "string", ")", ":", "return", "'\"%s\"'", "%", "html_escape", "(", "string", ")", ".", "replace", "(", "'\\n'", ",", "'%#10;'", ")", ".", "replace", "(", "'\\r'", ",", "'&#13;'", ")", ".", "replace", "(", "'\\t'", ",", "'&#9;'...
https://github.com/douban/graph-index/blob/cc60ac25edf167efb768ee9b93eaf4b9523671e5/bottle.py#L2220-L2223
girder/girder
0766ba8e7f9b25ce81e7c0d19bd343479bceea20
plugins/jobs/girder_jobs/models/job.py
python
Job.save
(self, job, *args, **kwargs)
return job
We extend save so that we can serialize the kwargs before sending them to the database. This will allow kwargs with $ and . characters in the keys.
We extend save so that we can serialize the kwargs before sending them to the database. This will allow kwargs with $ and . characters in the keys.
[ "We", "extend", "save", "so", "that", "we", "can", "serialize", "the", "kwargs", "before", "sending", "them", "to", "the", "database", ".", "This", "will", "allow", "kwargs", "with", "$", "and", ".", "characters", "in", "the", "keys", "." ]
def save(self, job, *args, **kwargs): """ We extend save so that we can serialize the kwargs before sending them to the database. This will allow kwargs with $ and . characters in the keys. """ job['kwargs'] = json_util.dumps(job['kwargs']) job = super().save(job, *args, **kwargs) job['kwargs'] = json_util.loads(job['kwargs']) return job
[ "def", "save", "(", "self", ",", "job", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "job", "[", "'kwargs'", "]", "=", "json_util", ".", "dumps", "(", "job", "[", "'kwargs'", "]", ")", "job", "=", "super", "(", ")", ".", "save", "(", ...
https://github.com/girder/girder/blob/0766ba8e7f9b25ce81e7c0d19bd343479bceea20/plugins/jobs/girder_jobs/models/job.py#L264-L273
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/polys/factortools.py
python
dup_zz_zassenhaus
(f, K)
return factors + [f]
Factor primitive square-free polynomials in `Z[x]`.
Factor primitive square-free polynomials in `Z[x]`.
[ "Factor", "primitive", "square", "-", "free", "polynomials", "in", "Z", "[", "x", "]", "." ]
def dup_zz_zassenhaus(f, K): """Factor primitive square-free polynomials in `Z[x]`. """ n = dup_degree(f) if n == 1: return [f] fc = f[-1] A = dup_max_norm(f, K) b = dup_LC(f, K) B = int(abs(K.sqrt(K(n + 1))*2**n*A*b)) C = int((n + 1)**(2*n)*A**(2*n - 1)) gamma = int(_ceil(2*_log(C, 2))) bound = int(2*gamma*_log(gamma)) a = [] # choose a prime number `p` such that `f` be square free in Z_p # if there are many factors in Z_p, choose among a few different `p` # the one with fewer factors for px in xrange(3, bound + 1): if not isprime(px) or b % px == 0: continue px = K.convert(px) F = gf_from_int_poly(f, px) if not gf_sqf_p(F, px, K): continue fsqfx = gf_factor_sqf(F, px, K)[1] a.append((px, fsqfx)) if len(fsqfx) < 15 or len(a) > 4: break p, fsqf = min(a, key=lambda x: len(x[1])) l = int(_ceil(_log(2*B + 1, p))) modular = [gf_to_int_poly(ff, p) for ff in fsqf] g = dup_zz_hensel_lift(p, f, modular, l, K) sorted_T = range(len(g)) T = set(sorted_T) factors, s = [], 1 pl = p**l while 2*s <= len(T): for S in subsets(sorted_T, s): # lift the constant coefficient of the product `G` of the factors # in the subset `S`; if it is does not divide `fc`, `G` does # not divide the input polynomial if b == 1: q = 1 for i in S: q = q*g[i][-1] q = q % pl if not _test_pl(fc, q, pl): continue else: G = [b] for i in S: G = dup_mul(G, g[i], K) G = dup_trunc(G, pl, K) G1 = dup_primitive(G, K)[1] q = G1[-1] if q and fc % q != 0: continue H = [b] S = set(S) T_S = T - S if b == 1: G = [b] for i in S: G = dup_mul(G, g[i], K) G = dup_trunc(G, pl, K) for i in T_S: H = dup_mul(H, g[i], K) H = dup_trunc(H, pl, K) G_norm = dup_l1_norm(G, K) H_norm = dup_l1_norm(H, K) if G_norm*H_norm <= B: T = T_S sorted_T = [i for i in sorted_T if i not in S] G = dup_primitive(G, K)[1] f = dup_primitive(H, K)[1] factors.append(G) b = dup_LC(f, K) break else: s += 1 return factors + [f]
[ "def", "dup_zz_zassenhaus", "(", "f", ",", "K", ")", ":", "n", "=", "dup_degree", "(", "f", ")", "if", "n", "==", "1", ":", "return", "[", "f", "]", "fc", "=", "f", "[", "-", "1", "]", "A", "=", "dup_max_norm", "(", "f", ",", "K", ")", "b",...
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/polys/factortools.py#L262-L362
nerdvegas/rez
d392c65bf63b4bca8106f938cec49144ba54e770
src/rez/utils/platform_.py
python
Platform.image_viewer
(self)
return self._image_viewer()
Returns the system default image viewer. If None, rez will use the web browser to display images.
Returns the system default image viewer.
[ "Returns", "the", "system", "default", "image", "viewer", "." ]
def image_viewer(self): """Returns the system default image viewer. If None, rez will use the web browser to display images. """ return self._image_viewer()
[ "def", "image_viewer", "(", "self", ")", ":", "return", "self", ".", "_image_viewer", "(", ")" ]
https://github.com/nerdvegas/rez/blob/d392c65bf63b4bca8106f938cec49144ba54e770/src/rez/utils/platform_.py#L75-L80
RJT1990/pyflux
297f2afc2095acd97c12e827dd500e8ea5da0c0f
pyflux/garch/egarchmreg.py
python
EGARCHMReg._sim_predicted_mean
(self, lmda, Y, scores, h, t_params, simulations, X_oos)
return np.append(lmda, np.array([np.mean(i) for i in np.transpose(sim_vector)]))
Simulates a h-step ahead mean prediction Parameters ---------- lmda : np.array The past predicted values Y : np.array The past data scores : np.array The past scores h : int How many steps ahead for the prediction t_params : np.array A vector of (transformed) latent variables simulations : int How many simulations to perform X_oos : np.array Out of sample predictors Returns ---------- Matrix of simulations
Simulates a h-step ahead mean prediction
[ "Simulates", "a", "h", "-", "step", "ahead", "mean", "prediction" ]
def _sim_predicted_mean(self, lmda, Y, scores, h, t_params, simulations, X_oos): """ Simulates a h-step ahead mean prediction Parameters ---------- lmda : np.array The past predicted values Y : np.array The past data scores : np.array The past scores h : int How many steps ahead for the prediction t_params : np.array A vector of (transformed) latent variables simulations : int How many simulations to perform X_oos : np.array Out of sample predictors Returns ---------- Matrix of simulations """ sim_vector = np.zeros([simulations,h]) for n in range(0,simulations): # Create arrays to iteratre over lmda_exp = lmda.copy() scores_exp = scores.copy() Y_exp = Y.copy() # Loop over h time periods for t in range(0,h): new_lambda_value = 0 if self.p != 0: for j in range(self.p): new_lambda_value += t_params[j]*lmda_exp[-j-1] if self.q != 0: for k in range(self.q): new_lambda_value += t_params[k+self.p]*scores_exp[-k-1] if self.leverage is True: new_lambda_value += t_params[self.p+self.q]*np.sign(-(Y_exp[-1]-new_theta_value))*(scores_exp[-1]+1) new_lambda_value += np.dot(X_oos[t],t_params[-len(self.X_names)*2:-len(self.X_names)]) new_theta_value = np.dot(X_oos[t],t_params[-len(self.X_names):]) + t_params[-(len(self.X_names)*2)-1]*np.exp(new_lambda_value/2.0) lmda_exp = np.append(lmda_exp,[new_lambda_value]) # For indexing consistency scores_exp = np.append(scores_exp,scores[np.random.randint(scores.shape[0])]) # expectation of score is zero Y_exp = np.append(Y_exp,Y[np.random.randint(Y.shape[0])]) # bootstrap returns sim_vector[n] = lmda_exp[-h:] return np.append(lmda, np.array([np.mean(i) for i in np.transpose(sim_vector)]))
[ "def", "_sim_predicted_mean", "(", "self", ",", "lmda", ",", "Y", ",", "scores", ",", "h", ",", "t_params", ",", "simulations", ",", "X_oos", ")", ":", "sim_vector", "=", "np", ".", "zeros", "(", "[", "simulations", ",", "h", "]", ")", "for", "n", ...
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/garch/egarchmreg.py#L403-L466
keiffster/program-y
8c99b56f8c32f01a7b9887b5daae9465619d0385
src/programy/parser/template/nodes/id.py
python
TemplateIdNode.to_xml
(self, client_context)
return "<id />"
[]
def to_xml(self, client_context): return "<id />"
[ "def", "to_xml", "(", "self", ",", "client_context", ")", ":", "return", "\"<id />\"" ]
https://github.com/keiffster/program-y/blob/8c99b56f8c32f01a7b9887b5daae9465619d0385/src/programy/parser/template/nodes/id.py#L36-L37
memray/seq2seq-keyphrase
9145c63ebdc4c3bc431f8091dc52547a46804012
emolga/models/covc_encdec.py
python
RNNLM.__init__
(self, config, n_rng, rng, mode='Evaluation')
[]
def __init__(self, config, n_rng, rng, mode='Evaluation'): super(RNNLM, self).__init__() self.config = config self.n_rng = n_rng # numpy random stream self.rng = rng # Theano random stream self.mode = mode self.name = 'rnnlm'
[ "def", "__init__", "(", "self", ",", "config", ",", "n_rng", ",", "rng", ",", "mode", "=", "'Evaluation'", ")", ":", "super", "(", "RNNLM", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "config", "=", "config", "self", ".", "n_rng", "="...
https://github.com/memray/seq2seq-keyphrase/blob/9145c63ebdc4c3bc431f8091dc52547a46804012/emolga/models/covc_encdec.py#L1562-L1571
hzy46/Deep-Learning-21-Examples
15c2d9edccad090cd67b033f24a43c544e5cba3e
chapter_3/slim/nets/resnet_v2.py
python
bottleneck
(inputs, depth, depth_bottleneck, stride, rate=1, outputs_collections=None, scope=None)
Bottleneck residual unit variant with BN before convolutions. This is the full preactivation residual unit variant proposed in [2]. See Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck variant which has an extra bottleneck layer. When putting together two consecutive ResNet blocks that use this unit, one should use stride = 2 in the last unit of the first block. Args: inputs: A tensor of size [batch, height, width, channels]. depth: The depth of the ResNet unit output. depth_bottleneck: The depth of the bottleneck layers. stride: The ResNet unit's stride. Determines the amount of downsampling of the units output compared to its input. rate: An integer, rate for atrous convolution. outputs_collections: Collection to add the ResNet unit output. scope: Optional variable_scope. Returns: The ResNet unit's output.
Bottleneck residual unit variant with BN before convolutions.
[ "Bottleneck", "residual", "unit", "variant", "with", "BN", "before", "convolutions", "." ]
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1, outputs_collections=None, scope=None): """Bottleneck residual unit variant with BN before convolutions. This is the full preactivation residual unit variant proposed in [2]. See Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck variant which has an extra bottleneck layer. When putting together two consecutive ResNet blocks that use this unit, one should use stride = 2 in the last unit of the first block. Args: inputs: A tensor of size [batch, height, width, channels]. depth: The depth of the ResNet unit output. depth_bottleneck: The depth of the bottleneck layers. stride: The ResNet unit's stride. Determines the amount of downsampling of the units output compared to its input. rate: An integer, rate for atrous convolution. outputs_collections: Collection to add the ResNet unit output. scope: Optional variable_scope. Returns: The ResNet unit's output. """ with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc: depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4) preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact') if depth == depth_in: shortcut = resnet_utils.subsample(inputs, stride, 'shortcut') else: shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride, normalizer_fn=None, activation_fn=None, scope='shortcut') residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1, scope='conv1') residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2') residual = slim.conv2d(residual, depth, [1, 1], stride=1, normalizer_fn=None, activation_fn=None, scope='conv3') output = shortcut + residual return slim.utils.collect_named_outputs(outputs_collections, sc.original_name_scope, output)
[ "def", "bottleneck", "(", "inputs", ",", "depth", ",", "depth_bottleneck", ",", "stride", ",", "rate", "=", "1", ",", "outputs_collections", "=", "None", ",", "scope", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "scope", ",", "'bottl...
https://github.com/hzy46/Deep-Learning-21-Examples/blob/15c2d9edccad090cd67b033f24a43c544e5cba3e/chapter_3/slim/nets/resnet_v2.py#L62-L108
kubernetes-client/python
47b9da9de2d02b2b7a34fbe05afb44afd130d73a
kubernetes/client/models/v1_persistent_volume_list.py
python
V1PersistentVolumeList.to_str
(self)
return pprint.pformat(self.to_dict())
Returns the string representation of the model
Returns the string representation of the model
[ "Returns", "the", "string", "representation", "of", "the", "model" ]
def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict())
[ "def", "to_str", "(", "self", ")", ":", "return", "pprint", ".", "pformat", "(", "self", ".", "to_dict", "(", ")", ")" ]
https://github.com/kubernetes-client/python/blob/47b9da9de2d02b2b7a34fbe05afb44afd130d73a/kubernetes/client/models/v1_persistent_volume_list.py#L185-L187
freelawproject/courtlistener
ab3ae7bb6e5e836b286749113e7dbb403d470912
cl/corpus_importer/tasks.py
python
get_pacer_doc_by_rd_and_description
( self: Task, rd_pk: int, description_re: Pattern, cookies: RequestsCookieJar, fallback_to_main_doc: bool = False, tag_name: Optional[List[str]] = None, )
Using a RECAPDocument object ID and a description of a document, get the document from PACER. This function was originally meant to get civil cover sheets, but can be repurposed as needed. :param self: The celery task :param rd_pk: The PK of a RECAPDocument object to use as a source. :param description_re: A compiled regular expression to search against the description provided by the attachment page. :param cookies: A requests.cookies.RequestsCookieJar with the cookies of a logged-in PACER user. :param fallback_to_main_doc: Should we grab the main doc if none of the attachments match the regex? :param tag_name: A tag name to apply to any downloaded content. :return: None
Using a RECAPDocument object ID and a description of a document, get the document from PACER.
[ "Using", "a", "RECAPDocument", "object", "ID", "and", "a", "description", "of", "a", "document", "get", "the", "document", "from", "PACER", "." ]
def get_pacer_doc_by_rd_and_description( self: Task, rd_pk: int, description_re: Pattern, cookies: RequestsCookieJar, fallback_to_main_doc: bool = False, tag_name: Optional[List[str]] = None, ) -> None: """Using a RECAPDocument object ID and a description of a document, get the document from PACER. This function was originally meant to get civil cover sheets, but can be repurposed as needed. :param self: The celery task :param rd_pk: The PK of a RECAPDocument object to use as a source. :param description_re: A compiled regular expression to search against the description provided by the attachment page. :param cookies: A requests.cookies.RequestsCookieJar with the cookies of a logged-in PACER user. :param fallback_to_main_doc: Should we grab the main doc if none of the attachments match the regex? :param tag_name: A tag name to apply to any downloaded content. :return: None """ rd = RECAPDocument.objects.get(pk=rd_pk) att_report = get_attachment_page_by_rd(self, rd_pk, cookies) att_found = None for attachment in att_report.data.get("attachments", []): if description_re.search(attachment["description"]): att_found = attachment.copy() document_type = RECAPDocument.ATTACHMENT break if not att_found: if fallback_to_main_doc: logger.info( f"Falling back to main document for pacer_doc_id: {rd.pacer_doc_id}" ) att_found = att_report.data document_type = RECAPDocument.PACER_DOCUMENT else: msg = f"Aborting. Did not find civil cover sheet for {rd}." logger.error(msg) self.request.chain = None return None if not att_found.get("pacer_doc_id"): logger.warning("No pacer_doc_id for document (is it sealed?)") self.request.chain = None return # Try to find the attachment already in the collection rd, _ = RECAPDocument.objects.get_or_create( docket_entry=rd.docket_entry, attachment_number=att_found.get("attachment_number"), document_number=rd.document_number, pacer_doc_id=att_found["pacer_doc_id"], document_type=document_type, defaults={"date_upload": now()}, ) # Replace the description if we have description data. # Else fallback on old. rd.description = att_found.get("description", "") or rd.description if tag_name is not None: tag, _ = Tag.objects.get_or_create(name=tag_name) tag.tag_object(rd) if rd.is_available: # Great. Call it a day. rd.save() return pacer_case_id = rd.docket_entry.docket.pacer_case_id r = download_pacer_pdf_by_rd( rd.pk, pacer_case_id, att_found["pacer_doc_id"], cookies ) court_id = rd.docket_entry.docket.court_id success, msg = update_rd_metadata( self, rd_pk, r, court_id, pacer_case_id, rd.pacer_doc_id, rd.document_number, rd.attachment_number, ) if success is False: return # Skip OCR for now. It'll happen in a second step. extract_recap_pdf(rd.pk, skip_ocr=True) add_items_to_solr([rd.pk], "search.RECAPDocument")
[ "def", "get_pacer_doc_by_rd_and_description", "(", "self", ":", "Task", ",", "rd_pk", ":", "int", ",", "description_re", ":", "Pattern", ",", "cookies", ":", "RequestsCookieJar", ",", "fallback_to_main_doc", ":", "bool", "=", "False", ",", "tag_name", ":", "Opti...
https://github.com/freelawproject/courtlistener/blob/ab3ae7bb6e5e836b286749113e7dbb403d470912/cl/corpus_importer/tasks.py#L1684-L1778
gluon/AbletonLive9_RemoteScripts
0c0db5e2e29bbed88c82bf327f54d4968d36937e
ableton/v2/control_surface/profile.py
python
profile
(fn)
Decorator to mark a function to be profiled. Only mark top level functions
Decorator to mark a function to be profiled. Only mark top level functions
[ "Decorator", "to", "mark", "a", "function", "to", "be", "profiled", ".", "Only", "mark", "top", "level", "functions" ]
def profile(fn): """ Decorator to mark a function to be profiled. Only mark top level functions """ if ENABLE_PROFILING: @wraps(fn) def wrapper(self, *a, **k): if PROFILER: return PROFILER.runcall(partial(fn, self, *a, **k)) else: print('Can not profile (%s), it is probably reloaded' % fn.__name__) return fn(*a, **k) return wrapper else: return fn
[ "def", "profile", "(", "fn", ")", ":", "if", "ENABLE_PROFILING", ":", "@", "wraps", "(", "fn", ")", "def", "wrapper", "(", "self", ",", "*", "a", ",", "*", "*", "k", ")", ":", "if", "PROFILER", ":", "return", "PROFILER", ".", "runcall", "(", "par...
https://github.com/gluon/AbletonLive9_RemoteScripts/blob/0c0db5e2e29bbed88c82bf327f54d4968d36937e/ableton/v2/control_surface/profile.py#L9-L25
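A generic sketch of the same profiling-decorator idea using only the standard library; ENABLE_PROFILING and PROFILER below are stand-in module globals for illustration, not the values the Ableton script actually defines.

import cProfile
from functools import wraps, partial

ENABLE_PROFILING = True          # stand-in for the module-level flag
PROFILER = cProfile.Profile()    # stand-in for the module-level profiler

def profile(fn):
    """Wrap a method so every call runs under the profiler when enabled."""
    if not ENABLE_PROFILING:
        return fn
    @wraps(fn)
    def wrapper(self, *a, **k):
        return PROFILER.runcall(partial(fn, self, *a, **k))
    return wrapper

class Surface:
    @profile
    def refresh(self):
        return sum(range(1000))

Surface().refresh()
PROFILER.print_stats()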
NVIDIA/NeMo
5b0c0b4dec12d87d3cd960846de4105309ce938e
nemo/utils/app_state.py
python
AppState.log_dir
(self)
return self._log_dir
Returns the log_dir set by exp_manager.
Returns the log_dir set by exp_manager.
[ "Returns", "the", "log_dir", "set", "by", "exp_manager", "." ]
def log_dir(self): """Returns the log_dir set by exp_manager. """ return self._log_dir
[ "def", "log_dir", "(", "self", ")", ":", "return", "self", ".", "_log_dir" ]
https://github.com/NVIDIA/NeMo/blob/5b0c0b4dec12d87d3cd960846de4105309ce938e/nemo/utils/app_state.py#L248-L251
happyleavesaoc/python-firetv
24707ca1842e6b08b502f442eee85dcab13682e3
firetv/__init__.py
python
FireTV.key_e
(self)
Send e keypress.
Send e keypress.
[ "Send", "e", "keypress", "." ]
def key_e(self): """Send e keypress.""" self._key(KEY_E)
[ "def", "key_e", "(", "self", ")", ":", "self", ".", "_key", "(", "KEY_E", ")" ]
https://github.com/happyleavesaoc/python-firetv/blob/24707ca1842e6b08b502f442eee85dcab13682e3/firetv/__init__.py#L851-L853
dropbox/dropbox-sdk-python
015437429be224732990041164a21a0501235db1
dropbox/team_log.py
python
EventType.file_request_delete
(cls, val)
return cls('file_request_delete', val)
Create an instance of this class set to the ``file_request_delete`` tag with value ``val``. :param FileRequestDeleteType val: :rtype: EventType
Create an instance of this class set to the ``file_request_delete`` tag with value ``val``.
[ "Create", "an", "instance", "of", "this", "class", "set", "to", "the", "file_request_delete", "tag", "with", "value", "val", "." ]
def file_request_delete(cls, val): """ Create an instance of this class set to the ``file_request_delete`` tag with value ``val``. :param FileRequestDeleteType val: :rtype: EventType """ return cls('file_request_delete', val)
[ "def", "file_request_delete", "(", "cls", ",", "val", ")", ":", "return", "cls", "(", "'file_request_delete'", ",", "val", ")" ]
https://github.com/dropbox/dropbox-sdk-python/blob/015437429be224732990041164a21a0501235db1/dropbox/team_log.py#L24283-L24291
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
packages/source/keystone/keystone/common/serializer.py
python
XmlSerializer._populate_list
(self, element, k, v)
Populates an element with a key & list value.
Populates an element with a key & list value.
[ "Populates", "an", "element", "with", "a", "key", "&", "list", "value", "." ]
def _populate_list(self, element, k, v): """Populates an element with a key & list value.""" # spec has a lot of inconsistency here! container = element if k == 'media-types': # xsd compliance: <media-types> contains <media-type>s # find an existing <media-types> element or make one container = element.find('media-types') if container is None: container = etree.Element(k) element.append(container) name = k[:-1] elif k == 'serviceCatalog' or k == 'catalog': # xsd compliance: <serviceCatalog> contains <service>s container = etree.Element(k) element.append(container) name = 'service' elif k == 'roles' and element.tag == 'user': name = 'role' elif k == 'endpoints' and element.tag == 'service': name = 'endpoint' elif k == 'values' and element.tag[-1] == 's': # OS convention is to contain lists in a 'values' element, # so the list itself can have attributes, which is # unnecessary in XML name = element.tag[:-1] elif k[-1] == 's': container = etree.Element(k) element.append(container) if k == 'policies': # need to special-case policies since policie is not a word name = 'policy' else: name = k[:-1] else: name = k for item in v: child = etree.Element(name) self.populate_element(child, item) container.append(child)
[ "def", "_populate_list", "(", "self", ",", "element", ",", "k", ",", "v", ")", ":", "# spec has a lot of inconsistency here!", "container", "=", "element", "if", "k", "==", "'media-types'", ":", "# xsd compliance: <media-types> contains <media-type>s", "# find an existing...
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/keystone/keystone/common/serializer.py#L219-L260
anki/cozmo-python-sdk
dd29edef18748fcd816550469195323842a7872e
examples/tutorials/05_async_python/02_cube_blinker.py
python
cozmo_program
(robot: cozmo.robot.Robot)
The async equivalent of 01_cube_blinker_sync. The usage of ``async def`` makes the cozmo_program method a coroutine. Within a coroutine, ``await`` can be used. With ``await``, the statement blocks until the request being waited for has completed. Meanwhile the event loop continues in the background. For instance, the statement ``await robot.world.wait_for_observed_light_cube(timeout=60)`` blocks until Cozmo discovers a light cube or the 60 second timeout elapses, whichever occurs first. Likewise, the statement ``await cube.wait_for_tap(timeout=10)`` blocks until the tap event is received or the 10 second timeout occurs, whichever occurs first. For more information, see https://docs.python.org/3/library/asyncio-task.html
The async equivalent of 01_cube_blinker_sync.
[ "The", "async", "equivalent", "of", "01_cube_blinker_sync", "." ]
async def cozmo_program(robot: cozmo.robot.Robot): '''The async equivalent of 01_cube_blinker_sync. The usage of ``async def`` makes the cozmo_program method a coroutine. Within a coroutine, ``await`` can be used. With ``await``, the statement blocks until the request being waited for has completed. Meanwhile the event loop continues in the background. For instance, the statement ``await robot.world.wait_for_observed_light_cube(timeout=60)`` blocks until Cozmo discovers a light cube or the 60 second timeout elapses, whichever occurs first. Likewise, the statement ``await cube.wait_for_tap(timeout=10)`` blocks until the tap event is received or the 10 second timeout occurs, whichever occurs first. For more information, see https://docs.python.org/3/library/asyncio-task.html ''' cube = None look_around = robot.start_behavior(cozmo.behavior.BehaviorTypes.LookAroundInPlace) try: cube = await robot.world.wait_for_observed_light_cube(timeout=60) except asyncio.TimeoutError: print("Didn't find a cube :-(") return finally: look_around.stop() cube.start_light_chaser() try: print("Waiting for cube to be tapped") await cube.wait_for_tap(timeout=10) print("Cube tapped") except asyncio.TimeoutError: print("No-one tapped our cube :-(") finally: cube.stop_light_chaser() cube.set_lights_off()
[ "async", "def", "cozmo_program", "(", "robot", ":", "cozmo", ".", "robot", ".", "Robot", ")", ":", "cube", "=", "None", "look_around", "=", "robot", ".", "start_behavior", "(", "cozmo", ".", "behavior", ".", "BehaviorTypes", ".", "LookAroundInPlace", ")", ...
https://github.com/anki/cozmo-python-sdk/blob/dd29edef18748fcd816550469195323842a7872e/examples/tutorials/05_async_python/02_cube_blinker.py#L60-L101
pypa/pip
7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4
src/pip/_vendor/distlib/markers.py
python
Evaluator.evaluate
(self, expr, context)
return result
Evaluate a marker expression returned by the :func:`parse_requirement` function in the specified context.
Evaluate a marker expression returned by the :func:`parse_requirement` function in the specified context.
[ "Evaluate", "a", "marker", "expression", "returned", "by", "the", ":", "func", ":", "parse_requirement", "function", "in", "the", "specified", "context", "." ]
def evaluate(self, expr, context): """ Evaluate a marker expression returned by the :func:`parse_requirement` function in the specified context. """ if isinstance(expr, string_types): if expr[0] in '\'"': result = expr[1:-1] else: if expr not in context: raise SyntaxError('unknown variable: %s' % expr) result = context[expr] else: assert isinstance(expr, dict) op = expr['op'] if op not in self.operations: raise NotImplementedError('op not implemented: %s' % op) elhs = expr['lhs'] erhs = expr['rhs'] if _is_literal(expr['lhs']) and _is_literal(expr['rhs']): raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs)) lhs = self.evaluate(elhs, context) rhs = self.evaluate(erhs, context) if ((elhs == 'python_version' or erhs == 'python_version') and op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')): lhs = NV(lhs) rhs = NV(rhs) elif elhs == 'python_version' and op in ('in', 'not in'): lhs = NV(lhs) rhs = _get_versions(rhs) result = self.operations[op](lhs, rhs) return result
[ "def", "evaluate", "(", "self", ",", "expr", ",", "context", ")", ":", "if", "isinstance", "(", "expr", ",", "string_types", ")", ":", "if", "expr", "[", "0", "]", "in", "'\\'\"'", ":", "result", "=", "expr", "[", "1", ":", "-", "1", "]", "else",...
https://github.com/pypa/pip/blob/7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4/src/pip/_vendor/distlib/markers.py#L59-L91
jython/frozen-mirror
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
lib-python/2.7/lib2to3/refactor.py
python
RefactoringTool.gen_lines
(self, block, indent)
Generates lines as expected by tokenize from a list of lines. This strips the first len(indent + self.PS1) characters off each line.
Generates lines as expected by tokenize from a list of lines.
[ "Generates", "lines", "as", "expected", "by", "tokenize", "from", "a", "list", "of", "lines", "." ]
def gen_lines(self, block, indent): """Generates lines as expected by tokenize from a list of lines. This strips the first len(indent + self.PS1) characters off each line. """ prefix1 = indent + self.PS1 prefix2 = indent + self.PS2 prefix = prefix1 for line in block: if line.startswith(prefix): yield line[len(prefix):] elif line == prefix.rstrip() + u"\n": yield u"\n" else: raise AssertionError("line=%r, prefix=%r" % (line, prefix)) prefix = prefix2 while True: yield ""
[ "def", "gen_lines", "(", "self", ",", "block", ",", "indent", ")", ":", "prefix1", "=", "indent", "+", "self", ".", "PS1", "prefix2", "=", "indent", "+", "self", ".", "PS2", "prefix", "=", "prefix1", "for", "line", "in", "block", ":", "if", "line", ...
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/lib2to3/refactor.py#L671-L688
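A standalone sketch of the same prompt-stripping idea (not the lib2to3 API itself): given a doctest-style block and its indent, drop the PS1/PS2 prefixes so the remainder can be tokenized. The prompt strings below are assumptions for illustration.

def strip_prompts(block, indent, ps1=">>> ", ps2="... "):
    """Yield lines with the leading indent + prompt removed."""
    prefix = indent + ps1
    for line in block:
        if line.startswith(prefix):
            yield line[len(prefix):]
        elif line == prefix.rstrip() + "\n":
            yield "\n"
        else:
            raise AssertionError("line=%r, prefix=%r" % (line, prefix))
        prefix = indent + ps2   # only the first line uses PS1

block = ["    >>> x = 1\n", "    ... y = x + 1\n"]
print(list(strip_prompts(block, "    ")))   # ['x = 1\n', 'y = x + 1\n']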
SCons/scons
309f0234d1d9cc76955818be47c5c722f577dac6
SCons/Tool/jar.py
python
Jar
(env, target = None, source = [], *args, **kw)
return env.JarFile(target = target, source = target_nodes, *args, **kw)
A pseudo-Builder wrapper around the separate Jar sources{File,Dir} Builders.
A pseudo-Builder wrapper around the separate Jar sources{File,Dir} Builders.
[ "A", "pseudo", "-", "Builder", "wrapper", "around", "the", "separate", "Jar", "sources", "{", "File", "Dir", "}", "Builders", "." ]
def Jar(env, target = None, source = [], *args, **kw): """ A pseudo-Builder wrapper around the separate Jar sources{File,Dir} Builders. """ # jar target should not be a list so assume they passed # no target and want implicit target to be made and the arg # was actaully the list of sources if SCons.Util.is_List(target) and source == []: SCons.Warnings.warn( SCons.Warnings.SConsWarning, "Making implicit target jar file, and treating the list as sources" ) source = target target = None # mutiple targets pass so build each target the same from the # same source #TODO Maybe this should only be done once, and the result copied # for each target since it should result in the same? if SCons.Util.is_List(target) and SCons.Util.is_List(source): jars = [] for single_target in target: jars += env.Jar( target = single_target, source = source, *args, **kw) return jars # they passed no target so make a target implicitly if target is None: try: # make target from the first source file target = os.path.splitext(str(source[0]))[0] + env.subst('$JARSUFFIX') except: # something strange is happening but attempt anyways SCons.Warnings.warn( SCons.Warnings.SConsWarning, "Could not make implicit target from sources, using directory" ) target = os.path.basename(str(env.Dir('.'))) + env.subst('$JARSUFFIX') # make lists out of our target and sources if not SCons.Util.is_List(target): target = [target] if not SCons.Util.is_List(source): source = [source] # setup for checking through all the sources and handle accordingly java_class_suffix = env.subst('$JAVACLASSSUFFIX') java_suffix = env.subst('$JAVASUFFIX') target_nodes = [] # function for determining what to do with a file and not a directory # if its already a class file then it can be used as a # source for jar, otherwise turn it into a class file then # return the source def file_to_class(s): if _my_normcase(str(s)).endswith(java_suffix): return env.JavaClassFile(source = s, *args, **kw) else: return [env.fs.File(s)] # function for calling the JavaClassDir builder if a directory is # passed as a source to Jar builder. The JavaClassDir builder will # return an empty list if there were not target classes built from # the directory, in this case assume the user wanted the directory # copied into the jar as is (it contains other files such as # resources or class files compiled from proir commands) # TODO: investigate the expexcted behavior for directories that # have mixed content, such as Java files along side other files # files. def dir_to_class(s): dir_targets = env.JavaClassDir(source = s, *args, **kw) if dir_targets == []: # no classes files could be built from the source dir # so pass the dir as is. 
return [env.fs.Dir(s)] else: return dir_targets # loop through the sources and handle each accordingly # the goal here is to get all the source files into a class # file or a directory that contains class files for s in SCons.Util.flatten(source): s = env.subst(s) if isinstance(s, SCons.Node.FS.Base): if isinstance(s, SCons.Node.FS.File): # found a file so make sure its a class file target_nodes.extend(file_to_class(s)) else: # found a dir so get the class files out of it target_nodes.extend(dir_to_class(s)) else: try: # source is string try to convert it to file target_nodes.extend(file_to_class(env.fs.File(s))) continue except: pass try: # source is string try to covnert it to dir target_nodes.extend(dir_to_class(env.fs.Dir(s))) continue except: pass SCons.Warnings.warn( SCons.Warnings.SConsWarning, ("File: " + str(s) + " could not be identified as File or Directory, skipping.") ) # at this point all our sources have been converted to classes or directories of class # so pass it to the Jar builder return env.JarFile(target = target, source = target_nodes, *args, **kw)
[ "def", "Jar", "(", "env", ",", "target", "=", "None", ",", "source", "=", "[", "]", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "# jar target should not be a list so assume they passed", "# no target and want implicit target to be made and the arg", "# was actaul...
https://github.com/SCons/scons/blob/309f0234d1d9cc76955818be47c5c722f577dac6/SCons/Tool/jar.py#L91-L205
alan-turing-institute/sktime
79cc513346b1257a6f3fa8e4ed855b5a2a7de716
sktime/forecasting/compose/_reduce.py
python
_concat_y_X
(y, X)
return z
Concatenate y and X prior to sliding-window transform.
Concatenate y and X prior to sliding-window transform.
[ "Concatenate", "y", "and", "X", "prior", "to", "sliding", "-", "window", "transform", "." ]
def _concat_y_X(y, X): """Concatenate y and X prior to sliding-window transform.""" z = y.to_numpy() if z.ndim == 1: z = z.reshape(-1, 1) if X is not None: z = np.column_stack([z, X.to_numpy()]) return z
[ "def", "_concat_y_X", "(", "y", ",", "X", ")", ":", "z", "=", "y", ".", "to_numpy", "(", ")", "if", "z", ".", "ndim", "==", "1", ":", "z", "=", "z", ".", "reshape", "(", "-", "1", ",", "1", ")", "if", "X", "is", "not", "None", ":", "z", ...
https://github.com/alan-turing-institute/sktime/blob/79cc513346b1257a6f3fa8e4ed855b5a2a7de716/sktime/forecasting/compose/_reduce.py#L37-L44
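A minimal standalone sketch of the concatenation step shown above, assuming pandas inputs (the sktime helper itself is module-private, so this copies its logic rather than calling it):

import numpy as np
import pandas as pd

def concat_y_X(y, X=None):
    """Stack the target and exogenous columns into one 2-D array."""
    z = y.to_numpy()
    if z.ndim == 1:
        z = z.reshape(-1, 1)          # ensure a 2-D column for the target
    if X is not None:
        z = np.column_stack([z, X.to_numpy()])
    return z

y = pd.Series([1.0, 2.0, 3.0])
X = pd.DataFrame({"exog": [10.0, 20.0, 30.0]})
print(concat_y_X(y, X).shape)   # (3, 2)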
edfungus/Crouton
ada98b3930192938a48909072b45cb84b945f875
clients/esp8266_clients/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py
python
pprint
(walker)
return "\n".join(output)
Pretty printer for tree walkers
Pretty printer for tree walkers
[ "Pretty", "printer", "for", "tree", "walkers" ]
def pprint(walker): """Pretty printer for tree walkers""" output = [] indent = 0 for token in concatenateCharacterTokens(walker): type = token["type"] if type in ("StartTag", "EmptyTag"): # tag name if token["namespace"] and token["namespace"] != constants.namespaces["html"]: if token["namespace"] in constants.prefixes: ns = constants.prefixes[token["namespace"]] else: ns = token["namespace"] name = "%s %s" % (ns, token["name"]) else: name = token["name"] output.append("%s<%s>" % (" " * indent, name)) indent += 2 # attributes (sorted for consistent ordering) attrs = token["data"] for (namespace, localname), value in sorted(attrs.items()): if namespace: if namespace in constants.prefixes: ns = constants.prefixes[namespace] else: ns = namespace name = "%s %s" % (ns, localname) else: name = localname output.append("%s%s=\"%s\"" % (" " * indent, name, value)) # self-closing if type == "EmptyTag": indent -= 2 elif type == "EndTag": indent -= 2 elif type == "Comment": output.append("%s<!-- %s -->" % (" " * indent, token["data"])) elif type == "Doctype": if token["name"]: if token["publicId"]: output.append("""%s<!DOCTYPE %s "%s" "%s">""" % (" " * indent, token["name"], token["publicId"], token["systemId"] if token["systemId"] else "")) elif token["systemId"]: output.append("""%s<!DOCTYPE %s "" "%s">""" % (" " * indent, token["name"], token["systemId"])) else: output.append("%s<!DOCTYPE %s>" % (" " * indent, token["name"])) else: output.append("%s<!DOCTYPE >" % (" " * indent,)) elif type == "Characters": output.append("%s\"%s\"" % (" " * indent, token["data"])) elif type == "SpaceCharacters": assert False, "concatenateCharacterTokens should have got rid of all Space tokens" else: raise ValueError("Unknown token type, %s" % type) return "\n".join(output)
[ "def", "pprint", "(", "walker", ")", ":", "output", "=", "[", "]", "indent", "=", "0", "for", "token", "in", "concatenateCharacterTokens", "(", "walker", ")", ":", "type", "=", "token", "[", "\"type\"", "]", "if", "type", "in", "(", "\"StartTag\"", ","...
https://github.com/edfungus/Crouton/blob/ada98b3930192938a48909072b45cb84b945f875/clients/esp8266_clients/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py#L79-L147
francisck/DanderSpritz_docs
86bb7caca5a957147f120b18bb5c31f299914904
Python/Core/Lib/distutils/command/bdist_msi.py
python
PyDialog.next
(self, title, next, name='Next', active=1)
return self.pushbutton(name, 236, self.h - 27, 56, 17, flags, title, next)
Add a Next button with a given title, the tab-next button, its name in the Control table, possibly initially disabled. Return the button, so that events can be associated
Add a Next button with a given title, the tab-next button, its name in the Control table, possibly initially disabled. Return the button, so that events can be associated
[ "Add", "a", "Next", "button", "with", "a", "given", "title", "the", "tab", "-", "next", "button", "its", "name", "in", "the", "Control", "table", "possibly", "initially", "disabled", ".", "Return", "the", "button", "so", "that", "events", "can", "be", "a...
def next(self, title, next, name='Next', active=1): """Add a Next button with a given title, the tab-next button, its name in the Control table, possibly initially disabled. Return the button, so that events can be associated""" if active: flags = 3 else: flags = 1 return self.pushbutton(name, 236, self.h - 27, 56, 17, flags, title, next)
[ "def", "next", "(", "self", ",", "title", ",", "next", ",", "name", "=", "'Next'", ",", "active", "=", "1", ")", ":", "if", "active", ":", "flags", "=", "3", "else", ":", "flags", "=", "1", "return", "self", ".", "pushbutton", "(", "name", ",", ...
https://github.com/francisck/DanderSpritz_docs/blob/86bb7caca5a957147f120b18bb5c31f299914904/Python/Core/Lib/distutils/command/bdist_msi.py#L60-L69
tensorly/tensorly
87b435b3f3343447b49d47ebb5461118f6c8a9ab
doc/sphinx_ext/sphinx_gallery/notebook.py
python
directive_fun
(match, directive)
return ('<div class="alert alert-{0}"><h4>{1}</h4><p>{2}</p></div>' .format(directive_to_alert[directive], directive.capitalize(), match.group(1).strip()))
Helper to fill in directives
Helper to fill in directives
[ "Helper", "to", "fill", "in", "directives" ]
def directive_fun(match, directive): """Helper to fill in directives""" directive_to_alert = dict(note="info", warning="danger") return ('<div class="alert alert-{0}"><h4>{1}</h4><p>{2}</p></div>' .format(directive_to_alert[directive], directive.capitalize(), match.group(1).strip()))
[ "def", "directive_fun", "(", "match", ",", "directive", ")", ":", "directive_to_alert", "=", "dict", "(", "note", "=", "\"info\"", ",", "warning", "=", "\"danger\"", ")", "return", "(", "'<div class=\"alert alert-{0}\"><h4>{1}</h4><p>{2}</p></div>'", ".", "format", ...
https://github.com/tensorly/tensorly/blob/87b435b3f3343447b49d47ebb5461118f6c8a9ab/doc/sphinx_ext/sphinx_gallery/notebook.py#L51-L56
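A hedged usage sketch showing how a helper like the one above can be plugged into re.sub to turn an rST note directive into an HTML alert; the regular expression here is a simplified assumption, not the one sphinx_gallery actually compiles.

import re
from functools import partial

def directive_fun(match, directive):
    """Helper to fill in directives (copied from the record above)."""
    directive_to_alert = dict(note="info", warning="danger")
    return ('<div class="alert alert-{0}"><h4>{1}</h4><p>{2}</p></div>'
            .format(directive_to_alert[directive],
                    directive.capitalize(),
                    match.group(1).strip()))

text = ".. note:: Remember to call fit() first."
print(re.sub(r"\.\. note::\s*(.*)", partial(directive_fun, directive="note"), text))
# <div class="alert alert-info"><h4>Note</h4><p>Remember to call fit() first.</p></div>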
theislab/scanpy
b69015e9e7007193c9ac461d5c6fbf845b3d6962
scanpy/plotting/_utils.py
python
circles
(x, y, s, ax, marker=None, c='b', vmin=None, vmax=None, **kwargs)
return collection
Taken from here: https://gist.github.com/syrte/592a062c562cd2a98a83 Make a scatter plot of circles. Similar to pl.scatter, but the size of circles are in data scale. Parameters ---------- x, y : scalar or array_like, shape (n, ) Input data s : scalar or array_like, shape (n, ) Radius of circles. c : color or sequence of color, optional, default : 'b' `c` can be a single color format string, or a sequence of color specifications of length `N`, or a sequence of `N` numbers to be mapped to colors using the `cmap` and `norm` specified via kwargs. Note that `c` should not be a single numeric RGB or RGBA sequence because that is indistinguishable from an array of values to be colormapped. (If you insist, use `color` instead.) `c` can be a 2-D array in which the rows are RGB or RGBA, however. vmin, vmax : scalar, optional, default: None `vmin` and `vmax` are used in conjunction with `norm` to normalize luminance data. If either are `None`, the min and max of the color array is used. kwargs : `~matplotlib.collections.Collection` properties Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls), norm, cmap, transform, etc. Returns ------- paths : `~matplotlib.collections.PathCollection` Examples -------- a = np.arange(11) circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none') pl.colorbar() License -------- This code is under [The BSD 3-Clause License] (http://opensource.org/licenses/BSD-3-Clause)
Taken from here: https://gist.github.com/syrte/592a062c562cd2a98a83 Make a scatter plot of circles. Similar to pl.scatter, but the size of circles are in data scale. Parameters ---------- x, y : scalar or array_like, shape (n, ) Input data s : scalar or array_like, shape (n, ) Radius of circles. c : color or sequence of color, optional, default : 'b' `c` can be a single color format string, or a sequence of color specifications of length `N`, or a sequence of `N` numbers to be mapped to colors using the `cmap` and `norm` specified via kwargs. Note that `c` should not be a single numeric RGB or RGBA sequence because that is indistinguishable from an array of values to be colormapped. (If you insist, use `color` instead.) `c` can be a 2-D array in which the rows are RGB or RGBA, however. vmin, vmax : scalar, optional, default: None `vmin` and `vmax` are used in conjunction with `norm` to normalize luminance data. If either are `None`, the min and max of the color array is used. kwargs : `~matplotlib.collections.Collection` properties Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls), norm, cmap, transform, etc. Returns ------- paths : `~matplotlib.collections.PathCollection` Examples -------- a = np.arange(11) circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none') pl.colorbar() License -------- This code is under [The BSD 3-Clause License] (http://opensource.org/licenses/BSD-3-Clause)
[ "Taken", "from", "here", ":", "https", ":", "//", "gist", ".", "github", ".", "com", "/", "syrte", "/", "592a062c562cd2a98a83", "Make", "a", "scatter", "plot", "of", "circles", ".", "Similar", "to", "pl", ".", "scatter", "but", "the", "size", "of", "ci...
def circles(x, y, s, ax, marker=None, c='b', vmin=None, vmax=None, **kwargs): """ Taken from here: https://gist.github.com/syrte/592a062c562cd2a98a83 Make a scatter plot of circles. Similar to pl.scatter, but the size of circles are in data scale. Parameters ---------- x, y : scalar or array_like, shape (n, ) Input data s : scalar or array_like, shape (n, ) Radius of circles. c : color or sequence of color, optional, default : 'b' `c` can be a single color format string, or a sequence of color specifications of length `N`, or a sequence of `N` numbers to be mapped to colors using the `cmap` and `norm` specified via kwargs. Note that `c` should not be a single numeric RGB or RGBA sequence because that is indistinguishable from an array of values to be colormapped. (If you insist, use `color` instead.) `c` can be a 2-D array in which the rows are RGB or RGBA, however. vmin, vmax : scalar, optional, default: None `vmin` and `vmax` are used in conjunction with `norm` to normalize luminance data. If either are `None`, the min and max of the color array is used. kwargs : `~matplotlib.collections.Collection` properties Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls), norm, cmap, transform, etc. Returns ------- paths : `~matplotlib.collections.PathCollection` Examples -------- a = np.arange(11) circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none') pl.colorbar() License -------- This code is under [The BSD 3-Clause License] (http://opensource.org/licenses/BSD-3-Clause) """ # You can set `facecolor` with an array for each patch, # while you can only set `facecolors` with a value for all. zipped = np.broadcast(x, y, s) patches = [Circle((x_, y_), s_) for x_, y_, s_ in zipped] collection = PatchCollection(patches, **kwargs) if isinstance(c, np.ndarray) and np.issubdtype(c.dtype, np.number): collection.set_array(np.ma.masked_invalid(c)) collection.set_clim(vmin, vmax) else: collection.set_facecolor(c) ax.add_collection(collection) return collection
[ "def", "circles", "(", "x", ",", "y", ",", "s", ",", "ax", ",", "marker", "=", "None", ",", "c", "=", "'b'", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# You can set `facecolor` with an array for each patch,...
https://github.com/theislab/scanpy/blob/b69015e9e7007193c9ac461d5c6fbf845b3d6962/scanpy/plotting/_utils.py#L1063-L1117
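A minimal plotting sketch built from the docstring example above, using plain matplotlib rather than scanpy's internal helper (the figure/axis setup and limits are assumptions for illustration):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Circle

a = np.arange(11)
fig, ax = plt.subplots()
patches = [Circle((x, y), r) for x, y, r in zip(a, a, a * 0.2)]
coll = PatchCollection(patches, alpha=0.5, edgecolor='none')
coll.set_array(a)                      # colour circles by value, like c=a
ax.add_collection(coll)
ax.set_xlim(-1, 11)
ax.set_ylim(-1, 11)
fig.colorbar(coll, ax=ax)
plt.show()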
euphrat1ca/fuzzdb-collect
f32552a4d5d84350552c68801aed281ca1f48e66
ScriptShare/0my/Some-PoC-oR-ExP-master/验证Joomla是否存在反序列化漏洞的脚本/批量/hackUtils-master/bs4/element.py
python
Tag.has_key
(self, key)
return self.has_attr(key)
This was kind of misleading because has_key() (attributes) was different from __in__ (contents). has_key() is gone in Python 3, anyway.
This was kind of misleading because has_key() (attributes) was different from __in__ (contents). has_key() is gone in Python 3, anyway.
[ "This", "was", "kind", "of", "misleading", "because", "has_key", "()", "(", "attributes", ")", "was", "different", "from", "__in__", "(", "contents", ")", ".", "has_key", "()", "is", "gone", "in", "Python", "3", "anyway", "." ]
def has_key(self, key): """This was kind of misleading because has_key() (attributes) was different from __in__ (contents). has_key() is gone in Python 3, anyway.""" warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % ( key)) return self.has_attr(key)
[ "def", "has_key", "(", "self", ",", "key", ")", ":", "warnings", ".", "warn", "(", "'has_key is deprecated. Use has_attr(\"%s\") instead.'", "%", "(", "key", ")", ")", "return", "self", ".", "has_attr", "(", "key", ")" ]
https://github.com/euphrat1ca/fuzzdb-collect/blob/f32552a4d5d84350552c68801aed281ca1f48e66/ScriptShare/0my/Some-PoC-oR-ExP-master/验证Joomla是否存在反序列化漏洞的脚本/批量/hackUtils-master/bs4/element.py#L1408-L1414
dmlc/gluon-cv
709bc139919c02f7454cb411311048be188cde64
gluoncv/model_zoo/center_net/target_generator.py
python
_gaussian_radius
(det_size, min_overlap=0.7)
return min(r1, r2, r3)
Calculate gaussian radius for foreground objects. Parameters ---------- det_size : tuple of int Object size (h, w). min_overlap : float Minimal overlap between objects. Returns ------- float Gaussian radius.
Calculate gaussian radius for foreground objects.
[ "Calculate", "gaussian", "radius", "for", "foreground", "objects", "." ]
def _gaussian_radius(det_size, min_overlap=0.7): """Calculate gaussian radius for foreground objects. Parameters ---------- det_size : tuple of int Object size (h, w). min_overlap : float Minimal overlap between objects. Returns ------- float Gaussian radius. """ height, width = det_size a1 = 1 b1 = (height + width) c1 = width * height * (1 - min_overlap) / (1 + min_overlap) sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1) r1 = (b1 + sq1) / 2 a2 = 4 b2 = 2 * (height + width) c2 = (1 - min_overlap) * width * height sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2) r2 = (b2 + sq2) / 2 a3 = 4 * min_overlap b3 = -2 * min_overlap * (height + width) c3 = (min_overlap - 1) * width * height sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3) r3 = (b3 + sq3) / 2 return min(r1, r2, r3)
[ "def", "_gaussian_radius", "(", "det_size", ",", "min_overlap", "=", "0.7", ")", ":", "height", ",", "width", "=", "det_size", "a1", "=", "1", "b1", "=", "(", "height", "+", "width", ")", "c1", "=", "width", "*", "height", "*", "(", "1", "-", "min_...
https://github.com/dmlc/gluon-cv/blob/709bc139919c02f7454cb411311048be188cde64/gluoncv/model_zoo/center_net/target_generator.py#L65-L100
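A standalone numeric check of the radius formula documented above (a copy of the math, since the gluoncv helper is module-private); for a 32x32 box with the default min_overlap of 0.7 the third root is the smallest and the radius comes out around 8.7 pixels.

import numpy as np

def gaussian_radius(det_size, min_overlap=0.7):
    """Return the smallest of the three candidate radii for a (h, w) box."""
    height, width = det_size
    b1 = height + width
    c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
    r1 = (b1 + np.sqrt(b1 ** 2 - 4 * c1)) / 2
    b2 = 2 * (height + width)
    c2 = (1 - min_overlap) * width * height
    r2 = (b2 + np.sqrt(b2 ** 2 - 16 * c2)) / 2
    a3 = 4 * min_overlap
    b3 = -2 * min_overlap * (height + width)
    c3 = (min_overlap - 1) * width * height
    r3 = (b3 + np.sqrt(b3 ** 2 - 4 * a3 * c3)) / 2
    return min(r1, r2, r3)

print(round(gaussian_radius((32, 32)), 2))   # ~8.74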
ysymyth/3D-SDN
d7a4519bfd57d4c5d99dbdb6a53a82ba5b66ec9e
geometric/maskrcnn/config.py
python
Config.display
(self)
Display Configuration values.
Display Configuration values.
[ "Display", "Configuration", "values", "." ]
def display(self): """Display Configuration values.""" print("\nConfigurations:") for a in dir(self): if not a.startswith("__") and not callable(getattr(self, a)): print("{:30} {}".format(a, getattr(self, a))) print("\n")
[ "def", "display", "(", "self", ")", ":", "print", "(", "\"\\nConfigurations:\"", ")", "for", "a", "in", "dir", "(", "self", ")", ":", "if", "not", "a", ".", "startswith", "(", "\"__\"", ")", "and", "not", "callable", "(", "getattr", "(", "self", ",",...
https://github.com/ysymyth/3D-SDN/blob/d7a4519bfd57d4c5d99dbdb6a53a82ba5b66ec9e/geometric/maskrcnn/config.py#L177-L183
spotify/luigi
c3b66f4a5fa7eaa52f9a72eb6704b1049035c789
luigi/contrib/pyspark_runner.py
python
_pyspark_runner_with
(name, entry_point_class)
return type(name, (AbstractPySparkRunner,), {'_entry_point_class': entry_point_class})
[]
def _pyspark_runner_with(name, entry_point_class): return type(name, (AbstractPySparkRunner,), {'_entry_point_class': entry_point_class})
[ "def", "_pyspark_runner_with", "(", "name", ",", "entry_point_class", ")", ":", "return", "type", "(", "name", ",", "(", "AbstractPySparkRunner", ",", ")", ",", "{", "'_entry_point_class'", ":", "entry_point_class", "}", ")" ]
https://github.com/spotify/luigi/blob/c3b66f4a5fa7eaa52f9a72eb6704b1049035c789/luigi/contrib/pyspark_runner.py#L115-L116
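A generic sketch of the dynamic-class trick used above: the three-argument form of type() builds a new subclass at runtime with the entry-point class baked in as a class attribute. The base and job classes below are invented for illustration, not luigi's actual runner classes.

class AbstractRunner:
    _entry_point_class = None

    def describe(self):
        return "runs " + self._entry_point_class.__name__

class MyJob:
    pass

MyJobRunner = type("MyJobRunner", (AbstractRunner,), {"_entry_point_class": MyJob})
print(MyJobRunner().describe())   # runs MyJob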
hsoft/pdfmasher
08943dd1ad9d8fa91c547d5e91d2f90114d76094
hscommon/gui/column.py
python
Columns.resize_column
(self, colname, newwidth)
Set column ``colname``'s width to ``newwidth``.
Set column ``colname``'s width to ``newwidth``.
[ "Set", "column", "colname", "s", "width", "to", "newwidth", "." ]
def resize_column(self, colname, newwidth): """Set column ``colname``'s width to ``newwidth``. """ self._set_colname_attr(colname, 'width', newwidth)
[ "def", "resize_column", "(", "self", ",", "colname", ",", "newwidth", ")", ":", "self", ".", "_set_colname_attr", "(", "colname", ",", "'width'", ",", "newwidth", ")" ]
https://github.com/hsoft/pdfmasher/blob/08943dd1ad9d8fa91c547d5e91d2f90114d76094/hscommon/gui/column.py#L206-L209
andresriancho/w3af
cd22e5252243a87aaa6d0ddea47cf58dacfe00a9
w3af/plugins/bruteforce/form_auth.py
python
form_auth.audit
(self, freq, debugging_id=None)
Tries to bruteforce a form auth. This is slow! :param freq: A FuzzableRequest :param debugging_id: The ID to use in the logs to be able to track this call to audit(). Plugins need to send this ID to the ExtendedUrllib to get improved logging.
Tries to bruteforce a form auth. This is slow!
[ "Tries", "to", "bruteforce", "a", "form", "auth", ".", "This", "is", "slow!" ]
def audit(self, freq, debugging_id=None): """ Tries to bruteforce a form auth. This is slow! :param freq: A FuzzableRequest :param debugging_id: The ID to use in the logs to be able to track this call to audit(). Plugins need to send this ID to the ExtendedUrllib to get improved logging. """ if freq.get_url() in self._already_tested: return mutant = form_pointer_factory(freq) if not self._is_login_form(mutant): # Not a login form, login forms have these fields: # * username/password # * password return self._already_tested.append(mutant.get_url()) try: session = self._create_new_session(mutant, debugging_id) except BaseFrameworkException, bfe: msg = 'Failed to create new session during form bruteforce setup: "%s"' om.out.debug(msg % bfe) return try: login_failed_bodies = self._id_failed_login_pages(mutant, session, debugging_id) except BaseFrameworkException, bfe: msg = 'Failed to ID failed login page during form bruteforce setup: "%s"' om.out.debug(msg % bfe) return try: self._signature_test(mutant, session, login_failed_bodies, debugging_id) except BaseFrameworkException, bfe: msg = 'Signature test failed during form bruteforce setup: "%s"' om.out.debug(msg % bfe) return user_token, pass_token = mutant.get_dc().get_login_tokens() # Let the user know what we are doing msg = 'Found a user login form. The form action is: "%s"' om.out.information(msg % mutant.get_url()) if user_token is not None: msg = 'The username field to be used is: "%s"' om.out.information(msg % user_token.get_name()) msg = 'The password field to be used is: "%s"' om.out.information(msg % pass_token.get_name()) msg = 'Starting form authentication bruteforce on URL: "%s"' om.out.information(msg % mutant.get_url()) start = time.time() if user_token is not None: generator = self._create_user_pass_generator(mutant.get_url()) else: generator = self._create_pass_generator(mutant.get_url()) self._bruteforce_pool(mutant, login_failed_bodies, generator, session, debugging_id) # Report that we've finished. took_str = epoch_to_string(start) msg = 'Finished bruteforcing "%s" (spent %s)' args = (mutant.get_url(), took_str) om.out.information(msg % args)
[ "def", "audit", "(", "self", ",", "freq", ",", "debugging_id", "=", "None", ")", ":", "if", "freq", ".", "get_url", "(", ")", "in", "self", ".", "_already_tested", ":", "return", "mutant", "=", "form_pointer_factory", "(", "freq", ")", "if", "not", "se...
https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/plugins/bruteforce/form_auth.py#L56-L132
hubo1016/vlcp
61c4c2595b610675ac0cbc4dbc46f70ec40090d3
vlcp/utils/networkplugin.py
python
updatephysicalport
(update_processor = partial(default_processor, excluding=('vhost', 'systemid', 'bridge', 'name'), disabled=('physicalnetwork',)), reorder_dict = default_iterate_dict )
return walker
:param update_processor: update_processor(physcialport, walk, write, \*, parameters)
:param update_processor: update_processor(physcialport, walk, write, \*, parameters)
[ ":", "param", "update_processor", ":", "update_processor", "(", "physcialport", "walk", "write", "\\", "*", "parameters", ")" ]
def updatephysicalport(update_processor = partial(default_processor, excluding=('vhost', 'systemid', 'bridge', 'name'), disabled=('physicalnetwork',)), reorder_dict = default_iterate_dict ): """ :param update_processor: update_processor(physcialport, walk, write, \*, parameters) """ def walker(walk, write, timestamp, parameters_dict): for key, parameters in reorder_dict(parameters_dict): try: value = walk(key) except KeyError: pass else: if update_processor(value, walk, write, parameters=parameters): write(key, value) return walker
[ "def", "updatephysicalport", "(", "update_processor", "=", "partial", "(", "default_processor", ",", "excluding", "=", "(", "'vhost'", ",", "'systemid'", ",", "'bridge'", ",", "'name'", ")", ",", "disabled", "=", "(", "'physicalnetwork'", ",", ")", ")", ",", ...
https://github.com/hubo1016/vlcp/blob/61c4c2595b610675ac0cbc4dbc46f70ec40090d3/vlcp/utils/networkplugin.py#L183-L200
python/cpython
e13cdca0f5224ec4e23bdd04bb3120506964bc8b
Lib/tkinter/__init__.py
python
Misc.winfo_screenvisual
(self)
return self.tk.call('winfo', 'screenvisual', self._w)
Return one of the strings directcolor, grayscale, pseudocolor, staticcolor, staticgray, or truecolor for the default colormodel of this screen.
Return one of the strings directcolor, grayscale, pseudocolor, staticcolor, staticgray, or truecolor for the default colormodel of this screen.
[ "Return", "one", "of", "the", "strings", "directcolor", "grayscale", "pseudocolor", "staticcolor", "staticgray", "or", "truecolor", "for", "the", "default", "colormodel", "of", "this", "screen", "." ]
def winfo_screenvisual(self): """Return one of the strings directcolor, grayscale, pseudocolor, staticcolor, staticgray, or truecolor for the default colormodel of this screen.""" return self.tk.call('winfo', 'screenvisual', self._w)
[ "def", "winfo_screenvisual", "(", "self", ")", ":", "return", "self", ".", "tk", ".", "call", "(", "'winfo'", ",", "'screenvisual'", ",", "self", ".", "_w", ")" ]
https://github.com/python/cpython/blob/e13cdca0f5224ec4e23bdd04bb3120506964bc8b/Lib/tkinter/__init__.py#L1241-L1245
pgjones/quart
68a5bc4e98a07d903e7bfe148c1b7faa260f4334
src/quart/sessions.py
python
SessionInterface.get_cookie_name
(self, app: "Quart")
return app.session_cookie_name
Helper method to return the Cookie Name for the App.
Helper method to return the Cookie Name for the App.
[ "Helper", "method", "to", "return", "the", "Cookie", "Name", "for", "the", "App", "." ]
def get_cookie_name(self, app: "Quart") -> str: """Helper method to return the Cookie Name for the App.""" return app.session_cookie_name
[ "def", "get_cookie_name", "(", "self", ",", "app", ":", "\"Quart\"", ")", "->", "str", ":", "return", "app", ".", "session_cookie_name" ]
https://github.com/pgjones/quart/blob/68a5bc4e98a07d903e7bfe148c1b7faa260f4334/src/quart/sessions.py#L132-L134
IntelAI/models
1d7a53ccfad3e6f0e7378c9e3c8840895d63df8c
models/language_translation/tensorflow/bert/inference/fp32/tokenization.py
python
convert_by_vocab
(vocab, items)
return output
Converts a sequence of [tokens|ids] using the vocab.
Converts a sequence of [tokens|ids] using the vocab.
[ "Converts", "a", "sequence", "of", "[", "tokens|ids", "]", "using", "the", "vocab", "." ]
def convert_by_vocab(vocab, items): """Converts a sequence of [tokens|ids] using the vocab.""" output = [] for item in items: output.append(vocab[item]) return output
[ "def", "convert_by_vocab", "(", "vocab", ",", "items", ")", ":", "output", "=", "[", "]", "for", "item", "in", "items", ":", "output", ".", "append", "(", "vocab", "[", "item", "]", ")", "return", "output" ]
https://github.com/IntelAI/models/blob/1d7a53ccfad3e6f0e7378c9e3c8840895d63df8c/models/language_translation/tensorflow/bert/inference/fp32/tokenization.py#L136-L141
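A tiny usage sketch: the same lookup works in both directions depending on which mapping is passed in (the vocabulary and ids below are made up for illustration, not the real BERT vocab).

def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab."""
    return [vocab[item] for item in items]

vocab = {"[CLS]": 1, "hello": 2, "[SEP]": 3}
inv_vocab = {v: k for k, v in vocab.items()}
print(convert_by_vocab(vocab, ["[CLS]", "hello", "[SEP]"]))   # [1, 2, 3]
print(convert_by_vocab(inv_vocab, [1, 2, 3]))                 # ['[CLS]', 'hello', '[SEP]']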
clips/pattern
d25511f9ca7ed9356b801d8663b8b5168464e68f
pattern/vector/__init__.py
python
SVM._extension
(self)
return self._svm.libsvm.libsvm
Yields the extension module object, e.g., pattern/vector/svm/3.17/libsvm-mac64.so.
Yields the extension module object, e.g., pattern/vector/svm/3.17/libsvm-mac64.so.
[ "Yields", "the", "extension", "module", "object", "e", ".", "g", ".", "pattern", "/", "vector", "/", "svm", "/", "3", ".", "17", "/", "libsvm", "-", "mac64", ".", "so", "." ]
def _extension(self): """ Yields the extension module object, e.g., pattern/vector/svm/3.17/libsvm-mac64.so. """ if self.extension == LIBLINEAR: return self._svm.liblinear.liblinear return self._svm.libsvm.libsvm
[ "def", "_extension", "(", "self", ")", ":", "if", "self", ".", "extension", "==", "LIBLINEAR", ":", "return", "self", ".", "_svm", ".", "liblinear", ".", "liblinear", "return", "self", ".", "_svm", ".", "libsvm", ".", "libsvm" ]
https://github.com/clips/pattern/blob/d25511f9ca7ed9356b801d8663b8b5168464e68f/pattern/vector/__init__.py#L3461-L3467
WerWolv/EdiZon_CheatsConfigsAndScripts
d16d36c7509c01dca770f402babd83ff2e9ae6e7
Scripts/lib/python3.5/pydoc.py
python
resolve
(thing, forceload=0)
Given an object or a path to an object, get the object and its name.
Given an object or a path to an object, get the object and its name.
[ "Given", "an", "object", "or", "a", "path", "to", "an", "object", "get", "the", "object", "and", "its", "name", "." ]
def resolve(thing, forceload=0): """Given an object or a path to an object, get the object and its name.""" if isinstance(thing, str): object = locate(thing, forceload) if object is None: raise ImportError('''\ No Python documentation found for %r. Use help() to get the interactive help utility. Use help(str) for help on the str class.''' % thing) return object, thing else: name = getattr(thing, '__name__', None) return thing, name if isinstance(name, str) else None
[ "def", "resolve", "(", "thing", ",", "forceload", "=", "0", ")", ":", "if", "isinstance", "(", "thing", ",", "str", ")", ":", "object", "=", "locate", "(", "thing", ",", "forceload", ")", "if", "object", "is", "None", ":", "raise", "ImportError", "("...
https://github.com/WerWolv/EdiZon_CheatsConfigsAndScripts/blob/d16d36c7509c01dca770f402babd83ff2e9ae6e7/Scripts/lib/python3.5/pydoc.py#L1589-L1601
whyliam/whyliam.workflows.youdao
2dfa7f1de56419dab1c2e70c1a27e5e13ba25a5c
urllib3/packages/rfc3986/_mixin.py
python
URIMixin.authority_is_valid
(self, require=False)
return validators.authority_is_valid( self.authority, host=self.host, require=require )
Determine if the authority component is valid. .. deprecated:: 1.1.0 Use the :class:`~rfc3986.validators.Validator` object instead. :param bool require: Set to ``True`` to require the presence of this component. :returns: ``True`` if the authority is valid. ``False`` otherwise. :rtype: bool
Determine if the authority component is valid.
[ "Determine", "if", "the", "authority", "component", "is", "valid", "." ]
def authority_is_valid(self, require=False): """Determine if the authority component is valid. .. deprecated:: 1.1.0 Use the :class:`~rfc3986.validators.Validator` object instead. :param bool require: Set to ``True`` to require the presence of this component. :returns: ``True`` if the authority is valid. ``False`` otherwise. :rtype: bool """ warnings.warn( "Please use rfc3986.validators.Validator instead. " "This method will be eventually removed.", DeprecationWarning, ) try: self.authority_info() except exc.InvalidAuthority: return False return validators.authority_is_valid( self.authority, host=self.host, require=require )
[ "def", "authority_is_valid", "(", "self", ",", "require", "=", "False", ")", ":", "warnings", ".", "warn", "(", "\"Please use rfc3986.validators.Validator instead. \"", "\"This method will be eventually removed.\"", ",", "DeprecationWarning", ",", ")", "try", ":", "self",...
https://github.com/whyliam/whyliam.workflows.youdao/blob/2dfa7f1de56419dab1c2e70c1a27e5e13ba25a5c/urllib3/packages/rfc3986/_mixin.py#L130-L156
clinton-hall/nzbToMedia
27669389216902d1085660167e7bda0bd8527ecf
libs/common/subliminal/utils.py
python
hash_thesubdb
(video_path)
return hashlib.md5(data).hexdigest()
Compute a hash using TheSubDB's algorithm. :param str video_path: path of the video. :return: the hash. :rtype: str
Compute a hash using TheSubDB's algorithm.
[ "Compute", "a", "hash", "using", "TheSubDB", "s", "algorithm", "." ]
def hash_thesubdb(video_path): """Compute a hash using TheSubDB's algorithm. :param str video_path: path of the video. :return: the hash. :rtype: str """ readsize = 64 * 1024 if os.path.getsize(video_path) < readsize: return with open(video_path, 'rb') as f: data = f.read(readsize) f.seek(-readsize, os.SEEK_END) data += f.read(readsize) return hashlib.md5(data).hexdigest()
[ "def", "hash_thesubdb", "(", "video_path", ")", ":", "readsize", "=", "64", "*", "1024", "if", "os", ".", "path", ".", "getsize", "(", "video_path", ")", "<", "readsize", ":", "return", "with", "open", "(", "video_path", ",", "'rb'", ")", "as", "f", ...
https://github.com/clinton-hall/nzbToMedia/blob/27669389216902d1085660167e7bda0bd8527ecf/libs/common/subliminal/utils.py#L39-L55
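A minimal calling sketch for the hash above: the result is the 32-character MD5 hex digest of the first and last 64 KiB of the file. The path below is a placeholder; any file of at least 64 KiB works.

import hashlib
import os

def hash_thesubdb(video_path):
    """MD5 of the first and last 64 KiB of the file (as shown above)."""
    readsize = 64 * 1024
    if os.path.getsize(video_path) < readsize:
        return None
    with open(video_path, 'rb') as f:
        data = f.read(readsize)
        f.seek(-readsize, os.SEEK_END)
        data += f.read(readsize)
    return hashlib.md5(data).hexdigest()

print(hash_thesubdb('/path/to/video.mkv'))   # e.g. '9f2c…' (32 hex chars)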
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/xml/sax/expatreader.py
python
ExpatParser.feed
(self, data, isFinal = 0)
[]
def feed(self, data, isFinal = 0): if not self._parsing: self.reset() self._parsing = 1 self._cont_handler.startDocument() try: # The isFinal parameter is internal to the expat reader. # If it is set to true, expat will check validity of the entire # document. When feeding chunks, they are not normally final - # except when invoked from close. self._parser.Parse(data, isFinal) except expat.error, e: exc = SAXParseException(expat.ErrorString(e.code), e, self) # FIXME: when to invoke error()? self._err_handler.fatalError(exc)
[ "def", "feed", "(", "self", ",", "data", ",", "isFinal", "=", "0", ")", ":", "if", "not", "self", ".", "_parsing", ":", "self", ".", "reset", "(", ")", "self", ".", "_parsing", "=", "1", "self", ".", "_cont_handler", ".", "startDocument", "(", ")",...
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/xml/sax/expatreader.py#L202-L217
ynhacler/RedKindle
7c970920dc840f869e38cbda480d630cc2e7b200
bs4/element.py
python
Tag.select
(self, selector, _candidate_generator=None)
return current_context
Perform a CSS selection operation on the current element.
Perform a CSS selection operation on the current element.
[ "Perform", "a", "CSS", "selection", "operation", "on", "the", "current", "element", "." ]
def select(self, selector, _candidate_generator=None): """Perform a CSS selection operation on the current element.""" tokens = selector.split() current_context = [self] if tokens[-1] in self._selector_combinators: raise ValueError( 'Final combinator "%s" is missing an argument.' % tokens[-1]) if self._select_debug: print 'Running CSS selector "%s"' % selector for index, token in enumerate(tokens): if self._select_debug: print ' Considering token "%s"' % token recursive_candidate_generator = None tag_name = None if tokens[index-1] in self._selector_combinators: # This token was consumed by the previous combinator. Skip it. if self._select_debug: print ' Token was consumed by the previous combinator.' continue # Each operation corresponds to a checker function, a rule # for determining whether a candidate matches the # selector. Candidates are generated by the active # iterator. checker = None m = self.attribselect_re.match(token) if m is not None: # Attribute selector tag_name, attribute, operator, value = m.groups() checker = self._attribute_checker(operator, attribute, value) elif '#' in token: # ID selector tag_name, tag_id = token.split('#', 1) def id_matches(tag): return tag.get('id', None) == tag_id checker = id_matches elif '.' in token: # Class selector tag_name, klass = token.split('.', 1) classes = set(klass.split('.')) def classes_match(candidate): return classes.issubset(candidate.get('class', [])) checker = classes_match elif ':' in token: # Pseudo-class tag_name, pseudo = token.split(':', 1) if tag_name == '': raise ValueError( "A pseudo-class must be prefixed with a tag name.") pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo) found = [] if pseudo_attributes is not None: pseudo_type, pseudo_value = pseudo_attributes.groups() if pseudo_type == 'nth-of-type': try: pseudo_value = int(pseudo_value) except: raise NotImplementedError( 'Only numeric values are currently supported for the nth-of-type pseudo-class.') if pseudo_value < 1: raise ValueError( 'nth-of-type pseudo-class value must be at least 1.') class Counter(object): def __init__(self, destination): self.count = 0 self.destination = destination def nth_child_of_type(self, tag): self.count += 1 if self.count == self.destination: return True if self.count > self.destination: # Stop the generator that's sending us # these things. raise StopIteration() return False checker = Counter(pseudo_value).nth_child_of_type else: raise NotImplementedError( 'Only the following pseudo-classes are implemented: nth-of-type.') elif token == '*': # Star selector -- matches everything pass elif token == '>': # Run the next token as a CSS selector against the # direct children of each tag in the current context. recursive_candidate_generator = lambda tag: tag.children elif token == '~': # Run the next token as a CSS selector against the # siblings of each tag in the current context. recursive_candidate_generator = lambda tag: tag.next_siblings elif token == '+': # For each tag in the current context, run the next # token as a CSS selector against the tag's next # sibling that's a tag. def next_tag_sibling(tag): yield tag.find_next_sibling(True) recursive_candidate_generator = next_tag_sibling elif self.tag_name_re.match(token): # Just a tag name. tag_name = token else: raise ValueError( 'Unsupported or invalid CSS selector: "%s"' % token) if recursive_candidate_generator: # This happens when the selector looks like "> foo". 
# # The generator calls select() recursively on every # member of the current context, passing in a different # candidate generator and a different selector. # # In the case of "> foo", the candidate generator is # one that yields a tag's direct children (">"), and # the selector is "foo". next_token = tokens[index+1] def recursive_select(tag): if self._select_debug: print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs) print '-' * 40 for i in tag.select(next_token, recursive_candidate_generator): if self._select_debug: print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs) yield i if self._select_debug: print '-' * 40 _use_candidate_generator = recursive_select elif _candidate_generator is None: # By default, a tag's candidates are all of its # children. If tag_name is defined, only yield tags # with that name. if self._select_debug: if tag_name: check = "[any]" else: check = tag_name print ' Default candidate generator, tag name="%s"' % check if self._select_debug: # This is redundant with later code, but it stops # a bunch of bogus tags from cluttering up the # debug log. def default_candidate_generator(tag): for child in tag.descendants: if not isinstance(child, Tag): continue if tag_name and not child.name == tag_name: continue yield child _use_candidate_generator = default_candidate_generator else: _use_candidate_generator = lambda tag: tag.descendants else: _use_candidate_generator = _candidate_generator new_context = [] new_context_ids = set([]) for tag in current_context: if self._select_debug: print " Running candidate generator on %s %s" % ( tag.name, repr(tag.attrs)) for candidate in _use_candidate_generator(tag): if not isinstance(candidate, Tag): continue if tag_name and candidate.name != tag_name: continue if checker is not None: try: result = checker(candidate) except StopIteration: # The checker has decided we should no longer # run the generator. break if checker is None or result: if self._select_debug: print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs)) if id(candidate) not in new_context_ids: # If a tag matches a selector more than once, # don't include it in the context more than once. new_context.append(candidate) new_context_ids.add(id(candidate)) elif self._select_debug: print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs)) current_context = new_context if self._select_debug: print "Final verdict:" for i in current_context: print " %s %s" % (i.name, i.attrs) return current_context
[ "def", "select", "(", "self", ",", "selector", ",", "_candidate_generator", "=", "None", ")", ":", "tokens", "=", "selector", ".", "split", "(", ")", "current_context", "=", "[", "self", "]", "if", "tokens", "[", "-", "1", "]", "in", "self", ".", "_s...
https://github.com/ynhacler/RedKindle/blob/7c970920dc840f869e38cbda480d630cc2e7b200/bs4/element.py#L1191-L1386
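A small usage sketch of the selector grammar this method supports, written against the installed BeautifulSoup package rather than the vendored copy above (the HTML is invented): tag names, #id, .class, attribute selectors and the >, ~, + combinators handled in the implementation.

from bs4 import BeautifulSoup

html = """
<div id="menu">
  <a class="item" href="/home">Home</a>
  <a class="item active" href="/docs">Docs</a>
</div>
"""
soup = BeautifulSoup(html, "html.parser")
print([a["href"] for a in soup.select("div#menu > a.item")])   # ['/home', '/docs']
print([a.text for a in soup.select('a[href="/docs"]')])        # ['Docs']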