Dataset columns (name: type, observed value range):
nwo: string, length 5 – 106
sha: string, length 40 – 40
path: string, length 4 – 174
language: stringclasses, 1 value
identifier: string, length 1 – 140
parameters: string, length 0 – 87.7k
argument_list: stringclasses, 1 value
return_statement: string, length 0 – 426k
docstring: string, length 0 – 64.3k
docstring_summary: string, length 0 – 26.3k
docstring_tokens: list
function: string, length 18 – 4.83M
function_tokens: list
url: string, length 83 – 304
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/polys/polytools.py
python
lcm
(f, g=None, *gens, **args)
Compute LCM of ``f`` and ``g``. Examples ======== >>> from sympy import lcm >>> from sympy.abc import x >>> lcm(x**2 - 1, x**2 - 3*x + 2) x**3 - 2*x**2 - x + 2
Compute LCM of ``f`` and ``g``.
[ "Compute", "LCM", "of", "f", "and", "g", "." ]
def lcm(f, g=None, *gens, **args):
    """
    Compute LCM of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import lcm
    >>> from sympy.abc import x

    >>> lcm(x**2 - 1, x**2 - 3*x + 2)
    x**3 - 2*x**2 - x + 2

    """
    if hasattr(f, '__iter__'):
        if g is not None:
            gens = (g,) + gens

        return lcm_list(f, *gens, **args)
    elif g is None:
        raise TypeError("lcm() takes 2 arguments or a sequence of arguments")

    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            return domain.to_sympy(domain.lcm(a, b))
        except NotImplementedError:
            raise ComputationFailed('lcm', 2, exc)

    result = F.lcm(G)

    if not opt.polys:
        return result.as_expr()
    else:
        return result
[ "def", "lcm", "(", "f", ",", "g", "=", "None", ",", "*", "gens", ",", "*", "*", "args", ")", ":", "if", "hasattr", "(", "f", ",", "'__iter__'", ")", ":", "if", "g", "is", "not", "None", ":", "gens", "=", "(", "g", ",", ")", "+", "gens", "...
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/polys/polytools.py#L4873-L4912
JetBrains/python-skeletons
95ad24b666e475998e5d1cc02ed53a2188036167
builtins.py
python
complex.__rpow__
(self, y)
return 0j
x to the power y. :type y: numbers.Number :rtype: complex
x to the power y.
[ "x", "to", "the", "power", "y", "." ]
def __rpow__(self, y):
    """x to the power y.

    :type y: numbers.Number
    :rtype: complex
    """
    return 0j
[ "def", "__rpow__", "(", "self", ",", "y", ")", ":", "return", "0j" ]
https://github.com/JetBrains/python-skeletons/blob/95ad24b666e475998e5d1cc02ed53a2188036167/builtins.py#L948-L954
dropbox/stone
b7b64320631b3a4d2f10681dca64e0718ebe68ee
stone/ir/data_types.py
python
doc_unwrap
(raw_doc)
return docstring
Applies two transformations to raw_doc: 1. N consecutive newlines are converted into N-1 newlines. 2. A lone newline is converted to a space, which basically unwraps text. Returns a new string, or None if the input was None.
Applies two transformations to raw_doc: 1. N consecutive newlines are converted into N-1 newlines. 2. A lone newline is converted to a space, which basically unwraps text.
[ "Applies", "two", "transformations", "to", "raw_doc", ":", "1", ".", "N", "consecutive", "newlines", "are", "converted", "into", "N", "-", "1", "newlines", ".", "2", ".", "A", "lone", "newline", "is", "converted", "to", "a", "space", "which", "basically", ...
def doc_unwrap(raw_doc):
    """
    Applies two transformations to raw_doc:
    1. N consecutive newlines are converted into N-1 newlines.
    2. A lone newline is converted to a space, which basically unwraps text.

    Returns a new string, or None if the input was None.
    """
    if raw_doc is None:
        return None

    docstring = ''
    consecutive_newlines = 0
    # Remove all leading and trailing whitespace in the documentation block
    for c in raw_doc.strip():
        if c == '\n':
            consecutive_newlines += 1
            if consecutive_newlines > 1:
                docstring += c
        else:
            if consecutive_newlines == 1:
                docstring += ' '
            consecutive_newlines = 0
            docstring += c
    return docstring
[ "def", "doc_unwrap", "(", "raw_doc", ")", ":", "if", "raw_doc", "is", "None", ":", "return", "None", "docstring", "=", "''", "consecutive_newlines", "=", "0", "# Remove all leading and trailing whitespace in the documentation block", "for", "c", "in", "raw_doc", ".", ...
https://github.com/dropbox/stone/blob/b7b64320631b3a4d2f10681dca64e0718ebe68ee/stone/ir/data_types.py#L556-L579
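A minimal usage sketch for doc_unwrap (the sample strings are invented; the function above is assumed to be in scope):

wrapped = "This line was\nhard-wrapped in the source.\n\nThis is a new paragraph."
unwrapped = doc_unwrap(wrapped)
# Lone newlines become spaces; the double newline collapses to a single one.
print(unwrapped)
# -> "This line was hard-wrapped in the source.\nThis is a new paragraph."
assert doc_unwrap(None) is None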
QQuick/Transcrypt
68b4b71d4ff3e4d58281b24e9e2dcc9fc766e822
transcrypt/modules/datetime/__init__.py
python
datetime.utcnow
(cls)
return cls.utcfromtimestamp(t)
Construct a UTC datetime from time.time().
Construct a UTC datetime from time.time().
[ "Construct", "a", "UTC", "datetime", "from", "time", ".", "time", "()", "." ]
def utcnow(cls):
    """Construct a UTC datetime from time.time()."""
    t = _time.time()
    return cls.utcfromtimestamp(t)
[ "def", "utcnow", "(", "cls", ")", ":", "t", "=", "_time", ".", "time", "(", ")", "return", "cls", ".", "utcfromtimestamp", "(", "t", ")" ]
https://github.com/QQuick/Transcrypt/blob/68b4b71d4ff3e4d58281b24e9e2dcc9fc766e822/transcrypt/modules/datetime/__init__.py#L1365-L1368
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
mx.graalpython/mx_graalpython_benchmark.py
python
PythonJavaEmbeddingBenchmarkSuite.__init__
(self, name, bench_path, benchmarks)
[]
def __init__(self, name, bench_path, benchmarks):
    super(PythonJavaEmbeddingBenchmarkSuite, self).__init__(name, benchmarks)
    self._bench_path = bench_path
[ "def", "__init__", "(", "self", ",", "name", ",", "bench_path", ",", "benchmarks", ")", ":", "super", "(", "PythonJavaEmbeddingBenchmarkSuite", ",", "self", ")", ".", "__init__", "(", "name", ",", "benchmarks", ")", "self", ".", "_bench_path", "=", "bench_pa...
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/mx.graalpython/mx_graalpython_benchmark.py#L882-L884
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/plat-sunos5/STROPTS.py
python
hat_setrefmod
(pp)
return (hat_page_setattr(pp, P_REF|P_MOD))
[]
def hat_setrefmod(pp): return (hat_page_setattr(pp, P_REF|P_MOD))
[ "def", "hat_setrefmod", "(", "pp", ")", ":", "return", "(", "hat_page_setattr", "(", "pp", ",", "P_REF", "|", "P_MOD", ")", ")" ]
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/plat-sunos5/STROPTS.py#L1519-L1519
hatRiot/zarp
2e772350a01c2aeed3f4da9685cd0cc5d6b3ecad
src/lib/scapy/layers/dot11.py
python
Dot11Elt.mysummary
(self)
[]
def mysummary(self):
    if self.ID == 0:
        return "SSID=%s"%repr(self.info),[Dot11]
    else:
        return ""
[ "def", "mysummary", "(", "self", ")", ":", "if", "self", ".", "ID", "==", "0", ":", "return", "\"SSID=%s\"", "%", "repr", "(", "self", ".", "info", ")", ",", "[", "Dot11", "]", "else", ":", "return", "\"\"" ]
https://github.com/hatRiot/zarp/blob/2e772350a01c2aeed3f4da9685cd0cc5d6b3ecad/src/lib/scapy/layers/dot11.py#L238-L242
golismero/golismero
7d605b937e241f51c1ca4f47b20f755eeefb9d76
golismero/api/data/resource/url.py
python
URL.__init__
(self, url, method = "GET", post_params = None, referer = None)
:param url: Absolute URL. :type url: str :param method: HTTP method. :type method: str :param post_params: POST parameters or raw data. :type post_params: dict(str -> str) | str :param referer: Referrer URL. :type referer: str :raises ValueError: Currently, relative URLs are not allowed.
:param url: Absolute URL. :type url: str
[ ":", "param", "url", ":", "Absolute", "URL", ".", ":", "type", "url", ":", "str" ]
def __init__(self, url, method = "GET", post_params = None, referer = None):
    """
    :param url: Absolute URL.
    :type url: str

    :param method: HTTP method.
    :type method: str

    :param post_params: POST parameters or raw data.
    :type post_params: dict(str -> str) | str

    :param referer: Referrer URL.
    :type referer: str

    :raises ValueError: Currently, relative URLs are not allowed.
    """

    # Validate the arguments.
    if method:
        method = to_utf8(method)
    else:
        method = "GET"
    if referer:
        referer = to_utf8(referer)
    else:
        referer = None
    if not isinstance(method, str):
        raise TypeError("Expected string, got %r instead" % type(method))
    if post_params is not None and not isinstance(post_params, dict):
        raise TypeError("Expected dict, got %r instead" % type(post_params))
    if referer is not None and not isinstance(referer, str):
        raise TypeError("Expected string, got %r instead" % type(referer))

    if post_params:
        if hasattr(post_params, "iteritems"):
            post_params = {
                to_utf8(k): to_utf8(v)
                for k,v in post_params.iteritems()
            }
            post_data = '&'.join(
                '%s=%s' % ( quote(k, safe=''), quote(v, safe='') )
                for (k, v) in sorted(post_params.iteritems())
            )
        else:
            post_data = to_utf8(post_params)
            post_params = None
    else:
        post_data = None
        post_params = None

    # Save the properties.
    self.__method = method
    self.__post_data = post_data
    self.__post_params = post_params
    self.__referer = parse_url(referer).url if referer else None

    # Call the parent constructor.
    super(URL, self).__init__(url)

    # Increment the crawling depth by one.
    self.depth += 1
[ "def", "__init__", "(", "self", ",", "url", ",", "method", "=", "\"GET\"", ",", "post_params", "=", "None", ",", "referer", "=", "None", ")", ":", "# Validate the arguments.", "if", "method", ":", "method", "=", "to_utf8", "(", "method", ")", "else", ":"...
https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/golismero/api/data/resource/url.py#L177-L235
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/config/__init__.py
python
_validate_pillar_roots
(pillar_roots)
return _normalize_roots(pillar_roots)
If the pillar_roots option has a key that is None then we will error out, just replace it with an empty list
If the pillar_roots option has a key that is None then we will error out, just replace it with an empty list
[ "If", "the", "pillar_roots", "option", "has", "a", "key", "that", "is", "None", "then", "we", "will", "error", "out", "just", "replace", "it", "with", "an", "empty", "list" ]
def _validate_pillar_roots(pillar_roots):
    """
    If the pillar_roots option has a key that is None
    then we will error out, just replace it with an empty list
    """
    if not isinstance(pillar_roots, dict):
        log.warning(
            "The pillar_roots parameter is not properly formatted, using defaults"
        )
        return {"base": _expand_glob_path([salt.syspaths.BASE_PILLAR_ROOTS_DIR])}
    return _normalize_roots(pillar_roots)
[ "def", "_validate_pillar_roots", "(", "pillar_roots", ")", ":", "if", "not", "isinstance", "(", "pillar_roots", ",", "dict", ")", ":", "log", ".", "warning", "(", "\"The pillar_roots parameter is not properly formatted, using defaults\"", ")", "return", "{", "\"base\"",...
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/config/__init__.py#L1733-L1743
masyagin1998/robin
6591a02f1bb71185480303aa99e83f43e801296e
src/dataset/stsl-download.py
python
save_img
(link: str, path: str)
Get image via HTTP and save it asynchronously.
Get image via HTTP and save it asynchronously.
[ "Get", "image", "via", "HTTP", "and", "save", "it", "asynchronously", "." ]
async def save_img(link: str, path: str):
    """Get image via HTTP and save it asynchronously."""
    cv2.imwrite(path, cv2.imdecode(np.asarray(bytearray(urlopen(link).read()), dtype='uint8'),
                                   cv2.IMREAD_COLOR))
[ "async", "def", "save_img", "(", "link", ":", "str", ",", "path", ":", "str", ")", ":", "cv2", ".", "imwrite", "(", "path", ",", "cv2", ".", "imdecode", "(", "np", ".", "asarray", "(", "bytearray", "(", "urlopen", "(", "link", ")", ".", "read", "...
https://github.com/masyagin1998/robin/blob/6591a02f1bb71185480303aa99e83f43e801296e/src/dataset/stsl-download.py#L14-L16
ctxis/CAPE
dae9fa6a254ecdbabeb7eb0d2389fa63722c1e82
lib/cuckoo/common/peepdf/PDFUtils.py
python
encodeName
(name)
return (0,'/'+encodedName)
Encode the given PDF name @param name: A PDFName string to encode @return: A tuple (status,statusContent), where statusContent is the encoded PDF name in case status = 0 or an error in case status = -1
Encode the given PDF name
[ "Encode", "the", "given", "PDF", "name" ]
def encodeName(name):
    '''
    Encode the given PDF name

    @param name: A PDFName string to encode
    @return: A tuple (status,statusContent), where statusContent is the encoded PDF name in case status = 0 or an error in case status = -1
    '''
    encodedName = ''
    if name[0] == '/':
        name = name[1:]
    for char in name:
        if char == '\0':
            encodedName += char
        else:
            try:
                hex = '%x' % ord(char)
                encodedName += '#'+hex
            except:
                return (-1,'Error encoding name')
    return (0,'/'+encodedName)
[ "def", "encodeName", "(", "name", ")", ":", "encodedName", "=", "''", "if", "name", "[", "0", "]", "==", "'/'", ":", "name", "=", "name", "[", "1", ":", "]", "for", "char", "in", "name", ":", "if", "char", "==", "'\\0'", ":", "encodedName", "+=",...
https://github.com/ctxis/CAPE/blob/dae9fa6a254ecdbabeb7eb0d2389fa63722c1e82/lib/cuckoo/common/peepdf/PDFUtils.py#L104-L123
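For illustration, a small sketch of what the hex encoding produces (assuming encodeName above is in scope; the sample name is invented):

status, encoded = encodeName('/Name')
# Every character after the leading '/' is replaced by '#' plus its hex code.
print(status, encoded)   # -> 0 /#4e#61#6d#65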
leonsio/YAHM
949b748b22fe2da0f3aff5b5681147c18c06cb03
share/tools/ubi_reader/ubi/block/sort.py
python
list_by_list
(blist, slist)
return slist_blocks
Sort list of block indexes, by another list. Argument: List:blist -- List of block indexes. List:slist -- Secondary list of blocks. Returns: List -- List of block indexes matching slist from blist.
Sort list of block indexes, by another list.
[ "Sort", "list", "of", "block", "indexes", "by", "another", "list", "." ]
def list_by_list(blist, slist):
    """Sort list of block indexes, by another list.

    Argument:
    List:blist  -- List of block indexes.
    List:slist  -- Secondary list of blocks.

    Returns:
    List        -- List of block indexes matching slist from blist.
    """

    slist_blocks = []

    for block in blist:
        if block in slist:
            slist_blocks.append(block)

    return slist_blocks
[ "def", "list_by_list", "(", "blist", ",", "slist", ")", ":", "slist_blocks", "=", "[", "]", "for", "block", "in", "blist", ":", "if", "block", "in", "slist", ":", "slist_blocks", ".", "append", "(", "block", ")", "return", "slist_blocks" ]
https://github.com/leonsio/YAHM/blob/949b748b22fe2da0f3aff5b5681147c18c06cb03/share/tools/ubi_reader/ubi/block/sort.py#L20-L35
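A quick usage sketch for list_by_list (sample lists invented; the function above is assumed to be in scope):

blist = [0, 1, 2, 3, 4]
slist = [4, 2, 0]
# The result keeps blist's order, but only for blocks that also appear in slist.
print(list_by_list(blist, slist))   # -> [0, 2, 4]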
secdev/scapy
65089071da1acf54622df0b4fa7fc7673d47d3cd
scapy/contrib/automotive/gm/gmlanutils.py
python
GMLAN_InitDiagnostics
( sock, # type: SuperSocket broadcast_socket=None, # type: Optional[SuperSocket] timeout=None, # type: Optional[int] verbose=None, # type: Optional[bool] retry=0 # type: int )
return False
Send messages to put an ECU into diagnostic/programming state. :param sock: socket for communication. :param broadcast_socket: socket for broadcasting. If provided some message will be sent as broadcast. Recommended when used on a network with several ECUs. :param timeout: timeout for sending, receiving or sniffing packages. :param verbose: set verbosity level :param retry: number of retries in case of failure. :return: True on success else False
Send messages to put an ECU into diagnostic/programming state.
[ "Send", "messages", "to", "put", "an", "ECU", "into", "diagnostic", "/", "programming", "state", "." ]
def GMLAN_InitDiagnostics(
        sock,  # type: SuperSocket
        broadcast_socket=None,  # type: Optional[SuperSocket]
        timeout=None,  # type: Optional[int]
        verbose=None,  # type: Optional[bool]
        retry=0  # type: int
):
    # type: (...) -> bool
    """ Send messages to put an ECU into diagnostic/programming state.

    :param sock: socket for communication.
    :param broadcast_socket: socket for broadcasting. If provided some message
                             will be sent as broadcast. Recommended when used
                             on a network with several ECUs.
    :param timeout: timeout for sending, receiving or sniffing packages.
    :param verbose: set verbosity level
    :param retry: number of retries in case of failure.
    :return: True on success else False
    """
    # Helper function
    def _send_and_check_response(sock, req, timeout, verbose):
        # type: (SuperSocket, Packet, Optional[int], Optional[bool]) -> bool
        if verbose:
            print("Sending %s" % repr(req))
        resp = sock.sr1(req, timeout=timeout, verbose=False)
        return _check_response(resp, verbose)

    if verbose is None:
        verbose = conf.verb > 0
    retry = abs(retry)

    while retry >= 0:
        retry -= 1

        # DisableNormalCommunication
        p = GMLAN(service="DisableNormalCommunication")
        if broadcast_socket is None:
            if not _send_and_check_response(sock, p, timeout, verbose):
                continue
        else:
            if verbose:
                print("Sending %s as broadcast" % repr(p))
            broadcast_socket.send(p)
        time.sleep(0.05)

        # ReportProgrammedState
        p = GMLAN(service="ReportProgrammingState")
        if not _send_and_check_response(sock, p, timeout, verbose):
            continue

        # ProgrammingMode requestProgramming
        p = GMLAN() / GMLAN_PM(subfunction="requestProgrammingMode")
        if not _send_and_check_response(sock, p, timeout, verbose):
            continue
        time.sleep(0.05)

        # InitiateProgramming enableProgramming
        # No response expected
        p = GMLAN() / GMLAN_PM(subfunction="enableProgrammingMode")
        if verbose:
            print("Sending %s" % repr(p))
        sock.sr1(p, timeout=0.001, verbose=False)
        return True
    return False
[ "def", "GMLAN_InitDiagnostics", "(", "sock", ",", "# type: SuperSocket", "broadcast_socket", "=", "None", ",", "# type: Optional[SuperSocket]", "timeout", "=", "None", ",", "# type: Optional[int]", "verbose", "=", "None", ",", "# type: Optional[bool]", "retry", "=", "0"...
https://github.com/secdev/scapy/blob/65089071da1acf54622df0b4fa7fc7673d47d3cd/scapy/contrib/automotive/gm/gmlanutils.py#L75-L137
LiMeng95/MultiPoseNet.pytorch
7b548fc20ae5b0141a2113669e8606103fb7e6d4
network/fpn.py
python
FPN._upsample_add
(self, x, y)
return F.upsample(x, size=(H,W), mode='nearest', align_corners=None) + y
Upsample and add two feature maps. Args: x: top feature map to be upsampled. y: lateral feature map. Returns: added feature map.
Upsample and add two feature maps.
[ "Upsample", "and", "add", "two", "feature", "maps", "." ]
def _upsample_add(self, x, y):
    '''Upsample and add two feature maps.

    Args:
      x: top feature map to be upsampled.
      y: lateral feature map.

    Returns:
      added feature map.
    '''
    _,_,H,W = y.size()
    return F.upsample(x, size=(H,W), mode='nearest', align_corners=None) + y
[ "def", "_upsample_add", "(", "self", ",", "x", ",", "y", ")", ":", "_", ",", "_", ",", "H", ",", "W", "=", "y", ".", "size", "(", ")", "return", "F", ".", "upsample", "(", "x", ",", "size", "=", "(", "H", ",", "W", ")", ",", "mode", "=", ...
https://github.com/LiMeng95/MultiPoseNet.pytorch/blob/7b548fc20ae5b0141a2113669e8606103fb7e6d4/network/fpn.py#L84-L95
ChineseGLUE/ChineseGLUE
1591b85cf5427c2ff60f718d359ecb71d2b44879
baselines/models/roberta/modeling.py
python
create_attention_mask_from_input_mask
(from_tensor, to_mask)
return mask
Create 3D attention mask from a 2D tensor mask. Args: from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. to_mask: int32 Tensor of shape [batch_size, to_seq_length]. Returns: float Tensor of shape [batch_size, from_seq_length, to_seq_length].
Create 3D attention mask from a 2D tensor mask.
[ "Create", "3D", "attention", "mask", "from", "a", "2D", "tensor", "mask", "." ]
def create_attention_mask_from_input_mask(from_tensor, to_mask):
  """Create 3D attention mask from a 2D tensor mask.

  Args:
    from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
    to_mask: int32 Tensor of shape [batch_size, to_seq_length].

  Returns:
    float Tensor of shape [batch_size, from_seq_length, to_seq_length].
  """
  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
  batch_size = from_shape[0]
  from_seq_length = from_shape[1]

  to_shape = get_shape_list(to_mask, expected_rank=2)
  to_seq_length = to_shape[1]

  to_mask = tf.cast(
      tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)

  # We don't assume that `from_tensor` is a mask (although it could be). We
  # don't actually care if we attend *from* padding tokens (only *to* padding)
  # tokens so we create a tensor of all ones.
  #
  # `broadcast_ones` = [batch_size, from_seq_length, 1]
  broadcast_ones = tf.ones(
      shape=[batch_size, from_seq_length, 1], dtype=tf.float32)

  # Here we broadcast along two dimensions to create the mask.
  mask = broadcast_ones * to_mask

  return mask
[ "def", "create_attention_mask_from_input_mask", "(", "from_tensor", ",", "to_mask", ")", ":", "from_shape", "=", "get_shape_list", "(", "from_tensor", ",", "expected_rank", "=", "[", "2", ",", "3", "]", ")", "batch_size", "=", "from_shape", "[", "0", "]", "fro...
https://github.com/ChineseGLUE/ChineseGLUE/blob/1591b85cf5427c2ff60f718d359ecb71d2b44879/baselines/models/roberta/modeling.py#L524-L555
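The broadcast trick in the function above can be checked outside TensorFlow; the following NumPy sketch (plain NumPy standing in for the tf ops, shapes and values invented) shows how a [batch, 1, to_len] mask times a [batch, from_len, 1] tensor of ones yields the [batch, from_len, to_len] attention mask:

import numpy as np

batch_size, from_seq_length, to_seq_length = 1, 3, 4
to_mask = np.array([[1, 1, 1, 0]], dtype=np.float32)            # [batch, to_len]
to_mask = to_mask.reshape(batch_size, 1, to_seq_length)         # [batch, 1, to_len]
broadcast_ones = np.ones((batch_size, from_seq_length, 1), np.float32)
mask = broadcast_ones * to_mask                                  # [batch, from_len, to_len]
print(mask.shape)   # (1, 3, 4); every query row may attend only to the first 3 keys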
mnemosyne-proj/mnemosyne
e39e364e56343437f2e485e0b06ca714de2f2d2e
mnemosyne/libmnemosyne/renderers/anki/utils.py
python
fmtFloat
(float_value, point=1)
return locale.format_string(fmt, float_value)
Return a string with decimal separator according to current locale
Return a string with decimal separator according to current locale
[ "Return", "a", "string", "with", "decimal", "separator", "according", "to", "current", "locale" ]
def fmtFloat(float_value, point=1):
    "Return a string with decimal separator according to current locale"
    fmt = '%' + "0.%(b)df" % {'b': point}
    return locale.format_string(fmt, float_value)
[ "def", "fmtFloat", "(", "float_value", ",", "point", "=", "1", ")", ":", "fmt", "=", "'%'", "+", "\"0.%(b)df\"", "%", "{", "'b'", ":", "point", "}", "return", "locale", ".", "format_string", "(", "fmt", ",", "float_value", ")" ]
https://github.com/mnemosyne-proj/mnemosyne/blob/e39e364e56343437f2e485e0b06ca714de2f2d2e/mnemosyne/libmnemosyne/renderers/anki/utils.py#L120-L123
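A usage sketch for fmtFloat (assumes the function above and its locale import are in scope; the de_DE locale is only an example and must be installed on the system):

import locale

print(fmtFloat(3.14159, point=2))              # -> '3.14' in the default C locale
# With a locale that uses a decimal comma:
# locale.setlocale(locale.LC_ALL, 'de_DE.UTF-8')
# fmtFloat(3.14159, point=2)                   # -> '3,14'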
ChenglongChen/kaggle-HomeDepot
55c1033d0af3b6cf2f033fe4bcf3e1e0ffda3445
Code/Chenglong/feature_basic.py
python
AttrHasIndoorOutdoor.transform_one
(self, obs, target, id)
return 0
obs is a list of attributes
obs is a list of attributes
[ "obs", "is", "a", "list", "of", "attributes" ]
def transform_one(self, obs, target, id):
    """obs is a list of attributes"""
    for lst in obs:
        if lst[0].find("indoor outdoor") != -1:
            return 1
    return 0
[ "def", "transform_one", "(", "self", ",", "obs", ",", "target", ",", "id", ")", ":", "for", "lst", "in", "obs", ":", "if", "lst", "[", "0", "]", ".", "find", "(", "\"indoor outdoor\"", ")", "!=", "-", "1", ":", "return", "1", "return", "0" ]
https://github.com/ChenglongChen/kaggle-HomeDepot/blob/55c1033d0af3b6cf2f033fe4bcf3e1e0ffda3445/Code/Chenglong/feature_basic.py#L363-L368
BEEmod/BEE2.4
02767f3cf476581789425ab308ca1bea978f6a74
src/precomp/brushLoc.py
python
_conv_key
(pos: _grid_keys)
return x, y, z
Convert the key given in [] to a grid-position, as a x,y,z tuple.
Convert the key given in [] to a grid-position, as a x,y,z tuple.
[ "Convert", "the", "key", "given", "in", "[]", "to", "a", "grid", "-", "position", "as", "a", "x", "y", "z", "tuple", "." ]
def _conv_key(pos: _grid_keys) -> tuple[float, float, float]:
    """Convert the key given in [] to a grid-position, as a x,y,z tuple."""
    # TODO: Slices are assumed to be int by typeshed.
    # type: ignore
    if isinstance(pos, slice):
        system, slice_pos = pos.start, pos.stop
        if system == 'world':
            return tuple(world_to_grid(Vec(slice_pos)))
        else:
            return tuple(slice_pos)
    x, y, z = pos
    return x, y, z
[ "def", "_conv_key", "(", "pos", ":", "_grid_keys", ")", "->", "tuple", "[", "float", ",", "float", ",", "float", "]", ":", "# TODO: Slices are assumed to be int by typeshed.", "# type: ignore", "if", "isinstance", "(", "pos", ",", "slice", ")", ":", "system", ...
https://github.com/BEEmod/BEE2.4/blob/02767f3cf476581789425ab308ca1bea978f6a74/src/precomp/brushLoc.py#L131-L142
mozilla/pontoon
d26999eea57902a30b5c15e9b77277fe7e76a60f
pontoon/batch/views.py
python
update_translation_memory
(changed_translation_pks, project, locale)
Update translation memory for a list of translations.
Update translation memory for a list of translations.
[ "Update", "translation", "memory", "for", "a", "list", "of", "translations", "." ]
def update_translation_memory(changed_translation_pks, project, locale):
    """Update translation memory for a list of translations."""
    memory_entries = [
        TranslationMemoryEntry(
            source=t.tm_source,
            target=t.tm_target,
            locale=locale,
            entity=t.entity,
            translation=t,
            project=project,
        )
        for t in (
            Translation.objects.filter(pk__in=changed_translation_pks).prefetch_related(
                "entity__resource"
            )
        )
    ]
    TranslationMemoryEntry.objects.bulk_create(memory_entries)
[ "def", "update_translation_memory", "(", "changed_translation_pks", ",", "project", ",", "locale", ")", ":", "memory_entries", "=", "[", "TranslationMemoryEntry", "(", "source", "=", "t", ".", "tm_source", ",", "target", "=", "t", ".", "tm_target", ",", "locale"...
https://github.com/mozilla/pontoon/blob/d26999eea57902a30b5c15e9b77277fe7e76a60f/pontoon/batch/views.py#L45-L62
feisuzhu/thbattle
ac0dee1b2d86de7664289cf432b157ef25427ba1
src/pyglet/gl/glu_info.py
python
GLUInfo.get_version
(self)
return self.version
Get the current GLU version. :return: the GLU version :rtype: str
Get the current GLU version.
[ "Get", "the", "current", "GLU", "version", "." ]
def get_version(self):
    '''Get the current GLU version.

    :return: the GLU version
    :rtype: str
    '''
    if not self.have_context:
        warnings.warn('No GL context created yet.')
    return self.version
[ "def", "get_version", "(", "self", ")", ":", "if", "not", "self", ".", "have_context", ":", "warnings", ".", "warn", "(", "'No GL context created yet.'", ")", "return", "self", ".", "version" ]
https://github.com/feisuzhu/thbattle/blob/ac0dee1b2d86de7664289cf432b157ef25427ba1/src/pyglet/gl/glu_info.py#L118-L126
CryptoSignal/crypto-signal
8769d0df2c50e5071b282300788a3860200b22c6
app/notification.py
python
Notifier._indicator_message_templater
(self, new_analysis, template)
return new_message
Creates a message from a user defined template Args: new_analysis (dict): A dictionary of data related to the analysis to send a message about. template (str): A Jinja formatted message template. Returns: str: The templated messages for the notifier.
Creates a message from a user defined template
[ "Creates", "a", "message", "from", "a", "user", "defined", "template" ]
def _indicator_message_templater(self, new_analysis, template):
    """Creates a message from a user defined template

    Args:
        new_analysis (dict): A dictionary of data related to the analysis to send a message about.
        template (str): A Jinja formatted message template.

    Returns:
        str: The templated messages for the notifier.
    """
    if not self.last_analysis:
        self.last_analysis = new_analysis

    message_template = Template(template)
    new_message = str()
    for exchange in new_analysis:
        for market in new_analysis[exchange]:
            for indicator_type in new_analysis[exchange][market]:
                if indicator_type == 'informants':
                    continue
                for indicator in new_analysis[exchange][market][indicator_type]:
                    for index, analysis in enumerate(new_analysis[exchange][market][indicator_type][indicator]):
                        if analysis['result'].shape[0] == 0:
                            continue

                        values = dict()
                        if indicator_type == 'indicators':
                            for signal in analysis['config']['signal']:
                                latest_result = analysis['result'].iloc[-1]
                                values[signal] = analysis['result'].iloc[-1][signal]
                                if isinstance(values[signal], float):
                                    values[signal] = format(values[signal], '.8f')
                        elif indicator_type == 'crossovers':
                            latest_result = analysis['result'].iloc[-1]
                            key_signal = '{}_{}'.format(
                                analysis['config']['key_signal'],
                                analysis['config']['key_indicator_index']
                            )
                            crossed_signal = '{}_{}'.format(
                                analysis['config']['crossed_signal'],
                                analysis['config']['crossed_indicator_index']
                            )
                            values[key_signal] = analysis['result'].iloc[-1][key_signal]
                            if isinstance(values[key_signal], float):
                                values[key_signal] = format(values[key_signal], '.8f')
                            values[crossed_signal] = analysis['result'].iloc[-1][crossed_signal]
                            if isinstance(values[crossed_signal], float):
                                values[crossed_signal] = format(values[crossed_signal], '.8f')

                        status = 'neutral'
                        if latest_result['is_hot']:
                            status = 'hot'
                        elif latest_result['is_cold']:
                            status = 'cold'

                        # Save status of indicator's new analysis
                        new_analysis[exchange][market][indicator_type][indicator][index]['status'] = status

                        if latest_result['is_hot'] or latest_result['is_cold']:
                            try:
                                last_status = self.last_analysis[exchange][market][indicator_type][indicator][index]['status']
                            except:
                                last_status = str()

                            should_alert = True
                            if analysis['config']['alert_frequency'] == 'once':
                                if last_status == status:
                                    should_alert = False
                            if not analysis['config']['alert_enabled']:
                                should_alert = False

                            if should_alert:
                                base_currency, quote_currency = market.split('/')
                                new_message += message_template.render(
                                    values=values,
                                    exchange=exchange,
                                    market=market,
                                    base_currency=base_currency,
                                    quote_currency=quote_currency,
                                    indicator=indicator,
                                    indicator_number=index,
                                    analysis=analysis,
                                    status=status,
                                    last_status=last_status
                                )

    # Merge changes from new analysis into last analysis
    self.last_analysis = {**self.last_analysis, **new_analysis}
    return new_message
[ "def", "_indicator_message_templater", "(", "self", ",", "new_analysis", ",", "template", ")", ":", "if", "not", "self", ".", "last_analysis", ":", "self", ".", "last_analysis", "=", "new_analysis", "message_template", "=", "Template", "(", "template", ")", "new...
https://github.com/CryptoSignal/crypto-signal/blob/8769d0df2c50e5071b282300788a3860200b22c6/app/notification.py#L243-L339
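The notifier leans on Jinja2's Template(...).render(...); as a rough, standalone illustration of that mechanism (the template string and values below are invented, not from the repo):

from jinja2 import Template

message_template = Template("{{ market }} {{ indicator }} is {{ status }} ({{ values['rsi'] }})")
print(message_template.render(
    market='BTC/USDT',
    indicator='rsi',
    status='hot',
    values={'rsi': '29.12345678'},
))
# -> "BTC/USDT rsi is hot (29.12345678)"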
PaddlePaddle/Parakeet
8705a2a8405e3c63f2174d69880d2b5525a6c9fd
parakeet/frontend/arpabet.py
python
ARPABET.phoneticize
(self, sentence, add_start_end=False)
return phonemes
Normalize the input text sequence and convert it into pronunciation sequence. Parameters ----------- sentence: str The input text sequence. Returns ---------- List[str] The list of pronunciation sequence.
Normalize the input text sequence and convert it into pronunciation sequence. Parameters ----------- sentence: str The input text sequence. Returns ---------- List[str] The list of pronunciation sequence.
[ "Normalize", "the", "input", "text", "sequence", "and", "convert", "it", "into", "pronunciation", "sequence", ".", "Parameters", "-----------", "sentence", ":", "str", "The", "input", "text", "sequence", ".", "Returns", "----------", "List", "[", "str", "]", "...
def phoneticize(self, sentence, add_start_end=False):
    """ Normalize the input text sequence and convert it into pronunciation sequence.

    Parameters
    -----------
    sentence: str
        The input text sequence.

    Returns
    ----------
    List[str]
        The list of pronunciation sequence.
    """
    phonemes = [
        self._remove_vowels(item) for item in self.backend(sentence)
    ]
    if add_start_end:
        start = self.vocab.start_symbol
        end = self.vocab.end_symbol
        phonemes = [start] + phonemes + [end]
    phonemes = [item for item in phonemes if item in self.vocab.stoi]
    return phonemes
[ "def", "phoneticize", "(", "self", ",", "sentence", ",", "add_start_end", "=", "False", ")", ":", "phonemes", "=", "[", "self", ".", "_remove_vowels", "(", "item", ")", "for", "item", "in", "self", ".", "backend", "(", "sentence", ")", "]", "if", "add_...
https://github.com/PaddlePaddle/Parakeet/blob/8705a2a8405e3c63f2174d69880d2b5525a6c9fd/parakeet/frontend/arpabet.py#L134-L155
facebookresearch/XLM
cd281d32612d145c6742b4d3f048f80df8669c30
xlm/trainer.py
python
Trainer.pc_step
(self, lang1, lang2, lambda_coeff)
Parallel classification step. Predict if pairs of sentences are mutual translations of each other.
Parallel classification step. Predict if pairs of sentences are mutual translations of each other.
[ "Parallel", "classification", "step", ".", "Predict", "if", "pairs", "of", "sentences", "are", "mutual", "translations", "of", "each", "other", "." ]
def pc_step(self, lang1, lang2, lambda_coeff):
    """
    Parallel classification step. Predict if pairs of sentences are mutual translations of each other.
    """
    assert lambda_coeff >= 0
    if lambda_coeff == 0:
        return
    params = self.params
    name = 'model' if params.encoder_only else 'encoder'
    model = getattr(self, name)
    model.train()

    lang1_id = params.lang2id[lang1]
    lang2_id = params.lang2id[lang2]

    # sample parallel sentences
    (x1, len1), (x2, len2) = self.get_batch('align', lang1, lang2)
    bs = len1.size(0)
    if bs == 1:  # can happen (although very rarely), which makes the negative loss fail
        self.n_sentences += params.batch_size
        return

    # associate lang1 sentences with their translations, and random lang2 sentences
    y = torch.LongTensor(bs).random_(2)
    idx_pos = torch.arange(bs)
    idx_neg = ((idx_pos + torch.LongTensor(bs).random_(1, bs)) % bs)
    idx = (y == 1).long() * idx_pos + (y == 0).long() * idx_neg
    x2, len2 = x2[:, idx], len2[idx]

    # generate batch / cuda
    x, lengths, positions, langs = concat_batches(x1, len1, lang1_id, x2, len2, lang2_id, params.pad_index, params.eos_index, reset_positions=False)
    x, lengths, positions, langs, new_idx = self.round_batch(x, lengths, positions, langs)
    if new_idx is not None:
        y = y[new_idx]
    x, lengths, positions, langs = to_cuda(x, lengths, positions, langs)

    # get sentence embeddings
    h = model('fwd', x=x, lengths=lengths, positions=positions, langs=langs, causal=False)[0]

    # parallel classification loss
    CLF_ID1, CLF_ID2 = 8, 9  # very hacky, use embeddings to make weights for the classifier
    emb = (model.module if params.multi_gpu else model).embeddings.weight
    pred = F.linear(h, emb[CLF_ID1].unsqueeze(0), emb[CLF_ID2, 0])
    loss = F.binary_cross_entropy_with_logits(pred.view(-1), y.to(pred.device).type_as(pred))
    self.stats['PC-%s-%s' % (lang1, lang2)].append(loss.item())
    loss = lambda_coeff * loss

    # optimize
    self.optimize(loss)

    # number of processed sentences / words
    self.n_sentences += params.batch_size
    self.stats['processed_s'] += bs
    self.stats['processed_w'] += lengths.sum().item()
[ "def", "pc_step", "(", "self", ",", "lang1", ",", "lang2", ",", "lambda_coeff", ")", ":", "assert", "lambda_coeff", ">=", "0", "if", "lambda_coeff", "==", "0", ":", "return", "params", "=", "self", ".", "params", "name", "=", "'model'", "if", "params", ...
https://github.com/facebookresearch/XLM/blob/cd281d32612d145c6742b4d3f048f80df8669c30/xlm/trainer.py#L732-L785
lektor/lektor-archive
d2ab208c756b1e7092b2056108571719abd8d6cd
lektor/pagination.py
python
Pagination.pages
(self)
return pages
The total number of pages
The total number of pages
[ "The", "total", "number", "of", "pages" ]
def pages(self):
    """The total number of pages"""
    if self.per_page == 0:
        pages = 0
    else:
        pages = int(ceil(self.total / float(self.per_page)))
    return pages
[ "def", "pages", "(", "self", ")", ":", "if", "self", ".", "per_page", "==", "0", ":", "pages", "=", "0", "else", ":", "pages", "=", "int", "(", "ceil", "(", "self", ".", "total", "/", "float", "(", "self", ".", "per_page", ")", ")", ")", "retur...
https://github.com/lektor/lektor-archive/blob/d2ab208c756b1e7092b2056108571719abd8d6cd/lektor/pagination.py#L25-L31
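A worked example of the page count above (numbers invented):

from math import ceil

total, per_page = 27, 10
pages = 0 if per_page == 0 else int(ceil(total / float(per_page)))
print(pages)   # -> 3; and per_page == 0 yields 0 rather than dividing by zero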
tp4a/teleport
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
server/www/packages/packages-darwin/x64/tornado/netutil.py
python
BlockingResolver.initialize
(self)
[]
def initialize(self):
    super(BlockingResolver, self).initialize()
[ "def", "initialize", "(", "self", ")", ":", "super", "(", "BlockingResolver", ",", "self", ")", ".", "initialize", "(", ")" ]
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-darwin/x64/tornado/netutil.py#L428-L429
xiaolonw/adversarial-frcnn
2a7bb96c9884c0f09ca5bde35a981087be28562b
lib/fast_rcnn/config.py
python
cfg_from_file
(filename)
Load a config file and merge it into the default options.
Load a config file and merge it into the default options.
[ "Load", "a", "config", "file", "and", "merge", "it", "into", "the", "default", "options", "." ]
def cfg_from_file(filename):
    """Load a config file and merge it into the default options."""
    import yaml
    with open(filename, 'r') as f:
        yaml_cfg = edict(yaml.load(f))

    _merge_a_into_b(yaml_cfg, __C)
[ "def", "cfg_from_file", "(", "filename", ")", ":", "import", "yaml", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "yaml_cfg", "=", "edict", "(", "yaml", ".", "load", "(", "f", ")", ")", "_merge_a_into_b", "(", "yaml_cfg", ",", "_...
https://github.com/xiaolonw/adversarial-frcnn/blob/2a7bb96c9884c0f09ca5bde35a981087be28562b/lib/fast_rcnn/config.py#L269-L275
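A rough sketch of the kind of input cfg_from_file consumes, with an invented config snippet; it assumes edict is easydict.EasyDict (the usual import in this config module), and uses yaml.safe_load since bare yaml.load needs an explicit Loader on modern PyYAML, which the vendored code above predates:

import yaml
from easydict import EasyDict as edict

example_yaml = """
TRAIN:
  SCALES: [600]
  BATCH_SIZE: 128
"""
yaml_cfg = edict(yaml.safe_load(example_yaml))   # safe_load avoids the bare yaml.load warning
print(yaml_cfg.TRAIN.BATCH_SIZE)                 # -> 128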
IntelAI/models
1d7a53ccfad3e6f0e7378c9e3c8840895d63df8c
models/image_segmentation/tensorflow/maskrcnn/inference/fp32/model.py
python
rpn_graph
(feature_map, anchors_per_location, anchor_stride)
return [rpn_class_logits, rpn_probs, rpn_bbox]
Builds the computation graph of Region Proposal Network. feature_map: backbone features [batch, height, width, depth] anchors_per_location: number of anchors per pixel in the feature map anchor_stride: Controls the density of anchors. Typically 1 (anchors for every pixel in the feature map), or 2 (every other pixel). Returns: rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax) rpn_probs: [batch, H, W, 2] Anchor classifier probabilities. rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be applied to anchors.
Builds the computation graph of Region Proposal Network.
[ "Builds", "the", "computation", "graph", "of", "Region", "Proposal", "Network", "." ]
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
    """Builds the computation graph of Region Proposal Network.

    feature_map: backbone features [batch, height, width, depth]
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).

    Returns:
        rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
        rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
        rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
                  applied to anchors.
    """
    # TODO: check if stride of 2 causes alignment issues if the featuremap
    # is not even.
    # Shared convolutional base of the RPN
    shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
                       strides=anchor_stride,
                       name='rpn_conv_shared')(feature_map)

    # Anchor Score. [batch, height, width, anchors per location * 2].
    x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
                  activation='linear', name='rpn_class_raw')(shared)

    # Reshape to [batch, anchors, 2]
    rpn_class_logits = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)

    # Softmax on last dimension of BG/FG.
    rpn_probs = KL.Activation(
        "softmax", name="rpn_class_xxx")(rpn_class_logits)

    # Bounding box refinement. [batch, H, W, anchors per location, depth]
    # where depth is [x, y, log(w), log(h)]
    x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
                  activation='linear', name='rpn_bbox_pred')(shared)

    # Reshape to [batch, anchors, 4]
    rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)

    return [rpn_class_logits, rpn_probs, rpn_bbox]
[ "def", "rpn_graph", "(", "feature_map", ",", "anchors_per_location", ",", "anchor_stride", ")", ":", "# TODO: check if stride of 2 causes alignment issues if the featuremap", "# is not even.", "# Shared convolutional base of the RPN", "shared", "=", "KL", ".", "Conv2D", "("...
https://github.com/IntelAI/models/blob/1d7a53ccfad3e6f0e7378c9e3c8840895d63df8c/models/image_segmentation/tensorflow/maskrcnn/inference/fp32/model.py#L858-L899
CouchPotato/CouchPotatoServer
7260c12f72447ddb6f062367c6dfbda03ecd4e9c
libs/rsa/cli.py
python
BigfileOperation.__call__
(self)
Runs the program.
Runs the program.
[ "Runs", "the", "program", "." ]
def __call__(self):
    '''Runs the program.'''

    (cli, cli_args) = self.parse_cli()

    key = self.read_key(cli_args[0], cli.keyform)

    # Get the file handles
    infile = self.get_infile(cli.input)
    outfile = self.get_outfile(cli.output)

    # Call the operation
    print(self.operation_progressive.title(), file=sys.stderr)
    self.perform_operation(infile, outfile, key, cli_args)
[ "def", "__call__", "(", "self", ")", ":", "(", "cli", ",", "cli_args", ")", "=", "self", ".", "parse_cli", "(", ")", "key", "=", "self", ".", "read_key", "(", "cli_args", "[", "0", "]", ",", "cli", ".", "keyform", ")", "# Get the file handles", "infi...
https://github.com/CouchPotato/CouchPotatoServer/blob/7260c12f72447ddb6f062367c6dfbda03ecd4e9c/libs/rsa/cli.py#L299-L312
emcconville/wand
03682680c351645f16c3b8ea23bde79fbb270305
wand/image.py
python
BaseImage.brightness_contrast
(self, brightness=0.0, contrast=0.0, channel=None)
return r
Converts ``brightness`` & ``contrast`` paramaters into a slope & intercept, and applies a polynomial function. :param brightness: between ``-100.0`` and ``100.0``. Default is ``0.0`` for unchanged. :type brightness: :class:`numbers.Real` :param contrast: between ``-100.0`` and ``100.0``. Default is ``0.0`` for unchanged. :type contrast: :class:`numbers.Real` :param channel: Isolate a single color channel to apply contrast. See :const:`CHANNELS`. .. versionadded:: 0.5.4 .. versionchanged:: 0.5.5 Optional ``channel`` argument added.
Converts ``brightness`` & ``contrast`` paramaters into a slope & intercept, and applies a polynomial function.
[ "Converts", "brightness", "&", "contrast", "paramaters", "into", "a", "slope", "&", "intercept", "and", "applies", "a", "polynomial", "function", "." ]
def brightness_contrast(self, brightness=0.0, contrast=0.0, channel=None):
    """Converts ``brightness`` & ``contrast`` paramaters into a slope &
    intercept, and applies a polynomial function.

    :param brightness: between ``-100.0`` and ``100.0``. Default is ``0.0``
                       for unchanged.
    :type brightness: :class:`numbers.Real`
    :param contrast: between ``-100.0`` and ``100.0``. Default is ``0.0``
                     for unchanged.
    :type contrast: :class:`numbers.Real`
    :param channel: Isolate a single color channel to apply contrast.
                    See :const:`CHANNELS`.

    .. versionadded:: 0.5.4

    .. versionchanged:: 0.5.5
       Optional ``channel`` argument added.
    """
    assertions.assert_real(brightness=brightness, contrast=contrast)
    if channel is None:
        r = library.MagickBrightnessContrastImage(self.wand,
                                                  brightness,
                                                  contrast)
    else:
        channel_ch = self._channel_to_mask(channel)
        if MAGICK_VERSION_NUMBER < 0x700:
            r = library.MagickBrightnessContrastImageChannel(self.wand,
                                                             channel_ch,
                                                             brightness,
                                                             contrast)
        else:  # pragma: no cover
            mask = library.MagickSetImageChannelMask(self.wand, channel_ch)
            r = library.MagickBrightnessContrastImage(self.wand,
                                                      brightness,
                                                      contrast)
            library.MagickSetImageChannelMask(self.wand, mask)
    return r
[ "def", "brightness_contrast", "(", "self", ",", "brightness", "=", "0.0", ",", "contrast", "=", "0.0", ",", "channel", "=", "None", ")", ":", "assertions", ".", "assert_real", "(", "brightness", "=", "brightness", ",", "contrast", "=", "contrast", ")", "if...
https://github.com/emcconville/wand/blob/03682680c351645f16c3b8ea23bde79fbb270305/wand/image.py#L3134-L3170
ztosec/hunter
4ee5cca8dc5fc5d7e631e935517bd0f493c30a37
HunterCelery/common/plugins_util.py
python
modify_default_checkers
()
Modify plugin status; automatically downgrade based on the configuration file. :return:
Modify plugin status; automatically downgrade based on the configuration file. :return:
[ "修改插件状态,根据配置文件自动降级", ":", "return", ":" ]
def modify_default_checkers():
    """
    修改插件状态,根据配置文件自动降级
    :return:
    """
    from common.plugin_config.base_plugin_config import DegradablePluginConfig
    checkers = load_default_checkers()
    for (k, v) in DegradablePluginConfig.instance().get_plugin_config(True).items():
        if k in checkers:  # 确认配置文件和本地插件是否一致
            checkers[k].useable = v["useable"]
    logger.info('local found %d plugins, %d plugins useable' %
                (len(CHECKER_INSTANCE_DICT), len(get_useable_checkers())))
[ "def", "modify_default_checkers", "(", ")", ":", "from", "common", ".", "plugin_config", ".", "base_plugin_config", "import", "DegradablePluginConfig", "checkers", "=", "load_default_checkers", "(", ")", "for", "(", "k", ",", "v", ")", "in", "DegradablePluginConfig"...
https://github.com/ztosec/hunter/blob/4ee5cca8dc5fc5d7e631e935517bd0f493c30a37/HunterCelery/common/plugins_util.py#L163-L175
treeio/treeio
bae3115f4015aad2cbc5ab45572232ceec990495
treeio/finance/templatetags/finance.py
python
finance_liability_list
(context, liabilities, skip_group=False)
return Markup(render_to_string('finance/tags/liability_list', {'liabilities': liabilities, 'skip_group': skip_group}, context_instance=RequestContext(request), response_format=response_format))
Print a list of orders
Print a list of orders
[ "Print", "a", "list", "of", "orders" ]
def finance_liability_list(context, liabilities, skip_group=False):
    "Print a list of orders"
    request = context['request']

    response_format = 'html'
    if 'response_format' in context:
        response_format = context['response_format']

    return Markup(render_to_string('finance/tags/liability_list',
                                   {'liabilities': liabilities,
                                    'skip_group': skip_group},
                                   context_instance=RequestContext(request),
                                   response_format=response_format))
[ "def", "finance_liability_list", "(", "context", ",", "liabilities", ",", "skip_group", "=", "False", ")", ":", "request", "=", "context", "[", "'request'", "]", "response_format", "=", "'html'", "if", "'response_format'", "in", "context", ":", "response_format", ...
https://github.com/treeio/treeio/blob/bae3115f4015aad2cbc5ab45572232ceec990495/treeio/finance/templatetags/finance.py#L34-L46
VirtueSecurity/aws-extender
d123b7e1a845847709ba3a481f11996bddc68a1c
BappModules/boto/sdb/db/sequence.py
python
SequenceGenerator.__init__
(self, sequence_string, rollover=False)
Create a new SequenceGenerator using the sequence_string as how to generate the next item. :param sequence_string: The string or list that explains how to generate the next item in the sequence :type sequence_string: str,iterable :param rollover: Rollover instead of incrementing when we hit the end of the sequence :type rollover: bool
Create a new SequenceGenerator using the sequence_string as how to generate the next item.
[ "Create", "a", "new", "SequenceGenerator", "using", "the", "sequence_string", "as", "how", "to", "generate", "the", "next", "item", "." ]
def __init__(self, sequence_string, rollover=False):
    """Create a new SequenceGenerator using the sequence_string
    as how to generate the next item.

    :param sequence_string: The string or list that explains
        how to generate the next item in the sequence
    :type sequence_string: str,iterable

    :param rollover: Rollover instead of incrementing when
        we hit the end of the sequence
    :type rollover: bool
    """
    self.sequence_string = sequence_string
    self.sequence_length = len(sequence_string[0])
    self.rollover = rollover
    self.last_item = sequence_string[-1]
    self.__name__ = "%s('%s')" % (self.__class__.__name__, sequence_string)
[ "def", "__init__", "(", "self", ",", "sequence_string", ",", "rollover", "=", "False", ")", ":", "self", ".", "sequence_string", "=", "sequence_string", "self", ".", "sequence_length", "=", "len", "(", "sequence_string", "[", "0", "]", ")", "self", ".", "r...
https://github.com/VirtueSecurity/aws-extender/blob/d123b7e1a845847709ba3a481f11996bddc68a1c/BappModules/boto/sdb/db/sequence.py#L40-L56
thiagopena/djangoSIGE
e32186b27bfd8acf21b0fa400e699cb5c73e5433
djangosige/apps/cadastro/views/base.py
python
EditarPessoaView.get
(self, request, form, *args, **kwargs)
return self.render_to_response(self.get_context_data(form=form, pessoa_juridica_form=pessoa_juridica_form, pessoa_fisica_form=pessoa_fisica_form, endereco_form=endereco_form, banco_form=banco_form, documento_form=documento_form, formsets=formsets, logo_file=logo_file, veiculo_form=veiculo_form, object=self.object))
[]
def get(self, request, form, *args, **kwargs):
    if self.object.tipo_pessoa == 'PJ':
        pessoa_juridica_form = PessoaJuridicaForm(
            instance=self.object, prefix='pessoa_jur_form')
        pessoa_fisica_form = PessoaFisicaForm(prefix='pessoa_fis_form')
    else:
        pessoa_juridica_form = PessoaJuridicaForm(prefix='pessoa_jur_form')
        pessoa_fisica_form = PessoaFisicaForm(
            instance=self.object, prefix='pessoa_fis_form')

    endereco_form = EnderecoFormSet(
        instance=self.object, prefix='endereco_form')
    banco_form = BancoFormSet(instance=self.object, prefix='banco_form')
    documento_form = DocumentoFormSet(
        instance=self.object, prefix='documento_form')
    telefone_form = TelefoneFormSet(
        instance=self.object, prefix='telefone_form')
    email_form = EmailFormSet(instance=self.object, prefix='email_form')
    site_form = SiteFormSet(instance=self.object, prefix='site_form')

    if Telefone.objects.filter(pessoa_tel=self.object.pk).count():
        telefone_form.extra = 0
    if Endereco.objects.filter(pessoa_end=self.object.pk).count():
        endereco_form.extra = 0
    if Email.objects.filter(pessoa_email=self.object.pk).count():
        email_form.extra = 0
    if Site.objects.filter(pessoa_site=self.object.pk).count():
        site_form.extra = 0
    if Banco.objects.filter(pessoa_banco=self.object.pk).count():
        banco_form.extra = 0
    if Documento.objects.filter(pessoa_documento=self.object.pk).count():
        documento_form.extra = 0

    formsets = [telefone_form, email_form, site_form]

    # Caso Empresa
    logo_file = kwargs.pop('logo_file', None)
    # Caso Transportadora
    veiculo_form = kwargs.pop('veiculo_form', None)

    return self.render_to_response(self.get_context_data(form=form,
                                                          pessoa_juridica_form=pessoa_juridica_form,
                                                          pessoa_fisica_form=pessoa_fisica_form,
                                                          endereco_form=endereco_form,
                                                          banco_form=banco_form,
                                                          documento_form=documento_form,
                                                          formsets=formsets,
                                                          logo_file=logo_file,
                                                          veiculo_form=veiculo_form,
                                                          object=self.object))
[ "def", "get", "(", "self", ",", "request", ",", "form", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "object", ".", "tipo_pessoa", "==", "'PJ'", ":", "pessoa_juridica_form", "=", "PessoaJuridicaForm", "(", "instance", "=", "s...
https://github.com/thiagopena/djangoSIGE/blob/e32186b27bfd8acf21b0fa400e699cb5c73e5433/djangosige/apps/cadastro/views/base.py#L159-L209
caiiiac/Machine-Learning-with-Python
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
MachineLearning/venv/lib/python3.5/site-packages/pandas/core/indexes/datetimes.py
python
DatetimeIndex.to_julian_date
(self)
return Float64Index(day + np.fix((153 * month - 457) / 5) + 365 * year + np.floor(year / 4) - np.floor(year / 100) + np.floor(year / 400) + 1721118.5 + (self.hour + self.minute / 60.0 + self.second / 3600.0 + self.microsecond / 3600.0 / 1e+6 + self.nanosecond / 3600.0 / 1e+9 ) / 24.0)
Convert DatetimeIndex to Float64Index of Julian Dates. 0 Julian date is noon January 1, 4713 BC. http://en.wikipedia.org/wiki/Julian_day
Convert DatetimeIndex to Float64Index of Julian Dates. 0 Julian date is noon January 1, 4713 BC. http://en.wikipedia.org/wiki/Julian_day
[ "Convert", "DatetimeIndex", "to", "Float64Index", "of", "Julian", "Dates", ".", "0", "Julian", "date", "is", "noon", "January", "1", "4713", "BC", ".", "http", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Julian_day" ]
def to_julian_date(self):
    """
    Convert DatetimeIndex to Float64Index of Julian Dates.
    0 Julian date is noon January 1, 4713 BC.
    http://en.wikipedia.org/wiki/Julian_day
    """

    # http://mysite.verizon.net/aesir_research/date/jdalg2.htm
    year = np.asarray(self.year)
    month = np.asarray(self.month)
    day = np.asarray(self.day)
    testarr = month < 3
    year[testarr] -= 1
    month[testarr] += 12
    return Float64Index(day +
                        np.fix((153 * month - 457) / 5) +
                        365 * year +
                        np.floor(year / 4) -
                        np.floor(year / 100) +
                        np.floor(year / 400) +
                        1721118.5 +
                        (self.hour +
                         self.minute / 60.0 +
                         self.second / 3600.0 +
                         self.microsecond / 3600.0 / 1e+6 +
                         self.nanosecond / 3600.0 / 1e+9
                         ) / 24.0)
[ "def", "to_julian_date", "(", "self", ")", ":", "# http://mysite.verizon.net/aesir_research/date/jdalg2.htm", "year", "=", "np", ".", "asarray", "(", "self", ".", "year", ")", "month", "=", "np", ".", "asarray", "(", "self", ".", "month", ")", "day", "=", "n...
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/pandas/core/indexes/datetimes.py#L1926-L1952
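The arithmetic above can be checked by hand for a known date: 2000-01-01 12:00 UTC is Julian date 2451545.0. A standalone sketch of the same formula in plain Python (not pandas; math.trunc stands in for np.fix):

import math

year, month, day, hour = 2000, 1, 1, 12
if month < 3:
    year -= 1
    month += 12
jd = (day + math.trunc((153 * month - 457) / 5) + 365 * year
      + math.floor(year / 4) - math.floor(year / 100) + math.floor(year / 400)
      + 1721118.5 + hour / 24.0)
print(jd)   # -> 2451545.0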
jupyterhub/repo2docker
a37a205c0e8a59240933d20c1c4fb80767e71db2
repo2docker/contentproviders/hydroshare.py
python
Hydroshare.fetch
(self, spec, output_dir, yield_output=False, timeout=120)
Fetch and unpack a Hydroshare resource
Fetch and unpack a Hydroshare resource
[ "Fetch", "and", "unpack", "a", "Hydroshare", "resource" ]
def fetch(self, spec, output_dir, yield_output=False, timeout=120):
    """Fetch and unpack a Hydroshare resource"""
    resource_id = spec["resource"]
    host = spec["host"]

    bag_url = "{}{}".format(host["django_irods"], resource_id)

    yield "Downloading {}.\n".format(bag_url)

    # bag downloads are prepared on demand and may need some time
    conn = self.urlopen(bag_url)
    total_wait_time = 0
    while (
        conn.status_code == 200
        and conn.headers["content-type"] != "application/zip"
    ):
        wait_time = 10
        total_wait_time += wait_time
        if total_wait_time > timeout:
            msg = "Bag taking too long to prepare, exiting now, try again later."
            yield msg
            raise ContentProviderException(msg)
        yield "Bag is being prepared, requesting again in {} seconds.\n".format(
            wait_time
        )
        time.sleep(wait_time)
        conn = self.urlopen(bag_url)
    if conn.status_code != 200:
        msg = "Failed to download bag. status code {}.\n".format(conn.status_code)
        yield msg
        raise ContentProviderException(msg)
    # Bag creation seems to need a small time buffer after it says it's ready.
    time.sleep(1)
    filehandle, _ = self._urlretrieve(bag_url)
    zip_file_object = zipfile.ZipFile(filehandle, "r")
    yield "Downloaded, unpacking contents.\n"
    zip_file_object.extractall("temp")
    # resources store the contents in the data/contents directory, which is all we want to keep
    contents_dir = os.path.join("temp", self.resource_id, "data", "contents")
    files = os.listdir(contents_dir)
    for f in files:
        shutil.move(os.path.join(contents_dir, f), output_dir)
    yield "Finished, cleaning up.\n"
    shutil.rmtree("temp")
[ "def", "fetch", "(", "self", ",", "spec", ",", "output_dir", ",", "yield_output", "=", "False", ",", "timeout", "=", "120", ")", ":", "resource_id", "=", "spec", "[", "\"resource\"", "]", "host", "=", "spec", "[", "\"host\"", "]", "bag_url", "=", "\"{}...
https://github.com/jupyterhub/repo2docker/blob/a37a205c0e8a59240933d20c1c4fb80767e71db2/repo2docker/contentproviders/hydroshare.py#L57-L100
golismero/golismero
7d605b937e241f51c1ca4f47b20f755eeefb9d76
golismero/managers/importmanager.py
python
ImportManager.close
(self)
Release all resources held by this manager.
Release all resources held by this manager.
[ "Release", "all", "resources", "held", "by", "this", "manager", "." ]
def close(self):
    """
    Release all resources held by this manager.
    """
    self.__config = None
    self.__orchestrator = None
    self.__plugins = None
    self.__importers = None
[ "def", "close", "(", "self", ")", ":", "self", ".", "__config", "=", "None", "self", ".", "__orchestrator", "=", "None", "self", ".", "__plugins", "=", "None", "self", ".", "__importers", "=", "None" ]
https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/golismero/managers/importmanager.py#L157-L164
home-assistant/supervisor
69c2517d5211b483fdfe968b0a2b36b672ee7ab2
supervisor/store/utils.py
python
get_hash_from_repository
(name: str)
return hashlib.sha1(key).hexdigest()[:8]
Generate a hash from repository.
Generate a hash from repository.
[ "Generate", "a", "hash", "from", "repository", "." ]
def get_hash_from_repository(name: str) -> str:
    """Generate a hash from repository."""
    key = name.lower().encode()
    return hashlib.sha1(key).hexdigest()[:8]
[ "def", "get_hash_from_repository", "(", "name", ":", "str", ")", "->", "str", ":", "key", "=", "name", ".", "lower", "(", ")", ".", "encode", "(", ")", "return", "hashlib", ".", "sha1", "(", "key", ")", ".", "hexdigest", "(", ")", "[", ":", "8", ...
https://github.com/home-assistant/supervisor/blob/69c2517d5211b483fdfe968b0a2b36b672ee7ab2/supervisor/store/utils.py#L11-L14
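A usage sketch for get_hash_from_repository (the repository name below is invented for illustration):

import hashlib

name = "https://github.com/example/addons-repository"
key = name.lower().encode()
print(hashlib.sha1(key).hexdigest()[:8])   # an 8-character hex identifier derived from the name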
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/tkinter/__init__.py
python
Entry.get
(self)
return self.tk.call(self._w, 'get')
Return the text.
Return the text.
[ "Return", "the", "text", "." ]
def get(self):
    """Return the text."""
    return self.tk.call(self._w, 'get')
[ "def", "get", "(", "self", ")", ":", "return", "self", ".", "tk", ".", "call", "(", "self", ".", "_w", ",", "'get'", ")" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/tkinter/__init__.py#L3036-L3038
readthedocs/sphinx-autoapi
71c6ceebe0b02c34027fcd3d56c8641e9b94c7af
autoapi/mappers/python/astroid_utils.py
python
_is_property_decorator
(decorator)
return False
[]
def _is_property_decorator(decorator):
    def _is_property_class(class_node):
        return (
            class_node.name == "property"
            and class_node.root().name == builtins.__name__
        )

    for inferred in decorator.infer():
        if not isinstance(inferred, astroid.nodes.ClassDef):
            continue

        if _is_property_class(inferred):
            return True

        if any(_is_property_class(ancestor) for ancestor in inferred.ancestors()):
            return True

    return False
[ "def", "_is_property_decorator", "(", "decorator", ")", ":", "def", "_is_property_class", "(", "class_node", ")", ":", "return", "(", "class_node", ".", "name", "==", "\"property\"", "and", "class_node", ".", "root", "(", ")", ".", "name", "==", "builtins", ...
https://github.com/readthedocs/sphinx-autoapi/blob/71c6ceebe0b02c34027fcd3d56c8641e9b94c7af/autoapi/mappers/python/astroid_utils.py#L221-L238
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/site-packages/gevent/hub.py
python
Waiter.exc_info
(self)
Holds the exception info passed to :meth:`throw` if :meth:`throw` was called. Otherwise ``None``.
Holds the exception info passed to :meth:`throw` if :meth:`throw` was called. Otherwise ``None``.
[ "Holds", "the", "exception", "info", "passed", "to", ":", "meth", ":", "throw", "if", ":", "meth", ":", "throw", "was", "called", ".", "Otherwise", "None", "." ]
def exc_info(self): "Holds the exception info passed to :meth:`throw` if :meth:`throw` was called. Otherwise ``None``." if self._exception is not _NONE: return self._exception
[ "def", "exc_info", "(", "self", ")", ":", "if", "self", ".", "_exception", "is", "not", "_NONE", ":", "return", "self", ".", "_exception" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/gevent/hub.py#L522-L525
yuxiaokui/Intranet-Penetration
f57678a204840c83cbf3308e3470ae56c5ff514b
proxy/XX-Net/code/default/python27/1.0/lib/wsgiref/headers.py
python
Headers.get_all
(self, name)
return [kv[1] for kv in self._headers if kv[0].lower()==name]
Return a list of all the values for the named field. These will be sorted in the order they appeared in the original header list or were added to this instance, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list. If no fields exist with the given name, returns an empty list.
Return a list of all the values for the named field.
[ "Return", "a", "list", "of", "all", "the", "values", "for", "the", "named", "field", "." ]
def get_all(self, name): """Return a list of all the values for the named field. These will be sorted in the order they appeared in the original header list or were added to this instance, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list. If no fields exist with the given name, returns an empty list. """ name = name.lower() return [kv[1] for kv in self._headers if kv[0].lower()==name]
[ "def", "get_all", "(", "self", ",", "name", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "return", "[", "kv", "[", "1", "]", "for", "kv", "in", "self", ".", "_headers", "if", "kv", "[", "0", "]", ".", "lower", "(", ")", "==", "name"...
https://github.com/yuxiaokui/Intranet-Penetration/blob/f57678a204840c83cbf3308e3470ae56c5ff514b/proxy/XX-Net/code/default/python27/1.0/lib/wsgiref/headers.py#L74-L83
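A quick standard-library illustration of get_all on a header list containing duplicate field names (the header values are arbitrary):

from wsgiref.headers import Headers

h = Headers([("Set-Cookie", "a=1"), ("Content-Type", "text/plain"), ("Set-Cookie", "b=2")])
print(h.get_all("set-cookie"))   # ['a=1', 'b=2'] - lookup is case-insensitive, original order kept
print(h.get_all("x-missing"))    # [] - no fields with that name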
KvasirSecurity/Kvasir
a5b3775184a8343240e1154a1f762f75df04dc0a
modules/zenmapCore_Kvasir/ScriptMetadata.py
python
get_script_entries
(scripts_dir, nselib_dir)
return entries
Merge the information obtained so far into one single entry for each script and return it.
Merge the information obtained so far into one single entry for each script and return it.
[ "Merge", "the", "information", "obtained", "so", "far", "into", "one", "single", "entry", "for", "each", "script", "and", "return", "it", "." ]
def get_script_entries(scripts_dir, nselib_dir): """Merge the information obtained so far into one single entry for each script and return it.""" metadata = ScriptMetadata(scripts_dir, nselib_dir) try: scriptdb = ScriptDB(os.path.join(scripts_dir, "script.db")) except IOError: return [] entries = [] for dbentry in scriptdb.get_entries_list(): entry = metadata.get_metadata(dbentry["filename"]) # Categories is the only thing ScriptMetadata doesn't take care of. entry.categories = dbentry["categories"] entries.append(entry) return entries
[ "def", "get_script_entries", "(", "scripts_dir", ",", "nselib_dir", ")", ":", "metadata", "=", "ScriptMetadata", "(", "scripts_dir", ",", "nselib_dir", ")", "try", ":", "scriptdb", "=", "ScriptDB", "(", "os", ".", "path", ".", "join", "(", "scripts_dir", ","...
https://github.com/KvasirSecurity/Kvasir/blob/a5b3775184a8343240e1154a1f762f75df04dc0a/modules/zenmapCore_Kvasir/ScriptMetadata.py#L449-L463
youngsterxyf/mpdp-code
282e13bac4b6c98036961e7d920b741509e92ab6
chapter6/facade.py
python
ProcessServer.kill
(self, restart=True)
Kill the process: an operation required by the process server
Kill the process: an operation required by the process server
[ "Kill", "the", "process", ":", "an", "operation", "required", "by", "the", "process", "server" ]
def kill(self, restart=True): print('Killing {}'.format(self)) '''Kill the process: an operation required by the process server''' self.state = State.restart if restart else State.zombie
[ "def", "kill", "(", "self", ",", "restart", "=", "True", ")", ":", "print", "(", "'Killing {}'", ".", "format", "(", "self", ")", ")", "self", ".", "state", "=", "State", ".", "restart", "if", "restart", "else", "State", ".", "zombie" ]
https://github.com/youngsterxyf/mpdp-code/blob/282e13bac4b6c98036961e7d920b741509e92ab6/chapter6/facade.py#L74-L77
rembo10/headphones
b3199605be1ebc83a7a8feab6b1e99b64014187c
lib/mutagen/asf/_objects.py
python
BaseObject.parse
(self, asf, data)
[]
def parse(self, asf, data): self.data = data
[ "def", "parse", "(", "self", ",", "asf", ",", "data", ")", ":", "self", ".", "data", "=", "data" ]
https://github.com/rembo10/headphones/blob/b3199605be1ebc83a7a8feab6b1e99b64014187c/lib/mutagen/asf/_objects.py#L30-L31
p2pool/p2pool
53c438bbada06b9d4a9a465bc13f7694a7a322b7
p2pool/p2p.py
python
Node.start
(self)
[]
def start(self): if self.running: raise ValueError('already running') self.clientfactory.start() self.serverfactory.start() self.singleclientconnectors = [reactor.connectTCP(addr, port, SingleClientFactory(self)) for addr, port in self.connect_addrs] self.running = True self._stop_thinking = deferral.run_repeatedly(self._think)
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "running", ":", "raise", "ValueError", "(", "'already running'", ")", "self", ".", "clientfactory", ".", "start", "(", ")", "self", ".", "serverfactory", ".", "start", "(", ")", "self", ".", "si...
https://github.com/p2pool/p2pool/blob/53c438bbada06b9d4a9a465bc13f7694a7a322b7/p2pool/p2p.py#L632-L642
auth0/auth0-python
511b016ac9853c7f4ee66769be7ad315c5585735
auth0/v3/management/connections.py
python
Connections.update
(self, id, body)
return self.client.patch(self._url(id), data=body)
Modifies a connection. Args: id: Id of the connection. body (dict): Specifies which fields are to be modified, and to what values. See: https://auth0.com/docs/api/management/v2#!/Connections/patch_connections_by_id Returns: The modified connection object.
Modifies a connection.
[ "Modifies", "a", "connection", "." ]
def update(self, id, body): """Modifies a connection. Args: id: Id of the connection. body (dict): Specifies which fields are to be modified, and to what values. See: https://auth0.com/docs/api/management/v2#!/Connections/patch_connections_by_id Returns: The modified connection object. """ return self.client.patch(self._url(id), data=body)
[ "def", "update", "(", "self", ",", "id", ",", "body", ")", ":", "return", "self", ".", "client", ".", "patch", "(", "self", ".", "_url", "(", "id", ")", ",", "data", "=", "body", ")" ]
https://github.com/auth0/auth0-python/blob/511b016ac9853c7f4ee66769be7ad315c5585735/auth0/v3/management/connections.py#L114-L128
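A hedged usage sketch of Connections.update; the tenant domain, management token, connection id, and body below are placeholders, not real values:

from auth0.v3.management import Auth0

auth0 = Auth0("example.auth0.com", "MGMT_API_TOKEN")   # hypothetical tenant + token
updated = auth0.connections.update(
    "con_0000000000000001",                            # placeholder connection id
    {"metadata": {"env": "staging"}},                  # fields to modify
)
print(updated)                                         # the modified connection object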
gdraheim/docker-systemctl-replacement
9cbe1a00eb4bdac6ff05b96ca34ec9ed3d8fc06c
files/docker/systemctl3.py
python
Systemctl.load_sysv_unit_conf
(self, module)
return conf
read the unit file with a UnitConfParser (sysv)
read the unit file with a UnitConfParser (sysv)
[ "read", "the", "unit", "file", "with", "a", "UnitConfParser", "(", "sysv", ")" ]
def load_sysv_unit_conf(self, module): # -> conf? """ read the unit file with a UnitConfParser (sysv) """ path = self.unit_sysv_file(module) if not path: return None assert self._loaded_file_sysv is not None if path in self._loaded_file_sysv: return self._loaded_file_sysv[path] data = UnitConfParser() data.read_sysv(path) conf = SystemctlConf(data, module) conf._root = self._root self._loaded_file_sysv[path] = conf return conf
[ "def", "load_sysv_unit_conf", "(", "self", ",", "module", ")", ":", "# -> conf?", "path", "=", "self", ".", "unit_sysv_file", "(", "module", ")", "if", "not", "path", ":", "return", "None", "assert", "self", ".", "_loaded_file_sysv", "is", "not", "None", "...
https://github.com/gdraheim/docker-systemctl-replacement/blob/9cbe1a00eb4bdac6ff05b96ca34ec9ed3d8fc06c/files/docker/systemctl3.py#L1442-L1454
rhinstaller/anaconda
63edc8680f1b05cbfe11bef28703acba808c5174
pyanaconda/ui/gui/hubs/summary.py
python
SummaryHub._on_info_bar_clicked
(self, *args)
Call the callback to show a detailed message.
Call the callback to show a detailed message.
[ "Call", "the", "callback", "to", "show", "a", "detailed", "message", "." ]
def _on_info_bar_clicked(self, *args): """Call the callback to show a detailed message.""" if self._show_details_callback: self._show_details_callback()
[ "def", "_on_info_bar_clicked", "(", "self", ",", "*", "args", ")", ":", "if", "self", ".", "_show_details_callback", ":", "self", ".", "_show_details_callback", "(", ")" ]
https://github.com/rhinstaller/anaconda/blob/63edc8680f1b05cbfe11bef28703acba808c5174/pyanaconda/ui/gui/hubs/summary.py#L82-L85
Stiivi/bubbles
b3c9332b8a9534655bd77821586e45a5086b1ddc
bubbles/objects.py
python
DataObject.records
(self)
Returns an iterator of records - dictionary-like objects that can be accessed by field names. Default implementation returns dictionaries, however other objects might return their own structures. For example the SQL object returns the same iterator as for rows, as it can serve as key-value structure as well.
Returns an iterator of records - dictionary-like objects that can be accessed by field names. Default implementation returns dictionaries, however other objects might return their own structures. For example the SQL object returns the same iterator as for rows, as it can serve as key-value structure as well.
[ "Returns", "an", "iterator", "of", "records", "-", "dictionary", "-", "like", "objects", "that", "can", "be", "accessed", "by", "field", "names", ".", "Default", "implementation", "returns", "dictionaries", "however", "other", "objects", "might", "return", "their"...
def records(self): """Returns an iterator of records - dictionary-like objects that can be accessed by field names. Default implementation returns dictionaries, however other objects might return their own structures. For example the SQL object returns the same iterator as for rows, as it can serve as key-value structure as well.""" names = [str(field) for field in self.fields] for row in self.rows(): yield dict(zip(names, row))
[ "def", "records", "(", "self", ")", ":", "names", "=", "[", "str", "(", "field", ")", "for", "field", "in", "self", ".", "fields", "]", "for", "row", "in", "self", ".", "rows", "(", ")", ":", "yield", "dict", "(", "zip", "(", "names", ",", "row", ")", ")" ]
https://github.com/Stiivi/bubbles/blob/b3c9332b8a9534655bd77821586e45a5086b1ddc/bubbles/objects.py#L147-L157
miki725/django-url-filter
1585da89acf9f182f8db2534ab8c72e89c5c92f2
url_filter/utils.py
python
LookupConfig.name
(self)
return next(iter(self.data.keys()))
If the ``data`` is nested :class:`.LookupConfig`, this gets its first lookup key.
If the ``data`` is nested :class:`.LookupConfig`, this gets its first lookup key.
[ "If", "the", "data", "is", "nested", ":", "class", ":", ".", "LookupConfig", "this", "gets", "its", "first", "lookup", "key", "." ]
def name(self): """ If the ``data`` is nested :class:`.LookupConfig`, this gets its first lookup key. """ return next(iter(self.data.keys()))
[ "def", "name", "(", "self", ")", ":", "return", "next", "(", "iter", "(", "self", ".", "data", ".", "keys", "(", ")", ")", ")" ]
https://github.com/miki725/django-url-filter/blob/1585da89acf9f182f8db2534ab8c72e89c5c92f2/url_filter/utils.py#L154-L159
zsdlove/Hades
f3d8c43a40ccd7a1bca2a855d8cccc110c34448a
cfg/instructions.py
python
InstructionsFinder.add
(self, blk)
If the block does not exist and has at least one instruction, add it to the block list
If the block does not exist and has at least one instruction, add it to the block list
[ "If", "the", "block", "does", "not", "exist", "and", "has", "at", "least", "one", "instruction", ",", "add", "it", "to", "the", "block", "list" ]
def add(self, blk): ''' If the block does not exist and has at least one instruction, add it to the block list ''' if (not (blk in self.ParsedResults)) and (len(blk.instructiones)>0): self.ParsedResults.append(blk)
[ "def", "add", "(", "self", ",", "blk", ")", ":", "if", "(", "not", "(", "blk", "in", "self", ".", "ParsedResults", ")", ")", "and", "(", "len", "(", "blk", ".", "instructiones", ")", ">", "0", ")", ":", "self", ".", "ParsedResults", ".", "append"...
https://github.com/zsdlove/Hades/blob/f3d8c43a40ccd7a1bca2a855d8cccc110c34448a/cfg/instructions.py#L143-L148
openstack/cinder
23494a6d6c51451688191e1847a458f1d3cdcaa5
cinder/volume/drivers/hitachi/hbsd_rest_iscsi.py
python
HBSDRESTISCSI._set_target_portal
(self, port)
return True, ipv4_addr, tcp_port
Get port info and store it in an instance variable.
Get port info and store it in an instance variable.
[ "Get", "port", "info", "and", "store", "it", "in", "an", "instance", "variable", "." ]
def _set_target_portal(self, port): """Get port info and store it in an instance variable.""" result = self.client.get_port(port) ipv4_addr = result.get('ipv4Address') tcp_port = result.get('tcpPort') if not ipv4_addr or not tcp_port: return False, ipv4_addr, tcp_port self.storage_info['portals'][port] = '%(ip)s:%(port)s' % { 'ip': ipv4_addr, 'port': tcp_port, } return True, ipv4_addr, tcp_port
[ "def", "_set_target_portal", "(", "self", ",", "port", ")", ":", "result", "=", "self", ".", "client", ".", "get_port", "(", "port", ")", "ipv4_addr", "=", "result", ".", "get", "(", "'ipv4Address'", ")", "tcp_port", "=", "result", ".", "get", "(", "'t...
https://github.com/openstack/cinder/blob/23494a6d6c51451688191e1847a458f1d3cdcaa5/cinder/volume/drivers/hitachi/hbsd_rest_iscsi.py#L32-L43
littlecodersh/itchatmp
6a8f01415d88874f179c9b50a063b2b338c309cf
itchatmp/views/crypto.py
python
encrypt_msg
(timestamp, nonce, signature, config, replyDict)
return construct_msg({ 'FromUserName': replyDict['FromUserName'], 'ToUserName': replyDict['ToUserName'], 'MsgType': ENCRYPT, 'Encrypt': text.decode('utf8'), 'MsgSignature': hashlib.sha1(s).hexdigest(), 'TimeStamp': timestamp, 'Nonce': nonce, }, )
encrypt msg for sending to wechat * use AES_CBC encryption * return a string ready for sending * as in construct_msg, string in replyDict should be unicode
encrypt msg for sending to wechat * use AES_CBC encryption * return a string ready for sending * as in construct_msg, string in replyDict should be unicode
[ "encrypt", "msg", "for", "sending", "to", "wechat", "*", "use", "AES_CBC", "encryption", "*", "return", "a", "string", "ready", "for", "sending", "*", "as", "in", "construct_msg", "string", "in", "replyDict", "should", "be", "unicode" ]
def encrypt_msg(timestamp, nonce, signature, config, replyDict): ''' encrypt msg for sending to wechat * use AES_CBC encryption * return a string ready for sending * as in construct_msg, string in replyDict should be unicode ''' text = construct_msg(replyDict).encode('utf8') text = os.urandom(16) + struct.pack('>I', len(text)) +\ text + config.appId.encode('utf8') paddingAmount = 32 - (len(text) % 32) text += chr(paddingAmount).encode('utf8') * paddingAmount text = aes_encode(config._encodingAesKey, text) # Encrypt generated s = [i.encode('utf8') for i in (timestamp, nonce, config.token)] s += [text]; s.sort(); s = b''.join(s) # Signature generated return construct_msg({ 'FromUserName': replyDict['FromUserName'], 'ToUserName': replyDict['ToUserName'], 'MsgType': ENCRYPT, 'Encrypt': text.decode('utf8'), 'MsgSignature': hashlib.sha1(s).hexdigest(), 'TimeStamp': timestamp, 'Nonce': nonce, }, )
[ "def", "encrypt_msg", "(", "timestamp", ",", "nonce", ",", "signature", ",", "config", ",", "replyDict", ")", ":", "text", "=", "construct_msg", "(", "replyDict", ")", ".", "encode", "(", "'utf8'", ")", "text", "=", "os", ".", "urandom", "(", "16", ")"...
https://github.com/littlecodersh/itchatmp/blob/6a8f01415d88874f179c9b50a063b2b338c309cf/itchatmp/views/crypto.py#L59-L83
maxjiang93/ugscnn
89cdd512e21a2d0cbb884e52ee75645c39ad6ed7
experiments/exp3_2d3ds/model.py
python
Up.__init__
(self, in_ch, out_ch, level, mesh_folder, bias=True)
use mesh_file for the mesh of one-level up
use mesh_file for the mesh of one-level up
[ "use", "mesh_file", "for", "the", "mesh", "of", "one", "-", "level", "up" ]
def __init__(self, in_ch, out_ch, level, mesh_folder, bias=True): """ use mesh_file for the mesh of one-level up """ super().__init__() mesh_file = os.path.join(mesh_folder, "icosphere_{}.pkl".format(level)) half_in = int(in_ch/2) self.up = MeshConv_transpose(half_in, half_in, mesh_file, stride=2) self.conv = ResBlock(in_ch, out_ch, out_ch, level, False, mesh_folder)
[ "def", "__init__", "(", "self", ",", "in_ch", ",", "out_ch", ",", "level", ",", "mesh_folder", ",", "bias", "=", "True", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "mesh_file", "=", "os", ".", "path", ".", "join", "(", "mesh_folder", "...
https://github.com/maxjiang93/ugscnn/blob/89cdd512e21a2d0cbb884e52ee75645c39ad6ed7/experiments/exp3_2d3ds/model.py#L9-L17
typeddjango/django-stubs
7701957e4f5fac437b6b027a40e5aaeb26b87a5a
mypy_django_plugin/main.py
python
NewSemanalDjangoPlugin._get_current_model_bases
(self)
[]
def _get_current_model_bases(self) -> Dict[str, int]: model_sym = self.lookup_fully_qualified(fullnames.MODEL_CLASS_FULLNAME) if model_sym is not None and isinstance(model_sym.node, TypeInfo): return helpers.get_django_metadata(model_sym.node).setdefault( "model_bases", {fullnames.MODEL_CLASS_FULLNAME: 1} ) else: return {}
[ "def", "_get_current_model_bases", "(", "self", ")", "->", "Dict", "[", "str", ",", "int", "]", ":", "model_sym", "=", "self", ".", "lookup_fully_qualified", "(", "fullnames", ".", "MODEL_CLASS_FULLNAME", ")", "if", "model_sym", "is", "not", "None", "and", "...
https://github.com/typeddjango/django-stubs/blob/7701957e4f5fac437b6b027a40e5aaeb26b87a5a/mypy_django_plugin/main.py#L167-L174
mesonbuild/meson
a22d0f9a0a787df70ce79b05d0c45de90a970048
mesonbuild/compilers/compilers.py
python
Compiler.get_exelist
(self)
return self.exelist.copy()
[]
def get_exelist(self) -> T.List[str]: return self.exelist.copy()
[ "def", "get_exelist", "(", "self", ")", "->", "T", ".", "List", "[", "str", "]", ":", "return", "self", ".", "exelist", ".", "copy", "(", ")" ]
https://github.com/mesonbuild/meson/blob/a22d0f9a0a787df70ce79b05d0c45de90a970048/mesonbuild/compilers/compilers.py#L577-L578
Azure/azure-cli
6c1b085a0910c6c2139006fcbd8ade44006eb6dd
src/azure-cli/azure/cli/command_modules/serviceconnector/_validators.py
python
get_missing_connection_name
(namespace)
return missing_args
Get connection_name arg if user didn't provide it in command line
Get connection_name arg if user didn't provide it in command line
[ "Get", "connection_name", "arg", "if", "user", "didn", "t", "provide", "it", "in", "command", "line" ]
def get_missing_connection_name(namespace): '''Get connection_name arg if user didn't provide it in command line ''' missing_args = dict() if getattr(namespace, 'connection_name', None) is None: missing_args['connection_name'] = { 'help': 'The connection name', 'options': ['--connection'] } return missing_args
[ "def", "get_missing_connection_name", "(", "namespace", ")", ":", "missing_args", "=", "dict", "(", ")", "if", "getattr", "(", "namespace", ",", "'connection_name'", ",", "None", ")", "is", "None", ":", "missing_args", "[", "'connection_name'", "]", "=", "{", ...
https://github.com/Azure/azure-cli/blob/6c1b085a0910c6c2139006fcbd8ade44006eb6dd/src/azure-cli/azure/cli/command_modules/serviceconnector/_validators.py#L406-L416
aiogram/aiogram
4d2d81138681d730270819579f22b3a0001c43a5
examples/webhook_example_old.py
python
cmd_id
(message: types.Message)
return SendMessage(message.chat.id, '\n'.join(result_msg), reply_to_message_id=message.message_id, parse_mode=ParseMode.HTML)
Return info about user.
Return info about user.
[ "Return", "info", "about", "user", "." ]
async def cmd_id(message: types.Message): """ Return info about user. """ if message.reply_to_message: target = message.reply_to_message.from_user chat = message.chat elif message.forward_from and message.chat.type == ChatType.PRIVATE: target = message.forward_from chat = message.forward_from or message.chat else: target = message.from_user chat = message.chat result_msg = [hbold('Info about user:'), f"First name: {target.first_name}"] if target.last_name: result_msg.append(f"Last name: {target.last_name}") if target.username: result_msg.append(f"Username: {target.mention}") result_msg.append(f"User ID: {target.id}") result_msg.extend([hbold('Chat:'), f"Type: {chat.type}", f"Chat ID: {chat.id}"]) if chat.type != ChatType.PRIVATE: result_msg.append(f"Title: {chat.title}") else: result_msg.append(f"Title: {chat.full_name}") return SendMessage(message.chat.id, '\n'.join(result_msg), reply_to_message_id=message.message_id, parse_mode=ParseMode.HTML)
[ "async", "def", "cmd_id", "(", "message", ":", "types", ".", "Message", ")", ":", "if", "message", ".", "reply_to_message", ":", "target", "=", "message", ".", "reply_to_message", ".", "from_user", "chat", "=", "message", ".", "chat", "elif", "message", "....
https://github.com/aiogram/aiogram/blob/4d2d81138681d730270819579f22b3a0001c43a5/examples/webhook_example_old.py#L82-L112
Blazemeter/taurus
6e36b20397cf3e730e181cfebde0c8f19eb31fed
bzt/modules/blazemeter/blazemeter_reporter.py
python
BlazeMeterUploader._start_online
(self)
return self.results_url
Start online test
Start online test
[ "Start", "online", "test" ]
def _start_online(self): """ Start online test """ self.log.info("Initiating data feeding...") if self._test['id']: self._session, self._master = self._test.start_external() else: self._session, self._master, self.results_url = self._test.start_anonymous_external_test() self._test['id'] = self._session['testId'] if self._test.token: self.results_url = self._master.address + '/app/#/masters/%s' % self._master['id'] if self.report_name: self._session.set({"name": str(self.report_name)}) return self.results_url
[ "def", "_start_online", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "\"Initiating data feeding...\"", ")", "if", "self", ".", "_test", "[", "'id'", "]", ":", "self", ".", "_session", ",", "self", ".", "_master", "=", "self", ".", "_tes...
https://github.com/Blazemeter/taurus/blob/6e36b20397cf3e730e181cfebde0c8f19eb31fed/bzt/modules/blazemeter/blazemeter_reporter.py#L157-L175
Xyntax/DirBrute
84a54013f57a4588add9c2032c7c6c0902e6f504
libs/requests/adapters.py
python
HTTPAdapter.init_poolmanager
(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs)
Initializes a urllib3 PoolManager. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param connections: The number of urllib3 connection pools to cache. :param maxsize: The maximum number of connections to save in the pool. :param block: Block when no free connections are available. :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
Initializes a urllib3 PoolManager.
[ "Initializes", "a", "urllib3", "PoolManager", "." ]
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs): """Initializes a urllib3 PoolManager. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param connections: The number of urllib3 connection pools to cache. :param maxsize: The maximum number of connections to save in the pool. :param block: Block when no free connections are available. :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. """ # save these values for pickling self._pool_connections = connections self._pool_maxsize = maxsize self._pool_block = block self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, block=block, strict=True, **pool_kwargs)
[ "def", "init_poolmanager", "(", "self", ",", "connections", ",", "maxsize", ",", "block", "=", "DEFAULT_POOLBLOCK", ",", "*", "*", "pool_kwargs", ")", ":", "# save these values for pickling", "self", ".", "_pool_connections", "=", "connections", "self", ".", "_poo...
https://github.com/Xyntax/DirBrute/blob/84a54013f57a4588add9c2032c7c6c0902e6f504/libs/requests/adapters.py#L116-L134
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/site-packages/Werkzeug-0.8.3-py2.7.egg/werkzeug/datastructures.py
python
HeaderSet.clear
(self)
Clear the set.
Clear the set.
[ "Clear", "the", "set", "." ]
def clear(self): """Clear the set.""" self._set.clear() del self._headers[:] if self.on_update is not None: self.on_update(self)
[ "def", "clear", "(", "self", ")", ":", "self", ".", "_set", ".", "clear", "(", ")", "del", "self", ".", "_headers", "[", ":", "]", "if", "self", ".", "on_update", "is", "not", "None", ":", "self", ".", "on_update", "(", "self", ")" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/Werkzeug-0.8.3-py2.7.egg/werkzeug/datastructures.py#L1955-L1960
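A small sketch of HeaderSet.clear from werkzeug.datastructures; the header values are arbitrary examples:

from werkzeug.datastructures import HeaderSet

accept = HeaderSet(["gzip", "deflate"])
print(accept.to_header())   # 'gzip, deflate'
accept.clear()              # empties both the internal set and the ordered list, as in the record above
print(len(accept))          # 0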
leo-editor/leo-editor
383d6776d135ef17d73d935a2f0ecb3ac0e99494
leo/plugins/settings_finder.py
python
init
()
return True
Return True if the plugin has loaded successfully.
Return True if the plugin has loaded successfully.
[ "Return", "True", "if", "the", "plugin", "has", "loaded", "successfully", "." ]
def init(): """Return True if the plugin has loaded successfully.""" g.registerHandler('after-create-leo-frame', onCreate) g.plugin_signon(__name__) return True
[ "def", "init", "(", ")", ":", "g", ".", "registerHandler", "(", "'after-create-leo-frame'", ",", "onCreate", ")", "g", ".", "plugin_signon", "(", "__name__", ")", "return", "True" ]
https://github.com/leo-editor/leo-editor/blob/383d6776d135ef17d73d935a2f0ecb3ac0e99494/leo/plugins/settings_finder.py#L16-L20
tracim/tracim
a0e9746fde5a4c45b4e0f0bfa2caf9522b8c4e21
backend/tracim_backend/views/core_api/call.py
python
CallController.update_incoming_call_state
( self, context, request: TracimRequest, hapic_data=None )
return call_lib.update_call_state(hapic_data.path["call_id"], hapic_data.body["state"])
Update the state of a call.
Update the state of a call.
[ "Update", "the", "state", "of", "a", "call", "." ]
def update_incoming_call_state( self, context, request: TracimRequest, hapic_data=None ) -> UserCall: """ Update the state of a call. """ call_lib = CallLib( session=request.dbsession, config=request.app_config, current_user=request.current_user ) return call_lib.update_call_state(hapic_data.path["call_id"], hapic_data.body["state"])
[ "def", "update_incoming_call_state", "(", "self", ",", "context", ",", "request", ":", "TracimRequest", ",", "hapic_data", "=", "None", ")", "->", "UserCall", ":", "call_lib", "=", "CallLib", "(", "session", "=", "request", ".", "dbsession", ",", "config", "...
https://github.com/tracim/tracim/blob/a0e9746fde5a4c45b4e0f0bfa2caf9522b8c4e21/backend/tracim_backend/views/core_api/call.py#L51-L60
DataDog/integrations-core
934674b29d94b70ccc008f76ea172d0cdae05e1e
tokumx/datadog_checks/tokumx/vendor/pymongo/monitoring.py
python
_EventListeners.enabled_for_server
(self)
return self.__enabled_for_server
Are any ServerListener instances registered?
Are any ServerListener instances registered?
[ "Are", "any", "ServerListener", "instances", "registered?" ]
def enabled_for_server(self): """Are any ServerListener instances registered?""" return self.__enabled_for_server
[ "def", "enabled_for_server", "(", "self", ")", ":", "return", "self", ".", "__enabled_for_server" ]
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/tokumx/datadog_checks/tokumx/vendor/pymongo/monitoring.py#L697-L699
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/modules/nspawn.py
python
remove
(name, stop=False)
return True
Remove the named container .. warning:: This function will remove all data associated with the container. It will not, however, remove the btrfs subvolumes created by pulling container images (:mod:`nspawn.pull_raw <salt.modules.nspawn.pull_raw>`, :mod:`nspawn.pull_tar <salt.modules.nspawn.pull_tar>`, :mod:`nspawn.pull_dkr <salt.modules.nspawn.pull_dkr>`). stop : False If ``True``, the container will be destroyed even if it is running/frozen. CLI Examples: .. code-block:: bash salt '*' nspawn.remove foo salt '*' nspawn.remove foo stop=True
Remove the named container
[ "Remove", "the", "named", "container" ]
def remove(name, stop=False): """ Remove the named container .. warning:: This function will remove all data associated with the container. It will not, however, remove the btrfs subvolumes created by pulling container images (:mod:`nspawn.pull_raw <salt.modules.nspawn.pull_raw>`, :mod:`nspawn.pull_tar <salt.modules.nspawn.pull_tar>`, :mod:`nspawn.pull_dkr <salt.modules.nspawn.pull_dkr>`). stop : False If ``True``, the container will be destroyed even if it is running/frozen. CLI Examples: .. code-block:: bash salt '*' nspawn.remove foo salt '*' nspawn.remove foo stop=True """ if not stop and state(name) != "stopped": raise CommandExecutionError("Container '{}' is not stopped".format(name)) def _failed_remove(name, exc): raise CommandExecutionError( "Unable to remove container '{}': {}".format(name, exc) ) if _sd_version() >= 219: ret = _machinectl("remove {}".format(name)) if ret["retcode"] != 0: __context__["retcode"] = salt.defaults.exitcodes.EX_UNAVAILABLE _failed_remove(name, ret["stderr"]) else: try: shutil.rmtree(os.path.join(_root(), name)) except OSError as exc: _failed_remove(name, exc) return True
[ "def", "remove", "(", "name", ",", "stop", "=", "False", ")", ":", "if", "not", "stop", "and", "state", "(", "name", ")", "!=", "\"stopped\"", ":", "raise", "CommandExecutionError", "(", "\"Container '{}' is not stopped\"", ".", "format", "(", "name", ")", ...
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/nspawn.py#L1235-L1277
nicolargo/glances
00c65933ae1d0ebd3e72dc30fc3c215a83dfaae2
glances/password.py
python
GlancesPassword.get_hash
(self, salt, plain_password)
return hashlib.sha256(salt.encode() + plain_password.encode()).hexdigest()
Return the hashed password, salt + SHA-256.
Return the hashed password, salt + SHA-256.
[ "Return", "the", "hashed", "password", "salt", "+", "SHA", "-", "256", "." ]
def get_hash(self, salt, plain_password): """Return the hashed password, salt + SHA-256.""" return hashlib.sha256(salt.encode() + plain_password.encode()).hexdigest()
[ "def", "get_hash", "(", "self", ",", "salt", ",", "plain_password", ")", ":", "return", "hashlib", ".", "sha256", "(", "salt", ".", "encode", "(", ")", "+", "plain_password", ".", "encode", "(", ")", ")", ".", "hexdigest", "(", ")" ]
https://github.com/nicolargo/glances/blob/00c65933ae1d0ebd3e72dc30fc3c215a83dfaae2/glances/password.py#L49-L51
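The salt + SHA-256 step above can be reproduced with the standard library alone; the salt and password below are dummy values:

import hashlib

salt = "8c4f1a2e"                 # dummy salt
plain_password = "s3cret"         # dummy password
hashed = hashlib.sha256(salt.encode() + plain_password.encode()).hexdigest()
print(hashed)                     # 64-character hex digest of salt + password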
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/django/db/backends/base/base.py
python
BaseDatabaseWrapper.schema_editor
(self, *args, **kwargs)
return self.SchemaEditorClass(self, *args, **kwargs)
Returns a new instance of this backend's SchemaEditor.
Returns a new instance of this backend's SchemaEditor.
[ "Returns", "a", "new", "instance", "of", "this", "backend", "s", "SchemaEditor", "." ]
def schema_editor(self, *args, **kwargs): """ Returns a new instance of this backend's SchemaEditor. """ if self.SchemaEditorClass is None: raise NotImplementedError( 'The SchemaEditorClass attribute of this database wrapper is still None') return self.SchemaEditorClass(self, *args, **kwargs)
[ "def", "schema_editor", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "SchemaEditorClass", "is", "None", ":", "raise", "NotImplementedError", "(", "'The SchemaEditorClass attribute of this database wrapper is still None'", ")", ...
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/django/db/backends/base/base.py#L598-L605
phaethon/kamene
bf679a65d456411942ee4a907818ba3d6a183bfe
kamene/arch/windows/__init__.py
python
NetworkInterfaceDict.pcap_name
(self, devname)
Return pcap device name for given Windows device name.
Return pcap device name for given Windows device name.
[ "Return", "pcap", "device", "name", "for", "given", "Windows", "device", "name", "." ]
def pcap_name(self, devname): """Return pcap device name for given Windows device name.""" try: pcap_name = self.data[devname].pcap_name except KeyError: raise ValueError("Unknown network interface %r" % devname) else: return pcap_name
[ "def", "pcap_name", "(", "self", ",", "devname", ")", ":", "try", ":", "pcap_name", "=", "self", ".", "data", "[", "devname", "]", ".", "pcap_name", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Unknown network interface %r\"", "%", "devname", ")...
https://github.com/phaethon/kamene/blob/bf679a65d456411942ee4a907818ba3d6a183bfe/kamene/arch/windows/__init__.py#L175-L183
F8LEFT/DecLLVM
d38e45e3d0dd35634adae1d0cf7f96f3bd96e74c
python/idc.py
python
GetFlags
(ea)
return idaapi.getFlags(ea)
Get internal flags @param ea: linear address @return: 32-bit value of internal flags. See start of IDC.IDC file for explanations.
Get internal flags
[ "Get", "internal", "flags" ]
def GetFlags(ea): """ Get internal flags @param ea: linear address @return: 32-bit value of internal flags. See start of IDC.IDC file for explanations. """ return idaapi.getFlags(ea)
[ "def", "GetFlags", "(", "ea", ")", ":", "return", "idaapi", ".", "getFlags", "(", "ea", ")" ]
https://github.com/F8LEFT/DecLLVM/blob/d38e45e3d0dd35634adae1d0cf7f96f3bd96e74c/python/idc.py#L1697-L1706
mesonbuild/meson
a22d0f9a0a787df70ce79b05d0c45de90a970048
mesonbuild/mesonlib/universal.py
python
do_define
(regex: T.Pattern[str], line: str, confdata: 'ConfigurationData', variable_format: str)
[]
def do_define(regex: T.Pattern[str], line: str, confdata: 'ConfigurationData', variable_format: str) -> str: def get_cmake_define(line: str, confdata: 'ConfigurationData') -> str: arr = line.split() define_value = [] for token in arr[2:]: try: (v, desc) = confdata.get(token) define_value += [str(v)] except KeyError: define_value += [token] return ' '.join(define_value) arr = line.split() if variable_format == 'meson' and len(arr) != 2: raise MesonException('#mesondefine does not contain exactly two tokens: %s' % line.strip()) varname = arr[1] try: (v, desc) = confdata.get(varname) except KeyError: return '/* #undef %s */\n' % varname if isinstance(v, bool): if v: return '#define %s\n' % varname else: return '#undef %s\n' % varname elif isinstance(v, int): return '#define %s %d\n' % (varname, v) elif isinstance(v, str): if variable_format == 'meson': result = v else: result = get_cmake_define(line, confdata) result = f'#define {varname} {result}\n' (result, missing_variable) = do_replacement(regex, result, variable_format, confdata) return result else: raise MesonException('#mesondefine argument "%s" is of unknown type.' % varname)
[ "def", "do_define", "(", "regex", ":", "T", ".", "Pattern", "[", "str", "]", ",", "line", ":", "str", ",", "confdata", ":", "'ConfigurationData'", ",", "variable_format", ":", "str", ")", "->", "str", ":", "def", "get_cmake_define", "(", "line", ":", "...
https://github.com/mesonbuild/meson/blob/a22d0f9a0a787df70ce79b05d0c45de90a970048/mesonbuild/mesonlib/universal.py#L1132-L1169
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
pypy/module/_pypyjson/interp_decoder.py
python
JSONMap.should_cache_strings
(self)
return not (self.decoded_strings > JSONDecoder.STRING_CACHE_EVALUATION_SIZE and self.cache_hits * JSONDecoder.STRING_CACHE_USEFULNESS_FACTOR < self.decoded_strings)
return whether strings parsed in the context of this map should be cached.
return whether strings parsed in the context of this map should be cached.
[ "return", "whether", "strings", "parsed", "in", "the", "context", "of", "this", "map", "should", "be", "cached", "." ]
def should_cache_strings(self): """ return whether strings parsed in the context of this map should be cached. """ # we should cache if either we've seen few strings so far (less than # STRING_CACHE_EVALUATION_SIZE), or if we've seen many, and the cache # hit rate has been high enough return not (self.decoded_strings > JSONDecoder.STRING_CACHE_EVALUATION_SIZE and self.cache_hits * JSONDecoder.STRING_CACHE_USEFULNESS_FACTOR < self.decoded_strings)
[ "def", "should_cache_strings", "(", "self", ")", ":", "# we should cache if either we've seen few strings so far (less than", "# STRING_CACHE_EVALUATION_SIZE), or if we've seen many, and the cache", "# hit rate has been high enough", "return", "not", "(", "self", ".", "decoded_strings", ...
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/pypy/module/_pypyjson/interp_decoder.py#L1095-L1102
fonttools/fonttools
892322aaff6a89bea5927379ec06bc0da3dfb7df
Lib/fontTools/ttLib/tables/otConverters.py
python
UInt8.read
(self, reader, font, tableDict)
return reader.readUInt8()
[]
def read(self, reader, font, tableDict): return reader.readUInt8()
[ "def", "read", "(", "self", ",", "reader", ",", "font", ",", "tableDict", ")", ":", "return", "reader", ".", "readUInt8", "(", ")" ]
https://github.com/fonttools/fonttools/blob/892322aaff6a89bea5927379ec06bc0da3dfb7df/Lib/fontTools/ttLib/tables/otConverters.py#L310-L311
cea-sec/miasm
09376c524aedc7920a7eda304d6095e12f6958f4
miasm/core/types.py
python
Ptr.__init__
(self, fmt, dst_type, *type_args, **type_kwargs)
@fmt: (str) Num compatible format that will be the Ptr representation in memory @dst_type: (MemType or Type) the Type this Ptr points to. If a Type is given, it is transformed into a MemType with TheType.lval. *type_args, **type_kwargs: arguments to pass to the pointed MemType when instantiating it (e.g. for MemStr encoding or MemArray field_type).
[]
def __init__(self, fmt, dst_type, *type_args, **type_kwargs): """ @fmt: (str) Num compatible format that will be the Ptr representation in memory @dst_type: (MemType or Type) the Type this Ptr points to. If a Type is given, it is transformed into a MemType with TheType.lval. *type_args, **type_kwargs: arguments to pass to the pointed MemType when instantiating it (e.g. for MemStr encoding or MemArray field_type). """ if (not isinstance(dst_type, Type) and not (isinstance(dst_type, type) and issubclass(dst_type, MemType)) and not dst_type == MemSelf): raise ValueError("dst_type of Ptr must be a MemType type, a " "Type instance, the MemSelf marker or a class " "name.") super(Ptr, self).__init__(fmt) if isinstance(dst_type, Type): # Patch the field to propagate the MemSelf replacement dst_type._get_self_type = lambda: self._get_self_type() # dst_type cannot be patched here, since _get_self_type of the outer # class has not yet been set. Patching dst_type involves calling # dst_type.lval, which will only return a type that does not point # on MemSelf but on the right class only when _get_self_type of the # outer class has been replaced by _MetaMemStruct. # In short, dst_type = dst_type.lval is not valid here, it is done # lazily in _fix_dst_type self._dst_type = dst_type self._type_args = type_args self._type_kwargs = type_kwargs
[ "def", "__init__", "(", "self", ",", "fmt", ",", "dst_type", ",", "*", "type_args", ",", "*", "*", "type_kwargs", ")", ":", "if", "(", "not", "isinstance", "(", "dst_type", ",", "Type", ")", "and", "not", "(", "isinstance", "(", "dst_type", ",", "typ...
https://github.com/cea-sec/miasm/blob/09376c524aedc7920a7eda304d6095e12f6958f4/miasm/core/types.py#L421-L452
pyg-team/pytorch_geometric
b920e9a3a64e22c8356be55301c88444ff051cae
torch_geometric/datasets/amazon_products.py
python
AmazonProducts.process
(self)
[]
def process(self): f = np.load(osp.join(self.raw_dir, 'adj_full.npz')) adj = sp.csr_matrix((f['data'], f['indices'], f['indptr']), f['shape']) adj = adj.tocoo() row = torch.from_numpy(adj.row).to(torch.long) col = torch.from_numpy(adj.col).to(torch.long) edge_index = torch.stack([row, col], dim=0) x = np.load(osp.join(self.raw_dir, 'feats.npy')) x = torch.from_numpy(x).to(torch.float) ys = [-1] * x.size(0) with open(osp.join(self.raw_dir, 'class_map.json')) as f: class_map = json.load(f) for key, item in class_map.items(): ys[int(key)] = item y = torch.tensor(ys) with open(osp.join(self.raw_dir, 'role.json')) as f: role = json.load(f) train_mask = torch.zeros(x.size(0), dtype=torch.bool) train_mask[torch.tensor(role['tr'])] = True val_mask = torch.zeros(x.size(0), dtype=torch.bool) val_mask[torch.tensor(role['va'])] = True test_mask = torch.zeros(x.size(0), dtype=torch.bool) test_mask[torch.tensor(role['te'])] = True data = Data(x=x, edge_index=edge_index, y=y, train_mask=train_mask, val_mask=val_mask, test_mask=test_mask) data = data if self.pre_transform is None else self.pre_transform(data) torch.save(self.collate([data]), self.processed_paths[0])
[ "def", "process", "(", "self", ")", ":", "f", "=", "np", ".", "load", "(", "osp", ".", "join", "(", "self", ".", "raw_dir", ",", "'adj_full.npz'", ")", ")", "adj", "=", "sp", ".", "csr_matrix", "(", "(", "f", "[", "'data'", "]", ",", "f", "[", ...
https://github.com/pyg-team/pytorch_geometric/blob/b920e9a3a64e22c8356be55301c88444ff051cae/torch_geometric/datasets/amazon_products.py#L76-L111
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/django/db/backends/base/base.py
python
BaseDatabaseWrapper.create_cursor
(self)
Creates a cursor. Assumes that a connection is established.
Creates a cursor. Assumes that a connection is established.
[ "Creates", "a", "cursor", ".", "Assumes", "that", "a", "connection", "is", "established", "." ]
def create_cursor(self): """Creates a cursor. Assumes that a connection is established.""" raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method')
[ "def", "create_cursor", "(", "self", ")", ":", "raise", "NotImplementedError", "(", "'subclasses of BaseDatabaseWrapper may require a create_cursor() method'", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/django/db/backends/base/base.py#L150-L152
luyishisi/tensorflow
448e72bfb64f826aff8672d74fd7e59c0112e924
4.Object_Detection/object_detection/core/keypoint_ops.py
python
change_coordinate_frame
(keypoints, window, scope=None)
Changes coordinate frame of the keypoints to be relative to window's frame. Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint coordinates from keypoints of shape [num_instances, num_keypoints, 2] to be relative to this window. An example use case is data augmentation: where we are given groundtruth keypoints and would like to randomly crop the image to some window. In this case we need to change the coordinate frame of each groundtruth keypoint to be relative to this new window. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window we should change the coordinate frame to. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
Changes coordinate frame of the keypoints to be relative to window's frame.
[ "Changes", "coordinate", "frame", "of", "the", "keypoints", "to", "be", "relative", "to", "window", "s", "frame", "." ]
def change_coordinate_frame(keypoints, window, scope=None): """Changes coordinate frame of the keypoints to be relative to window's frame. Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint coordinates from keypoints of shape [num_instances, num_keypoints, 2] to be relative to this window. An example use case is data augmentation: where we are given groundtruth keypoints and would like to randomly crop the image to some window. In this case we need to change the coordinate frame of each groundtruth keypoint to be relative to this new window. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window we should change the coordinate frame to. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ with tf.name_scope(scope, 'ChangeCoordinateFrame'): win_height = window[2] - window[0] win_width = window[3] - window[1] new_keypoints = scale(keypoints - [window[0], window[1]], 1.0 / win_height, 1.0 / win_width) return new_keypoints
[ "def", "change_coordinate_frame", "(", "keypoints", ",", "window", ",", "scope", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "scope", ",", "'ChangeCoordinateFrame'", ")", ":", "win_height", "=", "window", "[", "2", "]", "-", "window", "[",...
https://github.com/luyishisi/tensorflow/blob/448e72bfb64f826aff8672d74fd7e59c0112e924/4.Object_Detection/object_detection/core/keypoint_ops.py#L99-L125
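A plain-Python sketch of the same coordinate-frame change for a single keypoint, without TensorFlow; the window and keypoint values are made up:

# window = [y_min, x_min, y_max, x_max] in normalized image coordinates
window = [0.25, 0.25, 0.75, 0.75]
keypoint = [0.5, 0.5]               # (y, x) relative to the full image

win_height = window[2] - window[0]  # 0.5
win_width = window[3] - window[1]   # 0.5
new_keypoint = [(keypoint[0] - window[0]) / win_height,
                (keypoint[1] - window[1]) / win_width]
print(new_keypoint)                 # [0.5, 0.5] - the image centre is also the window centre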
wbond/package_control
cfaaeb57612023e3679ecb7f8cd7ceac9f57990d
package_control/deps/asn1crypto/util.py
python
extended_datetime.tzinfo
(self)
return self._y2k.tzinfo
:return: If object is timezone aware, a datetime.tzinfo object, else None.
:return: If object is timezone aware, a datetime.tzinfo object, else None.
[ ":", "return", ":", "If", "object", "is", "timezone", "aware", "a", "datetime", ".", "tzinfo", "object", "else", "None", "." ]
def tzinfo(self): """ :return: If object is timezone aware, a datetime.tzinfo object, else None. """ return self._y2k.tzinfo
[ "def", "tzinfo", "(", "self", ")", ":", "return", "self", ".", "_y2k", ".", "tzinfo" ]
https://github.com/wbond/package_control/blob/cfaaeb57612023e3679ecb7f8cd7ceac9f57990d/package_control/deps/asn1crypto/util.py#L601-L607
internetarchive/warcprox
c027659001f76518b89055cea0d0a2d754f456d1
warcprox/dedup.py
python
DedupDb.notify
(self, recorded_url, records)
[]
def notify(self, recorded_url, records): if (records and records[0].type == b'response' and self.should_dedup(recorded_url)): digest_key = warcprox.digest_str( recorded_url.payload_digest, self.options.base32) if recorded_url.warcprox_meta and "dedup-buckets" in recorded_url.warcprox_meta: for bucket, bucket_mode in recorded_url.warcprox_meta["dedup-buckets"].items(): if not bucket_mode == "ro": self.save( digest_key, records[0], bucket=bucket) else: self.save(digest_key, records[0])
[ "def", "notify", "(", "self", ",", "recorded_url", ",", "records", ")", ":", "if", "(", "records", "and", "records", "[", "0", "]", ".", "type", "==", "b'response'", "and", "self", ".", "should_dedup", "(", "recorded_url", ")", ")", ":", "digest_key", ...
https://github.com/internetarchive/warcprox/blob/c027659001f76518b89055cea0d0a2d754f456d1/warcprox/dedup.py#L154-L166
HymanLiuTS/flaskTs
286648286976e85d9b9a5873632331efcafe0b21
flasky/lib/python2.7/site-packages/pip/utils/__init__.py
python
dist_in_site_packages
(dist)
return normalize_path( dist_location(dist) ).startswith(normalize_path(site_packages))
Return True if given Distribution is installed in distutils.sysconfig.get_python_lib().
Return True if given Distribution is installed in distutils.sysconfig.get_python_lib().
[ "Return", "True", "if", "given", "Distribution", "is", "installed", "in", "distutils", ".", "sysconfig", ".", "get_python_lib", "()", "." ]
def dist_in_site_packages(dist): """ Return True if given Distribution is installed in distutils.sysconfig.get_python_lib(). """ return normalize_path( dist_location(dist) ).startswith(normalize_path(site_packages))
[ "def", "dist_in_site_packages", "(", "dist", ")", ":", "return", "normalize_path", "(", "dist_location", "(", "dist", ")", ")", ".", "startswith", "(", "normalize_path", "(", "site_packages", ")", ")" ]
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/pip/utils/__init__.py#L308-L315
Drakkar-Software/OctoBot
c80ed2270e5d085994213955c0f56b9e3b70b476
octobot/logger.py
python
init_evaluator_chan_logger
(matrix_id: str)
[]
async def init_evaluator_chan_logger(matrix_id: str): await evaluator_channels.get_chan(channels_name.OctoBotEvaluatorsChannelsName.MATRIX_CHANNEL.value, matrix_id).new_consumer( matrix_callback, priority_level=LOGGER_PRIORITY_LEVEL ) await evaluator_channels.get_chan(channels_name.OctoBotEvaluatorsChannelsName.EVALUATORS_CHANNEL.value, matrix_id).new_consumer( evaluators_callback, priority_level=LOGGER_PRIORITY_LEVEL )
[ "async", "def", "init_evaluator_chan_logger", "(", "matrix_id", ":", "str", ")", ":", "await", "evaluator_channels", ".", "get_chan", "(", "channels_name", ".", "OctoBotEvaluatorsChannelsName", ".", "MATRIX_CHANNEL", ".", "value", ",", "matrix_id", ")", ".", "new_co...
https://github.com/Drakkar-Software/OctoBot/blob/c80ed2270e5d085994213955c0f56b9e3b70b476/octobot/logger.py#L159-L167
openstack/openstacksdk
58384268487fa854f21c470b101641ab382c9897
openstack/network/v2/_proxy.py
python
Proxy.delete_conntrack_helper
(self, conntrack_helper, router, ignore_missing=True)
Delete a L3 conntrack_helper :param conntrack_helper: The value can be the ID of a L3 conntrack helper or a :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper` instance. :param router: The value can be the ID of a Router or a :class:`~openstack.network.v2.router.Router` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.ResourceNotFound` will be raised when the floating ip does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent ip. :returns: ``None``
Delete a L3 conntrack_helper
[ "Delete", "a", "L3", "conntrack_helper" ]
def delete_conntrack_helper(self, conntrack_helper, router, ignore_missing=True): """Delete a L3 conntrack_helper :param conntrack_helper: The value can be the ID of a L3 conntrack helper or a :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper` instance. :param router: The value can be the ID of a Router or a :class:`~openstack.network.v2.router.Router` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.ResourceNotFound` will be raised when the floating ip does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent ip. :returns: ``None`` """ router = self._get_resource(_router.Router, router) self._delete(_l3_conntrack_helper.ConntrackHelper, conntrack_helper, router_id=router.id, ignore_missing=ignore_missing)
[ "def", "delete_conntrack_helper", "(", "self", ",", "conntrack_helper", ",", "router", ",", "ignore_missing", "=", "True", ")", ":", "router", "=", "self", ".", "_get_resource", "(", "_router", ".", "Router", ",", "router", ")", "self", ".", "_delete", "(", ...
https://github.com/openstack/openstacksdk/blob/58384268487fa854f21c470b101641ab382c9897/openstack/network/v2/_proxy.py#L4579-L4600
wxWidgets/Phoenix
b2199e299a6ca6d866aa6f3d0888499136ead9d6
wx/lib/agw/ultimatelistctrl.py
python
UltimateListCtrl.SetItemState
(self, item, state, stateMask)
return True
Sets the item state flags for the input item. :param `item`: the index of the item; if defaulted to -1, the state flag will be set for all the items; :param `state`: any combination of the following bits: ============================ ========= ============================== State Bits Hex Value Description ============================ ========= ============================== ``ULC_STATE_DONTCARE`` 0x0 Don't care what the state is ``ULC_STATE_DROPHILITED`` 0x1 The item is highlighted to receive a drop event ``ULC_STATE_FOCUSED`` 0x2 The item has the focus ``ULC_STATE_SELECTED`` 0x4 The item is selected ``ULC_STATE_CUT`` 0x8 The item is in the cut state ``ULC_STATE_DISABLED`` 0x10 The item is disabled ``ULC_STATE_FILTERED`` 0x20 The item has been filtered ``ULC_STATE_INUSE`` 0x40 The item is in use ``ULC_STATE_PICKED`` 0x80 The item has been picked ``ULC_STATE_SOURCE`` 0x100 The item is a drag and drop source ============================ ========= ============================== :param `stateMask`: the bitmask for the state flag.
Sets the item state flags for the input item.
[ "Sets", "the", "item", "state", "flags", "for", "the", "input", "item", "." ]
def SetItemState(self, item, state, stateMask): """ Sets the item state flags for the input item. :param `item`: the index of the item; if defaulted to -1, the state flag will be set for all the items; :param `state`: any combination of the following bits: ============================ ========= ============================== State Bits Hex Value Description ============================ ========= ============================== ``ULC_STATE_DONTCARE`` 0x0 Don't care what the state is ``ULC_STATE_DROPHILITED`` 0x1 The item is highlighted to receive a drop event ``ULC_STATE_FOCUSED`` 0x2 The item has the focus ``ULC_STATE_SELECTED`` 0x4 The item is selected ``ULC_STATE_CUT`` 0x8 The item is in the cut state ``ULC_STATE_DISABLED`` 0x10 The item is disabled ``ULC_STATE_FILTERED`` 0x20 The item has been filtered ``ULC_STATE_INUSE`` 0x40 The item is in use ``ULC_STATE_PICKED`` 0x80 The item has been picked ``ULC_STATE_SOURCE`` 0x100 The item is a drag and drop source ============================ ========= ============================== :param `stateMask`: the bitmask for the state flag. """ self._mainWin.SetItemState(item, state, stateMask) return True
[ "def", "SetItemState", "(", "self", ",", "item", ",", "state", ",", "stateMask", ")", ":", "self", ".", "_mainWin", ".", "SetItemState", "(", "item", ",", "state", ",", "stateMask", ")", "return", "True" ]
https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/lib/agw/ultimatelistctrl.py#L11352-L11380
sethmlarson/virtualbox-python
984a6e2cb0e8996f4df40f4444c1528849f1c70d
virtualbox/library.py
python
IHostDrivePartition.end_cylinder
(self)
return ret
Get int value for 'endCylinder' The cylinder (0..1023) of the last sector (inclusive) in the partition on an MBR disk, zero for not an MBR disk.
Get int value for 'endCylinder' The cylinder (0..1023) of the last sector (inclusive) in the partition on an MBR disk, zero for not an MBR disk.
[ "Get", "int", "value", "for", "endCylinder", "The", "cylinder", "(", "0", "..", "1023", ")", "of", "the", "last", "sector", "(", "inclusive", ")", "in", "the", "partition", "on", "an", "MBR", "disk", "zero", "for", "not", "an", "MBR", "disk", "." ]
def end_cylinder(self): """Get int value for 'endCylinder' The cylinder (0..1023) of the last sector (inclusive) in the partition on an MBR disk, zero for not an MBR disk. """ ret = self._get_attr("endCylinder") return ret
[ "def", "end_cylinder", "(", "self", ")", ":", "ret", "=", "self", ".", "_get_attr", "(", "\"endCylinder\"", ")", "return", "ret" ]
https://github.com/sethmlarson/virtualbox-python/blob/984a6e2cb0e8996f4df40f4444c1528849f1c70d/virtualbox/library.py#L19310-L19315
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
examples/research_projects/longform-qa/eli5_app.py
python
load_indexes
()
return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
[]
def load_indexes(): if LOAD_DENSE_INDEX: faiss_res = faiss.StandardGpuResources() wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"] wiki40b_passage_reps = np.memmap( "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wiki40b_passages.num_rows, 128), ) wiki40b_index_flat = faiss.IndexFlatIP(128) wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat) wiki40b_gpu_index_flat.add(wiki40b_passage_reps) # TODO fix for larger GPU else: wiki40b_passages, wiki40b_gpu_index_flat = (None, None) es_client = Elasticsearch([{"host": "localhost", "port": "9200"}]) return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
[ "def", "load_indexes", "(", ")", ":", "if", "LOAD_DENSE_INDEX", ":", "faiss_res", "=", "faiss", ".", "StandardGpuResources", "(", ")", "wiki40b_passages", "=", "datasets", ".", "load_dataset", "(", "path", "=", "\"wiki_snippets\"", ",", "name", "=", "\"wiki40b_e...
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/examples/research_projects/longform-qa/eli5_app.py#L45-L61
krintoxi/NoobSec-Toolkit
38738541cbc03cedb9a3b3ed13b629f781ad64f6
NoobSecToolkit /scripts/sshbackdoors/rpyc/utils/teleportation.py
python
decode_codeobj
(codeobj)
[]
def decode_codeobj(codeobj): # adapted from dis.dis extended_arg = 0 if is_py3k: codestr = codeobj.co_code else: codestr = [ord(ch) for ch in codeobj.co_code] free = None i = 0 while i < len(codestr): op = codestr[i] opname = opcode.opname[op] i += 1 argval = None if op >= opcode.HAVE_ARGUMENT: oparg = codestr[i] + codestr[i + 1] * 256 + extended_arg i += 2 extended_arg = 0 if op == opcode.EXTENDED_ARG: extended_arg = oparg * 65536 continue if op in opcode.hasconst: argval = codeobj.co_consts[oparg] elif op in opcode.hasname: argval = codeobj.co_names[oparg] elif op in opcode.hasjrel: argval = i + oparg elif op in opcode.haslocal: argval = codeobj.co_varnames[oparg] elif op in opcode.hascompare: argval = opcode.cmp_op[oparg] elif op in opcode.hasfree: if free is None: free = codeobj.co_cellvars + codeobj.co_freevars argval = free[oparg] yield (opname, argval)
[ "def", "decode_codeobj", "(", "codeobj", ")", ":", "# adapted from dis.dis", "extended_arg", "=", "0", "if", "is_py3k", ":", "codestr", "=", "codeobj", ".", "co_code", "else", ":", "codestr", "=", "[", "ord", "(", "ch", ")", "for", "ch", "in", "codeobj", ...
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit /scripts/sshbackdoors/rpyc/utils/teleportation.py#L13-L50
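decode_codeobj hand-decodes a code object into (opname, argval) pairs and, as written, assumes the pre-3.6 bytecode layout with 2-byte instruction arguments. For a quick look at the same information on a current interpreter, the standard-library dis module gives an equivalent view; a minimal sketch (not rpyc code):

import dis

def sample(x):
    return x + 1

# dis.get_instructions yields Instruction objects whose opname/argval fields carry
# the same information decode_codeobj reconstructs by hand.
for ins in dis.get_instructions(sample.__code__):
    print(ins.opname, ins.argval)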
leancloud/satori
701caccbd4fe45765001ca60435c0cb499477c03
satori-rules/plugin/libs/requests/cookies.py
python
RequestsCookieJar.keys
(self)
return list(self.iterkeys())
Dict-like keys() that returns a list of names of cookies from the jar. See values() and items().
Dict-like keys() that returns a list of names of cookies from the jar. See values() and items().
[ "Dict", "-", "like", "keys", "()", "that", "returns", "a", "list", "of", "names", "of", "cookies", "from", "the", "jar", ".", "See", "values", "()", "and", "items", "()", "." ]
def keys(self):
    """Dict-like keys() that returns a list of names of cookies
    from the jar.

    See values() and items()."""
    return list(self.iterkeys())
[ "def", "keys", "(", "self", ")", ":", "return", "list", "(", "self", ".", "iterkeys", "(", ")", ")" ]
https://github.com/leancloud/satori/blob/701caccbd4fe45765001ca60435c0cb499477c03/satori-rules/plugin/libs/requests/cookies.py#L214-L217
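A hedged usage sketch, assuming the upstream requests package (the record above is a vendored copy): the cookie jar attached to a Session is a RequestsCookieJar, so the dict-like keys()/values()/items() helpers work directly.

import requests

session = requests.Session()
session.cookies.set("theme", "dark")   # RequestsCookieJar.set
print(session.cookies.keys())          # ['theme']
print(session.cookies.get("theme"))    # 'dark'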
confluentinc/confluent-kafka-python
2ac0d72b24b14e5246445ad9ce66ec9c8828ef4e
examples/asyncio_example.py
python
Producer.produce
(self, topic, value, on_delivery=None)
[]
def produce(self, topic, value, on_delivery=None):
    self._producer.produce(topic, value, on_delivery=on_delivery)
[ "def", "produce", "(", "self", ",", "topic", ",", "value", ",", "on_delivery", "=", "None", ")", ":", "self", ".", "_producer", ".", "produce", "(", "topic", ",", "value", ",", "on_delivery", "=", "on_delivery", ")" ]
https://github.com/confluentinc/confluent-kafka-python/blob/2ac0d72b24b14e5246445ad9ce66ec9c8828ef4e/examples/asyncio_example.py#L103-L104
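This wrapper simply forwards to confluent_kafka.Producer.produce. A minimal sketch of the underlying call, assuming the confluent-kafka package and a broker reachable at the hypothetical address localhost:9092:

from confluent_kafka import Producer

def on_delivery(err, msg):
    # invoked from poll()/flush() once the broker acknowledges (or rejects) the message
    if err is not None:
        print("delivery failed:", err)
    else:
        print("delivered to", msg.topic(), "partition", msg.partition())

producer = Producer({"bootstrap.servers": "localhost:9092"})  # hypothetical broker address
producer.produce("test-topic", b"hello", on_delivery=on_delivery)
producer.flush()  # block until the delivery callback has fired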
runawayhorse001/LearningApacheSpark
67f3879dce17553195f094f5728b94a01badcf24
pyspark/mllib/util.py
python
MLUtils.loadLibSVMFile
(sc, path, numFeatures=-1, minPartitions=None, multiclass=None)
return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2])))
Loads labeled data in the LIBSVM format into an RDD of
    LabeledPoint. The LIBSVM format is a text-based format used by
    LIBSVM and LIBLINEAR. Each line represents a labeled sparse
    feature vector using the following format:

    label index1:value1 index2:value2 ...

    where the indices are one-based and in ascending order. This
    method parses each line into a LabeledPoint, where the feature
    indices are converted to zero-based.

    :param sc: Spark context
    :param path: file or directory path in any Hadoop-supported file
                 system URI
    :param numFeatures: number of features, which will be determined
                        from the input data if a nonpositive value
                        is given. This is useful when the dataset is
                        already split into multiple files and you
                        want to load them separately, because some
                        features may not be present in certain files,
                        which leads to inconsistent feature
                        dimensions.
    :param minPartitions: min number of partitions
    @return: labeled data stored as an RDD of LabeledPoint

    >>> from tempfile import NamedTemporaryFile
    >>> from pyspark.mllib.util import MLUtils
    >>> from pyspark.mllib.regression import LabeledPoint
    >>> tempFile = NamedTemporaryFile(delete=True)
    >>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0")
    >>> tempFile.flush()
    >>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
    >>> tempFile.close()
    >>> examples[0]
    LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
    >>> examples[1]
    LabeledPoint(-1.0, (6,[],[]))
    >>> examples[2]
    LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
Loads labeled data in the LIBSVM format into an RDD of LabeledPoint. The LIBSVM format is a text-based format used by LIBSVM and LIBLINEAR. Each line represents a labeled sparse feature vector using the following format:
[ "Loads", "labeled", "data", "in", "the", "LIBSVM", "format", "into", "an", "RDD", "of", "LabeledPoint", ".", "The", "LIBSVM", "format", "is", "a", "text", "-", "based", "format", "used", "by", "LIBSVM", "and", "LIBLINEAR", ".", "Each", "line", "represents"...
def loadLibSVMFile(sc, path, numFeatures=-1, minPartitions=None, multiclass=None):
    """
    Loads labeled data in the LIBSVM format into an RDD of
    LabeledPoint. The LIBSVM format is a text-based format used by
    LIBSVM and LIBLINEAR. Each line represents a labeled sparse
    feature vector using the following format:

    label index1:value1 index2:value2 ...

    where the indices are one-based and in ascending order. This
    method parses each line into a LabeledPoint, where the feature
    indices are converted to zero-based.

    :param sc: Spark context
    :param path: file or directory path in any Hadoop-supported file
                 system URI
    :param numFeatures: number of features, which will be determined
                        from the input data if a nonpositive value
                        is given. This is useful when the dataset is
                        already split into multiple files and you
                        want to load them separately, because some
                        features may not be present in certain files,
                        which leads to inconsistent feature
                        dimensions.
    :param minPartitions: min number of partitions
    @return: labeled data stored as an RDD of LabeledPoint

    >>> from tempfile import NamedTemporaryFile
    >>> from pyspark.mllib.util import MLUtils
    >>> from pyspark.mllib.regression import LabeledPoint
    >>> tempFile = NamedTemporaryFile(delete=True)
    >>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0")
    >>> tempFile.flush()
    >>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
    >>> tempFile.close()
    >>> examples[0]
    LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
    >>> examples[1]
    LabeledPoint(-1.0, (6,[],[]))
    >>> examples[2]
    LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
    """
    from pyspark.mllib.regression import LabeledPoint
    if multiclass is not None:
        warnings.warn("deprecated", DeprecationWarning)

    lines = sc.textFile(path, minPartitions)
    parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l))
    if numFeatures <= 0:
        parsed.cache()
        numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1
    return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2])))
[ "def", "loadLibSVMFile", "(", "sc", ",", "path", ",", "numFeatures", "=", "-", "1", ",", "minPartitions", "=", "None", ",", "multiclass", "=", "None", ")", ":", "from", "pyspark", ".", "mllib", ".", "regression", "import", "LabeledPoint", "if", "multiclass...
https://github.com/runawayhorse001/LearningApacheSpark/blob/67f3879dce17553195f094f5728b94a01badcf24/pyspark/mllib/util.py#L76-L127
silverapp/silver
a59dbc7216733ab49dca2fae525d229bdba04420
silver/currencies.py
python
DummyConverter.convert
(self, amount, from_currency, to_currency, date)
return amount
[]
def convert(self, amount, from_currency, to_currency, date):
    if from_currency != to_currency:
        raise RateNotFound(from_currency, to_currency, date)
    return amount
[ "def", "convert", "(", "self", ",", "amount", ",", "from_currency", ",", "to_currency", ",", "date", ")", ":", "if", "from_currency", "!=", "to_currency", ":", "raise", "RateNotFound", "(", "from_currency", ",", "to_currency", ",", "date", ")", "return", "am...
https://github.com/silverapp/silver/blob/a59dbc7216733ab49dca2fae525d229bdba04420/silver/currencies.py#L41-L44
maozezhong/TIANCHI_XUELANG_AI
cfec006032e264cc77a56776b0ed237e6d07fe48
code/main.py
python
mycrossentropy
(e = 0.1,nb_classes=2)
return mycrossentropy_fixed
https://spaces.ac.cn/archives/4493
https://spaces.ac.cn/archives/4493
[ "https", ":", "//", "spaces", ".", "ac", ".", "cn", "/", "archives", "/", "4493" ]
def mycrossentropy(e = 0.1,nb_classes=2):
    '''
    https://spaces.ac.cn/archives/4493
    '''
    def mycrossentropy_fixed(y_true, y_pred):
        return (1-e)*K.categorical_crossentropy(y_true,y_pred) + e*K.categorical_crossentropy(K.ones_like(y_pred)/nb_classes, y_pred)
    return mycrossentropy_fixed
[ "def", "mycrossentropy", "(", "e", "=", "0.1", ",", "nb_classes", "=", "2", ")", ":", "def", "mycrossentropy_fixed", "(", "y_true", ",", "y_pred", ")", ":", "return", "(", "1", "-", "e", ")", "*", "K", ".", "categorical_crossentropy", "(", "y_true", ",...
https://github.com/maozezhong/TIANCHI_XUELANG_AI/blob/cfec006032e264cc77a56776b0ed237e6d07fe48/code/main.py#L74-L80
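mycrossentropy returns a closure that mixes the usual cross-entropy with a uniform-target term weighted by e, i.e. a label-smoothing-style regularizer (see the linked article). A hedged usage sketch, assuming a Keras setup where mycrossentropy from above is in scope with K bound to the Keras backend; the tiny model is only illustrative:

from tensorflow import keras  # assumption: tf.keras; the original project uses standalone Keras

model = keras.Sequential([
    keras.layers.Dense(16, activation="relu", input_shape=(10,)),
    keras.layers.Dense(2, activation="softmax"),
])
# The returned closure plugs in wherever a Keras loss function is expected.
model.compile(optimizer="adam", loss=mycrossentropy(e=0.1, nb_classes=2))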
dmlc/dgl
8d14a739bc9e446d6c92ef83eafe5782398118de
python/dgl/backend/backend.py
python
ndim
(input)
Return the number of dimensions of the tensor.

    Parameters
    ----------
    input : Tensor
        The input tensor.

    Returns
    -------
    int
        The number of dimensions
Return the number of dimensions of the tensor.
[ "Return", "the", "number", "of", "dimensions", "of", "the", "tensor", "." ]
def ndim(input):
    """Return the number of dimensions of the tensor.

    Parameters
    ----------
    input : Tensor
        The input tensor.

    Returns
    -------
    int
        The number of dimensions
    """
    pass
[ "def", "ndim", "(", "input", ")", ":", "pass" ]
https://github.com/dmlc/dgl/blob/8d14a739bc9e446d6c92ef83eafe5782398118de/python/dgl/backend/backend.py#L206-L219
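ndim here is an abstract backend-interface function (the body is pass); each concrete DGL backend supplies the real implementation. A hedged sketch of what a PyTorch-based implementation could look like; the torch import is an assumption for illustration, not part of the interface file:

import torch

def ndim(input):
    """Return the number of dimensions of the tensor."""
    return input.dim()   # PyTorch exposes dimensionality as Tensor.dim()

print(ndim(torch.zeros(2, 3, 4)))  # 3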
naftaliharris/tauthon
5587ceec329b75f7caf6d65a036db61ac1bae214
Lib/lib2to3/pgen2/tokenize.py
python
untokenize
(iterable)
return ut.untokenize(iterable)
Transform tokens back into Python source code.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
        assert t1 == t2
Transform tokens back into Python source code.
[ "Transform", "tokens", "back", "into", "Python", "source", "code", "." ]
def untokenize(iterable):
    """Transform tokens back into Python source code.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    return ut.untokenize(iterable)
[ "def", "untokenize", "(", "iterable", ")", ":", "ut", "=", "Untokenizer", "(", ")", "return", "ut", ".", "untokenize", "(", "iterable", ")" ]
https://github.com/naftaliharris/tauthon/blob/5587ceec329b75f7caf6d65a036db61ac1bae214/Lib/lib2to3/pgen2/tokenize.py#L326-L345
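A minimal round-trip sketch of the limited (two-element) form described in the docstring, written against the standard-library tokenize module rather than the lib2to3 copy above; with only (type, string) pairs the rebuilt source is equivalent but not guaranteed byte-for-byte identical:

import io
import tokenize

source = "x = 1\nif x:\n    print(x)\n"
tokens = [(tok.type, tok.string)
          for tok in tokenize.generate_tokens(io.StringIO(source).readline)]
rebuilt = tokenize.untokenize(tokens)
print(rebuilt)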
TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials
5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e
deep-learning/Deep-Reinforcement-Learning-Complete-Collection/DeepRL-Code/chapter10/mountain_car.py
python
tiles
(iht_or_size, num_tilings, floats, ints=None, read_only=False)
return tiles
returns num-tilings tile indices corresponding to the floats and ints
returns num-tilings tile indices corresponding to the floats and ints
[ "returns", "num", "-", "tilings", "tile", "indices", "corresponding", "to", "the", "floats", "and", "ints" ]
def tiles(iht_or_size, num_tilings, floats, ints=None, read_only=False):
    """returns num-tilings tile indices corresponding to the floats and ints"""
    if ints is None:
        ints = []
    qfloats = [floor(f * num_tilings) for f in floats]
    tiles = []
    for tiling in range(num_tilings):
        tilingX2 = tiling * 2
        coords = [tiling]
        b = tiling
        for q in qfloats:
            coords.append((q + b) // num_tilings)
            b += tilingX2
        coords.extend(ints)
        tiles.append(hash_coords(coords, iht_or_size, read_only))
    return tiles
[ "def", "tiles", "(", "iht_or_size", ",", "num_tilings", ",", "floats", ",", "ints", "=", "None", ",", "read_only", "=", "False", ")", ":", "if", "ints", "is", "None", ":", "ints", "=", "[", "]", "qfloats", "=", "[", "floor", "(", "f", "*", "num_til...
https://github.com/TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials/blob/5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e/deep-learning/Deep-Reinforcement-Learning-Complete-Collection/DeepRL-Code/chapter10/mountain_car.py#L58-L73
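A hedged usage sketch of Sutton-style tile coding as it is typically driven in this Mountain Car code. It assumes the IHT (index hash table) helper defined alongside tiles() in the same file, and the position/velocity scaling factors below are illustrative, not prescribed by the function:

iht = IHT(4096)                  # assumed helper from the same file: a 4096-slot index hash table
position, velocity = -0.5, 0.02  # an example Mountain Car state
# 8 tilings over the scaled 2-D state -> 8 active feature indices for a linear approximator
active_tiles = tiles(iht, 8, [8 * position / 1.7, 8 * velocity / 0.14])
print(active_tiles)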
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/apps/settings/views.py
python
EnableMobilePrivilegesView.dispatch
(self, request, *args, **kwargs)
return not_found(request)
[]
def dispatch(self, request, *args, **kwargs):
    # raises a 404 if a user tries to access this page without the right authorizations
    if self.is_user_authorized(request.couch_user):
        return super(BaseMyAccountView, self).dispatch(request, *args, **kwargs)
    return not_found(request)
[ "def", "dispatch", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# raises a 404 if a user tries to access this page without the right authorizations", "if", "self", ".", "is_user_authorized", "(", "request", ".", "couch_user", ")",...
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/settings/views.py#L561-L565
python-hyper/h2
53feb0e0d8ddd28fa2dbd534d519a8eefd441f14
examples/curio/curio-server.py
python
H2Server.send_file
(self, file_path, stream_id)
Send a file, obeying the rules of HTTP/2 flow control.
Send a file, obeying the rules of HTTP/2 flow control.
[ "Send", "a", "file", "obeying", "the", "rules", "of", "HTTP", "/", "2", "flow", "control", "." ]
async def send_file(self, file_path, stream_id):
    """
    Send a file, obeying the rules of HTTP/2 flow control.
    """
    filesize = os.stat(file_path).st_size
    content_type, content_encoding = mimetypes.guess_type(file_path)
    response_headers = [
        (':status', '200'),
        ('content-length', str(filesize)),
        ('server', 'curio-h2'),
    ]
    if content_type:
        response_headers.append(('content-type', content_type))
        if content_encoding:
            response_headers.append(('content-encoding', content_encoding))
    self.conn.send_headers(stream_id, response_headers)
    await self.sock.sendall(self.conn.data_to_send())

    with open(file_path, 'rb', buffering=0) as f:
        await self._send_file_data(f, stream_id)
[ "async", "def", "send_file", "(", "self", ",", "file_path", ",", "stream_id", ")", ":", "filesize", "=", "os", ".", "stat", "(", "file_path", ")", ".", "st_size", "content_type", ",", "content_encoding", "=", "mimetypes", ".", "guess_type", "(", "file_path",...
https://github.com/python-hyper/h2/blob/53feb0e0d8ddd28fa2dbd534d519a8eefd441f14/examples/curio/curio-server.py#L126-L146
google/pytype
fa43edc95dd42ade6e3147d6580d63e778c9d506
pytype/tools/merge_pyi/merge_pyi.py
python
Pyi._get_imports
(self, inserted_types)
Get the imports that provide the given types.
Get the imports that provide the given types.
[ "Get", "the", "imports", "that", "provide", "the", "given", "types", "." ]
def _get_imports(self, inserted_types):
    """Get the imports that provide the given types."""
    used_names = set()
    for node in inserted_types + self.assignments:
        for leaf in node.leaves():
            if leaf.type == token.NAME:
                used_names.add(leaf.value)
                # All prefixes are possible imports.
                while '.' in leaf.value:
                    value, _ = leaf.rsplit('.', 1)
                    used_names.add(value)
    for (pkg, pkg_alias), names in self.imports:
        if not names:
            if (pkg_alias or pkg) in used_names:
                yield ((pkg, pkg_alias), names)
        else:
            names = [(name, alias) for name, alias in names
                     if name == '*' or (alias or name) in used_names]
            if names:
                yield ((pkg, pkg_alias), names)
[ "def", "_get_imports", "(", "self", ",", "inserted_types", ")", ":", "used_names", "=", "set", "(", ")", "for", "node", "in", "inserted_types", "+", "self", ".", "assignments", ":", "for", "leaf", "in", "node", ".", "leaves", "(", ")", ":", "if", "leaf...
https://github.com/google/pytype/blob/fa43edc95dd42ade6e3147d6580d63e778c9d506/pytype/tools/merge_pyi/merge_pyi.py#L662-L681
geaxgx/tello-openpose
a76c5e2dcd7d444207e79962e93fc1b4956442aa
CameraMorse.py
python
RollingGraph.__init__
(self, window_name="Graph", width=640, height=250, step_width=5, y_min=0, y_max=255, colors=[(0,0,255)], thickness=[2], threshold=None, waitKey=True)
width, height: width and height in pixels of the OpenCv window in which the graph is drawn
    step_width: width in pixels on the x-axis between each 2 consecutive points
    y_min, y_max : min and max of the variables
    colors : array of the colors used to draw the variables
    thickness: array of the thickness of the variable curves
    waitKey : boolean. In OpenCv, to display a window, we must call cv2.waitKey(). This call can be done by RollingGraph (if True) or by the program who calls RollingGraph (if False)
width, height: width and height in pixels of the OpenCv window in which the graph is drawn step_width: width in pixels on the x-axis between each 2 consecutive points y_min, y_max : min and max of the variables colors : array of the colors used to draw the variables thickness: array of the thickness of the variable curves waitKey : boolean. In OpenCv, to display a window, we must call cv2.waitKey(). This call can be done by RollingGraph (if True) or by the program who calls RollingGraph (if False)
[ "width", "height", ":", "width", "and", "height", "in", "pixels", "of", "the", "OpenCv", "window", "in", "which", "the", "graph", "is", "draw", "step_width", ":", "width", "in", "pixels", "on", "the", "x", "-", "axis", "between", "each", "2", "consecutiv...
def __init__(self, window_name="Graph", width=640, height=250, step_width=5, y_min=0, y_max=255, colors=[(0,0,255)], thickness=[2], threshold=None, waitKey=True):
    """
    width, height: width and height in pixels of the OpenCv window in which the graph is drawn
    step_width: width in pixels on the x-axis between each 2 consecutive points
    y_min, y_max : min and max of the variables
    colors : array of the colors used to draw the variables
    thickness: array of the thickness of the variable curves
    waitKey : boolean. In OpenCv, to display a window, we must call cv2.waitKey(). This call can be done by RollingGraph (if True) or by the program who calls RollingGraph (if False)
    """
    self.window_name = window_name
    self.width = width
    self.height = height
    self.step_width = step_width
    self.y_min = y_min
    self.y_max = y_max
    self.waitKey = waitKey
    assert len(colors) == len(thickness)
    self.colors = colors
    self.thickness = thickness
    self.iter = 0
    self.canvas = np.zeros((height,width,3),dtype=np.uint8)
    self.nb_values = len(colors)
    self.threshold = threshold
[ "def", "__init__", "(", "self", ",", "window_name", "=", "\"Graph\"", ",", "width", "=", "640", ",", "height", "=", "250", ",", "step_width", "=", "5", ",", "y_min", "=", "0", ",", "y_max", "=", "255", ",", "colors", "=", "[", "(", "0", ",", "0",...
https://github.com/geaxgx/tello-openpose/blob/a76c5e2dcd7d444207e79962e93fc1b4956442aa/CameraMorse.py#L11-L34
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/reportlab/lib/colors.py
python
CMYKColor.cmyk
(self)
return (self.cyan, self.magenta, self.yellow, self.black)
Returns a tuple of four color components - syntactic sugar
Returns a tuple of four color components - syntactic sugar
[ "Returns", "a", "tuple", "of", "four", "color", "components", "-", "syntactic", "sugar" ]
def cmyk(self):
    "Returns a tuple of four color components - syntactic sugar"
    return (self.cyan, self.magenta, self.yellow, self.black)
[ "def", "cmyk", "(", "self", ")", ":", "return", "(", "self", ".", "cyan", ",", "self", ".", "magenta", ",", "self", ".", "yellow", ",", "self", ".", "black", ")" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/reportlab/lib/colors.py#L231-L233
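A hedged usage sketch, assuming the reportlab package: CMYKColor takes the four process-ink components in order, and cmyk() hands them back as a tuple.

from reportlab.lib.colors import CMYKColor

warm_red = CMYKColor(0, 0.75, 0.9, 0)  # cyan, magenta, yellow, black
print(warm_red.cmyk())                 # (0, 0.75, 0.9, 0)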
DataDog/integrations-core
934674b29d94b70ccc008f76ea172d0cdae05e1e
tokumx/datadog_checks/tokumx/vendor/pymongo/monitoring.py
python
_EventListeners.publish_topology_closed
(self, topology_id)
Publish a TopologyClosedEvent to all topology listeners.

        :Parameters:
         - `topology_id`: A unique identifier for the topology this server
           is a part of.
Publish a TopologyClosedEvent to all topology listeners.
[ "Publish", "a", "TopologyClosedEvent", "to", "all", "topology", "listeners", "." ]
def publish_topology_closed(self, topology_id):
    """Publish a TopologyClosedEvent to all topology listeners.

    :Parameters:
     - `topology_id`: A unique identifier for the topology this server
       is a part of.
    """
    event = TopologyClosedEvent(topology_id)
    for subscriber in self.__topology_listeners:
        try:
            subscriber.closed(event)
        except Exception:
            _handle_exception()
[ "def", "publish_topology_closed", "(", "self", ",", "topology_id", ")", ":", "event", "=", "TopologyClosedEvent", "(", "topology_id", ")", "for", "subscriber", "in", "self", ".", "__topology_listeners", ":", "try", ":", "subscriber", ".", "closed", "(", "event",...
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/tokumx/datadog_checks/tokumx/vendor/pymongo/monitoring.py#L902-L914
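Application code does not call publish_topology_closed directly; it registers a TopologyListener and the driver publishes opened/description-changed/closed events to it. A hedged sketch against the upstream pymongo package (the record above is a vendored copy), with a hypothetical local connection string:

from pymongo import MongoClient, monitoring

class TopologyLogger(monitoring.TopologyListener):
    def opened(self, event):
        print("topology opened:", event.topology_id)

    def description_changed(self, event):
        print("topology changed:", event.topology_id)

    def closed(self, event):
        print("topology closed:", event.topology_id)

monitoring.register(TopologyLogger())                 # must happen before creating the client
client = MongoClient("mongodb://localhost:27017")     # hypothetical connection string
client.close()                                        # eventually publishes TopologyClosedEvent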
microsoft/hummingbird
2705c42585ac1d3ee69e65a93e5f59d7d42f44d5
hummingbird/ml/_parse.py
python
_parse_sklearn_stacking
(topology, model, inputs)
return var_out
Taken from https://github.com/onnx/sklearn-onnx/blob/9939c089a467676f4ffe9f3cb91098c4841f89d8/skl2onnx/_parse.py#L238.

    :param topology: Topology object
    :param model: A *scikit-learn* *Stacking* object
    :param inputs: A list of Variable objects
    :return: A list of output variables produced by column transformer
Taken from https://github.com/onnx/sklearn-onnx/blob/9939c089a467676f4ffe9f3cb91098c4841f89d8/skl2onnx/_parse.py#L238. :param topology: Topology object :param model: A *scikit-learn* *Stacking* object :param inputs: A list of Variable objects :return: A list of output variables produced by column transformer
[ "Taken", "from", "https", ":", "//", "github", ".", "com", "/", "onnx", "/", "sklearn", "-", "onnx", "/", "blob", "/", "9939c089a467676f4ffe9f3cb91098c4841f89d8", "/", "skl2onnx", "/", "_parse", ".", "py#L238", ".", ":", "param", "topology", ":", "Topology",...
def _parse_sklearn_stacking(topology, model, inputs):
    """
    Taken from https://github.com/onnx/sklearn-onnx/blob/9939c089a467676f4ffe9f3cb91098c4841f89d8/skl2onnx/_parse.py#L238.
    :param topology: Topology object
    :param model: A *scikit-learn* *Stacking* object
    :param inputs: A list of Variable objects
    :return: A list of output variables produced by column transformer
    """
    # Output variable name of each estimator. It's a list of variables.
    transformed_result_names = []
    # Encode each estimator as our IR object.
    for op, method in zip(model.estimators_, model.stack_method_):
        var_out = _parse_sklearn_api(topology, op, inputs)

        if method not in ["predict_proba", "predict"]:
            raise ValueError(
                "Ensemble method {} not supported. Please fill an issue at https://github.com/microsoft/hummingbird.".format(
                    method
                )
            )
        index = 0
        if method == "predict_proba":
            index = 1

        array_feature_extractor_operator = topology.declare_logical_operator("SklearnArrayFeatureExtractor")
        array_feature_extractor_operator.inputs = var_out
        array_feature_extractor_operator.column_indices = [index]
        output_variable_name = topology.declare_logical_variable("extracted_feature_columns", var_out[0].type)
        array_feature_extractor_operator.outputs.append(output_variable_name)
        transformed_result_names.append(output_variable_name)

    if model.passthrough:
        transformed_result_names.extend(inputs)

    if len(transformed_result_names) > 1:
        concat_operator = topology.declare_logical_operator("SklearnConcat")
        concat_operator.inputs = transformed_result_names

        # Declare output name of scikit-learn ColumnTransformer
        transformed_column_name = topology.declare_logical_variable("transformed_column")
        concat_operator.outputs.append(transformed_column_name)
        transformed_result_names = [transformed_column_name]

    op = model.final_estimator_
    var_out = _parse_sklearn_api(topology, op, transformed_result_names)
    return var_out
[ "def", "_parse_sklearn_stacking", "(", "topology", ",", "model", ",", "inputs", ")", ":", "# Output variable name of each estimator. It's a list of variables.", "transformed_result_names", "=", "[", "]", "# Encode each estimator as our IR object.", "for", "op", ",", "method", ...
https://github.com/microsoft/hummingbird/blob/2705c42585ac1d3ee69e65a93e5f59d7d42f44d5/hummingbird/ml/_parse.py#L476-L519
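A hedged end-to-end sketch of how this parser is reached in practice, assuming scikit-learn and hummingbird-ml are installed: converting a fitted StackingClassifier builds one sub-graph per base estimator, an optional concat, then the final estimator, as the function above describes.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, StackingClassifier
from sklearn.linear_model import LogisticRegression
from hummingbird.ml import convert

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
model = StackingClassifier(
    estimators=[("rf", RandomForestClassifier(n_estimators=10, random_state=0))],
    final_estimator=LogisticRegression(),
).fit(X, y)

hb_model = convert(model, "pytorch")  # internally walks the stacking parser above
print(hb_model.predict(X[:5]))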