Dataset columns (name, type, and min/max value length as reported by the dataset viewer):

    Column              Type     Min length   Max length
    nwo                 string   5            106
    sha                 string   40           40
    path                string   4            174
    language            string   1 distinct value ("python")
    identifier          string   1            140
    parameters          string   0            87.7k
    argument_list       string   1 distinct value
    return_statement    string   0            426k
    docstring           string   0            64.3k
    docstring_summary   string   0            26.3k
    docstring_tokens    list
    function            string   18           4.83M
    function_tokens     list
    url                 string   83           304
---
nwo: bookwyrm-social/bookwyrm
sha: 0c2537e27a2cdbc0136880dfbbf170d5fec72986
path: bookwyrm/models/status.py
language: python
identifier: Status.boostable
parameters: (self)
return_statement: return self.privacy in ["unlisted", "public"]
docstring: you can't boost dms
docstring_summary: you can't boost dms
docstring_tokens: [ "you", "can", "t", "boost", "dms" ]
function:
    def boostable(self):
        """you can't boost dms"""
        return self.privacy in ["unlisted", "public"]
function_tokens: [ "def", "boostable", "(", "self", ")", ":", "return", "self", ".", "privacy", "in", "[", "\"unlisted\"", ",", "\"public\"", "]" ]
url: https://github.com/bookwyrm-social/bookwyrm/blob/0c2537e27a2cdbc0136880dfbbf170d5fec72986/bookwyrm/models/status.py#L162-L164
---
nwo: osmr/imgclsmob
sha: f2993d3ce73a2f7ddba05da3891defb08547d504
path: gluon/gluoncv2/models/efficientnet.py
language: python
identifier: efficientnet_b4b
parameters: (in_size=(380, 380), **kwargs)
return_statement: return get_efficientnet(version="b4", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b4b", **kwargs)
docstring: EfficientNet-B4-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (380, 380) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters.
docstring_summary: EfficientNet-B4-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
docstring_tokens: [ "EfficientNet", "-", "B4", "-", "b", "(", "like", "TF", "-", "implementation", ")", "model", "from", "EfficientNet", ":", "Rethinking", "Model", "Scaling", "for", "Convolutional", "Neural", "Networks", "https", ":", "//", "arxiv", ".", "org", "/", "abs", "...
function:
    def efficientnet_b4b(in_size=(380, 380), **kwargs):
        """
        EfficientNet-B4-b (like TF-implementation) model from 'EfficientNet: Rethinking Model
        Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

        Parameters:
        ----------
        in_size : tuple of two ints, default (380, 380)
            Spatial size of the expected input image.
        pretrained : bool, default False
            Whether to load the pretrained weights for model.
        ctx : Context, default CPU
            The context in which to load the pretrained weights.
        root : str, default '~/.mxnet/models'
            Location for keeping the model parameters.
        """
        return get_efficientnet(version="b4", in_size=in_size, tf_mode=True, bn_epsilon=1e-3,
                                model_name="efficientnet_b4b", **kwargs)
function_tokens: [ "def", "efficientnet_b4b", "(", "in_size", "=", "(", "380", ",", "380", ")", ",", "*", "*", "kwargs", ")", ":", "return", "get_efficientnet", "(", "version", "=", "\"b4\"", ",", "in_size", "=", "in_size", ",", "tf_mode", "=", "True", ",", "bn_epsilon", ...
url: https://github.com/osmr/imgclsmob/blob/f2993d3ce73a2f7ddba05da3891defb08547d504/gluon/gluoncv2/models/efficientnet.py#L794-L811
---
nwo: ViRb3/apk-utilities
sha: 818f0f455e4cb549896375bed0eada5c0d211651
path: bin/enjarify/jvm/constantpool.py
language: python
identifier: SplitConstantPool.lowspace
parameters: (self)
return_statement: return 256 - self.bot
docstring_tokens: []
function:
    def lowspace(self):
        return 256 - self.bot
function_tokens: [ "def", "lowspace", "(", "self", ")", ":", "return", "256", "-", "self", ".", "bot" ]
url: https://github.com/ViRb3/apk-utilities/blob/818f0f455e4cb549896375bed0eada5c0d211651/bin/enjarify/jvm/constantpool.py#L168-L168
---
nwo: maas/maas
sha: db2f89970c640758a51247c59bf1ec6f60cf4ab5
path: src/provisioningserver/drivers/power/recs.py
language: python
identifier: RECSAPI.put
parameters: (self, command, urlparams=[], params={})
docstring: Dispatch a PUT request to a RECS_Master.
docstring_summary: Dispatch a PUT request to a RECS_Master.
docstring_tokens: [ "Dispatch", "a", "PUT", "request", "to", "a", "RECS_Master", "." ]
function:
    def put(self, command, urlparams=[], params={}):
        """Dispatch a PUT request to a RECS_Master."""
        url = self.build_url(command, urlparams)
        authinfo = urllib.request.HTTPPasswordMgrWithDefaultRealm()
        authinfo.add_password(None, url, self.username, self.password)
        proxy_handler = urllib.request.ProxyHandler({})
        auth_handler = urllib.request.HTTPBasicAuthHandler(authinfo)
        opener = urllib.request.build_opener(proxy_handler, auth_handler)
        urllib.request.install_opener(opener)
        data = urllib.parse.urlencode(params).encode()
        req = urllib.request.Request(url, data, method="PUT")
        try:
            response = urllib.request.urlopen(req)
        except urllib.error.HTTPError as e:
            raise PowerConnError(
                "Could not make proper connection to RECS|Box."
                " HTTP error code: %s" % e.code
            )
        except urllib.error.URLError as e:
            raise PowerConnError(
                "Could not make proper connection to RECS|Box."
                " Server could not be reached: %s" % e.reason
            )
        else:
            return response.read()
function_tokens: [ "def", "put", "(", "self", ",", "command", ",", "urlparams", "=", "[", "]", ",", "params", "=", "{", "}", ")", ":", "url", "=", "self", ".", "build_url", "(", "command", ",", "urlparams", ")", "authinfo", "=", "urllib", ".", "request", ".", "HTTPPa...
url: https://github.com/maas/maas/blob/db2f89970c640758a51247c59bf1ec6f60cf4ab5/src/provisioningserver/drivers/power/recs.py#L125-L149
---
nwo: raffaele-forte/climber
sha: 5530a780446e35b1ce977bae140557050fe0b47c
path: Exscript/protocols/Protocol.py
language: python
identifier: Protocol.is_app_authenticated
parameters: (self)
return_statement: return self.app_authenticated
docstring: Returns True if the application-level authentication procedure was completed, False otherwise. @rtype: bool @return: Whether the authentication was completed.
docstring_summary: Returns True if the application-level authentication procedure was completed, False otherwise.
docstring_tokens: [ "Returns", "True", "if", "the", "application", "-", "level", "authentication", "procedure", "was", "completed", "False", "otherwise", "." ]
function:
    def is_app_authenticated(self):
        """
        Returns True if the application-level authentication procedure was
        completed, False otherwise.

        @rtype:  bool
        @return: Whether the authentication was completed.
        """
        return self.app_authenticated
function_tokens: [ "def", "is_app_authenticated", "(", "self", ")", ":", "return", "self", ".", "app_authenticated" ]
url: https://github.com/raffaele-forte/climber/blob/5530a780446e35b1ce977bae140557050fe0b47c/Exscript/protocols/Protocol.py#L822-L830
---
nwo: xinntao/BasicSR
sha: 5668ba75eb8a77e8d2dd46746a36fee0fbb0fdcd
path: basicsr/metrics/metric_util.py
language: python
identifier: reorder_image
parameters: (img, input_order='HWC')
return_statement: return img
docstring: Reorder images to 'HWC' order. If the input_order is (h, w), return (h, w, 1); If the input_order is (c, h, w), return (h, w, c); If the input_order is (h, w, c), return as it is. Args: img (ndarray): Input image. input_order (str): Whether the input order is 'HWC' or 'CHW'. If the input image shape is (h, w), input_order will not have effects. Default: 'HWC'. Returns: ndarray: reordered image.
docstring_summary: Reorder images to 'HWC' order.
docstring_tokens: [ "Reorder", "images", "to", "HWC", "order", "." ]
function:
    def reorder_image(img, input_order='HWC'):
        """Reorder images to 'HWC' order.

        If the input_order is (h, w), return (h, w, 1);
        If the input_order is (c, h, w), return (h, w, c);
        If the input_order is (h, w, c), return as it is.

        Args:
            img (ndarray): Input image.
            input_order (str): Whether the input order is 'HWC' or 'CHW'.
                If the input image shape is (h, w), input_order will not have
                effects. Default: 'HWC'.

        Returns:
            ndarray: reordered image.
        """
        if input_order not in ['HWC', 'CHW']:
            raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are '
                             "'HWC' and 'CHW'")
        if len(img.shape) == 2:
            img = img[..., None]
        if input_order == 'CHW':
            img = img.transpose(1, 2, 0)
        return img
function_tokens: [ "def", "reorder_image", "(", "img", ",", "input_order", "=", "'HWC'", ")", ":", "if", "input_order", "not", "in", "[", "'HWC'", ",", "'CHW'", "]", ":", "raise", "ValueError", "(", "f'Wrong input_order {input_order}. Supported input_orders are '", "\"'HWC' and 'CHW'\""...
url: https://github.com/xinntao/BasicSR/blob/5668ba75eb8a77e8d2dd46746a36fee0fbb0fdcd/basicsr/metrics/metric_util.py#L6-L29
---
nwo: mrJean1/PyGeodesy
sha: 7da5ca71aa3edb7bc49e219e0b8190686e1a7965
path: pygeodesy/utm.py
language: python
identifier: _lowerleft
parameters: (utm, center)
return_statement: return utm.classof(utm.zone, utm.hemisphere, utm.easting - e, utm.northing - n, band=utm.band, falsed=utm.falsed, **r)
docstring: (INTERNAL) I{Un}-center a B{C{utm}} to its C{lowerleft} by C{B{center} meter} or by a I{guess} if B{C{center}} is C{0}.
docstring_summary: (INTERNAL) I{Un}-center a B{C{utm}} to its C{lowerleft} by C{B{center} meter} or by a I{guess} if B{C{center}} is C{0}.
docstring_tokens: [ "(", "INTERNAL", ")", "I", "{", "Un", "}", "-", "center", "a", "B", "{", "C", "{", "utm", "}}", "to", "its", "C", "{", "lowerleft", "}", "by", "C", "{", "B", "{", "center", "}", "meter", "}", "or", "by", "a", "I", "{", "guess", "}", "if", ...
function:
    def _lowerleft(utm, center):  # by .ellipsoidalBase.LatLon.toUtm
        '''(INTERNAL) I{Un}-center a B{C{utm}} to its C{lowerleft}
           by C{B{center} meter} or by a I{guess} if B{C{center}}
           is C{0}.
        '''
        if center:
            e = n = -center
        else:
            c = 5  # center
            for _ in range(3):
                c *= 10  # 50, 500, 5000
                t = c * 2
                e = int(utm.easting % t)
                n = int(utm.northing % t)
                if (e == c and n in (c, c - 1)) or \
                   (n == c and e in (c, c - 1)):
                    break
            else:
                return utm  # unchanged

        r = _xkwds_not(None, datum=utm.datum,
                             scale=utm.scale,
                             convergence=utm.convergence)
        return utm.classof(utm.zone, utm.hemisphere,
                           utm.easting - e, utm.northing - n,
                           band=utm.band, falsed=utm.falsed, **r)
function_tokens: [ "def", "_lowerleft", "(", "utm", ",", "center", ")", ":", "# by .ellipsoidalBase.LatLon.toUtm", "if", "center", ":", "e", "=", "n", "=", "-", "center", "else", ":", "c", "=", "5", "# center", "for", "_", "in", "range", "(", "3", ")", ":", "c", "*=", ...
url: https://github.com/mrJean1/PyGeodesy/blob/7da5ca71aa3edb7bc49e219e0b8190686e1a7965/pygeodesy/utm.py#L628-L651
---
nwo: Yonv1943/Python
sha: ecce2153892093d7a13686e4cbfd6b323cb59de8
path: ElegantRL/AgentZoo/ElegantRL-MultiGPU/agent.py
language: python
identifier: ReplayBufferMP.__init__
parameters: (self, max_len, state_dim, action_dim, rollout_num, if_on_policy, if_gpu)
docstring: Experience Replay Buffer for Multiple Processing :int max_len: the maximum capacity of ReplayBuffer. First In First Out :int state_dim: the dimension of state :int action_dim: the dimension of action (action_dim==1 for discrete action) :int rollout_num: the rollout workers number :bool if_on_policy: on-policy or off-policy :bool if_gpu: create buffer space on CPU RAM or GPU
docstring_summary: Experience Replay Buffer for Multiple Processing
docstring_tokens: [ "Experience", "Replay", "Buffer", "for", "Multiple", "Processing" ]
function:
    def __init__(self, max_len, state_dim, action_dim, rollout_num, if_on_policy, if_gpu):
        """Experience Replay Buffer for Multiple Processing

        :int max_len: the maximum capacity of ReplayBuffer. First In First Out
        :int state_dim: the dimension of state
        :int action_dim: the dimension of action (action_dim==1 for discrete action)
        :int rollout_num: the rollout workers number
        :bool if_on_policy: on-policy or off-policy
        :bool if_gpu: create buffer space on CPU RAM or GPU
        """
        self.now_len = 0
        self.max_len = max_len
        self.rollout_num = rollout_num
        self.if_gpu = if_gpu
        if if_on_policy:
            self.if_gpu = False

        _max_len = max_len // rollout_num
        self.buffers = [ReplayBuffer(_max_len, state_dim, action_dim, if_on_policy, if_gpu=True)
                        for _ in range(rollout_num)]
function_tokens: [ "def", "__init__", "(", "self", ",", "max_len", ",", "state_dim", ",", "action_dim", ",", "rollout_num", ",", "if_on_policy", ",", "if_gpu", ")", ":", "self", ".", "now_len", "=", "0", "self", ".", "max_len", "=", "max_len", "self", ".", "rollout_num", "...
url: https://github.com/Yonv1943/Python/blob/ecce2153892093d7a13686e4cbfd6b323cb59de8/ElegantRL/AgentZoo/ElegantRL-MultiGPU/agent.py#L1152-L1172
---
nwo: noamraph/dreampie
sha: b09ee546ec099ee6549c649692ceb129e05fb229
path: comtypes/typeinfo.py
language: python
identifier: ITypeInfo.AddressOfMember
parameters: (self, memid, invkind)
return_statement: return p.value
docstring: Get the address of a function in a dll
docstring_summary: Get the address of a function in a dll
docstring_tokens: [ "Get", "the", "address", "of", "a", "function", "in", "a", "dll" ]
function:
    def AddressOfMember(self, memid, invkind):
        "Get the address of a function in a dll"
        raise "Check Me"
        p = c_void_p()
        self.__com_AddressOfMember(memid, invkind, byref(p))
        # XXX Would the default impl return the value of p?
        return p.value
function_tokens: [ "def", "AddressOfMember", "(", "self", ",", "memid", ",", "invkind", ")", ":", "raise", "\"Check Me\"", "p", "=", "c_void_p", "(", ")", "self", ".", "__com_AddressOfMember", "(", "memid", ",", "invkind", ",", "byref", "(", "p", ")", ")", "# XXX Would the d...
url: https://github.com/noamraph/dreampie/blob/b09ee546ec099ee6549c649692ceb129e05fb229/comtypes/typeinfo.py#L311-L317
---
nwo: gxfxyz/unblockchn
sha: c272dc58ca2d4e7c8a01131d0866ddb2c0ba92d9
path: unblockchn.py
language: python
identifier: Router.check_ss_redir
parameters: (cls)
return_statement: return os.path.exists(f"/proc/{pid}")
docstring: 检查 ss-redir 是否运行中 (Check whether ss-redir is running)
docstring_summary: 检查 ss-redir 是否运行中 (Check whether ss-redir is running)
docstring_tokens: [ "检查", "ss", "-", "redir", "是否运行中" ]
function:
    def check_ss_redir(cls):
        """检查 ss-redir 是否运行中"""
        if not os.path.isfile(SS_REDIR_PID_PATH):
            return False
        with open(SS_REDIR_PID_PATH, 'r', encoding='utf-8') as f:
            pid = f.read()
        return os.path.exists(f"/proc/{pid}")
function_tokens: [ "def", "check_ss_redir", "(", "cls", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "SS_REDIR_PID_PATH", ")", ":", "return", "False", "with", "open", "(", "SS_REDIR_PID_PATH", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ...
url: https://github.com/gxfxyz/unblockchn/blob/c272dc58ca2d4e7c8a01131d0866ddb2c0ba92d9/unblockchn.py#L855-L861
---
nwo: cloudera/hue
sha: 23f02102d4547c17c32bd5ea0eb24e9eadd657a4
path: desktop/core/ext-py/SQLAlchemy-1.3.17/lib/sqlalchemy/util/compat.py
language: python
identifier: reraise
parameters: (tp, value, tb=None, cause=None)
docstring: r"""legacy. use raise\_()
docstring_summary: r"""legacy. use raise\_()
docstring_tokens: [ "r", "legacy", ".", "use", "raise", "\\", "_", "()" ]
function:
    def reraise(tp, value, tb=None, cause=None):
        r"""legacy. use raise\_()"""
        raise_(value, with_traceback=tb, from_=cause)
function_tokens: [ "def", "reraise", "(", "tp", ",", "value", ",", "tb", "=", "None", ",", "cause", "=", "None", ")", ":", "raise_", "(", "value", ",", "with_traceback", "=", "tb", ",", "from_", "=", "cause", ")" ]
url: https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/SQLAlchemy-1.3.17/lib/sqlalchemy/util/compat.py#L444-L447
---
nwo: openedx/edx-platform
sha: 68dd185a0ab45862a2a61e0f803d7e03d2be71b5
path: openedx/core/djangoapps/content/block_structure/tasks.py
language: python
identifier: update_course_in_cache
parameters: (self, course_id)
docstring: Updates the course blocks (mongo -> BlockStructure) for the specified course.
docstring_summary: Updates the course blocks (mongo -> BlockStructure) for the specified course.
docstring_tokens: [ "Updates", "the", "course", "blocks", "(", "mongo", "-", ">", "BlockStructure", ")", "for", "the", "specified", "course", "." ]
function:
    def update_course_in_cache(self, course_id):
        """
        Updates the course blocks (mongo -> BlockStructure) for the specified course.
        """
        _update_course_in_cache(self, course_id=course_id)
function_tokens: [ "def", "update_course_in_cache", "(", "self", ",", "course_id", ")", ":", "_update_course_in_cache", "(", "self", ",", "course_id", "=", "course_id", ")" ]
url: https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/openedx/core/djangoapps/content/block_structure/tasks.py#L54-L58
---
nwo: wummel/linkchecker
sha: c2ce810c3fb00b895a841a7be6b2e78c64e7b042
path: linkcheck/checker/fileurl.py
language: python
identifier: FileUrl.build_base_url
parameters: (self)
docstring: The URL is normed according to the platform: - the base URL is made an absolute file:// URL - under Windows platform the drive specifier is normed
docstring_summary: The URL is normed according to the platform: - the base URL is made an absolute file:// URL - under Windows platform the drive specifier is normed
docstring_tokens: [ "The", "URL", "is", "normed", "according", "to", "the", "platform", ":", "-", "the", "base", "URL", "is", "made", "an", "absolute", "file", ":", "//", "URL", "-", "under", "Windows", "platform", "the", "drive", "specifier", "is", "normed" ]
function:
    def build_base_url(self):
        """The URL is normed according to the platform:
         - the base URL is made an absolute file:// URL
         - under Windows platform the drive specifier is normed
        """
        if self.base_url is None:
            return
        base_url = self.base_url
        if not (self.parent_url or self.base_ref or base_url.startswith("file:")):
            base_url = os.path.expanduser(base_url)
            if not is_absolute_path(base_url):
                try:
                    base_url = os.getcwd()+"/"+base_url
                except OSError as msg:
                    # occurs on stale remote filesystems (eg. NFS)
                    errmsg = _("Could not get current working directory: %(msg)s") % dict(msg=msg)
                    raise LinkCheckerError(errmsg)
                if os.path.isdir(base_url):
                    base_url += "/"
            base_url = "file://"+base_url
        if os.name == "nt":
            base_url = base_url.replace("\\", "/")
            # transform c:/windows into /c|/windows
            base_url = re.sub("^file://(/?)([a-zA-Z]):", r"file:///\2|", base_url)
            # transform file://path into file:///path
            base_url = re.sub("^file://([^/])", r"file:///\1", base_url)
        self.base_url = unicode(base_url)
function_tokens: [ "def", "build_base_url", "(", "self", ")", ":", "if", "self", ".", "base_url", "is", "None", ":", "return", "base_url", "=", "self", ".", "base_url", "if", "not", "(", "self", ".", "parent_url", "or", "self", ".", "base_ref", "or", "base_url", ".", "st...
url: https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/fileurl.py#L112-L138
---
nwo: veeresht/CommPy
sha: a1452bc981974c0e7b0955daeae16fd8960d659a
path: commpy/channelcoding/convcode.py
language: python
identifier: conv_encode
parameters: (message_bits, trellis, termination = 'term', puncture_matrix=None)
return_statement: return p_outbits
docstring: Encode bits using a convolutional code. Parameters ---------- message_bits : 1D ndarray containing {0, 1} Stream of bits to be convolutionally encoded. trellis: pre-initialized Trellis structure. termination: {'cont', 'term'}, optional Create ('term') or not ('cont') termination bits. puncture_matrix: 2D ndarray containing {0, 1}, optional Matrix used for the puncturing algorithm Returns ------- coded_bits : 1D ndarray containing {0, 1} Encoded bit stream.
docstring_summary: Encode bits using a convolutional code. Parameters ---------- message_bits : 1D ndarray containing {0, 1} Stream of bits to be convolutionally encoded. trellis: pre-initialized Trellis structure. termination: {'cont', 'term'}, optional Create ('term') or not ('cont') termination bits. puncture_matrix: 2D ndarray containing {0, 1}, optional Matrix used for the puncturing algorithm Returns ------- coded_bits : 1D ndarray containing {0, 1} Encoded bit stream.
docstring_tokens: [ "Encode", "bits", "using", "a", "convolutional", "code", ".", "Parameters", "----------", "message_bits", ":", "1D", "ndarray", "containing", "{", "0", "1", "}", "Stream", "of", "bits", "to", "be", "convolutionally", "encoded", ".", "trellis", ":", "pre", "-...
function:
    def conv_encode(message_bits, trellis, termination = 'term', puncture_matrix=None):
        """
        Encode bits using a convolutional code.

        Parameters
        ----------
        message_bits : 1D ndarray containing {0, 1}
            Stream of bits to be convolutionally encoded.
        trellis: pre-initialized Trellis structure.
        termination: {'cont', 'term'}, optional
            Create ('term') or not ('cont') termination bits.
        puncture_matrix: 2D ndarray containing {0, 1}, optional
            Matrix used for the puncturing algorithm

        Returns
        -------
        coded_bits : 1D ndarray containing {0, 1}
            Encoded bit stream.
        """
        k = trellis.k
        n = trellis.n
        total_memory = trellis.total_memory
        rate = float(k)/n
        code_type = trellis.code_type

        if puncture_matrix is None:
            puncture_matrix = np.ones((trellis.k, trellis.n))

        number_message_bits = np.size(message_bits)

        if termination == 'cont':
            inbits = message_bits
            number_inbits = number_message_bits
            number_outbits = int(number_inbits/rate)
        else:
            # Initialize an array to contain the message bits plus the truncation zeros
            if code_type == 'rsc':
                inbits = message_bits
                number_inbits = number_message_bits
                number_outbits = int((number_inbits + k * total_memory)/rate)
            else:
                number_inbits = number_message_bits + total_memory + total_memory % k
                inbits = np.zeros(number_inbits, 'int')
                # Pad the input bits with M zeros (L-th terminated truncation)
                inbits[0:number_message_bits] = message_bits
                number_outbits = int(number_inbits/rate)

        outbits = np.zeros(number_outbits, 'int')
        if puncture_matrix is not None:
            p_outbits = np.zeros(number_outbits, 'int')
        else:
            p_outbits = np.zeros(int(number_outbits*
                puncture_matrix[0:].sum()/np.size(puncture_matrix, 1)), 'int')

        next_state_table = trellis.next_state_table
        output_table = trellis.output_table

        # Encoding process - Each iteration of the loop represents one clock cycle
        current_state = 0
        j = 0

        for i in range(int(number_inbits/k)):  # Loop through all input bits
            current_input = bitarray2dec(inbits[i*k:(i+1)*k])
            current_output = output_table[current_state][current_input]
            outbits[j*n:(j+1)*n] = dec2bitarray(current_output, n)
            current_state = next_state_table[current_state][current_input]
            j += 1

        if code_type == 'rsc' and termination == 'term':
            term_bits = dec2bitarray(current_state, trellis.total_memory)
            term_bits = term_bits[::-1]
            for i in range(trellis.total_memory):
                current_input = bitarray2dec(term_bits[i*k:(i+1)*k])
                current_output = output_table[current_state][current_input]
                outbits[j*n:(j+1)*n] = dec2bitarray(current_output, n)
                current_state = next_state_table[current_state][current_input]
                j += 1

        j = 0
        for i in range(number_outbits):
            if puncture_matrix[0][i % np.size(puncture_matrix, 1)] == 1:
                p_outbits[j] = outbits[i]
                j = j + 1

        return p_outbits
function_tokens: [ "def", "conv_encode", "(", "message_bits", ",", "trellis", ",", "termination", "=", "'term'", ",", "puncture_matrix", "=", "None", ")", ":", "k", "=", "trellis", ".", "k", "n", "=", "trellis", ".", "n", "total_memory", "=", "trellis", ".", "total_memory", ...
url: https://github.com/veeresht/CommPy/blob/a1452bc981974c0e7b0955daeae16fd8960d659a/commpy/channelcoding/convcode.py#L475-L558
---
nwo: molecularsets/moses
sha: 7b8f83b21a9b7ded493349ec8ef292384ce2bb52
path: moses/metrics/utils.py
language: python
identifier: QED
parameters: (mol)
return_statement: return qed(mol)
docstring: Computes RDKit's QED score
docstring_summary: Computes RDKit's QED score
docstring_tokens: [ "Computes", "RDKit", "s", "QED", "score" ]
function:
    def QED(mol):
        """
        Computes RDKit's QED score
        """
        return qed(mol)
function_tokens: [ "def", "QED", "(", "mol", ")", ":", "return", "qed", "(", "mol", ")" ]
url: https://github.com/molecularsets/moses/blob/7b8f83b21a9b7ded493349ec8ef292384ce2bb52/moses/metrics/utils.py#L55-L59
---
nwo: vlachoudis/bCNC
sha: 67126b4894dabf6579baf47af8d0f9b7de35e6e3
path: bCNC/__main__.py
language: python
identifier: Application.acceptKey
parameters: (self, skipRun=False)
return_statement: return True
docstring_tokens: []
function:
    def acceptKey(self, skipRun=False):
        if not skipRun and self.running: return False
        focus = self.focus_get()
        if isinstance(focus, Entry) or \
           isinstance(focus, Spinbox) or \
           isinstance(focus, Listbox) or \
           isinstance(focus, Text):
            return False
        return True
function_tokens: [ "def", "acceptKey", "(", "self", ",", "skipRun", "=", "False", ")", ":", "if", "not", "skipRun", "and", "self", ".", "running", ":", "return", "False", "focus", "=", "self", ".", "focus_get", "(", ")", "if", "isinstance", "(", "focus", ",", "Entry", ...
url: https://github.com/vlachoudis/bCNC/blob/67126b4894dabf6579baf47af8d0f9b7de35e6e3/bCNC/__main__.py#L531-L538
---
nwo: iagcl/watchmen
sha: d329b357e6fde3ad91e972988b160a33c12afc2a
path: elasticsearch/roll_indexes/packages/urllib3/response.py
language: python
identifier: HTTPResponse.tell
parameters: (self)
return_statement: return self._fp_bytes_read
docstring: Obtain the number of bytes pulled over the wire so far. May differ from the amount of content returned by :meth:``HTTPResponse.read`` if bytes are encoded on the wire (e.g, compressed).
docstring_summary: Obtain the number of bytes pulled over the wire so far. May differ from the amount of content returned by :meth:``HTTPResponse.read`` if bytes are encoded on the wire (e.g, compressed).
docstring_tokens: [ "Obtain", "the", "number", "of", "bytes", "pulled", "over", "the", "wire", "so", "far", ".", "May", "differ", "from", "the", "amount", "of", "content", "returned", "by", ":", "meth", ":", "HTTPResponse", ".", "read", "if", "bytes", "are", "encoded", "on"...
function:
    def tell(self):
        """
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:``HTTPResponse.read`` if bytes
        are encoded on the wire (e.g, compressed).
        """
        return self._fp_bytes_read
function_tokens: [ "def", "tell", "(", "self", ")", ":", "return", "self", ".", "_fp_bytes_read" ]
url: https://github.com/iagcl/watchmen/blob/d329b357e6fde3ad91e972988b160a33c12afc2a/elasticsearch/roll_indexes/packages/urllib3/response.py#L194-L200
---
nwo: disqus/django-perftools
sha: a348b5d62ce8c9d2a1645be03ab087ca3b0a9da3
path: perftools/patcher.py
language: python
identifier: Patcher.__exit__
parameters: (self, exc_type, exc_value, traceback)
docstring_tokens: []
function:
    def __exit__(self, exc_type, exc_value, traceback):
        setattr(self.target, self.attribute, self.original)
function_tokens: [ "def", "__exit__", "(", "self", ",", "exc_type", ",", "exc_value", ",", "traceback", ")", ":", "setattr", "(", "self", ".", "target", ",", "self", ".", "attribute", ",", "self", ".", "original", ")" ]
url: https://github.com/disqus/django-perftools/blob/a348b5d62ce8c9d2a1645be03ab087ca3b0a9da3/perftools/patcher.py#L37-L38
---
nwo: jaseg/python-mpv
sha: 1f59cfa07246c993737b25857fd01421b2da8bbd
path: mpv.py
language: python
identifier: MPV.video_reload
parameters: (self, video_id=None)
docstring: Mapped mpv video_reload command, see man mpv(1).
docstring_summary: Mapped mpv video_reload command, see man mpv(1).
docstring_tokens: [ "Mapped", "mpv", "video_reload", "command", "see", "man", "mpv", "(", "1", ")", "." ]
function:
    def video_reload(self, video_id=None):
        """Mapped mpv video_reload command, see man mpv(1)."""
        self.command('video_reload', video_id)
function_tokens: [ "def", "video_reload", "(", "self", ",", "video_id", "=", "None", ")", ":", "self", ".", "command", "(", "'video_reload'", ",", "video_id", ")" ]
url: https://github.com/jaseg/python-mpv/blob/1f59cfa07246c993737b25857fd01421b2da8bbd/mpv.py#L1217-L1219
---
nwo: sktime/sktime-dl
sha: e519bf5983f9ed60b04b0d14f4fe3fa049a82f04
path: sktime_dl/networks/_tapnet.py
language: python
identifier: TapNetNetwork.__init__
parameters: ( self, dropout=0.5, filter_sizes=[256, 256, 128], kernel_size=[8, 5, 3], dilation=1, layers=[500, 300], use_rp=True, rp_params=[-1, 3], use_att=True, use_ss=False, use_metric=False, use_muse=False, use_lstm=True, use_cnn=True, random_state=1, padding='same' )
docstring: :param kernel_size: int, specifying the length of the 1D convolution window :param avg_pool_size: int, size of the average pooling windows :param layers: int, size of dense layers :param filter_sizes: int, array of shape = (nb_conv_layers) :param random_state: int, seed to any needed random actions :param rp_params: array of ints, parameters for random permutation :param dropout: dropout rate
docstring_summary: :param kernel_size: int, specifying the length of the 1D convolution window :param avg_pool_size: int, size of the average pooling windows :param layers: int, size of dense layers :param filter_sizes: int, array of shape = (nb_conv_layers) :param random_state: int, seed to any needed random actions :param rp_params: array of ints, parameters for random permutation :param dropout: dropout rate
docstring_tokens: [ ":", "param", "kernel_size", ":", "int", "specifying", "the", "length", "of", "the", "1D", "convolution", "window", ":", "param", "avg_pool_size", ":", "int", "size", "of", "the", "average", "pooling", "windows", ":", "param", "layers", ":", "int", "size", ...
function:
    def __init__(
            self,
            dropout=0.5,
            filter_sizes=[256, 256, 128],
            kernel_size=[8, 5, 3],
            dilation=1,
            layers=[500, 300],
            use_rp=True,
            rp_params=[-1, 3],
            use_att=True,
            use_ss=False,
            use_metric=False,
            use_muse=False,
            use_lstm=True,
            use_cnn=True,
            random_state=1,
            padding='same'
    ):
        """
        :param kernel_size: int, specifying the length of the 1D convolution window
        :param avg_pool_size: int, size of the average pooling windows
        :param layers: int, size of dense layers
        :param filter_sizes: int, array of shape = (nb_conv_layers)
        :param random_state: int, seed to any needed random actions
        :param rp_params: array of ints, parameters for random permutation
        :param dropout: dropout rate
        """
        super(TapNetNetwork, self).__init__()

        self.random_state = random_state
        self.kernel_size = kernel_size
        self.layers = layers
        self.rp_params = rp_params
        self.filter_sizes = filter_sizes
        self.use_att = use_att
        self.use_ss = use_ss
        self.dilation = dilation
        self.padding = padding
        self.dropout = dropout
        self.use_metric = use_metric
        self.use_muse = use_muse
        self.use_lstm = use_lstm
        self.use_cnn = use_cnn

        # parameters for random projection
        self.use_rp = use_rp
        self.rp_params = rp_params
function_tokens: [ "def", "__init__", "(", "self", ",", "dropout", "=", "0.5", ",", "filter_sizes", "=", "[", "256", ",", "256", ",", "128", "]", ",", "kernel_size", "=", "[", "8", ",", "5", ",", "3", "]", ",", "dilation", "=", "1", ",", "layers", "=", "[", "500"...
url: https://github.com/sktime/sktime-dl/blob/e519bf5983f9ed60b04b0d14f4fe3fa049a82f04/sktime_dl/networks/_tapnet.py#L28-L77
---
nwo: CedricGuillemet/Imogen
sha: ee417b42747ed5b46cb11b02ef0c3630000085b3
path: bin/Lib/nntplib.py
language: python
identifier: _NNTPBase.last
parameters: (self)
return_statement: return self._statcmd('LAST')
docstring: Process a LAST command. No arguments. Return as for STAT.
docstring_summary: Process a LAST command. No arguments. Return as for STAT.
docstring_tokens: [ "Process", "a", "LAST", "command", ".", "No", "arguments", ".", "Return", "as", "for", "STAT", "." ]
function:
    def last(self):
        """Process a LAST command. No arguments. Return as for STAT."""
        return self._statcmd('LAST')
function_tokens: [ "def", "last", "(", "self", ")", ":", "return", "self", ".", "_statcmd", "(", "'LAST'", ")" ]
url: https://github.com/CedricGuillemet/Imogen/blob/ee417b42747ed5b46cb11b02ef0c3630000085b3/bin/Lib/nntplib.py#L720-L722
---
nwo: ray-project/ray
sha: 703c1610348615dcb8c2d141a0c46675084660f5
path: python/ray/util/dask/scheduler.py
language: python
identifier: ray_dask_get_sync
parameters: (dsk, keys, **kwargs)
docstring: A synchronous Dask-Ray scheduler. This scheduler will send top-level (non-inlined) Dask tasks to a Ray cluster for execution. The scheduler will wait for the tasks to finish executing, fetch the results, and repackage them into the appropriate Dask collections. This particular scheduler submits Ray tasks synchronously, which can be useful for debugging. This can be passed directly to `dask.compute()`, as the scheduler: >>> dask.compute(obj, scheduler=ray_dask_get_sync) You can override the currently active global Dask-Ray callbacks (e.g. supplied via a context manager): >>> dask.compute( obj, scheduler=ray_dask_get_sync, ray_callbacks=some_ray_dask_callbacks, ) Args: dsk (Dict): Dask graph, represented as a task DAG dictionary. keys (List[str]): List of Dask graph keys whose values we wish to compute and return. Returns: Computed values corresponding to the provided keys.
docstring_summary: A synchronous Dask-Ray scheduler. This scheduler will send top-level (non-inlined) Dask tasks to a Ray cluster for execution. The scheduler will wait for the tasks to finish executing, fetch the results, and repackage them into the appropriate Dask collections. This particular scheduler submits Ray tasks synchronously, which can be useful for debugging.
docstring_tokens: [ "A", "synchronous", "Dask", "-", "Ray", "scheduler", ".", "This", "scheduler", "will", "send", "top", "-", "level", "(", "non", "-", "inlined", ")", "Dask", "tasks", "to", "a", "Ray", "cluster", "for", "execution", ".", "The", "scheduler", "will", "wait"...
function:
    def ray_dask_get_sync(dsk, keys, **kwargs):
        """
        A synchronous Dask-Ray scheduler. This scheduler will send top-level
        (non-inlined) Dask tasks to a Ray cluster for execution. The scheduler will
        wait for the tasks to finish executing, fetch the results, and repackage
        them into the appropriate Dask collections. This particular scheduler
        submits Ray tasks synchronously, which can be useful for debugging.

        This can be passed directly to `dask.compute()`, as the scheduler:

        >>> dask.compute(obj, scheduler=ray_dask_get_sync)

        You can override the currently active global Dask-Ray callbacks (e.g.
        supplied via a context manager):

        >>> dask.compute(
                obj,
                scheduler=ray_dask_get_sync,
                ray_callbacks=some_ray_dask_callbacks,
            )

        Args:
            dsk (Dict): Dask graph, represented as a task DAG dictionary.
            keys (List[str]): List of Dask graph keys whose values we wish to
                compute and return.

        Returns:
            Computed values corresponding to the provided keys.
        """
        ray_callbacks = kwargs.pop("ray_callbacks", None)
        persist = kwargs.pop("ray_persist", False)

        with local_ray_callbacks(ray_callbacks) as ray_callbacks:
            # Unpack the Ray-specific callbacks.
            (
                ray_presubmit_cbs,
                ray_postsubmit_cbs,
                ray_pretask_cbs,
                ray_posttask_cbs,
                ray_postsubmit_all_cbs,
                ray_finish_cbs,
            ) = unpack_ray_callbacks(ray_callbacks)
            # NOTE: We hijack Dask's `get_async` function, injecting a different
            # task executor.
            object_refs = get_async(
                _apply_async_wrapper(
                    apply_sync,
                    _rayify_task_wrapper,
                    ray_presubmit_cbs,
                    ray_postsubmit_cbs,
                    ray_pretask_cbs,
                    ray_posttask_cbs,
                ),
                1,
                dsk,
                keys,
                **kwargs,
            )
            if ray_postsubmit_all_cbs is not None:
                for cb in ray_postsubmit_all_cbs:
                    cb(object_refs, dsk)
            # NOTE: We explicitly delete the Dask graph here so object references
            # are garbage-collected before this function returns, i.e. before all
            # Ray tasks are done. Otherwise, no intermediate objects will be
            # cleaned up until all Ray tasks are done.
            del dsk
            if persist:
                result = object_refs
            else:
                result = ray_get_unpack(object_refs)
            if ray_finish_cbs is not None:
                for cb in ray_finish_cbs:
                    cb(result)

        return result
function_tokens: [ "def", "ray_dask_get_sync", "(", "dsk", ",", "keys", ",", "*", "*", "kwargs", ")", ":", "ray_callbacks", "=", "kwargs", ".", "pop", "(", "\"ray_callbacks\"", ",", "None", ")", "persist", "=", "kwargs", ".", "pop", "(", "\"ray_persist\"", ",", "False", ")...
url: https://github.com/ray-project/ray/blob/703c1610348615dcb8c2d141a0c46675084660f5/python/ray/util/dask/scheduler.py#L481-L556
---
nwo: robotlearn/pyrobolearn
sha: 9cd7c060723fda7d2779fa255ac998c2c82b8436
path: pyrobolearn/algos/cem.py
language: python
identifier: CEM.train
parameters: (self, num_steps=1000, num_rollouts=1, num_episodes=1, verbose=False, seed=None)
return_statement: return avg_rewards, max_rewards
docstring: Train the policy. Args: num_steps (int): number of steps per rollout / episode. In one episode, how many steps does the environment proceeds. num_rollouts (int): number of rollouts per episode to average the results. num_episodes (int): number of episodes. verbose (bool): If True, it will print information about the training process. seed (int): random seed. Returns: list of float: average rewards per episode. list of float: maximum reward obtained per episode.
docstring_summary: Train the policy.
docstring_tokens: [ "Train", "the", "policy", "." ]
function:
    def train(self, num_steps=1000, num_rollouts=1, num_episodes=1, verbose=False, seed=None):
        """
        Train the policy.

        Args:
            num_steps (int): number of steps per rollout / episode. In one episode, how many
                steps does the environment proceeds.
            num_rollouts (int): number of rollouts per episode to average the results.
            num_episodes (int): number of episodes.
            verbose (bool): If True, it will print information about the training process.
            seed (int): random seed.

        Returns:
            list of float: average rewards per episode.
            list of float: maximum reward obtained per episode.
        """
        # set seed
        if seed is not None:
            np.random.seed(seed)

        # create recorders
        max_rewards, avg_rewards = [], []

        # init
        theta_mean = self.policy.get_vectorized_parameters(to_numpy=True)
        theta_std = np.ones(len(theta_mean))

        # pool = Pool(self.num_workers)

        # for each episode/generation
        for episode in range(num_episodes):
            if verbose:
                print('\nEpisode {}'.format(episode+1))

            # 1. Explore
            # sample parameter vectors
            thetas = np.random.multivariate_normal(theta_mean, np.diag(theta_std), self.population_size)

            # perform one episode for each parameter
            # jobs = [pool.apipe(self.task.run, num_steps, use_terminating_condition=False) for theta in thetas]
            rewards = []
            for i, theta in enumerate(thetas):
                # set policy parameters
                self.policy.set_vectorized_parameters(theta)

                # run a number of rollouts
                reward = []
                for rollout in range(num_rollouts):
                    rew = self.task.run(num_steps=num_steps, use_terminating_condition=True, render=False)
                    reward.append(rew)
                reward = np.mean(reward)
                rewards.append(reward)

                # print info
                if verbose:
                    print(' -- individual {} with avg reward of {}'.format(i+1, reward))

            # 2. Evaluate (compute loss)

            # 3. Update
            # get elite parameters
            num_elites = int(self.population_size * self.elite_fraction)
            elite_ids = np.argsort(rewards)[-num_elites:]
            elite_thetas = np.array([thetas[i] for i in elite_ids])

            # update theta_mean and theta_std
            theta_mean = elite_thetas.mean(axis=0)
            theta_std = np.sqrt(np.mean((elite_thetas - theta_mean) ** 2, axis=0))

            # 4. Save best reward and associated parameter
            max_reward, avg_reward = np.max(rewards), np.mean(rewards)
            if max_reward > self.best_reward:
                self.best_reward = max_reward
                self.best_parameters = thetas[elite_ids[-1]]

            # print info
            if verbose:
                print("Episode {} mean reward: {} max reward: {}".format(episode+1, avg_reward, max_reward))

            # Save the evolution of the algo
            avg_rewards.append(avg_reward)
            max_rewards.append(max_reward)

        # print best reward
        if verbose:
            print("\nBest reward found: {}".format(self.best_reward))

        # set the best parameters
        self.policy.set_vectorized_parameters(self.best_parameters)

        return avg_rewards, max_rewards
function_tokens: [ "def", "train", "(", "self", ",", "num_steps", "=", "1000", ",", "num_rollouts", "=", "1", ",", "num_episodes", "=", "1", ",", "verbose", "=", "False", ",", "seed", "=", "None", ")", ":", "# set seed", "if", "seed", "is", "not", "None", ":", "np", ...
url: https://github.com/robotlearn/pyrobolearn/blob/9cd7c060723fda7d2779fa255ac998c2c82b8436/pyrobolearn/algos/cem.py#L107-L197
---
nwo: Paper99/SRFBN_CVPR19
sha: 046f693afc58dc53f240a7d85ea78213eec08490
path: solvers/SRSolver.py
language: python
identifier: SRSolver.load
parameters: (self)
docstring: load or initialize network
docstring_summary: load or initialize network
docstring_tokens: [ "load", "or", "initialize", "network" ]
function:
    def load(self):
        """
        load or initialize network
        """
        if (self.is_train and self.opt['solver']['pretrain']) or not self.is_train:
            model_path = self.opt['solver']['pretrained_path']
            if model_path is None: raise ValueError("[Error] The 'pretrained_path' does not declarate in *.json")

            print('===> Loading model from [%s]...' % model_path)
            if self.is_train:
                checkpoint = torch.load(model_path)
                self.model.load_state_dict(checkpoint['state_dict'])

                if self.opt['solver']['pretrain'] == 'resume':
                    self.cur_epoch = checkpoint['epoch'] + 1
                    self.optimizer.load_state_dict(checkpoint['optimizer'])
                    self.best_pred = checkpoint['best_pred']
                    self.best_epoch = checkpoint['best_epoch']
                    self.records = checkpoint['records']
            else:
                checkpoint = torch.load(model_path)
                if 'state_dict' in checkpoint.keys(): checkpoint = checkpoint['state_dict']

                load_func = self.model.load_state_dict if isinstance(self.model, nn.DataParallel) \
                    else self.model.module.load_state_dict
                load_func(checkpoint)
        else:
            self._net_init()
function_tokens: [ "def", "load", "(", "self", ")", ":", "if", "(", "self", ".", "is_train", "and", "self", ".", "opt", "[", "'solver'", "]", "[", "'pretrain'", "]", ")", "or", "not", "self", ".", "is_train", ":", "model_path", "=", "self", ".", "opt", "[", "'solver'...
url: https://github.com/Paper99/SRFBN_CVPR19/blob/046f693afc58dc53f240a7d85ea78213eec08490/solvers/SRSolver.py#L284-L312
---
nwo: saltstack/salt
sha: fae5bc757ad0f1716483ce7ae180b451545c2058
path: salt/returners/etcd_return.py
language: python
identifier: save_load
parameters: (jid, load, minions=None)
docstring: Save the load to the specified jid
docstring_summary: Save the load to the specified jid
docstring_tokens: [ "Save", "the", "load", "to", "the", "specified", "jid" ]
function:
    def save_load(jid, load, minions=None):
        """
        Save the load to the specified jid
        """
        log.debug("sdstack_etcd returner <save_load> called jid: %s", jid)
        write_profile = __opts__.get("etcd.returner_write_profile")
        client, path = _get_conn(__opts__, write_profile)
        if write_profile:
            ttl = __opts__.get(write_profile, {}).get("etcd.ttl")
        else:
            ttl = __opts__.get("etcd.ttl")
        client.set(
            "/".join((path, "jobs", jid, ".load.p")),
            salt.utils.json.dumps(load),
            ttl=ttl,
        )
function_tokens: [ "def", "save_load", "(", "jid", ",", "load", ",", "minions", "=", "None", ")", ":", "log", ".", "debug", "(", "\"sdstack_etcd returner <save_load> called jid: %s\"", ",", "jid", ")", "write_profile", "=", "__opts__", ".", "get", "(", "\"etcd.returner_write_profile...
url: https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/returners/etcd_return.py#L129-L144
---
nwo: zhl2008/awd-platform
sha: 0416b31abea29743387b10b3914581fbe8e7da5e
path: web_flaskbb/Python-2.7.9/Lib/plat-mac/pimp.py
language: python
identifier: PimpDatabase.url
parameters: (self)
return_statement: return self._url
docstring_tokens: []
function:
    def url(self):
        return self._url
function_tokens: [ "def", "url", "(", "self", ")", ":", "return", "self", ".", "_url" ]
url: https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/plat-mac/pimp.py#L374-L374
---
nwo: lad1337/XDM
sha: 0c1b7009fe00f06f102a6f67c793478f515e7efe
path: site-packages/logilab/common/decorators.py
language: python
identifier: locked
parameters: (acquire, release)
return_statement: return decorator
docstring: Decorator taking two methods to acquire/release a lock as argument, returning a decorator function which will call the inner method after having called acquire(self) et will call release(self) afterwards.
docstring_summary: Decorator taking two methods to acquire/release a lock as argument, returning a decorator function which will call the inner method after having called acquire(self) et will call release(self) afterwards.
docstring_tokens: [ "Decorator", "taking", "two", "methods", "to", "acquire", "/", "release", "a", "lock", "as", "argument", "returning", "a", "decorator", "function", "which", "will", "call", "the", "inner", "method", "after", "having", "called", "acquire", "(", "self", ")", "...
function:
    def locked(acquire, release):
        """Decorator taking two methods to acquire/release a lock as argument,
        returning a decorator function which will call the inner method after
        having called acquire(self) et will call release(self) afterwards.
        """
        def decorator(f):
            def wrapper(self, *args, **kwargs):
                acquire(self)
                try:
                    return f(self, *args, **kwargs)
                finally:
                    release(self)
            return wrapper
        return decorator
function_tokens: [ "def", "locked", "(", "acquire", ",", "release", ")", ":", "def", "decorator", "(", "f", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "acquire", "(", "self", ")", "try", ":", "return", "f", "(", ...
url: https://github.com/lad1337/XDM/blob/0c1b7009fe00f06f102a6f67c793478f515e7efe/site-packages/logilab/common/decorators.py#L236-L249
---
nwo: buke/GreenOdoo
sha: 3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
path: runtime/python/lib/python2.7/site-packages/python_ldap-2.4.10-py2.7-linux-x86_64.egg/ldap/controls/__init__.py
language: python
identifier: RequestControl.encodeControlValue
parameters: (self)
return_statement: return self.encodedControlValue
docstring: sets class attribute encodedControlValue to the BER-encoded ASN.1 control value composed by class attributes set before
docstring_summary: sets class attribute encodedControlValue to the BER-encoded ASN.1 control value composed by class attributes set before
docstring_tokens: [ "sets", "class", "attribute", "encodedControlValue", "to", "the", "BER", "-", "encoded", "ASN", ".", "1", "control", "value", "composed", "by", "class", "attributes", "set", "before" ]
function:
    def encodeControlValue(self):
        """
        sets class attribute encodedControlValue to the BER-encoded ASN.1
        control value composed by class attributes set before
        """
        return self.encodedControlValue
function_tokens: [ "def", "encodeControlValue", "(", "self", ")", ":", "return", "self", ".", "encodedControlValue" ]
url: https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/python_ldap-2.4.10-py2.7-linux-x86_64.egg/ldap/controls/__init__.py#L58-L63
---
nwo: allegro/ralph
sha: 1e4a9e1800d5f664abaef2624b8bf7512df279ce
path: src/ralph/virtual/models.py
language: python
identifier: update_service_env_on_cloudproject_save
parameters: (sender, instance, **kwargs)
docstring: Update CloudHost service_env while updating CloudProject
docstring_summary: Update CloudHost service_env while updating CloudProject
docstring_tokens: [ "Update", "CloudHost", "service_env", "while", "updating", "CloudProject" ]
function:
    def update_service_env_on_cloudproject_save(sender, instance, **kwargs):
        """Update CloudHost service_env while updating CloudProject"""
        if instance.pk is not None:
            instance.children.all().update(service_env=instance.service_env)
function_tokens: [ "def", "update_service_env_on_cloudproject_save", "(", "sender", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "if", "instance", ".", "pk", "is", "not", "None", ":", "instance", ".", "children", ".", "all", "(", ")", ".", "update", "(", "service_env"...
url: https://github.com/allegro/ralph/blob/1e4a9e1800d5f664abaef2624b8bf7512df279ce/src/ralph/virtual/models.py#L180-L183
---
nwo: secdev/scapy
sha: 65089071da1acf54622df0b4fa7fc7673d47d3cd
path: scapy/contrib/http2.py
language: python
identifier: UVarIntField.__init__
parameters: (self, name, default, size)
docstring: :param str name: the name of this field instance. :param default: the default value for this field instance. default must be positive or null. # noqa: E501 :raises: AssertionError
docstring_summary: :param str name: the name of this field instance. :param default: the default value for this field instance. default must be positive or null. # noqa: E501 :raises: AssertionError
docstring_tokens: [ ":", "param", "str", "name", ":", "the", "name", "of", "this", "field", "instance", ".", ":", "param", "default", ":", "the", "default", "value", "for", "this", "field", "instance", ".", "default", "must", "be", "positive", "or", "null", ".", "#", "noq...
function:
    def __init__(self, name, default, size):
        # type: (str, int, int) -> None
        """
        :param str name: the name of this field instance.
        :param default: the default value for this field instance. default must be positive or null.  # noqa: E501
        :raises: AssertionError
        """
        assert(default >= 0)
        assert(0 < size <= 8)
        super(UVarIntField, self).__init__(name, default, size)
        self.size = size
        self._max_value = (1 << self.size) - 1

        # Configuring the fake property that is useless for this class but that is  # noqa: E501
        # expected from BitFields
        self.rev = False
function_tokens: [ "def", "__init__", "(", "self", ",", "name", ",", "default", ",", "size", ")", ":", "# type: (str, int, int) -> None", "assert", "(", "default", ">=", "0", ")", "assert", "(", "0", "<", "size", "<=", "8", ")", "super", "(", "UVarIntField", ",", "self", ...
url: https://github.com/secdev/scapy/blob/65089071da1acf54622df0b4fa7fc7673d47d3cd/scapy/contrib/http2.py#L491-L507
---
nwo: zhl2008/awd-platform
sha: 0416b31abea29743387b10b3914581fbe8e7da5e
path: web_hxb2/lib/python3.5/site-packages/pip/_vendor/requests/packages/urllib3/connectionpool.py
language: python
identifier: ConnectionPool.__exit__
parameters: (self, exc_type, exc_val, exc_tb)
return_statement: return False
docstring_tokens: []
function:
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        # Return False to re-raise any potential exceptions
        return False
function_tokens: [ "def", "__exit__", "(", "self", ",", "exc_type", ",", "exc_val", ",", "exc_tb", ")", ":", "self", ".", "close", "(", ")", "# Return False to re-raise any potential exceptions", "return", "False" ]
url: https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/pip/_vendor/requests/packages/urllib3/connectionpool.py#L88-L91
---
nwo: karmab/kcli
sha: fff2a2632841f54d9346b437821585df0ec659d7
path: kvirt/providers/sampleprovider.py
language: python
identifier: Kbase.create
parameters: (self, name, virttype=None, profile='', flavor=None, plan='kvirt', cpumodel='Westmere', cpuflags=[], cpupinning=[], numcpus=2, memory=512, guestid='guestrhel764', pool='default', image=None, disks=[{'size': 10}], disksize=10, diskthin=True, diskinterface='virtio', nets=['default'], iso=None, vnc=False, cloudinit=True, reserveip=False, reservedns=False, reservehost=False, start=True, keys=None, cmds=[], ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None, tunnel=False, files=[], enableroot=True, alias=[], overrides={}, tags=[], storemetadata=False, sharedfolders=[], kernel=None, initrd=None, cmdline=None, cpuhotplug=False, memoryhotplug=False, numamode=None, numa=[], pcidevices=[], tpm=False, placement=[], autostart=False, rng=False, metadata={}, securitygroups=[])
return_statement: return {'result': 'success'}
docstring: :param name: :param virttype: :param profile: :param flavor: :param plan: :param cpumodel: :param cpuflags: :param cpupinning: :param numcpus: :param memory: :param guestid: :param pool: :param image: :param disks: :param disksize: :param diskthin: :param diskinterface: :param nets: :param iso: :param vnc: :param cloudinit: :param reservedns: :param reservehost: :param start: :param keys: :param cmds: :param ips: :param netmasks: :param gateway: :param nested: :param dns: :param domain: :param tunnel: :param files: :param enableroot: :param alias: :param overrides: :param tags: :param cpuhotplug: :param memoryhotplug: :param numamode: :param numa: :param pcidevices: :param tpm: :return:
docstring_tokens: []
function:
    def create(self, name, virttype=None, profile='', flavor=None, plan='kvirt', cpumodel='Westmere',
               cpuflags=[], cpupinning=[], numcpus=2, memory=512, guestid='guestrhel764', pool='default',
               image=None, disks=[{'size': 10}], disksize=10, diskthin=True, diskinterface='virtio',
               nets=['default'], iso=None, vnc=False, cloudinit=True, reserveip=False, reservedns=False,
               reservehost=False, start=True, keys=None, cmds=[], ips=None, netmasks=None, gateway=None,
               nested=True, dns=None, domain=None, tunnel=False, files=[], enableroot=True, alias=[],
               overrides={}, tags=[], storemetadata=False, sharedfolders=[], kernel=None, initrd=None,
               cmdline=None, cpuhotplug=False, memoryhotplug=False, numamode=None, numa=[],
               pcidevices=[], tpm=False, placement=[], autostart=False, rng=False, metadata={},
               securitygroups=[]):
        """
        :param name:
        :param virttype:
        :param profile:
        :param flavor:
        :param plan:
        :param cpumodel:
        :param cpuflags:
        :param cpupinning:
        :param numcpus:
        :param memory:
        :param guestid:
        :param pool:
        :param image:
        :param disks:
        :param disksize:
        :param diskthin:
        :param diskinterface:
        :param nets:
        :param iso:
        :param vnc:
        :param cloudinit:
        :param reservedns:
        :param reservehost:
        :param start:
        :param keys:
        :param cmds:
        :param ips:
        :param netmasks:
        :param gateway:
        :param nested:
        :param dns:
        :param domain:
        :param tunnel:
        :param files:
        :param enableroot:
        :param alias:
        :param overrides:
        :param tags:
        :param cpuhotplug:
        :param memoryhotplug:
        :param numamode:
        :param numa:
        :param pcidevices:
        :param tpm:
        :return:
        """
        print("not implemented")
        return {'result': 'success'}
function_tokens: [ "def", "create", "(", "self", ",", "name", ",", "virttype", "=", "None", ",", "profile", "=", "''", ",", "flavor", "=", "None", ",", "plan", "=", "'kvirt'", ",", "cpumodel", "=", "'Westmere'", ",", "cpuflags", "=", "[", "]", ",", "cpupinning", "=", ...
url: https://github.com/karmab/kcli/blob/fff2a2632841f54d9346b437821585df0ec659d7/kvirt/providers/sampleprovider.py#L61-L122
---
nwo: SCUT-AILab/DCP
sha: 70a2e53ae896573b0b4323eac5817e5660315cb4
path: dcp/models/pruned_resnet.py
language: python
identifier: conv3x3
parameters: (in_planes, out_planes, stride=1)
return_statement: return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
docstring: 3x3 convolution with padding
docstring_summary: 3x3 convolution with padding
docstring_tokens: [ "3x3", "convolution", "with", "padding" ]
function:
    def conv3x3(in_planes, out_planes, stride=1):
        "3x3 convolution with padding"
        return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                         stride=stride, padding=1, bias=False)
function_tokens: [ "def", "conv3x3", "(", "in_planes", ",", "out_planes", ",", "stride", "=", "1", ")", ":", "return", "nn", ".", "Conv2d", "(", "in_planes", ",", "out_planes", ",", "kernel_size", "=", "3", ",", "stride", "=", "stride", ",", "padding", "=", "1", ",", "...
url: https://github.com/SCUT-AILab/DCP/blob/70a2e53ae896573b0b4323eac5817e5660315cb4/dcp/models/pruned_resnet.py#L9-L12
---
nwo: lebedov/scikit-cuda
sha: 5d3c74f926fe7ce67ecfc85e9623aab7bc0b344f
path: skcuda/magma.py
language: python
identifier: magma_get_spotrf_nb
parameters: (m)
return_statement: return _libmagma.magma_get_spotrf_nb(m)
docstring_tokens: []
function:
    def magma_get_spotrf_nb(m):
        return _libmagma.magma_get_spotrf_nb(m)
function_tokens: [ "def", "magma_get_spotrf_nb", "(", "m", ")", ":", "return", "_libmagma", ".", "magma_get_spotrf_nb", "(", "m", ")" ]
url: https://github.com/lebedov/scikit-cuda/blob/5d3c74f926fe7ce67ecfc85e9623aab7bc0b344f/skcuda/magma.py#L1054-L1055
---
nwo: saulpw/visidata
sha: 577f34127c09116e3cbe1fcb3f67d54484785ae7
path: visidata/pivot.py
language: python
identifier: PivotSheet.openRow
parameters: (self, row)
return_statement: return vs
docstring: open sheet of source rows aggregated in current pivot row
docstring_summary: open sheet of source rows aggregated in current pivot row
docstring_tokens: [ "open", "sheet", "of", "source", "rows", "aggregated", "in", "current", "pivot", "row" ]
function:
    def openRow(self, row):
        'open sheet of source rows aggregated in current pivot row'
        vs = copy(self.source)
        vs.name += "_%s"%"+".join(map(str, row.discrete_keys))
        vs.rows = sum(row.pivotrows.values(), [])
        return vs
function_tokens: [ "def", "openRow", "(", "self", ",", "row", ")", ":", "vs", "=", "copy", "(", "self", ".", "source", ")", "vs", ".", "name", "+=", "\"_%s\"", "%", "\"+\"", ".", "join", "(", "map", "(", "str", ",", "row", ".", "discrete_keys", ")", ")", "vs", "....
url: https://github.com/saulpw/visidata/blob/577f34127c09116e3cbe1fcb3f67d54484785ae7/visidata/pivot.py#L71-L76
---
nwo: devitocodes/devito
sha: 6abd441e3f5f091775ad332be6b95e017b8cbd16
path: devito/tools/data_structures.py
language: python
identifier: DAG.delete_edge
parameters: (self, ind_node, dep_node)
docstring: Delete an edge from the graph.
docstring_summary: Delete an edge from the graph.
docstring_tokens: [ "Delete", "an", "edge", "from", "the", "graph", "." ]
function:
    def delete_edge(self, ind_node, dep_node):
        """Delete an edge from the graph."""
        if dep_node not in self.graph.get(ind_node, []):
            raise KeyError('this edge does not exist in graph')
        self.graph[ind_node].remove(dep_node)
        try:
            del self.labels[ind_node][dep_node]
        except KeyError:
            pass
function_tokens: [ "def", "delete_edge", "(", "self", ",", "ind_node", ",", "dep_node", ")", ":", "if", "dep_node", "not", "in", "self", ".", "graph", ".", "get", "(", "ind_node", ",", "[", "]", ")", ":", "raise", "KeyError", "(", "'this edge does not exist in graph'", ")", ...
url: https://github.com/devitocodes/devito/blob/6abd441e3f5f091775ad332be6b95e017b8cbd16/devito/tools/data_structures.py#L348-L356
brainiak/brainiak
ee093597c6c11597b0a59e95b48d2118e40394a5
brainiak/utils/fmrisim.py
python
_generate_noise_temporal_drift
(trs, tr_duration, basis="cos_power_drop", period=150, )
return noise_drift
Generate the drift noise Create a trend (either sine or discrete_cos), of a given period and random phase, to represent the drift of the signal over time Parameters ---------- trs : int How many volumes (aka TRs) are there tr_duration : float How long in seconds is each volume acqusition basis : str What is the basis function for the drift. Could be made of discrete cosines (for longer run durations, more basis functions are created) that either have equal power ('discrete_cos') or the power diminishes such that 99% of the power is below a specified frequency ('cos_power_drop'). Alternatively, this drift could simply be a sine wave ('sine') period : int When the basis function is 'cos_power_drop' this is the period over which no power of the drift exceeds (i.e. the power of the drift asymptotes at this period). However for the other basis functions, this is simply how many seconds is the period of oscillation of the drift Returns ---------- noise_drift : one dimensional array, float The drift timecourse of activity
Generate the drift noise
[ "Generate", "the", "drift", "noise" ]
def _generate_noise_temporal_drift(trs, tr_duration, basis="cos_power_drop", period=150, ): """Generate the drift noise Create a trend (either sine or discrete_cos), of a given period and random phase, to represent the drift of the signal over time Parameters ---------- trs : int How many volumes (aka TRs) are there tr_duration : float How long in seconds is each volume acqusition basis : str What is the basis function for the drift. Could be made of discrete cosines (for longer run durations, more basis functions are created) that either have equal power ('discrete_cos') or the power diminishes such that 99% of the power is below a specified frequency ('cos_power_drop'). Alternatively, this drift could simply be a sine wave ('sine') period : int When the basis function is 'cos_power_drop' this is the period over which no power of the drift exceeds (i.e. the power of the drift asymptotes at this period). However for the other basis functions, this is simply how many seconds is the period of oscillation of the drift Returns ---------- noise_drift : one dimensional array, float The drift timecourse of activity """ # Calculate drift differently depending on the basis function if basis == 'discrete_cos': # Specify each tr in terms of its phase with the given period timepoints = np.linspace(0, trs - 1, trs) timepoints = ((timepoints * tr_duration) / period) * 2 * np.pi # Specify the other timing information duration = trs * tr_duration basis_funcs = int(np.floor(duration / period)) # How bases do you have if basis_funcs == 0: err_msg = 'Too few timepoints (' + str(trs) + ') to accurately ' \ 'model drift' logger.warning(err_msg) basis_funcs = 1 noise_drift = np.zeros((timepoints.shape[0], basis_funcs)) for basis_counter in list(range(1, basis_funcs + 1)): # What steps do you want to take for this basis function timepoints_basis = (timepoints/basis_counter) + (np.random.rand() * np.pi * 2) # Store the drift from this basis func noise_drift[:, basis_counter - 1] = np.cos(timepoints_basis) # Average the drift noise_drift = np.mean(noise_drift, 1) elif basis == 'sine': # Calculate the cycles of the drift for a given function. cycles = trs * tr_duration / period # Create a sine wave with a given number of cycles and random phase timepoints = np.linspace(0, trs - 1, trs) phaseshift = np.pi * 2 * np.random.random() phase = (timepoints / (trs - 1) * cycles * 2 * np.pi) + phaseshift noise_drift = np.sin(phase) elif basis == 'cos_power_drop': # Make a vector counting each TR timepoints = np.linspace(0, trs - 1, trs) * tr_duration # Specify the other timing information duration = trs * tr_duration # How bases do you have? This is to adhere to Nyquist basis_funcs = int(trs) noise_drift = np.zeros((timepoints.shape[0], basis_funcs)) for basis_counter in list(range(1, basis_funcs + 1)): # What steps do you want to take for this basis function random_phase = np.random.rand() * np.pi * 2 timepoint_phase = (timepoints / duration * np.pi * basis_counter) # In radians, what is the value for each time point timepoints_basis = timepoint_phase + random_phase # Store the drift from this basis func noise_drift[:, basis_counter - 1] = np.cos(timepoints_basis) def power_drop(r, L, F, tr_duration): # Function to return the drop rate for the power of basis functions # In other words, how much should the weight of each basis function # reduce in order to make the power you retain of the period's # frequency be 99% of the total power of the highest frequency, as # defined by the DCT. 
# For an example where there are 20 time points, there will be 20 # basis functions in the DCT. If the period of the signal you wish # to simulate is such that 99% of the power should drop off after # the equivalent of 5 of these basis functions, then the way this # code works is it finds the rate at which power must drop off for # all of the 20 basis functions such that by the 5th one, there is # only 1% of the power remaining. # r is the power reduction rate which should be between 0 and 1 # L is the duration of the run in seconds # F is the period of the cycle in seconds. It is assumed that this will # be greater than the tr_duration, or else this will not work # tr_duration is the duration of each TR in seconds # Check the TR duration if F < tr_duration: msg = 'Period %0.0f is less than TR duration %0.0f' % (F, tr_duration) raise ValueError(msg) percent_retained = 0.99 # What is the percentage of power retained # Compare the power at the period frequency (in the numerator) with # the power at the frequency of the DCT, AKA the highest possible # frequency in the data (in the denominator) numerator = 1 - r ** (2 * L / F) # Power of this period denominator = 1 - r ** (2 * L / tr_duration) # Power of DCT freq. # Calculate the retained power power_drop = abs((numerator / denominator) - percent_retained) return power_drop # Solve for the power reduction rate. # This assumes that r is between 0 and 1 # Takes the duration and period as arguments sol = optimize.minimize_scalar(power_drop, bounds=(0, 1), method='Bounded', args=(duration, period, tr_duration)) # Pull out the solution r = sol.x # Weight the basis functions based on the power drop off basis_weights = r ** np.arange(basis_funcs) # Weigh the basis functions weighted_basis_funcs = np.multiply(noise_drift, basis_weights) # Average the drift noise_drift = np.mean(weighted_basis_funcs, 1) # Normalize so the sigma is 1 noise_drift = stats.zscore(noise_drift) # Return noise return noise_drift
[ "def", "_generate_noise_temporal_drift", "(", "trs", ",", "tr_duration", ",", "basis", "=", "\"cos_power_drop\"", ",", "period", "=", "150", ",", ")", ":", "# Calculate drift differently depending on the basis function", "if", "basis", "==", "'discrete_cos'", ":", "# Sp...
https://github.com/brainiak/brainiak/blob/ee093597c6c11597b0a59e95b48d2118e40394a5/brainiak/utils/fmrisim.py#L1528-L1695
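A minimal usage sketch for the record above (not from the source; the helper is module-private in brainiak.utils.fmrisim, so the direct call is illustrative only):

from brainiak.utils import fmrisim

# 200 volumes at a 2 s TR; drift power asymptotes past a 150 s period
drift = fmrisim._generate_noise_temporal_drift(trs=200, tr_duration=2.0,
                                               basis='cos_power_drop', period=150)
print(drift.shape)   # (200,)
print(drift.std())   # ~1.0, since the result is z-scored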
funnyzhou/FPN-Pytorch
423a4499c4e826d17367762e821b51b9b1b0f2f3
lib/datasets/voc_eval.py
python
parse_rec
(filename)
return objects
Parse a PASCAL VOC xml file.
Parse a PASCAL VOC xml file.
[ "Parse", "a", "PASCAL", "VOC", "xml", "file", "." ]
def parse_rec(filename): """Parse a PASCAL VOC xml file.""" tree = ET.parse(filename) objects = [] for obj in tree.findall('object'): obj_struct = {} obj_struct['name'] = obj.find('name').text obj_struct['pose'] = obj.find('pose').text obj_struct['truncated'] = int(obj.find('truncated').text) obj_struct['difficult'] = int(obj.find('difficult').text) bbox = obj.find('bndbox') obj_struct['bbox'] = [int(bbox.find('xmin').text), int(bbox.find('ymin').text), int(bbox.find('xmax').text), int(bbox.find('ymax').text)] objects.append(obj_struct) return objects
[ "def", "parse_rec", "(", "filename", ")", ":", "tree", "=", "ET", ".", "parse", "(", "filename", ")", "objects", "=", "[", "]", "for", "obj", "in", "tree", ".", "findall", "(", "'object'", ")", ":", "obj_struct", "=", "{", "}", "obj_struct", "[", "...
https://github.com/funnyzhou/FPN-Pytorch/blob/423a4499c4e826d17367762e821b51b9b1b0f2f3/lib/datasets/voc_eval.py#L34-L51
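A hedged usage sketch, with parse_rec defined as above (the annotation path is hypothetical; the function assumes xml.etree.ElementTree is imported as ET at module level):

import xml.etree.ElementTree as ET  # dependency assumed by parse_rec

objects = parse_rec('VOCdevkit/VOC2007/Annotations/000005.xml')
for obj in objects:
    print(obj['name'], obj['bbox'], 'difficult' if obj['difficult'] else 'ok')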
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/full/tarfile.py
python
TarFile.bz2open
(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs)
return t
Open bzip2 compressed tar archive name for reading or writing. Appending is not allowed.
Open bzip2 compressed tar archive name for reading or writing. Appending is not allowed.
[ "Open", "bzip2", "compressed", "tar", "archive", "name", "for", "reading", "or", "writing", ".", "Appending", "is", "not", "allowed", "." ]
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): """Open bzip2 compressed tar archive name for reading or writing. Appending is not allowed. """ if mode not in ("r", "w", "x"): raise ValueError("mode must be 'r', 'w' or 'x'") try: from bz2 import BZ2File except ImportError: raise CompressionError("bz2 module is not available") from None fileobj = BZ2File(fileobj or name, mode, compresslevel=compresslevel) try: t = cls.taropen(name, mode, fileobj, **kwargs) except (OSError, EOFError) as e: fileobj.close() if mode == 'r': raise ReadError("not a bzip2 file") from e raise except: fileobj.close() raise t._extfileobj = False return t
[ "def", "bz2open", "(", "cls", ",", "name", ",", "mode", "=", "\"r\"", ",", "fileobj", "=", "None", ",", "compresslevel", "=", "9", ",", "*", "*", "kwargs", ")", ":", "if", "mode", "not", "in", "(", "\"r\"", ",", "\"w\"", ",", "\"x\"", ")", ":", ...
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/tarfile.py#L1699-L1724
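In practice this classmethod is reached through tarfile.open with a bz2 mode string rather than called directly; a small sketch (file names are hypothetical):

import tarfile

# "w:bz2" / "r:bz2" dispatch to TarFile.bz2open internally
with tarfile.open("example.tar.bz2", "w:bz2", compresslevel=9) as tar:
    tar.add("notes.txt")
with tarfile.open("example.tar.bz2", "r:bz2") as tar:
    print(tar.getnames())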
PaddlePaddle/PaddleX
2bab73f81ab54e328204e7871e6ae4a82e719f5d
paddlex/ppcls/arch/backbone/model_zoo/resnext.py
python
ResNeXt101_32x4d
(pretrained=False, use_ssld=False, **kwargs)
return model
[]
def ResNeXt101_32x4d(pretrained=False, use_ssld=False, **kwargs): model = ResNeXt(layers=101, cardinality=32, **kwargs) _load_pretrained( pretrained, model, MODEL_URLS["ResNeXt101_32x4d"], use_ssld=use_ssld) return model
[ "def", "ResNeXt101_32x4d", "(", "pretrained", "=", "False", ",", "use_ssld", "=", "False", ",", "*", "*", "kwargs", ")", ":", "model", "=", "ResNeXt", "(", "layers", "=", "101", ",", "cardinality", "=", "32", ",", "*", "*", "kwargs", ")", "_load_pretra...
https://github.com/PaddlePaddle/PaddleX/blob/2bab73f81ab54e328204e7871e6ae4a82e719f5d/paddlex/ppcls/arch/backbone/model_zoo/resnext.py#L273-L277
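A hedged instantiation sketch (assumes a PaddlePaddle environment; the class_num kwarg is the usual PaddleClas name but is an assumption here):

import paddle

model = ResNeXt101_32x4d(pretrained=True, class_num=1000)  # class_num kwarg assumed
model.eval()
logits = model(paddle.randn([1, 3, 224, 224]))
print(logits.shape)  # expected [1, 1000]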
zhang17173/Event-Extraction
daff56c9aa01b5286e9b5201d88a7cf9412de5fc
07案件相似度/utils.py
python
find_element
(l, *ss)
return "0"
Check whether any element of l contains s :param l: list :param ss: one or more strings :return:
Check whether any element of l contains s :param l: list :param ss: one or more strings :return:
[ "查找在l的元素中中是否包含s", ":", "param", "l", ":", "列表", ":", "param", "ss", ":", "一个或多个字符串", ":", "return", ":" ]
def find_element(l, *ss): """ Check whether any element of l contains s :param l: list :param ss: one or more strings :return: """ for s in ss: for element in l: if s in element: return "1" return "0"
[ "def", "find_element", "(", "l", ",", "*", "ss", ")", ":", "for", "s", "in", "ss", ":", "for", "element", "in", "l", ":", "if", "s", "in", "element", ":", "return", "\"1\"", "return", "\"0\"" ]
https://github.com/zhang17173/Event-Extraction/blob/daff56c9aa01b5286e9b5201d88a7cf9412de5fc/07案件相似度/utils.py#L17-L28
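Two illustrative calls, derived directly from the function's semantics (example inputs made up):

find_element(["民事判决书", "执行裁定书"], "判决")   # "1": an element contains the substring
find_element(["abc", "def"], "xy", "gh")            # "0": no element contains any of the strings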
JacquesLucke/animation_nodes
b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1
animation_nodes/nodes/generic/expression.py
python
ExpressionNode.getUsedModules
(self)
return modules
[]
def getUsedModules(self): moduleNames = re.split("\W+", self.moduleNames) modules = [module for module in moduleNames if module != ""] return modules
[ "def", "getUsedModules", "(", "self", ")", ":", "moduleNames", "=", "re", ".", "split", "(", "\"\\W+\"", ",", "self", ".", "moduleNames", ")", "modules", "=", "[", "module", "for", "module", "in", "moduleNames", "if", "module", "!=", "\"\"", "]", "return...
https://github.com/JacquesLucke/animation_nodes/blob/b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1/animation_nodes/nodes/generic/expression.py#L142-L145
laspy/laspy
c9d9b9c0e8d84288134c02bf4ecec3964f5afa29
laspy/lasdata.py
python
LasData.remove_extra_dim
(self, name: str)
Remove an extra dimension from this object .. note:: If you plan on removing multiple extra dimensions, prefer :meth:`.remove_extra_dims` as it will save re-allocations and data copies Parameters ---------- name: str, name of the extra dimension to be removed Raises ------ LaspyException: if you try to remove an extra dimension that does not exist.
Remove an extra dimension from this object
[ "Remove", "an", "extra", "dimensions", "from", "this", "object" ]
def remove_extra_dim(self, name: str) -> None: """Remove an extra dimension from this object .. note:: If you plan on removing multiple extra dimensions, prefer :meth:`.remove_extra_dims` as it will save re-allocations and data copies Parameters ---------- name: str, name of the extra dimension to be removed Raises ------ LaspyException: if you try to remove an extra dimension that does not exist. """ self.remove_extra_dims([name])
[ "def", "remove_extra_dim", "(", "self", ",", "name", ":", "str", ")", "->", "None", ":", "self", ".", "remove_extra_dims", "(", "[", "name", "]", ")" ]
https://github.com/laspy/laspy/blob/c9d9b9c0e8d84288134c02bf4ecec3964f5afa29/laspy/lasdata.py#L179-L201
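A hedged end-to-end sketch with the laspy 2.x API (file name hypothetical):

import numpy as np
import laspy

las = laspy.read("points.las")
las.add_extra_dim(laspy.ExtraBytesParams(name="confidence", type=np.float64))
las.confidence = np.random.rand(len(las.points))
las.remove_extra_dim("confidence")  # delegates to remove_extra_dims(["confidence"])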
vmware-archive/vsphere-storage-for-docker
96d2ce72457047af4ef05cb0a8794cf623803865
esx_service/utils/kvESX.py
python
load
(volpath)
Load and return dictionary from the sidecar
Load and return dictionary from the sidecar
[ "Load", "and", "return", "dictionary", "from", "the", "sidecar" ]
def load(volpath): """ Load and return dictionary from the sidecar """ vol_type = get_vol_type(volpath) if not vol_type: logging.warning("KV delete - could not determine type of volume %s", volpath) return None if vol_type == c_uint32(KV_VOL_VIRTUAL).value: meta_file = lib.DiskLib_SidecarMakeFileName(volpath.encode(), DVOL_KEY.encode()) else: meta_file = get_kv_filename(volpath) if not meta_file: return None retry_count = 0 vol_name = vmdk_utils.get_volname_from_vmdk_path(volpath) while True: try: with open(meta_file, "r") as fh: kv_str = fh.read() break except IOError as open_error: # This is a workaround to the timing/locking with metadata files issue #626 if open_error.errno == errno.EBUSY and retry_count <= vmdk_utils.VMDK_RETRY_COUNT: logging.warning("Meta file %s busy for load(), retrying...", meta_file) vmdk_utils.log_volume_lsof(vol_name) retry_count += 1 time.sleep(vmdk_utils.VMDK_RETRY_SLEEP) else: logging.exception("Failed to access %s", meta_file) return None try: return json.loads(kv_str) except ValueError: logging.exception("load:Failed to decode meta-data for %s", volpath) return None
[ "def", "load", "(", "volpath", ")", ":", "vol_type", "=", "get_vol_type", "(", "volpath", ")", "if", "not", "vol_type", ":", "logging", ".", "warning", "(", "\"KV delete - could not determine type of volume %s\"", ",", "volpath", ")", "return", "None", "if", "vo...
https://github.com/vmware-archive/vsphere-storage-for-docker/blob/96d2ce72457047af4ef05cb0a8794cf623803865/esx_service/utils/kvESX.py#L311-L348
cupy/cupy
a47ad3105f0fe817a4957de87d98ddccb8c7491f
cupyx/scipy/ndimage/fourier.py
python
fourier_uniform
(input, size, n=-1, axis=-1, output=None)
return output
Multidimensional uniform fourier filter. The array is multiplied with the Fourier transform of a box of given size. Args: input (cupy.ndarray): The input array. size (float or sequence of float): The size of the box used for filtering. If a float, `size` is the same for all axes. If a sequence, `size` has to contain one value for each axis. n (int, optional): If `n` is negative (default), then the input is assumed to be the result of a complex fft. If `n` is larger than or equal to zero, the input is assumed to be the result of a real fft, and `n` gives the length of the array before transformation along the real transform direction. axis (int, optional): The axis of the real transform (only used when ``n > -1``). output (cupy.ndarray, optional): If given, the result of filtering the input is placed in this array. Returns: output (cupy.ndarray): The filtered output.
Multidimensional uniform fourier filter.
[ "Multidimensional", "uniform", "shift", "filter", "." ]
def fourier_uniform(input, size, n=-1, axis=-1, output=None): """Multidimensional uniform fourier filter. The array is multiplied with the Fourier transform of a box of given size. Args: input (cupy.ndarray): The input array. size (float or sequence of float): The size of the box used for filtering. If a float, `size` is the same for all axes. If a sequence, `size` has to contain one value for each axis. n (int, optional): If `n` is negative (default), then the input is assumed to be the result of a complex fft. If `n` is larger than or equal to zero, the input is assumed to be the result of a real fft, and `n` gives the length of the array before transformation along the real transform direction. axis (int, optional): The axis of the real transform (only used when ``n > -1``). output (cupy.ndarray, optional): If given, the result of filtering the input is placed in this array. Returns: output (cupy.ndarray): The filtered output. """ ndim = input.ndim output = _get_output_fourier(output, input) axis = internal._normalize_axis_index(axis, ndim) sizes = _util._fix_sequence_arg(size, ndim, 'size') output[...] = input for ax, (size, ax_size) in enumerate(zip(sizes, output.shape)): # compute the frequency grid in Hz if ax == axis and n > 0: arr = cupy.arange(ax_size, dtype=output.real.dtype) arr /= n else: arr = cupy.fft.fftfreq(ax_size) arr = arr.astype(output.real.dtype, copy=False) # compute the uniform filter weights arr *= size cupy.sinc(arr, out=arr) # reshape for broadcasting arr = _reshape_nd(arr, ndim=ndim, axis=ax) output *= arr return output
[ "def", "fourier_uniform", "(", "input", ",", "size", ",", "n", "=", "-", "1", ",", "axis", "=", "-", "1", ",", "output", "=", "None", ")", ":", "ndim", "=", "input", ".", "ndim", "output", "=", "_get_output_fourier", "(", "output", ",", "input", ")...
https://github.com/cupy/cupy/blob/a47ad3105f0fe817a4957de87d98ddccb8c7491f/cupyx/scipy/ndimage/fourier.py#L87-L134
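As with scipy.ndimage, the usual pattern is to apply this to an already-transformed array; a short sketch:

import cupy as cp
from cupyx.scipy.ndimage import fourier_uniform

image = cp.random.rand(64, 64)
freq = cp.fft.fftn(image)          # complex FFT, so the default n=-1 applies
filtered = cp.fft.ifftn(fourier_uniform(freq, size=5)).real  # ~5-pixel box filter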
SteveDoyle2/pyNastran
eda651ac2d4883d95a34951f8a002ff94f642a1a
pyNastran/bdf/bdf_interface/add_methods.py
python
AddMethods._add_secset_object
(self, set_obj: Union[SECSET, SECSET1])
adds an SECSET/SECSET1 object
adds an SECSET/SECSET1 object
[ "adds", "an", "SECSET", "/", "SECSTE1", "object" ]
def _add_secset_object(self, set_obj: Union[SECSET, SECSET1]) -> None: """adds an SECSET/SECSET1 object""" self.model.se_csets.append(set_obj)
[ "def", "_add_secset_object", "(", "self", ",", "set_obj", ":", "Union", "[", "SECSET", ",", "SECSET1", "]", ")", "->", "None", ":", "self", ".", "model", ".", "se_csets", ".", "append", "(", "set_obj", ")" ]
https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/bdf/bdf_interface/add_methods.py#L1603-L1605
keiffster/program-y
8c99b56f8c32f01a7b9887b5daae9465619d0385
src/programy/parser/template/nodes/interval.py
python
TemplateIntervalNode.interval_from
(self, interval_from)
[]
def interval_from(self, interval_from): if isinstance(interval_from, TemplateNode): self._interval_from = interval_from else: self._interval_from = TemplateWordNode(interval_from)
[ "def", "interval_from", "(", "self", ",", "interval_from", ")", ":", "if", "isinstance", "(", "interval_from", ",", "TemplateNode", ")", ":", "self", ".", "_interval_from", "=", "interval_from", "else", ":", "self", ".", "_interval_from", "=", "TemplateWordNode"...
https://github.com/keiffster/program-y/blob/8c99b56f8c32f01a7b9887b5daae9465619d0385/src/programy/parser/template/nodes/interval.py#L51-L55
robotlearn/pyrobolearn
9cd7c060723fda7d2779fa255ac998c2c82b8436
pyrobolearn/models/gp/gp.py
python
GPR.likelihood
(self, x, y, to_numpy=False)
return self._convert(likelihood, to_numpy=to_numpy)
r"""Evaluate the likelihood p(y|f,x).
r"""Evaluate the likelihood p(y|f,x).
[ "r", "Evaluate", "the", "likelihood", "p", "(", "y|f", "x", ")", "." ]
def likelihood(self, x, y, to_numpy=False): r"""Evaluate the likelihood p(y|f,x).""" likelihood = torch.exp(self.log_likelihood(x, y, to_numpy=False)) return self._convert(likelihood, to_numpy=to_numpy)
[ "def", "likelihood", "(", "self", ",", "x", ",", "y", ",", "to_numpy", "=", "False", ")", ":", "likelihood", "=", "torch", ".", "exp", "(", "self", ".", "log_likelihood", "(", "x", ",", "y", ",", "to_numpy", "=", "False", ")", ")", "return", "self"...
https://github.com/robotlearn/pyrobolearn/blob/9cd7c060723fda7d2779fa255ac998c2c82b8436/pyrobolearn/models/gp/gp.py#L438-L441
edisonlz/fastor
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
base/site-packages/django/http/request.py
python
HttpRequest.get_host
(self)
Returns the HTTP host using the environment or request headers.
Returns the HTTP host using the environment or request headers.
[ "Returns", "the", "HTTP", "host", "using", "the", "environment", "or", "request", "headers", "." ]
def get_host(self): """Returns the HTTP host using the environment or request headers.""" # We try three options, in order of decreasing preference. if settings.USE_X_FORWARDED_HOST and ( 'HTTP_X_FORWARDED_HOST' in self.META): host = self.META['HTTP_X_FORWARDED_HOST'] elif 'HTTP_HOST' in self.META: host = self.META['HTTP_HOST'] else: # Reconstruct the host using the algorithm from PEP 333. host = self.META['SERVER_NAME'] server_port = str(self.META['SERVER_PORT']) if server_port != ('443' if self.is_secure() else '80'): host = '%s:%s' % (host, server_port) allowed_hosts = ['*'] if settings.DEBUG else settings.ALLOWED_HOSTS domain, port = split_domain_port(host) if domain and validate_host(domain, allowed_hosts): return host else: msg = "Invalid HTTP_HOST header: %r." % host if domain: msg += " You may need to add %r to ALLOWED_HOSTS." % domain raise DisallowedHost(msg)
[ "def", "get_host", "(", "self", ")", ":", "# We try three options, in order of decreasing preference.", "if", "settings", ".", "USE_X_FORWARDED_HOST", "and", "(", "'HTTP_X_FORWARDED_HOST'", "in", "self", ".", "META", ")", ":", "host", "=", "self", ".", "META", "[", ...
https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/django/http/request.py#L56-L79
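A hedged sketch of exercising this method through Django's test RequestFactory (requires configured settings with a matching ALLOWED_HOSTS):

from django.test import RequestFactory

request = RequestFactory().get("/", HTTP_HOST="example.com:8080")
print(request.get_host())  # 'example.com:8080' if 'example.com' passes ALLOWED_HOSTS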
FSecureLABS/Jandroid
e31d0dab58a2bfd6ed8e0a387172b8bd7c893436
libs/platform-tools/platform-tools_darwin/systrace/catapult/third_party/pyserial/serial/serialutil.py
python
FileLike.__del__
(self)
Destructor. Calls close().
Destructor. Calls close().
[ "Destructor", ".", "Calls", "close", "()", "." ]
def __del__(self): """Destructor. Calls close().""" # The try/except block is in case this is called at program # exit time, when it's possible that globals have already been # deleted, and then the close() call might fail. Since # there's nothing we can do about such failures and they annoy # the end users, we suppress the traceback. try: self.close() except: pass
[ "def", "__del__", "(", "self", ")", ":", "# The try/except block is in case this is called at program", "# exit time, when it's possible that globals have already been", "# deleted, and then the close() call might fail. Since", "# there's nothing we can do about such failures and they annoy", "...
https://github.com/FSecureLABS/Jandroid/blob/e31d0dab58a2bfd6ed8e0a387172b8bd7c893436/libs/platform-tools/platform-tools_darwin/systrace/catapult/third_party/pyserial/serial/serialutil.py#L133-L143
ambakick/Person-Detection-and-Tracking
f925394ac29b5cf321f1ce89a71b193381519a0b
dataset_tools/create_pet_tf_record.py
python
create_tf_record
(output_filename, num_shards, label_map_dict, annotations_dir, image_dir, examples, faces_only=True, mask_type='png')
Creates a TFRecord file from examples. Args: output_filename: Path to where output file is saved. num_shards: Number of shards for output file. label_map_dict: The label map dictionary. annotations_dir: Directory where annotation files are stored. image_dir: Directory where image files are stored. examples: Examples to parse and save to tf record. faces_only: If True, generates bounding boxes for pet faces. Otherwise generates bounding boxes (as well as segmentations for full pet bodies). mask_type: 'numerical' or 'png'. 'png' is recommended because it leads to smaller file sizes.
Creates a TFRecord file from examples.
[ "Creates", "a", "TFRecord", "file", "from", "examples", "." ]
def create_tf_record(output_filename, num_shards, label_map_dict, annotations_dir, image_dir, examples, faces_only=True, mask_type='png'): """Creates a TFRecord file from examples. Args: output_filename: Path to where output file is saved. num_shards: Number of shards for output file. label_map_dict: The label map dictionary. annotations_dir: Directory where annotation files are stored. image_dir: Directory where image files are stored. examples: Examples to parse and save to tf record. faces_only: If True, generates bounding boxes for pet faces. Otherwise generates bounding boxes (as well as segmentations for full pet bodies). mask_type: 'numerical' or 'png'. 'png' is recommended because it leads to smaller file sizes. """ with contextlib2.ExitStack() as tf_record_close_stack: output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords( tf_record_close_stack, output_filename, num_shards) for idx, example in enumerate(examples): if idx % 100 == 0: logging.info('On image %d of %d', idx, len(examples)) xml_path = os.path.join(annotations_dir, 'xmls', example + '.xml') mask_path = os.path.join(annotations_dir, 'trimaps', example + '.png') if not os.path.exists(xml_path): logging.warning('Could not find %s, ignoring example.', xml_path) continue with tf.gfile.GFile(xml_path, 'r') as fid: xml_str = fid.read() xml = etree.fromstring(xml_str) data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation'] try: tf_example = dict_to_tf_example( data, mask_path, label_map_dict, image_dir, faces_only=faces_only, mask_type=mask_type) if tf_example: shard_idx = idx % num_shards output_tfrecords[shard_idx].write(tf_example.SerializeToString()) except ValueError: logging.warning('Invalid example: %s, ignoring.', xml_path)
[ "def", "create_tf_record", "(", "output_filename", ",", "num_shards", ",", "label_map_dict", ",", "annotations_dir", ",", "image_dir", ",", "examples", ",", "faces_only", "=", "True", ",", "mask_type", "=", "'png'", ")", ":", "with", "contextlib2", ".", "ExitSta...
https://github.com/ambakick/Person-Detection-and-Tracking/blob/f925394ac29b5cf321f1ce89a71b193381519a0b/dataset_tools/create_pet_tf_record.py#L214-L265
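A hedged driver sketch (paths and the example list are hypothetical; label_map_util comes from the TF object_detection utils):

from object_detection.utils import label_map_util

label_map_dict = label_map_util.get_label_map_dict('data/pet_label_map.pbtxt')
train_examples = ['Abyssinian_100', 'beagle_12']  # stems of xml/image pairs
create_tf_record('pet_faces_train.record', num_shards=10,
                 label_map_dict=label_map_dict,
                 annotations_dir='pets/annotations',
                 image_dir='pets/images',
                 examples=train_examples,
                 faces_only=True, mask_type='png')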
gem/oq-engine
1bdb88f3914e390abcbd285600bfd39477aae47c
openquake/calculators/extract.py
python
extract_disagg_by_src
(dstore, what)
return ArrayWrapper(arr[::-1], dict(json=hdf5.dumps(f)))
Extract the disagg_by_src information Example: http://127.0.0.1:8800/v1/calc/30/extract/disagg_by_src?site_id=0&imt_id=0&rlz_id=0&lvl_id=-1
Extract the disagg_by_src information Example: http://127.0.0.1:8800/v1/calc/30/extract/disagg_by_src?site_id=0&imt_id=0&rlz_id=0&lvl_id=-1
[ "Extract", "the", "disagg_by_src", "information", "Example", ":", "http", ":", "//", "127", ".", "0", ".", "0", ".", "1", ":", "8800", "/", "v1", "/", "calc", "/", "30", "/", "extract", "/", "disagg_by_src?site_id", "=", "0&imt_id", "=", "0&rlz_id", "=...
def extract_disagg_by_src(dstore, what): """ Extract the disagg_by_src information Example: http://127.0.0.1:8800/v1/calc/30/extract/disagg_by_src?site_id=0&imt_id=0&rlz_id=0&lvl_id=-1 """ qdict = parse(what) dic = hdf5.get_shape_descr(dstore['disagg_by_src'].attrs['json']) src_id = dic['src_id'] f = norm(qdict, 'site_id rlz_id lvl_id imt_id'.split()) poe = dstore['disagg_by_src'][ f['site_id'], f['rlz_id'], f['imt_id'], f['lvl_id']] arr = numpy.zeros(len(src_id), [('src_id', '<S16'), ('poe', '<f8')]) arr['src_id'] = src_id arr['poe'] = poe arr.sort(order='poe') return ArrayWrapper(arr[::-1], dict(json=hdf5.dumps(f)))
[ "def", "extract_disagg_by_src", "(", "dstore", ",", "what", ")", ":", "qdict", "=", "parse", "(", "what", ")", "dic", "=", "hdf5", ".", "get_shape_descr", "(", "dstore", "[", "'disagg_by_src'", "]", ".", "attrs", "[", "'json'", "]", ")", "src_id", "=", ...
https://github.com/gem/oq-engine/blob/1bdb88f3914e390abcbd285600bfd39477aae47c/openquake/calculators/extract.py#L1174-L1189
tomplus/kubernetes_asyncio
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
kubernetes_asyncio/client/api/networking_v1_api.py
python
NetworkingV1Api.delete_collection_ingress_class_with_http_info
(self, **kwargs)
return self.api_client.call_api( '/apis/networking.k8s.io/v1/ingressclasses', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
delete_collection_ingress_class # noqa: E501 delete collection of IngressClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_ingress_class_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param V1DeleteOptions body: :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread.
delete_collection_ingress_class # noqa: E501
[ "delete_collection_ingress_class", "#", "noqa", ":", "E501" ]
def delete_collection_ingress_class_with_http_info(self, **kwargs): # noqa: E501 """delete_collection_ingress_class # noqa: E501 delete collection of IngressClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_ingress_class_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param V1DeleteOptions body: :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = [ 'pretty', '_continue', 'dry_run', 'field_selector', 'grace_period_seconds', 'label_selector', 'limit', 'orphan_dependents', 'propagation_policy', 'resource_version', 'resource_version_match', 'timeout_seconds', 'body' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_ingress_class" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501 query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501 if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501 query_params.append(('continue', local_var_params['_continue'])) # noqa: E501 if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501 query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501 if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501 query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501 if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501 query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501 if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501 query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501 if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501 query_params.append(('limit', local_var_params['limit'])) # noqa: E501 if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501 query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501 if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501 query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501 if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501 query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501 if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501 query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501 if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501 query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501 # Authentication setting auth_settings = ['BearerToken'] # noqa: E501 return self.api_client.call_api( '/apis/networking.k8s.io/v1/ingressclasses', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, 
files=local_var_files, response_type='V1Status', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
[ "def", "delete_collection_ingress_class_with_http_info", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "local_var_params", "=", "locals", "(", ")", "all_params", "=", "[", "'pretty'", ",", "'_continue'", ",", "'dry_run'", ",", "'field_selector'", ...
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/api/networking_v1_api.py#L481-L612
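Callers normally use the plain wrapper rather than the _with_http_info variant; a hedged asyncio sketch (label selector hypothetical):

import asyncio
from kubernetes_asyncio import client, config

async def main():
    await config.load_kube_config()
    async with client.ApiClient() as api:
        v1 = client.NetworkingV1Api(api)
        # Deletes every IngressClass matching the label
        status = await v1.delete_collection_ingress_class(label_selector="app=demo")
        print(status.status)

asyncio.run(main())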
happinesslz/TANet
2d4b2ab99b8e57c03671b0f1531eab7dca8f3c1f
second.pytorch_with_TANet/torchplus/train/checkpoint.py
python
latest_checkpoint
(model_dir, model_name)
return str(ckpt_file_name)
Return the path of the latest checkpoint in a model_dir Args: model_dir: string, your model dir (holding ckpts, summaries, logs, etc.). model_name: name of your model; ckpts are found by name Returns: path: None if no checkpoint exists, otherwise the latest checkpoint path.
Return the path of the latest checkpoint in a model_dir Args: model_dir: string, your model dir (holding ckpts, summaries, logs, etc.). model_name: name of your model; ckpts are found by name Returns: path: None if no checkpoint exists, otherwise the latest checkpoint path.
[ "return", "path", "of", "latest", "checkpoint", "in", "a", "model_dir", "Args", ":", "model_dir", ":", "string", "indicate", "your", "model", "dir", "(", "save", "ckpts", "summarys", "logs", "etc", ")", ".", "model_name", ":", "name", "of", "your", "model"...
def latest_checkpoint(model_dir, model_name): """Return the path of the latest checkpoint in a model_dir Args: model_dir: string, your model dir (holding ckpts, summaries, logs, etc.). model_name: name of your model; ckpts are found by name Returns: path: None if no checkpoint exists, otherwise the latest checkpoint path. """ ckpt_info_path = Path(model_dir) / "checkpoints.json" if not ckpt_info_path.is_file(): return None with open(ckpt_info_path, 'r') as f: ckpt_dict = json.loads(f.read()) if model_name not in ckpt_dict['latest_ckpt']: return None latest_ckpt = ckpt_dict['latest_ckpt'][model_name] ckpt_file_name = Path(model_dir) / latest_ckpt if not ckpt_file_name.is_file(): return None return str(ckpt_file_name)
[ "def", "latest_checkpoint", "(", "model_dir", ",", "model_name", ")", ":", "ckpt_info_path", "=", "Path", "(", "model_dir", ")", "/", "\"checkpoints.json\"", "if", "not", "ckpt_info_path", ".", "is_file", "(", ")", ":", "return", "None", "with", "open", "(", ...
https://github.com/happinesslz/TANet/blob/2d4b2ab99b8e57c03671b0f1531eab7dca8f3c1f/second.pytorch_with_TANet/torchplus/train/checkpoint.py#L25-L46
mlcommons/training
4a4d5a0b7efe99c680306b1940749211d4238a84
language_model/tensorflow/bert/modeling.py
python
BertModel.get_sequence_output
(self)
return self.sequence_output
Gets final hidden layer of encoder. Returns: float Tensor of shape [batch_size, seq_length, hidden_size] corresponding to the final hidden of the transformer encoder.
Gets final hidden layer of encoder.
[ "Gets", "final", "hidden", "layer", "of", "encoder", "." ]
def get_sequence_output(self): """Gets final hidden layer of encoder. Returns: float Tensor of shape [batch_size, seq_length, hidden_size] corresponding to the final hidden of the transformer encoder. """ return self.sequence_output
[ "def", "get_sequence_output", "(", "self", ")", ":", "return", "self", ".", "sequence_output" ]
https://github.com/mlcommons/training/blob/4a4d5a0b7efe99c680306b1940749211d4238a84/language_model/tensorflow/bert/modeling.py#L224-L231
Srinivas11789/PcapXray
dc2bd65e94b2031b9203ed68e549110f46f9cd9c
Source/Module/tor_traffic_handle.py
python
torTrafficHandle.get_consensus_data
(self)
[]
def get_consensus_data(self): try: for desc in remote.get_consensus().run(): memory.tor_nodes.append((desc.address, desc.or_port)) except Exception as exc: print("Unable to retrieve the consensus: %s" % exc)
[ "def", "get_consensus_data", "(", "self", ")", ":", "try", ":", "for", "desc", "in", "remote", ".", "get_consensus", "(", ")", ".", "run", "(", ")", ":", "memory", ".", "tor_nodes", ".", "append", "(", "(", "desc", ".", "address", ",", "desc", ".", ...
https://github.com/Srinivas11789/PcapXray/blob/dc2bd65e94b2031b9203ed68e549110f46f9cd9c/Source/Module/tor_traffic_handle.py#L18-L23
msr-fiddle/pipedream
7db6a1c3e64996d5b319faec6ca38cb31bfea1c4
profiler/translation/seq2seq/models/decoder.py
python
RecurrentAttention.__init__
(self, input_size, context_size, hidden_size, num_layers=1, bias=True, batch_first=False, dropout=0)
Constructor for the RecurrentAttention. :param input_size: number of features in input tensor :param context_size: number of features in output from encoder :param hidden_size: internal hidden size :param num_layers: number of layers in LSTM :param bias: enables bias in LSTM layers :param batch_first: if True the model uses (batch,seq,feature) tensors, if false the model uses (seq, batch, feature) :param dropout: probability of dropout
Constructor for the RecurrentAttention.
[ "Constructor", "for", "the", "RecurrentAttention", "." ]
def __init__(self, input_size, context_size, hidden_size, num_layers=1, bias=True, batch_first=False, dropout=0): """ Constructor for the RecurrentAttention. :param input_size: number of features in input tensor :param context_size: number of features in output from encoder :param hidden_size: internal hidden size :param num_layers: number of layers in LSTM :param bias: enables bias in LSTM layers :param batch_first: if True the model uses (batch,seq,feature) tensors, if false the model uses (seq, batch, feature) :param dropout: probability of dropout """ super(RecurrentAttention, self).__init__() self.rnn = nn.LSTM(input_size, hidden_size, num_layers, bias, batch_first) self.attn = BahdanauAttention(hidden_size, context_size, context_size, normalize=True, batch_first=batch_first) self.dropout = nn.Dropout(dropout)
[ "def", "__init__", "(", "self", ",", "input_size", ",", "context_size", ",", "hidden_size", ",", "num_layers", "=", "1", ",", "bias", "=", "True", ",", "batch_first", "=", "False", ",", "dropout", "=", "0", ")", ":", "super", "(", "RecurrentAttention", "...
https://github.com/msr-fiddle/pipedream/blob/7db6a1c3e64996d5b319faec6ca38cb31bfea1c4/profiler/translation/seq2seq/models/decoder.py#L19-L42
dsnopek/anki-sync-server
c80aff695dc1d409cfd815d47a86636b44e3f191
AnkiServer/apps/sync_app.py
python
SimpleUserManager.authenticate
(self, username, password)
return True
Returns True if this username is allowed to connect with this password. False otherwise. Override this to change how users are authenticated.
Returns True if this username is allowed to connect with this password. False otherwise. Override this to change how users are authenticated.
[ "Returns", "True", "if", "this", "username", "is", "allowed", "to", "connect", "with", "this", "password", ".", "False", "otherwise", ".", "Override", "this", "to", "change", "how", "users", "are", "authenticated", "." ]
def authenticate(self, username, password): """ Returns True if this username is allowed to connect with this password. False otherwise. Override this to change how users are authenticated. """ return True
[ "def", "authenticate", "(", "self", ",", "username", ",", "password", ")", ":", "return", "True" ]
https://github.com/dsnopek/anki-sync-server/blob/c80aff695dc1d409cfd815d47a86636b44e3f191/AnkiServer/apps/sync_app.py#L344-L350
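Since the docstring invites overriding, a minimal hedged subclass checking credentials against an in-memory dict (names and data made up):

class DictUserManager(SimpleUserManager):
    """Hypothetical manager: allow only users found in a hard-coded dict."""
    users = {"alice": "s3cret"}

    def authenticate(self, username, password):
        return self.users.get(username) == password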
calthoff/self_taught
1c295df3b02eadb26b6b4c81e48461e268f93d3e
python_ex285.py
python
mad_libs
(mls)
:param mls: String with parts the user should fill out surrounded by double underscores. Underscores cannot be inside a hint, e.g., no __hint_hint__, only __hint__.
:param mls: String with parts the user should fill out surrounded by double underscores. Underscores cannot be inside a hint, e.g., no __hint_hint__, only __hint__.
[ ":", "param", "mls", ":", "String", "with", "parts", "the", "user", "should", "fill", "out", "surrounded", "by", "double", "underscores", ".", "Underscores", "cannot", "be", "inside", "hint", "e", ".", "g", ".", "no", "__hint_hint__", "only", "__hint__", "...
def mad_libs(mls): """ :param mls: String with parts the user should fill out surrounded by double underscores. Underscores cannot be inside a hint, e.g., no __hint_hint__, only __hint__. """ hints = re.findall("__.*?__", mls) if hints is not None: for word in hints: q = "Enter a {}"\ .format(word) new = input(q) mls = mls.replace(word, new, 1) print('\n') mls = mls.replace("\n", "") print(mls) else: print("invalid mls")
[ "def", "mad_libs", "(", "mls", ")", ":", "hints", "=", "re", ".", "findall", "(", "\"__.*?__\"", ",", "mls", ")", "if", "hints", "is", "not", "None", ":", "for", "word", "in", "hints", ":", "q", "=", "\"Enter a {}\"", ".", "format", "(", "word", ")...
https://github.com/calthoff/self_taught/blob/1c295df3b02eadb26b6b4c81e48461e268f93d3e/python_ex285.py#L19-L44
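A quick interactive sketch (story text made up; the function prompts once per __hint__ and relies on re at module level):

import re  # dependency assumed by mad_libs

story = "The __adjective__ __animal__ jumped over the __object__.\n"
mad_libs(story)  # prompts e.g. "Enter a __adjective__", then prints the filled story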
albertz/PyCParser
080a7b0472444fd366006c44fc15e388122989bd
sortedcontainers/sorteddict.py
python
ItemsView.__or__
(self, that)
return SortedSet(self._view | that)
Return a SortedSet of the union of self and *that*.
Return a SortedSet of the union of self and *that*.
[ "Return", "a", "SortedSet", "of", "the", "union", "of", "self", "and", "*", "that", "*", "." ]
def __or__(self, that): """Return a SortedSet of the union of self and *that*.""" return SortedSet(self._view | that)
[ "def", "__or__", "(", "self", ",", "that", ")", ":", "return", "SortedSet", "(", "self", ".", "_view", "|", "that", ")" ]
https://github.com/albertz/PyCParser/blob/080a7b0472444fd366006c44fc15e388122989bd/sortedcontainers/sorteddict.py#L723-L725
joke2k/faker
0ebe46fc9b9793fe315cf0fce430258ce74df6f8
faker/providers/ssn/et_EE/__init__.py
python
Provider.ssn
(self, min_age: int = 16, max_age: int = 90)
return ik + str(checksum([int(ch) for ch in ik]))
Returns 11 character Estonian personal identity code (isikukood, IK). Age of person is between 16 and 90 years, based on local computer date. This function assigns random sex to person. An Estonian Personal identification code consists of 11 digits, generally given without any whitespace or other delimiters. The form is GYYMMDDSSSC, where G shows sex and century of birth (odd number male, even number female, 1-2 19th century, 3-4 20th century, 5-6 21st century), SSS is a serial number separating persons born on the same date and C a checksum. https://en.wikipedia.org/wiki/National_identification_number#Estonia
Returns 11 character Estonian personal identity code (isikukood, IK).
[ "Returns", "11", "character", "Estonian", "personal", "identity", "code", "(", "isikukood", "IK", ")", "." ]
def ssn(self, min_age: int = 16, max_age: int = 90) -> str: """ Returns 11 character Estonian personal identity code (isikukood, IK). Age of person is between 16 and 90 years, based on local computer date. This function assigns random sex to person. An Estonian Personal identification code consists of 11 digits, generally given without any whitespace or other delimiters. The form is GYYMMDDSSSC, where G shows sex and century of birth (odd number male, even number female, 1-2 19th century, 3-4 20th century, 5-6 21st century), SSS is a serial number separating persons born on the same date and C a checksum. https://en.wikipedia.org/wiki/National_identification_number#Estonia """ age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365)) birthday = datetime.date.today() - age if birthday.year < 2000: ik = self.generator.random.choice(("3", "4")) elif birthday.year < 2100: ik = self.generator.random.choice(("5", "6")) else: ik = self.generator.random.choice(("7", "8")) ik += "%02d%02d%02d" % ((birthday.year % 100), birthday.month, birthday.day) ik += str(self.generator.random.randrange(0, 999)).zfill(3) return ik + str(checksum([int(ch) for ch in ik]))
[ "def", "ssn", "(", "self", ",", "min_age", ":", "int", "=", "16", ",", "max_age", ":", "int", "=", "90", ")", "->", "str", ":", "age", "=", "datetime", ".", "timedelta", "(", "days", "=", "self", ".", "generator", ".", "random", ".", "randrange", ...
https://github.com/joke2k/faker/blob/0ebe46fc9b9793fe315cf0fce430258ce74df6f8/faker/providers/ssn/et_EE/__init__.py#L35-L61
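A hedged usage sketch via the public Faker API:

from faker import Faker

fake = Faker("et_EE")
ik = fake.ssn(min_age=18, max_age=65)
print(ik)  # an 11-digit string of the form GYYMMDDSSSC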
earthgecko/skyline
12754424de72593e29eb21009fb1ae3f07f3abff
skyline/mirage/mirage_algorithms.py
python
first_hour_average
(timeseries, second_order_resolution_seconds)
Calculate the simple average over one hour, second order resolution seconds ago. A timeseries is anomalous if the average of the last three datapoints is outside of three standard deviations of this value.
Calculate the simple average over one hour, second order resolution seconds ago. A timeseries is anomalous if the average of the last three datapoints is outside of three standard deviations of this value.
[ "Calcuate", "the", "simple", "average", "over", "one", "hour", "second", "order", "resolution", "seconds", "ago", ".", "A", "timeseries", "is", "anomalous", "if", "the", "average", "of", "the", "last", "three", "datapoints", "are", "outside", "of", "three", ...
def first_hour_average(timeseries, second_order_resolution_seconds): """ Calculate the simple average over one hour, second order resolution seconds ago. A timeseries is anomalous if the average of the last three datapoints is outside of three standard deviations of this value. """ try: last_hour_threshold = time() - (second_order_resolution_seconds - 3600) # @modified 20211127 - Feature #4328: BATCH_METRICS_CUSTOM_FULL_DURATIONS # Calculate the "equivalent" of hour and handle daily frequency data # Handle daily data resolution = (timeseries[-1][0] - timeseries[-2][0]) if resolution > 80000 and resolution < 90000: last_hour_threshold = timeseries[-1][0] - ((resolution * 7) - resolution) series = pandas.Series([x[1] for x in timeseries if x[0] < last_hour_threshold]) mean = (series).mean() stdDev = (series).std() t = tail_avg(timeseries, second_order_resolution_seconds) return abs(t - mean) > 3 * stdDev except: traceback_format_exc_string = traceback.format_exc() algorithm_name = str(get_function_name()) record_algorithm_error(algorithm_name, traceback_format_exc_string) return None
[ "def", "first_hour_average", "(", "timeseries", ",", "second_order_resolution_seconds", ")", ":", "try", ":", "last_hour_threshold", "=", "time", "(", ")", "-", "(", "second_order_resolution_seconds", "-", "3600", ")", "# @modified 20211127 - Feature #4328: BATCH_METRICS_CU...
https://github.com/earthgecko/skyline/blob/12754424de72593e29eb21009fb1ae3f07f3abff/skyline/mirage/mirage_algorithms.py#L185-L210
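A toy numpy illustration of the three-sigma rule the docstring describes (synthetic data, not Skyline itself; tail_avg is approximated here by the mean of the last three points):

import numpy as np

rng = np.random.default_rng(0)
baseline = rng.normal(10, 1, 60)        # one point per minute for the "first hour"
tail = np.array([25.0, 26.0, 24.0])     # three most recent datapoints
anomalous = abs(tail.mean() - baseline.mean()) > 3 * baseline.std()
print(anomalous)  # True: the tail sits far outside the baseline spread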
Source-Python-Dev-Team/Source.Python
d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb
addons/source-python/Python3/pprint.py
python
isreadable
(object)
return _safe_repr(object, {}, None, 0)[1]
Determine if saferepr(object) is readable by eval().
Determine if saferepr(object) is readable by eval().
[ "Determine", "if", "saferepr", "(", "object", ")", "is", "readable", "by", "eval", "()", "." ]
def isreadable(object): """Determine if saferepr(object) is readable by eval().""" return _safe_repr(object, {}, None, 0)[1]
[ "def", "isreadable", "(", "object", ")", ":", "return", "_safe_repr", "(", "object", ",", "{", "}", ",", "None", ",", "0", ")", "[", "1", "]" ]
https://github.com/Source-Python-Dev-Team/Source.Python/blob/d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb/addons/source-python/Python3/pprint.py#L64-L66
researchmm/tasn
5dba8ccc096cedc63913730eeea14a9647911129
tasn-mxnet/3rdparty/tvm/nnvm/python/nnvm/frontend/mxnet.py
python
from_mxnet
(symbol, arg_params=None, aux_params=None)
return sym, params
Convert from MXNet's model into compatible NNVM format. Parameters ---------- symbol : mxnet.Symbol or mxnet.gluon.HybridBlock MXNet symbol arg_params : dict of str to mx.NDArray The argument parameters in mxnet aux_params : dict of str to mx.NDArray The auxiliary parameters in mxnet Returns ------- sym : nnvm.Symbol Compatible nnvm symbol params : dict of str to tvm.NDArray The parameter dict to be used by nnvm
Convert from MXNet's model into compatible NNVM format.
[ "Convert", "from", "MXNet", "s", "model", "into", "compatible", "NNVM", "format", "." ]
def from_mxnet(symbol, arg_params=None, aux_params=None): """Convert from MXNet's model into compatible NNVM format. Parameters ---------- symbol : mxnet.Symbol or mxnet.gluon.HybridBlock MXNet symbol arg_params : dict of str to mx.NDArray The argument parameters in mxnet aux_params : dict of str to mx.NDArray The auxiliary parameters in mxnet Returns ------- sym : nnvm.Symbol Compatible nnvm symbol params : dict of str to tvm.NDArray The parameter dict to be used by nnvm """ try: import mxnet as mx except ImportError as e: raise ImportError('{}. MXNet is required to parse symbols.'.format(e)) if isinstance(symbol, mx.sym.Symbol): sym = _from_mxnet_impl(symbol, {}) params = {} arg_params = arg_params if arg_params else {} aux_params = aux_params if aux_params else {} for k, v in arg_params.items(): params[k] = tvm.nd.array(v.asnumpy()) for k, v in aux_params.items(): params[k] = tvm.nd.array(v.asnumpy()) elif isinstance(symbol, mx.gluon.HybridBlock): data = mx.sym.Variable('data') sym = symbol(data) sym = _from_mxnet_impl(sym, {}) params = {} for k, v in symbol.collect_params().items(): params[k] = tvm.nd.array(v.data().asnumpy()) elif isinstance(symbol, mx.gluon.Block): raise NotImplementedError("Only Hybrid Blocks are supported now.") else: msg = "mxnet.Symbol or gluon.HybridBlock expected, got {}".format(type(symbol)) raise ValueError(msg) if isinstance(sym, list): sym = _sym.Group(sym) return sym, params
[ "def", "from_mxnet", "(", "symbol", ",", "arg_params", "=", "None", ",", "aux_params", "=", "None", ")", ":", "try", ":", "import", "mxnet", "as", "mx", "except", "ImportError", "as", "e", ":", "raise", "ImportError", "(", "'{}. MXNet is required to parse symb...
https://github.com/researchmm/tasn/blob/5dba8ccc096cedc63913730eeea14a9647911129/tasn-mxnet/3rdparty/tvm/nnvm/python/nnvm/frontend/mxnet.py#L413-L463
arskom/spyne
88b8e278335f03c7e615b913d6dabc2b8141730e
spyne/auxproc/_base.py
python
AuxProcBase.__init__
(self, process_exceptions=False)
Abstract Base class shared by all AuxProcs. :param process_exceptions: If false, does not execute auxiliary methods when the main method throws an exception.
Abstract Base class shared by all AuxProcs.
[ "Abstract", "Base", "class", "shared", "by", "all", "AuxProcs", "." ]
def __init__(self, process_exceptions=False): """Abstract Base class shared by all AuxProcs. :param process_exceptions: If false, does not execute auxiliary methods when the main method throws an exception. """ self.methods = [] self.process_exceptions = process_exceptions
[ "def", "__init__", "(", "self", ",", "process_exceptions", "=", "False", ")", ":", "self", ".", "methods", "=", "[", "]", "self", ".", "process_exceptions", "=", "process_exceptions" ]
https://github.com/arskom/spyne/blob/88b8e278335f03c7e615b913d6dabc2b8141730e/spyne/auxproc/_base.py#L42-L50
edwardlib/observations
2c8b1ac31025938cb17762e540f2f592e302d5de
observations/r/mofa.py
python
mofa
(path)
return x_train, metadata
International Expansion of U.S. Mofa's (majority–owned Foreign Affiliates) Expansion of U.S. Mofa's (majority–owned Foreign Affiliates) in Fire (finance, Insurance and Real Estate) a cross-section from 1982 *number of observations* : 50 *observation* : country *country* : United States A dataframe containing : capexp capital expenditures made by the MOFA's of nonbank U.S. corporations in finance, insurance and real estate. Source: "U.S. Direct Investment Abroad: 1982 Benchmark Survey data." Table III.C 6. gdp gross domestic product. Source: "World Bank, World Development Report 1984." Table 3. (This variable is scaled by a factor of 1/100,000) sales sales made by the majority owned foreign affiliates of nonbank U.S. parents in finance, insurance and real estate. Source: "U.S. Direct Investment Abroad: 1982 Benchmark Survey Data." Table III.D 3. (This variable is scaled by a factor of 1/100) nbaf the number of U.S. affiliates in the host country. Source: "U.S. Direct Investment Abroad: 1982 Benchmark Survey Data." Table 5. (This variable is scaled by a factor of 1/100) netinc net income earned by MOFA's of nonbank U.S. corporations operating in the nonbanking financial sector of the host country. Source: "U.S. Direct Investment Abroad: 1982 Benchmark Survey Data." Table III.D 6.(This variable is scaled by a factor of 1/10) Ioannatos, Petros E. (1995) “Censored regression estimation under unobserved heterogeneity : a stochastic parameter approach”, *Journal of Business and Economics Statistics*, **13(3)**, july, 327–335. Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `mofa.csv`. Returns: Tuple of np.ndarray `x_train` with 50 rows and 5 columns and dictionary `metadata` of column headers (feature names).
International Expansion of U.S. Mofa's (majority–owned Foreign Affiliates) Expansion of U.S. Mofa's (majority–owned Foreign Affiliates) in Fire (finance, Insurance and Real Estate)
[ "International", "Expansion", "of", "U", ".", "S", ".", "Mofa", "s", "(", "majority–owned", "Foreign", "Affiliates", ")", "Expansion", "of", "U", ".", "S", ".", "Mofa", "s", "(", "majority–owned", "Foreign", "Affiliates", ")", "in", "Fire", "(", "finance",...
def mofa(path): """International Expansion of U.S. Mofa's (majority–owned Foreign Affiliates) Expansion of U.S. Mofa's (majority–owned Foreign Affiliates) in Fire (finance, Insurance and Real Estate) a cross-section from 1982 *number of observations* : 50 *observation* : country *country* : United States A dataframe containing : capexp capital expenditures made by the MOFA's of nonbank U.S. corporations in finance, insurance and real estate. Source: "U.S. Direct Investment Abroad: 1982 Benchmark Survey data." Table III.C 6. gdp gross domestic product. Source: "World Bank, World Development Report 1984." Table 3. (This variable is scaled by a factor of 1/100,000) sales sales made by the majority owned foreign affiliates of nonbank U.S. parents in finance, insurance and real estate. Source: "U.S. Direct Investment Abroad: 1982 Benchmark Survey Data." Table III.D 3. (This variable is scaled by a factor of 1/100) nbaf the number of U.S. affiliates in the host country. Source: "U.S. Direct Investment Abroad: 1982 Benchmark Survey Data." Table 5. (This variable is scaled by a factor of 1/100) netinc net income earned by MOFA's of nonbank U.S. corporations operating in the nonbanking financial sector of the host country. Source: "U.S. Direct Investment Abroad: 1982 Benchmark Survey Data." Table III.D 6.(This variable is scaled by a factor of 1/10) Ioannatos, Petros E. (1995) “Censored regression estimation under unobserved heterogeneity : a stochastic parameter approach”, *Journal of Business and Economics Statistics*, **13(3)**, july, 327–335. Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `mofa.csv`. Returns: Tuple of np.ndarray `x_train` with 50 rows and 5 columns and dictionary `metadata` of column headers (feature names). """ import pandas as pd path = os.path.expanduser(path) filename = 'mofa.csv' if not os.path.exists(os.path.join(path, filename)): url = 'http://dustintran.com/data/r/Ecdat/Mofa.csv' maybe_download_and_extract(path, url, save_file_name='mofa.csv', resume=False) data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train = data.values metadata = {'columns': data.columns} return x_train, metadata
[ "def", "mofa", "(", "path", ")", ":", "import", "pandas", "as", "pd", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "filename", "=", "'mofa.csv'", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", ...
https://github.com/edwardlib/observations/blob/2c8b1ac31025938cb17762e540f2f592e302d5de/observations/r/mofa.py#L14-L85
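A hedged usage sketch (assuming the observations package re-exports mofa at the top level, as it does for its other dataset loaders; the data directory is a placeholder):

from observations import mofa

# Downloads mofa.csv into the directory on first use, then loads it.
x_train, metadata = mofa('~/data')
print(x_train.shape)        # (50, 5)
print(metadata['columns'])  # capexp, gdp, sales, nbaf, netinc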
fusionbox/django-authtools
6ea614ed2bba56cd8fa7209896b0e20cba45b367
authtools/forms.py
python
FriendlyPasswordResetForm.clean_email
(self)
return email
Return an error message if the email address being reset is unknown. This is to revert https://code.djangoproject.com/ticket/19758 The bug #19758 tries not to leak emails through password reset because only usernames are unique in Django's default user model. django-authtools leaks email addresses through the registration form. In the case of django-authtools, not warning the user doesn't add any security, and it worsens the user experience.
Return an error message if the email address being reset is unknown.
[ "Return", "an", "error", "message", "if", "the", "email", "address", "being", "reset", "is", "unknown", "." ]
def clean_email(self): """Return an error message if the email address being reset is unknown. This is to revert https://code.djangoproject.com/ticket/19758 The bug #19758 tries not to leak emails through password reset because only usernames are unique in Django's default user model. django-authtools leaks email addresses through the registration form. In the case of django-authtools, not warning the user doesn't add any security, and it worsens the user experience. """ email = self.cleaned_data['email'] results = list(self.get_users(email)) if not results: raise forms.ValidationError(self.error_messages['unknown']) return email
[ "def", "clean_email", "(", "self", ")", ":", "email", "=", "self", ".", "cleaned_data", "[", "'email'", "]", "results", "=", "list", "(", "self", ".", "get_users", "(", "email", ")", ")", "if", "not", "results", ":", "raise", "forms", ".", "ValidationE...
https://github.com/fusionbox/django-authtools/blob/6ea614ed2bba56cd8fa7209896b0e20cba45b367/authtools/forms.py#L155-L172
nanoporetech/medaka
2b83074fe3b6a6ec971614bfc6804f543fe1e5f0
medaka/__init__.py
python
check_htslib_tool_version
(tool, pos=2)
return version
Get version of an htslib program. :param tool: program name. :param pos: the position index of the item containing the version information. e.g. `pos=2` extracts `1.3.1` from `tabix (htslib) 1.3.1`. :returns: the `LooseVersion` number.
Get version of an htslib program.
[ "Get", "version", "of", "an", "htslib", "program", "." ]
def check_htslib_tool_version(tool, pos=2): """Get version of an htslib program. :param tool: program name. :param pos: the position index of the item containing the version information. e.g. `pos=2` extracts `1.3.1` from `tabix (htslib) 1.3.1`. :returns: the `LooseVersion` number. """ try: proc = subprocess.run([tool, "--version"], stdout=subprocess.PIPE) if proc.returncode != 0: return None # tabix (htslib) 1.3.1\n... first_line = proc.stdout.decode().split("\n", 1)[0] version = first_line.split()[pos] version = LooseVersion(version) except Exception: return None return version
[ "def", "check_htslib_tool_version", "(", "tool", ",", "pos", "=", "2", ")", ":", "try", ":", "proc", "=", "subprocess", ".", "run", "(", "[", "tool", ",", "\"--version\"", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "if", "proc", ".", "r...
https://github.com/nanoporetech/medaka/blob/2b83074fe3b6a6ec971614bfc6804f543fe1e5f0/medaka/__init__.py#L25-L46
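A usage sketch; the tools must be on PATH for a non-None result, and the samtools call assumes its version string puts the number at index 1 (as in "samtools 1.9"):

from medaka import check_htslib_tool_version

# "tabix (htslib) 1.9" -> version token at index 2 (the default pos)
print(check_htslib_tool_version('tabix'))            # e.g. LooseVersion('1.9'), or None
# "samtools 1.9" -> version token at index 1
print(check_htslib_tool_version('samtools', pos=1))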
blackye/lalascan
e35726e6648525eb47493e39ee63a2a906dbb4b2
thirdparty_libs/tldextract/tldextract.py
python
ExtractResult._make
(cls, iterable, new=tuple.__new__, len=len)
return result
Make a new ExtractResult object from a sequence or iterable
Make a new ExtractResult object from a sequence or iterable
[ "Make", "a", "new", "ExtractResult", "object", "from", "a", "sequence", "or", "iterable" ]
def _make(cls, iterable, new=tuple.__new__, len=len): 'Make a new ExtractResult object from a sequence or iterable' result = new(cls, iterable) if len(result) != 3: raise TypeError('Expected 3 arguments, got %d' % len(result)) return result
[ "def", "_make", "(", "cls", ",", "iterable", ",", "new", "=", "tuple", ".", "__new__", ",", "len", "=", "len", ")", ":", "result", "=", "new", "(", "cls", ",", "iterable", ")", "if", "len", "(", "result", ")", "!=", "3", ":", "raise", "TypeError"...
https://github.com/blackye/lalascan/blob/e35726e6648525eb47493e39ee63a2a906dbb4b2/thirdparty_libs/tldextract/tldextract.py#L77-L82
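This mirrors the namedtuple _make classmethod, so the result behaves like a 3-tuple; a sketch (assuming ExtractResult is importable from the module in this record):

from tldextract.tldextract import ExtractResult

res = ExtractResult._make(('forums', 'bbc', 'co.uk'))
print(tuple(res))                # ('forums', 'bbc', 'co.uk')
ExtractResult._make(('a', 'b'))  # raises TypeError: Expected 3 arguments, got 2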
dtmilano/AndroidViewClient
421b86e3f1a57683557fc0173951cd0332ab43f4
src/com/dtmilano/android/viewclient.py
python
UiAutomator2AndroidViewClient.EndElement
(self, name)
Expat end element event handler
Expat end element event handler
[ "Expat", "end", "element", "event", "handler" ]
def EndElement(self, name): ''' Expat end element event handler ''' if name == 'hierarchy': pass elif name == 'node': self.nodeStack.pop()
[ "def", "EndElement", "(", "self", ",", "name", ")", ":", "if", "name", "==", "'hierarchy'", ":", "pass", "elif", "name", "==", "'node'", ":", "self", ".", "nodeStack", ".", "pop", "(", ")" ]
https://github.com/dtmilano/AndroidViewClient/blob/421b86e3f1a57683557fc0173951cd0332ab43f4/src/com/dtmilano/android/viewclient.py#L2369-L2377
bugy/script-server
9a57ce15903c81bcb537b872f1330ee55ba31563
src/execution/process_base.py
python
ProcessWrapper.cleanup
(self)
[]
def cleanup(self): self.output_stream.dispose()
[ "def", "cleanup", "(", "self", ")", ":", "self", ".", "output_stream", ".", "dispose", "(", ")" ]
https://github.com/bugy/script-server/blob/9a57ce15903c81bcb537b872f1330ee55ba31563/src/execution/process_base.py#L117-L118
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/email/contentmanager.py
python
ContentManager.add_get_handler
(self, key, handler)
[]
def add_get_handler(self, key, handler): self.get_handlers[key] = handler
[ "def", "add_get_handler", "(", "self", ",", "key", ",", "handler", ")", ":", "self", ".", "get_handlers", "[", "key", "]", "=", "handler" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/email/contentmanager.py#L13-L14
kamalgill/flask-appengine-template
11760f83faccbb0d0afe416fc58e67ecfb4643c2
src/lib/werkzeug/wsgi.py
python
SharedDataMiddleware.__call__
(self, environ, start_response)
return wrap_file(environ, f)
[]
def __call__(self, environ, start_response): cleaned_path = get_path_info(environ) if PY2: cleaned_path = cleaned_path.encode(get_filesystem_encoding()) # sanitize the path for non unix systems cleaned_path = cleaned_path.strip('/') for sep in os.sep, os.altsep: if sep and sep != '/': cleaned_path = cleaned_path.replace(sep, '/') path = '/' + '/'.join(x for x in cleaned_path.split('/') if x and x != '..') file_loader = None for search_path, loader in iteritems(self.exports): if search_path == path: real_filename, file_loader = loader(None) if file_loader is not None: break if not search_path.endswith('/'): search_path += '/' if path.startswith(search_path): real_filename, file_loader = loader(path[len(search_path):]) if file_loader is not None: break if file_loader is None or not self.is_allowed(real_filename): return self.app(environ, start_response) guessed_type = mimetypes.guess_type(real_filename) mime_type = guessed_type[0] or self.fallback_mimetype f, mtime, file_size = file_loader() headers = [('Date', http_date())] if self.cache: timeout = self.cache_timeout etag = self.generate_etag(mtime, file_size, real_filename) headers += [ ('Etag', '"%s"' % etag), ('Cache-Control', 'max-age=%d, public' % timeout) ] if not is_resource_modified(environ, etag, last_modified=mtime): f.close() start_response('304 Not Modified', headers) return [] headers.append(('Expires', http_date(time() + timeout))) else: headers.append(('Cache-Control', 'public')) headers.extend(( ('Content-Type', mime_type), ('Content-Length', str(file_size)), ('Last-Modified', http_date(mtime)) )) start_response('200 OK', headers) return wrap_file(environ, f)
[ "def", "__call__", "(", "self", ",", "environ", ",", "start_response", ")", ":", "cleaned_path", "=", "get_path_info", "(", "environ", ")", "if", "PY2", ":", "cleaned_path", "=", "cleaned_path", ".", "encode", "(", "get_filesystem_encoding", "(", ")", ")", "...
https://github.com/kamalgill/flask-appengine-template/blob/11760f83faccbb0d0afe416fc58e67ecfb4643c2/src/lib/werkzeug/wsgi.py#L576-L628
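The canonical use of this middleware is to wrap an existing WSGI app with an export map; a sketch with a placeholder directory:

from werkzeug.wsgi import SharedDataMiddleware

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

# Requests under /static are served from the directory; anything else
# falls through to the wrapped application.
app = SharedDataMiddleware(app, {'/static': '/path/to/static/files'})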
rubys/venus
9de21094a8cf565bdfcf75688e121a5ad1f5397b
planet/vendor/compat_logging/__init__.py
python
Logger.isEnabledFor
(self, level)
return level >= self.getEffectiveLevel()
Is this logger enabled for level 'level'?
Is this logger enabled for level 'level'?
[ "Is", "this", "logger", "enabled", "for", "level", "level", "?" ]
def isEnabledFor(self, level): """ Is this logger enabled for level 'level'? """ if self.manager.disable >= level: return 0 return level >= self.getEffectiveLevel()
[ "def", "isEnabledFor", "(", "self", ",", "level", ")", ":", "if", "self", ".", "manager", ".", "disable", ">=", "level", ":", "return", "0", "return", "level", ">=", "self", ".", "getEffectiveLevel", "(", ")" ]
https://github.com/rubys/venus/blob/9de21094a8cf565bdfcf75688e121a5ad1f5397b/planet/vendor/compat_logging/__init__.py#L1061-L1067
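The usual application is to guard expensive log-message construction; the stdlib logging API matches this backport (which returns 0 rather than False when the manager disables the level):

import logging

def expensive_state_dump():
    return 'big string'  # stand-in for costly work

logger = logging.getLogger('planet')
logger.setLevel(logging.INFO)

if logger.isEnabledFor(logging.DEBUG):  # False here, so the dump is skipped
    logger.debug('state dump: %s', expensive_state_dump())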
chribsen/simple-machine-learning-examples
dc94e52a4cebdc8bb959ff88b81ff8cfeca25022
venv/lib/python2.7/site-packages/pip/wheel.py
python
Wheel.__init__
(self, filename)
:raises InvalidWheelFilename: when the filename is invalid for a wheel
:raises InvalidWheelFilename: when the filename is invalid for a wheel
[ ":", "raises", "InvalidWheelFilename", ":", "when", "the", "filename", "is", "invalid", "for", "a", "wheel" ]
def __init__(self, filename): """ :raises InvalidWheelFilename: when the filename is invalid for a wheel """ wheel_info = self.wheel_file_re.match(filename) if not wheel_info: raise InvalidWheelFilename( "%s is not a valid wheel filename." % filename ) self.filename = filename self.name = wheel_info.group('name').replace('_', '-') # we'll assume "_" means "-" due to wheel naming scheme # (https://github.com/pypa/pip/issues/1150) self.version = wheel_info.group('ver').replace('_', '-') self.pyversions = wheel_info.group('pyver').split('.') self.abis = wheel_info.group('abi').split('.') self.plats = wheel_info.group('plat').split('.') # All the tag combinations from this file self.file_tags = set( (x, y, z) for x in self.pyversions for y in self.abis for z in self.plats )
[ "def", "__init__", "(", "self", ",", "filename", ")", ":", "wheel_info", "=", "self", ".", "wheel_file_re", ".", "match", "(", "filename", ")", "if", "not", "wheel_info", ":", "raise", "InvalidWheelFilename", "(", "\"%s is not a valid wheel filename.\"", "%", "f...
https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/pip/wheel.py#L624-L646
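A parsing sketch; the import path is the old internal pip module this record comes from, not a stable public API:

from pip.wheel import Wheel

w = Wheel('pip-8.1.1-py2.py3-none-any.whl')
print(w.name)        # 'pip'
print(w.version)     # '8.1.1'
print(w.pyversions)  # ['py2', 'py3']
print(w.file_tags)   # {('py2', 'none', 'any'), ('py3', 'none', 'any')}

Wheel('not-a-wheel.zip')  # raises InvalidWheelFilename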
qibinlou/SinaWeibo-Emotion-Classification
f336fc104abd68b0ec4180fe2ed80fafe49cb790
nltk/probability.py
python
FreqDist.samples
(self)
return self.keys()
Return a list of all samples that have been recorded as outcomes by this frequency distribution. Use ``fd[sample]`` to determine the count for each sample. :rtype: list
Return a list of all samples that have been recorded as outcomes by this frequency distribution. Use ``fd[sample]`` to determine the count for each sample.
[ "Return", "a", "list", "of", "all", "samples", "that", "have", "been", "recorded", "as", "outcomes", "by", "this", "frequency", "distribution", ".", "Use", "fd", "[", "sample", "]", "to", "determine", "the", "count", "for", "each", "sample", "." ]
def samples(self): """ Return a list of all samples that have been recorded as outcomes by this frequency distribution. Use ``fd[sample]`` to determine the count for each sample. :rtype: list """ return self.keys()
[ "def", "samples", "(", "self", ")", ":", "return", "self", ".", "keys", "(", ")" ]
https://github.com/qibinlou/SinaWeibo-Emotion-Classification/blob/f336fc104abd68b0ec4180fe2ed80fafe49cb790/nltk/probability.py#L162-L170
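A usage sketch against the old NLTK API this record comes from, where samples() is just the dict keys (newer NLTK versions removed the method):

from nltk.probability import FreqDist

fd = FreqDist('abracadabra')  # counts the characters of the string
print(fd['a'])                # 5
print(sorted(fd.samples()))   # ['a', 'b', 'c', 'd', 'r']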
pwndbg/pwndbg
136b3b6a80d94f494dcb00a614af1c24ca706700
pwndbg/memory.py
python
Page.__hash__
(self)
return hash((self.vaddr, self.memsz, self.flags, self.offset, self.objfile))
[]
def __hash__(self): return hash((self.vaddr, self.memsz, self.flags, self.offset, self.objfile))
[ "def", "__hash__", "(", "self", ")", ":", "return", "hash", "(", "(", "self", ".", "vaddr", ",", "self", ".", "memsz", ",", "self", ".", "flags", ",", "self", ".", "offset", ",", "self", ".", "objfile", ")", ")" ]
https://github.com/pwndbg/pwndbg/blob/136b3b6a80d94f494dcb00a614af1c24ca706700/pwndbg/memory.py#L467-L468
nlloyd/SubliminalCollaborator
5c619e17ddbe8acb9eea8996ec038169ddcd50a1
libs/twisted/mail/imap4.py
python
IMailboxInfo.getHierarchicalDelimiter
()
Get the character which delimits namespaces in this mailbox. @rtype: C{str}
Get the character which delimits namespaces in this mailbox.
[ "Get", "the", "character", "which", "delimits", "namespaces", "in", "this", "mailbox", "." ]
def getHierarchicalDelimiter(): """Get the character which delimits namespaces in this mailbox. @rtype: C{str} """
[ "def", "getHierarchicalDelimiter", "(", ")", ":" ]
https://github.com/nlloyd/SubliminalCollaborator/blob/5c619e17ddbe8acb9eea8996ec038169ddcd50a1/libs/twisted/mail/imap4.py#L5401-L5405
nonebot/aiocqhttp
eaa850e8d7432e04394194b3d82bb88570390732
aiocqhttp/utils.py
python
ensure_async
(func: Callable[..., Any])
Ensure the callable `func` is an async function; if not, wrap it with `run_sync` so it runs in the default asyncio executor.
Ensure the callable `func` is an async function; if not, wrap it with `run_sync` so it runs in the default asyncio executor.
[ "Ensure", "the", "callable", "func", "is", "an", "async", "function", ";", "if", "not", ",", "wrap", "it", "with", "run_sync", "so", "it", "runs", "in", "the", "default", "asyncio", "executor", "." ]
def ensure_async(func: Callable[..., Any]) -> Callable[..., Awaitable[Any]]: """ Ensure the callable `func` is an async function; if not, wrap it with `run_sync` so it runs in the default asyncio executor. """ if asyncio.iscoroutinefunction(func): return func else: return run_sync(func)
[ "def", "ensure_async", "(", "func", ":", "Callable", "[", "...", ",", "Any", "]", ")", "->", "Callable", "[", "...", ",", "Awaitable", "[", "Any", "]", "]", ":", "if", "asyncio", ".", "iscoroutinefunction", "(", "func", ")", ":", "return", "func", "e...
https://github.com/nonebot/aiocqhttp/blob/eaa850e8d7432e04394194b3d82bb88570390732/aiocqhttp/utils.py#L11-L19
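A sketch of how the helper behaves for sync vs. async callables (module path as in this record):

import asyncio
from aiocqhttp.utils import ensure_async

def add(x, y):  # plain sync function
    return x + y

async def main():
    fn = ensure_async(add)  # wrapped via run_sync, runs in the default executor
    print(await fn(1, 2))   # 3

    async def aadd(x, y):
        return x + y
    assert ensure_async(aadd) is aadd  # coroutine functions pass through unchanged

asyncio.run(main())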
microsoft/debugpy
be8dd607f6837244e0b565345e497aff7a0c08bf
src/debugpy/_vendored/pydevd/third_party/pep8/autopep8.py
python
create_parser
()
return parser
Return command-line parser.
Return command-line parser.
[ "Return", "command", "-", "line", "parser", "." ]
def create_parser(): """Return command-line parser.""" # Do import locally to be friendly to those who use autopep8 as a library # and are supporting Python 2.6. import argparse parser = argparse.ArgumentParser(description=docstring_summary(__doc__), prog='autopep8') parser.add_argument('--version', action='version', version='%(prog)s {0} ({1})'.format( __version__, _get_package_version())) parser.add_argument('-v', '--verbose', action='count', default=0, help='print verbose messages; ' 'multiple -v result in more verbose messages') parser.add_argument('-d', '--diff', action='store_true', help='print the diff for the fixed source') parser.add_argument('-i', '--in-place', action='store_true', help='make changes to files in place') parser.add_argument('--global-config', metavar='filename', default=DEFAULT_CONFIG, help='path to a global pep8 config file; if this file ' 'does not exist then this is ignored ' '(default: {0})'.format(DEFAULT_CONFIG)) parser.add_argument('--ignore-local-config', action='store_true', help="don't look for and apply local config files; " 'if not passed, defaults are updated with any ' "config files in the project's root directory") parser.add_argument('-r', '--recursive', action='store_true', help='run recursively over directories; ' 'must be used with --in-place or --diff') parser.add_argument('-j', '--jobs', type=int, metavar='n', default=1, help='number of parallel jobs; ' 'match CPU count if value is less than 1') parser.add_argument('-p', '--pep8-passes', metavar='n', default=-1, type=int, help='maximum number of additional pep8 passes ' '(default: infinite)') parser.add_argument('-a', '--aggressive', action='count', default=0, help='enable non-whitespace changes; ' 'multiple -a result in more aggressive changes') parser.add_argument('--experimental', action='store_true', help='enable experimental fixes') parser.add_argument('--exclude', metavar='globs', help='exclude file/directory names that match these ' 'comma-separated globs') parser.add_argument('--list-fixes', action='store_true', help='list codes for fixes; ' 'used by --ignore and --select') parser.add_argument('--ignore', metavar='errors', default='', help='do not fix these errors/warnings ' '(default: {0})'.format(DEFAULT_IGNORE)) parser.add_argument('--select', metavar='errors', default='', help='fix only these errors/warnings (e.g. E4,W)') parser.add_argument('--max-line-length', metavar='n', default=79, type=int, help='set maximum allowed line length ' '(default: %(default)s)') parser.add_argument('--line-range', '--range', metavar='line', default=None, type=int, nargs=2, help='only fix errors found within this inclusive ' 'range of line numbers (e.g. 1 99); ' 'line numbers are indexed at 1') parser.add_argument('--indent-size', default=DEFAULT_INDENT_SIZE, type=int, help=argparse.SUPPRESS) parser.add_argument('files', nargs='*', help="files to format or '-' for standard in") return parser
[ "def", "create_parser", "(", ")", ":", "# Do import locally to be friendly to those who use autopep8 as a library", "# and are supporting Python 2.6.", "import", "argparse", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "docstring_summary", "(", "__...
https://github.com/microsoft/debugpy/blob/be8dd607f6837244e0b565345e497aff7a0c08bf/src/debugpy/_vendored/pydevd/third_party/pep8/autopep8.py#L3189-L3256
airbnb/streamalert
26cf1d08432ca285fd4f7410511a6198ca104bbb
streamalert/apps/config.py
python
AppConfig._state_name
(self)
return '{}_{}'.format(self.function_name, self.STATE_CONFIG_SUFFIX)
The name of the state parameter in ssm
The name of the state parameter in ssm
[ "The", "name", "of", "the", "state", "parameter", "in", "ssm" ]
def _state_name(self): """The name of the state parameter in ssm""" return '{}_{}'.format(self.function_name, self.STATE_CONFIG_SUFFIX)
[ "def", "_state_name", "(", "self", ")", ":", "return", "'{}_{}'", ".", "format", "(", "self", ".", "function_name", ",", "self", ".", "STATE_CONFIG_SUFFIX", ")" ]
https://github.com/airbnb/streamalert/blob/26cf1d08432ca285fd4f7410511a6198ca104bbb/streamalert/apps/config.py#L104-L106
nipy/nipy
d16d268938dcd5c15748ca051532c21f57cf8a22
examples/labs/need_data/get_data_light.py
python
get_first_level_dataset
()
return DATA_DIR
Heavier dataset (30 MO) for first-level analysis
Heavier dataset (30 MO) for first-level analysis
[ "Heavier", "dataset", "(", "30", "MO", ")", "for", "first", "-", "level", "analysis" ]
def get_first_level_dataset(): """ Heavier dataset (30 MO) for first-level analysis """ # define several paths url = 'ftp://ftp.cea.fr/pub/dsv/madic/download/nipy' raw_fmri = os.path.join(DATA_DIR, 's12069_swaloc1_corr.nii.gz') paradigm = os.path.join(DATA_DIR, 'localizer_paradigm.csv') # create DATA_DIR if not os.path.exists(DATA_DIR): os.makedirs(DATA_DIR) assert os.path.exists(DATA_DIR) # download the paradigm file if necessary if not os.path.exists(paradigm): print('Downloading the paradigm file, this may take time') datafile = os.path.join(url, 'localizer_paradigm.csv') fp = urlopen(datafile) local_file = open(paradigm, 'wb') local_file.write(fp.read()) local_file.flush() local_file.close() # download raw_fmri if necessary if not os.path.exists(raw_fmri): print('Downloading fmri image, this may take time') filename = 's12069_swaloc1_corr.nii.gz' datafile = os.path.join(url, filename) fp = urlopen(datafile) local_file = open(raw_fmri, 'wb') local_file.write(fp.read()) local_file.flush() local_file.close() return DATA_DIR
[ "def", "get_first_level_dataset", "(", ")", ":", "# define several paths", "url", "=", "'ftp://ftp.cea.fr/pub/dsv/madic/download/nipy'", "raw_fmri", "=", "os", ".", "path", ".", "join", "(", "DATA_DIR", ",", "'s12069_swaloc1_corr.nii.gz'", ")", "paradigm", "=", "os", ...
https://github.com/nipy/nipy/blob/d16d268938dcd5c15748ca051532c21f57cf8a22/examples/labs/need_data/get_data_light.py#L77-L111
pikpikcu/Pentest-Tools-Framework
cd6e6107764a809943dc4e073cde8149c1a2cd03
modules/dirsearch/thirdparty/requests/packages/urllib3/filepost.py
python
encode_multipart_formdata
(fields, boundary=None)
return body.getvalue(), content_type
Encode a dictionary of ``fields`` using the multipart/form-data MIME format. :param fields: Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`). :param boundary: If not specified, then a random boundary will be generated using :func:`mimetools.choose_boundary`.
Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
[ "Encode", "a", "dictionary", "of", "fields", "using", "the", "multipart", "/", "form", "-", "data", "MIME", "format", "." ]
def encode_multipart_formdata(fields, boundary=None): """ Encode a dictionary of ``fields`` using the multipart/form-data MIME format. :param fields: Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`). :param boundary: If not specified, then a random boundary will be generated using :func:`mimetools.choose_boundary`. """ body = BytesIO() if boundary is None: boundary = choose_boundary() for field in iter_field_objects(fields): body.write(b('--%s\r\n' % (boundary))) writer(body).write(field.render_headers()) data = field.data if isinstance(data, int): data = str(data) # Backwards compatibility if isinstance(data, six.text_type): writer(body).write(data) else: body.write(data) body.write(b'\r\n') body.write(b('--%s--\r\n' % (boundary))) content_type = str('multipart/form-data; boundary=%s' % boundary) return body.getvalue(), content_type
[ "def", "encode_multipart_formdata", "(", "fields", ",", "boundary", "=", "None", ")", ":", "body", "=", "BytesIO", "(", ")", "if", "boundary", "is", "None", ":", "boundary", "=", "choose_boundary", "(", ")", "for", "field", "in", "iter_field_objects", "(", ...
https://github.com/pikpikcu/Pentest-Tools-Framework/blob/cd6e6107764a809943dc4e073cde8149c1a2cd03/modules/dirsearch/thirdparty/requests/packages/urllib3/filepost.py#L58-L93
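A usage sketch against the same function in a non-vendored urllib3 (field values are illustrative; a tuple value is (filename, data, mimetype)):

from urllib3.filepost import encode_multipart_formdata

fields = {
    'name': 'value',
    'file': ('report.csv', b'a,b\n1,2\n', 'text/csv'),
}
body, content_type = encode_multipart_formdata(fields)
print(content_type)  # multipart/form-data; boundary=...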
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/distutils/command/build_ext.py
python
build_ext.get_export_symbols
(self, ext)
return ext.export_symbols
Return the list of symbols that a shared extension has to export. This either uses 'ext.export_symbols' or, if it's not provided, "PyInit_" + module_name. Only relevant on Windows, where the .pyd file (DLL) must export the module "PyInit_" function.
Return the list of symbols that a shared extension has to export. This either uses 'ext.export_symbols' or, if it's not provided, "PyInit_" + module_name. Only relevant on Windows, where the .pyd file (DLL) must export the module "PyInit_" function.
[ "Return", "the", "list", "of", "symbols", "that", "a", "shared", "extension", "has", "to", "export", ".", "This", "either", "uses", "ext", ".", "export_symbols", "or", "if", "it", "s", "not", "provided", "PyInit_", "+", "module_name", ".", "Only", "relevan...
def get_export_symbols(self, ext): """Return the list of symbols that a shared extension has to export. This either uses 'ext.export_symbols' or, if it's not provided, "PyInit_" + module_name. Only relevant on Windows, where the .pyd file (DLL) must export the module "PyInit_" function. """ suffix = '_' + ext.name.split('.')[-1] try: # Unicode module name support as defined in PEP-489 # https://www.python.org/dev/peps/pep-0489/#export-hook-name suffix.encode('ascii') except UnicodeEncodeError: suffix = 'U' + suffix.encode('punycode').replace(b'-', b'_').decode('ascii') initfunc_name = "PyInit" + suffix if initfunc_name not in ext.export_symbols: ext.export_symbols.append(initfunc_name) return ext.export_symbols
[ "def", "get_export_symbols", "(", "self", ",", "ext", ")", ":", "suffix", "=", "'_'", "+", "ext", ".", "name", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "try", ":", "# Unicode module name support as defined in PEP-489", "# https://www.python.org/dev/pep...
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/distutils/command/build_ext.py#L685-L702
nccgroup/ScoutSuite
b9b8e201a45bd63835f611eec67fe3bb7c892a0a
ScoutSuite/providers/base/provider.py
python
BaseProvider.__init__
(self, report_dir=None, timestamp=None, services=None, skipped_services=None, result_format='json', **kwargs)
:account_id account ID :last_run Information about the last run :metadata Metadata used to generate the HTML report :ruleset Ruleset used to perform the analysis :services AWS configuration sorted by service
[]
def __init__(self, report_dir=None, timestamp=None, services=None, skipped_services=None, result_format='json', **kwargs): """ :account_id account ID :last_run Information about the last run :metadata Metadata used to generate the HTML report :ruleset Ruleset used to perform the analysis :services AWS configuration sorted by service """ services = [] if services is None else services skipped_services = [] if skipped_services is None else skipped_services self.last_run = None self.metadata = None self._load_metadata() if not hasattr(self, 'services'): self.services = self.services_config(self.credentials) supported_services = vars(self.services).keys() # Ensures "credentials" is not included supported_services = list(supported_services) supported_services.remove('credentials') self.service_list = self._build_services_list(supported_services, services, skipped_services)
[ "def", "__init__", "(", "self", ",", "report_dir", "=", "None", ",", "timestamp", "=", "None", ",", "services", "=", "None", ",", "skipped_services", "=", "None", ",", "result_format", "=", "'json'", ",", "*", "*", "kwargs", ")", ":", "services", "=", ...
https://github.com/nccgroup/ScoutSuite/blob/b9b8e201a45bd63835f611eec67fe3bb7c892a0a/ScoutSuite/providers/base/provider.py#L21-L48
lmb-freiburg/netdef_models
7d3311579cf712b31d05ec29f3dc63df067aa07b
FlowNet3/CSS/net.py
python
Network.resample_occ
(self, blob, ref)
return nd.ops.softmax(resampled)
[]
def resample_occ(self, blob, ref): resampled = nd.ops.resample(blob, reference=ref) return nd.ops.softmax(resampled)
[ "def", "resample_occ", "(", "self", ",", "blob", ",", "ref", ")", ":", "resampled", "=", "nd", ".", "ops", ".", "resample", "(", "blob", ",", "reference", "=", "ref", ")", "return", "nd", ".", "ops", ".", "softmax", "(", "resampled", ")" ]
https://github.com/lmb-freiburg/netdef_models/blob/7d3311579cf712b31d05ec29f3dc63df067aa07b/FlowNet3/CSS/net.py#L14-L16
cms-dev/cms
0401c5336b34b1731736045da4877fef11889274
cms/service/workerpool.py
python
WorkerPool.on_worker_connected
(self, worker_coord)
To be called when a worker comes alive after being offline. We use this callback to instruct the worker to precache all files concerning the contest. worker_coord (ServiceCoord): the coordinates of the worker that came online.
To be called when a worker comes alive after being offline. We use this callback to instruct the worker to precache all files concerning the contest.
[ "To", "be", "called", "when", "a", "worker", "comes", "alive", "after", "being", "offline", ".", "We", "use", "this", "callback", "to", "instruct", "the", "worker", "to", "precache", "all", "files", "concerning", "the", "contest", "." ]
def on_worker_connected(self, worker_coord): """To be called when a worker comes alive after being offline. We use this callback to instruct the worker to precache all files concerning the contest. worker_coord (ServiceCoord): the coordinates of the worker that came online. """ shard = worker_coord.shard logger.info("Worker %s online again.", shard) if self._service.contest_id is not None: self._worker[shard].precache_files( contest_id=self._service.contest_id ) # We don't requeue the operation, because a connection lost # does not invalidate a potential result given by the worker # (as the problem was the connection and not the machine on # which the worker is). But the worker could have been idling, # so we wake up the consumers. self._workers_available_event.set()
[ "def", "on_worker_connected", "(", "self", ",", "worker_coord", ")", ":", "shard", "=", "worker_coord", ".", "shard", "logger", ".", "info", "(", "\"Worker %s online again.\"", ",", "shard", ")", "if", "self", ".", "_service", ".", "contest_id", "is", "not", ...
https://github.com/cms-dev/cms/blob/0401c5336b34b1731736045da4877fef11889274/cms/service/workerpool.py#L164-L184
pytorch/audio
7b6b2d000023e2aa3365b769866c5f375e0d5fda
torchaudio/functional/functional.py
python
phase_vocoder
(complex_specgrams: Tensor, rate: float, phase_advance: Tensor)
return complex_specgrams_stretch
r"""Given a STFT tensor, speed up in time without modifying pitch by a factor of ``rate``. Args: complex_specgrams (Tensor): A tensor of dimension `(..., freq, num_frame)` with complex dtype. rate (float): Speed-up factor phase_advance (Tensor): Expected phase advance in each bin. Dimension of `(freq, 1)` Returns: Tensor: Stretched spectrogram. The resulting tensor is of the same dtype as the input spectrogram, but the number of frames is changed to ``ceil(num_frame / rate)``. Example >>> freq, hop_length = 1025, 512 >>> # (channel, freq, time) >>> complex_specgrams = torch.randn(2, freq, 300, dtype=torch.cfloat) >>> rate = 1.3 # Speed up by 30% >>> phase_advance = torch.linspace( >>> 0, math.pi * hop_length, freq)[..., None] >>> x = phase_vocoder(complex_specgrams, rate, phase_advance) >>> x.shape # with 231 == ceil(300 / 1.3) torch.Size([2, 1025, 231])
r"""Given a STFT tensor, speed up in time without modifying pitch by a factor of ``rate``.
[ "r", "Given", "a", "STFT", "tensor", "speed", "up", "in", "time", "without", "modifying", "pitch", "by", "a", "factor", "of", "rate", "." ]
def phase_vocoder(complex_specgrams: Tensor, rate: float, phase_advance: Tensor) -> Tensor: r"""Given a STFT tensor, speed up in time without modifying pitch by a factor of ``rate``. Args: complex_specgrams (Tensor): A tensor of dimension `(..., freq, num_frame)` with complex dtype. rate (float): Speed-up factor phase_advance (Tensor): Expected phase advance in each bin. Dimension of `(freq, 1)` Returns: Tensor: Stretched spectrogram. The resulting tensor is of the same dtype as the input spectrogram, but the number of frames is changed to ``ceil(num_frame / rate)``. Example >>> freq, hop_length = 1025, 512 >>> # (channel, freq, time) >>> complex_specgrams = torch.randn(2, freq, 300, dtype=torch.cfloat) >>> rate = 1.3 # Speed up by 30% >>> phase_advance = torch.linspace( >>> 0, math.pi * hop_length, freq)[..., None] >>> x = phase_vocoder(complex_specgrams, rate, phase_advance) >>> x.shape # with 231 == ceil(300 / 1.3) torch.Size([2, 1025, 231]) """ if rate == 1.0: return complex_specgrams # pack batch shape = complex_specgrams.size() complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-2:])) # Figures out the corresponding real dtype, i.e. complex128 -> float64, complex64 -> float32 # Note torch.real is a view so it does not incur any memory copy. real_dtype = torch.real(complex_specgrams).dtype time_steps = torch.arange(0, complex_specgrams.size(-1), rate, device=complex_specgrams.device, dtype=real_dtype) alphas = time_steps % 1.0 phase_0 = complex_specgrams[..., :1].angle() # Time Padding complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 2]) # (new_bins, freq, 2) complex_specgrams_0 = complex_specgrams.index_select(-1, time_steps.long()) complex_specgrams_1 = complex_specgrams.index_select(-1, (time_steps + 1).long()) angle_0 = complex_specgrams_0.angle() angle_1 = complex_specgrams_1.angle() norm_0 = complex_specgrams_0.abs() norm_1 = complex_specgrams_1.abs() phase = angle_1 - angle_0 - phase_advance phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi)) # Compute Phase Accum phase = phase + phase_advance phase = torch.cat([phase_0, phase[..., :-1]], dim=-1) phase_acc = torch.cumsum(phase, -1) mag = alphas * norm_1 + (1 - alphas) * norm_0 complex_specgrams_stretch = torch.polar(mag, phase_acc) # unpack batch complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-2] + complex_specgrams_stretch.shape[1:]) return complex_specgrams_stretch
[ "def", "phase_vocoder", "(", "complex_specgrams", ":", "Tensor", ",", "rate", ":", "float", ",", "phase_advance", ":", "Tensor", ")", "->", "Tensor", ":", "if", "rate", "==", "1.0", ":", "return", "complex_specgrams", "# pack batch", "shape", "=", "complex_spe...
https://github.com/pytorch/audio/blob/7b6b2d000023e2aa3365b769866c5f375e0d5fda/torchaudio/functional/functional.py#L636-L704
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
script/gen_requirements_all.py
python
requirements_all_output
(reqs)
return "".join(output)
Generate output for requirements_all.
Generate output for requirements_all.
[ "Generate", "output", "for", "requirements_all", "." ]
def requirements_all_output(reqs): """Generate output for requirements_all.""" output = [ "# Home Assistant Core, full dependency set\n", "-r requirements.txt\n", ] output.append(generate_requirements_list(reqs)) return "".join(output)
[ "def", "requirements_all_output", "(", "reqs", ")", ":", "output", "=", "[", "\"# Home Assistant Core, full dependency set\\n\"", ",", "\"-r requirements.txt\\n\"", ",", "]", "output", ".", "append", "(", "generate_requirements_list", "(", "reqs", ")", ")", "return", ...
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/script/gen_requirements_all.py#L297-L305
dongrixinyu/JioNLP
2c5b11439915891f0f24955b7de4f637f38a4b44
jionlp/gadget/phone_location.py
python
PhoneLocation.__call__
(self, text)
return {'number': text, 'province': None, 'city': None, 'type': 'unknown'}
Input a phone number text and return its parsed result.
Input a phone number text and return its parsed result.
[ "Input", "a", "phone", "number", "text", "and", "return", "its", "parsed", "result", "." ]
def __call__(self, text): """ Input a phone number text and return its parsed result. """ if self.cell_phone_location_trie is None: self._prepare() res = self.cell_phone_pattern.search(text) if res is not None: # matched a cell phone number cell_phone_number = res.group() first_seven = cell_phone_number[:7] _, location = self.cell_phone_location_trie.search(first_seven) province, city = location.split(' ') # print(province, city) _, operator = self.telecom_operator_trie.search(cell_phone_number[:4]) return {'number': text, 'province': province, 'city': city, 'type': 'cell_phone', 'operator': operator} res = self.landline_phone_pattern.search(text) if res is not None: # matched a landline number # extract the area code of the landline number res = self.landline_area_code_pattern.search(text) if res is not None: area_code = res.group(1) province, city = self.area_code_location.get(area_code, ' ').split(' ') if province == '': province, city = None, None return {'number': text, 'province': province, 'city': city, 'type': 'landline_phone'} else: return {'number': text, 'province': None, 'city': None, 'type': 'landline_phone'} return {'number': text, 'province': None, 'city': None, 'type': 'unknown'}
[ "def", "__call__", "(", "self", ",", "text", ")", ":", "if", "self", ".", "cell_phone_location_trie", "is", "None", ":", "self", ".", "_prepare", "(", ")", "res", "=", "self", ".", "cell_phone_pattern", ".", "search", "(", "text", ")", "if", "res", "is...
https://github.com/dongrixinyu/JioNLP/blob/2c5b11439915891f0f24955b7de4f637f38a4b44/jionlp/gadget/phone_location.py#L81-L116
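A hedged usage sketch (the number is made up, and actual province/city/operator values come from jionlp's bundled lookup tables):

from jionlp.gadget.phone_location import PhoneLocation

pl = PhoneLocation()
print(pl('13912345678'))
# e.g. {'number': '13912345678', 'province': ..., 'city': ...,
#       'type': 'cell_phone', 'operator': ...}
print(pl('no digits here'))
# {'number': 'no digits here', 'province': None, 'city': None, 'type': 'unknown'}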
DataBiosphere/toil
2e148eee2114ece8dcc3ec8a83f36333266ece0d
src/toil/job.py
python
Job._runner
( self, jobStore: "AbstractJobStore", fileStore: "AbstractFileStore", defer: Callable[[Any], None], **kwargs, )
Run the job, and serialise the next jobs. It marks the job as completed (by clearing its command) and creates the successor relationships to new successors, but it doesn't actually commit those updates to the current job into the JobStore. We take all arguments as keyword arguments, and accept and ignore additional keyword arguments, for compatibility with workflows (*cough* Cactus *cough*) which are reaching in and overriding _runner (which they aren't supposed to do). If everything is passed as name=value it won't break as soon as we add or remove a parameter. :param class jobStore: Instance of the job store :param fileStore: Instance of a cached or uncached filestore :param defer: Function yielded by open() context manager of :class:`toil.DeferredFunctionManager`, which is called to register deferred functions. :param kwargs: Catch-all to accept superfluous arguments passed by old versions of Cactus. Cactus shouldn't override this method, but it does.
Run the job, and serialise the next jobs.
[ "Run", "the", "job", "and", "serialise", "the", "next", "jobs", "." ]
def _runner( self, jobStore: "AbstractJobStore", fileStore: "AbstractFileStore", defer: Callable[[Any], None], **kwargs, ) -> None: """ Run the job, and serialise the next jobs. It marks the job as completed (by clearing its command) and creates the successor relationships to new successors, but it doesn't actually commit those updates to the current job into the JobStore. We take all arguments as keyword arguments, and accept and ignore additional keyword arguments, for compatibility with workflows (*cough* Cactus *cough*) which are reaching in and overriding _runner (which they aren't supposed to do). If everything is passed as name=value it won't break as soon as we add or remove a parameter. :param class jobStore: Instance of the job store :param fileStore: Instance of a cached or uncached filestore :param defer: Function yielded by open() context manager of :class:`toil.DeferredFunctionManager`, which is called to register deferred functions. :param kwargs: Catch-all to accept superfluous arguments passed by old versions of Cactus. Cactus shouldn't override this method, but it does. """ # Make deferred function registration available during run(). self._defer = defer # Make fileStore available as an attribute during run() ... self._fileStore = fileStore # ... but also pass it to _run() as an argument for backwards # compatibility with workflows that tinker around with our internals, # and send a fake jobGraph in case they still think jobGraph exists. returnValues = self._run(jobGraph=None, fileStore=fileStore) # Clean up state changes made for run() self._defer = None self._fileStore = None # Serialize the new Jobs defined by the run method to the jobStore self._saveJobGraph(jobStore, saveSelf=False, returnValues=returnValues) # Clear out the command, because the job is done. self.description.command = None
[ "def", "_runner", "(", "self", ",", "jobStore", ":", "\"AbstractJobStore\"", ",", "fileStore", ":", "\"AbstractFileStore\"", ",", "defer", ":", "Callable", "[", "[", "Any", "]", ",", "None", "]", ",", "*", "*", "kwargs", ",", ")", "->", "None", ":", "#...
https://github.com/DataBiosphere/toil/blob/2e148eee2114ece8dcc3ec8a83f36333266ece0d/src/toil/job.py#L2364-L2410
qqwweee/keras-yolo3
e6598d13c703029b2686bc2eb8d5c09badf42992
train.py
python
get_classes
(classes_path)
return class_names
loads the classes
loads the classes
[ "loads", "the", "classes" ]
def get_classes(classes_path): '''loads the classes''' with open(classes_path) as f: class_names = f.readlines() class_names = [c.strip() for c in class_names] return class_names
[ "def", "get_classes", "(", "classes_path", ")", ":", "with", "open", "(", "classes_path", ")", "as", "f", ":", "class_names", "=", "f", ".", "readlines", "(", ")", "class_names", "=", "[", "c", ".", "strip", "(", ")", "for", "c", "in", "class_names", ...
https://github.com/qqwweee/keras-yolo3/blob/e6598d13c703029b2686bc2eb8d5c09badf42992/train.py#L90-L95
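A usage sketch; the file path is a placeholder, and the file holds one class name per line:

# model_data/classes.txt contains, e.g.:
#   person
#   bicycle
#   car
class_names = get_classes('model_data/classes.txt')
print(class_names)  # ['person', 'bicycle', 'car']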
SIlver--/remindmebot-reddit
94f35cabeebad1004837007eb9a65dad7207c68a
remindmebot_reply.py
python
Reply.search_db
(self)
Loop through data looking for which comments are old
Loop through data looking for which comments are old
[ "Loop", "through", "data", "looking", "for", "which", "comments", "are", "old" ]
def search_db(self): """ Loop through data looking for which comments are old """ data = self._queryDB.cursor.fetchall() alreadyCommented = [] for row in data: # checks to make sure ID hasn't been commented already # For situations where errors happened if row[0] not in alreadyCommented: flagDelete = False # MySQL - permalink, message, origin date, reddit user flagDelete = self.new_reply(row[1],row[2], row[4], row[5]) # removes row based on flagDelete if flagDelete: cmd = "DELETE FROM message_date WHERE id = %s" self._queryDB.cursor.execute(cmd, [row[0]]) self._queryDB.connection.commit() alreadyCommented.append(row[0]) self._queryDB.connection.commit() self._queryDB.connection.close()
[ "def", "search_db", "(", "self", ")", ":", "data", "=", "self", ".", "_queryDB", ".", "cursor", ".", "fetchall", "(", ")", "alreadyCommented", "=", "[", "]", "for", "row", "in", "data", ":", "# checks to make sure ID hasn't been commented already", "# For situta...
https://github.com/SIlver--/remindmebot-reddit/blob/94f35cabeebad1004837007eb9a65dad7207c68a/remindmebot_reply.py#L107-L129
fortharris/Pcode
147962d160a834c219e12cb456abc130826468e4
rope/base/oi/objectdb.py
python
CallInfo.__init__
(self, args, returned)
[]
def __init__(self, args, returned): self.args = args self.returned = returned
[ "def", "__init__", "(", "self", ",", "args", ",", "returned", ")", ":", "self", ".", "args", "=", "args", "self", ".", "returned", "=", "returned" ]
https://github.com/fortharris/Pcode/blob/147962d160a834c219e12cb456abc130826468e4/rope/base/oi/objectdb.py#L175-L177
globaleaks/GlobaLeaks
4624ca937728adb8e21c4733a8aecec6a41cb3db
backend/globaleaks/handlers/rtip.py
python
db_revoke_tip_access
(session, tid, user_id, itip, receiver_id)
Transaction for revoking a user's access to a report :param session: An ORM session :param tid: A tenant ID of the user performing the operation :param user_id: A user ID of the user performing the operation :param itip: An itip reference of the submission object of the operation :param receiver_id: A user ID of the user whose access to the report is revoked
Transaction for revoking a user's access to a report
[ "Transaction", "for", "revoking", "a", "user", "s", "access", "to", "a", "report" ]
def db_revoke_tip_access(session, tid, user_id, itip, receiver_id): """ Transaction for revoking a user's access to a report :param session: An ORM session :param tid: A tenant ID of the user performing the operation :param user_id: A user ID of the user performing the operation :param itip: An itip reference of the submission object of the operation :param receiver_id: A user ID of the user whose access to the report is revoked """ rtip = session.query(models.ReceiverTip) \ .filter(models.ReceiverTip.internaltip_id == itip.id, models.ReceiverTip.receiver_id == receiver_id).one_or_none() if rtip is None: return session.delete(rtip) db_log(session, tid=tid, type='revoke_access', user_id=user_id, object_id=itip.id)
[ "def", "db_revoke_tip_access", "(", "session", ",", "tid", ",", "user_id", ",", "itip", ",", "receiver_id", ")", ":", "rtip", "=", "session", ".", "query", "(", "models", ".", "ReceiverTip", ")", ".", "filter", "(", "models", ".", "ReceiverTip", ".", "in...
https://github.com/globaleaks/GlobaLeaks/blob/4624ca937728adb8e21c4733a8aecec6a41cb3db/backend/globaleaks/handlers/rtip.py#L129-L148
GoogleCloudPlatform/PerfKitBenchmarker
6e3412d7d5e414b8ca30ed5eaf970cef1d919a67
perfkitbenchmarker/linux_packages/node_js.py
python
YumInstall
(vm)
Installs the node.js package on the VM.
Installs the node.js package on the VM.
[ "Installs", "the", "node", ".", "js", "package", "on", "the", "VM", "." ]
def YumInstall(vm): """Installs the node.js package on the VM.""" _Install(vm)
[ "def", "YumInstall", "(", "vm", ")", ":", "_Install", "(", "vm", ")" ]
https://github.com/GoogleCloudPlatform/PerfKitBenchmarker/blob/6e3412d7d5e414b8ca30ed5eaf970cef1d919a67/perfkitbenchmarker/linux_packages/node_js.py#L34-L36
Chaffelson/nipyapi
d3b186fd701ce308c2812746d98af9120955e810
nipyapi/nifi/models/system_diagnostics_snapshot_dto.py
python
SystemDiagnosticsSnapshotDTO.total_non_heap_bytes
(self, total_non_heap_bytes)
Sets the total_non_heap_bytes of this SystemDiagnosticsSnapshotDTO. Total number of bytes allocated to the JVM not used for heap :param total_non_heap_bytes: The total_non_heap_bytes of this SystemDiagnosticsSnapshotDTO. :type: int
Sets the total_non_heap_bytes of this SystemDiagnosticsSnapshotDTO. Total number of bytes allocated to the JVM not used for heap
[ "Sets", "the", "total_non_heap_bytes", "of", "this", "SystemDiagnosticsSnapshotDTO", ".", "Total", "number", "of", "bytes", "allocated", "to", "the", "JVM", "not", "used", "for", "heap" ]
def total_non_heap_bytes(self, total_non_heap_bytes): """ Sets the total_non_heap_bytes of this SystemDiagnosticsSnapshotDTO. Total number of bytes allocated to the JVM not used for heap :param total_non_heap_bytes: The total_non_heap_bytes of this SystemDiagnosticsSnapshotDTO. :type: int """ self._total_non_heap_bytes = total_non_heap_bytes
[ "def", "total_non_heap_bytes", "(", "self", ",", "total_non_heap_bytes", ")", ":", "self", ".", "_total_non_heap_bytes", "=", "total_non_heap_bytes" ]
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/nifi/models/system_diagnostics_snapshot_dto.py#L226-L235
vmware/vsphere-automation-sdk-python
ba7d4e0742f58a641dfed9538ecbbb1db4f3891e
samples/vmc/networks_nsxt/hello_world.py
python
main
()
[]
def main(): auth_example = AuthExample() auth_example.get_domains()
[ "def", "main", "(", ")", ":", "auth_example", "=", "AuthExample", "(", ")", "auth_example", ".", "get_domains", "(", ")" ]
https://github.com/vmware/vsphere-automation-sdk-python/blob/ba7d4e0742f58a641dfed9538ecbbb1db4f3891e/samples/vmc/networks_nsxt/hello_world.py#L48-L50
Screetsec/BruteSploit
124029d7a4cf35d7017a0f2fa37c8f8e5f32a359
tools/instabrute.py
python
check_proxy
(q)
check a proxy and append it to the working proxies if it responds :param q: queue of proxies to check
check a proxy and append it to the working proxies if it responds
[ "check", "a", "proxy", "and", "append", "it", "to", "the", "working", "proxies", "if", "it", "responds", ":", "param", "q", ":", "queue", "of", "proxies", "to", "check" ]
def check_proxy(q): """ check a proxy and append it to the working proxies if it responds :param q: queue of proxies to check """ if not q.empty(): proxy = q.get(False) proxy = proxy.replace("\r", "").replace("\n", "") try: opener = rq.build_opener( rq.ProxyHandler({'https': 'https://' + proxy}), rq.HTTPHandler(), rq.HTTPSHandler() ) opener.addheaders = [('User-agent', 'Mozilla/5.0')] rq.install_opener(opener) req = rq.Request('https://api.ipify.org/') if rq.urlopen(req).read().decode() == proxy.partition(':')[0]: proxys_working_list.update({proxy: proxy}) if _verbose: print(bcolors.OKGREEN + " --[+] ", proxy, " | PASS" + bcolors.ENDC) else: if _verbose: print(" --[!] ", proxy, " | FAILED") except Exception as err: if _verbose: print(" --[!] ", proxy, " | FAILED") if _debug: logger.error(err) pass
[ "def", "check_proxy", "(", "q", ")", ":", "if", "not", "q", ".", "empty", "(", ")", ":", "proxy", "=", "q", ".", "get", "(", "False", ")", "proxy", "=", "proxy", ".", "replace", "(", "\"\\r\"", ",", "\"\"", ")", ".", "replace", "(", "\"\\n\"", ...
https://github.com/Screetsec/BruteSploit/blob/124029d7a4cf35d7017a0f2fa37c8f8e5f32a359/tools/instabrute.py#L42-L77
CGATOxford/cgat
326aad4694bdfae8ddc194171bb5d73911243947
obsolete/pipeline_snps.py
python
loadEnsembl2Uniprot
( infile, outfile )
load mapping from ENSEMBL transcript ids to uniprot ids. This method expects a BioMart output file with the following five columns: Ensembl gene id, Ensembl transcript id, Uniprot Swissprot Id, Uniprot Swissprot Accession, Uniprot/Trembl Accession
load mapping from ENSEMBL transcript ids to uniprot ids.
[ "load", "mapping", "from", "ENSEMBL", "transcript", "ids", "to", "uniprot", "ids", "." ]
def loadEnsembl2Uniprot( infile, outfile ): '''load mapping from ENSEMBL transcript ids to uniprot ids. This method expects a BioMart output file with the following five columns: Ensembl gene id, Ensembl transcript id, Uniprot Swissprot Id, Uniprot Swissprot Accession, Uniprot/Trembl Accession ''' table = P.toTable( outfile ) statement = '''gunzip < %(infile)s | perl -p -e "s/Ensembl Gene ID/gene_id/; s/Ensembl Transcript ID/transcript_id/; s/UniProt\/SwissProt ID/swissprot_id/; s/UniProt\/SwissProt Accession/swissprot_acc/; s/UniProt\/TrEMBL Accession/trembl_acc/" |python %(scriptsdir)s/csv2db.py %(csv2db_options)s \ --index=gene_id \ --index=transcript_id \ --index=trembl_acc \ --table=%(table)s > %(outfile)s ''' P.run()
[ "def", "loadEnsembl2Uniprot", "(", "infile", ",", "outfile", ")", ":", "table", "=", "P", ".", "toTable", "(", "outfile", ")", "statement", "=", "'''gunzip \n < %(infile)s\n | perl -p -e \n \"s/Ensembl Gene ID/gene_id/; \n s/Ensembl Transcript ID/transcri...
https://github.com/CGATOxford/cgat/blob/326aad4694bdfae8ddc194171bb5d73911243947/obsolete/pipeline_snps.py#L1526-L1556