Dataset schema: 14 fields per record (string fields annotated with the observed min-max lengths):

    nwo                 string, 5-106 chars      repository "name with owner", e.g. zenodo/zenodo
    sha                 string, 40 chars         commit hash the file was taken from
    path                string, 4-174 chars      file path within the repository
    language            string, 1 class          always "python" in this split
    identifier          string, 1-140 chars      function or method name
    parameters          string, 0-87.7k chars    parameter list, including parentheses
    argument_list       string, 1 class
    return_statement    string, 0-426k chars     a return statement from the body, if any
    docstring           string, 0-64.3k chars    full docstring text
    docstring_summary   string, 0-26.3k chars    leading summary of the docstring
    docstring_tokens    list                     tokenized docstring
    function            string, 18 chars-4.83M chars   full function source
    function_tokens     list                     tokenized function source
    url                 string, 83-304 chars     GitHub permalink with line range

Records follow below with fields in this order; fields that are empty for a given record (for example a missing return_statement or docstring) are simply omitted from its listing.
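As a minimal sketch of how one might consume records with this schema, assuming the rows are exported as JSON Lines (the file name below is a placeholder, not part of the dataset):

    import json

    # Hypothetical export of this dataset, one JSON object per line.
    with open("functions.jsonl") as fh:
        for line in fh:
            row = json.loads(line)
            # Every row carries the repo, the qualified identifier,
            # the raw function source and a GitHub permalink.
            print(row["nwo"], row["identifier"], row["url"])
            print(row["function"])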
zenodo/zenodo
3c45e52a742ad5a0a7788a67b02fbbc15ab4d8d5
zenodo/modules/tokens/ext.py
python
ResourceAccessTokens.init_config
(app)
Initialize configuration.
Initialize configuration.
[ "Initialize", "configuration", "." ]
def init_config(app):
    """Initialize configuration."""
    for k in dir(config):
        if k.startswith('ZENODO_TOKENS_'):
            app.config.setdefault(k, getattr(config, k))
[ "def", "init_config", "(", "app", ")", ":", "for", "k", "in", "dir", "(", "config", ")", ":", "if", "k", ".", "startswith", "(", "'ZENODO_TOKENS_'", ")", ":", "app", ".", "config", ".", "setdefault", "(", "k", ",", "getattr", "(", "config", ",", "k...
https://github.com/zenodo/zenodo/blob/3c45e52a742ad5a0a7788a67b02fbbc15ab4d8d5/zenodo/modules/tokens/ext.py#L41-L45
SeldonIO/alibi-detect
b5ec53cfadcd8e3463d400259f2ea1b752ed1812
alibi_detect/utils/saving.py
python
init_od_iforest
(state_dict: Dict)
return od
Initialize isolation forest. Parameters ---------- state_dict Dictionary containing the parameter values. Returns ------- Initialized IForest instance.
Initialize isolation forest.
[ "Initialize", "isolation", "forest", "." ]
def init_od_iforest(state_dict: Dict) -> IForest:
    """
    Initialize isolation forest.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.

    Returns
    -------
    Initialized IForest instance.
    """
    od = IForest(threshold=state_dict['threshold'])
    od.isolationforest = state_dict['isolationforest']
    return od
[ "def", "init_od_iforest", "(", "state_dict", ":", "Dict", ")", "->", "IForest", ":", "od", "=", "IForest", "(", "threshold", "=", "state_dict", "[", "'threshold'", "]", ")", "od", ".", "isolationforest", "=", "state_dict", "[", "'isolationforest'", "]", "ret...
https://github.com/SeldonIO/alibi-detect/blob/b5ec53cfadcd8e3463d400259f2ea1b752ed1812/alibi_detect/utils/saving.py#L1725-L1740
bwohlberg/sporco
df67462abcf83af6ab1961bcb0d51b87a66483fa
sporco/admm/cbpdn.py
python
ConvBPDNProjL1.__init__
(self, D, S, gamma, opt=None, dimK=None, dimN=2)
| **Call graph** .. image:: ../_static/jonga/cbpdnprjl1_init.svg :width: 20% :target: ../_static/jonga/cbpdnprjl1_init.svg | Parameters ---------- D : array_like Dictionary matrix S : array_like Signal vector or matrix gamma : float Constraint parameter opt : :class:`ConvBPDNProjL1.Options` object Algorithm options dimK : 0, 1, or None, optional (default None) Number of dimensions in input signal corresponding to multiple independent signals dimN : int, optional (default 2) Number of spatial dimensions
|
[ "|" ]
def __init__(self, D, S, gamma, opt=None, dimK=None, dimN=2):
    """
    |

    **Call graph**

    .. image:: ../_static/jonga/cbpdnprjl1_init.svg
       :width: 20%
       :target: ../_static/jonga/cbpdnprjl1_init.svg

    |

    Parameters
    ----------
    D : array_like
        Dictionary matrix
    S : array_like
        Signal vector or matrix
    gamma : float
        Constraint parameter
    opt : :class:`ConvBPDNProjL1.Options` object
        Algorithm options
    dimK : 0, 1, or None, optional (default None)
        Number of dimensions in input signal corresponding to multiple
        independent signals
    dimN : int, optional (default 2)
        Number of spatial dimensions
    """

    # Set default options if necessary
    if opt is None:
        opt = ConvBPDNProjL1.Options()

    super(ConvBPDNProjL1, self).__init__(D, S, opt, dimK=dimK, dimN=dimN)
    self.gamma = self.dtype.type(gamma)
[ "def", "__init__", "(", "self", ",", "D", ",", "S", ",", "gamma", ",", "opt", "=", "None", ",", "dimK", "=", "None", ",", "dimN", "=", "2", ")", ":", "# Set default options if necessary", "if", "opt", "is", "None", ":", "opt", "=", "ConvBPDNProjL1", ...
https://github.com/bwohlberg/sporco/blob/df67462abcf83af6ab1961bcb0d51b87a66483fa/sporco/admm/cbpdn.py#L1319-L1355
magic282/NQG
3006ff8b29684adead2eec82c304d81b5907fa74
seq2seq_pt/s2s/xinit.py
python
orthogonal
(tensor, gain=1)
return tensor
Fills the input Tensor or Variable with a (semi) orthogonal matrix, as described in "Exact solutions to the nonlinear dynamics of learning in deep linear neural networks" - Saxe, A. et al. (2013). The input tensor must have at least 2 dimensions, and for tensors with more than 2 dimensions the trailing dimensions are flattened. Args: tensor: an n-dimensional torch.Tensor or autograd.Variable, where n >= 2 gain: optional scaling factor Examples: >>> w = torch.Tensor(3, 5) >>> nn.init.orthogonal(w)
Fills the input Tensor or Variable with a (semi) orthogonal matrix, as described in "Exact solutions to the nonlinear dynamics of learning in deep linear neural networks" - Saxe, A. et al. (2013). The input tensor must have at least 2 dimensions, and for tensors with more than 2 dimensions the trailing dimensions are flattened.
[ "Fills", "the", "input", "Tensor", "or", "Variable", "with", "a", "(", "semi", ")", "orthogonal", "matrix", "as", "described", "in", "Exact", "solutions", "to", "the", "nonlinear", "dynamics", "of", "learning", "in", "deep", "linear", "neural", "networks", "...
def orthogonal(tensor, gain=1):
    """Fills the input Tensor or Variable with a (semi) orthogonal matrix, as
    described in "Exact solutions to the nonlinear dynamics of learning in deep
    linear neural networks" - Saxe, A. et al. (2013). The input tensor must have
    at least 2 dimensions, and for tensors with more than 2 dimensions the
    trailing dimensions are flattened.

    Args:
        tensor: an n-dimensional torch.Tensor or autograd.Variable, where n >= 2
        gain: optional scaling factor

    Examples:
        >>> w = torch.Tensor(3, 5)
        >>> nn.init.orthogonal(w)
    """
    if isinstance(tensor, Variable):
        orthogonal(tensor.data, gain=gain)
        return tensor

    if tensor.ndimension() < 2:
        raise ValueError("Only tensors with 2 or more dimensions are supported")

    rows = tensor.size(0)
    cols = tensor[0].numel()
    flattened = torch.Tensor(rows, cols).normal_(0, 1)

    # Compute the qr factorization
    q, r = torch.qr(flattened)
    # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
    d = torch.diag(r, 0)
    ph = d.sign()
    q *= ph.expand_as(q)
    # Pad zeros to Q (if rows smaller than cols)
    if rows < cols:
        padding = torch.zeros(rows, cols - rows)
        if q.is_cuda:
            q = torch.cat([q, padding.cuda()], 1)
        else:
            q = torch.cat([q, padding], 1)

    tensor.view_as(q).copy_(q)
    tensor.mul_(gain)
    return tensor
[ "def", "orthogonal", "(", "tensor", ",", "gain", "=", "1", ")", ":", "if", "isinstance", "(", "tensor", ",", "Variable", ")", ":", "orthogonal", "(", "tensor", ".", "data", ",", "gain", "=", "gain", ")", "return", "tensor", "if", "tensor", ".", "ndim...
https://github.com/magic282/NQG/blob/3006ff8b29684adead2eec82c304d81b5907fa74/seq2seq_pt/s2s/xinit.py#L289-L328
python-diamond/Diamond
7000e16cfdf4508ed9291fc4b3800592557b2431
src/collectors/mesos/mesos.py
python
MesosCollector._collect_slave_statistics
(self)
[]
def _collect_slave_statistics(self):
    result = self._get('monitor/statistics')

    if not result:
        return

    result_copy = copy.deepcopy(result)
    self._group_and_publish_tasks_statistics(result)
    self._publish_tasks_statistics(result_copy)
[ "def", "_collect_slave_statistics", "(", "self", ")", ":", "result", "=", "self", ".", "_get", "(", "'monitor/statistics'", ")", "if", "not", "result", ":", "return", "result_copy", "=", "copy", ".", "deepcopy", "(", "result", ")", "self", ".", "_group_and_p...
https://github.com/python-diamond/Diamond/blob/7000e16cfdf4508ed9291fc4b3800592557b2431/src/collectors/mesos/mesos.py#L177-L185
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/sync/v1/service/__init__.py
python
ServiceInstance.reachability_webhooks_enabled
(self)
return self._properties['reachability_webhooks_enabled']
:returns: Whether the service instance calls webhook_url when client endpoints connect to Sync :rtype: bool
:returns: Whether the service instance calls webhook_url when client endpoints connect to Sync :rtype: bool
[ ":", "returns", ":", "Whether", "the", "service", "instance", "calls", "webhook_url", "when", "client", "endpoints", "connect", "to", "Sync", ":", "rtype", ":", "bool" ]
def reachability_webhooks_enabled(self):
    """
    :returns: Whether the service instance calls webhook_url when client endpoints connect to Sync
    :rtype: bool
    """
    return self._properties['reachability_webhooks_enabled']
[ "def", "reachability_webhooks_enabled", "(", "self", ")", ":", "return", "self", ".", "_properties", "[", "'reachability_webhooks_enabled'", "]" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/sync/v1/service/__init__.py#L473-L478
NVISOsecurity/ee-outliers
09854bcc02e5b551ab63973800dc2e5ee34cc457
app/analyzers/metrics.py
python
MetricsAnalyzer.remove_metric_from_batch
(eval_metrics_aggregator_value, index)
return eval_metrics_aggregator_value
Remove value from batch (does the opposite of add_metric_to_batch) :param eval_metrics_aggregator_value: batch value that need to be update :param index: index of document that need to be removed :return: new batch
Remove value from batch (does the opposite of add_metric_to_batch)
[ "Remove", "value", "from", "batch", "(", "does", "the", "opposite", "of", "add_metric_to_batch", ")" ]
def remove_metric_from_batch(eval_metrics_aggregator_value, index):
    """
    Remove value from batch (does the opposite of add_metric_to_batch)

    :param eval_metrics_aggregator_value: batch value that need to be update
    :param index: index of document that need to be removed
    :return: new batch
    """
    eval_metrics_aggregator_value["metrics"].pop(index)
    eval_metrics_aggregator_value["observations"].pop(index)
    eval_metrics_aggregator_value["raw_docs"].pop(index)

    return eval_metrics_aggregator_value
[ "def", "remove_metric_from_batch", "(", "eval_metrics_aggregator_value", ",", "index", ")", ":", "eval_metrics_aggregator_value", "[", "\"metrics\"", "]", ".", "pop", "(", "index", ")", "eval_metrics_aggregator_value", "[", "\"observations\"", "]", ".", "pop", "(", "i...
https://github.com/NVISOsecurity/ee-outliers/blob/09854bcc02e5b551ab63973800dc2e5ee34cc457/app/analyzers/metrics.py#L349-L361
sphinx-doc/sphinx
e79681c76843c1339863b365747079b2d662d0c1
sphinx/builders/__init__.py
python
Builder.write_doc
(self, docname: str, doctree: nodes.document)
Where you actually write something to the filesystem.
Where you actually write something to the filesystem.
[ "Where", "you", "actually", "write", "something", "to", "the", "filesystem", "." ]
def write_doc(self, docname: str, doctree: nodes.document) -> None:
    """Where you actually write something to the filesystem."""
    raise NotImplementedError
[ "def", "write_doc", "(", "self", ",", "docname", ":", "str", ",", "doctree", ":", "nodes", ".", "document", ")", "->", "None", ":", "raise", "NotImplementedError" ]
https://github.com/sphinx-doc/sphinx/blob/e79681c76843c1339863b365747079b2d662d0c1/sphinx/builders/__init__.py#L579-L581
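Since write_doc is the abstract hook that concrete builders override, here is a minimal sketch of a custom builder under that contract; the builder name and output scheme are illustrative, and a real builder must also implement the other abstract hooks (get_target_uri, get_outdated_docs, ...) and be registered with app.add_builder:

    from sphinx.builders import Builder

    class TextDumpBuilder(Builder):
        # Hypothetical builder: dumps each document's plain text to disk.
        name = "textdump"

        def write_doc(self, docname, doctree):
            # docutils doctrees expose their flattened text via astext().
            with open(docname.replace("/", "_") + ".txt", "w") as f:
                f.write(doctree.astext())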
shaneshixiang/rllabplusplus
4d55f96ec98e3fe025b7991945e3e6a54fd5449f
rllab/algos/cma_es_lib.py
python
BoundaryHandlerBase.has_bounds
(self)
return False
return True, if any variable is bounded
return True, if any variable is bounded
[ "return", "True", "if", "any", "variable", "is", "bounded" ]
def has_bounds(self):
    """return True, if any variable is bounded"""
    bounds = self.bounds
    if bounds in (None, [None, None]):
        return False
    for ib, bound in enumerate(bounds):
        if bound is not None:
            sign_ = 2 * ib - 1
            for bound_i in bound:
                if bound_i is not None and sign_ * bound_i < np.inf:
                    return True
    return False
[ "def", "has_bounds", "(", "self", ")", ":", "bounds", "=", "self", ".", "bounds", "if", "bounds", "in", "(", "None", ",", "[", "None", ",", "None", "]", ")", ":", "return", "False", "for", "ib", ",", "bound", "in", "enumerate", "(", "bounds", ")", ...
https://github.com/shaneshixiang/rllabplusplus/blob/4d55f96ec98e3fe025b7991945e3e6a54fd5449f/rllab/algos/cma_es_lib.py#L953-L964
pm4py/pm4py-core
7807b09a088b02199cd0149d724d0e28793971bf
pm4py/algo/conformance/alignments/petri_net/variants/tweaked_state_equation_a_star.py
python
__update_heu_dict_specific_point
(heu_dict, heu_max_ind_dict, mm, index, h, x)
Updates the heuristics dictionary on the new marking, storing the information about the heuristics and the vector (point-specific method)
Updates the heuristics dictionary on the new marking, storing the information about the heuristics and the vector (point-specific method)
[ "Updates", "the", "heuristics", "dictionary", "on", "the", "new", "marking", "storing", "the", "information", "about", "the", "heuristics", "and", "the", "vector", "(", "point", "-", "specific", "method", ")" ]
def __update_heu_dict_specific_point(heu_dict, heu_max_ind_dict, mm, index, h, x):
    """
    Updates the heuristics dictionary on the new marking, storing the information
    about the heuristics and the vector (point-specific method)
    """
    if mm not in heu_dict:
        heu_dict[mm] = {}
        heu_max_ind_dict[mm] = -1
    hdm = heu_dict[mm]
    if index not in hdm:
        hdm[index] = (-1, None)
    if h > hdm[index][0]:
        hdm[index] = (h, tuple(x))
    heu_max_ind_dict[mm] = max(heu_max_ind_dict[mm], index)
[ "def", "__update_heu_dict_specific_point", "(", "heu_dict", ",", "heu_max_ind_dict", ",", "mm", ",", "index", ",", "h", ",", "x", ")", ":", "if", "mm", "not", "in", "heu_dict", ":", "heu_dict", "[", "mm", "]", "=", "{", "}", "heu_max_ind_dict", "[", "mm"...
https://github.com/pm4py/pm4py-core/blob/7807b09a088b02199cd0149d724d0e28793971bf/pm4py/algo/conformance/alignments/petri_net/variants/tweaked_state_equation_a_star.py#L579-L592
git-cola/git-cola
b48b8028e0c3baf47faf7b074b9773737358163d
cola/widgets/standard.py
python
ProgressAnimationThread.run
(self)
[]
def run(self):
    self.running = True
    while self.running:
        self.updated.emit(self.cycle())
        time.sleep(self.timeout)
[ "def", "run", "(", "self", ")", ":", "self", ".", "running", "=", "True", "while", "self", ".", "running", ":", "self", ".", "updated", ".", "emit", "(", "self", ".", "cycle", "(", ")", ")", "time", ".", "sleep", "(", "self", ".", "timeout", ")" ...
https://github.com/git-cola/git-cola/blob/b48b8028e0c3baf47faf7b074b9773737358163d/cola/widgets/standard.py#L712-L716
lovelylain/pyctp
fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d
futures2/ctp/ApiStruct.py
python
Notice.__init__
(self, BrokerID='', Content='', SequenceLabel='')
[]
def __init__(self, BrokerID='', Content='', SequenceLabel=''):
    self.BrokerID = ''  # Broker company code, char[11]
    self.Content = ''  # Message body, char[501]
    self.SequenceLabel = ''
[ "def", "__init__", "(", "self", ",", "BrokerID", "=", "''", ",", "Content", "=", "''", ",", "SequenceLabel", "=", "''", ")", ":", "self", ".", "BrokerID", "=", "''", "#经纪公司代码, char[11]", "self", ".", "Content", "=", "''", "#消息正文, char[501]", "self", ".",...
https://github.com/lovelylain/pyctp/blob/fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d/futures2/ctp/ApiStruct.py#L4201-L4204
tlsfuzzer/tlslite-ng
8720db53067ba4f7bb7b5a32d682033d8b5446f9
tlslite/utils/dsakey.py
python
DSAKey.hashAndVerify
(self, signature, data, hAlg="sha1")
Hash and verify the passed-in bytes with signature. :type signature: ASN1 bytearray :param signature: the r, s dsa signature :type data: str :param data: The data which will be hashed and verified. :type hAlg: str :param hAlg: The hash algorithm that will be used to hash data :rtype: bool :returns: return True if verification is OK.
Hash and verify the passed-in bytes with signature.
[ "Hash", "and", "verify", "the", "passed", "-", "in", "bytes", "with", "signature", "." ]
def hashAndVerify(self, signature, data, hAlg="sha1"):
    """Hash and verify the passed-in bytes with signature.

    :type signature: ASN1 bytearray
    :param signature: the r, s dsa signature

    :type data: str
    :param data: The data which will be hashed and verified.

    :type hAlg: str
    :param hAlg: The hash algorithm that will be used to hash data

    :rtype: bool
    :returns: return True if verification is OK.
    """
    raise NotImplementedError()
[ "def", "hashAndVerify", "(", "self", ",", "signature", ",", "data", ",", "hAlg", "=", "\"sha1\"", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/tlsfuzzer/tlslite-ng/blob/8720db53067ba4f7bb7b5a32d682033d8b5446f9/tlslite/utils/dsakey.py#L66-L81
aws-samples/aws-kube-codesuite
ab4e5ce45416b83bffb947ab8d234df5437f4fca
src/kubernetes/client/models/v1_status_details.py
python
V1StatusDetails.kind
(self, kind)
Sets the kind of this V1StatusDetails. The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds :param kind: The kind of this V1StatusDetails. :type: str
Sets the kind of this V1StatusDetails. The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
[ "Sets", "the", "kind", "of", "this", "V1StatusDetails", ".", "The", "kind", "attribute", "of", "the", "resource", "associated", "with", "the", "status", "StatusReason", ".", "On", "some", "operations", "may", "differ", "from", "the", "requested", "resource", "...
def kind(self, kind):
    """
    Sets the kind of this V1StatusDetails.
    The kind attribute of the resource associated with the status StatusReason.
    On some operations may differ from the requested resource Kind.
    More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds

    :param kind: The kind of this V1StatusDetails.
    :type: str
    """
    self._kind = kind
[ "def", "kind", "(", "self", ",", "kind", ")", ":", "self", ".", "_kind", "=", "kind" ]
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/kubernetes/client/models/v1_status_details.py#L116-L125
czhu95/ternarynet
1a67251f7f5a1cdf854f87f90f841655c7c9f11c
tensorpack/predict/concurrency.py
python
MultiThreadAsyncPredictor.__init__
(self, predictors, batch_size=5)
:param predictors: a list of OnlinePredictor
:param predictors: a list of OnlinePredictor
[ ":", "param", "predictors", ":", "a", "list", "of", "OnlinePredictor" ]
def __init__(self, predictors, batch_size=5):
    """ :param predictors: a list of OnlinePredictor"""
    assert len(predictors)
    for k in predictors:
        #assert isinstance(k, OnlinePredictor), type(k)
        # TODO use predictors.return_input here
        assert k.return_input == False
    self.input_queue = queue.Queue(maxsize=len(predictors)*100)
    self.threads = [
        PredictorWorkerThread(
            self.input_queue, f, id, batch_size=batch_size)
        for id, f in enumerate(predictors)]

    if six.PY2:
        # TODO XXX set logging here to avoid affecting TF logging
        import tornado.options as options
        options.parse_command_line(['--logging=debug'])
[ "def", "__init__", "(", "self", ",", "predictors", ",", "batch_size", "=", "5", ")", ":", "assert", "len", "(", "predictors", ")", "for", "k", "in", "predictors", ":", "#assert isinstance(k, OnlinePredictor), type(k)", "# TODO use predictors.return_input here", "asser...
https://github.com/czhu95/ternarynet/blob/1a67251f7f5a1cdf854f87f90f841655c7c9f11c/tensorpack/predict/concurrency.py#L128-L144
getalp/Flaubert
ded1cf89820a22dbf885c85ba3dccc8ab360681b
xlm/trainer.py
python
Trainer.save_periodic
(self)
Save the models periodically.
Save the models periodically.
[ "Save", "the", "models", "periodically", "." ]
def save_periodic(self):
    """
    Save the models periodically.
    """
    if not self.params.is_master:
        return
    if self.params.save_periodic > 0 and self.epoch % self.params.save_periodic == 0:
        self.save_checkpoint('periodic-%i' % self.epoch, include_optimizers=False)
[ "def", "save_periodic", "(", "self", ")", ":", "if", "not", "self", ".", "params", ".", "is_master", ":", "return", "if", "self", ".", "params", ".", "save_periodic", ">", "0", "and", "self", ".", "epoch", "%", "self", ".", "params", ".", "save_periodi...
https://github.com/getalp/Flaubert/blob/ded1cf89820a22dbf885c85ba3dccc8ab360681b/xlm/trainer.py#L620-L627
xtiankisutsa/MARA_Framework
ac4ac88bfd38f33ae8780a606ed09ab97177c562
tools/lobotomy/core/include/androguard/androguard/patch/zipfile.py
python
ZipFile.infolist
(self)
return self.filelist
Return a list of class ZipInfo instances for files in the archive.
Return a list of class ZipInfo instances for files in the archive.
[ "Return", "a", "list", "of", "class", "ZipInfo", "instances", "for", "files", "in", "the", "archive", "." ]
def infolist(self):
    """Return a list of class ZipInfo instances for files in the
    archive."""
    return self.filelist
[ "def", "infolist", "(", "self", ")", ":", "return", "self", ".", "filelist" ]
https://github.com/xtiankisutsa/MARA_Framework/blob/ac4ac88bfd38f33ae8780a606ed09ab97177c562/tools/lobotomy/core/include/androguard/androguard/patch/zipfile.py#L825-L828
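The same method exists on the standard library's zipfile.ZipFile, so a typical call looks like this (the archive name is a placeholder):

    import zipfile

    with zipfile.ZipFile("archive.zip") as zf:
        for info in zf.infolist():
            # Each ZipInfo carries per-member metadata.
            print(info.filename, info.file_size)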
BlackLight/platypush
a6b552504e2ac327c94f3a28b607061b6b60cf36
platypush/plugins/otp/__init__.py
python
OtpPlugin.refresh_secret
(self, secret_path: Optional[str] = None)
return secret
Refresh the secret token for key generation given a secret path. :param secret_path: Secret path to refresh (default: default configured path).
Refresh the secret token for key generation given a secret path.
[ "Refresh", "the", "secret", "token", "for", "key", "generation", "given", "a", "secret", "path", "." ]
def refresh_secret(self, secret_path: Optional[str] = None) -> Response:
    """
    Refresh the secret token for key generation given a secret path.

    :param secret_path: Secret path to refresh (default: default configured path).
    """
    secret_path = secret_path or self.secret_path
    assert secret_path, 'No secret_path configured'
    os.makedirs(os.path.dirname(os.path.abspath(os.path.expanduser(secret_path))), exist_ok=True)

    secret = pyotp.random_base32()
    with open(secret_path, 'w') as f:
        f.writelines([secret])  # lgtm [py/clear-text-storage-sensitive-data]

    os.chmod(secret_path, 0o600)
    return secret
[ "def", "refresh_secret", "(", "self", ",", "secret_path", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Response", ":", "secret_path", "=", "secret_path", "or", "self", ".", "secret_path", "assert", "secret_path", ",", "'No secret_path configured'",...
https://github.com/BlackLight/platypush/blob/a6b552504e2ac327c94f3a28b607061b6b60cf36/platypush/plugins/otp/__init__.py#L70-L85
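For context, the secret written above is a base32 seed of the kind pyotp consumes; a minimal pyotp sketch of how such a secret is used (not platypush-specific):

    import pyotp

    secret = pyotp.random_base32()  # same call the plugin uses
    totp = pyotp.TOTP(secret)
    print(totp.now())               # current one-time password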
arrayfire/arrayfire-python
96fa9768ee02e5fb5ffcaf3d1f744c898b141637
arrayfire/array.py
python
Array.__repr__
(self)
return self._get_metadata_str()
Displays the meta data of the arrayfire array. Note ---- You can use af.display(a, pres) to display the contents of the array.
Displays the meta data of the arrayfire array.
[ "Displays", "the", "meta", "data", "of", "the", "arrayfire", "array", "." ]
def __repr__(self):
    """
    Displays the meta data of the arrayfire array.

    Note
    ----
    You can use af.display(a, pres) to display the contents of the array.
    """
    return self._get_metadata_str()
[ "def", "__repr__", "(", "self", ")", ":", "return", "self", ".", "_get_metadata_str", "(", ")" ]
https://github.com/arrayfire/arrayfire-python/blob/96fa9768ee02e5fb5ffcaf3d1f744c898b141637/arrayfire/array.py#L1334-L1343
mkorman90/VolatilityBot
b31ef29e42fa820d2e1afc99e9e7c318174bb591
Utils/agent.py
python
handle_file
()
return json.dumps({'response': response, 'rc': 2})
Execute the sample sent by the manager :return:
Execute the sample sent by the manager :return:
[ "Execute", "the", "sample", "sent", "by", "the", "manager", ":", "return", ":" ]
def handle_file():
    """
    Execute the sample sent by the manager
    :return:
    """
    print('machine {} will handle the sample'.format(agent_instance.vm_name))
    input_json = json.loads(request.get_data().decode('utf-8'))
    response = agent_instance.generate_response(input_json['challenge'])
    print('Got challenge {}, sending response {}'.format(input_json['challenge'], response))
    print(input_json['filename'])
    if input_json['key'] == agent_instance.AGENT_KEY:
        target_file_path = os.path.join(agent_instance.DEST_PATH, input_json['filename'] + '.exe')
        print('Saving file to {}'.format(target_file_path))
        with open(target_file_path, 'wb+') as f:
            f.write(base64.b64decode(input_json['file_blob']))

        # Verify file hash:
        if hashlib.sha256(open(target_file_path, 'rb').read()).hexdigest() == input_json['sha256']:
            # Execute the file
            print('Now executing {}'.format(target_file_path))
            cmd = [target_file_path]
            p = Popen(cmd, shell=False, stdin=None, stdout=None, stderr=None,
                      close_fds=True, creationflags=DETACHED_PROCESS)
            # subprocess.call([target_file_path])
            return json.dumps({'response': response, 'rc': 0})
    return json.dumps({'response': response, 'rc': 2})
[ "def", "handle_file", "(", ")", ":", "print", "(", "'machine {} will handle the sample'", ".", "format", "(", "agent_instance", ".", "vm_name", ")", ")", "input_json", "=", "json", ".", "loads", "(", "request", ".", "get_data", "(", ")", ".", "decode", "(", ...
https://github.com/mkorman90/VolatilityBot/blob/b31ef29e42fa820d2e1afc99e9e7c318174bb591/Utils/agent.py#L36-L63
cherrypy/cherrypy
a7983fe61f7237f2354915437b04295694100372
cherrypy/lib/cptools.py
python
log_traceback
(severity=logging.ERROR, debug=False)
Write the last error's traceback to the cherrypy error log.
Write the last error's traceback to the cherrypy error log.
[ "Write", "the", "last", "error", "s", "traceback", "to", "the", "cherrypy", "error", "log", "." ]
def log_traceback(severity=logging.ERROR, debug=False):
    """Write the last error's traceback to the cherrypy error log."""
    cherrypy.log('', 'HTTP', severity=severity, traceback=True)
[ "def", "log_traceback", "(", "severity", "=", "logging", ".", "ERROR", ",", "debug", "=", "False", ")", ":", "cherrypy", ".", "log", "(", "''", ",", "'HTTP'", ",", "severity", "=", "severity", ",", "traceback", "=", "True", ")" ]
https://github.com/cherrypy/cherrypy/blob/a7983fe61f7237f2354915437b04295694100372/cherrypy/lib/cptools.py#L423-L425
plotly/plotly.py
cfad7862594b35965c0e000813bd7805e8494a5b
packages/python/plotly/plotly/graph_objs/_sunburst.py
python
Sunburst.visible
(self)
return self["visible"]
Determines whether or not this trace is visible. If "legendonly", the trace is not drawn, but can appear as a legend item (provided that the legend itself is visible). The 'visible' property is an enumeration that may be specified as: - One of the following enumeration values: [True, False, 'legendonly'] Returns ------- Any
Determines whether or not this trace is visible. If "legendonly", the trace is not drawn, but can appear as a legend item (provided that the legend itself is visible). The 'visible' property is an enumeration that may be specified as: - One of the following enumeration values: [True, False, 'legendonly']
[ "Determines", "whether", "or", "not", "this", "trace", "is", "visible", ".", "If", "legendonly", "the", "trace", "is", "not", "drawn", "but", "can", "appear", "as", "a", "legend", "item", "(", "provided", "that", "the", "legend", "itself", "is", "visible",...
def visible(self):
    """
    Determines whether or not this trace is visible. If
    "legendonly", the trace is not drawn, but can appear as a
    legend item (provided that the legend itself is visible).

    The 'visible' property is an enumeration that may be specified as:
      - One of the following enumeration values:
            [True, False, 'legendonly']

    Returns
    -------
    Any
    """
    return self["visible"]
[ "def", "visible", "(", "self", ")", ":", "return", "self", "[", "\"visible\"", "]" ]
https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/graph_objs/_sunburst.py#L1392-L1406
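A short usage sketch of the property documented above, based on the enumeration it accepts (the figure contents are illustrative):

    import plotly.graph_objects as go

    fig = go.Figure(go.Sunburst(labels=["root", "leaf"], parents=["", "root"]))
    # 'legendonly' keeps the legend entry but does not draw the trace.
    fig.data[0].visible = "legendonly"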
MrGiovanni/UNetPlusPlus
e145ba63862982bf1099cf2ec11d5466b434ae0b
pytorch/nnunet/preprocessing/preprocessing.py
python
GenericPreprocessor.__init__
(self, normalization_scheme_per_modality, use_nonzero_mask, transpose_forward: (tuple, list), intensityproperties=None)
:param normalization_scheme_per_modality: dict {0:'nonCT'} :param use_nonzero_mask: {0:False} :param intensityproperties:
[]
def __init__(self, normalization_scheme_per_modality, use_nonzero_mask,
             transpose_forward: (tuple, list), intensityproperties=None):
    """
    :param normalization_scheme_per_modality: dict {0:'nonCT'}
    :param use_nonzero_mask: {0:False}
    :param intensityproperties:
    """
    self.transpose_forward = transpose_forward
    self.intensityproperties = intensityproperties
    self.normalization_scheme_per_modality = normalization_scheme_per_modality
    self.use_nonzero_mask = use_nonzero_mask

    self.resample_separate_z_anisotropy_threshold = RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD
[ "def", "__init__", "(", "self", ",", "normalization_scheme_per_modality", ",", "use_nonzero_mask", ",", "transpose_forward", ":", "(", "tuple", ",", "list", ")", ",", "intensityproperties", "=", "None", ")", ":", "self", ".", "transpose_forward", "=", "transpose_f...
https://github.com/MrGiovanni/UNetPlusPlus/blob/e145ba63862982bf1099cf2ec11d5466b434ae0b/pytorch/nnunet/preprocessing/preprocessing.py#L202-L214
CGCookie/retopoflow
3d8b3a47d1d661f99ab0aeb21d31370bf15de35e
addon_common/common/ui_utilities.py
python
UI_Element_Utils.add_option_callback
(option)
return wrapper
[]
def add_option_callback(option):
    def wrapper(fn):
        def wrapped(self, *args, **kwargs):
            ret = fn(self, *args, **kwargs)
            return ret
        UI_Element_Utils._option_callbacks[option] = wrapped
        return wrapped
    return wrapper
[ "def", "add_option_callback", "(", "option", ")", ":", "def", "wrapper", "(", "fn", ")", ":", "def", "wrapped", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "fn", "(", "self", ",", "*", "args", ",", "*", "*", "k...
https://github.com/CGCookie/retopoflow/blob/3d8b3a47d1d661f99ab0aeb21d31370bf15de35e/addon_common/common/ui_utilities.py#L122-L129
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/logging/__init__.py
python
LoggerAdapter.getEffectiveLevel
(self)
return self.logger.getEffectiveLevel()
Get the effective level for the underlying logger.
Get the effective level for the underlying logger.
[ "Get", "the", "effective", "level", "for", "the", "underlying", "logger", "." ]
def getEffectiveLevel(self):
    """
    Get the effective level for the underlying logger.
    """
    return self.logger.getEffectiveLevel()
[ "def", "getEffectiveLevel", "(", "self", ")", ":", "return", "self", ".", "logger", ".", "getEffectiveLevel", "(", ")" ]
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/logging/__init__.py#L1614-L1618
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
packages/source/cinder/cinder/openstack/common/rpc/impl_kombu.py
python
Connection.topic_send
(self, topic, msg, timeout=None)
Send a 'topic' message
Send a 'topic' message
[ "Send", "a", "topic", "message" ]
def topic_send(self, topic, msg, timeout=None):
    """Send a 'topic' message"""
    self.publisher_send(TopicPublisher, topic, msg, timeout)
[ "def", "topic_send", "(", "self", ",", "topic", ",", "msg", ",", "timeout", "=", "None", ")", ":", "self", ".", "publisher_send", "(", "TopicPublisher", ",", "topic", ",", "msg", ",", "timeout", ")" ]
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/cinder/cinder/openstack/common/rpc/impl_kombu.py#L703-L705
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/ftplib.py
python
parse229
(resp, peer)
return host, port
Parse the '229' response for a EPSV request. Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.
Parse the '229' response for a EPSV request. Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.
[ "Parse", "the", "229", "response", "for", "a", "EPSV", "request", ".", "Raises", "error_proto", "if", "it", "does", "not", "contain", "(", "|||port|", ")", "Return", "(", "host", ".", "addr", ".", "as", ".", "numbers", "port#", ")", "tuple", "." ]
def parse229(resp, peer):
    '''Parse the '229' response for a EPSV request.
    Raises error_proto if it does not contain '(|||port|)'
    Return ('host.addr.as.numbers', port#) tuple.'''

    if resp[:3] != '229':
        raise error_reply, resp
    left = resp.find('(')
    if left < 0:
        raise error_proto, resp
    right = resp.find(')', left + 1)
    if right < 0:
        raise error_proto, resp  # should contain '(|||port|)'
    if resp[left + 1] != resp[right - 1]:
        raise error_proto, resp
    parts = resp[left + 1:right].split(resp[left+1])
    if len(parts) != 5:
        raise error_proto, resp
    host = peer[0]
    port = int(parts[3])
    return host, port
[ "def", "parse229", "(", "resp", ",", "peer", ")", ":", "if", "resp", "[", ":", "3", "]", "!=", "'229'", ":", "raise", "error_reply", ",", "resp", "left", "=", "resp", ".", "find", "(", "'('", ")", "if", "left", "<", "0", ":", "raise", "error_prot...
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/ftplib.py#L804-L823
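A worked example of the parse, assuming an RFC 2428-style reply (the addresses are illustrative): the delimiter is whatever character follows '(' (here '|'), the text inside the parentheses must split into exactly five parts, and the host comes from the control-connection peer rather than the reply itself.

    >>> parse229('229 Entering Extended Passive Mode (|||6446|)', ('192.0.2.10', 21))
    ('192.0.2.10', 6446)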
kovidgoyal/calibre
2b41671370f2a9eb1109b9ae901ccf915f1bd0c8
src/calibre/srv/ajax.py
python
books_in
(ctx, rd, encoded_category, encoded_item, library_id)
Return the books (as list of ids) present in the specified category. Optional: ?num=100&offset=0&sort=title&sort_order=asc&get_additional_fields=
Return the books (as list of ids) present in the specified category.
[ "Return", "the", "books", "(", "as", "list", "of", "ids", ")", "present", "in", "the", "specified", "category", "." ]
def books_in(ctx, rd, encoded_category, encoded_item, library_id):
    '''
    Return the books (as list of ids) present in the specified category.

    Optional: ?num=100&offset=0&sort=title&sort_order=asc&get_additional_fields=
    '''
    db = get_db(ctx, rd, library_id)
    with db.safe_read_lock:
        try:
            dname, ditem = map(decode_name, (encoded_category, encoded_item))
        except:
            raise HTTPNotFound(f'Invalid encoded param: {encoded_category!r} ({encoded_item!r})')
        num, offset = get_pagination(rd.query)
        sort, sort_order = rd.query.get('sort', 'title'), rd.query.get('sort_order')
        sort_order = ensure_val(sort_order, 'asc', 'desc')
        sfield = sanitize_sort_field_name(db.field_metadata, sort)
        if sfield not in db.field_metadata.sortable_field_keys():
            raise HTTPNotFound('%s is not a valid sort field'%sort)

        if dname in ('allbooks', 'newest'):
            ids = ctx.allowed_book_ids(rd, db)
        elif dname == 'search':
            try:
                ids = ctx.search(rd, db, 'search:"%s"'%ditem)
            except Exception:
                raise HTTPNotFound('Search: %r not understood'%ditem)
        else:
            try:
                cid = int(ditem)
            except Exception:
                raise HTTPNotFound('Category id %r not an integer'%ditem)

            if dname == 'news':
                dname = 'tags'
            ids = db.get_books_for_category(dname, cid) & ctx.allowed_book_ids(rd, db)

        ids = db.multisort(fields=[(sfield, sort_order == 'asc')], ids_to_sort=ids)
        total_num = len(ids)
        ids = ids[offset:offset+num]
        result = {
            'total_num': total_num, 'sort_order':sort_order,
            'offset':offset, 'num':len(ids), 'sort':sort,
            'base_url':ctx.url_for(books_in, encoded_category=encoded_category,
                                   encoded_item=encoded_item, library_id=db.server_library_id),
            'book_ids':ids
        }

        get_additional_fields = rd.query.get('get_additional_fields')
        if get_additional_fields:
            additional_fields = {}
            for field in get_additional_fields.split(','):
                field = field.strip()
                if field:
                    flist = additional_fields[field] = []
                    for id_ in ids:
                        flist.append(db.field_for(field, id_, default_value=None))
            if additional_fields:
                result['additional_fields'] = additional_fields
        return result
[ "def", "books_in", "(", "ctx", ",", "rd", ",", "encoded_category", ",", "encoded_item", ",", "library_id", ")", ":", "db", "=", "get_db", "(", "ctx", ",", "rd", ",", "library_id", ")", "with", "db", ".", "safe_read_lock", ":", "try", ":", "dname", ",",...
https://github.com/kovidgoyal/calibre/blob/2b41671370f2a9eb1109b9ae901ccf915f1bd0c8/src/calibre/srv/ajax.py#L465-L523
khalim19/gimp-plugin-export-layers
b37255f2957ad322f4d332689052351cdea6e563
export_layers/pygimplib/progress.py
python
ProgressUpdater.__init__
(self, progress_bar, num_total_tasks=0)
[]
def __init__(self, progress_bar, num_total_tasks=0):
    self.progress_bar = progress_bar
    self.num_total_tasks = num_total_tasks

    self._num_finished_tasks = 0
[ "def", "__init__", "(", "self", ",", "progress_bar", ",", "num_total_tasks", "=", "0", ")", ":", "self", ".", "progress_bar", "=", "progress_bar", "self", ".", "num_total_tasks", "=", "num_total_tasks", "self", ".", "_num_finished_tasks", "=", "0" ]
https://github.com/khalim19/gimp-plugin-export-layers/blob/b37255f2957ad322f4d332689052351cdea6e563/export_layers/pygimplib/progress.py#L44-L48
idiap/fast-transformers
f22c13716fc748bb21a7b226ada7f7b5f87f867f
fast_transformers/events/event_dispatcher.py
python
EventDispatcher.remove
(self, event_handler)
Remove the event_handler from the listeners so that no more events are dispatched to this handler.
Remove the event_handler from the listeners so that no more events are dispatched to this handler.
[ "Remove", "the", "event_handler", "from", "the", "listeners", "so", "that", "no", "more", "events", "are", "dispatched", "to", "this", "handler", "." ]
def remove(self, event_handler):
    """Remove the event_handler from the listeners so that no more events are
    dispatched to this handler."""
    self._listeners.pop(event_handler, None)
[ "def", "remove", "(", "self", ",", "event_handler", ")", ":", "self", ".", "_listeners", ".", "pop", "(", "event_handler", ",", "None", ")" ]
https://github.com/idiap/fast-transformers/blob/f22c13716fc748bb21a7b226ada7f7b5f87f867f/fast_transformers/events/event_dispatcher.py#L54-L57
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/site-packages/docutils/parsers/rst/directives/__init__.py
python
unchanged_required
(argument)
Return the argument text, unchanged. (Directive option conversion function.) Raise ``ValueError`` if no argument is found.
Return the argument text, unchanged. (Directive option conversion function.)
[ "Return", "the", "argument", "text", "unchanged", ".", "(", "Directive", "option", "conversion", "function", ".", ")" ]
def unchanged_required(argument):
    """
    Return the argument text, unchanged.
    (Directive option conversion function.)

    Raise ``ValueError`` if no argument is found.
    """
    if argument is None:
        raise ValueError('argument required but none supplied')
    else:
        return argument
[ "def", "unchanged_required", "(", "argument", ")", ":", "if", "argument", "is", "None", ":", "raise", "ValueError", "(", "'argument required but none supplied'", ")", "else", ":", "return", "argument" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/docutils/parsers/rst/directives/__init__.py#L153-L163
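A behavior sketch, following directly from the code above:

    >>> unchanged_required('above')
    'above'
    >>> unchanged_required(None)
    Traceback (most recent call last):
        ...
    ValueError: argument required but none supplied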
andresriancho/w3af
cd22e5252243a87aaa6d0ddea47cf58dacfe00a9
w3af/plugins/attack/xpath.py
python
XPathReader.getxml
(self)
This method executes a command in the remote operating system by exploiting the vulnerability. :param command: The command to handle ( ie. "ls", "whoami", etc ). :return: The result of the command.
This method executes a command in the remote operating system by exploiting the vulnerability.
[ "This", "method", "executes", "a", "command", "in", "the", "remote", "operating", "system", "by", "exploiting", "the", "vulnerability", "." ]
def getxml(self):
    """
    This method executes a command in the remote operating system by
    exploiting the vulnerability.

    :param command: The command to handle ( ie. "ls", "whoami", etc ).
    :return: The result of the command.
    """
    try:
        data_len = self._get_data_len()
    except BaseFrameworkException, e:
        return 'Error found during data length extraction: "%s"' % e

    if data_len is not None:
        try:
            data = self.get_data(data_len)
        except BaseFrameworkException, e:
            return 'Error found during data extraction: "%s"' % e
        else:
            return data
[ "def", "getxml", "(", "self", ")", ":", "try", ":", "data_len", "=", "self", ".", "_get_data_len", "(", ")", "except", "BaseFrameworkException", ",", "e", ":", "return", "'Error found during data length extraction: \"%s\"'", "%", "e", "if", "data_len", "is", "no...
https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/plugins/attack/xpath.py#L307-L326
VLSIDA/OpenRAM
f66aac3264598eeae31225c62b6a4af52412d407
compiler/sram/sram_base.py
python
sram_base.create_control_logic
(self)
return insts
Add control logic instances
Add control logic instances
[ "Add", "control", "logic", "instances" ]
def create_control_logic(self):
    """ Add control logic instances """

    insts = []
    for port in self.all_ports:
        if port in self.readwrite_ports:
            mod = self.control_logic_rw
        elif port in self.write_ports:
            mod = self.control_logic_w
        else:
            mod = self.control_logic_r

        insts.append(self.add_inst(name="control{}".format(port), mod=mod))

        # Inputs
        temp = ["csb{}".format(port)]
        if port in self.readwrite_ports:
            temp.append("web{}".format(port))
        temp.append("clk{}".format(port))
        temp.append("rbl_bl{}".format(port))

        # Outputs
        if port in self.read_ports:
            temp.append("s_en{}".format(port))
        if port in self.write_ports:
            temp.append("w_en{}".format(port))
        temp.append("p_en_bar{}".format(port))
        temp.extend(["wl_en{}".format(port), "clk_buf{}".format(port)] + self.ext_supplies)
        self.connect_inst(temp)

    return insts
[ "def", "create_control_logic", "(", "self", ")", ":", "insts", "=", "[", "]", "for", "port", "in", "self", ".", "all_ports", ":", "if", "port", "in", "self", ".", "readwrite_ports", ":", "mod", "=", "self", ".", "control_logic_rw", "elif", "port", "in", ...
https://github.com/VLSIDA/OpenRAM/blob/f66aac3264598eeae31225c62b6a4af52412d407/compiler/sram/sram_base.py#L712-L742
wummel/patool
723006abd43d0926581b11df0cd37e46a30525eb
patoolib/__init__.py
python
find_archive_program
(format, command, program=None, password=None)
Find suitable archive program for given format and mode.
Find suitable archive program for given format and mode.
[ "Find", "suitable", "archive", "program", "for", "given", "format", "and", "mode", "." ]
def find_archive_program (format, command, program=None, password=None):
    """Find suitable archive program for given format and mode."""
    commands = ArchivePrograms[format]
    programs = []
    if program is not None:
        # try a specific program first
        programs.append(program)
    # first try the universal programs with key None
    for key in (None, command):
        if key in commands:
            programs.extend(commands[key])
    if password is not None:
        programs = _remove_command_without_password_support(programs, format, command)
    if not programs:
        raise util.PatoolError("%s archive format `%s' is not supported" % (command, format))
    # return the first existing program
    for program in programs:
        if program.startswith('py_'):
            # it's a Python module and therefore always supported
            return program
        exe = util.find_program(program)
        if exe:
            if program == '7z' and format == 'rar' and not util.p7zip_supports_rar():
                continue
            return exe
    # no programs found
    raise util.PatoolError("could not find an executable program to %s format %s; candidates are (%s)," % (command, format, ",".join(programs)))
[ "def", "find_archive_program", "(", "format", ",", "command", ",", "program", "=", "None", ",", "password", "=", "None", ")", ":", "commands", "=", "ArchivePrograms", "[", "format", "]", "programs", "=", "[", "]", "if", "program", "is", "not", "None", ":...
https://github.com/wummel/patool/blob/723006abd43d0926581b11df0cd37e46a30525eb/patoolib/__init__.py#L367-L393
LexPredict/lexpredict-contraxsuite
1d5a2540d31f8f3f1adc442cfa13a7c007319899
contraxsuite_services/apps/common/mixins.py
python
CustomCountQuerySet.get_count_custom_sql
(self)
return row[0]
Perform a COUNT() query using custom SQL
Perform a COUNT() query using custom SQL
[ "Perform", "a", "COUNT", "()", "query", "using", "custom", "SQL" ]
def get_count_custom_sql(self):
    """
    Perform a COUNT() query using custom SQL
    """
    #if self._result_cache is not None:
    #    return len(self._result_cache)
    with connection.cursor() as cursor:
        cursor.execute(self.optional_count_query)
        row = cursor.fetchone()
    return row[0]
[ "def", "get_count_custom_sql", "(", "self", ")", ":", "#if self._result_cache is not None:", "# return len(self._result_cache)", "with", "connection", ".", "cursor", "(", ")", "as", "cursor", ":", "cursor", ".", "execute", "(", "self", ".", "optional_count_query", ...
https://github.com/LexPredict/lexpredict-contraxsuite/blob/1d5a2540d31f8f3f1adc442cfa13a7c007319899/contraxsuite_services/apps/common/mixins.py#L1361-L1371
pypa/pip
7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4
src/pip/_internal/network/download.py
python
_get_http_response_filename
(resp: Response, link: Link)
return filename
Get an ideal filename from the given HTTP response, falling back to the link filename if not provided.
Get an ideal filename from the given HTTP response, falling back to the link filename if not provided.
[ "Get", "an", "ideal", "filename", "from", "the", "given", "HTTP", "response", "falling", "back", "to", "the", "link", "filename", "if", "not", "provided", "." ]
def _get_http_response_filename(resp: Response, link: Link) -> str:
    """Get an ideal filename from the given HTTP response, falling back to
    the link filename if not provided.
    """
    filename = link.filename  # fallback
    # Have a look at the Content-Disposition header for a better guess
    content_disposition = resp.headers.get("content-disposition")
    if content_disposition:
        filename = parse_content_disposition(content_disposition, filename)
    ext: Optional[str] = splitext(filename)[1]
    if not ext:
        ext = mimetypes.guess_extension(resp.headers.get("content-type", ""))
        if ext:
            filename += ext
    if not ext and link.url != resp.url:
        ext = os.path.splitext(resp.url)[1]
        if ext:
            filename += ext
    return filename
[ "def", "_get_http_response_filename", "(", "resp", ":", "Response", ",", "link", ":", "Link", ")", "->", "str", ":", "filename", "=", "link", ".", "filename", "# fallback", "# Have a look at the Content-Disposition header for a better guess", "content_disposition", "=", ...
https://github.com/pypa/pip/blob/7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4/src/pip/_internal/network/download.py#L93-L111
apigee/henchman
13c53c66669800aaa89f1799ac974b45ec473c3d
modules/curl/curl/requests/requests/structures.py
python
LookupDict.__repr__
(self)
return '<lookup \'%s\'>' % (self.name)
[]
def __repr__(self):
    return '<lookup \'%s\'>' % (self.name)
[ "def", "__repr__", "(", "self", ")", ":", "return", "'<lookup \\'%s\\'>'", "%", "(", "self", ".", "name", ")" ]
https://github.com/apigee/henchman/blob/13c53c66669800aaa89f1799ac974b45ec473c3d/modules/curl/curl/requests/requests/structures.py#L95-L96
Juniper/py-junos-eznc
fd81d476e37ac1a234b503ab77f76ec658d04590
lib/jnpr/junos/utils/start_shell.py
python
StartShell.close
(self)
Close the SSH client channel
Close the SSH client channel
[ "Close", "the", "SSH", "client", "channel" ]
def close(self):
    """Close the SSH client channel"""
    if self.ON_JUNOS is True:
        self._chan.terminate()
    else:
        self._chan.close()
        self._client.close()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "ON_JUNOS", "is", "True", ":", "self", ".", "_chan", ".", "terminate", "(", ")", "else", ":", "self", ".", "_chan", ".", "close", "(", ")", "self", ".", "_client", ".", "close", "(", ")" ]
https://github.com/Juniper/py-junos-eznc/blob/fd81d476e37ac1a234b503ab77f76ec658d04590/lib/jnpr/junos/utils/start_shell.py#L117-L124
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/benchmarks/src/benchmarks/sympy/sympy/functions/elementary/exponential.py
python
ExpBase._eval_is_bounded
(self)
[]
def _eval_is_bounded(self):
    arg = self.args[0]
    if arg.is_unbounded:
        if arg.is_negative:
            return True
        if arg.is_positive:
            return False
    if arg.is_bounded:
        return True
[ "def", "_eval_is_bounded", "(", "self", ")", ":", "arg", "=", "self", ".", "args", "[", "0", "]", "if", "arg", ".", "is_unbounded", ":", "if", "arg", ".", "is_negative", ":", "return", "True", "if", "arg", ".", "is_positive", ":", "return", "False", ...
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/sympy/sympy/functions/elementary/exponential.py#L77-L85
cloudera/impyla
0c736af4cad2bade9b8e313badc08ec50e81c948
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
python
get_partition_names_ps_result.read
(self, iprot)
[]
def read(self, iprot):
    if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
        iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
        return
    iprot.readStructBegin()
    while True:
        (fname, ftype, fid) = iprot.readFieldBegin()
        if ftype == TType.STOP:
            break
        if fid == 0:
            if ftype == TType.LIST:
                self.success = []
                (_etype876, _size873) = iprot.readListBegin()
                for _i877 in range(_size873):
                    _elem878 = iprot.readString()
                    self.success.append(_elem878)
                iprot.readListEnd()
            else:
                iprot.skip(ftype)
        elif fid == 1:
            if ftype == TType.STRUCT:
                self.o1 = MetaException()
                self.o1.read(iprot)
            else:
                iprot.skip(ftype)
        elif fid == 2:
            if ftype == TType.STRUCT:
                self.o2 = NoSuchObjectException()
                self.o2.read(iprot)
            else:
                iprot.skip(ftype)
        else:
            iprot.skip(ftype)
        iprot.readFieldEnd()
    iprot.readStructEnd()
[ "def", "read", "(", "self", ",", "iprot", ")", ":", "if", "iprot", ".", "_fast_decode", "is", "not", "None", "and", "isinstance", "(", "iprot", ".", "trans", ",", "TTransport", ".", "CReadableTransport", ")", "and", "self", ".", "thrift_spec", "is", "not...
https://github.com/cloudera/impyla/blob/0c736af4cad2bade9b8e313badc08ec50e81c948/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L21337-L21371
nate-parrott/Flashlight
c3a7c7278a1cccf8918e7543faffc68e863ff5ab
flashlightplugins/bs4/element.py
python
Tag.clear
(self, decompose=False)
Extract all children. If decompose is True, decompose instead.
Extract all children. If decompose is True, decompose instead.
[ "Extract", "all", "children", ".", "If", "decompose", "is", "True", "decompose", "instead", "." ]
def clear(self, decompose=False):
    """
    Extract all children. If decompose is True, decompose instead.
    """
    if decompose:
        for element in self.contents[:]:
            if isinstance(element, Tag):
                element.decompose()
            else:
                element.extract()
    else:
        for element in self.contents[:]:
            element.extract()
[ "def", "clear", "(", "self", ",", "decompose", "=", "False", ")", ":", "if", "decompose", ":", "for", "element", "in", "self", ".", "contents", "[", ":", "]", ":", "if", "isinstance", "(", "element", ",", "Tag", ")", ":", "element", ".", "decompose",...
https://github.com/nate-parrott/Flashlight/blob/c3a7c7278a1cccf8918e7543faffc68e863ff5ab/flashlightplugins/bs4/element.py#L840-L852
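A short usage sketch with the public BeautifulSoup API (the markup is illustrative):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<div><p>hi</p><p>bye</p></div>", "html.parser")
    soup.div.clear()   # extracts both <p> children
    print(soup)        # <div></div>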
realpython/book2-exercises
cde325eac8e6d8cff2316601c2e5b36bb46af7d0
web2py-rest/gluon/cache.py
python
CacheAbstract.__init__
(self, request=None)
Initializes the object Args: request: the global request object
Initializes the object
[ "Initializes", "the", "object" ]
def __init__(self, request=None):
    """Initializes the object

    Args:
        request: the global request object
    """
    raise NotImplementedError
[ "def", "__init__", "(", "self", ",", "request", "=", "None", ")", ":", "raise", "NotImplementedError" ]
https://github.com/realpython/book2-exercises/blob/cde325eac8e6d8cff2316601c2e5b36bb46af7d0/web2py-rest/gluon/cache.py#L107-L113
svip-lab/impersonator
b041dd415157c1e7f5b46e579a1ad4dffabb2e66
thirdparty/his_evaluators/his_evaluators/protocols/iPER.py
python
IPERProtocol.get_kps_path
(self, name)
return smpl_path
Args: name (str): such as `001/9/1`. Returns: kps_path (str):
[]
def get_kps_path(self, name):
    """
    Args:
        name (str): such as `001/9/1`.

    Returns:
        kps_path (str):
    """
    smpl_path = os.path.join(self.data_dir, self.smpls_folder, name, "kps.pkl")
    return smpl_path
[ "def", "get_kps_path", "(", "self", ",", "name", ")", ":", "smpl_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "data_dir", ",", "self", ".", "smpls_folder", ",", "name", ",", "\"kps.pkl\"", ")", "return", "smpl_path" ]
https://github.com/svip-lab/impersonator/blob/b041dd415157c1e7f5b46e579a1ad4dffabb2e66/thirdparty/his_evaluators/his_evaluators/protocols/iPER.py#L232-L242
omaha-consulting/omaha-server
1aa507b51e3656b490f72a3c9d60ee9d085e389e
omaha_server/crash/forms.py
python
SymbolsAdminForm.clean_file
(self)
return file
[]
def clean_file(self):
    file = self.cleaned_data["file"]
    try:
        head = file.readline().rstrip()
        meta = parse_debug_meta_info(head, exception=forms.ValidationError)
        self.cleaned_data.update(meta)
    except:
        raise forms.ValidationError(u"The file contains invalid data.")
    return file
[ "def", "clean_file", "(", "self", ")", ":", "file", "=", "self", ".", "cleaned_data", "[", "\"file\"", "]", "try", ":", "head", "=", "file", ".", "readline", "(", ")", ".", "rstrip", "(", ")", "meta", "=", "parse_debug_meta_info", "(", "head", ",", "...
https://github.com/omaha-consulting/omaha-server/blob/1aa507b51e3656b490f72a3c9d60ee9d085e389e/omaha_server/crash/forms.py#L116-L124
eliben/pyelftools
8f7a0becaface09435c4374947548b7851e3d1a2
elftools/construct/macros.py
python
Embedded
(subcon)
return Reconfig(subcon.name, subcon, subcon.FLAG_EMBED)
embeds a struct into the enclosing struct. * subcon - the struct to embed
embeds a struct into the enclosing struct. * subcon - the struct to embed
[ "embeds", "a", "struct", "into", "the", "enclosing", "struct", ".", "*", "subcon", "-", "the", "struct", "to", "embed" ]
def Embedded(subcon):
    """embeds a struct into the enclosing struct.

    * subcon - the struct to embed
    """
    return Reconfig(subcon.name, subcon, subcon.FLAG_EMBED)
[ "def", "Embedded", "(", "subcon", ")", ":", "return", "Reconfig", "(", "subcon", ".", "name", ",", "subcon", ",", "subcon", ".", "FLAG_EMBED", ")" ]
https://github.com/eliben/pyelftools/blob/8f7a0becaface09435c4374947548b7851e3d1a2/elftools/construct/macros.py#L381-L385
kuri65536/python-for-android
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
python-build/python-libs/xmpppy/xmpp/protocol.py
python
DataField.addValue
(self,val)
Add one more value to this field. Used in 'get' iq's or such.
Add one more value to this field. Used in 'get' iq's or such.
[ "Add", "one", "more", "value", "to", "this", "field", ".", "Used", "in", "get", "iq", "s", "or", "such", "." ]
def addValue(self,val):
    """ Add one more value to this field. Used in 'get' iq's or such."""
    self.addChild('value',{},[val])
[ "def", "addValue", "(", "self", ",", "val", ")", ":", "self", ".", "addChild", "(", "'value'", ",", "{", "}", ",", "[", "val", "]", ")" ]
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-build/python-libs/xmpppy/xmpp/protocol.py#L637-L639
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
frontend/flag_spec.py
python
_FlagSpec.ShortFlag
(self, short_name, arg_type=None, long_name=None, help=None)
This is very similar to ShortFlag for FlagSpecAndMore, except we have separate arity0 and arity1 dicts.
This is very similar to ShortFlag for FlagSpecAndMore, except we have separate arity0 and arity1 dicts.
[ "This", "is", "very", "similar", "to", "ShortFlag", "for", "FlagSpecAndMore", "except", "we", "have", "separate", "arity0", "and", "arity1", "dicts", "." ]
def ShortFlag(self, short_name, arg_type=None, long_name=None, help=None):
    # type: (str, Optional[int], Optional[str], Optional[str]) -> None
    """
    This is very similar to ShortFlag for FlagSpecAndMore, except we have
    separate arity0 and arity1 dicts.
    """
    assert short_name.startswith('-'), short_name
    assert len(short_name) == 2, short_name

    typ = _FlagType(arg_type)
    char = short_name[1]

    # Hack for read -0.  Make it a valid variable name
    if char == '0':
        char = 'Z'

    if arg_type is None:
        self.arity0.append(char)
    else:
        self.arity1[char] = _MakeAction(arg_type, char)

    if long_name is not None:
        name = long_name[2:]  # key for parsing
        if arg_type is None:
            self.actions_long[name] = args.SetToTrue(char)
        else:
            self.actions_long[name] = _MakeAction(arg_type, char)

    self.defaults[char] = _Default(arg_type)
    self.fields[char] = typ
[ "def", "ShortFlag", "(", "self", ",", "short_name", ",", "arg_type", "=", "None", ",", "long_name", "=", "None", ",", "help", "=", "None", ")", ":", "# type: (str, Optional[int], Optional[str], Optional[str]) -> None", "assert", "short_name", ".", "startswith", "(",...
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/frontend/flag_spec.py#L194-L223
cunjian/pytorch_face_landmark
f575be168a24af6f4807c852173fdfedf6d2c67d
vision/ssd/ssd.py
python
SSD.init_from_base_net
(self, model)
[]
def init_from_base_net(self, model):
    self.base_net.load_state_dict(torch.load(model, map_location=lambda storage, loc: storage), strict=True)
    self.source_layer_add_ons.apply(_xavier_init_)
    self.extras.apply(_xavier_init_)
    self.classification_headers.apply(_xavier_init_)
    self.regression_headers.apply(_xavier_init_)
[ "def", "init_from_base_net", "(", "self", ",", "model", ")", ":", "self", ".", "base_net", ".", "load_state_dict", "(", "torch", ".", "load", "(", "model", ",", "map_location", "=", "lambda", "storage", ",", "loc", ":", "storage", ")", ",", "strict", "="...
https://github.com/cunjian/pytorch_face_landmark/blob/f575be168a24af6f4807c852173fdfedf6d2c67d/vision/ssd/ssd.py#L114-L119
analysiscenter/batchflow
294747da0bca309785f925be891441fdd824e9fa
batchflow/models/torch/callbacks/base.py
python
LogCallback.on_iter_end
(self, **kwargs)
Log requested information. Called at the end of :meth:`TorchModel.train`.
Log requested information. Called at the end of :meth:`TorchModel.train`.
[ "Log", "requested", "information", ".", "Called", "at", "the", "end", "of", ":", "meth", ":", "TorchModel", ".", "train", "." ]
def on_iter_end(self, **kwargs): """ Log requested information. Called at the end of :meth:`TorchModel.train`. """ _ = kwargs i = self.model.iteration if i % self.frequency == 0: # Default message: timestamp, iteration and loss value timestamp = strftime('%Y-%m-%d %H:%M:%S', gmtime()) avg_loss = sum(self.model.loss_list[-self.frequency:]) / self.frequency msg = f'{timestamp} {i:5}: {avg_loss:6.6f}' if self.resources: # Monitor resources memory = round(USSMonitor.get_usage(), 2) gpu_memory = GPUMemoryMonitor.get_usage() gpu_utilization = GPUMonitor.get_usage() msg += f' | {memory:6.3f} : ' msg += ' : '.join([f'{round(item, 2):6.3f}' for item in gpu_memory]) + ' : ' msg += ' : '.join([f'{round(item, 2):6.3f}' for item in gpu_utilization]) if self.shapes: # Model in/out shapes in_shapes = self.model.iter_info['actual_model_inputs_shape'] out_shapes = self.model.iter_info['actual_model_outputs_shape'] msg += f' | {in_shapes} : {out_shapes}' if self.microbatch: # Internal microbatch parameters of the last iteration microbatch = self.model.microbatch num_microbatches = self.model.iter_info['steps'] msg += f' | {microbatch} : {num_microbatches}' self.stream(msg)
[ "def", "on_iter_end", "(", "self", ",", "*", "*", "kwargs", ")", ":", "_", "=", "kwargs", "i", "=", "self", ".", "model", ".", "iteration", "if", "i", "%", "self", ".", "frequency", "==", "0", ":", "# Default message: timestamp, iteration and loss value", ...
https://github.com/analysiscenter/batchflow/blob/294747da0bca309785f925be891441fdd824e9fa/batchflow/models/torch/callbacks/base.py#L99-L135
shiweibsw/Translation-Tools
2fbbf902364e557fa7017f9a74a8797b7440c077
venv/Lib/site-packages/pip-9.0.3-py3.6.egg/pip/_vendor/urllib3/packages/ordered_dict.py
python
OrderedDict.__delitem__
(self, key, dict_delitem=dict.__delitem__)
od.__delitem__(y) <==> del od[y]
od.__delitem__(y) <==> del od[y]
[ "od", ".", "__delitem__", "(", "y", ")", "<", "==", ">", "del", "od", "[", "y", "]" ]
def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which is # then removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link_prev, link_next, key = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev
[ "def", "__delitem__", "(", "self", ",", "key", ",", "dict_delitem", "=", "dict", ".", "__delitem__", ")", ":", "# Deleting an existing item uses self.__map to find the link which is", "# then removed by updating the links in the predecessor and successor nodes.", "dict_delitem", "(...
https://github.com/shiweibsw/Translation-Tools/blob/2fbbf902364e557fa7017f9a74a8797b7440c077/venv/Lib/site-packages/pip-9.0.3-py3.6.egg/pip/_vendor/urllib3/packages/ordered_dict.py#L54-L61
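A minimal usage sketch for the record above, assuming the stdlib collections.OrderedDict (which implements the same doubly-linked-list bookkeeping as this vendored copy):

from collections import OrderedDict

od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
del od['b']         # unlinks the node from its predecessor and successor
print(list(od))     # ['a', 'c'] -- remaining insertion order is preserved
od['b'] = 2         # re-inserting appends at the end of the order
print(list(od))     # ['a', 'c', 'b']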
BerkeleyAutomation/dex-net
cccf93319095374b0eefc24b8b6cd40bc23966d2
src/dexnet/grasping/grasp.py
python
VacuumPoint.params_from_configuration
(configuration)
return configuration[0:3], configuration[3:6]
Converts configuration vector into vacuum grasp parameters. Returns ------- center : :obj:`numpy.ndarray` center of grasp in 3D space axis : :obj:`numpy.ndarray` normalized axis of grasp in 3D space
Converts configuration vector into vacuum grasp parameters. Returns ------- center : :obj:`numpy.ndarray` center of grasp in 3D space axis : :obj:`numpy.ndarray` normalized axis of grasp in 3D space
[ "Converts", "configuration", "vector", "into", "vacuum", "grasp", "parameters", ".", "Returns", "-------", "center", ":", ":", "obj", ":", "numpy", ".", "ndarray", "center", "of", "grasp", "in", "3D", "space", "axis", ":", ":", "obj", ":", "numpy", ".", ...
def params_from_configuration(configuration): """ Converts configuration vector into vacuum grasp parameters. Returns ------- center : :obj:`numpy.ndarray` center of grasp in 3D space axis : :obj:`numpy.ndarray` normalized axis of grasp in 3D space """ if not isinstance(configuration, np.ndarray) or configuration.shape[0] != 6: raise ValueError('Configuration must be numpy ndarray of size 6') if np.abs(np.linalg.norm(configuration[3:6]) - 1.0) > 1e-5: raise ValueError('Illegal vacuum axis. Must be norm one') return configuration[0:3], configuration[3:6]
[ "def", "params_from_configuration", "(", "configuration", ")", ":", "if", "not", "isinstance", "(", "configuration", ",", "np", ".", "ndarray", ")", "or", "configuration", ".", "shape", "[", "0", "]", "!=", "6", ":", "raise", "ValueError", "(", "'Configurati...
https://github.com/BerkeleyAutomation/dex-net/blob/cccf93319095374b0eefc24b8b6cd40bc23966d2/src/dexnet/grasping/grasp.py#L894-L908
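A hedged usage sketch for the record above: the center and axis values are made up, and the axis is normalized by hand so the vector passes the norm-one check enforced by params_from_configuration:

import numpy as np

center = np.array([0.1, 0.2, 0.3])
axis = np.array([1.0, 1.0, 0.0])
axis = axis / np.linalg.norm(axis)               # unit axis, as required
configuration = np.concatenate([center, axis])   # shape (6,)
c, a = configuration[0:3], configuration[3:6]    # the split performed above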
joelgrus/data-science-from-scratch
d5d0f117f41b3ccab3b07f1ee1fa21cfcf69afa1
first-edition/code-python3/network_analysis.py
python
find_eigenvector
(A, tolerance=0.00001)
[]
def find_eigenvector(A, tolerance=0.00001): guess = [1 for __ in A] while True: result = matrix_operate(A, guess) length = magnitude(result) next_guess = scalar_multiply(1/length, result) if distance(guess, next_guess) < tolerance: return next_guess, length # eigenvector, eigenvalue guess = next_guess
[ "def", "find_eigenvector", "(", "A", ",", "tolerance", "=", "0.00001", ")", ":", "guess", "=", "[", "1", "for", "__", "in", "A", "]", "while", "True", ":", "result", "=", "matrix_operate", "(", "A", ",", "guess", ")", "length", "=", "magnitude", "(",...
https://github.com/joelgrus/data-science-from-scratch/blob/d5d0f117f41b3ccab3b07f1ee1fa21cfcf69afa1/first-edition/code-python3/network_analysis.py#L139-L150
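The function above is the power-iteration method written with the book's own helpers (matrix_operate, magnitude, scalar_multiply, distance). A hedged NumPy equivalent of the same loop, using an illustrative symmetric matrix whose dominant eigenvalue is 3:

import numpy as np

def power_iteration(A, tolerance=1e-5):
    guess = np.ones(A.shape[0])
    while True:
        result = A @ guess                      # matrix_operate
        length = np.linalg.norm(result)         # magnitude
        next_guess = result / length            # scalar_multiply(1/length, result)
        if np.linalg.norm(guess - next_guess) < tolerance:  # distance
            return next_guess, length           # eigenvector, eigenvalue
        guess = next_guess

vec, val = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]))
print(vec, val)   # roughly [0.707, 0.707] and 3.0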
ChineseGLUE/ChineseGLUE
1591b85cf5427c2ff60f718d359ecb71d2b44879
baselines/models/roberta_wwm_large_ext/conlleval.py
python
calculate_metrics
(correct, guessed, total)
return Metrics(tp, fp, fn, p, r, f)
[]
def calculate_metrics(correct, guessed, total): tp, fp, fn = correct, guessed-correct, total-correct p = 0 if tp + fp == 0 else 1.*tp / (tp + fp) r = 0 if tp + fn == 0 else 1.*tp / (tp + fn) f = 0 if p + r == 0 else 2 * p * r / (p + r) return Metrics(tp, fp, fn, p, r, f)
[ "def", "calculate_metrics", "(", "correct", ",", "guessed", ",", "total", ")", ":", "tp", ",", "fp", ",", "fn", "=", "correct", ",", "guessed", "-", "correct", ",", "total", "-", "correct", "p", "=", "0", "if", "tp", "+", "fp", "==", "0", "else", ...
https://github.com/ChineseGLUE/ChineseGLUE/blob/1591b85cf5427c2ff60f718d359ecb71d2b44879/baselines/models/roberta_wwm_large_ext/conlleval.py#L152-L157
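A worked example of the same precision/recall/F1 arithmetic, with made-up counts (8 correct out of 10 guessed, 12 true entities in total):

correct, guessed, total = 8, 10, 12
tp, fp, fn = correct, guessed - correct, total - correct   # 8, 2, 4
p = 0 if tp + fp == 0 else 1. * tp / (tp + fp)             # 0.8
r = 0 if tp + fn == 0 else 1. * tp / (tp + fn)             # ~0.667
f = 0 if p + r == 0 else 2 * p * r / (p + r)               # ~0.727
print(tp, fp, fn, round(p, 3), round(r, 3), round(f, 3))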
hzlzh/AlfredWorkflow.com
7055f14f6922c80ea5943839eb0caff11ae57255
Sources/Workflows/SafariBookmark/workflow/background.py
python
is_running
(name)
return False
Test whether task ``name`` is currently running. :param name: name of task :type name: unicode :returns: ``True`` if task with name ``name`` is running, else ``False`` :rtype: bool
Test whether task ``name`` is currently running.
[ "Test", "whether", "task", "name", "is", "currently", "running", "." ]
def is_running(name): """Test whether task ``name`` is currently running. :param name: name of task :type name: unicode :returns: ``True`` if task with name ``name`` is running, else ``False`` :rtype: bool """ pidfile = _pid_file(name) if not os.path.exists(pidfile): return False with open(pidfile, 'rb') as file_obj: pid = int(file_obj.read().strip()) if _process_exists(pid): return True elif os.path.exists(pidfile): os.unlink(pidfile) return False
[ "def", "is_running", "(", "name", ")", ":", "pidfile", "=", "_pid_file", "(", "name", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "pidfile", ")", ":", "return", "False", "with", "open", "(", "pidfile", ",", "'rb'", ")", "as", "file_obj",...
https://github.com/hzlzh/AlfredWorkflow.com/blob/7055f14f6922c80ea5943839eb0caff11ae57255/Sources/Workflows/SafariBookmark/workflow/background.py#L85-L107
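A hedged standalone sketch of the same pidfile pattern; _pid_file and _process_exists are internal to the workflow library, so plausible stand-ins are used here (os.kill with signal 0 is the conventional existence probe on POSIX):

import os

def process_exists(pid):
    try:
        os.kill(pid, 0)            # signal 0 checks existence only
    except ProcessLookupError:
        return False
    except PermissionError:
        return True                # exists, but owned by another user
    return True

def is_running(pidfile):
    if not os.path.exists(pidfile):
        return False
    with open(pidfile, 'rb') as fh:
        pid = int(fh.read().strip())
    if process_exists(pid):
        return True
    if os.path.exists(pidfile):    # stale pidfile left behind: clean up
        os.unlink(pidfile)
    return False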
selimsef/dfdc_deepfake_challenge
89c6290490bac96b29193a4061b3db9dd3933e36
training/zoo/unet.py
python
EfficientUnetClassifier.get_encoder_features
(self, x)
return encoder_results
[]
def get_encoder_features(self, x): encoder_results = [] x = self.encoder.conv_stem(x) x = self.encoder.bn1(x) x = self.encoder.act1(x) encoder_results.append(x) x = self.encoder.blocks[:2](x) encoder_results.append(x) x = self.encoder.blocks[2:3](x) encoder_results.append(x) x = self.encoder.blocks[3:5](x) encoder_results.append(x) x = self.encoder.blocks[5:](x) x = self.encoder.conv_head(x) x = self.encoder.bn2(x) x = self.encoder.act2(x) encoder_results.append(x) encoder_results = list(reversed(encoder_results)) return encoder_results
[ "def", "get_encoder_features", "(", "self", ",", "x", ")", ":", "encoder_results", "=", "[", "]", "x", "=", "self", ".", "encoder", ".", "conv_stem", "(", "x", ")", "x", "=", "self", ".", "encoder", ".", "bn1", "(", "x", ")", "x", "=", "self", "....
https://github.com/selimsef/dfdc_deepfake_challenge/blob/89c6290490bac96b29193a4061b3db9dd3933e36/training/zoo/unet.py#L116-L134
pret/pokemon-reverse-engineering-tools
5e0715f2579adcfeb683448c9a7826cfd3afa57d
redtools/map_block_dumper.py
python
extract_map_block_data
(map_id, savefile=False)
[]
def extract_map_block_data(map_id, savefile=False): map = extract_maps.map_headers[map_id] if map["name"] == "FREEZE": return #skip this one blocksdata_pointer = int(map["map_pointer"], 16) y = int(map["y"], 16) x = int(map["x"], 16) size = x*y #fetch the data from the rom blocksdata = extract_maps.rom[blocksdata_pointer:blocksdata_pointer+size] #clean up the filename and label (for pokered.asm) cleaned_name = map_name_cleaner(map["name"], None) label_text = cleaned_name.replace("_h", "Blocks") filename = cleaned_name.replace("_h", "").lower() full_filepath = "maps/" + filename + ".blk" if savefile: print("Saving ../maps/" + filename + ".blk for map id=" + str(map_id)) fh = open("../maps/" + filename + ".blk", "w") fh.write(blocksdata) fh.close()
[ "def", "extract_map_block_data", "(", "map_id", ",", "savefile", "=", "False", ")", ":", "map", "=", "extract_maps", ".", "map_headers", "[", "map_id", "]", "if", "map", "[", "\"name\"", "]", "==", "\"FREEZE\"", ":", "return", "#skip this one", "blocksdata_poi...
https://github.com/pret/pokemon-reverse-engineering-tools/blob/5e0715f2579adcfeb683448c9a7826cfd3afa57d/redtools/map_block_dumper.py#L17-L40
facebookresearch/pytext
1a4e184b233856fcfb9997d74f167cbf5bbbfb8d
pytext/optimizer/fairseq_fp16_utils.py
python
Fairseq_MemoryEfficientFP16OptimizerMixin.zero_grad
(self)
Clears the gradients of all optimized parameters.
Clears the gradients of all optimized parameters.
[ "Clears", "the", "gradients", "of", "all", "optimized", "parameters", "." ]
def zero_grad(self): """Clears the gradients of all optimized parameters.""" self.wrapped_optimizer.zero_grad() self._grads_are_scaled = False
[ "def", "zero_grad", "(", "self", ")", ":", "self", ".", "wrapped_optimizer", ".", "zero_grad", "(", ")", "self", ".", "_grads_are_scaled", "=", "False" ]
https://github.com/facebookresearch/pytext/blob/1a4e184b233856fcfb9997d74f167cbf5bbbfb8d/pytext/optimizer/fairseq_fp16_utils.py#L232-L235
waditu/tushare
093856995af0811d3ebbe8c179b8febf4ae706f0
tushare/stock/reference.py
python
sh_margin_details
(date='', symbol='', start='', end='', retry_count=3, pause=0.001)
return df
Get the detail list of Shanghai market margin trading (financing and securities lending) Parameters -------- date:string detail data date format:YYYY-MM-DD defaults to '' symbol:string target security code, 6 digits e.g. 600848, defaults to empty start:string start date format:YYYY-MM-DD defaults to '' end:string end date format:YYYY-MM-DD defaults to '' retry_count : int, default 3 number of retries when network or similar problems occur pause : int, default 0 seconds to pause between repeated requests, to avoid problems caused by too-short request intervals Return ------ DataFrame opDate: margin trading date stockCode: target security code securityAbbr: target security abbreviation rzye: financing balance for the day (CNY) rzmre: financing buy amount for the day (CNY) rzche: financing repayment amount for the day (CNY) rqyl: securities lending balance for the day rqmcl: securities lending sell volume for the day rqchl: securities lending repayment volume for the day
Get the detail list of Shanghai market margin trading (financing and securities lending) Parameters -------- date:string detail data date format:YYYY-MM-DD defaults to '' symbol:string target security code, 6 digits e.g. 600848, defaults to empty start:string start date format:YYYY-MM-DD defaults to '' end:string end date format:YYYY-MM-DD defaults to '' retry_count : int, default 3 number of retries when network or similar problems occur pause : int, default 0 seconds to pause between repeated requests, to avoid problems caused by too-short request intervals Return ------ DataFrame opDate: margin trading date stockCode: target security code securityAbbr: target security abbreviation rzye: financing balance for the day (CNY) rzmre: financing buy amount for the day (CNY) rzche: financing repayment amount for the day (CNY) rqyl: securities lending balance for the day rqmcl: securities lending sell volume for the day rqchl: securities lending repayment volume for the day
[ "Get", "the", "detail", "list", "of", "Shanghai", "market", "margin", "trading", "Parameters", "--------", "date", ":", "string", "detail", "data", "date", "format:YYYY", "-", "MM", "-", "DD", "defaults", "to", "empty", "symbol:string", "target", "security", "code,", "6", "digits", "e", ".", "g", ".", "600848,", "defaults", "to", "empty", "start", ":", "string", "start", "date", "format:YYYY", "-", "MM", "-", "DD", "defaults", "to", "empty", ...
def sh_margin_details(date='', symbol='', start='', end='', retry_count=3, pause=0.001): """ Get the detail list of Shanghai market margin trading (financing and securities lending) Parameters -------- date:string detail data date format:YYYY-MM-DD defaults to '' symbol:string target security code, 6 digits e.g. 600848, defaults to empty start:string start date format:YYYY-MM-DD defaults to '' end:string end date format:YYYY-MM-DD defaults to '' retry_count : int, default 3 number of retries when network or similar problems occur pause : int, default 0 seconds to pause between repeated requests, to avoid problems caused by too-short request intervals Return ------ DataFrame opDate: margin trading date stockCode: target security code securityAbbr: target security abbreviation rzye: financing balance for the day (CNY) rzmre: financing buy amount for the day (CNY) rzche: financing repayment amount for the day (CNY) rqyl: securities lending balance for the day rqmcl: securities lending sell volume for the day rqchl: securities lending repayment volume for the day """ date = date if date == '' else date.replace('-', '') start = start if start == '' else start.replace('-', '') end = end if end == '' else end.replace('-', '') if (start != '') & (end != ''): date = '' data = pd.DataFrame() ct._write_head() df = _sh_mx(data, date=date, start=start, end=end, symbol=symbol, retry_count=retry_count, pause=pause) return df
[ "def", "sh_margin_details", "(", "date", "=", "''", ",", "symbol", "=", "''", ",", "start", "=", "''", ",", "end", "=", "''", ",", "retry_count", "=", "3", ",", "pause", "=", "0.001", ")", ":", "date", "=", "date", "if", "date", "==", "''", "else...
https://github.com/waditu/tushare/blob/093856995af0811d3ebbe8c179b8febf4ae706f0/tushare/stock/reference.py#L619-L663
MVIG-SJTU/AlphaPose
bcfbc997526bcac464d116356ac2efea9483ff68
detector/tracker/tracker/multitracker.py
python
STrack.tlbr
(self)
return ret
Convert bounding box to format `(min x, min y, max x, max y)`, i.e., `(top left, bottom right)`.
Convert bounding box to format `(min x, min y, max x, max y)`, i.e., `(top left, bottom right)`.
[ "Convert", "bounding", "box", "to", "format", "(", "min", "x", "min", "y", "max", "x", "max", "y", ")", "i", ".", "e", ".", "(", "top", "left", "bottom", "right", ")", "." ]
def tlbr(self): """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., `(top left, bottom right)`. """ ret = self.tlwh.copy() ret[2:] += ret[:2] return ret
[ "def", "tlbr", "(", "self", ")", ":", "ret", "=", "self", ".", "tlwh", ".", "copy", "(", ")", "ret", "[", "2", ":", "]", "+=", "ret", "[", ":", "2", "]", "return", "ret" ]
https://github.com/MVIG-SJTU/AlphaPose/blob/bcfbc997526bcac464d116356ac2efea9483ff68/detector/tracker/tracker/multitracker.py#L110-L116
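The conversion above in isolation, with an illustrative box (top-left at (100, 50), 40 wide, 80 tall):

import numpy as np

tlwh = np.array([100.0, 50.0, 40.0, 80.0])  # (min x, min y, width, height)
tlbr = tlwh.copy()
tlbr[2:] += tlbr[:2]                        # -> [100., 50., 140., 130.]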
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/lib/django-0.96/django/utils/simplejson/encoder.py
python
JSONEncoder.__init__
(self, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, sort_keys=False, indent=None, separators=None)
Constructor for JSONEncoder, with sensible defaults. If skipkeys is False, then it is a TypeError to attempt encoding of keys that are not str, int, long, float or None. If skipkeys is True, such items are simply skipped. If ensure_ascii is True, the output is guaranteed to be str objects with all incoming unicode characters escaped. If ensure_ascii is false, the output will be unicode object. If check_circular is True, then lists, dicts, and custom encoded objects will be checked for circular references during encoding to prevent an infinite recursion (which would cause an OverflowError). Otherwise, no such check takes place. If allow_nan is True, then NaN, Infinity, and -Infinity will be encoded as such. This behavior is not JSON specification compliant, but is consistent with most JavaScript based encoders and decoders. Otherwise, it will be a ValueError to encode such floats. If sort_keys is True, then the output of dictionaries will be sorted by key; this is useful for regression tests to ensure that JSON serializations can be compared on a day-to-day basis. If indent is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. None is the most compact representation. If specified, separators should be a (item_separator, key_separator) tuple. The default is (', ', ': '). To get the most compact JSON representation you should specify (',', ':') to eliminate whitespace.
Constructor for JSONEncoder, with sensible defaults.
[ "Constructor", "for", "JSONEncoder", "with", "sensible", "defaults", "." ]
def __init__(self, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, sort_keys=False, indent=None, separators=None): """ Constructor for JSONEncoder, with sensible defaults. If skipkeys is False, then it is a TypeError to attempt encoding of keys that are not str, int, long, float or None. If skipkeys is True, such items are simply skipped. If ensure_ascii is True, the output is guaranteed to be str objects with all incoming unicode characters escaped. If ensure_ascii is false, the output will be unicode object. If check_circular is True, then lists, dicts, and custom encoded objects will be checked for circular references during encoding to prevent an infinite recursion (which would cause an OverflowError). Otherwise, no such check takes place. If allow_nan is True, then NaN, Infinity, and -Infinity will be encoded as such. This behavior is not JSON specification compliant, but is consistent with most JavaScript based encoders and decoders. Otherwise, it will be a ValueError to encode such floats. If sort_keys is True, then the output of dictionaries will be sorted by key; this is useful for regression tests to ensure that JSON serializations can be compared on a day-to-day basis. If indent is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. None is the most compact representation. If specified, separators should be a (item_separator, key_separator) tuple. The default is (', ', ': '). To get the most compact JSON representation you should specify (',', ':') to eliminate whitespace. """ self.skipkeys = skipkeys self.ensure_ascii = ensure_ascii self.check_circular = check_circular self.allow_nan = allow_nan self.sort_keys = sort_keys self.indent = indent self.current_indent_level = 0 if separators is not None: self.item_separator, self.key_separator = separators
[ "def", "__init__", "(", "self", ",", "skipkeys", "=", "False", ",", "ensure_ascii", "=", "True", ",", "check_circular", "=", "True", ",", "allow_nan", "=", "True", ",", "sort_keys", "=", "False", ",", "indent", "=", "None", ",", "separators", "=", "None"...
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/lib/django-0.96/django/utils/simplejson/encoder.py#L95-L141
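These constructor options survive unchanged in the standard-library json module (a descendant of simplejson); a quick demonstration of sort_keys, separators, and allow_nan:

import json

print(json.dumps({'b': 2, 'a': 1}, sort_keys=True, indent=2))
print(json.dumps({'b': 2, 'a': 1}, sort_keys=True, separators=(',', ':')))  # {"a":1,"b":2}
print(json.dumps(float('nan')))   # 'NaN' -- allow_nan defaults to True
try:
    json.dumps(float('nan'), allow_nan=False)
except ValueError as exc:
    print('rejected:', exc)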
geigi/cozy
5006ea7097534e18adf525d1d0ec384ddc000404
cozy/ext/inject/__init__.py
python
Binder.bind_to_provider
(self, cls: Binding, provider: Provider)
return self
Bind a class to a callable instance provider executed for each injection.
Bind a class to a callable instance provider executed for each injection.
[ "Bind", "a", "class", "to", "a", "callable", "instance", "provider", "executed", "for", "each", "injection", "." ]
def bind_to_provider(self, cls: Binding, provider: Provider) -> 'Binder': """Bind a class to a callable instance provider executed for each injection.""" self._check_class(cls) if provider is None: raise InjectorException('Provider cannot be None, key=%s' % cls) b = provider self._bindings[cls] = b self._maybe_bind_forward(cls, b) logger.debug('Bound %s to a provider %s', cls, provider) return self
[ "def", "bind_to_provider", "(", "self", ",", "cls", ":", "Binding", ",", "provider", ":", "Provider", ")", "->", "'Binder'", ":", "self", ".", "_check_class", "(", "cls", ")", "if", "provider", "is", "None", ":", "raise", "InjectorException", "(", "'Provid...
https://github.com/geigi/cozy/blob/5006ea7097534e18adf525d1d0ec384ddc000404/cozy/ext/inject/__init__.py#L153-L164
alexa/alexa-skills-kit-sdk-for-python
079de73bc8b827be51ea700a3e4e19c29983a173
ask-sdk-webservice-support/ask_sdk_webservice_support/verifier.py
python
RequestVerifier.__init__
( self, signature_cert_chain_url_key=SIGNATURE_CERT_CHAIN_URL_HEADER, signature_key=SIGNATURE_HEADER, padding=PKCS1v15(), hash_algorithm=SHA1())
Verifier that performs request signature verification. This is a concrete implementation of :py:class:`AbstractVerifier` class, handling the request signature verification of the input request. This verifier uses the Cryptography module x509 functions to validate the signature chain in the input request. The verification follows the mechanism explained here : https://developer.amazon.com/docs/custom-skills/host-a-custom-skill-as-a-web-service.html#checking-the-signature-of-the-request The constructor takes the header key names for retrieving Signature Certificate Chain and Signature. They are defaulted to the header names present in the :py:mod:`ask_sdk_webservice_support.conf`. Additionally, one can also provide the Padding and the Hash Algorithm functions that is used to verify the input body. These are defaulted as :py:class:`cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15` and :py:class:`cryptography.hazmat.primitives.hashes.SHA1` instances respectively. A certificate cache is initialized, to store certificate chains for faster retrieval and validation in subsequent input dispatch. :param signature_cert_chain_url_key: Header key to be used, to retrieve Signature Certificate Chain URL from headers :type signature_cert_chain_url_key: str :param signature_key: Header key to be used, to retrieve Signature from headers :type signature_key: str :param padding: Asymmetric padding algorithm instance to be used to verify the hash value of the request body with the decrypted signature. Defaulted to `PKCS1v15` :type padding: cryptography.hazmat.primitives.asymmetric.padding.AsymmetricPadding :param hash_algorithm: Hash algorithm instance to be used to verify the hash value of the request body with the decrypted signature. Defaulted to `SHA1` :type hash_algorithm: cryptography.hazmat.primitives.hashes.HashAlgorithm
Verifier that performs request signature verification.
[ "Verifier", "that", "performs", "request", "signature", "verification", "." ]
def __init__( self, signature_cert_chain_url_key=SIGNATURE_CERT_CHAIN_URL_HEADER, signature_key=SIGNATURE_HEADER, padding=PKCS1v15(), hash_algorithm=SHA1()): # type: (str, str, AsymmetricPadding, HashAlgorithm) -> None """Verifier that performs request signature verification. This is a concrete implementation of :py:class:`AbstractVerifier` class, handling the request signature verification of the input request. This verifier uses the Cryptography module x509 functions to validate the signature chain in the input request. The verification follows the mechanism explained here : https://developer.amazon.com/docs/custom-skills/host-a-custom-skill-as-a-web-service.html#checking-the-signature-of-the-request The constructor takes the header key names for retrieving Signature Certificate Chain and Signature. They are defaulted to the header names present in the :py:mod:`ask_sdk_webservice_support.conf`. Additionally, one can also provide the Padding and the Hash Algorithm functions that is used to verify the input body. These are defaulted as :py:class:`cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15` and :py:class:`cryptography.hazmat.primitives.hashes.SHA1` instances respectively. A certificate cache is initialized, to store certificate chains for faster retrieval and validation in subsequent input dispatch. :param signature_cert_chain_url_key: Header key to be used, to retrieve Signature Certificate Chain URL from headers :type signature_cert_chain_url_key: str :param signature_key: Header key to be used, to retrieve Signature from headers :type signature_key: str :param padding: Asymmetric padding algorithm instance to be used to verify the hash value of the request body with the decrypted signature. Defaulted to `PKCS1v15` :type padding: cryptography.hazmat.primitives.asymmetric.padding.AsymmetricPadding :param hash_algorithm: Hash algorithm instance to be used to verify the hash value of the request body with the decrypted signature. Defaulted to `SHA1` :type hash_algorithm: cryptography.hazmat.primitives.hashes.HashAlgorithm """ self._signature_cert_chain_url_key = signature_cert_chain_url_key self._signature_key = signature_key self._padding = padding self._hash_algorithm = hash_algorithm self._cert_cache = {}
[ "def", "__init__", "(", "self", ",", "signature_cert_chain_url_key", "=", "SIGNATURE_CERT_CHAIN_URL_HEADER", ",", "signature_key", "=", "SIGNATURE_HEADER", ",", "padding", "=", "PKCS1v15", "(", ")", ",", "hash_algorithm", "=", "SHA1", "(", ")", ")", ":", "# type: ...
https://github.com/alexa/alexa-skills-kit-sdk-for-python/blob/079de73bc8b827be51ea700a3e4e19c29983a173/ask-sdk-webservice-support/ask_sdk_webservice_support/verifier.py#L120-L171
brightmart/albert_zh
652faed6b362c730eb046e9a2e5620d898736a01
resources/create_pretraining_data_roberta.py
python
create_instances_from_document_original
( all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
return instances
Creates `TrainingInstance`s for a single document.
Creates `TrainingInstance`s for a single document.
[ "Creates", "TrainingInstance", "s", "for", "a", "single", "document", "." ]
def create_instances_from_document_original( all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng): """Creates `TrainingInstance`s for a single document.""" document = all_documents[document_index] # Account for [CLS], [SEP], [SEP] max_num_tokens = max_seq_length - 3 # We *usually* want to fill up the entire sequence since we are padding # to `max_seq_length` anyways, so short sequences are generally wasted # computation. However, we *sometimes* # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter # sequences to minimize the mismatch between pre-training and fine-tuning. # The `target_seq_length` is just a rough target however, whereas # `max_seq_length` is a hard limit. target_seq_length = max_num_tokens if rng.random() < short_seq_prob: target_seq_length = rng.randint(2, max_num_tokens) # We DON'T just concatenate all of the tokens from a document into a long # sequence and choose an arbitrary split point because this would make the # next sentence prediction task too easy. Instead, we split the input into # segments "A" and "B" based on the actual "sentences" provided by the user # input. instances = [] current_chunk = [] current_length = 0 i = 0 print("document_index:",document_index,"document:",type(document)," ;document:",document) # document is one whole passage containing multiple sentences; each sentence is called a segment. while i < len(document): segment = document[i] # fetch one part (possibly a passage) print("i:",i," ;segment:",segment) #################################################################################################################### segment = get_new_segment(segment) # apply the Chinese whole-word-mask setting based on word segmentation, i.e. add "##" where needed ################################################################################################################### current_chunk.append(segment) current_length += len(segment) print("#####condition:",i == len(document) - 1 or current_length >= target_seq_length) if i == len(document) - 1 or current_length >= target_seq_length: if current_chunk: # `a_end` is how many segments from `current_chunk` go into the `A` # (first) sentence. a_end = 1 if len(current_chunk) >= 2: a_end = rng.randint(1, len(current_chunk) - 1) tokens_a = [] for j in range(a_end): tokens_a.extend(current_chunk[j]) tokens_b = [] # Random next is_random_next = False if len(current_chunk) == 1 or rng.random() < 0.5: is_random_next = True target_b_length = target_seq_length - len(tokens_a) # This should rarely go for more than one iteration for large # corpora. However, just to be careful, we try to make sure that # the random document is not the same as the document # we're processing. for _ in range(10): random_document_index = rng.randint(0, len(all_documents) - 1) if random_document_index != document_index: break random_document = all_documents[random_document_index] random_start = rng.randint(0, len(random_document) - 1) for j in range(random_start, len(random_document)): tokens_b.extend(random_document[j]) if len(tokens_b) >= target_b_length: break # We didn't actually use these segments so we "put them back" so # they don't go to waste. 
num_unused_segments = len(current_chunk) - a_end i -= num_unused_segments # Actual next else: is_random_next = False for j in range(a_end, len(current_chunk)): tokens_b.extend(current_chunk[j]) truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng) assert len(tokens_a) >= 1 assert len(tokens_b) >= 1 tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) (tokens, masked_lm_positions, masked_lm_labels) = create_masked_lm_predictions( tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng) instance = TrainingInstance( tokens=tokens, segment_ids=segment_ids, is_random_next=is_random_next, masked_lm_positions=masked_lm_positions, masked_lm_labels=masked_lm_labels) instances.append(instance) current_chunk = [] current_length = 0 i += 1 return instances
[ "def", "create_instances_from_document_original", "(", "all_documents", ",", "document_index", ",", "max_seq_length", ",", "short_seq_prob", ",", "masked_lm_prob", ",", "max_predictions_per_seq", ",", "vocab_words", ",", "rng", ")", ":", "document", "=", "all_documents", ...
https://github.com/brightmart/albert_zh/blob/652faed6b362c730eb046e9a2e5620d898736a01/resources/create_pretraining_data_roberta.py#L376-L494
ganeti/ganeti
d340a9ddd12f501bef57da421b5f9b969a4ba905
lib/utils/io.py
python
TemporaryFileManager.Cleanup
(self)
Delete all files marked for deletion
Delete all files marked for deletion
[ "Delete", "all", "files", "marked", "for", "deletion" ]
def Cleanup(self): """Delete all files marked for deletion """ while self._files: RemoveFile(self._files.pop())
[ "def", "Cleanup", "(", "self", ")", ":", "while", "self", ".", "_files", ":", "RemoveFile", "(", "self", ".", "_files", ".", "pop", "(", ")", ")" ]
https://github.com/ganeti/ganeti/blob/d340a9ddd12f501bef57da421b5f9b969a4ba905/lib/utils/io.py#L1018-L1023
openedx/edx-platform
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
openedx/core/djangoapps/oauth_dispatch/views.py
python
_DispatchingView.get_adapter
(self, request)
return self.dot_adapter
Returns the appropriate adapter based on the OAuth client linked to the request.
Returns the appropriate adapter based on the OAuth client linked to the request.
[ "Returns", "the", "appropriate", "adapter", "based", "on", "the", "OAuth", "client", "linked", "to", "the", "request", "." ]
def get_adapter(self, request): """ Returns the appropriate adapter based on the OAuth client linked to the request. """ client_id = self._get_client_id(request) monitoring_utils.set_custom_attribute('oauth_client_id', client_id) return self.dot_adapter
[ "def", "get_adapter", "(", "self", ",", "request", ")", ":", "client_id", "=", "self", ".", "_get_client_id", "(", "request", ")", "monitoring_utils", ".", "set_custom_attribute", "(", "'oauth_client_id'", ",", "client_id", ")", "return", "self", ".", "dot_adapt...
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/openedx/core/djangoapps/oauth_dispatch/views.py#L32-L39
nansencenter/DAPPER
406f5a526919286aa73b017add507f5b0cb98233
dapper/xp_process.py
python
SparseSpace.__setitem__
(self, key, val)
Setitem ensuring coordinate conforms.
Setitem ensuring coordinate conforms.
[ "Setitem", "ensuring", "coordinate", "conforms", "." ]
def __setitem__(self, key, val): """Setitem ensuring coordinate conforms.""" try: key = self.Coord(*key) except TypeError: raise TypeError( f"The key {key!r} did not fit the coord. system " f"which has dims {self.dims}") super().__setitem__(key, val)
[ "def", "__setitem__", "(", "self", ",", "key", ",", "val", ")", ":", "try", ":", "key", "=", "self", ".", "Coord", "(", "*", "key", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "f\"The key {key!r} did not fit the coord. system \"", "f\"which has...
https://github.com/nansencenter/DAPPER/blob/406f5a526919286aa73b017add507f5b0cb98233/dapper/xp_process.py#L119-L127
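A hedged sketch of the pattern above: a dict subclass that coerces every key through a namedtuple acting as the coordinate system. The field names here are invented for illustration; DAPPER builds its Coord dynamically:

from collections import namedtuple

Coord = namedtuple('Coord', ['da_method', 'seed'])   # hypothetical dims

class SparseSpaceSketch(dict):
    def __setitem__(self, key, val):
        try:
            key = Coord(*key)
        except TypeError:
            raise TypeError(
                f"The key {key!r} did not fit the coord. system "
                f"which has dims {Coord._fields}")
        super().__setitem__(key, val)

space = SparseSpaceSketch()
space[('EnKF', 3)] = 0.17    # stored under Coord(da_method='EnKF', seed=3)
try:
    space[('EnKF',)] = 0.2   # too few fields -> TypeError
except TypeError as exc:
    print(exc)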
adamchainz/django-mysql
389594dc078f73c9f204306014332344fe4b6d04
src/django_mysql/operations.py
python
AlterStorageEngine.name_lower
(self)
return self.name.lower()
[]
def name_lower(self) -> str: return self.name.lower()
[ "def", "name_lower", "(", "self", ")", "->", "str", ":", "return", "self", ".", "name", ".", "lower", "(", ")" ]
https://github.com/adamchainz/django-mysql/blob/389594dc078f73c9f204306014332344fe4b6d04/src/django_mysql/operations.py#L162-L163
prabhupant/python-ds
f6cad01ec80c12a27bc30eb23de951855ac45d03
data_structures/array/random_matrix.py
python
random_matrix
(n)
return m
Generate random n x n matrix without repeated entries in rows or columns. The entries are integers between 1 and n.
Generate random n x n matrix without repeated entries in rows or columns. The entries are integers between 1 and n.
[ "Generate", "random", "n", "x", "n", "matrix", "without", "repeated", "entries", "in", "rows", "or", "columns", ".", "The", "entries", "are", "integers", "between", "1", "and", "n", "." ]
def random_matrix(n): """ Generate random n x n matrix without repeated entries in rows or columns. The entries are integers between 1 and n. """ from random import shuffle a = list(range(n + 1)) shuffle(a) # Use slicing to left rotate m = [a[i:] + a[:i] for i in range(n + 1)] # Shuffle rows in matrix shuffle(m) # Shuffle cols in matrix (optional) m = list(map(list, zip(*m))) # Transpose the matrix shuffle(m) return m
[ "def", "random_matrix", "(", "n", ")", ":", "from", "random", "import", "shuffle", "a", "=", "list", "(", "range", "(", "n", "+", "1", ")", ")", "shuffle", "(", "a", ")", "# Use slicing to left rotate", "m", "=", "[", "a", "[", "i", ":", "]", "+", ...
https://github.com/prabhupant/python-ds/blob/f6cad01ec80c12a27bc30eb23de951855ac45d03/data_structures/array/random_matrix.py#L1-L20
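A quick verification of the construction, assuming random_matrix as defined above is in scope. One discrepancy worth flagging: with a = list(range(n + 1)) the function actually returns an (n+1) x (n+1) Latin square with entries 0..n, not the n x n matrix with entries 1..n that the docstring promises:

m = random_matrix(3)
assert len(m) == 4 and all(len(row) == 4 for row in m)
for row in m:
    assert len(set(row)) == len(row)   # no repeats within any row
for col in zip(*m):
    assert len(set(col)) == len(col)   # no repeats within any column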
chaostoolkit/chaostoolkit-kubernetes
4fe9b65209d849efd4c97e99d9eb31fde38a8c84
chaosk8s/crd/actions.py
python
delete_cluster_custom_object
( group: str, version: str, plural: str, name: str, secrets: Secrets = None )
Delete a custom object cluster wide. Read more about custom resources here: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/
Delete a custom object cluster wide.
[ "Delete", "a", "custom", "object", "cluster", "wide", "." ]
def delete_cluster_custom_object( group: str, version: str, plural: str, name: str, secrets: Secrets = None ) -> Dict[str, Any]: """ Delete a custom object cluster wide. Read more about custom resources here: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/ """ # noqa: E501 api = client.CustomObjectsApi(create_k8s_api_client(secrets)) try: r = api.delete_cluster_custom_object( group, version, plural, name, _preload_content=False ) return json.loads(r.data) except ApiException as x: raise ActivityFailed( f"Failed to delete custom resource object: '{x.reason}' {x.body}" )
[ "def", "delete_cluster_custom_object", "(", "group", ":", "str", ",", "version", ":", "str", ",", "plural", ":", "str", ",", "name", ":", "str", ",", "secrets", ":", "Secrets", "=", "None", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "# noqa...
https://github.com/chaostoolkit/chaostoolkit-kubernetes/blob/4fe9b65209d849efd4c97e99d9eb31fde38a8c84/chaosk8s/crd/actions.py#L120-L139
openstack/nova
b49b7663e1c3073917d5844b81d38db8e86d05c4
nova/scheduler/client/report.py
python
SchedulerReportClient.get_allocations_for_provider_tree
(self, context, nodename)
return {consumer: self.get_allocs_for_consumer(context, consumer) for consumer in consumers}
Retrieve allocation records associated with all providers in the provider tree. This method uses the cache exclusively to discover providers. The caller must ensure that the cache is populated. This method is (and should remain) used exclusively in the reshaper flow by the resource tracker. Note that, in addition to allocations on providers in this compute node's provider tree, this method will return allocations on sharing providers if those allocations are associated with a consumer on this compute node. This is intentional and desirable. But it may also return allocations belonging to other hosts, e.g. if this is happening in the middle of an evacuate. ComputeDriver.update_provider_tree is supposed to ignore such allocations if they appear. :param context: The security context :param nodename: The name of a node for whose tree we are getting allocations. :returns: A dict, keyed by consumer UUID, of allocation records: { $CONSUMER_UUID: { # The shape of each "allocations" dict below is identical # to the return from GET /allocations/{consumer_uuid} "allocations": { $RP_UUID: { "generation": $RP_GEN, "resources": { $RESOURCE_CLASS: $AMOUNT, ... }, }, ... }, "project_id": $PROJ_ID, "user_id": $USER_ID, "consumer_generation": $CONSUMER_GEN, }, ... } :raises: keystoneauth1.exceptions.ClientException if placement API communication fails. :raises: ResourceProviderAllocationRetrievalFailed if a placement API call fails. :raises: ValueError if there's no provider with the specified nodename.
Retrieve allocation records associated with all providers in the provider tree.
[ "Retrieve", "allocation", "records", "associated", "with", "all", "providers", "in", "the", "provider", "tree", "." ]
def get_allocations_for_provider_tree(self, context, nodename): """Retrieve allocation records associated with all providers in the provider tree. This method uses the cache exclusively to discover providers. The caller must ensure that the cache is populated. This method is (and should remain) used exclusively in the reshaper flow by the resource tracker. Note that, in addition to allocations on providers in this compute node's provider tree, this method will return allocations on sharing providers if those allocations are associated with a consumer on this compute node. This is intentional and desirable. But it may also return allocations belonging to other hosts, e.g. if this is happening in the middle of an evacuate. ComputeDriver.update_provider_tree is supposed to ignore such allocations if they appear. :param context: The security context :param nodename: The name of a node for whose tree we are getting allocations. :returns: A dict, keyed by consumer UUID, of allocation records: { $CONSUMER_UUID: { # The shape of each "allocations" dict below is identical # to the return from GET /allocations/{consumer_uuid} "allocations": { $RP_UUID: { "generation": $RP_GEN, "resources": { $RESOURCE_CLASS: $AMOUNT, ... }, }, ... }, "project_id": $PROJ_ID, "user_id": $USER_ID, "consumer_generation": $CONSUMER_GEN, }, ... } :raises: keystoneauth1.exceptions.ClientException if placement API communication fails. :raises: ResourceProviderAllocationRetrievalFailed if a placement API call fails. :raises: ValueError if there's no provider with the specified nodename. """ # NOTE(efried): Despite our best efforts, there are some scenarios # (e.g. mid-evacuate) where we can still wind up returning allocations # against providers belonging to other hosts. We count on the consumer # of this information (i.e. the reshaper flow of a virt driver's # update_provider_tree) to ignore allocations associated with any # provider it is not reshaping - and it should never be reshaping # providers belonging to other hosts. # We can't get *all* allocations for associated sharing providers # because some of those will belong to consumers on other hosts. So we # have to discover all the consumers associated with the providers in # the "local" tree (we use the nodename to figure out which providers # are "local"). # All we want to do at this point is accumulate the set of consumers we # care about. consumers = set() # TODO(efried): This could be more efficient if placement offered an # operation like GET /allocations?rp_uuid=in:<list> for u in self._provider_tree.get_provider_uuids(name_or_uuid=nodename): alloc_info = self.get_allocations_for_resource_provider(context, u) # The allocations dict is keyed by consumer UUID consumers.update(alloc_info.allocations) # Now get all the allocations for each of these consumers to build the # result. This will include allocations on sharing providers, which is # intentional and desirable. But it may also include allocations # belonging to other hosts, e.g. if this is happening in the middle of # an evacuate. ComputeDriver.update_provider_tree is supposed to ignore # such allocations if they appear. # TODO(efried): This could be more efficient if placement offered an # operation like GET /allocations?consumer_uuid=in:<list> return {consumer: self.get_allocs_for_consumer(context, consumer) for consumer in consumers}
[ "def", "get_allocations_for_provider_tree", "(", "self", ",", "context", ",", "nodename", ")", ":", "# NOTE(efried): Despite our best efforts, there are some scenarios", "# (e.g. mid-evacuate) where we can still wind up returning allocations", "# against providers belonging to other hosts. W...
https://github.com/openstack/nova/blob/b49b7663e1c3073917d5844b81d38db8e86d05c4/nova/scheduler/client/report.py#L2171-L2250
PyCQA/pylint
3fc855f9d0fa8e6410be5a23cf954ffd5471b4eb
pylint/checkers/base.py
python
_determine_function_name_type
(node: nodes.FunctionDef, config=None)
return "method"
Determine the name type whose regex the function's name should match. :param node: A function node. :param config: Configuration from which to pull additional property classes. :type config: :class:`optparse.Values` :returns: One of ('function', 'method', 'attr') :rtype: str
Determine the name type whose regex the function's name should match.
[ "Determine", "the", "name", "type", "whose", "regex", "the", "function", "s", "name", "should", "match", "." ]
def _determine_function_name_type(node: nodes.FunctionDef, config=None): """Determine the name type whose regex the function's name should match. :param node: A function node. :param config: Configuration from which to pull additional property classes. :type config: :class:`optparse.Values` :returns: One of ('function', 'method', 'attr') :rtype: str """ property_classes, property_names = _get_properties(config) if not node.is_method(): return "function" if is_property_setter(node) or is_property_deleter(node): # If the function is decorated using the prop_method.{setter,getter} # form, treat it like an attribute as well. return "attr" decorators = node.decorators.nodes if node.decorators else [] for decorator in decorators: # If the function is a property (decorated with @property # or @abc.abstractproperty), the name type is 'attr'. if isinstance(decorator, nodes.Name) or ( isinstance(decorator, nodes.Attribute) and decorator.attrname in property_names ): inferred = utils.safe_infer(decorator) if ( inferred and hasattr(inferred, "qname") and inferred.qname() in property_classes ): return "attr" return "method"
[ "def", "_determine_function_name_type", "(", "node", ":", "nodes", ".", "FunctionDef", ",", "config", "=", "None", ")", ":", "property_classes", ",", "property_names", "=", "_get_properties", "(", "config", ")", "if", "not", "node", ".", "is_method", "(", ")",...
https://github.com/PyCQA/pylint/blob/3fc855f9d0fa8e6410be5a23cf954ffd5471b4eb/pylint/checkers/base.py#L353-L387
Jenyay/outwiker
50530cf7b3f71480bb075b2829bc0669773b835b
plugins/source/source/pygments/formatters/latex.py
python
LatexEmbeddedLexer._find_safe_escape_tokens
(self, text)
find escape tokens that are not in strings or comments
find escape tokens that are not in strings or comments
[ "find", "escape", "tokens", "that", "are", "not", "in", "strings", "or", "comments" ]
def _find_safe_escape_tokens(self, text): """ find escape tokens that are not in strings or comments """ for i, t, v in self._filter_to( self.lang.get_tokens_unprocessed(text), lambda t: t in Token.Comment or t in Token.String ): if t is None: for i2, t2, v2 in self._find_escape_tokens(v): yield i + i2, t2, v2 else: yield i, None, v
[ "def", "_find_safe_escape_tokens", "(", "self", ",", "text", ")", ":", "for", "i", ",", "t", ",", "v", "in", "self", ".", "_filter_to", "(", "self", ".", "lang", ".", "get_tokens_unprocessed", "(", "text", ")", ",", "lambda", "t", ":", "t", "in", "To...
https://github.com/Jenyay/outwiker/blob/50530cf7b3f71480bb075b2829bc0669773b835b/plugins/source/source/pygments/formatters/latex.py#L466-L476
rembo10/headphones
b3199605be1ebc83a7a8feab6b1e99b64014187c
lib/beets/importer.py
python
history_get
()
return state[HISTORY_KEY]
Get the set of completed path tuples in incremental imports.
Get the set of completed path tuples in incremental imports.
[ "Get", "the", "set", "of", "completed", "path", "tuples", "in", "incremental", "imports", "." ]
def history_get(): """Get the set of completed path tuples in incremental imports. """ state = _open_state() if HISTORY_KEY not in state: return set() return state[HISTORY_KEY]
[ "def", "history_get", "(", ")", ":", "state", "=", "_open_state", "(", ")", "if", "HISTORY_KEY", "not", "in", "state", ":", "return", "set", "(", ")", "return", "state", "[", "HISTORY_KEY", "]" ]
https://github.com/rembo10/headphones/blob/b3199605be1ebc83a7a8feab6b1e99b64014187c/lib/beets/importer.py#L166-L172
peering-manager/peering-manager
62c870fb9caa6dfc056feb77c595d45bc3c4988a
peeringdb/migrations/0009_auto_20181212_2322.py
python
Migration.forwards_func
(apps, schema_editor)
[]
def forwards_func(apps, schema_editor): Network = apps.get_model("peeringdb", "Network") db_alias = schema_editor.connection.alias Network.objects.using(db_alias).filter(info_prefixes4=-1).update( info_prefixes4=0 ) Network.objects.using(db_alias).filter(info_prefixes6=-1).update( info_prefixes6=0 )
[ "def", "forwards_func", "(", "apps", ",", "schema_editor", ")", ":", "Network", "=", "apps", ".", "get_model", "(", "\"peeringdb\"", ",", "\"Network\"", ")", "db_alias", "=", "schema_editor", ".", "connection", ".", "alias", "Network", ".", "objects", ".", "...
https://github.com/peering-manager/peering-manager/blob/62c870fb9caa6dfc056feb77c595d45bc3c4988a/peeringdb/migrations/0009_auto_20181212_2322.py#L7-L15
scikit-learn/scikit-learn
1d1aadd0711b87d2a11c80aad15df6f8cf156712
examples/applications/plot_prediction_latency.py
python
benchmark_throughputs
(configuration, duration_secs=0.1)
return throughputs
benchmark throughput for different estimators.
benchmark throughput for different estimators.
[ "benchmark", "throughput", "for", "different", "estimators", "." ]
def benchmark_throughputs(configuration, duration_secs=0.1): """benchmark throughput for different estimators.""" X_train, y_train, X_test, y_test = generate_dataset( configuration["n_train"], configuration["n_test"], configuration["n_features"] ) throughputs = dict() for estimator_config in configuration["estimators"]: estimator_config["instance"].fit(X_train, y_train) start_time = time.time() n_predictions = 0 while (time.time() - start_time) < duration_secs: estimator_config["instance"].predict(X_test[[0]]) n_predictions += 1 throughputs[estimator_config["name"]] = n_predictions / duration_secs return throughputs
[ "def", "benchmark_throughputs", "(", "configuration", ",", "duration_secs", "=", "0.1", ")", ":", "X_train", ",", "y_train", ",", "X_test", ",", "y_test", "=", "generate_dataset", "(", "configuration", "[", "\"n_train\"", "]", ",", "configuration", "[", "\"n_tes...
https://github.com/scikit-learn/scikit-learn/blob/1d1aadd0711b87d2a11c80aad15df6f8cf156712/examples/applications/plot_prediction_latency.py#L246-L260
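The essence of the benchmark is a count-predictions-in-a-fixed-window loop; a minimal standalone version, assuming scikit-learn is installed (the model choice and dataset sizes are illustrative):

import time
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge

X, y = make_regression(n_samples=200, n_features=20, random_state=0)
model = Ridge().fit(X, y)

duration_secs = 0.1
start, n_predictions = time.time(), 0
while (time.time() - start) < duration_secs:
    model.predict(X[[0]])             # one atomic single-row prediction
    n_predictions += 1
print(f"{n_predictions / duration_secs:.0f} predictions/sec")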
zulip/python-zulip-api
70b86614bd15347e28ec2cab4c87c01122faae16
zulip_bots/zulip_bots/bots/tictactoe/tictactoe.py
python
coords_from_command
(cmd: str)
return cmd
As there are various ways to input a coordinate (with/without parentheses, with/without spaces, etc.) the input is stripped to just the numbers before being used in the program.
As there are various ways to input a coordinate (with/without parentheses, with/without spaces, etc.) the input is stripped to just the numbers before being used in the program.
[ "As", "there", "are", "various", "ways", "to", "input", "a", "coordinate", "(", "with", "/", "without", "parentheses", "with", "/", "without", "spaces", "etc", ".", ")", "the", "input", "is", "stripped", "to", "just", "the", "numbers", "before", "being", ...
def coords_from_command(cmd: str) -> str: # This function translates the input command into a TicTacToeGame move. # It should return two indices, each one of (1,2,3), separated by a comma, eg. "3,2" """As there are various ways to input a coordinate (with/without parentheses, with/without spaces, etc.) the input is stripped to just the numbers before being used in the program.""" cmd_num = int(cmd.replace("move ", "")) - 1 cmd = f"{(cmd_num % 3) + 1},{(cmd_num // 3) + 1}" return cmd
[ "def", "coords_from_command", "(", "cmd", ":", "str", ")", "->", "str", ":", "# This function translates the input command into a TicTacToeGame move.", "# It should return two indices, each one of (1,2,3), separated by a comma, eg. \"3,2\"", "cmd_num", "=", "int", "(", "cmd", ".", ...
https://github.com/zulip/python-zulip-api/blob/70b86614bd15347e28ec2cab4c87c01122faae16/zulip_bots/zulip_bots/bots/tictactoe/tictactoe.py#L296-L303
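A worked trace of the move-number arithmetic above: cells are numbered 1..9 left-to-right, top-to-bottom, and map to 1-based (column,row) pairs:

for cmd in ("move 1", "move 5", "move 9"):
    n = int(cmd.replace("move ", "")) - 1
    print(cmd, "->", f"{(n % 3) + 1},{(n // 3) + 1}")
# move 1 -> 1,1   move 5 -> 2,2   move 9 -> 3,3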
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/stat.py
python
S_IFMT
(mode)
return mode & 0o170000
Return the portion of the file's mode that describes the file type.
Return the portion of the file's mode that describes the file type.
[ "Return", "the", "portion", "of", "the", "file", "s", "mode", "that", "describes", "the", "file", "type", "." ]
def S_IFMT(mode): """Return the portion of the file's mode that describes the file type. """ return mode & 0o170000
[ "def", "S_IFMT", "(", "mode", ")", ":", "return", "mode", "&", "0o170000" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/stat.py#L27-L31
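The same mask is exposed by the stdlib stat module; stripping the permission bits with S_IFMT lets a mode be compared against the file-type constants directly:

import os
import stat

mode = os.stat(__file__).st_mode
print(oct(stat.S_IFMT(mode)))              # 0o100000 for a regular file
print(stat.S_IFMT(mode) == stat.S_IFREG)   # True for a regular file
print(stat.S_ISREG(mode))                  # the usual convenience wrapper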
tribe29/checkmk
6260f2512e159e311f426e16b84b19d0b8e9ad0c
cmk/base/plugins/agent_based/check_mk.py
python
parse_checkmk_labels
(string_table: StringTable)
return {"version": None, "agentos": None, **section}
Example: <<<check_mk>>> Version: 1.7.0 BuildDate: Sep 15 2020 AgentOS: windows Hostname: MSEDGEWIN10 Architecture: 64bit OnlyFrom: 123.0.0.1 OnlyFrom: 123.0.0.2 The parsing mimics the behaviour of arguments passed to systemd units. On repetition either append (if another value is provided), or unset the value (if no value is provided).
Example:
[ "Example", ":" ]
def parse_checkmk_labels(string_table: StringTable) -> CheckmkSection: """ Example: <<<check_mk>>> Version: 1.7.0 BuildDate: Sep 15 2020 AgentOS: windows Hostname: MSEDGEWIN10 Architecture: 64bit OnlyFrom: 123.0.0.1 OnlyFrom: 123.0.0.2 The parsing mimics the behaviour of arguments passed to systemd units. On repetition either append (if another value is provided), or unset the value (if no value is provided). """ section: Dict[str, Optional[str]] = {} for line in string_table: key = line[0][:-1].lower() val = " ".join(line[1:]) section[key] = f"{section.get(key) or ''} {val}".strip() if len(line) > 1 else None return {"version": None, "agentos": None, **section}
[ "def", "parse_checkmk_labels", "(", "string_table", ":", "StringTable", ")", "->", "CheckmkSection", ":", "section", ":", "Dict", "[", "str", ",", "Optional", "[", "str", "]", "]", "=", "{", "}", "for", "line", "in", "string_table", ":", "key", "=", "lin...
https://github.com/tribe29/checkmk/blob/6260f2512e159e311f426e16b84b19d0b8e9ad0c/cmk/base/plugins/agent_based/check_mk.py#L18-L43
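A short demonstration of the systemd-style repetition semantics the docstring describes, run on made-up agent output: repeating a key with a value appends, repeating it without a value resets it to None:

sample = [
    ["Version:", "1.7.0"],
    ["OnlyFrom:", "123.0.0.1"],
    ["OnlyFrom:", "123.0.0.2"],
    ["OnlyFrom:"],                # bare key -> unset
]
section = {}
for line in sample:
    key = line[0][:-1].lower()
    val = " ".join(line[1:])
    section[key] = f"{section.get(key) or ''} {val}".strip() if len(line) > 1 else None
print(section)   # {'version': '1.7.0', 'onlyfrom': None}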
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/twisted/twisted/python/components.py
python
backwardsCompatImplements
(klass)
DEPRECATED. Does nothing. Previously handled backwards compat from a zope.interface using class to a class wanting old twisted components interface behaviors.
DEPRECATED.
[ "DEPRECATED", "." ]
def backwardsCompatImplements(klass): """DEPRECATED. Does nothing. Previously handled backwards compat from a zope.interface using class to a class wanting old twisted components interface behaviors. """ warnings.warn("components.backwardsCompatImplements doesn't do anything in Twisted 2.3, stop calling it.", ComponentsDeprecationWarning, stacklevel=2)
[ "def", "backwardsCompatImplements", "(", "klass", ")", ":", "warnings", ".", "warn", "(", "\"components.backwardsCompatImplements doesn't do anything in Twisted 2.3, stop calling it.\"", ",", "ComponentsDeprecationWarning", ",", "stacklevel", "=", "2", ")" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/twisted/twisted/python/components.py#L125-L132
kevoreilly/CAPEv2
6cf79c33264624b3604d4cd432cde2a6b4536de6
modules/processing/behavior.py
python
Enhanced._process_call
(self, call)
return event
Gets files calls @return: information list
Gets files calls
[ "Gets", "files", "calls" ]
def _process_call(self, call):
    """Gets files calls
    @return: information list
    """

    def _load_args(call):
        """
        Load arguments from call
        """
        res = {}
        for argument in call["arguments"]:
            res[argument["name"]] = argument["value"]
        return res

    def _generic_handle_details(self, call, item):
        """
        Generic handling of api calls
        @call: the call dict
        @item: Generic item to process
        """
        event = None
        if call["api"] in item["apis"]:
            args = _load_args(call)
            self.eid += 1
            event = {
                "event": item["event"],
                "object": item["object"],
                "timestamp": call["timestamp"],
                "eid": self.eid,
                "data": {},
            }
            for logname, dataname in item["args"]:
                event["data"][logname] = args.get(dataname)
            return event

    def _generic_handle(self, data, call):
        """Generic handling of api calls."""
        for item in data:
            event = _generic_handle_details(self, call, item)
            if event:
                return event
        return None

    def _get_service_action(control_code):
        """@see: http://msdn.microsoft.com/en-us/library/windows/desktop/ms682108%28v=vs.85%29.aspx"""
        codes = {1: "stop", 2: "pause", 3: "continue", 4: "info"}
        default = "user" if int(control_code) >= 128 else "notify"
        return codes.get(control_code, default)

    event = None
    gendat = [
        {"event": "move", "object": "file",
         "apis": ["MoveFileWithProgressW", "MoveFileWithProgressTransactedW"],
         "args": [("from", "ExistingFileName"), ("to", "NewFileName")]},
        {"event": "copy", "object": "file",
         "apis": ["CopyFileA", "CopyFileW", "CopyFileExW", "CopyFileExA"],
         "args": [("from", "ExistingFileName"), ("to", "NewFileName")]},
        {"event": "delete", "object": "file",
         "apis": ["DeleteFileA", "DeleteFileW", "NtDeleteFile"],
         "args": [("file", "FileName")]},
        {"event": "delete", "object": "dir",
         "apis": ["RemoveDirectoryA", "RemoveDirectoryW"],
         "args": [("file", "DirectoryName")]},
        {"event": "create", "object": "dir",
         "apis": ["CreateDirectoryW", "CreateDirectoryExW"],
         "args": [("file", "DirectoryName")]},
        {"event": "write", "object": "file",
         "apis": ["URLDownloadToFileW", "URLDownloadToFileA"],
         "args": [("file", "FileName")]},
        {"event": "read", "object": "file",
         "apis": ["NtReadFile"],
         "args": [("file", "HandleName")]},
        {"event": "write", "object": "file",
         "apis": ["NtWriteFile"],
         "args": [("file", "HandleName")]},
        {"event": "execute", "object": "file",
         "apis": ["CreateProcessAsUserA", "CreateProcessAsUserW", "CreateProcessA",
                  "CreateProcessW", "NtCreateProcess", "NtCreateProcessEx"],
         "args": [("file", "FileName")]},
        {"event": "execute", "object": "file",
         "apis": ["CreateProcessInternalW", "CreateProcessWithLogonW", "CreateProcessWithTokenW"],
         "args": [("file", "CommandLine")]},
        {"event": "execute", "object": "file",
         "apis": ["ShellExecuteExA", "ShellExecuteExW"],
         "args": [("file", "FilePath")]},
        {"event": "load", "object": "library",
         "apis": ["LoadLibraryA", "LoadLibraryW", "LoadLibraryExA", "LoadLibraryExW",
                  "LdrLoadDll", "LdrGetDllHandle"],
         "args": [("file", "FileName"), ("pathtofile", "PathToFile"),
                  ("moduleaddress", "BaseAddress")]},
        {"event": "findwindow", "object": "windowname",
         "apis": ["FindWindowA", "FindWindowW", "FindWindowExA", "FindWindowExW"],
         "args": [("classname", "ClassName"), ("windowname", "WindowName")]},
        {"event": "write", "object": "registry",
         "apis": ["RegSetValueExA", "RegSetValueExW"],
         "args": [("regkey", "FullName"), ("content", "Buffer")]},
        {"event": "write", "object": "registry",
         "apis": ["RegCreateKeyExA", "RegCreateKeyExW"],
         "args": [("regkey", "FullName")]},
        {"event": "read", "object": "registry",
         "apis": ["RegQueryValueExA", "RegQueryValueExW"],
         "args": [("regkey", "FullName"), ("content", "Data")]},
        {"event": "read", "object": "registry",
         "apis": ["NtQueryValueKey"],
         "args": [("regkey", "FullName"), ("content", "Information")]},
        {"event": "delete", "object": "registry",
         "apis": ["RegDeleteKeyA", "RegDeleteKeyW", "RegDeleteValueA",
                  "RegDeleteValueW", "NtDeleteValueKey"],
         "args": [("regkey", "FullName")]},
        {"event": "create", "object": "windowshook",
         "apis": ["SetWindowsHookExA"],
         "args": [("id", "HookIdentifier"), ("moduleaddress", "ModuleAddress"),
                  ("procedureaddress", "ProcedureAddress")]},
        {"event": "start", "object": "service",
         "apis": ["StartServiceA", "StartServiceW"],
         "args": [("service", "ServiceName")]},
        {"event": "modify", "object": "service",
         "apis": ["ControlService"],
         "args": [("service", "ServiceName"), ("controlcode", "ControlCode")]},
        {"event": "delete", "object": "service",
         "apis": ["DeleteService"],
         "args": [("service", "ServiceName")]},
    ]
    # Not sure I really want this, way too noisy anyway and doesn't bring
    # much value.
    # if self.details:
    #     gendata += [{"event": "get", "object": "procedure",
    #                  "apis": ["LdrGetProcedureAddress"],
    #                  "args": [("name", "FunctionName"), ("ordinal", "Ordinal")]},]

    event = _generic_handle(self, gendat, call)
    args = _load_args(call)

    if event:
        if (
            call["api"] in ["LoadLibraryA", "LoadLibraryW", "LoadLibraryExA",
                            "LoadLibraryExW", "LdrGetDllHandle"]
            and call["status"]
        ):
            self._add_loaded_module(args.get("FileName", ""), args.get("ModuleHandle", ""))
        elif call["api"] in ["LdrLoadDll"] and call["status"]:
            self._add_loaded_module(args.get("FileName", ""), args.get("BaseAddress", ""))
        elif call["api"] in ["LdrGetProcedureAddress"] and call["status"]:
            self._add_procedure(args.get("ModuleHandle", ""), args.get("FunctionName", ""),
                                args.get("FunctionAddress", ""))
            event["data"]["module"] = self._get_loaded_module(args.get("ModuleHandle", ""))
        elif call["api"] in ["SetWindowsHookExA"]:
            event["data"]["module"] = self._get_loaded_module(args.get("ModuleAddress", ""))
        if call["api"] in ["ControlService"]:
            event["data"]["action"] = _get_service_action(args["ControlCode"])
        return event

    return event
[ "def", "_process_call", "(", "self", ",", "call", ")", ":", "def", "_load_args", "(", "call", ")", ":", "\"\"\"\n Load arguments from call\n \"\"\"", "res", "=", "{", "}", "for", "argument", "in", "call", "[", "\"arguments\"", "]", ":", "re...
https://github.com/kevoreilly/CAPEv2/blob/6cf79c33264624b3604d4cd432cde2a6b4536de6/modules/processing/behavior.py#L687-L931
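To make the table-driven dispatch above concrete, here is a minimal, self-contained sketch of the same lookup idea; the call record below is hypothetical sample data shaped like the ones _process_call consumes, not CAPE's actual report output:

# Hypothetical call record and one lookup-table entry, mirroring gendat above.
call = {
    "api": "DeleteFileA",
    "timestamp": "2021-01-01 00:00:00",
    "arguments": [{"name": "FileName", "value": "C:\\tmp\\x.bin"}],
}
item = {"event": "delete", "object": "file",
        "apis": ["DeleteFileA", "DeleteFileW", "NtDeleteFile"],
        "args": [("file", "FileName")]}

if call["api"] in item["apis"]:
    args = {a["name"]: a["value"] for a in call["arguments"]}
    event = {"event": item["event"], "object": item["object"],
             "timestamp": call["timestamp"],
             "data": {log: args.get(src) for log, src in item["args"]}}
    print(event["event"], event["data"])  # delete {'file': 'C:\\tmp\\x.bin'}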
sisl/MADRL
4a6d780e8cf111f312b757cca1b9f83441644958
madrl_environments/walker/multi_walker.py
python
BipedalWalker.__init__
(self, world, init_x=TERRAIN_STEP * TERRAIN_STARTPAD / 2, init_y=TERRAIN_HEIGHT + 2 * LEG_H, n_walkers=2, one_hot=False)
[]
def __init__(self, world, init_x=TERRAIN_STEP * TERRAIN_STARTPAD / 2,
             init_y=TERRAIN_HEIGHT + 2 * LEG_H, n_walkers=2, one_hot=False):
    self.world = world
    self._n_walkers = n_walkers
    self.one_hot = one_hot
    self.hull = None
    self.init_x = init_x
    self.init_y = init_y
    self._seed()
[ "def", "__init__", "(", "self", ",", "world", ",", "init_x", "=", "TERRAIN_STEP", "*", "TERRAIN_STARTPAD", "/", "2", ",", "init_y", "=", "TERRAIN_HEIGHT", "+", "2", "*", "LEG_H", ",", "n_walkers", "=", "2", ",", "one_hot", "=", "False", ")", ":", "self...
https://github.com/sisl/MADRL/blob/4a6d780e8cf111f312b757cca1b9f83441644958/madrl_environments/walker/multi_walker.py#L89-L97
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/sqlalchemy/sql/elements.py
python
UnaryExpression._negate
(self)
[]
def _negate(self):
    if self.negate is not None:
        return UnaryExpression(
            self.element,
            operator=self.negate,
            negate=self.operator,
            modifier=self.modifier,
            type_=self.type,
            wraps_column_expression=self.wraps_column_expression)
    elif self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
        return UnaryExpression(
            self.self_group(against=operators.inv),
            operator=operators.inv,
            type_=type_api.BOOLEANTYPE,
            wraps_column_expression=self.wraps_column_expression,
            negate=None)
    else:
        return ClauseElement._negate(self)
[ "def", "_negate", "(", "self", ")", ":", "if", "self", ".", "negate", "is", "not", "None", ":", "return", "UnaryExpression", "(", "self", ".", "element", ",", "operator", "=", "self", ".", "negate", ",", "negate", "=", "self", ".", "operator", ",", "...
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/sqlalchemy/sql/elements.py#L2806-L2823
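`_negate` backs SQLAlchemy's `~` / `not_()` operators; `Exists`, for instance, is a `UnaryExpression` subclass, so negating one exercises this method. A small behavioral sketch (written against SQLAlchemy 1.4+; exact SQL text may vary by version):

from sqlalchemy import column, exists, select

stmt = exists(select(column("id")))  # Exists is a UnaryExpression subclass
print(~stmt)  # NOT (EXISTS (SELECT id)) -- the inverse operator is swapped in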
KalleHallden/AutoTimer
2d954216700c4930baa154e28dbddc34609af7ce
env/lib/python2.7/site-packages/pkg_resources/__init__.py
python
_handle_ns
(packageName, path_item)
return subpath
Ensure that named package includes a subpath of path_item (if needed)
Ensure that named package includes a subpath of path_item (if needed)
[ "Ensure", "that", "named", "package", "includes", "a", "subpath", "of", "path_item", "(", "if", "needed", ")" ]
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""

    importer = get_importer(path_item)
    if importer is None:
        return None

    # capture warnings due to #1111
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        loader = importer.find_module(packageName)

    if loader is None:
        return None
    module = sys.modules.get(packageName)
    if module is None:
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module, '__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        path = module.__path__
        path.append(subpath)
        loader.load_module(packageName)
        _rebuild_mod_path(path, packageName, module)
    return subpath
[ "def", "_handle_ns", "(", "packageName", ",", "path_item", ")", ":", "importer", "=", "get_importer", "(", "path_item", ")", "if", "importer", "is", "None", ":", "return", "None", "# capture warnings due to #1111", "with", "warnings", ".", "catch_warnings", "(", ...
https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/pkg_resources/__init__.py#L2191-L2219
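`_handle_ns` services legacy pkg_resources-style namespace packages, stitching one logical package together from several sys.path entries. For context, the declaration side is the documented one-liner each participating distribution ships in its shared `__init__.py` (shown as a sketch; the package name is hypothetical):

# mypkg/__init__.py of every distribution sharing the 'mypkg' namespace
__import__('pkg_resources').declare_namespace(__name__)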
bjmayor/hacker
e3ce2ad74839c2733b27dac6c0f495e0743e1866
venv/lib/python3.5/site-packages/PIL/OleFileIO.py
python
_OleDirectoryEntry.__init__
(self, entry, sid, olefile)
Constructor for an _OleDirectoryEntry object.
Parses a 128-byte entry from the OLE Directory stream.

:param entry  : string (must be 128 bytes long)
:param sid    : index of this directory entry in the OLE file directory
:param olefile: OleFileIO containing this directory entry
Constructor for an _OleDirectoryEntry object. Parses a 128-byte entry from the OLE Directory stream.
[ "Constructor", "for", "an", "_OleDirectoryEntry", "object", ".", "Parses", "a", "128", "-", "bytes", "entry", "from", "the", "OLE", "Directory", "stream", "." ]
def __init__(self, entry, sid, olefile):
    """
    Constructor for an _OleDirectoryEntry object.
    Parses a 128-byte entry from the OLE Directory stream.

    :param entry  : string (must be 128 bytes long)
    :param sid    : index of this directory entry in the OLE file directory
    :param olefile: OleFileIO containing this directory entry
    """
    self.sid = sid
    # ref to olefile is stored for future use
    self.olefile = olefile
    # kids is a list of children entries, if this entry is a storage:
    # (list of _OleDirectoryEntry objects)
    self.kids = []
    # kids_dict is a dictionary of children entries, indexed by their
    # name in lowercase: used to quickly find an entry, and to detect
    # duplicates
    self.kids_dict = {}
    # flag used to detect if the entry is referenced more than once in
    # directory:
    self.used = False
    # decode DirEntry
    (
        name,
        namelength,
        self.entry_type,
        self.color,
        self.sid_left,
        self.sid_right,
        self.sid_child,
        clsid,
        self.dwUserFlags,
        self.createTime,
        self.modifyTime,
        self.isectStart,
        sizeLow,
        sizeHigh
    ) = struct.unpack(_OleDirectoryEntry.STRUCT_DIRENTRY, entry)
    if self.entry_type not in [STGTY_ROOT, STGTY_STORAGE, STGTY_STREAM, STGTY_EMPTY]:
        olefile.raise_defect(DEFECT_INCORRECT, 'unhandled OLE storage type')
    # only first directory entry can (and should) be root:
    if self.entry_type == STGTY_ROOT and sid != 0:
        olefile.raise_defect(DEFECT_INCORRECT, 'duplicate OLE root entry')
    if sid == 0 and self.entry_type != STGTY_ROOT:
        olefile.raise_defect(DEFECT_INCORRECT, 'incorrect OLE root entry')
    #debug (struct.unpack(fmt_entry, entry[:len_entry]))
    # name should be at most 31 unicode characters + null character,
    # so 64 bytes in total (31*2 + 2):
    if namelength > 64:
        olefile.raise_defect(DEFECT_INCORRECT, 'incorrect DirEntry name length')
        # if exception not raised, namelength is set to the maximum value:
        namelength = 64
    # only characters without ending null char are kept:
    name = name[:(namelength-2)]
    #TODO: check if the name is actually followed by a null unicode character ([MS-CFB] 2.6.1)
    #TODO: check if the name does not contain forbidden characters:
    # [MS-CFB] 2.6.1: "The following characters are illegal and MUST NOT be part of the name: '/', '\', ':', '!'."
    # name is converted from UTF-16LE to the path encoding specified in the OleFileIO:
    self.name = olefile._decode_utf16_str(name)

    debug('DirEntry SID=%d: %s' % (self.sid, repr(self.name)))
    debug(' - type: %d' % self.entry_type)
    debug(' - sect: %d' % self.isectStart)
    debug(' - SID left: %d, right: %d, child: %d' % (self.sid_left, self.sid_right, self.sid_child))

    # sizeHigh is only used for 4K sectors, it should be zero for 512 bytes
    # sectors, BUT apparently some implementations set it as 0xFFFFFFFF, 1
    # or some other value so it cannot be raised as a defect in general:
    if olefile.sectorsize == 512:
        if sizeHigh != 0 and sizeHigh != 0xFFFFFFFF:
            debug('sectorsize=%d, sizeLow=%d, sizeHigh=%d (%X)' %
                  (olefile.sectorsize, sizeLow, sizeHigh, sizeHigh))
            olefile.raise_defect(DEFECT_UNSURE, 'incorrect OLE stream size')
        self.size = sizeLow
    else:
        self.size = sizeLow + (long(sizeHigh) << 32)
    debug(' - size: %d (sizeLow=%d, sizeHigh=%d)' % (self.size, sizeLow, sizeHigh))

    self.clsid = _clsid(clsid)
    # a storage should have a null size, BUT some implementations such as
    # Word 8 for Mac seem to allow non-null values => Potential defect:
    if self.entry_type == STGTY_STORAGE and self.size != 0:
        olefile.raise_defect(DEFECT_POTENTIAL, 'OLE storage with size>0')
    # check if stream is not already referenced elsewhere:
    if self.entry_type in (STGTY_ROOT, STGTY_STREAM) and self.size > 0:
        if (self.size < olefile.minisectorcutoff
                and self.entry_type == STGTY_STREAM):  # only streams can be in MiniFAT
            # ministream object
            minifat = True
        else:
            minifat = False
        olefile._check_duplicate_stream(self.isectStart, minifat)
[ "def", "__init__", "(", "self", ",", "entry", ",", "sid", ",", "olefile", ")", ":", "self", ".", "sid", "=", "sid", "# ref to olefile is stored for future use", "self", ".", "olefile", "=", "olefile", "# kids is a list of children entries, if this entry is a storage:", ...
https://github.com/bjmayor/hacker/blob/e3ce2ad74839c2733b27dac6c0f495e0743e1866/venv/lib/python3.5/site-packages/PIL/OleFileIO.py#L840-L933
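The unpack above implies a fixed 128-byte little-endian layout. The format string below is an assumption reconstructed from the field list (the real constant lives in `_OleDirectoryEntry.STRUCT_DIRENTRY`), but its size checks out:

import struct

# name(64s) namelength(H) entry_type(B) color(B) sid_left/right/child(3L)
# clsid(16s) dwUserFlags(L) createTime/modifyTime(2Q) isectStart(L) sizeLow/High(2L)
STRUCT_DIRENTRY = '<64sHBBLLL16sLQQLLL'  # assumed; matches the 14 unpacked fields
assert struct.calcsize(STRUCT_DIRENTRY) == 128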
RasaHQ/rasa
54823b68c1297849ba7ae841a4246193cd1223a1
rasa/core/utils.py
python
configure_file_logging
( logger_obj: logging.Logger, log_file: Optional[Text], use_syslog: Optional[bool], syslog_address: Optional[Text] = None, syslog_port: Optional[int] = None, syslog_protocol: Optional[Text] = None, )
Configure logging to a file.

Args:
    logger_obj: Logger object to configure.
    log_file: Path of log file to write to.
    use_syslog: Add syslog as a logger.
    syslog_address: Address of the syslog server.
    syslog_port: Port of the syslog server.
    syslog_protocol: Protocol with the syslog server
Configure logging to a file.
[ "Configure", "logging", "to", "a", "file", "." ]
def configure_file_logging(
    logger_obj: logging.Logger,
    log_file: Optional[Text],
    use_syslog: Optional[bool],
    syslog_address: Optional[Text] = None,
    syslog_port: Optional[int] = None,
    syslog_protocol: Optional[Text] = None,
) -> None:
    """Configure logging to a file.

    Args:
        logger_obj: Logger object to configure.
        log_file: Path of log file to write to.
        use_syslog: Add syslog as a logger.
        syslog_address: Address of the syslog server.
        syslog_port: Port of the syslog server.
        syslog_protocol: Protocol with the syslog server
    """
    if use_syslog:
        formatter = logging.Formatter(
            "%(asctime)s [%(levelname)-5.5s] [%(process)d] %(message)s"
        )
        socktype = SOCK_STREAM if syslog_protocol == TCP_PROTOCOL else SOCK_DGRAM
        syslog_handler = logging.handlers.SysLogHandler(
            address=(syslog_address, syslog_port),
            socktype=socktype,
        )
        syslog_handler.setLevel(logger_obj.level)
        syslog_handler.setFormatter(formatter)
        logger_obj.addHandler(syslog_handler)
    if log_file:
        formatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
        file_handler = logging.FileHandler(
            log_file, encoding=rasa.shared.utils.io.DEFAULT_ENCODING
        )
        file_handler.setLevel(logger_obj.level)
        file_handler.setFormatter(formatter)
        logger_obj.addHandler(file_handler)
[ "def", "configure_file_logging", "(", "logger_obj", ":", "logging", ".", "Logger", ",", "log_file", ":", "Optional", "[", "Text", "]", ",", "use_syslog", ":", "Optional", "[", "bool", "]", ",", "syslog_address", ":", "Optional", "[", "Text", "]", "=", "Non...
https://github.com/RasaHQ/rasa/blob/54823b68c1297849ba7ae841a4246193cd1223a1/rasa/core/utils.py#L33-L69
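A minimal usage sketch, assuming an environment where rasa (and thus this helper) is importable; the logger name and file path are arbitrary:

import logging
from rasa.core.utils import configure_file_logging

logger = logging.getLogger("demo")  # hypothetical logger name
logger.setLevel(logging.INFO)
configure_file_logging(logger, log_file="demo.log", use_syslog=False)
logger.info("goes to demo.log via the FileHandler added above")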
mrkipling/maraschino
c6be9286937783ae01df2d6d8cebfc8b2734a7d7
lib/jinja2/environment.py
python
Environment.extend
(self, **attributes)
Add the items to the instance of the environment if they do not exist yet. This is used by :ref:`extensions <writing-extensions>` to register callbacks and configuration values without breaking inheritance.
Add the items to the instance of the environment if they do not exist yet. This is used by :ref:`extensions <writing-extensions>` to register callbacks and configuration values without breaking inheritance.
[ "Add", "the", "items", "to", "the", "instance", "of", "the", "environment", "if", "they", "do", "not", "exist", "yet", ".", "This", "is", "used", "by", ":", "ref", ":", "extensions", "<writing", "-", "extensions", ">", "to", "register", "callbacks", "and...
def extend(self, **attributes):
    """Add the items to the instance of the environment if they do not exist
    yet.  This is used by :ref:`extensions <writing-extensions>` to register
    callbacks and configuration values without breaking inheritance.
    """
    for key, value in attributes.iteritems():
        if not hasattr(self, key):
            setattr(self, key, value)
[ "def", "extend", "(", "self", ",", "*", "*", "attributes", ")", ":", "for", "key", ",", "value", "in", "attributes", ".", "iteritems", "(", ")", ":", "if", "not", "hasattr", "(", "self", ",", "key", ")", ":", "setattr", "(", "self", ",", "key", "...
https://github.com/mrkipling/maraschino/blob/c6be9286937783ae01df2d6d8cebfc8b2734a7d7/lib/jinja2/environment.py#L290-L297
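A quick sketch of the "first value wins" semantics (the attribute name is made up; real extensions register their own). The vendored copy above is Python 2 (`iteritems`), but modern Jinja2's `Environment.extend` behaves the same:

from jinja2 import Environment

env = Environment()
env.extend(fragment_cache=None)  # hypothetical attribute, set because it is absent
env.extend(fragment_cache={})    # no-op: the attribute already exists
print(env.fragment_cache)        # None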
haiwen/seahub
e92fcd44e3e46260597d8faa9347cb8222b8b10d
seahub/api2/endpoints/group_owned_libraries.py
python
GroupOwnedLibraryGroupShare.has_shared_to_group
(self, request, repo_id, path, group_id)
return has_shared
[]
def has_shared_to_group(self, request, repo_id, path, group_id):
    items = self.list_group_shared_items(request, repo_id, path)

    has_shared = False
    for item in items:
        if group_id == item['group_id']:
            has_shared = True
            break

    return has_shared
[ "def", "has_shared_to_group", "(", "self", ",", "request", ",", "repo_id", ",", "path", ",", "group_id", ")", ":", "items", "=", "self", ".", "list_group_shared_items", "(", "request", ",", "repo_id", ",", "path", ")", "has_shared", "=", "False", "for", "i...
https://github.com/haiwen/seahub/blob/e92fcd44e3e46260597d8faa9347cb8222b8b10d/seahub/api2/endpoints/group_owned_libraries.py#L1105-L1114
shapely/shapely
9258e6dd4dcca61699d69c2a5853a486b132ed86
shapely/prepared.py
python
PreparedGeometry.overlaps
(self, other)
return self.context.overlaps(other)
Returns True if geometries overlap, else False
Returns True if geometries overlap, else False
[ "Returns", "True", "if", "geometries", "overlap", "else", "False" ]
def overlaps(self, other):
    """Returns True if geometries overlap, else False"""
    return self.context.overlaps(other)
[ "def", "overlaps", "(", "self", ",", "other", ")", ":", "return", "self", ".", "context", ".", "overlaps", "(", "other", ")" ]
https://github.com/shapely/shapely/blob/9258e6dd4dcca61699d69c2a5853a486b132ed86/shapely/prepared.py#L59-L61
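Usage sketch with shapely's public API; as the body above shows, the predicate simply delegates to the wrapped geometry:

from shapely.geometry import Polygon
from shapely.prepared import prep

a = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
b = Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])

prepared_a = prep(a)           # PreparedGeometry wrapping `a`
print(prepared_a.overlaps(b))  # True -- the squares partially overlap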
namisan/mt-dnn
8564c8cfa971391187bd699116fbe4388438d62d
mt_dnn/loss.py
python
KlCriterion.__init__
(self, alpha=1.0, name='KL Div Criterion')
[]
def __init__(self, alpha=1.0, name='KL Div Criterion'):
    super().__init__()
    self.alpha = alpha
    self.name = name
[ "def", "__init__", "(", "self", ",", "alpha", "=", "1.0", ",", "name", "=", "'KL Div Criterion'", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "self", ".", "alpha", "=", "alpha", "self", ".", "name", "=", "name" ]
https://github.com/namisan/mt-dnn/blob/8564c8cfa971391187bd699116fbe4388438d62d/mt_dnn/loss.py#L85-L88
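The constructor above only stores hyperparameters; the KL term itself is not shown here. For orientation, a generic PyTorch KL-divergence loss between two sets of logits looks like this (a sketch, not mt-dnn's actual forward):

import torch
import torch.nn.functional as F

student_logits = torch.randn(4, 10)
teacher_logits = torch.randn(4, 10)

loss = F.kl_div(
    F.log_softmax(student_logits, dim=-1),
    F.softmax(teacher_logits, dim=-1),
    reduction="batchmean",  # KL averaged over the batch
)
print(loss.item())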
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/sympy/series/formal.py
python
FormalPowerSeries.product
(self, other, x=None, n=6)
return FormalPowerSeriesProduct(self, other)
Multiplies two Formal Power Series, using discrete convolution, and
returns the truncated terms up to the specified order.

Parameters
==========

n : Number, optional
    Specifies the order of the term up to which the polynomial should
    be truncated.

Examples
========

>>> from sympy import fps, sin, exp, convolution
>>> from sympy.abc import x
>>> f1 = fps(sin(x))
>>> f2 = fps(exp(x))

>>> f1.product(f2, x).truncate(4)
x + x**2 + x**3/3 + O(x**4)

See Also
========

sympy.discrete.convolutions
sympy.series.formal.FormalPowerSeriesProduct
Multiplies two Formal Power Series, using discrete convolution, and returns the truncated terms up to the specified order.
[ "Multiplies", "two", "Formal", "Power", "Series", "using", "discrete", "convolution", "and", "return", "the", "truncated", "terms", "upto", "specified", "order", "." ]
def product(self, other, x=None, n=6):
    """Multiplies two Formal Power Series, using discrete convolution, and
    returns the truncated terms up to the specified order.

    Parameters
    ==========

    n : Number, optional
        Specifies the order of the term up to which the polynomial should
        be truncated.

    Examples
    ========

    >>> from sympy import fps, sin, exp, convolution
    >>> from sympy.abc import x
    >>> f1 = fps(sin(x))
    >>> f2 = fps(exp(x))

    >>> f1.product(f2, x).truncate(4)
    x + x**2 + x**3/3 + O(x**4)

    See Also
    ========

    sympy.discrete.convolutions
    sympy.series.formal.FormalPowerSeriesProduct

    """
    if x is None:
        x = self.x
    if n is None:
        return iter(self)

    other = sympify(other)

    if not isinstance(other, FormalPowerSeries):
        raise ValueError("Both series should be an instance of FormalPowerSeries"
                         " class.")

    if self.dir != other.dir:
        raise ValueError("Both series should be calculated from the"
                         " same direction.")
    elif self.x0 != other.x0:
        raise ValueError("Both series should be calculated about the"
                         " same point.")
    elif self.x != other.x:
        raise ValueError("Both series should have the same symbol.")

    return FormalPowerSeriesProduct(self, other)
[ "def", "product", "(", "self", ",", "other", ",", "x", "=", "None", ",", "n", "=", "6", ")", ":", "if", "x", "is", "None", ":", "x", "=", "self", ".", "x", "if", "n", "is", "None", ":", "return", "iter", "(", "self", ")", "other", "=", "sym...
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/sympy/series/formal.py#L1161-L1212
python-hyper/hyperlink
ec7d17268809f89f044e04d71225d6a6e4df264d
src/hyperlink/_url.py
python
URL.port
(self)
return self._port
The port is an integer that is commonly used in connecting to the
:attr:`host`, and almost never appears without it.

When not present in the original URL, this attribute defaults to the
scheme's default port. If the scheme's default port is not known, and
the port is not provided, this attribute will be set to None.

>>> URL.from_text(u'http://example.com/pa/th').port
80
>>> URL.from_text(u'foo://example.com/pa/th').port
>>> URL.from_text(u'foo://example.com:8042/pa/th').port
8042

.. note::

   Per the standard, when the port is the same as the scheme's
   default port, it will be omitted in the text URL.
The port is an integer that is commonly used in connecting to the :attr:`host`, and almost never appears without it.
[ "The", "port", "is", "an", "integer", "that", "is", "commonly", "used", "in", "connecting", "to", "the", ":", "attr", ":", "host", "and", "almost", "never", "appears", "without", "it", "." ]
def port(self):
    # type: () -> Optional[int]
    """The port is an integer that is commonly used in connecting to the
    :attr:`host`, and almost never appears without it.

    When not present in the original URL, this attribute defaults to the
    scheme's default port. If the scheme's default port is not known, and
    the port is not provided, this attribute will be set to None.

    >>> URL.from_text(u'http://example.com/pa/th').port
    80
    >>> URL.from_text(u'foo://example.com/pa/th').port
    >>> URL.from_text(u'foo://example.com:8042/pa/th').port
    8042

    .. note::

       Per the standard, when the port is the same as the scheme's
       default port, it will be omitted in the text URL.
    """
    return self._port
[ "def", "port", "(", "self", ")", ":", "# type: () -> Optional[int]", "return", "self", ".", "_port" ]
https://github.com/python-hyper/hyperlink/blob/ec7d17268809f89f044e04d71225d6a6e4df264d/src/hyperlink/_url.py#L1094-L1115
krintoxi/NoobSec-Toolkit
38738541cbc03cedb9a3b3ed13b629f781ad64f6
NoobSecToolkit - MAC OSX/tools/inject/lib/controller/controller.py
python
_formatInjection
(inj)
return data
[]
def _formatInjection(inj):
    paramType = conf.method if conf.method not in (None, HTTPMETHOD.GET, HTTPMETHOD.POST) else inj.place
    data = "Parameter: %s (%s)\n" % (inj.parameter, paramType)

    for stype, sdata in inj.data.items():
        title = sdata.title
        vector = sdata.vector
        comment = sdata.comment
        payload = agent.adjustLateValues(sdata.payload)
        if inj.place == PLACE.CUSTOM_HEADER:
            payload = payload.split(',', 1)[1]
        if stype == PAYLOAD.TECHNIQUE.UNION:
            count = re.sub(r"(?i)(\(.+\))|(\blimit[^A-Za-z]+)", "", sdata.payload).count(',') + 1
            title = re.sub(r"\d+ to \d+", str(count), title)
            vector = agent.forgeUnionQuery("[QUERY]", vector[0], vector[1], vector[2], None, None, vector[5], vector[6])
            if count == 1:
                title = title.replace("columns", "column")
        elif comment:
            vector = "%s%s" % (vector, comment)
        data += " Type: %s\n" % PAYLOAD.SQLINJECTION[stype]
        data += " Title: %s\n" % title
        data += " Payload: %s\n" % urldecode(payload, unsafe="&", plusspace=(inj.place != PLACE.GET and kb.postSpaceToPlus))
        data += " Vector: %s\n\n" % vector if conf.verbose > 1 else "\n"

    return data
[ "def", "_formatInjection", "(", "inj", ")", ":", "paramType", "=", "conf", ".", "method", "if", "conf", ".", "method", "not", "in", "(", "None", ",", "HTTPMETHOD", ".", "GET", ",", "HTTPMETHOD", ".", "POST", ")", "else", "inj", ".", "place", "data", ...
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit - MAC OSX/tools/inject/lib/controller/controller.py#L131-L155
ClusterHQ/flocker
eaa586248986d7cd681c99c948546c2b507e44de
benchmark/script.py
python
parse_userdata
(options)
return None
Parse the userdata option and add to result.

:param BenchmarkOptions options: Script options.
:return: Parsed user data.
Parse the userdata option and add to result.
[ "Parse", "the", "userdata", "option", "and", "add", "to", "result", "." ]
def parse_userdata(options): """ Parse the userdata option and add to result. :param BenchmarkOptions options: Script options. :return: Parsed user data. """ userdata = options['userdata'] if userdata: try: if userdata.startswith('@'): try: with open(userdata[1:]) as f: return json.load(f) except IOError as e: usage( options, 'Invalid user data file: {}'.format(e.strerror) ) else: return json.loads(userdata) except ValueError as e: usage(options, 'Invalid user data: {}'.format(e.args[0])) return None
[ "def", "parse_userdata", "(", "options", ")", ":", "userdata", "=", "options", "[", "'userdata'", "]", "if", "userdata", ":", "try", ":", "if", "userdata", ".", "startswith", "(", "'@'", ")", ":", "try", ":", "with", "open", "(", "userdata", "[", "1", ...
https://github.com/ClusterHQ/flocker/blob/eaa586248986d7cd681c99c948546c2b507e44de/benchmark/script.py#L210-L233
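The '@' prefix selects file-backed JSON; anything else is parsed inline. A stdlib-only sketch of the two input forms (the file name is hypothetical):

import json

inline = '{"branch": "master"}'
print(json.loads(inline))  # {'branch': 'master'} -- inline form

file_form = '@params.json'  # file form: '@' + path; the parser strips the '@'
print(file_form.startswith('@'), file_form[1:])  # True params.json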
makehumancommunity/makehuman
8006cf2cc851624619485658bb933a4244bbfd7c
makehuman/core/gui3d.py
python
View.addObject
(self, object)
return object
Adds the object to the view. If the view is attached to the app, the
object will also be attached and will get an OpenGL counterpart.

:param object: The object to be added.
:type object: gui3d.Object
:return: The object, for convenience.
:rtype: gui3d.Object
Adds the object to the view. If the view is attached to the app, the object will also be attached and will get an OpenGL counterpart.
[ "Adds", "the", "object", "to", "the", "view", ".", "If", "the", "view", "is", "attached", "to", "the", "app", "the", "object", "will", "also", "be", "attached", "and", "will", "get", "an", "OpenGL", "counterpart", "." ]
def addObject(self, object):
    """
    Adds the object to the view. If the view is attached to the app, the
    object will also be attached and will get an OpenGL counterpart.

    :param object: The object to be added.
    :type object: gui3d.Object
    :return: The object, for convenience.
    :rtype: gui3d.Object
    """
    if object._view:
        raise RuntimeError('The object is already added to a view')
    object._view = weakref.ref(self)
    if self._attached:
        object._attach()
    self.objects.append(object)
    return object
[ "def", "addObject", "(", "self", ",", "object", ")", ":", "if", "object", ".", "_view", ":", "raise", "RuntimeError", "(", "'The object is already added to a view'", ")", "object", ".", "_view", "=", "weakref", ".", "ref", "(", "self", ")", "if", "self", "...
https://github.com/makehumancommunity/makehuman/blob/8006cf2cc851624619485658bb933a4244bbfd7c/makehuman/core/gui3d.py#L131-L149
python/cpython
e13cdca0f5224ec4e23bdd04bb3120506964bc8b
Lib/operator.py
python
and_
(a, b)
return a & b
Same as a & b.
Same as a & b.
[ "Same", "as", "a", "&", "b", "." ]
def and_(a, b):
    "Same as a & b."
    return a & b
[ "def", "and_", "(", "a", ",", "b", ")", ":", "return", "a", "&", "b" ]
https://github.com/python/cpython/blob/e13cdca0f5224ec4e23bdd04bb3120506964bc8b/Lib/operator.py#L79-L81
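`operator.and_` is the functional form of `&`, so it works for any type implementing `__and__`:

import operator

print(operator.and_(0b1100, 0b1010) == 0b1000)  # True: bitwise AND on ints
print(operator.and_({1, 2, 3}, {2, 3, 4}))      # {2, 3}: set intersection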
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/lib-python/3/tkinter/tix.py
python
Grid.size_row
(self, index, **kw)
return self.tk.split(self.tk.call( self, 'size', 'row', index, *self._options({}, kw)))
Queries or sets the size of the row given by INDEX. INDEX may be any
non-negative integer that gives the position of a given row. INDEX can
also be the string "default"; in this case, this command queries or
sets the default size of all rows.

When no option-value pair is given, this command returns a list
containing the current size setting of the given row. When option-value
pairs are given, the corresponding options of the size setting of the
given row are changed. Options may be one of the following:

pad0 pixels
    Specifies the padding to the top of a row.
pad1 pixels
    Specifies the padding to the bottom of a row.
size val
    Specifies the height of a row. Val may be: "auto" -- the height of
    the row is set to the height of the highest cell in the row; a valid
    Tk screen distance unit; or a real number followed by the word chars
    (e.g. 3.4chars) that sets the height of the row to the given number
    of characters.
Queries or sets the size of the row given by INDEX. INDEX may be any non-negative integer that gives the position of a given row. INDEX can also be the string "default"; in this case, this command queries or sets the default size of all rows. When no option-value pair is given, this command returns a list containing the current size setting of the given row. When option-value pairs are given, the corresponding options of the size setting of the given row are changed. Options may be one of the following: pad0 pixels Specifies the padding to the top of a row. pad1 pixels Specifies the padding to the bottom of a row. size val Specifies the height of a row. Val may be: "auto" -- the height of the row is set to the height of the highest cell in the row; a valid Tk screen distance unit; or a real number followed by the word chars (e.g. 3.4chars) that sets the height of the row to the given number of characters.
[ "Queries", "or", "sets", "the", "size", "of", "the", "row", "given", "by", "INDEX", ".", "INDEX", "may", "be", "any", "non", "-", "negative", "integer", "that", "gives", "the", "position", "of", "a", "given", "row", ".", "INDEX", "can", "also", "be", ...
def size_row(self, index, **kw):
    """Queries or sets the size of the row given by INDEX. INDEX may be
    any non-negative integer that gives the position of a given row.
    INDEX can also be the string "default"; in this case, this command
    queries or sets the default size of all rows.

    When no option-value pair is given, this command returns a list
    containing the current size setting of the given row. When
    option-value pairs are given, the corresponding options of the size
    setting of the given row are changed. Options may be one of the
    following:

    pad0 pixels
        Specifies the padding to the top of a row.
    pad1 pixels
        Specifies the padding to the bottom of a row.
    size val
        Specifies the height of a row. Val may be: "auto" -- the height
        of the row is set to the height of the highest cell in the row;
        a valid Tk screen distance unit; or a real number followed by
        the word chars (e.g. 3.4chars) that sets the height of the row
        to the given number of characters."""
    return self.tk.split(self.tk.call(
        self, 'size', 'row', index, *self._options({}, kw)))
[ "def", "size_row", "(", "self", ",", "index", ",", "*", "*", "kw", ")", ":", "return", "self", ".", "tk", ".", "split", "(", "self", ".", "tk", ".", "call", "(", "self", ",", "'size'", ",", "'row'", ",", "index", ",", "*", "self", ".", "_option...
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/tkinter/tix.py#L1928-L1950
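A usage sketch; tkinter.tix is legacy and needs a Tix-enabled Tk build at runtime, so treat this as illustrative:

import tkinter.tix as tix

root = tix.Tk()
grid = tix.Grid(root)
grid.size_row(0, size='auto', pad0=2)  # auto-height row, 2-pixel top padding
print(grid.size_row(0))                # query current size settings for row 0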
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/sqlalchemy/engine/base.py
python
Engine.connect
(self, **kwargs)
return self._connection_cls(self, **kwargs)
Return a new :class:`.Connection` object. The :class:`.Connection` object is a facade that uses a DBAPI connection internally in order to communicate with the database. This connection is procured from the connection-holding :class:`.Pool` referenced by this :class:`.Engine`. When the :meth:`~.Connection.close` method of the :class:`.Connection` object is called, the underlying DBAPI connection is then returned to the connection pool, where it may be used again in a subsequent call to :meth:`~.Engine.connect`.
Return a new :class:`.Connection` object.
[ "Return", "a", "new", ":", "class", ":", ".", "Connection", "object", "." ]
def connect(self, **kwargs):
    """Return a new :class:`.Connection` object.

    The :class:`.Connection` object is a facade that uses a DBAPI
    connection internally in order to communicate with the database.
    This connection is procured from the connection-holding
    :class:`.Pool` referenced by this :class:`.Engine`. When the
    :meth:`~.Connection.close` method of the :class:`.Connection`
    object is called, the underlying DBAPI connection is then returned
    to the connection pool, where it may be used again in a subsequent
    call to :meth:`~.Engine.connect`.

    """
    return self._connection_cls(self, **kwargs)
[ "def", "connect", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_connection_cls", "(", "self", ",", "*", "*", "kwargs", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/sqlalchemy/engine/base.py#L2088-L2102
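Typical checkout/return cycle against an in-memory SQLite engine (written for current SQLAlchemy; the vendored copy above is older, but the pattern is the same):

from sqlalchemy import create_engine, text

engine = create_engine("sqlite://")
with engine.connect() as conn:                      # procured from the pool
    print(conn.execute(text("SELECT 1")).scalar())  # 1
# exiting the block returns the DBAPI connection to the pool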
certbot/certbot
30b066f08260b73fc26256b5484a180468b9d0a6
certbot/certbot/plugins/dns_common.py
python
DNSAuthenticator._prompt_for_file
(label: str, validator: Optional[Callable[[str], None]] = None)
Prompt the user for a path.

:param str label: The user-friendly label for the file.
:param callable validator: A method which will be called to validate the
    supplied input after it has been validated to be a non-empty path to an
    existing file. Should throw a `~certbot.errors.PluginError` to indicate
    any issue.

:returns: The user's response (guaranteed to exist).
:rtype: str
Prompt the user for a path.
[ "Prompt", "the", "user", "for", "a", "path", "." ]
def _prompt_for_file(label: str, validator: Optional[Callable[[str], None]] = None) -> str:
    """
    Prompt the user for a path.

    :param str label: The user-friendly label for the file.
    :param callable validator: A method which will be called to validate the
        supplied input after it has been validated to be a non-empty path to an
        existing file. Should throw a `~certbot.errors.PluginError` to indicate
        any issue.

    :returns: The user's response (guaranteed to exist).
    :rtype: str
    """
    def __validator(filename: str) -> None:  # pylint: disable=unused-private-member
        if not filename:
            raise errors.PluginError('Please enter a valid path to your {0}.'.format(label))

        filename = os.path.expanduser(filename)

        validate_file(filename)

        if validator:
            validator(filename)

    code, response = ops.validated_directory(
        __validator,
        'Input the path to your {0}'.format(label),
        force_interactive=True)

    if code == display_util.OK:
        return response

    raise errors.PluginError('{0} required to proceed.'.format(label))
[ "def", "_prompt_for_file", "(", "label", ":", "str", ",", "validator", ":", "Optional", "[", "Callable", "[", "[", "str", "]", ",", "None", "]", "]", "=", "None", ")", "->", "str", ":", "def", "__validator", "(", "filename", ":", "str", ")", "->", ...
https://github.com/certbot/certbot/blob/30b066f08260b73fc26256b5484a180468b9d0a6/certbot/certbot/plugins/dns_common.py#L228-L258
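A sketch of the optional `validator` hook, assuming certbot is importable; the `.ini` requirement is an invented example, not certbot's rule:

from certbot import errors

def require_ini(filename: str) -> None:
    # Hypothetical extra check layered on top of the built-in __validator.
    if not filename.endswith('.ini'):
        raise errors.PluginError('Credentials file must be an .ini file.')

# path = DNSAuthenticator._prompt_for_file('credentials file', require_ini)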
SymbiFlow/symbiflow-arch-defs
f38793112ff78a06de9f1e3269bd22543e29729f
xc/common/utils/prjxray_edge_library.py
python
compute_segment_lengths
(conn)
Determine segment lengths used for cost normalization.
Determine segment lengths used for cost normalization.
[ "Determine", "segment", "lengths", "used", "for", "cost", "normalization", "." ]
def compute_segment_lengths(conn):
    """ Determine segment lengths used for cost normalization. """
    cur = conn.cursor()
    cur2 = conn.cursor()
    cur3 = conn.cursor()
    cur4 = conn.cursor()

    write_cur = conn.cursor()
    write_cur.execute("""BEGIN EXCLUSIVE TRANSACTION;""")

    for (segment_pkey, ) in cur.execute("SELECT pkey FROM segment"):
        segment_lengths = []

        # Get all tracks with this segment
        for (track_pkey, src_phy_tile_pkey) in cur2.execute("""
            SELECT pkey, canon_phy_tile_pkey FROM track
            WHERE canon_phy_tile_pkey IS NOT NULL AND segment_pkey = ?
            """, (segment_pkey, )):
            segment_length = 1

            cur4.execute(
                "SELECT grid_x, grid_y FROM phy_tile WHERE pkey = ?",
                (src_phy_tile_pkey, )
            )
            src_x, src_y = cur4.fetchone()

            # Get tiles downstream of this track.
            for (dest_phy_tile_pkey, ) in cur3.execute("""
                SELECT DISTINCT canon_phy_tile_pkey FROM track WHERE pkey IN (
                    SELECT track_pkey FROM graph_node WHERE pkey IN (
                        SELECT dest_graph_node_pkey FROM graph_edge
                        WHERE src_graph_node_pkey IN (
                            SELECT pkey FROM graph_node WHERE track_pkey = ?
                        )
                    )
                ) AND canon_phy_tile_pkey IS NOT NULL
                """, (track_pkey, )):
                if src_phy_tile_pkey == dest_phy_tile_pkey:
                    continue

                cur4.execute(
                    "SELECT grid_x, grid_y FROM phy_tile WHERE pkey = ?",
                    (dest_phy_tile_pkey, )
                )
                dest_x, dest_y = cur4.fetchone()

                segment_length = max(
                    segment_length,
                    abs(dest_x - src_x) + abs(dest_y - src_y)
                )

            segment_lengths.append(segment_length)

        write_cur.execute(
            "UPDATE segment SET length = ? WHERE pkey = ?", (
                get_segment_length(segment_lengths),
                segment_pkey,
            )
        )

    write_cur.execute("""COMMIT TRANSACTION;""")
[ "def", "compute_segment_lengths", "(", "conn", ")", ":", "cur", "=", "conn", ".", "cursor", "(", ")", "cur2", "=", "conn", ".", "cursor", "(", ")", "cur3", "=", "conn", ".", "cursor", "(", ")", "cur4", "=", "conn", ".", "cursor", "(", ")", "write_c...
https://github.com/SymbiFlow/symbiflow-arch-defs/blob/f38793112ff78a06de9f1e3269bd22543e29729f/xc/common/utils/prjxray_edge_library.py#L1937-L2000
eirannejad/pyRevit
49c0b7eb54eb343458ce1365425e6552d0c47d44
site-packages/sortedcollections/ordereddict.py
python
KeysView.__reversed__
(self)
return reversed(self._mapping)
``reversed(keys_view)``
``reversed(keys_view)``
[ "reversed", "(", "keys_view", ")" ]
def __reversed__(self):
    "``reversed(keys_view)``"
    return reversed(self._mapping)
[ "def", "__reversed__", "(", "self", ")", ":", "return", "reversed", "(", "self", ".", "_mapping", ")" ]
https://github.com/eirannejad/pyRevit/blob/49c0b7eb54eb343458ce1365425e6552d0c47d44/site-packages/sortedcollections/ordereddict.py#L23-L25