Dataset schema (column: type, value-length range):
nwo: stringlengths, 5 to 106
sha: stringlengths, 40 (fixed)
path: stringlengths, 4 to 174
language: stringclasses, 1 value
identifier: stringlengths, 1 to 140
parameters: stringlengths, 0 to 87.7k
argument_list: stringclasses, 1 value
return_statement: stringlengths, 0 to 426k
docstring: stringlengths, 0 to 64.3k
docstring_summary: stringlengths, 0 to 26.3k
docstring_tokens: list
function: stringlengths, 18 to 4.83M
function_tokens: list
url: stringlengths, 83 to 304
odlgroup/odl
0b088df8dc4621c68b9414c3deff9127f4c4f11d
odl/set/domain.py
python
IntervalProd.__repr__
(self)
Return ``repr(self)``.
Return ``repr(self)``.
[ "Return", "repr", "(", "self", ")", "." ]
def __repr__(self):
    """Return ``repr(self)``."""
    if self.ndim == 1:
        return '{}({:.4}, {:.4})'.format(self.__class__.__name__,
                                         self.min_pt[0], self.max_pt[0])
    else:
        return '{}({}, {})'.format(self.__class__.__name__,
                                   array_str(self.min_pt),
                                   array_str(self.max_pt))
[ "def", "__repr__", "(", "self", ")", ":", "if", "self", ".", "ndim", "==", "1", ":", "return", "'{}({:.4}, {:.4})'", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "self", ".", "min_pt", "[", "0", "]", ",", "self", ".", "max_pt",...
https://github.com/odlgroup/odl/blob/0b088df8dc4621c68b9414c3deff9127f4c4f11d/odl/set/domain.py#L838-L846
Charleswyt/tf_audio_steganalysis
b16073669ea5fc13668f7ef3137d2a85ac7fed6d
src/networks/image_classification.py
python
vgg16
(input_data, class_num=4096)
return logits
vgg16 for image classification
vgg16 for image classification
[ "vgg16", "for", "image", "classification" ]
def vgg16(input_data, class_num=4096):
    """
    vgg16 for image classification
    """
    print("vgg16: Remove the 1x1 conv layers.")
    print("Network Structure: ")
    # vgg16
    conv1_1 = conv_layer(input_data, 3, 3, 1, 1, 64, "conv1_1")
    conv1_2 = conv_layer(conv1_1, 3, 3, 1, 1, 64, "conv1_2")
    pool1_3 = pool_layer(conv1_2, 2, 2, 2, 2, "pool1_3")

    conv2_1 = conv_layer(pool1_3, 3, 3, 1, 1, 128, "conv2_1")
    conv2_2 = conv_layer(conv2_1, 3, 3, 1, 1, 128, "conv2_2")
    pool2_3 = pool_layer(conv2_2, 2, 2, 2, 2, "pool2_3")

    conv3_1 = conv_layer(pool2_3, 3, 3, 1, 1, 256, "conv3_1")
    conv3_2 = conv_layer(conv3_1, 3, 3, 1, 1, 256, "conv3_2")
    conv3_3 = conv_layer(conv3_2, 3, 3, 1, 1, 256, "conv3_3")
    pool3_4 = pool_layer(conv3_3, 2, 2, 2, 2, "pool3_4")

    conv4_1 = conv_layer(pool3_4, 3, 3, 1, 1, 512, "conv4_1")
    conv4_2 = conv_layer(conv4_1, 3, 3, 1, 1, 512, "conv4_2")
    conv4_3 = conv_layer(conv4_2, 3, 3, 1, 1, 512, "conv4_3")
    pool4_4 = pool_layer(conv4_3, 2, 2, 2, 2, "pool4_4")

    conv5_1 = conv_layer(pool4_4, 3, 3, 1, 1, 512, "conv5_1")
    conv5_2 = conv_layer(conv5_1, 3, 3, 1, 1, 512, "conv5_2")
    conv5_3 = conv_layer(conv5_2, 3, 3, 1, 1, 512, "conv5_3")
    pool5_4 = pool_layer(conv5_3, 2, 2, 2, 2, "pool5_4")

    fc6 = fc_layer(pool5_4, 4096, "fc6")
    fc6_drop = dropout(fc6, keep_pro=0.5, name="fc6_drop")
    fc7 = fc_layer(fc6_drop, 4096, "fc7")
    fc7_drop = dropout(fc7, keep_pro=0.5, name="fc7_drop")
    logits = fc_layer(fc7_drop, class_num, "fc8")

    return logits
[ "def", "vgg16", "(", "input_data", ",", "class_num", "=", "4096", ")", ":", "print", "(", "\"vgg16: Remove the 1x1 conv layers.\"", ")", "print", "(", "\"Network Structure: \"", ")", "# vgg16", "conv1_1", "=", "conv_layer", "(", "input_data", ",", "3", ",", "3",...
https://github.com/Charleswyt/tf_audio_steganalysis/blob/b16073669ea5fc13668f7ef3137d2a85ac7fed6d/src/networks/image_classification.py#L47-L84
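A hedged usage sketch for the record above: the layer builders (conv_layer, pool_layer, fc_layer, dropout) come from the same repository, and the TF1-style placeholder shape is an assumption, not something the record specifies.

# Hypothetical usage of vgg16, assuming a TF1-style graph and that the
# repo's conv_layer/pool_layer/fc_layer/dropout helpers are importable.
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])  # assumed input shape
logits = vgg16(images, class_num=10)                      # 10-way classifier head
probs = tf.nn.softmax(logits)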
caiiiac/Machine-Learning-with-Python
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
MachineLearning/venv/lib/python3.5/site-packages/sklearn/metrics/cluster/bicluster.py
python
_check_rows_and_columns
(a, b)
return a_rows, a_cols, b_rows, b_cols
Unpacks the row and column arrays and checks their shape.
Unpacks the row and column arrays and checks their shape.
[ "Unpacks", "the", "row", "and", "column", "arrays", "and", "checks", "their", "shape", "." ]
def _check_rows_and_columns(a, b):
    """Unpacks the row and column arrays and checks their shape."""
    check_consistent_length(*a)
    check_consistent_length(*b)
    checks = lambda x: check_array(x, ensure_2d=False)
    a_rows, a_cols = map(checks, a)
    b_rows, b_cols = map(checks, b)
    return a_rows, a_cols, b_rows, b_cols
[ "def", "_check_rows_and_columns", "(", "a", ",", "b", ")", ":", "check_consistent_length", "(", "*", "a", ")", "check_consistent_length", "(", "*", "b", ")", "checks", "=", "lambda", "x", ":", "check_array", "(", "x", ",", "ensure_2d", "=", "False", ")", ...
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/sklearn/metrics/cluster/bicluster.py#L11-L18
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
lib-python/2.7/lib2to3/refactor.py
python
RefactoringTool.print_output
(self, old_text, new_text, filename, equal)
Called with the old version, new version, and filename of a refactored file.
Called with the old version, new version, and filename of a refactored file.
[ "Called", "with", "the", "old", "version", "new", "version", "and", "filename", "of", "a", "refactored", "file", "." ]
def print_output(self, old_text, new_text, filename, equal):
    """Called with the old version, new version, and filename of a
    refactored file."""
    pass
[ "def", "print_output", "(", "self", ",", "old_text", ",", "new_text", ",", "filename", ",", "equal", ")", ":", "pass" ]
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/lib-python/2.7/lib2to3/refactor.py#L289-L292
seanbell/intrinsic
698b1e0fd23c216bb65164927c4de85b2c94b1af
bell2014/input.py
python
IntrinsicInput.image_rgb
(self)
return self._image_rgb
Image in linear RGB space
Image in linear RGB space
[ "Image", "in", "linear", "RGB", "space" ]
def image_rgb(self):
    """ Image in linear RGB space """
    return self._image_rgb
[ "def", "image_rgb", "(", "self", ")", ":", "return", "self", ".", "_image_rgb" ]
https://github.com/seanbell/intrinsic/blob/698b1e0fd23c216bb65164927c4de85b2c94b1af/bell2014/input.py#L204-L206
Trusted-AI/adversarial-robustness-toolbox
9fabffdbb92947efa1ecc5d825d634d30dfbaf29
art/estimators/classification/scikitlearn.py
python
ScikitlearnLogisticRegression.__init__
( self, model: "sklearn.linear_model.LogisticRegression", clip_values: Optional["CLIP_VALUES_TYPE"] = None, preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), )
Create a `Classifier` instance from a scikit-learn Logistic Regression model. :param model: scikit-learn LogisticRegression model :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed for features. :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier. :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier. :param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be used for data preprocessing. The first value will be subtracted from the input. The input will then be divided by the second one.
Create a `Classifier` instance from a scikit-learn Logistic Regression model.
[ "Create", "a", "Classifier", "instance", "from", "a", "scikit", "-", "learn", "Logistic", "Regression", "model", "." ]
def __init__(
    self,
    model: "sklearn.linear_model.LogisticRegression",
    clip_values: Optional["CLIP_VALUES_TYPE"] = None,
    preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None,
    postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None,
    preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0),
) -> None:
    """
    Create a `Classifier` instance from a scikit-learn Logistic Regression model.

    :param model: scikit-learn LogisticRegression model
    :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
           for features.
    :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
    :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
    :param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be
           used for data preprocessing. The first value will be subtracted from the input. The input will then
           be divided by the second one.
    """
    # pylint: disable=E0001
    import sklearn  # lgtm [py/repeated-import]

    if not isinstance(model, sklearn.linear_model.LogisticRegression):
        raise TypeError("Model must be of type sklearn.linear_model.LogisticRegression).")

    super().__init__(
        model=model,
        clip_values=clip_values,
        preprocessing_defences=preprocessing_defences,
        postprocessing_defences=postprocessing_defences,
        preprocessing=preprocessing,
    )
[ "def", "__init__", "(", "self", ",", "model", ":", "\"sklearn.linear_model.LogisticRegression\"", ",", "clip_values", ":", "Optional", "[", "\"CLIP_VALUES_TYPE\"", "]", "=", "None", ",", "preprocessing_defences", ":", "Union", "[", "\"Preprocessor\"", ",", "List", "...
https://github.com/Trusted-AI/adversarial-robustness-toolbox/blob/9fabffdbb92947efa1ecc5d825d634d30dfbaf29/art/estimators/classification/scikitlearn.py#L755-L787
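Since this constructor only validates and forwards a fitted scikit-learn model, a minimal wrapping sketch may help; the random data and the (0.0, 1.0) clip range are illustrative assumptions.

# A minimal sketch: wrap a fitted LogisticRegression in the ART classifier.
import numpy as np
from sklearn.linear_model import LogisticRegression
from art.estimators.classification.scikitlearn import ScikitlearnLogisticRegression

x = np.random.rand(100, 4).astype(np.float32)   # toy features in [0, 1]
y = np.random.randint(0, 2, size=100)           # toy binary labels
model = LogisticRegression().fit(x, y)

classifier = ScikitlearnLogisticRegression(model=model, clip_values=(0.0, 1.0))
print(classifier.predict(x[:3]))                # class scores for three samples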
wbond/package_control
cfaaeb57612023e3679ecb7f8cd7ceac9f57990d
package_control/deps/oscrypto/_mac/asymmetric.py
python
ecdsa_sign
(private_key, data, hash_algorithm)
return _sign(private_key, data, hash_algorithm)
Generates an ECDSA signature :param private_key: The PrivateKey to generate the signature with :param data: A byte string of the data the signature is for :param hash_algorithm: A unicode string of "md5", "sha1", "sha224", "sha256", "sha384" or "sha512" :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the signature
Generates an ECDSA signature
[ "Generates", "an", "ECDSA", "signature" ]
def ecdsa_sign(private_key, data, hash_algorithm):
    """
    Generates an ECDSA signature

    :param private_key:
        The PrivateKey to generate the signature with

    :param data:
        A byte string of the data the signature is for

    :param hash_algorithm:
        A unicode string of "md5", "sha1", "sha224", "sha256", "sha384" or "sha512"

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the signature
    """

    if private_key.algorithm != 'ec':
        raise ValueError('The key specified is not an EC private key')

    return _sign(private_key, data, hash_algorithm)
[ "def", "ecdsa_sign", "(", "private_key", ",", "data", ",", "hash_algorithm", ")", ":", "if", "private_key", ".", "algorithm", "!=", "'ec'", ":", "raise", "ValueError", "(", "'The key specified is not an EC private key'", ")", "return", "_sign", "(", "private_key", ...
https://github.com/wbond/package_control/blob/cfaaeb57612023e3679ecb7f8cd7ceac9f57990d/package_control/deps/oscrypto/_mac/asymmetric.py#L1796-L1822
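A short round-trip sketch, under the assumption that oscrypto's public asymmetric module exposes generate_pair and ecdsa_verify alongside ecdsa_sign; the curve choice is illustrative.

# Sign-and-verify sketch; the key generation parameters are assumptions.
from oscrypto import asymmetric

public_key, private_key = asymmetric.generate_pair('ec', curve='secp256r1')
signature = asymmetric.ecdsa_sign(private_key, b'message to sign', 'sha256')
# ecdsa_verify() raises an exception if the signature does not match
asymmetric.ecdsa_verify(public_key, signature, b'message to sign', 'sha256')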
ideoforms/pylive
87a4f2a384668b6f3e9475a6bc3c3d63b4734f5b
live/set.py
python
Set.state
(self)
return self.live.query("/live/state")
Return the global state tuple: (tempo, overdub)
Return the global state tuple: (tempo, overdub)
[ "Return", "the", "global", "state", "tuple", ":", "(", "tempo", "overdub", ")" ]
def state(self):
    """ Return the global state tuple: (tempo, overdub) """
    return self.live.query("/live/state")
[ "def", "state", "(", "self", ")", ":", "return", "self", ".", "live", ".", "query", "(", "\"/live/state\"", ")" ]
https://github.com/ideoforms/pylive/blob/87a4f2a384668b6f3e9475a6bc3c3d63b4734f5b/live/set.py#L241-L243
qiime2/qiime2
3906f67c70a1321e99e7fc59e79550c2432a8cee
qiime2/sdk/usage.py
python
Usage.import_from_format
(self, name: str, semantic_type: str, variable: UsageVariable, view_type: 'qiime2.core.format.FormatBase' = None )
return self._usage_variable(name, factory, 'artifact')
Communicate that an import should be done. Parameters ---------- name : str The name of the resulting variable. semantic_type : str The semantic type to import as. variable : UsageVariable A variable of type 'format' which possesses a factory to materialize the actual data to be imported. view_type : format or str The view type to import as, in the event it is different from the default. Returns ------- UsageVariable Variable of type 'artifact'. Examples -------- >>> # A factory which will be used in the example to generate data. >>> def factory(): ... from qiime2.core.testing.format import IntSequenceFormat ... from qiime2.plugin.util import transform ... ff = transform([1, 2, 3], to_type=IntSequenceFormat) ... ... ff.validate() # good practice ... return ff ... >>> to_import = use.init_format('to_import', factory, ext='.hello') >>> to_import <ExecutionUsageVariable name='to_import', var_type='format'> >>> ints = use.import_from_format('ints', ... semantic_type='IntSequence1', ... variable=to_import, ... view_type='IntSequenceFormat') >>> ints <ExecutionUsageVariable name='ints', var_type='artifact'> See Also -------- init_format
Communicate that an import should be done.
[ "Communicate", "that", "an", "import", "should", "be", "done", "." ]
def import_from_format(self,
                       name: str,
                       semantic_type: str,
                       variable: UsageVariable,
                       view_type: 'qiime2.core.format.FormatBase' = None
                       ) -> UsageVariable:
    """Communicate that an import should be done.

    Parameters
    ----------
    name : str
        The name of the resulting variable.
    semantic_type : str
        The semantic type to import as.
    variable : UsageVariable
        A variable of type 'format' which possesses a factory to
        materialize the actual data to be imported.
    view_type : format or str
        The view type to import as, in the event it is different from
        the default.

    Returns
    -------
    UsageVariable
        Variable of type 'artifact'.

    Examples
    --------
    >>> # A factory which will be used in the example to generate data.
    >>> def factory():
    ...     from qiime2.core.testing.format import IntSequenceFormat
    ...     from qiime2.plugin.util import transform
    ...     ff = transform([1, 2, 3], to_type=IntSequenceFormat)
    ...
    ...     ff.validate()  # good practice
    ...     return ff
    ...
    >>> to_import = use.init_format('to_import', factory, ext='.hello')
    >>> to_import
    <ExecutionUsageVariable name='to_import', var_type='format'>
    >>> ints = use.import_from_format('ints',
    ...                               semantic_type='IntSequence1',
    ...                               variable=to_import,
    ...                               view_type='IntSequenceFormat')
    >>> ints
    <ExecutionUsageVariable name='ints', var_type='artifact'>

    See Also
    --------
    init_format
    """
    assert_usage_var_type(variable, 'format')

    def factory():
        from qiime2 import Artifact

        fmt = variable.execute()
        artifact = Artifact.import_data(semantic_type, str(fmt),
                                        view_type=view_type)
        return artifact

    return self._usage_variable(name, factory, 'artifact')
[ "def", "import_from_format", "(", "self", ",", "name", ":", "str", ",", "semantic_type", ":", "str", ",", "variable", ":", "UsageVariable", ",", "view_type", ":", "'qiime2.core.format.FormatBase'", "=", "None", ")", "->", "UsageVariable", ":", "assert_usage_var_ty...
https://github.com/qiime2/qiime2/blob/3906f67c70a1321e99e7fc59e79550c2432a8cee/qiime2/sdk/usage.py#L925-L984
youngwanLEE/CenterMask
72147e8aae673fcaf4103ee90a6a6b73863e7fa1
maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py
python
FastRCNNLossComputation.__call__
(self, class_logits, box_regression)
return classification_loss, box_loss
Computes the loss for Faster R-CNN. This requires that the subsample method has been called beforehand. Arguments: class_logits (list[Tensor]) box_regression (list[Tensor]) Returns: classification_loss (Tensor) box_loss (Tensor)
Computes the loss for Faster R-CNN. This requires that the subsample method has been called beforehand.
[ "Computes", "the", "loss", "for", "Faster", "R", "-", "CNN", ".", "This", "requires", "that", "the", "subsample", "method", "has", "been", "called", "beforehand", "." ]
def __call__(self, class_logits, box_regression):
    """
    Computes the loss for Faster R-CNN.
    This requires that the subsample method has been called beforehand.

    Arguments:
        class_logits (list[Tensor])
        box_regression (list[Tensor])

    Returns:
        classification_loss (Tensor)
        box_loss (Tensor)
    """
    class_logits = cat(class_logits, dim=0)
    box_regression = cat(box_regression, dim=0)
    device = class_logits.device

    if not hasattr(self, "_proposals"):
        raise RuntimeError("subsample needs to be called before")

    proposals = self._proposals

    labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
    regression_targets = cat(
        [proposal.get_field("regression_targets") for proposal in proposals], dim=0
    )

    classification_loss = F.cross_entropy(class_logits, labels)

    # get indices that correspond to the regression targets for
    # the corresponding ground truth labels, to be used with
    # advanced indexing
    sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)
    labels_pos = labels[sampled_pos_inds_subset]
    if self.cls_agnostic_bbox_reg:
        map_inds = torch.tensor([4, 5, 6, 7], device=device)
    else:
        map_inds = 4 * labels_pos[:, None] + torch.tensor(
            [0, 1, 2, 3], device=device)

    box_loss = smooth_l1_loss(
        box_regression[sampled_pos_inds_subset[:, None], map_inds],
        regression_targets[sampled_pos_inds_subset],
        size_average=False,
        beta=1,
    )
    box_loss = box_loss / labels.numel()

    return classification_loss, box_loss
[ "def", "__call__", "(", "self", ",", "class_logits", ",", "box_regression", ")", ":", "class_logits", "=", "cat", "(", "class_logits", ",", "dim", "=", "0", ")", "box_regression", "=", "cat", "(", "box_regression", ",", "dim", "=", "0", ")", "device", "=...
https://github.com/youngwanLEE/CenterMask/blob/72147e8aae673fcaf4103ee90a6a6b73863e7fa1/maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py#L118-L167
sphinx-doc/sphinx
e79681c76843c1339863b365747079b2d662d0c1
sphinx/search/__init__.py
python
IndexBuilder.feed
(self, docname: str, filename: str, title: str, doctree: nodes.document)
Feed a doctree to the index.
Feed a doctree to the index.
[ "Feed", "a", "doctree", "to", "the", "index", "." ]
def feed(self, docname: str, filename: str, title: str, doctree: nodes.document) -> None:
    """Feed a doctree to the index."""
    self._titles[docname] = title
    self._filenames[docname] = filename

    visitor = WordCollector(doctree, self.lang)
    doctree.walk(visitor)

    # memoize self.lang.stem
    def stem(word: str) -> str:
        try:
            return self._stem_cache[word]
        except KeyError:
            self._stem_cache[word] = self.lang.stem(word).lower()
            return self._stem_cache[word]
    _filter = self.lang.word_filter

    for word in visitor.found_title_words:
        stemmed_word = stem(word)
        if _filter(stemmed_word):
            self._title_mapping.setdefault(stemmed_word, set()).add(docname)
        elif _filter(word):  # stemmer must not remove words from search index
            self._title_mapping.setdefault(word, set()).add(docname)

    for word in visitor.found_words:
        stemmed_word = stem(word)
        # again, stemmer must not remove words from search index
        if not _filter(stemmed_word) and _filter(word):
            stemmed_word = word
        already_indexed = docname in self._title_mapping.get(stemmed_word, set())
        if _filter(stemmed_word) and not already_indexed:
            self._mapping.setdefault(stemmed_word, set()).add(docname)
[ "def", "feed", "(", "self", ",", "docname", ":", "str", ",", "filename", ":", "str", ",", "title", ":", "str", ",", "doctree", ":", "nodes", ".", "document", ")", "->", "None", ":", "self", ".", "_titles", "[", "docname", "]", "=", "title", "self",...
https://github.com/sphinx-doc/sphinx/blob/e79681c76843c1339863b365747079b2d662d0c1/sphinx/search/__init__.py#L388-L419
eggnogdb/eggnog-mapper
d6e6cdf0a829f2bd85480f3f3f16e38c213cd091
eggnogmapper/search/hmmer/hmmer_setup.py
python
setup_hmm_search
(db, scantype, dbtype, qtype = QUERY_TYPE_SEQ, port = DEFAULT_PORT, end_port = DEFAULT_END_PORT, servers_list = None, silent = False)
return dbname, dbpath, host, port, end_port, idmap_file, setup_type
[]
def setup_hmm_search(db, scantype, dbtype, qtype=QUERY_TYPE_SEQ,
                     port=DEFAULT_PORT, end_port=DEFAULT_END_PORT,
                     servers_list=None, silent=False):
    setup_type = None

    if ":" in db or servers_list is not None:
        dbname, dbpath, host, port, idmap_file = setup_remote_db(db, dbtype, qtype)
        end_port = port
        setup_type = SETUP_TYPE_REMOTE
    else:
        # setup_local_db --> dbpath, host, port, idmap_file
        if db in get_hmmer_databases():
            dbpath, host, idmap_file = setup_eggnog_db(db, scantype)
            dbname = db
            setup_type = SETUP_TYPE_EGGNOG
        else:
            dbpath, host, idmap_file = setup_custom_db(db, scantype, dbtype, silent)
            dbname = db
            setup_type = SETUP_TYPE_CUSTOM

    return dbname, dbpath, host, port, end_port, idmap_file, setup_type
[ "def", "setup_hmm_search", "(", "db", ",", "scantype", ",", "dbtype", ",", "qtype", "=", "QUERY_TYPE_SEQ", ",", "port", "=", "DEFAULT_PORT", ",", "end_port", "=", "DEFAULT_END_PORT", ",", "servers_list", "=", "None", ",", "silent", "=", "False", ")", ":", ...
https://github.com/eggnogdb/eggnog-mapper/blob/d6e6cdf0a829f2bd85480f3f3f16e38c213cd091/eggnogmapper/search/hmmer/hmmer_setup.py#L26-L47
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/combinat/combinat.py
python
CombinatorialObject.__le__
(self, other)
EXAMPLES:: sage: c = CombinatorialObject([1,2,3]) sage: d = CombinatorialObject([2,3,4]) sage: c <= c True sage: c <= d True sage: c <= [1,2,3] True
EXAMPLES::
[ "EXAMPLES", "::" ]
def __le__(self, other):
    """
    EXAMPLES::

        sage: c = CombinatorialObject([1,2,3])
        sage: d = CombinatorialObject([2,3,4])
        sage: c <= c
        True
        sage: c <= d
        True
        sage: c <= [1,2,3]
        True
    """
    if isinstance(other, CombinatorialObject):
        return self._list <= other._list
    else:
        return self._list <= other
[ "def", "__le__", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "CombinatorialObject", ")", ":", "return", "self", ".", "_list", "<=", "other", ".", "_list", "else", ":", "return", "self", ".", "_list", "<=", "other" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/combinat/combinat.py#L1242-L1258
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/operator.py
python
xor
(a, b)
return a ^ b
Same as a ^ b.
Same as a ^ b.
[ "Same", "as", "a", "^", "b", "." ]
def xor(a, b):
    "Same as a ^ b."
    return a ^ b
[ "def", "xor", "(", "a", ",", "b", ")", ":", "return", "a", "^", "b" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/operator.py#L140-L142
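The docstring says it all; a one-line check makes the bitwise behavior concrete, assuming the function above is in scope.

from operator import xor

assert xor(0b1100, 0b1010) == 0b0110   # 12 ^ 10 == 6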
pyqtgraph/pyqtgraph
ac3887abfca4e529aac44f022f8e40556a2587b0
pyqtgraph/debug.py
python
walkQObjectTree
(obj, counts=None, verbose=False, depth=0)
return counts
Walk through a tree of QObjects, doing nothing to them. The purpose of this function is to find dead objects and generate a crash immediately rather than stumbling upon them later. Prints a count of the objects encountered, for fun. (or is it?)
Walk through a tree of QObjects, doing nothing to them. The purpose of this function is to find dead objects and generate a crash immediately rather than stumbling upon them later. Prints a count of the objects encountered, for fun. (or is it?)
[ "Walk", "through", "a", "tree", "of", "QObjects", "doing", "nothing", "to", "them", ".", "The", "purpose", "of", "this", "function", "is", "to", "find", "dead", "objects", "and", "generate", "a", "crash", "immediately", "rather", "than", "stumbling", "upon",...
def walkQObjectTree(obj, counts=None, verbose=False, depth=0):
    """
    Walk through a tree of QObjects, doing nothing to them.
    The purpose of this function is to find dead objects and generate a crash
    immediately rather than stumbling upon them later.
    Prints a count of the objects encountered, for fun. (or is it?)
    """
    if verbose:
        print(" "*depth + typeStr(obj))
    report = False
    if counts is None:
        counts = {}
        report = True
    typ = str(type(obj))
    try:
        counts[typ] += 1
    except KeyError:
        counts[typ] = 1
    for child in obj.children():
        walkQObjectTree(child, counts, verbose, depth+1)

    return counts
[ "def", "walkQObjectTree", "(", "obj", ",", "counts", "=", "None", ",", "verbose", "=", "False", ",", "depth", "=", "0", ")", ":", "if", "verbose", ":", "print", "(", "\" \"", "*", "depth", "+", "typeStr", "(", "obj", ")", ")", "report", "=", "Fals...
https://github.com/pyqtgraph/pyqtgraph/blob/ac3887abfca4e529aac44f022f8e40556a2587b0/pyqtgraph/debug.py#L1046-L1068
intohole/moodstyle
1d06fc565c0df4bf07196854f3efb94bbefd1bfb
moodstyle/classifier/Hmm1.py
python
TrainSeg.word_state
(self , word)
[]
def word_state(self, word):
    # Yield (char, state) pairs using the BMES tagging scheme:
    # 's' single, 'b' begin, 'm' middle, 'e' end.
    if len(word) == 0:
        yield
    elif len(word) == 1:
        yield HmmItem(word, 's')
    elif len(word) == 2:
        # note: as written this yields the whole two-character word for
        # both the 'b' and 'e' states, not its individual characters
        yield HmmItem(word, 'b')
        yield HmmItem(word, 'e')
    elif len(word) >= 3:
        yield HmmItem(word[0], 'b')
        for _word in word[1:-1]:
            yield HmmItem(_word, 'm')
        yield HmmItem(word[-1], 'e')
[ "def", "word_state", "(", "self", ",", "word", ")", ":", "if", "len", "(", "word", ")", "==", "0", ":", "yield", "elif", "len", "(", "word", ")", "==", "1", ":", "yield", "HmmItem", "(", "word", ",", "'s'", ")", "elif", "len", "(", "word", ")",...
https://github.com/intohole/moodstyle/blob/1d06fc565c0df4bf07196854f3efb94bbefd1bfb/moodstyle/classifier/Hmm1.py#L198-L210
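The method emits BMES character tags for word-segmentation training. A standalone sketch of the same scheme, using plain tuples instead of the repo's HmmItem, shows the intended output (and sidesteps the two-character branch noted above):

# Minimal BMES tagger sketch; plain (char, state) tuples stand in for HmmItem.
def bmes_tags(word):
    if len(word) == 1:
        return [(word, 's')]                      # single-character word
    return ([(word[0], 'b')]                      # begin
            + [(ch, 'm') for ch in word[1:-1]]    # middle (empty for 2 chars)
            + [(word[-1], 'e')])                  # end

assert bmes_tags('中') == [('中', 's')]
assert bmes_tags('中国') == [('中', 'b'), ('国', 'e')]
assert bmes_tags('中华人民') == [('中', 'b'), ('华', 'm'), ('人', 'm'), ('民', 'e')]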
aosp-mirror/platform_development
10d2ee6c3d6e0ffafadb170b4557b38f81824799
vndk/tools/sourcedr/blueprint/blueprint.py
python
Parser.parse_dict
(self)
return result
Parse a dict.
Parse a dict.
[ "Parse", "a", "dict", "." ]
def parse_dict(self):
    """Parse a dict."""
    result = Dict()

    lexer = self.lexer

    is_func_syntax = lexer.token == Token.LPAREN
    if is_func_syntax:
        lexer.consume(Token.LPAREN)
    else:
        lexer.consume(Token.LBRACE)

    while lexer.token != Token.RBRACE and lexer.token != Token.RPAREN:
        if lexer.token != Token.IDENT:
            raise ParseError(lexer, 'unexpected token ' + lexer.token.name)
        key = self.parse_ident_lvalue()

        if lexer.token == Token.ASSIGN:
            lexer.consume(Token.ASSIGN)
        else:
            lexer.consume(Token.COLON)

        value = self.parse_expression()
        result[key] = value

        if lexer.token == Token.COMMA:
            lexer.consume(Token.COMMA)

    if is_func_syntax:
        lexer.consume(Token.RPAREN)
    else:
        lexer.consume(Token.RBRACE)

    return result
[ "def", "parse_dict", "(", "self", ")", ":", "result", "=", "Dict", "(", ")", "lexer", "=", "self", ".", "lexer", "is_func_syntax", "=", "lexer", ".", "token", "==", "Token", ".", "LPAREN", "if", "is_func_syntax", ":", "lexer", ".", "consume", "(", "Tok...
https://github.com/aosp-mirror/platform_development/blob/10d2ee6c3d6e0ffafadb170b4557b38f81824799/vndk/tools/sourcedr/blueprint/blueprint.py#L716-L748
biopython/biopython
2dd97e71762af7b046d7f7f8a4f1e38db6b06c86
Bio/KEGG/KGML/KGML_pathway.py
python
Pathway.element
(self)
return pathway
Return the Pathway as a valid KGML element.
Return the Pathway as a valid KGML element.
[ "Return", "the", "Pathway", "as", "a", "valid", "KGML", "element", "." ]
def element(self):
    """Return the Pathway as a valid KGML element."""
    # The root is this Pathway element
    pathway = ET.Element("pathway")
    pathway.attrib = {
        "name": self._name,
        "org": self.org,
        "number": str(self._number),
        "title": self.title,
        "image": self.image,
        "link": self.link,
    }
    # We add the Entries in node ID order
    for eid, entry in sorted(self.entries.items()):
        pathway.append(entry.element)
    # Next we add Relations
    for relation in self._relations:
        pathway.append(relation.element)
    for eid, reaction in sorted(self._reactions.items()):
        pathway.append(reaction.element)
    return pathway
[ "def", "element", "(", "self", ")", ":", "# The root is this Pathway element", "pathway", "=", "ET", ".", "Element", "(", "\"pathway\"", ")", "pathway", ".", "attrib", "=", "{", "\"name\"", ":", "self", ".", "_name", ",", "\"org\"", ":", "self", ".", "org"...
https://github.com/biopython/biopython/blob/2dd97e71762af7b046d7f7f8a4f1e38db6b06c86/Bio/KEGG/KGML/KGML_pathway.py#L222-L242
securityclippy/elasticintel
aa08d3e9f5ab1c000128e95161139ce97ff0e334
ingest_feed_lambda/pandas/core/base.py
python
IndexOpsMixin.transpose
(self, *args, **kwargs)
return self
return the transpose, which is by definition self
return the transpose, which is by definition self
[ "return", "the", "transpose", "which", "is", "by", "definition", "self" ]
def transpose(self, *args, **kwargs):
    """ return the transpose, which is by definition self """
    nv.validate_transpose(args, kwargs)
    return self
[ "def", "transpose", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_transpose", "(", "args", ",", "kwargs", ")", "return", "self" ]
https://github.com/securityclippy/elasticintel/blob/aa08d3e9f5ab1c000128e95161139ce97ff0e334/ingest_feed_lambda/pandas/core/base.py#L694-L697
jhorey/ferry
bbaa047df08386e17130a939e20fde5e840d1ffa
ferry/docker/docker.py
python
DockerCLI.pull
(self, image, server=None)
return self._continuous_print(child, "downloading image...")
Pull a remote image to the local registry.
Pull a remote image to the local registry.
[ "Pull", "a", "remote", "image", "to", "the", "local", "registry", "." ]
def pull(self, image, server=None):
    """
    Pull a remote image to the local registry.
    """
    pull = self.docker + ' ' + self.pull_cmd + ' ' + image
    logging.warning(pull)
    child = self._execute_cmd(pull, server, read_output=False)
    return self._continuous_print(child, "downloading image...")
[ "def", "pull", "(", "self", ",", "image", ",", "server", "=", "None", ")", ":", "pull", "=", "self", ".", "docker", "+", "' '", "+", "self", ".", "pull_cmd", "+", "' '", "+", "image", "logging", ".", "warning", "(", "pull", ")", "child", "=", "se...
https://github.com/jhorey/ferry/blob/bbaa047df08386e17130a939e20fde5e840d1ffa/ferry/docker/docker.py#L292-L299
sfepy/sfepy
02ec7bb2ab39ee1dfe1eb4cd509f0ffb7dcc8b25
sfepy/discrete/dg/fields.py
python
DGField._set_dg_periodic_facet_neighbours
(self, facet_neighbours, eq_map)
return facet_neighbours
Parameters ---------- facet_neighbours : array_like Shape is (n_cell, n_el_facet, 2), first value is index of the neighbouring cell the second is index of the facet in said nb. cell. eq_map : must contain dg_ep_bc a List with pairs of slave and master boundary cell boundary facet mapping Returns ------- facet_neighbours : ndarray Updated incidence array.
[]
def _set_dg_periodic_facet_neighbours(self, facet_neighbours, eq_map):
    """
    Parameters
    ----------
    facet_neighbours : array_like
        Shape is (n_cell, n_el_facet, 2), first value is index of the
        neighbouring cell the second is index of the facet in said nb. cell.
    eq_map :
        must contain dg_ep_bc a List with pairs of slave and master boundary
        cell boundary facet mapping

    Returns
    -------
    facet_neighbours : ndarray
        Updated incidence array.
    """
    # if eq_map.
    # treat DG EPBC - these are definitely preferred
    if eq_map.n_dg_epbc > 0 and self.gel.name not in ["1_2", "2_4", "3_6"]:
        raise ValueError(
            "Periodic boundary conditions not supported " +
            "for geometry {} elements.".format(self.gel.name))

    dg_epbc = eq_map.dg_epbc

    for master_bc2bfi, slave_bc2bfi in dg_epbc:
        # set neighbours of periodic cells to one another
        facet_neighbours[master_bc2bfi[:, 0], master_bc2bfi[:, 1], 0] = \
            slave_bc2bfi[:, 0]
        facet_neighbours[slave_bc2bfi[:, 0], slave_bc2bfi[:, 1], 0] = \
            master_bc2bfi[:, 0]

        # set neighbours facets
        facet_neighbours[slave_bc2bfi[:, 0], slave_bc2bfi[:, 1], 1] = \
            master_bc2bfi[:, 1]
        facet_neighbours[master_bc2bfi[:, 0], master_bc2bfi[:, 1], 1] = \
            slave_bc2bfi[:, 1]

    return facet_neighbours
[ "def", "_set_dg_periodic_facet_neighbours", "(", "self", ",", "facet_neighbours", ",", "eq_map", ")", ":", "# if eq_map.", "# treat DG EPBC - these are definitely preferred", "if", "eq_map", ".", "n_dg_epbc", ">", "0", "and", "self", ".", "gel", ".", "name", "not", ...
https://github.com/sfepy/sfepy/blob/02ec7bb2ab39ee1dfe1eb4cd509f0ffb7dcc8b25/sfepy/discrete/dg/fields.py#L564-L607
trakt/Plex-Trakt-Scrobbler
aeb0bfbe62fad4b06c164f1b95581da7f35dce0b
Trakttv.bundle/Contents/Libraries/Shared/OpenSSL/crypto.py
python
_get_backend
()
return backend
Importing the backend from cryptography has the side effect of activating the osrandom engine. This mutates the global state of OpenSSL in the process and causes issues for various programs that use subinterpreters or embed Python. By putting the import in this function we can avoid triggering this side effect unless _get_backend is called.
Importing the backend from cryptography has the side effect of activating the osrandom engine. This mutates the global state of OpenSSL in the process and causes issues for various programs that use subinterpreters or embed Python. By putting the import in this function we can avoid triggering this side effect unless _get_backend is called.
[ "Importing", "the", "backend", "from", "cryptography", "has", "the", "side", "effect", "of", "activating", "the", "osrandom", "engine", ".", "This", "mutates", "the", "global", "state", "of", "OpenSSL", "in", "the", "process", "and", "causes", "issues", "for",...
def _get_backend():
    """
    Importing the backend from cryptography has the side effect of activating
    the osrandom engine. This mutates the global state of OpenSSL in the
    process and causes issues for various programs that use subinterpreters or
    embed Python. By putting the import in this function we can avoid
    triggering this side effect unless _get_backend is called.
    """
    from cryptography.hazmat.backends.openssl.backend import backend
    return backend
[ "def", "_get_backend", "(", ")", ":", "from", "cryptography", ".", "hazmat", ".", "backends", ".", "openssl", ".", "backend", "import", "backend", "return", "backend" ]
https://github.com/trakt/Plex-Trakt-Scrobbler/blob/aeb0bfbe62fad4b06c164f1b95581da7f35dce0b/Trakttv.bundle/Contents/Libraries/Shared/OpenSSL/crypto.py#L46-L55
returntocorp/bento
05b365da71b65170d41fe92a702480ab76c1d17c
bento/tool/runner/python_tool.py
python
PythonTool._packages_installed
(self)
return to_install
Checks whether the given packages are installed. The value for each package is the version specification.
Checks whether the given packages are installed.
[ "Checks", "whether", "the", "given", "packages", "are", "installed", "." ]
def _packages_installed(self) -> Dict[str, SimpleSpec]:
    """
    Checks whether the given packages are installed.

    The value for each package is the version specification.
    """
    installed: Dict[str, Version] = {}
    for package in json.loads(
        self.venv_exec([*PythonTool.PIP_CMD, "list", "--format", "json"])
    ):
        try:
            installed[package["name"]] = Version(package["version"])
        except ValueError:
            # skip it
            pass

    to_install: Dict[str, SimpleSpec] = {}
    for name, spec in self.required_packages().items():
        if name not in installed or not spec.match(installed[name]):
            to_install[name] = spec
    return to_install
[ "def", "_packages_installed", "(", "self", ")", "->", "Dict", "[", "str", ",", "SimpleSpec", "]", ":", "installed", ":", "Dict", "[", "str", ",", "Version", "]", "=", "{", "}", "for", "package", "in", "json", ".", "loads", "(", "self", ".", "venv_exe...
https://github.com/returntocorp/bento/blob/05b365da71b65170d41fe92a702480ab76c1d17c/bento/tool/runner/python_tool.py#L101-L121
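The spec matching relies on the semantic_version package; a small sketch of the Version/SimpleSpec calls the method depends on (the version strings are made up):

# Sketch of the version matching used above, via the semantic_version package.
from semantic_version import SimpleSpec, Version

spec = SimpleSpec('>=2.0.0,<3.0.0')       # hypothetical requirement
assert spec.match(Version('2.5.1'))       # installed and in range -> keep
assert not spec.match(Version('3.0.0'))   # out of range -> would be reinstalled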
vivisect/vivisect
37b0b655d8dedfcf322e86b0f144b096e48d547e
vivisect/__init__.py
python
VivWorkspace.addSegment
(self, va, size, name, filename)
Add a "segment" to the workspace. A segment is generally some meaningful area inside of a memory map. For PE binaries, a segment and a memory map are synonymous. However, some platforms (Elf) specify their memory maps (program headers) and segments (sectons) seperately.
Add a "segment" to the workspace. A segment is generally some meaningful area inside of a memory map. For PE binaries, a segment and a memory map are synonymous. However, some platforms (Elf) specify their memory maps (program headers) and segments (sectons) seperately.
[ "Add", "a", "segment", "to", "the", "workspace", ".", "A", "segment", "is", "generally", "some", "meaningful", "area", "inside", "of", "a", "memory", "map", ".", "For", "PE", "binaries", "a", "segment", "and", "a", "memory", "map", "are", "synonymous", "...
def addSegment(self, va, size, name, filename):
    """
    Add a "segment" to the workspace.  A segment is generally some
    meaningful area inside of a memory map.  For PE binaries, a segment
    and a memory map are synonymous.  However, some platforms (Elf)
    specify their memory maps (program headers) and segments (sections)
    separately.
    """
    self._fireEvent(VWE_ADDSEGMENT, (va, size, name, filename))
[ "def", "addSegment", "(", "self", ",", "va", ",", "size", ",", "name", ",", "filename", ")", ":", "self", ".", "_fireEvent", "(", "VWE_ADDSEGMENT", ",", "(", "va", ",", "size", ",", "name", ",", "filename", ")", ")" ]
https://github.com/vivisect/vivisect/blob/37b0b655d8dedfcf322e86b0f144b096e48d547e/vivisect/__init__.py#L1842-L1849
DayBreak-u/Thundernet_Pytorch
ac359d128a44e566ba5852a830c0a2154e10edb2
lib/model/utils/cente_decode.py
python
_top_aggregate
(heat)
return (ret - heat).transpose(1, 0).reshape(shape).transpose(3, 2)
heat: batchsize x channels x h x w
heat: batchsize x channels x h x w
[ "heat", ":", "batchsize", "x", "channels", "x", "h", "x", "w" ]
def _top_aggregate(heat):
    '''
    heat: batchsize x channels x h x w
    '''
    heat = heat.transpose(3, 2)
    shape = heat.shape
    heat = heat.reshape(-1, heat.shape[3])
    heat = heat.transpose(1, 0).contiguous()
    ret = heat.clone()
    for i in range(1, heat.shape[0]):
        inds = (heat[i] >= heat[i - 1])
        ret[i] += ret[i - 1] * inds.float()
    return (ret - heat).transpose(1, 0).reshape(shape).transpose(3, 2)
[ "def", "_top_aggregate", "(", "heat", ")", ":", "heat", "=", "heat", ".", "transpose", "(", "3", ",", "2", ")", "shape", "=", "heat", ".", "shape", "heat", "=", "heat", ".", "reshape", "(", "-", "1", ",", "heat", ".", "shape", "[", "3", "]", ")...
https://github.com/DayBreak-u/Thundernet_Pytorch/blob/ac359d128a44e566ba5852a830c0a2154e10edb2/lib/model/utils/cente_decode.py#L57-L69
terrycain/aioboto3
67bf574e5fd6221ec99a47c3f1b12f97c6721d54
aioboto3/resources/collection.py
python
AIOCollectionFactory._create_batch_action
(factory_self, resource_name, snake_cased, action_model, collection_model, service_model, event_emitter)
return batch_action
Creates a new method which makes a batch operation request to the underlying service API.
Creates a new method which makes a batch operation request to the underlying service API.
[ "Creates", "a", "new", "method", "which", "makes", "a", "batch", "operation", "request", "to", "the", "underlying", "service", "API", "." ]
def _create_batch_action(factory_self, resource_name, snake_cased,
                         action_model, collection_model,
                         service_model, event_emitter):
    """
    Creates a new method which makes a batch operation request
    to the underlying service API.
    """
    action = AioBatchAction(action_model)

    def batch_action(self, *args, **kwargs):
        return action(self, *args, **kwargs)

    batch_action.__name__ = str(snake_cased)
    batch_action.__doc__ = docstring.BatchActionDocstring(
        resource_name=resource_name,
        event_emitter=event_emitter,
        batch_action_model=action_model,
        service_model=service_model,
        collection_model=collection_model,
        include_signature=False
    )
    return batch_action
[ "def", "_create_batch_action", "(", "factory_self", ",", "resource_name", ",", "snake_cased", ",", "action_model", ",", "collection_model", ",", "service_model", ",", "event_emitter", ")", ":", "action", "=", "AioBatchAction", "(", "action_model", ")", "def", "batch...
https://github.com/terrycain/aioboto3/blob/67bf574e5fd6221ec99a47c3f1b12f97c6721d54/aioboto3/resources/collection.py#L148-L169
ganeti/ganeti
d340a9ddd12f501bef57da421b5f9b969a4ba905
lib/query.py
python
GetAllFields
(fielddefs)
return [fdef for (fdef, _, _, _) in fielddefs]
Extract L{objects.QueryFieldDefinition} from field definitions. @rtype: list of L{objects.QueryFieldDefinition}
Extract L{objects.QueryFieldDefinition} from field definitions.
[ "Extract", "L", "{", "objects", ".", "QueryFieldDefinition", "}", "from", "field", "definitions", "." ]
def GetAllFields(fielddefs):
    """Extract L{objects.QueryFieldDefinition} from field definitions.

    @rtype: list of L{objects.QueryFieldDefinition}

    """
    return [fdef for (fdef, _, _, _) in fielddefs]
[ "def", "GetAllFields", "(", "fielddefs", ")", ":", "return", "[", "fdef", "for", "(", "fdef", ",", "_", ",", "_", ",", "_", ")", "in", "fielddefs", "]" ]
https://github.com/ganeti/ganeti/blob/d340a9ddd12f501bef57da421b5f9b969a4ba905/lib/query.py#L207-L213
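Each field definition is a 4-tuple whose first element is the field object; a toy illustration with placeholder strings, assuming the function above is in scope:

# Toy illustration; real entries hold objects.QueryFieldDefinition instances.
fielddefs = [("name_def", None, None, None),
             ("uuid_def", None, None, None)]
assert GetAllFields(fielddefs) == ["name_def", "uuid_def"]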
hzlzh/AlfredWorkflow.com
7055f14f6922c80ea5943839eb0caff11ae57255
Sources/Workflows/Alfred-Time-Keeper/PyAl/Request/requests/packages/oauthlib/oauth1/rfc5849/__init__.py
python
Server.dummy_request_token
(self)
Dummy request token used when an invalid token was supplied. The dummy request token should be associated with a request token secret such that get_request_token_secret(.., dummy_request_token) returns a valid secret.
Dummy request token used when an invalid token was supplied.
[ "Dummy", "request", "token", "used", "when", "an", "invalid", "token", "was", "supplied", "." ]
def dummy_request_token(self):
    """Dummy request token used when an invalid token was supplied.

    The dummy request token should be associated with a request token
    secret such that get_request_token_secret(.., dummy_request_token)
    returns a valid secret.
    """
    raise NotImplementedError("Subclasses must implement this function.")
[ "def", "dummy_request_token", "(", "self", ")", ":", "raise", "NotImplementedError", "(", "\"Subclasses must implement this function.\"", ")" ]
https://github.com/hzlzh/AlfredWorkflow.com/blob/7055f14f6922c80ea5943839eb0caff11ae57255/Sources/Workflows/Alfred-Time-Keeper/PyAl/Request/requests/packages/oauthlib/oauth1/rfc5849/__init__.py#L425-L432
onaio/onadata
89ad16744e8f247fb748219476f6ac295869a95f
onadata/libs/utils/csv_import.py
python
submit_csv_async
(username, xform_id, file_path, overwrite=False)
Imports CSV data to an existing xform asynchronously.
Imports CSV data to an existing xform asynchronously.
[ "Imports", "CSV", "data", "to", "an", "existing", "xform", "asynchrounously", "." ]
def submit_csv_async(username, xform_id, file_path, overwrite=False):
    """Imports CSV data to an existing xform asynchronously."""
    xform = XForm.objects.get(pk=xform_id)

    with default_storage.open(file_path) as csv_file:
        return submit_csv(username, xform, csv_file, overwrite)
[ "def", "submit_csv_async", "(", "username", ",", "xform_id", ",", "file_path", ",", "overwrite", "=", "False", ")", ":", "xform", "=", "XForm", ".", "objects", ".", "get", "(", "pk", "=", "xform_id", ")", "with", "default_storage", ".", "open", "(", "fil...
https://github.com/onaio/onadata/blob/89ad16744e8f247fb748219476f6ac295869a95f/onadata/libs/utils/csv_import.py#L149-L154
PythonCharmers/python-future
80523f383fbba1c6de0551e19d0277e73e69573c
src/future/backports/socket.py
python
getfqdn
(name='')
return name
Get fully qualified domain name from name. An empty argument is interpreted as meaning the local host. First the hostname returned by gethostbyaddr() is checked, then possibly existing aliases. In case no FQDN is available, hostname from gethostname() is returned.
Get fully qualified domain name from name.
[ "Get", "fully", "qualified", "domain", "name", "from", "name", "." ]
def getfqdn(name=''):
    """Get fully qualified domain name from name.

    An empty argument is interpreted as meaning the local host.

    First the hostname returned by gethostbyaddr() is checked, then
    possibly existing aliases. In case no FQDN is available, hostname
    from gethostname() is returned.
    """
    name = name.strip()
    if not name or name == '0.0.0.0':
        name = gethostname()
    try:
        hostname, aliases, ipaddrs = gethostbyaddr(name)
    except error:
        pass
    else:
        aliases.insert(0, hostname)
        for name in aliases:
            if '.' in name:
                break
        else:
            name = hostname
    return name
[ "def", "getfqdn", "(", "name", "=", "''", ")", ":", "name", "=", "name", ".", "strip", "(", ")", "if", "not", "name", "or", "name", "==", "'0.0.0.0'", ":", "name", "=", "gethostname", "(", ")", "try", ":", "hostname", ",", "aliases", ",", "ipaddrs"...
https://github.com/PythonCharmers/python-future/blob/80523f383fbba1c6de0551e19d0277e73e69573c/src/future/backports/socket.py#L387-L410
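This backport mirrors the standard library, so the same behavior can be tried directly via socket.getfqdn:

# Equivalent calls via the standard library's socket module.
import socket

print(socket.getfqdn())             # FQDN of the local host
print(socket.getfqdn('localhost'))  # usually resolved through gethostbyaddr()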
BEEmod/BEE2.4
02767f3cf476581789425ab308ca1bea978f6a74
src/app/BEE2.py
python
done_callback
(trio_main_outcome)
The app finished, quit.
The app finished, quit.
[ "The", "app", "finished", "quit", "." ]
def done_callback(trio_main_outcome):
    """The app finished, quit."""
    from app import UI
    UI.quit_application()
[ "def", "done_callback", "(", "trio_main_outcome", ")", ":", "from", "app", "import", "UI", "UI", ".", "quit_application", "(", ")" ]
https://github.com/BEEmod/BEE2.4/blob/02767f3cf476581789425ab308ca1bea978f6a74/src/app/BEE2.py#L140-L143
xenith/django-base-template
670cdfeb6b6e80d3da1730f271bbd5dbc55d684e
fabfile.py
python
deploy
()
Deploy the project.
Deploy the project.
[ "Deploy", "the", "project", "." ]
def deploy():
    """ Deploy the project.
    """
    with settings(warn_only=True):
        webserver_stop()
    push_sources()
    install_dependencies()
    update_database()
    build_static()
    webserver_start()
[ "def", "deploy", "(", ")", ":", "with", "settings", "(", "warn_only", "=", "True", ")", ":", "webserver_stop", "(", ")", "push_sources", "(", ")", "install_dependencies", "(", ")", "update_database", "(", ")", "build_static", "(", ")", "webserver_start", "("...
https://github.com/xenith/django-base-template/blob/670cdfeb6b6e80d3da1730f271bbd5dbc55d684e/fabfile.py#L200-L210
glue-viz/glue
840b4c1364b0fa63bf67c914540c93dd71df41e1
glue/core/edit_subset_mode.py
python
XorMode
(edit_subset, new_state)
Edit_subset.subset state is xor-combined with new_state
Edit_subset.subset state is xor-combined with new_state
[ "Edit_subset", ".", "subset", "state", "is", "xor", "-", "combined", "with", "new_state" ]
def XorMode(edit_subset, new_state):
    """ Edit_subset.subset state is xor-combined with new_state """
    new_state.parent = edit_subset
    state = new_state ^ edit_subset.subset_state
    edit_subset.subset_state = state
[ "def", "XorMode", "(", "edit_subset", ",", "new_state", ")", ":", "new_state", ".", "parent", "=", "edit_subset", "state", "=", "new_state", "^", "edit_subset", ".", "subset_state", "edit_subset", ".", "subset_state", "=", "state" ]
https://github.com/glue-viz/glue/blob/840b4c1364b0fa63bf67c914540c93dd71df41e1/glue/core/edit_subset_mode.py#L132-L136
openedx/edx-platform
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
common/djangoapps/student/views/dashboard.py
python
get_filtered_course_entitlements
(user, org_whitelist, org_blacklist)
return filtered_entitlements, course_entitlement_available_sessions, unfulfilled_entitlement_pseudo_sessions
Given a user, return a filtered set of their course entitlements. Arguments: user (User): the user in question. org_whitelist (list[str]): If not None, ONLY entitlements of these orgs will be returned. org_blacklist (list[str]): CourseEntitlements of these orgs will be excluded. Returns: generator[CourseEntitlement]: a sequence of entitlements to be displayed on the user's dashboard.
Given a user, return a filtered set of their course entitlements.
[ "Given", "a", "user", "return", "a", "filtered", "set", "of", "their", "course", "entitlements", "." ]
def get_filtered_course_entitlements(user, org_whitelist, org_blacklist):
    """
    Given a user, return a filtered set of their course entitlements.

    Arguments:
        user (User): the user in question.
        org_whitelist (list[str]): If not None, ONLY entitlements of these orgs will be returned.
        org_blacklist (list[str]): CourseEntitlements of these orgs will be excluded.

    Returns:
        generator[CourseEntitlement]: a sequence of entitlements to be displayed on the
        user's dashboard.
    """
    course_entitlement_available_sessions = {}
    unfulfilled_entitlement_pseudo_sessions = {}
    course_entitlements = list(CourseEntitlement.get_active_entitlements_for_user(user))
    filtered_entitlements = []
    pseudo_session = None
    course_run_key = None

    for course_entitlement in course_entitlements:
        course_entitlement.update_expired_at()
        available_runs = get_visible_sessions_for_entitlement(course_entitlement)

        if not course_entitlement.enrollment_course_run:
            # Unfulfilled entitlements need a mock session for metadata
            pseudo_session = get_pseudo_session_for_entitlement(course_entitlement)
            unfulfilled_entitlement_pseudo_sessions[str(course_entitlement.uuid)] = pseudo_session

        # Check the org of the Course and filter out entitlements that are not available.
        if course_entitlement.enrollment_course_run:
            course_run_key = course_entitlement.enrollment_course_run.course_id
        elif available_runs:
            course_run_key = CourseKey.from_string(available_runs[0]['key'])
        elif pseudo_session:
            course_run_key = CourseKey.from_string(pseudo_session['key'])

        if course_run_key:
            # If there is no course_run_key at this point we will be unable to determine
            # if it should be shown. Therefore it should be excluded by default.
            if org_whitelist and course_run_key.org not in org_whitelist:
                continue
            elif org_blacklist and course_run_key.org in org_blacklist:
                continue

        course_entitlement_available_sessions[str(course_entitlement.uuid)] = available_runs
        filtered_entitlements.append(course_entitlement)

    return filtered_entitlements, course_entitlement_available_sessions, unfulfilled_entitlement_pseudo_sessions
[ "def", "get_filtered_course_entitlements", "(", "user", ",", "org_whitelist", ",", "org_blacklist", ")", ":", "course_entitlement_available_sessions", "=", "{", "}", "unfulfilled_entitlement_pseudo_sessions", "=", "{", "}", "course_entitlements", "=", "list", "(", "Course...
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/common/djangoapps/student/views/dashboard.py#L194-L242
LinOTP/LinOTP
bb3940bbaccea99550e6c063ff824f258dd6d6d7
linotp/lib/resolver.py
python
parse_resolver_spec
(resolver_spec)
return cls_identifier, config_identifier
expects a resolver specification and returns a tuple containing the resolver class identifier and the config identifier :param resolver_spec: a resolver specification format: <resolver class identifier>.<config identifier> :return: (cls_identifier, config_identifier)
expects a resolver specification and returns a tuple containing the resolver class identifier and the config identifier
[ "expects", "a", "resolver", "specification", "and", "returns", "a", "tuple", "containing", "the", "resolver", "class", "identifier", "and", "the", "config", "identifier" ]
def parse_resolver_spec(resolver_spec):
    """
    expects a resolver specification and returns a tuple
    containing the resolver class identifier and the config identifier

    :param resolver_spec: a resolver specification

                          format:
                          <resolver class identifier>.<config identifier>

    :return: (cls_identifier, config_identifier)
    """
    cls_identifier, _sep, config_identifier = resolver_spec.rpartition(".")
    return cls_identifier, config_identifier
[ "def", "parse_resolver_spec", "(", "resolver_spec", ")", ":", "cls_identifier", ",", "_sep", ",", "config_identifier", "=", "resolver_spec", ".", "rpartition", "(", "\".\"", ")", "return", "cls_identifier", ",", "config_identifier" ]
https://github.com/LinOTP/LinOTP/blob/bb3940bbaccea99550e6c063ff824f258dd6d6d7/linotp/lib/resolver.py#L967-L983
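Because rpartition('.') splits at the last dot, everything before it becomes the class identifier; the spec string below is a made-up example:

# Hypothetical resolver spec; only the final dot separates the two parts.
cls_id, cfg_id = parse_resolver_spec('useridresolver.LDAPIdResolver.IdResolver.mycompany')
assert cls_id == 'useridresolver.LDAPIdResolver.IdResolver'
assert cfg_id == 'mycompany'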
pythonarcade/arcade
1ee3eb1900683213e8e8df93943327c2ea784564
arcade/examples/sprite_collect_coins_diff_levels.py
python
MyGame.on_mouse_motion
(self, x, y, dx, dy)
Called whenever the mouse moves.
Called whenever the mouse moves.
[ "Called", "whenever", "the", "mouse", "moves", "." ]
def on_mouse_motion(self, x, y, dx, dy):
    """ Called whenever the mouse moves. """
    self.player_sprite.center_x = x
    self.player_sprite.center_y = y
[ "def", "on_mouse_motion", "(", "self", ",", "x", ",", "y", ",", "dx", ",", "dy", ")", ":", "self", ".", "player_sprite", ".", "center_x", "=", "x", "self", ".", "player_sprite", ".", "center_y", "=", "y" ]
https://github.com/pythonarcade/arcade/blob/1ee3eb1900683213e8e8df93943327c2ea784564/arcade/examples/sprite_collect_coins_diff_levels.py#L162-L167
bangq/django-wshop
683428295e2e9e1ba89ca7142a5589bd234564b5
extra_apps/material/templatetags/material_form_internal.py
python
is_initial_file
(value)
return bool(value and getattr(value, 'url', False))
Check for initial value of FileField.
Check for initial value of FileField.
[ "Check", "for", "initial", "value", "of", "FileFile", "." ]
def is_initial_file(value):
    """Check for initial value of FileField."""
    return bool(value and getattr(value, 'url', False))
[ "def", "is_initial_file", "(", "value", ")", ":", "return", "bool", "(", "value", "and", "getattr", "(", "value", ",", "'url'", ",", "False", ")", ")" ]
https://github.com/bangq/django-wshop/blob/683428295e2e9e1ba89ca7142a5589bd234564b5/extra_apps/material/templatetags/material_form_internal.py#L177-L179
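The check accepts anything truthy that carries a url attribute, which is how Django's FieldFile presents an already-uploaded file; a stub object is enough to see both outcomes:

# _Stub mimics the url attribute of a Django FieldFile; it is not a real field.
class _Stub:
    url = '/media/uploads/report.pdf'

assert is_initial_file(_Stub()) is True
assert is_initial_file(None) is False
assert is_initial_file('no-url-attribute') is False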
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/idlelib/delegator.py
python
Delegator.resetcache
(self)
Removes added attributes while leaving original attributes.
Removes added attributes while leaving original attributes.
[ "Removes", "added", "attributes", "while", "leaving", "original", "attributes", "." ]
def resetcache(self):
    "Removes added attributes while leaving original attributes."
    # Function is really about resetting delegator dict
    # to original state.  Cache is just a means
    for key in self.__cache:
        try:
            delattr(self, key)
        except AttributeError:
            pass
    self.__cache.clear()
[ "def", "resetcache", "(", "self", ")", ":", "# Function is really about resetting delegator dict", "# to original state. Cache is just a means", "for", "key", "in", "self", ".", "__cache", ":", "try", ":", "delattr", "(", "self", ",", "key", ")", "except", "Attribute...
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/idlelib/delegator.py#L15-L24
smart-mobile-software/gitstack
d9fee8f414f202143eb6e620529e8e5539a2af56
python/Lib/lib-tk/Tkinter.py
python
Misc.winfo_vrootx
(self)
return getint( self.tk.call('winfo', 'vrootx', self._w))
Return the x offset of the virtual root relative to the root window of the screen of this widget.
Return the x offset of the virtual root relative to the root window of the screen of this widget.
[ "Return", "the", "x", "offset", "of", "the", "virtual", "root", "relative", "to", "the", "root", "window", "of", "the", "screen", "of", "this", "widget", "." ]
def winfo_vrootx(self):
    """Return the x offset of the virtual root relative to the root
    window of the screen of this widget."""
    return getint(
        self.tk.call('winfo', 'vrootx', self._w))
[ "def", "winfo_vrootx", "(", "self", ")", ":", "return", "getint", "(", "self", ".", "tk", ".", "call", "(", "'winfo'", ",", "'vrootx'", ",", "self", ".", "_w", ")", ")" ]
https://github.com/smart-mobile-software/gitstack/blob/d9fee8f414f202143eb6e620529e8e5539a2af56/python/Lib/lib-tk/Tkinter.py#L885-L889
savio-code/fern-wifi-cracker
0da03aba988c66dfa131a45824568abb84b7704a
Fern-Wifi-Cracker/core/tools.py
python
settings_dialog.change_settings
(self)
[]
def change_settings(self):
    channel = str(self.channel_combobox.currentText())
    term_settings = self.xterm_checkbox.isChecked()

    if channel == 'All Channels':
        variables.static_channel = str()
    else:
        variables.static_channel = channel

    if term_settings:
        self.settings.create_settings("xterm", "xterm -geometry 100 -e")
    else:
        self.settings.create_settings("xterm", str())
    variables.xterm_setting = self.settings.read_last_settings("xterm")
[ "def", "change_settings", "(", "self", ")", ":", "channel", "=", "str", "(", "self", ".", "channel_combobox", ".", "currentText", "(", ")", ")", "term_settings", "=", "self", ".", "xterm_checkbox", ".", "isChecked", "(", ")", "if", "channel", "==", "'All C...
https://github.com/savio-code/fern-wifi-cracker/blob/0da03aba988c66dfa131a45824568abb84b7704a/Fern-Wifi-Cracker/core/tools.py#L263-L276
jieter/django-tables2
ce392ee2ee341d7180345a6113919cf9a3925f16
django_tables2/views.py
python
SingleTableMixin.get_context_data
(self, **kwargs)
return context
Overridden version of `.TemplateResponseMixin` to inject the table into the template's context.
Overridden version of `.TemplateResponseMixin` to inject the table into the template's context.
[ "Overridden", "version", "of", ".", "TemplateResponseMixin", "to", "inject", "the", "table", "into", "the", "template", "s", "context", "." ]
def get_context_data(self, **kwargs):
    """
    Overridden version of `.TemplateResponseMixin` to inject the table into
    the template's context.
    """
    context = super().get_context_data(**kwargs)
    table = self.get_table(**self.get_table_kwargs())
    context[self.get_context_table_name(table)] = table
    return context
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "context", "=", "super", "(", ")", ".", "get_context_data", "(", "*", "*", "kwargs", ")", "table", "=", "self", ".", "get_table", "(", "*", "*", "self", ".", "get_table_kwargs",...
https://github.com/jieter/django-tables2/blob/ce392ee2ee341d7180345a6113919cf9a3925f16/django_tables2/views.py#L140-L148
guildai/guildai
1665985a3d4d788efc1a3180ca51cc417f71ca78
guild/external/pip/_vendor/pyparsing.py
python
countedArray
( expr, intExpr=None )
return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
Helper to define a counted list of expressions. This helper defines a pattern of the form:: integer expr expr expr... where the leading integer tells how many expr expressions follow. The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value. Example:: countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] # in this parser, the leading integer value is given in binary, # '10' indicating that 2 values are in the array binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
Helper to define a counted list of expressions. This helper defines a pattern of the form:: integer expr expr expr... where the leading integer tells how many expr expressions follow. The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
[ "Helper", "to", "define", "a", "counted", "list", "of", "expressions", ".", "This", "helper", "defines", "a", "pattern", "of", "the", "form", "::", "integer", "expr", "expr", "expr", "...", "where", "the", "leading", "integer", "tells", "how", "many", "exp...
def countedArray( expr, intExpr=None ): """ Helper to define a counted list of expressions. This helper defines a pattern of the form:: integer expr expr expr... where the leading integer tells how many expr expressions follow. The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value. Example:: countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] # in this parser, the leading integer value is given in binary, # '10' indicating that 2 values are in the array binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] """ arrayExpr = Forward() def countFieldParseAction(s,l,t): n = t[0] arrayExpr << (n and Group(And([expr]*n)) or Group(empty)) return [] if intExpr is None: intExpr = Word(nums).setParseAction(lambda t:int(t[0])) else: intExpr = intExpr.copy() intExpr.setName("arrayLen") intExpr.addParseAction(countFieldParseAction, callDuringTry=True) return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
[ "def", "countedArray", "(", "expr", ",", "intExpr", "=", "None", ")", ":", "arrayExpr", "=", "Forward", "(", ")", "def", "countFieldParseAction", "(", "s", ",", "l", ",", "t", ")", ":", "n", "=", "t", "[", "0", "]", "arrayExpr", "<<", "(", "n", "...
https://github.com/guildai/guildai/blob/1665985a3d4d788efc1a3180ca51cc417f71ca78/guild/external/pip/_vendor/pyparsing.py#L4447-L4476
dmlc/dgl
8d14a739bc9e446d6c92ef83eafe5782398118de
python/dgl/nn/pytorch/conv/gatedgraphconv.py
python
GatedGraphConv.reset_parameters
(self)
r""" Description ----------- Reinitialize learnable parameters. Note ---- The model parameters are initialized using Glorot uniform initialization and the bias is initialized to be zero.
r"""
[ "r" ]
def reset_parameters(self): r""" Description ----------- Reinitialize learnable parameters. Note ---- The model parameters are initialized using Glorot uniform initialization and the bias is initialized to be zero. """ gain = init.calculate_gain('relu') self.gru.reset_parameters() for linear in self.linears: init.xavier_normal_(linear.weight, gain=gain) init.zeros_(linear.bias)
[ "def", "reset_parameters", "(", "self", ")", ":", "gain", "=", "init", ".", "calculate_gain", "(", "'relu'", ")", "self", ".", "gru", ".", "reset_parameters", "(", ")", "for", "linear", "in", "self", ".", "linears", ":", "init", ".", "xavier_normal_", "(...
https://github.com/dmlc/dgl/blob/8d14a739bc9e446d6c92ef83eafe5782398118de/python/dgl/nn/pytorch/conv/gatedgraphconv.py#L82-L98
inspurer/WorkAttendanceSystem
1221e2d67bdf5bb15fe99517cc3ded58ccb066df
V2.0/venv/Lib/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/distlib/_backport/tarfile.py
python
TarInfo._proc_member
(self, tarfile)
Choose the right processing method depending on the type and call it.
Choose the right processing method depending on the type and call it.
[ "Choose", "the", "right", "processing", "method", "depending", "on", "the", "type", "and", "call", "it", "." ]
def _proc_member(self, tarfile): """Choose the right processing method depending on the type and call it. """ if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): return self._proc_gnulong(tarfile) elif self.type == GNUTYPE_SPARSE: return self._proc_sparse(tarfile) elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): return self._proc_pax(tarfile) else: return self._proc_builtin(tarfile)
[ "def", "_proc_member", "(", "self", ",", "tarfile", ")", ":", "if", "self", ".", "type", "in", "(", "GNUTYPE_LONGNAME", ",", "GNUTYPE_LONGLINK", ")", ":", "return", "self", ".", "_proc_gnulong", "(", "tarfile", ")", "elif", "self", ".", "type", "==", "GN...
https://github.com/inspurer/WorkAttendanceSystem/blob/1221e2d67bdf5bb15fe99517cc3ded58ccb066df/V2.0/venv/Lib/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/distlib/_backport/tarfile.py#L1303-L1314
out0fmemory/GoAgent-Always-Available
c4254984fea633ce3d1893fe5901debd9f22c2a9
server/lib/google/appengine/ext/ndb/model.py
python
non_transactional
(func, args, kwds, allow_existing=True)
A decorator that ensures a function is run outside a transaction. If there is an existing transaction (and allow_existing=True), the existing transaction is paused while the function is executed. Args: allow_existing: If false, throw an exception if called from within a transaction. If true, temporarily re-establish the previous non-transactional context. Defaults to True. This supports two forms, similar to transactional(). Returns: A wrapper for the decorated function that ensures it runs outside a transaction.
A decorator that ensures a function is run outside a transaction.
[ "A", "decorator", "that", "ensures", "a", "function", "is", "run", "outside", "a", "transaction", "." ]
def non_transactional(func, args, kwds, allow_existing=True): """A decorator that ensures a function is run outside a transaction. If there is an existing transaction (and allow_existing=True), the existing transaction is paused while the function is executed. Args: allow_existing: If false, throw an exception if called from within a transaction. If true, temporarily re-establish the previous non-transactional context. Defaults to True. This supports two forms, similar to transactional(). Returns: A wrapper for the decorated function that ensures it runs outside a transaction. """ from . import tasklets ctx = tasklets.get_context() if not ctx.in_transaction(): return func(*args, **kwds) if not allow_existing: raise datastore_errors.BadRequestError( '%s cannot be called within a transaction.' % func.__name__) save_ctx = ctx while ctx.in_transaction(): ctx = ctx._parent_context if ctx is None: raise datastore_errors.BadRequestError( 'Context without non-transactional ancestor') save_ds_conn = datastore._GetConnection() try: if hasattr(save_ctx, '_old_ds_conn'): datastore._SetConnection(save_ctx._old_ds_conn) tasklets.set_context(ctx) return func(*args, **kwds) finally: tasklets.set_context(save_ctx) datastore._SetConnection(save_ds_conn)
[ "def", "non_transactional", "(", "func", ",", "args", ",", "kwds", ",", "allow_existing", "=", "True", ")", ":", "from", ".", "import", "tasklets", "ctx", "=", "tasklets", ".", "get_context", "(", ")", "if", "not", "ctx", ".", "in_transaction", "(", ")",...
https://github.com/out0fmemory/GoAgent-Always-Available/blob/c4254984fea633ce3d1893fe5901debd9f22c2a9/server/lib/google/appengine/ext/ndb/model.py#L3858-L3896
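A short usage sketch of the decorator form (standard `ndb` import; `bump_view_counter` and its entity are hypothetical). The wrapped call runs outside any ambient transaction, so its write survives even if the surrounding transaction retries or rolls back:

    from google.appengine.ext import ndb

    @ndb.non_transactional
    def bump_view_counter(counter_key):
        # Executes with the pre-transaction (non-transactional) context and connection.
        counter = counter_key.get()
        counter.views += 1
        counter.put()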
SteveDoyle2/pyNastran
eda651ac2d4883d95a34951f8a002ff94f642a1a
pyNastran/op2/tables/oee_energy/oee_objects.py
python
ComplexStrainEnergyArray.build
(self)
sizes the vectorized attributes of the ComplexStrainEnergyArray
sizes the vectorized attributes of the ComplexStrainEnergyArray
[ "sizes", "the", "vectorized", "attributes", "of", "the", "ComplexStrainEnergyArray" ]
def build(self): """sizes the vectorized attributes of the ComplexStrainEnergyArray""" del self.dt_temp #print(self._ntotals) assert self.ntimes > 0, 'ntimes=%s' % self.ntimes assert self.nelements > 0, 'nelements=%s' % self.nelements assert self.ntotal > 0, 'ntotal=%s' % self.ntotal self.ntotal = max(self._ntotals) #if max(self._ntotals) != min(self._ntotals): #raise RuntimeError('variable length in RealStrainEnergyArray') #self.names = [] #self.nelements = self.ntotal // self.ntimes self.nelements = self.ntotal self.itime = 0 self.ielement = 0 self.itotal = 0 #self.itotal2 = 0 #self.ntimes = 0 #self.nelements = 0 #print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal)) dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt) self.build_data(dtype)
[ "def", "build", "(", "self", ")", ":", "del", "self", ".", "dt_temp", "#print(self._ntotals)", "assert", "self", ".", "ntimes", ">", "0", ",", "'ntimes=%s'", "%", "self", ".", "ntimes", "assert", "self", ".", "nelements", ">", "0", ",", "'nelements=%s'", ...
https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/op2/tables/oee_energy/oee_objects.py#L786-L811
TesterlifeRaymond/doraemon
d5cb6e34bd5f2aa97273ce0c0c9303e32beaa333
venv/lib/python3.6/site-packages/pip/_vendor/distlib/wheel.py
python
Wheel.is_compatible
(self)
return is_compatible(self)
Determine if a wheel is compatible with the running system.
Determine if a wheel is compatible with the running system.
[ "Determine", "if", "a", "wheel", "is", "compatible", "with", "the", "running", "system", "." ]
def is_compatible(self): """ Determine if a wheel is compatible with the running system. """ return is_compatible(self)
[ "def", "is_compatible", "(", "self", ")", ":", "return", "is_compatible", "(", "self", ")" ]
https://github.com/TesterlifeRaymond/doraemon/blob/d5cb6e34bd5f2aa97273ce0c0c9303e32beaa333/venv/lib/python3.6/site-packages/pip/_vendor/distlib/wheel.py#L701-L705
akfamily/akshare
590e50eece9ec067da3538c7059fd660b71f1339
akshare/stock_feature/stock_gdfx_em.py
python
stock_gdfx_free_holding_analyse_em
(date: str = "20210930")
return big_df
Eastmoney Data Center - Shareholder Analysis - Shareholding Analysis - Top 10 free-float shareholders https://data.eastmoney.com/gdfx/HoldingAnalyse.html :param date: reporting period :type date: str :return: top 10 free-float shareholders :rtype: pandas.DataFrame
Eastmoney Data Center - Shareholder Analysis - Shareholding Analysis - Top 10 free-float shareholders https://data.eastmoney.com/gdfx/HoldingAnalyse.html :param date: reporting period :type date: str :return: top 10 free-float shareholders :rtype: pandas.DataFrame
[ "Eastmoney", "Data", "Center", "-", "Shareholder", "Analysis", "-", "Shareholding", "Analysis", "-", "Top", "10", "free-float", "shareholders", "https", ":", "//", "data", ".", "eastmoney", ".", "com", "/", "gdfx", "/", "HoldingAnalyse", ".", "html", ":", "param", "date", ":", "reporting", "period", ":", "type", "date", ":", "str", ":", "ret...
def stock_gdfx_free_holding_analyse_em(date: str = "20210930") -> pd.DataFrame: """ 东方财富网-数据中心-股东分析-股东持股分析-十大流通股东 https://data.eastmoney.com/gdfx/HoldingAnalyse.html :param date: 报告期 :type date: str :return: 十大流通股东 :rtype: pandas.DataFrame """ url = "https://datacenter-web.eastmoney.com/api/data/v1/get" params = { "sortColumns": "UPDATE_DATE,SECURITY_CODE,HOLDER_RANK", "sortTypes": "-1,1,1", "pageSize": "500", "pageNumber": "1", "reportName": "RPT_CUSTOM_F10_EH_FREEHOLDERS_JOIN_FREEHOLDER_SHAREANALYSIS", "columns": "ALL;D10_ADJCHRATE,D30_ADJCHRATE,D60_ADJCHRATE", "source": "WEB", "client": "WEB", "filter": f"(END_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')", } r = requests.get(url, params=params) data_json = r.json() total_page = data_json["result"]["pages"] big_df = pd.DataFrame() for page in tqdm(range(1, total_page + 1)): params.update({"pageNumber": page}) r = requests.get(url, params=params) data_json = r.json() temp_df = pd.DataFrame(data_json["result"]["data"]) big_df = big_df.append(temp_df, ignore_index=True) big_df.reset_index(inplace=True) big_df["index"] = big_df.index + 1 big_df.columns = [ "序号", "-", "股票代码", "-", "-", "股东名称", "期末持股-数量", "-", "-", "-", "-", "-", "股票简称", "-", "-", "-", "期末持股-流通市值", "-", "-", "期末持股-数量变化比例", "股东类型", "-", "公告日", "报告期", "-", "-", "-", "-", "-", "-", "期末持股-持股变动", "-", "-", "-", "-", "期末持股-数量变化", "公告日后涨跌幅-10个交易日", "公告日后涨跌幅-30个交易日", "公告日后涨跌幅-60个交易日", ] big_df = big_df[ [ "序号", "股东名称", "股东类型", "股票代码", "股票简称", "报告期", "期末持股-数量", "期末持股-数量变化", "期末持股-数量变化比例", "期末持股-持股变动", "期末持股-流通市值", "公告日", "公告日后涨跌幅-10个交易日", "公告日后涨跌幅-30个交易日", "公告日后涨跌幅-60个交易日", ] ] big_df["公告日"] = pd.to_datetime(big_df["公告日"]).dt.date big_df["期末持股-数量"] = pd.to_numeric(big_df["期末持股-数量"]) big_df["期末持股-数量变化"] = pd.to_numeric(big_df["期末持股-数量变化"]) big_df["期末持股-数量变化比例"] = pd.to_numeric(big_df["期末持股-数量变化比例"]) big_df["期末持股-流通市值"] = pd.to_numeric(big_df["期末持股-流通市值"]) big_df["公告日后涨跌幅-10个交易日"] = pd.to_numeric(big_df["公告日后涨跌幅-10个交易日"]) big_df["公告日后涨跌幅-30个交易日"] = pd.to_numeric(big_df["公告日后涨跌幅-30个交易日"]) big_df["公告日后涨跌幅-60个交易日"] = pd.to_numeric(big_df["公告日后涨跌幅-60个交易日"]) return big_df
[ "def", "stock_gdfx_free_holding_analyse_em", "(", "date", ":", "str", "=", "\"20210930\"", ")", "->", "pd", ".", "DataFrame", ":", "url", "=", "\"https://datacenter-web.eastmoney.com/api/data/v1/get\"", "params", "=", "{", "\"sortColumns\"", ":", "\"UPDATE_DATE,SECURITY_C...
https://github.com/akfamily/akshare/blob/590e50eece9ec067da3538c7059fd660b71f1339/akshare/stock_feature/stock_gdfx_em.py#L635-L736
facebookresearch/pytorch3d
fddd6a700fa9685c1ce2d4b266c111d7db424ecc
pytorch3d/structures/meshes.py
python
Meshes.split
(self, split_sizes: list)
return meshlist
Splits Meshes object of size N into a list of Meshes objects of size len(split_sizes), where the i-th Meshes object is of size split_sizes[i]. Similar to torch.split(). Args: split_sizes: List of integer sizes of Meshes objects to be returned. Returns: list[Meshes].
Splits Meshes object of size N into a list of Meshes objects of size len(split_sizes), where the i-th Meshes object is of size split_sizes[i]. Similar to torch.split().
[ "Splits", "Meshes", "object", "of", "size", "N", "into", "a", "list", "of", "Meshes", "objects", "of", "size", "len", "(", "split_sizes", ")", "where", "the", "i", "-", "th", "Meshes", "object", "is", "of", "size", "split_sizes", "[", "i", "]", ".", ...
def split(self, split_sizes: list): """ Splits Meshes object of size N into a list of Meshes objects of size len(split_sizes), where the i-th Meshes object is of size split_sizes[i]. Similar to torch.split(). Args: split_sizes: List of integer sizes of Meshes objects to be returned. Returns: list[Meshes]. """ if not all(isinstance(x, int) for x in split_sizes): raise ValueError("Value of split_sizes must be a list of integers.") meshlist = [] curi = 0 for i in split_sizes: meshlist.append(self[curi : curi + i]) curi += i return meshlist
[ "def", "split", "(", "self", ",", "split_sizes", ":", "list", ")", ":", "if", "not", "all", "(", "isinstance", "(", "x", ",", "int", ")", "for", "x", "in", "split_sizes", ")", ":", "raise", "ValueError", "(", "\"Value of split_sizes must be a list of integer...
https://github.com/facebookresearch/pytorch3d/blob/fddd6a700fa9685c1ce2d4b266c111d7db424ecc/pytorch3d/structures/meshes.py#L1269-L1288
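A small usage sketch (assumes PyTorch3D is installed): batch five single-triangle meshes, then split into sub-batches of sizes 2 and 3. The sizes are consumed in order, so 2 + 3 = 5 covers the whole batch:

    import torch
    from pytorch3d.structures import Meshes

    verts = [torch.rand(3, 3) for _ in range(5)]           # 3 vertices per mesh
    faces = [torch.tensor([[0, 1, 2]]) for _ in range(5)]  # one triangular face each
    batch = Meshes(verts=verts, faces=faces)

    first, rest = batch.split([2, 3])
    print(len(first), len(rest))  # 2 3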
StanfordHCI/termite
f795be291a1598cb2ae1df1a598c989f369e9ce8
pipeline/tokenize.py
python
Tokenize.__init__
( self, logging_level )
[]
def __init__( self, logging_level ): self.logger = logging.getLogger( 'Tokenize' ) self.logger.setLevel( logging_level ) handler = logging.StreamHandler( sys.stderr ) handler.setLevel( logging_level ) self.logger.addHandler( handler )
[ "def", "__init__", "(", "self", ",", "logging_level", ")", ":", "self", ".", "logger", "=", "logging", ".", "getLogger", "(", "'Tokenize'", ")", "self", ".", "logger", ".", "setLevel", "(", "logging_level", ")", "handler", "=", "logging", ".", "StreamHandl...
https://github.com/StanfordHCI/termite/blob/f795be291a1598cb2ae1df1a598c989f369e9ce8/pipeline/tokenize.py#L29-L34
krintoxi/NoobSec-Toolkit
38738541cbc03cedb9a3b3ed13b629f781ad64f6
NoobSecToolkit /scripts/sshbackdoors/backdoors/shell/pupy/pupy/packages/windows/x86/psutil/__init__.py
python
Process.nice
(self, value=None)
Get or set process niceness (priority).
Get or set process niceness (priority).
[ "Get", "or", "set", "process", "niceness", "(", "priority", ")", "." ]
def nice(self, value=None): """Get or set process niceness (priority).""" if value is None: return self._proc.nice_get() else: if not self.is_running(): raise NoSuchProcess(self.pid, self._name) self._proc.nice_set(value)
[ "def", "nice", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "None", ":", "return", "self", ".", "_proc", ".", "nice_get", "(", ")", "else", ":", "if", "not", "self", ".", "is_running", "(", ")", ":", "raise", "NoSuchProces...
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit /scripts/sshbackdoors/backdoors/shell/pupy/pupy/packages/windows/x86/psutil/__init__.py#L624-L631
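Typical get/set usage of the method above; note the value semantics are platform-specific (Unix niceness -20..19, Windows priority-class constants), and lowering niceness usually requires privileges:

    import psutil

    p = psutil.Process()  # current process
    print(p.nice())       # get the current niceness / priority class
    p.nice(10)            # set it (on Unix: run at lower priority)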
tacnetsol/ghidra_scripts
5c4d24bc7166f672015003572daeeb04d2e1f30e
utils/leafblower.py
python
LeafFunctionFinder.find_leaves
(self)
Find leaf functions. Leaf functions are functions that have loops, make no external calls, require 1-3 arguments, and have a reference count greater than 25.
Find leaf functions. Leaf functions are functions that have loops, make no external calls, require 1-3 arguments, and have a reference count greater than 25.
[ "Find", "leaf", "functions", ".", "Leaf", "functions", "are", "functions", "that", "have", "loops", "make", "no", "external", "calls", "require", "1", "-", "3", "arguments", "and", "have", "a", "reference", "count", "greater", "than", "25", "." ]
def find_leaves(self): """ Find leaf functions. Leaf functions are functions that have loops, make no external calls, require 1-3 arguments, and have a reference count greater than 25. """ function_manager = self._program.getFunctionManager() for function in function_manager.getFunctions(True): if not self._function_makes_call(function): loops = self._function_has_loops(function) argc = self._get_argument_count(function) if LeafFunction.is_candidate(function, loops, argc): self.leaf_functions.append(LeafFunction(function, loops, argc)) self.leaf_functions.sort(key=lambda x: x.xref_count, reverse=True)
[ "def", "find_leaves", "(", "self", ")", ":", "function_manager", "=", "self", ".", "_program", ".", "getFunctionManager", "(", ")", "for", "function", "in", "function_manager", ".", "getFunctions", "(", "True", ")", ":", "if", "not", "self", ".", "_function_...
https://github.com/tacnetsol/ghidra_scripts/blob/5c4d24bc7166f672015003572daeeb04d2e1f30e/utils/leafblower.py#L165-L183
securityclippy/elasticintel
aa08d3e9f5ab1c000128e95161139ce97ff0e334
ingest_feed_lambda/pandas/core/reshape/tile.py
python
cut
(x, bins, right=True, labels=None, retbins=False, precision=3, include_lowest=False)
return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name)
Return indices of half-open bins to which each value of `x` belongs. Parameters ---------- x : array-like Input array to be binned. It has to be 1-dimensional. bins : int, sequence of scalars, or IntervalIndex If `bins` is an int, it defines the number of equal-width bins in the range of `x`. However, in this case, the range of `x` is extended by .1% on each side to include the min or max values of `x`. If `bins` is a sequence it defines the bin edges allowing for non-uniform bin width. No extension of the range of `x` is done in this case. right : bool, optional Indicates whether the bins include the rightmost edge or not. If right == True (the default), then the bins [1,2,3,4] indicate (1,2], (2,3], (3,4]. labels : array or boolean, default None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, return only integer indicators of the bins. retbins : bool, optional Whether to return the bins or not. Can be useful if bins is given as a scalar. precision : int, optional The precision at which to store and display the bins labels include_lowest : bool, optional Whether the first interval should be left-inclusive or not. Returns ------- out : Categorical or Series or array of integers if labels is False The return type (Categorical or Series) depends on the input: a Series of type category if input is a Series else Categorical. Bins are represented as categories when categorical data is returned. bins : ndarray of floats Returned only if `retbins` is True. Notes ----- The `cut` function can be useful for going from a continuous variable to a categorical variable. For example, `cut` could convert ages to groups of age ranges. Any NA values will be NA in the result. Out of bounds values will be NA in the resulting Categorical object Examples -------- >>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, retbins=True) ... # doctest: +ELLIPSIS ([(0.19, 3.367], (0.19, 3.367], (0.19, 3.367], (3.367, 6.533], ... Categories (3, interval[float64]): [(0.19, 3.367] < (3.367, 6.533] ... >>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), ... 3, labels=["good", "medium", "bad"]) ... # doctest: +SKIP [good, good, good, medium, bad, good] Categories (3, object): [good < medium < bad] >>> pd.cut(np.ones(5), 4, labels=False) array([1, 1, 1, 1, 1])
Return indices of half-open bins to which each value of `x` belongs.
[ "Return", "indices", "of", "half", "-", "open", "bins", "to", "which", "each", "value", "of", "x", "belongs", "." ]
def cut(x, bins, right=True, labels=None, retbins=False, precision=3, include_lowest=False): """ Return indices of half-open bins to which each value of `x` belongs. Parameters ---------- x : array-like Input array to be binned. It has to be 1-dimensional. bins : int, sequence of scalars, or IntervalIndex If `bins` is an int, it defines the number of equal-width bins in the range of `x`. However, in this case, the range of `x` is extended by .1% on each side to include the min or max values of `x`. If `bins` is a sequence it defines the bin edges allowing for non-uniform bin width. No extension of the range of `x` is done in this case. right : bool, optional Indicates whether the bins include the rightmost edge or not. If right == True (the default), then the bins [1,2,3,4] indicate (1,2], (2,3], (3,4]. labels : array or boolean, default None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, return only integer indicators of the bins. retbins : bool, optional Whether to return the bins or not. Can be useful if bins is given as a scalar. precision : int, optional The precision at which to store and display the bins labels include_lowest : bool, optional Whether the first interval should be left-inclusive or not. Returns ------- out : Categorical or Series or array of integers if labels is False The return type (Categorical or Series) depends on the input: a Series of type category if input is a Series else Categorical. Bins are represented as categories when categorical data is returned. bins : ndarray of floats Returned only if `retbins` is True. Notes ----- The `cut` function can be useful for going from a continuous variable to a categorical variable. For example, `cut` could convert ages to groups of age ranges. Any NA values will be NA in the result. Out of bounds values will be NA in the resulting Categorical object Examples -------- >>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, retbins=True) ... # doctest: +ELLIPSIS ([(0.19, 3.367], (0.19, 3.367], (0.19, 3.367], (3.367, 6.533], ... Categories (3, interval[float64]): [(0.19, 3.367] < (3.367, 6.533] ... >>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), ... 3, labels=["good", "medium", "bad"]) ... # doctest: +SKIP [good, good, good, medium, bad, good] Categories (3, object): [good < medium < bad] >>> pd.cut(np.ones(5), 4, labels=False) array([1, 1, 1, 1, 1]) """ # NOTE: this binning code is changed a bit from histogram for var(x) == 0 # for handling the cut for datetime and timedelta objects x_is_series, series_index, name, x = _preprocess_for_cut(x) x, dtype = _coerce_to_type(x) if not np.iterable(bins): if is_scalar(bins) and bins < 1: raise ValueError("`bins` should be a positive integer.") try: # for array-like sz = x.size except AttributeError: x = np.asarray(x) sz = x.size if sz == 0: raise ValueError('Cannot cut empty array') rng = (nanops.nanmin(x), nanops.nanmax(x)) mn, mx = [mi + 0.0 for mi in rng] if mn == mx: # adjust end points before binning mn -= .001 * abs(mn) if mn != 0 else .001 mx += .001 * abs(mx) if mx != 0 else .001 bins = np.linspace(mn, mx, bins + 1, endpoint=True) else: # adjust end points after binning bins = np.linspace(mn, mx, bins + 1, endpoint=True) adj = (mx - mn) * 0.001 # 0.1% of the range if right: bins[0] -= adj else: bins[-1] += adj elif isinstance(bins, IntervalIndex): pass else: bins = np.asarray(bins) bins = _convert_bin_to_numeric_type(bins, dtype) if (np.diff(bins) < 0).any(): raise ValueError('bins must increase monotonically.') fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels, precision=precision, include_lowest=include_lowest, dtype=dtype) return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name)
[ "def", "cut", "(", "x", ",", "bins", ",", "right", "=", "True", ",", "labels", "=", "None", ",", "retbins", "=", "False", ",", "precision", "=", "3", ",", "include_lowest", "=", "False", ")", ":", "# NOTE: this binning code is changed a bit from histogram for ...
https://github.com/securityclippy/elasticintel/blob/aa08d3e9f5ab1c000128e95161139ce97ff0e334/ingest_feed_lambda/pandas/core/reshape/tile.py#L24-L139
jython/frozen-mirror
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
lib-python/2.7/mailbox.py
python
_create_temporary
(path)
return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()), socket.gethostname(), os.getpid()))
Create a temp file based on path and open for reading and writing.
Create a temp file based on path and open for reading and writing.
[ "Create", "a", "temp", "file", "based", "on", "path", "and", "open", "for", "reading", "and", "writing", "." ]
def _create_temporary(path): """Create a temp file based on path and open for reading and writing.""" return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()), socket.gethostname(), os.getpid()))
[ "def", "_create_temporary", "(", "path", ")", ":", "return", "_create_carefully", "(", "'%s.%s.%s.%s'", "%", "(", "path", ",", "int", "(", "time", ".", "time", "(", ")", ")", ",", "socket", ".", "gethostname", "(", ")", ",", "os", ".", "getpid", "(", ...
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/mailbox.py#L2021-L2025
tensorlayer/tensorlayer
cb4eb896dd063e650ef22533ed6fa6056a71cad5
tensorlayer/prepro.py
python
transform_matrix_offset_center
(matrix, y, x)
return transform_matrix
Convert the matrix from Cartesian coordinates (the origin in the middle of image) to Image coordinates (the origin on the top-left of image). Parameters ---------- matrix : numpy.array Transform matrix. x and y : 2 int Size of image. Returns ------- numpy.array The transform matrix. Examples -------- - See ``tl.prepro.rotation``, ``tl.prepro.shear``, ``tl.prepro.zoom``.
Convert the matrix from Cartesian coordinates (the origin in the middle of image) to Image coordinates (the origin on the top-left of image).
[ "Convert", "the", "matrix", "from", "Cartesian", "coordinates", "(", "the", "origin", "in", "the", "middle", "of", "image", ")", "to", "Image", "coordinates", "(", "the", "origin", "on", "the", "top", "-", "left", "of", "image", ")", "." ]
def transform_matrix_offset_center(matrix, y, x): """Convert the matrix from Cartesian coordinates (the origin in the middle of image) to Image coordinates (the origin on the top-left of image). Parameters ---------- matrix : numpy.array Transform matrix. x and y : 2 int Size of image. Returns ------- numpy.array The transform matrix. Examples -------- - See ``tl.prepro.rotation``, ``tl.prepro.shear``, ``tl.prepro.zoom``. """ o_x = (x - 1) / 2.0 o_y = (y - 1) / 2.0 offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]]) reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]]) transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix) return transform_matrix
[ "def", "transform_matrix_offset_center", "(", "matrix", ",", "y", ",", "x", ")", ":", "o_x", "=", "(", "x", "-", "1", ")", "/", "2.0", "o_y", "=", "(", "y", "-", "1", ")", "/", "2.0", "offset_matrix", "=", "np", ".", "array", "(", "[", "[", "1"...
https://github.com/tensorlayer/tensorlayer/blob/cb4eb896dd063e650ef22533ed6fa6056a71cad5/tensorlayer/prepro.py#L461-L485
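A sketch of the intended composition (assumes `tensorlayer` is importable): build a rotation about the Cartesian origin, then re-center it so it rotates about the image center. Note the function takes the sizes in (y, x) order:

    import numpy as np
    from tensorlayer.prepro import transform_matrix_offset_center

    theta = np.deg2rad(30.0)
    rotation = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                         [np.sin(theta),  np.cos(theta), 0.0],
                         [0.0,            0.0,           1.0]])
    h, w = 224, 224
    centered = transform_matrix_offset_center(rotation, h, w)  # rotates about the image center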
NVIDIA/NeMo
5b0c0b4dec12d87d3cd960846de4105309ce938e
nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py
python
MegatronGPTModel.configure_gradient_clipping
(self, *args, **kwargs)
PTL hook to configure gradients. We use gradient clipping implementation from megatron-lm.
PTL hook to configure gradients. We use gradient clipping implementation from megatron-lm.
[ "PTL", "hook", "to", "configure", "gradients", ".", "We", "use", "gradient", "clipping", "implementation", "from", "megatron", "-", "lm", "." ]
def configure_gradient_clipping(self, *args, **kwargs): """PTL hook to configure gradients. We use gradient clipping implementation from megatron-lm. """ clip_val = self.trainer.gradient_clip_val if clip_val is None: return clip_val = float(clip_val) if clip_val <= 0: return parameters = [param for param in self.model.parameters() if param.requires_grad] clip_grad_norm_fp32(parameters=parameters, max_norm=clip_val)
[ "def", "configure_gradient_clipping", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "clip_val", "=", "self", ".", "trainer", ".", "gradient_clip_val", "if", "clip_val", "is", "None", ":", "return", "clip_val", "=", "float", "(", "clip_v...
https://github.com/NVIDIA/NeMo/blob/5b0c0b4dec12d87d3cd960846de4105309ce938e/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py#L432-L445
HazyResearch/fonduer
c9fd6b91998cd708ab95aeee3dfaf47b9e549ffd
src/fonduer/learning/utils.py
python
mention_to_tokens
( mention: Mention, token_type: str = "words", lowercase: bool = False )
return [w.lower() if lowercase else w for w in tokens]
Extract tokens from the mention. :param mention: mention object. :param token_type: token type that wants to extract (e.g. words, lemmas, poses). :param lowercase: use lowercase or not. :return: The token list.
Extract tokens from the mention.
[ "Extract", "tokens", "from", "the", "mention", "." ]
def mention_to_tokens( mention: Mention, token_type: str = "words", lowercase: bool = False ) -> List[str]: """Extract tokens from the mention. :param mention: mention object. :param token_type: token type that wants to extract (e.g. words, lemmas, poses). :param lowercase: use lowercase or not. :return: The token list. """ tokens = getattr(mention.context.sentence, token_type) return [w.lower() if lowercase else w for w in tokens]
[ "def", "mention_to_tokens", "(", "mention", ":", "Mention", ",", "token_type", ":", "str", "=", "\"words\"", ",", "lowercase", ":", "bool", "=", "False", ")", "->", "List", "[", "str", "]", ":", "tokens", "=", "getattr", "(", "mention", ".", "context", ...
https://github.com/HazyResearch/fonduer/blob/c9fd6b91998cd708ab95aeee3dfaf47b9e549ffd/src/fonduer/learning/utils.py#L96-L107
USEPA/WNTR
2f92bab5736da6ef3591fc4b0229ec1ac6cd6fcc
wntr/epanet/util.py
python
FlowUnits.__int__
(self)
return int(value[0])
Convert to an EPANET Toolkit enum number.
Convert to an EPANET Toolkit enum number.
[ "Convert", "to", "an", "EPANET", "Toolkit", "enum", "number", "." ]
def __int__(self): """Convert to an EPANET Toolkit enum number.""" value = super().value return int(value[0])
[ "def", "__int__", "(", "self", ")", ":", "value", "=", "super", "(", ")", ".", "value", "return", "int", "(", "value", "[", "0", "]", ")" ]
https://github.com/USEPA/WNTR/blob/2f92bab5736da6ef3591fc4b0229ec1ac6cd6fcc/wntr/epanet/util.py#L151-L154
cronyo/cronyo
cd5abab0871b68bf31b18aac934303928130a441
cronyo/vendor/requests/api.py
python
patch
(url, data=None, **kwargs)
return request('patch', url, data=data, **kwargs)
r"""Sends a PATCH request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response
r"""Sends a PATCH request.
[ "r", "Sends", "a", "PATCH", "request", "." ]
def patch(url, data=None, **kwargs): r"""Sends a PATCH request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('patch', url, data=data, **kwargs)
[ "def", "patch", "(", "url", ",", "data", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "request", "(", "'patch'", ",", "url", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
https://github.com/cronyo/cronyo/blob/cd5abab0871b68bf31b18aac934303928130a441/cronyo/vendor/requests/api.py#L134-L146
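Standard call shape (the vendored module mirrors the public `requests` API; httpbin.org is a common echo endpoint used here purely for illustration):

    import requests

    resp = requests.patch(
        "https://httpbin.org/patch",
        json={"status": "active"},  # or data=... for form-encoded bodies
        timeout=5,
    )
    resp.raise_for_status()
    print(resp.json())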
facebookresearch/Large-Scale-VRD
7ababfe1023941c3653d7aebe9f835a47f5e8277
lib/utils/keypoints.py
python
heatmaps_to_keypoints
(maps, rois)
return xy_preds
Extract predicted keypoint locations from heatmaps. Output has shape (#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob) for each keypoint.
Extract predicted keypoint locations from heatmaps. Output has shape (#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob) for each keypoint.
[ "Extract", "predicted", "keypoint", "locations", "from", "heatmaps", ".", "Output", "has", "shape", "(", "#rois", "4", "#keypoints", ")", "with", "the", "4", "rows", "corresponding", "to", "(", "x", "y", "logit", "prob", ")", "for", "each", "keypoint", "."...
def heatmaps_to_keypoints(maps, rois): """Extract predicted keypoint locations from heatmaps. Output has shape (#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob) for each keypoint. """ # This function converts a discrete image coordinate in a HEATMAP_SIZE x # HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain # consistency with keypoints_to_heatmap_labels by using the conversion from # Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a # continuous coordinate. offset_x = rois[:, 0] offset_y = rois[:, 1] widths = rois[:, 2] - rois[:, 0] heights = rois[:, 3] - rois[:, 1] widths = np.maximum(widths, 1) heights = np.maximum(heights, 1) widths_ceil = np.ceil(widths) heights_ceil = np.ceil(heights) # NCHW to NHWC for use with OpenCV maps = np.transpose(maps, [0, 2, 3, 1]) min_size = cfg.KRCNN.INFERENCE_MIN_SIZE xy_preds = np.zeros( (len(rois), 4, cfg.KRCNN.NUM_KEYPOINTS), dtype=np.float32) for i in range(len(rois)): if min_size > 0: roi_map_width = int(np.maximum(widths_ceil[i], min_size)) roi_map_height = int(np.maximum(heights_ceil[i], min_size)) else: roi_map_width = widths_ceil[i] roi_map_height = heights_ceil[i] width_correction = widths[i] / roi_map_width height_correction = heights[i] / roi_map_height roi_map = cv2.resize( maps[i], (roi_map_width, roi_map_height), interpolation=cv2.INTER_CUBIC) # Bring back to CHW roi_map = np.transpose(roi_map, [2, 0, 1]) roi_map_probs = scores_to_probs(roi_map.copy()) w = roi_map.shape[2] for k in range(cfg.KRCNN.NUM_KEYPOINTS): pos = roi_map[k, :, :].argmax() x_int = pos % w y_int = (pos - x_int) // w assert (roi_map_probs[k, y_int, x_int] == roi_map_probs[k, :, :].max()) x = (x_int + 0.5) * width_correction y = (y_int + 0.5) * height_correction xy_preds[i, 0, k] = x + offset_x[i] xy_preds[i, 1, k] = y + offset_y[i] xy_preds[i, 2, k] = roi_map[k, y_int, x_int] xy_preds[i, 3, k] = roi_map_probs[k, y_int, x_int] return xy_preds
[ "def", "heatmaps_to_keypoints", "(", "maps", ",", "rois", ")", ":", "# This function converts a discrete image coordinate in a HEATMAP_SIZE x", "# HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain", "# consistency with keypoints_to_heatmap_labels by using the conversion from", ...
https://github.com/facebookresearch/Large-Scale-VRD/blob/7ababfe1023941c3653d7aebe9f835a47f5e8277/lib/utils/keypoints.py#L94-L148
areed1192/td-ameritrade-python-api
3378ca89f464df80a5b651f3e365f2f7d9c758d7
td/message.py
python
StreamingMessage.__init__
(self, message: str)
Initializes the `StreamingMessage` object. During the initialization process, the raw message will be decoded and parsed. Arguments: ---- message {str} -- The raw text message from a stream.
Initializes the `StreamingMessage` object.
[ "Initializes", "the", "StreamingMessage", "object", "." ]
def __init__(self, message: str) -> None: """Initializes the `StreamingMessage` object. During the initialization process, the raw message will be decoded and parsed. Arguments: ---- message {str} -- The raw text message from a stream. """ self.raw_message = message self.decoded_message = self.parse(message=self.raw_message)
[ "def", "__init__", "(", "self", ",", "message", ":", "str", ")", "->", "None", ":", "self", ".", "raw_message", "=", "message", "self", ".", "decoded_message", "=", "self", ".", "parse", "(", "message", "=", "self", ".", "raw_message", ")" ]
https://github.com/areed1192/td-ameritrade-python-api/blob/3378ca89f464df80a5b651f3e365f2f7d9c758d7/td/message.py#L16-L28
n0fate/chainbreaker
6f5a2c74bb922769e2f3d05f7ead6f36d2750277
pyDes.py
python
TripleDES.getIV
(self)
return self.__iv
getIV() -> string
getIV() -> string
[ "getIV", "()", "-", ">", "string" ]
def getIV(self): """getIV() -> string""" return self.__iv
[ "def", "getIV", "(", "self", ")", ":", "return", "self", ".", "__iv" ]
https://github.com/n0fate/chainbreaker/blob/6f5a2c74bb922769e2f3d05f7ead6f36d2750277/pyDes.py#L558-L560
qibinlou/SinaWeibo-Emotion-Classification
f336fc104abd68b0ec4180fe2ed80fafe49cb790
nltk/collocations.py
python
TrigramCollocationFinder.__init__
(self, word_fd, bigram_fd, wildcard_fd, trigram_fd)
Construct a TrigramCollocationFinder, given FreqDists for appearances of words, bigrams, two words with any word between them, and trigrams.
Construct a TrigramCollocationFinder, given FreqDists for appearances of words, bigrams, two words with any word between them, and trigrams.
[ "Construct", "a", "TrigramCollocationFinder", "given", "FreqDists", "for", "appearances", "of", "words", "bigrams", "two", "words", "with", "any", "word", "between", "them", "and", "trigrams", "." ]
def __init__(self, word_fd, bigram_fd, wildcard_fd, trigram_fd): """Construct a TrigramCollocationFinder, given FreqDists for appearances of words, bigrams, two words with any word between them, and trigrams. """ AbstractCollocationFinder.__init__(self, word_fd, trigram_fd) self.wildcard_fd = wildcard_fd self.bigram_fd = bigram_fd
[ "def", "__init__", "(", "self", ",", "word_fd", ",", "bigram_fd", ",", "wildcard_fd", ",", "trigram_fd", ")", ":", "AbstractCollocationFinder", ".", "__init__", "(", "self", ",", "word_fd", ",", "trigram_fd", ")", "self", ".", "wildcard_fd", "=", "wildcard_fd"...
https://github.com/qibinlou/SinaWeibo-Emotion-Classification/blob/f336fc104abd68b0ec4180fe2ed80fafe49cb790/nltk/collocations.py#L173-L180
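Callers normally build the finder through the `from_words` classmethod rather than this constructor, since `from_words` computes the four FreqDists for you; a short sketch (assumes NLTK is installed):

    from nltk.collocations import TrigramAssocMeasures, TrigramCollocationFinder

    words = "the quick brown fox jumps over the lazy dog".split()
    finder = TrigramCollocationFinder.from_words(words)
    print(finder.nbest(TrigramAssocMeasures.pmi, 3))  # top 3 trigrams by PMI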
raveberry/raveberry
df0186c94b238b57de86d3fd5c595dcd08a7c708
backend/core/settings/sound.py
python
connect_bluetooth
(request: WSGIRequest)
return HttpResponse("Connected. Set output device to activate.")
Connect to a given bluetooth device.
Connect to a given bluetooth device.
[ "Connect", "to", "a", "given", "bluetooth", "device", "." ]
def connect_bluetooth(request: WSGIRequest) -> HttpResponse: """Connect to a given bluetooth device.""" address = request.POST.get("address") if address is None or address == "": return HttpResponseBadRequest("No device selected") bluetoothctl = _start_bluetoothctl() if not bluetoothctl: return HttpResponseBadRequest("Stop scanning before connecting") assert bluetoothctl.stdin error = "" # A Function that acts as a timeout for unexpected errors (or timeouts) def _timeout() -> None: nonlocal error time.sleep(20) error = "Timed out" if bluetoothctl is not None: _stop_bluetoothctl(bluetoothctl) Thread(target=_timeout).start() # Sometimes, pairing hangs forever. Since connecting alone is enough, skip pairing. # bluetoothctl.stdin.write(b"pair " + address.encode() + b"\n") # bluetoothctl.stdin.flush() # while True: # line = _get_bluetoothctl_line(bluetoothctl) # if not line: # break # if re.match(".*Device " + address + " not available", line): # error = "Device unavailable" # break # if re.match(".*Failed to pair: org.bluez.Error.AlreadyExists", line): # break # if re.match(".*Pairing successful", line): # break # if error: # _stop_bluetoothctl() # return HttpResponseBadRequest(error) bluetoothctl.stdin.write(b"connect " + address.encode() + b"\n") bluetoothctl.stdin.flush() while True: line = _get_bluetoothctl_line(bluetoothctl) if not line: break if re.match(".*Device " + address + " not available", line): error = "Device unavailable" break if re.match(".*Failed to connect: org.bluez.Error.Failed", line): error = "Connect Failed" break if re.match(".*Failed to connect: org.bluez.Error.InProgress", line): error = "Connect in progress" break if re.match(".*Connection successful", line): break # trust the device to automatically reconnect when it is available again bluetoothctl.stdin.write(b"trust " + address.encode() + b"\n") bluetoothctl.stdin.flush() _stop_bluetoothctl(bluetoothctl) if error: return HttpResponseBadRequest(error) return HttpResponse("Connected. Set output device to activate.")
[ "def", "connect_bluetooth", "(", "request", ":", "WSGIRequest", ")", "->", "HttpResponse", ":", "address", "=", "request", ".", "POST", ".", "get", "(", "\"address\"", ")", "if", "address", "is", "None", "or", "address", "==", "\"\"", ":", "return", "HttpR...
https://github.com/raveberry/raveberry/blob/df0186c94b238b57de86d3fd5c595dcd08a7c708/backend/core/settings/sound.py#L115-L181
SCons/scons
309f0234d1d9cc76955818be47c5c722f577dac6
SCons/Tool/MSCommon/vs.py
python
query_versions
()
return versions
Query the system to get available versions of VS. A version is considered when a batfile is found.
Query the system to get available versions of VS. A version is considered when a batfile is found.
[ "Query", "the", "system", "to", "get", "available", "versions", "of", "VS", ".", "A", "version", "is", "considered", "when", "a", "batfile", "is", "found", "." ]
def query_versions(): """Query the system to get available versions of VS. A version is considered when a batfile is found.""" msvs_list = get_installed_visual_studios() versions = [msvs.version for msvs in msvs_list] return versions
[ "def", "query_versions", "(", ")", ":", "msvs_list", "=", "get_installed_visual_studios", "(", ")", "versions", "=", "[", "msvs", ".", "version", "for", "msvs", "in", "msvs_list", "]", "return", "versions" ]
https://github.com/SCons/scons/blob/309f0234d1d9cc76955818be47c5c722f577dac6/SCons/Tool/MSCommon/vs.py#L600-L605
fedspendingtransparency/usaspending-api
b13bd5bcba0369ff8512f61a34745626c3969391
usaspending_api/common/sqs/sqs_work_dispatcher.py
python
SQSWorkDispatcher.dispatch_by_message_attribute
( self, message_transformer, *additional_job_args, worker_process_name=None, **additional_job_kwargs )
return self._dispatch(job, worker_process_name, exit_handler, *job_args, **job_kwargs)
Use a provided function to derive the callable job and its arguments from attributes within the queue message Args: message_transformer (Callable[[SQS.Message], dict]): A callable function that takes in the SQS message as its argument and returns a dict of:: { '_job': Callable, # Required. The job to run '_exit_handler': Callable # Optional. A callable to be called when handling an :attr:`EXIT_SIGNALS` signal, giving the opportunity to perform cleanup before the process exits. Gets the ``job_args`` and any other items in this dict as args passed to it when run. '_job_args': tuple, # Optional. Partial or full collection of job args as a list or tuple. 'named_job_arg1': Any, # Optional. A named argument to be used as a keyword arg when calling the job. ``named_job_arg1`` is representative, and the actual names of the ``_job``'s params should be used here. This is the preferred way to pass args to the ``_job`` 'named_job_argN: Any # Optional: Same as above. As many as are needed. } worker_process_name (str): Name given to the newly created child process. If not already set, defaults to the name of the provided job Callable additional_job_args: Zero or many variadic args that are unnamed (not keyword) that may be passed along with those from the message to the callable job. NOTE: Passing args to the job this way can be done when needed, but it is preferred to use keyword-args through :param:`additional_job_kwargs` to be more explicit what arg values are going to what params. additional_job_kwargs: Zero or many variadic keyword-args that may be passed along with those from the message to the callable job Examples: Given job function:: def my_job(a, b, c, d): pass # do something The preferred way to dispatch it is with a ``message_transformer`` that returns a dictionary and, if necessary, additional args passed as keyword args:: def work_routing_message_transformer(msg): job = job_strategy_factory.from(msg.message_attributes["origin"]["StringValue"]) return {"_job": job, "a": msg.body, "b": db.get_org(msg.body)} dispatcher.dispatch_by_message_attribute(work_routing_message_transformer, c=some_tracking_id, d=datetime.datetime.now()) Returns: bool: True if a message was found on the queue and dispatched, otherwise False if nothing on the queue Raises: SystemExit(1): If it can't connect to the queue or receive messages QueueWorkerProcessError: Under various conditions where the child worker process fails to run the job QueueWorkDispatcherError: Under various conditions where the parent dispatcher process can't orchestrate work execution, monitoring, or exit-signal-handling
Use a provided function to derive the callable job and its arguments from attributes within the queue message
[ "Use", "a", "provided", "function", "to", "derive", "the", "callable", "job", "and", "its", "arguments", "from", "attributes", "within", "the", "queue", "message" ]
def dispatch_by_message_attribute( self, message_transformer, *additional_job_args, worker_process_name=None, **additional_job_kwargs ): """Use a provided function to derive the callable job and its arguments from attributes within the queue message Args: message_transformer (Callable[[SQS.Message], dict]): A callable function that takes in the SQS message as its argument and returns a dict of:: { '_job': Callable, # Required. The job to run '_exit_handler': Callable # Optional. A callable to be called when handling an :attr:`EXIT_SIGNALS` signal, giving the opportunity to perform cleanup before the process exits. Gets the ``job_args`` and any other items in this dict as args passed to it when run. '_job_args': tuple, # Optional. Partial or full collection of job args as a list or tuple. 'named_job_arg1': Any, # Optional. A named argument to be used as a keyword arg when calling the job. ``named_job_arg1`` is representative, and the actual names of the ``_job``'s params should be used here. This is the preferred way to pass args to the ``_job`` 'named_job_argN: Any # Optional: Same as above. As many as are needed. } worker_process_name (str): Name given to the newly created child process. If not already set, defaults to the name of the provided job Callable additional_job_args: Zero or many variadic args that are unnamed (not keyword) that may be passed along with those from the message to the callable job. NOTE: Passing args to the job this way can be done when needed, but it is preferred to use keyword-args through :param:`additional_job_kwargs` to be more explicit what arg values are going to what params. additional_job_kwargs: Zero or many variadic keyword-args that may be passed along with those from the message to the callable job Examples: Given job function:: def my_job(a, b, c, d): pass # do something The preferred way to dispatch it is with a ``message_transformer`` that returns a dictionary and, if necessary, additional args passed as keyword args:: def work_routing_message_transformer(msg): job = job_strategy_factory.from(msg.message_attributes["origin"]["StringValue"]) return {"_job": job, "a": msg.body, "b": db.get_org(msg.body)} dispatcher.dispatch_by_message_attribute(work_routing_message_transformer, c=some_tracking_id, d=datetime.datetime.now()) Returns: bool: True if a message was found on the queue and dispatched, otherwise False if nothing on the queue Raises: SystemExit(1): If it can't connect to the queue or receive messages QueueWorkerProcessError: Under various conditions where the child worker process fails to run the job QueueWorkDispatcherError: Under various conditions where the parent dispatcher process can't orchestrate work execution, monitoring, or exit-signal-handling """ self._dequeue_message(self._long_poll_seconds) if self._current_sqs_message is None: return False job_args = () job_kwargs = {} results = message_transformer(self._current_sqs_message) def parse_message_transformer_results(_job, _exit_handler=None, _job_args=(), **_job_kwargs): """Use to map dictionary items to this functions params, and packs up remaining items in a separate dictionary. Then returns the organized data as a 4-tuple """ return _job, _exit_handler, _job_args, _job_kwargs # Parse the result components from the returned dictionary job, exit_handler, msg_args, msg_kwargs = parse_message_transformer_results(**results) if isinstance(msg_args, list) or isinstance(msg_args, tuple): job_args = tuple(msg_args) + additional_job_args else: job_args = (msg_args,) + additional_job_args # single element if msg_kwargs.keys() & additional_job_kwargs.keys(): raise KeyError( "message_transformer produces named keyword args that are duplicative of those in " "additional_job_kwargs and would be overwritten" ) job_kwargs = {**msg_kwargs, **additional_job_kwargs} return self._dispatch(job, worker_process_name, exit_handler, *job_args, **job_kwargs)
[ "def", "dispatch_by_message_attribute", "(", "self", ",", "message_transformer", ",", "*", "additional_job_args", ",", "worker_process_name", "=", "None", ",", "*", "*", "additional_job_kwargs", ")", ":", "self", ".", "_dequeue_message", "(", "self", ".", "_long_pol...
https://github.com/fedspendingtransparency/usaspending-api/blob/b13bd5bcba0369ff8512f61a34745626c3969391/usaspending_api/common/sqs/sqs_work_dispatcher.py#L325-L416
turicas/brasil.io
f1c371fe828a090510259a5027b49e2e651936b4
covid19/management/commands/update_bulletin.py
python
Command.add_arguments
(self, parser)
[]
def add_arguments(self, parser): parser.add_argument("date", type=str, help="Date in format YYYY-MM-DD") parser.add_argument("image_url", type=str, help="Bulletin image URL") parser.add_argument("--csv", type=Path, help="CSV file to update the bulletin JSON field")
[ "def", "add_arguments", "(", "self", ",", "parser", ")", ":", "parser", ".", "add_argument", "(", "\"date\"", ",", "type", "=", "str", ",", "help", "=", "\"Date in format YYYY-MM-DD\"", ")", "parser", ".", "add_argument", "(", "\"image_url\"", ",", "type", "...
https://github.com/turicas/brasil.io/blob/f1c371fe828a090510259a5027b49e2e651936b4/covid19/management/commands/update_bulletin.py#L14-L17
chribsen/simple-machine-learning-examples
dc94e52a4cebdc8bb959ff88b81ff8cfeca25022
venv/lib/python2.7/site-packages/numpy/core/numeric.py
python
argwhere
(a)
return transpose(nonzero(a))
Find the indices of array elements that are non-zero, grouped by element. Parameters ---------- a : array_like Input data. Returns ------- index_array : ndarray Indices of elements that are non-zero. Indices are grouped by element. See Also -------- where, nonzero Notes ----- ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``. The output of ``argwhere`` is not suitable for indexing arrays. For this purpose use ``where(a)`` instead. Examples -------- >>> x = np.arange(6).reshape(2,3) >>> x array([[0, 1, 2], [3, 4, 5]]) >>> np.argwhere(x>1) array([[0, 2], [1, 0], [1, 1], [1, 2]])
Find the indices of array elements that are non-zero, grouped by element.
[ "Find", "the", "indices", "of", "array", "elements", "that", "are", "non", "-", "zero", "grouped", "by", "element", "." ]
def argwhere(a): """ Find the indices of array elements that are non-zero, grouped by element. Parameters ---------- a : array_like Input data. Returns ------- index_array : ndarray Indices of elements that are non-zero. Indices are grouped by element. See Also -------- where, nonzero Notes ----- ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``. The output of ``argwhere`` is not suitable for indexing arrays. For this purpose use ``where(a)`` instead. Examples -------- >>> x = np.arange(6).reshape(2,3) >>> x array([[0, 1, 2], [3, 4, 5]]) >>> np.argwhere(x>1) array([[0, 2], [1, 0], [1, 1], [1, 2]]) """ return transpose(nonzero(a))
[ "def", "argwhere", "(", "a", ")", ":", "return", "transpose", "(", "nonzero", "(", "a", ")", ")" ]
https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/numpy/core/numeric.py#L763-L801
FederatedAI/FATE
32540492623568ecd1afcb367360133616e02fa3
examples/pipeline/homo_nn/runner.py
python
HomoNNExample.__str__
(self)
return self.name
[]
def __str__(self): return self.name
[ "def", "__str__", "(", "self", ")", ":", "return", "self", ".", "name" ]
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/examples/pipeline/homo_nn/runner.py#L31-L32
deepfakes/faceswap
09c7d8aca3c608d1afad941ea78e9fd9b64d9219
plugins/train/model/_base.py
python
State._new_session_id
(self)
return session_id
Generate a new session id. Returns 1 if this is a new model, or the last session id + 1 if it is a pre-existing model. Returns ------- int The newly generated session id
Generate a new session id. Returns 1 if this is a new model, or the last session id + 1 if it is a pre-existing model.
[ "Generate", "a", "new", "session", "id", ".", "Returns", "1", "if", "this", "is", "a", "new", "model", "or", "the", "last", "session", "id", "+", "1", "if", "it", "is", "a", "pre", "-", "existing", "model", "." ]
def _new_session_id(self): """ Generate a new session id. Returns 1 if this is a new model, or the last session id + 1 if it is a pre-existing model. Returns ------- int The newly generated session id """ if not self._sessions: session_id = 1 else: session_id = max(int(key) for key in self._sessions.keys()) + 1 logger.debug(session_id) return session_id
[ "def", "_new_session_id", "(", "self", ")", ":", "if", "not", "self", ".", "_sessions", ":", "session_id", "=", "1", "else", ":", "session_id", "=", "max", "(", "int", "(", "key", ")", "for", "key", "in", "self", ".", "_sessions", ".", "keys", "(", ...
https://github.com/deepfakes/faceswap/blob/09c7d8aca3c608d1afad941ea78e9fd9b64d9219/plugins/train/model/_base.py#L1394-L1408
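The pattern is simply max-key-plus-one over the persisted session mapping, falling back to 1 when the mapping is empty; as a standalone sketch (the `sessions` dict is hypothetical):

    sessions = {"1": {}, "3": {}}  # session info keyed by stringified id
    next_id = max(int(k) for k in sessions) + 1 if sessions else 1
    print(next_id)  # 4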
allegroai/clearml
5953dc6eefadcdfcc2bdbb6a0da32be58823a5af
clearml/storage/helper.py
python
_FileStorageDriver._check_container_name
(self, container_name)
Check if the container name is valid :param container_name: Container name :type container_name: ``str``
Check if the container name is valid
[ "Check", "if", "the", "container", "name", "is", "valid" ]
def _check_container_name(self, container_name): """ Check if the container name is valid :param container_name: Container name :type container_name: ``str`` """ if '/' in container_name or '\\' in container_name: raise ValueError("Container name \"{}\" cannot contain \\ or / ".format(container_name))
[ "def", "_check_container_name", "(", "self", ",", "container_name", ")", ":", "if", "'/'", "in", "container_name", "or", "'\\\\'", "in", "container_name", ":", "raise", "ValueError", "(", "\"Container name \\\"{}\\\" cannot contain \\\\ or / \"", ".", "format", "(", "...
https://github.com/allegroai/clearml/blob/5953dc6eefadcdfcc2bdbb6a0da32be58823a5af/clearml/storage/helper.py#L1909-L1918
flow-project/flow
a511c41c48e6b928bb2060de8ad1ef3c3e3d9554
flow/core/kernel/vehicle/base.py
python
KernelVehicle.get_road_grade
(self, veh_id)
Return the road-grade of the vehicle with veh_id.
Return the road-grade of the vehicle with veh_id.
[ "Return", "the", "road", "-", "grade", "of", "the", "vehicle", "with", "veh_id", "." ]
def get_road_grade(self, veh_id): """Return the road-grade of the vehicle with veh_id.""" pass
[ "def", "get_road_grade", "(", "self", ",", "veh_id", ")", ":", "pass" ]
https://github.com/flow-project/flow/blob/a511c41c48e6b928bb2060de8ad1ef3c3e3d9554/flow/core/kernel/vehicle/base.py#L774-L776
ahmetcemturan/SFACT
7576e29ba72b33e5058049b77b7b558875542747
fabmetheus_utilities/settings.py
python
HelpPage.addToDialog
( self, gridPosition )
Add this to the dialog.
Add this to the dialog.
[ "Add", "this", "to", "the", "dialog", "." ]
def addToDialog( self, gridPosition ): "Add this to the dialog." capitalizedName = getEachWordCapitalized( self.name ) self.displayButton = Tkinter.Button( gridPosition.master, activebackground = 'black', activeforeground = 'white', command = self.openPage, text = capitalizedName ) if len( capitalizedName ) < 12: self.displayButton['width'] = 10 self.displayButton.grid( row = gridPosition.row, column = self.column, columnspan = 2 )
[ "def", "addToDialog", "(", "self", ",", "gridPosition", ")", ":", "capitalizedName", "=", "getEachWordCapitalized", "(", "self", ".", "name", ")", "self", ".", "displayButton", "=", "Tkinter", ".", "Button", "(", "gridPosition", ".", "master", ",", "activeback...
https://github.com/ahmetcemturan/SFACT/blob/7576e29ba72b33e5058049b77b7b558875542747/fabmetheus_utilities/settings.py#L1213-L1219
deepfakes/faceswap
09c7d8aca3c608d1afad941ea78e9fd9b64d9219
plugins/train/model/dlight.py
python
Model.decoder_b_fast
(self)
return KerasModel([input_], outputs=outputs, name="decoder_b_fast")
DeLight Fast Decoder B(new face) Network
DeLight Fast Decoder B(new face) Network
[ "DeLight", "Fast", "Decoder", "B", "(", "new", "face", ")", "Network" ]
def decoder_b_fast(self): """ DeLight Fast Decoder B(new face) Network """ input_ = Input(shape=(4, 4, 1024)) dec_b_complexity = 512 mask_complexity = 128 var_xy = input_ var_xy = UpscaleBlock(512, scale_factor=self.upscale_ratio, activation="leakyrelu")(var_xy) var_x = var_xy var_x = Upscale2xBlock(dec_b_complexity, activation="leakyrelu", fast=True)(var_x) var_x = Upscale2xBlock(dec_b_complexity // 2, activation="leakyrelu", fast=True)(var_x) var_x = Upscale2xBlock(dec_b_complexity // 4, activation="leakyrelu", fast=True)(var_x) var_x = Upscale2xBlock(dec_b_complexity // 8, activation="leakyrelu", fast=True)(var_x) var_x = Conv2DOutput(3, 5, name="face_out")(var_x) outputs = [var_x] if self.config.get("learn_mask", False): var_y = var_xy # mask decoder var_y = Upscale2xBlock(mask_complexity, activation="leakyrelu", fast=False)(var_y) var_y = Upscale2xBlock(mask_complexity // 2, activation="leakyrelu", fast=False)(var_y) var_y = Upscale2xBlock(mask_complexity // 4, activation="leakyrelu", fast=False)(var_y) var_y = Upscale2xBlock(mask_complexity // 8, activation="leakyrelu", fast=False)(var_y) var_y = Conv2DOutput(1, 5, name="mask_out")(var_y) outputs.append(var_y) return KerasModel([input_], outputs=outputs, name="decoder_b_fast")
[ "def", "decoder_b_fast", "(", "self", ")", ":", "input_", "=", "Input", "(", "shape", "=", "(", "4", ",", "4", ",", "1024", ")", ")", "dec_b_complexity", "=", "512", "mask_complexity", "=", "128", "var_xy", "=", "input_", "var_xy", "=", "UpscaleBlock", ...
https://github.com/deepfakes/faceswap/blob/09c7d8aca3c608d1afad941ea78e9fd9b64d9219/plugins/train/model/dlight.py#L131-L164
securityclippy/elasticintel
aa08d3e9f5ab1c000128e95161139ce97ff0e334
ingest_feed_lambda/numpy/linalg/linalg.py
python
norm
(x, ord=None, axis=None, keepdims=False)
Matrix or vector norm. This function is able to return one of eight different matrix norms, or one of an infinite number of vector norms (described below), depending on the value of the ``ord`` parameter. Parameters ---------- x : array_like Input array. If `axis` is None, `x` must be 1-D or 2-D. ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional Order of the norm (see table under ``Notes``). inf means numpy's `inf` object. axis : {int, 2-tuple of ints, None}, optional If `axis` is an integer, it specifies the axis of `x` along which to compute the vector norms. If `axis` is a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of these matrices are computed. If `axis` is None then either a vector norm (when `x` is 1-D) or a matrix norm (when `x` is 2-D) is returned. keepdims : bool, optional If this is set to True, the axes which are normed over are left in the result as dimensions with size one. With this option the result will broadcast correctly against the original `x`. .. versionadded:: 1.10.0 Returns ------- n : float or ndarray Norm of the matrix or vector(s). Notes ----- For values of ``ord <= 0``, the result is, strictly speaking, not a mathematical 'norm', but it may still be useful for various numerical purposes. The following norms can be calculated: ===== ============================ ========================== ord norm for matrices norm for vectors ===== ============================ ========================== None Frobenius norm 2-norm 'fro' Frobenius norm -- 'nuc' nuclear norm -- inf max(sum(abs(x), axis=1)) max(abs(x)) -inf min(sum(abs(x), axis=1)) min(abs(x)) 0 -- sum(x != 0) 1 max(sum(abs(x), axis=0)) as below -1 min(sum(abs(x), axis=0)) as below 2 2-norm (largest sing. value) as below -2 smallest singular value as below other -- sum(abs(x)**ord)**(1./ord) ===== ============================ ========================== The Frobenius norm is given by [1]_: :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` The nuclear norm is the sum of the singular values. References ---------- .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 Examples -------- >>> from numpy import linalg as LA >>> a = np.arange(9) - 4 >>> a array([-4, -3, -2, -1, 0, 1, 2, 3, 4]) >>> b = a.reshape((3, 3)) >>> b array([[-4, -3, -2], [-1, 0, 1], [ 2, 3, 4]]) >>> LA.norm(a) 7.745966692414834 >>> LA.norm(b) 7.745966692414834 >>> LA.norm(b, 'fro') 7.745966692414834 >>> LA.norm(a, np.inf) 4.0 >>> LA.norm(b, np.inf) 9.0 >>> LA.norm(a, -np.inf) 0.0 >>> LA.norm(b, -np.inf) 2.0 >>> LA.norm(a, 1) 20.0 >>> LA.norm(b, 1) 7.0 >>> LA.norm(a, -1) -4.6566128774142013e-010 >>> LA.norm(b, -1) 6.0 >>> LA.norm(a, 2) 7.745966692414834 >>> LA.norm(b, 2) 7.3484692283495345 >>> LA.norm(a, -2) nan >>> LA.norm(b, -2) 1.8570331885190563e-016 >>> LA.norm(a, 3) 5.8480354764257312 >>> LA.norm(a, -3) nan Using the `axis` argument to compute vector norms: >>> c = np.array([[ 1, 2, 3], ... [-1, 1, 4]]) >>> LA.norm(c, axis=0) array([ 1.41421356, 2.23606798, 5. ]) >>> LA.norm(c, axis=1) array([ 3.74165739, 4.24264069]) >>> LA.norm(c, ord=1, axis=1) array([ 6., 6.]) Using the `axis` argument to compute matrix norms: >>> m = np.arange(8).reshape(2,2,2) >>> LA.norm(m, axis=(1,2)) array([ 3.74165739, 11.22497216]) >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) (3.7416573867739413, 11.224972160321824)
Matrix or vector norm.
[ "Matrix", "or", "vector", "norm", "." ]
def norm(x, ord=None, axis=None, keepdims=False): """ Matrix or vector norm. This function is able to return one of eight different matrix norms, or one of an infinite number of vector norms (described below), depending on the value of the ``ord`` parameter. Parameters ---------- x : array_like Input array. If `axis` is None, `x` must be 1-D or 2-D. ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional Order of the norm (see table under ``Notes``). inf means numpy's `inf` object. axis : {int, 2-tuple of ints, None}, optional If `axis` is an integer, it specifies the axis of `x` along which to compute the vector norms. If `axis` is a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of these matrices are computed. If `axis` is None then either a vector norm (when `x` is 1-D) or a matrix norm (when `x` is 2-D) is returned. keepdims : bool, optional If this is set to True, the axes which are normed over are left in the result as dimensions with size one. With this option the result will broadcast correctly against the original `x`. .. versionadded:: 1.10.0 Returns ------- n : float or ndarray Norm of the matrix or vector(s). Notes ----- For values of ``ord <= 0``, the result is, strictly speaking, not a mathematical 'norm', but it may still be useful for various numerical purposes. The following norms can be calculated: ===== ============================ ========================== ord norm for matrices norm for vectors ===== ============================ ========================== None Frobenius norm 2-norm 'fro' Frobenius norm -- 'nuc' nuclear norm -- inf max(sum(abs(x), axis=1)) max(abs(x)) -inf min(sum(abs(x), axis=1)) min(abs(x)) 0 -- sum(x != 0) 1 max(sum(abs(x), axis=0)) as below -1 min(sum(abs(x), axis=0)) as below 2 2-norm (largest sing. value) as below -2 smallest singular value as below other -- sum(abs(x)**ord)**(1./ord) ===== ============================ ========================== The Frobenius norm is given by [1]_: :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` The nuclear norm is the sum of the singular values. References ---------- .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 Examples -------- >>> from numpy import linalg as LA >>> a = np.arange(9) - 4 >>> a array([-4, -3, -2, -1, 0, 1, 2, 3, 4]) >>> b = a.reshape((3, 3)) >>> b array([[-4, -3, -2], [-1, 0, 1], [ 2, 3, 4]]) >>> LA.norm(a) 7.745966692414834 >>> LA.norm(b) 7.745966692414834 >>> LA.norm(b, 'fro') 7.745966692414834 >>> LA.norm(a, np.inf) 4.0 >>> LA.norm(b, np.inf) 9.0 >>> LA.norm(a, -np.inf) 0.0 >>> LA.norm(b, -np.inf) 2.0 >>> LA.norm(a, 1) 20.0 >>> LA.norm(b, 1) 7.0 >>> LA.norm(a, -1) -4.6566128774142013e-010 >>> LA.norm(b, -1) 6.0 >>> LA.norm(a, 2) 7.745966692414834 >>> LA.norm(b, 2) 7.3484692283495345 >>> LA.norm(a, -2) nan >>> LA.norm(b, -2) 1.8570331885190563e-016 >>> LA.norm(a, 3) 5.8480354764257312 >>> LA.norm(a, -3) nan Using the `axis` argument to compute vector norms: >>> c = np.array([[ 1, 2, 3], ... [-1, 1, 4]]) >>> LA.norm(c, axis=0) array([ 1.41421356, 2.23606798, 5. 
]) >>> LA.norm(c, axis=1) array([ 3.74165739, 4.24264069]) >>> LA.norm(c, ord=1, axis=1) array([ 6., 6.]) Using the `axis` argument to compute matrix norms: >>> m = np.arange(8).reshape(2,2,2) >>> LA.norm(m, axis=(1,2)) array([ 3.74165739, 11.22497216]) >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) (3.7416573867739413, 11.224972160321824) """ x = asarray(x) if not issubclass(x.dtype.type, (inexact, object_)): x = x.astype(float) # Immediately handle some default, simple, fast, and common cases. if axis is None: ndim = x.ndim if ((ord is None) or (ord in ('f', 'fro') and ndim == 2) or (ord == 2 and ndim == 1)): x = x.ravel(order='K') if isComplexType(x.dtype.type): sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag) else: sqnorm = dot(x, x) ret = sqrt(sqnorm) if keepdims: ret = ret.reshape(ndim*[1]) return ret # Normalize the `axis` argument to a tuple. nd = x.ndim if axis is None: axis = tuple(range(nd)) elif not isinstance(axis, tuple): try: axis = int(axis) except: raise TypeError("'axis' must be None, an integer or a tuple of integers") axis = (axis,) if len(axis) == 1: if ord == Inf: return abs(x).max(axis=axis, keepdims=keepdims) elif ord == -Inf: return abs(x).min(axis=axis, keepdims=keepdims) elif ord == 0: # Zero norm return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims) elif ord == 1: # special case for speedup return add.reduce(abs(x), axis=axis, keepdims=keepdims) elif ord is None or ord == 2: # special case for speedup s = (x.conj() * x).real return sqrt(add.reduce(s, axis=axis, keepdims=keepdims)) else: try: ord + 1 except TypeError: raise ValueError("Invalid norm order for vectors.") if x.dtype.type is longdouble: # Convert to a float type, so integer arrays give # float results. Don't apply asfarray to longdouble arrays, # because it will downcast to float64. absx = abs(x) else: absx = x if isComplexType(x.dtype.type) else asfarray(x) if absx.dtype is x.dtype: absx = abs(absx) else: # if the type changed, we can safely overwrite absx abs(absx, out=absx) absx **= ord return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord) elif len(axis) == 2: row_axis, col_axis = axis row_axis = normalize_axis_index(row_axis, nd) col_axis = normalize_axis_index(col_axis, nd) if row_axis == col_axis: raise ValueError('Duplicate axes given.') if ord == 2: ret = _multi_svd_norm(x, row_axis, col_axis, amax) elif ord == -2: ret = _multi_svd_norm(x, row_axis, col_axis, amin) elif ord == 1: if col_axis > row_axis: col_axis -= 1 ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis) elif ord == Inf: if row_axis > col_axis: row_axis -= 1 ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis) elif ord == -1: if col_axis > row_axis: col_axis -= 1 ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis) elif ord == -Inf: if row_axis > col_axis: row_axis -= 1 ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis) elif ord in [None, 'fro', 'f']: ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) elif ord == 'nuc': ret = _multi_svd_norm(x, row_axis, col_axis, sum) else: raise ValueError("Invalid norm order for matrices.") if keepdims: ret_shape = list(x.shape) ret_shape[axis[0]] = 1 ret_shape[axis[1]] = 1 ret = ret.reshape(ret_shape) return ret else: raise ValueError("Improper number of dimensions to norm.")
[ "def", "norm", "(", "x", ",", "ord", "=", "None", ",", "axis", "=", "None", ",", "keepdims", "=", "False", ")", ":", "x", "=", "asarray", "(", "x", ")", "if", "not", "issubclass", "(", "x", ".", "dtype", ".", "type", ",", "(", "inexact", ",", ...
https://github.com/securityclippy/elasticintel/blob/aa08d3e9f5ab1c000128e95161139ce97ff0e334/ingest_feed_lambda/numpy/linalg/linalg.py#L2014-L2257
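The norm record above carries a full doctest suite; the essential cases from its table (vector 2-norm by default, Frobenius norm for matrices, ord- and axis-selected variants) run as-is against standard numpy:

import numpy as np

a = np.arange(9) - 4
b = a.reshape(3, 3)
print(np.linalg.norm(a))           # 7.745966692414834, the vector 2-norm
print(np.linalg.norm(b, 'fro'))    # same value: Frobenius norm of the reshape
print(np.linalg.norm(b, np.inf))   # 9.0: max absolute row sum
print(np.linalg.norm(b, axis=0))   # per-column vector norms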
Tautulli/Tautulli
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
lib/bs4/element.py
python
PageElement._find_one
(self, method, name, attrs, text, **kwargs)
return r
[]
def _find_one(self, method, name, attrs, text, **kwargs): r = None l = method(name, attrs, text, 1, **kwargs) if l: r = l[0] return r
[ "def", "_find_one", "(", "self", ",", "method", ",", "name", ",", "attrs", ",", "text", ",", "*", "*", "kwargs", ")", ":", "r", "=", "None", "l", "=", "method", "(", "name", ",", "attrs", ",", "text", ",", "1", ",", "*", "*", "kwargs", ")", "...
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/bs4/element.py#L773-L778
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/pandas-0.24.2-py3.7-macosx-10.9-x86_64.egg/pandas/core/series.py
python
Series.corr
(self, other, method='pearson', min_periods=None)
Compute correlation with `other` Series, excluding missing values. Parameters ---------- other : Series method : {'pearson', 'kendall', 'spearman'} or callable * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarray and returning a float .. versionadded:: 0.24.0 min_periods : int, optional Minimum number of observations needed to have a valid result Returns ------- correlation : float Examples -------- >>> histogram_intersection = lambda a, b: np.minimum(a, b ... ).sum().round(decimals=1) >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3
Compute correlation with `other` Series, excluding missing values.
[ "Compute", "correlation", "with", "other", "Series", "excluding", "missing", "values", "." ]
def corr(self, other, method='pearson', min_periods=None): """ Compute correlation with `other` Series, excluding missing values. Parameters ---------- other : Series method : {'pearson', 'kendall', 'spearman'} or callable * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarray and returning a float .. versionadded:: 0.24.0 min_periods : int, optional Minimum number of observations needed to have a valid result Returns ------- correlation : float Examples -------- >>> histogram_intersection = lambda a, b: np.minimum(a, b ... ).sum().round(decimals=1) >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ this, other = self.align(other, join='inner', copy=False) if len(this) == 0: return np.nan if method in ['pearson', 'spearman', 'kendall'] or callable(method): return nanops.nancorr(this.values, other.values, method=method, min_periods=min_periods) raise ValueError("method must be either 'pearson', " "'spearman', or 'kendall', '{method}' " "was supplied".format(method=method))
[ "def", "corr", "(", "self", ",", "other", ",", "method", "=", "'pearson'", ",", "min_periods", "=", "None", ")", ":", "this", ",", "other", "=", "self", ".", "align", "(", "other", ",", "join", "=", "'inner'", ",", "copy", "=", "False", ")", "if", ...
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/pandas-0.24.2-py3.7-macosx-10.9-x86_64.egg/pandas/core/series.py#L2068-L2109
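The Series.corr record above notes that method accepts a callable as well as the three named methods (pandas >= 0.24); a short runnable illustration, reusing the docstring's histogram-intersection example:

import numpy as np
import pandas as pd

s1 = pd.Series([.2, .0, .6, .2])
s2 = pd.Series([.3, .6, .0, .1])
print(s1.corr(s2))                      # Pearson by default
print(s1.corr(s2, method="spearman"))   # rank correlation

def histogram_intersection(a, b):       # any callable on two 1-d arrays works
    return np.minimum(a, b).sum().round(decimals=1)

print(s1.corr(s2, method=histogram_intersection))   # 0.3, as documented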
Tautulli/Tautulli
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
lib/cherrypy/__init__.py
python
_cherrypy_pydoc_resolve
(thing, forceload=0)
return _pydoc._builtin_resolve(thing, forceload)
Given an object or a path to an object, get the object and its name.
Given an object or a path to an object, get the object and its name.
[ "Given", "an", "object", "or", "a", "path", "to", "an", "object", "get", "the", "object", "and", "its", "name", "." ]
def _cherrypy_pydoc_resolve(thing, forceload=0): """Given an object or a path to an object, get the object and its name.""" if isinstance(thing, _ThreadLocalProxy): thing = getattr(serving, thing.__attrname__) return _pydoc._builtin_resolve(thing, forceload)
[ "def", "_cherrypy_pydoc_resolve", "(", "thing", ",", "forceload", "=", "0", ")", ":", "if", "isinstance", "(", "thing", ",", "_ThreadLocalProxy", ")", ":", "thing", "=", "getattr", "(", "serving", ",", "thing", ".", "__attrname__", ")", "return", "_pydoc", ...
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/cherrypy/__init__.py#L291-L295
allenai/allennlp
a3d71254fcc0f3615910e9c3d48874515edf53e0
allennlp/modules/elmo_lstm.py
python
ElmoLstm._lstm_forward
( self, inputs: PackedSequence, initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, )
return stacked_sequence_outputs, final_state_tuple
# Parameters inputs : `PackedSequence`, required. A batch first `PackedSequence` to run the stacked LSTM over. initial_state : `Tuple[torch.Tensor, torch.Tensor]`, optional, (default = `None`) A tuple (state, memory) representing the initial hidden state and memory of the LSTM, with shape (num_layers, batch_size, 2 * hidden_size) and (num_layers, batch_size, 2 * cell_size) respectively. # Returns output_sequence : `torch.FloatTensor` The encoded sequence of shape (num_layers, batch_size, sequence_length, hidden_size) final_states : `Tuple[torch.FloatTensor, torch.FloatTensor]` The per-layer final (state, memory) states of the LSTM, with shape (num_layers, batch_size, 2 * hidden_size) and (num_layers, batch_size, 2 * cell_size) respectively. The last dimension is duplicated because it contains the state/memory for both the forward and backward layers.
# Parameters
[ "#", "Parameters" ]
def _lstm_forward( self, inputs: PackedSequence, initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: """ # Parameters inputs : `PackedSequence`, required. A batch first `PackedSequence` to run the stacked LSTM over. initial_state : `Tuple[torch.Tensor, torch.Tensor]`, optional, (default = `None`) A tuple (state, memory) representing the initial hidden state and memory of the LSTM, with shape (num_layers, batch_size, 2 * hidden_size) and (num_layers, batch_size, 2 * cell_size) respectively. # Returns output_sequence : `torch.FloatTensor` The encoded sequence of shape (num_layers, batch_size, sequence_length, hidden_size) final_states : `Tuple[torch.FloatTensor, torch.FloatTensor]` The per-layer final (state, memory) states of the LSTM, with shape (num_layers, batch_size, 2 * hidden_size) and (num_layers, batch_size, 2 * cell_size) respectively. The last dimension is duplicated because it contains the state/memory for both the forward and backward layers. """ if initial_state is None: hidden_states: List[Optional[Tuple[torch.Tensor, torch.Tensor]]] = [None] * len( self.forward_layers ) elif initial_state[0].size()[0] != len(self.forward_layers): raise ConfigurationError( "Initial states were passed to forward() but the number of " "initial states does not match the number of layers." ) else: hidden_states = list(zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0))) inputs, batch_lengths = pad_packed_sequence(inputs, batch_first=True) forward_output_sequence = inputs backward_output_sequence = inputs final_states = [] sequence_outputs = [] for layer_index, state in enumerate(hidden_states): forward_layer = getattr(self, "forward_layer_{}".format(layer_index)) backward_layer = getattr(self, "backward_layer_{}".format(layer_index)) forward_cache = forward_output_sequence backward_cache = backward_output_sequence forward_state: Optional[Tuple[Any, Any]] = None backward_state: Optional[Tuple[Any, Any]] = None if state is not None: forward_hidden_state, backward_hidden_state = state[0].split(self.hidden_size, 2) forward_memory_state, backward_memory_state = state[1].split(self.cell_size, 2) forward_state = (forward_hidden_state, forward_memory_state) backward_state = (backward_hidden_state, backward_memory_state) forward_output_sequence, forward_state = forward_layer( forward_output_sequence, batch_lengths, forward_state ) backward_output_sequence, backward_state = backward_layer( backward_output_sequence, batch_lengths, backward_state ) # Skip connections, just adding the input to the output. if layer_index != 0: forward_output_sequence += forward_cache backward_output_sequence += backward_cache sequence_outputs.append( torch.cat([forward_output_sequence, backward_output_sequence], -1) ) # Append the state tuples in a list, so that we can return # the final states for all the layers. final_states.append( ( torch.cat([forward_state[0], backward_state[0]], -1), # type: ignore torch.cat([forward_state[1], backward_state[1]], -1), # type: ignore ) ) stacked_sequence_outputs: torch.FloatTensor = torch.stack(sequence_outputs) # Stack the hidden state and memory for each layer into 2 tensors of shape # (num_layers, batch_size, hidden_size) and (num_layers, batch_size, cell_size) # respectively. 
final_hidden_states, final_memory_states = zip(*final_states) final_state_tuple: Tuple[torch.FloatTensor, torch.FloatTensor] = ( torch.cat(final_hidden_states, 0), torch.cat(final_memory_states, 0), ) return stacked_sequence_outputs, final_state_tuple
[ "def", "_lstm_forward", "(", "self", ",", "inputs", ":", "PackedSequence", ",", "initial_state", ":", "Optional", "[", "Tuple", "[", "torch", ".", "Tensor", ",", "torch", ".", "Tensor", "]", "]", "=", "None", ",", ")", "->", "Tuple", "[", "torch", ".",...
https://github.com/allenai/allennlp/blob/a3d71254fcc0f3615910e9c3d48874515edf53e0/allennlp/modules/elmo_lstm.py#L169-L259
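_lstm_forward above unpacks its PackedSequence input with pad_packed_sequence before running the per-layer forward/backward passes; the packing utilities it leans on are plain PyTorch, so the shape contract can be checked in isolation (toy sizes, not ELMo's real dimensions):

import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

inputs = torch.randn(2, 5, 8)    # (batch, seq_len, features), batch first
lengths = torch.tensor([5, 3])   # per-sequence valid lengths, descending
packed = pack_padded_sequence(inputs, lengths, batch_first=True)
padded, recovered = pad_packed_sequence(packed, batch_first=True)
assert padded.shape == (2, 5, 8) and recovered.tolist() == [5, 3]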
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
custom/abt/reports/filters_2020.py
python
LevelFourFilter.options
(self)
return [(loc['id'], loc['name']) for loc in level_4s]
[]
def options(self): level_1 = self.request.GET.get('level_1') level_2 = self.request.GET.get('level_2') level_3 = self.request.GET.get('level_3') if level_3: l3_ids = [level_3] elif level_2: l2_ids = [level_2] l3_ids = [loc['id'] for loc in self.get_level_3s(l2_ids)] elif level_1: l1_ids = [level_1] l2_ids = [loc['id'] for loc in self.get_level_2s(l1_ids)] l3_ids = [loc['id'] for loc in self.get_level_3s(l2_ids)] else: l3_ids = None data_types_by_tag = get_data_types_by_tag(self.domain) level_4s = get_fixture_dicts( self.domain, data_types_by_tag["level_4_eco"]._id, filter_in={'level_3_eco': l3_ids}, filter_out={'other': '1'}, ) return [(loc['id'], loc['name']) for loc in level_4s]
[ "def", "options", "(", "self", ")", ":", "level_1", "=", "self", ".", "request", ".", "GET", ".", "get", "(", "'level_1'", ")", "level_2", "=", "self", ".", "request", ".", "GET", ".", "get", "(", "'level_2'", ")", "level_3", "=", "self", ".", "req...
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/custom/abt/reports/filters_2020.py#L81-L103
atc-project/atomic-threat-coverage
89a48db5be0ee500ad158b7db32a0945ec872331
scripts/customer.py
python
Customer.render_template
(self, template_type)
return True
Render template with data in it template_type: - "markdown" - "confluence"
Render template with data in it template_type: - "markdown" - "confluence"
[ "Render", "template", "with", "data", "in", "it", "template_type", ":", "-", "markdown", "-", "confluence" ]
def render_template(self, template_type): """Render template with data in it template_type: - "markdown" - "confluence" """ if template_type not in ["markdown", "confluence"]: raise Exception( "Bad template_type. Available values: " + "[\"markdown\", \"confluence\"]") self.cu_fields.update( {'description': self.description.strip()} ) # Transform variables to arrays if not provided correctly in yaml if isinstance(self.data_needed, str): self.cu_fields.update({'dataneeded': [self.data_needed]}) if isinstance(self.logging_policies, str): self.cu_fields.update({'loggingpolicy': [self.logging_policies]}) detectionrule_with_path = [] for title in self.detection_rules: if title is not None: name = rules_by_title.get(title)[1] else: name = '' dr = (title, name) detectionrule_with_path.append(dr) self.cu_fields.update({'detectionrule': detectionrule_with_path}) usecase_with_path = [] if self.use_cases is not None: for title in self.use_cases: if title is not None: name = usecases_by_title.get(title)[1] else: name = '' uc = (title, name) usecase_with_path.append(uc) self.cu_fields.update({'usecase': usecase_with_path}) # Get proper template if template_type == "markdown": template = env\ .get_template('markdown_customer_template.md.j2') elif template_type == "confluence": template = env.get_template( 'confluence_customer_template.html.j2') self.cu_fields.update( {'confluence_viewpage_url': ATCconfig.get('confluence_viewpage_url')}) if not self.logging_policies: self.logging_policies = ["None", ] logging_policies_with_id = [] for lp in self.logging_policies: if lp != "None" and self.apipath and self.auth and self.space: logging_policies_id = str(ATCutils.confluence_get_page_id( self.apipath, self.auth, self.space, lp)) else: logging_policies_id = "" lp = (lp, logging_policies_id) logging_policies_with_id.append(lp) self.cu_fields.update({'loggingpolicy': logging_policies_with_id}) if not self.data_needed: self.data_needed = ["None", ] data_needed_with_id = [] for dn in self.data_needed: if dn != "None" and self.apipath and self.auth and self.space: data_needed_id = str(ATCutils.confluence_get_page_id( self.apipath, self.auth, self.space, dn)) else: data_needed_id = "" dn = (dn, data_needed_id) data_needed_with_id.append(dn) self.cu_fields.update({'data_needed': data_needed_with_id}) usecases_with_id = [] if self.use_cases is not None: for uc in self.use_cases: if uc != "None" and self.apipath and self.auth and self.space: usecase_id = str(ATCutils.confluence_get_page_id( self.apipath, self.auth, self.space, uc)) else: usecase_id = "" uc = (uc, usecase_id) usecases_with_id.append(uc) self.cu_fields.update({'usecase': usecases_with_id}) detection_rules_with_id = [] for dn in self.detection_rules: if dn != "None" and self.apipath and self.auth and self.space: detection_rules_id = str(ATCutils.confluence_get_page_id( self.apipath, self.auth, self.space, dn)) else: detection_rules_id = "" dn = (dn, detection_rules_id) detection_rules_with_id.append(dn) self.cu_fields.update({'detectionrule': detection_rules_with_id}) self.content = template.render(self.cu_fields) return True
[ "def", "render_template", "(", "self", ",", "template_type", ")", ":", "if", "template_type", "not", "in", "[", "\"markdown\"", ",", "\"confluence\"", "]", ":", "raise", "Exception", "(", "\"Bad template_type. Available values: \"", "+", "\"[\\\"markdown\\\", \\\"conflu...
https://github.com/atc-project/atomic-threat-coverage/blob/89a48db5be0ee500ad158b7db32a0945ec872331/scripts/customer.py#L111-L234
inkandswitch/livebook
93c8d467734787366ad084fc3566bf5cbe249c51
public/pypyjs/modules/numpy/lib/recfunctions.py
python
zip_descr
(seqarrays, flatten=False)
return np.dtype(newdtype).descr
Combine the dtype description of a series of arrays. Parameters ---------- seqarrays : sequence of arrays Sequence of arrays flatten : {boolean}, optional Whether to collapse nested descriptions.
Combine the dtype description of a series of arrays.
[ "Combine", "the", "dtype", "description", "of", "a", "series", "of", "arrays", "." ]
def zip_descr(seqarrays, flatten=False): """ Combine the dtype description of a series of arrays. Parameters ---------- seqarrays : sequence of arrays Sequence of arrays flatten : {boolean}, optional Whether to collapse nested descriptions. """ newdtype = [] if flatten: for a in seqarrays: newdtype.extend(flatten_descr(a.dtype)) else: for a in seqarrays: current = a.dtype names = current.names or () if len(names) > 1: newdtype.append(('', current.descr)) else: newdtype.extend(current.descr) return np.dtype(newdtype).descr
[ "def", "zip_descr", "(", "seqarrays", ",", "flatten", "=", "False", ")", ":", "newdtype", "=", "[", "]", "if", "flatten", ":", "for", "a", "in", "seqarrays", ":", "newdtype", ".", "extend", "(", "flatten_descr", "(", "a", ".", "dtype", ")", ")", "els...
https://github.com/inkandswitch/livebook/blob/93c8d467734787366ad084fc3566bf5cbe249c51/public/pypyjs/modules/numpy/lib/recfunctions.py#L161-L184
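zip_descr above concatenates dtype descriptors, nesting any multi-field dtype under an anonymous ('', descr) entry unless flatten is set; the np.dtype(...).descr round-trip it relies on (empty names are auto-assigned f0, f1, ...) is stable numpy behavior and can be reproduced without the version-dependent recfunctions helper:

import numpy as np

a = np.zeros(3, dtype=[("x", "<f8")])
b = np.zeros(3, dtype=[("y", "<i8"), ("z", "<i8")])
# what zip_descr computes for (a, b) without flatten:
# b has more than one field, so its descr is nested under an anonymous name
newdtype = list(a.dtype.descr) + [("", b.dtype.descr)]
print(np.dtype(newdtype).descr)   # [('x', '<f8'), ('f1', [('y', '<i8'), ('z', '<i8')])]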
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/min/bdb.py
python
get_break
(self, filename, lineno)
return filename in self.breaks and \ lineno in self.breaks[filename]
Return True if there is a breakpoint for filename:lineno.
Return True if there is a breakpoint for filename:lineno.
[ "Return", "True", "if", "there", "is", "a", "breakpoint", "for", "filename", ":", "lineno", "." ]
def get_break(self, filename, lineno): """Return True if there is a breakpoint for filename:lineno.""" filename = self.canonic(filename) return filename in self.breaks and \ lineno in self.breaks[filename]
[ "def", "get_break", "(", "self", ",", "filename", ",", "lineno", ")", ":", "filename", "=", "self", ".", "canonic", "(", "filename", ")", "return", "filename", "in", "self", ".", "breaks", "and", "lineno", "in", "self", ".", "breaks", "[", "filename", ...
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/min/bdb.py#L494-L498
nextstrain/augur
a004d3f8f0b661fb0fb88cf07a43acc01d74de6a
augur/filter.py
python
filter_kwargs_to_str
(kwargs)
return json.dumps(kwarg_list)
Convert a dictionary of kwargs to a JSON string for downstream reporting. This structured string can be converted back into a Python data structure later for more sophisticated reporting by specific kwargs. This function excludes data types from arguments like pandas DataFrames and also converts floating point numbers to a fixed precision for better readability and reproducibility. Parameters ---------- kwargs : dict Dictionary of kwargs passed to a given filter function. Returns ------- str : String representation of the kwargs for reporting. >>> sequence_index = pd.DataFrame([{"strain": "strain1", "ACGT": 28000}, {"strain": "strain2", "ACGT": 26000}, {"strain": "strain3", "ACGT": 5000}]).set_index("strain") >>> exclude_by = [(filter_by_sequence_length, {"sequence_index": sequence_index, "min_length": 27000})] >>> filter_kwargs_to_str(exclude_by[0][1]) '[["min_length", 27000]]' >>> exclude_by = [(filter_by_date, {"max_date": numeric_date("2020-04-01"), "min_date": numeric_date("2020-03-01")})] >>> filter_kwargs_to_str(exclude_by[0][1]) '[["max_date", 2020.25], ["min_date", 2020.17]]'
Convert a dictionary of kwargs to a JSON string for downstream reporting.
[ "Convert", "a", "dictionary", "of", "kwargs", "to", "a", "JSON", "string", "for", "downstream", "reporting", "." ]
def filter_kwargs_to_str(kwargs): """Convert a dictionary of kwargs to a JSON string for downstream reporting. This structured string can be converted back into a Python data structure later for more sophisticated reporting by specific kwargs. This function excludes data types from arguments like pandas DataFrames and also converts floating point numbers to a fixed precision for better readability and reproducibility. Parameters ---------- kwargs : dict Dictionary of kwargs passed to a given filter function. Returns ------- str : String representation of the kwargs for reporting. >>> sequence_index = pd.DataFrame([{"strain": "strain1", "ACGT": 28000}, {"strain": "strain2", "ACGT": 26000}, {"strain": "strain3", "ACGT": 5000}]).set_index("strain") >>> exclude_by = [(filter_by_sequence_length, {"sequence_index": sequence_index, "min_length": 27000})] >>> filter_kwargs_to_str(exclude_by[0][1]) '[["min_length", 27000]]' >>> exclude_by = [(filter_by_date, {"max_date": numeric_date("2020-04-01"), "min_date": numeric_date("2020-03-01")})] >>> filter_kwargs_to_str(exclude_by[0][1]) '[["max_date", 2020.25], ["min_date", 2020.17]]' """ # Sort keys prior to processing to guarantee the same output order # regardless of the input order. sorted_keys = sorted(kwargs.keys()) kwarg_list = [] for key in sorted_keys: value = kwargs[key] # Handle special cases for data types that we want to represent # differently from their defaults or not at all. if isinstance(value, pd.DataFrame): continue elif isinstance(value, float): value = round(value, 2) kwarg_list.append((key, value)) return json.dumps(kwarg_list)
[ "def", "filter_kwargs_to_str", "(", "kwargs", ")", ":", "# Sort keys prior to processing to guarantee the same output order", "# regardless of the input order.", "sorted_keys", "=", "sorted", "(", "kwargs", ".", "keys", "(", ")", ")", "kwarg_list", "=", "[", "]", "for", ...
https://github.com/nextstrain/augur/blob/a004d3f8f0b661fb0fb88cf07a43acc01d74de6a/augur/filter.py#L645-L691
jeongyoonlee/Kaggler
71370d3dabcf27d23b29b369e73c6f62eb894c7a
kaggler/model/automl.py
python
BaseAutoML.__init__
(self, params, space, n_est=500, n_stop=10, sample_size=SAMPLE_SIZE, valid_size=VALID_SIZE, shuffle=True, feature_selection=True, n_fs=10, fs_th=0., fs_pct=.0, hyperparam_opt=True, n_hpopt=100, minimize=True, n_random_col=10, random_state=RANDOM_SEED)
Initialize an optimized regressor class object. Args: params (dict): default parameters for a regressor space (dict): parameter space for hyperopt to explore n_est (int): the number of iterations for a regressor n_stop (int): early stopping rounds for a regressor sample_size (int): the number of samples for feature selection and parameter search valid_size (float): the fraction of samples for feature selection and/or hyperparameter tuning shuffle (bool): if true, it uses random sampling for sampling and training/validation split. Otherwise last sample_size and valid_size will be used. feature_selection (bool): whether to select features n_fs (int): the number of iterations for feature selection fs_th (float): the feature importance threshold. Features with importances higher than it will be selected. fs_pct (float): the feature importance percentile. Features with importances higher than bottom x% of random features hyperparam_opt (bool): whether to search optimal parameters n_hpopt (int): the number of iterations for hyper-parameter optimization minimize (bool): whether the lower the metric is the better n_random_col (int): the number of random columns to be added for feature selection random_state (None, int, or numpy.random.RandomState): random seed or a RandomState instance
Initialize an optimized regressor class object.
[ "Initialize", "an", "optimized", "regressor", "class", "object", "." ]
def __init__(self, params, space, n_est=500, n_stop=10, sample_size=SAMPLE_SIZE, valid_size=VALID_SIZE, shuffle=True, feature_selection=True, n_fs=10, fs_th=0., fs_pct=.0, hyperparam_opt=True, n_hpopt=100, minimize=True, n_random_col=10, random_state=RANDOM_SEED): """Initialize an optimized regressor class object. Args: params (dict): default parameters for a regressor space (dict): parameter space for hyperopt to explore n_est (int): the number of iterations for a regressor n_stop (int): early stopping rounds for a regressor sample_size (int): the number of samples for feature selection and parameter search valid_size (float): the fraction of samples for feature selection and/or hyperparameter tuning shuffle (bool): if true, it uses random sampling for sampling and training/validation split. Otherwise last sample_size and valid_size will be used. feature_selection (bool): whether to select features n_fs (int): the number of iterations for feature selection fs_th (float): the feature importance threshold. Features with importances higher than it will be selected. fs_pct (float): the feature importance percentile. Features with importances higher than bottom x% of random features hyperparam_opt (bool): whether to search optimal parameters n_hpopt (int): the number of iterations for hyper-parameter optimization minimize (bool): whether the lower the metric is the better n_random_col (int): the number of random columns to be added for feature selection random_state (None, int, or numpy.random.RandomState): random seed or a RandomState instance """ self.params = params self.space = space for param in [p for p in params if p in self.space]: del self.space[param] self.n_est = n_est self.n_stop = n_stop self.n_fs = n_fs self.n_hpopt = n_hpopt self.sample_size = sample_size self.valid_size = valid_size self.shuffle = shuffle self.feature_selection = feature_selection self.fs_th = fs_th self.fs_pct = fs_pct self.hyperparam_opt = hyperparam_opt if minimize: self.loss_sign = 1 else: self.loss_sign = -1 self.n_random_col = n_random_col if random_state is None or isinstance(random_state, int): self.random_state = np.random.RandomState(random_state) elif isinstance(random_state, np.random.RandomState): self.random_state = random_state else: raise ValueError('Invalid input for random_state: {}'.format(random_state)) self.n_best = -1 self.model = None self.features = []
[ "def", "__init__", "(", "self", ",", "params", ",", "space", ",", "n_est", "=", "500", ",", "n_stop", "=", "10", ",", "sample_size", "=", "SAMPLE_SIZE", ",", "valid_size", "=", "VALID_SIZE", ",", "shuffle", "=", "True", ",", "feature_selection", "=", "Tr...
https://github.com/jeongyoonlee/Kaggler/blob/71370d3dabcf27d23b29b369e73c6f62eb894c7a/kaggler/model/automl.py#L59-L116
beeware/ouroboros
a29123c6fab6a807caffbb7587cf548e0c370296
ouroboros/ipaddress.py
python
collapse_addresses
(addresses)
return iter(_collapse_addresses_recursive(sorted( addrs + nets, key=_BaseNetwork._get_networks_key)))
Collapse a list of IP objects. Example: collapse_addresses([IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]) -> [IPv4Network('192.0.2.0/24')] Args: addresses: An iterator of IPv4Network or IPv6Network objects. Returns: An iterator of the collapsed IPv(4|6)Network objects. Raises: TypeError: If passed a list of mixed version objects.
Collapse a list of IP objects.
[ "Collapse", "a", "list", "of", "IP", "objects", "." ]
def collapse_addresses(addresses): """Collapse a list of IP objects. Example: collapse_addresses([IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]) -> [IPv4Network('192.0.2.0/24')] Args: addresses: An iterator of IPv4Network or IPv6Network objects. Returns: An iterator of the collapsed IPv(4|6)Network objects. Raises: TypeError: If passed a list of mixed version objects. """ i = 0 addrs = [] ips = [] nets = [] # split IP addresses and networks for ip in addresses: if isinstance(ip, _BaseAddress): if ips and ips[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, ips[-1])) ips.append(ip) elif ip._prefixlen == ip._max_prefixlen: if ips and ips[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, ips[-1])) try: ips.append(ip.ip) except AttributeError: ips.append(ip.network_address) else: if nets and nets[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, nets[-1])) nets.append(ip) # sort and dedup ips = sorted(set(ips)) nets = sorted(set(nets)) while i < len(ips): (first, last) = _find_address_range(ips[i:]) i = ips.index(last) + 1 addrs.extend(summarize_address_range(first, last)) return iter(_collapse_addresses_recursive(sorted( addrs + nets, key=_BaseNetwork._get_networks_key)))
[ "def", "collapse_addresses", "(", "addresses", ")", ":", "i", "=", "0", "addrs", "=", "[", "]", "ips", "=", "[", "]", "nets", "=", "[", "]", "# split IP addresses and networks", "for", "ip", "in", "addresses", ":", "if", "isinstance", "(", "ip", ",", "...
https://github.com/beeware/ouroboros/blob/a29123c6fab6a807caffbb7587cf548e0c370296/ouroboros/ipaddress.py#L309-L363
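The collapse_addresses record above is the ouroboros copy of the CPython stdlib function; the documented behavior is unchanged in today's ipaddress module and runs directly:

import ipaddress

nets = [ipaddress.ip_network("192.0.2.0/25"), ipaddress.ip_network("192.0.2.128/25")]
print(list(ipaddress.collapse_addresses(nets)))   # [IPv4Network('192.0.2.0/24')]
# mixing IPv4 and IPv6 objects raises TypeError, as the docstring promises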
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/config.py
python
async_create_default_config
(hass: HomeAssistant)
return await hass.async_add_executor_job( _write_default_config, hass.config.config_dir )
Create a default configuration file in given configuration directory. Return if creation was successful.
Create a default configuration file in given configuration directory.
[ "Create", "a", "default", "configuration", "file", "in", "given", "configuration", "directory", "." ]
async def async_create_default_config(hass: HomeAssistant) -> bool: """Create a default configuration file in given configuration directory. Return if creation was successful. """ return await hass.async_add_executor_job( _write_default_config, hass.config.config_dir )
[ "async", "def", "async_create_default_config", "(", "hass", ":", "HomeAssistant", ")", "->", "bool", ":", "return", "await", "hass", ".", "async_add_executor_job", "(", "_write_default_config", ",", "hass", ".", "config", ".", "config_dir", ")" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/config.py#L284-L291
googleads/google-ads-python
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
google/ads/googleads/v8/services/services/customer_label_service/client.py
python
CustomerLabelServiceClient.from_service_account_file
(cls, filename: str, *args, **kwargs)
return cls(*args, **kwargs)
Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: CustomerLabelServiceClient: The constructed client.
Creates an instance of this client using the provided credentials file.
[ "Creates", "an", "instance", "of", "this", "client", "using", "the", "provided", "credentials", "file", "." ]
def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: CustomerLabelServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file( filename ) kwargs["credentials"] = credentials return cls(*args, **kwargs)
[ "def", "from_service_account_file", "(", "cls", ",", "filename", ":", "str", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "credentials", "=", "service_account", ".", "Credentials", ".", "from_service_account_file", "(", "filename", ")", "kwargs", "[", ...
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v8/services/services/customer_label_service/client.py#L129-L146
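from_service_account_file above is the usual google-cloud style alternate constructor; the typical call shape is sketched below, with the module path taken from the record's file path and "key.json" standing in for a real service-account key file (hypothetical — this will not run without one):

from google.ads.googleads.v8.services.services.customer_label_service.client import (
    CustomerLabelServiceClient,
)

# "key.json" is a placeholder; a real service-account key file is required
client = CustomerLabelServiceClient.from_service_account_file("key.json")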
jaywink/socialhome
c3178b044936a5c57a502ab6ed2b4f43c8e076ca
socialhome/search/views.py
python
GlobalSearchView.get_context_data
(self, *args, **kwargs)
return context
Add tags results to the context.
Add tags results to the context.
[ "Add", "tags", "results", "to", "the", "context", "." ]
def get_context_data(self, *args, **kwargs): """Add tags results to the context.""" context = super().get_context_data(*args, **kwargs) tags = self.get_tags_qs() tags_context = { 'paginator': None, 'page_obj': None, 'is_paginated': False, 'object_list': tags, } page_size = self.get_paginate_by(tags) if page_size: try: paginator, page, queryset, is_paginated = self.paginate_queryset(tags, page_size) except Http404: pass else: tags_context = { 'paginator': paginator, 'page_obj': page, 'is_paginated': is_paginated, 'object_list': queryset, } context['tags'] = tags_context return context
[ "def", "get_context_data", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "context", "=", "super", "(", ")", ".", "get_context_data", "(", "*", "args", ",", "*", "*", "kwargs", ")", "tags", "=", "self", ".", "get_tags_qs", "(", "...
https://github.com/jaywink/socialhome/blob/c3178b044936a5c57a502ab6ed2b4f43c8e076ca/socialhome/search/views.py#L24-L48
FederatedAI/FATE
32540492623568ecd1afcb367360133616e02fa3
python/fate_client/pipeline/param/feldman_verifiable_sum_param.py
python
FeldmanVerifiableSumParam.check
(self)
[]
def check(self): if isinstance(self.sum_cols, list): for idx in self.sum_cols: if not isinstance(idx, int): raise ValueError(f"type mismatch, column_indexes with element {idx} (type is {type(idx)})") if not isinstance(self.q_n, int): raise ValueError(f"Init param's q_n {self.q_n} not supported, should be int type, type is {type(self.q_n)}") if self.q_n < 0: raise ValueError(f"param's q_n {self.q_n} not supported, should be non-negative int value") elif self.q_n > 16: raise ValueError(f"param's q_n {self.q_n} not supported, should be less than or equal to 16")
[ "def", "check", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "sum_cols", ",", "list", ")", ":", "for", "idx", "in", "self", ".", "sum_cols", ":", "if", "not", "isinstance", "(", "idx", ",", "int", ")", ":", "raise", "ValueError", "(...
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/fate_client/pipeline/param/feldman_verifiable_sum_param.py#L43-L55
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/min/threading.py
python
Event.wait
(self, timeout=None)
Block until the internal flag is true. If the internal flag is true on entry, return immediately. Otherwise, block until another thread calls set() to set the flag to true, or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). This method returns the internal flag on exit, so it will always return True except if a timeout is given and the operation times out.
Block until the internal flag is true.
[ "Block", "until", "the", "internal", "flag", "is", "true", "." ]
def wait(self, timeout=None): """Block until the internal flag is true. If the internal flag is true on entry, return immediately. Otherwise, block until another thread calls set() to set the flag to true, or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). This method returns the internal flag on exit, so it will always return True except if a timeout is given and the operation times out. """ with self._cond: signaled = self._flag if not signaled: signaled = self._cond.wait(timeout) return signaled
[ "def", "wait", "(", "self", ",", "timeout", "=", "None", ")", ":", "with", "self", ".", "_cond", ":", "signaled", "=", "self", ".", "_flag", "if", "not", "signaled", ":", "signaled", "=", "self", ".", "_cond", ".", "wait", "(", "timeout", ")", "ret...
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/min/threading.py#L582-L601
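Event.wait above returns the internal flag itself, so a timed-out wait yields False rather than raising; a minimal stdlib demonstration:

import threading

event = threading.Event()
print(event.wait(timeout=0.1))   # False: timed out, flag still unset

threading.Timer(0.05, event.set).start()
print(event.wait(timeout=1.0))   # True: set() arrived before the timeout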
accel-brain/accel-brain-code
86f489dc9be001a3bae6d053f48d6b57c0bedb95
Algorithm-Wars/algowars/truesampler/volatility_conditional_true_sampler.py
python
VolatilityConditionalTrueSampler.set_end_date
(self, value)
setter
setter
[ "setter" ]
def set_end_date(self, value): ''' setter ''' self.__end_date = value
[ "def", "set_end_date", "(", "self", ",", "value", ")", ":", "self", ".", "__end_date", "=", "value" ]
https://github.com/accel-brain/accel-brain-code/blob/86f489dc9be001a3bae6d053f48d6b57c0bedb95/Algorithm-Wars/algowars/truesampler/volatility_conditional_true_sampler.py#L367-L369
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/hunterdouglas_powerview/cover.py
python
PowerViewShade._async_update_shade_from_group
(self)
Update with new data from the coordinator.
Update with new data from the coordinator.
[ "Update", "with", "new", "data", "from", "the", "coordinator", "." ]
def _async_update_shade_from_group(self): """Update with new data from the coordinator.""" if self._scheduled_transition_update: # If a transition is in progress # the data will be wrong return self._async_process_new_shade_data(self.coordinator.data[self._shade.id]) self.async_write_ha_state()
[ "def", "_async_update_shade_from_group", "(", "self", ")", ":", "if", "self", ".", "_scheduled_transition_update", ":", "# If a transition is in progress", "# the data will be wrong", "return", "self", ".", "_async_process_new_shade_data", "(", "self", ".", "coordinator", "...
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/hunterdouglas_powerview/cover.py#L278-L285
tensorlayer/hyperpose
e34c6acb91144e1d090466324f99c521fbf47cdb
hyperpose/Model/__init__.py
python
get_preprocessor
(config)
get a preprocessor class based on the specified model_type get the preprocessor class of the specified kind of model to help users directly construct their own train pipeline (rather than using the integrated train pipeline) when needed. the preprocessor class is able to construct a preprocessor object that converts the image and annotation to the model output format for training. Parameters ---------- arg1 : config config object returned by the Config.get_config function Returns ------- class a preprocessor class of the specified kind of model
get a preprocessor class based on the specified model_type
[ "get", "a", "preprocessor", "class", "based", "on", "the", "specified", "model_type" ]
def get_preprocessor(config): '''get a preprocessor class based on the specified model_type get the preprocessor class of the specified kind of model to help users directly construct their own train pipeline (rather than using the integrated train pipeline) when needed. the preprocessor class is able to construct a preprocessor object that converts the image and annotation to the model output format for training. Parameters ---------- arg1 : config config object returned by the Config.get_config function Returns ------- class a preprocessor class of the specified kind of model ''' model_type = config.model.model_type if config.model.custom_preprocessor is not None: return config.model.custom_preprocessor else: if model_type == MODEL.Openpose or model_type == MODEL.LightweightOpenpose or model_type == MODEL.MobilenetThinOpenpose: from .openpose import PreProcessor elif model_type == MODEL.PoseProposal: from .pose_proposal import PreProcessor elif model_type == MODEL.Pifpaf: from .pifpaf import PreProcessor return PreProcessor
[ "def", "get_preprocessor", "(", "config", ")", ":", "model_type", "=", "config", ".", "model", ".", "model_type", "if", "(", "config", ".", "model", ".", "custom_preprocessor", "is", "not", "None", ")", ":", "return", "config", ".", "model", ".", "custom_p...
https://github.com/tensorlayer/hyperpose/blob/e34c6acb91144e1d090466324f99c521fbf47cdb/hyperpose/Model/__init__.py#L298-L327
CvvT/dumpDex
92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1
python/idaapi.py
python
auto_display_t.__init__
(self, *args)
__init__(self) -> auto_display_t
__init__(self) -> auto_display_t
[ "__init__", "(", "self", ")", "-", ">", "auto_display_t" ]
def __init__(self, *args): """ __init__(self) -> auto_display_t """ this = _idaapi.new_auto_display_t(*args) try: self.this.append(this) except: self.this = this
[ "def", "__init__", "(", "self", ",", "*", "args", ")", ":", "this", "=", "_idaapi", ".", "new_auto_display_t", "(", "*", "args", ")", "try", ":", "self", ".", "this", ".", "append", "(", "this", ")", "except", ":", "self", ".", "this", "=", "this" ...
https://github.com/CvvT/dumpDex/blob/92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1/python/idaapi.py#L21285-L21291
jsocol/pystatsd
f3f304b4b2c3d5eddeb9f4977d9c82c64c37a052
statsd/client/base.py
python
PipelineBase.__enter__
(self)
return self
[]
def __enter__(self): return self
[ "def", "__enter__", "(", "self", ")", ":", "return", "self" ]
https://github.com/jsocol/pystatsd/blob/f3f304b4b2c3d5eddeb9f4977d9c82c64c37a052/statsd/client/base.py#L95-L96
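__enter__ returning self is what enables with-statement usage of pystatsd pipelines (with __exit__ flushing the buffered stats); the sketch assumes a statsd daemon address on the default port, though nothing needs to be listening since transport is UDP:

from statsd import StatsClient

statsd = StatsClient("localhost", 8125)
with statsd.pipeline() as pipe:   # __enter__ hands back the pipeline itself
    pipe.incr("jobs.started")
    pipe.timing("jobs.duration", 42)
# leaving the block sends the buffered stats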
TRI-ML/packnet-sfm
f59b1d615777a9987285a10e45b5d87b0369fa7d
packnet_sfm/trainers/horovod_trainer.py
python
HorovodTrainer.__init__
(self, **kwargs)
[]
def __init__(self, **kwargs): super().__init__(**kwargs) hvd.init() torch.set_num_threads(int(os.environ.get("OMP_NUM_THREADS", 1))) torch.cuda.set_device(hvd.local_rank()) torch.backends.cudnn.benchmark = True self.avg_loss = AvgMeter(50) self.dtype = kwargs.get("dtype", None)
[ "def", "__init__", "(", "self", ",", "*", "*", "kwargs", ")", ":", "super", "(", ")", ".", "__init__", "(", "*", "*", "kwargs", ")", "hvd", ".", "init", "(", ")", "torch", ".", "set_num_threads", "(", "int", "(", "os", ".", "environ", ".", "get",...
https://github.com/TRI-ML/packnet-sfm/blob/f59b1d615777a9987285a10e45b5d87b0369fa7d/packnet_sfm/trainers/horovod_trainer.py#L13-L22
mozman/ezdxf
59d0fc2ea63f5cf82293428f5931da7e9f9718e9
src/ezdxf/math/linalg.py
python
Matrix.set_col
( self, index: int, items: Union[float, Iterable[float]] = 1.0 )
Set column values to a fixed value or from an iterable of floats.
Set column values to a fixed value or from an iterable of floats.
[ "Set", "column", "values", "to", "a", "fixed", "value", "or", "from", "an", "iterable", "of", "floats", "." ]
def set_col( self, index: int, items: Union[float, Iterable[float]] = 1.0 ) -> None: """Set column values to a fixed value or from an iterable of floats.""" if isinstance(items, (float, int)): items = [float(items)] * self.nrows for row, item in zip(self.rows(), items): row[index] = item
[ "def", "set_col", "(", "self", ",", "index", ":", "int", ",", "items", ":", "Union", "[", "float", ",", "Iterable", "[", "float", "]", "]", "=", "1.0", ")", "->", "None", ":", "if", "isinstance", "(", "items", ",", "(", "float", ",", "int", ")", ...
https://github.com/mozman/ezdxf/blob/59d0fc2ea63f5cf82293428f5931da7e9f9718e9/src/ezdxf/math/linalg.py#L223-L231
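set_col above broadcasts a scalar into one value per row before zipping rows with values; the same pattern on a plain list-of-lists matrix (standalone sketch, not ezdxf's Matrix class):

from typing import Iterable, Union

def set_col(rows: list, index: int, items: Union[float, Iterable[float]] = 1.0) -> None:
    # a scalar is expanded to one value per row, mirroring Matrix.set_col
    if isinstance(items, (float, int)):
        items = [float(items)] * len(rows)
    for row, item in zip(rows, items):
        row[index] = item

m = [[0, 0], [0, 0]]
set_col(m, 1, items=[7.0, 8.0])
print(m)   # [[0, 7.0], [0, 8.0]]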
openstack/mistral
b2d6de569c7bba96cd3179189ffbcee6b7a28c1f
mistral/db/sqlalchemy/migration/alembic_migrations/env.py
python
run_migrations_offline
()
Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output.
Run migrations in 'offline' mode.
[ "Run", "migrations", "in", "offline", "mode", "." ]
def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ context.configure(url=mistral_config.database.connection) with context.begin_transaction(): context.run_migrations()
[ "def", "run_migrations_offline", "(", ")", ":", "context", ".", "configure", "(", "url", "=", "mistral_config", ".", "database", ".", "connection", ")", "with", "context", ".", "begin_transaction", "(", ")", ":", "context", ".", "run_migrations", "(", ")" ]
https://github.com/openstack/mistral/blob/b2d6de569c7bba96cd3179189ffbcee6b7a28c1f/mistral/db/sqlalchemy/migration/alembic_migrations/env.py#L36-L51
Arelle/Arelle
20f3d8a8afd41668e1520799acd333349ce0ba17
arelle/TkTableWrapper.py
python
Table.icursor
(self, arg=None)
return self.tk.call(self._w, 'icursor', arg)
If arg is not specified, return the location of the insertion cursor in the active cell. Otherwise, set the cursor to that point in the string. 0 is before the first character, you can also use insert or end for the current insertion point or the end of the text. If there is no active cell, or the cell or table is disabled, this will return -1.
If arg is not specified, return the location of the insertion cursor in the active cell. Otherwise, set the cursor to that point in the string.
[ "If", "arg", "is", "not", "specified", "return", "the", "location", "of", "the", "insertion", "cursor", "in", "the", "active", "cell", ".", "Otherwise", "set", "the", "cursor", "to", "that", "point", "in", "the", "string", "." ]
def icursor(self, arg=None): """If arg is not specified, return the location of the insertion cursor in the active cell. Otherwise, set the cursor to that point in the string. 0 is before the first character, you can also use insert or end for the current insertion point or the end of the text. If there is no active cell, or the cell or table is disabled, this will return -1.""" return self.tk.call(self._w, 'icursor', arg)
[ "def", "icursor", "(", "self", ",", "arg", "=", "None", ")", ":", "return", "self", ".", "tk", ".", "call", "(", "self", ".", "_w", ",", "'icursor'", ",", "arg", ")" ]
https://github.com/Arelle/Arelle/blob/20f3d8a8afd41668e1520799acd333349ce0ba17/arelle/TkTableWrapper.py#L272-L280