Dataset schema (column, type, value statistics):

  language     stringclasses   1 value
  repo         stringclasses   346 values
  path         stringlengths   6 to 201
  class_span   dict
  source       stringlengths   21 to 2.38M
  target       stringlengths   1 to 96
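Each row below lists those fields in order: language, repo, path, class_span (character offsets of the class in the original file), source (the class body with its name masked as ____), and target (the masked class name). A minimal sketch of how such rows might be consumed; the file name rows.jsonl and the JSON-lines layout are assumptions for illustration, not something stated by this export:

import json

# Hypothetical export file; one JSON object per line with the columns listed above.
with open("rows.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # Substituting the target back into the masked source restores the original class.
        restored = row["source"].replace("____", row["target"], 1)
        span = row["class_span"]           # e.g. {"start": 1910, "end": 2013}
        print(row["repo"], row["path"], span["start"], span["end"])
        print(restored.splitlines()[0])    # e.g. "class FlushError(sa_exc.SQLAlchemyError):"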
python
scipy__scipy
scipy/spatial/tests/test_kdtree.py
{ "start": 13114, "end": 14063 }
class ____(_Test_random_ball):
    def setup_method(self):
        super().setup_method()
        self.p = np.inf


def test_random_ball_vectorized(kdtree_type):
    n = 20
    m = 5
    np.random.seed(1234)
    T = kdtree_type(np.random.randn(n, m))

    r = T.query_ball_point(np.random.randn(2, 3, m), 1)
    assert_equal(r.shape, (2, 3))
    assert_(isinstance(r[0, 0], list))


@pytest.mark.fail_slow(5)
def test_query_ball_point_multithreading(kdtree_type):
    np.random.seed(0)
    n = 5000
    k = 2
    points = np.random.randn(n, k)
    T = kdtree_type(points)
    l1 = T.query_ball_point(points, 0.003, workers=1)
    l2 = T.query_ball_point(points, 0.003, workers=64)
    l3 = T.query_ball_point(points, 0.003, workers=-1)

    for i in range(n):
        if l1[i] or l2[i]:
            assert_array_equal(l1[i], l2[i])

    for i in range(n):
        if l1[i] or l3[i]:
            assert_array_equal(l1[i], l3[i])
_Test_random_ball_linf
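The scipy row above exercises KDTree.query_ball_point, including vectorized query points and the workers argument. A small hedged sketch of that API against scipy.spatial.KDTree (illustrative values, not part of the dataset row):

import numpy as np
from scipy.spatial import KDTree

rng = np.random.default_rng(0)
points = rng.standard_normal((100, 3))
tree = KDTree(points)

# Indices of all points within radius 0.5 of a single query point.
idx = tree.query_ball_point(points[0], r=0.5)

# A (2, 3, 3) batch of query points yields a (2, 3) object array of index lists,
# and workers=-1 parallelises the search across all available cores.
batch = rng.standard_normal((2, 3, 3))
res = tree.query_ball_point(batch, r=0.5, workers=-1)
assert res.shape == (2, 3)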
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/orm/exc.py
{ "start": 1910, "end": 2013 }
class ____(sa_exc.SQLAlchemyError):
    """A invalid condition was detected during flush()."""
FlushError
python
scikit-learn__scikit-learn
sklearn/model_selection/_search_successive_halving.py
{ "start": 14323, "end": 29126 }
class ____(BaseSuccessiveHalving): """Search over specified parameter values with successive halving. The search strategy starts evaluating all the candidates with a small amount of resources and iteratively selects the best candidates, using more and more resources. Read more in the :ref:`User guide <successive_halving_user_guide>`. .. note:: This estimator is still **experimental** for now: the predictions and the API might change without any deprecation cycle. To use it, you need to explicitly import ``enable_halving_search_cv``:: >>> # explicitly require this experimental feature >>> from sklearn.experimental import enable_halving_search_cv # noqa >>> # now you can import normally from model_selection >>> from sklearn.model_selection import HalvingGridSearchCV Parameters ---------- estimator : estimator object This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_grid : dict or list of dictionaries Dictionary with parameters names (string) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. factor : int or float, default=3 The 'halving' parameter, which determines the proportion of candidates that are selected for each subsequent iteration. For example, ``factor=3`` means that only one third of the candidates are selected. resource : ``'n_samples'`` or str, default='n_samples' Defines the resource that increases with each iteration. By default, the resource is the number of samples. It can also be set to any parameter of the base estimator that accepts positive integer values, e.g. 'n_iterations' or 'n_estimators' for a gradient boosting estimator. In this case ``max_resources`` cannot be 'auto' and must be set explicitly. max_resources : int, default='auto' The maximum amount of resource that any candidate is allowed to use for a given iteration. By default, this is set to ``n_samples`` when ``resource='n_samples'`` (default), else an error is raised. min_resources : {'exhaust', 'smallest'} or int, default='exhaust' The minimum amount of resource that any candidate is allowed to use for a given iteration. Equivalently, this defines the amount of resources `r0` that are allocated for each candidate at the first iteration. - 'smallest' is a heuristic that sets `r0` to a small value: - ``n_splits * 2`` when ``resource='n_samples'`` for a regression problem - ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a classification problem - ``1`` when ``resource != 'n_samples'`` - 'exhaust' will set `r0` such that the **last** iteration uses as much resources as possible. Namely, the last iteration will use the highest value smaller than ``max_resources`` that is a multiple of both ``min_resources`` and ``factor``. In general, using 'exhaust' leads to a more accurate estimator, but is slightly more time consuming. Note that the amount of resources used at each iteration is always a multiple of ``min_resources``. aggressive_elimination : bool, default=False This is only relevant in cases where there isn't enough resources to reduce the remaining candidates to at most `factor` after the last iteration. If ``True``, then the search process will 'replay' the first iteration for as long as needed until the number of candidates is small enough. 
This is ``False`` by default, which means that the last iteration may evaluate more than ``factor`` candidates. See :ref:`aggressive_elimination` for more details. cv : int, cross-validation generator or iterable, default=5 Determines the cross-validation splitting strategy. Possible inputs for cv are: - integer, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. These splitters are instantiated with `shuffle=False` so the splits will be the same across calls. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. note:: Due to implementation details, the folds produced by `cv` must be the same across multiple calls to `cv.split()`. For built-in `scikit-learn` iterators, this can be achieved by deactivating shuffling (`shuffle=False`), or by setting the `cv`'s `random_state` parameter to an integer. scoring : str or callable, default=None Scoring method to use to evaluate the predictions on the test set. - str: see :ref:`scoring_string_names` for options. - callable: a scorer callable object (e.g., function) with signature ``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details. - `None`: the `estimator`'s :ref:`default evaluation criterion <scoring_api_overview>` is used. refit : bool or callable, default=True Refit an estimator using the best found parameters on the whole dataset. Where there are considerations other than maximum score in choosing a best estimator, ``refit`` can be set to a function which returns the selected ``best_index_`` given ``cv_results_``. In that case, the ``best_estimator_`` and ``best_params_`` will be set according to the returned ``best_index_`` while the ``best_score_`` attribute will not be available. The refitted estimator is made available at the ``best_estimator_`` attribute and permits using ``predict`` directly on this ``HalvingGridSearchCV`` instance. See :ref:`this example <sphx_glr_auto_examples_model_selection_plot_grid_search_refit_callable.py>` for an example of how to use ``refit=callable`` to balance model complexity and cross-validated score. error_score : 'raise' or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Default is ``np.nan``. return_train_score : bool, default=False If ``False``, the ``cv_results_`` attribute will not include training scores. Computing training scores is used to get insights on how different parameter settings impact the overfitting/underfitting trade-off. However computing the scores on the training set can be computationally expensive and is not strictly required to select the parameters that yield the best generalization performance. random_state : int, RandomState instance or None, default=None Pseudo random number generator state used for subsampling the dataset when `resources != 'n_samples'`. Ignored otherwise. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. n_jobs : int or None, default=None Number of jobs to run in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. 
``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. verbose : int Controls the verbosity: the higher, the more messages. Attributes ---------- n_resources_ : list of int The amount of resources used at each iteration. n_candidates_ : list of int The number of candidate parameters that were evaluated at each iteration. n_remaining_candidates_ : int The number of candidate parameters that are left after the last iteration. It corresponds to `ceil(n_candidates[-1] / factor)` max_resources_ : int The maximum number of resources that any candidate is allowed to use for a given iteration. Note that since the number of resources used at each iteration must be a multiple of ``min_resources_``, the actual number of resources used at the last iteration may be smaller than ``max_resources_``. min_resources_ : int The amount of resources that are allocated for each candidate at the first iteration. n_iterations_ : int The actual number of iterations that were run. This is equal to ``n_required_iterations_`` if ``aggressive_elimination`` is ``True``. Else, this is equal to ``min(n_possible_iterations_, n_required_iterations_)``. n_possible_iterations_ : int The number of iterations that are possible starting with ``min_resources_`` resources and without exceeding ``max_resources_``. n_required_iterations_ : int The number of iterations that are required to end up with less than ``factor`` candidates at the last iteration, starting with ``min_resources_`` resources. This will be smaller than ``n_possible_iterations_`` when there isn't enough resources. cv_results_ : dict of numpy (masked) ndarrays A dict with keys as column headers and values as columns, that can be imported into a pandas ``DataFrame``. It contains lots of information for analysing the results of a search. Please refer to the :ref:`User guide<successive_halving_cv_results>` for details. For an example of analysing ``cv_results_``, see :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_stats.py`. best_estimator_ : estimator or dict Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if ``refit=False``. best_score_ : float Mean cross-validated score of the best_estimator. best_params_ : dict Parameter setting that gave the best results on the hold out data. best_index_ : int The index (of the ``cv_results_`` arrays) which corresponds to the best candidate parameter setting. The dict at ``search.cv_results_['params'][search.best_index_]`` gives the parameter setting for the best model, that gives the highest mean score (``search.best_score_``). scorer_ : function or a dict Scorer function used on the held out data to choose the best parameters for the model. n_splits_ : int The number of cross-validation splits (folds/iterations). refit_time_ : float Seconds used for refitting the best model on the whole dataset. This is present only if ``refit`` is not False. multimetric_ : bool Whether or not the scorers compute several metrics. classes_ : ndarray of shape (n_classes,) The classes labels. This is present only if ``refit`` is specified and the underlying estimator is a classifier. n_features_in_ : int Number of features seen during :term:`fit`. Only defined if `best_estimator_` is defined (see the documentation for the `refit` parameter for more details) and that `best_estimator_` exposes `n_features_in_` when fit. .. 
versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Only defined if `best_estimator_` is defined (see the documentation for the `refit` parameter for more details) and that `best_estimator_` exposes `feature_names_in_` when fit. .. versionadded:: 1.0 See Also -------- :class:`HalvingRandomSearchCV`: Random search over a set of parameters using successive halving. Notes ----- The parameters selected are those that maximize the score of the held-out data, according to the scoring parameter. All parameter combinations scored with a NaN will share the lowest rank. Examples -------- >>> from sklearn.datasets import load_iris >>> from sklearn.ensemble import RandomForestClassifier >>> from sklearn.experimental import enable_halving_search_cv # noqa >>> from sklearn.model_selection import HalvingGridSearchCV ... >>> X, y = load_iris(return_X_y=True) >>> clf = RandomForestClassifier(random_state=0) ... >>> param_grid = {"max_depth": [3, None], ... "min_samples_split": [5, 10]} >>> search = HalvingGridSearchCV(clf, param_grid, resource='n_estimators', ... max_resources=10, ... random_state=0).fit(X, y) >>> search.best_params_ # doctest: +SKIP {'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9} """ _parameter_constraints: dict = { **BaseSuccessiveHalving._parameter_constraints, "param_grid": [dict, list], } def __init__( self, estimator, param_grid, *, factor=3, resource="n_samples", max_resources="auto", min_resources="exhaust", aggressive_elimination=False, cv=5, scoring=None, refit=True, error_score=np.nan, return_train_score=True, random_state=None, n_jobs=None, verbose=0, ): super().__init__( estimator, scoring=scoring, n_jobs=n_jobs, refit=refit, verbose=verbose, cv=cv, random_state=random_state, error_score=error_score, return_train_score=return_train_score, max_resources=max_resources, resource=resource, factor=factor, min_resources=min_resources, aggressive_elimination=aggressive_elimination, ) self.param_grid = param_grid def _generate_candidate_params(self): return ParameterGrid(self.param_grid)
HalvingGridSearchCV
python
lepture__authlib
authlib/jose/drafts/_jwe_enc_cryptography.py
{ "start": 308, "end": 1731 }
class ____(JWEEncAlgorithm):
    # Use of an IV of size 96 bits is REQUIRED with this algorithm.
    # https://datatracker.ietf.org/doc/html/draft-amringer-jose-chacha-02#section-4.1
    IV_SIZE = 96

    def __init__(self, key_size):
        self.name = "C20P"
        self.description = "ChaCha20-Poly1305"
        self.key_size = key_size
        self.CEK_SIZE = key_size

    def encrypt(self, msg, aad, iv, key):
        """Content Encryption with AEAD_CHACHA20_POLY1305.

        :param msg: text to be encrypt in bytes
        :param aad: additional authenticated data in bytes
        :param iv: initialization vector in bytes
        :param key: encrypted key in bytes
        :return: (ciphertext, tag)
        """
        self.check_iv(iv)
        chacha = ChaCha20Poly1305(key)
        ciphertext = chacha.encrypt(iv, msg, aad)
        return ciphertext[:-16], ciphertext[-16:]

    def decrypt(self, ciphertext, aad, iv, tag, key):
        """Content Decryption with AEAD_CHACHA20_POLY1305.

        :param ciphertext: ciphertext in bytes
        :param aad: additional authenticated data in bytes
        :param iv: initialization vector in bytes
        :param tag: authentication tag in bytes
        :param key: encrypted key in bytes
        :return: message
        """
        self.check_iv(iv)
        chacha = ChaCha20Poly1305(key)
        return chacha.decrypt(iv, ciphertext + tag, aad)
C20PEncAlgorithm
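The C20P algorithm above delegates to the cryptography package's ChaCha20Poly1305 AEAD and splits the 16-byte Poly1305 tag off the end of the sealed output. A hedged round-trip sketch of that convention (illustrative, not authlib's own test code):

import os
from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305

key = ChaCha20Poly1305.generate_key()   # 256-bit content-encryption key
iv = os.urandom(12)                     # 96-bit IV, as the draft requires
aad = b"protected header"
msg = b"hello jose"

sealed = ChaCha20Poly1305(key).encrypt(iv, msg, aad)
ciphertext, tag = sealed[:-16], sealed[-16:]        # same split as encrypt() above

# Decryption re-joins ciphertext and tag, mirroring decrypt() above.
assert ChaCha20Poly1305(key).decrypt(iv, ciphertext + tag, aad) == msg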
python
vyperlang__vyper
vyper/warnings.py
{ "start": 1475, "end": 1579 }
class ____(VyperWarning):
    """
    Warn about using `enum` instead of `flag`
    """

    pass
EnumUsage
python
walkccc__LeetCode
solutions/951. Flip Equivalent Binary Trees/951.py
{ "start": 0, "end": 447 }
class ____:
    def flipEquiv(self, root1: TreeNode | None, root2: TreeNode | None) -> bool:
        if not root1:
            return not root2
        if not root2:
            return not root1
        if root1.val != root2.val:
            return False
        return (self.flipEquiv(root1.left, root2.left) and
                self.flipEquiv(root1.right, root2.right) or
                self.flipEquiv(root1.left, root2.right) and
                self.flipEquiv(root1.right, root2.left))
Solution
python
doocs__leetcode
solution/2100-2199/2104.Sum of Subarray Ranges/Solution.py
{ "start": 0, "end": 325 }
class ____:
    def subArrayRanges(self, nums: List[int]) -> int:
        ans, n = 0, len(nums)
        for i in range(n - 1):
            mi = mx = nums[i]
            for j in range(i + 1, n):
                mi = min(mi, nums[j])
                mx = max(mx, nums[j])
                ans += mx - mi
        return ans
Solution
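The solution above is the brute-force O(n^2) pass over all subarrays, accumulating max minus min as each window grows. A quick sanity check, assuming the Solution class from this row is in scope (the expected values follow from enumerating the subarrays by hand):

# nums = [1, 2, 3]: single elements contribute 0, [1,2] and [2,3] contribute 1 each,
# and [1,2,3] contributes 2, so the total is 4.
assert Solution().subArrayRanges([1, 2, 3]) == 4
assert Solution().subArrayRanges([4, -2, -3, 4, 1]) == 59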
python
psf__black
tests/data/cases/preview_long_strings__regression.py
{ "start": 4253, "end": 4647 }
class ____: def foo(): some_func_call( 'xxxxxxxxxx', ( "xx {xxxxxxxxxxx}/xxxxxxxxxxx.xxx xxxx.xxx && xxxxxx -x " "\"xxxx xxxxxxx xxxxxx xxxx; xxxx xxxxxx_xxxxx xxxxxx xxxx; " "xxxx.xxxx_xxxxxx(['xxxx.xxx'], xxxx.xxxxxxx().xxxxxxxxxx)\" " ), None, ('xxxxxxxxxxx',), ),
A
python
ansible__ansible
lib/ansible/module_utils/facts/network/hpux.py
{ "start": 3290, "end": 3390 }
class ____(NetworkCollector):
    _fact_class = HPUXNetwork
    _platform = 'HP-UX'
HPUXNetworkCollector
python
tensorflow__tensorflow
tensorflow/python/compiler/tensorrt/test/conv2d_test.py
{ "start": 5935, "end": 6994 }
class ____(trt_test.TfTrtIntegrationTestBase):
  """Testing conversion of conv2d_transpose (AKA Conv2DBackpropInput)"""

  def GraphFn(self, inp):
    np.random.seed(1234)
    dtype = inp.dtype
    n, c, h, w = 13, 3, 7, 11
    num_filters = 8
    weights_shape = [2, 2, num_filters, c]
    weights = constant_op.constant(np.random.randn(*weights_shape), dtype=dtype)
    output_shape = constant_op.constant([n, num_filters, h * 2, w * 2],
                                        dtype=dtypes.int32)
    output = nn_ops.conv2d_transpose(
        inp,
        weights,
        output_shape,
        strides=[1, 1, 2, 2],
        padding="SAME",
        data_format="NCHW")
    return array_ops.identity(output, name="output_0")

  def GetParams(self):
    return self.BuildParams(self.GraphFn, dtypes.float32, [[13, 3, 7, 11]],
                            [[13, 8, 14, 22]])

  def ExpectedEnginesToBuild(self, run_params):
    """Return the expected engines to build."""
    return ["TRTEngineOp_000"]


if __name__ == "__main__":
  test.main()
Conv2DTranposeTest
python
kamyu104__LeetCode-Solutions
Python/check-if-word-can-be-placed-in-crossword.py
{ "start": 1388, "end": 2005 }
class ____(object):
    def placeWordInCrossword(self, board, word):
        """
        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        words = [word, word[::-1]]
        for mat in (board, zip(*board)):
            for row in mat:
                blocks = ''.join(row).split('#')
                for s in blocks:
                    if len(s) != len(word):
                        continue
                    for w in words:
                        if all(s[i] in (w[i], ' ') for i in xrange(len(s))):
                            return True
        return False
Solution2
python
getsentry__sentry
src/sentry/deletions/base.py
{ "start": 1599, "end": 1945 }
class ____:
    def __init__(self, params: Mapping[str, Any], task: type[BaseDeletionTask[Any]] | None) -> None:
        self.task = task
        self.params = params

    def __repr__(self) -> str:
        class_type = type(self)
        return f"<{class_type.__module__}.{class_type.__name__}: task={self.task} params={self.params}>"
BaseRelation
python
numpy__numpy
numpy/matrixlib/tests/test_defmatrix.py
{ "start": 1961, "end": 6411 }
class ____: def test_sum(self): """Test whether matrix.sum(axis=1) preserves orientation. Fails in NumPy <= 0.9.6.2127. """ M = matrix([[1, 2, 0, 0], [3, 4, 0, 0], [1, 2, 1, 2], [3, 4, 3, 4]]) sum0 = matrix([8, 12, 4, 6]) sum1 = matrix([3, 7, 6, 14]).T sumall = 30 assert_array_equal(sum0, M.sum(axis=0)) assert_array_equal(sum1, M.sum(axis=1)) assert_equal(sumall, M.sum()) assert_array_equal(sum0, np.sum(M, axis=0)) assert_array_equal(sum1, np.sum(M, axis=1)) assert_equal(sumall, np.sum(M)) def test_prod(self): x = matrix([[1, 2, 3], [4, 5, 6]]) assert_equal(x.prod(), 720) assert_equal(x.prod(0), matrix([[4, 10, 18]])) assert_equal(x.prod(1), matrix([[6], [120]])) assert_equal(np.prod(x), 720) assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]])) assert_equal(np.prod(x, axis=1), matrix([[6], [120]])) y = matrix([0, 1, 3]) assert_(y.prod() == 0) def test_max(self): x = matrix([[1, 2, 3], [4, 5, 6]]) assert_equal(x.max(), 6) assert_equal(x.max(0), matrix([[4, 5, 6]])) assert_equal(x.max(1), matrix([[3], [6]])) assert_equal(np.max(x), 6) assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]])) assert_equal(np.max(x, axis=1), matrix([[3], [6]])) def test_min(self): x = matrix([[1, 2, 3], [4, 5, 6]]) assert_equal(x.min(), 1) assert_equal(x.min(0), matrix([[1, 2, 3]])) assert_equal(x.min(1), matrix([[1], [4]])) assert_equal(np.min(x), 1) assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]])) assert_equal(np.min(x, axis=1), matrix([[1], [4]])) def test_ptp(self): x = np.arange(4).reshape((2, 2)) mx = x.view(np.matrix) assert_(mx.ptp() == 3) assert_(np.all(mx.ptp(0) == np.array([2, 2]))) assert_(np.all(mx.ptp(1) == np.array([1, 1]))) def test_var(self): x = np.arange(9).reshape((3, 3)) mx = x.view(np.matrix) assert_equal(x.var(ddof=0), mx.var(ddof=0)) assert_equal(x.var(ddof=1), mx.var(ddof=1)) def test_basic(self): import numpy.linalg as linalg A = np.array([[1., 2.], [3., 4.]]) mA = matrix(A) assert_(np.allclose(linalg.inv(A), mA.I)) assert_(np.all(np.array(np.transpose(A) == mA.T))) assert_(np.all(np.array(np.transpose(A) == mA.H))) assert_(np.all(A == mA.A)) B = A + 2j * A mB = matrix(B) assert_(np.allclose(linalg.inv(B), mB.I)) assert_(np.all(np.array(np.transpose(B) == mB.T))) assert_(np.all(np.array(np.transpose(B).conj() == mB.H))) def test_pinv(self): x = matrix(np.arange(6).reshape(2, 3)) xpinv = matrix([[-0.77777778, 0.27777778], [-0.11111111, 0.11111111], [ 0.55555556, -0.05555556]]) assert_almost_equal(x.I, xpinv) def test_comparisons(self): A = np.arange(100).reshape(10, 10) mA = matrix(A) mB = matrix(A) + 0.1 assert_(np.all(mB == A + 0.1)) assert_(np.all(mB == matrix(A + 0.1))) assert_(not np.any(mB == matrix(A - 0.1))) assert_(np.all(mA < mB)) assert_(np.all(mA <= mB)) assert_(np.all(mA <= mA)) assert_(not np.any(mA < mA)) assert_(not np.any(mB < mA)) assert_(np.all(mB >= mA)) assert_(np.all(mB >= mB)) assert_(not np.any(mB > mB)) assert_(np.all(mA == mA)) assert_(not np.any(mA == mB)) assert_(np.all(mB != mA)) assert_(not np.all(abs(mA) > 0)) assert_(np.all(abs(mB > 0))) def test_asmatrix(self): A = np.arange(100).reshape(10, 10) mA = asmatrix(A) A[0, 0] = -10 assert_(A[0, 0] == mA[0, 0]) def test_noaxis(self): A = matrix([[1, 0], [0, 1]]) assert_(A.sum() == matrix(2)) assert_(A.mean() == matrix(0.5)) def test_repr(self): A = matrix([[1, 0], [0, 1]]) assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])") def test_make_bool_matrix_from_str(self): A = matrix('True; True; False') B = matrix([[True], [True], [False]]) assert_array_equal(A, B)
TestProperties
python
pytorch__pytorch
torch/fx/experimental/proxy_tensor.py
{ "start": 72185, "end": 85023 }
class ____(PythonKeyTracer): r"""Customized version of PythonKeyTracer that retains module stack information in node.meta["nn_module_stack"]. FX symbolic trace actually does this already, but it relies on `self.root` being the actual module being traced. Since make_fx traces a lambda of our creation, things don't work properly. So for this version we hold onto a reference to the original module (scope_root) and use that to match the path. Also when we see, A / \ B C \ / D we want to record the path as A.B.D by recording only one path. See Note [Preserving the nn module stack metadata during export non-strict mode] # noqa: W605 """ def __init__(self, scope_root: GraphModule) -> None: super().__init__() self.record_stack_traces = True self._record_forward_stack_traces_only = True self.scope_root = scope_root self.enable_attr_proxy = False self.submodule_paths = {} for name, m in self.scope_root.named_modules(remove_duplicate=False): if m in self.submodule_paths: log.info( "Shared module found between %s and %s, AttrProxy is enabled.", self.submodule_paths[m], name, ) self.enable_attr_proxy = True else: self.submodule_paths[m] = name self.proxy_paths: WeakKeyDictionary[_AttrProxy, str] = WeakKeyDictionary() self.attr_proxy_map: WeakKeyDictionary[Module, _AttrProxy] = WeakKeyDictionary() self.proxy_modules: WeakKeyDictionary[_AttrProxy, Module] = WeakKeyDictionary() self.counter = 0 self.module_id_cache = defaultdict(list) for name, mod in self.scope_root.named_modules(remove_duplicate=False): self.module_id_cache[id(mod)].append(name) # Build a wrapper around _AttrProxy to provide the tracer. We can't # store it on _AttrProxy itself beceause we mimic the underlying class # (including its attributes). tracer = self class AttrProxy(_AttrProxy): def __init__(self, base: Union[Module, _AttrProxy], path: str) -> None: if isinstance(base, _AttrProxy): base = base.get_base() # type: ignore[attr-defined] assert isinstance(base, Module) # Class is modified to be a subclass of torch.nn.Module # Warning: We blow away our own attributes here to mimic the base class # - so don't expect `self.x` to do anything useful. # pyrefly: ignore [no-matching-overload] # pyrefly: ignore [bad-override] self.__class__ = type( base.__class__.__name__, (self.__class__, base.__class__), {}, ) self.__dict__ = base.__dict__ self.__class__.__module__ = base.__class__.__module__ self.__class__.__qualname__ = base.__class__.__qualname__ # This overwrites any existing paths if `base` is an AttrProxy tracer.proxy_paths[self] = path tracer.proxy_modules[self] = base def __getattr__(self, name: str) -> AttrProxy: assert isinstance(self, Module) # Calling into torch.nn.Module.__getattr__ with super(), # That __getattr__ is patched to be module_getattr_wrapper in _symbolic_trace.py. # which then calls into _ModuleStackTracer.getattr attr_val = super().__getattr__(name) # type: ignore[misc] if not isinstance(attr_val, Module): return attr_val # pyrefly: ignore [index-error] return AttrProxy(attr_val, tracer.proxy_paths[self] + "." 
+ name) def get_base(self) -> Module: return tracer.proxy_modules[self] def __getitem__(self, idx: Union[int, slice]) -> AttrProxy: if isinstance(idx, slice): if isinstance(self, torch.nn.Sequential): # Copied from nn/modules/container.py res = torch.nn.Sequential( OrderedDict(list(self._modules.items())[idx]) ) # pyrefly: ignore [index-error] return AttrProxy(res, f"{tracer.proxy_paths[self]}.{idx}") elif isinstance(self, torch.nn.ModuleList): # Copied from nn/modules/container.py res = torch.nn.ModuleList(list(self._modules.values())[idx]) # pyrefly: ignore [index-error] return AttrProxy(res, f"{tracer.proxy_paths[self]}.{idx}") return super().__getitem__(idx) # type: ignore[misc] @property def _modules(self) -> dict[str, AttrProxy]: assert "_modules" in self.__dict__ submodules = self.__dict__["_modules"] assert isinstance(submodules, dict) return { key: ( AttrProxy(value, tracer.proxy_paths[self] + "." + str(key)) # type: ignore[misc] if value is not None else value ) for key, value in submodules.items() } self.proxy_type = AttrProxy def path_of_module(self, mod: Module) -> str: """ Use tracked access path during tracing instead of the default BFS behavior. Still use all the possible module paths to verify the result. """ if mod is self.scope_root: return "" if isinstance(mod, _AttrProxy): return self.proxy_paths[mod] try: return Tracer.path_of_module(self, mod) except NameError as e: raise _ModuleNotInstalledAsSubmoduleError from e def getattr( self, attr: str, attr_val: object, parameter_proxy_cache: dict[str, Proxy] ) -> object: if ( not isinstance(attr_val, Module) or isinstance(attr_val, fx.GraphModule) or not self.enable_attr_proxy ): return super().getattr(attr, attr_val, parameter_proxy_cache) if isinstance(attr_val, _AttrProxy): return attr_val # See NOTE [caching AttrProxy]. if attr_val not in self.attr_proxy_map: self.attr_proxy_map[attr_val] = self.proxy_type(attr_val, attr) else: self.attr_proxy_map[attr_val].reset_proxy_mapping(attr_val, attr) return self.attr_proxy_map[attr_val] def trace( # type: ignore[override] self, root: Union[Module, Callable], concrete_args: Optional[dict[str, object]] ) -> fx.Graph: res = super().trace(root, concrete_args) # NOTE [export non-strict fake tensor leak detection] # In non-strict export, we don't have dynamo's side effect # tracking logic which makes some cases hard to detect. # In general, our detecting strategy is: # (1) We instrument fake tensor creation to log all the fake tensors created during export. # (2) We dump the proxy to fake tensor map from make_fx tracer (_FAKE_TENSOR_ID_TO_PROXY_MAP_FOR_EXPORT)) # (3) Filter out fake tensors that are logged during (1): # (1) Associated with TrackedFake (input tracking thing in symbolic_shapes) # (2) Associated with gm.meta # (4) Do ID match with the proxies global _FAKE_TENSOR_ID_TO_PROXY_MAP_FOR_EXPORT _FAKE_TENSOR_ID_TO_PROXY_MAP_FOR_EXPORT.clear() for key, val in self.tensor_tracker.items(): _FAKE_TENSOR_ID_TO_PROXY_MAP_FOR_EXPORT[id(key)] = val.proxy.node # Since we are making _AttrProxy mimic the original # submodule, when someone registers a module directly # to the tracer while tracing, the proxy object gets registered # first. 
So we need to replace the proxy modules with the real ones # This can happen during HOO tracing proxy_module_names_to_be_replaced: list[tuple[str, _AttrProxy]] = [] for name, module in self.root.named_modules(): if module in self.proxy_modules: proxy_module_names_to_be_replaced.append((name, module)) def _delete_proxy_attr(obj: Module, target: str) -> bool: # Copied from fx/graph_module.py # Customized it for proxy type atoms = target.split(".") path, target_submod = atoms[:-1], atoms[-1] assert isinstance(obj, Module) mod = obj # Get the parent module for item in path: if not hasattr(mod, item): return False mod = getattr(mod, item) if not isinstance(mod, (_AttrProxy, Module)): return False if not hasattr(mod, target_submod): return False # At least the leaf module should be proxy type. if not isinstance(getattr(mod, target_submod), _AttrProxy): return False delattr(mod, target_submod) return True for proxy_module_name, proxy_module in proxy_module_names_to_be_replaced: _delete_proxy_attr(self.root, proxy_module_name) actual_module = self.proxy_modules[proxy_module] _assign_attr(actual_module, self.root, proxy_module_name) return res def call_module( self, m: Module, forward: Callable, args: tuple[object, ...], kwargs: dict[str, object], ) -> None: """PythonKeyTracer overrides call_module to avoid the scope handling, but we actually want it. """ from torch._dynamo import OptimizedModule # FIXME (tmanlaibaatar) # When we call torch.compile inside HOO, we will end up # invoking a module that is not registered on the root. For # now, we just inline them. But once we start supporting # mark_strict in export, we do need to properly handle this. # Right now, it doesn't matter because current non-strict # use cases don't need to work with HOO. if isinstance(m, (OptimizedModule, GraphModule)): return forward(*args, **kwargs) try: return Tracer.call_module(self, m, forward, args, kwargs) except _ModuleNotInstalledAsSubmoduleError: log.debug( "Unable to find the path of the module %s. " "This might be because the module was not properly registered " "as a submodule, which is not good practice. We will trace " "through the module without recording stack information.", str(m), ) return forward(*args, **kwargs) def is_leaf_module(self, m: Module, module_qualified_name: str) -> bool: return False def create_node(self, *args: object, **kwargs: object) -> fx.node.Node: """ Create node and add on metadata. Add nn_module_stack here instead of TracerBase, since calls to make_fx() might not want to record module stack metadata. Add torch_fn by looking at torch_fn_metadata and torch_fn_counts. Add stack_trace by filtering out forward() stack frames. """ node = super().create_node(*args, **kwargs) # type: ignore[arg-type] # nn_module_stack if node.op not in ["placeholder", "output"]: if node.meta.get("nn_module_stack") is None: node.meta["nn_module_stack"] = self.module_stack.copy() # convert nn_module_stack from Dict[key, (FQN, class)] -> Dict[str, Tuple[str, str]] for key, (fqn, mod_cls) in node.meta["nn_module_stack"].items(): if isinstance(mod_cls, type): node.meta["nn_module_stack"][key] = ( fqn, mod_cls.__module__ + "." + mod_cls.__qualname__, ) # torch_fn if ( node.op == "call_function" and self.torch_fn_metadata is not None and "torch_fn" not in node.meta ): node.meta["torch_fn"] = ( f"{self.torch_fn_metadata.__name__}_{self.torch_fn_counts[self.torch_fn_metadata]}", f"{self.torch_fn_metadata.__class__.__name__}.{self.torch_fn_metadata.__name__}", ) return node
_ModuleStackTracer
python
jmcnamara__XlsxWriter
xlsxwriter/test/styles/test_write_fills.py
{ "start": 295, "end": 884 }
class ____(unittest.TestCase):
    """
    Test the Styles _write_fills() method.

    """

    def setUp(self):
        self.fh = StringIO()
        self.styles = Styles()
        self.styles._set_filehandle(self.fh)

    def test_write_fills(self):
        """Test the _write_fills() method"""

        self.styles.fill_count = 2

        self.styles._write_fills()

        exp = """<fills count="2"><fill><patternFill patternType="none"/></fill><fill><patternFill patternType="gray125"/></fill></fills>"""
        got = self.fh.getvalue()

        self.assertEqual(exp, got)
TestWriteFills
python
great-expectations__great_expectations
tests/metrics/test_metric.py
{ "start": 929, "end": 1114 }
class ____:
    @pytest.mark.unit
    def test_metric_instantiation_raises(self):
        with pytest.raises(AbstractClassInstantiationError):
            Metric(column=COLUMN)
TestMetric
python
RaRe-Technologies__gensim
gensim/corpora/dictionary.py
{ "start": 525, "end": 30226 }
class ____(utils.SaveLoad, Mapping): """Dictionary encapsulates the mapping between normalized words and their integer ids. Notable instance attributes: Attributes ---------- token2id : dict of (str, int) token -> token_id. I.e. the reverse mapping to `self[token_id]`. cfs : dict of (int, int) Collection frequencies: token_id -> how many instances of this token are contained in the documents. dfs : dict of (int, int) Document frequencies: token_id -> how many documents contain this token. num_docs : int Number of documents processed. num_pos : int Total number of corpus positions (number of processed words). num_nnz : int Total number of non-zeroes in the BOW matrix (sum of the number of unique words per document over the entire corpus). """ def __init__(self, documents=None, prune_at=2000000): """ Parameters ---------- documents : iterable of iterable of str, optional Documents to be used to initialize the mapping and collect corpus statistics. prune_at : int, optional Dictionary will try to keep no more than `prune_at` words in its mapping, to limit its RAM footprint, the correctness is not guaranteed. Use :meth:`~gensim.corpora.dictionary.Dictionary.filter_extremes` to perform proper filtering. Examples -------- .. sourcecode:: pycon >>> from gensim.corpora import Dictionary >>> >>> texts = [['human', 'interface', 'computer']] >>> dct = Dictionary(texts) # initialize a Dictionary >>> dct.add_documents([["cat", "say", "meow"], ["dog"]]) # add more document (extend the vocabulary) >>> dct.doc2bow(["dog", "computer", "non_existent_word"]) [(0, 1), (6, 1)] """ self.token2id = {} self.id2token = {} self.cfs = {} self.dfs = {} self.num_docs = 0 self.num_pos = 0 self.num_nnz = 0 if documents is not None: self.add_documents(documents, prune_at=prune_at) self.add_lifecycle_event( "created", msg=f"built {self} from {self.num_docs} documents (total {self.num_pos} corpus positions)", ) def __getitem__(self, tokenid): """Get the string token that corresponds to `tokenid`. Parameters ---------- tokenid : int Id of token. Returns ------- str Token corresponding to `tokenid`. Raises ------ KeyError If this Dictionary doesn't contain such `tokenid`. """ if len(self.id2token) != len(self.token2id): # the word->id mapping has changed (presumably via add_documents); # recompute id->word accordingly self.id2token = utils.revdict(self.token2id) return self.id2token[tokenid] # will throw for non-existent ids def __iter__(self): """Iterate over all tokens.""" return iter(self.keys()) # restore Py2-style dict API iterkeys = __iter__ def iteritems(self): return self.items() def itervalues(self): return self.values() def keys(self): """Get all stored ids. Returns ------- list of int List of all token ids. """ return list(self.token2id.values()) def __len__(self): """Get number of stored tokens. Returns ------- int Number of stored tokens. """ return len(self.token2id) def __str__(self): some_keys = list(itertools.islice(self.token2id.keys(), 5)) return "%s<%i unique tokens: %s%s>" % ( self.__class__.__name__, len(self), some_keys, '...' if len(self) > 5 else '' ) @staticmethod def from_documents(documents): """Create :class:`~gensim.corpora.dictionary.Dictionary` from `documents`. Equivalent to `Dictionary(documents=documents)`. Parameters ---------- documents : iterable of iterable of str Input corpus. Returns ------- :class:`~gensim.corpora.dictionary.Dictionary` Dictionary initialized from `documents`. 
""" return Dictionary(documents=documents) def add_documents(self, documents, prune_at=2000000): """Update dictionary from a collection of `documents`. Parameters ---------- documents : iterable of iterable of str Input corpus. All tokens should be already **tokenized and normalized**. prune_at : int, optional Dictionary will try to keep no more than `prune_at` words in its mapping, to limit its RAM footprint, the correctness is not guaranteed. Use :meth:`~gensim.corpora.dictionary.Dictionary.filter_extremes` to perform proper filtering. Examples -------- .. sourcecode:: pycon >>> from gensim.corpora import Dictionary >>> >>> corpus = ["máma mele maso".split(), "ema má máma".split()] >>> dct = Dictionary(corpus) >>> len(dct) 5 >>> dct.add_documents([["this", "is", "sparta"], ["just", "joking"]]) >>> len(dct) 10 """ for docno, document in enumerate(documents): # log progress & run a regular check for pruning, once every 10k docs if docno % 10000 == 0: if prune_at is not None and len(self) > prune_at: self.filter_extremes(no_below=0, no_above=1.0, keep_n=prune_at) logger.info("adding document #%i to %s", docno, self) # update Dictionary with the document self.doc2bow(document, allow_update=True) # ignore the result, here we only care about updating token ids logger.info("built %s from %i documents (total %i corpus positions)", self, self.num_docs, self.num_pos) def doc2bow(self, document, allow_update=False, return_missing=False): """Convert `document` into the bag-of-words (BoW) format = list of `(token_id, token_count)` tuples. Parameters ---------- document : list of str Input document. allow_update : bool, optional Update self, by adding new tokens from `document` and updating internal corpus statistics. return_missing : bool, optional Return missing tokens (tokens present in `document` but not in self) with frequencies? Return ------ list of (int, int) BoW representation of `document`. list of (int, int), dict of (str, int) If `return_missing` is True, return BoW representation of `document` + dictionary with missing tokens and their frequencies. Examples -------- .. sourcecode:: pycon >>> from gensim.corpora import Dictionary >>> dct = Dictionary(["máma mele maso".split(), "ema má máma".split()]) >>> dct.doc2bow(["this", "is", "máma"]) [(2, 1)] >>> dct.doc2bow(["this", "is", "máma"], return_missing=True) ([(2, 1)], {u'this': 1, u'is': 1}) """ if isinstance(document, str): raise TypeError("doc2bow expects an array of unicode tokens on input, not a single string") # Construct (word, frequency) mapping. counter = defaultdict(int) for w in document: counter[w if isinstance(w, str) else str(w, 'utf-8')] += 1 token2id = self.token2id if allow_update or return_missing: missing = sorted(x for x in counter.items() if x[0] not in token2id) if allow_update: for w, _ in missing: # new id = number of ids made so far; # NOTE this assumes there are no gaps in the id sequence! 
token2id[w] = len(token2id) result = {token2id[w]: freq for w, freq in counter.items() if w in token2id} if allow_update: self.num_docs += 1 self.num_pos += sum(counter.values()) self.num_nnz += len(result) # keep track of document and collection frequencies for tokenid, freq in result.items(): self.cfs[tokenid] = self.cfs.get(tokenid, 0) + freq self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1 # return tokenids, in ascending id order result = sorted(result.items()) if return_missing: return result, dict(missing) else: return result def doc2idx(self, document, unknown_word_index=-1): """Convert `document` (a list of words) into a list of indexes = list of `token_id`. Replace all unknown words i.e, words not in the dictionary with the index as set via `unknown_word_index`. Parameters ---------- document : list of str Input document unknown_word_index : int, optional Index to use for words not in the dictionary. Returns ------- list of int Token ids for tokens in `document`, in the same order. Examples -------- .. sourcecode:: pycon >>> from gensim.corpora import Dictionary >>> >>> corpus = [["a", "a", "b"], ["a", "c"]] >>> dct = Dictionary(corpus) >>> dct.doc2idx(["a", "a", "c", "not_in_dictionary", "c"]) [0, 0, 2, -1, 2] """ if isinstance(document, str): raise TypeError("doc2idx expects an array of unicode tokens on input, not a single string") document = [word if isinstance(word, str) else str(word, 'utf-8') for word in document] return [self.token2id.get(word, unknown_word_index) for word in document] def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000, keep_tokens=None): """Filter out tokens in the dictionary by their frequency. Parameters ---------- no_below : int, optional Keep tokens which are contained in at least `no_below` documents. no_above : float, optional Keep tokens which are contained in no more than `no_above` documents (fraction of total corpus size, not an absolute number). keep_n : int, optional Keep only the first `keep_n` most frequent tokens. keep_tokens : iterable of str Iterable of tokens that **must** stay in dictionary after filtering. Notes ----- This removes all tokens in the dictionary that are: #. Less frequent than `no_below` documents (absolute number, e.g. `5`) or \n #. More frequent than `no_above` documents (fraction of the total corpus size, e.g. `0.3`). #. After (1) and (2), keep only the first `keep_n` most frequent tokens (or keep all if `keep_n=None`). After the pruning, resulting gaps in word ids are shrunk. Due to this gap shrinking, **the same word may have a different word id before and after the call to this function!** See :class:`gensim.models.VocabTransform` and the `dedicated FAQ entry <https://github.com/RaRe-Technologies/gensim/wiki/Recipes-&-FAQ#q8-how-can-i-filter-a-saved-corpus-and-its-corresponding-dictionary>`_ on how # noqa to transform a corpus built with a dictionary before pruning. Examples -------- .. 
sourcecode:: pycon >>> from gensim.corpora import Dictionary >>> >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]] >>> dct = Dictionary(corpus) >>> len(dct) 5 >>> dct.filter_extremes(no_below=1, no_above=0.5, keep_n=1) >>> len(dct) 1 """ no_above_abs = int(no_above * self.num_docs) # convert fractional threshold to absolute threshold # determine which tokens to keep if keep_tokens: keep_ids = {self.token2id[v] for v in keep_tokens if v in self.token2id} good_ids = [ v for v in self.token2id.values() if no_below <= self.dfs.get(v, 0) <= no_above_abs or v in keep_ids ] good_ids.sort(key=lambda x: self.num_docs if x in keep_ids else self.dfs.get(x, 0), reverse=True) else: good_ids = [ v for v in self.token2id.values() if no_below <= self.dfs.get(v, 0) <= no_above_abs ] good_ids.sort(key=self.dfs.get, reverse=True) if keep_n is not None: good_ids = good_ids[:keep_n] bad_words = [(self[idx], self.dfs.get(idx, 0)) for idx in set(self).difference(good_ids)] logger.info("discarding %i tokens: %s...", len(self) - len(good_ids), bad_words[:10]) logger.info( "keeping %i tokens which were in no less than %i and no more than %i (=%.1f%%) documents", len(good_ids), no_below, no_above_abs, 100.0 * no_above ) # do the actual filtering, then rebuild dictionary to remove gaps in ids self.filter_tokens(good_ids=good_ids) logger.info("resulting dictionary: %s", self) def filter_n_most_frequent(self, remove_n): """Filter out the 'remove_n' most frequent tokens that appear in the documents. Parameters ---------- remove_n : int Number of the most frequent tokens that will be removed. Examples -------- .. sourcecode:: pycon >>> from gensim.corpora import Dictionary >>> >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]] >>> dct = Dictionary(corpus) >>> len(dct) 5 >>> dct.filter_n_most_frequent(2) >>> len(dct) 3 """ # determine which tokens to keep most_frequent_ids = (v for v in self.token2id.values()) most_frequent_ids = sorted(most_frequent_ids, key=self.dfs.get, reverse=True) most_frequent_ids = most_frequent_ids[:remove_n] # do the actual filtering, then rebuild dictionary to remove gaps in ids most_frequent_words = [(self[idx], self.dfs.get(idx, 0)) for idx in most_frequent_ids] logger.info("discarding %i tokens: %s...", len(most_frequent_ids), most_frequent_words[:10]) self.filter_tokens(bad_ids=most_frequent_ids) logger.info("resulting dictionary: %s", self) def filter_tokens(self, bad_ids=None, good_ids=None): """Remove the selected `bad_ids` tokens from :class:`~gensim.corpora.dictionary.Dictionary`. Alternatively, keep selected `good_ids` in :class:`~gensim.corpora.dictionary.Dictionary` and remove the rest. Parameters ---------- bad_ids : iterable of int, optional Collection of word ids to be removed. good_ids : collection of int, optional Keep selected collection of word ids and remove the rest. Examples -------- .. 
sourcecode:: pycon >>> from gensim.corpora import Dictionary >>> >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]] >>> dct = Dictionary(corpus) >>> 'ema' in dct.token2id True >>> dct.filter_tokens(bad_ids=[dct.token2id['ema']]) >>> 'ema' in dct.token2id False >>> len(dct) 4 >>> dct.filter_tokens(good_ids=[dct.token2id['maso']]) >>> len(dct) 1 """ if bad_ids is not None: bad_ids = set(bad_ids) self.token2id = {token: tokenid for token, tokenid in self.token2id.items() if tokenid not in bad_ids} self.cfs = {tokenid: freq for tokenid, freq in self.cfs.items() if tokenid not in bad_ids} self.dfs = {tokenid: freq for tokenid, freq in self.dfs.items() if tokenid not in bad_ids} if good_ids is not None: good_ids = set(good_ids) self.token2id = {token: tokenid for token, tokenid in self.token2id.items() if tokenid in good_ids} self.cfs = {tokenid: freq for tokenid, freq in self.cfs.items() if tokenid in good_ids} self.dfs = {tokenid: freq for tokenid, freq in self.dfs.items() if tokenid in good_ids} self.compactify() def compactify(self): """Assign new word ids to all words, shrinking any gaps.""" logger.debug("rebuilding dictionary, shrinking gaps") # build mapping from old id -> new id idmap = dict(zip(sorted(self.token2id.values()), range(len(self.token2id)))) # reassign mappings to new ids self.token2id = {token: idmap[tokenid] for token, tokenid in self.token2id.items()} self.id2token = {} self.dfs = {idmap[tokenid]: freq for tokenid, freq in self.dfs.items()} self.cfs = {idmap[tokenid]: freq for tokenid, freq in self.cfs.items()} def save_as_text(self, fname, sort_by_word=True): """Save :class:`~gensim.corpora.dictionary.Dictionary` to a text file. Parameters ---------- fname : str Path to output file. sort_by_word : bool, optional Sort words in lexicographical order before writing them out? Notes ----- Format:: num_docs id_1[TAB]word_1[TAB]document_frequency_1[NEWLINE] id_2[TAB]word_2[TAB]document_frequency_2[NEWLINE] .... id_k[TAB]word_k[TAB]document_frequency_k[NEWLINE] This text format is great for corpus inspection and debugging. As plaintext, it's also easily portable to other tools and frameworks. For better performance and to store the entire object state, including collected corpus statistics, use :meth:`~gensim.corpora.dictionary.Dictionary.save` and :meth:`~gensim.corpora.dictionary.Dictionary.load` instead. See Also -------- :meth:`~gensim.corpora.dictionary.Dictionary.load_from_text` Load :class:`~gensim.corpora.dictionary.Dictionary` from text file. Examples -------- .. 
sourcecode:: pycon >>> from gensim.corpora import Dictionary >>> from gensim.test.utils import get_tmpfile >>> >>> tmp_fname = get_tmpfile("dictionary") >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]] >>> >>> dct = Dictionary(corpus) >>> dct.save_as_text(tmp_fname) >>> >>> loaded_dct = Dictionary.load_from_text(tmp_fname) >>> assert dct.token2id == loaded_dct.token2id """ logger.info("saving dictionary mapping to %s", fname) with utils.open(fname, 'wb') as fout: numdocs_line = "%d\n" % self.num_docs fout.write(utils.to_utf8(numdocs_line)) if sort_by_word: for token, tokenid in sorted(self.token2id.items()): line = "%i\t%s\t%i\n" % (tokenid, token, self.dfs.get(tokenid, 0)) fout.write(utils.to_utf8(line)) else: for tokenid, freq in sorted(self.dfs.items(), key=lambda item: -item[1]): line = "%i\t%s\t%i\n" % (tokenid, self[tokenid], freq) fout.write(utils.to_utf8(line)) def merge_with(self, other): """Merge another dictionary into this dictionary, mapping the same tokens to the same ids and new tokens to new ids. Notes ----- The purpose is to merge two corpora created using two different dictionaries: `self` and `other`. `other` can be any id=>word mapping (a dict, a Dictionary object, ...). Return a transformation object which, when accessed as `result[doc_from_other_corpus]`, will convert documents from a corpus built using the `other` dictionary into a document using the new, merged dictionary. Parameters ---------- other : {dict, :class:`~gensim.corpora.dictionary.Dictionary`} Other dictionary. Return ------ :class:`gensim.models.VocabTransform` Transformation object. Examples -------- .. sourcecode:: pycon >>> from gensim.corpora import Dictionary >>> >>> corpus_1, corpus_2 = [["a", "b", "c"]], [["a", "f", "f"]] >>> dct_1, dct_2 = Dictionary(corpus_1), Dictionary(corpus_2) >>> dct_1.doc2bow(corpus_2[0]) [(0, 1)] >>> transformer = dct_1.merge_with(dct_2) >>> dct_1.doc2bow(corpus_2[0]) [(0, 1), (3, 2)] """ old2new = {} for other_id, other_token in other.items(): if other_token in self.token2id: new_id = self.token2id[other_token] else: new_id = len(self.token2id) self.token2id[other_token] = new_id self.dfs[new_id] = 0 old2new[other_id] = new_id try: self.dfs[new_id] += other.dfs[other_id] except Exception: # `other` isn't a Dictionary (probably just a dict) => ignore dfs, keep going pass try: self.num_docs += other.num_docs self.num_nnz += other.num_nnz self.num_pos += other.num_pos except Exception: pass import gensim.models return gensim.models.VocabTransform(old2new) def patch_with_special_tokens(self, special_token_dict): """Patch token2id and id2token using a dictionary of special tokens. **Usecase:** when doing sequence modeling (e.g. named entity recognition), one may want to specify special tokens that behave differently than others. One example is the "unknown" token, and another is the padding token. It is usual to set the padding token to have index `0`, and patching the dictionary with `{'<PAD>': 0}` would be one way to specify this. Parameters ---------- special_token_dict : dict of (str, int) dict containing the special tokens as keys and their wanted indices as values. Examples -------- .. 
sourcecode:: pycon >>> from gensim.corpora import Dictionary >>> >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]] >>> dct = Dictionary(corpus) >>> >>> special_tokens = {'pad': 0, 'space': 1} >>> print(dct.token2id) {'maso': 0, 'mele': 1, 'máma': 2, 'ema': 3, 'má': 4} >>> >>> dct.patch_with_special_tokens(special_tokens) >>> print(dct.token2id) {'maso': 6, 'mele': 7, 'máma': 2, 'ema': 3, 'má': 4, 'pad': 0, 'space': 1} """ possible_ids = [] for token, idx in special_token_dict.items(): if token in self.token2id and self.token2id[token] == idx: continue if token in self.token2id and self.token2id[token] != idx: possible_ids.append(self.token2id[token]) del self.token2id[token] old_token = self[idx] self.token2id[token] = idx self.token2id[old_token] = possible_ids.pop() if \ len(possible_ids) > 0 else len(self.token2id) - 1 self.id2token = {} # Make sure that id2token is updated according to special tokens. @staticmethod def load_from_text(fname): """Load a previously stored :class:`~gensim.corpora.dictionary.Dictionary` from a text file. Mirror function to :meth:`~gensim.corpora.dictionary.Dictionary.save_as_text`. Parameters ---------- fname: str Path to a file produced by :meth:`~gensim.corpora.dictionary.Dictionary.save_as_text`. See Also -------- :meth:`~gensim.corpora.dictionary.Dictionary.save_as_text` Save :class:`~gensim.corpora.dictionary.Dictionary` to text file. Examples -------- .. sourcecode:: pycon >>> from gensim.corpora import Dictionary >>> from gensim.test.utils import get_tmpfile >>> >>> tmp_fname = get_tmpfile("dictionary") >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]] >>> >>> dct = Dictionary(corpus) >>> dct.save_as_text(tmp_fname) >>> >>> loaded_dct = Dictionary.load_from_text(tmp_fname) >>> assert dct.token2id == loaded_dct.token2id """ result = Dictionary() with utils.open(fname, 'rb') as f: for lineno, line in enumerate(f): line = utils.to_unicode(line) if lineno == 0: if line.strip().isdigit(): # Older versions of save_as_text may not write num_docs on first line. result.num_docs = int(line.strip()) continue else: logging.warning("Text does not contain num_docs on the first line.") try: wordid, word, docfreq = line[:-1].split('\t') except Exception: raise ValueError("invalid line in dictionary file %s: %s" % (fname, line.strip())) wordid = int(wordid) if word in result.token2id: raise KeyError('token %s is defined as ID %d and as ID %d' % (word, wordid, result.token2id[word])) result.token2id[word] = wordid result.dfs[wordid] = int(docfreq) return result def most_common(self, n: Optional[int] = None) -> List[Tuple[str, int]]: """Return a list of the n most common words and their counts from the most common to the least. Words with equal counts are ordered in the increasing order of their ids. Parameters ---------- n : int or None, optional The number of most common words to be returned. If `None`, all words in the dictionary will be returned. Default is `None`. Returns ------- most_common : list of (str, int) The n most common words and their counts from the most common to the least. """ most_common = [ (self[word], count) for word, count in sorted(self.cfs.items(), key=lambda x: (-x[1], x[0]))[:n] ] return most_common @staticmethod def from_corpus(corpus, id2word=None): """Create :class:`~gensim.corpora.dictionary.Dictionary` from an existing corpus. Parameters ---------- corpus : iterable of iterable of (int, number) Corpus in BoW format. id2word : dict of (int, object) Mapping id -> word. 
If None, the mapping `id2word[word_id] = str(word_id)` will be used. Notes ----- This can be useful if you only have a term-document BOW matrix (represented by `corpus`), but not the original text corpus. This method will scan the term-document count matrix for all word ids that appear in it, then construct :class:`~gensim.corpora.dictionary.Dictionary` which maps each `word_id -> id2word[word_id]`. `id2word` is an optional dictionary that maps the `word_id` to a token. In case `id2word` isn't specified the mapping `id2word[word_id] = str(word_id)` will be used. Returns ------- :class:`~gensim.corpora.dictionary.Dictionary` Inferred dictionary from corpus. Examples -------- .. sourcecode:: pycon >>> from gensim.corpora import Dictionary >>> >>> corpus = [[(1, 1.0)], [], [(0, 5.0), (2, 1.0)], []] >>> dct = Dictionary.from_corpus(corpus) >>> len(dct) 3 """ result = Dictionary() max_id = -1 for docno, document in enumerate(corpus): if docno % 10000 == 0: logger.info("adding document #%i to %s", docno, result) result.num_docs += 1 result.num_nnz += len(document) for wordid, word_freq in document: max_id = max(wordid, max_id) result.num_pos += word_freq result.dfs[wordid] = result.dfs.get(wordid, 0) + 1 if id2word is None: # make sure length(result) == get_max_id(corpus) + 1 result.token2id = {str(i): i for i in range(max_id + 1)} else: # id=>word mapping given: simply copy it result.token2id = {utils.to_unicode(token): idx for idx, token in id2word.items()} for idx in result.token2id.values(): # make sure all token ids have a valid `dfs` entry result.dfs[idx] = result.dfs.get(idx, 0) logger.info( "built %s from %i documents (total %i corpus positions)", result, result.num_docs, result.num_pos ) return result
Dictionary
python
celery__celery
t/unit/app/test_beat.py
{ "start": 496, "end": 657 }
class ____(dict):
    closed = False
    synced = False

    def close(self):
        self.closed = True

    def sync(self):
        self.synced = True
MockShelve
python
astropy__astropy
astropy/io/fits/tests/test_util.py
{ "start": 2449, "end": 7060 }
class ____(FitsTestCase):
    """
    The high-level tests are partially covered by
    test_core.TestConvenienceFunctions.test_fileobj_mode_guessing
    but added some low-level tests as well.
    """

    def test_mode_strings(self):
        # A string signals that the file should be opened so the function
        # should return None, because it's simply not opened yet.
        assert util.fileobj_mode("tmp1.fits") is None

    @pytest.mark.skipif(not HAS_PIL, reason="requires pil")
    def test_mode_pil_image(self):
        img = np.random.randint(0, 255, (5, 5, 3)).astype(np.uint8)
        result = Image.fromarray(img)

        result.save(self.temp("test_simple.jpg"))
        # PIL doesn't support append mode. So it will always use binary read.
        with Image.open(self.temp("test_simple.jpg")) as fileobj:
            assert util.fileobj_mode(fileobj) == "rb"

    def test_mode_gzip(self):
        # Open a gzip in every possible (gzip is binary or "touch" only) way
        # and check if the mode was correctly identified.
        # The lists consist of tuples: filenumber, given mode, identified mode
        # The filenumber must be given because read expects the file to exist
        # and x expects it to NOT exist.
        num_mode_resmode = [
            (0, "a", "ab"),
            (0, "ab", "ab"),
            (0, "w", "wb"),
            (0, "wb", "wb"),
            (1, "x", "xb"),
            (1, "r", "rb"),
            (1, "rb", "rb"),
        ]

        for num, mode, res in num_mode_resmode:
            filename = self.temp(f"test{num}.gz")
            with gzip.GzipFile(filename, mode) as fileobj:
                assert util.fileobj_mode(fileobj) == res

    def test_mode_normal_buffering(self):
        # Use the python IO with buffering parameter. Binary mode only:
        # see "test_mode_gzip" for explanation of tuple meanings.
        num_mode_resmode = [
            (0, "ab", "ab"),
            (0, "wb", "wb"),
            (1, "xb", "xb"),
            (1, "rb", "rb"),
        ]

        for num, mode, res in num_mode_resmode:
            filename = self.temp(f"test1{num}.dat")
            with open(filename, mode, buffering=0) as fileobj:
                assert util.fileobj_mode(fileobj) == res

    def test_mode_normal_no_buffering(self):
        # Python IO without buffering
        # see "test_mode_gzip" for explanation of tuple meanings.
        num_mode_resmode = [
            (0, "a", "a"),
            (0, "ab", "ab"),
            (0, "w", "w"),
            (0, "wb", "wb"),
            (1, "x", "x"),
            (1, "r", "r"),
            (1, "rb", "rb"),
        ]

        for num, mode, res in num_mode_resmode:
            filename = self.temp(f"test2{num}.dat")
            with open(filename, mode) as fileobj:
                assert util.fileobj_mode(fileobj) == res

    def test_mode_normalization(self):
        # Use the normal python IO in append mode with all possible permutation
        # of the "mode" letters.
        # Tuple gives a file name suffix, the given mode and the functions
        # return. The filenumber is only for consistency with the other
        # test functions. Append can deal with existing and not existing files.
        for num, mode, res in [
            (0, "a", "a"),
            (0, "a+", "a+"),
            (0, "ab", "ab"),
            (0, "a+b", "ab+"),
            (0, "ab+", "ab+"),
        ]:
            filename = self.temp(f"test3{num}.dat")
            with open(filename, mode) as fileobj:
                assert util.fileobj_mode(fileobj) == res


def test_rstrip_inplace():
    # Incorrect type
    s = np.array([1, 2, 3])
    with pytest.raises(TypeError) as exc:
        _rstrip_inplace(s)
    assert exc.value.args[0] == "This function can only be used on string arrays"

    # Bytes array
    s = np.array(["a ", " b", " c c "], dtype="S6")
    _rstrip_inplace(s)
    assert_equal(s, np.array(["a", " b", " c c"], dtype="S6"))

    # Unicode array
    s = np.array(["a ", " b", " c c "], dtype="U6")
    _rstrip_inplace(s)
    assert_equal(s, np.array(["a", " b", " c c"], dtype="U6"))

    # 2-dimensional array
    s = np.array([["a ", " b"], [" c c ", " a "]], dtype="S6")
    _rstrip_inplace(s)
    assert_equal(s, np.array([["a", " b"], [" c c", " a"]], dtype="S6"))

    # 3-dimensional array
    s = np.repeat(" a a ", 24).reshape((2, 3, 4))
    _rstrip_inplace(s)
    assert_equal(s, " a a")

    # 3-dimensional non-contiguous array
    s = np.repeat(" a a ", 1000).reshape((10, 10, 10))[:2, :3, :4]
    _rstrip_inplace(s)
    assert_equal(s, " a a")
TestUtilMode
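A short, hedged sketch of the helper exercised by the row above; it assumes `fileobj_mode` is importable from `astropy.io.fits.util` and uses a throwaway scratch file name.

# Illustrative only: fileobj_mode reports the effective mode of an open handle,
# and returns None for a bare path string that has not been opened yet.
from astropy.io.fits import util

with open("scratch.dat", "wb") as f:   # hypothetical scratch file
    assert util.fileobj_mode(f) == "wb"
assert util.fileobj_mode("scratch.dat") is None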
python
walkccc__LeetCode
solutions/101. Symmetric Tree/101.py
{ "start": 0, "end": 348 }
class ____:
    def isSymmetric(self, root: TreeNode | None) -> bool:
        def isSymmetric(p: TreeNode | None, q: TreeNode | None) -> bool:
            if not p or not q:
                return p == q
            return (p.val == q.val and
                    isSymmetric(p.left, q.right) and
                    isSymmetric(p.right, q.left))

        return isSymmetric(root, root)
Solution
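A minimal usage sketch for the symmetric-tree row above; the `TreeNode` stand-in below is an assumption (val/left/right), matching the usual LeetCode node shape.

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

# mirror-image tree: 1 -> (2 -> (None, 3), 2 -> (3, None))
root = TreeNode(1, TreeNode(2, None, TreeNode(3)), TreeNode(2, TreeNode(3), None))
print(Solution().isSymmetric(root))  # True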
python
joke2k__faker
faker/providers/credit_card/zh_CN/__init__.py
{ "start": 184, "end": 1481 }
class ____(CreditCardProvider):
    """Custom credit card provider for the zh_CN locale."""

    prefix_unionpay = ["62"]  # UnionPay cards typically start with 62
    prefix_visa = ["4"]
    prefix_mastercard = ["51", "52", "53", "54", "55"]

    credit_card_types = OrderedDict(
        (
            ("unionpay", CreditCard("UnionPay", prefix_unionpay, security_code="CVN2")),
            ("visa", CreditCard("Visa", prefix_visa, security_code="CVV2")),
            ("mastercard", CreditCard("Mastercard", prefix_mastercard, security_code="CVC2")),
        )
    )

    def credit_card_full(self, card_type: Optional[CardType] = None) -> str:
        """Generate a full Chinese credit card with details."""
        card = self._credit_card_type(card_type)

        tpl = "{provider}\n{owner}\n{number} {expire_date}\n{security}: {security_nb}\n{issuer}"

        tpl = tpl.format(
            provider=card.name,
            owner=self.generator.parse("{{first_name}}{{last_name}}"),
            number=self.credit_card_number(card),
            expire_date=self.credit_card_expire(),
            security=card.security_code,
            security_nb=self.credit_card_security_code(card),
            issuer=self.generator.parse("{{bank}}"),
        )

        return self.generator.parse(tpl)
Provider
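A hedged usage sketch for the zh_CN credit-card provider above; it assumes the class is importable as `Provider` and relies on Faker's standard `add_provider`/`credit_card_full` API.

from faker import Faker

fake = Faker("zh_CN")
fake.add_provider(Provider)                          # register the custom provider
print(fake.credit_card_full(card_type="unionpay"))   # provider, holder, number, expiry, CVN2, issuing bank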
python
getsentry__sentry
src/sentry/migrations/0977_commitfilechange_break_commit_fk.py
{ "start": 222, "end": 2473 }
class ____(CheckedMigration):
    # This flag is used to mark that a migration shouldn't be automatically run in production.
    # This should only be used for operations where it's safe to run the migration after your
    # code has deployed. So this should not be used for most operations that alter the schema
    # of a table.
    # Here are some things that make sense to mark as post deployment:
    # - Large data migrations. Typically we want these to be run manually so that they can be
    #   monitored and not block the deploy for a long period of time while they run.
    # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
    #   run this outside deployments so that we don't block them. Note that while adding an index
    #   is a schema change, it's completely safe to run the operation after the code has deployed.
    # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment

    is_post_deployment = False

    dependencies = [
        ("sentry", "0976_sentry_app_json_field"),
    ]

    operations = [
        migrations.SeparateDatabaseAndState(
            database_operations=[
                migrations.AlterField(
                    model_name="commitfilechange",
                    name="commit",
                    field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(
                        db_constraint=False,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="sentry.commit",
                    ),
                )
            ],
            state_operations=[
                migrations.RemoveField(
                    model_name="commitfilechange",
                    name="commit",
                ),
                migrations.AddField(
                    model_name="commitfilechange",
                    name="commit_id",
                    field=sentry.db.models.fields.bounded.BoundedBigIntegerField(db_index=True),
                ),
                migrations.AlterUniqueTogether(
                    name="commitfilechange",
                    unique_together={("commit_id", "filename")},
                ),
            ],
        ),
    ]
Migration
python
spack__spack
lib/spack/spack/variant.py
{ "start": 8661, "end": 17705 }
class ____: """A VariantValue is a key-value pair that represents a variant. It can have zero or more values. Values have set semantics, so they are unordered and unique. The variant type can be narrowed from multi to single to boolean, this limits the number of values that can be stored in the variant. Multi-valued variants can either be concrete or abstract: abstract means that the variant takes at least the values specified, but may take more when concretized. Concrete means that the variant takes exactly the values specified. Lastly, a variant can be marked as propagating, which means that it should be propagated to dependencies.""" name: str propagate: bool concrete: bool type: VariantType _values: ValueType slots = ("name", "propagate", "concrete", "type", "_values") def __init__( self, type: VariantType, name: str, value: ValueType, *, propagate: bool = False, concrete: bool = False, ) -> None: self.name = name self.type = type self.propagate = propagate # only multi-valued variants can be abstract self.concrete = concrete or type in (VariantType.BOOL, VariantType.SINGLE) # Invokes property setter self.set(*value) @staticmethod def from_node_dict( name: str, value: Union[str, List[str]], *, propagate: bool = False, abstract: bool = False ) -> "VariantValue": """Reconstruct a variant from a node dict.""" if isinstance(value, list): return VariantValue( VariantType.MULTI, name, tuple(value), propagate=propagate, concrete=not abstract ) # todo: is this necessary? not literal true / false in json/yaml? elif str(value).upper() == "TRUE" or str(value).upper() == "FALSE": return VariantValue( VariantType.BOOL, name, (str(value).upper() == "TRUE",), propagate=propagate ) return VariantValue(VariantType.SINGLE, name, (value,), propagate=propagate) @staticmethod def from_string_or_bool( name: str, value: Union[str, bool], *, propagate: bool = False, concrete: bool = False ) -> "VariantValue": if value is True or value is False: return VariantValue(VariantType.BOOL, name, (value,), propagate=propagate) elif value.upper() in ("TRUE", "FALSE"): return VariantValue( VariantType.BOOL, name, (value.upper() == "TRUE",), propagate=propagate ) elif value == "*": return VariantValue(VariantType.MULTI, name, (), propagate=propagate) return VariantValue( VariantType.MULTI, name, tuple(value.split(",")), propagate=propagate, concrete=concrete, ) @staticmethod def from_concretizer(name: str, value: str, type: str) -> "VariantValue": """Reconstruct a variant from concretizer output.""" if type == "bool": return VariantValue(VariantType.BOOL, name, (value == "True",)) elif type == "multi": return VariantValue(VariantType.MULTI, name, (value,), concrete=True) else: return VariantValue(VariantType.SINGLE, name, (value,)) def yaml_entry(self) -> Tuple[str, SerializedValueType]: """Returns a (key, value) tuple suitable to be an entry in a yaml dict. 
Returns: tuple: (name, value_representation) """ if self.type == VariantType.MULTI: return self.name, list(self.values) return self.name, self.values[0] @property def values(self) -> ValueType: return self._values @property def value(self) -> Union[ValueType, bool, str]: return self._values[0] if self.type != VariantType.MULTI else self._values def set(self, *value: Union[bool, str]) -> None: """Set the value(s) of the variant.""" if len(value) > 1: value = tuple(sorted(set(value))) if self.type != VariantType.MULTI: if len(value) != 1: raise MultipleValuesInExclusiveVariantError(self) unwrapped = value[0] if self.type == VariantType.BOOL and unwrapped not in (True, False): raise ValueError( f"cannot set a boolean variant to a value that is not a boolean: {unwrapped}" ) if "*" in value: raise InvalidVariantValueError("cannot use reserved value '*'") self._values = value def _cmp_iter(self) -> Iterable: yield self.name yield self.propagate yield self.concrete yield from (str(v) for v in self.values) def copy(self) -> "VariantValue": return VariantValue( self.type, self.name, self.values, propagate=self.propagate, concrete=self.concrete ) def satisfies(self, other: "VariantValue") -> bool: """The lhs satisfies the rhs if all possible concretizations of lhs are also possible concretizations of rhs.""" if self.name != other.name: return False if not other.concrete: # rhs abstract means the lhs must at least contain its values. # special-case patches with rhs abstract: their values may be prefixes of the lhs # values. if self.name == "patches": return all( isinstance(v, str) and any(isinstance(w, str) and w.startswith(v) for w in self.values) for v in other.values ) return all(v in self for v in other.values) if self.concrete: # both concrete: they must be equal return self.values == other.values return False def intersects(self, other: "VariantValue") -> bool: """True iff there exists a concretization that satisfies both lhs and rhs.""" if self.name != other.name: return False if self.concrete: if other.concrete: return self.values == other.values return all(v in self for v in other.values) if other.concrete: return all(v in other for v in self.values) # both abstract: the union is a valid concretization of both return True def constrain(self, other: "VariantValue") -> bool: """Constrain self with other if they intersect. 
Returns true iff self was changed.""" if not self.intersects(other): raise UnsatisfiableVariantSpecError(self, other) old_values = self.values self.set(*self.values, *other.values) changed = old_values != self.values if self.propagate and not other.propagate: self.propagate = False changed = True if not self.concrete and other.concrete: self.concrete = True changed = True if self.type > other.type: self.type = other.type changed = True return changed def append(self, value: Union[str, bool]) -> None: self.set(*self.values, value) def __contains__(self, item: Union[str, bool]) -> bool: return item in self.values def __str__(self) -> str: # boolean variants are printed +foo or ~foo if self.type == VariantType.BOOL: sigil = "+" if self.value else "~" if self.propagate: sigil *= 2 return f"{sigil}{self.name}" # concrete multi-valued foo:=bar,baz concrete = ":" if self.type == VariantType.MULTI and self.concrete else "" delim = "==" if self.propagate else "=" if not self.values: value_str = "*" elif self.name == "patches" and self.concrete: value_str = ",".join(str(x)[:7] for x in self.values) else: value_str = ",".join(str(x) for x in self.values) return f"{self.name}{concrete}{delim}{spack.spec_parser.quote_if_needed(value_str)}" def __repr__(self): return ( f"VariantValue({self.type!r}, {self.name!r}, {self.values!r}, " f"propagate={self.propagate!r}, concrete={self.concrete!r})" ) def MultiValuedVariant(name: str, value: ValueType, propagate: bool = False) -> VariantValue: return VariantValue(VariantType.MULTI, name, value, propagate=propagate, concrete=True) def SingleValuedVariant( name: str, value: Union[bool, str], propagate: bool = False ) -> VariantValue: return VariantValue(VariantType.SINGLE, name, (value,), propagate=propagate) def BoolValuedVariant(name: str, value: bool, propagate: bool = False) -> VariantValue: return VariantValue(VariantType.BOOL, name, (value,), propagate=propagate) # The class below inherit from Sequence to disguise as a tuple and comply # with the semantic expected by the 'values' argument of the variant directive
VariantValue
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/compute.py
{ "start": 35186, "end": 42622 }
class ____(ComputeEngineBaseOperator): """ Creates an Instance Template using specified fields. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:ComputeEngineInsertInstanceTemplateOperator` :param body: Instance template representation as object. :param project_id: Google Cloud project ID where the Compute Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param request_id: Unique request_id that you might add to achieve full idempotence (for example when client call times out repeating the request with the same request id will not create a new instance template again) It should be in UUID format as defined in RFC 4122 :param resource_id: Name of the Instance Template. If the name of Instance Template is not specified in body['name'], the name will be taken from 'resource_id' parameter :param gcp_conn_id: The connection ID used to connect to Google Cloud. Defaults to 'google_cloud_default'. :param api_version: API version used (for example v1 - or beta). Defaults to v1. :param impersonation_chain: Service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). :param retry: A retry object used to retry requests. If `None` is specified, requests will not be retried. :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if `retry` is specified, the timeout applies to each individual attempt. :param metadata: Additional metadata that is provided to the method. 
""" operator_extra_links = (ComputeInstanceTemplateDetailsLink(),) # [START gce_instance_template_insert_fields] template_fields: Sequence[str] = ( "body", "project_id", "request_id", "gcp_conn_id", "api_version", "impersonation_chain", "resource_id", ) # [END gce_instance_template_insert_fields] def __init__( self, *, body: dict, project_id: str = PROVIDE_PROJECT_ID, resource_id: str | None = None, request_id: str | None = None, retry: Retry | None = None, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), gcp_conn_id: str = "google_cloud_default", api_version: str = "v1", validate_body: bool = True, impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: self.body = body self.request_id = request_id if "name" in body: resource_id = self.body["name"] self._field_validator = None # Optional[GcpBodyFieldValidator] self.retry = retry self.timeout = timeout self.metadata = metadata if validate_body: self._field_validator = GcpBodyFieldValidator( GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version ) self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_FIELDS_TO_SANITIZE) super().__init__( project_id=project_id, zone="global", resource_id=resource_id, gcp_conn_id=gcp_conn_id, api_version=api_version, impersonation_chain=impersonation_chain, **kwargs, ) def check_body_fields(self) -> None: required_params = ["machine_type", "disks", "network_interfaces"] for param in required_params: if param not in self.body["properties"]: readable_param = param.replace("_", " ") raise AirflowException( f"The body '{self.body}' should contain at least {readable_param} for the new operator " f"in the '{param}' field. Check (google.cloud.compute_v1.types.Instance) " f"for more details about body fields description." ) def _validate_all_body_fields(self) -> None: if self._field_validator: self._field_validator.validate(self.body) def _validate_inputs(self) -> None: super()._validate_inputs() if not self.resource_id and "name" not in self.body: raise AirflowException( "The required parameters 'resource_id' and body['name'] are missing. " "Please, provide at least one of them." ) def execute(self, context: Context) -> dict: hook = ComputeEngineHook( gcp_conn_id=self.gcp_conn_id, api_version=self.api_version, impersonation_chain=self.impersonation_chain, ) self._validate_all_body_fields() self.check_body_fields() self._field_sanitizer.sanitize(self.body) try: # Idempotence check (sort of) - we want to check if the new Template # is already created and if is, then we assume it was created by previous run # of operator - we do not check if content of the Template # is as expected. Templates are immutable, so we cannot update it anyway # and deleting/recreating is not worth the hassle especially # that we cannot delete template if it is already used in some Instance # Group Manager. 
We assume success if the template is simply present existing_template = hook.get_instance_template( resource_id=self.resource_id, project_id=self.project_id ) except exceptions.NotFound as e: # We actually expect to get 404 / Not Found here as the template should # not yet exist if e.code != 404: raise e else: self.log.info("The %s Template already exists.", existing_template) ComputeInstanceTemplateDetailsLink.persist( context=context, project_id=self.project_id or hook.project_id, ) return InstanceTemplate.to_dict(existing_template) self._field_sanitizer.sanitize(self.body) self.log.info("Creating Instance Template with specified body: %s", self.body) hook.insert_instance_template( body=self.body, request_id=self.request_id, project_id=self.project_id, ) self.log.info("The specified Instance Template has been created SUCCESSFULLY", self.body) new_template = hook.get_instance_template( resource_id=self.resource_id, project_id=self.project_id, ) ComputeInstanceTemplateDetailsLink.persist( context=context, project_id=self.project_id or hook.project_id, ) return InstanceTemplate.to_dict(new_template)
ComputeEngineInsertInstanceTemplateOperator
python
pytorch__pytorch
test/distributed/pipelining/test_unflatten.py
{ "start": 915, "end": 1264 }
class ____(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.block0 = Block()
        self.block1 = Block()

    def forward(self, x: torch.Tensor, constant=None) -> torch.Tensor:
        x = self.block0(x, constant=constant)
        pipe_split()
        x = self.block1(x, constant=constant)
        return x
M
python
dagster-io__dagster
python_modules/dagster/dagster/_core/op_concurrency_limits_counter.py
{ "start": 2500, "end": 11441 }
class ____: def __init__( self, instance: DagsterInstance, runs: Sequence[DagsterRun], in_progress_run_records: Sequence[RunRecord], concurrency_keys: set[str], pool_limits: Sequence[PoolLimit], slot_count_offset: int = 0, pool_granularity: Optional[PoolGranularity] = None, ): self._root_pools_by_run = {} self._concurrency_info_by_key: dict[str, ConcurrencyKeyInfo] = {} self._launched_pool_counts = defaultdict(int) self._in_progress_pool_counts = defaultdict(int) self._slot_count_offset = slot_count_offset self._pool_granularity = pool_granularity if pool_granularity else PoolGranularity.OP self._in_progress_run_ids: set[str] = set( [record.dagster_run.run_id for record in in_progress_run_records] ) self._started_run_pools_allotted_seconds = int( os.getenv("DAGSTER_OP_CONCURRENCY_KEYS_ALLOTTED_FOR_STARTED_RUN_SECONDS", "5") ) queued_pool_names = self._get_queued_pool_names(runs) # initialize all the pool limits to the default if necessary self._initialize_pool_limits(instance, queued_pool_names, pool_limits) # fetch all the configured pool keys all_configured_pool_names = concurrency_keys configured_queued_pool_names = all_configured_pool_names.intersection(queued_pool_names) # fetch all the concurrency info for all of the runs at once, so we can claim in the correct # priority order self._fetch_concurrency_info(instance, configured_queued_pool_names) # fetch all the outstanding pools for in-progress runs self._process_in_progress_runs(in_progress_run_records) def _get_queued_pool_names(self, queued_runs: Sequence[DagsterRun]) -> set[str]: queued_pool_names = set() for run in queued_runs: if run.run_op_concurrency: # if using run granularity, consider all the concurrency keys required by the run # if using op granularity, consider only the root keys run_pools = ( run.run_op_concurrency.root_key_counts.keys() if self._pool_granularity == PoolGranularity.OP else run.run_op_concurrency.all_pools or [] ) queued_pool_names.update(run_pools) return queued_pool_names def _initialize_pool_limits( self, instance: DagsterInstance, pool_names: set[str], pool_limits: Sequence[PoolLimit] ): default_limit = instance.global_op_concurrency_default_limit pool_limits_by_name = {pool.name: pool for pool in pool_limits} for pool_name in pool_names: if pool_name is None: continue if (pool_name not in pool_limits_by_name and default_limit) or ( pool_name in pool_limits_by_name and pool_limits_by_name[pool_name].from_default and pool_limits_by_name[pool_name].limit != default_limit ): instance.event_log_storage.initialize_concurrency_limit_to_default(pool_name) def _fetch_concurrency_info(self, instance: DagsterInstance, pool_names: set[str]): for pool_name in pool_names: if pool_name is None: continue self._concurrency_info_by_key[pool_name] = ( instance.event_log_storage.get_concurrency_info(pool_name) ) def _should_allocate_slots_for_in_progress_run(self, record: RunRecord): if not record.dagster_run.run_op_concurrency: return False status = record.dagster_run.status if status not in IN_PROGRESS_RUN_STATUSES: return False if self._pool_granularity == PoolGranularity.RUN: return True if status == DagsterRunStatus.STARTING: return True if status != DagsterRunStatus.STARTED or not record.start_time: return False time_elapsed = get_current_timestamp() - record.start_time if time_elapsed < self._started_run_pools_allotted_seconds: return True return False def _slot_counts_for_run(self, run: DagsterRun) -> Mapping[str, int]: if not run.run_op_concurrency: return {} if self._pool_granularity == PoolGranularity.OP: 
return {**run.run_op_concurrency.root_key_counts} elif self._pool_granularity == PoolGranularity.RUN: return {pool: 1 for pool in run.run_op_concurrency.all_pools or []} else: check.failed(f"Unexpected pool granularity {self._pool_granularity}") def _process_in_progress_runs(self, in_progress_records: Sequence[RunRecord]): for record in in_progress_records: if not self._should_allocate_slots_for_in_progress_run(record): continue for pool, count in self._slot_counts_for_run(record.dagster_run).items(): self._in_progress_pool_counts[pool] += count def is_blocked(self, run: DagsterRun) -> bool: # if any of the ops in the run can make progress (not blocked by concurrency keys), we # should dequeue if not run.run_op_concurrency: return False if ( self._pool_granularity == PoolGranularity.OP and run.run_op_concurrency.has_unconstrained_root_nodes ): # if the granularity is at the op level and there exists a root node that is not # concurrency blocked, we should dequeue. return False if self._pool_granularity == PoolGranularity.OP: # we just need to check all of the root concurrency keys, instead of all the concurrency keys # in the run for pool in run.run_op_concurrency.root_key_counts.keys(): if pool not in self._concurrency_info_by_key: # there is no concurrency limit set for this key, we should dequeue return False key_info = self._concurrency_info_by_key[pool] available_count = ( key_info.slot_count - len(key_info.pending_steps) - self._launched_pool_counts[pool] - self._in_progress_pool_counts[pool] ) if available_count + self._slot_count_offset > 0: # there exists a root concurrency key that is not blocked, we should dequeue return False # if we reached here, then every root concurrency key is blocked, so we should not dequeue return True elif self._pool_granularity == PoolGranularity.RUN: # if the granularity is at the run level, we should check if any of the concurrency # keys are blocked for pool in run.run_op_concurrency.all_pools or []: if pool not in self._concurrency_info_by_key: # there is no concurrency limit set for this key continue key_info = self._concurrency_info_by_key[pool] available_count = ( key_info.slot_count - self._launched_pool_counts[pool] - self._in_progress_pool_counts[pool] ) if available_count + self._slot_count_offset <= 0: return True # if we reached here then there is at least one available slot for every single concurrency key # required by this run, so we should dequeue return False else: check.failed(f"Unexpected pool granularity {self._pool_granularity}") def get_blocked_run_debug_info(self, run: DagsterRun) -> Mapping: if not run.run_op_concurrency: return {} log_info = {} for pool in run.run_op_concurrency.root_key_counts.keys(): concurrency_info = self._concurrency_info_by_key.get(pool) if not concurrency_info: continue log_info[pool] = { "granularity": self._pool_granularity.value, "slot_count": concurrency_info.slot_count, "pending_step_count": len(concurrency_info.pending_steps), "pending_step_run_ids": list( {step.run_id for step in concurrency_info.pending_steps} ), "launched_count": self._launched_pool_counts[pool], "in_progress_count": self._in_progress_pool_counts[pool], } return log_info def update_counters_with_launched_item(self, run: DagsterRun): for pool, count in self._slot_counts_for_run(run).items(): self._launched_pool_counts[pool] += count
GlobalOpConcurrencyLimitsCounter
python
modin-project__modin
modin/tests/core/storage_formats/pandas/test_internals.py
{ "start": 60105, "end": 60820 }
class ____:
    """
    A dummy object emulating future's behaviour, this class is used in
    ``test_call_queue_serialization``.

    It stores a random numeric value representing its data and `was_materialized` state.
    Initially this object is considered to be serialized, the state can be changed by
    calling the ``.materialize()`` method.
    """

    def __init__(self):
        self._value = np.random.randint(0, 1_000_000)
        self._was_materialized = False

    def materialize(self):
        self._was_materialized = True
        return self

    def __eq__(self, other):
        if isinstance(other, type(self)) and self._value == other._value:
            return True
        return False
DummyFuture
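A minimal sketch of the intended behaviour of the dummy future above (illustrative only, not Modin public API).

fut = DummyFuture()
assert fut._was_materialized is False   # starts out "serialized"
same = fut.materialize()                # flips the flag and returns self
assert same is fut and fut._was_materialized is True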
python
pytorch__pytorch
torch/testing/_internal/common_device_type.py
{ "start": 62563, "end": 62719 }
class ____(dtypes):
    def __init__(self, *args):
        super().__init__(*args, device_type="cuda")


# Overrides specified dtypes on Intel GPU.
dtypesIfCUDA
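A hedged sketch of how the decorator above is typically applied in PyTorch's device-generic tests; the test class and method names below are illustrative, not taken from the source file.

import torch

class TestAddExample:
    @dtypesIfCUDA(torch.float16, torch.float32)   # CUDA runs get only these dtypes
    def test_add(self, device, dtype):
        x = torch.ones(2, device=device, dtype=dtype)
        assert (x + x).dtype == dtype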
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/operators/glue_databrew.py
{ "start": 1455, "end": 6169 }
class ____(AwsBaseOperator[GlueDataBrewHook]): """ Start an AWS Glue DataBrew job. AWS Glue DataBrew is a visual data preparation tool that makes it easier for data analysts and data scientists to clean and normalize data to prepare it for analytics and machine learning (ML). .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:GlueDataBrewStartJobOperator` :param job_name: unique job name per AWS Account :param wait_for_completion: Whether to wait for job run completion. (default: True) :param deferrable: If True, the operator will wait asynchronously for the job to complete. This implies waiting for completion. This mode requires aiobotocore module to be installed. (default: False) :param waiter_delay: Time in seconds to wait between status checks. Default is 30. :param waiter_max_attempts: Maximum number of attempts to check for job completion. (default: 60) :return: dictionary with key run_id and value of the resulting job's run_id. :param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node). :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. :param verify: Whether or not to verify SSL certificates. See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html :param botocore_config: Configuration dictionary (key-values) for botocore client. See: https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html """ aws_hook_class = GlueDataBrewHook template_fields: Sequence[str] = aws_template_fields( "job_name", "wait_for_completion", "waiter_delay", "waiter_max_attempts", "deferrable", ) def __init__( self, job_name: str, wait_for_completion: bool = True, delay: int | None = None, waiter_delay: int = 30, waiter_max_attempts: int = 60, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), **kwargs, ): super().__init__(**kwargs) self.job_name = job_name self.wait_for_completion = wait_for_completion self.waiter_delay = waiter_delay self.waiter_max_attempts = waiter_max_attempts self.deferrable = deferrable def execute(self, context: Context): job = self.hook.conn.start_job_run(Name=self.job_name) run_id = job["RunId"] self.log.info("AWS Glue DataBrew Job: %s. Run Id: %s submitted.", self.job_name, run_id) if self.deferrable: self.log.info("Deferring job %s with run_id %s", self.job_name, run_id) self.defer( trigger=GlueDataBrewJobCompleteTrigger( job_name=self.job_name, run_id=run_id, waiter_delay=self.waiter_delay, waiter_max_attempts=self.waiter_max_attempts, aws_conn_id=self.aws_conn_id, region_name=self.region_name, verify=self.verify, botocore_config=self.botocore_config, ), method_name="execute_complete", ) elif self.wait_for_completion: self.log.info( "Waiting for AWS Glue DataBrew Job: %s. 
Run Id: %s to complete.", self.job_name, run_id ) status = self.hook.job_completion( job_name=self.job_name, delay=self.waiter_delay, run_id=run_id, max_attempts=self.waiter_max_attempts, ) self.log.info("Glue DataBrew Job: %s status: %s", self.job_name, status) return {"run_id": run_id} def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> dict[str, str]: validated_event = validate_execute_complete_event(event) if validated_event["status"] != "success": raise AirflowException("Error while running AWS Glue DataBrew job: %s", validated_event) run_id = validated_event.get("run_id", "") status = validated_event.get("status", "") self.log.info("AWS Glue DataBrew runID: %s completed with status: %s", run_id, status) return {"run_id": run_id}
GlueDataBrewStartJobOperator
python
django__django
tests/auth_tests/test_auth_backends.py
{ "start": 25171, "end": 26118 }
class ____(TestCase):
    """
    The model backend can accept a credentials kwarg labeled with custom
    user model's USERNAME_FIELD.
    """

    def test_authenticate(self):
        test_user = CustomUser._default_manager.create_user(
            email="test@example.com", password="test", date_of_birth=date(2006, 4, 25)
        )
        authenticated_user = authenticate(email="test@example.com", password="test")
        self.assertEqual(test_user, authenticated_user)

    async def test_aauthenticate(self):
        test_user = await CustomUser._default_manager.acreate_user(
            email="test@example.com", password="test", date_of_birth=date(2006, 4, 25)
        )
        authenticated_user = await aauthenticate(
            email="test@example.com", password="test"
        )
        self.assertEqual(test_user, authenticated_user)


@override_settings(AUTH_USER_MODEL="auth_tests.UUIDUser")
CustomUserModelBackendAuthenticateTest
python
openai__openai-python
tests/lib/schema_types/query.py
{ "start": 778, "end": 935 }
class ____(BaseModel):
    name: Optional[str] = None
    table_name: Table
    columns: List[Column]
    conditions: List[Condition]
    order_by: OrderBy
Query
python
Textualize__textual
docs/examples/styles/text_align.py
{ "start": 301, "end": 718 }
class ____(App):
    CSS_PATH = "text_align.tcss"

    def compose(self):
        yield Grid(
            Label("[b]Left aligned[/]\n" + TEXT, id="one"),
            Label("[b]Center aligned[/]\n" + TEXT, id="two"),
            Label("[b]Right aligned[/]\n" + TEXT, id="three"),
            Label("[b]Justified[/]\n" + TEXT, id="four"),
        )


if __name__ == "__main__":
    app = TextAlign()
    app.run()
TextAlign
python
pytorch__pytorch
torch/_inductor/scheduler.py
{ "start": 16507, "end": 47978 }
class ____: ancestors: OrderedSet[str] group: tuple[torch.device, tuple[tuple[sympy.Expr, ...], ...]] last_usage: OrderedSet[str] # .min_order and .max_order are only relevant for "grouped" nodes such as FusedSchedulerNode. # e.g. if the FusedSchedulerNode includes nodes (op_1, op_2, op_3), and op_X is X-th node # in `self.scheduler.nodes`, then for this FusedSchedulerNode, .min_order is 1 and .max_order is 3. # For non-"grouped" nodes (i.e. regular SchedulerNode), # .min_order = .max_order = X if this node is X-th node in `self.scheduler.nodes`. min_order: int max_order: int mpi_node: MemoryPlanningInfoForNode mutation_renames: dict[str, str] node: Optional[ir.Operation] = None outputs: list[SchedulerBuffer] outputs_by_name: dict[str, SchedulerBuffer] override_estimated_runtime: Optional[float] = None read_writes: dependencies.ReadWrites unmet_dependencies: OrderedSet[Dep] written: bool = False def __init__(self, scheduler: Scheduler) -> None: self.scheduler: Scheduler = scheduler self.debug_device_str: Callable[[BaseSchedulerNode], list[str]] = ( lambda *args, **kwargs: [] ) def _init_from_node(self, node: ir.Operation) -> None: self.node = node self.ancestors = OrderedSet() self.last_usage = OrderedSet[ str ]() # buffers that won't be used after this kernel self.written = False self.outputs = [ SchedulerBuffer( scheduler=self.scheduler, node=output, defining_op=self, ) for output in node.get_outputs() ] self.outputs_by_name = {buf.get_name(): buf for buf in self.outputs} # mutation_renames for the current node. Due to potential # more mutations happening later, this can be different # to Scheduler.mutation_renames. Also this dict should be small # since only mutation information relevant to the deps for this # node is stored here. self.mutation_renames = {} def __repr__(self) -> str: return f"{type(self).__name__}(name={self.get_name()!r})" def debug_str(self) -> str: """Longer form printout for trace logs""" name = self.get_name() buf = IndentedBuffer() buf.splice( f"""\ {name}: {type(self).__name__}({type(getattr(self, "node", None)).__name__}) {name}.writes = {pformat(self.read_writes.writes)} {name}.unmet_dependencies = {pformat(self.unmet_dependencies)} {name}.met_dependencies = {pformat(self.read_writes.reads - self.unmet_dependencies)} {name}.outputs = [ """ ) with buf.indent(): for out in self.get_outputs(): buf.splice(out.debug_str()) buf.writeline("]") try: buf.splice(self.debug_str_extra()) except Exception: log.warning("Ignoring error in debug_str()", exc_info=True) return buf.getrawvalue().rstrip() def debug_str_extra(self) -> str: return "" def _debug_str_for_device(self) -> list[str]: return self.debug_device_str(self) def debug_str_short(self) -> str: maybe_data = getattr(self.node, "data", None) data_str = "" if isinstance(maybe_data, torch._inductor.ir.Pointwise): data_str = ", " + maybe_data.str_helper( [maybe_data.get_size()], shorten=False, multiline=False ) elif isinstance(maybe_data, torch._inductor.ir.Reduction): data_str = ", " + maybe_data.str_helper( [maybe_data.get_reduction_size(), maybe_data.get_reduction_type()], shorten=False, multiline=False, ) return f"{self}{data_str}" def log_details(self) -> None: log.info( "%s: unmet_dependencies = %s, writes = %s", self, self.unmet_dependencies, self.read_writes.writes, ) def reorder_loops_by_dep_pair( self, self_dep: MemoryDep, other_dep: MemoryDep ) -> bool: return False def update_mutated_names(self, renames: dict[str, str]) -> None: self.mutation_renames = { name: renames[name] for name in (dep.name for dep in 
self.read_writes.reads_and_writes()) if name in renames } self.set_read_writes(self.read_writes.rename(self.mutation_renames)) def add_fake_dep(self, dep: Dep) -> None: self.set_read_writes(self.read_writes.with_read(dep)) def has_aliasing_or_mutation(self) -> bool: return any( buf.get_aliases() or buf.get_mutations() for buf in self.get_outputs() ) def set_read_writes(self, rw: dependencies.ReadWrites) -> None: self.read_writes = rw self.unmet_dependencies = self.read_writes.reads self.prune_deps() def set_last_usage( self, future_used_buffers: OrderedSet[str], mutation_real_name: dict[str, str] ) -> None: used_buffers = self.used_or_aliased_buffer_names() used_buffers = OrderedSet(mutation_real_name.get(k, k) for k in used_buffers) self.last_usage = used_buffers - future_used_buffers def mark_run(self) -> None: for buf in self.outputs: buf.allocate() def used_buffer_names(self) -> OrderedSet[str]: return OrderedSet( dep.name for dep in itertools.chain(self.read_writes.reads, self.read_writes.writes) ) def used_or_aliased_buffer_names(self) -> OrderedSet[str]: used_names: OrderedSet[str] = OrderedSet() deps = [ dep.name for dep in itertools.chain(self.read_writes.reads, self.read_writes.writes) ] while len(deps) > 0: dep = deps.pop() used_names.add(dep) if V.graph.name_to_buffer.get(dep): deps.extend( alias for alias in V.graph.name_to_buffer[ dep ].get_inputs_that_alias_output() if alias not in used_names ) return used_names def prune_deps(self) -> None: self.unmet_dependencies = OrderedSet( dep for dep in self.unmet_dependencies if dep.name not in self.scheduler.available_buffer_names ) def prune_weak_deps(self) -> None: # Prune weak dependencies on operations that have been removed def should_prune(dep: Dep) -> bool: if not isinstance(dep, WeakDep): return False op_name = self.scheduler.name_to_buf[dep.name].defining_op_name() return op_name in V.graph.removed_operations to_remove = OrderedSet( dep for dep in self.read_writes.reads if should_prune(dep) ) self.set_read_writes(self.read_writes.remove_reads(to_remove)) def prune_redundant_deps( self, name_to_fused_node: dict[str, BaseSchedulerNode] ) -> None: _prune_redundant_deps(self, name_to_fused_node, self.scheduler.name_to_buf) def get_name(self) -> str: assert self.node is not None return self.node.get_operation_name() def get_first_name(self) -> str: return self.get_name() @cache_on_self def get_operation_names(self) -> OrderedSet[str]: return OrderedSet(node.get_name() for node in self.get_nodes()) @cache_on_self def get_buffer_names(self) -> OrderedSet[str]: return OrderedSet(out.get_name() for out in self.outputs) @cache_on_self def can_codegen_in_low_precision(self) -> bool: return all( isinstance(n, SchedulerNode) and can_codegen_without_upcasts(n, disallow_fp32_ops=True) for n in self.get_nodes() ) @cache_on_self def can_codegen_without_upcasts(self) -> bool: return all( isinstance(n, SchedulerNode) and can_codegen_without_upcasts(n) for n in self.get_nodes() ) def get_nodes(self) -> Sequence[BaseSchedulerNode]: return [self] def get_outputs(self) -> Sequence[SchedulerBuffer]: return self.outputs def get_output(self, buf_name: str) -> SchedulerBuffer: return self.outputs_by_name[buf_name] def get_device(self) -> Optional[torch.device]: assert self.node is not None return self.node.get_device() def is_cpu(self) -> bool: device = self.get_device() return device is not None and device.type == "cpu" def is_gpu(self) -> bool: device = self.get_device() return device is not None and is_gpu(device.type) def is_reduction(self) -> 
bool: return False def is_native_matmul(self) -> bool: return False def is_split_scan(self) -> bool: return False def is_template(self) -> bool: return False def is_extern(self) -> bool: return False def is_foreach(self) -> bool: return False def can_inplace(self, read_dep: dependencies.Dep) -> bool: return False def has_side_effects(self) -> bool: return False def decide_inplace_update(self) -> None: """ Decide if there should be inplace updates for the node and record the decision in the active kernel. """ from .codegen.wrapper import can_match_buffer_size if not ( isinstance(self, SchedulerNode) and config.inplace_buffers and V.graph.has_feature(self.get_device(), BackendFeature.INPLACE_BUFFERS) and ( not isinstance(V.kernel, torch._inductor.codegen.simd.SIMDKernel) or getattr(V.kernel, "mutations", None) is not None ) # hacky check for if V.kernel is a real kernel or NullHandler and hasattr(V.kernel, "args") ): return # NOTE remove V.graph.removed_operations once deps issue is fixed inconsequential_nodes = ( self.ancestors | V.graph.removed_operations | self.scheduler.completed_operations ) def single_index_in_fused_node(buf_to_be_inplaced: SchedulerBuffer) -> bool: # Inside of NodeUser, we track that the read and write are equivalent # before deciding if the use can be inplace. # But if that use is fused into a larger kernel, we need to check equivalence # of other accesses in fused scheduler node as well. fused_node = buf_to_be_inplaced.scheduler.get_fused_node(self) buf_name = buf_to_be_inplaced.get_name() # Dedup read/writes with equivalent indices # TODO - would be nice if we could just cache accesses on ReadWrites, # and enforce variant that this class & members are functional.. deps: OrderedSet[Dep] = OrderedSet() for user in buf_to_be_inplaced.users: user_node = user.node if not isinstance(user_node, BaseSchedulerNode): continue if ( user_node.get_first_name() not in buf_to_be_inplaced.scheduler.name_to_fused_node or buf_to_be_inplaced.scheduler.get_fused_node(user_node) is not fused_node ): continue deps |= ( o for o in user_node.read_writes.reads_and_writes() if o.name == buf_name ) if len(deps) > 1: return False return True for buf in self.get_outputs(): buf_node = buf.node assert buf_node is not None if ( not buf_node.should_allocate() or buf_node.get_inputs_that_alias_output() or buf_node.get_mutation_names() or buf.get_name() in V.graph.removed_buffers ): continue for read in self.read_writes.reads: input_buf: Optional[Union[SchedulerBuffer, SchedulerDonatedBuffer]] if read.name in self.scheduler.name_to_donated_buffer: input_buf = self.scheduler.name_to_donated_buffer[read.name] else: input_buf = self.scheduler.name_to_buf.get(read.name) if ( input_buf and V.graph.wrapper_code.can_reuse(input_buf, self) and not isinstance(input_buf.defining_op, NopKernelSchedulerNode) ): assert input_buf.users is not None remaining_uses = [ x for x in input_buf.users if x.node.get_name() not in inconsequential_nodes ] if ( len(remaining_uses) == 1 and remaining_uses[0].can_inplace and remaining_uses[0].node is self and input_buf.node is not None and not isinstance( input_buf.node.get_output_spec(), ( ir.NoneLayout, ir.MultiOutputLayout, ir.MutationLayoutSHOULDREMOVE, ), ) and not ( input_buf.defining_op and isinstance( input_buf.defining_op.node, (ir.FallbackKernel, ir.MultiOutput), ) and len(input_buf.node.get_inputs_that_alias_output()) > 0 ) and can_match_buffer_size(input_buf.node, buf.node) and single_index_in_fused_node(input_buf) ): # if there isn't a triton kernel, then we don't 
need to call triton-specific things. # but TODO this might be a convenient place to signal to the Collective kernels to inplace # (and, can we make "kernel" less generic of a name?) V.kernel.args.make_inplace(input_buf.get_name(), buf.get_name()) # mutations not tracked in cpp kernels if isinstance( V.kernel, torch._inductor.codegen.simd.SIMDKernel ): V.kernel.mutations.add(input_buf.get_name()) V.kernel.mutations.add(buf.get_name()) V.kernel.inplace_update_buffers[buf.get_name()] = ( input_buf.get_name() ) break def codegen_originating_info( self, buffer: IndentedBuffer, only_once: bool = True ) -> None: if not config.comment_origin: return if only_once and self.written: return assert self.node is not None origins = self.node.get_origins() out_lines = [] for o in origins: if o.op == "output": # These are boring and samey continue out_lines.append("") # TODO(voz): Should the pragma be constant somewhere? out_lines.append("#pragma CMT ORIGIN:") op_info_str = f"#pragma CMT {o.op} {o.target}" if "seq_nr" in o.meta: op_info_str = op_info_str + f" seq_nr:{o.meta['seq_nr']}" out_lines.append(op_info_str) if "stack_trace" in o.meta: stack_trace = f"{o.meta['stack_trace']}" stack_trace_last_line = stack_trace.rsplit("|", maxsplit=1)[-1] out_lines.append( "#pragma CMT " + stack_trace_last_line.replace("{", "{{") .replace("}", "}}") .replace("\n", "\\") .replace( "\\", "\\\\" ) # For windows safe path, avoid for example \x, \U. ) out_lines.append("#pragma CMT END ORIGIN") out_lines.append("") if len(out_lines) == 0: return # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does # not use BracesBuffer, so we have no good indicator of a C++ buffer atm. buffer.writelines(out_lines) self.written = True @cache_on_self def get_read_write_buffers_sizes(self) -> int: return self.get_read_write_buffers_sizes_impl( include_reads=True, include_writes=True ) @cache_on_self def get_read_buffer_sizes(self) -> int: return self.get_read_write_buffers_sizes_impl( include_reads=True, include_writes=False ) @cache_on_self def get_write_buffer_sizes(self) -> int: return self.get_read_write_buffers_sizes_impl( include_reads=False, include_writes=True ) def get_read_write_buffers_sizes_impl( self, include_reads: bool, include_writes: bool ) -> int: return sum( self.get_read_write_buffer_accesses( include_reads=include_reads, include_writes=include_writes ).values(), start=0, ) def get_read_write_buffer_accesses( self, include_reads: bool, include_writes: bool ) -> dict[str, int]: """ Counting the number of bytes accessed for a kernel is surprisingly tricky. In particular, there is a differentiation between 'theoretical' memory accesses and practical memory accesses. For example, a layernorm kernel may actually access an input 3 times, but in theory, it only needs to access its input once (and may be optimized to do so through say, persistent reductions) Another example is that even though a buffer is passed in, we may not access the entire buffer. This may occur if we are accessing a slice of the buffer. Another tricky case is for indirect indexing, where the amount of bytes accessed depends on the values of the input. What this function aims to compute is the memory accesses for worst-case inputs, best-case optimization. What this means is that for each buffer we compute the amount of potential accesses in two ways and take the minimum. 1. Numel in ranges multiplied by number of deps the buffer has 2. The buffer size Returns memory accesses per buffer. 
""" if isinstance(self, NopKernelSchedulerNode): return {} if isinstance(self, ExternKernelSchedulerNode) and isinstance( self.node, MultiOutput ): # todo: Calculate this - it's kinda annoying. return {} if ( isinstance(self, ExternKernelSchedulerNode) and isinstance(self.node, ir.FallbackKernel) and self.node.op_overload is torch._prims.rng_prims.graphsafe_run_with_rng_state ): return {} def try_size_hint(s: sympy.Expr) -> int: return V.graph.sizevars.size_hint(s, fallback=0) if isinstance(self, SchedulerNode): node_numel = try_size_hint( sympy_product(self.get_ranges()[0]) * sympy_product(self.get_ranges()[1]), ) else: node_numel = int(1e9) buf_accesses = collections.defaultdict(list) if include_reads: for dep in self.read_writes.reads: buf_accesses[dep.name].append(dep) if include_writes: for dep in self.read_writes.writes: buf_accesses[dep.name].append(dep) reads = ( OrderedSet(dep.name for dep in self.read_writes.reads) if include_reads else OrderedSet() ) writes = ( OrderedSet(dep.name for dep in self.read_writes.writes) if include_writes else OrderedSet() ) def is_materialized(buf: str, snodes: Sequence[BaseSchedulerNode]) -> bool: users = self.scheduler.name_to_buf[buf].users buf_uses = OrderedSet(user.node for user in users) return len(buf_uses - OrderedSet(snodes)) > 0 if isinstance(self, FusedSchedulerNode): removed_buffers = OrderedSet( dep for dep in writes if not is_materialized(dep, self.snodes) ) writes = writes - removed_buffers reads = reads - removed_buffers buf_byte_accesses: dict[str, int] = {} for buf_name in reads | writes: buf_accessed_elems = sum(node_numel for dep in buf_accesses[buf_name]) buf: Union[ir.Buffer, ir.TensorBox, ir.TorchBindObject] if buf_name in V.graph.name_to_buffer: buf = V.graph.name_to_buffer[buf_name] elif buf_name in V.graph.graph_inputs: buf = V.graph.graph_inputs[buf_name] else: continue def get_buf_bytes( buf: Optional[Union[ir.Buffer, ir.TensorBox, ir.TorchBindObject]], ) -> int: if not buf: return 0 if isinstance(buf, ir.TorchBindObject): return buf.get_buf_bytes() elif isinstance(buf.layout, MultiOutputLayout): # Kind of a lazy way to get the MultiOutput nodes corresponding to # a MultiOutputLayout users = self.scheduler.name_to_buf[buf.get_name()].users tot = 0 for user in users: assert isinstance(user.node, BaseSchedulerNode) if isinstance(user.node.node, MultiOutput): for sched_buf in user.node.get_outputs(): tot += get_buf_bytes(sched_buf.node) else: # Buf is a MultiOutputLayout but not all of its # users are MultiOutputs... 
# TODO: Figure out what's going on return 0 return tot elif isinstance(buf.layout, ir.NoneLayout): return sum( get_buf_bytes(V.graph.get_buffer(mut_name)) for mut_name in buf.get_mutation_names() ) else: buf_elems = try_size_hint(sympy_product(buf.get_size())) return get_dtype_size(buf.get_dtype()) * min( buf_accessed_elems, buf_elems ) buf_bytes = get_buf_bytes(buf) if buf_name not in buf_byte_accesses: buf_byte_accesses[buf_name] = buf_bytes else: buf_byte_accesses[buf_name] += buf_bytes return buf_byte_accesses @cache_on_self def estimate_flops(self) -> int | None: if self.node is None: return None fx_node = self.node.get_origin_node() if fx_node is None: return None flops = count_flops_fx(fx_node) if flops is None: return None resolved_flops = V.graph.sizevars.size_hint(flops, fallback=0) counters["inductor"]["flop_count"] += resolved_flops return resolved_flops def get_estimated_runtime(self) -> float: if self.override_estimated_runtime is not None: return self.override_estimated_runtime return self._get_estimated_runtime() @cache_on_self def _get_estimated_runtime(self) -> float: """ Returns estimated op runtime in milliseconds (ms) """ buf = self.get_nodes()[0].get_outputs()[0] layout = buf.node.get_output_spec() if not is_gpu(get_device_type(layout)): # default to no reordering based on runtime return 0 # Collective kernels if is_collective(self.node): assert isinstance(self.node, ir.IRNode) try: if config_comms.runtime_estimations_use_nccl_lib_estimations: cache_key = get_estimate_runtime_cache_key_from_snode(self) cache = get_estimate_runtime_cache() cache_val = cache.lookup(cache_key) if cache_val is not None: assert isinstance(cache_val, float) return cache_val ms = estimate_nccl_collective_runtime_nccl_estimator(self) if ms is None: # NCCL estimations fail: fallback to in-tree algorithmic estimation. ms = estimate_nccl_collective_runtime(self.node) cache.set_value(cache_key, value=ms) return ms return estimate_nccl_collective_runtime(self.node) except ValueError as e: # We don't know how to estimate runtime for this collective, # falling back to 0 log.info(e) # noqa: G200 return 0 except TypeError as e: # this happens when the collective is not of type ir._CollectiveKernel log.info(e) # noqa: G200 return 0 elif is_wait(self.node): # ir.Wait is only used for collective ops. # The time needed for the collective op is already estimated and considered # when we are processing the collective op IR node, so ir.Wait takes 0 time # since it doesn't take extra time to get the result after the collective is completed. return 0 ret = maybe_estimate_runtime_benchmark(self) if ret is not None: return ret dtype = buf.node.maybe_get_dtype() try: gpu_memory_bandwidth = get_gpu_dram_gbps() gpu_flops = get_device_tflops(dtype) * 10**12 # If cudaGetDeviceProperties returns 0 for gpu_memory_bandwidth or gpu_flops # there is a chance to continue execution successfully. Otherwise, it would fail with # ZeroDivisionError below. 
if gpu_memory_bandwidth <= 0: raise AssertionError( f"gpu_memory_bandwidth cannot be <= 0, but got {gpu_memory_bandwidth}" ) if gpu_flops <= 0: raise AssertionError(f"gpu_flops cannot be <= 0, but got {gpu_flops}") except Exception: return 0 flops_est = self.estimate_flops() if flops_est == 0 or flops_est is None: # no flops estimate, so fall back to memory estimate ns = self.get_read_write_buffers_sizes() / gpu_memory_bandwidth ms = ns / 1e6 return ms # TODO(xmfan): find a better heuristic to model FLOPS/latency relationship factor = 1.0 counted_bytes = self.get_read_write_buffers_sizes() counted_bytes = 0 if counted_bytes is None else counted_bytes compute_time = (factor * flops_est / gpu_flops) * 1e9 transfer_time = counted_bytes / gpu_memory_bandwidth # Return estimated runtime in milliseconds ns = max(compute_time, transfer_time) ms = ns / 1e6 return ms def get_template_node(self) -> Optional[ir.TemplateBuffer]: return None def get_template_node_or_throw(self) -> ir.TemplateBuffer: template = self.get_template_node() assert template is not None return template @staticmethod def get_prologue_template_epilogue( nodes: list[BaseSchedulerNode], ) -> tuple[list[BaseSchedulerNode], BaseSchedulerNode, list[BaseSchedulerNode]]: """ For the list of nodes, get the prologue, template, and epilogue """ template_index = next(i for i, n in enumerate(nodes) if n.is_template()) prologue = nodes[:template_index] template_node = nodes[template_index] epilogue = nodes[template_index + 1 :] return prologue, template_node, epilogue @functools.cache def get_estimate_runtime_cache() -> torch._inductor.codecache.LocalCache: return torch._inductor.codecache.LocalCache() def get_estimate_runtime_cache_key_from_snode(snode: BaseSchedulerNode) -> str: python_kernel_name = getattr(snode.node, "python_kernel_name", "") args = snode.node.inputs # type: ignore[union-attr] args = snode.node.fill_non_provided_args( # type: ignore[union-attr] [*args, *snode.node.constant_args], # type: ignore[union-attr] snode.node.kwargs, # type: ignore[union-attr] ) kwargs = snode.node.kwargs # type: ignore[union-attr] flat_args, flat_args_pytree_spec = pytree.tree_flatten((args, kwargs)) def _is_tensor_ir(x) -> bool: # type: ignore[no-untyped-def] return isinstance(x, ir.IRNode) and not isinstance(x, ir.GeneratorState) cache_key = str( (python_kernel_name,) + tuple(tuple(a.get_size()) if _is_tensor_ir(a) else None for a in flat_args) ) return cache_key def _get_mm_like_fn(snode: BaseSchedulerNode) -> Optional[Callable[[Any], Any]]: if not isinstance(snode, ExternKernelSchedulerNode): return None mms_fns = { "extern_kernels.mm": torch.ops.aten.mm, "extern_kernels.bmm": torch.ops.aten.bmm, "extern_kernels.addmm": torch.ops.aten.addmm, } python_kernel_name = getattr(snode.node, "python_kernel_name", "") if python_kernel_name not in mms_fns: return None if not isinstance(snode.node, ir.ExternKernel): return None return mms_fns[python_kernel_name] def maybe_estimate_runtime_benchmark(snode: BaseSchedulerNode) -> Optional[float]: bench_fn = None args_kwargs_fn = None if config.runtime_estimations_mms_benchmark: mm_fn = _get_mm_like_fn(snode) if mm_fn is None: return None bench_fn = mm_fn # pyrefly: ignore [unbound-name] args_kwargs_fn = lambda: snode_args_kwargs(snode) # noqa: E731 else: return None cache_key = get_estimate_runtime_cache_key_from_snode(snode) cache = get_estimate_runtime_cache() cache_val = cache.lookup(cache_key) if cache_val is not None: assert isinstance(cache_val, float) return cache_val from .utils import 
snode_args_kwargs args, kwargs = args_kwargs_fn() from torch._inductor.runtime.benchmarking import benchmarker ms = benchmarker.benchmark(bench_fn, args, kwargs) # type: ignore[arg-type] cache.set_value(cache_key, value=ms) return ms @dataclasses.dataclass(slots=True)
BaseSchedulerNode
python
langchain-ai__langchain
libs/langchain/langchain_classic/chains/combine_documents/reduce.py
{ "start": 5102, "end": 14286 }
class ____(BaseCombineDocumentsChain): """Combine documents by recursively reducing them. This involves - `combine_documents_chain` - `collapse_documents_chain` `combine_documents_chain` is ALWAYS provided. This is final chain that is called. We pass all previous results to this chain, and the output of this chain is returned as a final result. `collapse_documents_chain` is used if the documents passed in are too many to all be passed to `combine_documents_chain` in one go. In this case, `collapse_documents_chain` is called recursively on as big of groups of documents as are allowed. Example: ```python from langchain_classic.chains import ( StuffDocumentsChain, LLMChain, ReduceDocumentsChain, ) from langchain_core.prompts import PromptTemplate from langchain_openai import OpenAI # This controls how each document will be formatted. Specifically, # it will be passed to `format_document` - see that function for more # details. document_prompt = PromptTemplate( input_variables=["page_content"], template="{page_content}" ) document_variable_name = "context" model = OpenAI() # The prompt here should take as an input variable the # `document_variable_name` prompt = PromptTemplate.from_template("Summarize this content: {context}") llm_chain = LLMChain(llm=model, prompt=prompt) combine_documents_chain = StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, document_variable_name=document_variable_name, ) chain = ReduceDocumentsChain( combine_documents_chain=combine_documents_chain, ) # If we wanted to, we could also pass in collapse_documents_chain # which is specifically aimed at collapsing documents BEFORE # the final call. prompt = PromptTemplate.from_template("Collapse this content: {context}") llm_chain = LLMChain(llm=model, prompt=prompt) collapse_documents_chain = StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, document_variable_name=document_variable_name, ) chain = ReduceDocumentsChain( combine_documents_chain=combine_documents_chain, collapse_documents_chain=collapse_documents_chain, ) ``` """ combine_documents_chain: BaseCombineDocumentsChain """Final chain to call to combine documents. This is typically a `StuffDocumentsChain`. """ collapse_documents_chain: BaseCombineDocumentsChain | None = None """Chain to use to collapse documents if needed until they can all fit. If `None`, will use the `combine_documents_chain`. This is typically a `StuffDocumentsChain`. """ token_max: int = 3000 """The maximum number of tokens to group documents into. For example, if set to 3000 then documents will be grouped into chunks of no greater than 3000 tokens before trying to combine them into a smaller chunk. """ collapse_max_retries: int | None = None """The maximum number of retries to collapse documents to fit `token_max`. If `None`, it will keep trying to collapse documents to fit `token_max`. Otherwise, after it reaches the max number, it will throw an error. """ model_config = ConfigDict( arbitrary_types_allowed=True, extra="forbid", ) @property def _collapse_chain(self) -> BaseCombineDocumentsChain: if self.collapse_documents_chain is not None: return self.collapse_documents_chain return self.combine_documents_chain def combine_docs( self, docs: list[Document], token_max: int | None = None, callbacks: Callbacks = None, **kwargs: Any, ) -> tuple[str, dict]: """Combine multiple documents recursively. Args: docs: List of documents to combine, assumed that each one is less than `token_max`. 
token_max: Recursively creates groups of documents less than this number of tokens. callbacks: Callbacks to be passed through **kwargs: additional parameters to be passed to LLM calls (like other input variables besides the documents) Returns: The first element returned is the single string output. The second element returned is a dictionary of other keys to return. """ result_docs, _ = self._collapse( docs, token_max=token_max, callbacks=callbacks, **kwargs, ) return self.combine_documents_chain.combine_docs( docs=result_docs, callbacks=callbacks, **kwargs, ) async def acombine_docs( self, docs: list[Document], token_max: int | None = None, callbacks: Callbacks = None, **kwargs: Any, ) -> tuple[str, dict]: """Async combine multiple documents recursively. Args: docs: List of documents to combine, assumed that each one is less than `token_max`. token_max: Recursively creates groups of documents less than this number of tokens. callbacks: Callbacks to be passed through **kwargs: additional parameters to be passed to LLM calls (like other input variables besides the documents) Returns: The first element returned is the single string output. The second element returned is a dictionary of other keys to return. """ result_docs, _ = await self._acollapse( docs, token_max=token_max, callbacks=callbacks, **kwargs, ) return await self.combine_documents_chain.acombine_docs( docs=result_docs, callbacks=callbacks, **kwargs, ) def _collapse( self, docs: list[Document], token_max: int | None = None, callbacks: Callbacks = None, **kwargs: Any, ) -> tuple[list[Document], dict]: result_docs = docs length_func = self.combine_documents_chain.prompt_length num_tokens = length_func(result_docs, **kwargs) def _collapse_docs_func(docs: list[Document], **kwargs: Any) -> str: return self._collapse_chain.run( input_documents=docs, callbacks=callbacks, **kwargs, ) _token_max = token_max or self.token_max retries: int = 0 while num_tokens is not None and num_tokens > _token_max: new_result_doc_list = split_list_of_docs( result_docs, length_func, _token_max, **kwargs, ) result_docs = [ collapse_docs(docs_, _collapse_docs_func, **kwargs) for docs_ in new_result_doc_list ] num_tokens = length_func(result_docs, **kwargs) retries += 1 if self.collapse_max_retries and retries == self.collapse_max_retries: msg = f"Exceed {self.collapse_max_retries} tries to \ collapse document to {_token_max} tokens." raise ValueError(msg) return result_docs, {} async def _acollapse( self, docs: list[Document], token_max: int | None = None, callbacks: Callbacks = None, **kwargs: Any, ) -> tuple[list[Document], dict]: result_docs = docs length_func = self.combine_documents_chain.prompt_length num_tokens = length_func(result_docs, **kwargs) async def _collapse_docs_func(docs: list[Document], **kwargs: Any) -> str: return await self._collapse_chain.arun( input_documents=docs, callbacks=callbacks, **kwargs, ) _token_max = token_max or self.token_max retries: int = 0 while num_tokens is not None and num_tokens > _token_max: new_result_doc_list = split_list_of_docs( result_docs, length_func, _token_max, **kwargs, ) result_docs = [ await acollapse_docs(docs_, _collapse_docs_func, **kwargs) for docs_ in new_result_doc_list ] num_tokens = length_func(result_docs, **kwargs) retries += 1 if self.collapse_max_retries and retries == self.collapse_max_retries: msg = f"Exceed {self.collapse_max_retries} tries to \ collapse document to {_token_max} tokens." 
raise ValueError(msg) return result_docs, {} @property def _chain_type(self) -> str: return "reduce_documents_chain"
ReduceDocumentsChain
python
sympy__sympy
sympy/printing/codeprinter.py
{ "start": 966, "end": 1086 }
class ____(Exception): """ Raised if an assignment variable for a loop is missing. """ pass
AssignmentError
python
catalyst-team__catalyst
examples/catalyst_rl/misc.py
{ "start": 3040, "end": 7083 }
class ____(dl.Callback): def __init__( self, *, sampler_fn: Callable, env, replay_buffer: "OffpolicyReplayBuffer", db_server: "IRLDatabase", actor_key: str, num_samplers: int = 1, min_transactions_num: int = int(1e3), ): super().__init__(order=0) self.sampler_fn = sampler_fn self.env = env self.replay_buffer = replay_buffer self.db_server = db_server self.actor_key = actor_key self.num_samplers = num_samplers self.min_transactions_num = min_transactions_num self.samplers = [] self._db_loop_thread = threading.Thread( target=db2buffer_loop, kwargs={"db_server": self.db_server, "buffer": self.replay_buffer}, ) def _sync_checkpoint(self, runner: dl.IRunner): actor = copy.deepcopy(runner.model[self.actor_key]).to("cpu") checkpoint = {self.actor_key: actor.state_dict()} self.db_server.add_checkpoint( checkpoint=checkpoint, epoch=runner.stage_epoch_step ) def _fetch_initial_buffer(self): buffer_size = self.replay_buffer.length while buffer_size < self.min_transactions_num: self.replay_buffer.recalculate_index() num_trajectories = self.replay_buffer.num_trajectories num_transitions = self.replay_buffer.num_transitions buffer_size = self.replay_buffer.length metrics = [ f"fps: {0:7.1f}", f"updates per sample: {0:7.1f}", f"trajectories: {num_trajectories:09d}", f"transitions: {num_transitions:09d}", f"buffer size: " f"{buffer_size:09d}/{self.min_transactions_num:09d}", ] metrics = " | ".join(metrics) print(f"--- {metrics}") time.sleep(1.0) def on_experiment_start(self, runner: dl.IRunner) -> None: # db sync self._sync_checkpoint(runner=runner) # self.db_server.add_message(IRLDatabaseMessage.ENABLE_TRAINING) # deprecated? # self.db_server.add_message(IRLDatabaseMessage.ENABLE_SAMPLING) # deprecated? # samplers for i in range(self.num_samplers): p = mp.Process( target=run_sampler, kwargs=dict( sampler_fn=self.sampler_fn, env=copy.deepcopy(self.env), actor=copy.deepcopy(runner.model[self.actor_key]).to("cpu"), db_server=self.db_server, sampler_index=i, weights_key=self.actor_key, weights_sync_period=10, device="cpu", ), daemon=True, ) p.start() self.samplers.append(p) # for p in self.samplers: # p.join() # db -> local storage self._db_loop_thread.start() # init local storage self._fetch_initial_buffer() def on_epoch_end(self, runner: dl.IRunner): runner.epoch_metrics["_epoch_"][ "num_trajectories" ] = self.replay_buffer.num_trajectories runner.epoch_metrics["_epoch_"][ "num_transitions" ] = self.replay_buffer.num_transitions runner.epoch_metrics["_epoch_"]["updates_per_sample"] = ( runner.loader_sample_step / self.replay_buffer.num_transitions ) runner.epoch_metrics["_epoch_"]["reward"] = np.mean( self.replay_buffer._trajectories_rewards[-100:] ) self._sync_checkpoint(runner=runner) self.replay_buffer.recalculate_index() def on_experiment_end(self, runner: dl.IRunner) -> None: from db import IRLDatabaseMessage for p in self.samplers: p.terminate() self.db_server.add_message(IRLDatabaseMessage.DISABLE_TRAINING) self.db_server.add_message(IRLDatabaseMessage.DISABLE_SAMPLING)
GameCallback
python
kamyu104__LeetCode-Solutions
Python/preimage-size-of-factorial-zeroes-function.py
{ "start": 36, "end": 638 }
class ____(object): def preimageSizeFZF(self, K): """ :type K: int :rtype: int """ def count_of_factorial_primes(n, p): cnt = 0 while n > 0: cnt += n//p n //= p return cnt p = 5 left, right = 0, p*K while left <= right: mid = left + (right-left)//2 if count_of_factorial_primes(mid, p) >= K: right = mid-1 else: left = mid+1 return p if count_of_factorial_primes(left, p) == K else 0
Solution
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/cloud_build.py
{ "start": 19434, "end": 22806 }
class ____(GoogleCloudBaseOperator): """ Returns information about a previously requested build. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:CloudBuildGetBuildOperator` :param id_: The ID of the build. :param project_id: Optional, Google Cloud Project project_id where the function belongs. If set to None or missing, the default project_id from the GCP connection is used. :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests will not be retried. :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete. Note that if `retry` is specified, the timeout applies to each individual attempt. :param metadata: Optional, additional metadata that is provided to the method. :param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). :param location: The location of the project. """ template_fields: Sequence[str] = ("project_id", "id_", "gcp_conn_id", "location") operator_extra_links = (CloudBuildLink(),) def __init__( self, *, id_: str, project_id: str = PROVIDE_PROJECT_ID, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, location: str = "global", **kwargs, ) -> None: super().__init__(**kwargs) self.id_ = id_ self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain self.location = location @property def extra_links_params(self) -> dict[str, Any]: return { "region": self.location, } def execute(self, context: Context): hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) result = hook.get_build( id_=self.id_, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, location=self.location, ) project_id = self.project_id or hook.project_id if project_id: CloudBuildLink.persist( context=context, project_id=project_id, build_id=result.id, ) return Build.to_dict(result)
CloudBuildGetBuildOperator
python
dagster-io__dagster
python_modules/libraries/dagster-airlift/dagster_airlift/test/airflow_test_instance.py
{ "start": 619, "end": 923 }
class ____(AirflowAuthBackend): def get_session(self) -> requests.Session: raise NotImplementedError("This shouldn't be called from this mock context.") def get_webserver_url(self) -> str: return "http://dummy.domain" DEFAULT_FAKE_INSTANCE_NAME = "test_instance"
DummyAuthBackend
python
sqlalchemy__sqlalchemy
test/orm/inheritance/test_basic.py
{ "start": 70491, "end": 73064 }
class ____(fixtures.MappedTest): """test that syncrules compile properly on custom inherit conds""" @classmethod def define_tables(cls, metadata): global _a_table, _b_table, _c_table _a_table = Table( "a", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("data1", String(128)), ) _b_table = Table( "b", metadata, Column("a_id", Integer, ForeignKey("a.id"), primary_key=True), Column("data2", String(128)), ) _c_table = Table( "c", metadata, # Column('a_id', Integer, ForeignKey('b.a_id'), # primary_key=True), #works Column("b_a_id", Integer, ForeignKey("b.a_id"), primary_key=True), Column("data3", String(128)), ) @testing.combinations( lambda _a_table, _b_table: None, lambda _a_table, _b_table: _b_table.c.a_id == _a_table.c.id, lambda _a_table, _b_table: _a_table.c.id == _b_table.c.a_id, argnames="j1", ) @testing.combinations( lambda _b_table, _c_table: None, lambda _b_table, _c_table: _b_table.c.a_id == _c_table.c.b_a_id, lambda _b_table, _c_table: _c_table.c.b_a_id == _b_table.c.a_id, argnames="j2", ) def test_joins(self, j1, j2): _a_table, _b_table, _c_table = self.tables("a", "b", "c") j1 = testing.resolve_lambda(j1, **locals()) j2 = testing.resolve_lambda(j2, **locals()) class A: def __init__(self, **kwargs): for key, value in list(kwargs.items()): setattr(self, key, value) class B(A): pass class C(B): pass self.mapper_registry.map_imperatively(A, _a_table) self.mapper_registry.map_imperatively( B, _b_table, inherits=A, inherit_condition=j1 ) self.mapper_registry.map_imperatively( C, _c_table, inherits=B, inherit_condition=j2 ) session = fixture_session() a = A(data1="a1") session.add(a) b = B(data1="b1", data2="b2") session.add(b) c = C(data1="c1", data2="c2", data3="c3") session.add(c) session.flush() session.expunge_all() assert len(session.query(A).all()) == 3 assert len(session.query(B).all()) == 2 assert len(session.query(C).all()) == 1
SyncCompileTest
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/lists.py
{ "start": 2318, "end": 2705 }
class ____: def __repr__(self) -> str: return "" def inconsistent_redirect_expressions_in_condition(l: List[HasRepr]) -> None: # Demonstrate a (fixed) inconsistency in how we handle generators. # Call graph, forward and backward analysis need to agree on whether # `str(x)` resolves to `x.__str__()` or `x.__repr__()` [x for x in l if str(x) == "123"]
HasRepr
python
wandb__wandb
wandb/vendor/pygments/lexers/haskell.py
{ "start": 18941, "end": 21348 }
class ____(Lexer): """ Base class for lexers of literate file formats based on LaTeX or Bird-style (prefixing each code line with ">"). Additional options accepted: `litstyle` If given, must be ``"bird"`` or ``"latex"``. If not given, the style is autodetected: if the first non-whitespace character in the source is a backslash or percent character, LaTeX is assumed, else Bird. """ bird_re = re.compile(r'(>[ \t]*)(.*\n)') def __init__(self, baselexer, **options): self.baselexer = baselexer Lexer.__init__(self, **options) def get_tokens_unprocessed(self, text): style = self.options.get('litstyle') if style is None: style = (text.lstrip()[0:1] in '%\\') and 'latex' or 'bird' code = '' insertions = [] if style == 'bird': # bird-style for match in line_re.finditer(text): line = match.group() m = self.bird_re.match(line) if m: insertions.append((len(code), [(0, Comment.Special, m.group(1))])) code += m.group(2) else: insertions.append((len(code), [(0, Text, line)])) else: # latex-style from pygments.lexers.markup import TexLexer lxlexer = TexLexer(**self.options) codelines = 0 latex = '' for match in line_re.finditer(text): line = match.group() if codelines: if line.lstrip().startswith('\\end{code}'): codelines = 0 latex += line else: code += line elif line.lstrip().startswith('\\begin{code}'): codelines = 1 latex += line insertions.append((len(code), list(lxlexer.get_tokens_unprocessed(latex)))) latex = '' else: latex += line insertions.append((len(code), list(lxlexer.get_tokens_unprocessed(latex)))) for item in do_insertions(insertions, self.baselexer.get_tokens_unprocessed(code)): yield item
LiterateLexer
python
google__jax
tests/pmap_test.py
{ "start": 120549, "end": 127610 }
class ____(jtu.JaxTestCase): @jtu.ignore_warning(category=DeprecationWarning) def test_pmap_input_array_output_array(self): input_shape = (jax.device_count(), 2) input_array, input_data = create_input_array_for_pmap(input_shape) f = jax.pmap(lambda x, y: x * y) out = f(input_array, input_array) expected = input_data * input_data self.assertIsInstance(out, array.ArrayImpl) for s in out.addressable_shards: self.assertArraysEqual(s.data, expected[s.index]) self.assertArraysEqual(out, expected) @jtu.ignore_warning(category=DeprecationWarning) def test_pmap_double_input_array_output_array(self): input_shape = (jax.device_count(), 2) input_array, input_data = create_input_array_for_pmap(input_shape) def f(x, y): assert x.shape == (2,) assert y.shape == (2,) return x, y f = jax.pmap(f) out1, out2 = f(input_array, input_array) self.assertIsInstance(out1, array.ArrayImpl) self.assertIsInstance(out2, array.ArrayImpl) for s1, s2 in safe_zip(out1.addressable_shards, out2.addressable_shards): self.assertArraysEqual(s1.data, input_data[s1.index]) self.assertArraysEqual(s2.data, input_data[s2.index]) self.assertArraysEqual(out1, input_data) self.assertArraysEqual(out2, input_data) @jtu.ignore_warning(category=DeprecationWarning) def test_pmap_array_in_axes_out_axes(self): dc = jax.device_count() input_shape = (dc, 2) a1, input_data = create_input_array_for_pmap(input_shape, in_axes=0) a2, _ = create_input_array_for_pmap(input_shape, in_axes=None, sharded_dim_size=a1.shape[0]) def f(x, y): assert x.shape == (2,) assert y.shape == input_shape return x, y f = jax.pmap(f, in_axes=(0, None), out_axes=(None, 0)) out1, out2 = f(a1, a2) self.assertIsInstance(out1, array.ArrayImpl) self.assertIsInstance(out2, array.ArrayImpl) self.assertEqual(out1.shape, (2,)) self.assertEqual(out2.shape, (dc, dc, 2)) for i, (s1, s2) in enumerate(safe_zip(out1.addressable_shards, out2.addressable_shards)): self.assertArraysEqual(s1.data, input_data[i]) if config.pmap_no_rank_reduction.value: self.assertArraysEqual(s2.data, input_data[None]) else: self.assertArraysEqual(s2.data, input_data) @jtu.ignore_warning(category=DeprecationWarning) def test_pmap_array_sharding_mismatch(self): input_shape = (jax.device_count(), 2) a1, inp_data = create_input_array_for_pmap(input_shape, in_axes=None, sharded_dim_size=input_shape[0]) f = jax.pmap(lambda x: x, in_axes=0, out_axes=0) out_array = f(a1) self.assertArraysEqual(out_array, inp_data) @jtu.ignore_warning(category=DeprecationWarning) def test_pmap_array_devices_mismatch(self): if jax.device_count() <= 1: raise unittest.SkipTest('Skipping because this test needs more than ' '1 device.') input_shape = (jax.device_count(), 2) a1, inp_data = create_input_array_for_pmap(input_shape) f = jax.pmap(lambda x: x, devices=jax.devices()[::-1]) out_array = f(a1) self.assertArraysEqual(out_array, inp_data) @jtu.ignore_warning(category=DeprecationWarning) def test_amap(self): # Copied from an example mattjj@ posted in a chat thread. if jax.device_count() < 2: self.skipTest('Test requires >= 2 devices.') def amap(f, xs): ys = [f(jax.device_put(x, list(x.devices())[0])) for x in xs] return jax.device_put_sharded(ys, jax.local_devices()[:2]) # leading axis is batch dim (i.e. 
mapped/parallel dim), of size 2 x = jnp.array([[1., 0., 0.], [0., 2., 3.]]) # first pmapped computation y = jax.pmap(jnp.sin)(x) def dynamic_shape_function(y): nonzero_idx = y != 0 results = y[nonzero_idx] ** 2 return y.at[nonzero_idx].set(results) z = amap(dynamic_shape_function, y) # second pmapped computation w = jax.pmap(jnp.cos)(z) self.assertArraysEqual(w, jnp.cos(jnp.sin(x) ** 2)) @jtu.ignore_warning(category=DeprecationWarning) def test_same_out_sharding_id(self): if config.disable_jit.value: self.skipTest('Skip this under eager pmap mode.') shape = (jax.device_count(), 2) arr, inp_data = create_input_array_for_pmap(shape) f = pmap(lambda x: x) out1 = f(arr) self.assertArraysEqual(out1, inp_data) out1_sharding_id = id(out1.sharding) out2 = f(out1) self.assertArraysEqual(out2, inp_data) out2_sharding_id = id(out2.sharding) out3 = f(out2) self.assertArraysEqual(out3, inp_data) out3_sharding_id = id(out3.sharding) self.assertEqual(out1_sharding_id, out2_sharding_id) self.assertEqual(out1_sharding_id, out3_sharding_id) self.assertEqual(out2_sharding_id, out3_sharding_id) @jtu.ignore_warning(category=DeprecationWarning) def test_array_with_pmap_sharding_copy_without_round_trip(self): def _compare_if_equal(out, out_copy): self.assertArraysEqual(out, out_copy) self.assertEqual(out.sharding, out_copy.sharding) for o, o_copy in safe_zip(out.addressable_shards, out_copy.addressable_shards): self.assertArraysEqual(o.data, o_copy.data) self.assertEqual(o.device, o_copy.device) self.assertEqual(o.index, o_copy.index) self.assertEqual(o.replica_id, o_copy.replica_id) self.assertNotEqual(o.data.unsafe_buffer_pointer(), o_copy.data.unsafe_buffer_pointer()) if config.pmap_shmap_merge.value: sharding = jax.sharding.NamedSharding( jax.sharding.Mesh(np.array(jax.devices()), 'x'), jax.sharding.PartitionSpec('x')) out = jax.device_put(jnp.ones((jax.device_count(),)), sharding) else: out, _ = create_input_array_for_pmap((jax.device_count(),)) out_copy = jnp.copy(out) _compare_if_equal(out, out_copy) if config.pmap_shmap_merge.value: sharding = jax.sharding.NamedSharding( jax.sharding.Mesh(np.array(jax.devices()).reshape(1, -1), ('x', 'y')), jax.sharding.PartitionSpec('x', 'y')) out1 = jax.device_put(jnp.ones((1, jax.device_count())), sharding) else: out1, _ = create_input_array_for_pmap((1, jax.device_count(),), in_axes=1) out_copy1 = jnp.copy(out1) _compare_if_equal(out1, out_copy1) @jtu.ignore_warning(category=DeprecationWarning) def test_device_put_sharded_transfer_guard(self): inp = jnp.arange(jax.device_count()) arr_inp = [jax.device_put(i, d) for i, d in zip(inp, jax.devices())] with jax.transfer_guard("disallow_explicit"): jax.device_put_sharded(arr_inp, jax.devices()) def test_jnp_stack(self): @jax.pmap def something(x): return (x + x).reshape([]) z = something(np.arange(jax.device_count())) self.assertArraysEqual(jnp.stack([z[i] for i in range(jax.device_count())]), np.arange(jax.device_count()) * 2)
ArrayPmapTest
python
cython__cython
Cython/Compiler/StringEncoding.py
{ "start": 2391, "end": 3891 }
class ____(str): # unicode string subclass to keep track of the original encoding. # 'encoding' is None for unicode strings and the source encoding # otherwise encoding = None def __deepcopy__(self, memo): return self def byteencode(self): assert self.encoding is not None return self.encode(self.encoding) def utf8encode(self): assert self.encoding is None return self.encode("UTF-8") @property def is_unicode(self): return self.encoding is None def as_utf8_string(self): return bytes_literal(self.utf8encode(), 'utf8') def as_c_string_literal(self): # first encodes the string then produces a c string literal if self.encoding is None: s = self.as_utf8_string() else: s = bytes_literal(self.byteencode(), self.encoding) return s.as_c_string_literal() def string_contains_lone_surrogates(ustring): """ Check if the unicode string contains lone surrogate code points on a CPython platform with wide (UCS-4) or narrow (UTF-16) Unicode, i.e. characters that would be spelled as two separate code units on a narrow platform, but that do not form a pair. """ for c in map(ord, ustring): # Surrogates tend to be rare, so we use separate conditions. if 0xD800 <= c and c <= 0xDFFF: # on 32bit Unicode platforms, there is never a pair return True return False
EncodedString
python
zarr-developers__zarr-python
tests/package_with_entrypoint/__init__.py
{ "start": 1686, "end": 1924 }
class ____: class Codec(BytesCodec): pass class Buffer(zarr.core.buffer.Buffer): pass class NDBuffer(zarr.core.buffer.NDBuffer): pass class Pipeline(CodecPipeline): pass
TestEntrypointGroup
python
getsentry__sentry
src/sentry/grouping/fingerprinting/rules.py
{ "start": 340, "end": 414 }
class ____(TypedDict): title: NotRequired[str]
FingerprintRuleAttributes
python
numba__numba
numba/core/typing/cmathdecl.py
{ "start": 896, "end": 971 }
class ____(CMath_predicate): pass @infer_global(cmath.log)
CMath_isfinite
python
pytorch__pytorch
test/test_legacy_vmap.py
{ "start": 770, "end": 32817 }
class ____(TestCase): def test_non_tensor_output_raises(self): with self.assertRaisesRegex( ValueError, "got type <class 'float'> as the return" ): output = vmap(lambda x: 3.14)(torch.ones(3)) def multiple_outputs(x): return x, 3 with self.assertRaisesRegex(ValueError, "got type <class 'int'> for return 1"): vmap(multiple_outputs)(torch.ones(3)) def test_different_map_dim_size_raises(self): x = torch.randn(2) y = torch.randn(3) expected_msg = ( "Expected all tensors to have the same size in the mapped dimension" ) with self.assertRaisesRegex(ValueError, expected_msg): vmap(torch.mul)(x, y) with self.assertRaisesRegex(ValueError, expected_msg): vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y)) with self.assertRaisesRegex(ValueError, expected_msg): vmap(lambda z: z["x"] + z["y"], in_dims=({"x": 0, "y": 0},))( {"x": x, "y": y} ) def test_func_with_no_inputs(self): expected_msg = "got no inputs" def foo(): return torch.randn(3) def bar(x): return torch.randn(3) with self.assertRaisesRegex(ValueError, expected_msg): vmap(foo)() with self.assertRaisesRegex(ValueError, expected_msg): vmap(bar)() def test_constant_function(self): output = vmap(lambda x: torch.tensor(3.14))(torch.ones(3)) self.assertEqual(output, torch.tensor([3.14, 3.14, 3.14])) def test_single_input(self): x = torch.randn(2, 3) def square(x): return x * x output = vmap(square)(x) self.assertEqual(output, x * x) def test_multiple_inputs(self): x = torch.randn(2, 3) y = torch.randn(2, 3) output = vmap(torch.mul)(x, y) self.assertEqual(output, x * y) def test_multiple_outputs(self): def foo(x): return x * x, x * x * x x = torch.randn(3) outputs = vmap(foo)(x) self.assertEqual(outputs[0], x * x) self.assertEqual(outputs[1], x * x * x) def test_multiple_outputs_error_cases(self): # This is the same thing as # def returns_tuple_of_tensors(x): # return x, x def returns_tuple_of_tensors(x): return (x, x) def returns_list_of_two_tensors(x): return [x, x] def returns_list_of_one_tensor(x): return [x] x = torch.randn(3) # should not throw vmap(returns_tuple_of_tensors)(x) # jax supports these, but we don't yet msg = "must only return Tensors, got type <class 'list'>" with self.assertRaisesRegex(ValueError, msg): vmap(returns_list_of_two_tensors)(x) with self.assertRaisesRegex(ValueError, msg): vmap(returns_list_of_one_tensor)(x) def test_nested_with_same_map_dim(self): x = torch.randn(2, 3, 5) y = torch.randn(2, 3, 5) output = vmap(vmap(torch.mul))(x, y) self.assertEqual(output, x * y) output = vmap(vmap(vmap(torch.mul)))(x, y) self.assertEqual(output, x * y) def test_nested_with_different_map_dim(self): x = torch.randn(2, 3) y = torch.randn(5, 3) output = vmap(lambda x: vmap(lambda y: x * y)(y))(x) self.assertEqual(output.shape, (2, 5, 3)) self.assertEqual(output, x.view(2, 1, 3) * y) z = torch.randn(7, 3) output = vmap(lambda x: vmap(lambda y: vmap(lambda z: x * y * z)(z))(y))(x) self.assertEqual(output.shape, (2, 5, 7, 3)) self.assertEqual(output, x.view(2, 1, 1, 3) * y.view(5, 1, 3) * z) def test_noop_in_inner_vmap(self): x = torch.randn(3) y = torch.randn(5) output = vmap(lambda x: vmap(lambda y: x)(y))(x) self.assertEqual(output, x.view(3, 1).expand(3, 5)) def test_unsupported_op_err_msg(self): # Unsupported view op tensor = torch.randn(2, 3) msg = ( r"Batching rule not implemented for aten::.+; the " r"fallback path doesn't work on out= or view ops" ) with self.assertRaisesRegex(RuntimeError, msg): vmap(torch.ravel)(tensor) def out_op(x, y): return torch.abs(x, out=y) with self.assertRaisesRegex(RuntimeError, msg): 
vmap(out_op)(tensor, tensor) tensor = torch.randn(2) # The fallback doesn't support TensorList with self.assertRaisesRegex(RuntimeError, "Batching rule not implemented"): vmap(lambda t: torch.atleast_1d([t]))(tensor) # Don't support non-tensor returns. This is a limitation of vmap; # functions that don't return tensors must be special cased with self.assertRaisesRegex(RuntimeError, "Batching rule not implemented"): vmap(torch.Tensor.item)(tensor) def test_nonzero_out_dims(self): # Basic test tensor = torch.randn(2, 3) result = vmap(lambda x: x, out_dims=1)(tensor) self.assertEqual(result, tensor.permute(1, 0)) self.assertEqual(result.data_ptr(), tensor.data_ptr()) # Test that the batch dimension gets permuted to dim 2 tensor = torch.randn(2, 3, 5, 7) result = vmap(lambda x: x, out_dims=2)(tensor) self.assertEqual(result, tensor.permute(1, 2, 0, 3)) self.assertEqual(result.data_ptr(), tensor.data_ptr()) # negative out_dim tensor = torch.randn(2, 3, 5, 7) result = vmap(lambda x: x, out_dims=-1)(tensor) self.assertEqual(result, tensor.permute(1, 2, 3, 0)) self.assertEqual(result.data_ptr(), tensor.data_ptr()) # check that out_dims works on ALL outputs tensor = torch.randn(2, 3, 5, 7) other = torch.randn(2, 3, 5, 7) result = vmap(lambda x, y: (x, y), out_dims=2)(tensor, other) self.assertEqual( result, (tensor.permute(1, 2, 0, 3), other.permute(1, 2, 0, 3)) ) # use out_dims with the maximum vmap-able tensor dims (64 dims) ndims = 64 shape = [2] + [1] * (ndims - 1) expected_shape = [1, 1, 2] + [1] * (ndims - 3) tensor = torch.randn(shape) result = vmap(lambda x: x, out_dims=2)(tensor) self.assertEqual(result.shape, expected_shape) # test something that is not the identity function def foo(x, y): return x, x * y, x * y * y x = torch.randn(2, 3, 5) y = torch.randn(2, 3, 5) result = vmap(foo, out_dims=1)(x, y) self.assertEqual( result, ( x.permute(1, 0, 2), (x * y).permute(1, 0, 2), (x * y * y).permute(1, 0, 2), ), ) def test_multiple_out_dims(self): def foo(x): return x, x def bar(x, y): return x, x, x, x * y x = torch.randn(2, 3, 5) y = torch.randn(2, 3, 5) result = vmap(foo, out_dims=(0, 1))(x) self.assertEqual(result, (x, x.permute(1, 0, 2))) result = vmap(bar, out_dims=(-1, 0, 1, 2))(x, y) expected = ( x.permute(1, 2, 0), x, x.permute(1, 0, 2), (x * y).permute(1, 2, 0), ) self.assertEqual(result, expected) def test_nested_out_dims(self): y = torch.randn(2, 3, 5, 7) # Inner vmap has non-zero out_dim result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y))(y) self.assertEqual(result.shape, (2, 5, 3, 7)) self.assertEqual(result, y.permute(0, 2, 1, 3)) # all vmaps have non-zero out_dim result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y), out_dims=1)(y) self.assertEqual(result.shape, (5, 2, 3, 7)) self.assertEqual(result, y.permute(2, 0, 1, 3)) # throwing in some negative out_dims result = vmap(lambda y: vmap(lambda x: x, out_dims=-1)(y), out_dims=-1)(y) self.assertEqual(result.shape, (5, 7, 3, 2)) self.assertEqual(result, y.permute(2, 3, 1, 0)) # testing fn that isn't the identity x = torch.randn(2, 3) y = torch.randn(5, 3) result = vmap(lambda y: vmap(lambda x: x * y, out_dims=1)(x), out_dims=-1)(y) self.assertEqual(result.shape, (3, 2, 5)) self.assertEqual(result, (y.view(5, 1, 3) * x).permute(2, 1, 0)) def test_out_dims_edge_case(self): def foo(x): return x # Test that we accept out_dims=(1,) for a function with one output. 
tensor = torch.randn(2, 3) expected = vmap(foo, out_dims=1)(tensor) result = vmap(foo, out_dims=(1,))(tensor) self.assertEqual(result, expected) def test_out_dims_must_be_int_or_tuple_of_int_err_msg(self): msg = "`out_dims` must be an int or a tuple of int" tensor = torch.randn(2, 3) with self.assertRaisesRegex(ValueError, msg): vmap(lambda x: x, out_dims="lol")(tensor) with self.assertRaisesRegex(ValueError, msg): vmap(lambda x: x, out_dims=("lol",))(tensor) with self.assertRaisesRegex(ValueError, msg): vmap(lambda x: x, out_dims=None)(tensor) with self.assertRaisesRegex(ValueError, msg): vmap(lambda x: x, out_dims=(None,))(tensor) def test_out_dims_and_num_outputs_mismatch_err_msg(self): msg = "`out_dims` must have one dim per output" x = torch.randn(2, 3, 5) # Too many out_dims with self.assertRaisesRegex(ValueError, msg): vmap(lambda x: x, out_dims=(0, 0))(x) with self.assertRaisesRegex(ValueError, msg): vmap(lambda x: (x, x, x), out_dims=(0, 0, 0, 0))(x) # Too few out_dims with self.assertRaisesRegex(ValueError, msg): vmap(lambda x: (x, x), out_dims=(0,))(x) with self.assertRaisesRegex(ValueError, msg): vmap(lambda x: (x, x, x), out_dims=(0, 0))(x) def test_out_dim_out_of_bounds_err_msg(self): # TODO(rzou): This error message isn't that great. It comes straight # from maybe_wrap_dim. Consider doing a try-catch-(add some context) to # the error message in the future in C++ msg = "Dimension out of range" x = torch.randn(2, 3, 5) with self.assertRaisesRegex(IndexError, msg): vmap(lambda x: x, out_dims=3)(x) with self.assertRaisesRegex(IndexError, msg): vmap(lambda x: x, out_dims=-4)(x) def test_non_zero_in_dims(self): tensor = torch.randn(2, 3, 5) # Implicit out_dims = 0; vmap will move the batch dim to the front. output = vmap(lambda x: x, (1,))(tensor) self.assertEqual(output, tensor.permute(1, 0, 2)) self.assertEqual(output.data_ptr(), tensor.data_ptr()) x = torch.randn(2, 3) y = torch.randn(3, 2) output = vmap(torch.mul, (0, 1))(x, y) self.assertEqual(output, x * y.t()) output = vmap(torch.mul, (1, 0))(x, y) self.assertEqual(output, x.t() * y) def test_none_in_dims(self): x = torch.randn(2, 3) y = torch.randn(2, 3) # None in_dim for a Tensor means we don't map over it output = vmap(torch.mul, (0, None))(x, y) self.assertEqual(output.shape, (2, 2, 3)) self.assertEqual(output, x.view(2, 1, 3) * y) # None in_dim for non-tensor arguments output = vmap(torch.mul, (0, None))(x, 2) self.assertEqual(output, x * 2) def test_nested_non_default_in_dims(self): x = torch.rand(5, 2, 3) y = torch.rand(3, 5, 2) result = vmap(vmap(vmap(torch.mul), (1, 0)), (1, 2))(x, y) self.assertEqual(result, x.permute(1, 2, 0) * y.permute(2, 0, 1)) def test_non_default_in_dims_out_dims(self): x = torch.randn(2, 3, 5) # Same in_dim as out_dim, vmap over identity result = vmap(lambda x: x, in_dims=1, out_dims=1)(x) self.assertEqual(result, x) self.assertEqual(result.data_ptr(), x.data_ptr()) # Different in_dim from out_dim, vmap over identity result = vmap(lambda x: x, in_dims=2, out_dims=1)(x) self.assertEqual(result.shape, (2, 5, 3)) self.assertEqual(result, x.transpose(1, 2)) self.assertEqual(result.data_ptr(), x.data_ptr()) def foo(x): return x * 2 # Same in_dim as out_dim, vmap over operation result = vmap(foo, in_dims=1, out_dims=1)(x) self.assertEqual(result, x * 2) # Different in_dim as out_dim, vmap over operation result = vmap(foo, in_dims=2, out_dims=1)(x) self.assertEqual(result.shape, (2, 5, 3)) self.assertEqual(result, (x * 2).transpose(1, 2)) # Basic nested test. 
result = vmap(vmap(foo, 1, 1), 1, 1)(x) self.assertEqual(result, x * 2) def test_accepts_nested_inputs(self): B0 = 2 x = torch.randn(2, 3) y = torch.randn(2, 3) # Single layer of nesting out = vmap(lambda z: z[0] + z[1])((x, y)) self.assertEqual(out, x + y) out = vmap(lambda z: z[0] + z[1], in_dims=(0,))((x, y)) self.assertEqual(out, x + y) out = vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y)) self.assertEqual(out, x + y) out = vmap(lambda z: z[0] + z[1])([x, y]) self.assertEqual(out, x + y) out = vmap(lambda z: z[0] + z[1], in_dims=(0,))([x, y]) self.assertEqual(out, x + y) out = vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, y]) self.assertEqual(out, x + y) out = vmap(lambda z: z["x"] + z["y"])({"x": x, "y": y}) self.assertEqual(out, x + y) out = vmap(lambda z: z["x"] + z["y"], in_dims=(0,))({"x": x, "y": y}) self.assertEqual(out, x + y) out = vmap(lambda z: z["x"] + z["y"], in_dims=({"x": 0, "y": 0},))( {"x": x, "y": y} ) self.assertEqual(out, x + y) # Multiple layers of nesting out_fn = vmap(lambda z: z["x"][0] + z["x"][1][0] + z["y"][0] + z["y"][1]) out = out_fn({"x": [x, (x,)], "y": [y, y]}) self.assertEqual(out, x + x + y + y) def test_in_dims_wrong_type_err_msg(self): x = torch.randn(3) y = torch.randn(3) msg = r"expected `in_dims` to be int or a \(potentially nested\) tuple" with self.assertRaisesRegex(ValueError, msg): vmap(torch.mul, [0, 0])(x, y) with self.assertRaisesRegex(ValueError, msg): vmap(torch.mul, set({0}))(x, y) with self.assertRaisesRegex(ValueError, msg): vmap(torch.mul, "lol")(x, y) with self.assertRaisesRegex(ValueError, msg): vmap(lambda z: z[0] + z[1], in_dims=[0, 0])([x, y]) # The following should not throw vmap(torch.mul, (0, 0))(x, y) def test_not_enough_in_dims_err_msg(self): x = torch.randn(3) y = torch.randn(3) msg = r"in_dims is not compatible with the structure of `inputs`" with self.assertRaisesRegex(ValueError, msg): vmap(torch.mul, (0,))(x, y) with self.assertRaisesRegex(ValueError, msg): vmap(torch.mul, (0, 0, 0))(x, y) with self.assertRaisesRegex(ValueError, msg): vmap(lambda z: z[0] + z[1], in_dims=([0],))([x, y]) with self.assertRaisesRegex(ValueError, msg): vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))([x, y]) # The following should not throw vmap(torch.mul, (0, 0))(x, y) def test_integer_in_dim_but_not_tensor_input_err_msg(self): def foo(xy): return xy[0] * xy[1] def bar(x, yz): return x * yz[0] * yz[1] x = torch.randn(2, 3) y = torch.randn(2, 3) # the following are errors in jax (and will always be errors) msg = "Got in_dim=0 for an input but the input is of type" with self.assertRaisesRegex(ValueError, msg): vmap(torch.sum)(x, 0) with self.assertRaisesRegex(ValueError, msg): vmap(torch.sum, (0, 0))(x, 0) with self.assertRaisesRegex(ValueError, msg): vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, 1]) # The following should not throw vmap(torch.sum, (0, None))(x, 0) def test_in_dim_not_in_tensor_err_msg(self): def foo(x): return x * x x = torch.randn(2, 3) y = torch.randn(2, 3) msg = r"Got in_dim=-?\w for some input, but that input is a Tensor of dimensionality \w" with self.assertRaisesRegex(ValueError, msg): vmap(foo)(torch.randn([])) with self.assertRaisesRegex(ValueError, msg): vmap(foo, in_dims=(0,))(torch.randn([])) with self.assertRaisesRegex(ValueError, msg): vmap(foo, in_dims=(-1,))(x) with self.assertRaisesRegex(ValueError, msg): vmap(foo, in_dims=(2,))(y) with self.assertRaisesRegex(ValueError, msg): vmap(lambda z: z[0] + z[1], in_dims=([3, 0],))([x, y]) # the following should not throw vmap(foo, 
in_dims=(0,))(torch.randn(2, 3)) vmap(foo, in_dims=(1,))(torch.randn(2, 3)) def test_fallback_does_not_warn_by_default(self): # NB: One day we will implement a batching rule for torch.atan2. # If/when we do, this test should be replaced to test the fallback # path on another operator to avoid bitrot. op = torch.atan2 x = torch.randn(11) y = torch.randn(11) with warnings.catch_warnings(record=True) as wa: result = vmap(op)(x, y) # The single warning here is the "vmap is experimental" # warning, not a warning from the vmap fallback path. self.assertEqual(len(wa), 1) def test_fallback_warns_when_warnings_are_enabled(self): # NB: One day we will implement a batching rule for torch.atan2. # If/when we do, this test should be replaced to test the fallback # path on another operator to avoid bitrot. op = torch.atan2 x = torch.randn(11) y = torch.randn(11) with warnings.catch_warnings(record=True) as wa: with EnableVmapFallbackWarnings(): result = vmap(op)(x, y) self.assertEqual(len(wa), 2) self.assertRegex(str(wa[-1].message), FALLBACK_REGEX) def _assert_uses_vmap_fallback(self, vmap_args, inputs): with warnings.catch_warnings(record=True) as wa: with EnableVmapFallbackWarnings(): result = vmap(*vmap_args)(*inputs) self.assertEqual(len(wa), 2) self.assertRegex(str(wa[-1].message), FALLBACK_REGEX) def test_fallback_zero_dim(self): # NB: One day we will implement a batching rule for torch.atan2. # If/when we do, this test should be replaced to test the fallback # path on another operator to avoid bitrot. op = torch.atan2 x = torch.randn(11) y = torch.randn(11) self._assert_uses_vmap_fallback((op,), (x, y)) B0, B1 = 0, 3 x = torch.randn(B0, 11) y = torch.randn(11) msg = "The fallback path does not support vmap over dims of size 0" with self.assertRaisesRegex(RuntimeError, msg): vmap(op, (0, None))(x, y) with self.assertRaisesRegex(RuntimeError, msg): vmap(op, (None, 0))(y, x) with self.assertRaisesRegex(RuntimeError, msg): vmap(op)(x, x) x = torch.randn(B0, B1, 11) y = torch.randn(B1, 11) with self.assertRaisesRegex(RuntimeError, msg): vmap(op, (0, None))(x, y) with self.assertRaisesRegex(RuntimeError, msg): vmap(op, (None, 0))(y, x) with self.assertRaisesRegex(RuntimeError, msg): vmap(op)(x, x) def test_fallback_atan2(self): # NB: One day we will implement a batching rule for torch.atan2. # If/when we do, this test should be replaced to test the fallback # path on another operator to avoid bitrot. op = torch.atan2 x = torch.randn(5, 7, 11) y = torch.randn(5, 7, 11) self._assert_uses_vmap_fallback((op,), (x, y)) # fallback on torch.atan2 x = torch.randn(7, 11, 5) y = torch.randn(5, 7, 11) result = vmap(op, (2, 0))(x, y) self.assertEqual(result, op(x.permute(2, 0, 1), y)) # fallback on torch.atan2, nested vmap x = torch.randn(7, 11, 5) y = torch.randn(5, 7, 11) result = vmap(vmap(op), (2, 0))(x, y) self.assertEqual(result, op(x.permute(2, 0, 1), y)) # big batch size (total 10000) x = torch.randn(100, 10, 10, 5) y = torch.randn(100, 10, 10) result = vmap(vmap(vmap(op)))(x, y) self.assertEqual(result, op(x, y.view(100, 10, 10, 1))) def test_fallback_masked_fill(self): # NB: One day we will implement a batching rule for masked_fill # If/when we do, this test should be replaced to test the fallback # path on another operator to avoid bitrot. 
def run_test(batch_size): B0 = batch_size x = torch.randn(B0, 7, 11, 13) dim = 0 index = torch.tensor([0, 4, 2]) values = torch.randn(B0, 3, 11, 13) self._assert_uses_vmap_fallback( (torch.index_add, (0, None, None, 0)), (x, dim, index, values) ) result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values) expected = torch.index_add(x, dim + 1, index, values.view(B0, 3, 11, 13)) self.assertEqual(result, expected) run_test(batch_size=5) run_test(batch_size=1237) def test_fallback_multiple_returns(self): # NB: One day we will implement a batching rule for torch.var_mean # If/when we do, this test should be replaced to test the fallback # path on another operator to avoid bitrot. B0, B1, B2 = 2, 3, 1237 tensor = torch.randn(B0, 10) self._assert_uses_vmap_fallback((torch.var_mean,), (tensor,)) # fallback correctness on torch.var_mean result = vmap(torch.var_mean)(tensor) expected = torch.var_mean(tensor, dim=1) self.assertEqual(result, expected) # nested vmap tensor = torch.randn(B0, B1, 10) result = vmap(vmap(torch.var_mean))(tensor) expected = torch.var_mean(tensor, dim=2) self.assertEqual(result, expected) # big batch size, nested vmap tensor = torch.randn(B0, B1, B2, 10) result = vmap(vmap(vmap(torch.var_mean)))(tensor) expected = torch.var_mean(tensor, dim=3) self.assertEqual(result, expected) def test_inplace_fallback_unary(self): # Test the in-place fallback on an in-place method that takes no # additional Tensor arguments. This is the simplest case of the fallback. # NB: One day we will implement a batching rule for acos_. # If/when we do, this test should be replaced to test the fallback # path on another operator to avoid bitrot. op = Tensor.acos_ B0, B1, B2 = 2, 3, 10000 x = torch.randn(B0, 5) self._assert_uses_vmap_fallback((op,), (x,)) # Single vmap x_orig = torch.rand(B0, 5) x = x_orig.clone() result = vmap(op)(x) self.assertTrue(result is x) self.assertEqual(result, x_orig.acos()) # Single vmap + different out_dim produces a view(!) x_orig = torch.rand(B0, 5) x = x_orig.clone() result = vmap(op, out_dims=(1,))(x) self.assertTrue(result._base is x) self.assertEqual(result, x_orig.t().acos()) # Nested vmap x_orig = torch.randn(B0, B1, 5) x = x_orig.clone() result = vmap(vmap(op))(x) self.assertTrue(result is x) self.assertEqual(result, x_orig.acos()) # Nested vmap, large batch size x_orig = torch.randn(B0, B1, B2, 5) x = x_orig.clone() result = vmap(vmap(vmap(op)))(x) self.assertTrue(result is x) self.assertEqual(result, x_orig.acos()) def test_inplace_fallback_nary_same_levels(self): # NB: One day we will implement a batching rule for atan2_ # If/when we do, this test should be replaced to test the fallback # path on another operator to avoid bitrot. 
op = Tensor.atan2_ outplace_op = torch.atan2 x = torch.randn(5, 7, 11) y = torch.randn(5, 7, 11) self._assert_uses_vmap_fallback((op,), (x, y)) # Single vmap B0 = 5 x_orig = torch.randn(7, 11, B0) x = x_orig.clone() y = torch.randn(B0, 7, 11) vmap(op, (2, 0))(x, y) self.assertEqual(x, outplace_op(x_orig, y.movedim(0, 2))) # Nested vmap B0, B1 = 5, 7 x_orig = torch.randn(B1, 11, B0) x = x_orig.clone() y = torch.randn(B0, B1, 11) vmap(vmap(op), (2, 0))(x, y) self.assertEqual(x, outplace_op(x_orig, y.movedim([0, 1], [2, 0]))) # big batch size (total 10000) B0, B1, B2 = 100, 10, 10 x_orig = torch.randn(B0, B1, B2, 5) x = x_orig.clone() y = torch.randn(B0, B1, B2) result = vmap(vmap(vmap(op)))(x, y) self.assertEqual(x, outplace_op(x_orig, y.view(B0, B1, B2, 1))) def test_inplace_fallback_nary_different_levels(self): # NB: One day we will implement a batching rule for atan2_ # If/when we do, this test should be replaced to test the fallback # path on another operator to avoid bitrot. op = Tensor.atan2_ outplace_op = torch.atan2 B0, B1, B2 = 2, 3, 5 x = torch.rand(B0, 7) y = torch.rand(7) self._assert_uses_vmap_fallback((op, (0, None)), (x, y)) # op(left, right): All of the levels in right are found in left x_orig = torch.rand(B0, 7) x = x_orig.clone() y = torch.rand(7) vmap(op, in_dims=(0, None))(x, y) self.assertEqual(x, outplace_op(x_orig, y)) x_orig = torch.rand(B0, B1, 7) x = x_orig.clone() y = torch.rand(B0, 7) vmap(vmap(op, in_dims=(0, None)))(x, y) self.assertEqual(x, outplace_op(x_orig, y.view(B0, 1, 7))) # op(left, right): Some of the levels in right are not found in left msg = r"vmap: aten::atan2_\(self, \*extra_args\) is not possible" x = torch.rand(7) y = torch.rand(B0, 7) with self.assertRaisesRegex(RuntimeError, msg): vmap(op, in_dims=(None, 0))(x, y) x = torch.rand(B1, 7) y = torch.rand(B0, 7) with self.assertRaisesRegex(RuntimeError, msg): vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 0))(x, y) x = torch.rand(B1, 7) y = torch.rand(7, B0) with self.assertRaisesRegex(RuntimeError, msg): vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 1))(x, y) x = torch.rand(B0, 7) y = torch.rand(B0, B1, 7) with self.assertRaisesRegex(RuntimeError, msg): vmap(vmap(op, in_dims=(None, 0)))(x, y) def test_backward_unsupported_interaction(self): x = torch.randn(3, requires_grad=True) y = torch.randn(5) grad = torch.randn_like(x) err_msg = r"backward\(\) called inside torch.vmap" def backward_on_vmapped_tensor(x): x.sum().backward() with self.assertRaisesRegex(RuntimeError, err_msg): vmap(backward_on_vmapped_tensor)(x) def backward_with_vmapped_grad(x, grad): x.backward(grad) with self.assertRaisesRegex(RuntimeError, err_msg): vmap(backward_with_vmapped_grad)(x, grad) def completely_unrelated_backward(y): x.sum().backward() with self.assertRaisesRegex(RuntimeError, err_msg): vmap(completely_unrelated_backward)(y) def test_grad_unsupported_interaction(self): input_tensor = torch.randn(3, requires_grad=True) err_msg = "autograd.grad.* called inside torch.vmap" captured = torch.randn(3, requires_grad=True) def output_to_grad_is_vmapped(input_tensor): output = (captured * input_tensor).sum() return torch.autograd.grad([output], [captured])[0] with self.assertRaisesRegex(RuntimeError, err_msg): vmap(output_to_grad_is_vmapped)(input_tensor) output = (input_tensor**2).sum() def input_to_grad_is_vmapped(input_tensor): return torch.autograd.grad([output], [input_tensor])[0] with self.assertRaisesRegex(RuntimeError, err_msg): vmap(input_to_grad_is_vmapped)(input_tensor) def test_batched_gradient_basic(self): 
N = 3 x = torch.randn(N, requires_grad=True) y = torch.randn(N) def vjp_mul(v): return torch.autograd.grad([x * y], [x], grad_outputs=[v])[0] batched_v = torch.eye(N) jacobian = vmap(vjp_mul)(batched_v) self.assertEqual(jacobian, torch.diagflat(y)) def test_functools_partial(self): x = torch.randn(3) y = torch.randn(2, 3) result = vmap(functools.partial(torch.mul, x))(y) self.assertEqual(result, x * y) def test_nn_module(self): tensor = torch.randn(2, 3) model = torch.nn.Linear(3, 3, bias=False) result = vmap(model)(tensor) self.assertEqual(result, model(tensor)) def test_fallback_with_undefined_grad(self): B0 = 7 x = torch.randn(2, 3, 4, 5, requires_grad=True) weight = torch.randn(3, 3, 1, 1) v = torch.randn(B0, 2, 3, 4, 5) def get_vjp(v): result = torch.nn.functional.conv2d(x, weight) (grad_x,) = torch.autograd.grad(result, x, v) return grad_x # Runs vmap(get_vjp)(v), which should not error out. # The backward formula for convolution returns an undefined # Tensor for grad_bias because the original bias does not exist. # # In the future we'll probably add a batching rule for convolution # backward. When this happens, we should modify this test to use a # different op (and/or create and use a dummy operator) to avoid bitrot. self._assert_uses_vmap_fallback([get_vjp], [v]) def slice_inputs(inputs, bdims, i): result = [] for inp, bdim in zip(inputs, bdims): if bdim is None: result.append(inp) else: result.append(inp.select(bdim, i)) return tuple(result) def reference_vmap(op, inputs, in_dims=0, out_dims=0): if isinstance(in_dims, int): in_dims = (in_dims,) * len(inputs) bdim_sizes = [inp.size(dim) for inp, dim in zip(inputs, in_dims) if dim is not None] assert all(bdim_size == bdim_sizes[0] for bdim_size in bdim_sizes) bdim_size = bdim_sizes[0] results = tuple(op(*slice_inputs(inputs, in_dims, i)) for i in range(bdim_size)) assert len(results) > 0 op_has_single_return = not isinstance(results[0], tuple) if op_has_single_return: assert all(isinstance(result, torch.Tensor) for result in results) if isinstance(out_dims, int): out_dims = (out_dims,) * 1 return torch.stack(results, dim=out_dims[0]) assert all(isinstance(result, tuple) for result in results) num_returns = len(results[0]) assert all(len(result) == num_returns for result in results) if isinstance(out_dims, int): out_dims = (out_dims,) * num_returns return tuple( torch.stack(result_shards, out_dim) for result_shards, out_dim in zip(zip(*results), out_dims) )
TestVmapAPILegacy
python
Textualize__textual
src/textual/_node_list.py
{ "start": 531, "end": 647 }
class ____(AttributeError): """Raise if you try to mutate the list.""" @rich.repr.auto(angular=True)
ReadOnlyError
python
walkccc__LeetCode
solutions/1849. Splitting a String Into Descending Consecutive Values/1849.py
{ "start": 0, "end": 493 }
class ____: def splitString(self, s: str) -> bool: def isValid(s: str, start: int, prev: int, segment: int) -> bool: if start == len(s) and segment > 1: return True curr = 0 for i in range(start, len(s)): curr = curr * 10 + int(s[i]) if curr > 9999999999: return False if (prev == -1 or curr == prev - 1) and isValid(s, i + 1, curr, segment + 1): return True return False return isValid(s, 0, -1, 0)
Solution
python
huggingface__transformers
src/transformers/models/led/modeling_led.py
{ "start": 53460, "end": 55677 }
class ____(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. encoder_global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None encoder_global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring( custom_intro=""" Base class for outputs of sequence-to-sequence sentence classification models. """ )
LEDSeq2SeqLMOutput
python
langchain-ai__langchain
libs/langchain/langchain_classic/callbacks/tracers/logging.py
{ "start": 333, "end": 1694 }
class ____(FunctionCallbackHandler): """Tracer that logs via the input Logger.""" name: str = "logging_callback_handler" def __init__( self, logger: logging.Logger, log_level: int = logging.INFO, extra: dict | None = None, **kwargs: Any, ) -> None: """Initialize the LoggingCallbackHandler. Args: logger: the logger to use for logging log_level: the logging level (default: logging.INFO) extra: the extra context to log (default: None) **kwargs: additional keyword arguments. """ log_method = getattr(logger, logging.getLevelName(level=log_level).lower()) def callback(text: str) -> None: log_method(text, extra=extra) super().__init__(function=callback, **kwargs) @override def on_text( self, text: str, *, run_id: UUID, parent_run_id: UUID | None = None, **kwargs: Any, ) -> None: try: crumbs_str = f"[{self.get_breadcrumbs(run=self._get_run(run_id=run_id))}] " except TracerException: crumbs_str = "" self.function_callback( f"{get_colored_text('[text]', color='blue')}" f" {get_bolded_text(f'{crumbs_str}New text:')}\n{text}", )
LoggingCallbackHandler
python
scrapy__scrapy
tests/test_scheduler_base.py
{ "start": 1463, "end": 1748 }
class ____(Spider): name = "paths" def __init__(self, mockserver, *args, **kwargs): super().__init__(*args, **kwargs) self.start_urls = map(mockserver.url, PATHS) def parse(self, response): return {"path": urlparse_cached(response).path}
PathsSpider
python
google__jax
docs/autodidax.py
{ "start": 62772, "end": 71490 }
class ____: buf: Any aval: ShapedArray def __init__(self, aval, buf): self.aval = aval self.buf = buf dtype = property(lambda self: self.aval.dtype) shape = property(lambda self: self.aval.shape) ndim = property(lambda self: self.aval.ndim) def __array__(self): return np.asarray(self.buf) def __repr__(self): return repr(np.asarray(self.buf)) def __str__(self): return str(np.asarray(self.buf)) _neg = staticmethod(neg) _add = staticmethod(add) _radd = staticmethod(add) _mul = staticmethod(mul) _rmul = staticmethod(mul) _gt = staticmethod(greater) _lt = staticmethod(less) input_handlers[Array] = lambda x: x.buf jax_types.add(Array) # + @jit def f(x): y = sin(x) * 2. z = - y + x return z x, xdot = 3., 1. y, ydot = jvp(f, (x,), (xdot,)) print(y) print(ydot) # + tags=["hide-input"] def pprint_xla_call(names: defaultdict[Var, str], eqn: JaxprEqn) -> PPrint: lhs = pp(' '.join(var_str(names, v) for v in eqn.out_binders)) params_without_jaxpr = {k:v for k, v in eqn.params.items() if k != 'jaxpr'} rhs = (pp(eqn.primitive.name) >> pp_params(params_without_jaxpr) >> pp(' '.join(names[x] if isinstance(x, Var) else str(x.val) for x in eqn.inputs))) return vcat([lhs >> pp(' = ') >> rhs, pp_jaxpr(eqn.params['jaxpr']).indent(2)]) pp_rules[xla_call_p] = pprint_xla_call # - # ## Part 4: `linearize` and `vjp` (and `grad`!) # # The `linearize` and `vjp` autodiff functions are built on `jvp`, but involve # jaxprs as well. That's because both involve staging out, or delaying, # computation. # ### `linearize` # # In the case of `linearize`, we want to stage out the linear part of a `jvp` # computation. That is, in terms of # [Haskell-like type signatures](https://wiki.haskell.org/Type_signature), # if we have `jvp : (a -> b) -> (a, T a) -> (b, T b)`, # then we write `linearize : (a -> b) -> a -> (b, T a -o T b)`, using `T a` to # mean "the tangent type of `a`" and using the "lollipop" `-o` rather than the # arrow `->` to indicate a _linear_ function. We define the semantics of # `linearize` in terms of `jvp` too: # ```python # y, f_lin = linearize(f, x) # y_dot = f_lin(x_dot) # ``` # gives the same result for `(y, y_dot)` as # ``` # y, y_dot = jvp(f, (x,), (x_dot,)) # ``` # where the application of `f_lin` does not redo any of the linearization work. # We'll represent the delayed linear part `f_lin : T a -o T b` as a jaxpr. # # Tangentially, now that we have linear arrows `-o`, we can provide a slightly # more informative type for `jvp`: # ``` # jvp : (a -> b) -> (UnrestrictedUse a, T a) -o (UnrestrictedUse b, T b) # ``` # Here we're writing `UnrestrictedUse` just to indicate that we have a special # pair where the first element can be used in an unrestricted (nonlinear) way. # In conjunction with the linear arrow, this notation is just meant to express # that the function `jvp f` uses its first input in a nonlinear way but its # second input in a linear way, producing a corresponding nonlinear output # (which can be used in a nonlinear way) paired with a linear output. This more # refined type signature encodes the data dependencies in `jvp f`, which are # useful for partial evaluation. # # To build the `f_lin` jaxpr from a JVP, we need to perform partial evaluation: # we evaluate all the primal values as we trace, but stage the tangent # computations into a jaxpr. This is our second way to build jaxprs. 
But where # `make_jaxpr` and its underlying `JaxprTrace`/`JaxprTracer` interpreters aim # to stage out every primitive bind, this second approach stages out only those # primitive binds with a data dependence on tangent inputs. # # First, some utilities: # + def split_half(lst: list[Any]) -> tuple[list[Any], list[Any]]: assert not len(lst) % 2 return split_list(lst, len(lst) // 2) def merge_lists(which: list[bool], l1: list[Any], l2: list[Any]) -> list[Any]: l1, l2 = iter(l1), iter(l2) out = [next(l2) if b else next(l1) for b in which] assert next(l1, None) is next(l2, None) is None return out # - # Next, we'll write `linearize` by combining `jvp` together with a general # partial evaluation transformation, to be added next: # + def linearize_flat(f, *primals_in): pvals_in = ([PartialVal.known(x) for x in primals_in] + [PartialVal.unknown(vspace(get_aval(x))) for x in primals_in]) def f_jvp(*primals_tangents_in): primals_out, tangents_out = jvp(f, *split_half(primals_tangents_in)) return [*primals_out, *tangents_out] jaxpr, pvals_out, consts = partial_eval_flat(f_jvp, pvals_in) primal_pvals, _ = split_half(pvals_out) assert all(pval.is_known for pval in primal_pvals) primals_out = [pval.const for pval in primal_pvals] f_lin = lambda *tangents: eval_jaxpr(jaxpr, [*consts, *tangents]) return primals_out, f_lin def linearize(f, *primals_in): primals_in_flat, in_tree = tree_flatten(primals_in) f, out_tree = flatten_fun(f, in_tree) primals_out_flat, f_lin_flat = linearize_flat(f, *primals_in_flat) primals_out = tree_unflatten(out_tree(), primals_out_flat) def f_lin(*tangents_in): tangents_in_flat, in_tree2 = tree_flatten(tangents_in) if in_tree != in_tree2: raise TypeError tangents_out_flat = f_lin_flat(*tangents_in_flat) return tree_unflatten(out_tree(), tangents_out_flat) return primals_out, f_lin def vspace(aval: ShapedArray) -> ShapedArray: return raise_to_shaped(aval) # TODO handle integers? # - # Now we turn to the general partial evaluation transformation. The goal is to # accept a Python callable and a list of inputs, some known and some unknown, # and to produce (1) all the outputs which can be computed from the known # inputs, together with (2) a jaxpr representing the part of the Python # callable's computation which can only be performed after the remaining inputs # are known. # # This transformation is tricky to summarize in a type signature. If we # assume the input function's type signature is `(a1, a2) -> (b1, b2)`, where # `a1` and `a2` represent the known and unknown inputs, respectively, and where # `b1` only has a data dependency on `a1` while `b2` has some data dependency on # `a2`, then we might write # # ``` # partial_eval : ((a1, a2) -> (b1, b2)) -> a1 -> exists r. (b1, r, (r, a2) -> b2) # ``` # # In words, given values for the inputs of type `a1`, `partial_eval` produces # the outputs of type `b1` along with "residual" values of # existentially-quantified type `r` representing the intermediates required to # complete the computation in the second stage. It also produces a function of # type `(r, a2) -> b2` which accepts the residual values as well as the # remaining inputs and produces the remaining outputs. # # We like to think of partial evaluation as "unzipping" one computation into # two. For example, consider this jaxpr: # ``` # { lambda a:float64[] . # let b:float64[] = sin a # c:float64[] = neg b # in ( c ) } # ``` # A jaxpr for the JVP would look like: # ``` # { lambda a:float64[] b:float64[] . 
# let c:float64[] = sin a # d:float64[] = cos a # e:float64[] = mul d b # f:float64[] = neg c # g:float64[] = neg e # in ( f, g ) } # ``` # If we imagine applying partial evaluation to this jaxpr with the first input # known and the second unknown, we end up 'unzipping' the JVP jaxpr into primal # and tangent jaxprs: # ``` # { lambda a:float64[] . # let c:float64[] = sin a # d:float64[] = cos a # f:float64[] = neg c # in ( f, d ) } # ``` # ``` # { lambda d:float64[] b:float64[] . # let e:float64[] = mul d b # g:float64[] = neg e # in ( g ) } # ``` # This second jaxpr represents the linear computation that we want from # `linearize`. # # However, unlike in this jaxpr example, we want the computation on known values # to occur while evaluating the input Python callable. That is, rather than # forming a jaxpr for the entire function `(a1, a2) -> (b1, b2)`, staging all # operations out of Python first before sorting out what can be evaluated now # and what must be delayed, we want only to form a jaxpr for those operations # that _must_ be delayed due to a dependence on unknown inputs. In the context # of automatic differentiation, this is the feature that ultimately enables us # to handle functions like `grad(lambda x: x**2 if x > 0 else 0.)`. Python # control flow works because partial evaluation keeps the primal computation in # Python. As a consequence, our `Trace` and `Tracer` subclasses must on the fly # sort out what can be evaluated and what must be staged out into a jaxpr. # # First, we start with a `PartialVal` class, which represents a value that can # be either known or unknown:
Array
python
sanic-org__sanic
sanic/worker/process.py
{ "start": 404, "end": 7105 }
class ____: """A worker process.""" THRESHOLD = 300 # == 30 seconds SERVER_LABEL = "Server" SERVER_IDENTIFIER = "Srv" def __init__( self, factory, name, ident, target, kwargs, worker_state, restartable: bool = False, ): self.state = ProcessState.IDLE self.factory = factory self.name = name self.ident = ident self.target = target self.kwargs = kwargs self.worker_state = worker_state self.restartable = restartable if self.name not in self.worker_state: self.worker_state[self.name] = { "server": self.SERVER_LABEL in self.name } self.spawn() def set_state(self, state: ProcessState, force=False): if not force and state < self.state: raise Exception("...") self.state = state self.worker_state[self.name] = { **self.worker_state[self.name], "state": self.state.name, } def start(self): os.environ["SANIC_WORKER_NAME"] = self.name os.environ["SANIC_WORKER_IDENTIFIER"] = self.ident logger.debug( f"{Colors.BLUE}Starting a process: {Colors.BOLD}" f"{Colors.SANIC}%s{Colors.END}", self.name, ) self.set_state(ProcessState.STARTING) self._current_process.start() self.set_state(ProcessState.STARTED) if not self.worker_state[self.name].get("starts"): self.worker_state[self.name] = { **self.worker_state[self.name], "pid": self.pid, "start_at": get_now(), "starts": 1, } del os.environ["SANIC_WORKER_NAME"] del os.environ["SANIC_WORKER_IDENTIFIER"] def join(self): self.set_state(ProcessState.JOINED) self._current_process.join() def exit(self): limit = 100 while self.is_alive() and limit > 0: sleep(0.1) limit -= 1 if not self.is_alive(): try: del self.worker_state[self.name] except ConnectionRefusedError: logger.debug("Monitor process has already exited.") except KeyError: logger.debug("Could not find worker state to delete.") def terminate(self): if self.state is not ProcessState.TERMINATED: logger.debug( f"{Colors.BLUE}Terminating a process: " f"{Colors.BOLD}{Colors.SANIC}" f"%s {Colors.BLUE}[%s]{Colors.END}", self.name, self.pid, ) self.set_state(ProcessState.TERMINATED, force=True) try: os.kill(self.pid, SIGINT) except (KeyError, AttributeError, ProcessLookupError): ... 
def restart(self, restart_order=RestartOrder.SHUTDOWN_FIRST, **kwargs): logger.debug( f"{Colors.BLUE}Restarting a process: {Colors.BOLD}{Colors.SANIC}" f"%s {Colors.BLUE}[%s]{Colors.END}", self.name, self.pid, ) self.set_state(ProcessState.RESTARTING, force=True) if restart_order is RestartOrder.SHUTDOWN_FIRST: self._terminate_now() else: self._old_process = self._current_process if self._add_config(): self.kwargs.update( {"config": {k.upper(): v for k, v in kwargs.items()}} ) try: self.spawn() self.start() except AttributeError: raise RuntimeError("Restart failed") if restart_order is RestartOrder.STARTUP_FIRST: self._terminate_soon() self.worker_state[self.name] = { **self.worker_state[self.name], "pid": self.pid, "starts": self.worker_state[self.name]["starts"] + 1, "restart_at": get_now(), } def is_alive(self): try: return self._current_process.is_alive() except AssertionError: return False def spawn(self): if self.state not in (ProcessState.IDLE, ProcessState.RESTARTING): raise Exception("Cannot spawn a worker process until it is idle.") self._current_process = self.factory( name=self.name, target=self.target, kwargs=self.kwargs, daemon=True, ) @property def pid(self): return self._current_process.pid @property def exitcode(self): return self._current_process.exitcode def _terminate_now(self): if not self._current_process.is_alive(): return logger.debug( f"{Colors.BLUE}Begin restart termination: " f"{Colors.BOLD}{Colors.SANIC}" f"%s {Colors.BLUE}[%s]{Colors.END}", self.name, self._current_process.pid, ) self._current_process.terminate() def _terminate_soon(self): logger.debug( f"{Colors.BLUE}Begin restart termination: " f"{Colors.BOLD}{Colors.SANIC}" f"%s {Colors.BLUE}[%s]{Colors.END}", self.name, self._current_process.pid, ) termination_thread = Thread(target=self._wait_to_terminate) termination_thread.start() def _wait_to_terminate(self): logger.debug( f"{Colors.BLUE}Waiting for process to be acked: " f"{Colors.BOLD}{Colors.SANIC}" f"%s {Colors.BLUE}[%s]{Colors.END}", self.name, self._old_process.pid, ) misses = 0 while self.state is not ProcessState.ACKED: sleep(0.1) misses += 1 if misses > self.THRESHOLD: raise TimeoutError( f"Worker {self.name} failed to come ack within " f"{self.THRESHOLD / 10} seconds" ) else: logger.debug( f"{Colors.BLUE}Process acked. Terminating: " f"{Colors.BOLD}{Colors.SANIC}" f"%s {Colors.BLUE}[%s]{Colors.END}", self.name, self._old_process.pid, ) self._old_process.terminate() delattr(self, "_old_process") def _add_config(self) -> bool: sig = signature(self.target) if "config" in sig.parameters or any( param.kind == param.VAR_KEYWORD for param in sig.parameters.values() ): return True return False
WorkerProcess
python
ethereum__web3.py
tests/ens/test_offchain_resolution.py
{ "start": 2899, "end": 3465 }
class ____:
    status_code = 200

    def __init__(self, request_type, *args, **_kwargs):
        # validate the expected urls
        if request_type == "get":
            assert args[1] == EXPECTED_GET_URL
        elif request_type == "post":
            assert args[1] == EXPECTED_POST_URL

    @staticmethod
    def raise_for_status(): pass  # noqa: E704

    @staticmethod
    async def json(): return {"data": OFFCHAIN_RESOLVER_DATA}  # noqa: E704

    @property
    def status(self):
        return self.status_code
AsyncMockHttpSuccessResponse
python
ApeWorX__ape
src/ape/contracts/base.py
{ "start": 5831, "end": 10429 }
class ____(ManagerAccessMixin): contract: "ContractInstance" abis: list["MethodABI"] def __init__(self, contract: "ContractInstance", abis: list["MethodABI"]) -> None: super().__init__() self.contract = contract self.abis = abis # If there is a natspec, inject it as the "doc-str" for this method. # This greatly helps integrate with IPython. self.__doc__ = self.info @log_instead_of_fail(default="<ContractMethodHandler>") def __repr__(self) -> str: # `<ContractName 0x1234...AbCd>.method_name` return f"{self.contract.__repr__()}.{self.abis[-1].name}" @log_instead_of_fail() def _repr_pretty_(self, printer, cycle): """ Show the NatSpec of a Method in any IPython console (including ``ape console``). """ console = get_rich_console() output = self._get_info(enrich=True) or "\n".join(abi.signature for abi in self.abis) console.print(output) def __str__(self) -> str: # `method_name(type1 arg1, ...) -> return_type` abis = sorted(self.abis, key=lambda abi: len(abi.inputs or [])) return abis[-1].signature @property def info(self) -> str: """ The NatSpec documentation of the method, if one exists. Else, returns the empty string. """ return self._get_info() def _get_info(self, enrich: bool = False) -> str: infos: list[str] = [] for abi in self.abis: if abi.selector not in self.contract.contract_type.natspecs: continue natspec = self.contract.contract_type.natspecs[abi.selector] header = abi.signature natspec_str = natspec.replace("\n", "\n ") infos.append(f"{header}\n {natspec_str}") if enrich: infos = [_enrich_natspec(n) for n in infos] # Same as number of ABIs, regardless of NatSpecs. number_infos = len(infos) if number_infos == 1: return infos[0] # Ensure some distinction of the infos using number-prefixes. numeric_infos = [] for idx, info in enumerate(infos): num_info = f"{idx + 1}: {info}" numeric_infos.append(num_info) return "\n\n".join(numeric_infos) def encode_input(self, *args) -> HexBytes: selected_abi = _select_method_abi(self.abis, args) arguments = self.conversion_manager.convert_method_args(selected_abi, args) ecosystem = self.provider.network.ecosystem encoded_calldata = ecosystem.encode_calldata(selected_abi, *arguments) method_id = ecosystem.get_method_selector(selected_abi) return HexBytes(method_id + encoded_calldata) def decode_input(self, calldata: bytes) -> tuple[str, dict[str, Any]]: matching_abis = [] rest_calldata = None err = ContractDataError( f"Unable to find matching method ABI for calldata '{to_hex(calldata)}'. " "Try prepending a method ID to the beginning of the calldata." 
) for abi in self.abis: selector = self.provider.network.ecosystem.get_method_selector(abi) if calldata.startswith(selector): cutoff = len(selector) rest_calldata = calldata[cutoff:] matching_abis.append(abi) if len(matching_abis) == 1: abi = matching_abis[0] decoded_input = self.provider.network.ecosystem.decode_calldata( matching_abis[0], HexBytes(rest_calldata or "") ) return abi.selector, decoded_input elif len(matching_abis) > 1: raise err # Brute-force find method ABI valid_results = [] for abi in self.abis: try: decoded_calldata = self.provider.network.ecosystem.decode_calldata( abi, HexBytes(calldata) ) except Exception: continue if decoded_calldata: valid_results.append((abi, decoded_calldata)) if len(valid_results) == 1: selected_abi, decoded_calldata = valid_results[0] return selected_abi.selector, decoded_calldata raise err def _validate_is_contract(self): if not self.contract.is_contract: raise ContractNotFoundError( self.contract.address, self.provider.network.explorer is not None, self.provider.network_choice, )
ContractMethodHandler
python
PrefectHQ__prefect
src/prefect/_vendor/croniter/croniter.py
{ "start": 4809, "end": 49664 }
class ____(object): MONTHS_IN_YEAR = 12 # This helps with expanding `*` fields into `lower-upper` ranges. Each item # in this tuple maps to the corresponding field index RANGES = ( (0, 59), (0, 23), (1, 31), (1, 12), (0, 6), (0, 59), (1970, 2099), ) DAYS = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) ALPHACONV = ( {}, # 0: min {}, # 1: hour {"l": "l"}, # 2: dom # 3: mon copy.deepcopy(M_ALPHAS), # 4: dow copy.deepcopy(DOW_ALPHAS), # 5: second {}, # 6: year {}, ) LOWMAP = ( {}, {}, {0: 1}, {0: 1}, {7: 0}, {}, {}, ) LEN_MEANS_ALL = ( 60, 24, 31, 12, 7, 60, 130, ) def __init__( self, expr_format, start_time=None, ret_type=float, day_or=True, max_years_between_matches=None, is_prev=False, hash_id=None, implement_cron_bug=False, second_at_beginning=None, expand_from_start_time=False, ): self._ret_type = ret_type self._day_or = day_or self._implement_cron_bug = implement_cron_bug self.second_at_beginning = bool(second_at_beginning) self._expand_from_start_time = expand_from_start_time if hash_id: if not isinstance(hash_id, (bytes, str)): raise TypeError("hash_id must be bytes or UTF-8 string") if not isinstance(hash_id, bytes): hash_id = hash_id.encode("UTF-8") self._max_years_btw_matches_explicitly_set = ( max_years_between_matches is not None ) if not self._max_years_btw_matches_explicitly_set: max_years_between_matches = 50 self._max_years_between_matches = max(int(max_years_between_matches), 1) if start_time is None: start_time = time() self.tzinfo = None self.start_time = None self.dst_start_time = None self.cur = None self.set_current(start_time, force=False) self.expanded, self.nth_weekday_of_month = self.expand( expr_format, hash_id=hash_id, from_timestamp=self.dst_start_time if self._expand_from_start_time else None, second_at_beginning=second_at_beginning, ) self.fields = CRON_FIELDS[len(self.expanded)] self.expressions = EXPRESSIONS[(expr_format, hash_id, second_at_beginning)] self._is_prev = is_prev @classmethod def _alphaconv(cls, index, key, expressions): try: return cls.ALPHACONV[index][key] except KeyError: raise CroniterNotAlphaError( "[{0}] is not acceptable".format(" ".join(expressions)) ) def get_next(self, ret_type=None, start_time=None, update_current=True): if start_time and self._expand_from_start_time: raise ValueError( "start_time is not supported when using expand_from_start_time = True." ) return self._get_next( ret_type=ret_type, start_time=start_time, is_prev=False, update_current=update_current, ) def get_prev(self, ret_type=None, start_time=None, update_current=True): return self._get_next( ret_type=ret_type, start_time=start_time, is_prev=True, update_current=update_current, ) def get_current(self, ret_type=None): ret_type = ret_type or self._ret_type if issubclass(ret_type, datetime.datetime): return self.timestamp_to_datetime(self.cur) return self.cur def set_current(self, start_time, force=True): if (force or (self.cur is None)) and start_time is not None: if isinstance(start_time, datetime.datetime): self.tzinfo = start_time.tzinfo start_time = self.datetime_to_timestamp(start_time) self.start_time = start_time self.dst_start_time = start_time self.cur = start_time return self.cur @staticmethod def datetime_to_timestamp(d): """ Converts a `datetime` object `d` into a UNIX timestamp. """ return datetime_to_timestamp(d) _datetime_to_timestamp = datetime_to_timestamp # retrocompat def timestamp_to_datetime(self, timestamp, tzinfo=MARKER): """ Converts a UNIX `timestamp` into a `datetime` object. 
""" if tzinfo is MARKER: # allow to give tzinfo=None even if self.tzinfo is set tzinfo = self.tzinfo k = timestamp if tzinfo: k = (timestamp, repr(tzinfo)) try: return TIMESTAMP_TO_DT_CACHE[k] except KeyError: pass if OVERFLOW32B_MODE: # degraded mode to workaround Y2038 # see https://github.com/python/cpython/issues/101069 result = EPOCH.replace(tzinfo=None) + datetime.timedelta(seconds=timestamp) else: result = datetime.datetime.fromtimestamp(timestamp, tz=tzutc()).replace( tzinfo=None ) if tzinfo: result = result.replace(tzinfo=UTC_DT).astimezone(tzinfo) TIMESTAMP_TO_DT_CACHE[(result, repr(result.tzinfo))] = result return result _timestamp_to_datetime = timestamp_to_datetime # retrocompat @staticmethod def timedelta_to_seconds(td): """ Converts a 'datetime.timedelta' object `td` into seconds contained in the duration. Note: We cannot use `timedelta.total_seconds()` because this is not supported by Python 2.6. """ return timedelta_to_seconds(td) _timedelta_to_seconds = timedelta_to_seconds # retrocompat def _get_next( self, ret_type=None, start_time=None, is_prev=None, update_current=None, ): if update_current is None: update_current = True self.set_current(start_time, force=True) if is_prev is None: is_prev = self._is_prev self._is_prev = is_prev expanded = self.expanded[:] nth_weekday_of_month = self.nth_weekday_of_month.copy() ret_type = ret_type or self._ret_type if not issubclass(ret_type, (float, datetime.datetime)): raise TypeError( "Invalid ret_type, only 'float' or 'datetime' is acceptable." ) # exception to support day of month and day of week as defined in cron dom_dow_exception_processed = False if ( expanded[DAY_FIELD][0] != "*" and expanded[DOW_FIELD][0] != "*" ) and self._day_or: # If requested, handle a bug in vixie cron/ISC cron where day_of_month and day_of_week form # an intersection (AND) instead of a union (OR) if either field is an asterisk or starts with an asterisk # (https://crontab.guru/cron-bug.html) if self._implement_cron_bug and ( re_star.match(self.expressions[DAY_FIELD]) or re_star.match(self.expressions[DOW_FIELD]) ): # To produce a schedule identical to the cron bug, we'll bypass the code that # makes a union of DOM and DOW, and instead skip to the code that does an intersect instead pass else: bak = expanded[DOW_FIELD] expanded[DOW_FIELD] = ["*"] t1 = self._calc(self.cur, expanded, nth_weekday_of_month, is_prev) expanded[DOW_FIELD] = bak expanded[DAY_FIELD] = ["*"] t2 = self._calc(self.cur, expanded, nth_weekday_of_month, is_prev) if not is_prev: result = t1 if t1 < t2 else t2 else: result = t1 if t1 > t2 else t2 dom_dow_exception_processed = True if not dom_dow_exception_processed: result = self._calc(self.cur, expanded, nth_weekday_of_month, is_prev) # DST Handling for cron job spanning across days dtstarttime = self._timestamp_to_datetime(self.dst_start_time) dtstarttime_utcoffset = dtstarttime.utcoffset() or datetime.timedelta(0) dtresult = self.timestamp_to_datetime(result) lag = lag_hours = 0 # do we trigger DST on next crontab (handle backward changes) dtresult_utcoffset = dtstarttime_utcoffset if dtresult and self.tzinfo: dtresult_utcoffset = dtresult.utcoffset() lag_hours = self._timedelta_to_seconds(dtresult - dtstarttime) / (60 * 60) lag = self._timedelta_to_seconds(dtresult_utcoffset - dtstarttime_utcoffset) hours_before_midnight = 24 - dtstarttime.hour if dtresult_utcoffset != dtstarttime_utcoffset: if (lag > 0 and abs(lag_hours) >= hours_before_midnight) or ( lag < 0 and ((3600 * abs(lag_hours) + abs(lag)) >= hours_before_midnight * 3600) 
): dtresult_adjusted = dtresult - datetime.timedelta(seconds=lag) result_adjusted = self._datetime_to_timestamp(dtresult_adjusted) # Do the actual adjust only if the result time actually exists if ( self._timestamp_to_datetime(result_adjusted).tzinfo == dtresult_adjusted.tzinfo ): dtresult = dtresult_adjusted result = result_adjusted self.dst_start_time = result if update_current: self.cur = result if issubclass(ret_type, datetime.datetime): result = dtresult return result # iterator protocol, to enable direct use of croniter # objects in a loop, like "for dt in croniter("5 0 * * *'): ..." # or for combining multiple croniters into single # dates feed using 'itertools' module def all_next(self, ret_type=None, start_time=None, update_current=None): """ Returns a generator yielding consecutive dates. May be used instead of an implicit call to __iter__ whenever a non-default `ret_type` needs to be specified. """ # In a Python 3.7+ world: contextlib.suppress and contextlib.nullcontext could be used instead try: while True: self._is_prev = False yield self._get_next( ret_type=ret_type, start_time=start_time, update_current=update_current, ) start_time = None except CroniterBadDateError: if self._max_years_btw_matches_explicitly_set: return raise def all_prev(self, ret_type=None, start_time=None, update_current=None): """ Returns a generator yielding previous dates. """ try: while True: self._is_prev = True yield self._get_next( ret_type=ret_type, start_time=start_time, update_current=update_current, ) start_time = None except CroniterBadDateError: if self._max_years_btw_matches_explicitly_set: return raise def iter(self, *args, **kwargs): return self.all_prev if self._is_prev else self.all_next def __iter__(self): return self __next__ = next = _get_next def _calc(self, now, expanded, nth_weekday_of_month, is_prev): if is_prev: now = math.ceil(now) nearest_diff_method = self._get_prev_nearest_diff sign = -1 offset = 1 if (len(expanded) > UNIX_CRON_LEN or now % 60 > 0) else 60 else: now = math.floor(now) nearest_diff_method = self._get_next_nearest_diff sign = 1 offset = 1 if (len(expanded) > UNIX_CRON_LEN) else 60 dst = now = self.timestamp_to_datetime(now + sign * offset) month, year = dst.month, dst.year current_year = now.year DAYS = self.DAYS def proc_year(d): if len(expanded) == YEAR_CRON_LEN: try: expanded[YEAR_FIELD].index("*") except ValueError: # use None as range_val to indicate no loop diff_year = nearest_diff_method(d.year, expanded[YEAR_FIELD], None) if diff_year is None: return None, d if diff_year != 0: if is_prev: d += relativedelta( years=diff_year, month=12, day=31, hour=23, minute=59, second=59, ) else: d += relativedelta( years=diff_year, month=1, day=1, hour=0, minute=0, second=0, ) return True, d return False, d def proc_month(d): try: expanded[MONTH_FIELD].index("*") except ValueError: diff_month = nearest_diff_method( d.month, expanded[MONTH_FIELD], self.MONTHS_IN_YEAR ) reset_day = 1 if diff_month is not None and diff_month != 0: if is_prev: d += relativedelta(months=diff_month) reset_day = DAYS[d.month - 1] if d.month == 2 and self.is_leap(d.year) is True: reset_day += 1 d += relativedelta(day=reset_day, hour=23, minute=59, second=59) else: d += relativedelta( months=diff_month, day=reset_day, hour=0, minute=0, second=0 ) return True, d return False, d def proc_day_of_month(d): try: expanded[DAY_FIELD].index("*") except ValueError: days = DAYS[month - 1] if month == 2 and self.is_leap(year) is True: days += 1 if "l" in expanded[DAY_FIELD] and days == d.day: return 
False, d if is_prev: days_in_prev_month = DAYS[(month - 2) % self.MONTHS_IN_YEAR] diff_day = nearest_diff_method( d.day, expanded[DAY_FIELD], days_in_prev_month ) else: diff_day = nearest_diff_method(d.day, expanded[DAY_FIELD], days) if diff_day is not None and diff_day != 0: if is_prev: d += relativedelta(days=diff_day, hour=23, minute=59, second=59) else: d += relativedelta(days=diff_day, hour=0, minute=0, second=0) return True, d return False, d def proc_day_of_week(d): try: expanded[DOW_FIELD].index("*") except ValueError: diff_day_of_week = nearest_diff_method( d.isoweekday() % 7, expanded[DOW_FIELD], 7 ) if diff_day_of_week is not None and diff_day_of_week != 0: if is_prev: d += relativedelta( days=diff_day_of_week, hour=23, minute=59, second=59 ) else: d += relativedelta( days=diff_day_of_week, hour=0, minute=0, second=0 ) return True, d return False, d def proc_day_of_week_nth(d): if "*" in nth_weekday_of_month: s = nth_weekday_of_month["*"] for i in range(0, 7): if i in nth_weekday_of_month: nth_weekday_of_month[i].update(s) else: nth_weekday_of_month[i] = s del nth_weekday_of_month["*"] candidates = [] for wday, nth in nth_weekday_of_month.items(): c = self._get_nth_weekday_of_month(d.year, d.month, wday) for n in nth: if n == "l": candidate = c[-1] elif len(c) < n: continue else: candidate = c[n - 1] if (is_prev and candidate <= d.day) or ( not is_prev and d.day <= candidate ): candidates.append(candidate) if not candidates: if is_prev: d += relativedelta(days=-d.day, hour=23, minute=59, second=59) else: days = DAYS[month - 1] if month == 2 and self.is_leap(year) is True: days += 1 d += relativedelta( days=(days - d.day + 1), hour=0, minute=0, second=0 ) return True, d candidates.sort() diff_day = (candidates[-1] if is_prev else candidates[0]) - d.day if diff_day != 0: if is_prev: d += relativedelta(days=diff_day, hour=23, minute=59, second=59) else: d += relativedelta(days=diff_day, hour=0, minute=0, second=0) return True, d return False, d def proc_hour(d): try: expanded[HOUR_FIELD].index("*") except ValueError: diff_hour = nearest_diff_method(d.hour, expanded[HOUR_FIELD], 24) if diff_hour is not None and diff_hour != 0: if is_prev: d += relativedelta(hours=diff_hour, minute=59, second=59) else: d += relativedelta(hours=diff_hour, minute=0, second=0) return True, d return False, d def proc_minute(d): try: expanded[MINUTE_FIELD].index("*") except ValueError: diff_min = nearest_diff_method(d.minute, expanded[MINUTE_FIELD], 60) if diff_min is not None and diff_min != 0: if is_prev: d += relativedelta(minutes=diff_min, second=59) else: d += relativedelta(minutes=diff_min, second=0) return True, d return False, d def proc_second(d): if len(expanded) > UNIX_CRON_LEN: try: expanded[SECOND_FIELD].index("*") except ValueError: diff_sec = nearest_diff_method(d.second, expanded[SECOND_FIELD], 60) if diff_sec is not None and diff_sec != 0: d += relativedelta(seconds=diff_sec) return True, d else: d += relativedelta(second=0) return False, d procs = [ proc_year, proc_month, proc_day_of_month, (proc_day_of_week_nth if nth_weekday_of_month else proc_day_of_week), proc_hour, proc_minute, proc_second, ] while abs(year - current_year) <= self._max_years_between_matches: next = False stop = False for proc in procs: (changed, dst) = proc(dst) # `None` can be set mostly for year processing # so please see proc_year / _get_prev_nearest_diff / _get_next_nearest_diff if changed is None: stop = True break if changed: month, year = dst.month, dst.year next = True break if stop: break if next: continue 
return self.datetime_to_timestamp(dst.replace(microsecond=0)) if is_prev: raise CroniterBadDateError("failed to find prev date") raise CroniterBadDateError("failed to find next date") @staticmethod def _get_next_nearest(x, to_check): small = [item for item in to_check if item < x] large = [item for item in to_check if item >= x] large.extend(small) return large[0] @staticmethod def _get_prev_nearest(x, to_check): small = [item for item in to_check if item <= x] large = [item for item in to_check if item > x] small.reverse() large.reverse() small.extend(large) return small[0] @staticmethod def _get_next_nearest_diff(x, to_check, range_val): """ `range_val` is the range of a field. If no available time, we can move to next loop(like next month). `range_val` can also be set to `None` to indicate that there is no loop. ( Currently, should only used for `year` field ) """ for i, d in enumerate(to_check): if d == "l" and range_val is not None: # if 'l' then it is the last day of month # => its value of range_val d = range_val if d >= x: return d - x # When range_val is None and x not exists in to_check, # `None` will be returned to suggest no more available time if range_val is None: return None return to_check[0] - x + range_val @staticmethod def _get_prev_nearest_diff(x, to_check, range_val): """ `range_val` is the range of a field. If no available time, we can move to previous loop(like previous month). Range_val can also be set to `None` to indicate that there is no loop. ( Currently should only used for `year` field ) """ candidates = to_check[:] candidates.reverse() for d in candidates: if d != "l" and d <= x: return d - x if "l" in candidates: return -x # When range_val is None and x not exists in to_check, # `None` will be returned to suggest no more available time if range_val is None: return None candidate = candidates[0] for c in candidates: # fixed: c < range_val # this code will reject all 31 day of month, 12 month, 59 second, # 23 hour and so on. # if candidates has just a element, this will not harmful. # but candidates have multiple elements, then values equal to # range_val will rejected. if c <= range_val: candidate = c break # fix crontab "0 6 30 3 *" condidates only a element, then get_prev error return 2021-03-02 06:00:00 if candidate > range_val: return -range_val return candidate - x - range_val @staticmethod def _get_nth_weekday_of_month(year, month, day_of_week): """For a given year/month return a list of days in nth-day-of-month order. The last weekday of the month is always [-1]. 
""" w = (day_of_week + 6) % 7 c = calendar.Calendar(w).monthdayscalendar(year, month) if c[0][0] == 0: c.pop(0) return tuple(i[0] for i in c) @staticmethod def is_leap(year): return bool(year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)) @classmethod def value_alias(cls, val, field_index, len_expressions=UNIX_CRON_LEN): if isinstance(len_expressions, (list, dict, tuple, set)): len_expressions = len(len_expressions) if val in cls.LOWMAP[field_index] and not ( # do not support 0 as a month either for classical 5 fields cron, # 6fields second repeat form or 7 fields year form # but still let conversion happen if day field is shifted ( field_index in [DAY_FIELD, MONTH_FIELD] and len_expressions == UNIX_CRON_LEN ) or ( field_index in [MONTH_FIELD, DOW_FIELD] and len_expressions == SECOND_CRON_LEN ) or ( field_index in [DAY_FIELD, MONTH_FIELD, DOW_FIELD] and len_expressions == YEAR_CRON_LEN ) ): val = cls.LOWMAP[field_index][val] return val @classmethod def _expand( cls, expr_format, hash_id=None, second_at_beginning=False, from_timestamp=None, ): # Split the expression in components, and normalize L -> l, MON -> mon, # etc. Keep expr_format untouched so we can use it in the exception # messages. expr_aliases = { "@midnight": ("0 0 * * *", "h h(0-2) * * * h"), "@hourly": ("0 * * * *", "h * * * * h"), "@daily": ("0 0 * * *", "h h * * * h"), "@weekly": ("0 0 * * 0", "h h * * h h"), "@monthly": ("0 0 1 * *", "h h h * * h"), "@yearly": ("0 0 1 1 *", "h h h h * h"), "@annually": ("0 0 1 1 *", "h h h h * h"), } efl = expr_format.lower() hash_id_expr = 1 if hash_id is not None else 0 try: efl = expr_aliases[efl][hash_id_expr] except KeyError: pass expressions = efl.split() if len(expressions) not in VALID_LEN_EXPRESSION: raise CroniterBadCronError( "Exactly 5, 6 or 7 columns has to be specified for iterator expression." ) if len(expressions) > UNIX_CRON_LEN and second_at_beginning: # move second to it's own(6th) field to process by same logical expressions.insert(SECOND_FIELD, expressions.pop(0)) expanded = [] nth_weekday_of_month = {} for field_index, expr in enumerate(expressions): for expanderid, expander in EXPANDERS.items(): expr = expander(cls).expand( efl, field_index, expr, hash_id=hash_id, from_timestamp=from_timestamp, ) if "?" in expr: if expr != "?": raise CroniterBadCronError( "[{0}] is not acceptable. Question mark can not used with other characters".format( expr_format ) ) if field_index not in [DAY_FIELD, DOW_FIELD]: raise CroniterBadCronError( "[{0}] is not acceptable. Question mark can only used in day_of_month or day_of_week".format( expr_format ) ) # currently just trade `?` as `*` expr = "*" e_list = expr.split(",") res = [] while len(e_list) > 0: e = e_list.pop() nth = None if field_index == DOW_FIELD: # Handle special case in the dow expression: 2#3, l3 special_dow_rem = special_dow_re.match(str(e)) if special_dow_rem: g = special_dow_rem.groupdict() he, last = g.get("he", ""), g.get("last", "") if he: e = he try: nth = int(last) assert 5 >= nth >= 1 except (KeyError, ValueError, AssertionError): raise CroniterBadCronError( "[{0}] is not acceptable. Invalid day_of_week value: '{1}'".format( expr_format, nth ) ) elif last: e = last nth = g["pre"] # 'l' # Before matching step_search_re, normalize "*" to "{min}-{max}". 
# Example: in the minute field, "*/5" normalizes to "0-59/5" t = re.sub( r"^\*(\/.+)$", r"%d-%d\1" % (cls.RANGES[field_index][0], cls.RANGES[field_index][1]), str(e), ) m = step_search_re.search(t) if not m: # Before matching step_search_re, # normalize "{start}/{step}" to "{start}-{max}/{step}". # Example: in the minute field, "10/5" normalizes to "10-59/5" t = re.sub( r"^(.+)\/(.+)$", r"\1-%d/\2" % (cls.RANGES[field_index][1]), str(e), ) m = step_search_re.search(t) if m: # early abort if low/high are out of bounds (low, high, step) = m.group(1), m.group(2), m.group(4) or 1 if field_index == DAY_FIELD and high == "l": high = "31" if not only_int_re.search(low): low = "{0}".format( cls._alphaconv(field_index, low, expressions) ) if not only_int_re.search(high): high = "{0}".format( cls._alphaconv(field_index, high, expressions) ) # normally, it's already guarded by the RE that should not accept not-int values. if not only_int_re.search(str(step)): raise CroniterBadCronError( "[{0}] step '{2}' in field {1} is not acceptable".format( expr_format, field_index, step ) ) step = int(step) for band in low, high: if not only_int_re.search(str(band)): raise CroniterBadCronError( "[{0}] bands '{2}-{3}' in field {1} are not acceptable".format( expr_format, field_index, low, high ) ) low, high = [ cls.value_alias(int(_val), field_index, expressions) for _val in (low, high) ] if max(low, high) > max( cls.RANGES[field_index][0], cls.RANGES[field_index][1] ): raise CroniterBadCronError( "{0} is out of bands".format(expr_format) ) if from_timestamp: low = cls._get_low_from_current_date_number( field_index, int(step), int(from_timestamp) ) # Handle when the second bound of the range is in backtracking order: # eg: X-Sun or X-7 (Sat-Sun) in DOW, or X-Jan (Apr-Jan) in MONTH if low > high: whole_field_range = list( range( cls.RANGES[field_index][0], cls.RANGES[field_index][1] + 1, 1, ) ) # Add FirstBound -> ENDRANGE, respecting step rng = list(range(low, cls.RANGES[field_index][1] + 1, step)) # Then 0 -> SecondBound, but skipping n first occurences according to step # EG to respect such expressions : Apr-Jan/3 to_skip = 0 if rng: already_skipped = list(reversed(whole_field_range)).index( rng[-1] ) curpos = whole_field_range.index(rng[-1]) if ((curpos + step) > len(whole_field_range)) and ( already_skipped < step ): to_skip = step - already_skipped rng += list( range(cls.RANGES[field_index][0] + to_skip, high + 1, step) ) # if we include a range type: Jan-Jan, or Sun-Sun, # it means the whole cycle (all days of week, # all monthes of year, etc) elif low == high: rng = list( range( cls.RANGES[field_index][0], cls.RANGES[field_index][1] + 1, step, ) ) else: try: rng = list(range(low, high + 1, step)) except ValueError as exc: raise CroniterBadCronError("invalid range: {0}".format(exc)) rng = ( ["{0}#{1}".format(item, nth) for item in rng] if field_index == DOW_FIELD and nth and nth != "l" else rng ) e_list += [a for a in rng if a not in e_list] else: if t.startswith("-"): raise CroniterBadCronError( "[{0}] is not acceptable," "negative numbers not allowed".format(expr_format) ) if not star_or_int_re.search(t): t = cls._alphaconv(field_index, t, expressions) try: t = int(t) except ValueError: pass t = cls.value_alias(t, field_index, expressions) if t not in ["*", "l"] and ( int(t) < cls.RANGES[field_index][0] or int(t) > cls.RANGES[field_index][1] ): raise CroniterBadCronError( "[{0}] is not acceptable, out of range".format(expr_format) ) res.append(t) if field_index == DOW_FIELD and nth: if t not in 
nth_weekday_of_month: nth_weekday_of_month[t] = set() nth_weekday_of_month[t].add(nth) res = set(res) res = sorted( res, key=lambda i: "{:02}".format(i) if isinstance(i, int) else i ) if len(res) == cls.LEN_MEANS_ALL[field_index]: # Make sure the wildcard is used in the correct way (avoid over-optimization) if (field_index == DAY_FIELD and "*" not in expressions[DOW_FIELD]) or ( field_index == DOW_FIELD and "*" not in expressions[DAY_FIELD] ): pass else: res = ["*"] expanded.append(["*"] if (len(res) == 1 and res[0] == "*") else res) # Check to make sure the dow combo in use is supported if nth_weekday_of_month: dow_expanded_set = set(expanded[DOW_FIELD]) dow_expanded_set = dow_expanded_set.difference(nth_weekday_of_month.keys()) dow_expanded_set.discard("*") # Skip: if it's all weeks instead of wildcard if ( dow_expanded_set and len(set(expanded[DOW_FIELD])) != cls.LEN_MEANS_ALL[DOW_FIELD] ): raise CroniterUnsupportedSyntaxError( "day-of-week field does not support mixing literal values and nth day of week syntax. " "Cron: '{}' dow={} vs nth={}".format( expr_format, dow_expanded_set, nth_weekday_of_month ) ) EXPRESSIONS[(expr_format, hash_id, second_at_beginning)] = expressions return expanded, nth_weekday_of_month @classmethod def expand( cls, expr_format, hash_id=None, second_at_beginning=False, from_timestamp=None, ): """ Expand a cron expression format into a noramlized format of list[list[int | 'l' | '*']]. The first list representing each element of the epxression, and each sub-list representing the allowed values for that expression component. A tuple is returned, the first value being the expanded epxression list, and the second being a `nth_weekday_of_month` mapping. Examples: # Every minute >>> croniter.expand('* * * * *') ([['*'], ['*'], ['*'], ['*'], ['*']], {}) # On the hour >>> croniter.expand('0 0 * * *') ([[0], [0], ['*'], ['*'], ['*']], {}) # Hours 0-5 and 10 monday through friday >>> croniter.expand('0-5,10 * * * mon-fri') ([[0, 1, 2, 3, 4, 5, 10], ['*'], ['*'], ['*'], [1, 2, 3, 4, 5]], {}) Note that some special values such as nth day of week are expanded to a special mapping format for later processing: # Every minute on the 3rd tuesday of the month >>> croniter.expand('* * * * 2#3') ([['*'], ['*'], ['*'], ['*'], [2]], {2: {3}}) # Every hour on the last day of the month >>> croniter.expand('0 * l * *') ([[0], ['*'], ['l'], ['*'], ['*']], {}) # On the hour every 15 seconds >>> croniter.expand('0 0 * * * */15') ([[0], [0], ['*'], ['*'], ['*'], [0, 15, 30, 45]], {}) """ try: return cls._expand( expr_format, hash_id=hash_id, second_at_beginning=second_at_beginning, from_timestamp=from_timestamp, ) except (ValueError,) as exc: if isinstance(exc, CroniterError): raise if int(sys.version[0]) >= 3: trace = _traceback.format_exc() raise CroniterBadCronError(trace) raise CroniterBadCronError("{0}".format(exc)) @classmethod def _get_low_from_current_date_number(cls, field_index, step, from_timestamp): dt = datetime.datetime.fromtimestamp(from_timestamp, tz=UTC_DT) if field_index == MINUTE_FIELD: return dt.minute % step if field_index == HOUR_FIELD: return dt.hour % step if field_index == DAY_FIELD: return ((dt.day - 1) % step) + 1 if field_index == MONTH_FIELD: return dt.month % step if field_index == DOW_FIELD: return (dt.weekday() + 1) % step raise ValueError("Can't get current date number for index larger than 4") @classmethod def is_valid( cls, expression, hash_id=None, encoding="UTF-8", second_at_beginning=False, ): if hash_id: if not isinstance(hash_id, (bytes, str)): raise 
TypeError("hash_id must be bytes or UTF-8 string") if not isinstance(hash_id, bytes): hash_id = hash_id.encode(encoding) try: cls.expand( expression, hash_id=hash_id, second_at_beginning=second_at_beginning ) except CroniterError: return False return True @classmethod def match(cls, cron_expression, testdate, day_or=True, second_at_beginning=False): return cls.match_range( cron_expression, testdate, testdate, day_or, second_at_beginning ) @classmethod def match_range( cls, cron_expression, from_datetime, to_datetime, day_or=True, second_at_beginning=False, ): cron = cls( cron_expression, to_datetime, ret_type=datetime.datetime, day_or=day_or, second_at_beginning=second_at_beginning, ) tdp = cron.get_current(datetime.datetime) if not tdp.microsecond: tdp += relativedelta(microseconds=1) cron.set_current(tdp, force=True) try: tdt = cron.get_prev() except CroniterBadDateError: return False precision_in_seconds = 1 if len(cron.expanded) > UNIX_CRON_LEN else 60 duration_in_second = ( to_datetime - from_datetime ).total_seconds() + precision_in_seconds return (max(tdp, tdt) - min(tdp, tdt)).total_seconds() < duration_in_second def croniter_range( start, stop, expr_format, ret_type=None, day_or=True, exclude_ends=False, _croniter=None, second_at_beginning=False, expand_from_start_time=False, ): """ Generator that provides all times from start to stop matching the given cron expression. If the cron expression matches either 'start' and/or 'stop', those times will be returned as well unless 'exclude_ends=True' is passed. You can think of this function as sibling to the builtin range function for datetime objects. Like range(start,stop,step), except that here 'step' is a cron expression. """ _croniter = _croniter or croniter auto_rt = datetime.datetime # type is used in first if branch for perfs reasons if type(start) is not type(stop) and not ( isinstance(start, type(stop)) or isinstance(stop, type(start)) ): raise CroniterBadTypeRangeError( "The start and stop must be same type. {0} != {1}".format( type(start), type(stop) ) ) if isinstance(start, (float, int)): start, stop = ( datetime.datetime.fromtimestamp(t, tzutc()).replace(tzinfo=None) for t in (start, stop) ) auto_rt = float if ret_type is None: ret_type = auto_rt if not exclude_ends: ms1 = relativedelta(microseconds=1) if start < stop: # Forward (normal) time order start -= ms1 stop += ms1 else: # Reverse time order start += ms1 stop -= ms1 year_span = math.floor(abs(stop.year - start.year)) + 1 ic = _croniter( expr_format, start, ret_type=datetime.datetime, day_or=day_or, max_years_between_matches=year_span, second_at_beginning=second_at_beginning, expand_from_start_time=expand_from_start_time, ) # define a continue (cont) condition function and step function for the main while loop if start < stop: # Forward def cont(v): return v < stop step = ic.get_next else: # Reverse def cont(v): return v > stop step = ic.get_prev try: dt = step() while cont(dt): if ret_type is float: yield ic.get_current(float) else: yield dt dt = step() except CroniterBadDateError: # Stop iteration when this exception is raised; no match found within the given year range return
croniter
python
sqlalchemy__sqlalchemy
test/dialect/oracle/test_compiler.py
{ "start": 63625, "end": 65279 }
class ____(fixtures.TestBase, AssertsCompiledSQL):
    def test_basic(self):
        seq = Sequence("my_seq_no_schema")
        dialect = oracle.OracleDialect()
        assert (
            dialect.identifier_preparer.format_sequence(seq)
            == "my_seq_no_schema"
        )

        seq = Sequence("my_seq", schema="some_schema")
        assert (
            dialect.identifier_preparer.format_sequence(seq)
            == "some_schema.my_seq"
        )

        seq = Sequence("My_Seq", schema="Some_Schema")
        assert (
            dialect.identifier_preparer.format_sequence(seq)
            == '"Some_Schema"."My_Seq"'
        )

    def test_compile(self):
        self.assert_compile(
            ddl.CreateSequence(
                Sequence("my_seq", nomaxvalue=True, nominvalue=True)
            ),
            "CREATE SEQUENCE my_seq NOMINVALUE NOMAXVALUE",
            dialect=oracle.OracleDialect(),
        )

    def test_compile_dialect_args(self):
        self.assert_compile(
            ddl.CreateSequence(Sequence("my_seq", oracle_order=False)),
            "CREATE SEQUENCE my_seq NOORDER",
            dialect=oracle.OracleDialect(),
        )
        self.assert_compile(
            ddl.CreateSequence(
                Sequence("my_seq", nominvalue=True, oracle_order=True)
            ),
            "CREATE SEQUENCE my_seq NOMINVALUE ORDER",
            dialect=oracle.OracleDialect(),
        )

    def test_deprecated_options(self):
        with expect_deprecated(".+use ``oracle_order`` instead"):
            seq = Sequence("foo", order=False)

        eq_(seq.dialect_options["oracle"]["order"], False)
SequenceTest
python
ray-project__ray
python/ray/train/_internal/worker_group.py
{ "start": 522, "end": 1078 }
class ____: """A class to execute arbitrary functions. Does not hold any state.""" def __execute(self, func: Callable[..., T], *args, **kwargs) -> T: """Executes the input function and returns the output. Args: func: The function to execute. args, kwargs: The arguments to pass into func. """ try: return func(*args, **kwargs) except Exception as e: skipped = skip_exceptions(e) raise skipped from exception_cause(skipped) @dataclass
RayTrainWorker
python
scrapy__scrapy
tests/test_spidermiddleware_referer.py
{ "start": 38369, "end": 40764 }
class ____(TestReferrerOnRedirect):
    """
    Strict Origin policy will always send the "origin" as referrer
    (think of it as the parent URL without the path part),
    unless the security level is lower and no "Referer" is sent.

    Redirections from secure to non-secure URLs should have the
    "Referrer" header removed if necessary.
    """

    settings = {"REFERRER_POLICY": POLICY_STRICT_ORIGIN}
    scenarii = [
        (
            "http://scrapytest.org/101",
            "http://scrapytest.org/102",
            (
                (301, "http://scrapytest.org/103"),
                (301, "http://scrapytest.org/104"),
            ),
            b"http://scrapytest.org/",  # send origin
            b"http://scrapytest.org/",  # redirects to same origin: send origin
        ),
        (
            "https://scrapytest.org/201",
            "https://scrapytest.org/202",
            (
                # redirecting to non-secure URL: no referrer
                (301, "http://scrapytest.org/203"),
            ),
            b"https://scrapytest.org/",
            None,
        ),
        (
            "https://scrapytest.org/301",
            "https://scrapytest.org/302",
            (
                # redirecting to non-secure URL (different domain): no referrer
                (301, "http://example.com/303"),
            ),
            b"https://scrapytest.org/",
            None,
        ),
        (
            "http://scrapy.org/401",
            "http://example.com/402",
            ((301, "http://scrapytest.org/403"),),
            b"http://scrapy.org/",
            b"http://scrapy.org/",
        ),
        (
            "https://scrapy.org/501",
            "https://example.com/502",
            (
                # HTTPS all along, so origin referrer is kept as-is
                (301, "https://google.com/503"),
                (301, "https://facebook.com/504"),
            ),
            b"https://scrapy.org/",
            b"https://scrapy.org/",
        ),
        (
            "https://scrapytest.org/601",
            "http://scrapytest.org/602",  # TLS to non-TLS: no referrer
            (
                (
                    301,
                    "https://scrapytest.org/603",
                ),  # TLS URL again: (still) no referrer
            ),
            None,
            None,
        ),
    ]
TestReferrerOnRedirectStrictOrigin
python
instagram__MonkeyType
monkeytype/typing.py
{ "start": 15996, "end": 16079 }
class ____(TypeRewriter):
    def rewrite(self, typ):
        return typ
NoOpRewriter
python
huggingface__transformers
src/transformers/models/glpn/modeling_glpn.py
{ "start": 17605, "end": 19392 }
class ____(nn.Module):
    """
    Selective Feature Fusion module, as explained in the [paper](https://huggingface.co/papers/2201.07436)
    (section 3.4). This module adaptively selects and integrates local and global features by attaining an
    attention map for each feature.
    """

    def __init__(self, in_channel=64):
        super().__init__()

        self.convolutional_layer1 = nn.Sequential(
            nn.Conv2d(in_channels=int(in_channel * 2), out_channels=in_channel, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(in_channel),
            nn.ReLU(),
        )

        self.convolutional_layer2 = nn.Sequential(
            nn.Conv2d(in_channels=in_channel, out_channels=int(in_channel / 2), kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(int(in_channel / 2)),
            nn.ReLU(),
        )

        self.convolutional_layer3 = nn.Conv2d(
            in_channels=int(in_channel / 2), out_channels=2, kernel_size=3, stride=1, padding=1
        )

        self.sigmoid = nn.Sigmoid()

    def forward(self, local_features, global_features):
        # concatenate features along the channel dimension
        features = torch.cat((local_features, global_features), dim=1)
        # pass through convolutional layers
        features = self.convolutional_layer1(features)
        features = self.convolutional_layer2(features)
        features = self.convolutional_layer3(features)
        # apply sigmoid to get two-channel attention map
        attn = self.sigmoid(features)
        # construct hybrid features by adding element-wise
        hybrid_features = local_features * attn[:, 0, :, :].unsqueeze(1) + global_features * attn[
            :, 1, :, :
        ].unsqueeze(1)

        return hybrid_features
GLPNSelectiveFeatureFusion
python
realpython__materials
python-copy/emoji.py
{ "start": 21, "end": 446 }
class ____:
    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return self._glyph

    @property
    def name(self):
        return unicodedata.name(self._glyph).title()

    @name.setter
    def name(self, value):
        self._glyph = unicodedata.lookup(value)


if __name__ == "__main__":
    emoji = Emoji("tangerine")
    print(emoji)

    emoji.name = "clown face"
    print(emoji)
Emoji
python
ray-project__ray
python/ray/data/_internal/datasource/mcap_datasource.py
{ "start": 1580, "end": 9923 }
class ____(FileBasedDatasource): """MCAP (Message Capture) datasource for Ray Data. This datasource provides reading of MCAP files with predicate pushdown optimization for filtering by topics, time ranges, and message types. MCAP is a standardized format for storing timestamped messages from robotics and autonomous systems, commonly used for sensor data, control commands, and other time-series data. Examples: Basic usage: >>> import ray # doctest: +SKIP >>> ds = ray.data.read_mcap("/path/to/data.mcap") # doctest: +SKIP With topic filtering and time range: >>> from ray.data.datasource import TimeRange # doctest: +SKIP >>> ds = ray.data.read_mcap( # doctest: +SKIP ... "/path/to/data.mcap", ... topics={"/camera/image_raw", "/lidar/points"}, ... time_range=TimeRange(start_time=1000000000, end_time=2000000000) ... ) # doctest: +SKIP With multiple files and metadata: >>> ds = ray.data.read_mcap( # doctest: +SKIP ... ["file1.mcap", "file2.mcap"], ... topics={"/camera/image_raw", "/lidar/points"}, ... message_types={"sensor_msgs/Image", "sensor_msgs/PointCloud2"}, ... include_metadata=True ... ) # doctest: +SKIP """ _FILE_EXTENSIONS = ["mcap"] def __init__( self, paths: Union[str, List[str]], topics: Optional[Union[List[str], Set[str]]] = None, time_range: Optional[TimeRange] = None, message_types: Optional[Union[List[str], Set[str]]] = None, include_metadata: bool = True, **file_based_datasource_kwargs, ): """Initialize MCAP datasource. Args: paths: Path or list of paths to MCAP files. topics: Optional list/set of topic names to include. If specified, only messages from these topics will be read. time_range: Optional TimeRange for filtering messages by timestamp. TimeRange contains start_time and end_time in nanoseconds, where both values must be non-negative and start_time < end_time. message_types: Optional list/set of message type names (schema names) to include. Only messages with matching schema names will be read. include_metadata: Whether to include MCAP metadata fields in the output. Defaults to True. When True, includes schema, channel, and message metadata. **file_based_datasource_kwargs: Additional arguments for FileBasedDatasource. """ super().__init__(paths, **file_based_datasource_kwargs) _check_import(self, module="mcap", package="mcap") # Convert to sets for faster lookup self._topics = set(topics) if topics else None self._message_types = set(message_types) if message_types else None self._time_range = time_range self._include_metadata = include_metadata def _read_stream(self, f: "pyarrow.NativeFile", path: str) -> Iterator[Block]: """Read MCAP file and yield blocks of message data. This method implements efficient MCAP reading with predicate pushdown. It uses MCAP's built-in filtering capabilities for optimal performance and applies additional filters when needed. Args: f: File-like object to read from. Must be seekable for MCAP reading. path: Path to the MCAP file being processed. Yields: Block: Blocks of MCAP message data as pyarrow Tables. Raises: ValueError: If the MCAP file cannot be read or has invalid format. 
""" from mcap.reader import make_reader reader = make_reader(f) # Note: MCAP summaries are optional and iter_messages works without them # We don't need to validate the summary since it's not required # Use MCAP's built-in filtering for topics and time range messages = reader.iter_messages( topics=list(self._topics) if self._topics else None, start_time=self._time_range.start_time if self._time_range else None, end_time=self._time_range.end_time if self._time_range else None, log_time_order=True, reverse=False, ) builder = DelegatingBlockBuilder() for schema, channel, message in messages: # Apply filters that couldn't be pushed down to MCAP level if not self._should_include_message(schema, channel, message): continue # Convert message to dictionary format message_data = self._message_to_dict(schema, channel, message, path) builder.add(message_data) # Yield the block if we have any messages if builder.num_rows() > 0: yield builder.build() def _should_include_message( self, schema: "Schema", channel: "Channel", message: "Message" ) -> bool: """Check if a message should be included based on filters. This method applies Python-level filtering that cannot be pushed down to the MCAP library level. Topic filters are already handled by the MCAP reader, so only message_types filtering is needed here. Args: schema: MCAP schema object containing message type information. channel: MCAP channel object containing topic and metadata. message: MCAP message object containing the actual data. Returns: True if the message should be included, False otherwise. """ # Message type filter (cannot be pushed down to MCAP reader) if self._message_types and schema and schema.name not in self._message_types: return False return True def _message_to_dict( self, schema: "Schema", channel: "Channel", message: "Message", path: str ) -> Dict[str, Any]: """Convert MCAP message to dictionary format. This method converts MCAP message objects into a standardized dictionary format suitable for Ray Data processing. Args: schema: MCAP schema object containing message type and encoding info. channel: MCAP channel object containing topic and channel metadata. message: MCAP message object containing the actual message data. path: Path to the source file (for include_paths functionality). Returns: Dictionary containing message data in Ray Data format. """ # Decode message data based on encoding decoded_data = message.data if channel.message_encoding == "json" and isinstance(message.data, bytes): try: decoded_data = json.loads(message.data.decode("utf-8")) except (json.JSONDecodeError, UnicodeDecodeError): # Keep raw bytes if decoding fails decoded_data = message.data # Core message data message_data = { "data": decoded_data, "topic": channel.topic, "log_time": message.log_time, "publish_time": message.publish_time, "sequence": message.sequence, } # Add metadata if requested if self._include_metadata: message_data.update( { "channel_id": message.channel_id, "message_encoding": channel.message_encoding, "schema_name": schema.name if schema else None, "schema_encoding": schema.encoding if schema else None, "schema_data": schema.data if schema else None, } ) # Add file path if include_paths is enabled (from FileBasedDatasource) if getattr(self, "include_paths", False): message_data["path"] = path return message_data def get_name(self) -> str: """Return a human-readable name for this datasource.""" return "MCAP" @property def supports_distributed_reads(self) -> bool: """Whether this datasource supports distributed reads. 
MCAP files can be read in parallel across multiple files. """ return True
MCAPDatasource
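As an aside on the MCAPDatasource record above: its _message_to_dict method decodes JSON-encoded payloads and quietly falls back to the raw bytes when decoding fails. Below is a minimal, standalone sketch of that decode-with-fallback pattern; the decode_message_payload helper is an illustrative name invented here, not part of the datasource.

import json
from typing import Any, Union

def decode_message_payload(data: bytes, message_encoding: str) -> Union[dict, bytes, Any]:
    # Decode JSON-encoded payloads, falling back to the raw bytes when the
    # payload is not valid UTF-8 JSON (mirrors the fallback described above).
    if message_encoding == "json" and isinstance(data, bytes):
        try:
            return json.loads(data.decode("utf-8"))
        except (json.JSONDecodeError, UnicodeDecodeError):
            return data
    return data

# A JSON payload decodes to a dict, a binary payload passes through untouched.
print(decode_message_payload(b'{"speed": 1.5}', "json"))  # {'speed': 1.5}
print(decode_message_payload(b"\xff\xfe", "json"))        # b'\xff\xfe'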
python
numba__numba
numba/cuda/tests/cudapy/test_cffi.py
{ "start": 294, "end": 938 }
class ____(CUDATestCase): def test_from_buffer(self): import cffi ffi = cffi.FFI() link = str(test_data_dir / 'jitlink.ptx') sig = types.void(types.CPointer(types.int32)) array_mutator = cuda.declare_device('array_mutator', sig) @cuda.jit(link=[link]) def mutate_array(x): x_ptr = ffi.from_buffer(x) array_mutator(x_ptr) x = np.arange(2).astype(np.int32) mutate_array[1, 1](x) # The foreign function should have copied element 1 to element 0 self.assertEqual(x[0], x[1]) if __name__ == '__main__': unittest.main()
TestCFFI
python
dagster-io__dagster
python_modules/dagster/dagster/_core/snap/execution_plan_snapshot.py
{ "start": 5076, "end": 5534 }
class ____( NamedTuple("_ExecutionPlanSnapshotErrorData", [("error", Optional[SerializableErrorInfo])]) ): def __new__(cls, error: Optional[SerializableErrorInfo]): return super().__new__( cls, error=check.opt_inst_param(error, "error", SerializableErrorInfo), ) @whitelist_for_serdes( storage_field_names={"node_handle_id": "solid_handle_id"}, skip_when_none_fields={"pool"} )
ExecutionPlanSnapshotErrorData
python
spyder-ide__spyder
spyder/utils/stylesheet.py
{ "start": 24340, "end": 25921 }
class ____(SpecialTabBarStyleSheet): """Style for horizontal dockwidget tab bars.""" def set_stylesheet(self): super().set_stylesheet() # Main constants css = self.get_stylesheet() margin_size = AppStyle.MarginSize # Tabs style css['QTabBar::tab'].setValues( # No margins to left/right but top/bottom to separate tabbar from # the dockwidget areas. # Notes: # * Top margin is half the one at the bottom so that we can show # a bottom margin on dockwidgets that are not tabified. # * The other half is added through the _margin_bottom attribute of # PluginMainWidget. margin=f'{margin_size}px 0px {2 * margin_size}px 0px', # Remove a colored border added by QDarkStyle borderTop='0px', ) # Add margin to first and last tabs to avoid them touching the left and # right dockwidget areas, respectively. css['QTabBar::tab:first'].setValues( marginLeft=f'{2 * margin_size}px', ) css['QTabBar::tab:last'].setValues( marginRight=f'{2 * margin_size}px', ) # Make top and bottom margins for scroll buttons even. # This is necessary since the tabbar top margin is half the one at the # bottom (see the notes in the 'QTabBar::tab' style above). css['QTabBar QToolButton'].setValues( marginTop='0px', marginBottom=f'{margin_size}px', )
HorizontalDockTabBarStyleSheet
python
django-extensions__django-extensions
django_extensions/management/commands/runserver_plus.py
{ "start": 6139, "end": 27261 }
class ____(BaseCommand): help = "Starts a lightweight Web server for development." # Validation is called explicitly each time the server is reloaded. requires_system_checks: List[str] = [] DEFAULT_CRT_EXTENSION = ".crt" DEFAULT_KEY_EXTENSION = ".key" def add_arguments(self, parser): super().add_arguments(parser) parser.add_argument( "addrport", nargs="?", help="Optional port number, or ipaddr:port" ) parser.add_argument( "--ipv6", "-6", action="store_true", dest="use_ipv6", default=False, help="Tells Django to use a IPv6 address.", ) parser.add_argument( "--noreload", action="store_false", dest="use_reloader", default=True, help="Tells Django to NOT use the auto-reloader.", ) parser.add_argument( "--browser", action="store_true", dest="open_browser", help="Tells Django to open a browser.", ) parser.add_argument( "--nothreading", action="store_false", dest="threaded", help="Do not run in multithreaded mode.", ) parser.add_argument( "--threaded", action="store_true", dest="threaded", help="Run in multithreaded mode.", ) parser.add_argument( "--output", dest="output_file", default=None, help="Specifies an output file to send a copy of all messages " "(not flushed immediately).", ) parser.add_argument( "--print-sql", action="store_true", default=False, help="Print SQL queries as they're executed", ) parser.add_argument( "--truncate-sql", action="store", type=int, help="Truncate SQL queries to a number of characters.", ) parser.add_argument( "--print-sql-location", action="store_true", default=False, help="Show location in code where SQL query generated from", ) cert_group = parser.add_mutually_exclusive_group() cert_group.add_argument( "--cert", dest="cert_path", action="store", type=str, help="Deprecated alias for --cert-file option.", ) cert_group.add_argument( "--cert-file", dest="cert_path", action="store", type=str, help="SSL .crt file path. If not provided path from --key-file will be " "selected. Either --cert-file or --key-file must be provided to use SSL.", ) parser.add_argument( "--key-file", dest="key_file_path", action="store", type=str, help="SSL .key file path. If not provided path from --cert-file " "will be selected. 
Either --cert-file or --key-file must be provided " "to use SSL.", ) parser.add_argument( "--extra-file", dest="extra_files", action="append", type=str, default=[], help="auto-reload whenever the given file changes too" " (can be specified multiple times)", ) parser.add_argument( "--exclude-pattern", dest="exclude_patterns", action="append", type=str, default=[], help="ignore reload on changes to files matching this pattern" " (can be specified multiple times)", ) parser.add_argument( "--reloader-interval", dest="reloader_interval", action="store", type=int, default=DEFAULT_POLLER_RELOADER_INTERVAL, help="After how many seconds auto-reload should scan for updates" " in poller-mode [default=%s]" % DEFAULT_POLLER_RELOADER_INTERVAL, ) parser.add_argument( "--reloader-type", dest="reloader_type", action="store", type=str, default=DEFAULT_POLLER_RELOADER_TYPE, help="Werkzeug reloader type " "[options are auto, watchdog, or stat, default=%s]" % DEFAULT_POLLER_RELOADER_TYPE, ) parser.add_argument( "--pdb", action="store_true", dest="pdb", default=False, help="Drop into pdb shell at the start of any view.", ) parser.add_argument( "--ipdb", action="store_true", dest="ipdb", default=False, help="Drop into ipdb shell at the start of any view.", ) parser.add_argument( "--pm", action="store_true", dest="pm", default=False, help="Drop into (i)pdb shell if an exception is raised in a view.", ) parser.add_argument( "--startup-messages", dest="startup_messages", action="store", default="reload", help="When to show startup messages: " "reload [default], once, always, never.", ) parser.add_argument( "--keep-meta-shutdown", dest="keep_meta_shutdown_func", action="store_true", default=False, help="Keep request.META['werkzeug.server.shutdown'] function which is " "automatically removed because Django debug pages tries to call the " "function and unintentionally shuts down the Werkzeug server.", ) parser.add_argument( "--nopin", dest="nopin", action="store_true", default=False, help="Disable the PIN in werkzeug. USE IT WISELY!", ) if USE_STATICFILES: parser.add_argument( "--nostatic", action="store_false", dest="use_static_handler", default=True, help="Tells Django to NOT automatically serve static files.", ) parser.add_argument( "--insecure", action="store_true", dest="insecure_serving", default=False, help="Allows serving static files even if DEBUG is False.", ) @signalcommand def handle(self, *args, **options): addrport = options["addrport"] startup_messages = options["startup_messages"] if startup_messages == "reload": self.show_startup_messages = os.environ.get("RUNSERVER_PLUS_SHOW_MESSAGES") elif startup_messages == "once": self.show_startup_messages = not os.environ.get( "RUNSERVER_PLUS_SHOW_MESSAGES" ) elif startup_messages == "never": self.show_startup_messages = False else: self.show_startup_messages = True os.environ["RUNSERVER_PLUS_SHOW_MESSAGES"] = "1" setup_logger( logger, self.stderr, filename=options["output_file"] ) # , fmt="[%(name)s] %(message)s") logredirect = RedirectHandler(__name__) # Redirect werkzeug log items werklogger = logging.getLogger("werkzeug") werklogger.setLevel(logging.INFO) werklogger.addHandler(logredirect) werklogger.propagate = False pdb_option = options["pdb"] ipdb_option = options["ipdb"] pm = options["pm"] try: from django_pdb.middleware import PdbMiddleware except ImportError: if pdb_option or ipdb_option or pm: raise CommandError( "django-pdb is required for --pdb, --ipdb and --pm options. " "Please visit https://pypi.python.org/pypi/django-pdb or install " "via pip. 
(pip install django-pdb)" ) pm = False else: # Add pdb middleware if --pdb is specified or if in DEBUG mode if pdb_option or ipdb_option or settings.DEBUG: middleware = "django_pdb.middleware.PdbMiddleware" settings_middleware = ( getattr(settings, "MIDDLEWARE", None) or settings.MIDDLEWARE_CLASSES ) if middleware not in settings_middleware: if isinstance(settings_middleware, tuple): settings_middleware += (middleware,) else: settings_middleware += [middleware] # If --pdb is specified then always break at the start of views. # Otherwise break only if a 'pdb' query parameter is set in the url if pdb_option: PdbMiddleware.always_break = "pdb" elif ipdb_option: PdbMiddleware.always_break = "ipdb" def postmortem(request, exc_type, exc_value, tb): if has_ipdb(): import ipdb p = ipdb else: import pdb p = pdb print( "Exception occurred: %s, %s" % (exc_type, exc_value), file=sys.stderr, ) p.post_mortem(tb) # usurp django's handler django_views_debug.technical_500_response = ( postmortem if pm else null_technical_500_response ) self.use_ipv6 = options["use_ipv6"] if self.use_ipv6 and not socket.has_ipv6: raise CommandError("Your Python does not support IPv6.") self._raw_ipv6 = False if not addrport: addrport = getattr( settings, "RUNSERVER_PLUS_SERVER_ADDRESS_PORT", getattr(settings, "RUNSERVERPLUS_SERVER_ADDRESS_PORT", None), ) if not addrport: self.addr = "" self.port = DEFAULT_PORT else: m = re.match(naiveip_re, addrport) if m is None: raise CommandError( '"%s" is not a valid port number or address:port pair.' % addrport ) self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups() if not self.port.isdigit(): raise CommandError("%r is not a valid port number." % self.port) if self.addr: if _ipv6: self.addr = self.addr[1:-1] self.use_ipv6 = True self._raw_ipv6 = True elif self.use_ipv6 and not _fqdn: raise CommandError('"%s" is not a valid IPv6 address.' % self.addr) if not self.addr: self.addr = "::1" if self.use_ipv6 else "127.0.0.1" self._raw_ipv6 = True print_sql = ( getattr(settings, "RUNSERVER_PLUS_PRINT_SQL", False) or options["print_sql"] ) truncate = None if options["truncate_sql"] == 0 else options["truncate_sql"] with monkey_patch_cursordebugwrapper( print_sql=print_sql, print_sql_location=options["print_sql_location"], truncate=truncate, logger=logger.info, confprefix="RUNSERVER_PLUS", ): self.inner_run(options) def get_handler(self, *args, **options): """Return the default WSGI handler for the runner.""" return get_internal_wsgi_application() def get_error_handler(self, exc, **options): def application(env, start_response): if isinstance(exc, SystemCheckError): error_message = ansi_escape.sub("", str(exc)) raise SystemCheckError(error_message) raise exc return application def inner_run(self, options): if not HAS_WERKZEUG: raise CommandError( "Werkzeug is required to use runserver_plus. " "Please visit https://werkzeug.palletsprojects.com/ or install via pip." " (pip install Werkzeug)" ) # Set colored output if settings.DEBUG: try: set_werkzeug_log_color() except ( Exception ): # We are dealing with some internals, anything could go wrong if self.show_startup_messages: print( "Wrapping internal werkzeug logger " "for color highlighting has failed!" 
) class WSGIRequestHandler(_WSGIRequestHandler): def make_environ(self): environ = super().make_environ() if ( not options["keep_meta_shutdown_func"] and "werkzeug.server.shutdown" in environ ): del environ["werkzeug.server.shutdown"] remote_user = os.getenv("REMOTE_USER") if remote_user is not None: environ["REMOTE_USER"] = remote_user return environ threaded = options["threaded"] use_reloader = options["use_reloader"] open_browser = options["open_browser"] quit_command = "CONTROL-C" if sys.platform != "win32" else "CTRL-BREAK" reloader_interval = options["reloader_interval"] reloader_type = options["reloader_type"] self.extra_files = set(options["extra_files"]) exclude_patterns = set(options["exclude_patterns"]) self.nopin = options["nopin"] if self.show_startup_messages: print("Performing system checks...\n") try: check_errors(self.check)(display_num_errors=self.show_startup_messages) check_errors(self.check_migrations)() handler = check_errors(self.get_handler)(**options) except Exception as exc: self.stderr.write("Error occurred during checks: %r" % exc, ending="\n\n") handler = self.get_error_handler(exc, **options) if USE_STATICFILES: use_static_handler = options["use_static_handler"] insecure_serving = options["insecure_serving"] if use_static_handler and (settings.DEBUG or insecure_serving): handler = StaticFilesHandler(handler) if options["cert_path"] or options["key_file_path"]: if not HAS_OPENSSL: raise CommandError( "Python OpenSSL Library is " "required to use runserver_plus with ssl support. " "Install via pip (pip install pyOpenSSL)." ) certfile, keyfile = self.determine_ssl_files_paths(options) dir_path, root = os.path.split(certfile) root, _ = os.path.splitext(root) try: if os.path.exists(certfile) and os.path.exists(keyfile): ssl_context = (certfile, keyfile) else: # Create cert, key files ourselves. ssl_context = make_ssl_devcert( os.path.join(dir_path, root), host="localhost" ) except ImportError: if self.show_startup_messages: print( "Werkzeug version is less than 0.9, trying adhoc certificate." ) ssl_context = "adhoc" else: ssl_context = None bind_url = "%s://%s:%s/" % ( "https" if ssl_context else "http", self.addr if not self._raw_ipv6 else "[%s]" % self.addr, self.port, ) if self.show_startup_messages: print( "\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE) ) print("Development server is running at %s" % (bind_url,)) print("Using the Werkzeug debugger (https://werkzeug.palletsprojects.com/)") print("Quit the server with %s." % quit_command) if open_browser: webbrowser.open(bind_url) if use_reloader and settings.USE_I18N: self.extra_files |= set( filter(lambda filename: str(filename).endswith(".mo"), gen_filenames()) ) if getattr(settings, "RUNSERVER_PLUS_EXTRA_FILES", []): self.extra_files |= set(settings.RUNSERVER_PLUS_EXTRA_FILES) exclude_patterns |= set( getattr(settings, "RUNSERVER_PLUS_EXCLUDE_PATTERNS", []) ) # Werkzeug needs to be clued in its the main instance if running # without reloader or else it won't show key. 
# https://git.io/vVIgo if not use_reloader: os.environ["WERKZEUG_RUN_MAIN"] = "true" # Don't run a second instance of the debugger / reloader # See also: https://github.com/django-extensions/django-extensions/issues/832 if os.environ.get("WERKZEUG_RUN_MAIN") != "true": if self.nopin: os.environ["WERKZEUG_DEBUG_PIN"] = "off" handler = DebuggedApplication(handler, True) # Set trusted_hosts (for Werkzeug 3.0.3+) handler.trusted_hosts = getattr( settings, "RUNSERVERPLUS_SERVER_ADDRESS_PORT", getattr(settings, "RUNSERVERPLUS_TRUSTED_HOSTS", None), ) runserver_plus_started.send(sender=self) run_simple( self.addr, int(self.port), handler, use_reloader=use_reloader, use_debugger=True, extra_files=self.extra_files, exclude_patterns=exclude_patterns, reloader_interval=reloader_interval, reloader_type=reloader_type, threaded=threaded, request_handler=WSGIRequestHandler, ssl_context=ssl_context, ) @classmethod def determine_ssl_files_paths(cls, options): key_file_path = os.path.expanduser(options.get("key_file_path") or "") cert_path = os.path.expanduser(options.get("cert_path") or "") cert_file = cls._determine_path_for_file( cert_path, key_file_path, cls.DEFAULT_CRT_EXTENSION ) key_file = cls._determine_path_for_file( key_file_path, cert_path, cls.DEFAULT_KEY_EXTENSION ) return cert_file, key_file @classmethod def _determine_path_for_file( cls, current_file_path, other_file_path, expected_extension ): directory = cls._get_directory_basing_on_file_paths( current_file_path, other_file_path ) file_name = cls._get_file_name(current_file_path) or cls._get_file_name( other_file_path ) extension = cls._get_extension(current_file_path) or expected_extension return os.path.join(directory, file_name + extension) @classmethod def _get_directory_basing_on_file_paths(cls, current_file_path, other_file_path): return ( cls._get_directory(current_file_path) or cls._get_directory(other_file_path) or os.getcwd() ) @classmethod def _get_directory(cls, file_path): return os.path.split(file_path)[0] @classmethod def _get_file_name(cls, file_path): return os.path.splitext(os.path.split(file_path)[1])[0] @classmethod def _get_extension(cls, file_path): return os.path.splitext(file_path)[1] def set_werkzeug_log_color(): """Try to set color to the werkzeug log.""" _style = color_style() _orig_log = _WSGIRequestHandler.log def werk_log(self, type, message, *args): try: msg = "%s - - [%s] %s" % ( self.address_string(), self.log_date_time_string(), message % args, ) http_code = str(args[1]) except Exception: return _orig_log(type, message, *args) # Utilize terminal colors, if available if http_code[0] == "2": # Put 2XX first, since it should be the common case msg = _style.HTTP_SUCCESS(msg) elif http_code[0] == "1": msg = _style.HTTP_INFO(msg) elif http_code == "304": msg = _style.HTTP_NOT_MODIFIED(msg) elif http_code[0] == "3": msg = _style.HTTP_REDIRECT(msg) elif http_code == "404": msg = _style.HTTP_NOT_FOUND(msg) elif http_code[0] == "4": msg = _style.HTTP_BAD_REQUEST(msg) else: # Any 5XX, or any other response msg = _style.HTTP_SERVER_ERROR(msg) _log(type, msg) _WSGIRequestHandler.log = werk_log
Command
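The runserver_plus Command record above derives a matching SSL certificate/key pair from whichever of --cert-file / --key-file was given, borrowing the missing directory, file stem, or extension from the other path or from the .crt/.key defaults. A simplified standalone sketch of that idea follows; derive_ssl_pair is a made-up name for illustration, not the command's API.

import os

def derive_ssl_pair(cert_path="", key_path="", crt_ext=".crt", key_ext=".key"):
    # Split a path into (directory, stem, extension); empty strings when absent.
    def split(p):
        directory, name = os.path.split(os.path.expanduser(p or ""))
        stem, ext = os.path.splitext(name)
        return directory, stem, ext

    c_dir, c_stem, c_ext = split(cert_path)
    k_dir, k_stem, k_ext = split(key_path)
    # Each file borrows missing pieces from the other path, then from defaults.
    cert_file = os.path.join(c_dir or k_dir or os.getcwd(),
                             (c_stem or k_stem) + (c_ext or crt_ext))
    key_file = os.path.join(k_dir or c_dir or os.getcwd(),
                            (k_stem or c_stem) + (k_ext or key_ext))
    return cert_file, key_file

# Passing only a certificate path yields a key path alongside it:
# ('/tmp/dev.crt', '/tmp/dev.key')
print(derive_ssl_pair(cert_path="/tmp/dev.crt"))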
python
PrefectHQ__prefect
src/prefect/server/database/orm_models.py
{ "start": 38838, "end": 40130 }
class ____(Base): """SQLAlchemy model of an worker""" name: Mapped[str] description: Mapped[Optional[str]] type: Mapped[str] = mapped_column(index=True) base_job_template: Mapped[dict[str, Any]] = mapped_column( JSON, server_default="{}", default={} ) is_paused: Mapped[bool] = mapped_column(server_default="0", default=False) default_queue_id: Mapped[Optional[uuid.UUID]] = mapped_column( UUID, sa.ForeignKey("work_queue.id", ondelete="RESTRICT", use_alter=True), nullable=True, ) concurrency_limit: Mapped[Optional[int]] status: Mapped[WorkPoolStatus] = mapped_column( sa.Enum(WorkPoolStatus, name="work_pool_status"), default=WorkPoolStatus.NOT_READY, server_default=WorkPoolStatus.NOT_READY, ) last_transitioned_status_at: Mapped[Optional[DateTime]] last_status_event_id: Mapped[Optional[uuid.UUID]] storage_configuration: Mapped[schemas.core.WorkPoolStorageConfiguration] = ( mapped_column( Pydantic(schemas.core.WorkPoolStorageConfiguration), server_default="{}", default=schemas.core.WorkPoolStorageConfiguration, nullable=False, ) ) __table_args__: Any = (sa.UniqueConstraint("name"),)
WorkPool
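The WorkPool record above uses SQLAlchemy 2.0's typed Mapped[...] annotations together with mapped_column defaults, server defaults, and a unique constraint. Below is a pared-down, self-contained sketch of that mapping style, assuming SQLAlchemy 2.0+ is installed; the Pool model and its column names are invented here for illustration and are not part of the record.

import uuid
from typing import Optional

import sqlalchemy as sa
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column

class Base(DeclarativeBase):
    pass

class Pool(Base):
    # A minimal table using the same typed-annotation mapping style.
    __tablename__ = "pool"
    id: Mapped[uuid.UUID] = mapped_column(sa.Uuid, primary_key=True, default=uuid.uuid4)
    name: Mapped[str]                      # non-nullable by annotation
    description: Mapped[Optional[str]]     # nullable by annotation
    is_paused: Mapped[bool] = mapped_column(server_default=sa.text("0"), default=False)
    __table_args__ = (sa.UniqueConstraint("name"),)

# The annotations alone are enough for create_all to emit the DDL.
engine = sa.create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)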
python
django__django
tests/contenttypes_tests/test_management.py
{ "start": 478, "end": 5365 }
class ____(TestCase): # Speed up tests by avoiding retrieving ContentTypes for all test apps. available_apps = [ "contenttypes_tests", "empty_models", "no_models", "django.contrib.contenttypes", ] @classmethod def setUpTestData(cls): with captured_stdout(): call_command( "remove_stale_contenttypes", interactive=False, include_stale_apps=True, verbosity=2, ) cls.before_count = ContentType.objects.count() cls.content_type = ContentType.objects.create( app_label="contenttypes_tests", model="Fake" ) def setUp(self): self.app_config = apps.get_app_config("contenttypes_tests") def test_interactive_true_with_dependent_objects(self): """ interactive mode (the default) deletes stale content types and warns of dependent objects. """ post = Post.objects.create(title="post", content_type=self.content_type) # A related object is needed to show that a custom collector with # can_fast_delete=False is needed. ModelWithNullFKToSite.objects.create(post=post) with mock.patch("builtins.input", return_value="yes"): with captured_stdout() as stdout: call_command("remove_stale_contenttypes", verbosity=2, stdout=stdout) self.assertEqual(Post.objects.count(), 0) output = stdout.getvalue() self.assertIn("- Content type for contenttypes_tests.Fake", output) self.assertIn("- 1 contenttypes_tests.Post object(s)", output) self.assertIn("- 1 contenttypes_tests.ModelWithNullFKToSite", output) self.assertIn("Deleting stale content type", output) self.assertEqual(ContentType.objects.count(), self.before_count) def test_interactive_true_without_dependent_objects(self): """ interactive mode deletes stale content types even if there aren't any dependent objects. """ with mock.patch("builtins.input", return_value="yes"): with captured_stdout() as stdout: call_command("remove_stale_contenttypes", verbosity=2) self.assertIn("Deleting stale content type", stdout.getvalue()) self.assertEqual(ContentType.objects.count(), self.before_count) def test_interactive_false(self): """non-interactive mode deletes stale content types.""" with captured_stdout() as stdout: call_command("remove_stale_contenttypes", interactive=False, verbosity=2) self.assertIn("Deleting stale content type", stdout.getvalue()) self.assertEqual(ContentType.objects.count(), self.before_count) def test_unavailable_content_type_model(self): """A ContentType isn't created if the model isn't available.""" apps = Apps() with self.assertNumQueries(0): contenttypes_management.create_contenttypes( self.app_config, interactive=False, verbosity=0, apps=apps ) self.assertEqual(ContentType.objects.count(), self.before_count + 1) @modify_settings(INSTALLED_APPS={"remove": ["empty_models"]}) def test_contenttypes_removed_in_installed_apps_without_models(self): ContentType.objects.create(app_label="empty_models", model="Fake 1") ContentType.objects.create(app_label="no_models", model="Fake 2") with ( mock.patch("builtins.input", return_value="yes"), captured_stdout() as stdout, ): call_command("remove_stale_contenttypes", verbosity=2) self.assertNotIn( "Deleting stale content type 'empty_models | Fake 1'", stdout.getvalue(), ) self.assertIn( "Deleting stale content type 'no_models | Fake 2'", stdout.getvalue(), ) self.assertEqual(ContentType.objects.count(), self.before_count + 1) @modify_settings(INSTALLED_APPS={"remove": ["empty_models"]}) def test_contenttypes_removed_for_apps_not_in_installed_apps(self): ContentType.objects.create(app_label="empty_models", model="Fake 1") ContentType.objects.create(app_label="no_models", model="Fake 2") with ( mock.patch("builtins.input", 
return_value="yes"), captured_stdout() as stdout, ): call_command( "remove_stale_contenttypes", include_stale_apps=True, verbosity=2 ) self.assertIn( "Deleting stale content type 'empty_models | Fake 1'", stdout.getvalue(), ) self.assertIn( "Deleting stale content type 'no_models | Fake 2'", stdout.getvalue(), ) self.assertEqual(ContentType.objects.count(), self.before_count)
RemoveStaleContentTypesTests
python
run-llama__llama_index
llama-index-core/llama_index/core/schema.py
{ "start": 7200, "end": 7600 }
class ____(BaseComponent): node_id: str node_type: Annotated[ObjectType, EnumNameSerializer] | str | None = None metadata: Dict[str, Any] = Field(default_factory=dict) hash: Optional[str] = None @classmethod def class_name(cls) -> str: return "RelatedNodeInfo" RelatedNodeType = Union[RelatedNodeInfo, List[RelatedNodeInfo]] # Node classes for indexes
RelatedNodeInfo
python
django__django
tests/generic_relations_regress/models.py
{ "start": 4192, "end": 4336 }
class ____(models.Model): b = models.ForeignKey(B, models.SET_NULL, null=True) class Meta: ordering = ("id",) # Ticket #22998
D
python
apache__airflow
airflow-core/src/airflow/models/callback.py
{ "start": 2201, "end": 2560 }
class ____(str, Enum): """Methods used to fetch callback at runtime.""" # For future use once Dag Processor callbacks (on_success_callback/on_failure_callback) get moved to executors DAG_ATTRIBUTE = "dag_attribute" # For deadline callbacks since they import callbacks through the import path IMPORT_PATH = "import_path"
CallbackFetchMethod
python
pytorch__pytorch
test/torch_np/numpy_tests/core/test_shape_base.py
{ "start": 3758, "end": 4736 }
class ____(TestCase): def test_0D_array(self): a = array(1) b = array(2) res = [atleast_3d(a), atleast_3d(b)] desired = [array([[[1]]]), array([[[2]]])] assert_array_equal(res, desired) def test_1D_array(self): a = array([1, 2]) b = array([2, 3]) res = [atleast_3d(a), atleast_3d(b)] desired = [array([[[1], [2]]]), array([[[2], [3]]])] assert_array_equal(res, desired) def test_2D_array(self): a = array([[1, 2], [1, 2]]) b = array([[2, 3], [2, 3]]) res = [atleast_3d(a), atleast_3d(b)] desired = [a[:, :, newaxis], b[:, :, newaxis]] assert_array_equal(res, desired) def test_3D_array(self): a = array([[1, 2], [1, 2]]) b = array([[2, 3], [2, 3]]) a = array([a, a]) b = array([b, b]) res = [atleast_3d(a), atleast_3d(b)] desired = [a, b] assert_array_equal(res, desired)
TestAtleast3d
python
pyinstaller__pyinstaller
PyInstaller/utils/win32/versioninfo.py
{ "start": 5560, "end": 11050 }
class ____: """ DWORD dwSignature; //Contains the value 0xFEEFO4BD DWORD dwStrucVersion; // binary version number of this structure. // The high-order word of this member contains // the major version number, and the low-order // word contains the minor version number. DWORD dwFileVersionMS; // most significant 32 bits of the file's binary // version number DWORD dwFileVersionLS; // DWORD dwProductVersionMS; // most significant 32 bits of the binary version // number of the product with which this file was // distributed DWORD dwProductVersionLS; // DWORD dwFileFlagsMask; // bitmask that specifies the valid bits in // dwFileFlags. A bit is valid only if it was // defined when the file was created. DWORD dwFileFlags; // VS_FF_DEBUG, VS_FF_PATCHED etc. DWORD dwFileOS; // VOS_NT, VOS_WINDOWS32 etc. DWORD dwFileType; // VFT_APP etc. DWORD dwFileSubtype; // 0 unless VFT_DRV or VFT_FONT or VFT_VXD DWORD dwFileDateMS; DWORD dwFileDateLS; """ def __init__( self, filevers=(0, 0, 0, 0), prodvers=(0, 0, 0, 0), mask=0x3f, flags=0x0, OS=0x40004, fileType=0x1, subtype=0x0, date=(0, 0) ): self.sig = 0xfeef04bd self.strucVersion = 0x10000 self.fileVersionMS = (filevers[0] << 16) | (filevers[1] & 0xffff) self.fileVersionLS = (filevers[2] << 16) | (filevers[3] & 0xffff) self.productVersionMS = (prodvers[0] << 16) | (prodvers[1] & 0xffff) self.productVersionLS = (prodvers[2] << 16) | (prodvers[3] & 0xffff) self.fileFlagsMask = mask self.fileFlags = flags self.fileOS = OS self.fileType = fileType self.fileSubtype = subtype self.fileDateMS = date[0] self.fileDateLS = date[1] def fromRaw(self, data, i): ( self.sig, self.strucVersion, self.fileVersionMS, self.fileVersionLS, self.productVersionMS, self.productVersionLS, self.fileFlagsMask, self.fileFlags, self.fileOS, self.fileType, self.fileSubtype, self.fileDateMS, self.fileDateLS, ) = struct.unpack('13L', data[i:i + 52]) return i + 52 def toRaw(self): return struct.pack( '13L', self.sig, self.strucVersion, self.fileVersionMS, self.fileVersionLS, self.productVersionMS, self.productVersionLS, self.fileFlagsMask, self.fileFlags, self.fileOS, self.fileType, self.fileSubtype, self.fileDateMS, self.fileDateLS, ) def __eq__(self, other): return self.toRaw() == other def __str__(self, indent=''): fv = ( self.fileVersionMS >> 16, self.fileVersionMS & 0xffff, self.fileVersionLS >> 16, self.fileVersionLS & 0xffff, ) # yapf: disable pv = ( self.productVersionMS >> 16, self.productVersionMS & 0xffff, self.productVersionLS >> 16, self.productVersionLS & 0xffff, ) # yapf: disable fd = (self.fileDateMS, self.fileDateLS) tmp = [ 'FixedFileInfo(', '# filevers and prodvers should be always a tuple with four items: (1, 2, 3, 4)', '# Set not needed items to zero 0.', 'filevers=%s,' % (fv,), 'prodvers=%s,' % (pv,), "# Contains a bitmask that specifies the valid bits 'flags'r", 'mask=%s,' % hex(self.fileFlagsMask), '# Contains a bitmask that specifies the Boolean attributes of the file.', 'flags=%s,' % hex(self.fileFlags), '# The operating system for which this file was designed.', '# 0x4 - NT and there is no need to change it.', 'OS=%s,' % hex(self.fileOS), '# The general type of file.', '# 0x1 - the file is an application.', 'fileType=%s,' % hex(self.fileType), '# The function of the file.', '# 0x0 - the function is not defined for this fileType', 'subtype=%s,' % hex(self.fileSubtype), '# Creation date and time stamp.', 'date=%s' % (fd,), ')', ] return f'\n{indent} '.join(tmp) def __repr__(self): fv = ( self.fileVersionMS >> 16, self.fileVersionMS & 0xffff, self.fileVersionLS >> 
16, self.fileVersionLS & 0xffff, ) # yapf: disable pv = ( self.productVersionMS >> 16, self.productVersionMS & 0xffff, self.productVersionLS >> 16, self.productVersionLS & 0xffff, ) # yapf: disable fd = (self.fileDateMS, self.fileDateLS) return ( 'versioninfo.FixedFileInfo(filevers=%r, prodvers=%r, ' 'mask=0x%x, flags=0x%x, OS=0x%x, ' 'fileType=%r, subtype=0x%x, date=%r)' % (fv, pv, self.fileFlagsMask, self.fileFlags, self.fileOS, self.fileType, self.fileSubtype, fd) )
FixedFileInfo
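The FixedFileInfo record above stores each four-part version as a pair of 32-bit DWORDs: the high word of the MS value holds the first component, its low word the second, and likewise for the LS value. A small sketch of that packing and its inverse; pack_version and unpack_version are illustrative names, not PyInstaller APIs.

import struct

def pack_version(parts):
    # (major, minor, patch, build) -> (FileVersionMS, FileVersionLS) DWORD pair.
    major, minor, patch, build = parts
    return (major << 16) | (minor & 0xFFFF), (patch << 16) | (build & 0xFFFF)

def unpack_version(ms, ls):
    return (ms >> 16, ms & 0xFFFF, ls >> 16, ls & 0xFFFF)

ms, ls = pack_version((1, 2, 3, 4))
assert unpack_version(ms, ls) == (1, 2, 3, 4)
# Serialized as little-endian 32-bit values, the way a resource block stores them.
print(struct.pack("<2L", ms, ls).hex())  # 0200010004000300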
python
wandb__wandb
wandb/vendor/graphql-core-1.1/wandb_graphql/execution/experimental/fragment.py
{ "start": 6653, "end": 8329 }
class ____(object): def __init__(self, abstract_type, field_asts, context=None, info=None): self.abstract_type = abstract_type self.field_asts = field_asts self.context = context self.info = info self._fragments = {} @cached_property def possible_types(self): return self.context.schema.get_possible_types(self.abstract_type) @cached_property def possible_types_with_is_type_of(self): return [ (type, type.is_type_of) for type in self.possible_types if callable(type.is_type_of) ] def get_fragment(self, type): if isinstance(type, str): type = self.context.schema.get_type(type) if type not in self._fragments: assert type in self.possible_types, ( 'Runtime Object type "{}" is not a possible type for "{}".' ).format(type, self.abstract_type) self._fragments[type] = Fragment( type, self.field_asts, self.context, self.info ) return self._fragments[type] def resolve_type(self, result): return_type = self.abstract_type context = self.context.context_value if return_type.resolve_type: return return_type.resolve_type(result, context, self.info) for type, is_type_of in self.possible_types_with_is_type_of: if is_type_of(result, context, self.info): return type def resolve(self, root): _type = self.resolve_type(root) fragment = self.get_fragment(_type) return fragment.resolve(root)
AbstractFragment
python
tensorflow__tensorflow
tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py
{ "start": 26034, "end": 32870 }
class ____(LayerRNNCell): """DEPRECATED: Please use `tf.compat.v1.nn.rnn_cell.LSTMCell` instead. Basic LSTM recurrent network cell. The implementation is based on We add forget_bias (default: 1) to the biases of the forget gate in order to reduce the scale of forgetting in the beginning of the training. It does not allow cell clipping, a projection layer, and does not use peep-hole connections: it is the basic baseline. For advanced models, please use the full `tf.compat.v1.nn.rnn_cell.LSTMCell` that follows. Note that this cell is not optimized for performance. Please use `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for better performance on CPU. """ def __init__(self, num_units, forget_bias=1.0, state_is_tuple=True, activation=None, reuse=None, name=None, dtype=None, **kwargs): """Initialize the basic LSTM cell. Args: num_units: int, The number of units in the LSTM cell. forget_bias: float, The bias added to forget gates (see above). Must set to `0.0` manually when restoring from CudnnLSTM-trained checkpoints. state_is_tuple: If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. If False, they are concatenated along the column axis. The latter behavior will soon be deprecated. activation: Activation function of the inner states. Default: `tanh`. It could also be string that is within Keras activation function names. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. **kwargs: Dict, keyword named properties for common layer attributes, like `trainable` etc when constructing the cell from configs of get_config(). When restoring from CudnnLSTM-trained checkpoints, must use `CudnnCompatibleLSTMCell` instead. """ warnings.warn("`tf.nn.rnn_cell.BasicLSTMCell` is deprecated and will be " "removed in a future version. This class " "is equivalent as `tf.keras.layers.LSTMCell`, " "and will be replaced by that in Tensorflow 2.0.") super(BasicLSTMCell, self).__init__( _reuse=reuse, name=name, dtype=dtype, **kwargs) _check_supported_dtypes(self.dtype) if not state_is_tuple: logging.warning( "%s: Using a concatenated state is slower and will soon be " "deprecated. Use state_is_tuple=True.", self) if context.executing_eagerly() and tf_config.list_logical_devices("GPU"): logging.warning( "%s: Note that this cell is not optimized for performance. " "Please use tf.contrib.cudnn_rnn.CudnnLSTM for better " "performance on GPU.", self) # Inputs must be 2-dimensional. 
self.input_spec = input_spec.InputSpec(ndim=2) self._num_units = num_units self._forget_bias = forget_bias self._state_is_tuple = state_is_tuple if activation: self._activation = activations.get(activation) else: self._activation = math_ops.tanh @property def state_size(self): return (LSTMStateTuple(self._num_units, self._num_units) if self._state_is_tuple else 2 * self._num_units) @property def output_size(self): return self._num_units @tf_utils.shape_type_conversion def build(self, inputs_shape): if inputs_shape[-1] is None: raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" % str(inputs_shape)) _check_supported_dtypes(self.dtype) input_depth = inputs_shape[-1] h_depth = self._num_units self._kernel = self.add_variable( _WEIGHTS_VARIABLE_NAME, shape=[input_depth + h_depth, 4 * self._num_units]) self._bias = self.add_variable( _BIAS_VARIABLE_NAME, shape=[4 * self._num_units], initializer=init_ops.zeros_initializer(dtype=self.dtype)) self.built = True def call(self, inputs, state): """Long short-term memory cell (LSTM). Args: inputs: `2-D` tensor with shape `[batch_size, input_size]`. state: An `LSTMStateTuple` of state tensors, each shaped `[batch_size, num_units]`, if `state_is_tuple` has been set to `True`. Otherwise, a `Tensor` shaped `[batch_size, 2 * num_units]`. Returns: A pair containing the new hidden state, and the new state (either a `LSTMStateTuple` or a concatenated state, depending on `state_is_tuple`). """ _check_rnn_cell_input_dtypes([inputs, state]) sigmoid = math_ops.sigmoid one = constant_op.constant(1, dtype=dtypes.int32) # Parameters of gates are concatenated into one multiply for efficiency. if self._state_is_tuple: c, h = state else: c, h = array_ops.split(value=state, num_or_size_splits=2, axis=one) gate_inputs = math_ops.matmul( array_ops.concat([inputs, h], 1), self._kernel) gate_inputs = nn_ops.bias_add(gate_inputs, self._bias) # i = input_gate, j = new_input, f = forget_gate, o = output_gate i, j, f, o = array_ops.split( value=gate_inputs, num_or_size_splits=4, axis=one) forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype) # Note that using `add` and `multiply` instead of `+` and `*` gives a # performance improvement. So using those at the cost of readability. add = math_ops.add multiply = math_ops.multiply new_c = add( multiply(c, sigmoid(add(f, forget_bias_tensor))), multiply(sigmoid(i), self._activation(j))) new_h = multiply(self._activation(new_c), sigmoid(o)) if self._state_is_tuple: new_state = LSTMStateTuple(new_c, new_h) else: new_state = array_ops.concat([new_c, new_h], 1) return new_h, new_state def get_config(self): config = { "num_units": self._num_units, "forget_bias": self._forget_bias, "state_is_tuple": self._state_is_tuple, "activation": activations.serialize(self._activation), "reuse": self._reuse, } base_config = super(BasicLSTMCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) @tf_export(v1=["nn.rnn_cell.LSTMCell"])
BasicLSTMCell
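The BasicLSTMCell record above computes all four gates with a single matrix multiply and then splits the result into i, j, f, o before forming the new cell and hidden states. The NumPy sketch below restates those gate equations; shapes and variable names are chosen only for the example, and this is not the TensorFlow implementation itself.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def basic_lstm_step(inputs, c, h, kernel, bias, forget_bias=1.0):
    # kernel: [input_depth + num_units, 4 * num_units], bias: [4 * num_units]
    gate_inputs = np.concatenate([inputs, h], axis=1) @ kernel + bias
    i, j, f, o = np.split(gate_inputs, 4, axis=1)
    new_c = c * sigmoid(f + forget_bias) + sigmoid(i) * np.tanh(j)
    new_h = np.tanh(new_c) * sigmoid(o)
    return new_h, (new_c, new_h)

batch, input_depth, num_units = 2, 3, 5
rng = np.random.default_rng(0)
x = rng.standard_normal((batch, input_depth))
c = np.zeros((batch, num_units))
h = np.zeros((batch, num_units))
kernel = rng.standard_normal((input_depth + num_units, 4 * num_units)) * 0.1
bias = np.zeros(4 * num_units)
out, (new_c, new_h) = basic_lstm_step(x, c, h, kernel, bias)
print(out.shape)  # (2, 5)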
python
kubernetes-client__python
kubernetes/client/models/v1_priority_level_configuration_list.py
{ "start": 383, "end": 7283 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'items': 'list[V1PriorityLevelConfiguration]', 'kind': 'str', 'metadata': 'V1ListMeta' } attribute_map = { 'api_version': 'apiVersion', 'items': 'items', 'kind': 'kind', 'metadata': 'metadata' } def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501 """V1PriorityLevelConfigurationList - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._items = None self._kind = None self._metadata = None self.discriminator = None if api_version is not None: self.api_version = api_version self.items = items if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata @property def api_version(self): """Gets the api_version of this V1PriorityLevelConfigurationList. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this V1PriorityLevelConfigurationList. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V1PriorityLevelConfigurationList. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V1PriorityLevelConfigurationList. # noqa: E501 :type: str """ self._api_version = api_version @property def items(self): """Gets the items of this V1PriorityLevelConfigurationList. # noqa: E501 `items` is a list of request-priorities. # noqa: E501 :return: The items of this V1PriorityLevelConfigurationList. # noqa: E501 :rtype: list[V1PriorityLevelConfiguration] """ return self._items @items.setter def items(self, items): """Sets the items of this V1PriorityLevelConfigurationList. `items` is a list of request-priorities. # noqa: E501 :param items: The items of this V1PriorityLevelConfigurationList. # noqa: E501 :type: list[V1PriorityLevelConfiguration] """ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501 raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501 self._items = items @property def kind(self): """Gets the kind of this V1PriorityLevelConfigurationList. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V1PriorityLevelConfigurationList. 
# noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V1PriorityLevelConfigurationList. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1PriorityLevelConfigurationList. # noqa: E501 :type: str """ self._kind = kind @property def metadata(self): """Gets the metadata of this V1PriorityLevelConfigurationList. # noqa: E501 :return: The metadata of this V1PriorityLevelConfigurationList. # noqa: E501 :rtype: V1ListMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """Sets the metadata of this V1PriorityLevelConfigurationList. :param metadata: The metadata of this V1PriorityLevelConfigurationList. # noqa: E501 :type: V1ListMeta """ self._metadata = metadata def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1PriorityLevelConfigurationList): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1PriorityLevelConfigurationList): return True return self.to_dict() != other.to_dict()
V1PriorityLevelConfigurationList
python
numpy__numpy
numpy/_core/tests/test_numeric.py
{ "start": 37720, "end": 59006 }
class ____: def check_promotion_cases(self, promote_func): # tests that the scalars get coerced correctly. b = np.bool(0) i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0) u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0) f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0) c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0) # coercion within the same kind assert_equal(promote_func(i8, i16), np.dtype(np.int16)) assert_equal(promote_func(i32, i8), np.dtype(np.int32)) assert_equal(promote_func(i16, i64), np.dtype(np.int64)) assert_equal(promote_func(u8, u32), np.dtype(np.uint32)) assert_equal(promote_func(f32, f64), np.dtype(np.float64)) assert_equal(promote_func(fld, f32), np.dtype(np.longdouble)) assert_equal(promote_func(f64, fld), np.dtype(np.longdouble)) assert_equal(promote_func(c128, c64), np.dtype(np.complex128)) assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble)) assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble)) # coercion between kinds assert_equal(promote_func(b, i32), np.dtype(np.int32)) assert_equal(promote_func(b, u8), np.dtype(np.uint8)) assert_equal(promote_func(i8, u8), np.dtype(np.int16)) assert_equal(promote_func(u8, i32), np.dtype(np.int32)) assert_equal(promote_func(i64, u32), np.dtype(np.int64)) assert_equal(promote_func(u64, i32), np.dtype(np.float64)) assert_equal(promote_func(i32, f32), np.dtype(np.float64)) assert_equal(promote_func(i64, f32), np.dtype(np.float64)) assert_equal(promote_func(f32, i16), np.dtype(np.float32)) assert_equal(promote_func(f32, u32), np.dtype(np.float64)) assert_equal(promote_func(f32, c64), np.dtype(np.complex64)) assert_equal(promote_func(c128, f32), np.dtype(np.complex128)) assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble)) # coercion between scalars and 1-D arrays assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8)) assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8)) assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32)) assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32)) assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int64)) # unsigned and signed unfortunately tend to promote to float64: assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.float64)) assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.int64)) assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.int32)) assert_equal(promote_func(np.int32(-1), np.array([u64])), np.dtype(np.float64)) assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float64)) assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.longdouble)) assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.longdouble)) assert_equal(promote_func(fld, np.array([c64])), np.dtype(np.clongdouble)) assert_equal(promote_func(c64, np.array([f64])), np.dtype(np.complex128)) assert_equal(promote_func(np.complex64(3j), np.array([f64])), np.dtype(np.complex128)) assert_equal(promote_func(np.array([f32]), c128), np.dtype(np.complex128)) # coercion between scalars and 1-D arrays, where # the scalar has greater kind than the array assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64)) assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64)) assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64)) assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64)) assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64)) def test_coercion(self): def res_type(a, 
b): return np.add(a, b).dtype self.check_promotion_cases(res_type) # Use-case: float/complex scalar * bool/int8 array # shouldn't narrow the float/complex type for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]: b = 1.234 * a assert_equal(b.dtype, np.dtype('f8'), f"array type {a.dtype}") b = np.longdouble(1.234) * a assert_equal(b.dtype, np.dtype(np.longdouble), f"array type {a.dtype}") b = np.float64(1.234) * a assert_equal(b.dtype, np.dtype('f8'), f"array type {a.dtype}") b = np.float32(1.234) * a assert_equal(b.dtype, np.dtype('f4'), f"array type {a.dtype}") b = np.float16(1.234) * a assert_equal(b.dtype, np.dtype('f2'), f"array type {a.dtype}") b = 1.234j * a assert_equal(b.dtype, np.dtype('c16'), f"array type {a.dtype}") b = np.clongdouble(1.234j) * a assert_equal(b.dtype, np.dtype(np.clongdouble), f"array type {a.dtype}") b = np.complex128(1.234j) * a assert_equal(b.dtype, np.dtype('c16'), f"array type {a.dtype}") b = np.complex64(1.234j) * a assert_equal(b.dtype, np.dtype('c8'), f"array type {a.dtype}") # The following use-case is problematic, and to resolve its # tricky side-effects requires more changes. # # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is # a float32, shouldn't promote to float64 # # a = np.array([1.0, 1.5], dtype=np.float32) # t = np.array([True, False]) # b = t*a # assert_equal(b, [1.0, 0.0]) # assert_equal(b.dtype, np.dtype('f4')) # b = (1-t)*a # assert_equal(b, [0.0, 1.5]) # assert_equal(b.dtype, np.dtype('f4')) # # Probably ~t (bitwise negation) is more proper to use here, # but this is arguably less intuitive to understand at a glance, and # would fail if 't' is actually an integer array instead of boolean: # # b = (~t)*a # assert_equal(b, [0.0, 1.5]) # assert_equal(b.dtype, np.dtype('f4')) def test_result_type(self): self.check_promotion_cases(np.result_type) assert_(np.result_type(None) == np.dtype(None)) def test_promote_types_endian(self): # promote_types should always return native-endian types assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8')) assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8')) assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21')) assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21')) assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21')) assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21')) assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8')) assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8')) assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8')) assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8')) assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8')) assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8')) assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8')) assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8')) assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8')) assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8')) def test_can_cast_and_promote_usertypes(self): # The rational type defines safe casting for signed integers, # boolean. Rational itself *does* cast safely to double. # (rational does not actually cast to all signed integers, e.g. 
# int64 can be both long and longlong and it registers only the first) valid_types = ["int8", "int16", "int32", "int64", "bool"] invalid_types = "BHILQP" + "FDG" + "mM" + "f" + "V" rational_dt = np.dtype(rational) for numpy_dtype in valid_types: numpy_dtype = np.dtype(numpy_dtype) assert np.can_cast(numpy_dtype, rational_dt) assert np.promote_types(numpy_dtype, rational_dt) is rational_dt for numpy_dtype in invalid_types: numpy_dtype = np.dtype(numpy_dtype) assert not np.can_cast(numpy_dtype, rational_dt) with pytest.raises(TypeError): np.promote_types(numpy_dtype, rational_dt) double_dt = np.dtype("double") assert np.can_cast(rational_dt, double_dt) assert np.promote_types(double_dt, rational_dt) is double_dt @pytest.mark.parametrize("swap", ["", "swap"]) @pytest.mark.parametrize("string_dtype", ["U", "S"]) def test_promote_types_strings(self, swap, string_dtype): if swap == "swap": promote_types = lambda a, b: np.promote_types(b, a) else: promote_types = np.promote_types S = string_dtype # Promote numeric with unsized string: assert_equal(promote_types('bool', S), np.dtype(S + '5')) assert_equal(promote_types('b', S), np.dtype(S + '4')) assert_equal(promote_types('u1', S), np.dtype(S + '3')) assert_equal(promote_types('u2', S), np.dtype(S + '5')) assert_equal(promote_types('u4', S), np.dtype(S + '10')) assert_equal(promote_types('u8', S), np.dtype(S + '20')) assert_equal(promote_types('i1', S), np.dtype(S + '4')) assert_equal(promote_types('i2', S), np.dtype(S + '6')) assert_equal(promote_types('i4', S), np.dtype(S + '11')) assert_equal(promote_types('i8', S), np.dtype(S + '21')) # Promote numeric with sized string: assert_equal(promote_types('bool', S + '1'), np.dtype(S + '5')) assert_equal(promote_types('bool', S + '30'), np.dtype(S + '30')) assert_equal(promote_types('b', S + '1'), np.dtype(S + '4')) assert_equal(promote_types('b', S + '30'), np.dtype(S + '30')) assert_equal(promote_types('u1', S + '1'), np.dtype(S + '3')) assert_equal(promote_types('u1', S + '30'), np.dtype(S + '30')) assert_equal(promote_types('u2', S + '1'), np.dtype(S + '5')) assert_equal(promote_types('u2', S + '30'), np.dtype(S + '30')) assert_equal(promote_types('u4', S + '1'), np.dtype(S + '10')) assert_equal(promote_types('u4', S + '30'), np.dtype(S + '30')) assert_equal(promote_types('u8', S + '1'), np.dtype(S + '20')) assert_equal(promote_types('u8', S + '30'), np.dtype(S + '30')) # Promote with object: assert_equal(promote_types('O', S + '30'), np.dtype('O')) @pytest.mark.parametrize(["dtype1", "dtype2"], [[np.dtype("V6"), np.dtype("V10")], # mismatch shape # Mismatching names: [np.dtype([("name1", "i8")]), np.dtype([("name2", "i8")])], ]) def test_invalid_void_promotion(self, dtype1, dtype2): with pytest.raises(TypeError): np.promote_types(dtype1, dtype2) @pytest.mark.parametrize(["dtype1", "dtype2"], [[np.dtype("V10"), np.dtype("V10")], [np.dtype([("name1", "i8")]), np.dtype([("name1", np.dtype("i8").newbyteorder())])], [np.dtype("i8,i8"), np.dtype("i8,>i8")], [np.dtype("i8,i8"), np.dtype("i4,i4")], ]) def test_valid_void_promotion(self, dtype1, dtype2): assert np.promote_types(dtype1, dtype2) == dtype1 @pytest.mark.parametrize("dtype", list(np.typecodes["All"]) + ["i,i", "10i", "S3", "S100", "U3", "U100", rational]) def test_promote_identical_types_metadata(self, dtype): # The same type passed in twice to promote types always # preserves metadata metadata = {1: 1} dtype = np.dtype(dtype, metadata=metadata) res = np.promote_types(dtype, dtype) assert res.metadata == dtype.metadata # byte-swapping 
preserves and makes the dtype native: dtype = dtype.newbyteorder() if dtype.isnative: # The type does not have byte swapping return res = np.promote_types(dtype, dtype) # Metadata is (currently) generally lost on byte-swapping (except for # unicode. if dtype.char != "U": assert res.metadata is None else: assert res.metadata == metadata assert res.isnative @pytest.mark.slow @pytest.mark.filterwarnings('ignore:Promotion of numbers:FutureWarning') @pytest.mark.parametrize(["dtype1", "dtype2"], itertools.product( list(np.typecodes["All"]) + ["i,i", "S3", "S100", "U3", "U100", rational], repeat=2)) def test_promote_types_metadata(self, dtype1, dtype2): """Metadata handling in promotion does not appear formalized right now in NumPy. This test should thus be considered to document behaviour, rather than test the correct definition of it. This test is very ugly, it was useful for rewriting part of the promotion, but probably should eventually be replaced/deleted (i.e. when metadata handling in promotion is better defined). """ metadata1 = {1: 1} metadata2 = {2: 2} dtype1 = np.dtype(dtype1, metadata=metadata1) dtype2 = np.dtype(dtype2, metadata=metadata2) try: res = np.promote_types(dtype1, dtype2) except TypeError: # Promotion failed, this test only checks metadata return if res.char not in "USV" or res.names is not None or res.shape != (): # All except string dtypes (and unstructured void) lose metadata # on promotion (unless both dtypes are identical). # At some point structured ones did not, but were restrictive. assert res.metadata is None elif res == dtype1: # If one result is the result, it is usually returned unchanged: assert res is dtype1 elif res == dtype2: # dtype1 may have been cast to the same type/kind as dtype2. # If the resulting dtype is identical we currently pick the cast # version of dtype1, which lost the metadata: if np.promote_types(dtype1, dtype2.kind) == dtype2: res.metadata is None else: res.metadata == metadata2 else: assert res.metadata is None # Try again for byteswapped version dtype1 = dtype1.newbyteorder() assert dtype1.metadata == metadata1 res_bs = np.promote_types(dtype1, dtype2) assert res_bs == res assert res_bs.metadata == res.metadata def test_can_cast(self): assert_(np.can_cast(np.int32, np.int64)) assert_(np.can_cast(np.float64, complex)) assert_(not np.can_cast(complex, float)) assert_(np.can_cast('i8', 'f8')) assert_(not np.can_cast('i8', 'f4')) assert_(np.can_cast('i4', 'S11')) assert_(np.can_cast('i8', 'i8', 'no')) assert_(not np.can_cast('<i8', '>i8', 'no')) assert_(np.can_cast('<i8', '>i8', 'equiv')) assert_(not np.can_cast('<i4', '>i8', 'equiv')) assert_(np.can_cast('<i4', '>i8', 'safe')) assert_(not np.can_cast('<i8', '>i4', 'safe')) assert_(np.can_cast('<i8', '>i4', 'same_kind')) assert_(not np.can_cast('<i8', '>u4', 'same_kind')) assert_(np.can_cast('<i8', '>u4', 'unsafe')) assert_(np.can_cast('bool', 'S5')) assert_(not np.can_cast('bool', 'S4')) assert_(np.can_cast('b', 'S4')) assert_(not np.can_cast('b', 'S3')) assert_(np.can_cast('u1', 'S3')) assert_(not np.can_cast('u1', 'S2')) assert_(np.can_cast('u2', 'S5')) assert_(not np.can_cast('u2', 'S4')) assert_(np.can_cast('u4', 'S10')) assert_(not np.can_cast('u4', 'S9')) assert_(np.can_cast('u8', 'S20')) assert_(not np.can_cast('u8', 'S19')) assert_(np.can_cast('i1', 'S4')) assert_(not np.can_cast('i1', 'S3')) assert_(np.can_cast('i2', 'S6')) assert_(not np.can_cast('i2', 'S5')) assert_(np.can_cast('i4', 'S11')) assert_(not np.can_cast('i4', 'S10')) assert_(np.can_cast('i8', 'S21')) assert_(not 
np.can_cast('i8', 'S20')) assert_(np.can_cast('bool', 'S5')) assert_(not np.can_cast('bool', 'S4')) assert_(np.can_cast('b', 'U4')) assert_(not np.can_cast('b', 'U3')) assert_(np.can_cast('u1', 'U3')) assert_(not np.can_cast('u1', 'U2')) assert_(np.can_cast('u2', 'U5')) assert_(not np.can_cast('u2', 'U4')) assert_(np.can_cast('u4', 'U10')) assert_(not np.can_cast('u4', 'U9')) assert_(np.can_cast('u8', 'U20')) assert_(not np.can_cast('u8', 'U19')) assert_(np.can_cast('i1', 'U4')) assert_(not np.can_cast('i1', 'U3')) assert_(np.can_cast('i2', 'U6')) assert_(not np.can_cast('i2', 'U5')) assert_(np.can_cast('i4', 'U11')) assert_(not np.can_cast('i4', 'U10')) assert_(np.can_cast('i8', 'U21')) assert_(not np.can_cast('i8', 'U20')) assert_raises(TypeError, np.can_cast, 'i4', None) assert_raises(TypeError, np.can_cast, None, 'i4') # Also test keyword arguments assert_(np.can_cast(from_=np.int32, to=np.int64)) def test_can_cast_simple_to_structured(self): # Non-structured can only be cast to structured in 'unsafe' mode. assert_(not np.can_cast('i4', 'i4,i4')) assert_(not np.can_cast('i4', 'i4,i2')) assert_(np.can_cast('i4', 'i4,i4', casting='unsafe')) assert_(np.can_cast('i4', 'i4,i2', casting='unsafe')) # Even if there is just a single field which is OK. assert_(not np.can_cast('i2', [('f1', 'i4')])) assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind')) assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe')) # It should be the same for recursive structured or subarrays. assert_(not np.can_cast('i2', [('f1', 'i4,i4')])) assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe')) assert_(not np.can_cast('i2', [('f1', '(2,3)i4')])) assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe')) def test_can_cast_structured_to_simple(self): # Need unsafe casting for structured to simple. assert_(not np.can_cast([('f1', 'i4')], 'i4')) assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe')) assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe')) # Since it is unclear what is being cast, multiple fields to # single should not work even for unsafe casting. assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe')) # But a single field inside a single field is OK. assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4')) assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe')) # And a subarray is fine too - it will just take the first element # (arguably not very consistently; might also take the first field). assert_(not np.can_cast([('f0', '(3,)i4')], 'i4')) assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe')) # But a structured subarray with multiple fields should fail. assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4', casting='unsafe')) def test_can_cast_values(self): # With NumPy 2 and NEP 50, can_cast errors on Python scalars. We could # define this as (usually safe) at some point, and already do so # in `copyto` and ufuncs (but there an error is raised if the integer # is out of bounds and a warning for out-of-bound floats). # Raises even for unsafe, previously checked within range (for floats # that was approximately whether it would overflow to inf). 
with pytest.raises(TypeError): np.can_cast(4, "int8", casting="unsafe") with pytest.raises(TypeError): np.can_cast(4.0, "float64", casting="unsafe") with pytest.raises(TypeError): np.can_cast(4j, "complex128", casting="unsafe") @pytest.mark.parametrize("dtype", list("?bhilqBHILQefdgFDG") + [rational]) def test_can_cast_scalars(self, dtype): # Basic test to ensure that scalars are supported in can-cast # (does not check behavior exhaustively). dtype = np.dtype(dtype) scalar = dtype.type(0) assert np.can_cast(scalar, "int64") == np.can_cast(dtype, "int64") assert np.can_cast(scalar, "float32", casting="unsafe") # Custom exception class to test exception propagation in fromiter
TestTypes
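The promotion and casting behaviours asserted in the record above can be spot-checked outside the test harness. A minimal sketch, assuming a recent NumPy install (not part of the dataset record):

# Quick spot-check of a few behaviours the TestTypes record asserts.
import numpy as np

assert np.promote_types(np.int32, np.float32) == np.float64  # ints promote past float32
assert np.can_cast("i8", "f8") and not np.can_cast("i8", "f4")
assert np.can_cast("<i8", ">i8", "equiv")           # byte order is ignored under 'equiv'
assert not np.can_cast("<i8", ">i8", "no")          # but not under 'no'
assert np.can_cast("u8", "S20") and not np.can_cast("u8", "S19")  # string width must fit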
python
Unity-Technologies__ml-agents
ml-agents-envs/mlagents_envs/exception.py
{ "start": 109, "end": 242 }
class ____(UnityException):
    """
    Related to errors starting and closing environment.
    """

    pass
UnityEnvironmentException
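A hedged usage sketch for the exception above; the build path is a placeholder, not taken from the repository:

from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.exception import UnityEnvironmentException

try:
    env = UnityEnvironment(file_name="path/to/MyBuild")  # hypothetical build path
except UnityEnvironmentException as exc:
    print(f"Could not start the Unity environment: {exc}")
else:
    env.close()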
python
apache__airflow
airflow-core/src/airflow/models/taskinstancehistory.py
{ "start": 1891, "end": 8398 }
class ____(Base): """ Store old tries of TaskInstances. :meta private: """ __tablename__ = "task_instance_history" task_instance_id: Mapped[str] = mapped_column( String(36).with_variant(postgresql.UUID(as_uuid=False), "postgresql"), nullable=False, primary_key=True, ) task_id: Mapped[str] = mapped_column(StringID(), nullable=False) dag_id: Mapped[str] = mapped_column(StringID(), nullable=False) run_id: Mapped[str] = mapped_column(StringID(), nullable=False) map_index: Mapped[int] = mapped_column(Integer, nullable=False, server_default=text("-1")) try_number: Mapped[int] = mapped_column(Integer, nullable=False) start_date: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True) end_date: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True) duration: Mapped[float | None] = mapped_column(Float, nullable=True) state: Mapped[str | None] = mapped_column(String(20), nullable=True) max_tries: Mapped[int | None] = mapped_column(Integer, server_default=text("-1"), nullable=True) hostname: Mapped[str | None] = mapped_column(String(1000), nullable=True) unixname: Mapped[str | None] = mapped_column(String(1000), nullable=True) pool: Mapped[str] = mapped_column(String(256), nullable=False) pool_slots: Mapped[int] = mapped_column(Integer, default=1, nullable=False) queue: Mapped[str | None] = mapped_column(String(256), nullable=True) priority_weight: Mapped[int | None] = mapped_column(Integer, nullable=True) operator: Mapped[str | None] = mapped_column(String(1000), nullable=True) custom_operator_name: Mapped[str | None] = mapped_column(String(1000), nullable=True) queued_dttm: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True) scheduled_dttm: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True) queued_by_job_id: Mapped[int | None] = mapped_column(Integer, nullable=True) pid: Mapped[int | None] = mapped_column(Integer, nullable=True) executor: Mapped[str | None] = mapped_column(String(1000), nullable=True) executor_config: Mapped[dict | None] = mapped_column(ExecutorConfigType(pickler=dill), nullable=True) updated_at: Mapped[datetime | None] = mapped_column( UtcDateTime, default=timezone.utcnow, onupdate=timezone.utcnow, nullable=True ) rendered_map_index: Mapped[str | None] = mapped_column(String(250), nullable=True) context_carrier: Mapped[dict | None] = mapped_column(MutableDict.as_mutable(ExtendedJSON), nullable=True) span_status: Mapped[str] = mapped_column( String(250), server_default=SpanStatus.NOT_STARTED, nullable=False ) external_executor_id: Mapped[str | None] = mapped_column(StringID(), nullable=True) trigger_id: Mapped[int | None] = mapped_column(Integer, nullable=True) trigger_timeout: Mapped[DateTime | None] = mapped_column(DateTime, nullable=True) next_method: Mapped[str | None] = mapped_column(String(1000), nullable=True) next_kwargs: Mapped[dict | None] = mapped_column(MutableDict.as_mutable(ExtendedJSON), nullable=True) task_display_name: Mapped[str | None] = mapped_column(String(2000), nullable=True) dag_version_id: Mapped[str | None] = mapped_column(UUIDType(binary=False), nullable=True) dag_version = relationship( "DagVersion", primaryjoin="TaskInstanceHistory.dag_version_id == DagVersion.id", viewonly=True, foreign_keys=[dag_version_id], ) dag_run = relationship( "DagRun", primaryjoin="and_(TaskInstanceHistory.run_id == DagRun.run_id, DagRun.dag_id == TaskInstanceHistory.dag_id)", viewonly=True, foreign_keys=[run_id, dag_id], ) hitl_detail = relationship("HITLDetailHistory", lazy="noload", uselist=False) def __init__( self, 
ti: TaskInstance, state: str | None = None, ): super().__init__() for column in self.__table__.columns: if column.name == "id": continue if column.name == "task_instance_id": setattr(self, column.name, ti.id) continue setattr(self, column.name, getattr(ti, column.name)) if state: self.state = state __table_args__ = ( ForeignKeyConstraint( [dag_id, task_id, run_id, map_index], [ "task_instance.dag_id", "task_instance.task_id", "task_instance.run_id", "task_instance.map_index", ], name="task_instance_history_ti_fkey", ondelete="CASCADE", onupdate="CASCADE", ), UniqueConstraint( "dag_id", "task_id", "run_id", "map_index", "try_number", name="task_instance_history_dtrt_uq", ), Index("idx_tih_dag_run", dag_id, run_id), ) @property def id(self) -> str: """Alias for primary key field to support TaskInstance.""" return self.task_instance_id @staticmethod @provide_session def record_ti(ti: TaskInstance, session: Session = NEW_SESSION) -> None: """Record a TaskInstance to TaskInstanceHistory.""" exists_q = session.scalar( select(func.count(TaskInstanceHistory.task_id)).where( TaskInstanceHistory.dag_id == ti.dag_id, TaskInstanceHistory.task_id == ti.task_id, TaskInstanceHistory.run_id == ti.run_id, TaskInstanceHistory.map_index == ti.map_index, TaskInstanceHistory.try_number == ti.try_number, ) ) if exists_q: return ti_history_state = ti.state if ti.state not in State.finished: ti_history_state = TaskInstanceState.FAILED ti.end_date = timezone.utcnow() ti.set_duration() ti_history = TaskInstanceHistory(ti, state=ti_history_state) session.add(ti_history) ti_hitl_detail = session.scalar(select(HITLDetail).where(HITLDetail.ti_id == ti.id)) if ti_hitl_detail is not None: session.add(HITLDetailHistory(ti_hitl_detail)) @provide_session def get_dagrun(self, session: Session = NEW_SESSION) -> DagRun: """Return the DagRun for this TaskInstanceHistory, matching TaskInstance.""" return self.dag_run
TaskInstanceHistory
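A sketch of reading the recorded tries back with the model above; the session helper usage and the dag/task/run identifiers are assumptions for illustration, not taken from the record:

from sqlalchemy import select

from airflow.models.taskinstancehistory import TaskInstanceHistory
from airflow.utils.session import create_session

with create_session() as session:
    tries = session.scalars(
        select(TaskInstanceHistory)
        .where(
            TaskInstanceHistory.dag_id == "example_dag",      # placeholder identifiers
            TaskInstanceHistory.task_id == "extract",
            TaskInstanceHistory.run_id == "manual__2024-01-01T00:00:00",
        )
        .order_by(TaskInstanceHistory.try_number)
    ).all()
    for t in tries:
        print(t.try_number, t.state, t.duration)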
python
walkccc__LeetCode
solutions/1214. Two Sum BSTs/1214.py
{ "start": 0, "end": 572 }
class ____:
  def __init__(self, root: TreeNode | None, leftToRight: bool):
    self.stack = []
    self.leftToRight = leftToRight
    self._pushUntilNone(root)

  def hasNext(self) -> bool:
    return len(self.stack) > 0

  def next(self) -> int:
    node = self.stack.pop()
    if self.leftToRight:
      self._pushUntilNone(node.right)
    else:
      self._pushUntilNone(node.left)
    return node.val

  def _pushUntilNone(self, root: TreeNode | None):
    while root:
      self.stack.append(root)
      root = root.left if self.leftToRight else root.right
BSTIterator
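The record above is from LeetCode 1214 (Two Sum BSTs): the controlled iterator supports a two-pointer scan, ascending over the first tree and descending over the second. A sketch of that use, assuming the usual LeetCode TreeNode and the unmasked class name:

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


def two_sum_bsts(root1: TreeNode | None, root2: TreeNode | None, target: int) -> bool:
    # Ascending walk over tree 1, descending walk over tree 2 (BSTIterator from above).
    left = BSTIterator(root1, leftToRight=True)
    right = BSTIterator(root2, leftToRight=False)
    if not left.hasNext() or not right.hasNext():
        return False
    l, r = left.next(), right.next()
    while True:
        s = l + r
        if s == target:
            return True
        if s < target:
            if not left.hasNext():
                return False
            l = left.next()
        else:
            if not right.hasNext():
                return False
            r = right.next()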
python
walkccc__LeetCode
solutions/1602. Find Nearest Right Node in Binary Tree/1602.py
{ "start": 0, "end": 525 }
class ____:
  def findNearestRightNode(
      self,
      root: TreeNode,
      u: TreeNode,
  ) -> TreeNode | None:
    ans = None
    targetDepth = -1

    def dfs(root: TreeNode, depth: int) -> None:
      nonlocal ans
      nonlocal targetDepth
      if not root:
        return
      if root == u:
        targetDepth = depth
        return
      if depth == targetDepth and not ans:
        ans = root
        return
      dfs(root.left, depth + 1)
      dfs(root.right, depth + 1)

    dfs(root, 0)
    return ans
Solution
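A small usage sketch for the record above (LeetCode 1602); the tree is invented and TreeNode is the usual LeetCode node type:

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


#        1
#       / \
#      2   3
#       \    \
#        4    5
n4, n5 = TreeNode(4), TreeNode(5)
root = TreeNode(1, TreeNode(2, None, n4), TreeNode(3, None, n5))

assert Solution().findNearestRightNode(root, n4) is n5    # next node to the right at depth 2
assert Solution().findNearestRightNode(root, n5) is None  # nothing to the right of 5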
python
pallets__click
examples/complex/complex/cli.py
{ "start": 666, "end": 1608 }
class ____(click.Group):
    def list_commands(self, ctx):
        rv = []
        for filename in os.listdir(cmd_folder):
            if filename.endswith(".py") and filename.startswith("cmd_"):
                rv.append(filename[4:-3])
        rv.sort()
        return rv

    def get_command(self, ctx, name):
        try:
            mod = __import__(f"complex.commands.cmd_{name}", None, None, ["cli"])
        except ImportError:
            return
        return mod.cli


@click.command(cls=ComplexCLI, context_settings=CONTEXT_SETTINGS)
@click.option(
    "--home",
    type=click.Path(exists=True, file_okay=False, resolve_path=True),
    help="Changes the folder to operate on.",
)
@click.option("-v", "--verbose", is_flag=True, help="Enables verbose mode.")
@pass_environment
def cli(ctx, verbose, home):
    """A complex command line interface."""
    ctx.verbose = verbose
    if home is not None:
        ctx.home = home
ComplexCLI
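The group above discovers subcommands lazily from cmd_*.py files in the commands folder. A sketch of a module it would pick up, saved as complex/commands/cmd_status.py; the body is illustrative rather than copied from the repository, and Environment.log is assumed from the example app:

import click

from complex.cli import pass_environment


@click.command("status", short_help="Show a short status message.")
@pass_environment
def cli(ctx):
    """Illustrative subcommand; the loader imports this module and returns `cli`."""
    ctx.log("status: everything looks fine")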
python
gevent__gevent
src/gevent/tests/test__threadpool.py
{ "start": 9637, "end": 10109 }
class ____(TestPool):
    size = 10


# class TestJoinSleep(greentest.GenericGetTestCase):
#
#     def wait(self, timeout):
#         pool = ThreadPool(1)
#         pool.spawn(gevent.sleep, 10)
#         pool.join(timeout=timeout)
#
#
# class TestJoinSleep_raise_error(greentest.GenericWaitTestCase):
#
#     def wait(self, timeout):
#         pool = ThreadPool(1)
#         g = pool.spawn(gevent.sleep, 10)
#         pool.join(timeout=timeout, raise_error=True)
TestPool10
python
great-expectations__great_expectations
great_expectations/expectations/legacy_row_conditions.py
{ "start": 2770, "end": 5841 }
class ____(SerializableDictDot): """Condition that can be used to filter rows in a data set. Attributes: condition: String of the condition condition_type: Format of the condition e.g. for parsing """ condition: str condition_type: RowConditionParserType @override def to_dict(self) -> dict: """ Returns dictionary equivalent of this object. """ return { "condition": self.condition, "condition_type": self.condition_type.value, } @override def to_json_dict(self) -> dict: """ Returns JSON dictionary equivalent of this object. """ return convert_to_json_serializable(data=self.to_dict()) def parse_great_expectations_condition(row_condition: str): try: return condition.parseString(row_condition) except ParseException: raise ConditionParserError(f"unable to parse condition: {row_condition}") # noqa: TRY003 # FIXME CoP def parse_condition_to_spark( row_condition: str, ) -> pyspark.Column: parsed = parse_great_expectations_condition(row_condition) column = parsed["column"] if "condition_value" in parsed: return generate_condition_by_operator( F.col(column), parsed["op"], F.lit(parsed["condition_value"]) ) elif "fnumber" in parsed: try: num: int | float = int(parsed["fnumber"]) except ValueError: num = float(parsed["fnumber"]) return generate_condition_by_operator(F.col(column), parsed["op"], F.lit(num)) elif "notnull" in parsed and parsed["notnull"] is True: return F.col(column).isNotNull() else: raise ConditionParserError(f"unrecognized column condition: {row_condition}") # noqa: TRY003 # FIXME CoP def generate_condition_by_operator(column, op, value): operators = { "==": operator.eq, "<": operator.lt, ">": operator.gt, ">=": operator.ge, "<=": operator.le, "!=": operator.ne, } return operators[op](column, value) def parse_condition_to_sqlalchemy( row_condition: str, ) -> sqlalchemy.ColumnElement: parsed = parse_great_expectations_condition(row_condition) column = parsed["column"] if "condition_value" in parsed: return generate_condition_by_operator( sa.column(column), parsed["op"], parsed["condition_value"] ) elif "fnumber" in parsed: number_value = parsed["fnumber"] num = int(number_value) if number_value.isdigit() else float(number_value) return generate_condition_by_operator(sa.column(column), parsed["op"], num) elif "notnull" in parsed and parsed["notnull"] is True: return sa.not_(sa.column(column).is_(None)) else: raise ConditionParserError(f"unrecognized column condition: {row_condition}") # noqa: TRY003 # FIXME CoP # Re-export Column from conditions for backwards compatibility from great_expectations.expectations.conditions import Column # noqa: F401
RowCondition
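A small sketch exercising the generate_condition_by_operator helper visible at the end of the record above; the column name and threshold are invented:

import sqlalchemy as sa

clause = generate_condition_by_operator(sa.column("passenger_count"), ">=", 2)
print(clause)  # renders roughly as: passenger_count >= :passenger_count_1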
python
numba__llvmlite
llvmlite/ir/instructions.py
{ "start": 9697, "end": 10687 }
class ____(PredictableInstr, Terminator):

    def __init__(self, parent, opname, val, default):
        super(SwitchInstr, self).__init__(parent, opname, [val])
        self.default = default
        self.cases = []

    @property
    def value(self):
        return self.operands[0]

    def add_case(self, val, block):
        assert isinstance(block, Block)
        if not isinstance(val, Value):
            val = Constant(self.value.type, val)
        self.cases.append((val, block))

    def descr(self, buf):
        cases = ["{0} {1}, label {2}".format(val.type, val.get_reference(),
                                             blk.get_reference())
                 for val, blk in self.cases]
        buf.append("switch {0} {1}, label {2} [{3}] {4}\n".format(
            self.value.type,
            self.value.get_reference(),
            self.default.get_reference(),
            ' '.join(cases),
            self._stringify_metadata(leading_comma=True),
        ))
SwitchInstr
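A sketch of how this instruction is normally produced through llvmlite's IRBuilder; the function and block names are arbitrary:

from llvmlite import ir

i32 = ir.IntType(32)
module = ir.Module(name="demo")
func = ir.Function(module, ir.FunctionType(i32, [i32]), name="classify")
entry = func.append_basic_block("entry")
case_one = func.append_basic_block("case_one")
otherwise = func.append_basic_block("otherwise")

builder = ir.IRBuilder(entry)
switch = builder.switch(func.args[0], otherwise)  # returns the switch instruction
switch.add_case(1, case_one)                      # plain ints get wrapped into Constants

for block in (case_one, otherwise):
    ir.IRBuilder(block).ret(ir.Constant(i32, 0))

print(module)  # emits the textual LLVM IR, including the switch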
python
coleifer__peewee
peewee.py
{ "start": 39300, "end": 40166 }
class ____(WrappedNode):
    c = _DynamicEntity()

    def __init__(self, node, alias):
        super(Alias, self).__init__(node)
        self._alias = alias

    def __hash__(self):
        return hash(self._alias)

    @property
    def name(self):
        return self._alias

    @name.setter
    def name(self, value):
        self._alias = value

    def alias(self, alias=None):
        if alias is None:
            return self.node
        else:
            return Alias(self.node, alias)

    def unalias(self):
        return self.node

    def is_alias(self):
        return True

    def __sql__(self, ctx):
        if ctx.scope == SCOPE_SOURCE:
            return (ctx
                    .sql(self.node)
                    .literal(' AS ')
                    .sql(Entity(self._alias)))
        else:
            return ctx.sql(Entity(self._alias))
Alias
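These nodes are usually created via .alias() on a column or expression; a sketch with a made-up model:

import peewee as pw

db = pw.SqliteDatabase(":memory:")


class Tweet(pw.Model):
    user = pw.CharField()

    class Meta:
        database = db


count = pw.fn.COUNT(Tweet.id).alias("ct")  # wraps the COUNT expression in an Alias node
query = Tweet.select(Tweet.user, count).group_by(Tweet.user)
print(query)  # ... COUNT("t1"."id") AS "ct" ... (approximate rendering)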
python
sqlalchemy__sqlalchemy
test/typing/plain_files/orm/composite_dc.py
{ "start": 331, "end": 424 }
class ____:
    def __init__(self, x: int, y: int):
        self.x = x
        self.y = y
Point
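In SQLAlchemy's typing tests a value class like this is paired with composite() column mappings. A sketch of that pattern using a dataclass variant (composite() can introspect dataclass fields); the table and column names are invented:

import dataclasses

from sqlalchemy.orm import DeclarativeBase, Mapped, composite, mapped_column


@dataclasses.dataclass
class Point2D:
    x: int
    y: int


class Base(DeclarativeBase):
    pass


class Vertex(Base):
    __tablename__ = "vertices"

    id: Mapped[int] = mapped_column(primary_key=True)
    start: Mapped[Point2D] = composite(mapped_column("x1"), mapped_column("y1"))
    end: Mapped[Point2D] = composite(mapped_column("x2"), mapped_column("y2"))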
python
pytorch__pytorch
benchmarks/operator_benchmark/pt/batchnorm_test.py
{ "start": 1211, "end": 2754 }
class ____(op_bench.TorchBenchmarkBase):
    def init(self, M, N, K, device, training, cudnn):
        self.inputs = {
            "input_one": torch.rand(
                M, N, K, device=device, requires_grad=self.auto_set()
            ),
            "mean": torch.rand(N, device=device),
            "var": torch.rand(N, device=device),
            "weight": torch.rand(N, device=device),
            "bias": torch.rand(N, device=device),
            "training": training,
            "cudnn": cudnn,
        }
        self.set_module_name("batchnorm")

    def forward(self, input_one, mean, var, weight, bias, training, cudnn):
        with torch.backends.cudnn.flags(enabled=cudnn):
            return F.batch_norm(input_one, mean, var, weight, bias, training)


op_bench.generate_pt_test(
    batchnorm_configs_short + batchnorm_configs_long, BatchNormBenchmark
)
op_bench.generate_pt_gradient_test(
    batchnorm_configs_short + batchnorm_configs_long, BatchNormBenchmark
)


batchnorm1d_configs_short = cudnn_benchmark_configs(
    op_bench.config_list(
        attr_names=["N", "C"],
        attrs=[
            [3136, 256],
        ],
        cross_product_configs={
            "device": ["cpu", "cuda"],
            "training": [True, False],
        },
        tags=["short"],
    )
)

batchnorm1d_configs_long = cudnn_benchmark_configs(
    op_bench.cross_product_configs(
        N=[2, 128],
        C=[8192, 2048],
        device=["cpu", "cuda"],
        training=[True, False],
        tags=["long"],
    )
)
BatchNormBenchmark
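The op under benchmark above can also be called directly; a minimal sketch with shapes matching the (M, N, K) layout of the configs:

import torch
import torch.nn.functional as F

x = torch.randn(32, 16, 64)  # (M, N, K); normalization runs over the channel dim N
mean, var = torch.zeros(16), torch.ones(16)
weight, bias = torch.ones(16), torch.zeros(16)

out = F.batch_norm(x, mean, var, weight, bias, training=False)
print(out.shape)  # torch.Size([32, 16, 64])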
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/dep_with_variants_if_develop/package.py
{ "start": 216, "end": 466 }
class ____(Package):
    """Package that adds a dependency with many variants only at @develop"""

    homepage = "https://dev.null"

    version("develop")
    version("1.0")

    depends_on("dep-with-variants", when="@develop")
DepWithVariantsIfDevelop
python
scrapy__scrapy
scrapy/core/downloader/handlers/http11.py
{ "start": 4477, "end": 4581 }
class ____(Exception):
    """An HTTP CONNECT tunnel could not be established by the proxy."""
TunnelError
python
ray-project__ray
python/ray/_private/thirdparty/dacite/config.py
{ "start": 115, "end": 407 }
class ____:
    type_hooks: Dict[Type, Callable[[Any], Any]] = field(default_factory=dict)
    cast: List[Type] = field(default_factory=list)
    forward_references: Optional[Dict[str, Any]] = None
    check_types: bool = True
    strict: bool = False
    strict_unions_match: bool = False
Config
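A usage sketch for the Config dataclass above; it is vendored under ray._private.thirdparty here, but the public dacite package exposes the same API, which this example uses:

from dataclasses import dataclass
from datetime import date

from dacite import Config, from_dict


@dataclass
class User:
    name: str
    joined: date


user = from_dict(
    data_class=User,
    data={"name": "ada", "joined": "1843-01-01"},
    config=Config(type_hooks={date: date.fromisoformat}),
)
print(user)  # User(name='ada', joined=datetime.date(1843, 1, 1))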
python
Lightning-AI__lightning
src/lightning/pytorch/loggers/tensorboard.py
{ "start": 1464, "end": 10417 }
class ____(Logger, FabricTensorBoardLogger): r"""Log to local or remote file system in `TensorBoard <https://www.tensorflow.org/tensorboard>`_ format. Implemented using :class:`~tensorboardX.SummaryWriter`. Logs are saved to ``os.path.join(save_dir, name, version)``. This is the default logger in Lightning, it comes preinstalled. This logger supports logging to remote filesystems via ``fsspec``. Make sure you have it installed and you don't have tensorflow (otherwise it will use tf.io.gfile instead of fsspec). Example: .. testcode:: :skipif: not _TENSORBOARD_AVAILABLE or not _TENSORBOARDX_AVAILABLE from lightning.pytorch import Trainer from lightning.pytorch.loggers import TensorBoardLogger logger = TensorBoardLogger("tb_logs", name="my_model") trainer = Trainer(logger=logger) Args: save_dir: Save directory name: Experiment name. Defaults to ``'default'``. If it is the empty string then no per-experiment subdirectory is used. version: Experiment version. If version is not specified the logger inspects the save directory for existing versions, then automatically assigns the next available version. If it is a string then it is used as the run-specific subdirectory name, otherwise ``'version_${version}'`` is used. log_graph: Adds the computational graph to tensorboard. This requires that the user has defined the `self.example_input_array` attribute in their model. default_hp_metric: Enables a placeholder metric with key `hp_metric` when `log_hyperparams` is called without a metric (otherwise calls to log_hyperparams without a metric are ignored). prefix: A string to put at the beginning of metric keys. sub_dir: Sub-directory to group TensorBoard logs. If a sub_dir argument is passed then logs are saved in ``/save_dir/name/version/sub_dir/``. Defaults to ``None`` in which logs are saved in ``/save_dir/name/version/``. \**kwargs: Additional arguments used by :class:`tensorboardX.SummaryWriter` can be passed as keyword arguments in this logger. To automatically flush to disk, `max_queue` sets the size of the queue for pending logs before flushing. `flush_secs` determines how many seconds elapses before flushing. """ NAME_HPARAMS_FILE = "hparams.yaml" def __init__( self, save_dir: _PATH, name: Optional[str] = "lightning_logs", version: Optional[Union[int, str]] = None, log_graph: bool = False, default_hp_metric: bool = True, prefix: str = "", sub_dir: Optional[_PATH] = None, **kwargs: Any, ): super().__init__( root_dir=save_dir, name=name, version=version, default_hp_metric=default_hp_metric, prefix=prefix, sub_dir=sub_dir, **kwargs, ) if log_graph and not _TENSORBOARD_AVAILABLE: rank_zero_warn( "You set `TensorBoardLogger(log_graph=True)` but `tensorboard` is not available.\n" f"{str(_TENSORBOARD_AVAILABLE)}" ) self._log_graph = log_graph and _TENSORBOARD_AVAILABLE self.hparams: Union[dict[str, Any], Namespace] = {} @property @override def root_dir(self) -> str: """Parent directory for all tensorboard checkpoint subdirectories. If the experiment name parameter is an empty string, no experiment subdirectory is used and the checkpoint will be saved in "save_dir/version" """ return os.path.join(super().root_dir, self.name) @property @override def log_dir(self) -> str: """The directory for this run's tensorboard checkpoint. By default, it is named ``'version_${self.version}'`` but it can be overridden by passing a string value for the constructor's version parameter instead of ``None`` or an int. 
""" # create a pseudo standard path ala test-tube version = self.version if isinstance(self.version, str) else f"version_{self.version}" log_dir = os.path.join(self.root_dir, version) if isinstance(self.sub_dir, str): log_dir = os.path.join(log_dir, self.sub_dir) log_dir = os.path.expandvars(log_dir) log_dir = os.path.expanduser(log_dir) return log_dir @property @override def save_dir(self) -> str: """Gets the save directory where the TensorBoard experiments are saved. Returns: The local path to the save directory where the TensorBoard experiments are saved. """ return self._root_dir @override @rank_zero_only def log_hyperparams( self, params: Union[dict[str, Any], Namespace], metrics: Optional[dict[str, Any]] = None, step: Optional[int] = None, ) -> None: """Record hyperparameters. TensorBoard logs with and without saved hyperparameters are incompatible, the hyperparameters are then not displayed in the TensorBoard. Please delete or move the previously saved logs to display the new ones with hyperparameters. Args: params: A dictionary-like container with the hyperparameters metrics: Dictionary with metric names as keys and measured quantities as values step: Optional global step number for the logged metrics """ if _OMEGACONF_AVAILABLE: from omegaconf import Container, OmegaConf params = _convert_params(params) # store params to output if _OMEGACONF_AVAILABLE and isinstance(params, Container): self.hparams = OmegaConf.merge(self.hparams, params) else: self.hparams.update(params) return super().log_hyperparams(params=params, metrics=metrics, step=step) @override @rank_zero_only def log_graph( # type: ignore[override] self, model: "pl.LightningModule", input_array: Optional[Tensor] = None ) -> None: if not self._log_graph: return input_array = model.example_input_array if input_array is None else input_array if input_array is None: rank_zero_warn( "Could not log computational graph to TensorBoard: The `model.example_input_array` attribute" " is not set or `input_array` was not given." ) elif not isinstance(input_array, (Tensor, tuple)): rank_zero_warn( "Could not log computational graph to TensorBoard: The `input_array` or `model.example_input_array`" f" has type {type(input_array)} which can't be traced by TensorBoard. Make the input array a tuple" f" representing the positional arguments to the model's `forward()` implementation." ) else: input_array = model._on_before_batch_transfer(input_array) input_array = model._apply_batch_transfer_handler(input_array) with pl.core.module._jit_is_scripting(): self.experiment.add_graph(model, input_array) @override @rank_zero_only def save(self) -> None: super().save() dir_path = self.log_dir # prepare the file path hparams_file = os.path.join(dir_path, self.NAME_HPARAMS_FILE) # save the metatags file if it doesn't exist and the log directory exists if _is_dir(self._fs, dir_path) and not self._fs.isfile(hparams_file): save_hparams_to_yaml(hparams_file, self.hparams) @override @rank_zero_only def finalize(self, status: str) -> None: super().finalize(status) if status == "success": # saving hparams happens independent of experiment manager self.save() @override def after_save_checkpoint(self, checkpoint_callback: ModelCheckpoint) -> None: """Called after model checkpoint callback saves a new checkpoint. 
Args: checkpoint_callback: the model checkpoint callback instance """ pass @override def _get_next_version(self) -> int: root_dir = self.root_dir try: listdir_info = self._fs.listdir(root_dir) except OSError: return 0 existing_versions = [] for listing in listdir_info: d = listing["name"] bn = os.path.basename(d) if _is_dir(self._fs, d) and bn.startswith("version_"): dir_ver = bn.split("_")[1].replace("/", "") if dir_ver.isdigit(): existing_versions.append(int(dir_ver)) if len(existing_versions) == 0: return 0 return max(existing_versions) + 1
TensorBoardLogger
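A sketch of standalone use of the logger above, outside a Trainer; assumes tensorboard or tensorboardX is installed and the paths are illustrative:

from lightning.pytorch.loggers import TensorBoardLogger

logger = TensorBoardLogger("tb_logs", name="my_model")
logger.log_hyperparams({"lr": 1e-3, "batch_size": 32}, metrics={"hp_metric": 0.0})
logger.log_metrics({"train_loss": 0.25}, step=1)
logger.save()
print(logger.log_dir)  # tb_logs/my_model/version_0 on the first run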
python
huggingface__transformers
src/transformers/integrations/integration_utils.py
{ "start": 91487, "end": 96190 }
class ____(TrainerCallback): """ A [`TrainerCallback`] that sends the logs to [DVCLive](https://www.dvc.org/doc/dvclive). Use the environment variables below in `setup` to configure the integration. To customize this callback beyond those environment variables, see [here](https://dvc.org/doc/dvclive/ml-frameworks/huggingface). Args: live (`dvclive.Live`, *optional*, defaults to `None`): Optional Live instance. If None, a new instance will be created using **kwargs. log_model (Union[Literal["all"], bool], *optional*, defaults to `None`): Whether to use `dvclive.Live.log_artifact()` to log checkpoints created by [`Trainer`]. If set to `True`, the final checkpoint is logged at the end of training. If set to `"all"`, the entire [`TrainingArguments`]'s `output_dir` is logged at each checkpoint. """ def __init__( self, live: Any | None = None, log_model: Literal["all"] | bool | None = None, **kwargs, ): if not is_dvclive_available(): raise RuntimeError("DVCLiveCallback requires dvclive to be installed. Run `pip install dvclive`.") from dvclive import Live self._initialized = False self.live = None if isinstance(live, Live): self.live = live elif live is not None: raise RuntimeError(f"Found class {live.__class__} for live, expected dvclive.Live") self._log_model = log_model if self._log_model is None: log_model_env = os.getenv("HF_DVCLIVE_LOG_MODEL", "FALSE") if log_model_env.upper() in ENV_VARS_TRUE_VALUES: self._log_model = True elif log_model_env.lower() == "all": self._log_model = "all" def setup(self, args, state, model): """ Setup the optional DVCLive integration. To customize this callback beyond the environment variables below, see [here](https://dvc.org/doc/dvclive/ml-frameworks/huggingface). Environment: - **HF_DVCLIVE_LOG_MODEL** (`str`, *optional*): Whether to use `dvclive.Live.log_artifact()` to log checkpoints created by [`Trainer`]. If set to `True` or *1*, the final checkpoint is logged at the end of training. If set to `all`, the entire [`TrainingArguments`]'s `output_dir` is logged at each checkpoint. """ from dvclive import Live self._initialized = True if state.is_world_process_zero: if not self.live: self.live = Live() self.live.log_params(args.to_dict()) def on_train_begin(self, args, state, control, model=None, **kwargs): if not self._initialized: self.setup(args, state, model) def on_log(self, args, state, control, model=None, logs=None, **kwargs): if not self._initialized: self.setup(args, state, model) if state.is_world_process_zero: from dvclive.plots import Metric from dvclive.utils import standardize_metric_name for key, value in logs.items(): if Metric.could_log(value): self.live.log_metric(standardize_metric_name(key, "dvclive.huggingface"), value) else: logger.warning( "Trainer is attempting to log a value of " f'"{value}" of type {type(value)} for key "{key}" as a scalar. ' "This invocation of DVCLive's Live.log_metric() " "is incorrect so we dropped this attribute." 
) self.live.next_step() def on_save(self, args, state, control, **kwargs): if self._log_model == "all" and self._initialized and state.is_world_process_zero: self.live.log_artifact(args.output_dir) def on_train_end(self, args, state, control, **kwargs): if self._initialized and state.is_world_process_zero: from transformers.trainer import Trainer if self._log_model is True: fake_trainer = Trainer( args=args, model=kwargs.get("model"), processing_class=kwargs.get("processing_class"), eval_dataset=["fake"], ) name = "best" if args.load_best_model_at_end else "last" output_dir = os.path.join(args.output_dir, name) fake_trainer.save_model(output_dir) self.live.log_artifact(output_dir, name=name, type="model", copy=True) self.live.end()
DVCLiveCallback
python
psf__black
tests/data/cases/no_blank_line_before_docstring.py
{ "start": 217, "end": 312 }
class ____: """I want to be treated the same as if I were closer"""
TwoLinesBeforeDocstring
python
django__django
tests/bulk_create/models.py
{ "start": 4636, "end": 4824 }
class ____(models.Model):
    id = models.DateTimeField(primary_key=True, db_default=Now())

    class Meta:
        required_db_features = {"supports_expression_defaults"}
DbDefaultPrimaryKey
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/distributions/bijector_test.py
{ "start": 6841, "end": 7024 }
class ____(BijectorCachingTestBase, test.TestCase):
  """Test caching with BrokenBijector."""

  @property
  def broken_bijector_cls(self):
    return BrokenBijector
BijectorCachingTest
python
getsentry__sentry
src/sentry/organizations/services/organization/model.py
{ "start": 12700, "end": 13081 }
class ____(RpcUserOrganizationContext):
    """
    A context containing an intended organization member object as a potential invite, and
    the true inner organization member state as found for a given user_id if it exists, or
    just the organization member state of the invite if no such member exists.
    """

    invite_organization_member_id: int | None = 0
RpcUserInviteContext