language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
getsentry__sentry
src/sentry_plugins/pagerduty/client.py
{ "start": 233, "end": 1287 }
class ____(ApiClient): client = "sentry" plugin_name = "pagerduty" allow_redirects = False def __init__(self, service_key=None): self.service_key = service_key super().__init__() def build_url(self, path): return INTEGRATION_API_URL def request(self, data): payload = {"service_key": self.service_key} payload.update(data) return self._request(path="", method="post", data=payload) def trigger_incident( self, description, event_type, details, incident_key, client=None, client_url=None, contexts=None, ): return self.request( { "event_type": event_type, "description": description, "details": details, "incident_key": incident_key, "client": client or self.client, "client_url": client_url or absolute_uri(), "contexts": contexts, } )
PagerDutyPluginClient
python
scikit-learn__scikit-learn
sklearn/linear_model/_least_angle.py
{ "start": 71523, "end": 83135 }
class ____(LassoLars): """Lasso model fit with Lars using BIC or AIC for model selection. The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 AIC is the Akaike information criterion [2]_ and BIC is the Bayes Information criterion [3]_. Such criteria are useful to select the value of the regularization parameter by making a trade-off between the goodness of fit and the complexity of the model. A good model should explain well the data while being simple. Read more in the :ref:`User Guide <lasso_lars_ic>`. Parameters ---------- criterion : {'aic', 'bic'}, default='aic' The type of criterion to use. fit_intercept : bool, default=True Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (i.e. data is expected to be centered). verbose : bool or int, default=False Sets the verbosity amount. precompute : bool, 'auto' or array-like, default='auto' Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : int, default=500 Maximum number of iterations to perform. Can be used for early stopping. eps : float, default=np.finfo(float).eps The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the ``tol`` parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. copy_X : bool, default=True If True, X will be copied; else, it may be overwritten. positive : bool, default=False Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. Under the positive restriction the model coefficients do not converge to the ordinary-least-squares solution for small values of alpha. 
Only coefficients up to the smallest alpha value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso algorithm are typically in congruence with the solution of the coordinate descent Lasso estimator. As a consequence using LassoLarsIC only makes sense for problems where a sparse solution is expected and/or reached. noise_variance : float, default=None The estimated noise variance of the data. If `None`, an unbiased estimate is computed by an OLS model. However, it is only possible in the case where `n_samples > n_features + fit_intercept`. .. versionadded:: 1.1 Attributes ---------- coef_ : array-like of shape (n_features,) parameter vector (w in the formulation formula) intercept_ : float independent term in decision function. alpha_ : float the alpha parameter chosen by the information criterion alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays Maximum of covariances (in absolute value) at each iteration. ``n_alphas`` is either ``max_iter``, ``n_features`` or the number of nodes in the path with ``alpha >= alpha_min``, whichever is smaller. If a list, it will be of length `n_targets`. n_iter_ : int number of iterations run by lars_path to find the grid of alphas. criterion_ : array-like of shape (n_alphas,) The value of the information criteria ('aic', 'bic') across all alphas. The alpha which has the smallest information criterion is chosen, as specified in [1]_. noise_variance_ : float The estimated noise variance from the data used to compute the criterion. .. versionadded:: 1.1 n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. lasso_path : Compute Lasso path with coordinate descent. 
Lasso : Linear Model trained with L1 prior as regularizer (aka the Lasso). LassoCV : Lasso linear model with iterative fitting along a regularization path. LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. LassoLarsCV: Cross-validated Lasso, using the LARS algorithm. sklearn.decomposition.sparse_encode : Sparse coding. Notes ----- The number of degrees of freedom is computed as in [1]_. To have more details regarding the mathematical formulation of the AIC and BIC criteria, please refer to :ref:`User Guide <lasso_lars_ic>`. References ---------- .. [1] :arxiv:`Zou, Hui, Trevor Hastie, and Robert Tibshirani. "On the degrees of freedom of the lasso." The Annals of Statistics 35.5 (2007): 2173-2192. <0712.0881>` .. [2] `Wikipedia entry on the Akaike information criterion <https://en.wikipedia.org/wiki/Akaike_information_criterion>`_ .. [3] `Wikipedia entry on the Bayesian information criterion <https://en.wikipedia.org/wiki/Bayesian_information_criterion>`_ Examples -------- >>> from sklearn import linear_model >>> reg = linear_model.LassoLarsIC(criterion='bic') >>> X = [[-2, 2], [-1, 1], [0, 0], [1, 1], [2, 2]] >>> y = [-2.2222, -1.1111, 0, -1.1111, -2.2222] >>> reg.fit(X, y) LassoLarsIC(criterion='bic') >>> print(reg.coef_) [ 0. -1.11] For a detailed example of using this class, see :ref:`sphx_glr_auto_examples_linear_model_plot_lasso_lars_ic.py`. 
""" _parameter_constraints: dict = { **LassoLars._parameter_constraints, "criterion": [StrOptions({"aic", "bic"})], "noise_variance": [Interval(Real, 0, None, closed="left"), None], } for parameter in ["jitter", "fit_path", "alpha", "random_state"]: _parameter_constraints.pop(parameter) def __init__( self, criterion="aic", *, fit_intercept=True, verbose=False, precompute="auto", max_iter=500, eps=np.finfo(float).eps, copy_X=True, positive=False, noise_variance=None, ): self.criterion = criterion self.fit_intercept = fit_intercept self.positive = positive self.max_iter = max_iter self.verbose = verbose self.copy_X = copy_X self.precompute = precompute self.eps = eps self.fit_path = True self.noise_variance = noise_variance def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.target_tags.multi_output = False return tags @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y, copy_X=None): """Fit the model using X, y as training data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. Will be cast to X's dtype if necessary. copy_X : bool, default=None If provided, this parameter will override the choice of copy_X made at instance creation. If ``True``, X will be copied; else, it may be overwritten. Returns ------- self : object Returns an instance of self. 
""" if copy_X is None: copy_X = self.copy_X X, y = validate_data(self, X, y, force_writeable=True, y_numeric=True) X, y, Xmean, ymean, _, _ = _preprocess_data( X, y, fit_intercept=self.fit_intercept, copy=copy_X ) Gram = self.precompute alphas_, _, coef_path_, self.n_iter_ = lars_path( X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0, method="lasso", verbose=self.verbose, max_iter=self.max_iter, eps=self.eps, return_n_iter=True, positive=self.positive, ) n_samples = X.shape[0] if self.criterion == "aic": criterion_factor = 2 elif self.criterion == "bic": criterion_factor = log(n_samples) else: raise ValueError( f"criterion should be either bic or aic, got {self.criterion!r}" ) residuals = y[:, np.newaxis] - np.dot(X, coef_path_) residuals_sum_squares = np.sum(residuals**2, axis=0) degrees_of_freedom = np.zeros(coef_path_.shape[1], dtype=int) for k, coef in enumerate(coef_path_.T): mask = np.abs(coef) > np.finfo(coef.dtype).eps if not np.any(mask): continue # get the number of degrees of freedom equal to: # Xc = X[:, mask] # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs degrees_of_freedom[k] = np.sum(mask) self.alphas_ = alphas_ if self.noise_variance is None: self.noise_variance_ = self._estimate_noise_variance( X, y, positive=self.positive ) else: self.noise_variance_ = self.noise_variance self.criterion_ = ( n_samples * np.log(2 * np.pi * self.noise_variance_) + residuals_sum_squares / self.noise_variance_ + criterion_factor * degrees_of_freedom ) n_best = np.argmin(self.criterion_) self.alpha_ = alphas_[n_best] self.coef_ = coef_path_[:, n_best] self._set_intercept(Xmean, ymean) return self def _estimate_noise_variance(self, X, y, positive): """Compute an estimate of the variance with an OLS model. Parameters ---------- X : ndarray of shape (n_samples, n_features) Data to be fitted by the OLS model. We expect the data to be centered. y : ndarray of shape (n_samples,) Associated target. 
positive : bool, default=False Restrict coefficients to be >= 0. This should be inline with the `positive` parameter from `LassoLarsIC`. Returns ------- noise_variance : float An estimator of the noise variance of an OLS model. """ if X.shape[0] <= X.shape[1] + self.fit_intercept: raise ValueError( f"You are using {self.__class__.__name__} in the case where the number " "of samples is smaller than the number of features. In this setting, " "getting a good estimate for the variance of the noise is not " "possible. Provide an estimate of the noise variance in the " "constructor." ) # X and y are already centered and we don't need to fit with an intercept ols_model = LinearRegression(positive=positive, fit_intercept=False) y_pred = ols_model.fit(X, y).predict(X) return np.sum((y - y_pred) ** 2) / ( X.shape[0] - X.shape[1] - self.fit_intercept )
LassoLarsIC
python
ray-project__ray
python/ray/serve/batching.py
{ "start": 19759, "end": 20862 }
class ____(Protocol, Generic[SelfType, T, R]): async def __call__(self, self_: SelfType, __batch: List[T], /) -> List[R]: ... @overload # Sync function for `batch` called WITHOUT arguments def batch(_sync_func: Callable[[List[T]], List[R]], /) -> Callable[[T], R]: ... @overload # Async function for `batch` called WITHOUT arguments def batch( _async_func: Callable[[List[T]], Coroutine[Any, Any, List[R]]], / ) -> Callable[[T], Coroutine[Any, Any, R]]: ... @overload # Sync method for `batch` called WITHOUT arguments def batch( _sync_meth: _SyncBatchingMethod[SelfType, T, R], / ) -> Callable[[SelfType, T], R]: ... @overload # Async method for `batch` called WITHOUT arguments def batch( _async_meth: _AsyncBatchingMethod[SelfType, T, R], / ) -> Callable[[SelfType, T], Coroutine[Any, Any, R]]: ... @overload # `batch` called WITH arguments def batch( _: Literal[None] = None, /, max_batch_size: int = 10, batch_wait_timeout_s: float = 0.01, max_concurrent_batches: int = 1, ) -> "_BatchDecorator": ...
_AsyncBatchingMethod
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_image07.py
{ "start": 315, "end": 958 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("image07.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet1 = workbook.add_worksheet() worksheet2 = workbook.add_worksheet() worksheet1.insert_image("E9", self.image_dir + "red.png") worksheet2.insert_image("E9", self.image_dir + "yellow.png") workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
pyca__cryptography
src/cryptography/hazmat/primitives/asymmetric/ec.py
{ "start": 608, "end": 1622 }
class ____: SECP192R1 = ObjectIdentifier("1.2.840.10045.3.1.1") SECP224R1 = ObjectIdentifier("1.3.132.0.33") SECP256K1 = ObjectIdentifier("1.3.132.0.10") SECP256R1 = ObjectIdentifier("1.2.840.10045.3.1.7") SECP384R1 = ObjectIdentifier("1.3.132.0.34") SECP521R1 = ObjectIdentifier("1.3.132.0.35") BRAINPOOLP256R1 = ObjectIdentifier("1.3.36.3.3.2.8.1.1.7") BRAINPOOLP384R1 = ObjectIdentifier("1.3.36.3.3.2.8.1.1.11") BRAINPOOLP512R1 = ObjectIdentifier("1.3.36.3.3.2.8.1.1.13") SECT163K1 = ObjectIdentifier("1.3.132.0.1") SECT163R2 = ObjectIdentifier("1.3.132.0.15") SECT233K1 = ObjectIdentifier("1.3.132.0.26") SECT233R1 = ObjectIdentifier("1.3.132.0.27") SECT283K1 = ObjectIdentifier("1.3.132.0.16") SECT283R1 = ObjectIdentifier("1.3.132.0.17") SECT409K1 = ObjectIdentifier("1.3.132.0.36") SECT409R1 = ObjectIdentifier("1.3.132.0.37") SECT571K1 = ObjectIdentifier("1.3.132.0.38") SECT571R1 = ObjectIdentifier("1.3.132.0.39")
EllipticCurveOID
python
doocs__leetcode
solution/1100-1199/1106.Parsing A Boolean Expression/Solution.py
{ "start": 0, "end": 697 }
class ____: def parseBoolExpr(self, expression: str) -> bool: stk = [] for c in expression: if c in 'tf!&|': stk.append(c) elif c == ')': t = f = 0 while stk[-1] in 'tf': t += stk[-1] == 't' f += stk[-1] == 'f' stk.pop() match stk.pop(): case '!': c = 't' if f else 'f' case '&': c = 'f' if f else 't' case '|': c = 't' if t else 'f' stk.append(c) return stk[0] == 't'
Solution
python
pypa__warehouse
warehouse/manage/forms.py
{ "start": 15351, "end": 17376 }
class ____(wtforms.Form): __params__ = ["add_existing_project", "existing_project_name", "new_project_name"] add_existing_project = wtforms.RadioField( "Add existing or new project?", choices=[("true", "Existing project"), ("false", "New project")], coerce=lambda string: True if string == "true" else False, default="true", validators=[wtforms.validators.InputRequired()], ) existing_project_name = wtforms.SelectField( "Select project", choices=[("", "Select project")], default="", # Set default to avoid error when there are no project choices. ) new_project_name = wtforms.StringField() def __init__(self, *args, project_choices, project_factory, **kwargs): super().__init__(*args, **kwargs) self.existing_project_name.choices += [ (name, name) for name in sorted(project_choices) ] self.project_factory = project_factory def validate_existing_project_name(self, field): if self.add_existing_project.data: if not field.data: raise wtforms.validators.StopValidation(_("Select project")) def validate_new_project_name(self, field): if not self.add_existing_project.data: if not field.data: raise wtforms.validators.StopValidation(_("Specify project name")) if not PROJECT_NAME_RE.match(field.data): raise wtforms.validators.ValidationError( _( "Start and end with a letter or numeral containing " "only ASCII numeric and '.', '_' and '-'." ) ) if field.data in self.project_factory: raise wtforms.validators.ValidationError( _( "This project name has already been used. " "Choose a different project name." ) )
AddOrganizationProjectForm
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/root/package.py
{ "start": 216, "end": 411 }
class ____(Package): homepage = "http://www.example.com" url = "http://www.example.com/root-1.0.tar.gz" version("1.0", md5="abcdef0123456789abcdef0123456789") depends_on("gmt")
Root
python
python-pillow__Pillow
src/PIL/PaletteFile.py
{ "start": 364, "end": 1216 }
class ____: """File handler for Teragon-style palette files.""" rawmode = "RGB" def __init__(self, fp: IO[bytes]) -> None: palette = [o8(i) * 3 for i in range(256)] while True: s = fp.readline() if not s: break if s.startswith(b"#"): continue if len(s) > 100: msg = "bad palette file" raise SyntaxError(msg) v = [int(x) for x in s.split()] try: [i, r, g, b] = v except ValueError: [i, r] = v g = b = r if 0 <= i <= 255: palette[i] = o8(r) + o8(g) + o8(b) self.palette = b"".join(palette) def getpalette(self) -> tuple[bytes, str]: return self.palette, self.rawmode
PaletteFile
python
coleifer__peewee
tests/postgres.py
{ "start": 28729, "end": 29894 }
class ____(ModelTestCase): database = db requires = [Register] def setUp(self): super(TestServerSide, self).setUp() with db.atomic(): for i in range(100): Register.create(value=i) def test_server_side_cursor(self): query = Register.select().order_by(Register.value) with self.assertQueryCount(1): data = [row.value for row in ServerSide(query)] self.assertEqual(data, list(range(100))) ss_query = ServerSide(query.limit(10), array_size=3) self.assertEqual([row.value for row in ss_query], list(range(10))) ss_query = ServerSide(query.where(SQL('1 = 0'))) self.assertEqual(list(ss_query), []) def test_lower_level_apis(self): query = Register.select(Register.value).order_by(Register.value) ssq = ServerSideQuery(query, array_size=10) curs_wrapper = ssq._execute(self.database) curs = curs_wrapper.cursor self.assertTrue(isinstance(curs, FetchManyCursor)) self.assertEqual(curs.fetchone(), (0,)) self.assertEqual(curs.fetchone(), (1,)) curs.close()
TestServerSide
python
PyCQA__pylint
tests/functional/i/iterable_context.py
{ "start": 3920, "end": 4058 }
class ____: field: list[int] | None = None def method(self): return [f + 1 for f in self.field] if self.field else None
Model
python
astropy__astropy
astropy/time/tests/test_quantity_interaction.py
{ "start": 4044, "end": 11078 }
class ____: """Test interaction of TimeDelta with Quantities""" def test_valid_quantity_input(self): """Test that TimeDelta can take quantity input.""" q = 500.25 * u.day dt1 = TimeDelta(q, format="jd") assert dt1.value == q.value dt2 = TimeDelta(q, format="sec") assert dt2.value == q.to_value(u.second) dt3 = TimeDelta(q) assert dt3.value == q.value def test_invalid_quantity_input(self): with pytest.raises(u.UnitsError): TimeDelta(2450000.0 * u.m, format="jd") with pytest.raises(u.UnitsError): Time(2450000.0 * u.dimensionless_unscaled, format="jd", scale="utc") with pytest.raises(TypeError): TimeDelta(100, format="sec") > 10.0 * u.m # noqa: B015 def test_quantity_output(self): q = 500.25 * u.day dt = TimeDelta(q) assert dt.to(u.day) == q assert dt.to_value(u.day) == q.value assert dt.to_value("day") == q.value assert dt.to(u.second).value == q.to_value(u.second) assert dt.to_value(u.second) == q.to_value(u.second) assert dt.to_value("s") == q.to_value(u.second) # Following goes through "format", but should be the same. assert dt.to_value("sec") == q.to_value(u.second) def test_quantity_output_errors(self): dt = TimeDelta(250.0, format="sec") with pytest.raises(u.UnitsError): dt.to(u.m) with pytest.raises(u.UnitsError): dt.to_value(u.m) with pytest.raises(u.UnitsError): dt.to_value(unit=u.m) with pytest.raises( ValueError, match="not one of the known formats.*failed to parse as a unit", ): dt.to_value("parrot") with pytest.raises(TypeError): dt.to_value("sec", unit=u.s) with pytest.raises( ValueError, match=r"cannot specify 'subfmt' and positional arg.*not a valid format", ): dt.to_value(u.s, subfmt="str") def test_valid_quantity_operations1(self): """Check adding/subtracting/comparing a time-valued quantity works with a TimeDelta. 
Addition/subtraction should give TimeDelta""" t0 = TimeDelta(106400.0, format="sec") q1 = 10.0 * u.second t1 = t0 + q1 assert isinstance(t1, TimeDelta) assert t1.value == t0.value + q1.to_value(u.second) q2 = 1.0 * u.day t2 = t0 - q2 assert isinstance(t2, TimeDelta) assert allclose_sec(t2.value, t0.value - q2.to_value(u.second)) # now comparisons assert t0 > q1 assert t0 < 1.0 * u.yr # and broadcasting q3 = np.arange(12.0).reshape(4, 3) * u.hour t3 = t0 + q3 assert isinstance(t3, TimeDelta) assert t3.shape == q3.shape assert allclose_sec(t3.value, t0.value + q3.to_value(u.second)) def test_valid_quantity_operations2(self): """Check that TimeDelta is treated as a quantity where possible.""" t0 = TimeDelta(100000.0, format="sec") f = 1.0 / t0 assert isinstance(f, u.Quantity) assert f.unit == 1.0 / u.day g = 10.0 * u.m / u.second**2 v = t0 * g assert isinstance(v, u.Quantity) assert u.allclose(v, t0.sec * g.value * u.m / u.second) q = np.log10(t0 / u.second) assert isinstance(q, u.Quantity) assert q.value == np.log10(t0.sec) s = 1.0 * u.m v = s / t0 assert isinstance(v, u.Quantity) assert u.allclose(v, 1.0 / t0.sec * u.m / u.s) t = 1.0 * u.s t2 = t0 * t assert isinstance(t2, u.Quantity) assert u.allclose(t2, t0.sec * u.s**2) t3 = [1] / t0 assert isinstance(t3, u.Quantity) assert u.allclose(t3, 1 / (t0.sec * u.s)) # broadcasting t1 = TimeDelta(np.arange(100000.0, 100012.0).reshape(6, 2), format="sec") f = np.array([1.0, 2.0]) * u.cycle * u.Hz phase = f * t1 assert isinstance(phase, u.Quantity) assert phase.shape == t1.shape assert u.allclose(phase, t1.sec * f.value * u.cycle) q = t0 * t1 assert isinstance(q, u.Quantity) assert np.all(q == t0.to(u.day) * t1.to(u.day)) q = t1 / t0 assert isinstance(q, u.Quantity) assert np.all(q == t1.to(u.day) / t0.to(u.day)) def test_valid_quantity_operations3(self): """Test a TimeDelta remains one if possible.""" t0 = TimeDelta(10.0, format="jd") q = 10.0 * u.one t1 = q * t0 assert isinstance(t1, TimeDelta) assert t1 == 
TimeDelta(100.0, format="jd") t2 = t0 * q assert isinstance(t2, TimeDelta) assert t2 == TimeDelta(100.0, format="jd") t3 = t0 / q assert isinstance(t3, TimeDelta) assert t3 == TimeDelta(1.0, format="jd") q2 = 1.0 * u.percent t4 = t0 * q2 assert isinstance(t4, TimeDelta) assert abs(t4 - TimeDelta(0.1, format="jd")) < 1.0 * u.ns q3 = 1.0 * u.hr / (36.0 * u.s) t5 = q3 * t0 assert isinstance(t4, TimeDelta) assert abs(t5 - TimeDelta(1000.0, format="jd")) < 1.0 * u.ns # Test multiplication with a unit. t6 = t0 * u.one assert isinstance(t6, TimeDelta) assert t6 == TimeDelta(10.0, format="jd") t7 = u.one * t0 assert isinstance(t7, TimeDelta) assert t7 == TimeDelta(10.0, format="jd") t8 = t0 * "" assert isinstance(t8, TimeDelta) assert t8 == TimeDelta(10.0, format="jd") t9 = "" * t0 assert isinstance(t9, TimeDelta) assert t9 == TimeDelta(10.0, format="jd") t10 = t0 / u.one assert isinstance(t10, TimeDelta) assert t6 == TimeDelta(10.0, format="jd") t11 = t0 / "" assert isinstance(t11, TimeDelta) assert t11 == TimeDelta(10.0, format="jd") t12 = t0 / [1] assert isinstance(t12, TimeDelta) assert t12 == TimeDelta(10.0, format="jd") t13 = [1] * t0 assert isinstance(t13, TimeDelta) assert t13 == TimeDelta(10.0, format="jd") def test_invalid_quantity_operations(self): """Check comparisons of TimeDelta with non-time quantities fails.""" with pytest.raises(TypeError): TimeDelta(100000.0, format="sec") > 10.0 * u.m # noqa: B015 def test_invalid_quantity_operations2(self): """Check that operations with non-time/quantity fail.""" td = TimeDelta(100000.0, format="sec") with pytest.raises(TypeError): td * object() with pytest.raises(TypeError): td / object() def test_invalid_quantity_broadcast(self): """Check broadcasting rules in interactions with Quantity.""" t0 = TimeDelta(np.arange(12.0).reshape(4, 3), format="sec") with pytest.raises(ValueError): t0 + np.arange(4.0) * u.s
TestTimeDeltaQuantity
python
dateutil__dateutil
tests/_common.py
{ "start": 3690, "end": 4433 }
class ____(TZContextBase): """ Context manager that temporarily sets the `TZ` variable (for use on *nix-like systems). Because the effect is local to the shell anyway, this will apply *unless* a guard is set. If you do not want the TZ environment variable set, you may set the ``DATEUTIL_MAY_NOT_CHANGE_TZ_VAR`` variable to a truthy value. """ _guard_var_name = "DATEUTIL_MAY_NOT_CHANGE_TZ_VAR" _guard_allows_change = False def get_current_tz(self): return os.environ.get('TZ', UnsetTz) def set_current_tz(self, tzval): if tzval is UnsetTz and 'TZ' in os.environ: del os.environ['TZ'] else: os.environ['TZ'] = tzval time.tzset()
TZEnvContext
python
google__jax
docs/sphinxext/source_include.py
{ "start": 2600, "end": 3521 }
class ____(SphinxDirective): has_content = False required_arguments = 2 optional_arguments = 1 option_spec = { "hl_lines": str, } def run(self): source_dir = Path(self.env.srcdir) filepath = source_dir / self.arguments[0] tag = self.arguments[1] lines_spec = self.arguments[2] if len(self.arguments) > 2 else None code = get_tagged_block(filepath, tag, lines_spec) literal = nodes.literal_block(code, code) literal["language"] = "python" if "hl_lines" in self.options: highlight_lines = parse_lines_spec(self.options["hl_lines"]) literal["highlight_args"] = {"hl_lines": highlight_lines} return [literal] def setup(app): app.add_directive("tagged-block", TaggedBlockDirective) # This dictionary fixes the "parallel reading" warning return { "version": "0.1", "parallel_read_safe": True, "parallel_write_safe": True, }
TaggedBlockDirective
python
scipy__scipy
scipy/linalg/tests/test_fblas.py
{ "start": 3412, "end": 3562 }
class ____(BaseAxpy): blas_func = fblas.zaxpy dtype = complex128 ################################################## # Test blas ?scal
TestZaxpy
python
PrefectHQ__prefect
tests/server/orchestration/test_core_policy.py
{ "start": 115631, "end": 121648 }
class ____: async def test_rejects_cancelling_scheduled_flow_and_sets_to_cancelled( self, session, initialize_orchestration, ): """Scheduled flows should skip the cancelling state and be set immediately to cancelled because they don't have infra to shut down. """ intended_transition = (states.StateType.SCHEDULED, states.StateType.CANCELLING) ctx = await initialize_orchestration( session, "flow", *intended_transition, ) async with BypassCancellingFlowRunsWithNoInfra( ctx, *intended_transition ) as ctx: await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.REJECT assert ctx.validated_state_type == states.StateType.CANCELLED async def test_rejects_cancelling_suspended_flow_and_sets_to_cancelled( self, session, initialize_orchestration, ): """Suspended flows should skip the cancelling state and be set immediately to cancelled because they don't have infra to shut down. """ intended_transition = (states.StateType.PAUSED, states.StateType.CANCELLING) ctx = await initialize_orchestration( session, "flow", *intended_transition, initial_details={"pause_reschedule": True}, ) async with BypassCancellingFlowRunsWithNoInfra( ctx, *intended_transition ) as ctx: await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.REJECT assert ctx.validated_state_type == states.StateType.CANCELLED async def test_rejects_cancelling_resuming_flow_and_sets_to_cancelled( self, session, initialize_orchestration, ): """Suspended flows should skip the cancelling state and be set immediately to cancelled because they don't have infra to shut down. 
""" intended_transition = (states.StateType.SCHEDULED, states.StateType.CANCELLING) ctx = await initialize_orchestration( session, "flow", *intended_transition, initial_state_name="Resuming", ) # Resuming flows have infra pids ctx.run.infrastructure_pid = "my-pid-42" async with BypassCancellingFlowRunsWithNoInfra( ctx, *intended_transition ) as ctx: await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.REJECT assert ctx.validated_state_type == states.StateType.CANCELLED async def test_accepts_cancelling_flow_run_with_pid( self, session, initialize_orchestration, ): """Flow runs awaiting retry should still go into a cancelling state as they have an associated pid, even though they are technically "Scheduled". """ intended_transition = (states.StateType.SCHEDULED, states.StateType.CANCELLING) # Make sure that the transition is rejected with the PID ctx = await initialize_orchestration( session, "flow", *intended_transition, ) async with BypassCancellingFlowRunsWithNoInfra( ctx, *intended_transition ) as ctx: await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.REJECT assert ctx.validated_state_type == states.StateType.CANCELLED # Check that providing the run a PID will allow the transition to continue ctx = await initialize_orchestration( session, "flow", *intended_transition, ) ctx.run.infrastructure_pid = "my-pid-42" async with BypassCancellingFlowRunsWithNoInfra( ctx, *intended_transition ) as ctx: await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.ACCEPT assert ctx.validated_state_type == states.StateType.CANCELLING async def test_accepts_cancelling_paused_flow_run_with_no_reschedule( self, session, initialize_orchestration, ): """Flow runs awaiting retry should still go into a cancelling state as they have an associated pid, even though they are technically "Scheduled". 
""" intended_transition = (states.StateType.PAUSED, states.StateType.CANCELLING) # Check that leaving pause_reschedule as False will allow the transition to continue ctx = await initialize_orchestration( session, "flow", *intended_transition, ) async with BypassCancellingFlowRunsWithNoInfra( ctx, *intended_transition ) as ctx: await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.ACCEPT assert ctx.validated_state_type == states.StateType.CANCELLING @pytest.mark.parametrize( "initial_state_type", [ s for s in ALL_ORCHESTRATION_STATES if s not in (states.StateType.SCHEDULED, states.StateType.PAUSED) ], ) async def test_allows_all_other_transitions( self, session, initialize_orchestration, initial_state_type, ): """All other transitions should be left alone by this policy.""" intended_transition = (initial_state_type, states.StateType.CANCELLING) ctx = await initialize_orchestration( session, "flow", *intended_transition, ) async with BypassCancellingFlowRunsWithNoInfra( ctx, *intended_transition ) as ctx: await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.ACCEPT assert ctx.validated_state_type == states.StateType.CANCELLING
TestBypassCancellingFlowRunsWithNoInfra
python
joke2k__faker
tests/providers/test_address.py
{ "start": 41991, "end": 42751 }
class ____: """Test hr_HR address provider methods""" def test_city_name(self, faker, num_samples): for _ in range(num_samples): city_name = faker.city_name() assert isinstance(city_name, str) assert city_name in HrHrAddressProvider.cities def test_street_name(self, faker, num_samples): for _ in range(num_samples): street_name = faker.street_name() assert isinstance(street_name, str) assert street_name in HrHrAddressProvider.streets def test_state(self, faker, num_samples): for _ in range(num_samples): state = faker.state() assert isinstance(state, str) assert state in HrHrAddressProvider.states
TestHrHr
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 321177, "end": 321642 }
class ____(sgqlc.types.Input): """Autogenerated input type of UnpinIssue""" __schema__ = github_schema __field_names__ = ("issue_id", "client_mutation_id") issue_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="issueId") """The ID of the issue to be unpinned""" client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation."""
UnpinIssueInput
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/variables/variables_test.py
{ "start": 29438, "end": 35434 }
class ____(test.TestCase): def testPartitionedVariable(self): with ops.Graph().as_default(): v0 = variables.Variable([0]) v1 = variables.Variable([1]) v0._set_save_slice_info( variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1])) v1._set_save_slice_info( variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1])) partitions = [2] # Pass variable_list as [v1, v0] to ensure they are properly # re-sorted to [v0, v1] based on their slice info offsets. partitioned_variable = variables.PartitionedVariable( name="two_vars", shape=[2], dtype=v0.dtype, variable_list=[v1, v0], partitions=partitions) concatenated = ops.convert_to_tensor(partitioned_variable) num_partitions = len(partitioned_variable) iterated_partitions = list(partitioned_variable) self.assertEqual(2, num_partitions) self.assertEqual([v0, v1], iterated_partitions) self.assertEqual([2], partitioned_variable.get_shape()) self.assertEqual([2], partitioned_variable.shape) self.assertEqual([2], concatenated.get_shape()) self.assertEqual([2], concatenated.shape) def testPartitionedVariableFailures(self): with ops.Graph().as_default(): with self.assertRaisesRegex(ValueError, "empty"): variables.PartitionedVariable( name="fail", shape=2, dtype=dtypes.int32, variable_list=[], partitions=[]) with self.assertRaisesRegex(ValueError, "must have a save_slice_info"): v0 = variables.Variable([0]) partitions = [1] variables.PartitionedVariable( name="two_vars", shape=[1], dtype=v0.dtype, variable_list=[v0], partitions=partitions) with self.assertRaisesRegex(ValueError, "full shapes must match"): v0 = variables.Variable([0]) v1 = variables.Variable([1]) v0._set_save_slice_info( variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1])) v1._set_save_slice_info( variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1])) partitions = [2] variables.PartitionedVariable( name="two_vars", shape=[3], dtype=v0.dtype, variable_list=[v1, v0], partitions=partitions) with self.assertRaisesRegex(ValueError, "must be positive"): v0 = 
variables.Variable([0]) v0._set_save_slice_info( variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1])) partitions = [0] variables.PartitionedVariable( name="two_vars", shape=[2], dtype=v0.dtype, variable_list=[v0], partitions=partitions) def testPartitionedVariableAssignments(self): with ops.Graph().as_default(), self.cached_session(): v0 = variables.Variable(initial_value=[0.0]) v1 = variables.Variable(initial_value=[1.0]) v2 = variables.Variable(initial_value=[20.0]) v3 = variables.Variable(initial_value=[30.0]) v0._set_save_slice_info( variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1])) v1._set_save_slice_info( variables.Variable.SaveSliceInfo(v1.name, [2], [1], [1])) v2._set_save_slice_info( variables.Variable.SaveSliceInfo(v2.name, [2], [0], [1])) v3._set_save_slice_info( variables.Variable.SaveSliceInfo(v3.name, [2], [1], [1])) partitions = [2] # Pass variable_list as [v1, v0] to ensure they are properly # re-sorted to [v0, v1] based on their slice info offsets. pv_0 = variables.PartitionedVariable( name="two_vars", shape=[2], dtype=v0.dtype, variable_list=[v0, v1], partitions=partitions) pv_1 = variables.PartitionedVariable( name="two_vars", shape=[2], dtype=v0.dtype, variable_list=[v2, v3], partitions=partitions) deltas_a = constant_op.constant([1.0, 2.0]) deltas_b = constant_op.constant([3.0, 4.0]) ones = array_ops.ones([2]) plus_delta = pv_0.assign_add(deltas_a) minus_delta = pv_0.assign_sub(deltas_b) assign_ones = pv_0.assign(ones) c_0 = constant_op.constant([2.0]) c_1 = constant_op.constant([3.0]) assign_list = pv_1.assign([c_0, c_1]) assign_part_value = pv_1.assign_add(assign_ones) assign_part_var = pv_1.assign_sub(pv_0) self.evaluate(variables.global_variables_initializer()) self.assertEqual([1.0], self.evaluate(plus_delta[0])) self.assertEqual([1.0], self.evaluate(v0)) self.assertEqual([3.0], self.evaluate(plus_delta[1])) self.assertEqual([3.0], self.evaluate(v1)) self.assertEqual([-2.0], self.evaluate(minus_delta[0])) 
self.assertEqual([-2.0], self.evaluate(v0)) self.assertEqual([-1.0], self.evaluate(minus_delta[1])) self.assertEqual([-1.0], self.evaluate(v1)) self.assertEqual([1.0], self.evaluate(assign_ones[0])) self.assertEqual([1.0], self.evaluate(v0)) self.assertEqual([1.0], self.evaluate(assign_ones[1])) self.assertEqual([1.0], self.evaluate(v1)) self.assertEqual([2.0], self.evaluate(assign_list[0])) self.assertEqual([2.0], self.evaluate(v2)) self.assertEqual([3.0], self.evaluate(assign_list[1])) self.assertEqual([3.0], self.evaluate(v3)) self.assertEqual([3.0], self.evaluate(assign_part_value[0])) self.assertEqual([3.0], self.evaluate(v2)) self.assertEqual([4.0], self.evaluate(assign_part_value[1])) self.assertEqual([4.0], self.evaluate(v3)) self.assertEqual([2.0], self.evaluate(assign_part_var[0])) self.assertEqual([2.0], self.evaluate(v2)) self.assertEqual([3.0], self.evaluate(assign_part_var[1])) self.assertEqual([3.0], self.evaluate(v3))
PartitionedVariableTest
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/instigation_logger.py
{ "start": 2285, "end": 3185 }
class ____(logging.Handler): """Persist logging records to an IO stream controlled by the ComputeLogManager.""" def __init__(self, write_stream: IO): self._write_stream = write_stream self._has_logged = False super().__init__() @property def has_logged(self): return self._has_logged def emit(self, record: logging.LogRecord): self._has_logged = True record_dict = record.__dict__ exc_info = record_dict.get("exc_info") if exc_info: record_dict["exc_info"] = "".join(traceback.format_exception(*exc_info)) try: self._write_stream.write(seven.json.dumps(record_dict) + "\n") except Exception: sys.stderr.write( f"Exception writing to logger event stream: {serializable_error_info_from_exc_info(sys.exc_info())}\n" )
CapturedLogHandler
python
tensorflow__tensorflow
tensorflow/python/debug/lib/debug_service_pb2_grpc.py
{ "start": 2466, "end": 5031 }
class ____(object): """EventListener: Receives Event protos, e.g., from debugged TensorFlow runtime(s). """ def SendEvents(self, request_iterator, context): """Client(s) can use this RPC method to send the EventListener Event protos. The Event protos can hold information such as: 1) intermediate tensors from a debugged graph being executed, which can be sent from DebugIdentity ops configured with grpc URLs. 2) GraphDefs of partition graphs, which can be sent from special debug ops that get executed immediately after the beginning of the graph execution. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SendTracebacks(self, request, context): """Send the tracebacks of ops in a Python graph definition. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SendSourceFiles(self, request, context): """Send a collection of source code files being debugged. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_EventListenerServicer_to_server(servicer, server): rpc_method_handlers = { 'SendEvents': grpc.stream_stream_rpc_method_handler( servicer.SendEvents, request_deserializer=tensorflow_dot_core_dot_util_dot_event__pb2.Event.FromString, response_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.SerializeToString, ), 'SendTracebacks': grpc.unary_unary_rpc_method_handler( servicer.SendTracebacks, request_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.CallTraceback.FromString, response_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.SerializeToString, ), 'SendSourceFiles': grpc.unary_unary_rpc_method_handler( servicer.SendSourceFiles, request_deserializer=tensorflow_dot_core_dot_protobuf_dot_debug__pb2.DebuggedSourceFiles.FromString, response_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'tensorflow.EventListener', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,))
EventListenerServicer
python
pandas-dev__pandas
pandas/tests/series/indexing/test_setitem.py
{ "start": 42417, "end": 42844 }
class ____(CoercionTest): # previously test_setitem_series_object in tests.indexing.test_coercion @pytest.fixture def obj(self): return Series(["a", "b", "c", "d"], dtype=object) @pytest.fixture def raises(self): return False @pytest.mark.parametrize( "val,exp_dtype,raises", [ (1, object, True), ("e", StringDtype(na_value=np.nan), False), ], )
TestCoercionObject
python
mlflow__mlflow
mlflow/store/tracking/dbmodels/models.py
{ "start": 25472, "end": 26391 }
class ____(Base): __tablename__ = "trace_tags" key = Column(String(250)) """ Tag key: `String` (limit 250 characters). """ value = Column(String(8000), nullable=True) """ Value associated with tag: `String` (limit 250 characters). Could be *null*. """ request_id = Column( String(50), ForeignKey("trace_info.request_id", ondelete="CASCADE"), nullable=False ) """ Request ID to which this tag belongs: *Foreign Key* into ``trace_info`` table. """ trace_info = relationship("SqlTraceInfo", backref=backref("tags", cascade="all")) """ SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlTraceInfo`. """ # Key is unique within a request_id __table_args__ = ( PrimaryKeyConstraint("request_id", "key", name="trace_tag_pk"), Index(f"index_{__tablename__}_request_id"), )
SqlTraceTag
python
joke2k__faker
tests/providers/test_automotive.py
{ "start": 8888, "end": 9199 }
class ____: def test_License_plate(self, faker, num_samples): pattern: Pattern = re.compile(r"{patterns}".format(patterns="|".join(faker.license_plate_regex_formats()))) for _ in range(num_samples): plate = faker.license_plate() assert pattern.fullmatch(plate)
TestPlPl
python
tensorflow__tensorflow
tensorflow/python/platform/benchmark.py
{ "start": 5115, "end": 5485 }
class ____(type): """The Benchmark class registrar. Used by abstract Benchmark class.""" def __new__(mcs, clsname, base, attrs): newclass = type.__new__(mcs, clsname, base, attrs) if not newclass.is_abstract(): GLOBAL_BENCHMARK_REGISTRY.add(newclass) return newclass @tf_export("__internal__.test.ParameterizedBenchmark", v1=[])
_BenchmarkRegistrar
python
networkx__networkx
networkx/algorithms/link_analysis/tests/test_pagerank.py
{ "start": 451, "end": 6233 }
class ____: @classmethod def setup_class(cls): G = nx.DiGraph() edges = [ (1, 2), (1, 3), # 2 is a dangling node (3, 1), (3, 2), (3, 5), (4, 5), (4, 6), (5, 4), (5, 6), (6, 4), ] G.add_edges_from(edges) cls.G = G cls.G.pagerank = dict( zip( sorted(G), [ 0.03721197, 0.05395735, 0.04150565, 0.37508082, 0.20599833, 0.28624589, ], ) ) cls.dangling_node_index = 1 cls.dangling_edges = {1: 2, 2: 3, 3: 0, 4: 0, 5: 0, 6: 0} cls.G.dangling_pagerank = dict( zip( sorted(G), [0.10844518, 0.18618601, 0.0710892, 0.2683668, 0.15919783, 0.20671497], ) ) @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) def test_pagerank(self, alg): G = self.G p = alg(G, alpha=0.9, tol=1.0e-08) for n in G: assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) nstart = {n: random.random() for n in G} p = alg(G, alpha=0.9, tol=1.0e-08, nstart=nstart) for n in G: assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) def test_pagerank_max_iter(self, alg): with pytest.raises(nx.PowerIterationFailedConvergence): alg(self.G, max_iter=0) def test_numpy_pagerank(self): G = self.G p = _pagerank_numpy(G, alpha=0.9) for n in G: assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) def test_google_matrix(self): G = self.G M = nx.google_matrix(G, alpha=0.9, nodelist=sorted(G)) _, ev = np.linalg.eig(M.T) p = ev[:, 0] / ev[:, 0].sum() for a, b in zip(p, self.G.pagerank.values()): assert a == pytest.approx(b, abs=1e-7) @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python, _pagerank_numpy)) def test_personalization(self, alg): G = nx.complete_graph(4) personalize = {0: 1, 1: 1, 2: 4, 3: 4} answer = { 0: 0.23246732615667579, 1: 0.23246732615667579, 2: 0.267532673843324, 3: 0.2675326738433241, } p = alg(G, alpha=0.85, personalization=personalize) for n in G: assert p[n] == pytest.approx(answer[n], abs=1e-4) @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python, nx.google_matrix)) def test_zero_personalization_vector(self, 
alg): G = nx.complete_graph(4) personalize = {0: 0, 1: 0, 2: 0, 3: 0} pytest.raises(ZeroDivisionError, alg, G, personalization=personalize) @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) def test_one_nonzero_personalization_value(self, alg): G = nx.complete_graph(4) personalize = {0: 0, 1: 0, 2: 0, 3: 1} answer = { 0: 0.22077931820379187, 1: 0.22077931820379187, 2: 0.22077931820379187, 3: 0.3376620453886241, } p = alg(G, alpha=0.85, personalization=personalize) for n in G: assert p[n] == pytest.approx(answer[n], abs=1e-4) @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) def test_incomplete_personalization(self, alg): G = nx.complete_graph(4) personalize = {3: 1} answer = { 0: 0.22077931820379187, 1: 0.22077931820379187, 2: 0.22077931820379187, 3: 0.3376620453886241, } p = alg(G, alpha=0.85, personalization=personalize) for n in G: assert p[n] == pytest.approx(answer[n], abs=1e-4) def test_dangling_matrix(self): """ Tests that the google_matrix doesn't change except for the dangling nodes. 
""" G = self.G dangling = self.dangling_edges dangling_sum = sum(dangling.values()) M1 = nx.google_matrix(G, personalization=dangling) M2 = nx.google_matrix(G, personalization=dangling, dangling=dangling) for i in range(len(G)): for j in range(len(G)): if i == self.dangling_node_index and (j + 1) in dangling: assert M2[i, j] == pytest.approx( dangling[j + 1] / dangling_sum, abs=1e-4 ) else: assert M2[i, j] == pytest.approx(M1[i, j], abs=1e-4) @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python, _pagerank_numpy)) def test_dangling_pagerank(self, alg): pr = alg(self.G, dangling=self.dangling_edges) for n in self.G: assert pr[n] == pytest.approx(self.G.dangling_pagerank[n], abs=1e-4) def test_empty(self): G = nx.Graph() assert nx.pagerank(G) == {} assert _pagerank_python(G) == {} assert _pagerank_numpy(G) == {} assert nx.google_matrix(G).shape == (0, 0) @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) def test_multigraph(self, alg): G = nx.MultiGraph() G.add_edges_from([(1, 2), (1, 2), (1, 2), (2, 3), (2, 3), ("3", 3), ("3", 3)]) answer = { 1: 0.21066048614468322, 2: 0.3395308825985378, 3: 0.28933951385531687, "3": 0.16046911740146227, } p = alg(G) for n in G: assert p[n] == pytest.approx(answer[n], abs=1e-4)
TestPageRank
python
numpy__numpy
numpy/_core/tests/test_errstate.py
{ "start": 470, "end": 4628 }
class ____: @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") @pytest.mark.skipif(arm_softfloat, reason='platform/cpu issue with FPU (gh-413,-15562)') def test_invalid(self): with np.errstate(all='raise', under='ignore'): a = -np.arange(3) # This should work with np.errstate(invalid='ignore'): np.sqrt(a) # While this should fail! with assert_raises(FloatingPointError): np.sqrt(a) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") @pytest.mark.skipif(arm_softfloat, reason='platform/cpu issue with FPU (gh-15562)') def test_divide(self): with np.errstate(all='raise', under='ignore'): a = -np.arange(3) # This should work with np.errstate(divide='ignore'): a // 0 # While this should fail! with assert_raises(FloatingPointError): a // 0 # As should this, see gh-15562 with assert_raises(FloatingPointError): a // a @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") @pytest.mark.skipif(arm_softfloat, reason='platform/cpu issue with FPU (gh-15562)') def test_errcall(self): count = 0 def foo(*args): nonlocal count count += 1 olderrcall = np.geterrcall() with np.errstate(call=foo): assert np.geterrcall() is foo with np.errstate(call=None): assert np.geterrcall() is None assert np.geterrcall() is olderrcall assert count == 0 with np.errstate(call=foo, invalid="call"): np.array(np.inf) - np.array(np.inf) assert count == 1 def test_errstate_decorator(self): @np.errstate(all='ignore') def foo(): a = -np.arange(3) a // 0 foo() def test_errstate_enter_once(self): errstate = np.errstate(invalid="warn") with errstate: pass # The errstate context cannot be entered twice as that would not be # thread-safe with pytest.raises(TypeError, match="Cannot enter `np.errstate` twice"): with errstate: pass @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") def test_asyncio_safe(self): # asyncio may not always work, let's assume its fine if missing # Pyodide/wasm doesn't support it. 
If this test makes problems, # it should just be skipped liberally (or run differently). asyncio = pytest.importorskip("asyncio") @np.errstate(invalid="ignore") def decorated(): # Decorated non-async function (it is not safe to decorate an # async one) assert np.geterr()["invalid"] == "ignore" async def func1(): decorated() await asyncio.sleep(0.1) decorated() async def func2(): with np.errstate(invalid="raise"): assert np.geterr()["invalid"] == "raise" await asyncio.sleep(0.125) assert np.geterr()["invalid"] == "raise" # for good sport, a third one with yet another state: async def func3(): with np.errstate(invalid="print"): assert np.geterr()["invalid"] == "print" await asyncio.sleep(0.11) assert np.geterr()["invalid"] == "print" async def main(): # simply run all three function multiple times: await asyncio.gather( func1(), func2(), func3(), func1(), func2(), func3(), func1(), func2(), func3(), func1(), func2(), func3()) loop = asyncio.new_event_loop() with np.errstate(invalid="warn"): asyncio.run(main()) assert np.geterr()["invalid"] == "warn" assert np.geterr()["invalid"] == "warn" # the default loop.close()
TestErrstate
python
kamyu104__LeetCode-Solutions
Python/number-of-substrings-containing-all-three-characters.py
{ "start": 29, "end": 354 }
class ____(object): def numberOfSubstrings(self, s): """ :type s: str :rtype: int """ result, left = 0, [-1]*3 for right, c in enumerate(s): left[ord(c)-ord('a')] = right result += min(left)+1 return result # Time: O(n) # Space: O(1)
Solution
python
keras-team__keras
keras/src/layers/reshaping/cropping2d.py
{ "start": 258, "end": 9044 }
class ____(Layer): """Cropping layer for 2D input (e.g. picture). It crops along spatial dimensions, i.e. height and width. Example: >>> input_shape = (2, 28, 28, 3) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> y = keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x) >>> y.shape (2, 24, 20, 3) Args: cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int: the same symmetric cropping is applied to height and width. - If tuple of 2 ints: interpreted as two different symmetric cropping values for height and width: `(symmetric_height_crop, symmetric_width_crop)`. - If tuple of 2 tuples of 2 ints: interpreted as `((top_crop, bottom_crop), (left_crop, right_crop))`. data_format: A string, one of `"channels_last"` (default) or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` corresponds to inputs with shape `(batch_size, height, width, channels)` while `"channels_first"` corresponds to inputs with shape `(batch_size, channels, height, width)`. When unspecified, uses `image_data_format` value found in your Keras config file at `~/.keras/keras.json` (if exists). Defaults to `"channels_last"`. Input shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch_size, height, width, channels)` - If `data_format` is `"channels_first"`: `(batch_size, channels, height, width)` Output shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch_size, cropped_height, cropped_width, channels)` - If `data_format` is `"channels_first"`: `(batch_size, channels, cropped_height, cropped_width)` """ def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs): super().__init__(**kwargs) self.data_format = backend.standardize_data_format(data_format) if isinstance(cropping, int): if cropping < 0: raise ValueError( "`cropping` cannot be negative. " f"Received: cropping={cropping}." 
) self.cropping = ((cropping, cropping), (cropping, cropping)) elif hasattr(cropping, "__len__"): if len(cropping) != 2: raise ValueError( "`cropping` should have two elements. " f"Received: cropping={cropping}." ) height_cropping = argument_validation.standardize_tuple( cropping[0], 2, "1st entry of cropping", allow_zero=True ) width_cropping = argument_validation.standardize_tuple( cropping[1], 2, "2nd entry of cropping", allow_zero=True ) self.cropping = (height_cropping, width_cropping) else: raise ValueError( "`cropping` should be either an int, a tuple of 2 ints " "(symmetric_height_crop, symmetric_width_crop), " "or a tuple of 2 tuples of 2 ints " "((top_crop, bottom_crop), (left_crop, right_crop)). " f"Received: cropping={cropping}." ) self.input_spec = InputSpec(ndim=4) def compute_output_shape(self, input_shape): if self.data_format == "channels_first": if ( input_shape[2] is not None and sum(self.cropping[0]) >= input_shape[2] ) or ( input_shape[3] is not None and sum(self.cropping[1]) >= input_shape[3] ): raise ValueError( "Values in `cropping` argument should be smaller than the " "corresponding spatial dimension of the input. Received: " f"input_shape={input_shape}, cropping={self.cropping}" ) return ( input_shape[0], input_shape[1], ( input_shape[2] - self.cropping[0][0] - self.cropping[0][1] if input_shape[2] is not None else None ), ( input_shape[3] - self.cropping[1][0] - self.cropping[1][1] if input_shape[3] is not None else None ), ) else: if ( input_shape[1] is not None and sum(self.cropping[0]) >= input_shape[1] ) or ( input_shape[2] is not None and sum(self.cropping[1]) >= input_shape[2] ): raise ValueError( "Values in `cropping` argument should be smaller than the " "corresponding spatial dimension of the input. 
Received: " f"input_shape={input_shape}, cropping={self.cropping}" ) return ( input_shape[0], ( input_shape[1] - self.cropping[0][0] - self.cropping[0][1] if input_shape[1] is not None else None ), ( input_shape[2] - self.cropping[1][0] - self.cropping[1][1] if input_shape[2] is not None else None ), input_shape[3], ) def call(self, inputs): if self.data_format == "channels_first": if ( inputs.shape[2] is not None and sum(self.cropping[0]) >= inputs.shape[2] ) or ( inputs.shape[3] is not None and sum(self.cropping[1]) >= inputs.shape[3] ): raise ValueError( "Values in `cropping` argument should be smaller than the " "corresponding spatial dimension of the input. Received: " f"inputs.shape={inputs.shape}, cropping={self.cropping}" ) if self.cropping[0][1] == self.cropping[1][1] == 0: return inputs[ :, :, self.cropping[0][0] :, self.cropping[1][0] : ] elif self.cropping[0][1] == 0: return inputs[ :, :, self.cropping[0][0] :, self.cropping[1][0] : -self.cropping[1][1], ] elif self.cropping[1][1] == 0: return inputs[ :, :, self.cropping[0][0] : -self.cropping[0][1], self.cropping[1][0] :, ] return inputs[ :, :, self.cropping[0][0] : -self.cropping[0][1], self.cropping[1][0] : -self.cropping[1][1], ] else: if ( inputs.shape[1] is not None and sum(self.cropping[0]) >= inputs.shape[1] ) or ( inputs.shape[2] is not None and sum(self.cropping[1]) >= inputs.shape[2] ): raise ValueError( "Values in `cropping` argument should be smaller than the " "corresponding spatial dimension of the input. 
Received: " f"inputs.shape={inputs.shape}, cropping={self.cropping}" ) if self.cropping[0][1] == self.cropping[1][1] == 0: return inputs[ :, self.cropping[0][0] :, self.cropping[1][0] :, : ] elif self.cropping[0][1] == 0: return inputs[ :, self.cropping[0][0] :, self.cropping[1][0] : -self.cropping[1][1], :, ] elif self.cropping[1][1] == 0: return inputs[ :, self.cropping[0][0] : -self.cropping[0][1], self.cropping[1][0] :, :, ] return inputs[ :, self.cropping[0][0] : -self.cropping[0][1], self.cropping[1][0] : -self.cropping[1][1], :, ] def get_config(self): config = {"cropping": self.cropping, "data_format": self.data_format} base_config = super().get_config() return {**base_config, **config}
Cropping2D
python
getsentry__sentry
src/sentry/testutils/cases.py
{ "start": 102015, "end": 104051 }
class ____(TestCase): @assume_test_silo_mode(SiloMode.CONTROL) def another_user(self, email_string, team=None, alt_email_string=None): user = self.create_user(email_string) if alt_email_string: UserEmail.objects.create(email=alt_email_string, user=user) assert UserEmail.objects.filter(user=user, email=alt_email_string).update( is_verified=True ) assert UserEmail.objects.filter(user=user, email=user.email).update(is_verified=True) self.create_member(user=user, organization=self.org, teams=[team] if team else None) return user def another_commit(self, order, name, user, repository, alt_email_string=None): commit = Commit.objects.create( key=name * 40, repository_id=repository.id, organization_id=self.org.id, author=CommitAuthor.objects.create( organization_id=self.org.id, name=user.name, email=alt_email_string or user.email, ), ) ReleaseCommit.objects.create( organization_id=self.org.id, release=self.release, commit=commit, order=order, ) return commit def another_release(self, name): release = Release.objects.create( version=name * 40, organization_id=self.project.organization_id, date_released=timezone.now(), ) release.add_project(self.project) release.add_project(self.project2) deploy = Deploy.objects.create( release=release, organization_id=self.org.id, environment_id=self.environment.id, ) return release, deploy def get_notification_uuid(self, text: str) -> str: # Allow notification\\_uuid and notification_uuid result = re.search("notification.*_uuid=([a-zA-Z0-9-]+)", text) assert result is not None return result[1]
ActivityTestCase
python
tensorflow__tensorflow
tensorflow/python/keras/engine/training_utils_v1.py
{ "start": 68807, "end": 76022 }
class ____(object): """Encapsulates model inputs. Allows for transforming model inputs while keeping the same structure. """ def __init__(self, inputs): self._inputs = inputs self._is_dict = isinstance(self._inputs, dict) self._is_single_input = not isinstance(self._inputs, (list, tuple, dict)) self._flattened_inputs = [] self._input_names = [] if self._is_dict: for k in sorted(self._inputs.keys()): self._flattened_inputs.append(self._inputs[k]) self._input_names.append(k) else: self._flattened_inputs = nest.flatten(self._inputs) self._input_names = [ 'input_%d' % (i + 1) for i in range(len(self._flattened_inputs)) ] def get_input_names(self): """Returns keys to name inputs by. In case inputs provided were a list, tuple or single entry, we make up a key 'input_%d'. For dictionary case, we return a sorted list of keys. """ return self._input_names def get_symbolic_inputs(self, return_single_as_list=False): """Returns inputs to be set as self.inputs for a model.""" # TODO(karmel): There is a side-effect here where what you get # with as_list and as_dict depends on whether you have called this # method first, since it modifies in place. for i, (k, v) in enumerate(zip(self._input_names, self._flattened_inputs)): if isinstance(v, (list, float, int)): v = numpy_compat.np_asarray(v) if v.ndim == 1: v = np.expand_dims(v, 1) if isinstance(v, np.ndarray): # We fix the placeholder shape except the batch size. # This is suboptimal, but it is the best we can do with the info # we have. The user should call `model._set_inputs(placeholders)` # to specify custom placeholders if the need arises. 
shape = (None,) + tuple(v.shape[1:]) if shape == (None,): shape = (None, 1) dtype = dtypes.as_dtype(v.dtype) if dtype.is_floating: dtype = backend.floatx() v = backend.placeholder(shape=shape, name=k, dtype=dtype) elif isinstance(v, tensor_spec.TensorSpec): shape = (None,) + tuple(v.shape.as_list()[1:]) if shape == (None,): shape = (None, 1) v = backend.placeholder(shape=shape, name=k, dtype=v.dtype) self._flattened_inputs[i] = v if self._is_dict: return dict(zip(self._input_names, self._flattened_inputs)) if self._is_single_input and not return_single_as_list: return self._flattened_inputs[0] return self._flattened_inputs def as_dict(self): """An iterable over a dictionary version of inputs.""" for k, v in zip(self._input_names, self._flattened_inputs): yield k, v def as_list(self): """Returning the inputs as a list.""" return self._flattened_inputs # Allow use of methods not exposed to the user. # pylint: disable=protected-access # pylint: enable=protected-access def generic_output_names(outputs_list): return ['output_%d' % (i + 1) for i in range(len(outputs_list))] def should_run_validation(validation_freq, epoch): """Checks if validation should be run this epoch. Args: validation_freq: Integer or list. If an integer, specifies how many training epochs to run before a new validation run is performed. If a list, specifies the epochs on which to run validation. epoch: Integer, the number of the training epoch just completed. Returns: Bool, True if validation should be run. Raises: ValueError: if `validation_freq` is an Integer and less than 1, or if it is neither an Integer nor a Sequence. """ # `epoch` is 0-indexed internally but 1-indexed in the public API. 
one_indexed_epoch = epoch + 1 if isinstance(validation_freq, int): if validation_freq < 1: raise ValueError('`validation_freq` can not be less than 1.') return one_indexed_epoch % validation_freq == 0 if not isinstance(validation_freq, collections.abc.Container): raise ValueError('`validation_freq` must be an Integer or ' '`collections.abc.Container` (e.g. list, tuple, etc.)') return one_indexed_epoch in validation_freq def split_training_and_validation_data(x, y, sample_weights, validation_split): """Split input data into train/eval section based on validation_split.""" if has_symbolic_tensors(x): raise ValueError('If your data is in the form of symbolic tensors, ' 'you cannot use `validation_split`.') if hasattr(x[0], 'shape'): split_at = int(x[0].shape[0] * (1. - validation_split)) else: split_at = int(len(x[0]) * (1. - validation_split)) x, val_x = (generic_utils.slice_arrays(x, 0, split_at), generic_utils.slice_arrays(x, split_at)) y, val_y = (generic_utils.slice_arrays(y, 0, split_at), generic_utils.slice_arrays(y, split_at)) if sample_weights: sample_weights, val_sample_weights = ( generic_utils.slice_arrays(sample_weights, 0, split_at), generic_utils.slice_arrays(sample_weights, split_at), ) else: val_sample_weights = None return x, y, sample_weights, val_x, val_y, val_sample_weights def unpack_validation_data(validation_data, raise_if_ambiguous=True): """Unpack validation data based input type. The validation data is not touched if its dataset or dataset iterator. For other type of input (Numpy or tensor), it will be unpacked into tuple of 3 which is x, y and sample weights. Args: validation_data: dataset, dataset iterator, or numpy, tensor tuple. raise_if_ambiguous: boolean on whether to fail if validation_data cannot be parsed. Otherwise simply return validation_data, None, None and defer the decision to the caller. Returns: tuple of 3, (x, y, sample_weights) for numpy and tensor input. 
""" if (isinstance(validation_data, (iterator_ops.Iterator, iterator_ops.IteratorBase, data_types.DatasetV2, data_utils.Sequence)) or not hasattr(validation_data, '__len__')): val_x = validation_data val_y = None val_sample_weight = None elif len(validation_data) == 2: try: val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence val_sample_weight = None except ValueError: val_x, val_y, val_sample_weight = validation_data, None, None elif len(validation_data) == 3: try: val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence except ValueError: val_x, val_y, val_sample_weight = validation_data, None, None else: if raise_if_ambiguous: raise ValueError( 'When passing a `validation_data` argument, ' 'it must contain either 2 items (x_val, y_val), ' 'or 3 items (x_val, y_val, val_sample_weights), ' 'or alternatively it could be a dataset or a ' 'dataset or a dataset iterator. ' 'However we received `validation_data=%s`' % validation_data) val_x, val_y, val_sample_weight = validation_data, None, None return val_x, val_y, val_sample_weight
ModelInputs
python
facebook__pyre-check
client/tests/timer_test.py
{ "start": 411, "end": 1337 }
class ____(testslide.TestCase): def test_resolution(self) -> None: base_interval = 1000000000 ticker = regular_interval_ticker(base_interval) timer = Timer(get_current_time_in_nanosecond=lambda: next(ticker)) self.assertEqual(timer.stop_in_nanosecond(), base_interval) self.assertEqual(timer.stop_in_microsecond(), 2 * base_interval / 1000.0) self.assertEqual(timer.stop_in_millisecond(), 3 * base_interval / 1000000.0) self.assertEqual(timer.stop_in_second(), 4 * base_interval / 1000000000.0) def test_reset(self) -> None: ticker = regular_interval_ticker(100) timer = Timer(get_current_time_in_nanosecond=lambda: next(ticker)) self.assertEqual( timer.stop_in_nanosecond(), 100, ) timer.reset() self.assertEqual( timer.stop_in_nanosecond(), 100, )
TimerTest
python
pytorch__pytorch
test/optim/test_lrscheduler.py
{ "start": 1055, "end": 101450 }
class ____(TestCase): class SchedulerTestNet(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) def forward(self, x): return self.conv2(F.relu(self.conv1(x))) class LambdaLRTestObject: def __init__(self, value): self.value = value def __call__(self, epoch): return self.value * epoch def __eq__(self, other): if isinstance(other, self.__class__): return self.__dict__ == other.__dict__ else: return False exact_dtype = True def setUp(self): super().setUp() self.net = self.SchedulerTestNet() self.opt = SGD( [ {"params": self.net.conv1.parameters()}, {"params": self.net.conv2.parameters(), "lr": torch.tensor(0.5)}, ], lr=0.05, ) def _check_warning_is_epoch_deprecation_warning(self, w, *, num_warnings: int = 1): """This function swallows the epoch deprecation warning which is produced when we call `scheduler.step(epoch)` with some not `None` value of `epoch`. this is deprecated, and this function will need to be removed/updated when the schedulers no longer accept the parameter at all. 
""" self.assertEqual(len(w), num_warnings) for warning in w: self.assertEqual(len(warning.message.args), 1) self.assertEqual(warning.message.args[0], EPOCH_DEPRECATION_WARNING) def test_error_when_getlr_has_epoch(self): class MultiStepLR(torch.optim.lr_scheduler.LRScheduler): def __init__(self, optimizer, gamma, milestones, last_epoch=-1): self.init_lr = [group["lr"] for group in optimizer.param_groups] self.gamma = gamma self.milestones = milestones super().__init__(optimizer, last_epoch) def get_lr(self, step): global_step = self.last_epoch gamma_power = ( [0] + [i + 1 for i, m in enumerate(self.milestones) if global_step >= m] )[-1] return [init_lr * (self.gamma**gamma_power) for init_lr in self.init_lr] optimizer = SGD([torch.rand(1)], lr=1) with self.assertRaises(TypeError): scheduler = MultiStepLR(optimizer, gamma=1, milestones=[10, 20]) @skipIfTorchDynamo( "Torchdynamo keeps references to optim in the guards and the stack of the graph break frames" ) def test_no_cyclic_references(self): import gc param = Parameter(torch.empty(10)) optim = SGD([param], lr=0.5) scheduler = LambdaLR(optim, lambda epoch: 1.0) del scheduler self.assertTrue( len(gc.get_referrers(optim)) == 0, "Optimizer should contain no cyclic references", ) gc.collect() del optim self.assertEqual( gc.collect(), 0, msg="Optimizer should be garbage-collected on __del__" ) @skipIfTorchDynamo( "Torchdynamo keeps references to optim in the guards and the stack of the graph break frames" ) def test_no_cyclic_references_in_step(self): import gc import weakref def run(): param = torch.empty(10, requires_grad=True) optim = SGD(params=[param], lr=0.5) scheduler = LambdaLR(optim, lambda epoch: 1.0) param.sum().backward() optim.step() scheduler.step() return weakref.ref(scheduler) # To ensure that there are no reference cycles in scheduler, # we need to turn off the garbage collector. Since gc will # automatically collect unreachable objects. 
gc.disable() ref = run() assert ref() is None gc.enable() # restore def test_old_pattern_warning(self): epochs = 35 with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") # allow any warning to be raised scheduler = StepLR(self.opt, gamma=0.1, step_size=3) self.assertTrue(len(ws) == 0, "No warning should be raised") def old_pattern(): for _ in range(epochs): scheduler.step() self.opt.step() self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern) def test_old_pattern_warning_with_arg(self): epochs = 35 with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") # allow any warning to be raised scheduler = StepLR(self.opt, gamma=0.1, step_size=3) self.assertTrue(len(ws) == 0, "No warning should be raised") def old_pattern2(): for _ in range(epochs): scheduler.step() self.opt.step() self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern2) def test_old_pattern_warning_resuming(self): epochs = 35 for group in self.opt.param_groups: group["initial_lr"] = 0.01 with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") # allow any warning to be raised scheduler = StepLR(self.opt, gamma=0.1, step_size=3, last_epoch=10) self.assertTrue(len(ws) == 0, "No warning should be raised") def old_pattern(): for _ in range(epochs): scheduler.step() self.opt.step() self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern) def test_old_pattern_warning_resuming_with_arg(self): epochs = 35 for group in self.opt.param_groups: group["initial_lr"] = 0.01 with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") # allow any warning to be raised scheduler = StepLR(self.opt, gamma=0.1, step_size=3, last_epoch=10) self.assertTrue(len(ws) == 0, "No warning should be raised") def old_pattern2(): for _ in range(epochs): scheduler.step() self.opt.step() self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern2) def 
test_old_pattern_warning_with_overridden_optim_step(self): epochs = 35 for group in self.opt.param_groups: group["initial_lr"] = 0.01 with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") # allow any warning to be raised scheduler = StepLR(self.opt, gamma=0.1, step_size=3, last_epoch=10) self.assertTrue(len(ws) == 0, "No warning should be raised") # emulate use-case with optimizer.step overridden import types old_step = self.opt.step def new_step(o, *args, **kwargs): retval = old_step(*args, **kwargs) return retval self.opt.step = types.MethodType(new_step, self.opt) def old_pattern2(): for _ in range(epochs): scheduler.step() self.opt.step() self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern2) def test_new_pattern_no_warning(self): epochs = 35 with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") # allow any warning to be raised scheduler = StepLR(self.opt, gamma=0.1, step_size=3) self.assertTrue(len(ws) == 0, "No warning should be raised") with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") # allow any warning to be raised for _ in range(epochs): self.opt.step() scheduler.step() self.assertTrue(len(ws) == 0, "No warning should be raised") def test_new_pattern_no_warning_with_arg(self): epochs = 35 with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") # allow any warning to be raised scheduler = StepLR(self.opt, gamma=0.1, step_size=3) self.assertTrue(len(ws) == 0, "No warning should be raised") with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") # allow any warning to be raised for _ in range(epochs): self.opt.step() scheduler.step() self.assertTrue(len(ws) == 0, "No warning should be raised") def test_new_pattern_no_warning_with_overridden_optim_step(self): epochs = 35 with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") # allow any warning to be raised scheduler = 
StepLR(self.opt, gamma=0.1, step_size=3) self.assertTrue(len(ws) == 0, "No warning should be raised") # emulate use-case with optimizer.step overridden import types old_step = self.opt.step def new_step(o, *args, **kwargs): retval = old_step(*args, **kwargs) return retval self.opt.step = types.MethodType(new_step, self.opt) def new_pattern(): for _ in range(epochs): self.opt.step() scheduler.step() self.assertWarnsRegex( UserWarning, r"`optimizer.step\(\)` has been overridden", new_pattern ) def _test_lr_is_constant_for_constant_epoch(self, scheduler): l = [] for _ in range(10): scheduler.optimizer.step() with warnings.catch_warnings(record=True) as w: scheduler.step(2) self._check_warning_is_epoch_deprecation_warning(w) l.append(self.opt.param_groups[0]["lr"]) self.assertEqual(min(l), max(l)) def test_step_lr_is_constant_for_constant_epoch(self): scheduler = StepLR(self.opt, 2) self._test_lr_is_constant_for_constant_epoch(scheduler) def test_exponential_lr_is_constant_for_constant_epoch(self): scheduler = ExponentialLR(self.opt, gamma=0.9) self._test_lr_is_constant_for_constant_epoch(scheduler) def test_constantlr_is_constant_for_constant_epoch(self): scheduler = ConstantLR(self.opt) self._test_lr_is_constant_for_constant_epoch(scheduler) def test_linear_linearlr_is_constant_for_constant_epoch(self): scheduler = LinearLR(self.opt) self._test_lr_is_constant_for_constant_epoch(scheduler) def test_polynomial_lr_is_constant_for_constant_epoch(self): scheduler = PolynomialLR(self.opt, power=0.9) self._test_lr_is_constant_for_constant_epoch(scheduler) def test_step_lr(self): # lr = 0.05 if epoch < 3 # lr = 0.005 if 30 <= epoch < 6 # lr = 0.0005 if epoch >= 9 epochs = 10 single_targets = [0.05] * 3 + [0.005] * 3 + [0.0005] * 3 + [0.00005] * 3 targets = [single_targets, [x * epochs for x in single_targets]] scheduler = StepLR(self.opt, gamma=0.1, step_size=3) self._test(scheduler, targets, epochs) def test_get_last_lr_step_lr(self): from torch.nn import Parameter epochs = 
10 optimizer = SGD([Parameter(torch.randn(2, 2, requires_grad=True))], 0.1) targets = [[0.1] * 3 + [0.01] * 3 + [0.001] * 3 + [0.0001]] scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 3, gamma=0.1) self._test_get_last_lr(scheduler, targets, epochs) def test_get_last_lr_multi_step_lr(self): # lr = 0.05 if epoch < 2 # lr = 0.005 if 2 <= epoch < 5 # lr = 0.0005 if 5 <= epoch < 9 # lr = 0.00005 if 9 <= epoch epochs = 10 single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 1 targets = [single_targets, [x * epochs for x in single_targets]] scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9]) self._test_get_last_lr(scheduler, targets, epochs) def test_raise_error_when_last_epoch_is_greater_than_0_and_initial_lr_is_not_specified( self, ): optimizer = SGD([Parameter(torch.randn(2, 2, requires_grad=True))], 0.1) with self.assertRaisesRegex( KeyError, r"param \'initial_lr\' is not specified in param_groups\[0\] when resuming scheduler with last_epoch >= 0", ): StepLR(optimizer, step_size=3, gamma=0.1, last_epoch=1) def test_multi_step_lr(self): # lr = 0.05 if epoch < 2 # lr = 0.005 if 2 <= epoch < 5 # lr = 0.0005 if epoch < 9 # lr = 0.00005 if epoch >= 9 epochs = 10 single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 3 targets = [single_targets, [x * epochs for x in single_targets]] scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9]) self._test(scheduler, targets, epochs) def test_multi_step_lr_with_epoch(self): # lr = 0.05 if epoch < 2 # lr = 0.005 if 2 <= epoch < 5 # lr = 0.0005 if epoch < 9 # lr = 0.00005 if epoch >= 9 epochs = 10 single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 3 targets = [single_targets, [x * epochs for x in single_targets]] scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9]) self._test_with_epoch(scheduler, targets, epochs) def test_get_last_lr_constantlr(self): # lr = 0.025 if epoch < 5 # lr = 0.005 if 5 <= epoch epochs = 10 single_targets = 
[0.025] * 5 + [0.05] * 5 targets = [single_targets, [x * epochs for x in single_targets]] scheduler = ConstantLR(self.opt, factor=1.0 / 2, total_iters=5) self._test_get_last_lr(scheduler, targets, epochs) def test_get_last_lr_linearlr(self): # lr = 0.025 if epoch == 0 # lr = 0.03125 if epoch == 1 # lr = 0.0375 if epoch == 2 # lr = 0.04375 if epoch == 3 # lr = 0.005 if 4 <= epoch epochs = 10 start_factor = 1.0 / 4 end_factor = 3.0 / 5 iters = 4 interpolation = [ start_factor + i * (end_factor - start_factor) / iters for i in range(iters) ] single_targets = [x * 0.05 for x in interpolation] + [0.05 * end_factor] * ( epochs - iters ) targets = [single_targets, [x * epochs for x in single_targets]] scheduler = LinearLR( self.opt, start_factor=start_factor, end_factor=end_factor, total_iters=iters, ) self._test_get_last_lr(scheduler, targets, epochs) def test_constantlr(self): # lr = 0.025 if epoch < 5 # lr = 0.005 if 5 <= epoch epochs = 10 single_targets = [0.025] * 5 + [0.05] * 5 targets = [single_targets, [x * epochs for x in single_targets]] scheduler = ConstantLR(self.opt, factor=1.0 / 2, total_iters=5) self._test(scheduler, targets, epochs) def test_linearlr(self): # lr = 0.025 if epoch == 0 # lr = 0.03125 if epoch == 1 # lr = 0.0375 if epoch == 2 # lr = 0.04375 if epoch == 3 # lr = 0.005 if 4 <= epoch epochs = 10 start_factor = 1.0 / 2 iters = 4 interpolation = [ start_factor + i * (1 - start_factor) / iters for i in range(iters) ] single_targets = [x * 0.05 for x in interpolation] + [0.05] * (epochs - iters) targets = [single_targets, [x * epochs for x in single_targets]] scheduler = LinearLR(self.opt, start_factor=start_factor, total_iters=iters) self._test(scheduler, targets, epochs) def test_linearlr_start_factor_limits1(self): start_factor = 0.0 iters = 4 with self.assertRaises(ValueError): LinearLR(self.opt, start_factor=start_factor, total_iters=iters) def test_linearlr_start_factor_limits2(self): start_factor = 1.1 iters = 4 with 
self.assertRaises(ValueError): LinearLR(self.opt, start_factor=start_factor, total_iters=iters) def test_constantlr_with_epoch(self): # lr = 0.025 if epoch < 5 # lr = 0.005 if 5 <= epoch epochs = 10 single_targets = [0.025] * 5 + [0.05] * 5 targets = [single_targets, [x * epochs for x in single_targets]] scheduler = ConstantLR(self.opt, factor=1.0 / 2, total_iters=5) self._test_with_epoch(scheduler, targets, epochs) def test_linearlr_with_epoch(self): # lr = 0.025 if epoch == 0 # lr = 0.03125 if epoch == 1 # lr = 0.0375 if epoch == 2 # lr = 0.04375 if epoch == 3 # lr = 0.005 if 4 <= epoch epochs = 10 start_factor = 1.0 / 2 end_factor = 1.0 iters = 4 interpolation = [ start_factor + i * (end_factor - start_factor) / iters for i in range(iters) ] single_targets = [x * 0.05 for x in interpolation] + [0.05] * (epochs - iters) targets = [single_targets, [x * epochs for x in single_targets]] scheduler = LinearLR(self.opt, start_factor=start_factor, total_iters=iters) self._test_with_epoch(scheduler, targets, epochs) def test_exp_lr(self): epochs = 10 single_targets = [0.05 * (0.9**x) for x in range(epochs)] targets = [single_targets, [x * epochs for x in single_targets]] scheduler = ExponentialLR(self.opt, gamma=0.9) self._test(scheduler, targets, epochs) def test_poly_lr(self): epochs = 10 power = 0.9 total_iters = 5 single_targets = [ (1.0 - x / total_iters) ** power * 0.05 for x in range(total_iters) ] + [0.0] * (epochs - total_iters) targets = [single_targets, [x * epochs for x in single_targets]] scheduler = PolynomialLR(self.opt, power=power, total_iters=total_iters) self._test(scheduler, targets, epochs) def test_cos_anneal_lr(self): epochs = 10 eta_min = 1e-10 single_targets = [ eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2 for x in range(epochs) ] targets = [single_targets, [x * epochs for x in single_targets]] scheduler = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min) self._test(scheduler, targets, epochs) def 
test_closed_form_step_lr(self): scheduler = StepLR(self.opt, gamma=0.1, step_size=3) closed_form_scheduler = StepLR(self.opt, gamma=0.1, step_size=3) self._test_against_closed_form(scheduler, closed_form_scheduler, 20) def test_closed_form_linearlr(self): scheduler = LinearLR( self.opt, start_factor=1.0 / 3, end_factor=0.7, total_iters=4 ) closed_form_scheduler = LinearLR( self.opt, start_factor=1.0 / 3, end_factor=0.7, total_iters=4 ) self._test_against_closed_form(scheduler, closed_form_scheduler, 20) def test_closed_form_constantlr(self): scheduler = ConstantLR(self.opt, factor=1.0 / 3, total_iters=4) closed_form_scheduler = ConstantLR(self.opt, factor=1.0 / 3, total_iters=4) self._test_against_closed_form(scheduler, closed_form_scheduler, 20) def test_closed_form_multi_step_lr(self): scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9]) closed_form_scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9]) self._test_against_closed_form(scheduler, closed_form_scheduler, 20) def test_closed_form_exp_lr(self): scheduler = ExponentialLR(self.opt, gamma=0.9) closed_form_scheduler = ExponentialLR(self.opt, gamma=0.9) self._test_against_closed_form(scheduler, closed_form_scheduler, 20) def test_closed_form_poly_lr(self): scheduler = PolynomialLR(self.opt, power=0.9) closed_form_scheduler = PolynomialLR(self.opt, power=0.9) self._test_against_closed_form(scheduler, closed_form_scheduler, 20) def test_closed_form_cos_anneal_lr(self): eta_min = 1e-10 epochs = 20 T_max = 5 scheduler = CosineAnnealingLR(self.opt, T_max=T_max, eta_min=eta_min) closed_form_scheduler = CosineAnnealingLR( self.opt, T_max=T_max, eta_min=eta_min ) self._test_against_closed_form(scheduler, closed_form_scheduler, epochs) def test_cos_anneal_lr_continue(self): eta_min = 0.1 T_max = 5 scheduler = CosineAnnealingLR(self.opt, T_max=T_max, eta_min=eta_min) self.opt.step() scheduler.step() original_lrs = scheduler._last_lr new_scheduler = CosineAnnealingLR( self.opt, T_max=T_max, 
eta_min=eta_min, last_epoch=0 ) new_lrs = new_scheduler._last_lr torch.testing.assert_close(original_lrs, new_lrs, rtol=1e-4, atol=1e-5) def test_reduce_lr_on_plateau1(self): epochs = 10 for param_group in self.opt.param_groups: param_group["lr"] = 0.5 targets = [[0.5] * 20] metrics = [10 - i * 0.0167 for i in range(20)] scheduler = ReduceLROnPlateau( self.opt, threshold_mode="abs", mode="min", threshold=0.01, patience=5, cooldown=5, ) self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs) def test_reduce_lr_on_plateau2(self): epochs = 22 for param_group in self.opt.param_groups: param_group["lr"] = 0.5 targets = [[0.5] * 6 + [0.05] * 7 + [0.005] * 7 + [0.0005] * 2] metrics = [10 - i * 0.0165 for i in range(22)] scheduler = ReduceLROnPlateau( self.opt, patience=5, cooldown=0, threshold_mode="abs", mode="min", threshold=0.1, ) self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs) def test_reduce_lr_on_plateau3(self): epochs = 22 for param_group in self.opt.param_groups: param_group["lr"] = 0.5 targets = [[0.5] * (2 + 6) + [0.05] * (5 + 6) + [0.005] * 4] metrics = [-0.8] * 2 + [-0.234] * 20 scheduler = ReduceLROnPlateau( self.opt, mode="max", patience=5, cooldown=5, threshold_mode="abs" ) self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs) def test_reduce_lr_on_plateau4(self): epochs = 20 for param_group in self.opt.param_groups: param_group["lr"] = 0.5 targets = [[0.5] * 20] metrics = [1.5 * (1.025**i) for i in range(20)] # 1.025 > 1.1**0.25 scheduler = ReduceLROnPlateau( self.opt, mode="max", patience=3, threshold_mode="rel", threshold=0.1 ) self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs) def test_reduce_lr_on_plateau5(self): epochs = 20 for param_group in self.opt.param_groups: param_group["lr"] = 0.5 targets = [[0.5] * 6 + [0.05] * (5 + 6) + [0.005] * 4] metrics = [1.5 * (1.005**i) for i in range(20)] scheduler = ReduceLROnPlateau( self.opt, mode="max", threshold_mode="rel", threshold=0.1, 
patience=5, cooldown=5, ) self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs) def test_reduce_lr_on_plateau6(self): epochs = 20 for param_group in self.opt.param_groups: param_group["lr"] = 0.5 targets = [[0.5] * 20] metrics = [1.5 * (0.85**i) for i in range(20)] scheduler = ReduceLROnPlateau( self.opt, mode="min", threshold_mode="rel", threshold=0.1 ) self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs) def test_reduce_lr_on_plateau7(self): epochs = 20 for param_group in self.opt.param_groups: param_group["lr"] = 0.5 targets = [[0.5] * 6 + [0.05] * (5 + 6) + [0.005] * 4] metrics = [1] * 7 + [0.6] + [0.5] * 12 scheduler = ReduceLROnPlateau( self.opt, mode="min", threshold_mode="rel", threshold=0.1, patience=5, cooldown=5, ) self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs) def test_reduce_lr_on_plateau8(self): epochs = 20 for param_group in self.opt.param_groups: param_group["lr"] = 0.5 targets = [[0.5] * 6 + [0.4] * 14, [0.5] * 6 + [0.3] * 14] metrics = [1.5 * (1.005**i) for i in range(20)] scheduler = ReduceLROnPlateau( self.opt, mode="max", threshold_mode="rel", min_lr=[0.4, 0.3], threshold=0.1, patience=5, cooldown=5, ) self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs) def test_reduce_lr_on_plateau_get_last_lr_before_step(self): for param_group in self.opt.param_groups: param_group["lr"] = 0.5 scheduler = ReduceLROnPlateau( self.opt, ) self.assertEqual( scheduler.get_last_lr(), [0.5 for param_group in self.opt.param_groups] ) def test_reduce_lr_on_plateau_preserves_lr_type(self): # Ensures that tensor lrs are preserved, preventing recompilations. 
types = [type(group["lr"]) for group in self.opt.param_groups] scheduler = ReduceLROnPlateau(self.opt, mode="min", patience=0) scheduler.step(1.0) scheduler.step(2.0) # Triggers scheduler._reduce_lr for group, type_ in zip(self.opt.param_groups, types): self.assertEqual(type(group["lr"]), type_) def test_sequentiallr1(self): epochs = 19 schedulers = [None] * 2 targets = [ [0.05, 0.04, 0.032] + [0.05 for x in range(4)] + [0.05 * 0.1 for x in range(4)] + [0.05 * 0.01 for x in range(4)] + [0.05 * 0.001 for x in range(4)] ] milestones = [3] schedulers[0] = ExponentialLR(self.opt, gamma=0.8) schedulers[1] = StepLR(self.opt, gamma=0.1, step_size=4) scheduler = SequentialLR(self.opt, schedulers=schedulers, milestones=milestones) self._test(scheduler, targets, epochs) def test_sequentiallr2(self): epochs = 13 schedulers = [None] * 2 targets = [[0.005, 0.005, 0.005] + [0.05 * 0.9**x for x in range(10)]] milestones = [3] schedulers[0] = ConstantLR(self.opt, factor=0.1, total_iters=3) schedulers[1] = ExponentialLR(self.opt, gamma=0.9) scheduler = SequentialLR(self.opt, schedulers=schedulers, milestones=milestones) self._test(scheduler, targets, epochs) def test_sequentiallr3(self): epochs = 12 schedulers = [None] * 3 targets = [ [0.005, 0.005, 0.005] + [0.05, 0.04, 0.032] + [0.05, 0.05, 0.005, 0.005, 0.0005, 0.0005] ] milestones = [3, 6] schedulers[0] = ConstantLR(self.opt, factor=0.1, total_iters=3) schedulers[1] = ExponentialLR(self.opt, gamma=0.8) schedulers[2] = StepLR(self.opt, gamma=0.1, step_size=2) scheduler = SequentialLR(self.opt, schedulers=schedulers, milestones=milestones) self._test(scheduler, targets, epochs) def test_sequentiallr4(self): optimizer = SGD([torch.tensor(0.5)], lr=0.1) prev_lr = optimizer.param_groups[0]["lr"] schedulers = [ torch.optim.lr_scheduler.ConstantLR(optimizer, factor=1), torch.optim.lr_scheduler.ConstantLR(optimizer, factor=0.1), ] scheduler = torch.optim.lr_scheduler.SequentialLR( optimizer, schedulers, milestones=[10] ) new_lr = 
optimizer.param_groups[0]["lr"]

        # Ensure that multiple schedulers does not affect the initial learning rate
        self.assertEqual(prev_lr, new_lr)

    def test_sequentiallr5(self):
        """
        Test SequentialLR with a ChainedScheduler.
        """
        epochs = 10
        schedulers = []
        milestones = []
        # Expected lrs: 5 epochs of (linear warmup x constant 0.1), then the
        # trailing ConstantLR(factor=0.5) phase (0.05 * 0.5 = 0.025).
        targets = [
            [0.0005, 0.0014, 0.0023, 0.0032, 0.0041]
            + [0.025, 0.025, 0.025, 0.025, 0.025]
        ]

        const_sched = ConstantLR(optimizer=self.opt, factor=0.1, total_iters=5)
        lin_sched = LinearLR(optimizer=self.opt, start_factor=0.1, total_iters=5)
        milestones.append(5)

        chained = ChainedScheduler([lin_sched, const_sched])
        schedulers.append(chained)

        const_sched2 = ConstantLR(optimizer=self.opt, factor=0.5, total_iters=5)
        schedulers.append(const_sched2)

        scheduler = SequentialLR(self.opt, schedulers=schedulers, milestones=milestones)
        self._test(scheduler, targets, epochs)

    def test_sequentiallr_no_warnings(self):
        # Stepping a SequentialLR past its milestone must not emit any warning.
        scheduler1 = LinearLR(self.opt, start_factor=0.5, end_factor=0.1, total_iters=5)
        scheduler2 = ExponentialLR(self.opt, gamma=0.9)
        scheduler = SequentialLR(
            self.opt, schedulers=[scheduler1, scheduler2], milestones=[5]
        )
        for _ in range(10):
            self.opt.step()
            with warnings.catch_warnings(record=True) as ws:
                scheduler.step()
                self.assertTrue(len(ws) == 0, "No warning should be raised")

    def test_get_last_lr_sequentiallr(self):
        epochs = 12
        milestones = [3, 6]
        schedulers = [None] * 3
        schedulers[0] = ConstantLR(self.opt, factor=0.1, total_iters=3)
        schedulers[1] = ExponentialLR(self.opt, gamma=0.8)
        schedulers[2] = StepLR(self.opt, gamma=0.1, step_size=2)
        scheduler = SequentialLR(self.opt, schedulers=schedulers, milestones=milestones)
        # Per-phase expected lrs, concatenated across the two milestones.
        constant_lr_target = [0.005] * 3
        exponential_lr_target = [0.05, 0.04, 0.032]
        step_lr_target = [0.05, 0.05, 0.005, 0.005, 0.0005, 0.0005]
        single_targets = constant_lr_target + exponential_lr_target + step_lr_target
        targets = [single_targets, [x * 10 for x in single_targets]]
        self._test_get_last_lr(scheduler, targets, epochs)

    def test_sequentiallr_does_not_alias_lr_and_initial_lr(self):
        # The TestLRScheduler object uses self.opt to avoid instantiating a new optimizer for each test.
        # self.opt has a float lr, and we need to use a Tensor lr to ensure that a former SequentialLR bug is fixed.
        # For more context, see https://github.com/pytorch/pytorch/issues/162359
        old_opt = self.opt
        lr = torch.tensor(2.0)
        self.opt = SGD(self.net.parameters(), lr=lr)

        milestone = 4
        epochs = 8
        start, end = 0.1, 0.8
        schedulers = [
            LinearLR(self.opt, start, end, total_iters=milestone),
            LinearLR(self.opt, end, start, total_iters=epochs - milestone),
        ]
        # Linear ramp up then back down around the milestone.
        targets = [[0.2, 0.55, 0.9, 1.25, 1.6, 1.25, 0.9, 0.55]]
        scheduler = SequentialLR(self.opt, schedulers, milestones=[milestone])
        self._test(scheduler, targets, epochs)

        # Restore the shared SGD optimizer for subsequent tests.
        self.opt = old_opt

    def test_chained_lr2_get_last_lr_before_step(self):
        # get_last_lr() must be valid even before any step() is called.
        schedulers = [
            LinearLR(self.opt, start_factor=0.4, total_iters=3),
            MultiStepLR(self.opt, milestones=[4, 8, 10], gamma=0.1),
        ]
        scheduler = ChainedScheduler(schedulers)
        self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())

    def test_chained_lr1(self):
        epochs = 10
        schedulers = [None] * 1
        targets = [[0.05] * 3 + [0.005] * 3 + [0.0005] * 3 + [0.00005] * 3]
        schedulers[0] = StepLR(self.opt, gamma=0.1, step_size=3)
        scheduler = ChainedScheduler(schedulers)
        self._test([scheduler], targets, epochs)
        self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())

    def test_chained_lr2(self):
        epochs = 10
        schedulers = [None] * 1
        targets = [[0.02, 0.03, 0.04] + [0.05] * 9]
        schedulers[0] = LinearLR(self.opt, start_factor=0.4, total_iters=3)
        scheduler = ChainedScheduler(schedulers)
        self._test([scheduler], targets, epochs)
        self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())

    def test_chained_lr3(self):
        epochs = 10
        schedulers = [None] * 2
        # LinearLR warmup composed multiplicatively with MultiStepLR decays.
        targets = [
            [0.02, 0.03, 0.04, 0.05] + [0.005] * 4 + [0.0005] * 3 + [0.00005] * 3
        ]
        schedulers[0] = LinearLR(self.opt, start_factor=0.4, total_iters=3)
        schedulers[1] = MultiStepLR(self.opt, milestones=[4, 8, 10], gamma=0.1)
        scheduler = ChainedScheduler(schedulers)
        self._test([scheduler], targets, epochs)
        self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())

    def test_chained_lr4(self):
        epochs = 9
        schedulers = [None] * 3
        # Product of Exponential x Constant(first 4 iters) x Step factors.
        targets = [
            [0.05 * 0.2 * 0.9**x for x in range(3)]
            + [0.05 * 0.2 * 0.9**3 * 0.1]
            + [0.05 * 0.9**x * 0.1 for x in range(4, 6)]
            + [0.05 * 0.9**x * 0.01 for x in range(6, 9)]
        ]
        schedulers[0] = ExponentialLR(self.opt, gamma=0.9)
        schedulers[1] = ConstantLR(self.opt, factor=0.2, total_iters=4)
        schedulers[2] = StepLR(self.opt, gamma=0.1, step_size=3)
        scheduler = ChainedScheduler(schedulers)
        self._test([scheduler], targets, epochs)
        self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())

    def test_chained_lr5(self):
        def poly_lr(lr: float):
            # Closed-form PolynomialLR schedule, padded with zeros once
            # total_iters is exhausted.
            return [
                (lr * ((1.0 - x / total_iters) ** power)) for x in range(total_iters)
            ] + [0.0] * (epochs - total_iters)

        schedulers = [None] * 2
        epochs = 10
        power = 0.9
        total_iters = 5
        const_factor = 0.1
        single_targets = [x * const_factor for x in poly_lr(lr=0.05)]
        targets = [single_targets, [x * const_factor for x in poly_lr(0.5)]]
        schedulers[0] = PolynomialLR(self.opt, power=power, total_iters=total_iters)
        schedulers[1] = ConstantLR(self.opt, factor=const_factor)
        scheduler = ChainedScheduler(schedulers)
        self._test(scheduler, targets, epochs)
        self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())

    def test_compound_step_and_multistep_lr(self):
        epochs = 10
        schedulers = [None] * 2
        schedulers[0] = StepLR(self.opt, gamma=0.1, step_size=3)
        schedulers[1] = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
        targets = [[0.05] * 2 + [0.005] * 1 + [5e-4] * 2 + [5e-5] + [5e-6] * 3 + [5e-8]]
        self._test(schedulers, targets, epochs)

    def test_compound_step_and_exp_lr(self):
        epochs = 10
        schedulers = [None] * 2
        single_targets = [0.05 * (0.9**x) for x in range(3)]
        single_targets += [0.005 * (0.9**x) for x in range(3, 6)]
        single_targets += [0.0005 * (0.9**x) for x in range(6, 9)]
        single_targets += [0.00005 * (0.9**x) for x in range(9, 12)]
        targets = [single_targets, [x * epochs for x in single_targets]]
        schedulers[0] = StepLR(self.opt, gamma=0.1, step_size=3)
        schedulers[1] = ExponentialLR(self.opt, gamma=0.9)
        self._test(schedulers, targets, epochs)

    def test_compound_exp_and_multistep_lr(self):
        epochs = 10
        schedulers = [None] * 2
        single_targets = [0.05 * (0.9**x) for x in range(2)]
        single_targets += [0.005 * (0.9**x) for x in range(2, 5)]
        single_targets += [0.0005 * (0.9**x) for x in range(5, 9)]
        single_targets += [0.00005 * (0.9**x) for x in range(9, 11)]
        targets = [single_targets, [x * epochs for x in single_targets]]
        schedulers[0] = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
        schedulers[1] = ExponentialLR(self.opt, gamma=0.9)
        self._test(schedulers, targets, epochs)

    def test_compound_exp_and_linearlr(self):
        epochs = 10
        iters = 4
        start_factor = 0.4
        end_factor = 0.9
        schedulers = [None] * 2
        single_targets = [0.05 * (0.9**x) for x in range(11)]
        for i in range(iters):
            single_targets[i] *= start_factor + i / iters * (end_factor - start_factor)
        for i in range(iters, 11):
            single_targets[i] *= end_factor
        targets = [single_targets, [x * epochs for x in single_targets]]
        schedulers[0] = LinearLR(
            self.opt,
            start_factor=start_factor,
            end_factor=end_factor,
            total_iters=iters,
        )
        schedulers[1] = ExponentialLR(self.opt, gamma=0.9)
        self._test(schedulers, targets, epochs)

    def test_compound_step_and_constantlr(self):
        epochs = 10
        # NOTE(review): `iters` and `factor` are unused; the literals 4 and 0.4
        # are passed directly to ConstantLR below.
        iters = 4
        factor = 0.4
        schedulers = [None] * 2
        single_targets = (
            [0.05 * 0.4] * 3
            + [0.005 * 0.4]
            + [0.005] * 2
            + [0.0005] * 3
            + [0.00005] * 3
        )
        targets = [single_targets, [x * epochs for x in single_targets]]
        schedulers[0] = StepLR(self.opt, gamma=0.1, step_size=3)
        schedulers[1] = ConstantLR(self.opt, factor=0.4, total_iters=4)
        self._test(schedulers, targets, epochs)

    def test_compound_linearlr_and_multistep_lr(self):
        epochs = 10
        iters = 4
        start_factor = 0.4
        schedulers = [None] * 2
        single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 2
        for i in range(iters):
            single_targets[i] *= start_factor + i / iters * (1 - start_factor)
        targets = [single_targets, [x * epochs for x in single_targets]]
        schedulers[0] = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
        schedulers[1] = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
        self._test(schedulers, targets, epochs)

    def test_compound_cosanneal_and_step_lr(self):
        epochs = 10
        eta_min = 1e-10
        # Closed-form cosine annealing values, then scaled by StepLR decays.
        single_targets = [
            eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
            for x in range(epochs)
        ]
        single_targets = [x * 0.1 ** (i // 3) for i, x in enumerate(single_targets)]
        targets = [single_targets, [x * epochs for x in single_targets]]
        schedulers = [None] * 2
        schedulers[0] = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
        schedulers[1] = StepLR(self.opt, gamma=0.1, step_size=3)
        self._test(schedulers, targets, epochs)

    def test_compound_cosanneal_and_multistep_lr(self):
        epochs = 10
        eta_min = 1e-10
        single_targets = [
            eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
            for x in range(epochs)
        ]
        multipliers = [1] * 2 + [0.1] * 3 + [0.01] * 4 + [0.001]
        single_targets = [x * y for x, y in zip(single_targets, multipliers)]
        targets = [single_targets, [x * epochs for x in single_targets]]
        schedulers = [None] * 2
        schedulers[0] = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
        schedulers[1] = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
        self._test(schedulers, targets, epochs)

    def test_compound_cosanneal_and_linearlr(self):
        epochs = 10
        iters = 4
        start_factor = 0.4
        eta_min = 1e-10
        schedulers = [None] * 2
        single_targets = [
            eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
            for x in range(epochs)
        ]
        for i in range(iters):
            single_targets[i] *= start_factor + i / iters * (1 - start_factor)
        targets = [single_targets, [x * epochs for x in single_targets]]
        schedulers[0] = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
        schedulers[1] = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
        self._test(schedulers, targets, epochs)

    def test_compound_cosanneal_and_exp_lr(self):
        epochs = 10
        eta_min = 1e-10
        single_targets = [
            eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
            for x in range(epochs)
        ]
        multipliers = [0.1**i for i in range(epochs)]
        single_targets = [x * y for x, y in zip(single_targets, multipliers)]
        targets = [single_targets, [x * epochs for x in single_targets]]
        schedulers = [None] * 2
        schedulers[0] = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
        schedulers[1] = ExponentialLR(self.opt, gamma=0.1)
        self._test(schedulers, targets, epochs)

    def test_compound_reduce_lr_on_plateau1(self):
        epochs = 10
        for param_group in self.opt.param_groups:
            param_group["lr"] = 0.5
        single_targets = [0.5] * 20
        multipliers = [0.1 ** (i // 3) for i in range(20)]
        single_targets = [x * y for x, y in zip(multipliers, single_targets)]
        targets = [single_targets]
        # NOTE(review): `targets[1:]` on a one-element list leaves `targets`
        # empty, so the lr assertions in _test_reduce_lr_on_plateau have
        # nothing to zip against — presumably `single_targets[1:]` was meant.
        # TODO confirm intent before changing.
        targets = targets[1:]  # test runs step before checking lr
        metrics = [10 - i * 0.0167 for i in range(20)]
        schedulers = [None, None]
        schedulers[0] = ReduceLROnPlateau(
            self.opt,
            threshold_mode="abs",
            mode="min",
            threshold=0.01,
            patience=5,
            cooldown=5,
        )
        schedulers[1] = StepLR(self.opt, gamma=0.1, step_size=3)
        self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)

    def test_compound_reduce_lr_on_plateau2(self):
        epochs = 22
        for param_group in self.opt.param_groups:
            param_group["lr"] = 0.5
        single_targets = [0.5] * 6 + [0.05] * 7 + [0.005] * 7 + [0.0005] * 2
        multipliers = [1] * 3 + [0.1] * 5 + [0.01] * 4 + [0.001] * 10
        single_targets = [x * y for x, y in zip(single_targets, multipliers)]
        targets = [single_targets]
        # NOTE(review): see test_compound_reduce_lr_on_plateau1 — this empties
        # `targets`.
        targets = targets[1:]  # test runs step before checking lr
        metrics = [10 - i * 0.0165 for i in range(22)]
        schedulers = [None] * 2
        schedulers[0] = ReduceLROnPlateau(
            self.opt,
            patience=5,
            cooldown=0,
            threshold_mode="abs",
            mode="min",
            threshold=0.1,
        )
        schedulers[1] = MultiStepLR(self.opt, gamma=0.1, milestones=[3, 8, 12])
        self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)

    def test_compound_reduce_lr_on_plateau3(self):
        epochs = 22
        for param_group in self.opt.param_groups:
            param_group["lr"] = 0.5
        single_targets = [0.5] * (2 + 6) + [0.05] * (5 + 6) + [0.005] * 4
        multipliers = [0.1**i for i in range(epochs)]
        single_targets = [x * y for x, y in zip(multipliers, single_targets)]
        targets = [single_targets]
        # NOTE(review): see test_compound_reduce_lr_on_plateau1 — this empties
        # `targets`.
        targets = targets[1:]  # test runs step before checking lr
        metrics = [-0.8] * 2 + [-0.234] * 20
        schedulers = [None, None]
        schedulers[0] = ReduceLROnPlateau(
            self.opt, mode="max", patience=5, cooldown=5, threshold_mode="abs"
        )
        schedulers[1] = ExponentialLR(self.opt, gamma=0.1)
        self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)

    def test_compound_reduce_lr_on_plateau4(self):
        # NOTE(review): `epochs` is immediately reassigned from 20 to 10 below;
        # the first assignment is dead.
        epochs = 20
        for param_group in self.opt.param_groups:
            param_group["lr"] = 0.05
        epochs = 10
        eta_min = 1e-10
        single_targets = [
            eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
            for x in range(epochs)
        ]
        targets = [single_targets]
        # NOTE(review): see test_compound_reduce_lr_on_plateau1 — this empties
        # `targets`.
        targets = targets[1:]  # test runs step before checking lr
        metrics = [1.5 * (1.025**i) for i in range(20)]  # 1.025 > 1.1**0.25
        schedulers = [None, None]
        schedulers[0] = ReduceLROnPlateau(
            self.opt, mode="max", patience=3, threshold_mode="rel", threshold=0.1
        )
        schedulers[1] = CosineAnnealingLR(self.opt, epochs, eta_min)
        self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)

    def test_compound_reduce_lr_on_plateau5(self):
        iters = 4
        start_factor = 0.4
        epochs = 22
        for param_group in self.opt.param_groups:
            param_group["lr"] = 0.5
        single_targets = [0.5] * 6 + [0.05] * 7 + [0.005] * 7 + [0.0005] * 2
        multipliers = [1] * 22
        for i in range(iters):
            multipliers[i] *= start_factor + i / iters * (1 - start_factor)
        single_targets = [x * y for x, y in zip(single_targets, multipliers)]
        targets = [single_targets]
        # NOTE(review): see test_compound_reduce_lr_on_plateau1 — this empties
        # `targets`.
        targets = targets[1:]  # test runs step before checking lr
        metrics = [10 - i * 0.0165 for i in range(22)]
        schedulers = [None] * 2
        schedulers[0] = ReduceLROnPlateau(
            self.opt,
            patience=5,
            cooldown=0,
            threshold_mode="abs",
            mode="min",
            threshold=0.1,
        )
        schedulers[1] = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
        self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)

    def test_cycle_lr_invalid_mode(self):
        with self.assertRaises(ValueError):
            scheduler = CyclicLR(self.opt, base_lr=0, max_lr=0, mode="CATS")

    def test_cycle_lr_triangular_mode_one_lr(self):
        lr_target = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
        momentum_target = [5, 4, 3, 2, 1, 2, 3, 4, 5, 4, 3]
        lr_targets = [lr_target, lr_target]
        momentum_targets = [momentum_target, momentum_target]
        scheduler = CyclicLR(
            self.opt,
            base_lr=1,
            max_lr=5,
            step_size_up=4,
            cycle_momentum=True,
            base_momentum=1,
            max_momentum=5,
            mode="triangular",
        )
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))

    def test_cycle_lr_triangular_mode_one_lr_no_momentum(self):
        lr_target = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
        lr_targets = [lr_target, lr_target]
        # With cycle_momentum=False the optimizer's default momentum is left
        # untouched throughout.
        momentum_target = [self.opt.defaults["momentum"]] * len(lr_target)
        momentum_targets = [momentum_target, momentum_target]
        scheduler = CyclicLR(
            self.opt,
            base_lr=1,
            max_lr=5,
            step_size_up=4,
            cycle_momentum=False,
            mode="triangular",
        )
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))

    def test_cycle_lr_triangular2_mode_one_lr(self):
        # Amplitude halves every cycle in triangular2 mode.
        lr_target = [
            1, 2, 3, 4, 5, 4, 3, 2,
            1, 1.5, 2.0, 2.5, 3.0, 2.5, 2.0, 1.5,
            1, 1.25, 1.50, 1.75, 2.00, 1.75,
        ]
        momentum_target = [
            5.0, 4.0, 3.0, 2.0, 1.0, 2.0, 3.0, 4.0,
            5.0, 4.5, 4.0, 3.5, 3.0, 3.5, 4.0, 4.5,
            5.0, 4.75, 4.5, 4.25, 4.0, 4.25,
        ]
        lr_targets = [lr_target, lr_target]
        momentum_targets = [momentum_target, momentum_target]
        scheduler = CyclicLR(
            self.opt,
            base_lr=1,
            max_lr=5,
            step_size_up=4,
            cycle_momentum=True,
            base_momentum=1,
            max_momentum=5,
            mode="triangular2",
        )
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))

    def test_cycle_lr_exp_range_mode_one_lr(self):
        base_lr, max_lr = 1, 5
        diff_lr = max_lr - base_lr
        gamma = 0.9
        # Triangle positions in [0, 1]; amplitude decays by gamma**iteration.
        xs = [0, 0.25, 0.5, 0.75, 1, 0.75, 0.50, 0.25, 0, 0.25, 0.5, 0.75, 1]
        lr_target = [base_lr + x * diff_lr * gamma**i for i, x in enumerate(xs)]
        momentum_target = [max_lr - x * diff_lr * gamma**i for i, x in enumerate(xs)]
        lr_targets = [lr_target, lr_target]
        momentum_targets = [momentum_target, momentum_target]
        scheduler = CyclicLR(
            self.opt,
            base_lr=base_lr,
            max_lr=max_lr,
            step_size_up=4,
            cycle_momentum=True,
            base_momentum=base_lr,
            max_momentum=max_lr,
            mode="exp_range",
            gamma=gamma,
        )
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))

    def test_cycle_lr_triangular_mode(self):
        # Per-param-group base/max lrs and momenta.
        lr_target_1 = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
        lr_target_2 = [x + 1 for x in lr_target_1]
        lr_targets = [lr_target_1, lr_target_2]
        momentum_target_1 = [5, 4, 3, 2, 1, 2, 3, 4, 5, 4, 3]
        momentum_target_2 = [x + 1 for x in momentum_target_1]
        momentum_targets = [momentum_target_1, momentum_target_2]
        scheduler = CyclicLR(
            self.opt,
            base_lr=[1, 2],
            max_lr=[5, 6],
            step_size_up=4,
            cycle_momentum=True,
            base_momentum=[1, 2],
            max_momentum=[5, 6],
            mode="triangular",
        )
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target_1))

    def test_cycle_lr_triangular2_mode(self):
        lr_target_1 = [
            1, 2, 3, 4, 5, 4, 3, 2,
            1, 1.5, 2.0, 2.5, 3.0, 2.5, 2.0, 1.5,
            1, 1.25, 1.50, 1.75, 2.00, 1.75,
        ]
        lr_target_2 = [x + 2 for x in lr_target_1]
        lr_targets = [lr_target_1, lr_target_2]
        momentum_target_1 = [
            5.0, 4.0, 3.0, 2.0, 1.0, 2.0, 3.0, 4.0,
            5.0, 4.5, 4.0, 3.5, 3.0, 3.5, 4.0, 4.5,
            5.0, 4.75, 4.5, 4.25, 4.0, 4.25,
        ]
        momentum_target_2 = [x + 2 for x in momentum_target_1]
        momentum_targets = [momentum_target_1, momentum_target_2]
        scheduler = CyclicLR(
            self.opt,
            base_lr=[1, 3],
            max_lr=[5, 7],
            step_size_up=4,
            cycle_momentum=True,
            base_momentum=[1, 3],
            max_momentum=[5, 7],
            mode="triangular2",
        )
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target_1))

    def test_cycle_lr_exp_range_mode(self):
        base_lr_1, max_lr_1 = 1, 5
        base_lr_2, max_lr_2 = 5, 12

        diff_lr_1 = max_lr_1 - base_lr_1
        diff_lr_2 = max_lr_2 - base_lr_2

        gamma = 0.9
        xs = [0, 0.25, 0.5, 0.75, 1, 0.75, 0.50, 0.25, 0, 0.25, 0.5, 0.75, 1]
        lr_target_1 = [base_lr_1 + x * diff_lr_1 * gamma**i for i, x in enumerate(xs)]
        lr_target_2 = [base_lr_2 + x * diff_lr_2 * gamma**i for i, x in enumerate(xs)]
        lr_targets = [lr_target_1, lr_target_2]
        momentum_target_1 = [
            max_lr_1 - x * diff_lr_1 * gamma**i for i, x in enumerate(xs)
        ]
        momentum_target_2 = [
            max_lr_2 - x * diff_lr_2 * gamma**i for i, x in enumerate(xs)
        ]
        momentum_targets = [momentum_target_1, momentum_target_2]
        scheduler = CyclicLR(
            self.opt,
            base_lr=[base_lr_1, base_lr_2],
            max_lr=[max_lr_1, max_lr_2],
            step_size_up=4,
            cycle_momentum=True,
            base_momentum=[base_lr_1, base_lr_2],
            max_momentum=[max_lr_1, max_lr_2],
            mode="exp_range",
            gamma=gamma,
        )
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target_1))

    def test_cycle_lr_triangular_mode_step_size_up_down(self):
        # Asymmetric cycle: 4 steps up, 6 steps down.
        lr_target = [
            1.0, 2.0, 3.0, 4.0, 5.0,
            13.0 / 3, 11.0 / 3, 9.0 / 3, 7.0 / 3, 5.0 / 3,
            1.0,
        ]
        lr_targets = [lr_target, lr_target]
        momentum_target = [
            5.0, 4.0, 3.0, 2.0, 1.0,
            5.0 / 3, 7.0 / 3, 3.0, 11.0 / 3, 13.0 / 3,
            5.0,
        ]
        momentum_targets = [momentum_target, momentum_target]
        scheduler = CyclicLR(
            self.opt,
            base_lr=1,
            max_lr=5,
            step_size_up=4,
            step_size_down=6,
            cycle_momentum=True,
            base_momentum=1,
            max_momentum=5,
            mode="triangular",
        )
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))

    def test_cycle_lr_triangular2_mode_step_size_up_down(self):
        # Asymmetric triangular2: 2 steps up, 6 down, amplitude halving per cycle.
        lr_base_target = [
            1.0, 3.0, 5.0,
            13.0 / 3, 11.0 / 3, 9.0 / 3, 7.0 / 3, 5.0 / 3,
            1.0, 2.0, 3.0,
            8.0 / 3, 7.0 / 3, 6.0 / 3, 5.0 / 3, 4.0 / 3,
            1.0, 3.0 / 2, 2.0,
            11.0 / 6, 10.0 / 6, 9.0 / 6, 8.0 / 6, 7.0 / 6,
        ]
        momentum_base_target = [
            5.0, 3.0, 1.0,
            5.0 / 3, 7.0 / 3, 3.0, 11.0 / 3, 13.0 / 3,
            5.0, 4.0, 3.0,
            10.0 / 3, 11.0 / 3, 4.0, 13.0 / 3, 14.0 / 3,
            5.0, 4.5, 4.0,
            25.0 / 6, 13.0 / 3, 4.5, 14.0 / 3, 29.0 / 6,
        ]
        deltas = [2 * i for i in range(2)]
        base_lrs = [1 + delta for delta in deltas]
        max_lrs = [5 + delta for delta in deltas]
        lr_targets = [[x + delta for x in lr_base_target] for delta in deltas]
        momentum_targets = [
            [x + delta for x in momentum_base_target] for delta in deltas
        ]
        scheduler = CyclicLR(
            self.opt,
            base_lr=base_lrs,
            max_lr=max_lrs,
            step_size_up=2,
            step_size_down=6,
            cycle_momentum=True,
            base_momentum=base_lrs,
            max_momentum=max_lrs,
            mode="triangular2",
        )
        self._test_cycle_lr(
            scheduler, lr_targets, momentum_targets, len(lr_base_target)
        )

    def test_cycle_lr_exp_range_mode_step_size_up_down(self):
        base_lr, max_lr = 1, 5
        diff_lr = max_lr - base_lr
        gamma = 0.9
        xs = [
            0.0, 0.5, 1.0,
            5.0 / 6, 4.0 / 6, 3.0 / 6, 2.0 / 6, 1.0 / 6,
            0.0, 0.5, 1.0,
            5.0 / 6, 4.0 / 6,
        ]
        lr_target = [base_lr + x * diff_lr * gamma**i for i, x in enumerate(xs)]
        lr_targets = [lr_target, lr_target]
        momentum_target = [max_lr - x * diff_lr * gamma**i for i, x in enumerate(xs)]
        momentum_targets = [momentum_target, momentum_target]
        scheduler = CyclicLR(
            self.opt,
            base_lr=base_lr,
            max_lr=max_lr,
            step_size_up=2,
            step_size_down=6,
            cycle_momentum=True,
            base_momentum=base_lr,
            max_momentum=max_lr,
            mode="exp_range",
            gamma=gamma,
        )
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))

    def test_cycle_lr_with_momentumless_optimizer(self):
        # Note [Temporarily set optimizer to Adam]
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # The TestLRScheduler object carries around an SGD optimizer to avoid having to
        # instantiate one for every test. This gets in the way for our very specific case
        # in which we need to use Adam (or really any optimizer that doesn't use momentum)
        # in order to test that the momentum bug in CyclicLR is fixed (the bug is described
        # in more detail in https://github.com/pytorch/pytorch/issues/19003 ).
        old_opt = self.opt
        self.opt = Adam(
            [
                {"params": self.net.conv1.parameters()},
                {"params": self.net.conv2.parameters(), "lr": 0.5},
            ],
            lr=0.05,
        )

        lr_target = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
        lr_targets = [lr_target, lr_target]
        momentum_target = [None] * len(lr_target)
        momentum_targets = [momentum_target, momentum_target]
        scheduler = CyclicLR(
            self.opt,
            base_lr=1,
            max_lr=5,
            step_size_up=4,
            cycle_momentum=False,
            mode="triangular",
        )
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))

        self.opt = old_opt  # set optimizer back to SGD

    def test_cycle_lr_cycle_momentum_fail_with_momentumless_optimizer(self):
        # Rprop has no momentum, so cycle_momentum=True must raise.
        with self.assertRaises(ValueError):
            rprop_opt = Rprop(self.net.parameters())
            scheduler = CyclicLR(rprop_opt, base_lr=1, max_lr=5, cycle_momentum=True)

    def test_cycle_lr_cycle_momentum_with_beta1_optimizer(self):
        # Adam exposes beta1 instead of momentum; construction must succeed.
        adam_opt = Adam(self.net.parameters())
        scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=True)

    def test_cycle_lr_removed_after_out_of_scope(self):
        import gc
        import weakref

        # GC disabled so only refcounting can reclaim the scheduler — proves
        # CyclicLR holds no reference cycle keeping itself alive.
        gc.disable()

        def test():
            adam_opt = Adam(self.net.parameters())
            scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False)
            return weakref.ref(scheduler)

        ref = test()
        assert ref() is None
        gc.enable()

    def test_cycle_lr_state_dict_picklable(self):
        adam_opt = Adam(self.net.parameters())

        # Case 1: Built-in mode
        scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False)
        self.assertIsInstance(scheduler._scale_fn_ref, types.FunctionType)
        state = scheduler.state_dict()
        self.assertNotIn("_scale_fn_ref", state)
        self.assertIs(state["_scale_fn_custom"], None)
        pickle.dumps(state)

        # Case 2: Custom `scale_fn`, a function object
        def scale_fn(_):
            return 0.5

        scheduler = CyclicLR(
            adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn
        )
        state = scheduler.state_dict()
        self.assertNotIn("_scale_fn_ref", state)
        self.assertIs(state["_scale_fn_custom"], None)
        pickle.dumps(state)

        # Case 3: Custom `scale_fn`, a callable class
        class ScaleFn:
            def __init__(self) -> None:
                self.x = 0.5

            def __call__(self, _):
                return self.x

        scale_fn = ScaleFn()
        scheduler = CyclicLR(
            adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn
        )
        state = scheduler.state_dict()
        self.assertNotIn("_scale_fn_ref", state)
        self.assertEqual(state["_scale_fn_custom"], scale_fn.__dict__)
        pickle.dumps(state)

    def test_cycle_lr_scale_fn_restored_from_state_dict(self):
        adam_opt = Adam(self.net.parameters())

        # Case 1: Built-in mode
        scheduler = CyclicLR(
            adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, mode="triangular2"
        )
        restored_scheduler = CyclicLR(
            adam_opt, base_lr=1, max_lr=5, cycle_momentum=False
        )
        restored_scheduler.load_state_dict(scheduler.state_dict())
        self.assertTrue(restored_scheduler.mode == scheduler.mode == "triangular2")
        # NOTE(review): `assertIsNotNone(...)` returns None, so the `and`
        # short-circuits and the second assertion never executes — these were
        # presumably meant to be two separate statements.
        self.assertIsNotNone(restored_scheduler._scale_fn_ref) and self.assertIsNotNone(
            scheduler._scale_fn_ref
        )
        self.assertIs(restored_scheduler._scale_fn_custom, None)
        self.assertIs(scheduler._scale_fn_custom, None)

        # Case 2: Custom `scale_fn`
        def scale_fn(_):
            return 0.5

        scheduler = CyclicLR(
            adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn
        )
        restored_scheduler = CyclicLR(
            adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn
        )
        restored_scheduler.load_state_dict(scheduler.state_dict())
        self.assertIs(scheduler._scale_fn_custom, scale_fn)
        self.assertIs(restored_scheduler._scale_fn_custom, scale_fn)

    def test_onecycle_lr_invalid_anneal_strategy(self):
        with self.assertRaises(ValueError):
            scheduler = OneCycleLR(
                self.opt, max_lr=1e-3, total_steps=10, anneal_strategy="CATS"
            )

    def test_onecycle_lr_invalid_pct_start(self):
        with self.assertRaises(ValueError):
            scheduler = OneCycleLR(self.opt, max_lr=1e-3, total_steps=10, pct_start=1.1)

    def test_onecycle_lr_cannot_calculate_total_steps(self):
        # Neither total_steps nor (epochs, steps_per_epoch) given -> error.
        with self.assertRaises(ValueError):
            scheduler = OneCycleLR(self.opt, max_lr=1e-3)

    def test_onecycle_lr_linear_annealing(self):
        lr_target = [1, 13, 25, 21.5, 18, 14.5, 11, 7.5, 4, 0.5]
        momentum_target = [22, 11.5, 1, 4, 7, 10, 13, 16, 19, 22]
        lr_targets = [lr_target, lr_target]
        momentum_targets = [momentum_target, momentum_target]
        scheduler = OneCycleLR(
            self.opt,
            max_lr=25,
            final_div_factor=2,
            base_momentum=1,
            max_momentum=22,
            total_steps=10,
            anneal_strategy="linear",
        )
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, 10)

    def test_onecycle_lr_linear_annealing_three_phases(self):
        lr_target = [1, 9, 17, 25, 17, 9, 1, 0.75, 0.5, 0.25]
        momentum_target = [22, 15, 8, 1, 8, 15, 22, 22, 22, 22]
        lr_targets = [lr_target, lr_target]
        momentum_targets = [momentum_target, momentum_target]
        scheduler = OneCycleLR(
            self.opt,
            max_lr=25,
            div_factor=25,
            base_momentum=1,
            max_momentum=22,
            total_steps=10,
            anneal_strategy="linear",
            pct_start=0.4,
            final_div_factor=4,
            three_phase=True,
        )
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, 10)

    def test_onecycle_lr_cosine_annealing(self):
        def annealing_cos(start, end, pct):
            # Reference implementation of OneCycleLR's cosine interpolation.
            cos_out = math.cos(math.pi * pct) + 1
            return end + (start - end) / 2.0 * cos_out

        lr_target = [
            1,
            13,
            25,
            annealing_cos(25, 0.5, 1 / 7.0),
            annealing_cos(25, 0.5, 2 / 7.0),
            annealing_cos(25, 0.5, 3 / 7.0),
            annealing_cos(25, 0.5, 4 / 7.0),
            annealing_cos(25, 0.5, 5 / 7.0),
            annealing_cos(25, 0.5, 6 / 7.0),
            0.5,
        ]
        momentum_target = [
            22,
            11.5,
            1,
            annealing_cos(1, 22, 1 / 7.0),
            annealing_cos(1, 22, 2 / 7.0),
            annealing_cos(1, 22, 3 / 7.0),
            annealing_cos(1, 22, 4 / 7.0),
            annealing_cos(1, 22, 5 / 7.0),
            annealing_cos(1, 22, 6 / 7.0),
            22,
        ]
        lr_targets = [lr_target, lr_target]
        momentum_targets = [momentum_target, momentum_target]
        scheduler = OneCycleLR(
            self.opt,
            max_lr=25,
            final_div_factor=2,
            base_momentum=1,
            max_momentum=22,
            total_steps=10,
        )
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, 10)

    def test_onecycle_lr_legacy_state_dict(self):
        scheduler = OneCycleLR(
            self.opt,
            max_lr=25,
            final_div_factor=2,
            base_momentum=1,
            max_momentum=22,
            total_steps=10,
            anneal_strategy="cos",
        )
        # Simulate a state dict saved by an older PyTorch that stored the
        # anneal function itself rather than _anneal_func_type.
        delattr(scheduler, "_anneal_func_type")
        state_dict = scheduler.state_dict()
        self.assertNotIn("anneal_func_type", state_dict)
        state_dict["anneal_func"] = OneCycleLR._annealing_cos
        scheduler.load_state_dict(state_dict)

        def annealing_cos(start, end, pct):
            cos_out = math.cos(math.pi * pct) + 1
            return end + (start - end) / 2.0 * cos_out

        lr_target = [
            1,
            13,
            25,
            annealing_cos(25, 0.5, 1 / 7.0),
            annealing_cos(25, 0.5, 2 / 7.0),
            annealing_cos(25, 0.5, 3 / 7.0),
            annealing_cos(25, 0.5, 4 / 7.0),
            annealing_cos(25, 0.5, 5 / 7.0),
            annealing_cos(25, 0.5, 6 / 7.0),
            0.5,
        ]
        momentum_target = [
            22,
            11.5,
            1,
            annealing_cos(1, 22, 1 / 7.0),
            annealing_cos(1, 22, 2 / 7.0),
            annealing_cos(1, 22, 3 / 7.0),
            annealing_cos(1, 22, 4 / 7.0),
            annealing_cos(1, 22, 5 / 7.0),
            annealing_cos(1, 22, 6 / 7.0),
            22,
        ]
        lr_targets = [lr_target, lr_target]
        momentum_targets = [momentum_target, momentum_target]
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, 10)

    def test_cycle_lr_with_adam(self):
        # See Note [Temporarily set optimizer to Adam].
        old_opt = self.opt
        self.opt = Adam(
            [
                {"params": self.net.conv1.parameters()},
                {"params": self.net.conv2.parameters(), "lr": 0.5},
            ],
            lr=0.05,
        )

        lr_target = [1, 13, 25, 21.5, 18, 14.5, 11, 7.5, 4, 0.5]
        momentum_target = [22, 11.5, 1, 4, 7, 10, 13, 16, 19, 22]
        lr_targets = [lr_target, lr_target]
        momentum_targets = [momentum_target, momentum_target]
        scheduler = OneCycleLR(
            self.opt,
            max_lr=25,
            final_div_factor=2,
            base_momentum=1,
            max_momentum=22,
            total_steps=10,
            anneal_strategy="linear",
        )
        self._test_cycle_lr(scheduler, lr_targets, momentum_targets, 10, use_beta1=True)
        self.opt = old_opt  # set optimizer back to SGD

    def test_lambda_lr(self):
        epochs = 10
        self.opt.param_groups[0]["lr"] = 0.05
        self.opt.param_groups[1]["lr"] = 0.4
        targets = [
            [0.05 * (0.9**x) for x in range(epochs)],
            [0.4 * (0.8**x) for x in range(epochs)],
        ]
        scheduler = LambdaLR(
            self.opt, lr_lambda=[lambda x1: 0.9**x1, lambda x2: 0.8**x2]
        )
        self._test(scheduler, targets, epochs)

    def test_multiplicative_lr(self):
        epochs = 10
        self.opt.param_groups[0]["lr"] = 0.05
        self.opt.param_groups[1]["lr"] = 0.4
        targets = [
            [0.05 * (0.9**x) for x in range(epochs)],
            [0.4 * (0.8**x) for x in range(epochs)],
        ]
        scheduler = MultiplicativeLR(
            self.opt, lr_lambda=[lambda x1: 0.9, lambda x2: 0.8]
        )
        self._test(scheduler, targets, epochs)

    def test_multiplicative_lr_with_lr_lambda(self):
        # Non-callable lr_lambda (scalar, or list containing scalars) must
        # raise a TypeError.
        lr_lambda = 0.95
        with self.assertRaisesRegex(TypeError, "lr_lambda should be a function"):
            MultiplicativeLR(self.opt, lr_lambda)

        lr_lambda2 = 0.95
        with self.assertRaisesRegex(TypeError, "lr_lambda should be a function"):
            MultiplicativeLR(self.opt, [lr_lambda, lr_lambda2])

    @parametrize("T_mult", [1, 2, 4])
    def test_CosineAnnealingWarmRestarts_lr1(self, T_mult):
        iters = 100
        eta_min = 1e-10
        T_i = 10
        T_cur = 0
        targets = [[0.05], [0.5]]
        scheduler = CosineAnnealingWarmRestarts(
            self.opt, T_0=T_i, T_mult=T_mult, eta_min=eta_min
        )
        for _ in range(1, iters, 1):
            # Mirror the scheduler's restart bookkeeping to build the
            # closed-form expected lrs.
            T_cur += 1
            if T_cur >= T_i:
                T_cur = T_cur - T_i
                T_i = int(T_mult) * T_i
            targets[0] += [
                eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
            ]
            targets[1] += [
                eta_min + (0.5 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
            ]
        self._test(scheduler, targets, iters)

    def test_CosineAnnealingWarmRestarts_lr2(self):
        # Same as lr1 but stepping with fractional epochs (0.1 increments).
        iters = 30
        eta_min = 1e-10
        T_mults = [1, 2, 4]
        for T_mult in T_mults:
            T_i = 10
            T_cur = 0
            targets = [[0.05], [0.5]]
            scheduler = CosineAnnealingWarmRestarts(
                self.opt, T_0=T_i, T_mult=T_mult, eta_min=eta_min
            )
            for _ in torch.arange(0.1, iters, 0.1):
                T_cur = round(T_cur + 0.1, 1)
                if T_cur >= T_i:
                    T_cur = T_cur - T_i
                    T_i = int(T_mult) * T_i
                targets[0] += [
                    eta_min
                    + (0.05 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
                ]
                targets[1] += [
                    eta_min
                    + (0.5 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
                ]
            self._test_CosineAnnealingWarmRestarts(scheduler, targets, iters)

    def test_CosineAnnealingWarmRestarts_lr3(self):
        # Non-monotonic (interleaved) epoch sequences, with the matching
        # precomputed (T_cur, T_i) pairs for each T_mult.
        epochs_for_T_mults = [
            [0, 1, 2, 3, 4, 5, 12, 27, 3, 4, 5, 6, 13],
            [0, 1, 2, 3, 4, 5, 25, 32, 33, 34, 80, 81, 3],
            [0, 0.1, 0.2, 0.3, 1.3, 2.3, 17.5, 18.5, 19.5, 29.5, 30.5, 31.5, 50],
        ]
        T_curs_for_T_mults = [
            [1, 2, 3, 4, 5, 2, 7, 3, 4, 5, 6, 3],
            [1, 2, 3, 4, 5, 15, 2, 3, 4, 10, 11, 3],
            [0.1, 0.2, 0.3, 1.3, 2.3, 7.5, 8.5, 9.5, 19.5, 20.5, 21.5, 10],
        ]
        T_is_for_T_mults = [
            [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10],
            [10, 10, 10, 10, 10, 20, 40, 40, 40, 80, 80, 10],
            [10, 10, 10, 10, 10, 30, 30, 30, 30, 30, 30, 90],
        ]
        eta_min = 1e-10
        T_mults = [1, 2, 3]
        for epochs, T_mult, T_curs, T_is in zip(
            epochs_for_T_mults, T_mults, T_curs_for_T_mults, T_is_for_T_mults
        ):
            targets = [[0.05], [0.5]]
            scheduler = CosineAnnealingWarmRestarts(
                self.opt, T_0=10, T_mult=T_mult, eta_min=eta_min
            )
            for T_cur, T_i in zip(T_curs, T_is):
                targets[0] += [
                    eta_min
                    + (0.05 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
                ]
                targets[1] += [
                    eta_min
                    + (0.5 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
                ]
            self._test_interleaved_CosineAnnealingWarmRestarts(
                scheduler, targets, epochs
            )

    def test_CosineAnnealingWarmRestarts_T_cur_reset(self):
        # After a step that crosses T_0, T_cur must wrap modulo T_0.
        sch = CosineAnnealingWarmRestarts(self.opt, T_0=4)
        for epoch in [7, 8, 9]:
            sch.T_cur = epoch
            sch.step()
            expect_T_cur = (epoch + 1) % sch.T_0
            self.assertEqual(sch.T_cur, expect_T_cur)

    def test_swalr_no_anneal(self):
        epochs, swa_start, swa_lr = 10, 5, 0.01
        initial_lrs = [group["lr"] for group in self.opt.param_groups]
        # anneal_epochs=1 -> the lr jumps straight to swa_lr after swa_start.
        targets = [
            [lr] * (swa_start + 1) + [swa_lr] * (epochs - swa_start - 1)
            for lr in initial_lrs
        ]
        swa_scheduler = SWALR(self.opt, anneal_epochs=1, swa_lr=swa_lr)
        self._test_swalr(swa_scheduler, None, targets, swa_start, epochs)

    def test_swalr_cosine_anneal_after_multiplicative(self):
        # same swa_lr for different param_groups
        epochs, swa_start, swa_lr, anneal_epochs = 15, 5, 0.01, 5
        mult_factor = 0.9
        scheduler = MultiplicativeLR(self.opt, lr_lambda=lambda epoch: mult_factor)
        swa_scheduler = SWALR(self.opt, anneal_epochs=anneal_epochs, swa_lr=swa_lr)

        def anneal_coef(t):
            # Cosine interpolation weight from the pre-SWA lr down to swa_lr.
            if t + 1 >= anneal_epochs:
                return 0.0
            return (1 + math.cos(math.pi * (t + 1) / anneal_epochs)) / 2

        initial_lrs = [group["lr"] for group in self.opt.param_groups]
        targets_before_swa = [
            [lr * mult_factor**i for i in range(swa_start + 1)] for lr in initial_lrs
        ]
        swa_epochs = epochs - swa_start - 1
        targets = [
            lrs
            + [
                lrs[-1] * anneal_coef(t) + swa_lr * (1 - anneal_coef(t))
                for t in range(swa_epochs)
            ]
            for lrs in targets_before_swa
        ]

        self._test_swalr(swa_scheduler, scheduler, targets, swa_start, epochs)

    def test_swalr_linear_anneal_after_multiplicative(self):
        # separate swa_lr for different param_groups
        epochs, swa_start, swa_lrs, anneal_epochs = 15, 5, [0.01, 0.02], 4
        mult_factor = 0.9
        scheduler = MultiplicativeLR(self.opt, lr_lambda=lambda epoch: mult_factor)
        swa_scheduler = SWALR(
            self.opt,
            anneal_epochs=anneal_epochs,
            anneal_strategy="linear",
            swa_lr=swa_lrs,
        )

        def anneal_coef(t):
            # Linear interpolation weight from the pre-SWA lr down to swa_lr.
            if t + 1 >= anneal_epochs:
                return 0.0
            return 1 - (t + 1) / anneal_epochs

        initial_lrs = [group["lr"] for group in self.opt.param_groups]
        targets_before_swa = [
            [lr * mult_factor**i for i in range(swa_start + 1)] for lr in initial_lrs
        ]
        swa_epochs = epochs - swa_start - 1
        targets = [
            lrs
            + [
                lrs[-1] * anneal_coef(t) + swa_lr * (1 - anneal_coef(t))
                for t in range(swa_epochs)
            ]
            for lrs, swa_lr in zip(targets_before_swa, swa_lrs)
        ]

        self._test_swalr(swa_scheduler, scheduler, targets, swa_start, epochs)

    def _test_swalr(self, swa_scheduler, scheduler, targets, swa_start, epochs):
        """Run `scheduler` until `swa_start`, then `swa_scheduler`, checking
        per-group lrs against `targets` each epoch."""
        for epoch in range(epochs):
            for param_group, target in zip(self.opt.param_groups, targets):
                self.assertEqual(
                    target[epoch],
                    param_group["lr"],
                    msg="LR is wrong in epoch {}: expected {}, got {}".format(
                        epoch, target[epoch], param_group["lr"]
                    ),
                    atol=1e-5,
                    rtol=0,
                )
            if epoch >= swa_start:
                self.opt.step()
                swa_scheduler.step()
            elif scheduler is not None:
                self.opt.step()
                scheduler.step()

    def test_swalr_hypers(self):
        # Test that SWALR raises errors for incorrect hyper-parameters
        with self.assertRaisesRegex(ValueError, "anneal_strategy must"):
            swa_scheduler = SWALR(self.opt, anneal_strategy="exponential", swa_lr=1.0)

        with self.assertRaisesRegex(ValueError, "anneal_epochs must"):
            swa_scheduler = SWALR(self.opt, anneal_epochs=-1, swa_lr=1.0)
        with self.assertRaisesRegex(ValueError, "anneal_epochs must"):
            swa_scheduler = SWALR(self.opt, anneal_epochs=1.7, swa_lr=1.0)
        with self.assertRaisesRegex(ValueError, "swa_lr must"):
            # swa_lr list length must match the number of param groups.
            swa_scheduler = SWALR(self.opt, swa_lr=[1.0, 0.1, 0.01])

    def test_step_lr_state_dict(self):
        self._check_scheduler_state_dict(
            lambda: StepLR(self.opt, gamma=0.1, step_size=3),
            lambda: StepLR(self.opt, gamma=0.01 / 2, step_size=1),
        )

    def test_multi_step_lr_state_dict(self):
        self._check_scheduler_state_dict(
            lambda: MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9]),
            lambda: MultiStepLR(self.opt, gamma=0.01, milestones=[1, 4, 6]),
        )

    def test_exp_step_lr_state_dict(self):
        self._check_scheduler_state_dict(
            lambda: ExponentialLR(self.opt, gamma=0.1),
            lambda: ExponentialLR(self.opt, gamma=0.01),
        )

    def test_cosine_lr_state_dict(self):
        epochs = 10
        eta_min = 1e-10
        self._check_scheduler_state_dict(
            lambda: CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min),
            lambda: CosineAnnealingLR(self.opt, T_max=epochs // 2, eta_min=eta_min / 2),
            epochs=epochs,
        )

    def test_reduce_lr_on_plateau_state_dict(self):
        scheduler = ReduceLROnPlateau(self.opt, mode="min", factor=0.1, patience=2)
        for score in [1.0, 2.0, 3.0, 4.0, 3.0, 4.0, 5.0, 3.0, 2.0, 1.0]:
            scheduler.step(score)
        scheduler_copy = ReduceLROnPlateau(
            self.opt, mode="max", factor=0.5, patience=10
        )
        scheduler_copy.load_state_dict(scheduler.state_dict())
        # "is_better" is a bound comparison function and is excluded from the
        # equality check along with the optimizer itself.
        for key in scheduler.__dict__:
            if key not in {"optimizer", "is_better"}:
                self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])

    def test_lambda_lr_state_dict_fn(self):
        # Plain functions are not serialized into the state dict.
        scheduler = LambdaLR(self.opt, lr_lambda=lambda x: x)
        state = scheduler.state_dict()
        self.assertIsNone(state["lr_lambdas"][0])

        scheduler_copy = LambdaLR(self.opt, lr_lambda=lambda x: x)
        scheduler_copy.load_state_dict(state)
        for key in scheduler.__dict__:
            if key not in {"optimizer", "lr_lambdas"}:
                self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])

    def test_lambda_lr_state_dict_obj(self):
        # Callable objects (with __dict__ state) ARE serialized.
        scheduler = LambdaLR(self.opt, lr_lambda=self.LambdaLRTestObject(10))
        state = scheduler.state_dict()
        self.assertIsNotNone(state["lr_lambdas"][0])

        scheduler_copy = LambdaLR(self.opt, lr_lambda=self.LambdaLRTestObject(-1))
        scheduler_copy.load_state_dict(state)
        for key in scheduler.__dict__:
            if key not in {"optimizer"}:
                self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])

    def test_CosineAnnealingWarmRestarts_lr_state_dict(self):
        self._check_scheduler_state_dict(
            lambda: CosineAnnealingWarmRestarts(self.opt, T_0=10, T_mult=2),
            lambda: CosineAnnealingWarmRestarts(self.opt, T_0=100),
        )

    def test_swa_lr_state_dict(self):
        self._check_scheduler_state_dict(
            lambda: SWALR(self.opt, anneal_epochs=3, swa_lr=0.5),
            lambda: SWALR(
                self.opt, anneal_epochs=10, anneal_strategy="linear", swa_lr=5.0
            ),
        )

    def _check_scheduler_state_dict(self, constr, constr2, epochs=10):
        """Step a scheduler from `constr`, load its state into a differently
        configured scheduler from `constr2`, and assert full state equality."""
        scheduler = constr()
        for _ in range(epochs):
            scheduler.optimizer.step()
            scheduler.step()
        scheduler_copy = constr2()
        scheduler_copy.load_state_dict(scheduler.state_dict())
        for key in scheduler.__dict__:
            if key != "optimizer":
                self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])
        self.assertEqual(scheduler.get_last_lr(), scheduler_copy.get_last_lr())

    def _test_get_last_lr(self, schedulers, targets, epochs=10):
        """Check get_last_lr() (queried BEFORE each step) against `targets`."""
        if isinstance(schedulers, LRScheduler):
            schedulers = [schedulers]
        optimizers = {scheduler.optimizer for scheduler in schedulers}
        for epoch in range(epochs):
            result = [scheduler.get_last_lr() for scheduler in schedulers]
            [optimizer.step() for optimizer in optimizers]
            [scheduler.step() for scheduler in schedulers]
            target = [[t[epoch] for t in targets]] * len(schedulers)
            for t, r in zip(target, result):
                self.assertEqual(
                    t,
                    r,
                    msg=f"LR is wrong in epoch {epoch}: expected {t}, got {r}",
                    atol=1e-5,
                    rtol=0,
                )

    def _test_with_epoch(self, schedulers, targets, epochs=10):
        """Drive schedulers via the deprecated step(epoch) form, asserting the
        deprecation warning fires and lrs match `targets`."""
        if isinstance(schedulers, LRScheduler):
            schedulers = [schedulers]
        optimizers = {scheduler.optimizer for scheduler in schedulers}
        for epoch in range(epochs):
            [optimizer.step() for optimizer in optimizers]
            with warnings.catch_warnings(record=True) as w:
                [
                    scheduler.step(epoch) for scheduler in schedulers
                ]  # step before assert: skip initial lr
                self._check_warning_is_epoch_deprecation_warning(
                    w, num_warnings=len(schedulers)
                )
            for param_group, target in zip(self.opt.param_groups, targets):
                self.assertEqual(
                    target[epoch],
                    param_group["lr"],
                    msg="LR is wrong in epoch {}: expected {}, got {}".format(
                        epoch, target[epoch], param_group["lr"]
                    ),
                    atol=1e-5,
                    rtol=0,
                )

    def _test(self, schedulers, targets, epochs=10):
        """Assert per-group lrs match `targets` each epoch, then step all
        schedulers (lr is checked BEFORE the step, i.e. the initial lr too)."""
        if isinstance(schedulers, LRScheduler):
            schedulers = [schedulers]
        for epoch in range(epochs):
            for param_group, target in zip(self.opt.param_groups, targets):
                self.assertEqual(
                    target[epoch],
                    param_group["lr"],
                    msg="LR is wrong in epoch {}: expected {}, got {}".format(
                        epoch, target[epoch], param_group["lr"]
                    ),
                    atol=1e-5,
                    rtol=0,
                )
            [scheduler.step() for scheduler in schedulers]

    def _test_CosineAnnealingWarmRestarts(self, scheduler, targets, epochs=10):
        """Step with fractional epochs (0.1 increments) and check lrs."""
        for index, epoch in enumerate(torch.arange(0, epochs, 0.1)):
            epoch = round(epoch.item(), 1)
            scheduler.step(epoch)
            for param_group, target in zip(self.opt.param_groups, targets):
                self.assertEqual(
                    target[index],
                    param_group["lr"],
                    msg="LR is wrong in epoch {}: expected {}, got {}".format(
                        epoch, target[index], param_group["lr"]
                    ),
                    atol=1e-5,
                    rtol=0,
                )

    def _test_interleaved_CosineAnnealingWarmRestarts(self, scheduler, targets, epochs):
        """Step with an explicit (possibly non-monotonic) epoch sequence."""
        for index, epoch in enumerate(epochs):
            scheduler.step(epoch)
            for param_group, target in zip(self.opt.param_groups, targets):
                self.assertEqual(
                    target[index],
                    param_group["lr"],
                    msg="LR is wrong in epoch {}: expected {}, got {}".format(
                        epoch, target[index], param_group["lr"]
                    ),
                    atol=1e-5,
                    rtol=0,
                )

    def _test_against_closed_form(self, scheduler, closed_form_scheduler, epochs=10):
self.setUp() targets = [] for epoch in range(epochs): closed_form_scheduler.optimizer.step() with warnings.catch_warnings(record=True) as w: closed_form_scheduler.step(epoch) self._check_warning_is_epoch_deprecation_warning(w) targets.append([group["lr"] for group in self.opt.param_groups]) self.setUp() for epoch in range(epochs): self.opt.step() scheduler.step() for i, param_group in enumerate(self.opt.param_groups): self.assertEqual( targets[epoch][i], param_group["lr"], msg="LR is wrong in epoch {}: expected {}, got {}".format( epoch, targets[epoch][i], param_group["lr"] ), atol=1e-5, rtol=0, ) def _test_reduce_lr_on_plateau( self, schedulers, targets, metrics, epochs=10, verbose=False ): if isinstance(schedulers, (LRScheduler, ReduceLROnPlateau)): schedulers = [schedulers] for epoch in range(epochs): self.opt.step() for scheduler in schedulers: if isinstance(scheduler, ReduceLROnPlateau): scheduler.step(metrics[epoch]) else: scheduler.step() if verbose: print("epoch{}:\tlr={}".format(epoch, self.opt.param_groups[0]["lr"])) for param_group, target in zip(self.opt.param_groups, targets): self.assertEqual( target[epoch], param_group["lr"], msg="LR is wrong in epoch {}: expected {}, got {}".format( epoch, target[epoch], param_group["lr"] ), atol=1e-5, rtol=0, ) def _test_cycle_lr( self, scheduler, lr_targets, momentum_targets, batch_iterations, verbose=False, use_beta1=False, ): for batch_num in range(batch_iterations): if verbose: if "momentum" in self.opt.param_groups[0]: print( "batch{}:\tlr={},momentum={}".format( batch_num, self.opt.param_groups[0]["lr"], self.opt.param_groups[0]["momentum"], ) ) elif use_beta1 and "betas" in self.opt.param_groups[0]: print( "batch{}:\tlr={},beta1={}".format( batch_num, self.opt.param_groups[0]["lr"], self.opt.param_groups[0]["betas"][0], ) ) else: print( "batch{}:\tlr={}".format( batch_num, self.opt.param_groups[0]["lr"] ) ) for param_group, lr_target, momentum_target in zip( self.opt.param_groups, lr_targets, 
momentum_targets ): self.assertEqual( lr_target[batch_num], param_group["lr"], msg="LR is wrong in batch_num {}: expected {}, got {}".format( batch_num, lr_target[batch_num], param_group["lr"] ), atol=1e-5, rtol=0, ) if use_beta1 and "betas" in param_group: self.assertEqual( momentum_target[batch_num], param_group["betas"][0], msg="Beta1 is wrong in batch_num {}: expected {}, got {}".format( batch_num, momentum_target[batch_num], param_group["betas"][0], ), atol=1e-5, rtol=0, ) elif "momentum" in param_group: self.assertEqual( momentum_target[batch_num], param_group["momentum"], msg="Momentum is wrong in batch_num {}: expected {}, got {}".format( batch_num, momentum_target[batch_num], param_group["momentum"], ), atol=1e-5, rtol=0, ) self.opt.step() scheduler.step() def test_cosine_then_cyclic(self): # https://github.com/pytorch/pytorch/issues/21965 max_lr = 0.3 base_lr = 0.1 optim_lr = 0.5 model = torch.nn.Linear(2, 1) optimizer = SGD(model.parameters(), lr=optim_lr) lr_scheduler_1 = torch.optim.lr_scheduler.CosineAnnealingLR( optimizer, T_max=20, eta_min=0.1 ) lr_scheduler_2 = torch.optim.lr_scheduler.CyclicLR( optimizer, base_lr=base_lr, max_lr=max_lr, step_size_up=1, step_size_down=3 ) for i in range(40): optimizer.step() if i <= lr_scheduler_1.T_max: lr_scheduler_1.step() else: lr_scheduler_2.step() last_lr = optimizer.param_groups[0]["lr"] self.assertLessEqual(last_lr, max_lr) @parametrize( "LRClass", [ partial(LambdaLR, lr_lambda=lambda e: e // 10), partial(MultiplicativeLR, lr_lambda=lambda: 0.95), partial(StepLR, step_size=30), partial(MultiStepLR, milestones=[30, 80]), ConstantLR, LinearLR, partial(ExponentialLR, gamma=0.9), PolynomialLR, partial(CosineAnnealingLR, T_max=10), lambda opt, **kwargs: ChainedScheduler( schedulers=[ConstantLR(opt), ConstantLR(opt)], **kwargs ), lambda opt, **kwargs: SequentialLR( opt, schedulers=[ConstantLR(opt), ConstantLR(opt)], milestones=[2], **kwargs, ), ReduceLROnPlateau, partial(CyclicLR, base_lr=0.01, max_lr=0.1), 
partial(OneCycleLR, max_lr=0.01, total_steps=10, anneal_strategy="linear"), partial(CosineAnnealingWarmRestarts, T_0=20), partial(SWALR, swa_lr=0.01), ], ) @parametrize("weights_only", [True, False]) def test_lr_scheduler_state_dict_load(self, LRClass, weights_only): scheduler = LRClass(self.opt) state_dict = scheduler.state_dict() with tempfile.TemporaryFile() as f: torch.save(state_dict, f) f.seek(0) state_dict_loaded = torch.load(f, weights_only=weights_only) self.assertEqual(state_dict, state_dict_loaded) # Make sure state_dict can be loaded scheduler2 = LRClass(self.opt) scheduler2.load_state_dict(state_dict_loaded) self.assertEqual(scheduler2.state_dict(), state_dict) @parametrize("min_lr", ["scalar", "list"]) def test_add_param_group_does_not_break_reduce_lr_on_plateau(self, min_lr): epochs = 20 for param_group in self.opt.param_groups: param_group["lr"] = 0.5 targets = [[0.5] * 6 + [0.05] * (5 + 6) + [0.005] * 4] metrics = [1] * 7 + [0.6] + [0.5] * 12 scheduler = ReduceLROnPlateau( self.opt, mode="min", threshold_mode="rel", threshold=0.1, patience=5, cooldown=5, min_lr=0 if min_lr == "scalar" else [1e-5, 1e-4], ) for epoch in range(epochs): # Point is to test the use case in #104361 if epoch == 8: param = torch.nn.Parameter(torch.rand(2, 3)) self.opt.add_param_group({"params": [param], "lr": 0.05}) if min_lr == "list": scheduler.min_lrs.append(1e-6) self.opt.step() scheduler.step(metrics[epoch]) for param_group, target in zip(self.opt.param_groups, targets): self.assertEqual( target[epoch], param_group["lr"], msg="LR is wrong in epoch {}: expected {}, got {}".format( epoch, target[epoch], param_group["lr"] ), atol=1e-5, rtol=0, ) def test_add_param_group_errors_reduce_lr_on_plateau(self): scheduler = ReduceLROnPlateau( self.opt, mode="min", threshold_mode="rel", threshold=1e-5, patience=0, cooldown=0, min_lr=[1e-5, 1e-4], ) param = torch.nn.Parameter(torch.rand(2, 3)) self.opt.add_param_group({"params": [param], "lr": 0.05}) self.opt.step() 
scheduler.step(1) with self.assertRaisesRegex(RuntimeError, "The number of param groups in the"): self.opt.step() scheduler.step(1.3) @parametrize( "LRClass", [ partial(LambdaLR, lr_lambda=lambda e: e // 10), partial(MultiplicativeLR, lr_lambda=lambda e: 0.95), partial(StepLR, step_size=30), partial(MultiStepLR, milestones=[30, 80]), ConstantLR, LinearLR, partial(ExponentialLR, gamma=0.9), PolynomialLR, partial(CosineAnnealingLR, T_max=10), partial(CosineAnnealingWarmRestarts, T_0=20), ], ) def test_constant_initial_lr(self, LRClass): # Test that the initial learning rate is constant and that it does not alias base_lrs lr = torch.as_tensor(0.1) opt = SGD([torch.nn.Parameter(torch.randn(1))], lr=lr) sch = LRClass(opt) ori_param_groups = copy.deepcopy(opt.param_groups) for i in range(2): opt.step() sch.step(i) lr.multiply_(0.1) for group, ori_group in zip(opt.param_groups, ori_param_groups): self.assertEqual(group["initial_lr"], ori_group["initial_lr"]) self.assertEqual(sch.base_lrs, [0.1]) self.assertIsNot(sch.base_lrs[0], group["initial_lr"]) def test_constant_initial_params_cyclelr(self): # Test that the initial learning rate is constant lr = torch.as_tensor(0.1) max_lr = torch.as_tensor(0.2) base_momentum = torch.as_tensor(0.8) max_momentum = torch.as_tensor(0.9) opt = SGD([torch.nn.Parameter(torch.randn(1))], lr=lr) sch = CyclicLR( opt, base_lr=lr, max_lr=max_lr, base_momentum=base_momentum, max_momentum=max_momentum, ) ori_param_groups = copy.deepcopy(opt.param_groups) for i in range(2): lr.multiply_(0.5) max_lr.multiply_(0.5) base_momentum.multiply_(0.5) max_momentum.multiply_(0.5) opt.step() sch.step(i) for group, ori_group in zip(opt.param_groups, ori_param_groups): self.assertEqual(group["initial_lr"], ori_group["initial_lr"]) self.assertEqual(group["max_momentum"], ori_group["max_momentum"]) self.assertEqual(group["base_momentum"], ori_group["base_momentum"]) self.assertEqual(sch.base_lrs, [0.1]) self.assertEqual(sch.max_lrs, [0.2]) 
self.assertEqual(group["max_momentum"], 0.9) self.assertEqual(group["base_momentum"], 0.8) def test_constant_initial_params_onecyclelr(self): # Test that the initial learning rate is constant lr = torch.as_tensor(0.1) base_momentum = torch.as_tensor(0.85) max_momentum = torch.as_tensor(0.95) opt = SGD([torch.nn.Parameter(torch.randn(1))], lr=lr) sch = OneCycleLR( opt, max_lr=lr, total_steps=10, base_momentum=base_momentum, max_momentum=max_momentum, ) ori_param_groups = copy.deepcopy(opt.param_groups) for i in range(2): lr.multiply_(0.5) base_momentum.multiply_(0.5) max_momentum.multiply_(0.5) opt.step() sch.step(i) for group, ori_group in zip(opt.param_groups, ori_param_groups): self.assertEqual(group["initial_lr"], ori_group["initial_lr"]) self.assertEqual(group["max_lr"], ori_group["max_lr"]) self.assertEqual(group["min_lr"], ori_group["min_lr"]) self.assertEqual(group["max_momentum"], ori_group["max_momentum"]) self.assertEqual(group["base_momentum"], ori_group["base_momentum"]) self.assertEqual(group["max_momentum"], 0.95) self.assertEqual(group["base_momentum"], 0.85) def test_constant_initial_params_swalr(self): # Test that the initial learning rate is constant lr = torch.as_tensor(0.1) swa_lr = torch.as_tensor(0.05) opt = SGD([torch.nn.Parameter(torch.randn(1))], lr=lr) sch = SWALR(opt, swa_lr=swa_lr) ori_param_groups = copy.deepcopy(opt.param_groups) for _ in range(2): lr.multiply_(0.5) swa_lr.multiply_(0.5) opt.step() sch.step() for group, ori_group in zip(opt.param_groups, ori_param_groups): self.assertEqual(group["initial_lr"], ori_group["initial_lr"]) self.assertEqual(group["swa_lr"], ori_group["swa_lr"]) self.assertEqual(group["swa_lr"], 0.05) self.assertEqual(sch.base_lrs, [0.1]) @parametrize( "LRClass", [ partial(ExponentialLR, gamma=0.999), partial(LambdaLR, lr_lambda=lambda epoch: epoch // 30), partial(MultiplicativeLR, lr_lambda=lambda epoch: 0.95), partial(StepLR, step_size=30), partial(MultiStepLR, milestones=[30, 80]), ConstantLR, LinearLR, 
PolynomialLR, partial(CosineAnnealingLR, T_max=10), partial(CosineAnnealingWarmRestarts, T_0=20), partial(CyclicLR, base_lr=0.01, max_lr=0.1), partial(OneCycleLR, max_lr=0.01, total_steps=10), partial(SWALR, swa_lr=0.01), ], ) def test_lr_scheduler_checkpoint(self, LRClass): model = torch.nn.Linear(3, 3) optim = torch.optim.AdamW(model.parameters()) sch = LRClass(optim) optim.step() sch.step() optim2 = torch.optim.AdamW(model.parameters()) optim2.load_state_dict(optim.state_dict()) sch2 = LRClass(optim2, last_epoch=0) self.assertEqual( sch2._get_closed_form_lr()[0] if hasattr(self, "_get_closed_form_lr") else sch2.get_last_lr()[0], optim.param_groups[0]["lr"], ) def test_lr_scheduler_checkpoint_on_plateau(self): model = torch.nn.Linear(3, 3) optim = torch.optim.AdamW(model.parameters()) sch = ReduceLROnPlateau(optim, mode="min") optim.step() sch.step(1) optim2 = torch.optim.AdamW(model.parameters()) optim2.load_state_dict(optim.state_dict()) sch2 = ReduceLROnPlateau(optim2, mode="min") self.assertEqual( sch2._get_closed_form_lr()[0] if hasattr(self, "_get_closed_form_lr") else sch2.get_last_lr()[0], optim.param_groups[0]["lr"], ) instantiate_parametrized_tests(TestLRScheduler) if __name__ == "__main__": print("These tests should be run through test/test_optim.py instead")
TestLRScheduler
python
PyCQA__pylint
tests/functional/t/too/too_few_public_methods_37.py
{ "start": 934, "end": 992 }
class ____: x: float y: float @frozen
AttrsBarePoint
python
run-llama__llama_index
llama-index-core/llama_index/core/data_structs/data_structs.py
{ "start": 6440, "end": 6709 }
class ____(IndexDict): """A simple dictionary of documents, but loads a MultiModelVectorStore.""" @classmethod def get_type(cls) -> IndexStructType: """Get type.""" return IndexStructType.MULTIMODAL_VECTOR_STORE @dataclass
MultiModelIndexDict
python
fluentpython__example-code-2e
17-it-generator/sentence_iter.py
{ "start": 524, "end": 1520 }
class ____: def __init__(self, words): self.words = words # <3> self.index = 0 # <4> def __next__(self): try: word = self.words[self.index] # <5> except IndexError: raise StopIteration() # <6> self.index += 1 # <7> return word # <8> def __iter__(self): # <9> return self # end::SENTENCE_ITER[] def main(): import sys import warnings try: filename = sys.argv[1] word_number = int(sys.argv[2]) except (IndexError, ValueError): print(f'Usage: {sys.argv[0]} <file-name> <word-number>') sys.exit(2) # command line usage error with open(filename, 'rt', encoding='utf-8') as text_file: s = Sentence(text_file.read()) for n, word in enumerate(s, 1): if n == word_number: print(word) break else: warnings.warn(f'last word is #{n}, {word!r}') if __name__ == '__main__': main()
SentenceIterator
python
huggingface__transformers
tests/models/dpt/test_modeling_dpt_hybrid.py
{ "start": 11219, "end": 12191 }
class ____(unittest.TestCase): def test_inference_depth_estimation(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas") model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the predicted depth expected_shape = torch.Size((1, 384, 384)) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device) torch.testing.assert_close(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, rtol=1e-4, atol=1e-4)
DPTModelIntegrationTest
python
streamlit__streamlit
lib/streamlit/elements/widgets/radio.py
{ "start": 2054, "end": 2623 }
class ____(Generic[T]): options: Sequence[T] index: int | None def serialize(self, v: object) -> int | None: if v is None: return None return 0 if len(self.options) == 0 else index_(self.options, v) def deserialize(self, ui_value: int | None) -> T | None: idx = ui_value if ui_value is not None else self.index return ( self.options[idx] if idx is not None and len(self.options) > 0 and self.options[idx] is not None else None )
RadioSerde
python
fastai__fastai
fastai/text/models/awdlstm.py
{ "start": 3148, "end": 4040 }
class ____(Module): "Apply dropout with probability `embed_p` to an embedding layer `emb`." def __init__(self, emb:nn.Embedding, # Wrapped embedding layer embed_p:float # Embdedding layer dropout probability ): self.emb,self.embed_p = emb,embed_p def forward(self, words, scale=None): if self.training and self.embed_p != 0: size = (self.emb.weight.size(0),1) mask = dropout_mask(self.emb.weight.data, size, self.embed_p) masked_embed = self.emb.weight * mask else: masked_embed = self.emb.weight if scale: masked_embed.mul_(scale) return F.embedding(words, masked_embed, ifnone(self.emb.padding_idx, -1), self.emb.max_norm, self.emb.norm_type, self.emb.scale_grad_by_freq, self.emb.sparse) # %% ../../../nbs/32_text.models.awdlstm.ipynb 17
EmbeddingDropout
python
getsentry__sentry
src/sentry/apidocs/examples/explore_saved_query_examples.py
{ "start": 4052, "end": 4773 }
class ____: EXPLORE_SAVED_QUERY_GET_RESPONSE = [ OpenApiExample( "Explore Saved Query GET response", value=EXPLORE_SAVED_QUERY_OBJ, status_codes=["200"], response_only=True, ) ] EXPLORE_SAVED_QUERY_POST_RESPONSE = [ OpenApiExample( "Create Explore Saved Query", value=EXPLORE_SAVED_QUERY_OBJ, status_codes=["201"], response_only=True, ) ] EXPLORE_SAVED_QUERIES_QUERY_RESPONSE = [ OpenApiExample( "Get Explore Saved Queries", value=SAVED_QUERIES, status_codes=["200"], response_only=True, ) ]
ExploreExamples
python
great-expectations__great_expectations
great_expectations/_version.py
{ "start": 1097, "end": 1608 }
class ____: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "" cfg.parentdir_prefix = "great_expectations-" cfg.versionfile_source = "great_expectations/_version.py" cfg.verbose = False return cfg
VersioneerConfig
python
redis__redis-py
tests/test_cache.py
{ "start": 32417, "end": 43995 }
class ____: def test_get_eviction_policy(self): cache = DefaultCache(CacheConfig(max_size=5)) assert isinstance(cache.eviction_policy, LRUPolicy) def test_get_max_size(self): cache = DefaultCache(CacheConfig(max_size=5)) assert cache.config.get_max_size() == 5 def test_get_size(self): cache = DefaultCache(CacheConfig(max_size=5)) assert cache.size == 0 @pytest.mark.parametrize( "cache_key", [{"command": "GET", "redis_keys": ("bar",)}], indirect=True ) def test_set_non_existing_cache_key(self, cache_key, mock_connection): cache = DefaultCache(CacheConfig(max_size=5)) assert cache.set( CacheEntry( cache_key=cache_key, cache_value=b"val", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert cache.get(cache_key).cache_value == b"val" @pytest.mark.parametrize( "cache_key", [{"command": "GET", "redis_keys": ("bar",)}], indirect=True ) def test_set_updates_existing_cache_key(self, cache_key, mock_connection): cache = DefaultCache(CacheConfig(max_size=5)) assert cache.set( CacheEntry( cache_key=cache_key, cache_value=b"val", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert cache.get(cache_key).cache_value == b"val" cache.set( CacheEntry( cache_key=cache_key, cache_value=b"new_val", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert cache.get(cache_key).cache_value == b"new_val" @pytest.mark.parametrize( "cache_key", [{"command": "HRANDFIELD", "redis_keys": ("bar",)}], indirect=True ) def test_set_does_not_store_not_allowed_key(self, cache_key, mock_connection): cache = DefaultCache(CacheConfig(max_size=5)) assert not cache.set( CacheEntry( cache_key=cache_key, cache_value=b"val", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) def test_set_evict_lru_cache_key_on_reaching_max_size(self, mock_connection): cache = DefaultCache(CacheConfig(max_size=3)) cache_key1 = CacheKey( command="GET", redis_keys=("foo",), redis_args=("GET", "foo") ) cache_key2 = CacheKey( command="GET", 
redis_keys=("foo1",), redis_args=("GET", "foo1") ) cache_key3 = CacheKey( command="GET", redis_keys=("foo2",), redis_args=("GET", "foo2") ) # Set 3 different keys assert cache.set( CacheEntry( cache_key=cache_key1, cache_value=b"bar", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert cache.set( CacheEntry( cache_key=cache_key2, cache_value=b"bar1", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert cache.set( CacheEntry( cache_key=cache_key3, cache_value=b"bar2", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) # Accessing key in the order that it makes 2nd key LRU assert cache.get(cache_key1).cache_value == b"bar" assert cache.get(cache_key2).cache_value == b"bar1" assert cache.get(cache_key3).cache_value == b"bar2" assert cache.get(cache_key1).cache_value == b"bar" cache_key4 = CacheKey( command="GET", redis_keys=("foo3",), redis_args=("GET", "foo3") ) assert cache.set( CacheEntry( cache_key=cache_key4, cache_value=b"bar3", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) # Make sure that new key was added and 2nd is evicted assert cache.get(cache_key4).cache_value == b"bar3" assert cache.get(cache_key2) is None @pytest.mark.parametrize( "cache_key", [{"command": "GET", "redis_keys": ("bar",)}], indirect=True ) def test_get_return_correct_value(self, cache_key, mock_connection): cache = DefaultCache(CacheConfig(max_size=5)) assert cache.set( CacheEntry( cache_key=cache_key, cache_value=b"val", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert cache.get(cache_key).cache_value == b"val" wrong_key = CacheKey( command="HGET", redis_keys=("foo",), redis_args=("HGET", "foo", "bar") ) assert cache.get(wrong_key) is None result = cache.get(cache_key) assert cache.set( CacheEntry( cache_key=cache_key, cache_value=b"new_val", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) # Make sure that result is immutable. 
assert result.cache_value != cache.get(cache_key).cache_value def test_delete_by_cache_keys_removes_associated_entries(self, mock_connection): cache = DefaultCache(CacheConfig(max_size=5)) cache_key1 = CacheKey( command="GET", redis_keys=("foo",), redis_args=("GET", "foo") ) cache_key2 = CacheKey( command="GET", redis_keys=("foo1",), redis_args=("GET", "foo1") ) cache_key3 = CacheKey( command="GET", redis_keys=("foo2",), redis_args=("GET", "foo2") ) cache_key4 = CacheKey( command="GET", redis_keys=("foo3",), redis_args=("GET", "foo3") ) # Set 3 different keys assert cache.set( CacheEntry( cache_key=cache_key1, cache_value=b"bar", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert cache.set( CacheEntry( cache_key=cache_key2, cache_value=b"bar1", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert cache.set( CacheEntry( cache_key=cache_key3, cache_value=b"bar2", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert cache.delete_by_cache_keys([cache_key1, cache_key2, cache_key4]) == [ True, True, False, ] assert len(cache.collection) == 1 assert cache.get(cache_key3).cache_value == b"bar2" def test_delete_by_redis_keys_removes_associated_entries(self, mock_connection): cache = DefaultCache(CacheConfig(max_size=5)) cache_key1 = CacheKey( command="GET", redis_keys=("foo",), redis_args=("GET", "foo") ) cache_key2 = CacheKey( command="GET", redis_keys=("foo1",), redis_args=("GET", "foo1") ) cache_key3 = CacheKey( command="MGET", redis_keys=("foo", "foo3"), redis_args=("MGET", "foo", "foo3"), ) cache_key4 = CacheKey( command="MGET", redis_keys=("foo2", "foo3"), redis_args=("MGET", "foo2", "foo3"), ) # Set 3 different keys assert cache.set( CacheEntry( cache_key=cache_key1, cache_value=b"bar", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert cache.set( CacheEntry( cache_key=cache_key2, cache_value=b"bar1", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert 
cache.set( CacheEntry( cache_key=cache_key3, cache_value=b"bar2", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert cache.set( CacheEntry( cache_key=cache_key4, cache_value=b"bar3", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert cache.delete_by_redis_keys([b"foo", b"foo1"]) == [True, True, True] assert len(cache.collection) == 1 assert cache.get(cache_key4).cache_value == b"bar3" def test_delete_by_redis_keys_with_non_utf8_bytes_key(self, mock_connection): """cache fails to invalidate entries when redis_keys contain non-UTF-8 bytes.""" cache = DefaultCache(CacheConfig(max_size=5)) # Valid UTF-8 key works utf8_key = b"foo" utf8_cache_key = CacheKey(command="GET", redis_keys=(utf8_key,)) assert cache.set( CacheEntry( cache_key=utf8_cache_key, cache_value=b"bar", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) # Non-UTF-8 bytes key bad_key = b"f\xffoo" bad_cache_key = CacheKey(command="GET", redis_keys=(bad_key,)) assert cache.set( CacheEntry( cache_key=bad_cache_key, cache_value=b"bar2", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) # Delete both keys: utf8 should succeed, non-utf8 exposes bug results = cache.delete_by_redis_keys([utf8_key, bad_key]) assert results[0] is True assert results[1] is True, "Cache did not remove entry for non-UTF8 bytes key" def test_flush(self, mock_connection): cache = DefaultCache(CacheConfig(max_size=5)) cache_key1 = CacheKey( command="GET", redis_keys=("foo",), redis_args=("GET", "foo") ) cache_key2 = CacheKey( command="GET", redis_keys=("foo1",), redis_args=("GET", "foo1") ) cache_key3 = CacheKey( command="GET", redis_keys=("foo2",), redis_args=("GET", "foo2") ) # Set 3 different keys assert cache.set( CacheEntry( cache_key=cache_key1, cache_value=b"bar", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert cache.set( CacheEntry( cache_key=cache_key2, cache_value=b"bar1", status=CacheEntryStatus.VALID, 
connection_ref=mock_connection, ) ) assert cache.set( CacheEntry( cache_key=cache_key3, cache_value=b"bar2", status=CacheEntryStatus.VALID, connection_ref=mock_connection, ) ) assert cache.flush() == 3 assert len(cache.collection) == 0
TestUnitDefaultCache
python
scipy__scipy
scipy/linalg/tests/test_decomp_update.py
{ "start": 66294, "end": 66357 }
class ____(BaseQRupdate): dtype = np.dtype('F')
TestQRupdate_F
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/orm/query.py
{ "start": 5719, "end": 117496 }
class ____( _SelectFromElements, SupportsCloneAnnotations, HasPrefixes, HasSuffixes, HasHints, EventTarget, log.Identified, Generative, Executable, Generic[_T], ): """ORM-level SQL construction object. .. legacy:: The ORM :class:`.Query` object is a legacy construct as of SQLAlchemy 2.0. See the notes at the top of :ref:`query_api_toplevel` for an overview, including links to migration documentation. :class:`_query.Query` objects are normally initially generated using the :meth:`~.Session.query` method of :class:`.Session`, and in less common cases by instantiating the :class:`_query.Query` directly and associating with a :class:`.Session` using the :meth:`_query.Query.with_session` method. """ # elements that are in Core and can be cached in the same way _where_criteria: Tuple[ColumnElement[Any], ...] = () _having_criteria: Tuple[ColumnElement[Any], ...] = () _order_by_clauses: Tuple[ColumnElement[Any], ...] = () _group_by_clauses: Tuple[ColumnElement[Any], ...] = () _limit_clause: Optional[ColumnElement[Any]] = None _offset_clause: Optional[ColumnElement[Any]] = None _distinct: bool = False _distinct_on: Tuple[ColumnElement[Any], ...] = () _for_update_arg: Optional[ForUpdateArg] = None _correlate: Tuple[FromClause, ...] = () _auto_correlate: bool = True _from_obj: Tuple[FromClause, ...] = () _setup_joins: Tuple[_SetupJoinsElement, ...] = () _label_style: SelectLabelStyle = SelectLabelStyle.LABEL_STYLE_LEGACY_ORM _memoized_select_entities = () _syntax_extensions: Tuple[SyntaxExtension, ...] = () _compile_options: Union[Type[CacheableOptions], CacheableOptions] = ( _ORMCompileState.default_compile_options ) _with_options: Tuple[ExecutableOption, ...] 
load_options = QueryContext.default_load_options + { "_legacy_uniquing": True } _params: util.immutabledict[str, Any] = util.EMPTY_DICT # local Query builder state, not needed for # compilation or execution _enable_assertions = True _statement: Optional[ExecutableReturnsRows] = None session: Session dispatch: dispatcher[Query[_T]] # mirrors that of ClauseElement, used to propagate the "orm" # plugin as well as the "subject" of the plugin, e.g. the mapper # we are querying against. @util.memoized_property def _propagate_attrs(self) -> _PropagateAttrsType: return util.EMPTY_DICT def __init__( self, entities: Union[ _ColumnsClauseArgument[Any], Sequence[_ColumnsClauseArgument[Any]] ], session: Optional[Session] = None, ): """Construct a :class:`_query.Query` directly. E.g.:: q = Query([User, Address], session=some_session) The above is equivalent to:: q = some_session.query(User, Address) :param entities: a sequence of entities and/or SQL expressions. :param session: a :class:`.Session` with which the :class:`_query.Query` will be associated. Optional; a :class:`_query.Query` can be associated with a :class:`.Session` generatively via the :meth:`_query.Query.with_session` method as well. .. seealso:: :meth:`.Session.query` :meth:`_query.Query.with_session` """ # session is usually present. There's one case in subqueryloader # where it stores a Query without a Session and also there are tests # for the query(Entity).with_session(session) API which is likely in # some old recipes, however these are legacy as select() can now be # used. 
self.session = session # type: ignore self._set_entities(entities) def _set_propagate_attrs(self, values: Mapping[str, Any]) -> Self: self._propagate_attrs = util.immutabledict(values) return self def _set_entities( self, entities: Union[ _ColumnsClauseArgument[Any], Iterable[_ColumnsClauseArgument[Any]] ], ) -> None: self._raw_columns = [ coercions.expect( roles.ColumnsClauseRole, ent, apply_propagate_attrs=self, post_inspect=True, ) for ent in util.to_list(entities) ] @deprecated( "2.1.0", "The :meth:`.Query.tuples` method is deprecated, :class:`.Row` " "now behaves like a tuple and can unpack types directly.", ) def tuples(self: Query[_O]) -> Query[Tuple[_O]]: """return a tuple-typed form of this :class:`.Query`. This method invokes the :meth:`.Query.only_return_tuples` method with a value of ``True``, which by itself ensures that this :class:`.Query` will always return :class:`.Row` objects, even if the query is made against a single entity. It then also at the typing level will return a "typed" query, if possible, that will type result rows as ``Tuple`` objects with typed elements. This method can be compared to the :meth:`.Result.tuples` method, which returns "self", but from a typing perspective returns an object that will yield typed ``Tuple`` objects for results. Typing takes effect only if this :class:`.Query` object is a typed query object already. .. versionadded:: 2.0 .. seealso:: :ref:`change_10635` - describes a migration path from this workaround for SQLAlchemy 2.1. :meth:`.Result.tuples` - v2 equivalent method. 
""" return self.only_return_tuples(True) # type: ignore def _entity_from_pre_ent_zero(self) -> Optional[_InternalEntityType[Any]]: if not self._raw_columns: return None ent = self._raw_columns[0] if "parententity" in ent._annotations: return ent._annotations["parententity"] # type: ignore elif "bundle" in ent._annotations: return ent._annotations["bundle"] # type: ignore else: # label, other SQL expression for element in visitors.iterate(ent): if "parententity" in element._annotations: return element._annotations["parententity"] # type: ignore # noqa: E501 else: return None def _only_full_mapper_zero(self, methname: str) -> Mapper[Any]: if ( len(self._raw_columns) != 1 or "parententity" not in self._raw_columns[0]._annotations or not self._raw_columns[0].is_selectable ): raise sa_exc.InvalidRequestError( "%s() can only be used against " "a single mapped class." % methname ) return self._raw_columns[0]._annotations["parententity"] # type: ignore # noqa: E501 def _set_select_from( self, obj: Iterable[_FromClauseArgument], set_base_alias: bool ) -> None: fa = [ coercions.expect( roles.FromClauseRole, elem, apply_propagate_attrs=self, ) for elem in obj ] self._compile_options += {"_set_base_alias": set_base_alias} self._from_obj = tuple(fa) @_generative def _set_lazyload_from(self, state: InstanceState[Any]) -> Self: self.load_options += {"_lazy_loaded_from": state} return self def _get_condition(self) -> None: """used by legacy BakedQuery""" self._no_criterion_condition("get", order_by=False, distinct=False) def _get_existing_condition(self) -> None: self._no_criterion_assertion("get", order_by=False, distinct=False) def _no_criterion_assertion( self, meth: str, order_by: bool = True, distinct: bool = True ) -> None: if not self._enable_assertions: return if ( self._where_criteria or self._statement is not None or self._from_obj or self._setup_joins or self._limit_clause is not None or self._offset_clause is not None or self._group_by_clauses or (order_by and 
self._order_by_clauses) or (distinct and self._distinct) ): raise sa_exc.InvalidRequestError( "Query.%s() being called on a " "Query with existing criterion. " % meth ) def _no_criterion_condition( self, meth: str, order_by: bool = True, distinct: bool = True ) -> None: self._no_criterion_assertion(meth, order_by, distinct) self._from_obj = self._setup_joins = () if self._statement is not None: self._compile_options += {"_statement": None} self._where_criteria = () self._distinct = False self._order_by_clauses = self._group_by_clauses = () def _no_clauseelement_condition(self, meth: str) -> None: if not self._enable_assertions: return if self._order_by_clauses: raise sa_exc.InvalidRequestError( "Query.%s() being called on a " "Query with existing criterion. " % meth ) self._no_criterion_condition(meth) def _no_statement_condition(self, meth: str) -> None: if not self._enable_assertions: return if self._statement is not None: raise sa_exc.InvalidRequestError( ( "Query.%s() being called on a Query with an existing full " "statement - can't apply criterion." ) % meth ) def _no_limit_offset(self, meth: str) -> None: if not self._enable_assertions: return if self._limit_clause is not None or self._offset_clause is not None: raise sa_exc.InvalidRequestError( "Query.%s() being called on a Query which already has LIMIT " "or OFFSET applied. Call %s() before limit() or offset() " "are applied." 
% (meth, meth) ) @property def _has_row_limiting_clause(self) -> bool: return ( self._limit_clause is not None or self._offset_clause is not None ) def _get_options( self, populate_existing: Optional[bool] = None, version_check: Optional[bool] = None, only_load_props: Optional[Sequence[str]] = None, refresh_state: Optional[InstanceState[Any]] = None, identity_token: Optional[Any] = None, ) -> Self: load_options: Dict[str, Any] = {} compile_options: Dict[str, Any] = {} if version_check: load_options["_version_check"] = version_check if populate_existing: load_options["_populate_existing"] = populate_existing if refresh_state: load_options["_refresh_state"] = refresh_state compile_options["_for_refresh_state"] = True if only_load_props: compile_options["_only_load_props"] = frozenset(only_load_props) if identity_token: load_options["_identity_token"] = identity_token if load_options: self.load_options += load_options if compile_options: self._compile_options += compile_options return self def _clone(self, **kw: Any) -> Self: return self._generate() def _get_select_statement_only(self) -> Select[_T]: if self._statement is not None: raise sa_exc.InvalidRequestError( "Can't call this method on a Query that uses from_statement()" ) return cast("Select[_T]", self.statement) @property def statement(self) -> Union[Select[_T], FromStatement[_T], UpdateBase]: """The full SELECT statement represented by this Query. The statement by default will not have disambiguating labels applied to the construct unless with_labels(True) is called first. """ # .statement can return the direct future.Select() construct here, as # long as we are not using subsequent adaption features that # are made against raw entities, e.g. from_self(), with_polymorphic(), # select_entity_from(). If these features are being used, then # the Select() we return will not have the correct .selected_columns # collection and will not embed in subsequent queries correctly. 
# We could find a way to make this collection "correct", however # this would not be too different from doing the full compile as # we are doing in any case, the Select() would still not have the # proper state for other attributes like whereclause, order_by, # and these features are all deprecated in any case. # # for these reasons, Query is not a Select, it remains an ORM # object for which __clause_element__() must be called in order for # it to provide a real expression object. # # from there, it starts to look much like Query itself won't be # passed into the execute process and won't generate its own cache # key; this will all occur in terms of the ORM-enabled Select. stmt: Union[Select[_T], FromStatement[_T], UpdateBase] if not self._compile_options._set_base_alias: # if we don't have legacy top level aliasing features in use # then convert to a future select() directly stmt = self._statement_20(for_statement=True) else: stmt = self._compile_state(for_statement=True).statement if self._params: stmt = stmt.params(self._params) return stmt def _final_statement( self, legacy_query_style: bool = True ) -> Select[Unpack[TupleAny]]: """Return the 'final' SELECT statement for this :class:`.Query`. This is used by the testing suite only and is fairly inefficient. This is the Core-only select() that will be rendered by a complete compilation of this query, and is what .statement used to return in 1.3. 
""" q = self._clone() return q._compile_state( use_legacy_query_style=legacy_query_style ).statement # type: ignore def _statement_20( self, for_statement: bool = False, use_legacy_query_style: bool = True ) -> Union[Select[_T], FromStatement[_T]]: # TODO: this event needs to be deprecated, as it currently applies # only to ORM query and occurs at this spot that is now more # or less an artificial spot if self.dispatch.before_compile: for fn in self.dispatch.before_compile: new_query = fn(self) if new_query is not None and new_query is not self: self = new_query if not fn._bake_ok: # type: ignore self._compile_options += {"_bake_ok": False} compile_options = self._compile_options compile_options += { "_for_statement": for_statement, "_use_legacy_query_style": use_legacy_query_style, } stmt: Union[Select[_T], FromStatement[_T]] if self._statement is not None: stmt = FromStatement(self._raw_columns, self._statement) stmt.__dict__.update( _with_options=self._with_options, _with_context_options=self._compile_state_funcs, _compile_options=compile_options, _execution_options=self._execution_options, _propagate_attrs=self._propagate_attrs, ) else: # Query / select() internal attributes are 99% cross-compatible stmt = Select._create_raw_select(**self.__dict__) stmt.__dict__.update( _label_style=self._label_style, _compile_options=compile_options, _propagate_attrs=self._propagate_attrs, ) for ext in self._syntax_extensions: stmt._apply_syntax_extension_to_self(ext) stmt.__dict__.pop("session", None) # ensure the ORM context is used to compile the statement, even # if it has no ORM entities. 
This is so ORM-only things like # _legacy_joins are picked up that wouldn't be picked up by the # Core statement context if "compile_state_plugin" not in stmt._propagate_attrs: stmt._propagate_attrs = stmt._propagate_attrs.union( {"compile_state_plugin": "orm", "plugin_subject": None} ) return stmt def subquery( self, name: Optional[str] = None, with_labels: bool = False, reduce_columns: bool = False, ) -> Subquery: """Return the full SELECT statement represented by this :class:`_query.Query`, embedded within an :class:`_expression.Alias`. Eager JOIN generation within the query is disabled. .. seealso:: :meth:`_sql.Select.subquery` - v2 comparable method. :param name: string name to be assigned as the alias; this is passed through to :meth:`_expression.FromClause.alias`. If ``None``, a name will be deterministically generated at compile time. :param with_labels: if True, :meth:`.with_labels` will be called on the :class:`_query.Query` first to apply table-qualified labels to all columns. :param reduce_columns: if True, :meth:`_expression.Select.reduce_columns` will be called on the resulting :func:`_expression.select` construct, to remove same-named columns where one also refers to the other via foreign key or WHERE clause equivalence. """ q = self.enable_eagerloads(False) if with_labels: q = q.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) stmt = q._get_select_statement_only() if TYPE_CHECKING: assert isinstance(stmt, Select) if reduce_columns: stmt = stmt.reduce_columns() return stmt.subquery(name=name) def cte( self, name: Optional[str] = None, recursive: bool = False, nesting: bool = False, ) -> CTE: r"""Return the full SELECT statement represented by this :class:`_query.Query` represented as a common table expression (CTE). Parameters and usage are the same as those of the :meth:`_expression.SelectBase.cte` method; see that method for further details. 
Here is the `PostgreSQL WITH RECURSIVE example <https://www.postgresql.org/docs/current/static/queries-with.html>`_. Note that, in this example, the ``included_parts`` cte and the ``incl_alias`` alias of it are Core selectables, which means the columns are accessed via the ``.c.`` attribute. The ``parts_alias`` object is an :func:`_orm.aliased` instance of the ``Part`` entity, so column-mapped attributes are available directly:: from sqlalchemy.orm import aliased class Part(Base): __tablename__ = "part" part = Column(String, primary_key=True) sub_part = Column(String, primary_key=True) quantity = Column(Integer) included_parts = ( session.query(Part.sub_part, Part.part, Part.quantity) .filter(Part.part == "our part") .cte(name="included_parts", recursive=True) ) incl_alias = aliased(included_parts, name="pr") parts_alias = aliased(Part, name="p") included_parts = included_parts.union_all( session.query( parts_alias.sub_part, parts_alias.part, parts_alias.quantity ).filter(parts_alias.part == incl_alias.c.sub_part) ) q = session.query( included_parts.c.sub_part, func.sum(included_parts.c.quantity).label("total_quantity"), ).group_by(included_parts.c.sub_part) .. seealso:: :meth:`_sql.Select.cte` - v2 equivalent method. """ # noqa: E501 return ( self.enable_eagerloads(False) ._get_select_statement_only() .cte(name=name, recursive=recursive, nesting=nesting) ) def label(self, name: Optional[str]) -> Label[Any]: """Return the full SELECT statement represented by this :class:`_query.Query`, converted to a scalar subquery with a label of the given name. .. seealso:: :meth:`_sql.Select.label` - v2 comparable method. """ return ( self.enable_eagerloads(False) ._get_select_statement_only() .label(name) ) @overload def as_scalar( # type: ignore[overload-overlap] self: Query[Tuple[_MAYBE_ENTITY]], ) -> ScalarSelect[_MAYBE_ENTITY]: ... @overload def as_scalar( self: Query[Tuple[_NOT_ENTITY]], ) -> ScalarSelect[_NOT_ENTITY]: ... 
@overload def as_scalar(self) -> ScalarSelect[Any]: ... @util.deprecated( "1.4", "The :meth:`_query.Query.as_scalar` method is deprecated and will be " "removed in a future release. Please refer to " ":meth:`_query.Query.scalar_subquery`.", ) def as_scalar(self) -> ScalarSelect[Any]: """Return the full SELECT statement represented by this :class:`_query.Query`, converted to a scalar subquery. """ return self.scalar_subquery() @overload def scalar_subquery( self: Query[Tuple[_MAYBE_ENTITY]], ) -> ScalarSelect[Any]: ... @overload def scalar_subquery( self: Query[Tuple[_NOT_ENTITY]], ) -> ScalarSelect[_NOT_ENTITY]: ... @overload def scalar_subquery(self) -> ScalarSelect[Any]: ... def scalar_subquery(self) -> ScalarSelect[Any]: """Return the full SELECT statement represented by this :class:`_query.Query`, converted to a scalar subquery. Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.scalar_subquery`. .. versionchanged:: 1.4 The :meth:`_query.Query.scalar_subquery` method replaces the :meth:`_query.Query.as_scalar` method. .. seealso:: :meth:`_sql.Select.scalar_subquery` - v2 comparable method. """ return ( self.enable_eagerloads(False) ._get_select_statement_only() .scalar_subquery() ) @property def selectable(self) -> Union[Select[_T], FromStatement[_T], UpdateBase]: """Return the :class:`_expression.Select` object emitted by this :class:`_query.Query`. Used for :func:`_sa.inspect` compatibility, this is equivalent to:: query.enable_eagerloads(False).with_labels().statement """ return self.__clause_element__() def __clause_element__( self, ) -> Union[Select[_T], FromStatement[_T], UpdateBase]: return ( self._with_compile_options( _enable_eagerloads=False, _render_for_subquery=True ) .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) .statement ) @overload def only_return_tuples( self: Query[_O], value: Literal[True] ) -> RowReturningQuery[_O]: ... @overload def only_return_tuples( self: Query[_O], value: Literal[False] ) -> Query[_O]: ... 
@_generative def only_return_tuples(self, value: bool) -> Query[Any]: """When set to True, the query results will always be a :class:`.Row` object. This can change a query that normally returns a single entity as a scalar to return a :class:`.Row` result in all cases. .. seealso:: :meth:`.Query.tuples` - returns tuples, but also at the typing level will type results as ``Tuple``. :meth:`_query.Query.is_single_entity` :meth:`_engine.Result.tuples` - v2 comparable method. """ self.load_options += dict(_only_return_tuples=value) return self @property def is_single_entity(self) -> bool: """Indicates if this :class:`_query.Query` returns tuples or single entities. Returns True if this query returns a single entity for each instance in its result list, and False if this query returns a tuple of entities for each result. .. seealso:: :meth:`_query.Query.only_return_tuples` """ return ( not self.load_options._only_return_tuples and len(self._raw_columns) == 1 and "parententity" in self._raw_columns[0]._annotations and isinstance( self._raw_columns[0]._annotations["parententity"], ORMColumnsClauseRole, ) ) @_generative def enable_eagerloads(self, value: bool) -> Self: """Control whether or not eager joins and subqueries are rendered. When set to False, the returned Query will not render eager joins regardless of :func:`~sqlalchemy.orm.joinedload`, :func:`~sqlalchemy.orm.subqueryload` options or mapper-level ``lazy='joined'``/``lazy='subquery'`` configurations. This is used primarily when nesting the Query's statement into a subquery or other selectable, or when using :meth:`_query.Query.yield_per`. 
""" self._compile_options += {"_enable_eagerloads": value} return self @_generative def _with_compile_options(self, **opt: Any) -> Self: self._compile_options += opt return self @util.became_legacy_20( ":meth:`_orm.Query.with_labels` and :meth:`_orm.Query.apply_labels`", alternative="Use set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) " "instead.", ) def with_labels(self) -> Self: return self.set_label_style( SelectLabelStyle.LABEL_STYLE_TABLENAME_PLUS_COL ) apply_labels = with_labels @property def get_label_style(self) -> SelectLabelStyle: """ Retrieve the current label style. .. versionadded:: 1.4 .. seealso:: :meth:`_sql.Select.get_label_style` - v2 equivalent method. """ return self._label_style def set_label_style(self, style: SelectLabelStyle) -> Self: """Apply column labels to the return value of Query.statement. Indicates that this Query's `statement` accessor should return a SELECT statement that applies labels to all columns in the form <tablename>_<columnname>; this is commonly used to disambiguate columns from multiple tables which have the same name. When the `Query` actually issues SQL to load rows, it always uses column labeling. .. note:: The :meth:`_query.Query.set_label_style` method *only* applies the output of :attr:`_query.Query.statement`, and *not* to any of the result-row invoking systems of :class:`_query.Query` itself, e.g. :meth:`_query.Query.first`, :meth:`_query.Query.all`, etc. To execute a query using :meth:`_query.Query.set_label_style`, invoke the :attr:`_query.Query.statement` using :meth:`.Session.execute`:: result = session.execute( query.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).statement ) .. versionadded:: 1.4 .. seealso:: :meth:`_sql.Select.set_label_style` - v2 equivalent method. """ # noqa if self._label_style is not style: self = self._generate() self._label_style = style return self @_generative def enable_assertions(self, value: bool) -> Self: """Control whether assertions are generated. 
When set to False, the returned Query will not assert its state before certain operations, including that LIMIT/OFFSET has not been applied when filter() is called, no criterion exists when get() is called, and no "from_statement()" exists when filter()/order_by()/group_by() etc. is called. This more permissive mode is used by custom Query subclasses to specify criterion or other modifiers outside of the usual usage patterns. Care should be taken to ensure that the usage pattern is even possible. A statement applied by from_statement() will override any criterion set by filter() or order_by(), for example. """ self._enable_assertions = value return self @property def whereclause(self) -> Optional[ColumnElement[bool]]: """A readonly attribute which returns the current WHERE criterion for this Query. This returned value is a SQL expression construct, or ``None`` if no criterion has been established. .. seealso:: :attr:`_sql.Select.whereclause` - v2 equivalent property. """ return BooleanClauseList._construct_for_whereclause( self._where_criteria ) @_generative def _with_current_path(self, path: PathRegistry) -> Self: """indicate that this query applies to objects loaded within a certain path. Used by deferred loaders (see strategies.py) which transfer query options from an originating query to a newly generated query intended for the deferred load. """ self._compile_options += {"_current_path": path} return self @_generative def yield_per(self, count: int) -> Self: r"""Yield only ``count`` rows at a time. The purpose of this method is when fetching very large result sets (> 10K rows), to batch results in sub-collections and yield them out partially, so that the Python interpreter doesn't need to declare very large areas of memory which is both time consuming and leads to excessive memory use. The performance from fetching hundreds of thousands of rows can often double when a suitable yield-per setting (e.g. 
approximately 1000) is used, even with DBAPIs that buffer rows (which are most). As of SQLAlchemy 1.4, the :meth:`_orm.Query.yield_per` method is equivalent to using the ``yield_per`` execution option at the ORM level. See the section :ref:`orm_queryguide_yield_per` for further background on this option. .. seealso:: :ref:`orm_queryguide_yield_per` """ self.load_options += {"_yield_per": count} return self @util.became_legacy_20( ":meth:`_orm.Query.get`", alternative="The method is now available as :meth:`_orm.Session.get`", ) def get(self, ident: _PKIdentityArgument) -> Optional[Any]: """Return an instance based on the given primary key identifier, or ``None`` if not found. E.g.:: my_user = session.query(User).get(5) some_object = session.query(VersionedFoo).get((5, 10)) some_object = session.query(VersionedFoo).get({"id": 5, "version_id": 10}) :meth:`_query.Query.get` is special in that it provides direct access to the identity map of the owning :class:`.Session`. If the given primary key identifier is present in the local identity map, the object is returned directly from this collection and no SQL is emitted, unless the object has been marked fully expired. If not present, a SELECT is performed in order to locate the object. :meth:`_query.Query.get` also will perform a check if the object is present in the identity map and marked as expired - a SELECT is emitted to refresh the object as well as to ensure that the row is still present. If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. :meth:`_query.Query.get` is only used to return a single mapped instance, not multiple instances or individual column constructs, and strictly on a single primary key value. The originating :class:`_query.Query` must be constructed in this way, i.e. against a single mapped entity, with no additional filtering criterion. Loading options via :meth:`_query.Query.options` may be applied however, and will be used if the object is not yet locally present. 
:param ident: A scalar, tuple, or dictionary representing the primary key. For a composite (e.g. multiple column) primary key, a tuple or dictionary should be passed. For a single-column primary key, the scalar calling form is typically the most expedient. If the primary key of a row is the value "5", the call looks like:: my_object = query.get(5) The tuple form contains primary key values typically in the order in which they correspond to the mapped :class:`_schema.Table` object's primary key columns, or if the :paramref:`_orm.Mapper.primary_key` configuration parameter were used, in the order used for that parameter. For example, if the primary key of a row is represented by the integer digits "5, 10" the call would look like:: my_object = query.get((5, 10)) The dictionary form should include as keys the mapped attribute names corresponding to each element of the primary key. If the mapped class has the attributes ``id``, ``version_id`` as the attributes which store the object's primary key value, the call would look like:: my_object = query.get({"id": 5, "version_id": 10}) :return: The object instance, or ``None``. """ # noqa: E501 self._no_criterion_assertion("get", order_by=False, distinct=False) # we still implement _get_impl() so that baked query can override # it return self._get_impl(ident, loading._load_on_pk_identity) def _get_impl( self, primary_key_identity: _PKIdentityArgument, db_load_fn: Callable[..., Any], identity_token: Optional[Any] = None, ) -> Optional[Any]: mapper = self._only_full_mapper_zero("get") return self.session._get_impl( mapper, primary_key_identity, db_load_fn, populate_existing=self.load_options._populate_existing, with_for_update=self._for_update_arg, options=self._with_options, identity_token=identity_token, execution_options=self._execution_options, ) @property def lazy_loaded_from(self) -> Optional[InstanceState[Any]]: """An :class:`.InstanceState` that is using this :class:`_query.Query` for a lazy load operation. .. 
deprecated:: 1.4 This attribute should be viewed via the :attr:`.ORMExecuteState.lazy_loaded_from` attribute, within the context of the :meth:`.SessionEvents.do_orm_execute` event. .. seealso:: :attr:`.ORMExecuteState.lazy_loaded_from` """ return self.load_options._lazy_loaded_from # type: ignore @property def _current_path(self) -> PathRegistry: return self._compile_options._current_path # type: ignore @_generative def correlate( self, *fromclauses: Union[Literal[None, False], _FromClauseArgument], ) -> Self: """Return a :class:`.Query` construct which will correlate the given FROM clauses to that of an enclosing :class:`.Query` or :func:`~.expression.select`. The method here accepts mapped classes, :func:`.aliased` constructs, and :class:`_orm.Mapper` constructs as arguments, which are resolved into expression constructs, in addition to appropriate expression constructs. The correlation arguments are ultimately passed to :meth:`_expression.Select.correlate` after coercion to expression constructs. The correlation arguments take effect in such cases as when :meth:`_query.Query.from_self` is used, or when a subquery as returned by :meth:`_query.Query.subquery` is embedded in another :func:`_expression.select` construct. .. seealso:: :meth:`_sql.Select.correlate` - v2 equivalent method. """ self._auto_correlate = False if fromclauses and fromclauses[0] in {None, False}: self._correlate = () else: self._correlate = self._correlate + tuple( coercions.expect(roles.FromClauseRole, f) for f in fromclauses ) return self @_generative def autoflush(self, setting: bool) -> Self: """Return a Query with a specific 'autoflush' setting. As of SQLAlchemy 1.4, the :meth:`_orm.Query.autoflush` method is equivalent to using the ``autoflush`` execution option at the ORM level. See the section :ref:`orm_queryguide_autoflush` for further background on this option. 
""" self.load_options += {"_autoflush": setting} return self @_generative def populate_existing(self) -> Self: """Return a :class:`_query.Query` that will expire and refresh all instances as they are loaded, or reused from the current :class:`.Session`. As of SQLAlchemy 1.4, the :meth:`_orm.Query.populate_existing` method is equivalent to using the ``populate_existing`` execution option at the ORM level. See the section :ref:`orm_queryguide_populate_existing` for further background on this option. """ self.load_options += {"_populate_existing": True} return self @_generative def _with_invoke_all_eagers(self, value: bool) -> Self: """Set the 'invoke all eagers' flag which causes joined- and subquery loaders to traverse into already-loaded related objects and collections. Default is that of :attr:`_query.Query._invoke_all_eagers`. """ self.load_options += {"_invoke_all_eagers": value} return self @util.became_legacy_20( ":meth:`_orm.Query.with_parent`", alternative="Use the :func:`_orm.with_parent` standalone construct.", ) @util.preload_module("sqlalchemy.orm.relationships") def with_parent( self, instance: object, property: Optional[ # noqa: A002 attributes.QueryableAttribute[Any] ] = None, from_entity: Optional[_ExternalEntityType[Any]] = None, ) -> Self: """Add filtering criterion that relates the given instance to a child object or collection, using its attribute state as well as an established :func:`_orm.relationship()` configuration. The method uses the :func:`.with_parent` function to generate the clause, the result of which is passed to :meth:`_query.Query.filter`. Parameters are the same as :func:`.with_parent`, with the exception that the given property can be None, in which case a search is performed against this :class:`_query.Query` object's target mapper. :param instance: An instance which has some :func:`_orm.relationship`. 
:param property: Class bound attribute which indicates what relationship from the instance should be used to reconcile the parent/child relationship. :param from_entity: Entity in which to consider as the left side. This defaults to the "zero" entity of the :class:`_query.Query` itself. """ relationships = util.preloaded.orm_relationships if from_entity: entity_zero = inspect(from_entity) else: entity_zero = _legacy_filter_by_entity_zero(self) if property is None: # TODO: deprecate, property has to be supplied mapper = object_mapper(instance) for prop in mapper.iterate_properties: if ( isinstance(prop, relationships.RelationshipProperty) and prop.mapper is entity_zero.mapper # type: ignore ): property = prop # type: ignore # noqa: A001 break else: raise sa_exc.InvalidRequestError( "Could not locate a property which relates instances " "of class '%s' to instances of class '%s'" % ( entity_zero.mapper.class_.__name__, # type: ignore instance.__class__.__name__, ) ) return self.filter( with_parent( instance, property, # type: ignore entity_zero.entity, # type: ignore ) ) @_generative def add_entity( self, entity: _EntityType[Any], alias: Optional[Union[Alias, Subquery]] = None, ) -> Query[Any]: """add a mapped entity to the list of result columns to be returned. .. seealso:: :meth:`_sql.Select.add_columns` - v2 comparable method. """ if alias is not None: # TODO: deprecate entity = AliasedClass(entity, alias) self._raw_columns = list(self._raw_columns) self._raw_columns.append( coercions.expect( roles.ColumnsClauseRole, entity, apply_propagate_attrs=self ) ) return self @_generative def with_session(self, session: Session) -> Self: """Return a :class:`_query.Query` that will use the given :class:`.Session`. While the :class:`_query.Query` object is normally instantiated using the :meth:`.Session.query` method, it is legal to build the :class:`_query.Query` directly without necessarily using a :class:`.Session`. 
Such a :class:`_query.Query` object, or any :class:`_query.Query` already associated with a different :class:`.Session`, can produce a new :class:`_query.Query` object associated with a target session using this method:: from sqlalchemy.orm import Query query = Query([MyClass]).filter(MyClass.id == 5) result = query.with_session(my_session).one() """ self.session = session return self def _legacy_from_self( self, *entities: _ColumnsClauseArgument[Any] ) -> Self: # used for query.count() as well as for the same # function in BakedQuery, as well as some old tests in test_baked.py. fromclause = ( self.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) .correlate(None) .subquery() ._anonymous_fromclause() ) q = self._from_selectable(fromclause) if entities: q._set_entities(entities) return q @_generative def _set_enable_single_crit(self, val: bool) -> Self: self._compile_options += {"_enable_single_crit": val} return self @_generative def _from_selectable( self, fromclause: FromClause, set_entity_from: bool = True ) -> Self: for attr in ( "_where_criteria", "_order_by_clauses", "_group_by_clauses", "_limit_clause", "_offset_clause", "_last_joined_entity", "_setup_joins", "_memoized_select_entities", "_distinct", "_distinct_on", "_having_criteria", "_prefixes", "_suffixes", "_syntax_extensions", ): self.__dict__.pop(attr, None) self._set_select_from([fromclause], set_entity_from) self._compile_options += { "_enable_single_crit": False, } return self @util.deprecated( "1.4", ":meth:`_query.Query.values` " "is deprecated and will be removed in a " "future release. 
Please use :meth:`_query.Query.with_entities`",
    )
    def values(self, *columns: _ColumnsClauseArgument[Any]) -> Iterable[Any]:
        """Return an iterator yielding result tuples corresponding
        to the given list of columns

        """
        return self._values_no_warn(*columns)

    # legacy alias; retained so existing callers of Query._values()
    # continue to work
    _values = values

    def _values_no_warn(
        self, *columns: _ColumnsClauseArgument[Any]
    ) -> Iterable[Any]:
        # Shared implementation behind values() / value() that does not
        # itself emit the deprecation warning.
        if not columns:
            return iter(())
        q = self._clone().enable_eagerloads(False)
        q._set_entities(columns)
        if not q.load_options._yield_per:
            # stream rows in small batches by default for this legacy API,
            # unless the caller already configured yield_per
            q.load_options += {"_yield_per": 10}
        return iter(q)

    @util.deprecated(
        "1.4",
        ":meth:`_query.Query.value` "
        "is deprecated and will be removed in a "
        "future release. Please use :meth:`_query.Query.with_entities` "
        "in combination with :meth:`_query.Query.scalar`",
    )
    def value(self, column: _ColumnExpressionArgument[Any]) -> Any:
        """Return a scalar result corresponding to the given
        column expression.

        """
        try:
            # first column of the first row; None when there are no rows
            return next(self._values_no_warn(column))[0]  # type: ignore
        except StopIteration:
            return None

    @overload
    def with_entities(self, _entity: _EntityType[_O]) -> Query[_O]: ...

    @overload
    def with_entities(
        self,
        _colexpr: roles.TypedColumnsClauseRole[_T],
    ) -> RowReturningQuery[Tuple[_T]]: ...

    # START OVERLOADED FUNCTIONS self.with_entities RowReturningQuery 2-8

    # code within this block is **programmatically,
    # statically generated** by tools/generate_tuple_map_overloads.py

    @overload
    def with_entities(
        self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], /
    ) -> RowReturningQuery[_T0, _T1]: ...

    @overload
    def with_entities(
        self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2], /
    ) -> RowReturningQuery[_T0, _T1, _T2]: ...

    @overload
    def with_entities(
        self,
        __ent0: _TCCA[_T0],
        __ent1: _TCCA[_T1],
        __ent2: _TCCA[_T2],
        __ent3: _TCCA[_T3],
        /,
    ) -> RowReturningQuery[_T0, _T1, _T2, _T3]: ...

    @overload
    def with_entities(
        self,
        __ent0: _TCCA[_T0],
        __ent1: _TCCA[_T1],
        __ent2: _TCCA[_T2],
        __ent3: _TCCA[_T3],
        __ent4: _TCCA[_T4],
        /,
    ) -> RowReturningQuery[_T0, _T1, _T2, _T3, _T4]: ...
@overload def with_entities( self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2], __ent3: _TCCA[_T3], __ent4: _TCCA[_T4], __ent5: _TCCA[_T5], /, ) -> RowReturningQuery[_T0, _T1, _T2, _T3, _T4, _T5]: ... @overload def with_entities( self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2], __ent3: _TCCA[_T3], __ent4: _TCCA[_T4], __ent5: _TCCA[_T5], __ent6: _TCCA[_T6], /, ) -> RowReturningQuery[_T0, _T1, _T2, _T3, _T4, _T5, _T6]: ... @overload def with_entities( self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2], __ent3: _TCCA[_T3], __ent4: _TCCA[_T4], __ent5: _TCCA[_T5], __ent6: _TCCA[_T6], __ent7: _TCCA[_T7], /, *entities: _ColumnsClauseArgument[Any], ) -> RowReturningQuery[ _T0, _T1, _T2, _T3, _T4, _T5, _T6, _T7, Unpack[TupleAny] ]: ... # END OVERLOADED FUNCTIONS self.with_entities @overload def with_entities( self, *entities: _ColumnsClauseArgument[Any] ) -> Query[Any]: ... @_generative def with_entities( self, *entities: _ColumnsClauseArgument[Any], **__kw: Any ) -> Query[Any]: r"""Return a new :class:`_query.Query` replacing the SELECT list with the given entities. e.g.:: # Users, filtered on some arbitrary criterion # and then ordered by related email address q = ( session.query(User) .join(User.address) .filter(User.name.like("%ed%")) .order_by(Address.email) ) # given *only* User.id==5, Address.email, and 'q', what # would the *next* User in the result be ? subq = ( q.with_entities(Address.email) .order_by(None) .filter(User.id == 5) .subquery() ) q = q.join((subq, subq.c.email < Address.email)).limit(1) .. seealso:: :meth:`_sql.Select.with_only_columns` - v2 comparable method. 
""" if __kw: raise _no_kw() # Query has all the same fields as Select for this operation # this could in theory be based on a protocol but not sure if it's # worth it _MemoizedSelectEntities._generate_for_statement(self) # type: ignore self._set_entities(entities) return self @_generative def add_columns( self, *column: _ColumnExpressionArgument[Any] ) -> Query[Any]: """Add one or more column expressions to the list of result columns to be returned. .. seealso:: :meth:`_sql.Select.add_columns` - v2 comparable method. """ self._raw_columns = list(self._raw_columns) self._raw_columns.extend( coercions.expect( roles.ColumnsClauseRole, c, apply_propagate_attrs=self, post_inspect=True, ) for c in column ) return self @util.deprecated( "1.4", ":meth:`_query.Query.add_column` " "is deprecated and will be removed in a " "future release. Please use :meth:`_query.Query.add_columns`", ) def add_column(self, column: _ColumnExpressionArgument[Any]) -> Query[Any]: """Add a column expression to the list of result columns to be returned. """ return self.add_columns(column) @_generative def options(self, *args: ExecutableOption) -> Self: """Return a new :class:`_query.Query` object, applying the given list of mapper options. Most supplied options regard changing how column- and relationship-mapped attributes are loaded. .. seealso:: :ref:`loading_columns` :ref:`relationship_loader_options` """ opts = tuple(util.flatten_iterator(args)) if self._compile_options._current_path: # opting for lower method overhead for the checks for opt in opts: if not opt._is_core and opt._is_legacy_option: # type: ignore opt.process_query_conditionally(self) # type: ignore else: for opt in opts: if not opt._is_core and opt._is_legacy_option: # type: ignore opt.process_query(self) # type: ignore self._with_options += opts return self def with_transformation( self, fn: Callable[[Query[Any]], Query[Any]] ) -> Query[Any]: """Return a new :class:`_query.Query` object transformed by the given function. 
E.g.:: def filter_something(criterion): def transform(q): return q.filter(criterion) return transform q = q.with_transformation(filter_something(x == 5)) This allows ad-hoc recipes to be created for :class:`_query.Query` objects. """ return fn(self) def get_execution_options(self) -> _ImmutableExecuteOptions: """Get the non-SQL options which will take effect during execution. .. seealso:: :meth:`_query.Query.execution_options` :meth:`_sql.Select.get_execution_options` - v2 comparable method. """ return self._execution_options @overload def execution_options( self, *, compiled_cache: Optional[CompiledCacheType] = ..., logging_token: str = ..., isolation_level: IsolationLevel = ..., no_parameters: bool = False, stream_results: bool = False, max_row_buffer: int = ..., yield_per: int = ..., driver_column_names: bool = ..., insertmanyvalues_page_size: int = ..., schema_translate_map: Optional[SchemaTranslateMapType] = ..., populate_existing: bool = False, autoflush: bool = False, preserve_rowcount: bool = False, **opt: Any, ) -> Self: ... @overload def execution_options(self, **opt: Any) -> Self: ... @_generative def execution_options(self, **kwargs: Any) -> Self: """Set non-SQL options which take effect during execution. Options allowed here include all of those accepted by :meth:`_engine.Connection.execution_options`, as well as a series of ORM specific options: ``populate_existing=True`` - equivalent to using :meth:`_orm.Query.populate_existing` ``autoflush=True|False`` - equivalent to using :meth:`_orm.Query.autoflush` ``yield_per=<value>`` - equivalent to using :meth:`_orm.Query.yield_per` Note that the ``stream_results`` execution option is enabled automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()` method or execution option is used. .. 
versionadded:: 1.4 - added ORM options to :meth:`_orm.Query.execution_options` The execution options may also be specified on a per execution basis when using :term:`2.0 style` queries via the :paramref:`_orm.Session.execution_options` parameter. .. warning:: The :paramref:`_engine.Connection.execution_options.stream_results` parameter should not be used at the level of individual ORM statement executions, as the :class:`_orm.Session` will not track objects from different schema translate maps within a single session. For multiple schema translate maps within the scope of a single :class:`_orm.Session`, see :ref:`examples_sharding`. .. seealso:: :ref:`engine_stream_results` :meth:`_query.Query.get_execution_options` :meth:`_sql.Select.execution_options` - v2 equivalent method. """ self._execution_options = self._execution_options.union(kwargs) return self @_generative def with_for_update( self, *, nowait: bool = False, read: bool = False, of: Optional[_ForUpdateOfArgument] = None, skip_locked: bool = False, key_share: bool = False, ) -> Self: """return a new :class:`_query.Query` with the specified options for the ``FOR UPDATE`` clause. The behavior of this method is identical to that of :meth:`_expression.GenerativeSelect.with_for_update`. When called with no arguments, the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause appended. When additional arguments are specified, backend-specific options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE`` can take effect. E.g.:: q = ( sess.query(User) .populate_existing() .with_for_update(nowait=True, of=User) ) The above query on a PostgreSQL backend will render like: .. sourcecode:: sql SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT .. warning:: Using ``with_for_update`` in the context of eager loading relationships is not officially supported or recommended by SQLAlchemy and may not work with certain queries on various database backends. 
When ``with_for_update`` is successfully used with a query that involves :func:`_orm.joinedload`, SQLAlchemy will attempt to emit SQL that locks all involved tables. .. note:: It is generally a good idea to combine the use of the :meth:`_orm.Query.populate_existing` method when using the :meth:`_orm.Query.with_for_update` method. The purpose of :meth:`_orm.Query.populate_existing` is to force all the data read from the SELECT to be populated into the ORM objects returned, even if these objects are already in the :term:`identity map`. .. seealso:: :meth:`_expression.GenerativeSelect.with_for_update` - Core level method with full argument and behavioral description. :meth:`_orm.Query.populate_existing` - overwrites attributes of objects already loaded in the identity map. """ # noqa: E501 self._for_update_arg = ForUpdateArg( read=read, nowait=nowait, of=of, skip_locked=skip_locked, key_share=key_share, ) return self @_generative def params( self, __params: Optional[Dict[str, Any]] = None, /, **kw: Any ) -> Self: r"""Add values for bind parameters which may have been specified in filter(). Parameters may be specified using \**kwargs, or optionally a single dictionary as the first positional argument. The reason for both is that \**kwargs is convenient, however some parameter dictionaries contain unicode keys in which case \**kwargs cannot be used. """ if __params: kw.update(__params) self._params = self._params.union(kw) return self def where(self, *criterion: _ColumnExpressionArgument[bool]) -> Self: """A synonym for :meth:`.Query.filter`. .. versionadded:: 1.4 .. seealso:: :meth:`_sql.Select.where` - v2 equivalent method. """ return self.filter(*criterion) @_generative @_assertions(_no_statement_condition, _no_limit_offset) def filter(self, *criterion: _ColumnExpressionArgument[bool]) -> Self: r"""Apply the given filtering criterion to a copy of this :class:`_query.Query`, using SQL expressions. 
e.g.:: session.query(MyClass).filter(MyClass.name == "some name") Multiple criteria may be specified as comma separated; the effect is that they will be joined together using the :func:`.and_` function:: session.query(MyClass).filter(MyClass.name == "some name", MyClass.id > 5) The criterion is any SQL expression object applicable to the WHERE clause of a select. String expressions are coerced into SQL expression constructs via the :func:`_expression.text` construct. .. seealso:: :meth:`_query.Query.filter_by` - filter on keyword expressions. :meth:`_sql.Select.where` - v2 equivalent method. """ # noqa: E501 for crit in list(criterion): crit = coercions.expect( roles.WhereHavingRole, crit, apply_propagate_attrs=self ) self._where_criteria += (crit,) return self @util.memoized_property def _last_joined_entity( self, ) -> Optional[Union[_InternalEntityType[Any], _JoinTargetElement]]: if self._setup_joins: return _determine_last_joined_entity( self._setup_joins, ) else: return None def _filter_by_zero(self) -> Any: """for the filter_by() method, return the target entity for which we will attempt to derive an expression from based on string name. """ if self._setup_joins: _last_joined_entity = self._last_joined_entity if _last_joined_entity is not None: return _last_joined_entity # discussion related to #7239 # special check determines if we should try to derive attributes # for filter_by() from the "from object", i.e., if the user # called query.select_from(some selectable).filter_by(some_attr=value). # We don't want to do that in the case that methods like # from_self(), select_entity_from(), or a set op like union() were # called; while these methods also place a # selectable in the _from_obj collection, they also set up # the _set_base_alias boolean which turns on the whole "adapt the # entity to this selectable" thing, meaning the query still continues # to construct itself in terms of the lead entity that was passed # to query(), e.g. 
query(User).from_self() is still in terms of User, # and not the subquery that from_self() created. This feature of # "implicitly adapt all occurrences of entity X to some arbitrary # subquery" is the main thing I am trying to do away with in 2.0 as # users should now used aliased() for that, but I can't entirely get # rid of it due to query.union() and other set ops relying upon it. # # compare this to the base Select()._filter_by_zero() which can # just return self._from_obj[0] if present, because there is no # "_set_base_alias" feature. # # IOW, this conditional essentially detects if # "select_from(some_selectable)" has been called, as opposed to # "select_entity_from()", "from_self()" # or "union() / some_set_op()". if self._from_obj and not self._compile_options._set_base_alias: return self._from_obj[0] return self._raw_columns[0] def filter_by(self, **kwargs: Any) -> Self: r"""Apply the given filtering criterion to a copy of this :class:`_query.Query`, using keyword expressions. e.g.:: session.query(MyClass).filter_by(name="some name") Multiple criteria may be specified as comma separated; the effect is that they will be joined together using the :func:`.and_` function:: session.query(MyClass).filter_by(name="some name", id=5) The keyword expressions are extracted from the primary entity of the query, or the last entity that was the target of a call to :meth:`_query.Query.join`. .. seealso:: :meth:`_query.Query.filter` - filter on SQL expressions. :meth:`_sql.Select.filter_by` - v2 comparable method. 
""" from_entity = self._filter_by_zero() clauses = [ _entity_namespace_key(from_entity, key) == value for key, value in kwargs.items() ] return self.filter(*clauses) @_generative def order_by( self, __first: Union[ Literal[None, False, _NoArg.NO_ARG], _ColumnExpressionOrStrLabelArgument[Any], ] = _NoArg.NO_ARG, /, *clauses: _ColumnExpressionOrStrLabelArgument[Any], ) -> Self: """Apply one or more ORDER BY criteria to the query and return the newly resulting :class:`_query.Query`. e.g.:: q = session.query(Entity).order_by(Entity.id, Entity.name) Calling this method multiple times is equivalent to calling it once with all the clauses concatenated. All existing ORDER BY criteria may be cancelled by passing ``None`` by itself. New ORDER BY criteria may then be added by invoking :meth:`_orm.Query.order_by` again, e.g.:: # will erase all ORDER BY and ORDER BY new_col alone q = q.order_by(None).order_by(new_col) .. seealso:: These sections describe ORDER BY in terms of :term:`2.0 style` invocation but apply to :class:`_orm.Query` as well: :ref:`tutorial_order_by` - in the :ref:`unified_tutorial` :ref:`tutorial_order_by_label` - in the :ref:`unified_tutorial` :meth:`_sql.Select.order_by` - v2 equivalent method. """ for assertion in (self._no_statement_condition, self._no_limit_offset): assertion("order_by") if not clauses and (__first is None or __first is False): self._order_by_clauses = () elif __first is not _NoArg.NO_ARG: criterion = tuple( coercions.expect(roles.OrderByRole, clause) for clause in (__first,) + clauses ) self._order_by_clauses += criterion return self @_generative def group_by( self, __first: Union[ Literal[None, False, _NoArg.NO_ARG], _ColumnExpressionOrStrLabelArgument[Any], ] = _NoArg.NO_ARG, /, *clauses: _ColumnExpressionOrStrLabelArgument[Any], ) -> Self: """Apply one or more GROUP BY criterion to the query and return the newly resulting :class:`_query.Query`. 
All existing GROUP BY settings can be suppressed by passing ``None`` - this will suppress any GROUP BY configured on mappers as well. .. seealso:: These sections describe GROUP BY in terms of :term:`2.0 style` invocation but apply to :class:`_orm.Query` as well: :ref:`tutorial_group_by_w_aggregates` - in the :ref:`unified_tutorial` :ref:`tutorial_order_by_label` - in the :ref:`unified_tutorial` :meth:`_sql.Select.group_by` - v2 equivalent method. """ for assertion in (self._no_statement_condition, self._no_limit_offset): assertion("group_by") if not clauses and (__first is None or __first is False): self._group_by_clauses = () elif __first is not _NoArg.NO_ARG: criterion = tuple( coercions.expect(roles.GroupByRole, clause) for clause in (__first,) + clauses ) self._group_by_clauses += criterion return self @_generative @_assertions(_no_statement_condition, _no_limit_offset) def having(self, *having: _ColumnExpressionArgument[bool]) -> Self: r"""Apply a HAVING criterion to the query and return the newly resulting :class:`_query.Query`. :meth:`_query.Query.having` is used in conjunction with :meth:`_query.Query.group_by`. HAVING criterion makes it possible to use filters on aggregate functions like COUNT, SUM, AVG, MAX, and MIN, eg.:: q = ( session.query(User.id) .join(User.addresses) .group_by(User.id) .having(func.count(Address.id) > 2) ) .. seealso:: :meth:`_sql.Select.having` - v2 equivalent method. """ for criterion in having: having_criteria = coercions.expect( roles.WhereHavingRole, criterion ) self._having_criteria += (having_criteria,) return self def _set_op(self, expr_fn: Any, *q: Query[Any]) -> Self: list_of_queries = (self,) + q return self._from_selectable(expr_fn(*(list_of_queries)).subquery()) def union(self, *q: Query[Any]) -> Self: """Produce a UNION of this Query against one or more queries. 
        e.g.::

            q1 = sess.query(SomeClass).filter(SomeClass.foo == "bar")
            q2 = sess.query(SomeClass).filter(SomeClass.bar == "foo")

            q3 = q1.union(q2)

        The method accepts multiple Query objects so as to control
        the level of nesting.  A series of ``union()`` calls such as::

            x.union(y).union(z).all()

        will nest on each ``union()``, and produces:

        .. sourcecode:: sql

            SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
                            SELECT * FROM y) UNION SELECT * FROM Z)

        Whereas::

            x.union(y, z).all()

        produces:

        .. sourcecode:: sql

            SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
                            SELECT * FROM Z)

        Note that many database backends do not allow ORDER BY to
        be rendered on a query called within UNION, EXCEPT, etc.
        To disable all ORDER BY clauses including those configured
        on mappers, issue ``query.order_by(None)`` - the resulting
        :class:`_query.Query` object will not render ORDER BY within
        its SELECT statement.

        .. seealso::

            :meth:`_sql.Select.union` - v2 equivalent method.

        """
        # _set_op wraps this query plus the given ones in the Core set
        # operation and re-selects this Query from the resulting subquery
        return self._set_op(expression.union, *q)

    def union_all(self, *q: Query[Any]) -> Self:
        """Produce a UNION ALL of this Query against one or more queries.

        Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
        that method for usage examples.

        .. seealso::

            :meth:`_sql.Select.union_all` - v2 equivalent method.

        """
        return self._set_op(expression.union_all, *q)

    def intersect(self, *q: Query[Any]) -> Self:
        """Produce an INTERSECT of this Query against one or more queries.

        Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
        that method for usage examples.

        .. seealso::

            :meth:`_sql.Select.intersect` - v2 equivalent method.

        """
        return self._set_op(expression.intersect, *q)

    def intersect_all(self, *q: Query[Any]) -> Self:
        """Produce an INTERSECT ALL of this Query against one or more queries.

        Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
        that method for usage examples.

        .. seealso::

            :meth:`_sql.Select.intersect_all` - v2 equivalent method.
""" return self._set_op(expression.intersect_all, *q) def except_(self, *q: Query[Any]) -> Self: """Produce an EXCEPT of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples. .. seealso:: :meth:`_sql.Select.except_` - v2 equivalent method. """ return self._set_op(expression.except_, *q) def except_all(self, *q: Query[Any]) -> Self: """Produce an EXCEPT ALL of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples. .. seealso:: :meth:`_sql.Select.except_all` - v2 equivalent method. """ return self._set_op(expression.except_all, *q) @_generative @_assertions(_no_statement_condition, _no_limit_offset) def join( self, target: _JoinTargetArgument, onclause: Optional[_OnClauseArgument] = None, *, isouter: bool = False, full: bool = False, ) -> Self: r"""Create a SQL JOIN against this :class:`_query.Query` object's criterion and apply generatively, returning the newly resulting :class:`_query.Query`. **Simple Relationship Joins** Consider a mapping between two classes ``User`` and ``Address``, with a relationship ``User.addresses`` representing a collection of ``Address`` objects associated with each ``User``. The most common usage of :meth:`_query.Query.join` is to create a JOIN along this relationship, using the ``User.addresses`` attribute as an indicator for how this should occur:: q = session.query(User).join(User.addresses) Where above, the call to :meth:`_query.Query.join` along ``User.addresses`` will result in SQL approximately equivalent to: .. sourcecode:: sql SELECT user.id, user.name FROM user JOIN address ON user.id = address.user_id In the above example we refer to ``User.addresses`` as passed to :meth:`_query.Query.join` as the "on clause", that is, it indicates how the "ON" portion of the JOIN should be constructed. 
To construct a chain of joins, multiple :meth:`_query.Query.join` calls may be used. The relationship-bound attribute implies both the left and right side of the join at once:: q = ( session.query(User) .join(User.orders) .join(Order.items) .join(Item.keywords) ) .. note:: as seen in the above example, **the order in which each call to the join() method occurs is important**. Query would not, for example, know how to join correctly if we were to specify ``User``, then ``Item``, then ``Order``, in our chain of joins; in such a case, depending on the arguments passed, it may raise an error that it doesn't know how to join, or it may produce invalid SQL in which case the database will raise an error. In correct practice, the :meth:`_query.Query.join` method is invoked in such a way that lines up with how we would want the JOIN clauses in SQL to be rendered, and each call should represent a clear link from what precedes it. **Joins to a Target Entity or Selectable** A second form of :meth:`_query.Query.join` allows any mapped entity or core selectable construct as a target. In this usage, :meth:`_query.Query.join` will attempt to create a JOIN along the natural foreign key relationship between two entities:: q = session.query(User).join(Address) In the above calling form, :meth:`_query.Query.join` is called upon to create the "on clause" automatically for us. This calling form will ultimately raise an error if either there are no foreign keys between the two entities, or if there are multiple foreign key linkages between the target entity and the entity or entities already present on the left side such that creating a join requires more information. Note that when indicating a join to a target without any ON clause, ORM configured relationships are not taken into account. **Joins to a Target with an ON Clause** The third calling form allows both the target entity as well as the ON clause to be passed explicitly. 
A example that includes a SQL expression as the ON clause is as follows:: q = session.query(User).join(Address, User.id == Address.user_id) The above form may also use a relationship-bound attribute as the ON clause as well:: q = session.query(User).join(Address, User.addresses) The above syntax can be useful for the case where we wish to join to an alias of a particular target entity. If we wanted to join to ``Address`` twice, it could be achieved using two aliases set up using the :func:`~sqlalchemy.orm.aliased` function:: a1 = aliased(Address) a2 = aliased(Address) q = ( session.query(User) .join(a1, User.addresses) .join(a2, User.addresses) .filter(a1.email_address == "ed@foo.com") .filter(a2.email_address == "ed@bar.com") ) The relationship-bound calling form can also specify a target entity using the :meth:`_orm.PropComparator.of_type` method; a query equivalent to the one above would be:: a1 = aliased(Address) a2 = aliased(Address) q = ( session.query(User) .join(User.addresses.of_type(a1)) .join(User.addresses.of_type(a2)) .filter(a1.email_address == "ed@foo.com") .filter(a2.email_address == "ed@bar.com") ) **Augmenting Built-in ON Clauses** As a substitute for providing a full custom ON condition for an existing relationship, the :meth:`_orm.PropComparator.and_` function may be applied to a relationship attribute to augment additional criteria into the ON clause; the additional criteria will be combined with the default criteria using AND:: q = session.query(User).join( User.addresses.and_(Address.email_address != "foo@bar.com") ) .. versionadded:: 1.4 **Joining to Tables and Subqueries** The target of a join may also be any table or SELECT statement, which may be related to a target entity or not. 
Use the appropriate ``.subquery()`` method in order to make a subquery out of a query:: subq = ( session.query(Address) .filter(Address.email_address == "ed@foo.com") .subquery() ) q = session.query(User).join(subq, User.id == subq.c.user_id) Joining to a subquery in terms of a specific relationship and/or target entity may be achieved by linking the subquery to the entity using :func:`_orm.aliased`:: subq = ( session.query(Address) .filter(Address.email_address == "ed@foo.com") .subquery() ) address_subq = aliased(Address, subq) q = session.query(User).join(User.addresses.of_type(address_subq)) **Controlling what to Join From** In cases where the left side of the current state of :class:`_query.Query` is not in line with what we want to join from, the :meth:`_query.Query.select_from` method may be used:: q = ( session.query(Address) .select_from(User) .join(User.addresses) .filter(User.name == "ed") ) Which will produce SQL similar to: .. sourcecode:: sql SELECT address.* FROM user JOIN address ON user.id=address.user_id WHERE user.name = :name_1 .. seealso:: :meth:`_sql.Select.join` - v2 equivalent method. :param \*props: Incoming arguments for :meth:`_query.Query.join`, the props collection in modern use should be considered to be a one or two argument form, either as a single "target" entity or ORM attribute-bound relationship, or as a target entity plus an "on clause" which may be a SQL expression or ORM attribute-bound relationship. :param isouter=False: If True, the join used will be a left outer join, just as if the :meth:`_query.Query.outerjoin` method were called. :param full=False: render FULL OUTER JOIN; implies ``isouter``. 
""" join_target = coercions.expect( roles.JoinTargetRole, target, apply_propagate_attrs=self, legacy=True, ) if onclause is not None: onclause_element = coercions.expect( roles.OnClauseRole, onclause, legacy=True ) else: onclause_element = None self._setup_joins += ( ( join_target, onclause_element, None, { "isouter": isouter, "full": full, }, ), ) self.__dict__.pop("_last_joined_entity", None) return self def outerjoin( self, target: _JoinTargetArgument, onclause: Optional[_OnClauseArgument] = None, *, full: bool = False, ) -> Self: """Create a left outer join against this ``Query`` object's criterion and apply generatively, returning the newly resulting ``Query``. Usage is the same as the ``join()`` method. .. seealso:: :meth:`_sql.Select.outerjoin` - v2 equivalent method. """ return self.join(target, onclause=onclause, isouter=True, full=full) @_generative @_assertions(_no_statement_condition) def reset_joinpoint(self) -> Self: """Return a new :class:`.Query`, where the "join point" has been reset back to the base FROM entities of the query. This method is usually used in conjunction with the ``aliased=True`` feature of the :meth:`~.Query.join` method. See the example in :meth:`~.Query.join` for how this is used. """ self._last_joined_entity = None return self @_generative @_assertions(_no_clauseelement_condition) def select_from(self, *from_obj: _FromClauseArgument) -> Self: r"""Set the FROM clause of this :class:`.Query` explicitly. :meth:`.Query.select_from` is often used in conjunction with :meth:`.Query.join` in order to control which entity is selected from on the "left" side of the join. The entity or selectable object here effectively replaces the "left edge" of any calls to :meth:`~.Query.join`, when no joinpoint is otherwise established - usually, the default "join point" is the leftmost entity in the :class:`~.Query` object's list of entities to be selected. 
A typical example:: q = ( session.query(Address) .select_from(User) .join(User.addresses) .filter(User.name == "ed") ) Which produces SQL equivalent to: .. sourcecode:: sql SELECT address.* FROM user JOIN address ON user.id=address.user_id WHERE user.name = :name_1 :param \*from_obj: collection of one or more entities to apply to the FROM clause. Entities can be mapped classes, :class:`.AliasedClass` objects, :class:`.Mapper` objects as well as core :class:`.FromClause` elements like subqueries. .. seealso:: :meth:`~.Query.join` :meth:`.Query.select_entity_from` :meth:`_sql.Select.select_from` - v2 equivalent method. """ self._set_select_from(from_obj, False) return self def __getitem__(self, item: Any) -> Any: return orm_util._getitem( self, item, ) @_generative @_assertions(_no_statement_condition) def slice( self, start: int, stop: int, ) -> Self: """Computes the "slice" of the :class:`_query.Query` represented by the given indices and returns the resulting :class:`_query.Query`. The start and stop indices behave like the argument to Python's built-in :func:`range` function. This method provides an alternative to using ``LIMIT``/``OFFSET`` to get a slice of the query. For example, :: session.query(User).order_by(User.id).slice(1, 3) renders as .. sourcecode:: sql SELECT users.id AS users_id, users.name AS users_name FROM users ORDER BY users.id LIMIT ? OFFSET ? (2, 1) .. seealso:: :meth:`_query.Query.limit` :meth:`_query.Query.offset` :meth:`_sql.Select.slice` - v2 equivalent method. """ self._limit_clause, self._offset_clause = sql_util._make_slice( self._limit_clause, self._offset_clause, start, stop ) return self @_generative @_assertions(_no_statement_condition) def limit(self, limit: _LimitOffsetType) -> Self: """Apply a ``LIMIT`` to the query and return the newly resulting ``Query``. .. seealso:: :meth:`_sql.Select.limit` - v2 equivalent method. 
""" self._limit_clause = sql_util._offset_or_limit_clause(limit) return self @_generative @_assertions(_no_statement_condition) def offset(self, offset: _LimitOffsetType) -> Self: """Apply an ``OFFSET`` to the query and return the newly resulting ``Query``. .. seealso:: :meth:`_sql.Select.offset` - v2 equivalent method. """ self._offset_clause = sql_util._offset_or_limit_clause(offset) return self @_generative @_assertions(_no_statement_condition) def distinct(self, *expr: _ColumnExpressionArgument[Any]) -> Self: r"""Apply a ``DISTINCT`` to the query and return the newly resulting ``Query``. .. note:: The ORM-level :meth:`.distinct` call includes logic that will automatically add columns from the ORDER BY of the query to the columns clause of the SELECT statement, to satisfy the common need of the database backend that ORDER BY columns be part of the SELECT list when DISTINCT is used. These columns *are not* added to the list of columns actually fetched by the :class:`_query.Query`, however, so would not affect results. The columns are passed through when using the :attr:`_query.Query.statement` accessor, however. .. deprecated:: 2.0 This logic is deprecated and will be removed in SQLAlchemy 2.0. See :ref:`migration_20_query_distinct` for a description of this use case in 2.0. .. seealso:: :meth:`_sql.Select.distinct` - v2 equivalent method. :param \*expr: optional column expressions. When present, the PostgreSQL dialect will render a ``DISTINCT ON (<expressions>)`` construct. .. deprecated:: 2.1 Passing expressions to :meth:`_orm.Query.distinct` is deprecated, use :func:`_postgresql.distinct_on` instead. """ if expr: warn_deprecated( "Passing expression to ``distinct`` to generate a DISTINCT " "ON clause is deprecated. 
Use instead the " "``postgresql.distinct_on`` function as an extension.", "2.1", ) self._distinct = True self._distinct_on = self._distinct_on + tuple( coercions.expect(roles.ByOfRole, e) for e in expr ) else: self._distinct = True return self @_generative def ext(self, extension: SyntaxExtension) -> Self: """Applies a SQL syntax extension to this statement. .. seealso:: :ref:`examples_syntax_extensions` :func:`_mysql.limit` - DML LIMIT for MySQL :func:`_postgresql.distinct_on` - DISTINCT ON for PostgreSQL .. versionadded:: 2.1 """ extension = coercions.expect(roles.SyntaxExtensionRole, extension) self._syntax_extensions += (extension,) return self def all(self) -> List[_T]: """Return the results represented by this :class:`_query.Query` as a list. This results in an execution of the underlying SQL statement. .. warning:: The :class:`_query.Query` object, when asked to return either a sequence or iterator that consists of full ORM-mapped entities, will **deduplicate entries based on primary key**. See the FAQ for more details. .. seealso:: :ref:`faq_query_deduplicating` .. seealso:: :meth:`_engine.Result.all` - v2 comparable method. :meth:`_engine.Result.scalars` - v2 comparable method. """ return self._iter().all() # type: ignore @_generative @_assertions(_no_clauseelement_condition) def from_statement(self, statement: ExecutableReturnsRows) -> Self: """Execute the given SELECT statement and return results. This method bypasses all internal statement compilation, and the statement is executed without modification. The statement is typically either a :func:`_expression.text` or :func:`_expression.select` construct, and should return the set of columns appropriate to the entity class represented by this :class:`_query.Query`. .. seealso:: :meth:`_sql.Select.from_statement` - v2 comparable method. 
""" statement = coercions.expect( roles.SelectStatementRole, statement, apply_propagate_attrs=self ) self._statement = statement return self def first(self) -> Optional[_T]: """Return the first result of this ``Query`` or None if the result doesn't contain any row. first() applies a limit of one within the generated SQL, so that only one primary entity row is generated on the server side (note this may consist of multiple result rows if join-loaded collections are present). Calling :meth:`_query.Query.first` results in an execution of the underlying query. .. seealso:: :meth:`_query.Query.one` :meth:`_query.Query.one_or_none` :meth:`_engine.Result.first` - v2 comparable method. :meth:`_engine.Result.scalars` - v2 comparable method. """ # replicates limit(1) behavior if self._statement is not None: return self._iter().first() # type: ignore else: return self.limit(1)._iter().first() # type: ignore def one_or_none(self) -> Optional[_T]: """Return at most one result or raise an exception. Returns ``None`` if the query selects no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound`` if multiple object identities are returned, or if multiple rows are returned for a query that returns only scalar values as opposed to full identity-mapped entities. Calling :meth:`_query.Query.one_or_none` results in an execution of the underlying query. .. seealso:: :meth:`_query.Query.first` :meth:`_query.Query.one` :meth:`_engine.Result.one_or_none` - v2 comparable method. :meth:`_engine.Result.scalar_one_or_none` - v2 comparable method. """ return self._iter().one_or_none() # type: ignore def one(self) -> _T: """Return exactly one result or raise an exception. Raises :class:`_exc.NoResultFound` if the query selects no rows. Raises :class:`_exc.MultipleResultsFound` if multiple object identities are returned, or if multiple rows are returned for a query that returns only scalar values as opposed to full identity-mapped entities. 
Calling :meth:`.one` results in an execution of the underlying query. .. seealso:: :meth:`_query.Query.first` :meth:`_query.Query.one_or_none` :meth:`_engine.Result.one` - v2 comparable method. :meth:`_engine.Result.scalar_one` - v2 comparable method. """ return self._iter().one() # type: ignore def scalar(self) -> Any: """Return the first element of the first result or None if no rows present. If multiple rows are returned, raises :class:`_exc.MultipleResultsFound`. >>> session.query(Item).scalar() <Item> >>> session.query(Item.id).scalar() 1 >>> session.query(Item.id).filter(Item.id < 0).scalar() None >>> session.query(Item.id, Item.name).scalar() 1 >>> session.query(func.count(Parent.id)).scalar() 20 This results in an execution of the underlying query. .. seealso:: :meth:`_engine.Result.scalar` - v2 comparable method. """ # TODO: not sure why we can't use result.scalar() here try: ret = self.one() if not isinstance(ret, collections_abc.Sequence): return ret return ret[0] except sa_exc.NoResultFound: return None def __iter__(self) -> Iterator[_T]: result = self._iter() try: yield from result # type: ignore except GeneratorExit: # issue #8710 - direct iteration is not re-usable after # an iterable block is broken, so close the result result._soft_close() raise def _iter(self) -> Union[ScalarResult[_T], Result[_T]]: # new style execution. 
params = self._params statement = self._statement_20() result: Union[ScalarResult[_T], Result[_T]] = self.session.execute( statement, params, execution_options={"_sa_orm_load_options": self.load_options}, ) # legacy: automatically set scalars, unique if result._attributes.get("is_single_entity", False): result = cast("Result[_T]", result).scalars() if ( result._attributes.get("filtered", False) and not self.load_options._yield_per ): result = result.unique() return result def __str__(self) -> str: statement = self._statement_20() try: bind = ( self.session.get_bind(clause=statement) if self.session else None ) except sa_exc.UnboundExecutionError: bind = None return str(statement.compile(bind)) @property def column_descriptions(self) -> List[ORMColumnDescription]: """Return metadata about the columns which would be returned by this :class:`_query.Query`. Format is a list of dictionaries:: user_alias = aliased(User, name="user2") q = sess.query(User, User.id, user_alias) # this expression: q.column_descriptions # would return: [ { "name": "User", "type": User, "aliased": False, "expr": User, "entity": User, }, { "name": "id", "type": Integer(), "aliased": False, "expr": User.id, "entity": User, }, { "name": "user2", "type": User, "aliased": True, "expr": user_alias, "entity": user_alias, }, ] .. seealso:: This API is available using :term:`2.0 style` queries as well, documented at: * :ref:`queryguide_inspection` * :attr:`.Select.column_descriptions` """ return _column_descriptions(self, legacy=True) @util.deprecated( "2.0", "The :meth:`_orm.Query.instances` method is deprecated and will " "be removed in a future release. " "Use the Select.from_statement() method or aliased() construct in " "conjunction with Session.execute() instead.", ) def instances( self, result_proxy: CursorResult[Any], context: Optional[QueryContext] = None, ) -> Any: """Return an ORM result given a :class:`_engine.CursorResult` and :class:`.QueryContext`. 
""" if context is None: util.warn_deprecated( "Using the Query.instances() method without a context " "is deprecated and will be disallowed in a future release. " "Please make use of :meth:`_query.Query.from_statement` " "for linking ORM results to arbitrary select constructs.", version="1.4", ) compile_state = self._compile_state(for_statement=False) context = QueryContext( compile_state, compile_state.statement, compile_state.statement, self._params, self.session, self.load_options, ) result = loading.instances(result_proxy, context) # legacy: automatically set scalars, unique if result._attributes.get("is_single_entity", False): result = result.scalars() # type: ignore if result._attributes.get("filtered", False): result = result.unique() # TODO: isn't this supposed to be a list? return result @util.became_legacy_20( ":meth:`_orm.Query.merge_result`", alternative="The method is superseded by the " ":func:`_orm.merge_frozen_result` function.", enable_warnings=False, # warnings occur via loading.merge_result ) def merge_result( self, iterator: Union[ FrozenResult[Any], Iterable[Sequence[Any]], Iterable[object] ], load: bool = True, ) -> Union[FrozenResult[Any], Iterable[Any]]: """Merge a result into this :class:`_query.Query` object's Session. Given an iterator returned by a :class:`_query.Query` of the same structure as this one, return an identical iterator of results, with all mapped instances merged into the session using :meth:`.Session.merge`. This is an optimized method which will merge all mapped instances, preserving the structure of the result rows and unmapped columns with less method overhead than that of calling :meth:`.Session.merge` explicitly for each value. The structure of the results is determined based on the column list of this :class:`_query.Query` - if these do not correspond, unchecked errors will occur. The 'load' argument is the same as that of :meth:`.Session.merge`. 
For an example of how :meth:`_query.Query.merge_result` is used, see the source code for the example :ref:`examples_caching`, where :meth:`_query.Query.merge_result` is used to efficiently restore state from a cache back into a target :class:`.Session`. """ return loading.merge_result(self, iterator, load) def exists(self) -> Exists: """A convenience method that turns a query into an EXISTS subquery of the form EXISTS (SELECT 1 FROM ... WHERE ...). e.g.:: q = session.query(User).filter(User.name == "fred") session.query(q.exists()) Producing SQL similar to: .. sourcecode:: sql SELECT EXISTS ( SELECT 1 FROM users WHERE users.name = :name_1 ) AS anon_1 The EXISTS construct is usually used in the WHERE clause:: session.query(User.id).filter(q.exists()).scalar() Note that some databases such as SQL Server don't allow an EXISTS expression to be present in the columns clause of a SELECT. To select a simple boolean value based on the exists as a WHERE, use :func:`.literal`:: from sqlalchemy import literal session.query(literal(True)).filter(q.exists()).scalar() .. seealso:: :meth:`_sql.Select.exists` - v2 comparable method. """ # .add_columns() for the case that we are a query().select_from(X), # so that ".statement" can be produced (#2995) but also without # omitting the FROM clause from a query(X) (#2818); # .with_only_columns() after we have a core select() so that # we get just "SELECT 1" without any entities. inner = ( self.enable_eagerloads(False) .add_columns(sql.literal_column("1")) .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) ._get_select_statement_only() .with_only_columns(1) ) ezero = self._entity_from_pre_ent_zero() if ezero is not None: inner = inner.select_from(ezero) return sql.exists(inner) def count(self) -> int: r"""Return a count of rows this the SQL formed by this :class:`Query` would return. This generates the SQL for this Query as follows: .. 
sourcecode:: sql SELECT count(1) AS count_1 FROM ( SELECT <rest of query follows...> ) AS anon_1 The above SQL returns a single row, which is the aggregate value of the count function; the :meth:`_query.Query.count` method then returns that single integer value. .. warning:: It is important to note that the value returned by count() is **not the same as the number of ORM objects that this Query would return from a method such as the .all() method**. The :class:`_query.Query` object, when asked to return full entities, will **deduplicate entries based on primary key**, meaning if the same primary key value would appear in the results more than once, only one object of that primary key would be present. This does not apply to a query that is against individual columns. .. seealso:: :ref:`faq_query_deduplicating` For fine grained control over specific columns to count, to skip the usage of a subquery or otherwise control of the FROM clause, or to use other aggregate functions, use :attr:`~sqlalchemy.sql.expression.func` expressions in conjunction with :meth:`~.Session.query`, i.e.:: from sqlalchemy import func # count User records, without # using a subquery. session.query(func.count(User.id)) # return count of user "id" grouped # by "name" session.query(func.count(User.id)).group_by(User.name) from sqlalchemy import distinct # count distinct "name" values session.query(func.count(distinct(User.name))) .. seealso:: :ref:`migration_20_query_usage` """ col = sql.func.count(sql.literal_column("*")) return ( # type: ignore self._legacy_from_self(col).enable_eagerloads(False).scalar() ) def delete( self, synchronize_session: SynchronizeSessionArgument = "auto", delete_args: Optional[Dict[Any, Any]] = None, ) -> int: r"""Perform a DELETE with an arbitrary WHERE clause. Deletes rows matched by this query from the database. 
E.g.:: sess.query(User).filter(User.age == 25).delete(synchronize_session=False) sess.query(User).filter(User.age == 25).delete( synchronize_session="evaluate" ) .. warning:: See the section :ref:`orm_expression_update_delete` for important caveats and warnings, including limitations when using bulk UPDATE and DELETE with mapper inheritance configurations. :param synchronize_session: chooses the strategy to update the attributes on objects in the session. See the section :ref:`orm_expression_update_delete` for a discussion of these strategies. :param delete_args: Optional dictionary, if present will be passed to the underlying :func:`_expression.delete` construct as the ``**kw`` for the object. May be used to pass dialect-specific arguments such as ``mysql_limit``. .. versionadded:: 2.0.37 :return: the count of rows matched as returned by the database's "row count" feature. .. seealso:: :ref:`orm_expression_update_delete` """ # noqa: E501 bulk_del = BulkDelete(self, delete_args) if self.dispatch.before_compile_delete: for fn in self.dispatch.before_compile_delete: new_query = fn(bulk_del.query, bulk_del) if new_query is not None: bulk_del.query = new_query self = bulk_del.query delete_ = sql.delete(*self._raw_columns) # type: ignore if delete_args: delete_ = delete_.with_dialect_options(**delete_args) delete_._where_criteria = self._where_criteria for ext in self._syntax_extensions: delete_._apply_syntax_extension_to_self(ext) result = cast( "CursorResult[Any]", self.session.execute( delete_, self._params, execution_options=self._execution_options.union( {"synchronize_session": synchronize_session} ), ), ) bulk_del.result = result # type: ignore self.session.dispatch.after_bulk_delete(bulk_del) result.close() return result.rowcount def update( self, values: Dict[_DMLColumnArgument, Any], synchronize_session: SynchronizeSessionArgument = "auto", update_args: Optional[Dict[Any, Any]] = None, ) -> int: r"""Perform an UPDATE with an arbitrary WHERE clause. 
Updates rows matched by this query in the database. E.g.:: sess.query(User).filter(User.age == 25).update( {User.age: User.age - 10}, synchronize_session=False ) sess.query(User).filter(User.age == 25).update( {"age": User.age - 10}, synchronize_session="evaluate" ) .. warning:: See the section :ref:`orm_expression_update_delete` for important caveats and warnings, including limitations when using arbitrary UPDATE and DELETE with mapper inheritance configurations. :param values: a dictionary with attributes names, or alternatively mapped attributes or SQL expressions, as keys, and literal values or sql expressions as values. If :ref:`parameter-ordered mode <tutorial_parameter_ordered_updates>` is desired, the values can be passed as a list of 2-tuples; this requires that the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag is passed to the :paramref:`.Query.update.update_args` dictionary as well. :param synchronize_session: chooses the strategy to update the attributes on objects in the session. See the section :ref:`orm_expression_update_delete` for a discussion of these strategies. :param update_args: Optional dictionary, if present will be passed to the underlying :func:`_expression.update` construct as the ``**kw`` for the object. May be used to pass dialect-specific arguments such as ``mysql_limit``, as well as other special arguments such as :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`. :return: the count of rows matched as returned by the database's "row count" feature. .. 
seealso:: :ref:`orm_expression_update_delete` """ update_args = update_args or {} bulk_ud = BulkUpdate(self, values, update_args) if self.dispatch.before_compile_update: for fn in self.dispatch.before_compile_update: new_query = fn(bulk_ud.query, bulk_ud) if new_query is not None: bulk_ud.query = new_query self = bulk_ud.query upd = sql.update(*self._raw_columns) # type: ignore ppo = update_args.pop("preserve_parameter_order", False) if ppo: upd = upd.ordered_values(*values) # type: ignore else: upd = upd.values(values) if update_args: upd = upd.with_dialect_options(**update_args) upd._where_criteria = self._where_criteria for ext in self._syntax_extensions: upd._apply_syntax_extension_to_self(ext) result = cast( "CursorResult[Any]", self.session.execute( upd, self._params, execution_options=self._execution_options.union( {"synchronize_session": synchronize_session} ), ), ) bulk_ud.result = result # type: ignore self.session.dispatch.after_bulk_update(bulk_ud) result.close() return result.rowcount def _compile_state( self, for_statement: bool = False, **kw: Any ) -> _ORMCompileState: """Create an out-of-compiler ORMCompileState object. The ORMCompileState object is normally created directly as a result of the SQLCompiler.process() method being handed a Select() or FromStatement() object that uses the "orm" plugin. This method provides a means of creating this ORMCompileState object directly without using the compiler. This method is used only for deprecated cases, which include the .from_self() method for a Query that has multiple levels of .from_self() in use, as well as the instances() method. It is also used within the test suite to generate ORMCompileState objects for test purposes. """ stmt = self._statement_20(for_statement=for_statement, **kw) assert for_statement == stmt._compile_options._for_statement # this chooses between ORMFromStatementCompileState and # ORMSelectCompileState. 
We could also base this on # query._statement is not None as we have the ORM Query here # however this is the more general path. compile_state_cls = cast( _ORMCompileState, _ORMCompileState._get_plugin_class_for_plugin(stmt, "orm"), ) return compile_state_cls._create_orm_context( stmt, toplevel=True, compiler=None ) def _compile_context(self, for_statement: bool = False) -> QueryContext: compile_state = self._compile_state(for_statement=for_statement) context = QueryContext( compile_state, compile_state.statement, compile_state.statement, self._params, self.session, self.load_options, ) return context
Query
python
kamyu104__LeetCode-Solutions
Python/find-distance-in-a-binary-tree.py
{ "start": 1595, "end": 2440 }
class ____(object): def findDistance(self, root, p, q): """ :type root: TreeNode :type p: int :type q: int :rtype: int """ def dfs(node, p, q, result): if not node: return -1 left = dfs(node.left, p, q, result) right = dfs(node.right, p, q, result) if node.val in (p, q): if left == right == -1: return 0 result[0] = left+1 if left != -1 else right+1 if left != -1 and right != -1: result[0] = left+right+2 elif left != -1: return left+1 elif right != -1: return right+1 return -1 result = [0] dfs(root, p, q, result) return result[0]
Solution2
python
PyCQA__pyflakes
pyflakes/messages.py
{ "start": 2335, "end": 2542 }
class ____(Message): message = 'undefined name %r in __all__' def __init__(self, filename, loc, name): Message.__init__(self, filename, loc) self.message_args = (name,)
UndefinedExport
python
apache__airflow
airflow-core/src/airflow/api_fastapi/execution_api/routes/xcoms.py
{ "start": 4066, "end": 7589 }
class ____(BaseModel): """Class to house the params that can optionally be set for Get XCom.""" map_index: int = -1 include_prior_dates: bool = False offset: int | None = None @router.get( "/{dag_id}/{run_id}/{task_id}/{key}", description="Get a single XCom Value", ) def get_xcom( dag_id: str, run_id: str, task_id: str, key: Annotated[str, StringConstraints(min_length=1)], session: SessionDep, params: Annotated[GetXcomFilterParams, Query()], ) -> XComResponse: """Get an Airflow XCom from database - not other XCom Backends.""" xcom_query = XComModel.get_many( run_id=run_id, key=key, task_ids=task_id, dag_ids=dag_id, include_prior_dates=params.include_prior_dates, ) if params.offset is not None: xcom_query = xcom_query.where(XComModel.value.is_not(None)).order_by(None) if params.offset >= 0: xcom_query = xcom_query.order_by(XComModel.map_index.asc()).offset(params.offset) else: xcom_query = xcom_query.order_by(XComModel.map_index.desc()).offset(-1 - params.offset) else: xcom_query = xcom_query.where(XComModel.map_index == params.map_index) # We use `BaseXCom.get_many` to fetch XComs directly from the database, bypassing the XCom Backend. # This avoids deserialization via the backend (e.g., from a remote storage like S3) and instead # retrieves the raw serialized value from the database. By not relying on `XCom.get_many` or `XCom.get_one` # (which automatically deserializes using the backend), we avoid potential # performance hits from retrieving large data files into the API server. 
result = session.scalars(xcom_query).first() if result is None: if params.offset is None: message = ( f"XCom with {key=} map_index={params.map_index} not found for " f"task {task_id!r} in DAG run {run_id!r} of {dag_id!r}" ) else: message = ( f"XCom with {key=} offset={params.offset} not found for " f"task {task_id!r} in DAG run {run_id!r} of {dag_id!r}" ) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail={"reason": "not_found", "message": message}, ) return XComResponse(key=key, value=result.value) @router.get( "/{dag_id}/{run_id}/{task_id}/{key}/item/{offset}", description="Get a single XCom value from a mapped task by sequence index", ) def get_mapped_xcom_by_index( dag_id: str, run_id: str, task_id: str, key: str, offset: int, session: SessionDep, ) -> XComSequenceIndexResponse: xcom_query = XComModel.get_many( run_id=run_id, key=key, task_ids=task_id, dag_ids=dag_id, ) xcom_query = xcom_query.order_by(None) if offset >= 0: xcom_query = xcom_query.order_by(XComModel.map_index.asc()).offset(offset) else: xcom_query = xcom_query.order_by(XComModel.map_index.desc()).offset(-1 - offset) if (result := session.scalars(xcom_query).first()) is None: message = ( f"XCom with {key=} {offset=} not found for task {task_id!r} in DAG run {run_id!r} of {dag_id!r}" ) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail={"reason": "not_found", "message": message}, ) return XComSequenceIndexResponse(result.value)
GetXcomFilterParams
python
mkdocs__mkdocs
mkdocs/tests/structure/page_tests.py
{ "start": 32595, "end": 58576 }
class ____(unittest.TestCase): def get_rendered_result( self, *, content: str, files: list[str], logs: str = '', **kwargs ) -> str: cfg = load_config(docs_dir=DOCS_DIR, **kwargs) fs = [File(f, cfg.docs_dir, cfg.site_dir, cfg.use_directory_urls) for f in files] pg = Page('Foo', fs[0], cfg) with mock.patch('mkdocs.structure.files.open', mock.mock_open(read_data=content)): pg.read_source(cfg) if logs: with self.assertLogs('mkdocs.structure.pages') as cm: pg.render(cfg, Files(fs)) msgs = [f'{r.levelname}:{r.message}' for r in cm.records] self.assertEqual('\n'.join(msgs), textwrap.dedent(logs).strip('\n')) elif sys.version_info >= (3, 10): with self.assertNoLogs('mkdocs.structure.pages'): pg.render(cfg, Files(fs)) else: pg.render(cfg, Files(fs)) assert pg.content is not None content = pg.content if content.startswith('<p>') and content.endswith('</p>'): content = content[3:-4] return content def test_relative_html_link(self): self.assertEqual( self.get_rendered_result( content='[link](non-index.md)', files=['index.md', 'non-index.md'] ), '<a href="non-index/">link</a>', ) self.assertEqual( self.get_rendered_result( use_directory_urls=False, content='[link](non-index.md)', files=['index.md', 'non-index.md'], ), '<a href="non-index.html">link</a>', ) def test_relative_html_link_index(self): self.assertEqual( self.get_rendered_result( content='[link](index.md)', files=['non-index.md', 'index.md'] ), '<a href="../">link</a>', ) self.assertEqual( self.get_rendered_result( use_directory_urls=False, content='[link](index.md)', files=['non-index.md', 'index.md'], ), '<a href="index.html">link</a>', ) def test_relative_html_link_sub_index(self): self.assertEqual( self.get_rendered_result( content='[link](sub2/index.md)', files=['index.md', 'sub2/index.md'] ), '<a href="sub2/">link</a>', ) self.assertEqual( self.get_rendered_result( use_directory_urls=False, content='[link](sub2/index.md)', files=['index.md', 'sub2/index.md'], ), '<a href="sub2/index.html">link</a>', ) def 
test_relative_html_link_sub_page(self): self.assertEqual( self.get_rendered_result( content='[link](sub2/non-index.md)', files=['index.md', 'sub2/non-index.md'] ), '<a href="sub2/non-index/">link</a>', ) self.assertEqual( self.get_rendered_result( use_directory_urls=False, content='[link](sub2/non-index.md)', files=['index.md', 'sub2/non-index.md'], ), '<a href="sub2/non-index.html">link</a>', ) def test_relative_doc_link_without_extension(self): self.assertEqual( self.get_rendered_result( use_directory_urls=False, content='[link](bar/Dockerfile)', files=['foo/bar.md', 'foo/bar/Dockerfile'], ), '<a href="bar/Dockerfile">link</a>', ) self.assertEqual( self.get_rendered_result( content='[link](bar/Dockerfile)', files=['foo/bar.md', 'foo/bar/Dockerfile'], ), '<a href="Dockerfile">link</a>', ) def test_relative_html_link_with_encoded_space(self): self.assertEqual( self.get_rendered_result( content='[link](file%20name.md)', files=['index.md', 'file name.md'] ), '<a href="file%20name/">link</a>', ) def test_relative_html_link_with_unencoded_space(self): self.assertEqual( self.get_rendered_result( use_directory_urls=False, content='[link](file name.md)', files=['index.md', 'file name.md'], ), '<a href="file%20name.html">link</a>', ) def test_relative_html_link_parent_index(self): self.assertEqual( self.get_rendered_result( content='[link](../index.md)', files=['sub2/non-index.md', 'index.md'] ), '<a href="../../">link</a>', ) self.assertEqual( self.get_rendered_result( use_directory_urls=False, content='[link](../index.md)', files=['sub2/non-index.md', 'index.md'], ), '<a href="../index.html">link</a>', ) def test_relative_html_link_hash(self): self.assertEqual( self.get_rendered_result( content='[link](non-index.md#hash)', files=['index.md', 'non-index.md'] ), '<a href="non-index/#hash">link</a>', ) def test_relative_html_link_sub_index_hash(self): self.assertEqual( self.get_rendered_result( content='[link](sub2/index.md#hash)', files=['index.md', 'sub2/index.md'] ), '<a 
href="sub2/#hash">link</a>', ) self.assertEqual( self.get_rendered_result( use_directory_urls=False, content='[link](sub2/index.md#hash)', files=['index.md', 'sub2/index.md'], ), '<a href="sub2/index.html#hash">link</a>', ) def test_relative_html_link_sub_page_hash(self): self.assertEqual( self.get_rendered_result( content='[link](sub2/non-index.md#hash)', files=['index.md', 'sub2/non-index.md'] ), '<a href="sub2/non-index/#hash">link</a>', ) def test_relative_html_link_hash_only(self): for use_directory_urls in True, False: self.assertEqual( self.get_rendered_result( use_directory_urls=use_directory_urls, content='[link](#hash)', files=['index.md'], ), '<a href="#hash">link</a>', ) def test_relative_image_link_from_homepage(self): for use_directory_urls in True, False: self.assertEqual( self.get_rendered_result( use_directory_urls=use_directory_urls, content='![image](image.png)', files=['index.md', 'image.png'], ), '<img alt="image" src="image.png" />', # no opening ./ ) def test_relative_image_link_from_subpage(self): self.assertEqual( self.get_rendered_result( content='![image](../image.png)', files=['sub2/non-index.md', 'image.png'] ), '<img alt="image" src="../../image.png" />', ) def test_relative_image_link_from_sibling(self): self.assertEqual( self.get_rendered_result( content='![image](image.png)', files=['non-index.md', 'image.png'] ), '<img alt="image" src="../image.png" />', ) self.assertEqual( self.get_rendered_result( use_directory_urls=False, content='![image](image.png)', files=['non-index.md', 'image.png'], ), '<img alt="image" src="image.png" />', ) def test_no_links(self): self.assertEqual( self.get_rendered_result(content='*__not__ a link*.', files=['index.md']), '<em><strong>not</strong> a link</em>.', ) def test_bad_relative_doc_link(self): self.assertEqual( self.get_rendered_result( content='[link](non-existent.md)', files=['index.md'], logs="WARNING:Doc file 'index.md' contains a link 'non-existent.md', but the target is not found among 
documentation files.", ), '<a href="non-existent.md">link</a>', ) self.assertEqual( self.get_rendered_result( validation=dict(links=dict(not_found='info')), content='[link](../non-existent.md)', files=['sub/index.md'], logs="INFO:Doc file 'sub/index.md' contains a link '../non-existent.md', but the target 'non-existent.md' is not found among documentation files.", ), '<a href="../non-existent.md">link</a>', ) def test_relative_slash_link_with_suggestion(self): self.assertEqual( self.get_rendered_result( content='[link](../about/)', files=['foo/index.md', 'about.md'], logs="INFO:Doc file 'foo/index.md' contains an unrecognized relative link '../about/', it was left as is. Did you mean '../about.md'?", ), '<a href="../about/">link</a>', ) self.assertEqual( self.get_rendered_result( validation=dict(links=dict(unrecognized_links='warn')), content='[link](../#example)', files=['foo/bar.md', 'index.md'], logs="WARNING:Doc file 'foo/bar.md' contains an unrecognized relative link '../#example', it was left as is. Did you mean '../index.md#example'?", ), '<a href="../#example">link</a>', ) def test_self_anchor_link_with_suggestion(self): self.assertEqual( self.get_rendered_result( content='[link](./#test)', files=['index.md'], logs="INFO:Doc file 'index.md' contains an unrecognized relative link './#test', it was left as is. Did you mean '#test'?", ), '<a href="./#test">link</a>', ) def test_absolute_self_anchor_link_with_suggestion(self): self.assertEqual( self.get_rendered_result( content='[link](/index#test)', files=['index.md'], logs="INFO:Doc file 'index.md' contains an absolute link '/index#test', it was left as is. 
Did you mean '#test'?", ), '<a href="/index#test">link</a>', ) def test_absolute_self_anchor_link_with_validation_and_suggestion(self): self.assertEqual( self.get_rendered_result( validation=dict(links=dict(absolute_links='relative_to_docs')), content='[link](/index#test)', files=['index.md'], logs="WARNING:Doc file 'index.md' contains a link '/index#test', but the target 'index' is not found among documentation files. Did you mean '#test'?", ), '<a href="/index#test">link</a>', ) def test_absolute_anchor_link_with_validation(self): self.assertEqual( self.get_rendered_result( validation=dict(links=dict(absolute_links='relative_to_docs')), content='[link](/foo/bar.md#test)', files=['index.md', 'foo/bar.md'], ), '<a href="foo/bar/#test">link</a>', ) def test_absolute_anchor_link_with_validation_and_suggestion(self): self.assertEqual( self.get_rendered_result( validation=dict(links=dict(absolute_links='relative_to_docs')), content='[link](/foo/bar#test)', files=['zoo/index.md', 'foo/bar.md'], logs="WARNING:Doc file 'zoo/index.md' contains a link '/foo/bar#test', but the target 'foo/bar' is not found among documentation files. Did you mean '/foo/bar.md#test'?", ), '<a href="/foo/bar#test">link</a>', ) def test_external_link(self): self.assertEqual( self.get_rendered_result( content='[external](http://example.com/index.md)', files=['index.md'] ), '<a href="http://example.com/index.md">external</a>', ) def test_absolute_link_with_suggestion(self): self.assertEqual( self.get_rendered_result( content='[absolute link](/path/to/file.md)', files=['index.md', 'path/to/file.md'], logs="INFO:Doc file 'index.md' contains an absolute link '/path/to/file.md', it was left as is. 
Did you mean 'path/to/file.md'?", ), '<a href="/path/to/file.md">absolute link</a>', ) self.assertEqual( self.get_rendered_result( use_directory_urls=False, content='[absolute link](/path/to/file/)', files=['path/index.md', 'path/to/file.md'], logs="INFO:Doc file 'path/index.md' contains an absolute link '/path/to/file/', it was left as is.", ), '<a href="/path/to/file/">absolute link</a>', ) self.assertEqual( self.get_rendered_result( content='[absolute link](/path/to/file)', files=['path/index.md', 'path/to/file.md'], logs="INFO:Doc file 'path/index.md' contains an absolute link '/path/to/file', it was left as is. Did you mean 'to/file.md'?", ), '<a href="/path/to/file">absolute link</a>', ) def test_absolute_link_with_validation(self): self.assertEqual( self.get_rendered_result( validation=dict(links=dict(absolute_links='relative_to_docs')), content='[absolute link](/path/to/file.md)', files=['index.md', 'path/to/file.md'], ), '<a href="path/to/file/">absolute link</a>', ) self.assertEqual( self.get_rendered_result( validation=dict(links=dict(absolute_links='relative_to_docs')), use_directory_urls=False, content='[absolute link](/path/to/file.md)', files=['path/index.md', 'path/to/file.md'], ), '<a href="to/file.html">absolute link</a>', ) def test_absolute_link_with_validation_and_suggestion(self): self.assertEqual( self.get_rendered_result( validation=dict(links=dict(absolute_links='relative_to_docs')), use_directory_urls=False, content='[absolute link](/path/to/file/)', files=['path/index.md', 'path/to/file.md'], logs="WARNING:Doc file 'path/index.md' contains a link '/path/to/file/', but the target 'path/to/file' is not found among documentation files.", ), '<a href="/path/to/file/">absolute link</a>', ) self.assertEqual( self.get_rendered_result( validation=dict(links=dict(absolute_links='relative_to_docs')), content='[absolute link](/path/to/file)', files=['path/index.md', 'path/to/file.md'], logs="WARNING:Doc file 'path/index.md' contains a link 
'/path/to/file', but the target is not found among documentation files. Did you mean '/path/to/file.md'?", ), '<a href="/path/to/file">absolute link</a>', ) def test_absolute_link_with_validation_just_slash(self): self.assertEqual( self.get_rendered_result( validation=dict(links=dict(absolute_links='relative_to_docs')), content='[absolute link](/)', files=['path/to/file.md', 'index.md'], logs="WARNING:Doc file 'path/to/file.md' contains a link '/', but the target '.' is not found among documentation files. Did you mean '/index.md'?", ), '<a href="/">absolute link</a>', ) def test_absolute_link_preserved_and_warned(self): self.assertEqual( self.get_rendered_result( validation=dict(links=dict(absolute_links='warn')), content='[absolute link](/path/to/file.md)', files=['index.md'], logs="WARNING:Doc file 'index.md' contains an absolute link '/path/to/file.md', it was left as is.", ), '<a href="/path/to/file.md">absolute link</a>', ) self.assertEqual( self.get_rendered_result( validation=dict(links=dict(absolute_links='ignore')), content='[absolute link](/path/to/file.md)', files=['index.md'], ), '<a href="/path/to/file.md">absolute link</a>', ) def test_image_link_with_suggestion(self): self.assertEqual( self.get_rendered_result( content='![image](../image.png)', files=['foo/bar.md', 'foo/image.png'], logs="WARNING:Doc file 'foo/bar.md' contains a link '../image.png', but the target 'image.png' is not found among documentation files. Did you mean 'image.png'?", ), '<img alt="image" src="../image.png" />', ) self.assertEqual( self.get_rendered_result( content='![image](/image.png)', files=['foo/bar.md', 'image.png'], logs="INFO:Doc file 'foo/bar.md' contains an absolute link '/image.png', it was left as is. 
Did you mean '../image.png'?", ), '<img alt="image" src="/image.png" />', ) def test_absolute_win_local_path(self): for use_directory_urls in True, False: self.assertEqual( self.get_rendered_result( use_directory_urls=use_directory_urls, content='[absolute local path](\\image.png)', files=['index.md'], logs="INFO:Doc file 'index.md' contains an absolute link '\\image.png', it was left as is.", ), '<a href="\\image.png">absolute local path</a>', ) def test_email_link(self): self.assertEqual( self.get_rendered_result(content='<mail@example.com>', files=['index.md']), # Markdown's default behavior is to obscure email addresses by entity-encoding them. # The following is equivalent to: '<a href="mailto:mail@example.com">mail@example.com</a>' '<a href="&#109;&#97;&#105;&#108;&#116;&#111;&#58;&#109;&#97;&#105;&#108;&#64;&#101;' '&#120;&#97;&#109;&#112;&#108;&#101;&#46;&#99;&#111;&#109;">&#109;&#97;&#105;&#108;&#64;' '&#101;&#120;&#97;&#109;&#112;&#108;&#101;&#46;&#99;&#111;&#109;</a>', ) def test_invalid_email_link(self): self.assertEqual( self.get_rendered_result( content='[contact](mail@example.com)', files=['index.md'], logs="WARNING:Doc file 'index.md' contains a link 'mail@example.com', but the target is not found among documentation files. 
Did you mean 'mailto:mail@example.com'?", ), '<a href="mail@example.com">contact</a>', ) def test_possible_target_uris(self): def test(paths, expected='', exp_true=None, exp_false=None): """Test that `possible_target_uris` yields expected values, for use_directory_urls = true and false.""" for use_directory_urls, expected_paths in ( (True, exp_true or expected), (False, exp_false or expected), ): with self.subTest(paths, use_directory_urls=use_directory_urls): src_path, dest_path = paths f = File(src_path, '', '', use_directory_urls) actual = _RelativePathTreeprocessor._possible_target_uris( f, dest_path, use_directory_urls ) self.assertEqual(list(actual), expected_paths.split(', ')) test(('index.md', 'index.md'), expected='index.md') test(('index.md', 'foo/bar.md'), expected='foo/bar.md') test( ('index.md', 'foo/bar'), expected='foo/bar, foo/bar/index.md, foo/bar/README.md, foo/bar.md', ) test(('index.md', 'foo/bar.html'), expected='foo/bar.html, foo/bar.md') test( ('foo.md', 'foo/bar.html'), exp_true='foo/bar.html, foo/bar.md, foo/foo/bar.html, foo/foo/bar.md', exp_false='foo/bar.html, foo/bar.md', ) test(('foo.md', 'index.md'), exp_true='index.md, foo/index.md', exp_false='index.md') test(('foo.md', 'foo.md'), exp_true='foo.md, foo/foo.md', exp_false='foo.md') test(('foo.md', 'bar.md'), exp_true='bar.md, foo/bar.md', exp_false='bar.md') test( ('foo.md', 'foo/bar.md'), exp_true='foo/bar.md, foo/foo/bar.md', exp_false='foo/bar.md' ) test( ('foo.md', 'foo'), exp_true='foo, foo/index.md, foo/README.md, foo.md, foo/foo, foo/foo/index.md, foo/foo/README.md, foo/foo.md', exp_false='foo, foo/index.md, foo/README.md, foo.md', ) test( ('foo.md', 'foo/bar'), exp_true='foo/bar, foo/bar/index.md, foo/bar/README.md, foo/bar.md, foo/foo/bar, foo/foo/bar/index.md, foo/foo/bar/README.md, foo/foo/bar.md', exp_false='foo/bar, foo/bar/index.md, foo/bar/README.md, foo/bar.md', ) test( ('foo.md', 'foo/bar/'), exp_true='foo/bar, foo/bar/index.md, foo/bar/README.md, foo/bar.md, 
foo/foo/bar, foo/foo/bar/index.md, foo/foo/bar/README.md, foo/foo/bar.md', exp_false='foo/bar, foo/bar/index.md, foo/bar/README.md', ) test( ('foo.md', 'foo.html'), exp_true='foo.html, foo.md, foo/foo.html, foo/foo.md', exp_false='foo.html, foo.md', ) test( ('foo.md', '../foo/'), exp_true='../foo, foo, foo/index.md, foo/README.md, foo.md', exp_false='../foo', ) test( ('foo.md', 'foo/'), exp_true='foo, foo/index.md, foo/README.md, foo.md, foo/foo, foo/foo/index.md, foo/foo/README.md, foo/foo.md', exp_false='foo, foo/index.md, foo/README.md', ) test(('foo/index.md', 'index.md'), expected='foo/index.md') test(('foo/index.md', 'foo/bar.html'), expected='foo/foo/bar.html, foo/foo/bar.md') test(('foo/index.md', '../foo.html'), expected='foo.html, foo.md') test(('foo/index.md', '../'), expected='., index.md, README.md') test( ('foo/bar.md', 'index.md'), exp_true='foo/index.md, foo/bar/index.md', exp_false='foo/index.md', ) test( ('foo/bar.md', 'foo.md'), exp_true='foo/foo.md, foo/bar/foo.md', exp_false='foo/foo.md', ) test( ('foo/bar.md', 'bar.md'), exp_true='foo/bar.md, foo/bar/bar.md', exp_false='foo/bar.md', ) test( ('foo/bar.md', 'foo/bar.md'), exp_true='foo/foo/bar.md, foo/bar/foo/bar.md', exp_false='foo/foo/bar.md', ) test( ('foo/bar.md', 'foo'), exp_true='foo/foo, foo/foo/index.md, foo/foo/README.md, foo/foo.md, foo/bar/foo, foo/bar/foo/index.md, foo/bar/foo/README.md, foo/bar/foo.md', exp_false='foo/foo, foo/foo/index.md, foo/foo/README.md, foo/foo.md', ) test( ('foo/bar.md', 'foo/bar'), exp_true='foo/foo/bar, foo/foo/bar/index.md, foo/foo/bar/README.md, foo/foo/bar.md, foo/bar/foo/bar, foo/bar/foo/bar/index.md, foo/bar/foo/bar/README.md, foo/bar/foo/bar.md', exp_false='foo/foo/bar, foo/foo/bar/index.md, foo/foo/bar/README.md, foo/foo/bar.md', ) test( ('foo/bar.md', 'foo.html'), exp_true='foo/foo.html, foo/foo.md, foo/bar/foo.html, foo/bar/foo.md', exp_false='foo/foo.html, foo/foo.md', ) test( ('foo/bar.md', 'foo/bar.html'), exp_true='foo/foo/bar.html, 
foo/foo/bar.md, foo/bar/foo/bar.html, foo/bar/foo/bar.md', exp_false='foo/foo/bar.html, foo/foo/bar.md', ) test( ('foo/bar.md', '../foo/bar.html'), exp_true='foo/bar.html, foo/bar.md, foo/foo/bar.html, foo/foo/bar.md', exp_false='foo/bar.html, foo/bar.md', ) test( ('foo/bar.md', '../foo'), exp_true='foo, foo/index.md, foo/README.md, foo.md, foo/foo, foo/foo/index.md, foo/foo/README.md, foo/foo.md', exp_false='foo, foo/index.md, foo/README.md, foo.md', ) test( ('foo/bar.md', '../'), exp_true='., index.md, README.md, foo, foo/index.md, foo/README.md', exp_false='., index.md, README.md', ) for src in 'foo/bar.md', 'foo.md', 'foo/index.md': test((src, '/foo'), expected='foo, foo/index.md, foo/README.md, foo.md') test((src, '/foo/bar.md'), expected='foo/bar.md') test((src, '/foo/bar.html'), expected='foo/bar.html, foo/bar.md') for dest in '', '.', './': test(('index.md', dest), expected='., index.md') test(('foo/bar.md', dest), expected='foo, foo/bar.md') test( ('foo/bar.md', '../test.png'), exp_true='test.png, test.png.md, foo/test.png, foo/test.png.md', exp_false='test.png, test.png.md', )
RelativePathExtensionTests
python
pytransitions__transitions
tests/test_graphviz.py
{ "start": 13561, "end": 21934 }
class ____(TestDiagrams): machine_cls = HierarchicalGraphMachine \ # type: Type[Union[HierarchicalGraphMachine, LockedHierarchicalGraphMachine]] def setUp(self): super(TestDiagramsNested, self).setUp() self.states = ['A', 'B', {'name': 'C', 'children': [{'name': '1', 'children': ['a', 'b', 'c']}, '2', '3']}, 'D'] # type: List[Union[str, Collection[str]]] self.transitions = [ {'trigger': 'walk', 'source': 'A', 'dest': 'B'}, # 1 edge {'trigger': 'run', 'source': 'B', 'dest': 'C'}, # + 1 edge {'trigger': 'sprint', 'source': 'C', 'dest': 'D', # + 1 edge 'conditions': 'is_fast'}, {'trigger': 'sprint', 'source': 'C', 'dest': 'B'}, # + 1 edge {'trigger': 'reset', 'source': '*', 'dest': 'A'} # + 4 edges (from base state) = 8 ] # type: Sequence[TransitionConfigDict] def test_diagram(self): m = self.machine_cls(states=self.states, transitions=self.transitions, initial='A', auto_transitions=False, title='A test', show_conditions=True, graph_engine=self.graph_engine) graph = m.get_graph() self.assertIsNotNone(graph) self.assertTrue("digraph" in str(graph)) _, nodes, edges = self.parse_dot(graph) self.assertEqual(len(edges), 8) # Test that graph properties match the Machine self.assertEqual(set(m.get_nested_state_names()), nodes) m.walk() m.run() # write diagram to temp file target = tempfile.NamedTemporaryFile(suffix='.png', delete=False) m.get_graph().draw(target.name, prog='dot') self.assertTrue(os.path.getsize(target.name) > 0) # backwards compatibility check m.get_graph().draw(target.name, prog='dot') self.assertTrue(os.path.getsize(target.name) > 0) # cleanup temp file target.close() os.unlink(target.name) def test_roi(self): class Model: def is_fast(self, *args, **kwargs): return True model = Model() m = self.machine_cls(model, states=self.states, transitions=self.transitions, initial='A', title='A test', graph_engine=self.graph_engine, show_conditions=True) model.walk() model.run() g1 = model.get_graph(show_roi=True) _, nodes, edges = self.parse_dot(g1) 
self.assertEqual(len(edges), 4) self.assertEqual(len(nodes), 4) model.sprint() g2 = model.get_graph(show_roi=True) dot, nodes, edges = self.parse_dot(g2) self.assertEqual(len(edges), 2) self.assertEqual(len(nodes), 3) def test_roi_parallel(self): class Model: @staticmethod def is_fast(*args, **kwargs): return True self.states[0] = {"name": "A", "parallel": ["1", "2"]} model = Model() m = self.machine_cls(model, states=self.states, transitions=self.transitions, initial='A', title='A test', graph_engine=self.graph_engine, show_conditions=True) g1 = model.get_graph(show_roi=True) _, nodes, edges = self.parse_dot(g1) self.assertEqual(len(edges), 2) # reset and walk print(nodes) self.assertEqual(len(nodes), 4) model.walk() model.run() model.sprint() g2 = model.get_graph(show_roi=True) dot, nodes, edges = self.parse_dot(g2) self.assertEqual(len(edges), 2) self.assertEqual(len(nodes), 3) def test_roi_parallel_deeper(self): states = ['A', 'B', 'C', 'D', {'name': 'P', 'parallel': [ '1', {'name': '2', 'parallel': [ {'name': 'a'}, {'name': 'b', 'parallel': [ {'name': 'x', 'parallel': ['1', '2']}, 'y' ]} ]}, ]}] transitions = [["go", "A", "P"], ["reset", "*", "A"]] m = self.machine_cls(states=states, transitions=transitions, initial='A', title='A test', graph_engine=self.graph_engine, show_conditions=True) m.go() _, nodes, edges = self.parse_dot(m.get_graph(show_roi=True)) self.assertEqual(len(edges), 2) self.assertEqual(len(nodes), 10) def test_internal(self): states = ['A', 'B'] transitions = [ ['go', 'A', 'B'], dict(trigger='fail', source='A', dest=None, conditions=['failed']), dict(trigger='fail', source='A', dest='B', unless=['failed']) ] # type: Sequence[TransitionConfig] m = self.machine_cls(states=states, transitions=transitions, initial='A', show_conditions=True, graph_engine=self.graph_engine) _, nodes, edges = self.parse_dot(m.get_graph()) print(nodes) self.assertEqual(len(nodes), 2) self.assertEqual(len([e for e in edges if '[internal]' in e]), 1) def 
test_internal_wildcards(self): internal_only_once = r'^(?:(?!\[internal\]).)*\[internal\](?!.*\[internal\]).*$' states = [ "initial", "ready", "running" ] transitions = [ ["booted", "initial", "ready"], {"trigger": "polled", "source": "ready", "dest": "running", "conditions": "door_closed"}, ["done", "running", "ready"], ["polled", "*", None] ] # type: Sequence[TransitionConfig] m = self.machine_cls(states=states, transitions=transitions, show_conditions=True, graph_engine=self.graph_engine, initial='initial') _, nodes, edges = self.parse_dot(m.get_graph()) self.assertEqual(len(nodes), 3) self.assertEqual(len([e for e in edges if re.match(internal_only_once, e)]), 3) def test_nested_notebook(self): states = [{'name': 'caffeinated', 'on_enter': 'do_x', 'children': ['dithering', 'running'], 'transitions': [['walk', 'dithering', 'running'], ['drink', 'dithering', '=']], }, {'name': 'standing', 'on_enter': ['do_x', 'do_y'], 'on_exit': 'do_z'}, {'name': 'walking', 'tags': ['accepted', 'pending'], 'timeout': 5, 'on_timeout': 'do_z'}] transitions = [ ['walk', 'standing', 'walking'], ['go', 'standing', 'walking'], ['stop', 'walking', 'standing'], {'trigger': 'drink', 'source': '*', 'dest': 'caffeinated{0}dithering'.format(self.machine_cls.state_cls.separator), 'conditions': 'is_hot', 'unless': 'is_too_hot'}, ['relax', 'caffeinated', 'standing'], ['sip', 'standing', 'caffeinated'] ] @add_state_features(Timeout, Tags) class CustomStateMachine(self.machine_cls): # type: ignore def is_hot(self): return True def is_too_hot(self): return False def do_x(self): pass def do_z(self): pass extra_args = dict(auto_transitions=False, initial='standing', title='Mood Matrix', show_conditions=True, show_state_attributes=True, graph_engine=self.graph_engine) machine = CustomStateMachine(states=states, transitions=transitions, **extra_args) g1 = machine.get_graph() # dithering should have 4 'drink' edges, a) from walking, b) from initial, c) from running and d) from itself if 
self.graph_engine == "pygraphviz": dot_string = g1.string() else: dot_string = g1.source count = re.findall('-> "?caffeinated{0}dithering"?'.format(machine.state_cls.separator), dot_string) self.assertEqual(4, len(count)) self.assertTrue(True) machine.drink() machine.drink() g1 = machine.get_graph() self.assertIsNotNone(g1) @skipIf(pgv is None, 'NestedGraph diagram test requires graphviz')
TestDiagramsNested
python
tensorflow__tensorflow
tensorflow/python/framework/convert_to_constants.py
{ "start": 7532, "end": 12916 }
class ____(_Convertible): """A Convertible NodeDef.""" def __init__(self, node, function, enclosing_graph): super(_Node, self).__init__(enclosing_graph) self._node = node self._function = function def __str__(self): return self._node.name @staticmethod def new(node, function, enclosing_graph): """Creates a new _Node base on its operation type.""" if node.op in ["VariableV2", "VarHandleOp", "Placeholder"]: return _VarHandle(node, function, enclosing_graph) elif node.op == "Case": return _Case(node, function, enclosing_graph) elif node.op == "Merge": return _Merge(node, function, enclosing_graph) elif node.op == "PartitionedCall": return _PartitionedCall(node, function, enclosing_graph) elif node.op == "StatefulPartitionedCall": return _PartitionedCall(node, function, enclosing_graph) elif node.op == "ReadVariableOp": return _ReadVariable(node, function, enclosing_graph) elif node.op == "ResourceGather": return _ResourceGather(node, function, enclosing_graph) elif node.op == "ResourceGatherNd": return _ResourceGatherNd(node, function, enclosing_graph) elif node.op in ["If", "StatelessIf"]: return _If(node, function, enclosing_graph) elif node.op in ["While", "StatelessWhile"]: return _While(node, function, enclosing_graph) elif node.op in [ "Enter", "Exit", "Identity", "NextIteration", "Switch", "_SwitchN"]: return _Intermediate(node, function, enclosing_graph) else: return _Node(node, function, enclosing_graph) @property def node(self): return self._node @property def container(self): """The node container (either a graph or a function).""" if self._function is not None: return self._function.function return self._enclosing_graph.graph_def def converted_self(self): """The NodeDef to be converted. Returns: The NodeDef to be converted, which can come from either a graph for a function. Derived classes should call this (via 'super') to make sure the node is retrieved from the right place. 
""" if self._converted_self is None: source = self._function or self._enclosing_graph self._converted_self = source.converted_self().nodes[self._node.name] return self._converted_self def convert_variable_to_constant(self, incoming_edge, tensor_data): pass def create_edges(self): for index, name in enumerate(self._node.input): # Discard edges from control inputs. if name[0] == "^": continue source = self.resolve_input(name) source.convertible.add_outgoing_edge( _Edge(source, _EndPoint(self, index))) def resolve_input(self, input_name): """Resolves an input into its _EndPoint. A NodeDef's input name can refer to either global NodeDefs (in the GraphDef's node list), a NodeDef in a function's node list, or a Function (in the GraphDef's function library). The name can also carry semantic information, depending on whether it starts with "^". This method handles all that logic in order to find the object to which the input name refers to. Args: input_name: The input name to resolve. Returns: The object referred to by 'input_name'. """ # The logic below oversimplifies the semantics, but is good enough for the # purposes of converting to constants. The introduction of new types of # operations may change this, forcing the code to be more generic. # # In particular, we are assuming that the lack of an index suffix means # ":0", when it could mean "all the outputs of a node." This works now # because converting to constants relies very little on output types, and # when it does it specializes its treatment in dedicated classes. 
name_elts = input_name.split(":") source_name = name_elts[0] if source_name[0] == "^": source_name = source_name[1:] source_index = 0 if len(name_elts) > 1 and name_elts[-1].isnumeric(): source_index = int(name_elts[-1]) if self._function is None: return _EndPoint(self._enclosing_graph.nodes[source_name], source_index) if source_index != 0 or source_name in self._function.nodes: return _EndPoint(self._function.nodes[source_name], source_index) inputs = [i.name for i in self._function.function.signature.input_arg] return _EndPoint(self._function, inputs.index(source_name)) def update_dtype(self, attr_name, index, dtype): """Changes the type of a given input. Args: attr_name: The NodeDef attribute containing the type to change. index: The index of the input type to change. dtype: The type to change to. """ attr = self._node.attr[attr_name] num_types = 0 # Check for various 'oneof' possibilities, and update the type if # index in range. if attr.HasField("list"): types = attr.list.type num_types = len(types) if num_types > index: types[index] = dtype return elif attr.HasField("type"): num_types = 1 if index == 0: attr.type = dtype return raise ValueError(f"`index` {index:d} is out of range for " f"node({self._node.name}).attr({attr_name}), which has " f"{num_types:d} elements.")
_Node
python
pyqtgraph__pyqtgraph
pyqtgraph/parametertree/parameterTypes/list.py
{ "start": 192, "end": 2352 }
class ____(WidgetParameterItem): """ WidgetParameterItem subclass providing comboBox that lets the user select from a list of options. """ def __init__(self, param, depth): self.targetValue = None WidgetParameterItem.__init__(self, param, depth) def makeWidget(self): w = QtWidgets.QComboBox() w.setMaximumHeight(20) ## set to match height of spin box and line edit w.sigChanged = w.currentIndexChanged w.value = self.value w.setValue = self.setValue self.widget = w ## needs to be set before limits are changed self.limitsChanged(self.param, self.param.opts['limits']) if len(self.forward) > 0 and self.param.hasValue(): self.setValue(self.param.value()) return w def value(self): key = self.widget.currentText() return self.forward.get(key, None) def setValue(self, val): self.targetValue = val match = [fn.eq(val, limVal) for limVal in self.reverse[0]] if not any(match): self.widget.setCurrentIndex(0) else: idx = match.index(True) key = self.reverse[1][idx] ind = self.widget.findText(key) self.widget.setCurrentIndex(ind) def limitsChanged(self, param, limits): # set up forward / reverse mappings for name:value if len(limits) == 0: limits = [''] ## Can never have an empty list--there is always at least a singhe blank item. self.forward, self.reverse = ListParameter.mapping(limits) try: self.widget.blockSignals(True) val = self.targetValue self.widget.clear() for k in self.forward: self.widget.addItem(k) if k == val: self.widget.setCurrentIndex(self.widget.count()-1) self.updateDisplayLabel() finally: self.widget.blockSignals(False) def updateDisplayLabel(self, value=None): if value is None: value = self.widget.currentText() super().updateDisplayLabel(value)
ListParameterItem
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/extra/lark.py
{ "start": 2202, "end": 9882 }
class ____(st.SearchStrategy): """Low-level strategy implementation wrapping a Lark grammar. See ``from_lark`` for details. """ def __init__( self, grammar: Lark, start: str | None, explicit: dict[str, st.SearchStrategy[str]], alphabet: st.SearchStrategy[str], ) -> None: super().__init__() assert isinstance(grammar, lark.lark.Lark) start: list[str] = grammar.options.start if start is None else [start] # This is a total hack, but working around the changes is a nicer user # experience than breaking for anyone who doesn't instantly update their # installation of Lark alongside Hypothesis. compile_args = signature(grammar.grammar.compile).parameters if "terminals_to_keep" in compile_args: terminals, rules, ignore_names = grammar.grammar.compile(start, ()) elif "start" in compile_args: # pragma: no cover # Support lark <= 0.10.0, without the terminals_to_keep argument. terminals, rules, ignore_names = grammar.grammar.compile(start) # type: ignore else: # pragma: no cover # This branch is to support lark <= 0.7.1, without the start argument. terminals, rules, ignore_names = grammar.grammar.compile() # type: ignore self.names_to_symbols: dict[str, Symbol] = {} for r in rules: self.names_to_symbols[r.origin.name] = r.origin disallowed = set() self.terminal_strategies: dict[str, st.SearchStrategy[str]] = {} for t in terminals: self.names_to_symbols[t.name] = Terminal(t.name) s = st.from_regex(t.pattern.to_regexp(), fullmatch=True, alphabet=alphabet) try: s.validate() except IncompatibleWithAlphabet: disallowed.add(t.name) else: self.terminal_strategies[t.name] = s self.ignored_symbols = tuple(self.names_to_symbols[n] for n in ignore_names) all_terminals = get_terminal_names(terminals, rules, ignore_names) if unknown_explicit := sorted(set(explicit) - all_terminals): raise InvalidArgument( "The following arguments were passed as explicit_strategies, but " f"there is no {unknown_explicit} terminal production in this grammar." 
) if missing_declared := sorted( all_terminals - {t.name for t in terminals} - set(explicit) ): raise InvalidArgument( f"Undefined terminal{'s' * (len(missing_declared) > 1)} " f"{sorted(missing_declared)!r}. Generation does not currently " "support use of %declare unless you pass `explicit`, a dict of " f"names-to-strategies, such as `{{{missing_declared[0]!r}: " 'st.just("")}}`' ) self.terminal_strategies.update(explicit) # can in fact contain any symbol, despite its name. nonterminals: dict[str, list[tuple[Symbol, ...]]] = {} for rule in rules: if disallowed.isdisjoint(r.name for r in rule.expansion): nonterminals.setdefault(rule.origin.name, []).append( tuple(rule.expansion) ) allowed_rules = {*self.terminal_strategies, *nonterminals} while dict(nonterminals) != ( nonterminals := { k: clean for k, v in nonterminals.items() if (clean := [x for x in v if all(r.name in allowed_rules for r in x)]) } ): allowed_rules = {*self.terminal_strategies, *nonterminals} if set(start).isdisjoint(allowed_rules): raise InvalidArgument( f"No start rule {tuple(start)} is allowed by {alphabet=}" ) self.start = st.sampled_from( [self.names_to_symbols[s] for s in start if s in allowed_rules] ) self.nonterminal_strategies = { k: st.sampled_from(sorted(v, key=len)) for k, v in nonterminals.items() } self.__rule_labels: dict[str, int] = {} def do_draw(self, data: ConjectureData) -> str: state: list[str] = [] start = data.draw(self.start) self.draw_symbol(data, start, state) return "".join(state) def rule_label(self, name: str) -> int: try: return self.__rule_labels[name] except KeyError: return self.__rule_labels.setdefault( name, calc_label_from_name(f"LARK:{name}") ) def draw_symbol( self, data: ConjectureData, symbol: Symbol, draw_state: list[str], ) -> None: if isinstance(symbol, Terminal): strategy = self.terminal_strategies[symbol.name] draw_state.append(data.draw(strategy)) else: assert isinstance(symbol, NonTerminal) data.start_span(self.rule_label(symbol.name)) expansion = 
data.draw(self.nonterminal_strategies[symbol.name]) for e in expansion: self.draw_symbol(data, e, draw_state) self.gen_ignore(data, draw_state) data.stop_span() def gen_ignore(self, data: ConjectureData, draw_state: list[str]) -> None: if self.ignored_symbols and data.draw_boolean(1 / 4): emit = data.draw(st.sampled_from(self.ignored_symbols)) self.draw_symbol(data, emit, draw_state) def calc_has_reusable_values(self, recur): return True def check_explicit(name): def inner(value): check_type(str, value, "value drawn from " + name) return value return inner @cacheable @defines_strategy(force_reusable_values=True) def from_lark( grammar: lark.lark.Lark, *, start: str | None = None, explicit: dict[str, st.SearchStrategy[str]] | None = None, alphabet: st.SearchStrategy[str] = st.characters(codec="utf-8"), ) -> st.SearchStrategy[str]: """A strategy for strings accepted by the given context-free grammar. ``grammar`` must be a ``Lark`` object, which wraps an EBNF specification. The Lark EBNF grammar reference can be found `here <https://lark-parser.readthedocs.io/en/latest/grammar.html>`_. ``from_lark`` will automatically generate strings matching the nonterminal ``start`` symbol in the grammar, which was supplied as an argument to the Lark class. To generate strings matching a different symbol, including terminals, you can override this by passing the ``start`` argument to ``from_lark``. Note that Lark may remove unreachable productions when the grammar is compiled, so you should probably pass the same value for ``start`` to both. Currently ``from_lark`` does not support grammars that need custom lexing. Any lexers will be ignored, and any undefined terminals from the use of ``%declare`` will result in generation errors. To define strategies for such terminals, pass a dictionary mapping their name to a corresponding strategy as the ``explicit`` argument. 
The :pypi:`hypothesmith` project includes a strategy for Python source, based on a grammar and careful post-processing. """ check_type(lark.lark.Lark, grammar, "grammar") if explicit is None: explicit = {} else: check_type(dict, explicit, "explicit") explicit = { k: v.map(check_explicit(f"explicit[{k!r}]={v!r}")) for k, v in explicit.items() } return LarkStrategy(grammar, start, explicit, alphabet)
LarkStrategy
python
dask__dask
dask/tokenize.py
{ "start": 556, "end": 15056 }
class ____(RuntimeError): pass def _tokenize(*args: object, **kwargs: object) -> str: token: object = _normalize_seq_func(args) if kwargs: token = token, _normalize_seq_func(sorted(kwargs.items())) # Pass `usedforsecurity=False` to support FIPS builds of Python return hashlib.md5(str(token).encode(), usedforsecurity=False).hexdigest() tokenize_lock = threading.RLock() _SEEN: dict[int, tuple[int, object]] = {} _ENSURE_DETERMINISTIC: ContextVar[bool | None] = ContextVar("_ENSURE_DETERMINISTIC") def tokenize( *args: object, ensure_deterministic: bool | None = None, **kwargs: object ) -> str: """Deterministic token >>> tokenize([1, 2, '3']) # doctest: +SKIP '06961e8de572e73c2e74b51348177918' >>> tokenize('Hello') == tokenize('Hello') True Parameters ---------- args, kwargs: objects to tokenize ensure_deterministic: bool, optional If True, raise TokenizationError if the objects cannot be deterministically tokenized, e.g. two identical objects will return different tokens. Defaults to the `tokenize.ensure-deterministic` configuration parameter. 
""" global _SEEN with tokenize_lock: seen_before, _SEEN = _SEEN, {} token = None try: _ENSURE_DETERMINISTIC.get() except LookupError: token = _ENSURE_DETERMINISTIC.set(ensure_deterministic) try: return _tokenize(*args, **kwargs) finally: if token: _ENSURE_DETERMINISTIC.reset(token) _SEEN = seen_before def _maybe_raise_nondeterministic(msg: str) -> None: try: val = _ENSURE_DETERMINISTIC.get() except LookupError: val = None if val or val is None and config.get("tokenize.ensure-deterministic"): raise TokenizationError(msg) _IDENTITY_DISPATCH = ( int, float, str, bytes, type(None), slice, complex, type(Ellipsis), decimal.Decimal, datetime.date, datetime.time, datetime.datetime, datetime.timedelta, pathlib.PurePath, ) normalize_token = Dispatch() normalize_token.register( _IDENTITY_DISPATCH, identity, ) @normalize_token.register((types.MappingProxyType, dict)) def normalize_dict(d): with tokenize_lock: if id(d) in _SEEN: return "__seen", _SEEN[id(d)][0] _SEEN[id(d)] = len(_SEEN), d try: return "dict", _normalize_seq_func( sorted(d.items(), key=lambda kv: str(kv[0])) ) finally: _SEEN.pop(id(d), None) @normalize_token.register(OrderedDict) def normalize_ordered_dict(d): return _normalize_seq_func((type(d), list(d.items()))) @normalize_token.register(set) def normalize_set(s): # Note: in some Python version / OS combinations, set order changes every # time you recreate the set (even within the same interpreter). # In most other cases, set ordering is consistent within the same interpreter. return "set", _normalize_seq_func(sorted(s, key=str)) def _normalize_seq_func(seq: Iterable[object]) -> tuple[object, ...]: def _inner_normalize_token(item): # Don't go through Dispatch. 
That's slow if isinstance(item, _IDENTITY_DISPATCH): return item return normalize_token(item) with tokenize_lock: if id(seq) in _SEEN: return "__seen", _SEEN[id(seq)][0] _SEEN[id(seq)] = len(_SEEN), seq try: return tuple(map(_inner_normalize_token, seq)) finally: del _SEEN[id(seq)] @normalize_token.register((tuple, list)) def normalize_seq(seq): return type(seq).__name__, _normalize_seq_func(seq) @normalize_token.register(literal) def normalize_literal(lit): return "literal", normalize_token(lit()) @normalize_token.register(Compose) def normalize_compose(func): return _normalize_seq_func((func.first,) + func.funcs) @normalize_token.register((partial, curry)) def normalize_partial(func): return _normalize_seq_func((func.func, func.args, func.keywords)) @normalize_token.register((types.MethodType, types.MethodWrapperType)) def normalize_bound_method(meth): return normalize_token(meth.__self__), meth.__name__ @normalize_token.register(types.BuiltinFunctionType) def normalize_builtin_function_or_method(func): # Note: BuiltinMethodType is BuiltinFunctionType self = getattr(func, "__self__", None) if self is not None and not inspect.ismodule(self): return normalize_bound_method(func) else: return normalize_object(func) @normalize_token.register(object) def normalize_object(o): method = getattr(o, "__dask_tokenize__", None) if method is not None and not isinstance(o, type): return method() if type(o) is object: return _normalize_pure_object(o) if isinstance(o, type): copyreg._slotnames(o) if dataclasses.is_dataclass(o) and not isinstance(o, type): return _normalize_dataclass(o) try: return _normalize_pickle(o) except Exception: _maybe_raise_nondeterministic( f"Object {o!r} cannot be deterministically hashed. This likely " "indicates that the object cannot be serialized deterministically." ) return uuid.uuid4().hex _seen_objects = set() def _normalize_pure_object(o: object) -> tuple[str, int]: _maybe_raise_nondeterministic( "object() cannot be deterministically hashed. 
See " "https://docs.dask.org/en/latest/custom-collections.html#implementing-deterministic-hashing " "for more information." ) # Idempotent, but not deterministic. Make sure that the id is not reused. _seen_objects.add(o) return "object", id(o) def _normalize_pickle(o: object) -> tuple: buffers: list[pickle.PickleBuffer] = [] pik: int | None = None pik2: int | None = None for _ in range(3): buffers.clear() try: out = pickle.dumps(o, protocol=5, buffer_callback=buffers.append) if b"__main__" in out: # Use `cloudpickle` for objects defined in `__main__` buffers.clear() out = cloudpickle.dumps(o, protocol=5, buffer_callback=buffers.append) pickle.loads(out, buffers=buffers) pik2 = hash_buffer_hex(out) except Exception: buffers.clear() try: out = cloudpickle.dumps(o, protocol=5, buffer_callback=buffers.append) pickle.loads(out, buffers=buffers) pik2 = hash_buffer_hex(out) except Exception: break if pik and pik2 and pik == pik2: break pik = pik2 else: _maybe_raise_nondeterministic("Failed to tokenize deterministically") if pik is None: _maybe_raise_nondeterministic("Failed to tokenize deterministically") pik = int(uuid.uuid4()) return pik, [hash_buffer_hex(buf) for buf in buffers] def _normalize_dataclass(obj): fields = [ (field.name, normalize_token(getattr(obj, field.name, None))) for field in dataclasses.fields(obj) ] params = obj.__dataclass_params__ params = [(attr, getattr(params, attr)) for attr in params.__slots__] return normalize_object(type(obj)), params, fields @normalize_token.register_lazy("pandas") def register_pandas(): import pandas as pd # use dask._pandas_compat to avoid importing dask.dataframe here from dask._pandas_compat import PANDAS_GE_210 @normalize_token.register(pd.RangeIndex) def normalize_range_index(x): return type(x), x.start, x.stop, x.step, x.dtype, x.name @normalize_token.register(pd.Index) def normalize_index(ind): values = ind.array if isinstance(values, pd.arrays.ArrowExtensionArray): import pyarrow as pa # these are sensitive to 
fragmentation of the backing Arrow array. # Because common operations like DataFrame.getitem and DataFrame.setitem # result in fragmented Arrow arrays, we'll consolidate them here. if PANDAS_GE_210: # avoid combining chunks by using chunked_array values = pa.chunked_array([values._pa_array]).combine_chunks() else: values = pa.array(values) return type(ind), ind.name, normalize_token(values) @normalize_token.register(pd.MultiIndex) def normalize_index(ind): codes = ind.codes return ( [ind.name] + [normalize_token(x) for x in ind.levels] + [normalize_token(x) for x in codes] ) @normalize_token.register(pd.Categorical) def normalize_categorical(cat): return [normalize_token(cat.codes), normalize_token(cat.dtype)] @normalize_token.register(pd.arrays.PeriodArray) @normalize_token.register(pd.arrays.DatetimeArray) @normalize_token.register(pd.arrays.TimedeltaArray) def normalize_period_array(arr): return [normalize_token(arr.asi8), normalize_token(arr.dtype)] @normalize_token.register(pd.arrays.IntervalArray) def normalize_interval_array(arr): return [ normalize_token(arr.left), normalize_token(arr.right), normalize_token(arr.closed), ] @normalize_token.register(pd.Series) def normalize_series(s): return [ s.name, s.dtype, normalize_token(s._values), normalize_token(s.index), ] @normalize_token.register(pd.DataFrame) def normalize_dataframe(df): mgr = df._mgr data = list(mgr.arrays) + [df.columns, df.index] return list(map(normalize_token, data)) @normalize_token.register(pd.arrays.ArrowExtensionArray) def normalize_extension_array(arr): try: return (type(arr), normalize_token(arr._pa_array)) except AttributeError: return (type(arr), normalize_token(arr._data)) @normalize_token.register(pd.api.extensions.ExtensionArray) def normalize_extension_array(arr): import numpy as np return normalize_token(np.asarray(arr)) # Dtypes @normalize_token.register(pd.api.types.CategoricalDtype) def normalize_categorical_dtype(dtype): return [normalize_token(dtype.categories), 
normalize_token(dtype.ordered)] @normalize_token.register(pd.api.extensions.ExtensionDtype) def normalize_period_dtype(dtype): return normalize_token(dtype.name) @normalize_token.register(type(pd.NA)) def normalize_na(na): return pd.NA @normalize_token.register(pd.offsets.BaseOffset) def normalize_offset(offset): return offset.freqstr @normalize_token.register_lazy("numba") def register_numba(): import numba @normalize_token.register(numba.core.serialize.ReduceMixin) def normalize_numba_ufunc(obj): return normalize_token((obj._reduce_class(), obj._reduce_states())) @normalize_token.register_lazy("pyarrow") def register_pyarrow(): import pyarrow as pa @normalize_token.register(pa.DataType) def normalize_datatype(dt): return pickle.dumps(dt, protocol=4) @normalize_token.register(pa.Table) def normalize_table(dt): return ( "pa.Table", normalize_token(dt.schema), normalize_token(dt.columns), ) @normalize_token.register(pa.ChunkedArray) def normalize_chunked_array(arr): return ( "pa.ChunkedArray", normalize_token(arr.type), normalize_token(arr.chunks), ) @normalize_token.register(pa.Array) def normalize_array(arr): buffers = arr.buffers() # pyarrow does something clever when (de)serializing an array that has # an empty validity map: The buffers for the deserialized array will # have `None` instead of the empty validity map. # # We'll replicate that behavior here to ensure we get consistent # tokenization. 
buffers = arr.buffers() if len(buffers) and buffers[0] is not None and arr.null_count == 0: buffers[0] = None return ( "pa.Array", normalize_token(arr.type), normalize_token(buffers), ) @normalize_token.register(pa.Buffer) def normalize_buffer(buf): return ("pa.Buffer", hash_buffer_hex(buf)) @normalize_token.register_lazy("numpy") def register_numpy(): import numpy as np @normalize_token.register(np.ndarray) def normalize_array(x): if not x.shape: return (x.item(), x.dtype) if x.dtype.hasobject: try: try: # string fast-path data = hash_buffer_hex( "-".join(x.flat).encode( encoding="utf-8", errors="surrogatepass" ) ) except UnicodeDecodeError: # bytes fast-path data = hash_buffer_hex(b"-".join(x.flat)) except (TypeError, UnicodeDecodeError): return normalize_object(x) else: try: data = hash_buffer_hex(x.ravel(order="K").view("i1")) except (BufferError, AttributeError, ValueError): data = hash_buffer_hex(x.copy().ravel(order="K").view("i1")) return (data, x.dtype, x.shape) @normalize_token.register(np.memmap) def normalize_mmap(mm): return hash_buffer_hex(np.ascontiguousarray(mm)) @normalize_token.register(np.ufunc) def normalize_ufunc(func): try: return _normalize_pickle(func) except Exception: _maybe_raise_nondeterministic( f"Cannot tokenize numpy ufunc {func!r}. Please use functions " "of the dask.array.ufunc module instead. See also " "https://docs.dask.org/en/latest/array-numpy-compatibility.html" ) return uuid.uuid4().hex @normalize_token.register(np.dtype) def normalize_dtype(dtype): return dtype.str def _tokenize_deterministic(*args, **kwargs) -> str: # Utility to be strict about deterministic tokens return tokenize(*args, ensure_deterministic=True, **kwargs)
TokenizationError
python
Pylons__pyramid
src/pyramid/util.py
{ "start": 403, "end": 1708 }
class ____(_DottedNameResolver): def __init__( self, package=None ): # default to package = None for bw compat _DottedNameResolver.__init__(self, package) def text_(s, encoding='latin-1', errors='strict'): """If ``s`` is an instance of ``bytes``, return ``s.decode(encoding, errors)``, otherwise return ``s``""" if isinstance(s, bytes): return s.decode(encoding, errors) return s def bytes_(s, encoding='latin-1', errors='strict'): """If ``s`` is an instance of ``str``, return ``s.encode(encoding, errors)``, otherwise return ``s``""" if isinstance(s, str): return s.encode(encoding, errors) return s def ascii_(s): """ If ``s`` is an instance of ``str``, return ``s.encode('ascii')``, otherwise return ``str(s, 'ascii', 'strict')`` """ if isinstance(s, str): s = s.encode('ascii') return str(s, 'ascii', 'strict') def is_nonstr_iter(v): if isinstance(v, str): return False return hasattr(v, '__iter__') def is_string_or_iterable(v): if isinstance(v, str): return True if hasattr(v, '__iter__'): return True def as_sorted_tuple(val): if not is_nonstr_iter(val): val = (val,) val = tuple(sorted(val)) return val
DottedNameResolver
python
pytorch__pytorch
torch/_inductor/template_heuristics/triton.py
{ "start": 96838, "end": 97460 }
class ____( MMPlusMMTemplateConfigMixin, MTIAConfigHeuristic ): """MM Plus MM template heuristic for MTIA""" def __init__(self) -> None: super().__init__() # Override mm_configs to use mm_plus_mm_configs self.mm_configs = self.mm_plus_mm_configs # NOTE: overriding exhaustive configs here to be the same as mm_configs # as we haven't validated exhaustive support here yet # TODO(coconutruben): remove this once we have validated exhaustive support # for scaled_mm self.exhaustive_configs = self.mm_plus_mm_configs
MTIAMMPlusMMTemplateConfigHeuristic
python
eth-brownie__brownie
brownie/typing.py
{ "start": 3614, "end": 3869 }
class ____(TypedDict): evm_version: EvmVersion | None solc: NotRequired[SolcConfig] vyper: VyperConfig version: NotRequired[str] optimizer: NotRequired[OptimizerSettings] OutputSelection = Dict[str, Dict[str, List[str]]]
CompilerConfig
python
ansible__ansible
test/units/parsing/vault/test_vault.py
{ "start": 20705, "end": 23692 }
class ____(unittest.TestCase): def setUp(self): self.vault_cipher = vault.VaultAES256() def test(self): self.assertIsInstance(self.vault_cipher, vault.VaultAES256) # TODO: tag these as slow tests def test_create_key_cryptography(self): b_password = b'hunter42' b_salt = os.urandom(32) b_key_cryptography = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16) self.assertIsInstance(b_key_cryptography, bytes) def test_create_key_known_cryptography(self): b_password = b'hunter42' # A fixed salt b_salt = b'q' * 32 # q is the most random letter. b_key_1 = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16) self.assertIsInstance(b_key_1, bytes) # verify we get the same answer # we could potentially run a few iterations of this and time it to see if it's roughly constant time # and or that it exceeds some minimal time, but that would likely cause unreliable fails, esp in CI b_key_2 = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16) self.assertIsInstance(b_key_2, bytes) self.assertEqual(b_key_1, b_key_2) def test_is_equal_is_equal(self): self.assertTrue(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'abcdefghijklmnopqrstuvwxyz')) def test_is_equal_unequal_length(self): self.assertFalse(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'abcdefghijklmnopqrstuvwx and sometimes y')) def test_is_equal_not_equal(self): self.assertFalse(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'AbcdefghijKlmnopQrstuvwxZ')) def test_is_equal_empty(self): self.assertTrue(self.vault_cipher._is_equal(b'', b'')) def test_is_equal_non_ascii_equal(self): utf8_data = to_bytes(u'私はガラスを食べられます。それは私を傷つけません。') self.assertTrue(self.vault_cipher._is_equal(utf8_data, utf8_data)) def test_is_equal_non_ascii_unequal(self): utf8_data = to_bytes(u'私はガラスを食べられます。それは私を傷つけません。') utf8_data2 = to_bytes(u'Pot să mănânc sticlă și ea nu mă rănește.') # Test for the 
len optimization path self.assertFalse(self.vault_cipher._is_equal(utf8_data, utf8_data2)) # Test for the slower, char by char comparison path self.assertFalse(self.vault_cipher._is_equal(utf8_data, utf8_data[:-1] + b'P')) def test_is_equal_non_bytes(self): """ Anything not a byte string should raise a TypeError """ self.assertRaises(TypeError, self.vault_cipher._is_equal, u"One fish", b"two fish") self.assertRaises(TypeError, self.vault_cipher._is_equal, b"One fish", u"two fish") self.assertRaises(TypeError, self.vault_cipher._is_equal, 1, b"red fish") self.assertRaises(TypeError, self.vault_cipher._is_equal, b"blue fish", 2)
TestVaultCipherAes256
python
python__mypy
mypyc/test-data/fixtures/ir.py
{ "start": 7346, "end": 7784 }
class ____: @overload def __init__(self) -> None: pass @overload def __init__(self, x: object) -> None: pass @overload def __init__(self, string: str, encoding: str, err: str = ...) -> None: pass def __add__(self, s: bytes) -> bytearray: ... def __setitem__(self, i: int, o: int) -> None: ... def __getitem__(self, i: int) -> int: ... def decode(self, x: str = ..., y: str = ...) -> str: ...
bytearray
python
apache__airflow
airflow-core/tests/unit/dag_processing/bundles/test_dag_bundle_manager.py
{ "start": 7233, "end": 14443 }
class ____(BaseDagBundle): """Test bundle that provides a URL template.""" def __init__(self, *, subdir: str | None = None, **kwargs): super().__init__(**kwargs) self.subdir = subdir def refresh(self): pass def get_current_version(self): return "v1.0" @property def path(self): return "/tmp/test" TEMPLATE_BUNDLE_CONFIG = [ { "name": "template-bundle", "classpath": "unit.dag_processing.bundles.test_dag_bundle_manager.BundleWithTemplate", "kwargs": { "view_url_template": "https://github.com/example/repo/tree/{version}/{subdir}", "subdir": "dags", "refresh_interval": 1, }, } ] @pytest.mark.db_test @conf_vars({("core", "LOAD_EXAMPLES"): "False"}) def test_sync_bundles_to_db_with_template(clear_db, session): """Test that URL templates and parameters are stored in the database during sync.""" with patch.dict( os.environ, {"AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_CONFIG_LIST": json.dumps(TEMPLATE_BUNDLE_CONFIG)} ): manager = DagBundlesManager() manager.sync_bundles_to_db() # Check that the template and parameters were stored bundle_model = session.query(DagBundleModel).filter_by(name="template-bundle").first() session.merge(bundle_model) assert bundle_model is not None assert bundle_model.render_url(version="v1.0") == "https://github.com/example/repo/tree/v1.0/dags" assert bundle_model.template_params == {"subdir": "dags"} assert bundle_model.active is True @pytest.mark.db_test @conf_vars({("core", "LOAD_EXAMPLES"): "False"}) def test_bundle_model_render_url(clear_db, session): """Test the DagBundleModel render_url method.""" with patch.dict( os.environ, {"AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_CONFIG_LIST": json.dumps(TEMPLATE_BUNDLE_CONFIG)} ): manager = DagBundlesManager() manager.sync_bundles_to_db() bundle_model = session.query(DagBundleModel).filter_by(name="template-bundle").first() session.merge(bundle_model) assert bundle_model is not None url = bundle_model.render_url(version="main") assert url == "https://github.com/example/repo/tree/main/dags" url = 
bundle_model.render_url() assert url == "https://github.com/example/repo/tree/None/dags" @pytest.mark.db_test @conf_vars({("core", "LOAD_EXAMPLES"): "False"}) def test_template_params_update_on_sync(clear_db, session): """Test that template parameters are updated when bundle configuration changes.""" with patch.dict( os.environ, {"AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_CONFIG_LIST": json.dumps(TEMPLATE_BUNDLE_CONFIG)} ): manager = DagBundlesManager() manager.sync_bundles_to_db() # Verify initial template and parameters bundle_model = session.query(DagBundleModel).filter_by(name="template-bundle").first() url = bundle_model._unsign_url() assert url == "https://github.com/example/repo/tree/{version}/{subdir}" assert bundle_model.template_params == {"subdir": "dags"} # Update the bundle config with different parameters updated_config = [ { "name": "template-bundle", "classpath": "unit.dag_processing.bundles.test_dag_bundle_manager.BundleWithTemplate", "kwargs": { "view_url_template": "https://gitlab.com/example/repo/-/tree/{version}/{subdir}", "subdir": "workflows", "refresh_interval": 1, }, } ] with patch.dict( os.environ, {"AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_CONFIG_LIST": json.dumps(updated_config)} ): manager = DagBundlesManager() manager.sync_bundles_to_db() # Verify the template and parameters were updated bundle_model = session.query(DagBundleModel).filter_by(name="template-bundle").first() url = bundle_model._unsign_url() assert url == "https://gitlab.com/example/repo/-/tree/{version}/{subdir}" assert bundle_model.template_params == {"subdir": "workflows"} assert bundle_model.render_url(version="v1") == "https://gitlab.com/example/repo/-/tree/v1/workflows" @pytest.mark.db_test @conf_vars({("core", "LOAD_EXAMPLES"): "False"}) def test_template_update_on_sync(clear_db, session): """Test that templates are updated when bundle configuration changes.""" # First, sync with initial template with patch.dict( os.environ, {"AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_CONFIG_LIST": 
json.dumps(TEMPLATE_BUNDLE_CONFIG)} ): manager = DagBundlesManager() manager.sync_bundles_to_db() # Verify initial template bundle_model = session.query(DagBundleModel).filter_by(name="template-bundle").first() url = bundle_model._unsign_url() assert url == "https://github.com/example/repo/tree/{version}/{subdir}" assert bundle_model.render_url(version="v1") == "https://github.com/example/repo/tree/v1/dags" # Update the bundle config with a different template updated_config = [ { "name": "template-bundle", "classpath": "unit.dag_processing.bundles.test_dag_bundle_manager.BundleWithTemplate", "kwargs": { "view_url_template": "https://gitlab.com/example/repo/-/tree/{version}/{subdir}", "subdir": "dags", "refresh_interval": 1, }, } ] with patch.dict( os.environ, {"AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_CONFIG_LIST": json.dumps(updated_config)} ): manager = DagBundlesManager() manager.sync_bundles_to_db() # Verify the template was updated bundle_model = session.query(DagBundleModel).filter_by(name="template-bundle").first() url = bundle_model._unsign_url() assert url == "https://gitlab.com/example/repo/-/tree/{version}/{subdir}" assert bundle_model.render_url("v1") == "https://gitlab.com/example/repo/-/tree/v1/dags" def test_dag_bundle_model_render_url_with_invalid_template(): """Test that DagBundleModel.render_url handles invalid templates gracefully.""" bundle_model = DagBundleModel(name="test-bundle") bundle_model.signed_url_template = "https://github.com/example/repo/tree/{invalid_placeholder}" bundle_model.template_params = {"subdir": "dags"} # Should return None if rendering fails url = bundle_model.render_url("v1") assert url is None def test_example_dags_bundle_added(): manager = DagBundlesManager() manager.parse_config() assert "example_dags" in manager._bundle_config with conf_vars({("core", "LOAD_EXAMPLES"): "False"}): manager = DagBundlesManager() manager.parse_config() assert "example_dags" not in manager._bundle_config def 
test_example_dags_name_is_reserved(): reserved_name_config = [{"name": "example_dags", "classpath": "yo face", "kwargs": {}}] with conf_vars({("dag_processor", "dag_bundle_config_list"): json.dumps(reserved_name_config)}): with pytest.raises(AirflowConfigException, match="Bundle name 'example_dags' is a reserved name."): DagBundlesManager().parse_config()
BundleWithTemplate
python
jazzband__django-polymorphic
src/polymorphic/tests/models.py
{ "start": 6567, "end": 6792 }
class ____(models.Model): fk = models.ForeignKey( PlainParentModelWithManager, on_delete=models.CASCADE, related_name="childmodel_set", ) objects = PlainMyManager()
PlainChildModelWithManager
python
PrefectHQ__prefect
tests/_internal/pydantic/test_validated_func.py
{ "start": 16788, "end": 17170 }
class ____(BaseModel): b: str = Field() """, namespace, ) # Update the function's globals to include B namespace["process_model"].__globals__.update(namespace) # Now test that validation actually works at runtime result = vf.validate_call_args((), {"model": {"a": {"b": "test"}}}) assert result["model"].a.b == "test"
B
python
google__jax
jax/_src/profiler.py
{ "start": 15315, "end": 17446 }
class ____: def __init__(self, retries: int, percentile: int): self.retries: int = retries self.percentile: int = percentile self.collected_fdo: str | None = None self.called_times: int = 0 self.fdo_profiles: list[Any] = [] self.current_session: _profiler.ProfilerSession | None = None def consume_fdo_profile(self) -> str | None: if self.collected_fdo is not None: return self.collected_fdo if not self.is_enabled() or self.called_times != self.retries: return None self.collected_fdo = _profiler.aggregate_profiled_instructions( self.fdo_profiles, self.percentile ) return self.collected_fdo def is_fdo_consumed(self): return self.collected_fdo is not None def disable(self): self.retries = 0 def is_enabled(self): return self.retries > 0 def is_running(self): return self.current_session is not None @classmethod @contextmanager def trace(cls, runner: PGLEProfiler | None): if (runner is None or runner.is_running() or not runner.is_enabled() or runner.is_fdo_consumed()): yield else: options = _profiler.ProfileOptions() options.enable_hlo_proto = True options.raise_error_on_start_failure = True runner.current_session = _profiler.ProfilerSession(options) try: yield finally: xspace = runner.current_session.stop() runner.fdo_profiles.append( _profiler.get_fdo_profile(xspace) ) runner.current_session = None runner.called_times += 1 if runner.fdo_profiles[-1] == b'': warnings.warn( "PGLE collected an empty trace, may be due to contention with " "another tool that subscribes to CUPTI, such as Nsight Systems - check " "for CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED from XLA. " "Consider populating a persistent compilation cache with PGLE enabled, " "and then profiling a second run that has the " "JAX_COMPILATION_CACHE_EXPECT_PGLE option enabled.", RuntimeWarning)
PGLEProfiler
python
pytorch__pytorch
test/test_nn.py
{ "start": 645068, "end": 645254 }
class ____(TestCase): # issue gh-38137 def test_pickle_softsign(self): # Make sure it does not throw an exception s = pickle.dumps(F.softsign)
TestFunctionalPickle
python
mlflow__mlflow
mlflow/metrics/genai/prompt_template.py
{ "start": 39, "end": 2441 }
class ____: """A prompt template for a language model. A prompt template consists of an array of strings that will be concatenated together. It accepts a set of parameters from the user that can be used to generate a prompt for a language model. The template can be formatted using f-strings. Example: .. code-block:: python from mlflow.metrics.genai.prompt_template import PromptTemplate # Instantiation using initializer prompt = PromptTemplate(template_str="Say {foo} {baz}") # Instantiation using partial_fill prompt = PromptTemplate(template_str="Say {foo} {baz}").partial_fill(foo="bar") # Format the prompt prompt.format(baz="qux") """ def __init__(self, template_str: str | list[str]): self.template_strs = [template_str] if isinstance(template_str, str) else template_str @property def variables(self): return { fname for template_str in self.template_strs for _, fname, _, _ in string.Formatter().parse(template_str) if fname } def format(self, **kwargs: Any) -> str: safe_kwargs = {k: v for k, v in kwargs.items() if v is not None} formatted_strs = [] for template_str in self.template_strs: extracted_variables = [ fname for _, fname, _, _ in string.Formatter().parse(template_str) if fname ] if all(item in safe_kwargs.keys() for item in extracted_variables): formatted_strs.append(template_str.format(**safe_kwargs)) return "".join(formatted_strs) def partial_fill(self, **kwargs: Any) -> "PromptTemplate": safe_kwargs = {k: v for k, v in kwargs.items() if v is not None} new_template_strs = [] for template_str in self.template_strs: extracted_variables = [ fname for _, fname, _, _ in string.Formatter().parse(template_str) if fname ] safe_available_kwargs = { k: safe_kwargs.get(k, "{" + k + "}") for k in extracted_variables } new_template_strs.append(template_str.format_map(safe_available_kwargs)) return PromptTemplate(template_str=new_template_strs) def __str__(self) -> str: return "".join(self.template_strs)
PromptTemplate
python
python__mypy
mypyc/analysis/dataflow.py
{ "start": 12888, "end": 13847 }
class ____(BaseAnalysisVisitor[Value]): def visit_branch(self, op: Branch) -> GenAndKill[Value]: return set(), set() def visit_return(self, op: Return) -> GenAndKill[Value]: return set(), set() def visit_unreachable(self, op: Unreachable) -> GenAndKill[Value]: return set(), set() def visit_register_op(self, op: RegisterOp) -> GenAndKill[Value]: return set(), {op} if not op.is_void else set() def visit_assign(self, op: Assign) -> GenAndKill[Value]: return set(), {op.dest} def visit_assign_multi(self, op: AssignMulti) -> GenAndKill[Value]: return set(), {op.dest} def visit_set_mem(self, op: SetMem) -> GenAndKill[Value]: return set(), set() def non_trivial_sources(op: Op) -> set[Value]: result = set() for source in op.sources(): if not isinstance(source, (Integer, Float, Undef)): result.add(source) return result
UndefinedVisitor
python
sympy__sympy
sympy/strategies/tests/test_traverse.py
{ "start": 1387, "end": 2082 }
class ____(Basic): pass def rl(x): if x.args and not isinstance(x.args[0], Integer): return Basic2(*x.args) return x def test_top_down_once(): top_rl = top_down_once(rl) assert top_rl(Basic(S(1.0), S(2.0), Basic(S(3), S(4)))) == \ Basic2(S(1.0), S(2.0), Basic(S(3), S(4))) def test_bottom_up_once(): bottom_rl = bottom_up_once(rl) assert bottom_rl(Basic(S(1), S(2), Basic(S(3.0), S(4.0)))) == \ Basic(S(1), S(2), Basic2(S(3.0), S(4.0))) def test_expr_fns(): expr = x + y**3 e = bottom_up(lambda v: v + 1, expr_fns)(expr) b = bottom_up(lambda v: Basic.__new__(Add, v, S(1)), basic_fns)(expr) assert rebuild(b) == e
Basic2
python
huggingface__transformers
src/transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py
{ "start": 3759, "end": 4452 }
class ____(Wav2Vec2FeedForward): def __init__(self, config, act_fn=None, hidden_size=None): nn.Module.__init__(self) act_fn = act_fn if act_fn is not None else config.hidden_act hidden_size = hidden_size if hidden_size is not None else config.hidden_size self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(hidden_size, config.intermediate_size) self.intermediate_act_fn = ACT2FN[act_fn] if isinstance(act_fn, str) else act_fn self.output_dense = nn.Linear(config.intermediate_size, hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout)
Wav2Vec2BertFeedForward
python
tensorflow__tensorflow
tensorflow/python/ops/weak_tensor_math_ops_test.py
{ "start": 19909, "end": 20849 }
class ____(test_util.TensorFlowTestCase): def testAcceptsRefs(self): if context.executing_eagerly(): var = resource_variable_ops.ResourceVariable(10, name="var") else: var = variables.Variable(10) result = math_ops.scalar_mul(3, var) init = variables.global_variables_initializer() with test_util.device(use_gpu=True): self.evaluate(init) self.assertEqual(30, self.evaluate(result)) def testAcceptsIndexedSlices(self): values = constant_op.constant([2, 3, 5, 7, 0, -1], shape=[3, 2]) indices = constant_op.constant([0, 2, 5]) # Test that patched scalar_mul works with IndexedSlices. x = math_ops.scalar_mul(-3, indexed_slices.IndexedSlices(values, indices)) with test_util.device(use_gpu=True): self.assertAllEqual( self.evaluate(x.values), [[-6, -9], [-15, -21], [0, 3]] ) self.assertAllEqual(self.evaluate(x.indices), [0, 2, 5])
ScalarMulTest
python
tensorflow__tensorflow
tensorflow/python/compiler/tensorrt/trt_convert.py
{ "start": 3712, "end": 4977 }
class ____(object): FP32 = "FP32" FP16 = "FP16" INT8 = "INT8" @staticmethod def supported_precision_modes(): precisions = [ TrtPrecisionMode.FP32, TrtPrecisionMode.FP16, TrtPrecisionMode.INT8 ] return precisions + [p.lower() for p in precisions] # Use a large enough number as the default max_workspace_size for TRT engines, # so it can produce reasonable performance results with the default. # For TRT >= 8.4, the recommendation is MAX_INT. if (_pywrap_py_utils.is_tensorrt_enabled() and trt_utils.is_loaded_tensorrt_version_greater_equal(8, 4, 0)): # We must use `sys.maxsize - 512` to avoid overflow during casting. DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES = sys.maxsize - 512 else: DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES = 1 << 30 # 1,073,741,824 PROFILE_STRATEGY_RANGE = "Range" PROFILE_STRATEGY_OPTIMAL = "Optimal" PROFILE_STRATEGY_RANGE_OPTIMAL = "Range+Optimal" PROFILE_STRATEGY_IMPLICIT_BATCH_MODE_COMPATIBLE = "ImplicitBatchModeCompatible" def supported_profile_strategies(): return [ PROFILE_STRATEGY_RANGE, PROFILE_STRATEGY_OPTIMAL, PROFILE_STRATEGY_RANGE_OPTIMAL, PROFILE_STRATEGY_IMPLICIT_BATCH_MODE_COMPATIBLE ] @tf_export("experimental.tensorrt.ConversionParams", v1=[])
TrtPrecisionMode
python
huggingface__transformers
src/transformers/models/zamba2/modular_zamba2.py
{ "start": 53015, "end": 53223 }
class ____(ZambaForSequenceClassification): pass __all__ = [ "Zamba2ForCausalLM", "Zamba2ForSequenceClassification", "Zamba2Model", "Zamba2PreTrainedModel", ]
Zamba2ForSequenceClassification
python
tensorflow__tensorflow
tensorflow/python/keras/initializers/initializers_v2.py
{ "start": 25222, "end": 26671 }
class ____(VarianceScaling): """Lecun normal initializer. Also available via the shortcut function `tf.keras.initializers.lecun_normal`. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.LecunNormal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.LecunNormal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. Used to seed the random generator. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017] (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) ([pdf] (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) """ def __init__(self, seed=None): super(LecunNormal, self).__init__( scale=1., mode='fan_in', distribution='truncated_normal', seed=seed) def get_config(self): return {'seed': self.seed}
LecunNormal
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 118713, "end": 119112 }
class ____(sgqlc.types.Input): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("field", "direction") field = sgqlc.types.Field( sgqlc.types.non_null(OrganizationOrderField), graphql_name="field" ) direction = sgqlc.types.Field( sgqlc.types.non_null(OrderDirection), graphql_name="direction" )
OrganizationOrder
python
pytorch__pytorch
torch/fx/experimental/partitioner_utils.py
{ "start": 2164, "end": 2255 }
class ____(NamedTuple): name: str available_mem_bytes: int logical_id: int
Device
python
getsentry__sentry
tests/sentry/relocation/test_utils.py
{ "start": 1169, "end": 12045 }
class ____(RelocationUtilsTestCase): def test_bad_relocation_not_found(self, fake_message_builder: Mock) -> None: self.mock_message_builder(fake_message_builder) uuid = str(uuid4()) (rel, attempts_left) = start_relocation_task(uuid, OrderedTask.UPLOADING_COMPLETE, 3) assert fake_message_builder.call_count == 0 assert rel is None assert not attempts_left def test_bad_relocation_completed(self, fake_message_builder: Mock) -> None: self.mock_message_builder(fake_message_builder) self.relocation.status = Relocation.Status.FAILURE.value self.relocation.save() (rel, attempts_left) = start_relocation_task(self.uuid, OrderedTask.UPLOADING_COMPLETE, 3) assert fake_message_builder.call_count == 0 assert rel is None assert not attempts_left assert Relocation.objects.get(uuid=self.uuid).status == Relocation.Status.FAILURE.value def test_bad_unknown_task(self, fake_message_builder: Mock) -> None: self.mock_message_builder(fake_message_builder) (rel, attempts_left) = start_relocation_task(self.uuid, OrderedTask.NONE, 3) assert fake_message_builder.call_count == 1 assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed" fake_message_builder.return_value.send_async.assert_called_once_with( to=[self.owner.email, self.superuser.email] ) assert rel is None assert not attempts_left assert Relocation.objects.get(uuid=self.uuid).status == Relocation.Status.FAILURE.value def test_bad_task_out_of_order_future_step(self, fake_message_builder: Mock) -> None: self.mock_message_builder(fake_message_builder) self.relocation.latest_task = OrderedTask.PREPROCESSING_SCAN.name self.relocation.save() (rel, attempts_left) = start_relocation_task(self.uuid, OrderedTask.VALIDATING_START, 3) assert fake_message_builder.call_count == 1 assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed" fake_message_builder.return_value.send_async.assert_called_once_with( to=[self.owner.email, self.superuser.email] ) assert rel is None assert not attempts_left assert 
Relocation.objects.get(uuid=self.uuid).status == Relocation.Status.FAILURE.value def test_bad_task_out_of_order_past_step(self, fake_message_builder: Mock) -> None: self.mock_message_builder(fake_message_builder) self.relocation.latest_task = OrderedTask.PREPROCESSING_BASELINE_CONFIG.name self.relocation.save() (rel, attempts_left) = start_relocation_task(self.uuid, OrderedTask.UPLOADING_COMPLETE, 3) assert fake_message_builder.call_count == 0 assert rel is None assert attempts_left == 3 assert Relocation.objects.get(uuid=self.uuid).status == Relocation.Status.IN_PROGRESS.value def test_bad_task_attempts_exhausted(self, fake_message_builder: Mock) -> None: self.mock_message_builder(fake_message_builder) self.relocation.latest_task = OrderedTask.IMPORTING.name self.relocation.latest_task_attempts = 3 self.relocation.save() (rel, attempts_left) = start_relocation_task(self.uuid, OrderedTask.IMPORTING, 3) assert fake_message_builder.call_count == 1 assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed" fake_message_builder.return_value.send_async.assert_called_once_with( to=[self.owner.email, self.superuser.email] ) assert rel is None assert not attempts_left assert Relocation.objects.get(uuid=self.uuid).status == Relocation.Status.FAILURE.value def test_good_first_task(self, fake_message_builder: Mock) -> None: self.mock_message_builder(fake_message_builder) (rel, attempts_left) = start_relocation_task(self.uuid, OrderedTask.UPLOADING_START, 3) assert fake_message_builder.call_count == 0 assert attempts_left == 2 relocation: Relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.step == Relocation.Step.UPLOADING.value assert relocation.status != Relocation.Status.FAILURE.value def test_good_next_task(self, fake_message_builder: Mock) -> None: self.mock_message_builder(fake_message_builder) self.relocation.latest_task = OrderedTask.UPLOADING_COMPLETE.name self.relocation.save() (rel, attempts_left) = start_relocation_task(self.uuid, 
OrderedTask.PREPROCESSING_SCAN, 3) assert fake_message_builder.call_count == 0 assert attempts_left == 2 relocation: Relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.step == Relocation.Step.PREPROCESSING.value assert relocation.status != Relocation.Status.FAILURE.value def test_good_pause_at_scheduled_pause(self, fake_message_builder: Mock) -> None: self.mock_message_builder(fake_message_builder) self.relocation.latest_task = OrderedTask.UPLOADING_COMPLETE.name self.relocation.scheduled_pause_at_step = Relocation.Step.PREPROCESSING.value self.relocation.save() (rel, attempts_left) = start_relocation_task(self.uuid, OrderedTask.PREPROCESSING_SCAN, 3) assert fake_message_builder.call_count == 0 assert attempts_left == 0 relocation: Relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.step == Relocation.Step.PREPROCESSING.value assert relocation.latest_task == OrderedTask.PREPROCESSING_SCAN.name assert relocation.status == Relocation.Status.PAUSE.value assert relocation.scheduled_pause_at_step is None def test_good_already_paused(self, fake_message_builder: Mock) -> None: self.mock_message_builder(fake_message_builder) self.relocation.latest_task = OrderedTask.UPLOADING_COMPLETE.name self.relocation.status = Relocation.Status.PAUSE.value self.relocation.save() (rel, attempts_left) = start_relocation_task(self.uuid, OrderedTask.UPLOADING_COMPLETE, 3) assert fake_message_builder.call_count == 0 assert attempts_left == 0 relocation: Relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.step == Relocation.Step.UPLOADING.value assert relocation.latest_task == OrderedTask.UPLOADING_COMPLETE.name assert relocation.status == Relocation.Status.PAUSE.value assert relocation.scheduled_pause_at_step is None def test_good_cancel_at_scheduled_cancel(self, fake_message_builder: Mock) -> None: self.mock_message_builder(fake_message_builder) self.relocation.latest_task = OrderedTask.UPLOADING_COMPLETE.name 
self.relocation.scheduled_cancel_at_step = Relocation.Step.PREPROCESSING.value self.relocation.save() (_, attempts_left) = start_relocation_task(self.uuid, OrderedTask.PREPROCESSING_SCAN, 3) assert fake_message_builder.call_count == 0 assert attempts_left == 0 relocation: Relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.step == Relocation.Step.PREPROCESSING.value assert relocation.latest_task == OrderedTask.PREPROCESSING_SCAN.name assert relocation.status == Relocation.Status.FAILURE.value assert relocation.scheduled_cancel_at_step is None assert relocation.failure_reason == "This relocation was cancelled by an administrator." def test_good_already_cancelled(self, fake_message_builder: Mock) -> None: self.mock_message_builder(fake_message_builder) self.relocation.step = Relocation.Step.POSTPROCESSING.value self.relocation.latest_task = OrderedTask.POSTPROCESSING.name self.relocation.status = Relocation.Status.FAILURE.value self.relocation.failure_reason = "Cancelled" self.relocation.save() (_, attempts_left) = start_relocation_task(self.uuid, OrderedTask.POSTPROCESSING, 3) assert fake_message_builder.call_count == 0 assert attempts_left == 0 relocation: Relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.step == Relocation.Step.POSTPROCESSING.value assert relocation.latest_task == OrderedTask.POSTPROCESSING.name assert relocation.status == Relocation.Status.FAILURE.value assert relocation.scheduled_cancel_at_step is None assert self.relocation.failure_reason == "Cancelled" def test_good_cancel_before_pause(self, fake_message_builder: Mock) -> None: self.mock_message_builder(fake_message_builder) self.relocation.latest_task = OrderedTask.UPLOADING_COMPLETE.name self.relocation.scheduled_cancel_at_step = Relocation.Step.PREPROCESSING.value self.relocation.scheduled_pause_at_step = Relocation.Step.PREPROCESSING.value self.relocation.save() (_, attempts_left) = start_relocation_task(self.uuid, OrderedTask.PREPROCESSING_SCAN, 3) 
assert fake_message_builder.call_count == 0 assert attempts_left == 0 relocation: Relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.step == Relocation.Step.PREPROCESSING.value assert relocation.latest_task == OrderedTask.PREPROCESSING_SCAN.name assert relocation.status == Relocation.Status.FAILURE.value assert relocation.scheduled_pause_at_step is None assert relocation.scheduled_cancel_at_step is None assert relocation.failure_reason == "This relocation was cancelled by an administrator." def test_good_pause_before_cancel(self, fake_message_builder: Mock) -> None: self.mock_message_builder(fake_message_builder) self.relocation.latest_task = OrderedTask.UPLOADING_COMPLETE.name self.relocation.scheduled_cancel_at_step = Relocation.Step.POSTPROCESSING.value self.relocation.scheduled_pause_at_step = Relocation.Step.PREPROCESSING.value self.relocation.save() (_, attempts_left) = start_relocation_task(self.uuid, OrderedTask.PREPROCESSING_SCAN, 3) assert fake_message_builder.call_count == 0 assert attempts_left == 0 relocation: Relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.step == Relocation.Step.PREPROCESSING.value assert relocation.latest_task == OrderedTask.PREPROCESSING_SCAN.name assert relocation.status == Relocation.Status.PAUSE.value assert relocation.scheduled_pause_at_step is None assert relocation.scheduled_cancel_at_step == Relocation.Step.POSTPROCESSING.value assert relocation.failure_reason is None @patch("sentry.relocation.utils.MessageBuilder")
RelocationStartTestCase
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/util/_collections_cy.py
{ "start": 2515, "end": 9219 }
class ____(Set[_T]): """A set implementation that maintains insertion order.""" __slots__ = ("_list",) _list: List[_T] @classmethod def __class_getitem__(cls, key: Any) -> type[Self]: return cls def __init__(self, d: Optional[Iterable[_T]] = None) -> None: if d is not None: if isinstance(d, set) or isinstance(d, dict): self._list = list(d) else: self._list = unique_list(d) set.__init__(self, self._list) else: self._list = [] set.__init__(self) def copy(self) -> OrderedSet[_T]: return self._from_list(list(self._list)) @cython.final @cython.cfunc @cython.inline def _from_list(self, new_list: List[_T]) -> OrderedSet: # type: ignore[type-arg] # noqa: E501 new: OrderedSet = OrderedSet.__new__(OrderedSet) # type: ignore[type-arg] # noqa: E501 new._list = new_list set.update(new, new_list) return new def add(self, element: _T, /) -> None: if element not in self: self._list.append(element) set.add(self, element) def remove(self, element: _T, /) -> None: # set.remove will raise if element is not in self set.remove(self, element) self._list.remove(element) def pop(self) -> _T: try: value = self._list.pop() except IndexError: raise KeyError("pop from an empty set") from None set.remove(self, value) return value def insert(self, pos: cython.Py_ssize_t, element: _T, /) -> None: if element not in self: self._list.insert(pos, element) set.add(self, element) def discard(self, element: _T, /) -> None: if element in self: set.remove(self, element) self._list.remove(element) def clear(self) -> None: set.clear(self) self._list = [] def __getitem__(self, key: cython.Py_ssize_t) -> _T: return self._list[key] def __iter__(self) -> Iterator[_T]: return iter(self._list) def __add__(self, other: Iterator[_T]) -> OrderedSet[_T]: return self.union(other) def __repr__(self) -> str: return "%s(%r)" % (self.__class__.__name__, self._list) __str__ = __repr__ # @cython.ccall # cdef function cannot have star argument def update(self, *iterables: Iterable[_T]) -> None: for iterable in iterables: for 
element in iterable: # inline of add. mainly for python, since for cython we # could create an @cfunc @inline _add function that would # perform the same if element not in self: self._list.append(element) set.add(self, element) def __ior__( self: OrderedSet[Union[_T, _S]], iterable: AbstractSet[_S] ) -> OrderedSet[Union[_T, _S]]: self.update(iterable) return self # @cython.ccall # cdef function cannot have star argument def union(self, *other: Iterable[_S]) -> OrderedSet[Union[_T, _S]]: result: OrderedSet[Union[_T, _S]] = self._from_list(list(self._list)) result.update(*other) return result def __or__(self, other: AbstractSet[_S]) -> OrderedSet[Union[_T, _S]]: return self.union(other) # @cython.ccall # cdef function cannot have star argument def intersection(self, *other: Iterable[Hashable]) -> OrderedSet[_T]: other_set: Set[Any] = set.intersection(self, *other) return self._from_list([a for a in self._list if a in other_set]) def __and__(self, other: AbstractSet[Hashable]) -> OrderedSet[_T]: return self.intersection(other) @cython.ccall @cython.annotation_typing(False) # avoid cython crash from generic return def symmetric_difference( self, other: Iterable[_S], / ) -> OrderedSet[Union[_T, _S]]: collection: Iterable[Any] other_set: Set[_S] if isinstance(other, set): other_set = cython.cast(set, other) collection = other_set elif hasattr(other, "__len__"): collection = other other_set = set(other) else: collection = list(other) other_set = set(collection) result: OrderedSet[Union[_T, _S]] = self._from_list( [a for a in self._list if a not in other_set] ) result.update([a for a in collection if a not in self]) return result def __xor__(self, other: AbstractSet[_S]) -> OrderedSet[Union[_T, _S]]: return self.symmetric_difference(other) # @cython.ccall # cdef function cannot have star argument def difference(self, *other: Iterable[Hashable]) -> OrderedSet[_T]: other_set: Set[Any] = set.difference(self, *other) return self._from_list([a for a in self._list if a in 
other_set]) def __sub__(self, other: AbstractSet[Hashable]) -> OrderedSet[_T]: return self.difference(other) # @cython.ccall # cdef function cannot have star argument def intersection_update(self, *other: Iterable[Hashable]) -> None: set.intersection_update(self, *other) self._list = [a for a in self._list if a in self] def __iand__(self, other: AbstractSet[Hashable]) -> OrderedSet[_T]: self.intersection_update(other) return self @cython.ccall @cython.annotation_typing(False) # avoid cython crash from generic return def symmetric_difference_update(self, other: Iterable[_T], /) -> None: collection = other if hasattr(other, "__len__") else list(other) set.symmetric_difference_update(self, collection) self._list = [a for a in self._list if a in self] self._list += [a for a in collection if a in self] def __ixor__( self: OrderedSet[Union[_T, _S]], other: AbstractSet[_S] ) -> OrderedSet[Union[_T, _S]]: self.symmetric_difference_update(other) return self # @cython.ccall # cdef function cannot have star argument def difference_update(self, *other: Iterable[Hashable]) -> None: set.difference_update(self, *other) self._list = [a for a in self._list if a in self] def __isub__(self, other: AbstractSet[Hashable]) -> OrderedSet[_T]: self.difference_update(other) return self if cython.compiled: @cython.cfunc @cython.inline def _get_id(item: object, /) -> cython.ulonglong: return cython.cast( cython.ulonglong, cython.cast(cython.pointer(cython.void), item), ) else: _get_id = id @cython.cclass
OrderedSet
python
ansible__ansible
test/lib/ansible_test/_internal/metadata.py
{ "start": 414, "end": 4310 }
class ____: """Metadata object for passing data to delegated tests.""" def __init__(self, debugger_flags: DebuggerFlags) -> None: """Initialize metadata.""" self.changes: dict[str, tuple[tuple[int, int], ...]] = {} self.cloud_config: t.Optional[dict[str, dict[str, t.Union[int, str, bool]]]] = None self.change_description: t.Optional[ChangeDescription] = None self.ci_provider: t.Optional[str] = None self.session_id = generate_name() self.ansible_lib_root = ANSIBLE_LIB_ROOT self.ansible_test_root = ANSIBLE_TEST_ROOT self.collection_root: str | None = None self.debugger_flags = debugger_flags self.debugger_settings: DebuggerSettings | None = None self.loaded = False def populate_changes(self, diff: t.Optional[list[str]]) -> None: """Populate the changeset using the given diff.""" patches = parse_diff(diff) patches: list[FileDiff] = sorted(patches, key=lambda k: k.new.path) self.changes = dict((patch.new.path, tuple(patch.new.ranges)) for patch in patches) renames = [patch.old.path for patch in patches if patch.old.path != patch.new.path and patch.old.exists and patch.new.exists] deletes = [patch.old.path for patch in patches if not patch.new.exists] # make sure old paths which were renamed or deleted are registered in changes for path in renames + deletes: if path in self.changes: # old path was replaced with another file continue # failed tests involving deleted files should be using line 0 since there is no content remaining self.changes[path] = ((0, 0),) def to_dict(self) -> dict[str, t.Any]: """Return a dictionary representation of the metadata.""" return dict( changes=self.changes, cloud_config=self.cloud_config, ci_provider=self.ci_provider, change_description=self.change_description.to_dict() if self.change_description else None, session_id=self.session_id, ansible_lib_root=self.ansible_lib_root, ansible_test_root=self.ansible_test_root, collection_root=self.collection_root, debugger_flags=dataclasses.asdict(self.debugger_flags), 
debugger_settings=self.debugger_settings.as_dict() if self.debugger_settings else None, ) def to_file(self, path: str) -> None: """Write the metadata to the specified file.""" data = self.to_dict() display.info('>>> Metadata: %s\n%s' % (path, data), verbosity=3) write_json_file(path, data) @staticmethod def from_file(path: str) -> Metadata: """Return metadata loaded from the specified file.""" data = read_json_file(path) return Metadata.from_dict(data) @staticmethod def from_dict(data: dict[str, t.Any]) -> Metadata: """Return metadata loaded from the specified dictionary.""" from .debugging import DebuggerSettings metadata = Metadata( debugger_flags=DebuggerFlags(**data['debugger_flags']), ) metadata.changes = data['changes'] metadata.cloud_config = data['cloud_config'] metadata.ci_provider = data['ci_provider'] metadata.change_description = ChangeDescription.from_dict(data['change_description']) if data['change_description'] else None metadata.session_id = data['session_id'] metadata.ansible_lib_root = data['ansible_lib_root'] metadata.ansible_test_root = data['ansible_test_root'] metadata.collection_root = data['collection_root'] metadata.debugger_settings = DebuggerSettings.from_dict(data['debugger_settings']) if data['debugger_settings'] else None metadata.loaded = True return metadata
Metadata
python
numba__numba
numba/tests/npyufunc/test_parallel_low_work.py
{ "start": 255, "end": 1056 }
class ____(unittest.TestCase): _numba_parallel_test_ = False def test_low_workcount(self): # build parallel native code ufunc pv = Vectorize(vector_add, target='parallel') for ty in (int32, uint32, float32, float64): pv.add(ty(ty, ty)) para_ufunc = pv.build_ufunc() # build python ufunc np_ufunc = np.vectorize(vector_add) # test it out def test(ty): data = np.arange(1).astype(ty) # just one item result = para_ufunc(data, data) gold = np_ufunc(data, data) np.testing.assert_allclose(gold, result) test(np.double) test(np.float32) test(np.int32) test(np.uint32) if __name__ == '__main__': unittest.main()
TestParallelLowWorkCount
python
run-llama__llama_index
llama-index-integrations/callbacks/llama-index-callbacks-wandb/llama_index/callbacks/wandb/base.py
{ "start": 1446, "end": 2246 }
class ____(TypedDict): job_type: Optional[str] dir: Optional[str] config: Union[Dict, str, None] project: Optional[str] entity: Optional[str] reinit: Optional[bool] tags: Optional[Sequence] group: Optional[str] name: Optional[str] notes: Optional[str] magic: Optional[Union[dict, str, bool]] config_exclude_keys: Optional[List[str]] config_include_keys: Optional[List[str]] anonymous: Optional[str] mode: Optional[str] allow_val_change: Optional[bool] resume: Optional[Union[bool, str]] force: Optional[bool] tensorboard: Optional[bool] sync_tensorboard: Optional[bool] monitor_gym: Optional[bool] save_code: Optional[bool] id: Optional[str] settings: Union["WBSettings", Dict[str, Any], None]
WandbRunArgs
python
sqlalchemy__sqlalchemy
test/dialect/mssql/test_engine.py
{ "start": 28916, "end": 30245 }
class ____(fixtures.TestBase): def test_ignore_no_transaction_on_rollback(self): """test #8231""" class ProgrammingError(Exception): pass dialect = base.dialect(ignore_no_transaction_on_rollback=True) dialect.dbapi = mock.Mock(ProgrammingError=ProgrammingError) connection = mock.Mock( rollback=mock.Mock( side_effect=ProgrammingError("Error 111214 happened") ) ) with expect_warnings( "ProgrammingError 111214 'No corresponding transaction found.' " "has been suppressed via ignore_no_transaction_on_rollback=True" ): dialect.do_rollback(connection) def test_other_programming_error_on_rollback(self): """test #8231""" class ProgrammingError(Exception): pass dialect = base.dialect(ignore_no_transaction_on_rollback=True) dialect.dbapi = mock.Mock(ProgrammingError=ProgrammingError) connection = mock.Mock( rollback=mock.Mock( side_effect=ProgrammingError("Some other error happened") ) ) with expect_raises_message( ProgrammingError, "Some other error happened" ): dialect.do_rollback(connection)
IgnoreNotransOnRollbackTest
python
getsentry__sentry
src/sentry/api/serializers/rest_framework/groupsearchview.py
{ "start": 1928, "end": 2103 }
class ____(ViewValidator): starred = serializers.BooleanField(required=False) def validate(self, data): return super().validate(data)
GroupSearchViewPostValidator
python
getsentry__sentry
src/sentry/utils/github.py
{ "start": 429, "end": 1129 }
class ____: def __init__(self, *, client_id: str, client_secret: str) -> None: self._client_id = client_id self._client_secret = client_secret def get(self, url: str) -> dict[str, Any]: with build_session() as session: try: resp = session.get( f"https://api.github.com{url}", headers={"Accept": "application/vnd.github.valkyrie-preview+json"}, auth=(self._client_id, self._client_secret), allow_redirects=True, ) except HTTPError as e: raise ApiError.from_response(e.response) return resp.json()
_GitHubClient
python
cython__cython
Cython/Compiler/ExprNodes.py
{ "start": 522397, "end": 531938 }
class ____(NumBinopNode): # '/' or '//' operator. cdivision = None truedivision = None # == "unknown" if operator == '/' ctruedivision = False cdivision_warnings = False zerodivision_check = None def find_compile_time_binary_operator(self, op1, op2): func = compile_time_binary_operators[self.operator] if self.operator == '/' and self.truedivision is None: # => true div for floats, floor div for integers if isinstance(op1, int) and isinstance(op2, int): func = compile_time_binary_operators['//'] return func def calculate_constant_result(self): op1 = self.operand1.constant_result op2 = self.operand2.constant_result func = self.find_compile_time_binary_operator(op1, op2) self.constant_result = func(op1, op2) def compile_time_value(self, denv): operand1 = self.operand1.compile_time_value(denv) operand2 = self.operand2.compile_time_value(denv) func = self.find_compile_time_binary_operator(operand1, operand2) try: return func(operand1, operand2) except Exception as e: self.compile_time_value_error(e) def _check_truedivision(self, env): if self.cdivision or env.directives['cdivision']: self.ctruedivision = False else: self.ctruedivision = self.truedivision def infer_type(self, env): self._check_truedivision(env) return self.result_type( self.operand1.infer_type(env), self.operand2.infer_type(env), env) def infer_builtin_types_operation(self, type1, type2): result_type = super().infer_builtin_types_operation(type1, type2) if result_type is not None and self.operator == '/': if self.truedivision or self.ctruedivision: # Result of truedivision is not an integer if result_type is Builtin.int_type: return PyrexTypes.c_double_type elif result_type.is_int: return PyrexTypes.widest_numeric_type(PyrexTypes.c_double_type, result_type) elif result_type is Builtin.int_type or result_type.is_int: # Cannot infer 'int' since the result might be a 'float' in Python 3 result_type = None return result_type def analyse_operation(self, env): self._check_truedivision(env) result = 
NumBinopNode.analyse_operation(self, env) # The assumption here is that result is either 'self' or a coercion # node containing 'self'. Thus it is reasonable to keep manipulating # 'self' even if it's been replaced as the eventual result. if self.is_cpp_operation(): self.cdivision = True if not self.type.is_pyobject: self.zerodivision_check = ( self.cdivision is None and not env.directives['cdivision'] and (not self.operand2.has_constant_result() or self.operand2.constant_result == 0)) if self.zerodivision_check or env.directives['cdivision_warnings']: # Need to check ahead of time to warn or raise zero division error self.operand1 = self.operand1.coerce_to_simple(env) self.operand2 = self.operand2.coerce_to_simple(env) return result # should either be self, or wrap self def compute_c_result_type(self, type1, type2): if self.operator == '/' and self.ctruedivision and not type1.is_cpp_class and not type2.is_cpp_class: if not type1.is_float and not type2.is_float: widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type) widest_type = PyrexTypes.widest_numeric_type(type2, widest_type) return widest_type return NumBinopNode.compute_c_result_type(self, type1, type2) def zero_division_message(self): if self.type.is_int: return "integer division or modulo by zero" else: return "float division" def generate_evaluation_code(self, code): if not self.type.is_pyobject and not self.type.is_complex: if self.cdivision is None: self.cdivision = ( code.globalstate.directives['cdivision'] or self.type.is_float or ((self.type.is_numeric or self.type.is_enum) and not self.type.signed) ) if not self.cdivision: code.globalstate.use_utility_code( UtilityCode.load_cached("DivInt", "CMath.c").specialize(self.type)) NumBinopNode.generate_evaluation_code(self, code) self.generate_div_warning_code(code) def generate_div_warning_code(self, code): in_nogil = self.in_nogil_context if not self.type.is_pyobject: if self.zerodivision_check: if not self.infix: zero_test = 
"%s(%s)" % (self.type.unary_op('zero'), self.operand2.result()) else: zero_test = "%s == 0" % self.operand2.result() code.putln("if (unlikely(%s)) {" % zero_test) if in_nogil: code.put_ensure_gil() code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message()) if in_nogil: code.put_release_ensured_gil() code.putln(code.error_goto(self.pos)) code.putln("}") if self.type.is_int and self.type.signed and self.operator != '%': code.globalstate.use_utility_code(UtilityCode.load_cached("UnaryNegOverflows", "Overflow.c")) if self.operand2.type.signed == 2: # explicitly signed, no runtime check needed minus1_check = 'unlikely(%s == -1)' % self.operand2.result() else: type_of_op2 = self.operand2.type.empty_declaration_code() minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % ( type_of_op2, self.operand2.result(), type_of_op2) code.putln("else if (sizeof(%s) == sizeof(long) && %s " " && unlikely(__Pyx_UNARY_NEG_WOULD_OVERFLOW(%s))) {" % ( self.type.empty_declaration_code(), minus1_check, self.operand1.result())) if in_nogil: code.put_ensure_gil() code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");') if in_nogil: code.put_release_ensured_gil() code.putln(code.error_goto(self.pos)) code.putln("}") if code.globalstate.directives['cdivision_warnings'] and self.operator != '/': code.globalstate.use_utility_code( UtilityCode.load_cached("CDivisionWarning", "CMath.c")) code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % ( self.operand1.result(), self.operand2.result())) warning_code = "__Pyx_cdivision_warning(%(FILENAME)s, %(LINENO)s)" % { 'FILENAME': Naming.filename_cname, 'LINENO': Naming.lineno_cname, } if in_nogil: result_code = 'result' code.putln("int %s;" % result_code) code.put_ensure_gil() code.putln(code.set_error_info(self.pos, used=True)) code.putln("%s = %s;" % (result_code, warning_code)) code.put_release_ensured_gil() else: result_code = warning_code code.putln(code.set_error_info(self.pos, 
used=True)) code.put("if (unlikely(%s)) " % result_code) code.put_goto(code.error_label) code.putln("}") def calculate_result_code(self): if self.type.is_complex or self.is_cpp_operation(): return NumBinopNode.calculate_result_code(self) op1 = self.operand1.result() op2 = self.operand2.result() if self.type.is_float and self.operator == '//': return f"floor({op1} / {op2})" elif self.truedivision or self.cdivision: if self.truedivision: if self.type != self.operand1.type: op1 = self.type.cast_code(op1) if self.type != self.operand2.type: op2 = self.type.cast_code(op2) return f"({op1} / {op2})" else: b_is_constant = self.operand2.has_constant_result() return f"__Pyx_div_{self.type.specialization_name()}({op1}, {op2}, {bool(b_is_constant):d})" _find_formatting_types = re.compile( br"%" br"(?:%|" # %% br"(?:\([^)]+\))?" # %(name) br"[-+#,0-9 ]*([a-z])" # %.2f etc. br")").findall # These format conversion types can never trigger a Unicode string conversion in Py2. _safe_bytes_formats = frozenset({ # Excludes 's' and 'r', which can generate non-bytes strings. b'd', b'i', b'o', b'u', b'x', b'X', b'e', b'E', b'f', b'F', b'g', b'G', b'c', b'b', b'a', })
DivNode
python
justquick__django-activity-stream
runtests/testapp/tests/test_django.py
{ "start": 441, "end": 2718 }
class ____(ActivityBaseTestCase): def setUp(self): super().setUp() self.user = self.User.objects.create(username='test') action.send(self.user, verb='was created') def test_accessor(self): self.assertEqual(len(Action.objects.testfoo(self.user)), 1) self.assertEqual( len(Action.objects.testfoo(self.user, datetime(1970, 1, 1))), 0 ) def test_mystream(self): self.assertEqual( len(self.user.actor_actions.testbar('was created')), 1 ) self.assertEqual( len(self.user.action_object_actions.testbar('was created')), 0 ) def test_registration(self): instance = Unregistered.objects.create(name='fubar') self.assertRaises(ImproperlyConfigured, actor_stream, instance) register(Unregistered) self.assertEqual(actor_stream(instance).count(), 0) self.assertRaises(RuntimeError, model_stream, Abstract) self.assertRaises(ImproperlyConfigured, register, Abstract) unregister(Unregistered) def test_tag_custom_activity_stream(self): stream = self.user.actor_actions.testbar('was created') output = render('''{% activity_stream 'testbar' 'was created' %} {% for action in stream %} {{ action }} {% endfor %} ''', user=self.user) self.assertAllIn([str(action) for action in stream], output) self.assertEqual( self.capture( 'testapp_custom_feed', 'was created')['totalItems'], 1 ) def test_customuser(self): self.assertEqual(self.User, MyUser) self.assertEqual(self.user.get_full_name(), 'test') @skipUnless(USE_JSONFIELD, 'Django jsonfield disabled') def test_jsonfield(self): action.send( self.user, verb='said', text='foobar', tags=['sayings'], more_data={'pk': self.user.pk} ) newaction = Action.objects.filter(verb='said')[0] self.assertEqual(newaction.data['text'], 'foobar') self.assertEqual(newaction.data['tags'], ['sayings']) self.assertEqual(newaction.data['more_data'], {'pk': self.user.pk})
TestAppTests
python
aimacode__aima-python
deep_learning4e.py
{ "start": 3130, "end": 3504 }
class ____(Layer): """1D softmax output layer in 19.3.2.""" def __init__(self, size=3): super().__init__(size) def forward(self, inputs, activation=SoftMax): assert len(self.nodes) == len(inputs) res = activation().function(inputs) for node, val in zip(self.nodes, res): node.value = val return res
OutputLayer
python
apache__airflow
task-sdk/src/airflow/sdk/exceptions.py
{ "start": 7007, "end": 7121 }
class ____(AirflowException): """Raised when a task failed during deferral for some reason."""
TaskDeferralError
python
facebook__pyre-check
client/commands/infer.py
{ "start": 1987, "end": 2142 }
class ____(json_mixins.CamlCaseAndExcludeJsonMixin): qualifier: str path: str line: int @dataclasses.dataclass(frozen=True)
RawAnnotationLocation
python
allegroai__clearml
clearml/backend_api/services/v2_13/tasks.py
{ "start": 165724, "end": 168637 }
class ____(Request): """ Delete task configuration items :param task: Task ID :type task: str :param configuration: List of configuration itemss to delete :type configuration: Sequence[str] :param force: If set to True then both new and running task configuration can be deleted. Otherwise only the new task ones. Default is False :type force: bool """ _service = "tasks" _action = "delete_configuration" _version = "2.13" _schema = { "definitions": {}, "properties": { "configuration": { "description": "List of configuration itemss to delete", "items": {"type": "string"}, "type": "array", }, "force": { "description": "If set to True then both new and running task configuration can be deleted. Otherwise only the new task ones. Default is False", "type": "boolean", }, "task": {"description": "Task ID", "type": "string"}, }, "required": ["task", "configuration"], "type": "object", } def __init__(self, task: str, configuration: List[str], force: Optional[bool] = None, **kwargs: Any) -> None: super(DeleteConfigurationRequest, self).__init__(**kwargs) self.task = task self.configuration = configuration self.force = force @schema_property("task") def task(self) -> str: return self._property_task @task.setter def task(self, value: str) -> None: if value is None: self._property_task = None return self.assert_isinstance(value, "task", six.string_types) self._property_task = value @schema_property("configuration") def configuration(self) -> List[str]: return self._property_configuration @configuration.setter def configuration(self, value: List[str]) -> None: if value is None: self._property_configuration = None return self.assert_isinstance(value, "configuration", dict) self.assert_isinstance(value.keys(), "configuration_keys", six.string_types, is_array=True) self.assert_isinstance( value.values(), "configuration_values", (ConfigurationItem, dict), is_array=True, ) value = dict(((k, ConfigurationItem(**v) if isinstance(v, dict) else v) for (k, v) in value.items())) 
self._property_configuration = value @schema_property("force") def force(self) -> Optional[bool]: return self._property_force @force.setter def force(self, value: Optional[bool]) -> None: if value is None: self._property_force = None return self.assert_isinstance(value, "force", (bool,)) self._property_force = value
DeleteConfigurationRequest
python
eventlet__eventlet
eventlet/queue.py
{ "start": 5302, "end": 15036 }
class ____: """ This is a variant of Queue that behaves mostly like the standard :class:`Stdlib_Queue`. It differs by not supporting the :meth:`task_done <Stdlib_Queue.task_done>` or :meth:`join <Stdlib_Queue.join>` methods, and is a little faster for not having that overhead. """ def __init__(self, maxsize=None): if maxsize is None or maxsize < 0: # None is not comparable in 3.x self.maxsize = None else: self.maxsize = maxsize self.getters = set() self.putters = set() self._event_unlock = None self._init(maxsize) # QQQ make maxsize into a property with setter that schedules unlock if necessary def _init(self, maxsize): self.queue = collections.deque() def _get(self): return self.queue.popleft() def _put(self, item): self.queue.append(item) def __repr__(self): return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._format()) def __str__(self): return '<%s %s>' % (type(self).__name__, self._format()) def _format(self): result = 'maxsize=%r' % (self.maxsize, ) if getattr(self, 'queue', None): result += ' queue=%r' % self.queue if self.getters: result += ' getters[%s]' % len(self.getters) if self.putters: result += ' putters[%s]' % len(self.putters) if self._event_unlock is not None: result += ' unlocking' return result def qsize(self): """Return the size of the queue.""" return len(self.queue) def resize(self, size): """Resizes the queue's maximum size. 
If the size is increased, and there are putters waiting, they may be woken up.""" # None is not comparable in 3.x if self.maxsize is not None and (size is None or size > self.maxsize): # Maybe wake some stuff up self._schedule_unlock() self.maxsize = size def putting(self): """Returns the number of greenthreads that are blocked waiting to put items into the queue.""" return len(self.putters) def getting(self): """Returns the number of greenthreads that are blocked waiting on an empty queue.""" return len(self.getters) def empty(self): """Return ``True`` if the queue is empty, ``False`` otherwise.""" return not self.qsize() def full(self): """Return ``True`` if the queue is full, ``False`` otherwise. ``Queue(None)`` is never full. """ # None is not comparable in 3.x return self.maxsize is not None and self.qsize() >= self.maxsize def put(self, item, block=True, timeout=None): """Put an item into the queue. If optional arg *block* is true and *timeout* is ``None`` (the default), block if necessary until a free slot is available. If *timeout* is a positive number, it blocks at most *timeout* seconds and raises the :class:`Full` exception if no free slot was available within that time. Otherwise (*block* is false), put an item on the queue if a free slot is immediately available, else raise the :class:`Full` exception (*timeout* is ignored in that case). 
""" if self.maxsize is None or self.qsize() < self.maxsize: # there's a free slot, put an item right away self._put(item) if self.getters: self._schedule_unlock() elif not block and get_hub().greenlet is getcurrent(): # we're in the mainloop, so we cannot wait; we can switch() to other greenlets though # find a getter and deliver an item to it while self.getters: getter = self.getters.pop() if getter: self._put(item) item = self._get() getter.switch(item) return raise Full elif block: waiter = ItemWaiter(item, block) self.putters.add(waiter) timeout = Timeout(timeout, Full) try: if self.getters: self._schedule_unlock() result = waiter.wait() assert result is waiter, "Invalid switch into Queue.put: %r" % (result, ) if waiter.item is not _NONE: self._put(item) finally: timeout.cancel() self.putters.discard(waiter) elif self.getters: waiter = ItemWaiter(item, block) self.putters.add(waiter) self._schedule_unlock() result = waiter.wait() assert result is waiter, "Invalid switch into Queue.put: %r" % (result, ) if waiter.item is not _NONE: raise Full else: raise Full def put_nowait(self, item): """Put an item into the queue without blocking. Only enqueue the item if a free slot is immediately available. Otherwise raise the :class:`Full` exception. """ self.put(item, False) def get(self, block=True, timeout=None): """Remove and return an item from the queue. If optional args *block* is true and *timeout* is ``None`` (the default), block if necessary until an item is available. If *timeout* is a positive number, it blocks at most *timeout* seconds and raises the :class:`Empty` exception if no item was available within that time. Otherwise (*block* is false), return an item if one is immediately available, else raise the :class:`Empty` exception (*timeout* is ignored in that case). 
""" if self.qsize(): if self.putters: self._schedule_unlock() return self._get() elif not block and get_hub().greenlet is getcurrent(): # special case to make get_nowait() runnable in the mainloop greenlet # there are no items in the queue; try to fix the situation by unlocking putters while self.putters: putter = self.putters.pop() if putter: putter.switch(putter) if self.qsize(): return self._get() raise Empty elif block: waiter = Waiter() timeout = Timeout(timeout, Empty) try: self.getters.add(waiter) if self.putters: self._schedule_unlock() try: return waiter.wait() except: self._schedule_unlock() raise finally: self.getters.discard(waiter) timeout.cancel() else: raise Empty def get_nowait(self): """Remove and return an item from the queue without blocking. Only get an item if one is immediately available. Otherwise raise the :class:`Empty` exception. """ return self.get(False) def _unlock(self): try: while True: if self.qsize() and self.getters: getter = self.getters.pop() if getter: try: item = self._get() except: getter.throw(*sys.exc_info()) else: getter.switch(item) elif self.putters and self.getters: putter = self.putters.pop() if putter: getter = self.getters.pop() if getter: item = putter.item # this makes greenlet calling put() not to call _put() again putter.item = _NONE self._put(item) item = self._get() getter.switch(item) putter.switch(putter) else: self.putters.add(putter) elif self.putters and (self.getters or self.maxsize is None or self.qsize() < self.maxsize): putter = self.putters.pop() putter.switch(putter) elif self.putters and not self.getters: full = [p for p in self.putters if not p.block] if not full: break for putter in full: self.putters.discard(putter) get_hub().schedule_call_global( 0, putter.greenlet.throw, Full) else: break finally: self._event_unlock = None # QQQ maybe it's possible to obtain this info from libevent? # i.e. 
whether this event is pending _OR_ currently executing # testcase: 2 greenlets: while True: q.put(q.get()) - nothing else has a change to execute # to avoid this, schedule unlock with timer(0, ...) once in a while def _schedule_unlock(self): if self._event_unlock is None: self._event_unlock = get_hub().schedule_call_global(0, self._unlock) # TODO(stephenfin): Remove conditional when we bump the minimum Python # version if sys.version_info >= (3, 9): __class_getitem__ = classmethod(types.GenericAlias)
LightQueue
python
wandb__wandb
wandb/vendor/watchdog_0_9_0/wandb_watchdog/events.py
{ "start": 7042, "end": 7529 }
class ____(FileSystemEvent): """ File system event representing directory modification on the file system. """ event_type = EVENT_TYPE_MODIFIED is_directory = True def __init__(self, src_path): super(DirModifiedEvent, self).__init__(src_path) def __repr__(self): return ("<%(class_name)s: src_path=%(src_path)r>" ) % (dict(class_name=self.__class__.__name__, src_path=self.src_path))
DirModifiedEvent
python
weaviate__weaviate-python-client
weaviate/collections/classes/config_vector_index.py
{ "start": 14183, "end": 20310 }
class ____: MultiVector = _VectorIndexMultiVector Quantizer = _VectorIndexQuantizer @staticmethod def none() -> _VectorIndexConfigSkipCreate: """Create a `_VectorIndexConfigSkipCreate` object to be used when configuring Weaviate to not index your vectors. Use this method when defining the `vector_index_config` argument in `collections.create()`. """ return _VectorIndexConfigSkipCreate( distance=None, quantizer=None, multivector=None, ) @overload @staticmethod @deprecated( 'Using the "multi_vector" argument is deprecated. Instead, specify it at the top-level in `multi_vector_index_config` when creating your `vector_config` with `MultiVectors.module()`' ) def hnsw( cleanup_interval_seconds: Optional[int] = None, distance_metric: Optional[VectorDistances] = None, dynamic_ef_factor: Optional[int] = None, dynamic_ef_max: Optional[int] = None, dynamic_ef_min: Optional[int] = None, ef: Optional[int] = None, ef_construction: Optional[int] = None, filter_strategy: Optional[VectorFilterStrategy] = None, flat_search_cutoff: Optional[int] = None, max_connections: Optional[int] = None, vector_cache_max_objects: Optional[int] = None, *, quantizer: Optional[_QuantizerConfigCreate] = None, multi_vector: _MultiVectorConfigCreate, ) -> _VectorIndexConfigHNSWCreate: ... @overload @staticmethod def hnsw( cleanup_interval_seconds: Optional[int] = None, distance_metric: Optional[VectorDistances] = None, dynamic_ef_factor: Optional[int] = None, dynamic_ef_max: Optional[int] = None, dynamic_ef_min: Optional[int] = None, ef: Optional[int] = None, ef_construction: Optional[int] = None, filter_strategy: Optional[VectorFilterStrategy] = None, flat_search_cutoff: Optional[int] = None, max_connections: Optional[int] = None, vector_cache_max_objects: Optional[int] = None, quantizer: Optional[_QuantizerConfigCreate] = None, multi_vector: Optional[_MultiVectorConfigCreate] = None, ) -> _VectorIndexConfigHNSWCreate: ... 
@staticmethod def hnsw( cleanup_interval_seconds: Optional[int] = None, distance_metric: Optional[VectorDistances] = None, dynamic_ef_factor: Optional[int] = None, dynamic_ef_max: Optional[int] = None, dynamic_ef_min: Optional[int] = None, ef: Optional[int] = None, ef_construction: Optional[int] = None, filter_strategy: Optional[VectorFilterStrategy] = None, flat_search_cutoff: Optional[int] = None, max_connections: Optional[int] = None, vector_cache_max_objects: Optional[int] = None, quantizer: Optional[_QuantizerConfigCreate] = None, multi_vector: Optional[_MultiVectorConfigCreate] = None, ) -> _VectorIndexConfigHNSWCreate: """Create a `_VectorIndexConfigHNSWCreate` object to be used when defining the HNSW vector index configuration of Weaviate. Use this method when defining the `vector_index_config` argument in `collections.create()`. Args: See [the docs](https://weaviate.io/developers/weaviate/configuration/indexes#how-to-configure-hnsw) for a more detailed view! """ # noqa: D417 (missing argument descriptions in the docstring) if multi_vector is not None: _Warnings.multi_vector_in_hnsw_config() return _VectorIndexConfigHNSWCreate( cleanupIntervalSeconds=cleanup_interval_seconds, distance=distance_metric, dynamicEfMin=dynamic_ef_min, dynamicEfMax=dynamic_ef_max, dynamicEfFactor=dynamic_ef_factor, efConstruction=ef_construction, ef=ef, filterStrategy=filter_strategy, flatSearchCutoff=flat_search_cutoff, maxConnections=max_connections, vectorCacheMaxObjects=vector_cache_max_objects, quantizer=quantizer, multivector=multi_vector, ) @staticmethod def flat( distance_metric: Optional[VectorDistances] = None, vector_cache_max_objects: Optional[int] = None, quantizer: Optional[_QuantizerConfigCreate] = None, ) -> _VectorIndexConfigFlatCreate: """Create a `_VectorIndexConfigFlatCreate` object to be used when defining the FLAT vector index configuration of Weaviate. Use this method when defining the `vector_index_config` argument in `collections.create()`. 
Args: See [the docs](https://weaviate.io/developers/weaviate/configuration/indexes#how-to-configure-hnsw) for a more detailed view! """ # noqa: D417 (missing argument descriptions in the docstring) return _VectorIndexConfigFlatCreate( distance=distance_metric, vectorCacheMaxObjects=vector_cache_max_objects, quantizer=quantizer, multivector=None, ) @staticmethod def dynamic( distance_metric: Optional[VectorDistances] = None, threshold: Optional[int] = None, hnsw: Optional[_VectorIndexConfigHNSWCreate] = None, flat: Optional[_VectorIndexConfigFlatCreate] = None, ) -> _VectorIndexConfigDynamicCreate: """Create a `_VectorIndexConfigDynamicCreate` object to be used when defining the DYNAMIC vector index configuration of Weaviate. Use this method when defining the `vector_index_config` argument in `collections.create()`. Args: See [the docs](https://weaviate.io/developers/weaviate/configuration/indexes#how-to-configure-hnsw) for a more detailed view! """ # noqa: D417 (missing argument descriptions in the docstring) return _VectorIndexConfigDynamicCreate( distance=distance_metric, threshold=threshold, hnsw=hnsw, flat=flat, quantizer=None, multivector=None, )
_VectorIndex
python
run-llama__llama_index
llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/base.py
{ "start": 2226, "end": 19556 }
class ____(FunctionCallingLLM): """ Gemini LLM. Examples: `pip install llama-index-llms-gemini` ```python from llama_index.llms.gemini import Gemini llm = Gemini(model="models/gemini-ultra", api_key="YOUR_API_KEY") resp = llm.complete("Write a poem about a magic backpack") print(resp) ``` """ model: str = Field(default=GEMINI_MODELS[0], description="The Gemini model to use.") temperature: float = Field( default=DEFAULT_TEMPERATURE, description="The temperature to use during generation.", ge=0.0, le=2.0, ) max_tokens: int = Field( default=DEFAULT_NUM_OUTPUTS, description="The number of tokens to generate.", gt=0, ) generate_kwargs: dict = Field( default_factory=dict, description="Kwargs for generation." ) _model: genai.GenerativeModel = PrivateAttr() _model_meta: genai.types.Model = PrivateAttr() _request_options: Optional[genai.types.RequestOptions] = PrivateAttr() def __init__( self, api_key: Optional[str] = None, model: str = GEMINI_MODELS[0], temperature: float = DEFAULT_TEMPERATURE, max_tokens: Optional[int] = None, generation_config: Optional[genai.types.GenerationConfigDict] = None, safety_settings: Optional[genai.types.SafetySettingDict] = None, callback_manager: Optional[CallbackManager] = None, api_base: Optional[str] = None, transport: Optional[str] = None, model_name: Optional[str] = None, default_headers: Optional[Dict[str, str]] = None, request_options: Optional[genai.types.RequestOptions] = None, **generate_kwargs: Any, ): """Creates a new Gemini model interface.""" if model_name is not None: warnings.warn( "model_name is deprecated, please use model instead", DeprecationWarning, ) model = model_name # API keys are optional. The API can be authorised via OAuth (detected # environmentally) or by the GOOGLE_API_KEY environment variable. 
config_params: Dict[str, Any] = { "api_key": api_key or os.getenv("GOOGLE_API_KEY"), } if api_base: config_params["client_options"] = {"api_endpoint": api_base} if transport: config_params["transport"] = transport if default_headers: default_metadata = [] for key, value in default_headers.items(): default_metadata.append((key, value)) # `default_metadata` contains (key, value) pairs that will be sent with every request. # When using `transport="rest"`, these will be sent as HTTP headers. config_params["default_metadata"] = default_metadata # transport: A string, one of: [`rest`, `grpc`, `grpc_asyncio`]. genai.configure(**config_params) base_gen_config = generation_config if generation_config else {} # Explicitly passed args take precedence over the generation_config. final_gen_config = cast( generation_types.GenerationConfigDict, {"temperature": temperature, **base_gen_config}, ) model_meta = genai.get_model(model) genai_model = genai.GenerativeModel( model_name=model, generation_config=final_gen_config, safety_settings=safety_settings, ) supported_methods = model_meta.supported_generation_methods if "generateContent" not in supported_methods: raise ValueError( f"Model {model} does not support content generation, only " f"{supported_methods}." 
) if not max_tokens: max_tokens = model_meta.output_token_limit else: max_tokens = min(max_tokens, model_meta.output_token_limit) super().__init__( model=model, temperature=temperature, max_tokens=max_tokens, generate_kwargs=generate_kwargs, callback_manager=callback_manager, ) self._model_meta = model_meta self._model = genai_model self._request_options = request_options self._is_function_call_model = is_function_calling_model(model) @classmethod def class_name(cls) -> str: return "Gemini_LLM" @property def metadata(self) -> LLMMetadata: total_tokens = self._model_meta.input_token_limit + self.max_tokens return LLMMetadata( context_window=total_tokens, num_output=self.max_tokens, model_name=self.model, is_chat_model=True, # All gemini models support function calling is_function_calling_model=self._is_function_call_model, ) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: request_options = self._request_options or kwargs.pop("request_options", None) result = self._model.generate_content( prompt, request_options=request_options, **kwargs ) return completion_from_gemini_response(result) @llm_completion_callback() async def acomplete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: request_options = self._request_options or kwargs.pop("request_options", None) result = await self._model.generate_content_async( prompt, request_options=request_options, **kwargs ) return completion_from_gemini_response(result) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: request_options = self._request_options or kwargs.pop("request_options", None) def gen(): text = "" it = self._model.generate_content( prompt, stream=True, request_options=request_options, **kwargs ) for r in it: delta = r.text or "" text += delta yield completion_from_gemini_response(r, text=text, delta=delta) return gen() 
@llm_completion_callback() def astream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseAsyncGen: request_options = self._request_options or kwargs.pop("request_options", None) async def gen(): text = "" it = await self._model.generate_content_async( prompt, stream=True, request_options=request_options, **kwargs ) async for r in it: delta = r.text or "" text += delta yield completion_from_gemini_response(r, text=text, delta=delta) return gen() @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: request_options = self._request_options or kwargs.pop("request_options", None) merged_messages = merge_neighboring_same_role_messages(messages) *history, next_msg = map(chat_message_to_gemini, merged_messages) chat = self._model.start_chat(history=history) response = chat.send_message( next_msg, request_options=request_options, **kwargs, ) return chat_from_gemini_response(response) @llm_chat_callback() async def achat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponse: request_options = self._request_options or kwargs.pop("request_options", None) merged_messages = merge_neighboring_same_role_messages(messages) *history, next_msg = map(chat_message_to_gemini, merged_messages) chat = self._model.start_chat(history=history) response = await chat.send_message_async( next_msg, request_options=request_options, **kwargs ) return chat_from_gemini_response(response) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: request_options = self._request_options or kwargs.pop("request_options", None) merged_messages = merge_neighboring_same_role_messages(messages) *history, next_msg = map(chat_message_to_gemini, merged_messages) chat = self._model.start_chat(history=history) response = chat.send_message( next_msg, stream=True, request_options=request_options, **kwargs ) def gen() -> ChatResponseGen: content = "" 
existing_tool_calls = [] for r in response: top_candidate = r.candidates[0] content_delta = top_candidate.content.parts[0].text content += content_delta llama_resp = chat_from_gemini_response(r) existing_tool_calls.extend( llama_resp.message.additional_kwargs.get("tool_calls", []) ) llama_resp.delta = content_delta llama_resp.message.content = content llama_resp.message.additional_kwargs["tool_calls"] = existing_tool_calls yield llama_resp return gen() @llm_chat_callback() async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: request_options = self._request_options or kwargs.pop("request_options", None) merged_messages = merge_neighboring_same_role_messages(messages) *history, next_msg = map(chat_message_to_gemini, merged_messages) chat = self._model.start_chat(history=history) response = await chat.send_message_async( next_msg, stream=True, request_options=request_options, **kwargs ) async def gen() -> ChatResponseAsyncGen: content = "" existing_tool_calls = [] async for r in response: top_candidate = r.candidates[0] content_delta = top_candidate.content.parts[0].text content += content_delta llama_resp = chat_from_gemini_response(r) existing_tool_calls.extend( llama_resp.message.additional_kwargs.get("tool_calls", []) ) llama_resp.delta = content_delta llama_resp.message.content = content llama_resp.message.additional_kwargs["tool_calls"] = existing_tool_calls yield llama_resp return gen() def _to_function_calling_config( self, tool_required: bool, tool_choice: Optional[str] ) -> dict: if tool_choice and not isinstance(tool_choice, str): raise ValueError("Gemini only supports string tool_choices") tool_choice = tool_choice or ("any" if tool_required else "auto") if tool_choice == "auto": tool_mode = FunctionCallingMode.AUTO elif tool_choice == "none": tool_mode = FunctionCallingMode.NONE else: tool_mode = FunctionCallingMode.ANY allowed_function_names = None if tool_choice not in ["auto", "none", "any"]: 
allowed_function_names = [tool_choice] return { "mode": tool_mode, **( {"allowed_function_names": allowed_function_names} if allowed_function_names else {} ), } def _prepare_chat_with_tools( self, tools: Sequence["BaseTool"], user_msg: Optional[Union[str, ChatMessage]] = None, chat_history: Optional[List[ChatMessage]] = None, verbose: bool = False, allow_parallel_tool_calls: bool = False, tool_required: bool = False, tool_choice: Optional[Union[str, dict]] = None, strict: Optional[bool] = None, **kwargs: Any, ) -> Dict[str, Any]: """Predict and call the tool.""" tool_config = { "function_calling_config": self._to_function_calling_config( tool_required, tool_choice ), } tool_declarations = [] for tool in tools: descriptions = {} for param_name, param_schema in tool.metadata.get_parameters_dict()[ "properties" ].items(): param_description = param_schema.get("description", None) if param_description: descriptions[param_name] = param_description tool.metadata.fn_schema.__doc__ = tool.metadata.description tool_declarations.append( FunctionDeclaration.from_function(tool.metadata.fn_schema, descriptions) ) if isinstance(user_msg, str): user_msg = ChatMessage(role=MessageRole.USER, content=user_msg) messages = chat_history or [] if user_msg: messages.append(user_msg) return { "messages": messages, "tools": ( ToolDict(function_declarations=tool_declarations) if tool_declarations else None ), "tool_config": tool_config, **kwargs, } def get_tool_calls_from_response( self, response: ChatResponse, error_on_no_tool_call: bool = True, **kwargs: Any, ) -> List[ToolSelection]: """Predict and call the tool.""" tool_calls = response.message.additional_kwargs.get("tool_calls", []) if len(tool_calls) < 1: if error_on_no_tool_call: raise ValueError( f"Expected at least one tool call, but got {len(tool_calls)} tool calls." 
) else: return [] tool_selections = [] for tool_call in tool_calls: if not isinstance(tool_call, genai.protos.FunctionCall): raise ValueError("Invalid tool_call object") tool_selections.append( ToolSelection( tool_id=str(uuid.uuid4()), tool_name=tool_call.name, tool_kwargs=dict(tool_call.args), ) ) return tool_selections @dispatcher.span def structured_predict( self, output_cls: Type[Model], prompt: PromptTemplate, llm_kwargs: Optional[Dict[str, Any]] = None, **prompt_args: Any, ) -> Model: """Structured predict.""" llm_kwargs = llm_kwargs or {} if self._is_function_call_model: llm_kwargs["tool_required"] = True # by default structured prediction uses function calling to extract structured outputs # here we force tool_choice to be required return super().structured_predict( output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args ) @dispatcher.span async def astructured_predict( self, output_cls: Type[Model], prompt: PromptTemplate, llm_kwargs: Optional[Dict[str, Any]] = None, **prompt_args: Any, ) -> Model: """Structured predict.""" llm_kwargs = llm_kwargs or {} if self._is_function_call_model: llm_kwargs["tool_required"] = True # by default structured prediction uses function calling to extract structured outputs # here we force tool_choice to be required return await super().astructured_predict( output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args ) @dispatcher.span def stream_structured_predict( self, output_cls: Type[Model], prompt: PromptTemplate, llm_kwargs: Optional[Dict[str, Any]] = None, **prompt_args: Any, ) -> Generator[Union[Model, FlexibleModel], None, None]: """Stream structured predict.""" llm_kwargs = llm_kwargs or {} if self._is_function_call_model: llm_kwargs["tool_required"] = True # by default structured prediction uses function calling to extract structured outputs # here we force tool_choice to be required return super().stream_structured_predict( output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args ) @dispatcher.span async def 
astream_structured_predict( self, output_cls: Type[Model], prompt: PromptTemplate, llm_kwargs: Optional[Dict[str, Any]] = None, **prompt_args: Any, ) -> AsyncGenerator[Union[Model, FlexibleModel], None]: """Stream structured predict.""" llm_kwargs = llm_kwargs or {} if self._is_function_call_model: llm_kwargs["tool_required"] = True # by default structured prediction uses function calling to extract structured outputs # here we force tool_choice to be required return await super().astream_structured_predict( output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args )
Gemini
python
huggingface__transformers
src/transformers/models/aria/modular_aria.py
{ "start": 40899, "end": 47800 }
class ____(ProcessorMixin): """ AriaProcessor is a processor for the Aria model which wraps the Aria image preprocessor and the LLama slow tokenizer. Args: image_processor (`AriaImageProcessor`, *optional*): The AriaImageProcessor to use for image preprocessing. tokenizer (`PreTrainedTokenizerBase`, *optional*): An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. size_conversion (`Dict`, *optional*): A dictionary indicating size conversions for images. """ def __init__( self, image_processor=None, tokenizer: Union[AutoTokenizer, str] = None, chat_template: Optional[str] = None, size_conversion: Optional[dict[Union[float, int], int]] = None, ): if size_conversion is None: size_conversion = {490: 128, 980: 256} self.size_conversion = {int(k): v for k, v in size_conversion.items()} self.image_token = tokenizer.image_token self.image_token_id = tokenizer.image_token_id if tokenizer is not None and tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.unk_token super().__init__(image_processor, tokenizer, chat_template=chat_template) def __call__( self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], images: Optional[ImageInput] = None, **kwargs: Unpack[AriaProcessorKwargs], ) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). Args: text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). images (`ImageInput`): The image or batch of images to be prepared. 
Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **pixel_mask** -- Pixel mask to be fed to a model. Returned when `images` is not `None`. """ output_kwargs = self._merge_kwargs( AriaProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if isinstance(text, str): text = [text] elif not isinstance(text, list) and not isinstance(text[0], str): raise TypeError("Invalid input text. Please provide a string, or a list of strings") if images is not None: image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"]) # expand the image_token according to the num_crops and tokens per image tokens_per_image = self.size_conversion[image_inputs.pixel_values.shape[2]] prompt_strings = [] num_crops = image_inputs.pop("num_crops") * tokens_per_image for sample in text: sample = sample.replace(self.tokenizer.image_token, self.tokenizer.image_token * num_crops) prompt_strings.append(sample) else: image_inputs = {} prompt_strings = text return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None) return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False) text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"], return_tensors=None) self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"]) if return_mm_token_type_ids: array_ids = np.array(text_inputs["input_ids"]) mm_token_type_ids = 
np.zeros_like(text_inputs["input_ids"]) mm_token_type_ids[array_ids == self.image_token_id] = 1 text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist() return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors) def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`list[list[int]]`, *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. """ vision_data = {} if image_sizes is not None: images_kwargs = AriaProcessorKwargs._defaults.get("images_kwargs", {}) images_kwargs.update(kwargs) max_size = images_kwargs.get("max_image_size", None) or self.image_processor.max_image_size num_image_patches = [ self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) for image_size in image_sizes ] num_image_tokens = [self.size_conversion[max_size] * num_patches for num_patches in num_image_patches] vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches}) return MultiModalData(**vision_data) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names # Remove `num_crops`, it is popped and used only when processing. Make a copy of list when removing # otherwise `self.image_processor.model_input_names` is also modified image_processor_input_names = [name for name in image_processor_input_names if name != "num_crops"] return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
AriaProcessor
python
realpython__materials
python-doctest/user.py
{ "start": 910, "end": 1278 }
class ____: def __init__(self, name, favorite_colors): """Initialize instances of User. Usage examples: >>> User( ... "John", {"#797EF6", "#4ADEDE", "#1AA7EC"} ... ) # doctest: +ELLIPSIS <sets.User object at 0x...> """ self.name = name self._favorite_colors = set(favorite_colors)
User_Three
python
tiangolo__fastapi
tests/test_jsonable_encoder.py
{ "start": 934, "end": 1093 }
class ____: def __iter__(self): raise NotImplementedError() @property def __dict__(self): raise NotImplementedError()
Unserializable
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 42780, "end": 43443 }
class ____(sgqlc.types.Enum): """The corresponding operation type for the action Enumeration Choices: * `ACCESS`: An existing resource was accessed * `AUTHENTICATION`: A resource performed an authentication event * `CREATE`: A new resource was created * `MODIFY`: An existing resource was modified * `REMOVE`: An existing resource was removed * `RESTORE`: An existing resource was restored * `TRANSFER`: An existing resource was transferred between multiple resources """ __schema__ = github_schema __choices__ = ("ACCESS", "AUTHENTICATION", "CREATE", "MODIFY", "REMOVE", "RESTORE", "TRANSFER")
OperationType
python
openai__openai-python
src/openai/types/evals/runs/output_item_list_response.py
{ "start": 1633, "end": 1842 }
class ____(BaseModel): content: Optional[str] = None """The content of the message.""" role: Optional[str] = None """The role of the message (e.g. "system", "assistant", "user")."""
SampleOutput
python
falconry__falcon
falcon/inspect.py
{ "start": 15318, "end": 16294 }
class ____(_Traversable): """Describes the middleware of the app. Args: middlewareTree (MiddlewareTreeInfo): The middleware tree of the app. middlewareClasses (list[MiddlewareClassInfo]): The middleware classes of the app. independent (bool): Whether or not the middleware components are executed independently. """ __visit_name__ = 'middleware' independent_text: str """Text created from the `independent` arg.""" def __init__( self, middleware_tree: MiddlewareTreeInfo, middleware_classes: list[MiddlewareClassInfo], independent: bool, ): self.middleware_tree = middleware_tree self.middleware_classes = middleware_classes self.independent = independent if independent: self.independent_text = 'Middleware are independent' else: self.independent_text = 'Middleware are dependent'
MiddlewareInfo
python
ipython__ipython
IPython/core/prefilter.py
{ "start": 19761, "end": 21347 }
class ____(Configurable): handler_name = Unicode("normal") esc_strings: List = List([]) shell = Instance( "IPython.core.interactiveshell.InteractiveShellABC", allow_none=True ) prefilter_manager = Instance( "IPython.core.prefilter.PrefilterManager", allow_none=True ) def __init__(self, shell=None, prefilter_manager=None, **kwargs): super(PrefilterHandler, self).__init__( shell=shell, prefilter_manager=prefilter_manager, **kwargs ) self.prefilter_manager.register_handler( self.handler_name, self, self.esc_strings ) def handle(self, line_info): # print("normal: ", line_info) """Handle normal input lines. Use as a template for handlers.""" # With autoindent on, we need some way to exit the input loop, and I # don't want to force the user to have to backspace all the way to # clear the line. The rule will be in this case, that either two # lines of pure whitespace in a row, or a line of pure whitespace but # of a size different to the indent level, will exit the input loop. line = line_info.line continue_prompt = line_info.continue_prompt if (continue_prompt and self.shell.autoindent and line.isspace() and 0 < abs(len(line) - self.shell.indent_current_nsp) <= 2): line = '' return line def __str__(self): return "<%s(name=%s)>" % (self.__class__.__name__, self.handler_name)
PrefilterHandler
python
pytorch__pytorch
benchmarks/tensorexpr/reduction.py
{ "start": 2587, "end": 2849 }
class ____(ReduceBench): def __init__(self, mode, device, dtype, M, N, K, skip_input_transform): super().__init__(mode, device, dtype, "mid", M, N, K, skip_input_transform) @staticmethod def module(): return "reduce_mid"
ReduceMidBench
python
facebook__pyre-check
tools/generate_taint_models/tests/get_dynamic_graphql_sources_test.py
{ "start": 1063, "end": 1894 }
class ____: id: int resolver1: bool resolver2: bool resolver3: bool resolver4: bool lambda_resolver: bool queryType = GraphQLObjectType( name="queryType", description="GraphQLObject directly created at top level", fields={ "no_resolver": GraphQLField(GraphQLNonNull(GraphQLID)), "resolver1": GraphQLField(GraphQLBoolean, resolve=function1), "resolver2": GraphQLField(GraphQLBoolean, resolve=function2), "resolver3": GraphQLField(GraphQLBoolean, resolve=TestClass.method1), "resolver4": GraphQLField(GraphQLBoolean, resolve=TestClass.method2), "lambda_resolver": GraphQLField(GraphQLBoolean, resolve=lambda x: x), "res": GraphQLField(GraphQLBoolean, resolve=excluded_function), }, ) SCHEMA = GraphQLSchema(query=queryType)
DirectObject