language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
tensorflow__tensorflow
tensorflow/compiler/tests/scatter_nd_op_test.py
{ "start": 8499, "end": 9461 }
class ____(xla_test.XLATestCase): def _runScatter(self, op): indices_np = np.array([[4], [3], [1], [7]], dtype=np.int32) updates_np = np.array(9, dtype=np.float32) with self.session() as sess, self.test_scope(): indices = array_ops.placeholder(indices_np.dtype, shape=indices_np.shape) updates = array_ops.placeholder(updates_np.dtype, shape=updates_np.shape) t = array_ops.ones([8], dtype=np.float32) out = op(t, indices, updates) return sess.run(out, feed_dict={indices: indices_np, updates: updates_np}) def testUpdate(self): self.assertAllEqual( self._runScatter(array_ops.tensor_scatter_update), np.array([1, 9, 1, 9, 9, 1, 1, 9], dtype=np.float32)) def testAdd(self): self.assertAllEqual( self._runScatter(array_ops.tensor_scatter_add), np.array([1, 10, 1, 10, 10, 1, 1, 10], dtype=np.float32)) if __name__ == "__main__": test.main()
ScatterNdTensorScalarUpdateTest
python
bokeh__bokeh
src/bokeh/core/property/visual.py
{ "start": 3762, "end": 4264 }
class ____(String): def validate(self, value: Any, detail: bool = True) -> None: super().validate(value, detail) if isinstance(value, str): if len(value) == 0: msg = "" if not detail else "empty string is not a valid font size value" raise ValueError(msg) elif not CSS_LENGTH_RE.match(value): msg = "" if not detail else f"{value!r} is not a valid font size value" raise ValueError(msg)
FontSize
python
scipy__scipy
scipy/signal/tests/test_signaltools.py
{ "start": 143320, "end": 155964 }
class ____: """Unit tests for function `._signaltools.envelope()`. """ @staticmethod def assert_close(actual, desired, msg, xp): a_r_tol = ({'atol': 1e-12, 'rtol': 1e-12} if xp_default_dtype(xp) == xp.float64 else {'atol': 1e-5, 'rtol': 1e-5} ) """Little helper to compare to arrays with proper tolerances""" xp_assert_close(actual, desired, **a_r_tol, err_msg=msg) def test_envelope_invalid_parameters(self, xp): """For `envelope()` Raise all exceptions that are used to verify function parameters. """ with pytest.raises(ValueError, match=r"Invalid parameter axis=2 for z.shape=.*"): envelope(np.ones(3), axis=2) with pytest.raises(ValueError, match=r"z.shape\[axis\] not > 0 for z.shape=.*"): envelope(xp.ones((3, 0)), axis=1) for bp_in in [(0, 1, 2), (0, 2.), (None, 2.)]: ts = ', '.join(map(str, bp_in)) with pytest.raises(ValueError, match=rf"bp_in=\({ts}\) isn't a 2-tuple of.*"): # noinspection PyTypeChecker envelope(xp.ones(4), bp_in=bp_in) with pytest.raises(ValueError, match="n_out=10.0 is not a positive integer or.*"): # noinspection PyTypeChecker envelope(xp.ones(4), n_out=10.) for bp_in in [(-1, 3), (1, 1), (0, 10)]: with pytest.raises(ValueError, match=r"`-n//2 <= bp_in\[0\] < bp_in\[1\] <=.*"): envelope(xp.ones(4), bp_in=bp_in) with pytest.raises(ValueError, match="residual='undefined' not in .*"): # noinspection PyTypeChecker envelope(xp.ones(4), residual='undefined') @skip_xp_backends("jax.numpy", reason="XXX: immutable arrays") def test_envelope_verify_parameters(self, xp): """Ensure that the various parametrizations produce compatible results. 
""" dt_r = xp_default_dtype(xp) dt_c = xp.complex64 if dt_r == xp.float32 else xp.complex128 Z = xp.asarray([4.0, 2, 2, 3, 0], dtype=dt_r) Zr_a = xp.asarray([4.0, 0, 0, 6, 0, 0, 0, 0], dtype=dt_r) z = sp_fft.irfft(Z) n = z.shape[0] # the reference envelope: ze2_0, zr_0 = xp.unstack(envelope(z, (1, 3), residual='all', squared=True)) self.assert_close(sp_fft.rfft(ze2_0), xp.asarray([4, 2, 0, 0, 0], dtype=dt_c), msg="Envelope calculation error", xp=xp) self.assert_close(sp_fft.rfft(zr_0), xp.asarray([4, 0, 0, 3, 0], dtype=dt_c), msg="Residual calculation error", xp=xp) ze_1, zr_1 = xp.unstack(envelope(z, (1, 3), residual='all', squared=False)) self.assert_close(ze_1**2, ze2_0, msg="Unsquared versus Squared envelope calculation error", xp=xp) self.assert_close(zr_1, zr_0, msg="Unsquared versus Squared residual calculation error", xp=xp) ze2_2, zr_2 = xp.unstack( envelope(z, (1, 3), residual='all', squared=True, n_out=3*n) ) self.assert_close(ze2_2[::3], ze2_0, msg="3x up-sampled envelope calculation error", xp=xp) self.assert_close(zr_2[::3], zr_0, msg="3x up-sampled residual calculation error", xp=xp) ze2_3, zr_3 = xp.unstack(envelope(z, (1, 3), residual='lowpass', squared=True)) self.assert_close(ze2_3, ze2_0, msg="`residual='lowpass'` envelope calculation error", xp=xp) self.assert_close(sp_fft.rfft(zr_3), xp.asarray([4, 0, 0, 0, 0], dtype=dt_c), msg="`residual='lowpass'` residual calculation error", xp=xp) ze2_4 = envelope(z, (1, 3), residual=None, squared=True) self.assert_close(ze2_4, ze2_0, msg="`residual=None` envelope calculation error", xp=xp) # compare complex analytic signal to real version Z_a = xp.asarray(Z, copy=True) Z_a[1:] *= 2 z_a = sp_fft.ifft(Z_a, n=n) # analytic signal of Z self.assert_close(xp.real(z_a), z, msg="Reference analytic signal error", xp=xp) ze2_a, zr_a = xp.unstack(envelope(z_a, (1, 3), residual='all', squared=True)) self.assert_close(ze2_a, xp.astype(ze2_0, dt_c), # dtypes must match msg="Complex envelope calculation error", xp=xp) 
self.assert_close(sp_fft.fft(zr_a), xp.asarray(Zr_a, dtype=dt_c), msg="Complex residual calculation error", xp=xp) @skip_xp_backends("jax.numpy", reason="XXX: immutable arrays") @pytest.mark.parametrize( " Z, bp_in, Ze2_desired, Zr_desired", [([1, 0, 2, 2, 0], (1, None), [4, 2, 0, 0, 0], [1, 0, 0, 0, 0]), ([4, 0, 2, 0, 0], (0, None), [4, 0, 2, 0, 0], [0, 0, 0, 0, 0]), ([4, 0, 0, 2, 0], (None, None), [4, 0, 0, 2, 0], [0, 0, 0, 0, 0]), ([0, 0, 2, 2, 0], (1, 3), [2, 0, 0, 0, 0], [0, 0, 0, 2, 0]), ([4, 0, 2, 2, 0], (-3, 3), [4, 0, 2, 0, 0], [0, 0, 0, 2, 0]), ([4, 0, 3, 4, 0], (None, 1), [2, 0, 0, 0, 0], [0, 0, 3, 4, 0]), ([4, 0, 3, 4, 0], (None, 0), [0, 0, 0, 0, 0], [4, 0, 3, 4, 0])]) def test_envelope_real_signals(self, Z, bp_in, Ze2_desired, Zr_desired, xp): """Test envelope calculation with real-valued test signals. The comparisons are performed in the Fourier space, since it makes evaluating the bandpass filter behavior straightforward. Note that also the squared envelope can be easily calculated by hand, if one recalls that coefficients of a complex-valued Fourier series representing the signal can be directly determined by an FFT and that the absolute square of a Fourier series is again a Fourier series. 
""" Z = xp.asarray(Z, dtype=xp.float64) Ze2_desired = xp.asarray(Ze2_desired, dtype=xp.float64) Zr_desired = xp.asarray(Zr_desired, dtype=xp.float64) z = sp_fft.irfft(Z) ze2, zr = xp.unstack(envelope(z, bp_in, residual='all', squared=True)) ze2_lp, zr_lp = xp.unstack(envelope(z, bp_in, residual='lowpass', squared=True)) Ze2, Zr, Ze2_lp, Zr_lp = (sp_fft.rfft(z_) for z_ in (ze2, zr, ze2_lp, zr_lp)) Ze2_desired = xp.asarray(Ze2_desired, dtype=xp.complex128) Zr_desired = xp.asarray(Zr_desired, dtype=xp.complex128) self.assert_close(Ze2, Ze2_desired, msg="Envelope calculation error (residual='all')", xp=xp) self.assert_close(Zr, Zr_desired, msg="Residual calculation error (residual='all')", xp=xp) if bp_in[1] is not None: Zr_desired[bp_in[1]:] = 0 self.assert_close(Ze2_lp, Ze2_desired, msg="Envelope calculation error (residual='lowpass')", xp=xp) self.assert_close(Zr_lp, Zr_desired, msg="Residual calculation error (residual='lowpass')", xp=xp) @skip_xp_backends("jax.numpy", reason="XXX: immutable arrays") @pytest.mark.parametrize( " Z, bp_in, Ze2_desired, Zr_desired", [([0, 5, 0, 5, 0], (None, None), [5, 0, 10, 0, 5], [0, 0, 0, 0, 0]), ([1, 5, 0, 5, 2], (-1, 2), [5, 0, 10, 0, 5], [1, 0, 0, 0, 2]), ([1, 2, 6, 0, 6, 3], (-1, 2), [0, 6, 0, 12, 0, 6], [1, 2, 0, 0, 0, 3]) ]) def test_envelope_complex_signals(self, Z, bp_in, Ze2_desired, Zr_desired, xp): """Test envelope calculation with complex-valued test signals. We only need to test for the complex envelope here, since the ``Nones``s in the bandpass filter were already tested in the previous test. 
""" Z = xp.asarray(Z, dtype=xp.float64) Ze2_desired = xp.asarray(Ze2_desired, dtype=xp.complex128) Zr_desired = xp.asarray(Zr_desired, dtype=xp.complex128) z = sp_fft.ifft(sp_fft.ifftshift(Z)) ze2, zr = xp.unstack(envelope(z, bp_in, residual='all', squared=True)) Ze2, Zr = (sp_fft.fftshift(sp_fft.fft(z_)) for z_ in (ze2, zr)) self.assert_close(Ze2, Ze2_desired, msg="Envelope calculation error", xp=xp) self.assert_close(Zr, Zr_desired, msg="Residual calculation error", xp=xp) @skip_xp_backends("jax.numpy", reason="XXX: immutable arrays") def test_envelope_verify_axis_parameter(self, xp): """Test for multi-channel envelope calculations. """ dt_r = xp_default_dtype(xp) dt_c = xp.complex64 if dt_r == xp.float32 else xp.complex128 z = sp_fft.irfft(xp.asarray([[1.0, 0, 2, 2, 0], [7, 0, 4, 4, 0]], dtype=dt_r)) Ze2_desired = xp.asarray([[4, 2, 0, 0, 0], [16, 8, 0, 0, 0]], dtype=dt_c) Zr_desired = xp.asarray([[1, 0, 0, 0, 0], [7, 0, 0, 0, 0]], dtype=dt_c) ze2, zr = xp.unstack(envelope(z, squared=True, axis=1)) ye2T, yrT = xp.unstack(envelope(z.T, squared=True, axis=0)) Ze2, Ye2, Zr, Yr = (sp_fft.rfft(z_) for z_ in (ze2, ye2T.T, zr, yrT.T)) self.assert_close(Ze2, Ze2_desired, msg="2d envelope calculation error", xp=xp) self.assert_close(Zr, Zr_desired, msg="2d residual calculation error", xp=xp) self.assert_close( Ye2, Ze2_desired, msg="Transposed 2d envelope calc. error", xp=xp ) self.assert_close( Yr, Zr_desired, msg="Transposed 2d residual calc. error", xp=xp ) @skip_xp_backends("jax.numpy", reason="XXX: immutable arrays") def test_envelope_verify_axis_parameter_complex(self, xp): """Test for multi-channel envelope calculations with complex values. 
""" dt_r = xp_default_dtype(xp) dt_c = xp.complex64 if dt_r == xp.float32 else xp.complex128 inp = xp.asarray([[1.0, 5, 0, 5, 2], [1, 10, 0, 10, 2]], dtype=dt_r) z = sp_fft.ifft(sp_fft.ifftshift(inp, axes=1)) Ze2_des = xp.asarray([[5, 0, 10, 0, 5], [20, 0, 40, 0, 20],], dtype=dt_c) Zr_des = xp.asarray([[1, 0, 0, 0, 2], [1, 0, 0, 0, 2]], dtype=dt_c) kw = dict(bp_in=(-1, 2), residual='all', squared=True) ze2, zr = xp.unstack(envelope(z, axis=1, **kw)) ye2T, yrT = xp.unstack(envelope(z.T, axis=0, **kw)) Ze2, Ye2, Zr, Yr = (sp_fft.fftshift(sp_fft.fft(z_), axes=1) for z_ in (ze2, ye2T.T, zr, yrT.T)) self.assert_close(Ze2, Ze2_des, msg="2d envelope calculation error", xp=xp) self.assert_close(Zr, Zr_des, msg="2d residual calculation error", xp=xp) self.assert_close( Ye2, Ze2_des, msg="Transposed 2d envelope calc. error", xp=xp ) self.assert_close(Yr, Zr_des, msg="Transposed 2d residual calc. error", xp=xp) @skip_xp_backends("jax.numpy", reason="XXX: immutable arrays") @pytest.mark.parametrize('X', [[4, 0, 0, 1, 2], [4, 0, 0, 2, 1, 2]]) def test_compare_envelope_hilbert(self, X, xp): """Compare output of `envelope()` and `hilbert()`. """ X = xp.asarray(X, dtype=xp.float64) x = sp_fft.irfft(X) e_hil = xp.abs(hilbert(x)) e_env = envelope(x, (None, None), residual=None) self.assert_close(e_hil, e_env, msg="Hilbert-Envelope comparison error", xp=xp) def test_nyquist(self): """Test behavior when input is a cosine at the Nyquist frequency. Resampling even length signals, requires accounting for unpaired bins at the Nyquist frequency (consults the source code of `resample`). Since `envelope` excludes the Nyquist frequency from the envelope calculation, only the residues need to be investigated. 
""" x4 = sp_fft.irfft([0, 0, 8]) # = [2, -2, 2, -2] x6 = signal.resample(x4, num=6) # = [2, -1, -1, 2, -1, -1] y6, y6_res = envelope(x4, n_out=6, residual='all') # real-valued case z6, z6_res = envelope(x4 + 0j, n_out=6, residual='all') # complex-valued case xp_assert_close(y6, np.zeros(6), atol=1e-12) xp_assert_close(y6_res, x6, atol=1e-12) xp_assert_close(z6, np.zeros(6, dtype=z6.dtype), atol=1e-12) xp_assert_close(z6_res, x6.astype(z6.dtype), atol=1e-12)
TestEnvelope
python
allegroai__clearml
clearml/backend_api/services/v2_13/tasks.py
{ "start": 366509, "end": 368403 }
class ____(Response): """ Response of tasks.set_requirements endpoint. :param updated: Number of tasks updated (0 or 1) :type updated: int :param fields: Updated fields names and values :type fields: dict """ _service = "tasks" _action = "set_requirements" _version = "2.13" _schema = { "definitions": {}, "properties": { "fields": { "additionalProperties": True, "description": "Updated fields names and values", "type": ["object", "null"], }, "updated": { "description": "Number of tasks updated (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], }, }, "type": "object", } def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None: super(SetRequirementsResponse, self).__init__(**kwargs) self.updated = updated self.fields = fields @schema_property("updated") def updated(self) -> Optional[int]: return self._property_updated @updated.setter def updated(self, value: Optional[int]) -> None: if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value @schema_property("fields") def fields(self) -> Optional[dict]: return self._property_fields @fields.setter def fields(self, value: Optional[dict]) -> None: if value is None: self._property_fields = None return self.assert_isinstance(value, "fields", (dict,)) self._property_fields = value
SetRequirementsResponse
python
joblib__joblib
joblib/externals/loky/backend/synchronize.py
{ "start": 4268, "end": 4803 }
class ____(SemLock): def __init__(self, value=1): SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX) def get_value(self): if sys.platform == "darwin": raise NotImplementedError("OSX does not implement sem_getvalue") return self._semlock._get_value() def __repr__(self): try: value = self._semlock._get_value() except Exception: value = "unknown" return f"<{self.__class__.__name__}(value={value})>" # # Bounded semaphore #
Semaphore
python
scipy__scipy
scipy/optimize/tests/test_minimize_constrained.py
{ "start": 12716, "end": 20805 }
class ____: list_of_problems = [Maratos(), Maratos(constr_hess='2-point'), Maratos(constr_hess=SR1()), Maratos(constr_jac='2-point', constr_hess=SR1()), MaratosGradInFunc(), HyperbolicIneq(), HyperbolicIneq(constr_hess='3-point'), HyperbolicIneq(constr_hess=BFGS()), HyperbolicIneq(constr_jac='3-point', constr_hess=BFGS()), Rosenbrock(), IneqRosenbrock(), EqIneqRosenbrock(), BoundedRosenbrock(), Elec(n_electrons=2), Elec(n_electrons=2, constr_hess='2-point'), Elec(n_electrons=2, constr_hess=SR1()), Elec(n_electrons=2, constr_jac='3-point', constr_hess=SR1())] @pytest.mark.parametrize('prob', list_of_problems) @pytest.mark.parametrize('grad', ('prob.grad', '3-point', False)) @pytest.mark.parametrize('hess', ("prob.hess", '3-point', lambda: SR1(), lambda: BFGS(exception_strategy='damp_update'), lambda: BFGS(exception_strategy='skip_update'))) def test_list_of_problems(self, prob, grad, hess): grad = prob.grad if grad == "prob.grad" else grad hess = hess() if callable(hess) else hess hess = prob.hess if hess == "prob.hess" else hess # Remove exceptions if (grad in {'2-point', '3-point', 'cs', False} and hess in {'2-point', '3-point', 'cs'}): pytest.skip("Numerical Hessian needs analytical gradient") if prob.grad is True and grad in {'3-point', False}: pytest.skip("prob.grad incompatible with grad in {'3-point', False}") sensitive = (isinstance(prob, BoundedRosenbrock) and grad == '3-point' and isinstance(hess, BFGS)) if sensitive: pytest.xfail("Seems sensitive to initial conditions w/ Accelerate") with warnings.catch_warnings(): warnings.filterwarnings("ignore", "delta_grad == 0.0", UserWarning) result = minimize(prob.fun, prob.x0, method='trust-constr', jac=grad, hess=hess, bounds=prob.bounds, constraints=prob.constr) if prob.x_opt is not None: assert_array_almost_equal(result.x, prob.x_opt, decimal=5) # gtol if result.status == 1: assert_array_less(result.optimality, 1e-8) # xtol if result.status == 2: assert_array_less(result.tr_radius, 1e-8) if result.method == 
"tr_interior_point": assert_array_less(result.barrier_parameter, 1e-8) # check for max iter message = f"Invalid termination condition: {result.status}." assert result.status not in {0, 3}, message def test_default_jac_and_hess(self): def fun(x): return (x - 1) ** 2 bounds = [(-2, 2)] res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr') assert_array_almost_equal(res.x, 1, decimal=5) def test_default_hess(self): def fun(x): return (x - 1) ** 2 bounds = [(-2, 2)] res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr', jac='2-point') assert_array_almost_equal(res.x, 1, decimal=5) def test_no_constraints(self): prob = Rosenbrock() result = minimize(prob.fun, prob.x0, method='trust-constr', jac=prob.grad, hess=prob.hess) result1 = minimize(prob.fun, prob.x0, method='L-BFGS-B', jac='2-point') result2 = minimize(prob.fun, prob.x0, method='L-BFGS-B', jac='3-point') assert_array_almost_equal(result.x, prob.x_opt, decimal=5) assert_array_almost_equal(result1.x, prob.x_opt, decimal=5) assert_array_almost_equal(result2.x, prob.x_opt, decimal=5) def test_hessp(self): prob = Maratos() def hessp(x, p): H = prob.hess(x) return H.dot(p) result = minimize(prob.fun, prob.x0, method='trust-constr', jac=prob.grad, hessp=hessp, bounds=prob.bounds, constraints=prob.constr) if prob.x_opt is not None: assert_array_almost_equal(result.x, prob.x_opt, decimal=2) # gtol if result.status == 1: assert_array_less(result.optimality, 1e-8) # xtol if result.status == 2: assert_array_less(result.tr_radius, 1e-8) if result.method == "tr_interior_point": assert_array_less(result.barrier_parameter, 1e-8) # max iter if result.status in (0, 3): raise RuntimeError("Invalid termination condition.") def test_args(self): prob = MaratosTestArgs("a", 234) result = minimize(prob.fun, prob.x0, ("a", 234), method='trust-constr', jac=prob.grad, hess=prob.hess, bounds=prob.bounds, constraints=prob.constr) if prob.x_opt is not None: assert_array_almost_equal(result.x, prob.x_opt, 
decimal=2) # gtol if result.status == 1: assert_array_less(result.optimality, 1e-8) # xtol if result.status == 2: assert_array_less(result.tr_radius, 1e-8) if result.method == "tr_interior_point": assert_array_less(result.barrier_parameter, 1e-8) # max iter if result.status in (0, 3): raise RuntimeError("Invalid termination condition.") def test_raise_exception(self): prob = Maratos() message = "Whenever the gradient is estimated via finite-differences" with pytest.raises(ValueError, match=message): minimize(prob.fun, prob.x0, method='trust-constr', jac='2-point', hess='2-point', constraints=prob.constr) def test_issue_9044(self): # https://github.com/scipy/scipy/issues/9044 # Test the returned `OptimizeResult` contains keys consistent with # other solvers. def callback(x, info): assert_('nit' in info) assert_('niter' in info) result = minimize(lambda x: x**2, [0], jac=lambda x: 2*x, hess=lambda x: 2, callback=callback, method='trust-constr') assert_(result.get('success')) assert_(result.get('nit', -1) == 1) # Also check existence of the 'niter' attribute, for backward # compatibility assert_(result.get('niter', -1) == 1) def test_issue_15093(self): # scipy docs define bounds as inclusive, so it shouldn't be # an issue to set x0 on the bounds even if keep_feasible is # True. Previously, trust-constr would treat bounds as # exclusive. x0 = np.array([0., 0.5]) def obj(x): x1 = x[0] x2 = x[1] return x1 ** 2 + x2 ** 2 bounds = Bounds(np.array([0., 0.]), np.array([1., 1.]), keep_feasible=True) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "delta_grad == 0.0", UserWarning) result = minimize( method='trust-constr', fun=obj, x0=x0, bounds=bounds) assert result['success']
TestTrustRegionConstr
python
huggingface__transformers
src/transformers/models/data2vec/modeling_data2vec_audio.py
{ "start": 3146, "end": 3515 }
class ____(nn.Module): def __init__(self, num_conv_pos_embeddings): super().__init__() self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def forward(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :, : -self.num_pad_remove] return hidden_states
Data2VecAudioPadLayer
python
django-haystack__django-haystack
haystack/fields.py
{ "start": 9831, "end": 10250 }
class ____(SearchField): field_type = "string" def __init__(self, **kwargs): if kwargs.get("facet_class") is None: kwargs["facet_class"] = FacetDecimalField super().__init__(**kwargs) def prepare(self, obj): return self.convert(super().prepare(obj)) def convert(self, value): if value is None: return None return str(value)
DecimalField
python
urllib3__urllib3
test/with_dummyserver/test_chunked_transfer.py
{ "start": 315, "end": 12644 }
class ____(SocketDummyServerTestCase): def start_chunked_handler(self) -> None: self.buffer = b"" def socket_handler(listener: socket.socket) -> None: sock = listener.accept()[0] while not self.buffer.endswith(b"\r\n0\r\n\r\n"): self.buffer += sock.recv(65536) sock.send( b"HTTP/1.1 200 OK\r\n" b"Content-type: text/plain\r\n" b"Content-Length: 0\r\n" b"\r\n" ) sock.close() self._start_server(socket_handler) @pytest.mark.parametrize( "chunks", [ ["foo", "bar", "", "bazzzzzzzzzzzzzzzzzzzzzz"], [b"foo", b"bar", b"", b"bazzzzzzzzzzzzzzzzzzzzzz"], ], ) def test_chunks(self, chunks: list[bytes | str]) -> None: self.start_chunked_handler() with HTTPConnectionPool(self.host, self.port, retries=False) as pool: pool.urlopen("GET", "/", body=chunks, headers=dict(DNT="1"), chunked=True) # type: ignore[arg-type] assert b"Transfer-Encoding" in self.buffer body = self.buffer.split(b"\r\n\r\n", 1)[1] lines = body.split(b"\r\n") # Empty chunks should have been skipped, as this could not be distinguished # from terminating the transmission for i, chunk in enumerate( [c.decode() if isinstance(c, bytes) else c for c in chunks if c] ): assert lines[i * 2] == hex(len(chunk))[2:].encode("utf-8") assert lines[i * 2 + 1] == chunk.encode("utf-8") def _test_body( self, data: ( bytes | str | io.BytesIO | io.StringIO | typing.Iterable[bytes] | typing.Iterable[str] | None ), expected_data: bytes | None = None, ) -> None: self.start_chunked_handler() with HTTPConnectionPool(self.host, self.port, retries=False) as pool: pool.urlopen("GET", "/", body=data, chunked=True) # type: ignore[arg-type] header, body = self.buffer.split(b"\r\n\r\n", 1) assert b"Transfer-Encoding: chunked" in header.split(b"\r\n") if data: if expected_data is not None: bdata = expected_data else: assert isinstance(data, (bytes, str)) bdata = data if isinstance(data, bytes) else data.encode("utf-8") assert b"\r\n" + bdata + b"\r\n" in body assert body.endswith(b"\r\n0\r\n\r\n") len_str = body.split(b"\r\n", 1)[0] stated_len = 
int(len_str, 16) assert stated_len == len(bdata) else: assert body == b"0\r\n\r\n" def test_bytestring_body(self) -> None: self._test_body(b"thisshouldbeonechunk\r\nasdf") def test_unicode_body(self) -> None: self._test_body( "thisshouldbeonechunk\r\näöüß\xff", expected_data=b"thisshouldbeonechunk\r\n\xc3\xa4\xc3\xb6\xc3\xbc\xc3\x9f\xc3\xbf", ) @pytest.mark.parametrize( "bytes_data", [ b"thisshouldbeonechunk\r\n\xc3\xa4\xc3\xb6\xc3\xbc\xc3\x9f\xc3\xbf", # utf-8 b"thisshouldbeonechunk\r\n\xe4\xf6\xfc\xdf\xff", # latin-1 ], ) def test_bytes_body_fileio(self, bytes_data: bytes) -> None: self._test_body(io.BytesIO(bytes_data), expected_data=bytes_data) def test_unicode_body_fileio(self) -> None: self._test_body( io.StringIO("thisshouldbeonechunk\r\näöüß\xff"), expected_data=b"thisshouldbeonechunk\r\n\xc3\xa4\xc3\xb6\xc3\xbc\xc3\x9f\xc3\xbf", ) @pytest.mark.parametrize( "bytes_data", [ b"thisshouldbeonechunk\r\n\xc3\xa4\xc3\xb6\xc3\xbc\xc3\x9f\xc3\xbf", # utf-8 b"thisshouldbeonechunk\r\n\xe4\xf6\xfc\xdf\xff", # latin-1 ], ) def test_bytes_body_iterable(self, bytes_data: bytes) -> None: def send_body() -> typing.Iterable[bytes]: yield bytes_data self._test_body(send_body(), expected_data=bytes_data) def test_unicode_body_iterable(self) -> None: def send_body() -> typing.Iterable[str]: yield "thisshouldbeonechunk\r\näöüß\xff" self._test_body( send_body(), expected_data=b"thisshouldbeonechunk\r\n\xc3\xa4\xc3\xb6\xc3\xbc\xc3\x9f\xc3\xbf", ) def test_empty_body(self) -> None: self._test_body(None) def test_empty_string_body(self) -> None: self._test_body("") def test_empty_iterable_body(self) -> None: self._test_body(None) def _get_header_lines(self, prefix: bytes) -> list[bytes]: header_block = self.buffer.split(b"\r\n\r\n", 1)[0].lower() header_lines = header_block.split(b"\r\n")[1:] return [x for x in header_lines if x.startswith(prefix)] def test_removes_duplicate_host_header(self) -> None: self.start_chunked_handler() chunks = [b"foo", b"bar", b"", 
b"bazzzzzzzzzzzzzzzzzzzzzz"] with HTTPConnectionPool(self.host, self.port, retries=False) as pool: pool.urlopen( "GET", "/", body=chunks, headers={"Host": "test.org"}, chunked=True ) host_headers = self._get_header_lines(b"host") assert len(host_headers) == 1 def test_provides_default_host_header(self) -> None: self.start_chunked_handler() chunks = [b"foo", b"bar", b"", b"bazzzzzzzzzzzzzzzzzzzzzz"] with HTTPConnectionPool(self.host, self.port, retries=False) as pool: pool.urlopen("GET", "/", body=chunks, chunked=True) host_headers = self._get_header_lines(b"host") assert len(host_headers) == 1 def test_provides_default_user_agent_header(self) -> None: self.start_chunked_handler() chunks = [b"foo", b"bar", b"", b"bazzzzzzzzzzzzzzzzzzzzzz"] with HTTPConnectionPool(self.host, self.port, retries=False) as pool: pool.urlopen("GET", "/", body=chunks, chunked=True) ua_headers = self._get_header_lines(b"user-agent") assert len(ua_headers) == 1 def test_preserve_user_agent_header(self) -> None: self.start_chunked_handler() chunks = [b"foo", b"bar", b"", b"bazzzzzzzzzzzzzzzzzzzzzz"] with HTTPConnectionPool(self.host, self.port, retries=False) as pool: pool.urlopen( "GET", "/", body=chunks, headers={"user-Agent": "test-agent"}, chunked=True, ) ua_headers = self._get_header_lines(b"user-agent") # Validate that there is only one User-Agent header. assert len(ua_headers) == 1 # Validate that the existing User-Agent header is the one that was # provided. 
assert ua_headers[0] == b"user-agent: test-agent" def test_remove_user_agent_header(self) -> None: self.start_chunked_handler() chunks = [b"foo", b"bar", b"", b"bazzzzzzzzzzzzzzzzzzzzzz"] with HTTPConnectionPool(self.host, self.port, retries=False) as pool: pool.urlopen( "GET", "/", body=chunks, headers={"User-Agent": SKIP_HEADER}, chunked=True, ) ua_headers = self._get_header_lines(b"user-agent") assert len(ua_headers) == 0 def test_provides_default_transfer_encoding_header(self) -> None: self.start_chunked_handler() chunks = [b"foo", b"bar", b"", b"bazzzzzzzzzzzzzzzzzzzzzz"] with HTTPConnectionPool(self.host, self.port, retries=False) as pool: pool.urlopen("GET", "/", body=chunks, chunked=True) te_headers = self._get_header_lines(b"transfer-encoding") assert len(te_headers) == 1 def test_preserve_transfer_encoding_header(self) -> None: self.start_chunked_handler() chunks = [b"foo", b"bar", b"", b"bazzzzzzzzzzzzzzzzzzzzzz"] with HTTPConnectionPool(self.host, self.port, retries=False) as pool: pool.urlopen( "GET", "/", body=chunks, headers={"transfer-Encoding": "test-transfer-encoding"}, chunked=True, ) te_headers = self._get_header_lines(b"transfer-encoding") # Validate that there is only one Transfer-Encoding header. assert len(te_headers) == 1 # Validate that the existing Transfer-Encoding header is the one that # was provided. 
assert te_headers[0] == b"transfer-encoding: test-transfer-encoding" def test_preserve_chunked_on_retry_after(self) -> None: self.chunked_requests = 0 self.socks: list[socket.socket] = [] def socket_handler(listener: socket.socket) -> None: for _ in range(2): sock = listener.accept()[0] self.socks.append(sock) request = consume_socket(sock) if b"Transfer-Encoding: chunked" in request.split(b"\r\n"): self.chunked_requests += 1 sock.send( b"HTTP/1.1 429 Too Many Requests\r\n" b"Content-Type: text/plain\r\n" b"Retry-After: 1\r\n" b"Content-Length: 0\r\n" b"Connection: close\r\n" b"\r\n" ) self._start_server(socket_handler) with HTTPConnectionPool(self.host, self.port) as pool: retries = Retry(total=1) pool.urlopen("GET", "/", chunked=True, retries=retries) for sock in self.socks: sock.close() assert self.chunked_requests == 2 def test_preserve_chunked_on_redirect( self, monkeypatch: pytest.MonkeyPatch ) -> None: self.chunked_requests = 0 def socket_handler(listener: socket.socket) -> None: for i in range(2): sock = listener.accept()[0] request = ConnectionMarker.consume_request(sock) if b"Transfer-Encoding: chunked" in request.split(b"\r\n"): self.chunked_requests += 1 if i == 0: sock.sendall( b"HTTP/1.1 301 Moved Permanently\r\n" b"Location: /redirect\r\n\r\n" ) else: sock.sendall(b"HTTP/1.1 200 OK\r\n\r\n") sock.close() self._start_server(socket_handler) with ConnectionMarker.mark(monkeypatch): with HTTPConnectionPool(self.host, self.port) as pool: retries = Retry(redirect=1) pool.urlopen( "GET", "/", chunked=True, preload_content=False, retries=retries ) assert self.chunked_requests == 2 def test_preserve_chunked_on_broken_connection( self, monkeypatch: pytest.MonkeyPatch ) -> None: self.chunked_requests = 0 def socket_handler(listener: socket.socket) -> None: for i in range(2): sock = listener.accept()[0] request = ConnectionMarker.consume_request(sock) if b"Transfer-Encoding: chunked" in request.split(b"\r\n"): self.chunked_requests += 1 if i == 0: # Bad HTTP 
version will trigger a connection close sock.sendall(b"HTTP/0.5 200 OK\r\n\r\n") else: sock.sendall(b"HTTP/1.1 200 OK\r\n\r\n") sock.close() self._start_server(socket_handler) with ConnectionMarker.mark(monkeypatch): with HTTPConnectionPool(self.host, self.port) as pool: retries = Retry(read=1) pool.urlopen( "GET", "/", chunked=True, preload_content=False, retries=retries ) assert self.chunked_requests == 2
TestChunkedTransfer
python
doocs__leetcode
solution/0400-0499/0429.N-ary Tree Level Order Traversal/Solution2.py
{ "start": 152, "end": 534 }
class ____: def levelOrder(self, root: 'Node') -> List[List[int]]: def dfs(root, i): if root is None: return if len(ans) <= i: ans.append([]) ans[i].append(root.val) for child in root.children: dfs(child, i + 1) ans = [] dfs(root, 0) return ans
Solution
python
scipy__scipy
scipy/sparse/linalg/_matfuncs.py
{ "start": 10215, "end": 29338 }
class ____: """ Help lazily evaluate a matrix exponential. The idea is to not do more work than we need for high expm precision, so we lazily compute matrix powers and store or precompute other properties of the matrix. """ def __init__(self, A, structure=None, use_exact_onenorm=False): """ Initialize the object. Parameters ---------- A : a dense or sparse square numpy matrix or ndarray The matrix to be exponentiated. structure : str, optional A string describing the structure of matrix `A`. Only `upper_triangular` is currently supported. use_exact_onenorm : bool, optional If True then only the exact one-norm of matrix powers and products will be used. Otherwise, the one-norm of powers and products may initially be estimated. """ self.A = A self._A2 = None self._A4 = None self._A6 = None self._A8 = None self._A10 = None self._d4_exact = None self._d6_exact = None self._d8_exact = None self._d10_exact = None self._d4_approx = None self._d6_approx = None self._d8_approx = None self._d10_approx = None self.ident = _ident_like(A) self.structure = structure self.use_exact_onenorm = use_exact_onenorm @property def A2(self): if self._A2 is None: self._A2 = _smart_matrix_product( self.A, self.A, structure=self.structure) return self._A2 @property def A4(self): if self._A4 is None: self._A4 = _smart_matrix_product( self.A2, self.A2, structure=self.structure) return self._A4 @property def A6(self): if self._A6 is None: self._A6 = _smart_matrix_product( self.A4, self.A2, structure=self.structure) return self._A6 @property def A8(self): if self._A8 is None: self._A8 = _smart_matrix_product( self.A6, self.A2, structure=self.structure) return self._A8 @property def A10(self): if self._A10 is None: self._A10 = _smart_matrix_product( self.A4, self.A6, structure=self.structure) return self._A10 @property def d4_tight(self): if self._d4_exact is None: self._d4_exact = _onenorm(self.A4)**(1/4.) 
return self._d4_exact @property def d6_tight(self): if self._d6_exact is None: self._d6_exact = _onenorm(self.A6)**(1/6.) return self._d6_exact @property def d8_tight(self): if self._d8_exact is None: self._d8_exact = _onenorm(self.A8)**(1/8.) return self._d8_exact @property def d10_tight(self): if self._d10_exact is None: self._d10_exact = _onenorm(self.A10)**(1/10.) return self._d10_exact @property def d4_loose(self): if self.use_exact_onenorm: return self.d4_tight if self._d4_exact is not None: return self._d4_exact else: if self._d4_approx is None: self._d4_approx = _onenormest_matrix_power(self.A2, 2, structure=self.structure)**(1/4.) return self._d4_approx @property def d6_loose(self): if self.use_exact_onenorm: return self.d6_tight if self._d6_exact is not None: return self._d6_exact else: if self._d6_approx is None: self._d6_approx = _onenormest_matrix_power(self.A2, 3, structure=self.structure)**(1/6.) return self._d6_approx @property def d8_loose(self): if self.use_exact_onenorm: return self.d8_tight if self._d8_exact is not None: return self._d8_exact else: if self._d8_approx is None: self._d8_approx = _onenormest_matrix_power(self.A4, 2, structure=self.structure)**(1/8.) return self._d8_approx @property def d10_loose(self): if self.use_exact_onenorm: return self.d10_tight if self._d10_exact is not None: return self._d10_exact else: if self._d10_approx is None: self._d10_approx = _onenormest_product((self.A4, self.A6), structure=self.structure)**(1/10.) return self._d10_approx def pade3(self): b = (120., 60., 12., 1.) U = _smart_matrix_product(self.A, b[3]*self.A2 + b[1]*self.ident, structure=self.structure) V = b[2]*self.A2 + b[0]*self.ident return U, V def pade5(self): b = (30240., 15120., 3360., 420., 30., 1.) 
U = _smart_matrix_product(self.A, b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident, structure=self.structure) V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident return U, V def pade7(self): b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.) U = _smart_matrix_product(self.A, b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident, structure=self.structure) V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident return U, V def pade9(self): b = (17643225600., 8821612800., 2075673600., 302702400., 30270240., 2162160., 110880., 3960., 90., 1.) U = _smart_matrix_product(self.A, (b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident), structure=self.structure) V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident) return U, V def pade13_scaled(self, s): b = (64764752532480000., 32382376266240000., 7771770303897600., 1187353796428800., 129060195264000., 10559470521600., 670442572800., 33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.) B = self.A * 2**-s B2 = self.A2 * 2**(-2*s) B4 = self.A4 * 2**(-4*s) B6 = self.A6 * 2**(-6*s) U2 = _smart_matrix_product(B6, b[13]*B6 + b[11]*B4 + b[9]*B2, structure=self.structure) U = _smart_matrix_product(B, (U2 + b[7]*B6 + b[5]*B4 + b[3]*B2 + b[1]*self.ident), structure=self.structure) V2 = _smart_matrix_product(B6, b[12]*B6 + b[10]*B4 + b[8]*B2, structure=self.structure) V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident return U, V def expm(A): """ Compute the matrix exponential using Pade approximation. Parameters ---------- A : (M,M) array_like or sparse array 2D Array or Matrix (sparse or dense) to be exponentiated Returns ------- expA : (M,M) ndarray Matrix exponential of `A` Notes ----- This is algorithm (6.1) which is a simplification of algorithm (5.1). .. versionadded:: 0.12.0 References ---------- .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009) "A New Scaling and Squaring Algorithm for the Matrix Exponential." 
SIAM Journal on Matrix Analysis and Applications. 31 (3). pp. 970-989. ISSN 1095-7162 Examples -------- >>> from scipy.sparse import csc_array >>> from scipy.sparse.linalg import expm >>> A = csc_array([[1, 0, 0], [0, 2, 0], [0, 0, 3]]) >>> A.toarray() array([[1, 0, 0], [0, 2, 0], [0, 0, 3]], dtype=int64) >>> Aexp = expm(A) >>> Aexp <Compressed Sparse Column sparse array of dtype 'float64' with 3 stored elements and shape (3, 3)> >>> Aexp.toarray() array([[ 2.71828183, 0. , 0. ], [ 0. , 7.3890561 , 0. ], [ 0. , 0. , 20.08553692]]) """ return _expm(A, use_exact_onenorm='auto') def _expm(A, use_exact_onenorm): # Core of expm, separated to allow testing exact and approximate # algorithms. # Avoid indiscriminate asarray() to allow sparse or other strange arrays. if isinstance(A, list | tuple | np.matrix): A = np.asarray(A) if len(A.shape) != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected a square matrix') # gracefully handle size-0 input, # carefully handling sparse scenario if A.shape == (0, 0): out = np.zeros([0, 0], dtype=A.dtype) if issparse(A) or is_pydata_spmatrix(A): return A.__class__(out) return out # Trivial case if A.shape == (1, 1): out = [[np.exp(A[0, 0])]] # Avoid indiscriminate casting to ndarray to # allow for sparse or other strange arrays if issparse(A) or is_pydata_spmatrix(A): return A.__class__(out) return np.array(out) # Ensure input is of float type, to avoid integer overflows etc. if ((isinstance(A, np.ndarray) or issparse(A) or is_pydata_spmatrix(A)) and not np.issubdtype(A.dtype, np.inexact)): A = A.astype(float) # Detect upper triangularity. structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None if use_exact_onenorm == "auto": # Hardcode a matrix order threshold for exact vs. estimated one-norms. use_exact_onenorm = A.shape[0] < 200 # Track functions of A to help compute the matrix exponential. h = _ExpmPadeHelper( A, structure=structure, use_exact_onenorm=use_exact_onenorm) # Try Pade order 3. 
eta_1 = max(h.d4_loose, h.d6_loose) if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0: U, V = h.pade3() return _solve_P_Q(U, V, structure=structure) # Try Pade order 5. eta_2 = max(h.d4_tight, h.d6_loose) if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0: U, V = h.pade5() return _solve_P_Q(U, V, structure=structure) # Try Pade orders 7 and 9. eta_3 = max(h.d6_tight, h.d8_loose) if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0: U, V = h.pade7() return _solve_P_Q(U, V, structure=structure) if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0: U, V = h.pade9() return _solve_P_Q(U, V, structure=structure) # Use Pade order 13. eta_4 = max(h.d8_loose, h.d10_loose) eta_5 = min(eta_3, eta_4) theta_13 = 4.25 # Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13 if eta_5 == 0: # Nilpotent special case s = 0 else: s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0) s = s + _ell(2**-s * h.A, 13) U, V = h.pade13_scaled(s) X = _solve_P_Q(U, V, structure=structure) if structure == UPPER_TRIANGULAR: # Invoke Code Fragment 2.1. X = _fragment_2_1(X, h.A, s) else: # X = r_13(A)^(2^s) by repeated squaring. for i in range(s): X = X.dot(X) return X def _solve_P_Q(U, V, structure=None): """ A helper function for expm_2009. Parameters ---------- U : ndarray Pade numerator. V : ndarray Pade denominator. structure : str, optional A string describing the structure of both matrices `U` and `V`. Only `upper_triangular` is currently supported. Notes ----- The `structure` argument is inspired by similar args for theano and cvxopt functions. 
""" P = U + V Q = -U + V if issparse(U) or is_pydata_spmatrix(U): return spsolve(Q, P) elif structure is None: return solve(Q, P) elif structure == UPPER_TRIANGULAR: return solve_triangular(Q, P) else: raise ValueError('unsupported matrix structure: ' + str(structure)) def _exp_sinch(a, x): """ Stably evaluate exp(a)*sinh(x)/x Notes ----- The strategy of falling back to a sixth order Taylor expansion was suggested by the Spallation Neutron Source docs which was found on the internet by google search. http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html The details of the cutoff point and the Horner-like evaluation was picked without reference to anything in particular. Note that sinch is not currently implemented in scipy.special, whereas the "engineer's" definition of sinc is implemented. The implementation of sinc involves a scaling factor of pi that distinguishes it from the "mathematician's" version of sinc. """ # If x is small then use sixth order Taylor expansion. # How small is small? I am using the point where the relative error # of the approximation is less than 1e-14. # If x is large then directly evaluate sinh(x) / x. if abs(x) < 0.0135: x2 = x*x return np.exp(a) * (1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.)))) else: return (np.exp(a + x) - np.exp(a - x)) / (2*x) def _eq_10_42(lam_1, lam_2, t_12): """ Equation (10.42) of Functions of Matrices: Theory and Computation. Notes ----- This is a helper function for _fragment_2_1 of expm_2009. Equation (10.42) is on page 251 in the section on Schur algorithms. In particular, section 10.4.3 explains the Schur-Parlett algorithm. expm([[lam_1, t_12], [0, lam_1]) = [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)], [0, exp(lam_2)] """ # The plain formula t_12 * (exp(lam_2) - exp(lam_2)) / (lam_2 - lam_1) # apparently suffers from cancellation, according to Higham's textbook. 
# A nice implementation of sinch, defined as sinh(x)/x, # will apparently work around the cancellation. a = 0.5 * (lam_1 + lam_2) b = 0.5 * (lam_1 - lam_2) return t_12 * _exp_sinch(a, b) def _fragment_2_1(X, T, s): """ A helper function for expm_2009. Notes ----- The argument X is modified in-place, but this modification is not the same as the returned value of the function. This function also takes pains to do things in ways that are compatible with sparse arrays, for example by avoiding fancy indexing and by using methods of the matrices whenever possible instead of using functions of the numpy or scipy libraries themselves. """ # Form X = r_m(2^-s T) # Replace diag(X) by exp(2^-s diag(T)). n = X.shape[0] diag_T = np.ravel(T.diagonal().copy()) # Replace diag(X) by exp(2^-s diag(T)). scale = 2 ** -s exp_diag = np.exp(scale * diag_T) for k in range(n): X[k, k] = exp_diag[k] for i in range(s-1, -1, -1): X = X.dot(X) # Replace diag(X) by exp(2^-i diag(T)). scale = 2 ** -i exp_diag = np.exp(scale * diag_T) for k in range(n): X[k, k] = exp_diag[k] # Replace (first) superdiagonal of X by explicit formula # for superdiagonal of exp(2^-i T) from Eq (10.42) of # the author's 2008 textbook # Functions of Matrices: Theory and Computation. for k in range(n-1): lam_1 = scale * diag_T[k] lam_2 = scale * diag_T[k+1] t_12 = scale * T[k, k+1] value = _eq_10_42(lam_1, lam_2, t_12) X[k, k+1] = value # Return the updated X matrix. return X def _ell(A, m): """ A helper function for expm_2009. Parameters ---------- A : linear operator A linear operator whose norm of power we care about. m : int The power of the linear operator Returns ------- value : int A value related to a bound. """ if len(A.shape) != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected A to be like a square matrix') # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper. # They are coefficients of terms of a generating function series expansion. 
c_i = {3: 100800., 5: 10059033600., 7: 4487938430976000., 9: 5914384781877411840000., 13: 113250775606021113483283660800000000. } abs_c_recip = c_i[m] # This is explained after Eq. (1.2) of the 2009 expm paper. # It is the "unit roundoff" of IEEE double precision arithmetic. u = 2**-53 # Compute the one-norm of matrix power p of abs(A). A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), 2*m + 1) # Treat zero norm as a special case. if not A_abs_onenorm: return 0 alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip) log2_alpha_div_u = np.log2(alpha/u) value = int(np.ceil(log2_alpha_div_u / (2 * m))) return max(value, 0) def matrix_power(A, power): """ Raise a square matrix to the integer power, `power`. For non-negative integers, ``A**power`` is computed using repeated matrix multiplications. Negative integers are not supported. Parameters ---------- A : (M, M) square sparse array or matrix sparse array that will be raised to power `power` power : int Exponent used to raise sparse array `A` Returns ------- A**power : (M, M) sparse array or matrix The output matrix will be the same shape as A, and will preserve the class of A, but the format of the output may be changed. Notes ----- This uses a recursive implementation of the matrix power. For computing the matrix power using a reasonably large `power`, this may be less efficient than computing the product directly, using A @ A @ ... @ A. This is contingent upon the number of nonzero entries in the matrix. .. 
versionadded:: 1.12.0 Examples -------- >>> from scipy import sparse >>> A = sparse.csc_array([[0,1,0],[1,0,1],[0,1,0]]) >>> A.todense() array([[0, 1, 0], [1, 0, 1], [0, 1, 0]]) >>> (A @ A).todense() array([[1, 0, 1], [0, 2, 0], [1, 0, 1]]) >>> A2 = sparse.linalg.matrix_power(A, 2) >>> A2.todense() array([[1, 0, 1], [0, 2, 0], [1, 0, 1]]) >>> A4 = sparse.linalg.matrix_power(A, 4) >>> A4.todense() array([[2, 0, 2], [0, 4, 0], [2, 0, 2]]) """ M, N = A.shape if M != N: raise TypeError('sparse matrix is not square') if isintlike(power): power = int(power) if power < 0: raise ValueError('exponent must be >= 0') if power == 0: return eye_array(M, dtype=A.dtype) if power == 1: return A.copy() tmp = matrix_power(A, power // 2) if power % 2: return A @ tmp @ tmp else: return tmp @ tmp else: raise ValueError("exponent must be an integer")
_ExpmPadeHelper
python
airbytehq__airbyte
airbyte-integrations/connectors/source-appsflyer/source_appsflyer/source.py
{ "start": 8051, "end": 8405 }
class ____: def request_params( self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None ) -> MutableMapping[str, Any]: params = super().request_params(stream_state, stream_slice, next_page_token) params["reattr"] = True return params
RetargetingMixin
python
ray-project__ray
python/ray/data/tests/preprocessors/test_scaler.py
{ "start": 10553, "end": 21696 }
class ____: """Test serialization/deserialization functionality for scaler preprocessors.""" def setup_method(self): """Set up test data.""" self.test_df = pd.DataFrame( { "feature1": [1, 2, 3, 4, 5], "feature2": [10, 20, 30, 40, 50], "feature3": [100, 200, 300, 400, 500], "other": ["a", "b", "c", "d", "e"], } ) self.test_dataset = ray.data.from_pandas(self.test_df) @pytest.mark.parametrize( "scaler_class,fit_data,expected_stats,transform_data", [ ( StandardScaler, None, # Use default self.test_df { "mean(feature1)": 3.0, "mean(feature2)": 30.0, "std(feature1)": np.sqrt(2.0), "std(feature2)": np.sqrt(200.0), }, pd.DataFrame( { "feature1": [6, 7, 8], "feature2": [60, 70, 80], "other": ["f", "g", "h"], } ), ), ( MinMaxScaler, None, # Use default self.test_df { "min(feature1)": 1, "min(feature2)": 10, "max(feature1)": 5, "max(feature2)": 50, }, pd.DataFrame( { "feature1": [6, 7, 8], "feature2": [60, 70, 80], "other": ["f", "g", "h"], } ), ), ( MaxAbsScaler, pd.DataFrame( { "feature1": [-5, -2, 0, 2, 5], "feature2": [-50, -20, 0, 20, 50], "other": ["a", "b", "c", "d", "e"], } ), { "abs_max(feature1)": 5, "abs_max(feature2)": 50, }, pd.DataFrame( { "feature1": [-6, 0, 6], "feature2": [-60, 0, 60], "other": ["f", "g", "h"], } ), ), ( RobustScaler, None, # Use default self.test_df { "low_quantile(feature1)": 2.0, "median(feature1)": 3.0, "high_quantile(feature1)": 4.0, "low_quantile(feature2)": 20.0, "median(feature2)": 30.0, "high_quantile(feature2)": 40.0, }, pd.DataFrame( { "feature1": [6, 7, 8], "feature2": [60, 70, 80], "other": ["f", "g", "h"], } ), ), ], ids=["StandardScaler", "MinMaxScaler", "MaxAbsScaler", "RobustScaler"], ) def test_scaler_serialization( self, scaler_class, fit_data, expected_stats, transform_data ): """Test scaler serialization for all scaler types.""" # Use custom fit data if provided, otherwise use default test dataset if fit_data is not None: fit_dataset = ray.data.from_pandas(fit_data) else: fit_dataset = self.test_dataset # Create and fit 
scaler scaler = scaler_class(columns=["feature1", "feature2"]) fitted_scaler = scaler.fit(fit_dataset) # Verify fitted stats match expected values assert fitted_scaler.stats_ == expected_stats, ( f"Stats mismatch for {scaler_class.__name__}:\n" f"Expected: {expected_stats}\n" f"Got: {fitted_scaler.stats_}" ) # Test CloudPickle serialization serialized = fitted_scaler.serialize() assert isinstance(serialized, bytes) assert serialized.startswith(SerializablePreprocessorBase.MAGIC_CLOUDPICKLE) # Test deserialization deserialized = SerializablePreprocessorBase.deserialize(serialized) assert deserialized.__class__.__name__ == scaler_class.__name__ assert deserialized.columns == ["feature1", "feature2"] assert deserialized._fitted # Verify stats are preserved after deserialization assert deserialized.stats_ == expected_stats, ( f"Deserialized stats mismatch for {scaler_class.__name__}:\n" f"Expected: {expected_stats}\n" f"Got: {deserialized.stats_}" ) # Verify each stat key exists and has correct value for stat_key, stat_value in expected_stats.items(): assert stat_key in deserialized.stats_ if isinstance(stat_value, float): assert np.isclose(deserialized.stats_[stat_key], stat_value) else: assert deserialized.stats_[stat_key] == stat_value # Test functional equivalence original_result = fitted_scaler.transform_batch(transform_data.copy()) deserialized_result = deserialized.transform_batch(transform_data.copy()) pd.testing.assert_frame_equal(original_result, deserialized_result) def test_scaler_with_output_columns_serialization(self): """Test scaler serialization with custom output columns.""" # Test with StandardScaler and output columns scaler = StandardScaler( columns=["feature1", "feature2"], output_columns=["scaled_feature1", "scaled_feature2"], ) fitted_scaler = scaler.fit(self.test_dataset) # Serialize and deserialize serialized = fitted_scaler.serialize() deserialized = SerializablePreprocessorBase.deserialize(serialized) # Verify output columns are preserved 
assert deserialized.output_columns == ["scaled_feature1", "scaled_feature2"] # Test functional equivalence test_df = pd.DataFrame( {"feature1": [6, 7, 8], "feature2": [60, 70, 80], "other": ["f", "g", "h"]} ) original_result = fitted_scaler.transform_batch(test_df.copy()) deserialized_result = deserialized.transform_batch(test_df.copy()) pd.testing.assert_frame_equal(original_result, deserialized_result) @pytest.mark.parametrize( "scaler_class", [StandardScaler, MinMaxScaler, MaxAbsScaler, RobustScaler], ids=["StandardScaler", "MinMaxScaler", "MaxAbsScaler", "RobustScaler"], ) def test_unfitted_scaler_serialization(self, scaler_class): """Test serialization of unfitted scalers.""" # Test unfitted scaler scaler = scaler_class(columns=["feature1", "feature2"]) # Serialize unfitted scaler serialized = scaler.serialize() deserialized = SerializablePreprocessorBase.deserialize(serialized) # Verify it's still unfitted assert not deserialized._fitted assert deserialized.columns == ["feature1", "feature2"] assert deserialized.__class__.__name__ == scaler_class.__name__ # Should raise error when trying to transform test_df = pd.DataFrame({"feature1": [1, 2, 3], "feature2": [10, 20, 30]}) with pytest.raises(PreprocessorNotFittedException): deserialized.transform_batch(test_df) @pytest.mark.parametrize( "scaler_class,expected_stats", [ ( StandardScaler, { "mean(feature1)": 3.0, "std(feature1)": np.sqrt(2.0), }, ), ( MinMaxScaler, { "min(feature1)": 1, "max(feature1)": 5, }, ), ( MaxAbsScaler, { "abs_max(feature1)": 5, }, ), ( RobustScaler, { "low_quantile(feature1)": 2.0, "median(feature1)": 3.0, "high_quantile(feature1)": 4.0, }, ), ], ids=["StandardScaler", "MinMaxScaler", "MaxAbsScaler", "RobustScaler"], ) def test_scaler_stats_preservation(self, scaler_class, expected_stats): """Test that scaler statistics are perfectly preserved during serialization.""" # Create scaler with known stats scaler = scaler_class(columns=["feature1"]) fitted_scaler = 
scaler.fit(self.test_dataset) # Verify fitted stats match expected values for stat_key, stat_value in expected_stats.items(): assert stat_key in fitted_scaler.stats_ if isinstance(stat_value, float): assert np.isclose(fitted_scaler.stats_[stat_key], stat_value) else: assert fitted_scaler.stats_[stat_key] == stat_value # Get original stats original_stats = fitted_scaler.stats_.copy() # Serialize and deserialize serialized = fitted_scaler.serialize() deserialized = SerializablePreprocessorBase.deserialize(serialized) # Verify stats are identical assert deserialized.stats_ == original_stats # Verify expected stat values are preserved for stat_key, stat_value in expected_stats.items(): assert stat_key in deserialized.stats_ if isinstance(stat_value, float): assert np.isclose(deserialized.stats_[stat_key], stat_value) else: assert deserialized.stats_[stat_key] == stat_value @pytest.mark.parametrize( "scaler_class", [StandardScaler, MinMaxScaler, MaxAbsScaler, RobustScaler], ids=["StandardScaler", "MinMaxScaler", "MaxAbsScaler", "RobustScaler"], ) def test_scaler_version_compatibility(self, scaler_class): """Test that scalers can be deserialized with version support.""" # Create and fit scaler scaler = scaler_class(columns=["feature1", "feature2"]) fitted_scaler = scaler.fit(self.test_dataset) # Serialize serialized = fitted_scaler.serialize() # Deserialize and verify version handling deserialized = SerializablePreprocessorBase.deserialize(serialized) assert deserialized.__class__.__name__ == scaler_class.__name__ assert deserialized._fitted # Test that it works correctly test_df = pd.DataFrame({"feature1": [6, 7, 8], "feature2": [60, 70, 80]}) result = deserialized.transform_batch(test_df) assert len(result.columns) == 2 # Should have the scaled columns assert "feature1" in result.columns assert "feature2" in result.columns if __name__ == "__main__": import sys sys.exit(pytest.main(["-sv", __file__]))
TestScalerSerialization
python
sphinx-doc__sphinx
sphinx/ext/doctest.py
{ "start": 5291, "end": 5431 }
class ____(TestDirective): option_spec: ClassVar[OptionSpec] = { 'skipif': directives.unchanged_required, }
TestsetupDirective
python
kamyu104__LeetCode-Solutions
Python/minimum-time-to-collect-all-apples-in-a-tree.py
{ "start": 3076, "end": 3837 }
class ____(object): def minTime(self, n, edges, hasApple): """ :type n: int :type edges: List[List[int]] :type hasApple: List[bool] :rtype: int """ def dfs(graph, par, node, has_subtree): result, extra = 0, int(hasApple[node]) for nei in graph[node]: if nei == par: continue count = dfs(graph, node, nei, hasApple) result += count extra |= bool(count) return result+extra graph = collections.defaultdict(list) for u, v in edges: graph[u].append(v) graph[v].append(u) return 2*max(dfs(graph, -1, 0, hasApple)-1, 0)
Solution2_Recu
python
airbytehq__airbyte
airbyte-integrations/connectors/source-microsoft-onedrive/source_microsoft_onedrive/stream_reader.py
{ "start": 2487, "end": 12079 }
class ____(AbstractFileBasedStreamReader): """ A stream reader for Microsoft OneDrive. Handles file enumeration and reading from OneDrive. """ ROOT_PATH = [".", "/"] def __init__(self): super().__init__() self._auth_client = None self._one_drive_client = None @property def config(self) -> SourceMicrosoftOneDriveSpec: return self._config @property def auth_client(self): # Lazy initialization of the auth_client if self._auth_client is None: self._auth_client = SourceMicrosoftOneDriveClient(self._config) return self._auth_client @property def one_drive_client(self): # Lazy initialization of the one_drive_client if self._one_drive_client is None: self._one_drive_client = self.auth_client.client return self._one_drive_client def get_access_token(self): # Directly fetch a new access token from the auth_client each time it's called return self.auth_client._get_access_token()["access_token"] @config.setter def config(self, value: SourceMicrosoftOneDriveSpec): """ The FileBasedSource reads and parses configuration from a file, then sets this configuration in its StreamReader. While it only uses keys from its abstract configuration, concrete StreamReader implementations may need additional keys for third-party authentication. Therefore, subclasses of AbstractFileBasedStreamReader should verify that the value in their config setter matches the expected config type for their StreamReader. """ assert isinstance(value, SourceMicrosoftOneDriveSpec) self._config = value @property @lru_cache(maxsize=None) def drives(self): """ Retrieves and caches OneDrive drives, including the user's drive based on authentication type. 
""" drives = self.one_drive_client.drives.get().execute_query() if self.config.credentials.auth_type == "Client": my_drive = self.one_drive_client.me.drive.get().execute_query() else: my_drive = ( self.one_drive_client.users.get_by_principal_name(self.config.credentials.user_principal_name).drive.get().execute_query() ) drives.add_child(my_drive) # filter only onedrive drives drives = list(filter(lambda drive: drive.drive_type in ["personal", "business"], drives)) return drives def _get_shared_drive_object(self, drive_id: str, object_id: str, path: str) -> List[Tuple[str, str, datetime]]: """ Retrieves a list of all nested files under the specified object. Args: drive_id: The ID of the drive containing the object. object_id: The ID of the object to start the search from. Returns: A list of tuples containing file information (name, download URL, and last modified datetime). Raises: RuntimeError: If an error occurs during the request. """ access_token = self.get_access_token() headers = {"Authorization": f"Bearer {access_token}"} base_url = f"https://graph.microsoft.com/v1.0/drives/{drive_id}" def get_files(url: str, path: str) -> List[Tuple[str, str, datetime]]: response = requests.get(url, headers=headers) if response.status_code != 200: error_info = response.json().get("error", {}).get("message", "No additional error information provided.") raise RuntimeError(f"Failed to retrieve files from URL '{url}'. HTTP status: {response.status_code}. 
Error: {error_info}") data = response.json() for child in data.get("value", []): new_path = path + "/" + child["name"] if child.get("file"): # Object is a file last_modified = datetime.strptime(child["lastModifiedDateTime"], "%Y-%m-%dT%H:%M:%SZ") yield (new_path, child["@microsoft.graph.downloadUrl"], last_modified) else: # Object is a folder, retrieve children child_url = f"{base_url}/items/{child['id']}/children" # Use item endpoint for nested objects yield from get_files(child_url, new_path) yield from [] # Initial request to item endpoint item_url = f"{base_url}/items/{object_id}" item_response = requests.get(item_url, headers=headers) if item_response.status_code != 200: error_info = item_response.json().get("error", {}).get("message", "No additional error information provided.") raise RuntimeError( f"Failed to retrieve the initial shared object with ID '{object_id}' from drive '{drive_id}'. " f"HTTP status: {item_response.status_code}. Error: {error_info}" ) # Check if the object is a file or a folder item_data = item_response.json() if item_data.get("file"): # Initial object is a file new_path = path + "/" + item_data["name"] last_modified = datetime.strptime(item_data["lastModifiedDateTime"], "%Y-%m-%dT%H:%M:%SZ") yield (new_path, item_data["@microsoft.graph.downloadUrl"], last_modified) else: # Initial object is a folder, start file retrieval yield from get_files(f"{item_url}/children", path) def list_directories_and_files(self, root_folder, path=None): """Enumerates folders and files starting from a root folder.""" drive_items = root_folder.children.get().execute_query() found_items = [] for item in drive_items: item_path = path + "/" + item.name if path else item.name if item.is_file: found_items.append((item_path, item.properties["@microsoft.graph.downloadUrl"], item.properties["lastModifiedDateTime"])) else: found_items.extend(self.list_directories_and_files(item, item_path)) return found_items def get_files_by_drive_name(self, drive_name, 
folder_path): """Yields files from the specified drive.""" path_levels = [level for level in folder_path.split("/") if level] folder_path = "/".join(path_levels) for drive in self.drives: if drive.name == drive_name: folder = drive.root if folder_path in self.ROOT_PATH else drive.root.get_by_path(folder_path).get().execute_query() yield from self.list_directories_and_files(folder) def _get_shared_files_from_all_drives(self, parsed_drive_id: str): shared_drive_items = self.one_drive_client.me.drive.shared_with_me().execute_query() for drive_item in shared_drive_items: parent_reference = drive_item.remote_item.parentReference # check if drive is already parsed if parent_reference and parent_reference["driveId"] != parsed_drive_id: yield from self._get_shared_drive_object(parent_reference["driveId"], drive_item.id, drive_item.web_url) def get_all_files(self): if self.config.search_scope in ("ACCESSIBLE_DRIVES", "ALL"): # Get files from accessible drives yield from self.get_files_by_drive_name(self.config.drive_name, self.config.folder_path) if self.config.search_scope in ("SHARED_ITEMS", "ALL"): selected_drive = list(filter(lambda drive: drive.name == self.config.drive_name, self.drives)) selected_drive_id = selected_drive[0].id if selected_drive else None if self.config.search_scope == "SHARED_ITEMS": selected_drive_id = None # Get files from shared items yield from self._get_shared_files_from_all_drives(selected_drive_id) def get_matching_files(self, globs: List[str], prefix: Optional[str], logger: logging.Logger) -> Iterable[RemoteFile]: """ Retrieve all files matching the specified glob patterns in OneDrive. 
""" files = self.get_all_files() try: path, download_url, last_modified = next(files) yield from self.filter_files_by_globs_and_start_date( [ MicrosoftOneDriveRemoteFile( uri=path, download_url=download_url, last_modified=last_modified, ) ], globs, ) except StopIteration as e: raise AirbyteTracedException( internal_message=str(e), message=f"Drive '{self.config.drive_name}' is empty or does not exist.", failure_type=FailureType.config_error, exception=e, ) yield from self.filter_files_by_globs_and_start_date( [ MicrosoftOneDriveRemoteFile( uri=path, download_url=download_url, last_modified=last_modified, ) for path, download_url, last_modified in files ], globs, ) def open_file(self, file: RemoteFile, mode: FileReadMode, encoding: Optional[str], logger: logging.Logger) -> IOBase: try: return smart_open.open(file.download_url, mode=mode.value, encoding=encoding) except Exception as e: logger.exception(f"Error opening file {file.uri}: {e}")
SourceMicrosoftOneDriveStreamReader
python
realpython__materials
inheritance-and-composition/choosing/hr.py
{ "start": 880, "end": 1026 }
class ____: def __init__(self): self.hours_worked = 0 def track_work(self, hours): self.hours_worked += hours
PayrollPolicy
python
pytorch__pytorch
torch/_inductor/metrics.py
{ "start": 2331, "end": 2790 }
class ____: """ The subset of metrics we want update across cache hits, e.g., the FxGraphCache. """ generated_kernel_count: int generated_cpp_vec_kernel_count: int ir_nodes_pre_fusion: int cpp_to_dtype_count: int num_bytes_accessed: int num_matches_for_scatter_upon_const_tensor: int def get_metric_fields() -> list[str]: return [field.name for field in dataclasses.fields(CachedMetricsDeltas)]
CachedMetricsDeltas
python
encode__django-rest-framework
rest_framework/exceptions.py
{ "start": 5356, "end": 5556 }
class ____(APIException): status_code = status.HTTP_403_FORBIDDEN default_detail = _('You do not have permission to perform this action.') default_code = 'permission_denied'
PermissionDenied
python
streamlit__streamlit
lib/streamlit/elements/lib/column_types.py
{ "start": 4451, "end": 4605 }
class ____(TypedDict): value: str label: NotRequired[str | None] color: NotRequired[str | Literal["auto"] | ThemeColor | None]
MultiselectOption
python
EpistasisLab__tpot
tpot/search_spaces/pipelines/union.py
{ "start": 3854, "end": 4357 }
class ____(SearchSpace): def __init__(self, search_spaces : List[SearchSpace] ) -> None: """ Takes in a list of search spaces. will produce a pipeline of Sequential length. Each step in the pipeline will correspond to the the search space provided in the same index. """ self.search_spaces = search_spaces def generate(self, rng=None): rng = np.random.default_rng(rng) return UnionPipelineIndividual(self.search_spaces, rng=rng)
UnionPipeline
python
huggingface__transformers
src/transformers/models/dpr/tokenization_dpr.py
{ "start": 1034, "end": 1438 }
class ____(BertTokenizer): r""" Construct a DPRContextEncoder tokenizer. [`DPRContextEncoderTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES
DPRContextEncoderTokenizer
python
pytorch__pytorch
test/inductor/test_distributed_patterns.py
{ "start": 4039, "end": 16968 }
class ____(TestCase): def test_intermediate_hook_with_closure(self): @dataclasses.dataclass class CustomObj: val: torch.Tensor def fn(x, obj): y = x.sin() closure_var = y + 1 y.register_hook(lambda grad: grad + obj.val + closure_var) z = y.sin() return z opt = torch.compile(fn, fullgraph=True) obj1 = CustomObj(torch.tensor(88)) obj2 = CustomObj(torch.tensor(99)) x0 = torch.ones(4, requires_grad=True) x1 = torch.ones(4, requires_grad=True) x2 = torch.ones(4, requires_grad=True) x3 = torch.ones(4, requires_grad=True) fn(x0, obj1).sum().backward() fn(x1, obj2).sum().backward() with compiled_autograd._enable( functools.partial(torch.compile, fullgraph=True) ): opt(x2, obj1).sum().backward() opt(x3, obj2).sum().backward() self.assertEqual(x0.grad, x2.grad) self.assertEqual(x1.grad, x3.grad) def test_intermediate_hook_with_nested_closure(self): @dataclasses.dataclass class CustomObj: val: torch.Tensor def fn(x, obj): def run(): y = x.sin() closure_var = y + 1 y.register_hook(lambda grad: grad + obj.val + closure_var) z = y.sin() return z return run() opt = torch.compile(fn, fullgraph=True) obj1 = CustomObj(torch.tensor(88)) obj2 = CustomObj(torch.tensor(99)) x0 = torch.ones(4, requires_grad=True) x1 = torch.ones(4, requires_grad=True) x2 = torch.ones(4, requires_grad=True) x3 = torch.ones(4, requires_grad=True) fn(x0, obj1).sum().backward() fn(x1, obj2).sum().backward() with compiled_autograd._enable( functools.partial(torch.compile, fullgraph=True) ): opt(x2, obj1).sum().backward() opt(x3, obj2).sum().backward() self.assertEqual(x0.grad, x2.grad) self.assertEqual(x1.grad, x3.grad) @torch.no_grad() def _test_storage_resize_zero(self, device): @torch.compile(fullgraph=True) def fn(x): y = torch.sin(x) x.untyped_storage().resize_(0) return torch.cos(y) x = torch.randn(10, device=device) expected = torch.cos(torch.sin(x)) y = fn(x) self.assertEqual(y, expected) self.assertEqual(x.untyped_storage().size(), 0) def test_storage_resize_zero_cpu(self): 
self._test_storage_resize_zero("cpu") @requires_gpu() def test_storage_resize_zero_gpu(self): self._test_storage_resize_zero(GPU_TYPE) @torch.no_grad() def _test_storage_resize_nonzero(self, device): @torch.compile(fullgraph=True) def fn(x, out): y = torch.sin(x) assert out.untyped_storage().size() == 0 out.untyped_storage().resize_(x.untyped_storage().size()) out.copy_(y.cos()) x = torch.randn(10, device=device) out = torch.randn(10, device=device) expected = torch.cos(torch.sin(x)) out.untyped_storage().resize_(0) fn(x, out) self.assertEqual(out.untyped_storage().size(), x.untyped_storage().size()) self.assertEqual(out, expected) def test_storage_resize_nonzero_cpu(self): self._test_storage_resize_nonzero("cpu") @requires_gpu() def test_storage_resize_nonzero_gpu(self): self._test_storage_resize_nonzero(GPU_TYPE) @torch.no_grad() def test_unsafe_set_version_counter1(self): cnt = CompileCounter() @torch.compile(backend=cnt, fullgraph=True) def fn(w, x): x = x.sin() v = w._version w.copy_(x + 1) torch._C._autograd._unsafe_set_version_counter((w,), (v,)) return w, v for v in (3, 0, 1): w1 = torch.randn(16) for i in range(v): w1.fill_(i) # bump w1._version self.assertEqual(w1._version, v) x1 = torch.randn(16) w2, v2 = fn(w1, x1) self.assertIs(w1, w2) self.assertEqual(w1, x1.sin() + 1) self.assertEqual(v2, v) self.assertEqual(w1._version, v) self.assertEqual(cnt.frame_count, 1) def test_unsafe_set_version_counter2(self): @torch.compile(backend="inductor", fullgraph=True) def fn(w, x): r = w.sin() with torch.no_grad(): v = w._version w.copy_(x) torch._C._autograd._unsafe_set_version_counter((w,), (v,)) return r w1 = torch.randn(1, requires_grad=True) x1 = torch.randn(1) expected_r1 = w1.detach().sin() r1 = fn(w1, x1) r1.backward() self.assertEqual(r1, expected_r1) self.assertEqual(w1, x1) self.assertEqual(w1.grad, x1.cos()) @torch.no_grad() def test_unsafe_preserve_version_counter1(self): @torch.compile(backend="eager", fullgraph=True) def fn(w, x): x = x.sin() with 
torch.autograd._unsafe_preserve_version_counter(w): w.copy_(x + 1) return w w1 = torch.randn(16).fill_(0).fill_(1) x1 = torch.randn(16) v1 = w1._version w2 = fn(w1, x1) v2 = w1._version self.assertIs(w1, w2) self.assertEqual(w1, x1.sin() + 1) self.assertEqual(v1, v2) def test_unsafe_preserve_version_counter2(self): @torch.compile(backend="inductor", fullgraph=True) def fn(w, x): r = w.sin() with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter(w): w.copy_(x) return r w1 = torch.randn(1, requires_grad=True) x1 = torch.randn(1) expected_r1 = w1.detach().sin() r1 = fn(w1, x1) r1.backward() self.assertEqual(r1, expected_r1) self.assertEqual(w1, x1) self.assertEqual(w1.grad, x1.cos()) def test_module_backward_hooks_eager(self): m1, inp1 = init_module_bw_hooks(True) out1 = steps(m1, inp1) m2, inp2 = init_module_bw_hooks(False) fw_cnt = CompileCounter() bw_cnt = CompileCounter() with compiled_autograd._enable(torch.compile(backend=bw_cnt, fullgraph=True)): m2 = torch.compile(m2, backend=fw_cnt, fullgraph=True) out2 = steps(m2, inp2) self.assertEqual(m1.hook_count_pre, m2.hook_count_pre) self.assertEqual(m1.hook_count_post, m2.hook_count_post) self.assertEqual(out1, out2) self.assertEqual(inp1.grad, inp2.grad) self.assertEqual(m1.weight.grad, m2.weight.grad) self.assertEqual(m1.bias.grad, m2.bias.grad) self.assertEqual(fw_cnt.frame_count, 1) self.assertEqual(fw_cnt.op_count, 5) self.assertEqual(bw_cnt.frame_count, 2) # grad=None and grad!=None self.assertEqual( bw_cnt.op_count, 111 ) # Number of ops in the Dynamo-produced graphs def test_module_backward_hooks_aot(self): m1, inp1 = init_module_bw_hooks(True) out1 = steps(m1, inp1) m2, inp2 = init_module_bw_hooks(True) m2 = torch.compile(m2, backend="aot_eager", fullgraph=True) with compiled_autograd._enable(lambda gm: gm): out2 = steps(m2, inp2) self.assertEqual(m1.hook_count_pre, m2.hook_count_pre) self.assertEqual(m1.hook_count_post, m2.hook_count_post) self.assertEqual(out1, out2) 
self.assertEqual(inp1.grad, inp2.grad) self.assertEqual(m1.weight.grad, m2.weight.grad) self.assertEqual(m1.bias.grad, m2.bias.grad) def test_module_backward_hooks_inductor(self): m1, inp1 = init_module_bw_hooks(True) out1 = steps(m1, inp1) m2, inp2 = init_module_bw_hooks(False) m2 = torch.compile(m2, fullgraph=True) with compiled_autograd._enable(torch.compile(fullgraph=True)): out2 = steps(m2, inp2) self.assertEqual(m1.hook_count_pre, m2.hook_count_pre) self.assertEqual(m1.hook_count_post, m2.hook_count_post) self.assertEqual(out1, out2) self.assertEqual(inp1.grad, inp2.grad) self.assertEqual(m1.weight.grad, m2.weight.grad) self.assertEqual(m1.bias.grad, m2.bias.grad) def test_module_backward_hooks_multi_layers(self): a1, inp1 = init_module_bw_hooks(True) b1, _ = init_module_bw_hooks(True) out1 = steps(torch.nn.Sequential(a1, b1), inp1) a2, inp2 = init_module_bw_hooks(False) b2, _ = init_module_bw_hooks(False) with compiled_autograd._enable(torch.compile(fullgraph=True)): out2 = steps( torch.compile(torch.nn.Sequential(a2, b2), fullgraph=True), inp2 ) self.assertEqual(a1.hook_count_pre, a2.hook_count_pre) self.assertEqual(a1.hook_count_post, a2.hook_count_post) self.assertEqual(b1.hook_count_pre, b2.hook_count_pre) self.assertEqual(b1.hook_count_post, b2.hook_count_post) self.assertEqual(out1, out2) self.assertEqual(inp1.grad, inp2.grad) self.assertEqual(a1.weight.grad, a2.weight.grad) self.assertEqual(a1.bias.grad, a2.bias.grad) self.assertEqual(b1.weight.grad, b2.weight.grad) self.assertEqual(b1.bias.grad, b2.bias.grad) # TODO(jansel): support bw hooks with graph break def _assert_same_grad(self, a, b): self.assertEqual(type(a), type(b)) self.assertEqual(a, b) self.assertEqual(a.grad, b.grad) self.assertEqual(a.requires_grad, b.requires_grad) def test_nn_param_return1(self): def fn(x): p = torch.nn.Parameter(x) return p, p.sin() opt = torch.compile(fn, fullgraph=True) x1 = torch.randn(16) x2 = x1.clone() p1, r1 = fn(x1) r1.sum().backward() p2, r2 = opt(x2) 
r2.sum().backward() self._assert_same_grad(r1, r2) self._assert_same_grad(p1, p2) def test_nn_param_return2(self): def fn(x): p = torch.nn.Parameter(x, requires_grad=False) return p, x + 1 opt = torch.compile(fn, fullgraph=True) x1 = torch.randn(16) x2 = x1.clone() p1, r1 = fn(x1) p2, r2 = opt(x2) self._assert_same_grad(r1, r2) self._assert_same_grad(p1, p2) @torch._dynamo.config.patch("graph_break_on_nn_param_ctor", False) def test_nn_param_return3(self): def fn(x): p = torch.nn.Parameter(x + 123) return p, p.sin() opt = torch.compile(fn, fullgraph=True) x1 = torch.randn(16) x2 = x1.clone() p1, r1 = fn(x1) r1.sum().backward() p2, r2 = opt(x2) r2.sum().backward() self._assert_same_grad(r1, r2) self._assert_same_grad(p1, p2) @torch._dynamo.config.patch("graph_break_on_nn_param_ctor", False) def test_nn_param_return4(self): def fn(x): p = torch.nn.Parameter(x + 123, requires_grad=False) return p, x + 1 opt = torch.compile(fn, fullgraph=True) x1 = torch.randn(16) x2 = x1.clone() p1, r1 = fn(x1) p2, r2 = opt(x2) self._assert_same_grad(r1, r2) self._assert_same_grad(p1, p2) @torch._functorch.config.patch(recompute_views=True) def test_fake_distributed_aot_eager(self): m1, inp1 = init_fake_distributed() out1 = steps(m1, inp1) m2, inp2 = init_fake_distributed() m2 = torch.compile(m2, backend="aot_eager", fullgraph=True) bw_cnt = CompileCounter() with compiled_autograd._enable(torch.compile(backend=bw_cnt, fullgraph=True)): out2 = steps(m2, inp2) self._assert_same_grad(m1.weight, m2.weight) self._assert_same_grad(inp1, inp2) self._assert_same_grad(out1, out2) # Recompile on grad==None/grad!=None self.assertEqual(bw_cnt.frame_count, 2) @requires_gpu() @torch._functorch.config.patch(recompute_views=True) def test_fake_distributed_inductor(self): m1, inp1 = init_fake_distributed(GPU_TYPE) out1 = steps(m1, inp1) m2, inp2 = init_fake_distributed(GPU_TYPE) m2 = torch.compile(m2, fullgraph=True) with compiled_autograd._enable(torch.compile(fullgraph=True)): out2 = steps(m2, inp2) 
self._assert_same_grad(m1.weight, m2.weight) self._assert_same_grad(inp1, inp2) self._assert_same_grad(out1, out2) if __name__ == "__main__": if HAS_CPU and not IS_MACOS: run_tests(needs="filelock")
DistributedPatternTests
python
keras-team__keras
keras/src/layers/reshaping/up_sampling3d_test.py
{ "start": 164, "end": 4838 }
class ____(testing.TestCase): @parameterized.product( data_format=["channels_first", "channels_last"], length_dim1=[2, 3], length_dim2=[2], length_dim3=[3], ) @pytest.mark.requires_trainable_backend def test_upsampling_3d( self, data_format, length_dim1, length_dim2, length_dim3 ): num_samples = 2 stack_size = 2 input_len_dim1 = 10 input_len_dim2 = 11 input_len_dim3 = 12 if data_format == "channels_first": inputs = np.random.rand( num_samples, stack_size, input_len_dim1, input_len_dim2, input_len_dim3, ) else: inputs = np.random.rand( num_samples, input_len_dim1, input_len_dim2, input_len_dim3, stack_size, ) # basic test if data_format == "channels_first": expected_output_shape = (2, 2, 20, 22, 24) else: expected_output_shape = (2, 20, 22, 24, 2) self.run_layer_test( layers.UpSampling3D, init_kwargs={"size": (2, 2, 2), "data_format": data_format}, input_shape=inputs.shape, expected_output_shape=expected_output_shape, expected_output_dtype="float32", expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=False, ) layer = layers.UpSampling3D( size=(length_dim1, length_dim2, length_dim3), data_format=data_format, ) layer.build(inputs.shape) np_output = layer(inputs=backend.Variable(inputs)) if data_format == "channels_first": assert np_output.shape[2] == length_dim1 * input_len_dim1 assert np_output.shape[3] == length_dim2 * input_len_dim2 assert np_output.shape[4] == length_dim3 * input_len_dim3 else: # tf assert np_output.shape[1] == length_dim1 * input_len_dim1 assert np_output.shape[2] == length_dim2 * input_len_dim2 assert np_output.shape[3] == length_dim3 * input_len_dim3 # compare with numpy if data_format == "channels_first": expected_out = np.repeat(inputs, length_dim1, axis=2) expected_out = np.repeat(expected_out, length_dim2, axis=3) expected_out = np.repeat(expected_out, length_dim3, axis=4) else: # tf expected_out = np.repeat(inputs, length_dim1, axis=1) expected_out 
= np.repeat(expected_out, length_dim2, axis=2) expected_out = np.repeat(expected_out, length_dim3, axis=3) self.assertAllClose(np_output, expected_out) def test_upsampling_3d_correctness(self): input_shape = (2, 1, 2, 1, 3) x = np.arange(np.prod(input_shape)).reshape(input_shape) expected_output = np.array( [ [ [ [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]], [[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]], ], [ [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]], [[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]], ], ], [ [ [[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]], [[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]], [[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]], [[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]], ], [ [[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]], [[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]], [[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]], [[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]], ], ], ] ) if backend.config.image_data_format() == "channels_first": expected_output = expected_output.transpose((0, 4, 1, 2, 3)) x = x.transpose((0, 4, 1, 2, 3)) self.assertAllClose( layers.UpSampling3D(size=(2, 2, 2))(x), expected_output )
UpSampling3dTest
python
PyCQA__pylint
tests/functional/i/invalid/invalid_hash_returned.py
{ "start": 1221, "end": 1348 }
class ____: """Potential uninferable return value""" def __hash__(self): return hash(Missing)
AnotherAmbiguousHash
python
huggingface__transformers
src/transformers/models/video_llava/video_processing_video_llava.py
{ "start": 812, "end": 1335 }
class ____(BaseVideoProcessor): resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {"shortest_edge": 224} default_to_square = False crop_size = {"height": 224, "width": 224} do_resize = True do_center_crop = True do_rescale = True do_normalize = True do_convert_rgb = True do_sample_frames = False # Set to False for BC, recommended to set `True` in new models __all__ = ["VideoLlavaVideoProcessor"]
VideoLlavaVideoProcessor
python
huggingface__transformers
tests/models/mllama/test_modeling_mllama.py
{ "start": 17952, "end": 30412 }
class ____(unittest.TestCase): def setUp(self): self.base_model_checkpoint = "meta-llama/Llama-3.2-11B-Vision" self.instruct_model_checkpoint = "meta-llama/Llama-3.2-11B-Vision-Instruct" def tearDown(self): cleanup(torch_device, gc_collect=True) @slow @require_torch_accelerator @require_bitsandbytes @require_read_token def test_11b_model_integration_generate(self): # Prepare inputs processor = AutoProcessor.from_pretrained(self.base_model_checkpoint) prompt = "<|image|>If I had to write a haiku for this one" url = "https://llava-vl.github.io/static/images/view.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(text=prompt, images=image, return_tensors="pt").to(torch_device) input_ids = inputs["input_ids"] # Check inputs ids expected_input_ids_all = Expectations( { ("xpu", 3): torch.tensor([[128000, 128256, 128000, 2746, 358, 1047, 311, 3350, 264, 6520, 39342, 369, 420, 832]], device=torch_device), ("cuda", 7): torch.tensor([[128000, 128256, 128000, 2746, 358, 1047, 311, 3350, 264, 6520, 39342, 369, 420, 832]], device=torch_device), ("cuda", 8): torch.tensor([[128000, 128256, 128000, 2746, 358, 1047, 311, 3350, 264, 6520, 39342, 369, 420, 832]], device=torch_device), } ) # fmt: skip expected_input_ids = expected_input_ids_all.get_expectation() self.assertTrue(torch.equal(input_ids, expected_input_ids)) # Load model in 4 bit quantization_config = BitsAndBytesConfig(load_in_4bit=True) model = MllamaForConditionalGeneration.from_pretrained( self.base_model_checkpoint, quantization_config=quantization_config ) # Generate output = model.generate(**inputs, do_sample=False, max_new_tokens=25) decoded_output = processor.decode(output[0], skip_special_tokens=True) expected_outputs = Expectations( { ("xpu", 3): "If I had to write a haiku for this one, it would be:.\\nA dock on a lake.\\nA mountain in the distance.\\nA long exposure.", ("cuda", 7): "If I had to write a haiku for this one, it would be:.\\nA dock in the lake.\\nA mountain in the 
distance.\\nA long exposure.", ("cuda", 8): 'If I had to write a haiku for this one, it would be:.\\nA dock in the lake.\\nA mountain in the distance.\\nA long exposure.', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", ) @slow @require_torch_accelerator @require_bitsandbytes @require_read_token def test_11b_model_integration_generate_text_only(self): # Prepare inputs processor = AutoProcessor.from_pretrained(self.base_model_checkpoint) prompt = "If I had to write a haiku" inputs = processor(text=prompt, return_tensors="pt").to(torch_device) input_ids = inputs["input_ids"].cpu().squeeze().tolist() # Check inputs ids expected_input_ids_all = Expectations( { ("xpu", 3): [128000, 128000, 2746, 358, 1047, 311, 3350, 264, 6520, 39342], ("cuda", 7): [128000, 128000, 2746, 358, 1047, 311, 3350, 264, 6520, 39342], ("cuda", 8): [128000, 128000, 2746, 358, 1047, 311, 3350, 264, 6520, 39342], } ) expected_input_ids = expected_input_ids_all.get_expectation() self.assertEqual(input_ids, expected_input_ids) # Load model in 4 bit quantization_config = BitsAndBytesConfig(load_in_4bit=True) model = MllamaForConditionalGeneration.from_pretrained( self.base_model_checkpoint, quantization_config=quantization_config ) # Generate output = model.generate(**inputs, do_sample=False, max_new_tokens=25) decoded_output = processor.decode(output[0], skip_special_tokens=True) expected_outputs = Expectations( { ("xpu", 3): "If I had to write a haiku about my life, I would write:\nLife is a messy stream\nRipples of joy and pain\nFlowing, ever", ("cuda", 7): "If I had to write a haiku about my life, I would write:\nLife is a messy stream\nRipples of joy and pain\nFlowing, ever", ("cuda", 8): "If I had to write a haiku about my life, I would write:\nLife is a messy stream\nRipples of joy and pain\nFlowing, ever", } ) # fmt: skip expected_output = 
expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", ) @slow @require_torch_accelerator @require_bitsandbytes @require_read_token def test_11b_model_integration_forward(self): # Prepare inputs processor = AutoProcessor.from_pretrained(self.base_model_checkpoint) prompt = "<|image|>If I had to write a haiku for this one" url = "https://llava-vl.github.io/static/images/view.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(text=prompt, images=image, return_tensors="pt").to(torch_device) # Load model in 4 bit quantization_config = BitsAndBytesConfig(load_in_4bit=True) model = MllamaForConditionalGeneration.from_pretrained( self.base_model_checkpoint, quantization_config=quantization_config ) # Forward with torch.inference_mode(): output = model(**inputs) actual_logits = output.logits[0, -1, :5].cpu() expected_logits_all = Expectations( { ("xpu", 3): torch.tensor([9.1562, 8.9141, 5.0664, 1.6855, 3.2324], dtype=actual_logits.dtype), ("cuda", 7): torch.tensor([9.0781, 8.8750, 5.0781, 1.6221, 3.2207], dtype=actual_logits.dtype), ("cuda", 8): torch.tensor([9.0703, 8.8750, 5.0781, 1.6279, 3.2207], dtype=actual_logits.dtype), } ) expected_logits = expected_logits_all.get_expectation() self.assertTrue( torch.allclose(actual_logits, expected_logits, atol=0.1), f"Actual logits: {actual_logits}" f"\nExpected logits: {expected_logits}" f"\nDifference: {torch.abs(actual_logits - expected_logits)}", ) @slow @require_torch_accelerator @require_bitsandbytes @require_read_token def test_11b_model_integration_batched_generate(self): processor = AutoProcessor.from_pretrained(self.base_model_checkpoint) # Prepare inputs prompt = [ "<|image|>If I had to write a haiku for this one", "<|image|>This image shows", ] image1 = Image.open(requests.get("https://llava-vl.github.io/static/images/view.jpg", stream=True).raw) image2 = Image.open( requests.get( 
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg", stream=True, ).raw ) inputs = processor(text=prompt, images=[[image1], [image2]], padding=True, return_tensors="pt").to( torch_device ) # Load model in 4 bit quantization_config = BitsAndBytesConfig(load_in_4bit=True) model = MllamaForConditionalGeneration.from_pretrained( self.base_model_checkpoint, quantization_config=quantization_config ) output = model.generate(**inputs, do_sample=False, max_new_tokens=25) # Check first output decoded_output = processor.decode(output[0], skip_special_tokens=True) expected_outputs = Expectations( { ("xpu", 3): "If I had to write a haiku for this one, it would be:.\\nA dock on a lake.\\nA mountain in the distance.\\nA long exposure.", ("cuda", 7): "If I had to write a haiku for this one, it would be:.\\nA dock on a lake.\\nA mountain in the distance.\\nA long exposure.", ("cuda", 8): 'If I had to write a haiku for this one, it would be:.\\nA dock in the lake.\\nA mountain in the distance.\\nA long exposure.', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", ) # Check second output decoded_output = processor.decode(output[1], skip_special_tokens=True) expected_outputs = Expectations( { ("xpu", 3): "This image shows\nI'm not able to provide information on the person in this image. I can give you an idea of what's happening", ("cuda", 7): "This image shows\nI'm not able to provide information on the person in this image. I can give you an idea of what's happening", ("cuda", 8): "This image shows\nI'm not able to provide information on the person in this image. 
I can give you an idea of what's happening", } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", ) @slow @require_torch_accelerator @require_bitsandbytes @require_read_token def test_11b_model_integration_multi_image_generate(self): processor = AutoProcessor.from_pretrained(self.instruct_model_checkpoint) # Prepare inputs image1 = Image.open(requests.get("https://llava-vl.github.io/static/images/view.jpg", stream=True).raw) image2 = Image.open( requests.get( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg", stream=True, ).raw ) conversation = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What’s shown in this image?"}, ], }, { "role": "assistant", "content": [ {"type": "text", "text": "This image shows a long wooden dock extending out into a lake."} ], }, { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What about this one, what do you see here? 
Can you describe in detail?"}, ], }, ] prompt = processor.apply_chat_template(conversation, add_generation_prompt=True) inputs = processor(text=prompt, images=[[image1, image2]], return_tensors="pt").to(torch_device) prompt_len = inputs["input_ids"].shape[-1] # Load model in 4 bit quantization_config = BitsAndBytesConfig(load_in_4bit=True) model = MllamaForConditionalGeneration.from_pretrained( self.instruct_model_checkpoint, quantization_config=quantization_config ) output = model.generate(**inputs, do_sample=False, max_new_tokens=25) # Check first output generated_output = output[0][prompt_len:] decoded_output = processor.decode(generated_output, skip_special_tokens=False) # model should response about "stop sign", however it responses about "dock" # this happens only in quantized version, bfloat16 works fine expected_output = "This image shows a long wooden dock extending out into a lake. The dock is made of wooden planks and has a railing" self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", )
MllamaForConditionalGenerationIntegrationTest
python
apache__airflow
airflow-core/src/airflow/models/callback.py
{ "start": 3230, "end": 6873 }
class ____(Base): """Base class for callbacks.""" __tablename__ = "callback" id: Mapped[str] = mapped_column(UUIDType(binary=False), primary_key=True, default=uuid6.uuid7) # This is used by SQLAlchemy to be able to deserialize DB rows to subclasses __mapper_args__ = { "polymorphic_identity": "callback", "polymorphic_on": "type", } type: Mapped[str] = mapped_column(String(20), nullable=False) # Method used to fetch the callback, of type: CallbackFetchMethod fetch_method: Mapped[str] = mapped_column(String(20), nullable=False) # Used by subclasses to store information about how to run the callback data: Mapped[dict] = mapped_column(ExtendedJSON, nullable=False) # State of the Callback of type: CallbackState. Can be null for instances of DagProcessorCallback. state: Mapped[str | None] = mapped_column(String(10)) # Return value of the callback if successful, otherwise exception details output: Mapped[str | None] = mapped_column(Text, nullable=True) # Used for prioritization. Higher weight -> higher priority priority_weight: Mapped[int] = mapped_column(Integer, nullable=False) # Creation time of the callback created_at: Mapped[datetime] = mapped_column(UtcDateTime, default=timezone.utcnow, nullable=False) # Used for callbacks of type CallbackType.TRIGGERER trigger_id: Mapped[int] = mapped_column(Integer, ForeignKey("trigger.id"), nullable=True) trigger = relationship("Trigger", back_populates="callback", uselist=False) def __init__(self, priority_weight: int = 1, prefix: str = "", **kwargs): """ Initialize a Callback. This is the base class so it shouldn't usually need to be initialized. 
:param priority_weight: Priority for callback execution (higher value -> higher priority) :param prefix: Optional prefix for metric names :param kwargs: Additional data emitted in metric tags """ self.state = CallbackState.PENDING self.priority_weight = priority_weight self.data = kwargs # kwargs can be used to include additional info in metric tags if prefix: self.data["prefix"] = prefix def queue(self): self.state = CallbackState.QUEUED def get_metric_info(self, status: CallbackState, result: Any) -> dict: tags = {"result": result, **self.data} tags.pop("prefix", None) if "kwargs" in tags: # Remove the context (if exists) to keep the tags simple tags["kwargs"] = {k: v for k, v in tags["kwargs"].items() if k != "context"} prefix = self.data.get("prefix", "") name = f"{prefix}.callback_{status}" if prefix else f"callback_{status}" return {"stat": name, "tags": tags} @staticmethod def create_from_sdk_def(callback_def: CallbackDefinitionProtocol, **kwargs) -> Callback: # Cannot check actual type using isinstance() because that would require SDK import match type(callback_def).__name__: case "AsyncCallback": if TYPE_CHECKING: assert isinstance(callback_def, ImportPathCallbackDefProtocol) return TriggererCallback(callback_def, **kwargs) case "SyncCallback": if TYPE_CHECKING: assert isinstance(callback_def, ImportPathExecutorCallbackDefProtocol) return ExecutorCallback(callback_def, fetch_method=CallbackFetchMethod.IMPORT_PATH, **kwargs) case _: raise ValueError(f"Cannot handle Callback of type {type(callback_def)}")
Callback
python
great-expectations__great_expectations
contrib/time_series_expectations/time_series_expectations/generator/time_series_generator.py
{ "start": 416, "end": 1004 }
class ____(ABC): """Base class for time series generators.""" @abstractmethod def generate_df( self, *args, **kwargs, ) -> pd: """Generate a time series as a pandas DataFrame. Args: *args will differ depending on the specific generator class. Keyword Args: *kwargs will differ depending on the specific generator class. Returns: pd.DataFrame: A two-column pandas DataFrame with a datetime index and a column for the time series values """ pass
TimeSeriesGenerator
python
PyCQA__pylint
tests/functional/a/assigning/assigning_non_slot.py
{ "start": 1293, "end": 1627 }
class ____: """ Using properties is safe. """ __slots__ = ['tmp', '_value'] @property def test(self): return self._value @test.setter def test(self, value): # pylint: disable=attribute-defined-outside-init self._value = value def __init__(self): self.test = 42
PropertyGood
python
django-compressor__django-compressor
compressor/tests/test_base.py
{ "start": 952, "end": 1196 }
class ____: """A filter whose output is always the string 'OUTPUT'""" def __init__(self, content, attrs, filter_type=None, filename=None, charset=None): pass def input(self, **kwargs): return "OUTPUT"
TestPrecompiler
python
spyder-ide__spyder
spyder/widgets/mixins.py
{ "start": 59083, "end": 60948 }
class ____(object): """Mixin to make file names in tracebacks and anchors clickable.""" QT_CLASS = None # This signal emits a parsed error traceback text so we can then # request opening the file that traceback comes from in the Editor. sig_go_to_error_requested = None def __init__(self): self.__cursor_changed = False self.anchor = None self.setMouseTracking(True) def mouseReleaseEvent(self, event): """Go to error or link in anchor.""" self.QT_CLASS.mouseReleaseEvent(self, event) text = self.get_line_at(event.pos()) if get_error_match(text) and not self.has_selected_text(): if self.sig_go_to_error_requested is not None: self.sig_go_to_error_requested.emit(text) elif self.anchor: QDesktopServices.openUrl(QUrl(self.anchor)) QApplication.restoreOverrideCursor() self.anchor = None def mouseMoveEvent(self, event): """Show pointing hand cursor on error messages and anchors.""" text = self.get_line_at(event.pos()) self.anchor = self.anchorAt(event.pos()) if get_error_match(text) or self.anchor: if not self.__cursor_changed: QApplication.setOverrideCursor(Qt.PointingHandCursor) self.__cursor_changed = True event.accept() return if self.__cursor_changed: QApplication.restoreOverrideCursor() self.__cursor_changed = False self.QT_CLASS.mouseMoveEvent(self, event) def leaveEvent(self, event): """If cursor has not been restored yet, do it now""" if self.__cursor_changed: QApplication.restoreOverrideCursor() self.__cursor_changed = False self.QT_CLASS.leaveEvent(self, event)
TracebackLinksMixin
python
wandb__wandb
wandb/sdk/artifacts/_generated/update_user_registry_role.py
{ "start": 274, "end": 380 }
class ____(GQLResult): success: bool UpdateUserRegistryRole.model_rebuild()
UpdateUserRegistryRoleResult
python
scipy__scipy
scipy/stats/tests/test_distributions.py
{ "start": 94993, "end": 98782 }
class ____: def setup_method(self): self.rng = np.random.default_rng(2807014525) # gh-6226 def test_cdf_ppf(self): x = np.linspace(-20, 20) y = stats.logistic.cdf(x) xx = stats.logistic.ppf(y) assert_allclose(x, xx) def test_sf_isf(self): x = np.linspace(-20, 20) y = stats.logistic.sf(x) xx = stats.logistic.isf(y) assert_allclose(x, xx) def test_extreme_values(self): # p is chosen so that 1 - (1 - p) == p in double precision p = 9.992007221626409e-16 desired = 34.53957599234088 assert_allclose(stats.logistic.ppf(1 - p), desired) assert_allclose(stats.logistic.isf(p), desired) def test_logpdf_basic(self): logp = stats.logistic.logpdf([-15, 0, 10]) # Expected values computed with mpmath with 50 digits of precision. expected = [-15.000000611804547, -1.3862943611198906, -10.000090797798434] assert_allclose(logp, expected, rtol=1e-13) def test_logpdf_extreme_values(self): logp = stats.logistic.logpdf([800, -800]) # For such large arguments, logpdf(x) = -abs(x) when computed # with 64 bit floating point. 
assert_equal(logp, [-800, -800]) @pytest.mark.parametrize("loc_rvs,scale_rvs", [(0.4484955, 0.10216821), (0.62918191, 0.74367064)]) def test_fit(self, loc_rvs, scale_rvs): data = stats.logistic.rvs(size=100, loc=loc_rvs, scale=scale_rvs, random_state=self.rng) # test that result of fit method is the same as optimization def func(input, data): a, b = input n = len(data) x1 = np.sum(np.exp((data - a) / b) / (1 + np.exp((data - a) / b))) - n / 2 x2 = np.sum(((data - a) / b) * ((np.exp((data - a) / b) - 1) / (np.exp((data - a) / b) + 1))) - n return x1, x2 expected_solution = root(func, stats.logistic._fitstart(data), args=( data,)).x fit_method = stats.logistic.fit(data) # other than computational variances, the fit method and the solution # to this system of equations are equal assert_allclose(fit_method, expected_solution, atol=1e-30) def test_fit_comp_optimizer(self): data = stats.logistic.rvs(size=100, loc=0.5, scale=2, random_state=self.rng) _assert_less_or_close_loglike(stats.logistic, data) _assert_less_or_close_loglike(stats.logistic, data, floc=1) _assert_less_or_close_loglike(stats.logistic, data, fscale=1) @pytest.mark.parametrize('testlogcdf', [True, False]) def test_logcdfsf_tails(self, testlogcdf): # Test either logcdf or logsf. By symmetry, we can use the same # expected values for both by switching the sign of x for logsf. x = np.array([-10000, -800, 17, 50, 500]) if testlogcdf: y = stats.logistic.logcdf(x) else: y = stats.logistic.logsf(-x) # The expected values were computed with mpmath. expected = [-10000.0, -800.0, -4.139937633089748e-08, -1.9287498479639178e-22, -7.124576406741286e-218] assert_allclose(y, expected, rtol=2e-15) def test_fit_gh_18176(self): # logistic.fit returned `scale < 0` for this data. Check that this has # been fixed. data = np.array([-459, 37, 43, 45, 45, 48, 54, 55, 58] + [59] * 3 + [61] * 9) # If scale were negative, NLLF would be infinite, so this would fail _assert_less_or_close_loglike(stats.logistic, data)
TestLogistic
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/elements.py
{ "start": 166961, "end": 175926 }
class ____( roles.DDLReferredColumnRole, roles.LabeledColumnExprRole[_T], roles.StrAsPlainColumnRole, Immutable, NamedColumn[_T], ): """Represents a column expression from any textual string. The :class:`.ColumnClause`, a lightweight analogue to the :class:`_schema.Column` class, is typically invoked using the :func:`_expression.column` function, as in:: from sqlalchemy import column id, name = column("id"), column("name") stmt = select(id, name).select_from("user") The above statement would produce SQL like: .. sourcecode:: sql SELECT id, name FROM user :class:`.ColumnClause` is the immediate superclass of the schema-specific :class:`_schema.Column` object. While the :class:`_schema.Column` class has all the same capabilities as :class:`.ColumnClause`, the :class:`.ColumnClause` class is usable by itself in those cases where behavioral requirements are limited to simple SQL expression generation. The object has none of the associations with schema-level metadata or with execution-time behavior that :class:`_schema.Column` does, so in that sense is a "lightweight" version of :class:`_schema.Column`. Full details on :class:`.ColumnClause` usage is at :func:`_expression.column`. .. 
seealso:: :func:`_expression.column` :class:`_schema.Column` """ table: Optional[FromClause] is_literal: bool __visit_name__ = "column" _traverse_internals: _TraverseInternalsType = [ ("name", InternalTraversal.dp_anon_name), ("type", InternalTraversal.dp_type), ("table", InternalTraversal.dp_clauseelement), ("is_literal", InternalTraversal.dp_boolean), ] onupdate: Optional[DefaultGenerator] = None default: Optional[DefaultGenerator] = None server_default: Optional[FetchedValue] = None server_onupdate: Optional[FetchedValue] = None _is_multiparam_column = False @property def _is_star(self): # type: ignore[override] return self.is_literal and self.name == "*" def __init__( self, text: str, type_: Optional[_TypeEngineArgument[_T]] = None, is_literal: bool = False, _selectable: Optional[FromClause] = None, ): self.key = self.name = text self.table = _selectable # if type is None, we get NULLTYPE, which is our _T. But I don't # know how to get the overloads to express that correctly self.type = type_api.to_instance(type_) # type: ignore self.is_literal = is_literal def get_children(self, *, column_tables=False, **kw): # override base get_children() to not return the Table # or selectable that is parent to this column. Traversals # expect the columns of tables and subqueries to be leaf nodes. 
return [] @property def entity_namespace(self): if self.table is not None: return self.table.entity_namespace else: return super().entity_namespace def _clone(self, detect_subquery_cols=False, **kw): if ( detect_subquery_cols and self.table is not None and self.table._is_subquery ): clone = kw.pop("clone") table = clone(self.table, **kw) new = table.c.corresponding_column(self) return new return super()._clone(**kw) @HasMemoized_ro_memoized_attribute def _from_objects(self) -> List[FromClause]: t = self.table if t is not None: return [t] else: return [] @HasMemoized.memoized_attribute def _render_label_in_columns_clause(self): return self.table is not None @property def _ddl_label(self): return self._gen_tq_label(self.name, dedupe_on_key=False) def _compare_name_for_result(self, other): if ( self.is_literal or self.table is None or self.table._is_textual or not hasattr(other, "proxy_set") or ( isinstance(other, ColumnClause) and ( other.is_literal or other.table is None or other.table._is_textual ) ) ): return (hasattr(other, "name") and self.name == other.name) or ( hasattr(other, "_tq_label") and self._tq_label == other._tq_label ) else: return other.proxy_set.intersection(self.proxy_set) def _gen_tq_label( self, name: str, dedupe_on_key: bool = True ) -> Optional[str]: """generate table-qualified label for a table-bound column this is <tablename>_<columnname>. used primarily for LABEL_STYLE_TABLENAME_PLUS_COL as well as the .columns collection on a Join object. """ label: str t = self.table if self.is_literal: return None elif t is not None and is_named_from_clause(t): if has_schema_attr(t) and t.schema: label = ( t.schema.replace(".", "_") + "_" + t.name + ("_" + name) ) else: assert not TYPE_CHECKING or isinstance(t, NamedFromClause) label = t.name + ("_" + name) # propagate name quoting rules for labels. 
if is_quoted_name(name) and name.quote is not None: if is_quoted_name(label): label.quote = name.quote else: label = quoted_name(label, name.quote) elif is_quoted_name(t.name) and t.name.quote is not None: # can't get this situation to occur, so let's # assert false on it for now assert not isinstance(label, quoted_name) label = quoted_name(label, t.name.quote) if dedupe_on_key: # ensure the label name doesn't conflict with that of an # existing column. note that this implies that any Column # must **not** set up its _label before its parent table has # all of its other Column objects set up. There are several # tables in the test suite which will fail otherwise; example: # table "owner" has columns "name" and "owner_name". Therefore # column owner.name cannot use the label "owner_name", it has # to be "owner_name_1". if label in t.c: _label = label counter = 1 while _label in t.c: _label = label + f"_{counter}" counter += 1 label = _label return coercions.expect(roles.TruncatedLabelRole, label) else: return name def _make_proxy( self, selectable: FromClause, *, primary_key: ColumnSet, foreign_keys: Set[KeyedColumnElement[Any]], name: Optional[str] = None, key: Optional[str] = None, name_is_truncatable: bool = False, compound_select_cols: Optional[Sequence[ColumnElement[Any]]] = None, disallow_is_literal: bool = False, **kw: Any, ) -> typing_Tuple[str, ColumnClause[_T]]: # the "is_literal" flag normally should never be propagated; a proxied # column is always a SQL identifier and never the actual expression # being evaluated. however, there is a case where the "is_literal" flag # might be used to allow the given identifier to have a fixed quoting # pattern already, so maintain the flag for the proxy unless a # :class:`.Label` object is creating the proxy. See [ticket:4730]. 
is_literal = ( not disallow_is_literal and self.is_literal and ( # note this does not accommodate for quoted_name differences # right now name is None or name == self.name ) ) c = self._constructor( ( coercions.expect(roles.TruncatedLabelRole, name or self.name) if name_is_truncatable else (name or self.name) ), type_=self.type, _selectable=selectable, is_literal=is_literal, ) c._propagate_attrs = selectable._propagate_attrs if name is None: c.key = self.key if compound_select_cols: c._proxies = list(compound_select_cols) else: c._proxies = [self] if selectable._is_clone_of is not None: c._is_clone_of = selectable._is_clone_of.columns.get(c.key) return c.key, c
ColumnClause
python
ethereum__web3.py
web3/contract/base_contract.py
{ "start": 17083, "end": 29454 }
class ____: """ Base class for contract functions A function accessed via the api `contract.functions.myMethod(*args, **kwargs)` is a subclass of this class. """ address: ChecksumAddress = None fn_name: str = None name: str = None signature: str = None abi_element_identifier: ABIElementIdentifier = None w3: Union["Web3", "AsyncWeb3[Any]"] = None contract_abi: ABI = None abi: ABIFunction = None transaction: TxParams = None arguments: tuple[Any, ...] = None decode_tuples: bool | None = None argument_names: tuple[str, ...] = tuple() argument_types: tuple[str, ...] = tuple() args: Any = None kwargs: Any = None def __init__(self, abi: ABIFunction | None = None) -> None: if not self.abi_element_identifier: self.abi_element_identifier = type(self).__name__ self.name = get_name_from_abi_element_identifier(self.abi_element_identifier) self.fn_name = self.name if abi: self.abi = abi self.signature = abi_to_signature(self.abi) event_inputs = self.abi.get("inputs", []) self.argument_names = tuple([input.get("name", None) for input in event_inputs]) self.argument_types = tuple([input["type"] for input in event_inputs]) @combomethod def _get_abi(cls) -> ABIFunction: if not cls.args and not cls.kwargs: # If no args or kwargs are provided, get the ABI element by name return cast( ABIFunction, get_abi_element( cls.contract_abi, get_abi_element_signature(cls.abi_element_identifier), abi_codec=cls.w3.codec, ), ) return cast( ABIFunction, get_abi_element( cls.contract_abi, get_name_from_abi_element_identifier(cls.abi_element_identifier), *cls.args, abi_codec=cls.w3.codec, **cls.kwargs, ), ) def _set_function_info(self) -> None: self.selector = encode_hex(b"") if self.abi_element_identifier in [ "fallback", "receive", FallbackFn, ReceiveFn, ]: self.selector = encode_hex(function_abi_to_4byte_selector(self.abi)) self.arguments = None elif is_text(self.abi_element_identifier): self.selector = encode_hex(function_abi_to_4byte_selector(self.abi)) self.arguments = get_normalized_abi_inputs( 
self.abi, *self.args, **self.kwargs ) else: raise Web3TypeError("Unsupported function identifier") def _get_call_txparams(self, transaction: TxParams | None = None) -> TxParams: if transaction is None: call_transaction: TxParams = {} else: call_transaction = cast(TxParams, dict(**transaction)) if "data" in call_transaction: raise Web3ValueError("Cannot set 'data' field in call transaction") if self.address: call_transaction.setdefault("to", self.address) if self.w3.eth.default_account is not empty: call_transaction.setdefault( "from", cast(ChecksumAddress, self.w3.eth.default_account), ) if "to" not in call_transaction: if isinstance(self, type): raise Web3ValueError( "When using `Contract.[methodtype].[method].call()` from" " a contract factory you " "must provide a `to` address with the transaction" ) else: raise Web3ValueError( "Please ensure that this contract instance has an address." ) return call_transaction def _transact(self, transaction: TxParams | None = None) -> TxParams: if transaction is None: transact_transaction: TxParams = {} else: transact_transaction = cast(TxParams, dict(**transaction)) if "data" in transact_transaction: raise Web3ValueError("Cannot set 'data' field in transact transaction") if self.address is not None: transact_transaction.setdefault("to", self.address) if self.w3.eth.default_account is not empty: transact_transaction.setdefault( "from", cast(ChecksumAddress, self.w3.eth.default_account) ) if "to" not in transact_transaction: if isinstance(self, type): raise Web3ValueError( "When using `Contract.transact` from a contract factory you " "must provide a `to` address with the transaction" ) else: raise Web3ValueError( "Please ensure that this contract instance has an address." 
) return transact_transaction def _estimate_gas(self, transaction: TxParams | None = None) -> TxParams: if transaction is None: estimate_gas_transaction: TxParams = {} else: estimate_gas_transaction = cast(TxParams, dict(**transaction)) if "data" in estimate_gas_transaction: raise Web3ValueError("Cannot set 'data' field in estimate_gas transaction") if "to" in estimate_gas_transaction: raise Web3ValueError("Cannot set to in estimate_gas transaction") if self.address: estimate_gas_transaction.setdefault("to", self.address) if self.w3.eth.default_account is not empty: estimate_gas_transaction.setdefault( "from", cast(ChecksumAddress, self.w3.eth.default_account) ) if "to" not in estimate_gas_transaction: if isinstance(self, type): raise Web3ValueError( "When using `Contract.estimate_gas` from a contract factory " "you must provide a `to` address with the transaction" ) else: raise Web3ValueError( "Please ensure that this contract instance has an address." ) return estimate_gas_transaction def _build_transaction(self, transaction: TxParams | None = None) -> TxParams: if transaction is None: built_transaction: TxParams = {} else: built_transaction = cast(TxParams, dict(**transaction)) if "data" in built_transaction: raise Web3ValueError("Cannot set 'data' field in build transaction") if not self.address and "to" not in built_transaction: raise Web3ValueError( "When using `ContractFunction.build_transaction` from a contract " "factory you must provide a `to` address with the transaction" ) if self.address and "to" in built_transaction: raise Web3ValueError( "Cannot set 'to' field in contract call build transaction" ) if self.address: built_transaction.setdefault("to", self.address) if "to" not in built_transaction: raise Web3ValueError( "Please ensure that this contract instance has an address." 
) return built_transaction @combomethod def _encode_transaction_data(cls) -> HexStr: return add_0x_prefix(encode_abi(cls.w3, cls.abi, cls.arguments, cls.selector)) _return_data_normalizers: tuple[Callable[..., Any], ...] | None = tuple() def __repr__(self) -> str: if self.abi: _repr = f"<Function {abi_to_signature(self.abi)}" if self.arguments is not None: _repr += f" bound to {self.arguments!r}" return _repr + ">" return f"<Function {get_abi_element_signature(self.abi_element_identifier)}>" def __call__(self, *args: Any, **kwargs: Any) -> Self: # When a function is called, check arguments to obtain the correct function # in the contract. self will be used if all args and kwargs are # encodable to self.abi, otherwise the correct function is obtained from # the contract. if ( self.abi_element_identifier in [FallbackFn, ReceiveFn] or self.abi_element_identifier == "constructor" ): return copy_contract_function(self, *args, **kwargs) all_functions = cast( list[ABIFunction], filter_abi_by_type( "function", self.contract_abi, ), ) # Filter functions by name to obtain function signatures function_name = get_name_from_abi_element_identifier( self.abi_element_identifier ) function_abis = [ function for function in all_functions if function["name"] == function_name ] num_args = len(args) + len(kwargs) function_abis_with_arg_count = cast( list[ABIFunction], _filter_by_argument_count( num_args, function_abis, ), ) if not len(function_abis_with_arg_count): # Build an ABI without arguments to determine if one exists function_abis_with_arg_count = [ ABIFunction({"type": "function", "name": function_name}) ] function_abi_matches = [] contract_function = None for abi in function_abis_with_arg_count: try: # Search for a function ABI that matches the arguments used function_abi_matches.append( cast( ABIFunction, get_abi_element( function_abis, abi_to_signature(abi), *args, abi_codec=self.w3.codec, **kwargs, ), ) ) except MismatchedABI: # ignore exceptions continue if 
len(function_abi_matches) == 1: function_abi = function_abi_matches[0] if abi_to_signature(self.abi) == abi_to_signature(function_abi): contract_function = self else: # Found a match that is not self contract_function = self.__class__.factory( abi_to_signature(function_abi), w3=self.w3, contract_abi=self.contract_abi, address=self.address, abi_element_identifier=abi_to_signature(function_abi), abi=function_abi, ) else: for abi in function_abi_matches: if abi_to_signature(self.abi) == abi_to_signature(abi): contract_function = self break else: # Raise exception if multiple found raise MismatchedABI( _mismatched_abi_error_diagnosis( function_name, self.contract_abi, len(function_abi_matches), num_args, *args, abi_codec=self.w3.codec, **kwargs, ) ) return copy_contract_function(contract_function, *args, **kwargs) @classmethod def factory(cls, class_name: str, **kwargs: Any) -> Self: return PropertyCheckingFactory(class_name, (cls,), kwargs)() def call( self, transaction: TxParams | None = None, block_identifier: BlockIdentifier | None = None, state_override: StateOverride | None = None, ccip_read_enabled: bool | None = None, ) -> Any: """ Implementation of ``call`` should create a callable contract function and execute it using the `eth_call` interface. """ raise NotImplementedError( "This method should be implemented in the inherited class" )
BaseContractFunction
python
apache__airflow
airflow-core/tests/unit/always/test_project_structure.py
{ "start": 41025, "end": 41568 }
class ____: def test_no_illegal_suffixes(self): illegal_suffixes = ["_operator.py", "_hook.py", "_sensor.py"] files = itertools.chain.from_iterable( glob.glob(f"{AIRFLOW_ROOT_PATH}/{part}/providers/**/{resource_type}/*.py", recursive=True) for resource_type in ["operators", "hooks", "sensors", "example_dags"] for part in ["airflow", "tests"] ) invalid_files = [f for f in files if f.endswith(tuple(illegal_suffixes))] assert invalid_files == []
TestOperatorsHooks
python
zarr-developers__zarr-python
src/zarr/codecs/numcodecs/_codecs.py
{ "start": 8686, "end": 9356 }
class ____(_NumcodecsArrayArrayCodec, codec_name="fixedscaleoffset"): def resolve_metadata(self, chunk_spec: ArraySpec) -> ArraySpec: if astype := self.codec_config.get("astype"): dtype = parse_dtype(np.dtype(astype), zarr_format=3) # type: ignore[call-overload] return replace(chunk_spec, dtype=dtype) return chunk_spec def evolve_from_array_spec(self, array_spec: ArraySpec) -> FixedScaleOffset: if self.codec_config.get("dtype") is None: dtype = array_spec.dtype.to_native_dtype() return FixedScaleOffset(**{**self.codec_config, "dtype": str(dtype)}) return self
FixedScaleOffset
python
great-expectations__great_expectations
great_expectations/expectations/metrics/map_metric_provider/multicolumn_map_metric_provider.py
{ "start": 598, "end": 2808 }
class ____(MapMetricProvider): """Defines metrics that are evaluated for every row for a set of columns. All multi-column metrics require the domain key `column_list`. `expect_compound_columns_to_be_unique` is an example of an Expectation that uses this metric. """ # noqa: E501 # FIXME CoP condition_domain_keys: Tuple[str, ...] = ( "batch_id", "table", "column_list", "row_condition", "condition_parser", "ignore_row_if", ) function_domain_keys = ( "batch_id", "table", "column_list", "row_condition", "condition_parser", "ignore_row_if", ) condition_value_keys = tuple() function_value_keys = tuple() @classmethod @override def _get_evaluation_dependencies( cls, metric: MetricConfiguration, configuration: Optional[ExpectationConfiguration] = None, execution_engine: Optional[ExecutionEngine] = None, runtime_configuration: Optional[dict] = None, ): dependencies: dict = super()._get_evaluation_dependencies( metric=metric, configuration=configuration, execution_engine=execution_engine, runtime_configuration=runtime_configuration, ) table_domain_kwargs: dict = { k: v for k, v in metric.metric_domain_kwargs.items() if k not in ["column_list", "ignore_row_if"] } dependencies["table.column_types"] = MetricConfiguration( metric_name="table.column_types", metric_domain_kwargs=table_domain_kwargs, metric_value_kwargs={ "include_nested": True, }, ) dependencies["table.columns"] = MetricConfiguration( metric_name="table.columns", metric_domain_kwargs=table_domain_kwargs, metric_value_kwargs=None, ) dependencies["table.row_count"] = MetricConfiguration( metric_name="table.row_count", metric_domain_kwargs=table_domain_kwargs, metric_value_kwargs=None, ) return dependencies
MulticolumnMapMetricProvider
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py
{ "start": 8517, "end": 10269 }
class ____( linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest): """Most tests done in the base class LinearOperatorDerivedClassTest.""" def operator_and_matrix( self, build_info, dtype, use_placeholder, ensure_self_adjoint_and_pd=False): del ensure_self_adjoint_and_pd shape = list(build_info.shape) matrix = linear_operator_test_util.random_normal(shape, dtype=dtype) lin_op_matrix = matrix if use_placeholder: lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None) operator = linalg.LinearOperatorFullMatrix(lin_op_matrix, is_square=True) return operator, matrix def test_is_x_flags(self): matrix = [[3., 2., 1.], [1., 1., 1.]] operator = linalg.LinearOperatorFullMatrix( matrix, is_self_adjoint=False) self.assertEqual(operator.is_positive_definite, None) self.assertEqual(operator.is_non_singular, None) self.assertFalse(operator.is_self_adjoint) self.assertFalse(operator.is_square) def test_matrix_must_have_at_least_two_dims_or_raises(self): with self.assertRaisesRegex(ValueError, "at least 2 dimensions"): linalg.LinearOperatorFullMatrix([1.]) def test_tape_safe(self): matrix = variables_module.Variable([[2., 1.]]) operator = linalg.LinearOperatorFullMatrix(matrix) self.check_tape_safe(operator) if __name__ == "__main__": config.enable_tensor_float_32_execution(False) linear_operator_test_util.add_tests(SquareLinearOperatorFullMatrixTest) linear_operator_test_util.add_tests(NonSquareLinearOperatorFullMatrixTest) linear_operator_test_util.add_tests( SquareLinearOperatorFullMatrixSymmetricPositiveDefiniteTest) test.main()
NonSquareLinearOperatorFullMatrixTest
python
pandas-dev__pandas
pandas/core/indexes/multi.py
{ "start": 3251, "end": 3580 }
class ____(libindex.BaseMultiIndexCodesEngine, libindex.UInt16Engine): """Manages a MultiIndex by mapping label combinations to positive integers. The number of possible label combinations must not overflow the 16 bits integers. """ _base = libindex.UInt16Engine _codes_dtype = "uint16"
MultiIndexUInt16Engine
python
pytorch__pytorch
torch/testing/_internal/common_device_type.py
{ "start": 49837, "end": 50007 }
class ____(skipIf): def __init__(self, dep, reason): super().__init__(dep, reason, device_type="cpu") # Skips a test on CUDA if the condition is true.
skipCPUIf
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_simple05.py
{ "start": 315, "end": 2672 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("simple05.xlsx") def test_create_file(self): """Test font formatting.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.set_row(5, 18) worksheet.set_row(6, 18) format1 = workbook.add_format({"bold": 1}) format2 = workbook.add_format({"italic": 1}) format3 = workbook.add_format({"bold": 1, "italic": 1}) format4 = workbook.add_format({"underline": 1}) format5 = workbook.add_format({"font_strikeout": 1}) format6 = workbook.add_format({"font_script": 1}) format7 = workbook.add_format({"font_script": 2}) worksheet.write_string(0, 0, "Foo", format1) worksheet.write_string(1, 0, "Foo", format2) worksheet.write_string(2, 0, "Foo", format3) worksheet.write_string(3, 0, "Foo", format4) worksheet.write_string(4, 0, "Foo", format5) worksheet.write_string(5, 0, "Foo", format6) worksheet.write_string(6, 0, "Foo", format7) workbook.close() self.assertExcelEqual() def test_create_file_in_memory(self): """Test font formatting.""" workbook = Workbook(self.got_filename, {"in_memory": True}) worksheet = workbook.add_worksheet() worksheet.set_row(5, 18) worksheet.set_row(6, 18) format1 = workbook.add_format({"bold": 1}) format2 = workbook.add_format({"italic": 1}) format3 = workbook.add_format({"bold": 1, "italic": 1}) format4 = workbook.add_format({"underline": 1}) format5 = workbook.add_format({"font_strikeout": 1}) format6 = workbook.add_format({"font_script": 1}) format7 = workbook.add_format({"font_script": 2}) worksheet.write_string(0, 0, "Foo", format1) worksheet.write_string(1, 0, "Foo", format2) worksheet.write_string(2, 0, "Foo", format3) worksheet.write_string(3, 0, "Foo", format4) worksheet.write_string(4, 0, "Foo", format5) worksheet.write_string(5, 0, "Foo", format6) worksheet.write_string(6, 0, "Foo", format7) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
numpy__numpy
numpy/distutils/cpuinfo.py
{ "start": 2788, "end": 8761 }
class ____(CPUInfoBase): info = None def __init__(self): if self.info is not None: return info = [ {} ] ok, output = getoutput('uname -m') if ok: info[0]['uname_m'] = output.strip() try: fo = open('/proc/cpuinfo') except OSError as e: warnings.warn(str(e), UserWarning, stacklevel=2) else: for line in fo: name_value = [s.strip() for s in line.split(':', 1)] if len(name_value) != 2: continue name, value = name_value if not info or name in info[-1]: # next processor info.append({}) info[-1][name] = value fo.close() self.__class__.info = info def _not_impl(self): pass # Athlon def _is_AMD(self): return self.info[0]['vendor_id']=='AuthenticAMD' def _is_AthlonK6_2(self): return self._is_AMD() and self.info[0]['model'] == '2' def _is_AthlonK6_3(self): return self._is_AMD() and self.info[0]['model'] == '3' def _is_AthlonK6(self): return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None def _is_AthlonK7(self): return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None def _is_AthlonMP(self): return re.match(r'.*?Athlon\(tm\) MP\b', self.info[0]['model name']) is not None def _is_AMD64(self): return self.is_AMD() and self.info[0]['family'] == '15' def _is_Athlon64(self): return re.match(r'.*?Athlon\(tm\) 64\b', self.info[0]['model name']) is not None def _is_AthlonHX(self): return re.match(r'.*?Athlon HX\b', self.info[0]['model name']) is not None def _is_Opteron(self): return re.match(r'.*?Opteron\b', self.info[0]['model name']) is not None def _is_Hammer(self): return re.match(r'.*?Hammer\b', self.info[0]['model name']) is not None # Alpha def _is_Alpha(self): return self.info[0]['cpu']=='Alpha' def _is_EV4(self): return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' def _is_EV5(self): return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' def _is_EV56(self): return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' def _is_PCA56(self): return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' # Intel #XXX _is_i386 = 
_not_impl def _is_Intel(self): return self.info[0]['vendor_id']=='GenuineIntel' def _is_i486(self): return self.info[0]['cpu']=='i486' def _is_i586(self): return self.is_Intel() and self.info[0]['cpu family'] == '5' def _is_i686(self): return self.is_Intel() and self.info[0]['cpu family'] == '6' def _is_Celeron(self): return re.match(r'.*?Celeron', self.info[0]['model name']) is not None def _is_Pentium(self): return re.match(r'.*?Pentium', self.info[0]['model name']) is not None def _is_PentiumII(self): return re.match(r'.*?Pentium.*?II\b', self.info[0]['model name']) is not None def _is_PentiumPro(self): return re.match(r'.*?PentiumPro\b', self.info[0]['model name']) is not None def _is_PentiumMMX(self): return re.match(r'.*?Pentium.*?MMX\b', self.info[0]['model name']) is not None def _is_PentiumIII(self): return re.match(r'.*?Pentium.*?III\b', self.info[0]['model name']) is not None def _is_PentiumIV(self): return re.match(r'.*?Pentium.*?(IV|4)\b', self.info[0]['model name']) is not None def _is_PentiumM(self): return re.match(r'.*?Pentium.*?M\b', self.info[0]['model name']) is not None def _is_Prescott(self): return self.is_PentiumIV() and self.has_sse3() def _is_Nocona(self): return (self.is_Intel() and (self.info[0]['cpu family'] == '6' or self.info[0]['cpu family'] == '15') and (self.has_sse3() and not self.has_ssse3()) and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None) def _is_Core2(self): return (self.is_64bit() and self.is_Intel() and re.match(r'.*?Core\(TM\)2\b', self.info[0]['model name']) is not None) def _is_Itanium(self): return re.match(r'.*?Itanium\b', self.info[0]['family']) is not None def _is_XEON(self): return re.match(r'.*?XEON\b', self.info[0]['model name'], re.IGNORECASE) is not None _is_Xeon = _is_XEON # Varia def _is_singleCPU(self): return len(self.info) == 1 def _getNCPUs(self): return len(self.info) def _has_fdiv_bug(self): return self.info[0]['fdiv_bug']=='yes' def _has_f00f_bug(self): return 
self.info[0]['f00f_bug']=='yes' def _has_mmx(self): return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None def _has_sse(self): return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None def _has_sse2(self): return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None def _has_sse3(self): return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None def _has_ssse3(self): return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None def _has_3dnow(self): return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None def _has_3dnowext(self): return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None
LinuxCPUInfo
python
pandas-dev__pandas
pandas/io/formats/format.py
{ "start": 4972, "end": 12397 }
class ____: """ Implement the main logic of Series.to_string, which underlies Series.__repr__. """ def __init__( self, series: Series, *, length: bool | str = True, header: bool = True, index: bool = True, na_rep: str = "NaN", name: bool = False, float_format: str | None = None, dtype: bool = True, max_rows: int | None = None, min_rows: int | None = None, ) -> None: self.series = series self.buf = StringIO() self.name = name self.na_rep = na_rep self.header = header self.length = length self.index = index self.max_rows = max_rows self.min_rows = min_rows if float_format is None: float_format = get_option("display.float_format") self.float_format = float_format self.dtype = dtype self.adj = printing.get_adjustment() self._chk_truncate() def _chk_truncate(self) -> None: self.tr_row_num: int | None min_rows = self.min_rows max_rows = self.max_rows # truncation determined by max_rows, actual truncated number of rows # used below by min_rows is_truncated_vertically = max_rows and (len(self.series) > max_rows) series = self.series if is_truncated_vertically: max_rows = cast(int, max_rows) if min_rows: # if min_rows is set (not None or 0), set max_rows to minimum # of both max_rows = min(min_rows, max_rows) if max_rows == 1: row_num = max_rows series = series.iloc[:max_rows] else: row_num = max_rows // 2 _len = len(series) _slice = np.hstack( [np.arange(row_num), np.arange(_len - row_num, _len)] ) series = series.iloc[_slice] self.tr_row_num = row_num else: self.tr_row_num = None self.tr_series = series self.is_truncated_vertically = is_truncated_vertically def _get_footer(self) -> str: name = self.series.name footer = "" index = self.series.index if ( isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)) and index.freq is not None ): footer += f"Freq: {index.freqstr}" if self.name is not False and name is not None: if footer: footer += ", " series_name = printing.pprint_thing(name, escape_chars=("\t", "\r", "\n")) footer += f"Name: {series_name}" if self.length 
is True or ( self.length == "truncate" and self.is_truncated_vertically ): if footer: footer += ", " footer += f"Length: {len(self.series)}" if self.dtype is not False and self.dtype is not None: dtype_name = getattr(self.tr_series.dtype, "name", None) if dtype_name: if footer: footer += ", " footer += f"dtype: {printing.pprint_thing(dtype_name)}" # level infos are added to the end and in a new line, like it is done # for Categoricals if isinstance(self.tr_series.dtype, CategoricalDtype): level_info = self.tr_series._values._get_repr_footer() if footer: footer += "\n" footer += level_info return str(footer) def _get_formatted_values(self) -> list[str]: return format_array( self.tr_series._values, None, float_format=self.float_format, na_rep=self.na_rep, leading_space=self.index, ) def to_string(self) -> str: series = self.tr_series footer = self._get_footer() if len(series) == 0: return f"{type(self.series).__name__}([], {footer})" index = series.index have_header = _has_names(index) if isinstance(index, MultiIndex): fmt_index = index._format_multi(include_names=True, sparsify=None) adj = printing.get_adjustment() fmt_index = adj.adjoin(2, *fmt_index).split("\n") else: fmt_index = index._format_flat(include_name=True) fmt_values = self._get_formatted_values() if self.is_truncated_vertically: n_header_rows = 0 row_num = self.tr_row_num row_num = cast(int, row_num) width = self.adj.len(fmt_values[row_num - 1]) if width > 3: dot_str = "..." else: dot_str = ".." 
# Series uses mode=center because it has single value columns # DataFrame uses mode=left dot_str = self.adj.justify([dot_str], width, mode="center")[0] fmt_values.insert(row_num + n_header_rows, dot_str) fmt_index.insert(row_num + 1, "") if self.index: result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values]) else: result = self.adj.adjoin(3, fmt_values) if self.header and have_header: result = fmt_index[0] + "\n" + result if footer: result += "\n" + footer return str("".join(result)) def get_dataframe_repr_params() -> dict[str, Any]: """Get the parameters used to repr(dataFrame) calls using DataFrame.to_string. Supplying these parameters to DataFrame.to_string is equivalent to calling ``repr(DataFrame)``. This is useful if you want to adjust the repr output. Example ------- >>> import pandas as pd >>> >>> df = pd.DataFrame([[1, 2], [3, 4]]) >>> repr_params = pd.io.formats.format.get_dataframe_repr_params() >>> repr(df) == df.to_string(**repr_params) True """ from pandas.io.formats import console if get_option("display.expand_frame_repr"): line_width, _ = console.get_console_size() else: line_width = None return { "max_rows": get_option("display.max_rows"), "min_rows": get_option("display.min_rows"), "max_cols": get_option("display.max_columns"), "max_colwidth": get_option("display.max_colwidth"), "show_dimensions": get_option("display.show_dimensions"), "line_width": line_width, } def get_series_repr_params() -> dict[str, Any]: """Get the parameters used to repr(Series) calls using Series.to_string. Supplying these parameters to Series.to_string is equivalent to calling ``repr(series)``. This is useful if you want to adjust the series repr output. 
Example ------- >>> import pandas as pd >>> >>> ser = pd.Series([1, 2, 3, 4]) >>> repr_params = pd.io.formats.format.get_series_repr_params() >>> repr(ser) == ser.to_string(**repr_params) True """ width, height = get_terminal_size() max_rows_opt = get_option("display.max_rows") max_rows = height if max_rows_opt == 0 else max_rows_opt min_rows = height if max_rows_opt == 0 else get_option("display.min_rows") return { "name": True, "dtype": True, "min_rows": min_rows, "max_rows": max_rows, "length": get_option("display.show_dimensions"), }
SeriesFormatter
python
google__pytype
pytype/imports/typeshed.py
{ "start": 2923, "end": 4473 }
class ____(TypeshedFs): """Typeshed installation that ships with pytype.""" def get_root(self): return pytype_source_utils.get_full_path("typeshed") def _list_files(self, relpath): """Lists files recursively in a basedir relative to typeshed root.""" return pytype_source_utils.list_pytype_files( path_utils.join("typeshed", relpath) ) def list_files(self, relpath): return list(self._list_files(relpath)) def file_exists(self, relpath): try: # For a non-par pytype installation, load_text_file will either succeed, # raise FileNotFoundError, or raise IsADirectoryError. # For a par installation, load_text_file will raise FileNotFoundError for # both a nonexistent file and a directory. pytype_source_utils.load_text_file(path_utils.join("typeshed", relpath)) except FileNotFoundError: try: # For a non-par installation, we know at this point that relpath does # not exist, so _list_files will always raise NoSuchDirectory. For a par # installation, we use _list_files to check whether the directory # exists; a non-existent directory will produce an empty generator. next(self._list_files(relpath)) except (pytype_source_utils.NoSuchDirectory, StopIteration): return False except IsADirectoryError: return True return True def load_file(self, relpath) -> tuple[str, str]: filepath = self.filepath(relpath) return relpath, pytype_source_utils.load_text_file(filepath)
InternalTypeshedFs
python
ray-project__ray
python/ray/llm/_internal/batch/stages/vllm_engine_stage.py
{ "start": 1162, "end": 2017 }
class ____(BaseModel): """A request to the vLLM engine.""" # The request ID for the LLM engine (unique per replica). request_id: int # The index of the request in the batch. idx_in_batch: int # The full prompt string (with chat template applied if any). prompt: str # The images inputs for the multimodal model. Use Any to avoid importing PIL. images: List[Any] # The tokenized prompt IDs. If None, then the string prompt will be # tokenized by the LLM engine. This is not recommended for performance reasons. prompt_token_ids: Optional[List[int]] # The sampling or pooling parameters. Use Any to avoid importing vLLM. params: Any # LoRA request. lora_request: Optional[Any] = None class Config: validate_assignment = True arbitrary_types_allowed = True
vLLMEngineRequest
python
kamyu104__LeetCode-Solutions
Python/minimum-number-of-valid-strings-to-form-target-ii.py
{ "start": 3786, "end": 5088 }
class ____(object): def minValidStrings(self, words, target): """ :type words: List[str] :type target: str :rtype: int """ def getPrefix(pattern): prefix = [-1]*len(pattern) j = -1 for i in xrange(1, len(pattern)): while j+1 > 0 and pattern[j+1] != pattern[i]: j = prefix[j] if pattern[j+1] == pattern[i]: j += 1 prefix[i] = j return prefix def KMP(text, pattern, callback): prefix = getPrefix(pattern) j = -1 for i in xrange(len(text)): while j+1 > 0 and pattern[j+1] != text[i]: j = prefix[j] if pattern[j+1] == text[i]: j += 1 callback(i, j) if j+1 == len(pattern): j = prefix[j] def update(i, j): lookup[i] = max(lookup[i], j+1) lookup = [0]*len(target) for w in words: KMP(target, w, update) dp = [0]*(len(target)+1) for i in xrange(len(target)): if not lookup[i]: return -1 dp[i+1] = dp[(i-lookup[i])+1]+1 return dp[-1]
Solution3
python
sqlalchemy__sqlalchemy
test/sql/test_selectable.py
{ "start": 117458, "end": 127637 }
class ____(AssertsCompiledSQL, fixtures.TestBase): def _assert_result_keys(self, s, keys): compiled = s.compile() eq_(set(compiled._create_result_map()), set(keys)) def _assert_subq_result_keys(self, s, keys): compiled = s.subquery().select().compile() eq_(set(compiled._create_result_map()), set(keys)) def _names_overlap(self): m = MetaData() t1 = Table("t1", m, Column("x", Integer)) t2 = Table("t2", m, Column("x", Integer)) return select(t1, t2).set_label_style(LABEL_STYLE_NONE) def test_names_overlap_nolabel(self): sel = self._names_overlap() self._assert_result_keys(sel, ["x"]) self._assert_subq_result_keys(sel, ["x", "x_1"]) eq_(sel.selected_columns.keys(), ["x", "x"]) def test_names_overlap_label(self): sel = self._names_overlap().set_label_style( LABEL_STYLE_TABLENAME_PLUS_COL ) eq_(sel.selected_columns.keys(), ["t1_x", "t2_x"]) eq_(list(sel.selected_columns.keys()), ["t1_x", "t2_x"]) eq_(list(sel.subquery().c.keys()), ["t1_x", "t2_x"]) self._assert_result_keys(sel, ["t1_x", "t2_x"]) def _names_overlap_keys_dont(self): m = MetaData() t1 = Table("t1", m, Column("x", Integer, key="a")) t2 = Table("t2", m, Column("x", Integer, key="b")) return select(t1, t2).set_label_style(LABEL_STYLE_NONE) def test_names_overlap_keys_dont_nolabel(self): sel = self._names_overlap_keys_dont() eq_(sel.selected_columns.keys(), ["a", "b"]) eq_(list(sel.selected_columns.keys()), ["a", "b"]) eq_(list(sel.subquery().c.keys()), ["a", "b"]) self._assert_result_keys(sel, ["x"]) def test_names_overlap_keys_dont_label(self): sel = self._names_overlap_keys_dont().set_label_style( LABEL_STYLE_TABLENAME_PLUS_COL ) eq_(sel.selected_columns.keys(), ["t1_a", "t2_b"]) eq_(list(sel.selected_columns.keys()), ["t1_a", "t2_b"]) eq_(list(sel.subquery().c.keys()), ["t1_a", "t2_b"]) self._assert_result_keys(sel, ["t1_x", "t2_x"]) def _columns_repeated(self): m = MetaData() t1 = Table("t1", m, Column("x", Integer), Column("y", Integer)) return select(t1.c.x, t1.c.y, 
t1.c.x).set_label_style(LABEL_STYLE_NONE) def test_element_repeated_nolabels(self): sel = self._columns_repeated().set_label_style(LABEL_STYLE_NONE) eq_(sel.selected_columns.keys(), ["x", "y", "x"]) eq_(list(sel.selected_columns.keys()), ["x", "y", "x"]) eq_(list(sel.subquery().c.keys()), ["x", "y", "x_1"]) self._assert_result_keys(sel, ["x", "y"]) def test_element_repeated_disambiguate(self): sel = self._columns_repeated().set_label_style( LABEL_STYLE_DISAMBIGUATE_ONLY ) eq_(sel.selected_columns.keys(), ["x", "y", "x_1"]) eq_(list(sel.selected_columns.keys()), ["x", "y", "x_1"]) eq_(list(sel.subquery().c.keys()), ["x", "y", "x_1"]) self._assert_result_keys(sel, ["x", "y", "x__1"]) def test_element_repeated_labels(self): sel = self._columns_repeated().set_label_style( LABEL_STYLE_TABLENAME_PLUS_COL ) eq_(sel.selected_columns.keys(), ["t1_x", "t1_y", "t1_x_1"]) eq_(list(sel.selected_columns.keys()), ["t1_x", "t1_y", "t1_x_1"]) eq_(list(sel.subquery().c.keys()), ["t1_x", "t1_y", "t1_x_1"]) self._assert_result_keys(sel, ["t1_x__1", "t1_x", "t1_y"]) def _columns_repeated_identity(self): m = MetaData() t1 = Table("t1", m, Column("x", Integer), Column("y", Integer)) return select(t1.c.x, t1.c.y, t1.c.x, t1.c.x, t1.c.x).set_label_style( LABEL_STYLE_NONE ) def _anon_columns_repeated_identity_one(self): m = MetaData() t1 = Table("t1", m, Column("x", Integer), Column("y", Integer)) return select(t1.c.x, null(), null(), null()).set_label_style( LABEL_STYLE_NONE ) def _anon_columns_repeated_identity_two(self): fn = func.now() return select(fn, fn, fn, fn).set_label_style(LABEL_STYLE_NONE) def test_columns_repeated_identity_disambiguate(self): """test #7153""" sel = self._columns_repeated_identity().set_label_style( LABEL_STYLE_DISAMBIGUATE_ONLY ) self.assert_compile( sel, "SELECT t1.x, t1.y, t1.x AS x__1, t1.x AS x__2, " "t1.x AS x__3 FROM t1", ) def test_columns_repeated_identity_subquery_disambiguate(self): """test #7153""" sel = self._columns_repeated_identity() stmt = 
select(sel.subquery()).set_label_style( LABEL_STYLE_DISAMBIGUATE_ONLY ) # databases like MySQL won't allow the subquery to have repeated labels # even if we don't try to access them self.assert_compile( stmt, "SELECT anon_1.x, anon_1.y, anon_1.x AS x_1, anon_1.x AS x_2, " "anon_1.x AS x_3 FROM " "(SELECT t1.x AS x, t1.y AS y, t1.x AS x__1, t1.x AS x__2, " "t1.x AS x__3 FROM t1) AS anon_1", ) def _labels_overlap(self): m = MetaData() t1 = Table("t", m, Column("x_id", Integer)) t2 = Table("t_x", m, Column("id", Integer)) return select(t1, t2) def test_labels_overlap_nolabel(self): sel = self._labels_overlap() eq_(sel.selected_columns.keys(), ["x_id", "id"]) eq_(list(sel.selected_columns.keys()), ["x_id", "id"]) eq_(list(sel.subquery().c.keys()), ["x_id", "id"]) self._assert_result_keys(sel, ["x_id", "id"]) def test_labels_overlap_label(self): sel = self._labels_overlap().set_label_style( LABEL_STYLE_TABLENAME_PLUS_COL ) eq_( list(sel.selected_columns.keys()), ["t_x_id", "t_x_id_1"], ) eq_( list(sel.subquery().c.keys()), ["t_x_id", "t_x_id_1"], # ["t_x_id", "t_x_id"] # if we turn off deduping entirely, ) self._assert_result_keys(sel, ["t_x_id", "t_x_id_1"]) self._assert_subq_result_keys(sel, ["t_x_id", "t_x_id_1"]) def _labels_overlap_keylabels_dont(self): m = MetaData() t1 = Table("t", m, Column("x_id", Integer, key="a")) t2 = Table("t_x", m, Column("id", Integer, key="b")) return select(t1, t2) def test_labels_overlap_keylabels_dont_nolabel(self): sel = self._labels_overlap_keylabels_dont() eq_(list(sel.selected_columns.keys()), ["a", "b"]) eq_(list(sel.subquery().c.keys()), ["a", "b"]) self._assert_result_keys(sel, ["x_id", "id"]) def test_labels_overlap_keylabels_dont_label(self): sel = self._labels_overlap_keylabels_dont().set_label_style( LABEL_STYLE_TABLENAME_PLUS_COL ) eq_(list(sel.selected_columns.keys()), ["t_a", "t_x_b"]) eq_(list(sel.subquery().c.keys()), ["t_a", "t_x_b"]) self._assert_result_keys(sel, ["t_x_id", "t_x_id_1"]) def 
_keylabels_overlap_labels_dont(self): m = MetaData() t1 = Table("t", m, Column("a", Integer, key="x_id")) t2 = Table("t_x", m, Column("b", Integer, key="id")) return select(t1, t2) def test_keylabels_overlap_labels_dont_nolabel(self): sel = self._keylabels_overlap_labels_dont() eq_(list(sel.selected_columns.keys()), ["x_id", "id"]) eq_(list(sel.subquery().c.keys()), ["x_id", "id"]) self._assert_result_keys(sel, ["a", "b"]) def test_keylabels_overlap_labels_dont_label(self): sel = self._keylabels_overlap_labels_dont().set_label_style( LABEL_STYLE_TABLENAME_PLUS_COL ) eq_( list(sel.selected_columns.keys()), ["t_x_id", "t_x_id_1"], ) eq_( list(sel.subquery().c.keys()), ["t_x_id", "t_x_id_1"], ) self._assert_result_keys(sel, ["t_a", "t_x_b"]) self._assert_subq_result_keys(sel, ["t_a", "t_x_b"]) def _keylabels_overlap_labels_overlap(self): m = MetaData() t1 = Table("t", m, Column("x_id", Integer, key="x_a")) t2 = Table("t_x", m, Column("id", Integer, key="a")) return select(t1, t2) def test_keylabels_overlap_labels_overlap_nolabel(self): sel = self._keylabels_overlap_labels_overlap() eq_(list(sel.selected_columns.keys()), ["x_a", "a"]) eq_(list(sel.subquery().c.keys()), ["x_a", "a"]) self._assert_result_keys(sel, ["x_id", "id"]) self._assert_subq_result_keys(sel, ["x_id", "id"]) def test_keylabels_overlap_labels_overlap_label(self): sel = self._keylabels_overlap_labels_overlap().set_label_style( LABEL_STYLE_TABLENAME_PLUS_COL ) eq_( list(sel.selected_columns.keys()), ["t_x_a", "t_x_a_1"], ) # deduping for different cols but same label eq_(list(sel.subquery().c.keys()), ["t_x_a", "t_x_a_1"]) # if we turn off deduping entirely # eq_(list(sel.subquery().c.keys()), ["t_x_a", "t_x_a"]) self._assert_result_keys(sel, ["t_x_id", "t_x_id_1"]) self._assert_subq_result_keys(sel, ["t_x_id", "t_x_id_1"]) def _keys_overlap_names_dont(self): m = MetaData() t1 = Table("t1", m, Column("a", Integer, key="x")) t2 = Table("t2", m, Column("b", Integer, key="x")) return select(t1, t2) def 
test_keys_overlap_names_dont_nolabel(self): sel = self._keys_overlap_names_dont() eq_(sel.selected_columns.keys(), ["x", "x_1"]) self._assert_result_keys(sel, ["a", "b"]) def test_keys_overlap_names_dont_label(self): sel = self._keys_overlap_names_dont().set_label_style( LABEL_STYLE_TABLENAME_PLUS_COL ) eq_(list(sel.selected_columns.keys()), ["t1_x", "t2_x"]) eq_(list(sel.subquery().c.keys()), ["t1_x", "t2_x"]) self._assert_result_keys(sel, ["t1_a", "t2_b"])
WithLabelsTest
python
sqlalchemy__sqlalchemy
test/typing/plain_files/orm/write_only.py
{ "start": 683, "end": 1561 }
class ____(Base): __tablename__ = "user" id: Mapped[int] = mapped_column(primary_key=True) addresses: WriteOnlyMapped[Address] = relationship() with Session() as session: u = User() session.add(u) session.commit() if typing.TYPE_CHECKING: assert_type(u.addresses, WriteOnlyCollection[Address]) address = session.scalars( u.addresses.select().filter(Address.email_address.like("xyz")) ).one() if typing.TYPE_CHECKING: assert_type(address, Address) u.addresses.add(Address()) u.addresses.add_all([Address(), Address()]) # this should emit an error, because __iter__ is NoReturn, # however typing tools don't pick up on that right now current_addresses = list(u.addresses) u.addresses.add(Address()) session.commit() # test #9985 stmt = select(User).join(User.addresses)
User
python
pypa__warehouse
tests/unit/test_search.py
{ "start": 383, "end": 10075 }
class ____: def test_no_terms(self): opensearch = Search() query = queries.get_opensearch_query(opensearch, "", "", []) assert query.to_dict() == {"query": {"match_all": {}}} @pytest.mark.parametrize( ("terms", "expected_prefix", "expected_type"), [ ('"foo bar"', '"foo bar"', "phrase"), ('"a"', '"a"', "phrase"), ("foo bar", "foo bar", "best_fields"), ], ) def test_quoted_query(self, terms, expected_prefix, expected_type): opensearch = Search() query = queries.get_opensearch_query(opensearch, terms, "", []) assert query.to_dict() == { "query": { "bool": { "should": [ { "bool": { "must": [ { "multi_match": { "fields": EXPECTED_SEARCH_FIELDS, "query": ( "foo bar" if terms != '"a"' else "a" ), "type": expected_type, } }, ] } }, {"prefix": {"normalized_name": expected_prefix}}, ] } }, "suggest": {"name_suggestion": {"text": terms, "term": {"field": "name"}}}, } def test_single_not_quoted_character(self): opensearch = Search() terms = "a" query = queries.get_opensearch_query(opensearch, terms, "", []) assert query.to_dict() == { "query": { "bool": { "must": [ { "multi_match": { "fields": EXPECTED_SEARCH_FIELDS, "query": "a", "type": "best_fields", } }, ] } }, "suggest": {"name_suggestion": {"text": "a", "term": {"field": "name"}}}, } def test_mixed_quoted_query(self): opensearch = Search() terms = '"foo bar" baz' query = queries.get_opensearch_query(opensearch, terms, "", []) assert query.to_dict() == { "query": { "bool": { "should": [ { "bool": { "must": [ { "multi_match": { "fields": EXPECTED_SEARCH_FIELDS, "query": "foo bar", "type": "phrase", } }, { "multi_match": { "fields": EXPECTED_SEARCH_FIELDS, "query": "baz", "type": "best_fields", } }, ] } }, {"prefix": {"normalized_name": '"foo bar" baz'}}, ] } }, "suggest": { "name_suggestion": {"text": '"foo bar" baz', "term": {"field": "name"}} }, } @pytest.mark.parametrize(("order", "field"), [("created", "created")]) def test_sort_order(self, order, field): opensearch = Search() terms = "foo bar" query = 
queries.get_opensearch_query(opensearch, terms, order, []) assert query.to_dict() == { "query": { "bool": { "should": [ { "bool": { "must": [ { "multi_match": { "fields": EXPECTED_SEARCH_FIELDS, "query": terms, "type": "best_fields", } }, ] } }, {"prefix": {"normalized_name": terms}}, ] } }, "suggest": {"name_suggestion": {"text": terms, "term": {"field": "name"}}}, "sort": [ { field: { "order": "desc" if order.startswith("-") else "asc", "unmapped_type": "long", } } ], } def test_with_classifiers_with_terms(self): opensearch = Search() terms = "foo bar" classifiers = ["foo :: bar", "fiz :: buz"] query = queries.get_opensearch_query(opensearch, terms, "", classifiers) assert query.to_dict() == { "query": { "bool": { "should": [ { "bool": { "must": [ { "multi_match": { "fields": EXPECTED_SEARCH_FIELDS, "query": terms, "type": "best_fields", } }, { "bool": { "must": [ { "bool": { "should": [ { "term": { "classifiers": classifier # noqa } }, { "prefix": { "classifiers": classifier # noqa + " :: " } }, ] } } for classifier in classifiers ] } }, ] } }, {"prefix": {"normalized_name": terms}}, ] } }, "suggest": {"name_suggestion": {"text": terms, "term": {"field": "name"}}}, } def test_with_classifiers_with_no_terms(self): opensearch = Search() terms = "" classifiers = ["foo :: bar", "fiz :: buz"] query = queries.get_opensearch_query(opensearch, terms, "", classifiers) assert query.to_dict() == { "query": { "bool": { "must": [ { "bool": { "should": [ {"term": {"classifiers": classifier}}, {"prefix": {"classifiers": classifier + " :: "}}, ] } } for classifier in classifiers ] } } } def test_with_classifier_with_no_terms_and_order(self): opensearch = Search() terms = "" classifiers = ["foo :: bar"] query = queries.get_opensearch_query(opensearch, terms, "-created", classifiers) assert query.to_dict() == { "query": { "bool": { "must": [ { "bool": { "should": [ {"term": {"classifiers": "foo :: bar"}}, {"prefix": {"classifiers": "foo :: bar :: "}}, ] } } ] } }, "sort": 
[{"created": {"order": "desc", "unmapped_type": "long"}}], }
TestQueries
python
ray-project__ray
rllib/examples/envs/classes/look_and_push.py
{ "start": 1466, "end": 2121 }
class ____(gym.Wrapper): def __init__(self, env): super(OneHot, self).__init__(env) self.observation_space = gym.spaces.Box(0.0, 1.0, (env.observation_space.n,)) def reset(self, *, seed=None, options=None): obs, info = self.env.reset(seed=seed, options=options) return self._encode_obs(obs), info def step(self, action): obs, reward, terminated, truncated, info = self.env.step(action) return self._encode_obs(obs), reward, terminated, truncated, info def _encode_obs(self, obs): new_obs = np.ones(self.env.observation_space.n) new_obs[obs] = 1.0 return new_obs
OneHot
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/view_dir/package.py
{ "start": 228, "end": 626 }
class ____(Package): """Installs a <prefix>/bin/x where x is a dir, in contrast to view-file.""" has_code = False version("0.1.0") def install(self, spec, prefix): os.mkdir(os.path.join(prefix, "bin")) os.mkdir(os.path.join(prefix, "bin", "x")) with open(os.path.join(prefix, "bin", "x", "file_in_dir"), "wb") as f: f.write(b"hello world")
ViewDir
python
kamyu104__LeetCode-Solutions
Python/find-mode-in-binary-search-tree.py
{ "start": 29, "end": 905 }
class ____(object): def findMode(self, root): """ :type root: TreeNode :rtype: List[int] """ def inorder(root, prev, cnt, max_cnt, result): if not root: return prev, cnt, max_cnt prev, cnt, max_cnt = inorder(root.left, prev, cnt, max_cnt, result) if prev: if root.val == prev.val: cnt += 1 else: cnt = 1 if cnt > max_cnt: max_cnt = cnt del result[:] result.append(root.val) elif cnt == max_cnt: result.append(root.val) return inorder(root.right, root, cnt, max_cnt, result) if not root: return [] result = [] inorder(root, None, 1, 0, result) return result
Solution
python
ansible__ansible
lib/ansible/module_utils/yumdnf.py
{ "start": 2472, "end": 6854 }
class ____(metaclass=ABCMeta): """ Abstract class that handles the population of instance variables that should be identical between both YUM and DNF modules because of the feature parity and shared argument spec """ def __init__(self, module): self.module = module self.allow_downgrade = self.module.params['allow_downgrade'] self.allowerasing = self.module.params['allowerasing'] self.autoremove = self.module.params['autoremove'] self.best = self.module.params['best'] self.bugfix = self.module.params['bugfix'] self.cacheonly = self.module.params['cacheonly'] self.conf_file = self.module.params['conf_file'] self.disable_excludes = self.module.params['disable_excludes'] self.disable_gpg_check = self.module.params['disable_gpg_check'] self.disable_plugin = self.module.params['disable_plugin'] self.disablerepo = self.module.params.get('disablerepo', []) self.download_only = self.module.params['download_only'] self.download_dir = self.module.params['download_dir'] self.enable_plugin = self.module.params['enable_plugin'] self.enablerepo = self.module.params.get('enablerepo', []) self.exclude = self.module.params['exclude'] self.installroot = self.module.params['installroot'] self.install_weak_deps = self.module.params['install_weak_deps'] self.list = self.module.params['list'] self.names = [p.strip() for p in self.module.params['name']] self.nobest = self.module.params['nobest'] self.releasever = self.module.params['releasever'] self.security = self.module.params['security'] self.skip_broken = self.module.params['skip_broken'] self.state = self.module.params['state'] self.update_only = self.module.params['update_only'] self.update_cache = self.module.params['update_cache'] self.validate_certs = self.module.params['validate_certs'] self.sslverify = self.module.params['sslverify'] self.lock_timeout = self.module.params['lock_timeout'] # It's possible someone passed a comma separated string since it used # to be a string type, so we should handle that self.names = 
self.listify_comma_sep_strings_in_list(self.names) self.disablerepo = self.listify_comma_sep_strings_in_list(self.disablerepo) self.enablerepo = self.listify_comma_sep_strings_in_list(self.enablerepo) self.exclude = self.listify_comma_sep_strings_in_list(self.exclude) # Fail if someone passed a space separated string # https://github.com/ansible/ansible/issues/46301 for name in self.names: if ' ' in name and not any(spec in name for spec in ['@', '>', '<', '=']): module.fail_json( msg='It appears that a space separated string of packages was passed in ' 'as an argument. To operate on several packages, pass a comma separated ' 'string of packages or a list of packages.' ) # Sanity checking for autoremove if self.state is None: if self.autoremove: self.state = "absent" else: self.state = "present" if self.autoremove and (self.state != "absent"): self.module.fail_json( msg="Autoremove should be used alone or with state=absent", results=[], ) def listify_comma_sep_strings_in_list(self, some_list): """ method to accept a list of strings as the parameter, find any strings in that list that are comma separated, remove them from the list and add their comma separated elements to the original list """ new_list = [] remove_from_original_list = [] for element in some_list: if ',' in element: remove_from_original_list.append(element) new_list.extend([e.strip() for e in element.split(',')]) for element in remove_from_original_list: some_list.remove(element) some_list.extend(new_list) if some_list == [""]: return [] return some_list @abstractmethod def run(self): raise NotImplementedError
YumDnf
python
spack__spack
lib/spack/spack/bootstrap/environment.py
{ "start": 569, "end": 5576 }
class ____(spack.environment.Environment): """Environment to install dependencies of Spack for a given interpreter and architecture""" def __init__(self) -> None: if not self.spack_yaml().exists(): self._write_spack_yaml_file() super().__init__(self.environment_root()) # Remove python package roots created before python-venv was introduced for s in self.concrete_roots(): if "python" in s.package.extendees and not s.dependencies("python-venv"): self.deconcretize(s) @classmethod def spack_dev_requirements(cls) -> List[str]: """Spack development requirements""" return [ isort_root_spec(), mypy_root_spec(), black_root_spec(), flake8_root_spec(), pytest_root_spec(), ] @classmethod def environment_root(cls) -> pathlib.Path: """Environment root directory""" bootstrap_root_path = root_path() python_part = spec_for_current_python().replace("@", "") arch_part = spack.vendor.archspec.cpu.host().family interpreter_part = hashlib.md5(sys.exec_prefix.encode()).hexdigest()[:5] environment_dir = f"{python_part}-{arch_part}-{interpreter_part}" return pathlib.Path( spack.util.path.canonicalize_path( os.path.join(bootstrap_root_path, "environments", environment_dir) ) ) @classmethod def view_root(cls) -> pathlib.Path: """Location of the view""" return cls.environment_root().joinpath("view") @classmethod def bin_dir(cls) -> pathlib.Path: """Paths to be added to PATH""" return cls.view_root().joinpath("bin") def python_dirs(self) -> Iterable[pathlib.Path]: python = next(s for s in self.all_specs_generator() if s.name == "python-venv").package return {self.view_root().joinpath(p) for p in (python.platlib, python.purelib)} @classmethod def spack_yaml(cls) -> pathlib.Path: """Environment spack.yaml file""" return cls.environment_root().joinpath("spack.yaml") def update_installations(self) -> None: """Update the installations of this environment.""" log_enabled = tty.is_debug() or tty.is_verbose() with tty.SuppressOutput(msg_enabled=log_enabled, warn_enabled=log_enabled): specs = 
self.concretize() if specs: colorized_specs = [ spack.spec.Spec(x).cformat("{name}{@version}") for x in self.spack_dev_requirements() ] tty.msg(f"[BOOTSTRAPPING] Installing dependencies ({', '.join(colorized_specs)})") self.write(regenerate=False) with tty.SuppressOutput(msg_enabled=log_enabled, warn_enabled=log_enabled): self.install_all(fail_fast=True) self.write(regenerate=True) def load(self) -> None: """Update PATH and sys.path.""" # Make executables available (shouldn't need PYTHONPATH) os.environ["PATH"] = f"{self.bin_dir()}{os.pathsep}{os.environ.get('PATH', '')}" # Spack itself imports pytest sys.path.extend(str(p) for p in self.python_dirs()) def _write_spack_yaml_file(self) -> None: tty.msg( "[BOOTSTRAPPING] Spack has missing dependencies, creating a bootstrapping environment" ) env = spack.tengine.make_environment() template = env.get_template("bootstrap/spack.yaml") context = { "python_spec": f"{spec_for_current_python()}+ctypes", "python_prefix": sys.exec_prefix, "architecture": spack.vendor.archspec.cpu.host().family, "environment_path": self.environment_root(), "environment_specs": self.spack_dev_requirements(), "store_path": store_path(), } self.environment_root().mkdir(parents=True, exist_ok=True) self.spack_yaml().write_text(template.render(context), encoding="utf-8") def isort_root_spec() -> str: """Return the root spec used to bootstrap isort""" return _root_spec("py-isort@5") def mypy_root_spec() -> str: """Return the root spec used to bootstrap mypy""" return _root_spec("py-mypy@0.900: ^py-mypy-extensions@:1.0") def black_root_spec() -> str: """Return the root spec used to bootstrap black""" return _root_spec("py-black@:25.1.0") def flake8_root_spec() -> str: """Return the root spec used to bootstrap flake8""" return _root_spec("py-flake8@3.8.2:") def pytest_root_spec() -> str: """Return the root spec used to bootstrap flake8""" return _root_spec("py-pytest@6.2.4:") def ensure_environment_dependencies() -> None: """Ensure Spack dependencies 
from the bootstrap environment are installed and ready to use""" _add_externals_if_missing() with BootstrapEnvironment() as env: env.update_installations() env.load()
BootstrapEnvironment
python
joke2k__faker
faker/providers/address/hy_AM/__init__.py
{ "start": 74, "end": 15661 }
class ____(AddressProvider): city_formats = ("{{first_name}}",) city_prefixes = ("ք.",) city_suffixes = ("",) street_prefixes = ("փողոց", "պողոտա") street_suffixes = ("",) village_prefixes = ("գ.",) address_formats = ( "{{city_prefix}} {{city}}, {{street_name}} {{building_number}}", "{{city_prefix}} {{city}}, {{street_name}} {{building_number}}, {{secondary_address}}", "{{city_prefix}} {{city}}, {{postcode}}, {{street_name}} {{building_number}}", "{{city_prefix}} {{city}}, {{postcode}}, {{street_name}} {{building_number}}, {{secondary_address}}", "{{village_prefix}} {{village}}, {{state}}ի մարզ, {{postcode}}, {{street_name}} {{building_number}}", ) building_number_formats = ("#", "##", "###") postcode_formats = ("0###", "1###", "2###", "3###", "4###") secondary_address_formats = ("բն. #", "բն. ##", "բն. ##") street_address_formats = ("{{street_name}} {{building_number}}",) street_name_formats = ("{{street}}",) # Source: List of cities and towns in Armenia (Wikipedia) # https://en.wikipedia.org/wiki/List_of_cities_and_towns_in_Armenia cities = ( "Աբովյան", "Ագարակ", "Ալավերդի", "Ախթալա", "Այրում", "Աշտարակ", "Ապարան", "Արարատ", "Արթիկ", "Արմավիր", "Արտաշատ", "Բերդ", "Բյուրեղավան", "Գավառ", "Գյումրի", "Գորիս", "Դաստակերտ", "Դիլիջան", "Եղեգնաձոր", "Եղվարդ", "Երևան", "Վաղարշապատ", "Թալին", "Թումանյան", "Իջևան", "Ծաղկաձոր", "Կապան", "Հրազդան", "Ճամբարակ", "Մասիս", "Մարալիկ", "Մարտունի", "Մեծամոր", "Մեղրի", "Նոր Հաճն", "Նոյեմբերյան", "Շամլուղ", "Չարենցավան", "Ջերմուկ", "Սիսիան", "Սպիտակ", "Ստեփանավան", "Սևան", "Վայք", "Վանաձոր", "Վարդենիս", "Վեդի", "Տաշիր", "Քաջարան", ) # Source: Wikipedia's list of sovereign states # https://en.wikipedia.org/wiki/List_of_sovereign_states countries = ( "Աֆղանստան", "Ալբանիա", "Ալժիր", "Ամերիկյան Սամոա", "Անդորրա", "Անգոլա", "Անգիլիա", "Անտարկտիկա", "Անտիգուա և Բարբուդա", "Արգենտինա", "Հայաստան", "Արուբա", "Ավստրալիա", "Ավստրիա", "Ադրբեջան", "Բահամներ", "Բահրեյն", "Բանգլադեշ", "Բարբադոս", "Բելառուս", "Բելգիա", "Բելիզ", "Բենին", 
"Բերմուդա", "Բութան", "Բոլիվիա", "Բոսնիա և Հերցեգովինա", "Բոտսվանա", "Բրազիլիա", "Բրունեյ Դարուսսալամ", "Բուլղարիա", "Բուրկինա Ֆասո", "Բուրունդի", "Կամբոջա", "Կամերուն", "Կանադա", "Կաբո Վերդե", "Կայման Կղզիներ", "Կենտրոնական Աֆրիկյան Հանրապետություն", "Չադ", "Չիլի", "Չինաստան", "Սուրբ Ծննդյան Կղզի", "Կոկոս Կղզիներ", "Կոլումբիա", "Կոմորյան Կղզիներ", "Կոնգո", "Կուկի Կղզիներ", "Կոստա Ռիկա", "Կոտ դ'Իվուար", "Խորվաթիա", "Կուբա", "Կիպրոս", "Չեխիայի Հանրապետություն", "Դանիա", "Ջիբութի", "Դոմինիկա", "Դոմինիկյան Հանրապետություն", "Էկվադոր", "Եգիպտոս", "Սալվադոր", "Հասարակածային Գվինեա", "Էրիտրեա", "Էստոնիա", "Եթովպիա", "Ֆարերյան Կղզիներ", "Ֆոլկլենդյան Կղզիներ", "Ֆիջի", "Ֆինլանդիա", "Ֆրանսիա", "Ֆրանսիական Գվիանա", "Ֆրանսիական Պոլինեզիա", "Ֆրանսիական Հարավային Տարածքներ", "Գաբոն", "Գամբիա", "Վրաստան", "Գերմանիա", "Գանա", "Ջիբրալթար", "Հունաստան", "Գրենլանդիա", "Գրենադա", "Գվադելուպա", "Գուամ", "Գվատեմալա", "Գերնսի", "Գվինեա", "Գվինեա Բիսաու", "Գայանա", "Հաիթի", "Վատիկան", "Հոնդուրաս", "Հոնգ Կոնգ", "Հունգարիա", "Իսլանդիա", "Հնդկաստան", "Ինդոնեզիա", "Իրան", "Իրաք", "Իռլանիա", "Իսրայել", "Իտալիա", "Ջամայկա", "Ճապոնիա", "Հորդանան", "Ղազախստան", "Քենիա", "Կիրիբատի", "Հյուսիսային Կորեա", "Հարավային Կորեա", "Կոսովո", "Քուվեյթ", "Ղրղզստան", "Լաոս", "Լատվիա", "Լիբանան", "Լեսոտո", "Լիբերիա", "Լիբիական Արաբական Ջամահիրիա", "Լիխտենշտեյն", "Լիտվա", "Լյուքսեմբուրգ", "Մակաո", "Հյուսիսային Մակեդոնիա", "Մադագասկար", "Մալավի", "Մալազիա", "Մալդիվներ", "Մալի", "Մալթա", "Մարշալյան Կղզիներ", "Մարտինիկ", "Մավրիտանիա", "Մավրիկիոս", "Մայոտտե", "Մեքսիկա", "Միկրոնեզիա", "Մոլդովա", "Մոնակո", "Մոնղոլիա", "Չեռնոգորիա", "Մոնսերատ", "Մարոկկո", "Մոզամբիկ", "Մյանմա", "Նամիբիա", "Նաուրու", "Նեպալ", "Նիդեռլանդական Անտիլներ", "Նիդերլանդներ", "Նոր Կալեդոնիա", "Նոր Զելանդիա", "Նիկարագուա", "Նիգեր", "Նիգերիա", "Նիուե", "Նորֆոլկ Կղզի", "Հյուսիսային Մարիանյան Կղզիներ", "Նորվեգիա", "Օման", "Պակիստան", "Պալաու", "Պաղեստին", "Պանամա", "Պապուա Նոր Գվինեա", "Պարագվայ", "Պերու", "Ֆիլիպիններ", "Պիտկիրնյան Կղզիներ", 
"Լեհաստան", "Պորտուգալիա", "Պուերտո Ռիկո", "Կատար", "Ռումինիա", "Ռուսաստանի Դաշնություն", "Ռուանդա", "Սուրբ Բարդուղիմեոս", "Սուրբ Հելենա", "Սենտ Կիտս և Նևիս", "Սուրբ Լուչիա", "Սուրբ Մարտին", "Սեն Պիեռ և Միկելոն", "Սենթ Վինսենթ և Գրենադիններ", "Սամոա", "Սան Մարինո", "Սաուդյան Արաբիա", "Սենեգալ", "Սերբիա", "Սեյշելներ", "Սիերա Լեոնե", "Սինգապուր", "Սլովակիա", "Սլովենիա", "Սողոմոնյան Կղզիներ", "Սոմալի", "Հարավային Աֆրիկա", "Իսպանիա", "Շրի Լանկա", "Սուդան", "Սուրինամ", "Սվալբարդ և Յան Մայենյան Կղզիներ", "Սվազիլենդ", "Շվեդիա", "Շվեյցարիա", "Սիրիայի Արաբական Հանրապետություն", "Թայվան", "Տաջիկստան", "Տանզանիա", "Թաիլանդ", "Տոգո", "Տոկելաու", "Տոնգա", "Տրինիդադ և Տոբագո", "Թունիս", "Թուրքիա", "Թուրքմենստան", "Տուվալու", "Ուգանդա", "Ուկրաինա", "Արաբական Միացյալ Էմիրություններ", "Մեծ Բրիտանիա", "Ամերիկայի Միացյալ Նահանգներ", "Ուրուգվայ", "Ուզբեկստան", "Վենեսուելա", "Վիետնամ", "Ուոլիս և Ֆուտունա", "Արևմտյան Սահարա", "Եմեն", "Զամբիա", "Զիմբաբվե", ) # Source: Administrative divisions of Armenia (Wikipedia) # https://en.wikipedia.org/wiki/Administrative_divisions_of_Armenia states = ( "Արագածոտն", "Արարատ", "Արմավիր", "Գեղարքունիք", "Լոռի", "Կոտայք", "Շիրակ", "Սյունիք", "Տավուշ", "Վայոց Ձոր", ) states_abbr = ( "ԱԳ", "ԱՐ", "ԱՄ", "ԳՂ", "ԼՌ", "ԿՏ", "ՇԿ", "ՍՅ", "ՎՁ", "ՏՎ", ) # Source: Postal codes in Armenia (Wikipedia) # https://en.wikipedia.org/wiki/Postal_codes_in_Armenia states_postcode = { "ԱԳ": (200, 599), "ԱՐ": (600, 899), "ԱՄ": (900, 1199), "ԳՂ": (1200, 1699), "ԼՌ": (1700, 2199), "ԿՏ": (2200, 2599), "ՇԿ": (2600, 3199), "ՍՅ": (3200, 3599), "ՎՁ": (3600, 3899), "ՏՎ": (3900, 4299), } streets = ( "Ազատության", "Արշակունյաց", "Արցախի", "Գայի", "Ծովակալ Իսակովի", "Կոմիտասի", "Հյուսիսային", "Մաշտոցի", "Մարշալ Բաղրամյան", "Մյասնիկյան", "Սայաթ-Նովայի", "Տիգրան Մեծի", "Աբելյան", "Աբովյան", "Ագաթանգեղոսի", "Ազատամարտիկների", "Աթենքի", "Աթոյան", "Ալեք Մանուկյան", "Ալիխանյան", "Աղայան", "Աղյուսագործների", "Ամիրյան", "Այասի", "Անտառային", "Անրի Վեռնոյի", "Ավագ Պետրոսյան", "Արամ Խաչատրյան", 
"Արամի", "Արգիշտիի", "Արմենակյան", "Բայրոնի", "Բարձրաբերդի", "Բելինսկու", "Բեյրութի", "Բուդապեշտի", "Բուռնազյան", "Բրյուսովի", "Գալոյան Եղբայրների", "Գարեգին Նժդեհի", "Գետառի", "Գլինկայի", "Գյուլբենկյան", "Գրիգոր Լուսավորչի", "Գրիգոր Հարությունյան", "Գրիգոր Տեր-Գրիգորյան", "Գևորգ Էմինի", "Գևորգ Հովսեփյան", "Գևորգ Քոչարի", "Դեղատան", "Դերենիկ Դեմիրճյան", "Եզնիկ Կողբացու", "Եկմալյան", "Երվանդ Քոչարի", "Զավարյան", "Զարոբյան", "Զաքյան", "Էրեբունու", "Թաիրովի", "Թամանյան", "Թորամանյան", "Թումանյան", "Իսահակյան", "Իսրայելյան", "Իտալիայի", "Լամբրոնի", "Լենինգրադյան", "Լեոյի", "Լեոնիդ Ազգալդյան", "Լեռ Կամսարի", "Լիսինյան", "Լոմոնոսովի", "Լոռիս-Մելիքովի", "Լուսինյանց", "Խանզադյան", "Խանջյան", "Ծատուրյան", "Ծխախոտագործների", "Կալենցի", "Կասյան", "Կարեն Դեմիրճյան", "Կիևյան", "Կոնդի", "Կորի", "Կորյունի", "Կուստոյի", "Կռիլովի", "Հալաբյան", "Հակոբ Հակոբյան", "Հայրիկ Մուրադյան", "Հանրապետության", "Հերացու", "Հին Երևանցու", "Հնդկաստանի", "Հովհաննես Կոզեռնի", "Հրանտ Շահինյան", "Հրաչյա Քոչարի", "Ձորափի", "Ղազար Փարպեցու", "Մայիսյան", "Մարկ Գրիգորյան", "Մարտի 8-ի", "Մելիք-Ադամյան", "Միչուրինի", "Մհեր Մկրտչյան", "Մոնթե Մելքոնյան", "Մոսկովյան", "Մովսես Խորենացու", "Մուրացանի", "Նալբանդյան", "Նար-Դոսի", "Նորքի", "Շարա Տալյան", "Շարիմանյան", "Շուկայի", "Ոսկերիչների", "Չայկովսկու", "Չարենցի", "Չեռնիշևսկու", "Պարոնյան", "Պետրոս Ադամյան", "Պուշկինի", "Պռոշյան", "Պրահայի", "Ռոստոմի", "Ռոստովյան", "Ռուսթավելու", "Սասունցի Դավթի", "Սարալանջի", "Սարմենի", "Սարյան", "Սեբաստիայի", "Սերգեյ Փարաջանովի", "Սիլվա Կապուտիկյան", "Սիմեոն Երևանցու", "Սիսվանի", "Սոսեի", "Սուվորովի", "Սուրբ Հովհաննեսի", "Սպենդիարյան", "Ստեփան Զորյան", "Սևանի", "Վազգեն Սարգսյան", "Վահրամ Փափազյան", "Վաղարշյան", "Վարդան Աճեմյան", "Վարդանանց", "Վերֆելի", "Վրացյան", "Տարսոնի", "Տերյան", "Տոլստոյի", "Տպագրիչների", "Ցախի", "Փավստոս Բուզանդի", "Քաջազնունու", "Քոչինյան", "Քրիստափորի", "Օստրովսկու", "Օրբելի Եղբայրների", "Ֆիզկուլտուրնիկների", "Ֆիրդուսու", "Ֆրիկի", ) # Source: Villages in Armenia (Wikipedia) # 
http://www.armeniapedia.org/wiki/Armenian_Towns_and_Villages villages = ( "Ագարակ", "Անտառուտ", "Բերքառատ", "Գեղաձոր", "Գետափ", "Զովասար", "Լեռնապար", "Լուսագյուղ", "Կաթնաղբյուր", "Կաքավաձոր", "Հացաշեն", "Նորաշեն", "Շենավան", "Ոսկեվազ", "Ցամաքասար", "Այգեզարդ", "Բարձրաշեն", "Բերքանուշ", "Լանջանիստ", "Լուսաշող", "Ջրաշեն", "Քաղցրաշեն", "Այգեկ", "Առատաշեն", "Բամբակաշատ", "Գեղակերտ", "Լեռնամերձ", "Ծաղկալանջ", "Հացիկ", "Մերձավան", "Քարակերտ", "Անտառամեջ", "Արծվաշեն", "Գեղաքար", "Զովաբեր", "Լանջաղբյուր", "Շատջրեք", "Այգեհատ", "Դարպաս", "Լեռնահովիտ", "Հարթագյուղ", "Պաղաղբյուր", "Սարամեջ", "Քարաձոր", "Զովք", "Լեռնանիստ", "Մեղրաձոր", "Այգաբաց", "Թավշուտ", "Լանջիկ", "Կարմրավան", "Հայկասար", "Նահապետավան", "Վարդաղբյուր", "Քարաբերդ", "Արծվանիկ", "Բարձրավան", "Կաղնուտ", "Հացավան", "Նռնաձոր", "Սառնակունք", "Աղավնաձոր", "Սևաժայռ", "Վերնաշեն", "Այգեհովիտ", "Արծվաբերդ", "Բերքաբեր", "Գետահովիտ", "Ծաղկավան", "Հաղթանակ", "Ոսկեպար", "Սարիգյուղ", ) def city(self) -> str: """ :example: 'Բյուրեղավան' """ return self.random_element(self.cities) def city_prefix(self) -> str: """ :example: 'ք.' """ return self.random_element(self.city_prefixes) def postcode(self) -> str: """ :example: '3159' """ return "%04d" % self.generator.random.randint(200, 4299) def postcode_in_state(self, state_abbr: Optional[str] = None) -> str: """ :example: '4703' """ if state_abbr is None: state_abbr = self.random_element(self.states_abbr) if state_abbr in self.states_abbr: postcode = "%d" % ( self.generator.random.randint( self.states_postcode[state_abbr][0], self.states_postcode[state_abbr][1], ) ) if len(postcode) == 3: postcode = "0%s" % postcode return postcode else: raise Exception("State Abbreviation not found in list") def secondary_address(self) -> str: """ :example: 'բն. 
49' """ return self.numerify(self.random_element(self.secondary_address_formats)) def administrative_unit(self) -> str: """ :example: 'Կոտայք' """ return self.random_element(self.states) state = administrative_unit def state_abbr(self) -> str: """ :example: 'ՎՁ' """ return self.random_element(self.states_abbr) def street(self) -> str: """ :example: 'Ոսկերիչների' """ return self.random_element(self.streets) def street_prefix(self) -> str: """ :example: 'փողոց' """ return self.random_element(self.street_prefixes) def village(self) -> str: """ :example: 'Ոսկեվազ' """ return self.random_element(self.villages) def village_prefix(self) -> str: """ :example: 'գ.' """ return self.random_element(self.village_prefixes)
Provider
python
sqlalchemy__sqlalchemy
test/ext/test_hybrid.py
{ "start": 47809, "end": 60399 }
class ____( fixtures.TestBase, AssertsCompiledSQL, testing.AssertsExecutionResults ): """updated DML test suite when #12496 was done, where we created the use cases of "expansive" and "derived" hybrids and how their use cases differ, and also added the bulk_dml hook as well as the from_dml_column construct. """ __dialect__ = "default" @testing.fixture def single_plain(self, decl_base): """fixture with a single-col hybrid""" class A(decl_base): __tablename__ = "a" id: Mapped[int] = mapped_column(primary_key=True) x: Mapped[int] @hybrid.hybrid_property def x_plain(self): return self.x return A @testing.fixture def expand_plain(self, decl_base): """fixture with an expand hybrid (deals w/ a value object that spans multiple columns)""" class A(decl_base): __tablename__ = "a" id: Mapped[int] = mapped_column(primary_key=True) x: Mapped[int] y: Mapped[int] @hybrid.hybrid_property def xy(self): return Point(self.x, self.y) return A @testing.fixture def expand_update(self, decl_base): """fixture with an expand hybrid (deals w/ a value object that spans multiple columns)""" class A(decl_base): __tablename__ = "a" id: Mapped[int] = mapped_column(primary_key=True) x: Mapped[int] y: Mapped[int] @hybrid.hybrid_property def xy(self): return Point(self.x, self.y) @xy.inplace.update_expression @classmethod def _xy(cls, value): return [(cls.x, value.x), (cls.y, value.y)] return A @testing.fixture def expand_dml(self, decl_base): """fixture with an expand hybrid (deals w/ a value object that spans multiple columns)""" class A(decl_base): __tablename__ = "a" id: Mapped[int] = mapped_column(primary_key=True) x: Mapped[int] y: Mapped[int] @hybrid.hybrid_property def xy(self): return Point(self.x, self.y) @xy.inplace.bulk_dml @classmethod def _xy(cls, mapping, value): mapping["x"] = value.x mapping["y"] = value.y return A @testing.fixture def derived_update(self, decl_base): """fixture with a derive hybrid (value is derived from other columns with data that's not in the value object 
itself) """ class A(decl_base): __tablename__ = "a" id: Mapped[int] = mapped_column(primary_key=True) amount: Mapped[int] rate: Mapped[float] @hybrid.hybrid_property def adjusted_amount(self): return self.amount * self.rate @adjusted_amount.inplace.update_expression @classmethod def _adjusted_amount(cls, value): return [(cls.amount, value / from_dml_column(cls.rate))] return A @testing.fixture def derived_dml(self, decl_base): """fixture with a derive hybrid (value is derived from other columns with data that's not in the value object itself) """ class A(decl_base): __tablename__ = "a" id: Mapped[int] = mapped_column(primary_key=True) amount: Mapped[int] rate: Mapped[float] @hybrid.hybrid_property def adjusted_amount(self): return self.amount * self.rate @adjusted_amount.inplace.bulk_dml @classmethod def _adjusted_amount(cls, mapping, value): mapping["amount"] = value / mapping["rate"] return A def test_single_plain_update_values(self, single_plain): A = single_plain self.assert_compile( update(A).values({A.x_plain: 10}), "UPDATE a SET x=:x", checkparams={"x": 10}, ) def test_single_plain_insert_values(self, single_plain): A = single_plain self.assert_compile( insert(A).values({A.x_plain: 10}), "INSERT INTO a (x) VALUES (:x)", checkparams={"x": 10}, ) @testing.variation("crud", ["insert", "update"]) def test_single_plain_bulk(self, crud, decl_base, single_plain): A = single_plain decl_base.metadata.create_all(testing.db) with expect_raises_message( exc.InvalidRequestError, "Can't evaluate bulk DML statement; " "please supply a bulk_dml decorated function", ): with Session(testing.db) as session: session.execute( insert(A) if crud.insert else update(A), [ {"x_plain": 10}, {"x_plain": 11}, ], ) @testing.variation("keytype", ["attr", "string"]) def test_expand_plain_update_values(self, expand_plain, keytype): A = expand_plain # SQL tuple_ update happens instead due to __clause_element__ self.assert_compile( update(A) .where(A.xy == Point(10, 12)) .values({"xy" if 
keytype.string else A.xy: Point(5, 6)}), "UPDATE a SET (x, y)=(:param_1, :param_2) " "WHERE a.x = :x_1 AND a.y = :y_1", {"param_1": 5, "param_2": 6, "x_1": 10, "y_1": 12}, ) @testing.variation("crud", ["insert", "update"]) def test_expand_update_bulk(self, crud, expand_update, decl_base): A = expand_update decl_base.metadata.create_all(testing.db) with expect_raises_message( exc.InvalidRequestError, "Can't evaluate bulk DML statement; " "please supply a bulk_dml decorated function", ): with Session(testing.db) as session: session.execute( insert(A) if crud.insert else update(A), [ {"xy": Point(3, 4)}, {"xy": Point(5, 6)}, ], ) @testing.variation("crud", ["insert", "update"]) def test_expand_dml_bulk(self, crud, expand_dml, decl_base, connection): A = expand_dml decl_base.metadata.create_all(connection) with self.sql_execution_asserter(connection) as asserter: with Session(connection) as session: session.execute( insert(A), [ {"id": 1, "xy": Point(3, 4)}, {"id": 2, "xy": Point(5, 6)}, ], ) if crud.update: session.execute( update(A), [ {"id": 1, "xy": Point(10, 9)}, {"id": 2, "xy": Point(7, 8)}, ], ) asserter.assert_( CompiledSQL( "INSERT INTO a (id, x, y) VALUES (:id, :x, :y)", [{"id": 1, "x": 3, "y": 4}, {"id": 2, "x": 5, "y": 6}], ), Conditional( crud.update, [ CompiledSQL( "UPDATE a SET x=:x, y=:y WHERE a.id = :a_id", [ {"x": 10, "y": 9, "a_id": 1}, {"x": 7, "y": 8, "a_id": 2}, ], ) ], [], ), ) @testing.variation("keytype", ["attr", "string"]) def test_expand_update_insert_values(self, expand_update, keytype): A = expand_update self.assert_compile( insert(A).values({"xy" if keytype.string else A.xy: Point(5, 6)}), "INSERT INTO a (x, y) VALUES (:x, :y)", checkparams={"x": 5, "y": 6}, ) @testing.variation("keytype", ["attr", "string"]) def test_expand_update_update_values(self, expand_update, keytype): A = expand_update self.assert_compile( update(A).values({"xy" if keytype.string else A.xy: Point(5, 6)}), "UPDATE a SET x=:x, y=:y", checkparams={"x": 5, "y": 6}, ) 
##################################################### @testing.variation("keytype", ["attr", "string"]) def test_derived_update_insert_values(self, derived_update, keytype): A = derived_update self.assert_compile( insert(A).values( { "rate" if keytype.string else A.rate: 1.5, ( "adjusted_amount" if keytype.string else A.adjusted_amount ): 25, } ), "INSERT INTO a (amount, rate) VALUES " "((:param_1 / CAST(:rate AS FLOAT)), :rate)", checkparams={"param_1": 25, "rate": 1.5}, ) @testing.variation("keytype", ["attr", "string"]) @testing.variation("rate_present", [True, False]) def test_derived_update_update_values( self, derived_update, rate_present, keytype ): A = derived_update if rate_present: # when column is present in UPDATE SET, from_dml_column # uses that expression self.assert_compile( update(A).values( { "rate" if keytype.string else A.rate: 1.5, ( "adjusted_amount" if keytype.string else A.adjusted_amount ): 25, } ), "UPDATE a SET amount=(:param_1 / CAST(:rate AS FLOAT)), " "rate=:rate", checkparams={"param_1": 25, "rate": 1.5}, ) else: # when column is not present in UPDATE SET, from_dml_column # renders the column, which will work in an UPDATE, but not INSERT self.assert_compile( update(A).values( { ( "adjusted_amount" if keytype.string else A.adjusted_amount ): 25 } ), "UPDATE a SET amount=(:param_1 / CAST(a.rate AS FLOAT))", checkparams={"param_1": 25}, ) @testing.variation("crud", ["insert", "update"]) def test_derived_dml_bulk(self, crud, derived_dml, decl_base, connection): A = derived_dml decl_base.metadata.create_all(connection) with self.sql_execution_asserter(connection) as asserter: with Session(connection) as session: session.execute( insert(A), [ {"rate": 1.5, "adjusted_amount": 25}, {"rate": 2.5, "adjusted_amount": 25}, ], ) if crud.update: session.execute( update(A), [ {"id": 1, "rate": 1.8, "adjusted_amount": 30}, {"id": 2, "rate": 2.8, "adjusted_amount": 40}, ], ) asserter.assert_( CompiledSQL( "INSERT INTO a (amount, rate) VALUES (:amount, 
:rate)", [ {"amount": 25 / 1.5, "rate": 1.5}, {"amount": 25 / 2.5, "rate": 2.5}, ], ), Conditional( crud.update, [ CompiledSQL( "UPDATE a SET amount=:amount, rate=:rate " "WHERE a.id = :a_id", [ {"amount": 30 / 1.8, "rate": 1.8, "a_id": 1}, {"amount": 40 / 2.8, "rate": 2.8, "a_id": 2}, ], ) ], [], ), )
DMLTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 657580, "end": 658338 }
class ____(sgqlc.types.relay.Connection): """The connection type for ExternalIdentity.""" __schema__ = github_schema __field_names__ = ("edges", "nodes", "page_info", "total_count") edges = sgqlc.types.Field(sgqlc.types.list_of("ExternalIdentityEdge"), graphql_name="edges") """A list of edges.""" nodes = sgqlc.types.Field(sgqlc.types.list_of("ExternalIdentity"), graphql_name="nodes") """A list of nodes.""" page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo") """Information to aid in pagination.""" total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount") """Identifies the total count of items in the connection."""
ExternalIdentityConnection
python
sympy__sympy
sympy/polys/domains/domain.py
{ "start": 2393, "end": 2897 }
class ____(RingElement, Protocol): """An Euclidean domain element. Must support ``//``, ``%`` and ``divmod``. """ def __floordiv__(self, other: Self | int, /) -> Self: ... def __rfloordiv__(self, other: int, /) -> Self: ... def __mod__(self, other: Self | int, /) -> Self: ... def __rmod__(self, other: int, /) -> Self: ... def __divmod__(self, other: Self | int, /) -> tuple[Self, Self]: ... def __rdivmod__(self, other: int, /) -> tuple[Self, Self]: ...
EuclidElement
python
python-openxml__python-docx
tests/opc/parts/test_coreprops.py
{ "start": 434, "end": 2021 }
class ____: """Unit-test suite for `docx.opc.parts.coreprops.CorePropertiesPart` objects.""" def it_provides_access_to_its_core_props_object(self, CoreProperties_: Mock, package_: Mock): core_properties_part = CorePropertiesPart( PackURI("/part/name"), "content/type", element("cp:coreProperties"), package_ ) core_properties = core_properties_part.core_properties CoreProperties_.assert_called_once_with(core_properties_part.element) assert isinstance(core_properties, CoreProperties) def it_can_create_a_default_core_properties_part(self, package_: Mock): core_properties_part = CorePropertiesPart.default(package_) assert isinstance(core_properties_part, CorePropertiesPart) # -- core_properties = core_properties_part.core_properties assert core_properties.title == "Word Document" assert core_properties.last_modified_by == "python-docx" assert core_properties.revision == 1 assert core_properties.modified is not None delta = dt.datetime.now(dt.timezone.utc) - core_properties.modified max_expected_delta = dt.timedelta(seconds=2) assert delta < max_expected_delta # fixtures --------------------------------------------- @pytest.fixture def CoreProperties_(self, request: FixtureRequest): return class_mock(request, "docx.opc.parts.coreprops.CoreProperties") @pytest.fixture def package_(self, request: FixtureRequest): return instance_mock(request, OpcPackage)
DescribeCorePropertiesPart
python
scikit-learn__scikit-learn
sklearn/neighbors/_classification.py
{ "start": 1072, "end": 17109 }
class ____(KNeighborsMixin, ClassifierMixin, NeighborsBase): """Classifier implementing the k-nearest neighbors vote. Read more in the :ref:`User Guide <classification>`. Parameters ---------- n_neighbors : int, default=5 Number of neighbors to use by default for :meth:`kneighbors` queries. weights : {'uniform', 'distance'}, callable or None, default='uniform' Weight function used in prediction. Possible values: - 'uniform' : uniform weights. All points in each neighborhood are weighted equally. - 'distance' : weight points by the inverse of their distance. in this case, closer neighbors of a query point will have a greater influence than neighbors which are further away. - [callable] : a user-defined function which accepts an array of distances, and returns an array of the same shape containing the weights. Refer to the example entitled :ref:`sphx_glr_auto_examples_neighbors_plot_classification.py` showing the impact of the `weights` parameter on the decision boundary. algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' Algorithm used to compute the nearest neighbors: - 'ball_tree' will use :class:`BallTree` - 'kd_tree' will use :class:`KDTree` - 'brute' will use a brute-force search. - 'auto' will attempt to decide the most appropriate algorithm based on the values passed to :meth:`fit` method. Note: fitting on sparse input will override the setting of this parameter, using brute force. leaf_size : int, default=30 Leaf size passed to BallTree or KDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, default=2 Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected to be positive. 
metric : str or callable, default='minkowski' Metric to use for distance computation. Default is "minkowski", which results in the standard Euclidean distance when p = 2. See the documentation of `scipy.spatial.distance <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and the metrics listed in :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric values. If metric is "precomputed", X is assumed to be a distance matrix and must be square during fit. X may be a :term:`sparse graph`, in which case only "nonzero" elements may be considered neighbors. If metric is a callable function, it takes two arrays representing 1D vectors as inputs and must return one value indicating the distance between those vectors. This works for Scipy's metrics, but is less efficient than passing the metric name as a string. metric_params : dict, default=None Additional keyword arguments for the metric function. n_jobs : int, default=None The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. Doesn't affect :meth:`fit` method. Attributes ---------- classes_ : array of shape (n_classes,) Class labels known to the classifier effective_metric_ : str or callble The distance metric used. It will be same as the `metric` parameter or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to 'minkowski' and `p` parameter set to 2. effective_metric_params_ : dict Additional keyword arguments for the metric function. For most metrics will be same with `metric_params` parameter, but may also contain the `p` parameter value if the `effective_metric_` attribute is set to 'minkowski'. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. 
Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 n_samples_fit_ : int Number of samples in the fitted data. outputs_2d_ : bool False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit otherwise True. See Also -------- RadiusNeighborsClassifier: Classifier based on neighbors within a fixed radius. KNeighborsRegressor: Regression based on k-nearest neighbors. RadiusNeighborsRegressor: Regression based on neighbors within a fixed radius. NearestNeighbors: Unsupervised learner for implementing neighbor searches. Notes ----- See :ref:`Nearest Neighbors <neighbors>` in the online documentation for a discussion of the choice of ``algorithm`` and ``leaf_size``. .. warning:: Regarding the Nearest Neighbors algorithms, if it is found that two neighbors, neighbor `k+1` and `k`, have identical distances but different labels, the results will depend on the ordering of the training data. https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm Examples -------- >>> X = [[0], [1], [2], [3]] >>> y = [0, 0, 1, 1] >>> from sklearn.neighbors import KNeighborsClassifier >>> neigh = KNeighborsClassifier(n_neighbors=3) >>> neigh.fit(X, y) KNeighborsClassifier(...) 
>>> print(neigh.predict([[1.1]])) [0] >>> print(neigh.predict_proba([[0.9]])) [[0.666 0.333]] """ _parameter_constraints: dict = {**NeighborsBase._parameter_constraints} _parameter_constraints.pop("radius") _parameter_constraints.update( {"weights": [StrOptions({"uniform", "distance"}), callable, None]} ) def __init__( self, n_neighbors=5, *, weights="uniform", algorithm="auto", leaf_size=30, p=2, metric="minkowski", metric_params=None, n_jobs=None, ): super().__init__( n_neighbors=n_neighbors, algorithm=algorithm, leaf_size=leaf_size, metric=metric, p=p, metric_params=metric_params, n_jobs=n_jobs, ) self.weights = weights @_fit_context( # KNeighborsClassifier.metric is not validated yet prefer_skip_nested_validation=False ) def fit(self, X, y): """Fit the k-nearest neighbors classifier from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) if metric='precomputed' Training data. y : {array-like, sparse matrix} of shape (n_samples,) or \ (n_samples, n_outputs) Target values. Returns ------- self : KNeighborsClassifier The fitted k-nearest neighbors classifier. """ return self._fit(X, y) def predict(self, X): """Predict the class labels for the provided data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), \ or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If `None`, predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- y : ndarray of shape (n_queries,) or (n_queries, n_outputs) Class labels for each data sample. 
""" check_is_fitted(self, "_fit_method") if self.weights == "uniform": if self._fit_method == "brute" and ArgKminClassMode.is_usable_for( X, self._fit_X, self.metric ): probabilities = self.predict_proba(X) if self.outputs_2d_: return np.stack( [ self.classes_[idx][np.argmax(probas, axis=1)] for idx, probas in enumerate(probabilities) ], axis=1, ) return self.classes_[np.argmax(probabilities, axis=1)] # In that case, we do not need the distances to perform # the weighting so we do not compute them. neigh_ind = self.kneighbors(X, return_distance=False) neigh_dist = None else: neigh_dist, neigh_ind = self.kneighbors(X) classes_ = self.classes_ _y = self._y if not self.outputs_2d_: _y = self._y.reshape((-1, 1)) classes_ = [self.classes_] n_outputs = len(classes_) n_queries = _num_samples(self._fit_X if X is None else X) weights = _get_weights(neigh_dist, self.weights) if weights is not None and _all_with_any_reduction_axis_1(weights, value=0): raise ValueError( "All neighbors of some sample is getting zero weights. " "Please modify 'weights' to avoid this case if you are " "using a user-defined function." ) y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype) for k, classes_k in enumerate(classes_): if weights is None: mode, _ = _mode(_y[neigh_ind, k], axis=1) else: mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1) mode = np.asarray(mode.ravel(), dtype=np.intp) y_pred[:, k] = classes_k.take(mode) if not self.outputs_2d_: y_pred = y_pred.ravel() return y_pred def predict_proba(self, X): """Return probability estimates for the test data X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), \ or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If `None`, predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- p : ndarray of shape (n_queries, n_classes), or a list of n_outputs \ of such arrays if n_outputs > 1. 
The class probabilities of the input samples. Classes are ordered by lexicographic order. """ check_is_fitted(self, "_fit_method") if self.weights == "uniform": # TODO: systematize this mapping of metric for # PairwiseDistancesReductions. metric, metric_kwargs = _adjusted_metric( metric=self.metric, metric_kwargs=self.metric_params, p=self.p ) if ( self._fit_method == "brute" and ArgKminClassMode.is_usable_for(X, self._fit_X, metric) # TODO: Implement efficient multi-output solution and not self.outputs_2d_ ): if self.metric == "precomputed": X = _check_precomputed(X) else: X = validate_data( self, X, accept_sparse="csr", reset=False, order="C" ) probabilities = ArgKminClassMode.compute( X, self._fit_X, k=self.n_neighbors, weights=self.weights, Y_labels=self._y, unique_Y_labels=self.classes_, metric=metric, metric_kwargs=metric_kwargs, # `strategy="parallel_on_X"` has in practice be shown # to be more efficient than `strategy="parallel_on_Y`` # on many combination of datasets. # Hence, we choose to enforce it here. # For more information, see: # https://github.com/scikit-learn/scikit-learn/pull/24076#issuecomment-1445258342 # TODO: adapt the heuristic for `strategy="auto"` for # `ArgKminClassMode` and use `strategy="auto"`. strategy="parallel_on_X", ) return probabilities # In that case, we do not need the distances to perform # the weighting so we do not compute them. neigh_ind = self.kneighbors(X, return_distance=False) neigh_dist = None else: neigh_dist, neigh_ind = self.kneighbors(X) classes_ = self.classes_ _y = self._y if not self.outputs_2d_: _y = self._y.reshape((-1, 1)) classes_ = [self.classes_] n_queries = _num_samples(self._fit_X if X is None else X) weights = _get_weights(neigh_dist, self.weights) if weights is None: weights = np.ones_like(neigh_ind) elif _all_with_any_reduction_axis_1(weights, value=0): raise ValueError( "All neighbors of some sample is getting zero weights. 
" "Please modify 'weights' to avoid this case if you are " "using a user-defined function." ) all_rows = np.arange(n_queries) probabilities = [] for k, classes_k in enumerate(classes_): pred_labels = _y[:, k][neigh_ind] proba_k = np.zeros((n_queries, classes_k.size)) # a simple ':' index doesn't work right for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors) proba_k[all_rows, idx] += weights[:, i] # normalize 'votes' into real [0,1] probabilities normalizer = proba_k.sum(axis=1)[:, np.newaxis] proba_k /= normalizer probabilities.append(proba_k) if not self.outputs_2d_: probabilities = probabilities[0] return probabilities # This function is defined here only to modify the parent docstring # and add information about X=None def score(self, X, y, sample_weight=None): """ Return the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : array-like of shape (n_samples, n_features), or None Test samples. If `None`, predictions for all indexed points are used; in this case, points are not considered their own neighbors. This means that `knn.fit(X, y).score(None, y)` implicitly performs a leave-one-out cross-validation procedure and is equivalent to `cross_val_score(knn, X, y, cv=LeaveOneOut())` but typically much faster. y : array-like of shape (n_samples,) or (n_samples, n_outputs) True labels for `X`. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float Mean accuracy of ``self.predict(X)`` w.r.t. `y`. """ return super().score(X, y, sample_weight) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.classifier_tags.multi_label = True tags.input_tags.pairwise = self.metric == "precomputed" return tags
KNeighborsClassifier
python
pytorch__pytorch
test/quantization/pt2e/test_quantize_pt2e_qat.py
{ "start": 33859, "end": 35156 }
class ____(TestQuantizePT2EQAT_ConvBn_Base): dim = 2 example_inputs = (torch.randn(1, 3, 5, 5),) conv_class = torch.nn.Conv2d conv_transpose_class = torch.nn.ConvTranspose2d bn_class = torch.nn.BatchNorm2d def _is_conv_node(n: torch.fx.Node): return n.op == "call_function" and n.target in [ torch.ops.aten.conv1d.default, torch.ops.aten.conv2d.default, torch.ops.aten.conv_transpose1d, torch.ops.aten.conv_transpose1d.default, torch.ops.aten.conv_transpose2d, torch.ops.aten.conv_transpose2d.input, ] def _get_conv_bn_getitem_nodes(model: torch.fx.GraphModule): """ Return a 3-tuple of (conv, bn, getitem) nodes from the graph. """ model.graph.eliminate_dead_code() model.recompile() conv_node = None bn_node = None getitem_node = None for n in model.graph.nodes: if _is_conv_node(n): conv_node = n if n.target in ( torch.ops.aten._native_batch_norm_legit.default, torch.ops.aten.batch_norm.default, ): bn_node = n if n.target == operator.getitem: getitem_node = n assert conv_node is not None, "bad test setup" return (conv_node, bn_node, getitem_node)
TestQuantizePT2EQAT_ConvBn2d
python
huggingface__transformers
src/transformers/models/clvp/feature_extraction_clvp.py
{ "start": 987, "end": 10930 }
class ____(SequenceFeatureExtractor): r""" Constructs a CLVP feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class extracts log-mel-spectrogram features from raw speech using a custom numpy implementation of the `Short Time Fourier Transform` which should match pytorch's `torch.stft` equivalent. Args: feature_size (`int`, *optional*, defaults to 80): The feature dimension of the extracted features. sampling_rate (`int`, *optional*, defaults to 22050): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). default_audio_length (`int`, *optional*, defaults to 6): The default length of raw audio in seconds. If `max_length` is not set during `__call__` then it will automatically be set to default_audio_length * `self.sampling_rate`. hop_length (`int`, *optional*, defaults to 256): Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients. chunk_length (`int`, *optional*, defaults to 30): The maximum number of chunks of `sampling_rate` samples used to trim and pad longer or shorter audio sequences. n_fft (`int`, *optional*, defaults to 1024): Size of the Fourier transform. padding_value (`float`, *optional*, defaults to 0.0): Padding value used to pad the audio. Should correspond to silences. mel_norms (`list` of length `feature_size`, *optional*): If `mel_norms` is provided then it will be used to normalize the log-mel spectrograms along each mel-filter. return_attention_mask (`bool`, *optional*, defaults to `False`): Whether to return the attention mask. If left to the default, it will return the attention mask. 
[What are attention masks?](../glossary#attention-mask) """ model_input_names = ["input_features", "attention_mask"] def __init__( self, feature_size=80, sampling_rate=22050, default_audio_length=6, hop_length=256, chunk_length=30, n_fft=1024, padding_value=0.0, mel_norms=None, return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask **kwargs, ): super().__init__( feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, ) self.n_fft = n_fft self.hop_length = hop_length self.chunk_length = chunk_length self.n_samples = chunk_length * sampling_rate self.nb_max_frames = self.n_samples // hop_length self.sampling_rate = sampling_rate self.default_audio_length = default_audio_length self.mel_norms = mel_norms self.mel_filters = mel_filter_bank( num_frequency_bins=1 + (n_fft // 2), num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="htk", ) def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray: """ This method first computes the log-mel spectrogram of the provided audio then applies normalization along the each mel-filterbank, if `mel_norms` is provided. 
""" log_spec = spectrogram( waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel=None, ) log_spec = np.log(np.clip(log_spec, a_min=1e-5, a_max=None)) if self.mel_norms is not None: log_spec = log_spec / np.array(self.mel_norms)[:, None] return log_spec def __call__( self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], sampling_rate: Optional[int] = None, truncation: bool = True, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = True, padding: Optional[str] = "max_length", max_length: Optional[int] = None, **kwargs, ) -> BatchFeature: """ `ClvpFeatureExtractor` is used to extract various voice specific properties such as the pitch and tone of the voice, speaking speed, and even speaking defects like a lisp or stuttering from a sample voice or `raw_speech`. First the voice is padded or truncated in a way such that it becomes a waveform of `self.default_audio_length` seconds long and then the log-mel spectrogram is extracted from it. Args: raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition pipeline. truncation (`bool`, *optional*, default to `True`): Activates truncation to cut input sequences longer than *max_length* to *max_length*. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. return_attention_mask (`bool`, *optional*, defaults to `True`): Whether to return the attention mask. If left to the default, it will return the attention mask. [What are attention masks?](../glossary#attention-mask) return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. padding_value (`float`, *optional*, defaults to 0.0): The value that is used to fill the padding values / vectors. max_length (`int`, *optional*): The maximum input length of the inputs. """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" f" was sampled with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. " "Failing to do so can result in silent errors that might be hard to debug." 
) is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") is_batched = is_batched_numpy or ( isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) ) if is_batched: raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech] elif not is_batched and not isinstance(raw_speech, np.ndarray): raw_speech = np.asarray(raw_speech, dtype=np.float32) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float32) # always return batch if not is_batched: raw_speech = [np.asarray([raw_speech]).T] batched_speech = BatchFeature({"input_features": raw_speech}) max_length = self.default_audio_length * self.sampling_rate if max_length is None else max_length padded_inputs = self.pad( batched_speech, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) # make sure list is in array format input_features = padded_inputs.get("input_features").transpose(2, 0, 1) input_features = [ self._np_extract_fbank_features(waveform).astype(np.float32) for waveform in input_features[0] ] if isinstance(input_features[0], list): padded_inputs["input_features"] = [np.asarray(feature) for feature in input_features] else: padded_inputs["input_features"] = input_features return padded_inputs.convert_to_tensors(return_tensors) __all__ = ["ClvpFeatureExtractor"]
ClvpFeatureExtractor
python
sympy__sympy
sympy/simplify/hyperexpand.py
{ "start": 38770, "end": 39859 }
class ____(Operator): """ Increment a lower index. """ def __init__(self, ap, bq, i, z): """ Note: i counts from zero! """ ap, bq, i = list(map(sympify, [ap, bq, i])) self._ap = ap self._bq = bq self._i = i ap = list(ap) bq = list(bq) bi = bq.pop(i) + 1 if bi == 0: raise ValueError('Cannot increment -1 lower index.') m = Poly(_x*(bi - 1), _x) for b in bq: m *= Poly(_x + b - 1, _x) B = Dummy('B') D = Poly((bi - 1)*B - bi + 1, B) n = Poly(z, B) for a in ap: n *= (D + a.as_poly(B)) b0 = n.nth(0) if b0 == 0: raise ValueError('Cannot increment index: cancels with upper') n = Poly(Poly(n.all_coeffs()[:-1], B).as_expr().subs( B, _x/(bi - 1) + 1), _x) self._poly = Poly((m - n)/b0, _x) def __str__(self): return '<Increment lower index #%s of %s, %s.>' % (self._i, self._ap, self._bq)
UnShiftB
python
dagster-io__dagster
python_modules/libraries/dagster-shared/dagster_shared/yaml_utils/source_position.py
{ "start": 6871, "end": 9786 }
class ____: _source_position_and_key_path: Optional[SourcePositionAndKeyPath] = None @property def source_position(self) -> SourcePosition: """Returns the underlying source position of the object, including the source file and line number. """ assert self._source_position_and_key_path assert self._source_position_and_key_path.source_position return self._source_position_and_key_path.source_position @property def source_file(self) -> Path: """Path to the source file where the object is defined.""" assert self._source_position_and_key_path return Path(self.source_position.filename) @property def source_file_name(self) -> str: """Name of the source file where the object is defined.""" return self.source_file.name def populate_source_position_and_key_paths( obj: Any, source_position_tree: Optional[SourcePositionTree], key_path: KeyPath = [], ) -> None: """Populate the SourcePositionAndKeyPath for the given object and its children. This function recursively traverses the object and its children, setting the SourcePositionAndKeyPath on each object that subclasses HasSourcePositionAndKeyPath. If the obj is a collection, its children are the elements in the collection. If obj is an object, its children are its attributes. The SourcePositionAndKeyPath is set based on the provided source position tree, which contains the source position information for the object and its children. Args: obj (Any): The object to populate the source position and key path for. source_position_tree (Optional[SourcePositionTree]): The tree node containing the source position information for the object and its children. key_path (KeyPath): The path of keys that lead to the current object. 
""" if isinstance(obj, HasSourcePositionAndKeyPath): check.invariant( obj._source_position_and_key_path is None, # noqa: SLF001 "Cannot call populate_source_position_and_key_paths() more than once on the same object", ) if source_position_tree is not None: object.__setattr__( obj, "_source_position_and_key_path", SourcePositionAndKeyPath(key_path, source_position_tree.position), ) if source_position_tree is None: return for child_key_segment, child_tree in source_position_tree.children.items(): try: child_obj = cast("Any", obj)[child_key_segment] except TypeError: if not isinstance(child_key_segment, str): raise child_obj = getattr(obj, child_key_segment) populate_source_position_and_key_paths( child_obj, child_tree, [*key_path, child_key_segment] )
HasSourcePositionAndKeyPath
python
Netflix__metaflow
metaflow/plugins/cards/card_modules/basic.py
{ "start": 24276, "end": 26513 }
class ____(MetaflowCard): ALLOW_USER_COMPONENTS = True RUNTIME_UPDATABLE = True RELOAD_POLICY = MetaflowCard.RELOAD_POLICY_ONCHANGE type = "blank" def __init__(self, options=dict(title=""), components=[], graph=None, **kwargs): self._graph = None if graph is None else transform_flow_graph(graph) self._title = "" if "title" in options: self._title = options["title"] self._components = components def render(self, task, components=[], runtime=False): RENDER_TEMPLATE = read_file(RENDER_TEMPLATE_PATH) JS_DATA = read_file(JS_PATH) CSS_DATA = read_file(CSS_PATH) if type(components) != list: components = [] page_component = PageComponent( title=self._title, contents=components + self._components, ).render() final_component_dict = dict( metadata={ "pathspec": task.pathspec, }, components=[page_component], ) pt = self._get_mustache() data_dict = dict( task_data=base64.b64encode( json.dumps(final_component_dict).encode("utf-8") ).decode("utf-8"), javascript=JS_DATA, title=task.pathspec, css=CSS_DATA, card_data_id=uuid.uuid4(), RENDER_COMPLETE=not runtime, ) return pt.render(RENDER_TEMPLATE, data_dict) def render_runtime(self, task, data): return self.render(task, runtime=True) def refresh(self, task, data): return data["components"] def reload_content_token(self, task, data): """ The reload token will change when the component array has changed in the Metaflow card. The change in the component array is signified by the change in the component_update_ts. """ if task.finished: return "final" # `component_update_ts` will never be None. It is set to a default value when the `ComponentStore` is instantiated # And it is updated when components added / removed / changed from the `ComponentStore`. return "runtime-%s" % (str(data["component_update_ts"]))
BlankCard
python
qdrant__qdrant-client
qdrant_client/common/client_exceptions.py
{ "start": 57, "end": 555 }
class ____(QdrantException): def __init__(self, message: str, retry_after_s: int) -> None: self.message = message if message else "Resource Exhausted Response" try: self.retry_after_s = int(retry_after_s) except Exception as ex: raise QdrantException( f"Retry-After header value is not a valid integer: {retry_after_s}" ) from ex def __str__(self) -> str: return self.message.strip()
ResourceExhaustedResponse
python
kamyu104__LeetCode-Solutions
Python/minimum-edge-reversals-so-every-node-is-reachable.py
{ "start": 54, "end": 1148 }
class ____(object): def minEdgeReversals(self, n, edges): """ :type n: int :type edges: List[List[int]] :rtype: List[int] """ def iter_dfs1(): result = 0 stk = [(0, -1)] while stk: u, p = stk.pop() for v in adj[u].iterkeys(): if v == p: continue result += adj[u][v] stk.append((v, u)) return result def iter_dfs2(curr): result = [-1]*n stk = [(0, curr)] while stk: u, curr = stk.pop() result[u] = curr for v in adj[u].iterkeys(): if result[v] == -1: stk.append((v, curr-adj[u][v]+adj[v][u])) return result adj = collections.defaultdict(dict) for u, v in edges: adj[u][v] = 0 adj[v][u] = 1 return iter_dfs2(iter_dfs1()) # Time: O(n) # Space: O(n) # dfs, tree dp
Solution
python
cherrypy__cherrypy
cherrypy/_cpmodpy.py
{ "start": 10279, "end": 11761 }
class ____(object): """A server wrapper for ``mod_python``.""" template = """ # Apache2 server configuration file for running CherryPy with mod_python. DocumentRoot "/" Listen %(port)s LoadModule python_module modules/mod_python.so <Location %(loc)s> SetHandler python-program PythonHandler %(handler)s PythonDebug On %(opts)s </Location> """ def __init__( self, loc='/', port=80, opts=None, apache_path='apache', handler='cherrypy._cpmodpy::handler', ): """Initialize a ``mod_python`` server.""" self.loc = loc self.port = port self.opts = opts self.apache_path = apache_path self.handler = handler def start(self): """Start an Apache2/httpd server.""" opts = ''.join( [' PythonOption %s %s\n' % (k, v) for k, v in self.opts], ) conf_data = self.template % { 'port': self.port, 'loc': self.loc, 'opts': opts, 'handler': self.handler, } mpconf = os.path.join(os.path.dirname(__file__), 'cpmodpy.conf') with open(mpconf, 'wb') as f: f.write(conf_data) response = read_process(self.apache_path, '-k start -f %s' % mpconf) self.ready = True return response def stop(self): """Stop an Apache2/httpd server.""" os.popen('apache -k stop') self.ready = False
ModPythonServer
python
pytorch__pytorch
test/test_sympy_utils.py
{ "start": 29847, "end": 30180 }
class ____(TestCase): def test_pickle(self): x = OpaqueUnaryFn_cos(sympy.Symbol("a")) r = pickle.loads(pickle.dumps(x)) self.assertEqual(x, r) x = BitwiseFn_bitwise_and(sympy.Symbol("a"), sympy.Symbol("b")) r = pickle.loads(pickle.dumps(x)) self.assertEqual(x, r)
TestSympyFunctions
python
Textualize__textual
src/textual/widgets/_option_list.py
{ "start": 1155, "end": 1296 }
class ____(OptionListError): """Raised when a request has been made for an option that doesn't exist.""" @rich.repr.auto
OptionDoesNotExist
python
doocs__leetcode
solution/1100-1199/1121.Divide Array Into Increasing Sequences/Solution.py
{ "start": 0, "end": 182 }
class ____: def canDivideIntoSubsequences(self, nums: List[int], k: int) -> bool: mx = max(len(list(x)) for _, x in groupby(nums)) return mx * k <= len(nums)
Solution
python
mwaskom__seaborn
tests/_core/test_properties.py
{ "start": 1883, "end": 8710 }
class ____(DataFixtures): def assert_same_rgb(self, a, b): assert_array_equal(a[:, :3], b[:, :3]) def test_nominal_default_palette(self, cat_vector, cat_order): m = Color().get_mapping(Nominal(), cat_vector) n = len(cat_order) actual = m(np.arange(n)) expected = color_palette(None, n) for have, want in zip(actual, expected): assert same_color(have, want) def test_nominal_default_palette_large(self): vector = pd.Series(list("abcdefghijklmnopqrstuvwxyz")) m = Color().get_mapping(Nominal(), vector) actual = m(np.arange(26)) expected = color_palette("husl", 26) for have, want in zip(actual, expected): assert same_color(have, want) def test_nominal_named_palette(self, cat_vector, cat_order): palette = "Blues" m = Color().get_mapping(Nominal(palette), cat_vector) n = len(cat_order) actual = m(np.arange(n)) expected = color_palette(palette, n) for have, want in zip(actual, expected): assert same_color(have, want) def test_nominal_list_palette(self, cat_vector, cat_order): palette = color_palette("Reds", len(cat_order)) m = Color().get_mapping(Nominal(palette), cat_vector) actual = m(np.arange(len(palette))) expected = palette for have, want in zip(actual, expected): assert same_color(have, want) def test_nominal_dict_palette(self, cat_vector, cat_order): colors = color_palette("Greens") palette = dict(zip(cat_order, colors)) m = Color().get_mapping(Nominal(palette), cat_vector) n = len(cat_order) actual = m(np.arange(n)) expected = colors for have, want in zip(actual, expected): assert same_color(have, want) def test_nominal_dict_with_missing_keys(self, cat_vector, cat_order): palette = dict(zip(cat_order[1:], color_palette("Purples"))) with pytest.raises(ValueError, match="No entry in color dict"): Color("color").get_mapping(Nominal(palette), cat_vector) def test_nominal_list_too_short(self, cat_vector, cat_order): n = len(cat_order) - 1 palette = color_palette("Oranges", n) msg = rf"The edgecolor list has fewer values \({n}\) than needed \({n + 1}\)" with 
pytest.warns(UserWarning, match=msg): Color("edgecolor").get_mapping(Nominal(palette), cat_vector) def test_nominal_list_too_long(self, cat_vector, cat_order): n = len(cat_order) + 1 palette = color_palette("Oranges", n) msg = rf"The edgecolor list has more values \({n}\) than needed \({n - 1}\)" with pytest.warns(UserWarning, match=msg): Color("edgecolor").get_mapping(Nominal(palette), cat_vector) def test_continuous_default_palette(self, num_vector): cmap = color_palette("ch:", as_cmap=True) m = Color().get_mapping(Continuous(), num_vector) self.assert_same_rgb(m(num_vector), cmap(num_vector)) def test_continuous_named_palette(self, num_vector): pal = "flare" cmap = color_palette(pal, as_cmap=True) m = Color().get_mapping(Continuous(pal), num_vector) self.assert_same_rgb(m(num_vector), cmap(num_vector)) def test_continuous_tuple_palette(self, num_vector): vals = ("blue", "red") cmap = color_palette("blend:" + ",".join(vals), as_cmap=True) m = Color().get_mapping(Continuous(vals), num_vector) self.assert_same_rgb(m(num_vector), cmap(num_vector)) def test_continuous_callable_palette(self, num_vector): cmap = get_colormap("viridis") m = Color().get_mapping(Continuous(cmap), num_vector) self.assert_same_rgb(m(num_vector), cmap(num_vector)) def test_continuous_missing(self): x = pd.Series([1, 2, np.nan, 4]) m = Color().get_mapping(Continuous(), x) assert np.isnan(m(x)[2]).all() def test_bad_scale_values_continuous(self, num_vector): with pytest.raises(TypeError, match="Scale values for color with a Continuous"): Color().get_mapping(Continuous(["r", "g", "b"]), num_vector) def test_bad_scale_values_nominal(self, cat_vector): with pytest.raises(TypeError, match="Scale values for color with a Nominal"): Color().get_mapping(Nominal(get_colormap("viridis")), cat_vector) def test_bad_inference_arg(self, cat_vector): with pytest.raises(TypeError, match="A single scale argument for color"): Color().infer_scale(123, cat_vector) @pytest.mark.parametrize( 
"data_type,scale_class", [("cat", Nominal), ("num", Continuous), ("bool", Boolean)] ) def test_default(self, data_type, scale_class, vectors): scale = Color().default_scale(vectors[data_type]) assert isinstance(scale, scale_class) def test_default_numeric_data_category_dtype(self, num_vector): scale = Color().default_scale(num_vector.astype("category")) assert isinstance(scale, Nominal) def test_default_binary_data(self): x = pd.Series([0, 0, 1, 0, 1], dtype=int) scale = Color().default_scale(x) assert isinstance(scale, Continuous) @pytest.mark.parametrize( "values,data_type,scale_class", [ ("viridis", "cat", Nominal), # Based on variable type ("viridis", "num", Continuous), # Based on variable type ("viridis", "bool", Boolean), # Based on variable type ("muted", "num", Nominal), # Based on qualitative palette (["r", "g", "b"], "num", Nominal), # Based on list palette ({2: "r", 4: "g", 8: "b"}, "num", Nominal), # Based on dict palette (("r", "b"), "num", Continuous), # Based on tuple / variable type (("g", "m"), "cat", Nominal), # Based on tuple / variable type (("c", "y"), "bool", Boolean), # Based on tuple / variable type (get_colormap("inferno"), "num", Continuous), # Based on callable ] ) def test_inference(self, values, data_type, scale_class, vectors): scale = Color().infer_scale(values, vectors[data_type]) assert isinstance(scale, scale_class) assert scale.values == values def test_standardization(self): f = Color().standardize assert f("C3") == to_rgb("C3") assert f("dodgerblue") == to_rgb("dodgerblue") assert f((.1, .2, .3)) == (.1, .2, .3) assert f((.1, .2, .3, .4)) == (.1, .2, .3, .4) assert f("#123456") == to_rgb("#123456") assert f("#12345678") == to_rgba("#12345678") assert f("#123") == to_rgb("#123") assert f("#1234") == to_rgba("#1234")
TestColor
python
coleifer__peewee
playhouse/postgres_ext.py
{ "start": 10326, "end": 10984 }
class ____(IndexedFieldMixin, TextField): field_type = 'TSVECTOR' __hash__ = Field.__hash__ def match(self, query, language=None, plain=False): params = (language, query) if language is not None else (query,) func = fn.plainto_tsquery if plain else fn.to_tsquery return Expression(self, TS_MATCH, func(*params)) def Match(field, query, language=None): params = (language, query) if language is not None else (query,) field_params = (language, field) if language is not None else (field,) return Expression( fn.to_tsvector(*field_params), TS_MATCH, fn.to_tsquery(*params))
TSVectorField
python
spack__spack
lib/spack/spack/reporters/cdash.py
{ "start": 2328, "end": 20271 }
class ____(Reporter): """Generate reports of spec installations for CDash. To use this reporter, pass the ``--cdash-upload-url`` argument to ``spack install``:: spack install --cdash-upload-url=\\ https://example.com/cdash/submit.php?project=Spack <spec> In this example, results will be uploaded to the *Spack* project on the CDash instance hosted at ``https://example.com/cdash``. """ def __init__(self, configuration: CDashConfiguration): #: Set to False if any error occurs when building the CDash report self.success = True # Jinja2 expects `/` path separators self.template_dir = "reports/cdash" self.cdash_upload_url = configuration.upload_url if self.cdash_upload_url: self.buildid_regexp = re.compile("<buildId>([0-9]+)</buildId>") self.phase_regexp = re.compile(r"Executing phase: '(.*)'") self.authtoken = None if "SPACK_CDASH_AUTH_TOKEN" in os.environ: tty.verbose("Using CDash auth token from environment") self.authtoken = os.environ.get("SPACK_CDASH_AUTH_TOKEN") self.install_command = " ".join(configuration.packages) self.base_buildname = configuration.build or self.install_command self.site = configuration.site or socket.gethostname() self.osname = platform.system() self.osrelease = platform.release() self.target = spack.platforms.host().default_target() self.starttime = int(time.time()) self.endtime = self.starttime self.buildstamp = ( configuration.buildstamp if configuration.buildstamp else build_stamp(configuration.track, self.starttime) ) self.buildIds: Dict[str, str] = {} self.revision = "" git = spack.util.git.git(required=True) with working_dir(spack.paths.spack_root): self.revision = git("rev-parse", "HEAD", output=str).strip() self.generator = "spack-{0}".format(spack.get_version()) self.multiple_packages = False def report_build_name(self, pkg_name): buildname = ( "{0} - {1}".format(self.base_buildname, pkg_name) if self.multiple_packages else self.base_buildname ) if len(buildname) > 190: warnings.warn("Build name exceeds CDash 190 character maximum 
and will be truncated.") buildname = buildname[:190] return buildname def build_report_for_package(self, report_dir, package, duration): if "stdout" not in package: # Skip reporting on packages that do not generate output. return self.current_package_name = package["name"] self.buildname = self.report_build_name(self.current_package_name) report_data = self.initialize_report(report_dir) for phase in CDASH_PHASES: report_data[phase] = {} report_data[phase]["loglines"] = [] report_data[phase]["status"] = 0 report_data[phase]["starttime"] = self.starttime # Track the phases we perform so we know what reports to create. # We always report the update step because this is how we tell CDash # what revision of Spack we are using. phases_encountered = ["update"] # Generate a report for this package. current_phase = "" cdash_phase = "" for line in package["stdout"].splitlines(): match = None if line.find("Executing phase: '") != -1: match = self.phase_regexp.search(line) if match: current_phase = match.group(1) if current_phase not in MAP_PHASES_TO_CDASH: current_phase = "" continue cdash_phase = MAP_PHASES_TO_CDASH[current_phase] if cdash_phase not in phases_encountered: phases_encountered.append(cdash_phase) report_data[cdash_phase]["loglines"].append( str("{0} output for {1}:".format(cdash_phase, package["name"])) ) elif cdash_phase: report_data[cdash_phase]["loglines"].append(xml.sax.saxutils.escape(line)) # something went wrong pre-cdash "configure" phase b/c we have an exception and only # "update" was encounterd. 
# dump the report in the configure line so teams can see what the issue is if len(phases_encountered) == 1 and package.get("exception"): # TODO this mapping is not ideal since these are pre-configure errors # we need to determine if a more appropriate cdash phase can be utilized # for now we will add a message to the log explaining this cdash_phase = "configure" phases_encountered.append(cdash_phase) log_message = ( "Pre-configure errors occured in Spack's process that terminated the " "build process prematurely.\nSpack output::\n{0}".format( xml.sax.saxutils.escape(package["exception"]) ) ) report_data[cdash_phase]["loglines"].append(log_message) # Move the build phase to the front of the list if it occurred. # This supports older versions of CDash that expect this phase # to be reported before all others. if "build" in phases_encountered: build_pos = phases_encountered.index("build") phases_encountered.insert(0, phases_encountered.pop(build_pos)) self.endtime = self.starttime + duration for phase in phases_encountered: report_data[phase]["endtime"] = self.endtime report_data[phase]["log"] = "\n".join(report_data[phase]["loglines"]) errors, warnings = parse_log_events(report_data[phase]["loglines"]) # Convert errors to warnings if the package reported success. if package["result"] == "success": warnings = errors + warnings errors = [] # Cap the number of errors and warnings at 50 each. errors = errors[:50] warnings = warnings[:50] nerrors = len(errors) if nerrors > 0: self.success = False if phase == "configure": report_data[phase]["status"] = 1 if phase == "build": # Convert log output from ASCII to Unicode and escape for XML. def clean_log_event(event): event = vars(event) event["text"] = xml.sax.saxutils.escape(event["text"]) event["pre_context"] = xml.sax.saxutils.escape("\n".join(event["pre_context"])) event["post_context"] = xml.sax.saxutils.escape( "\n".join(event["post_context"]) ) # source_file and source_line_no are either strings or # the tuple (None,). 
Distinguish between these two cases. if event["source_file"][0] is None: event["source_file"] = "" event["source_line_no"] = "" else: event["source_file"] = xml.sax.saxutils.escape(event["source_file"]) return event report_data[phase]["errors"] = [] report_data[phase]["warnings"] = [] for error in errors: report_data[phase]["errors"].append(clean_log_event(error)) for warning in warnings: report_data[phase]["warnings"].append(clean_log_event(warning)) if phase == "update": report_data[phase]["revision"] = self.revision # Write the report. report_name = phase.capitalize() + ".xml" if self.multiple_packages: report_file_name = package["name"] + "_" + report_name else: report_file_name = report_name phase_report = os.path.join(report_dir, report_file_name) with open(phase_report, "w", encoding="utf-8") as f: env = spack.tengine.make_environment() if phase != "update": # Update.xml stores site information differently # than the rest of the CTest XML files. site_template = posixpath.join(self.template_dir, "Site.xml") t = env.get_template(site_template) f.write(t.render(report_data)) phase_template = posixpath.join(self.template_dir, report_name) t = env.get_template(phase_template) f.write(t.render(report_data)) self.upload(phase_report) def build_report(self, report_dir, specs): # Do an initial scan to determine if we are generating reports for more # than one package. When we're only reporting on a single package we # do not explicitly include the package's name in the CDash build name. self.multiple_packages = False num_packages = 0 for spec in specs: spec.summarize() # Do not generate reports for packages that were installed # from the binary cache. 
spec["packages"] = [ x for x in spec["packages"] if "installed_from_binary_cache" not in x or not x["installed_from_binary_cache"] ] for package in spec["packages"]: if "stdout" in package: num_packages += 1 if num_packages > 1: self.multiple_packages = True break if self.multiple_packages: break # Generate reports for each package in each spec. for spec in specs: duration = 0 if "time" in spec: duration = int(spec["time"]) for package in spec["packages"]: self.build_report_for_package(report_dir, package, duration) self.finalize_report() def extract_standalone_test_data(self, package, phases, report_data): """Extract stand-alone test outputs for the package.""" testing = {} report_data["testing"] = testing testing["starttime"] = self.starttime testing["endtime"] = self.starttime testing["generator"] = self.generator testing["parts"] = extract_test_parts(package["name"], package["stdout"].splitlines()) def report_test_data(self, report_dir, package, phases, report_data): """Generate and upload the test report(s) for the package.""" for phase in phases: # Write the report. report_name = phase.capitalize() + ".xml" report_file_name = "_".join([package["name"], package["id"], report_name]) phase_report = os.path.join(report_dir, report_file_name) with open(phase_report, "w", encoding="utf-8") as f: env = spack.tengine.make_environment() if phase not in ["update", "testing"]: # Update.xml stores site information differently # than the rest of the CTest XML files. site_template = posixpath.join(self.template_dir, "Site.xml") t = env.get_template(site_template) f.write(t.render(report_data)) phase_template = posixpath.join(self.template_dir, report_name) t = env.get_template(phase_template) f.write(t.render(report_data)) tty.debug("Preparing to upload {0}".format(phase_report)) self.upload(phase_report) def test_report_for_package(self, report_dir, package, duration): if "stdout" not in package: # Skip reporting on packages that did not generate any output. 
tty.debug("Skipping report for {0}: No generated output".format(package["name"])) return self.current_package_name = package["name"] if self.base_buildname == self.install_command: # The package list is NOT all that helpful in this case self.buildname = "{0}-{1}".format(self.current_package_name, package["id"]) else: self.buildname = self.report_build_name(self.current_package_name) self.endtime = self.starttime + duration report_data = self.initialize_report(report_dir) report_data["hostname"] = socket.gethostname() phases = ["testing"] self.extract_standalone_test_data(package, phases, report_data) self.report_test_data(report_dir, package, phases, report_data) def test_report(self, report_dir, specs): """Generate reports for each package in each spec.""" tty.debug("Processing test report") for spec in specs: spec.summarize() duration = 0 if "time" in spec: duration = int(spec["time"]) for package in spec["packages"]: self.test_report_for_package(report_dir, package, duration) self.finalize_report() def test_skipped_report( self, report_dir: str, spec: spack.spec.Spec, reason: Optional[str] = None ): """Explicitly report spec as being skipped (e.g., CI). Examples are the installation failed or the package is known to have broken tests. 
Args: report_dir: directory where the report is to be written spec: spec being tested reason: optional reason the test is being skipped """ output = "Skipped {0} package".format(spec.name) if reason: output += "\n{0}".format(reason) package = {"name": spec.name, "id": spec.dag_hash(), "result": "skipped", "stdout": output} self.test_report_for_package(report_dir, package, duration=0.0) def concretization_report(self, report_dir, msg): self.buildname = self.base_buildname report_data = self.initialize_report(report_dir) report_data["update"] = {} report_data["update"]["starttime"] = self.starttime report_data["update"]["endtime"] = self.endtime report_data["update"]["revision"] = self.revision report_data["update"]["log"] = msg env = spack.tengine.make_environment() update_template = posixpath.join(self.template_dir, "Update.xml") t = env.get_template(update_template) output_filename = os.path.join(report_dir, "Update.xml") with open(output_filename, "w", encoding="utf-8") as f: f.write(t.render(report_data)) # We don't have a current package when reporting on concretization # errors so refer to this report with the base buildname instead. self.current_package_name = self.base_buildname self.upload(output_filename) self.success = False self.finalize_report() def initialize_report(self, report_dir): if not os.path.exists(report_dir): os.mkdir(report_dir) report_data = {} report_data["buildname"] = self.buildname report_data["buildstamp"] = self.buildstamp report_data["install_command"] = self.install_command report_data["generator"] = self.generator report_data["osname"] = self.osname report_data["osrelease"] = self.osrelease report_data["site"] = self.site report_data["target"] = self.target return report_data def upload(self, filename): if not self.cdash_upload_url: print("Cannot upload {0} due to missing upload url".format(filename)) return # Compute md5 checksum for the contents of this file. 
md5sum = checksum(hashlib.md5, filename, block_size=8192) with open(filename, "rb") as f: params_dict = { "build": self.buildname, "site": self.site, "stamp": self.buildstamp, "MD5": md5sum, } encoded_params = urlencode(params_dict) url = "{0}&{1}".format(self.cdash_upload_url, encoded_params) request = Request(url, data=f, method="PUT") request.add_header("Content-Type", "text/xml") request.add_header("Content-Length", os.path.getsize(filename)) if self.authtoken: request.add_header("Authorization", "Bearer {0}".format(self.authtoken)) try: response = web_util.urlopen(request, timeout=SPACK_CDASH_TIMEOUT) if self.current_package_name not in self.buildIds: resp_value = codecs.getreader("utf-8")(response).read() match = self.buildid_regexp.search(resp_value) if match: buildid = match.group(1) self.buildIds[self.current_package_name] = buildid except Exception as e: print(f"Upload to CDash failed: {e}") def finalize_report(self): if self.buildIds: tty.msg("View your build results here:") for package_name, buildid in self.buildIds.items(): # Construct and display a helpful link if CDash responded with # a buildId. build_url = self.cdash_upload_url build_url = build_url[0 : build_url.find("submit.php")] build_url += "buildSummary.php?buildid={0}".format(buildid) tty.msg("{0}: {1}".format(package_name, build_url)) if not self.success: raise SpackError("Errors encountered, see above for more details")
CDash
python
tensorflow__tensorflow
tensorflow/python/distribute/packed_distributed_variable_test.py
{ "start": 1338, "end": 1944 }
class ____(autotrackable.AutoTrackable): def __init__(self): with ops.device('/cpu:0'): v0 = resource_variable_ops.ResourceVariable(1.0, name='var0') with ops.device('/cpu:1'): v1 = resource_variable_ops.ResourceVariable(2.0, name='var1') self._packed_var = packed_distributed_variable.PackedDistributedVariable( [v0, v1] ) self._fn = def_function.function(self.update_var) @def_function.function def update_var(self): self._packed_var.assign_add(3.0).assign_sub(1.0) def save_function(self, directory): save.save(self, directory)
TestExportArchive
python
facelessuser__pymdown-extensions
tests/test_extensions/test_blocks/test_html.py
{ "start": 59, "end": 8322 }
class ____(util.MdCase): """Test Blocks HTML cases.""" extension = ['pymdownx.blocks.html', 'md_in_html'] extension_configs = { 'pymdownx.blocks.html': { 'custom': [ {'tag': 'custom', 'mode': 'block'} ] } } def test_raw_empty_block(self): """Test that raw empty blocks are handled properly.""" self.check_markdown( R''' /// html | pre /// ''', R''' <pre></pre> ''', True ) def test_bad_tag(self): """Test bad HTML tag.""" self.check_markdown( R''' /// html | 3tag Some *content* /// ''', R''' <p>/// html | 3tag Some <em>content</em> ///</p> ''', True ) def test_required_tag(self): """Test that tab is not processed if tag is omitted.""" self.check_markdown( R''' /// html Some *content* /// ''', r''' <p>/// html Some <em>content</em> ///</p> ''', True ) def test_html_block(self): """Test HTML block element.""" self.check_markdown( R''' /// html | div Some *content* And more `content`. /// ''', r''' <div> <p>Some <em>content</em></p> <p>And more <code>content</code>.</p> </div> ''', True ) def test_html_span(self): """Test HTML with span element.""" self.check_markdown( R''' /// html | span Will be parsed as inline *content* And more `content`. /// ''', r''' <span>Will be parsed as inline <em>content</em> And more <code>content</code>.</span> ''', True ) def test_html_raw_element(self): """Test HTML raw element.""" self.check_markdown( R''' /// html | pre Some *content* And more `content`. /// ''', r''' <pre>Some *content* And more `content`.</pre> ''', True ) def test_html_forced_raw_element(self): """Test HTML force raw element.""" self.check_markdown( R''' /// html | div markdown: raw Some *content* And more `content`. /// ''', r''' <div>Some *content* And more `content`.</div> ''', True ) def test_html_force_span(self): """Test HTML with force span element.""" self.check_markdown( R''' /// html | div markdown: inline Will be parsed as inline *content* And more `content`. 
/// ''', r''' <div>Will be parsed as inline <em>content</em> And more <code>content</code>.</div> ''', True ) def test_html_force_block(self): """Test HTML force block element.""" self.check_markdown( R''' /// html | span markdown: block Some *content* And more `content`. /// ''', r''' <span><p>Some <em>content</em></p><p>And more <code>content</code>.</p></span> ''', True ) def test_attributes(self): """Test attributes.""" self.check_markdown( R''' /// html | div.some.classes#an-id[name1 name2=value name3="string value"] Some *content* And more `content`. /// ''', r''' <div class="some classes" id="an-id" name1="name1" name2="value" name3="string value"> <p>Some <em>content</em></p> <p>And more <code>content</code>.</p> </div> ''', True ) def test_bad_attributes(self): """Test no attributes.""" self.check_markdown( R''' /// html | div.+ content /// ''', ''' <p>/// html | div.+ content ///</p> ''', True ) def test_multi_class(self): """Test multiple classes.""" self.check_markdown( R''' /// html | div.a.b[class=c] content /// ''', ''' <div class="a b c"> <p>content</p> </div> ''', True ) def test_multi_class2(self): """Test multiple classes.""" self.check_markdown( R''' /// html | div[class="a b"].c content /// ''', ''' <div class="a b c"> <p>content</p> </div> ''', True ) def test_inline_and_md_in_html(self): """Test inline format and HTML content.""" self.check_markdown( R''' /// html | div markdown: inline <div markdown="block"> **content** </div> **content** /// ''', ''' <div><div markdown="block"> **content** </div> <strong>content</strong></div> ''', True ) def test_raw_and_md_in_html(self): """Test raw format and HTML content.""" self.check_markdown( R''' /// html | div markdown: raw <div> **content** </div> this is <span>raw</span> **content** /// ''', ''' <div>&lt;div&gt; **content** &lt;/div&gt; this is &lt;span&gt;raw&lt;/span&gt; **content**</div> ''', True ) def test_html_and_html(self): """Test HTML mode format with HTML code.""" self.check_markdown( 
R''' /// html | div markdown: html <div> **content** </div> this is <span>raw</span> **content** /// ''', ''' <div><div> **content** </div> this is <span>raw</span> **content**</div> ''', True ) def test_html_and_script(self): """Test inline format with script.""" self.check_markdown( R''' /// html | script const el = document.querySelector('div'); el.innerHTML = '<span>test</span> /// ''', ''' <script>const el = document.querySelector('div'); el.innerHTML = '<span>test</span></script> ''', True ) def test_custom(self): """Test custom block handling.""" self.check_markdown( R''' /// html | custom - a - b /// ''', ''' <custom><ul><li>a</li><li>b</li></ul></custom> ''', True ) def test_custom_override(self): """Test custom block handling but mode is overridden.""" self.check_markdown( R''' /// html | custom markdown: inline - a - b /// ''', ''' <custom>- a - b</custom> ''', True )
TestBlocksHTML
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/linalg/matrix_triangular_solve_op_test.py
{ "start": 960, "end": 8785 }
class ____(test.TestCase): def _verifySolveAllWays(self, x, y, dtypes, batch_dims=None): for lower in True, False: for adjoint in True, False: for use_placeholder in True, False: self._verifySolve( x, y, lower=lower, adjoint=adjoint, batch_dims=batch_dims, use_placeholder=use_placeholder, dtypes=dtypes) def _verifySolveAllWaysReal(self, x, y, batch_dims=None): self._verifySolveAllWays(x, y, (np.float32, np.float64), batch_dims) def _verifySolveAllWaysComplex(self, x, y, batch_dims=None): self._verifySolveAllWays(x, y, (np.complex64, np.complex128), batch_dims) def _verifySolve(self, x, y, lower=True, adjoint=False, batch_dims=None, use_placeholder=False, dtypes=(np.float32, np.float64)): for np_type in dtypes: a = x.astype(np_type) b = y.astype(np_type) # For numpy.solve we have to explicitly zero out the strictly # upper or lower triangle. if lower and a.size > 0: a_np = np.tril(a) elif a.size > 0: a_np = np.triu(a) else: a_np = a if adjoint: axes = list(range(len(a_np.shape))) axes[-2] = -1 axes[-1] = -2 a_np = np.conj(np.transpose(a_np, axes=axes)) if batch_dims is not None: a = np.tile(a, batch_dims + [1, 1]) a_np = np.tile(a_np, batch_dims + [1, 1]) b = np.tile(b, batch_dims + [1, 1]) def broadcast(a, b): b1 = b + np.zeros(a.shape[:-2] + (1, 1), dtype=b.dtype) return a, b1 a_tf = a b_tf = b if use_placeholder: a_tf = array_ops.placeholder_with_default(a_tf, shape=None) b_tf = array_ops.placeholder_with_default(b_tf, shape=None) tf_ans = linalg_ops.matrix_triangular_solve( a_tf, b_tf, lower=lower, adjoint=adjoint) tf_val = self.evaluate(tf_ans) a_np, b = broadcast(a_np, b) np_ans = np.linalg.solve(a_np, b) self.assertEqual(np_ans.shape, tf_val.shape) self.assertAllClose(np_ans, tf_val) @test_util.run_deprecated_v1 def testSolve(self): # 1x1 matrix, single rhs. matrix = np.array([[0.1]]) rhs0 = np.array([[1.]]) self._verifySolveAllWaysReal(matrix, rhs0) # 2x2 matrices, single right-hand side. 
matrix = np.array([[1., 2.], [3., 4.]]) rhs0 = np.array([[1.], [1.]]) self._verifySolveAllWaysReal(matrix, rhs0) # 2x2 matrices, 3 right-hand sides. rhs1 = np.array([[1., 0., 1.], [0., 1., 1.]]) self._verifySolveAllWaysReal(matrix, rhs1) @test_util.run_deprecated_v1 def testSolveComplex(self): # 1x1 matrix, single rhs. matrix = np.array([[0.1 + 1j * 0.1]]) rhs0 = np.array([[1. + 1j]]) self._verifySolveAllWaysComplex(matrix, rhs0) # 2x2 matrices, single right-hand side. matrix = np.array([[1., 2.], [3., 4.]]).astype(np.complex64) matrix += 1j * matrix rhs0 = np.array([[1.], [1.]]).astype(np.complex64) rhs0 += 1j * rhs0 self._verifySolveAllWaysComplex(matrix, rhs0) # 2x2 matrices, 3 right-hand sides. rhs1 = np.array([[1., 0., 1.], [0., 1., 1.]]).astype(np.complex64) rhs1 += 1j * rhs1 self._verifySolveAllWaysComplex(matrix, rhs1) @test_util.run_deprecated_v1 def testSolveBatch(self): matrix = np.array([[1., 2.], [3., 4.]]) rhs = np.array([[1., 0., 1.], [0., 1., 1.]]) # Batch of 2x3x2x2 matrices, 2x3x2x3 right-hand sides. self._verifySolveAllWaysReal(matrix, rhs, batch_dims=[2, 3]) # Batch of 3x2x2x2 matrices, 3x2x2x3 right-hand sides. 
self._verifySolveAllWaysReal(matrix, rhs, batch_dims=[3, 2]) @test_util.run_deprecated_v1 def testSolveBatchBroadcast(self): # 2 x 2 x 2 matrix = np.array([[[1., 0.], [3., 4.]], [[1., 0.], [2., 1.]]]) # 2 x 3 rhs = np.array([[1., 0., 1.], [0., 1., 1.]]) # 2 x 2 x 3 self._verifySolveAllWaysReal(matrix, rhs) # 2 x 2 x 2 matrix2 = np.array([[[1., 0.], [3., 4.]], [[2., 0.], [1., 6.3]]]) # 1 x 2 x 3 rhs = np.array([[[1., 0., 1.], [0., 1., 1.]]]) # 2 x 2 x 3 self._verifySolveAllWaysReal(matrix2, rhs) @test_util.run_deprecated_v1 def testSolveBatchBroadcastLargerBatches(self): # 1 x 10 x 10 matrix = np.random.uniform(low=1, high=2., size=[1, 10, 10]) # 10 x 1 rhs = np.random.uniform(size=[10, 1]) # 1 x 10 x 1 self._verifySolveAllWaysReal(matrix, rhs) # 2 x 10 x 10 matrix = np.random.uniform(low=1, high=2., size=[2, 10, 10]) # 10 x 1 rhs = np.random.uniform(size=[10, 1]) # 2 x 10 x 1 self._verifySolveAllWaysReal(matrix, rhs) # 2 x 257 x 257 matrix = np.random.uniform(low=1, high=2., size=[2, 257, 257]) # Also ensure the matrix is well conditioned by making it diagonally # dominant. np.fill_diagonal(matrix[0, ...], 257 * 2) np.fill_diagonal(matrix[1, ...], 257 * 2) # 257 x 1 rhs = np.random.uniform(size=[257, 1]) # 2 x 257 x 1 self._verifySolveAllWaysReal(matrix, rhs) @test_util.run_deprecated_v1 def testSolveBatchComplex(self): matrix = np.array([[1., 2.], [3., 4.]]).astype(np.complex64) matrix += 1j * matrix rhs = np.array([[1., 0., 1.], [0., 1., 1.]]).astype(np.complex64) rhs += 1j * rhs # Batch of 2x3x2x2 matrices, 2x3x2x3 right-hand sides. self._verifySolveAllWaysComplex(matrix, rhs, batch_dims=[2, 3]) # Batch of 3x2x2x2 matrices, 3x2x2x3 right-hand sides. self._verifySolveAllWaysComplex(matrix, rhs, batch_dims=[3, 2]) @test_util.run_deprecated_v1 def testNonSquareMatrix(self): # A non-square matrix should cause an error. 
matrix = np.array([[1., 2., 3.], [3., 4., 5.]]) with self.cached_session(): with self.assertRaises(ValueError): self._verifySolve(matrix, matrix) with self.assertRaises(ValueError): self._verifySolve(matrix, matrix, batch_dims=[2, 3]) @test_util.run_deprecated_v1 def testWrongDimensions(self): # The matrix should have the same number of rows as the # right-hand sides. matrix = np.array([[1., 0.], [0., 1.]]) rhs = np.array([[1., 0.]]) with self.cached_session(): with self.assertRaises(ValueError): self._verifySolve(matrix, rhs) with self.assertRaises(ValueError): self._verifySolve(matrix, rhs, batch_dims=[2, 3]) @test_util.run_deprecated_v1 @test_util.disable_xla("XLA cannot throw assertion errors during a kernel.") def testNotInvertible(self): # The input should be invertible. # The matrix is singular because it has a zero on the diagonal. singular_matrix = np.array( [[[1., 0., 0.], [-1., 0., 0.], [0., -1., 1.]], [[1., 0., 0.], [-1., 1., 0.], [0., -1., 0.]], [[1., 0., 0.], [-1., 1., 0.], [0., -1., 1.]]]) rhs = np.array([[3.], [5.], [1.]]) expected = np.array([ [[3.], [np.inf], [np.inf]], [[3.], [8.], [np.inf]], [[3.], [8.], [9.]]]) with self.cached_session(use_gpu=False): ans = linalg_ops.matrix_triangular_solve(singular_matrix, rhs) self.assertAllClose(self.evaluate(ans), expected) def testEmpty(self): self._verifySolve(np.empty([0, 2, 2]), np.empty([0, 2, 2]), lower=True) self._verifySolve(np.empty([2, 0, 0]), np.empty([2, 0, 0]), lower=True) self._verifySolve(np.empty([2, 0, 0]), np.empty([2, 0, 0]), lower=False) self._verifySolve( np.empty([2, 0, 0]), np.empty([2, 0, 0]), lower=True, batch_dims=[3, 2]) self._verifySolve(np.empty([0, 0]), np.empty([0, 0]), lower=True) if __name__ == "__main__": test.main()
MatrixTriangularSolveOpTest
python
ray-project__ray
python/ray/train/tests/test_training_iterator.py
{ "start": 7017, "end": 11404 }
class ____: def __init__(self, fail_on, backend_executor): self.counter = 0 self.fail_on = fail_on self.worker_group = backend_executor.get_worker_group() self.results = [] def handle_result(self, intermiedate_results=None): if intermiedate_results: self.results.append(intermiedate_results) if self.counter == self.fail_on: print("killing") self.results = [] ray.kill(self.worker_group.workers[0].actor) time.sleep(3) self.counter += 1 @pytest.mark.parametrize( "backend", ["test", "torch", "tf"] if sys.version_info < (3, 12) else ["test", "torch"], ) def test_worker_kill(ray_start_4_cpus, backend): if backend == "test": test_config = BackendConfig() elif backend == "torch": from ray.train.torch import TorchConfig test_config = TorchConfig() elif backend == "tf": from ray.train.tensorflow import TensorflowConfig test_config = TensorflowConfig() def train_func(): for i in range(2): train.report(dict(loss=1, iter=i)) iterator = create_iterator(train_func, test_config) kill_callback = KillCallback(fail_on=0, backend_executor=iterator._backend_executor) for intermediate_result in iterator: # Run 1: iter=0, counter=1, Successful # Run 2: iter=1, counter=1, Unsuccessful, starts training from beginning # Run 3: iter=0, counter=2, Successful # Run 4: iter=1, counter=3, Successful kill_callback.handle_result() assert kill_callback.counter == 3 iterator = create_iterator(train_func, test_config) kill_callback = KillCallback(fail_on=1, backend_executor=iterator._backend_executor) for intermediate_result in iterator: # Run 1: iter=0, counter=1, Successful # Run 2: iter=1, counter=2, Successful # Run 3: None, counter=2, Unsuccessful, starts training from beginning. 
# Run 4: iter=0, counter=3, Successful # Run 5: iter=1, counter=4, Successful kill_callback.handle_result() assert kill_callback.counter == 4 @pytest.mark.skipif( sys.version_info >= (3, 12), reason="tensorflow is not installed in python 3.12+" ) def test_tensorflow_mnist_fail(ray_start_4_cpus): """Tests if tensorflow example works even with worker failure.""" epochs = 3 num_workers = 2 from ray.train.examples.tf.tensorflow_mnist_example import ( train_func as tensorflow_mnist_train_func, ) from ray.train.tensorflow import TensorflowConfig test_config = TensorflowConfig() train_func = functools.partial( tensorflow_mnist_train_func, {"lr": 1e-3, "batch_size": 64, "epochs": epochs} ) iterator = create_iterator(train_func, test_config, num_workers=num_workers) kill_callback = KillCallback(fail_on=0, backend_executor=iterator._backend_executor) for intermediate_result in iterator: assert len(intermediate_result) == num_workers kill_callback.handle_result(intermediate_result) results = kill_callback.results assert len(results) == epochs last_iter_result = results[-1][0].metrics first_iter_result = results[0][0].metrics assert last_iter_result["loss"] < first_iter_result["loss"] assert last_iter_result["accuracy"] > first_iter_result["accuracy"] def test_torch_linear_failure(ray_start_4_cpus): num_workers = 2 epochs = 3 from ray.train.torch import TorchConfig test_config = TorchConfig() train_func = functools.partial( linear_train_func, {"lr": 1e-3, "batch_size": 64, "epochs": epochs} ) iterator = create_iterator(train_func, test_config, num_workers=num_workers) kill_callback = KillCallback(fail_on=1, backend_executor=iterator._backend_executor) for intermediate_result in iterator: assert len(intermediate_result) == num_workers kill_callback.handle_result(intermediate_result) results = kill_callback.results assert len(results) == epochs for i in range(num_workers): last_result = results[-1][i].metrics first_result = results[0][i].metrics assert last_result["loss"] < 
first_result["loss"] if __name__ == "__main__": import sys import pytest sys.exit(pytest.main(sys.argv[1:] + ["-v", "-x", __file__]))
KillCallback
python
matplotlib__matplotlib
tools/triage_tests.py
{ "start": 2293, "end": 2760 }
class ____(QtCore.QObject): # A hack keypresses can be handled globally and aren't swallowed # by the individual widgets def __init__(self, window): super().__init__() self.window = window def eventFilter(self, receiver, event): if event.type() == QtCore.QEvent.Type.KeyPress: self.window.keyPressEvent(event) return True else: return super().eventFilter(receiver, event)
EventFilter
python
mlflow__mlflow
mlflow/pyspark/optuna/study.py
{ "start": 4354, "end": 12648 }
class ____(Study): """A wrapper of :class:`~optuna.study.Study` to incorporate Optuna with spark via MLflow experiment. This class automatically resumes existing studies with the same name, allowing for interrupted optimization to continue from where it left off. .. code-block:: python :caption: Basic Usage from mlflow.optuna.storage import MlflowStorage from mlflow.pyspark.optuna.study import MlflowSparkStudy def objective(trial): x = trial.suggest_float("x", -10, 10) return (x - 2) ** 2 experiment_id = "507151065975140" study_name = "spark_mlflow_storage" storage = MlflowStorage(experiment_id=experiment_id) mlflow_study = MlflowSparkStudy(study_name, storage) mlflow_study.optimize(objective, n_trials=4) # Later, create another instance with same name to resume resumed_study = MlflowSparkStudy(study_name, storage) print(f"Resumed with {len(resumed_study.trials)} existing trials") resumed_study.optimize(objective, n_trials=4) # Continue optimization """ def __init__( self, study_name: str, storage: MlflowStorage, sampler: samplers.BaseSampler | None = None, pruner: pruners.BasePruner | None = None, mlflow_tracking_uri: str | None = None, ): self.study_name = study_name self._storage = storages.get_storage(storage) self.sampler = sampler or samplers.TPESampler() self.pruner = pruner or pruners.MedianPruner() self.spark = SparkSession.active() # check whether the SparkConnect mode self._is_spark_connect_mode = is_spark_connect_mode() self._mlflow_tracking_env = mlflow_tracking_uri or mlflow.get_tracking_uri() mlflow.set_tracking_uri(self._mlflow_tracking_env) self.mlflow_client = MlflowClient() if not isinstance(self._storage, MlflowStorage): raise ValueError( f"MlflowSparkStudy only works with `MlflowStorage`. But get {type(self._storage)}." 
) # Check if study exists and auto-resume if it does if self._storage.get_study_id_by_name_if_exists(self.study_name): # Load existing study self._study = optuna.load_study( study_name=self.study_name, sampler=self.sampler, storage=self._storage ) self._study_id = self._storage.get_study_id_from_name(self.study_name) self._is_resumed = True _logger.info( f"Resuming existing study '{self.study_name}' with {len(self._study.trials)} trials" ) else: # Create new study self._study = optuna.create_study( study_name=self.study_name, sampler=self.sampler, storage=self._storage ) self._study_id = self._storage.get_study_id_from_name(self.study_name) self._is_resumed = False _logger.info(f"Created new study '{self.study_name}'") self._directions = self._storage.get_study_directions(self._study_id) @property def is_resumed_study(self) -> bool: """Check if this study was resumed from existing data. Returns: True if the study was resumed from existing data, False if it's a new study """ return self._is_resumed @property def completed_trials_count(self) -> int: """Number of completed trials in the study. Returns: Count of trials that have completed successfully """ return len([t for t in self._study.trials if t.state == TrialState.COMPLETE]) def get_resume_info(self) -> ResumeInfo | None: """Get information about the resumed study. 
Returns: ResumeInfo dataclass containing resume information including trial counts and best results """ if not self._is_resumed: return ResumeInfo(is_resumed=False) return ResumeInfo( is_resumed=True, study_name=self.study_name, existing_trials=len(self._study.trials), completed_trials=self.completed_trials_count, best_value=self._study.best_value if self._study.trials else None, best_params=self._study.best_params if self._study.trials else None, ) def optimize( self, func: "optuna.study.study.ObjectiveFuncType", n_trials: int | None = None, timeout: float | None = None, n_jobs: int = -1, catch: Iterable[type[Exception]] = (), callbacks: Iterable[Callable[[Study, FrozenTrial], None]] | None = None, ) -> None: # Add logging for resume information if self._is_resumed and self._study.trials: _logger.info(f""" Continuing optimization with {len(self._study.trials)} existing trials. Current best value: {self._study.best_value} """) elif self._is_resumed: _logger.info("Resuming study with no previous trials") else: _logger.info("Starting optimization for new study") experiment_id = self._storage._experiment_id study_name = self.study_name mlflow_tracking_env = self._mlflow_tracking_env sampler = self.sampler def run_task_on_executor_pd(iterator): mlflow.set_tracking_uri(mlflow_tracking_env) mlflow_client = MlflowClient() storage = MlflowStorage(experiment_id=experiment_id) study = optuna.load_study(study_name=study_name, sampler=sampler, storage=storage) num_trials = sum(map(len, iterator)) error_message = None try: _optimize_sequential( study=study, func=func, mlflow_client=mlflow_client, n_trials=num_trials, timeout=timeout, catch=catch, callbacks=callbacks, ) except BaseException: error_message = traceback.format_exc() yield pd.DataFrame({"error": [error_message]}) num_tasks = n_trials if n_jobs == -1: n_jobs = num_tasks input_df = self.spark.range(start=0, end=num_tasks, step=1, numPartitions=n_jobs) trial_tag = f"optuna_trial_{study_name}_{experiment_id}" if 
self._is_spark_connect_mode: self.spark.addTag(trial_tag) else: job_group_id = self.spark.sparkContext.getLocalProperty("spark.jobGroup.id") if job_group_id is None: job_group_id = trial_tag job_group_description = f"optuna_trial_{study_name}" self.spark.sparkContext.setJobGroup( job_group_id, job_group_description, interruptOnCancel=True ) try: result_df = input_df.mapInPandas( func=run_task_on_executor_pd, schema="error string", ) except KeyboardInterrupt: if self._is_spark_connect_mode: self.spark.interruptTag(trial_tag) else: self.spark.sparkContext.cancelJobGroup(trial_tag) _logger.debug("MlflowSparkStudy optimize terminated by user.") self.mlflow_client.set_terminated(self._study_id, "KILLED") raise if "error" in result_df.columns: failed_runs = result_df.filter(col("error").isNotNull()) error_rows = failed_runs.select("error").collect() if len(error_rows) > 0: first_non_null_value = error_rows[0][0] self.mlflow_client.set_terminated(self._study_id, "KILLED") raise ExecutionException( f"Optimization run for Optuna MlflowSparkStudy failed. " f"See full error details in the failed MLflow runs. " f"Number of failed runs: {len(error_rows)}. " f"First trial failure message: {first_non_null_value}" ) self.mlflow_client.set_terminated(self._study_id)
MlflowSparkStudy
python
getsentry__sentry
src/sentry/organizations/services/organization/impl.py
{ "start": 31122, "end": 32120 }
class ____(OrganizationCheckService): def check_organization_by_slug(self, *, slug: str, only_visible: bool) -> int | None: # See ControlOrganizationCheckService above try: org = Organization.objects.get_from_cache(slug=slug) if only_visible and org.status != OrganizationStatus.ACTIVE: raise Organization.DoesNotExist return org.id except Organization.DoesNotExist: logger.info("Organization by slug [%s] not found", slug) return None def check_organization_by_id(self, *, id: int, only_visible: bool) -> bool: # See ControlOrganizationCheckService above try: org = Organization.objects.get_from_cache(id=id) if only_visible and org.status != OrganizationStatus.ACTIVE: raise Organization.DoesNotExist return True except Organization.DoesNotExist: pass return False
RegionOrganizationCheckService
python
apache__airflow
airflow-ctl/src/airflowctl/api/datamodels/generated.py
{ "start": 3859, "end": 3952 }
class ____(RootModel[str]): root: Annotated[str, Field(max_length=1000, title="Note")]
Note
python
django__django
tests/auth_tests/test_checks.py
{ "start": 14831, "end": 14938 }
class ____(SessionMiddleware): pass @override_system_checks([check_middleware])
SessionMiddlewareSubclass
python
tensorflow__tensorflow
tensorflow/python/data/experimental/ops/data_service_ops.py
{ "start": 5347, "end": 9299 }
class ____: """Options related to the tf.data service cross trainer cache. This is used to enable cross-trainer cache when distributing a dataset. For example: ``` dataset = dataset.apply(tf.data.experimental.service.distribute( processing_mode=tf.data.experimental.service.ShardingPolicy.OFF, service=FLAGS.tf_data_service_address, job_name="job", cross_trainer_cache=data_service_ops.CrossTrainerCache( trainer_id=trainer_id()))) ``` For more details, refer to https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers. """ def __init__(self, trainer_id): """Constructs a CrossTrainerCache. Args: trainer_id: Each training job has a unique ID. Once a job has consumed data, the data remains in the cache and is re-used by jobs with different `trainer_id`s. Requests with the same `trainer_id` do not re-use data. Raises: ValueError if `trainer_id` is empty. """ if not trainer_id: raise ValueError( "tf.data service cross-trainer cache requires a non-empty trainer ID." ) self.trainer_id = trainer_id def _to_proto(self) -> data_service_pb2.CrossTrainerCacheOptions: return data_service_pb2.CrossTrainerCacheOptions(trainer_id=self.trainer_id) def _get_validated_sharding_policy(processing_mode) -> ShardingPolicy: """Validates `processing_mode` and converts it to ShardingPolicy.""" if isinstance(processing_mode, ShardingPolicy): return processing_mode if processing_mode == _PARALLEL_EPOCHS: return ShardingPolicy.OFF if processing_mode == _DISTRIBUTED_EPOCH: return ShardingPolicy.DYNAMIC raise ValueError("tf.data service processing mode should be a " "`tf.data.experimental.service.ShardingPolicy`, " "`\"parallel_epochs\"`, or `\"distributed_epoch\"`. Got " f"{processing_mode!r}.") def _validate_job_name(job_name) -> None: if job_name is None: return if not isinstance(job_name, str): raise ValueError("`job_name` must be a string, but `job_name` was of type " f"{type(job_name)}. 
job_name={job_name}") if not job_name: raise ValueError("`job_name` must not be empty") def _validate_compression(compression) -> None: valid_compressions = [ COMPRESSION_AUTO, COMPRESSION_NONE, COMPRESSION_SNAPPY, ] if compression not in valid_compressions: raise ValueError(f"Invalid `compression` argument: {compression}. " f"Must be one of {valid_compressions}.") def _get_compression_proto( compression) -> data_service_pb2.DataServiceMetadata.Compression: if compression == COMPRESSION_AUTO: return data_service_pb2.DataServiceMetadata.COMPRESSION_SNAPPY if compression == COMPRESSION_SNAPPY: return data_service_pb2.DataServiceMetadata.COMPRESSION_FORCED_SNAPPY if compression == COMPRESSION_NONE: return data_service_pb2.DataServiceMetadata.COMPRESSION_OFF raise ValueError(f"Invalid `compression` argument: {compression}. " f"Must be one of {[COMPRESSION_AUTO, COMPRESSION_NONE]}.") def _to_tensor(dataset_id) -> tensor.Tensor: """Converts `dataset_id` to Tensor.""" if isinstance(dataset_id, tensor.Tensor): return dataset_id if isinstance(dataset_id, str) or isinstance(dataset_id, bytes): return ops.convert_to_tensor( dataset_id, dtype=dtypes.string, name="dataset_id") return ops.convert_to_tensor( dataset_id, dtype=dtypes.int64, name="dataset_id") def _to_string(dataset_id) -> str: """Converts `dataset_id` to string.""" if isinstance(dataset_id, tensor.Tensor): return (dataset_id if dataset_id.dtype == dtypes.string else string_ops.as_string(dataset_id)) return (dataset_id.decode() if isinstance(dataset_id, bytes) else str(dataset_id))
CrossTrainerCache
python
fluentpython__example-code-2e
23-descriptor/bulkfood/model_v4c.py
{ "start": 17, "end": 374 }
class ____: def __set_name__(self, owner, name): # <1> self.storage_name = name # <2> def __set__(self, instance, value): # <3> if value > 0: instance.__dict__[self.storage_name] = value else: msg = f'{self.storage_name} must be > 0' raise ValueError(msg) # END MODEL_V4
Quantity
python
zarr-developers__zarr-python
src/zarr/errors.py
{ "start": 1020, "end": 1169 }
class ____(BaseZarrError, FileNotFoundError): """ Raised when a node (array or group) is not found at a certain path. """
NodeNotFoundError
python
numba__numba
numba/np/npyimpl.py
{ "start": 1393, "end": 1534 }
class ____(object): def update_indices(self, loop_indices, name): pass def as_values(self): pass
_ScalarIndexingHelper
python
getsentry__sentry
tests/sentry/mail/test_adapter.py
{ "start": 40844, "end": 56673 }
class ____(BaseMailAdapterTest): def create_assert_delete_projectownership( self, proj: Project, rules: Sequence[grammar.Rule], data: Mapping, asserted_emails_fired: Sequence[str], ): po = ProjectOwnership.objects.create( project_id=proj.id, schema=dump_schema(rules), fallthrough=False ) self.assert_notify( self.store_event(data=data, project_id=proj.id), asserted_emails_fired, ) po.delete() def test_notify_with_path(self) -> None: user = self.create_user(email="foo@example.com", is_active=True) user2 = self.create_user(email="baz@example.com", is_active=True) organization = self.create_organization(owner=user) team = self.create_team(organization=organization) project = self.create_project(name="Test", teams=[team]) OrganizationMemberTeam.objects.create( organizationmember=OrganizationMember.objects.get( user_id=user.id, organization=organization ), team=team, ) self.create_member(user=user2, organization=organization, teams=[team]) self.group = self.create_group( first_seen=timezone.now(), last_seen=timezone.now(), project=project, message="hello world", logger="root", ) ProjectOwnership.objects.create( project_id=project.id, schema=dump_schema( [ grammar.Rule(Matcher("path", "*.py"), [Owner("team", team.slug)]), grammar.Rule(Matcher("path", "*.jx"), [Owner("user", user2.email)]), grammar.Rule( Matcher("path", "*.cbl"), [Owner("user", user.email), Owner("user", user2.email)], ), ] ), fallthrough=True, ) event_team = self.store_event(data=make_event_data("foo.py"), project_id=project.id) self.assert_notify(event_team, [user.email, user2.email]) event_single_user = self.store_event(data=make_event_data("foo.jx"), project_id=project.id) self.assert_notify(event_single_user, [user2.email]) with assume_test_silo_mode(SiloMode.CONTROL): # Make sure that disabling mail alerts works as expected NotificationSettingOption.objects.create( user_id=user2.id, scope_type="project", scope_identifier=project.id, type="alerts", value="never", ) def 
test_notify_with_release_tag(self) -> None: owner = self.create_user(email="theboss@example.com", is_active=True) organization = self.create_organization(owner=owner) team = self.create_team(organization=organization, name="awesome") team2 = self.create_team(organization=organization, name="sauce") project = self.create_project(name="Test", teams=[team, team2]) user = self.create_user(email="foo@example.com", is_active=True) user2 = self.create_user(email="baz@example.com", is_active=True) user3 = self.create_user(email="one@example.com", is_active=True) user4 = self.create_user(email="two@example.com", is_active=True) user5 = self.create_user(email="three@example.com", is_active=True) [self.create_member(user=u, organization=organization, teams=[team]) for u in [user, user2]] [ self.create_member(user=u, organization=organization, teams=[team2]) for u in [user3, user4, user5] ] with assume_test_silo_mode(SiloMode.CONTROL): for u in [user, user2, user3, user4, user5]: # disable slack NotificationSettingProvider.objects.create( user_id=u.id, scope_type="user", scope_identifier=u.id, provider="slack", type="alerts", value="never", ) self.create_assert_delete_projectownership( project, [ grammar.Rule( Matcher("tags.release", "*"), [Owner("user", user.email)], ), grammar.Rule( Matcher("tags.release", "1"), [Owner("user", user2.email)], ), ], {"release": "1"}, [user2.email], ) self.create_assert_delete_projectownership( project, [ grammar.Rule( Matcher("tags.release", "*"), [Owner("user", user.email)], ), grammar.Rule( Matcher("tags.release", "2"), [Owner("team", team2.slug)], ), ], {"release": "2"}, [user3.email, user4.email, user5.email], ) def test_notify_with_dist_tag(self) -> None: owner = self.create_user(email="theboss@example.com", is_active=True) organization = self.create_organization(owner=owner) team = self.create_team(organization=organization, name="awesome") team2 = self.create_team(organization=organization, name="sauce") project = 
self.create_project(name="Test", teams=[team, team2]) user = self.create_user(email="foo@example.com", is_active=True) user2 = self.create_user(email="baz@example.com", is_active=True) user3 = self.create_user(email="one@example.com", is_active=True) user4 = self.create_user(email="two@example.com", is_active=True) user5 = self.create_user(email="three@example.com", is_active=True) [self.create_member(user=u, organization=organization, teams=[team]) for u in [user, user2]] [ self.create_member(user=u, organization=organization, teams=[team2]) for u in [user3, user4, user5] ] with assume_test_silo_mode(SiloMode.CONTROL): for u in [user, user2, user3, user4, user5]: NotificationSettingProvider.objects.create( user_id=u.id, scope_type="user", scope_identifier=u.id, provider="slack", type="alerts", value="never", ) self.create_assert_delete_projectownership( project, [ grammar.Rule( Matcher("tags.dist", "*"), [Owner("user", user.email)], ), grammar.Rule( Matcher("tags.dist", "rc1"), [Owner("user", user2.email)], ), ], {"dist": "rc1", "release": "1"}, [user2.email], ) self.create_assert_delete_projectownership( project, [ grammar.Rule( Matcher("tags.dist", "*"), [Owner("user", user.email)], ), grammar.Rule( Matcher("tags.dist", "lenny"), [Owner("team", team2.slug)], ), ], {"dist": "lenny", "release": "1"}, [user3.email, user4.email, user5.email], ) def test_dont_notify_with_dist_if_no_rule(self) -> None: owner = self.create_user(email="theboss@example.com", is_active=True) organization = self.create_organization(owner=owner) team = self.create_team(organization=organization, name="awesome") project = self.create_project(name="Test", teams=[team]) user = self.create_user(email="foo@example.com", is_active=True) self.create_member(user=user, organization=organization, teams=[team]) self.create_assert_delete_projectownership( project, [ grammar.Rule( Matcher("tags.abc", "hello"), [Owner("user", user.email)], ), ], {"dist": "hello", "release": "1"}, [], ) def 
test_notify_with_user_tag(self) -> None: owner = self.create_user(email="theboss@example.com", is_active=True) organization = self.create_organization(owner=owner) team = self.create_team(organization=organization, name="sentry") project = self.create_project(name="Test", teams=[team]) user_by_id = self.create_user(email="one@example.com", is_active=True) user_by_username = self.create_user(email="two@example.com", is_active=True) user_by_email = self.create_user(email="three@example.com", is_active=True) user_by_ip = self.create_user(email="four@example.com", is_active=True) user_by_sub = self.create_user(email="five@example.com", is_active=True) user_by_extra = self.create_user(email="six@example.com", is_active=True) [ self.create_member(user=u, organization=organization, teams=[team]) for u in [ user_by_id, user_by_username, user_by_email, user_by_ip, user_by_sub, user_by_extra, ] ] self.create_assert_delete_projectownership( project, [ grammar.Rule( Matcher("tags.user.id", "unique_id"), [Owner("user", user_by_id.email)], ), grammar.Rule( Matcher("tags.user.username", "my_user"), [Owner("user", user_by_username.email)], ), grammar.Rule( Matcher("tags.user.email", "foo@example.com"), [Owner("user", user_by_email.email)], ), grammar.Rule( Matcher("tags.user.ip_address", "127.0.0.1"), [Owner("user", user_by_ip.email)], ), grammar.Rule( Matcher("tags.user.subscription", "basic"), [Owner("user", user_by_sub.email)], ), grammar.Rule( Matcher("tags.user.extra", "detail"), [Owner("user", user_by_extra.email)], ), ], { "user": { "id": "unique_id", "username": "my_user", "email": "foo@example.com", "ip_address": "127.0.0.1", "subscription": "basic", "extra": "detail", } }, [ user_by_extra.email, ], ) def test_notify_with_user_tag_edge_cases(self) -> None: owner = self.create_user(email="theboss@example.com", is_active=True) organization = self.create_organization(owner=owner) team = self.create_team(organization=organization, name="sentry") project = 
self.create_project(name="Test", teams=[team]) user = self.create_user(email="sentryuser@example.com", is_active=True) user_star = self.create_user(email="user_star@example.com", is_active=True) user_username = self.create_user(email="user_username@example.com", is_active=True) user_username_star = self.create_user( email="user_username_star@example.com", is_active=True ) users = [user, user_star, user_username, user_username_star] for u in users: self.create_member(user=u, organization=organization, teams=[team]) with assume_test_silo_mode(SiloMode.CONTROL): for u in users: NotificationSettingProvider.objects.create( user_id=self.user.id, scope_type="user", scope_identifier=self.user.id, provider="slack", type="alerts", value="never", ) """ tags.user.username:someemail@example.com sentryuser@example.com tags.user:someemail@example.com sentryuser@example.com tags.user:* sentryuser@example.com tags.user.username:* sentryuser@example.com tags.user:username sentryuser@example.com tags.user:*someemail* #sentry """ dat = {"user": {"username": "someemail@example.com"}} self.create_assert_delete_projectownership( project, [ grammar.Rule( Matcher("tags.user.username", "someemail@example.com"), [Owner("user", user_username.email)], ) ], dat, [user_username.email], ) self.create_assert_delete_projectownership( project, [ grammar.Rule( Matcher("tags.user", "someemail@example.com"), [Owner("user", user.email)] ) ], dat, [], ) self.create_assert_delete_projectownership( project, [grammar.Rule(Matcher("tags.user", "*"), [Owner("user", user_star.email)])], dat, [user_star.email], ) self.create_assert_delete_projectownership( project, [ grammar.Rule( Matcher("tags.user.username", "*"), [Owner("user", user_username_star.email)], ) ], dat, [user_username_star.email], ) self.create_assert_delete_projectownership( project, [grammar.Rule(Matcher("tags.user", "username"), [Owner("user", user.email)])], dat, [], ) self.create_assert_delete_projectownership( project, 
[grammar.Rule(Matcher("tags.user", "*someemail*"), [Owner("team", team.slug)])], dat, [u.email for u in [user, user_star, user_username, user_username_star]], ) self.create_assert_delete_projectownership( project, [grammar.Rule(Matcher("tags.user.email", "someemail*"), [Owner("team", team.slug)])], {"user": {"username": "someemail@example.com"}}, [], ) def test_group_substatus_header(self) -> None: event = self.store_event( data={"message": "Hello world", "level": "error"}, project_id=self.project.id ) # Header is based on the group substatus assert event.group is not None event.group.substatus = GroupSubStatus.REGRESSED event.group.save() rule = Rule.objects.create(project=self.project, label="my rule") ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True) notification = Notification(event=event, rule=rule) with self.options({"system.url-prefix": "http://example.com"}), self.tasks(): self.adapter.notify( notification, ActionTargetType.ISSUE_OWNERS, fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS, ) msg = mail.outbox[0] assert isinstance(msg, EmailMultiAlternatives) assert msg.subject == "[Sentry] BAR-1 - Hello world" assert isinstance(msg.alternatives[0][0], str) assert "Regressed issue" in msg.alternatives[0][0]
MailAdapterNotifyIssueOwnersTest
python
jupyterlab__jupyterlab
jupyterlab/labapp.py
{ "start": 4491, "end": 7511 }
class ____(JupyterApp, DebugLogFileMixin): version = version description = """ Build the JupyterLab application The application is built in the JupyterLab app directory in `/staging`. When the build is complete it is put in the JupyterLab app `/static` directory, where it is used to serve the application. """ aliases = build_aliases flags = build_flags # Not configurable! core_config = Instance(CoreConfig, allow_none=True) app_dir = Unicode("", config=True, help="The app directory to build in") name = Unicode("JupyterLab", config=True, help="The name of the built application") version = Unicode("", config=True, help="The version of the built application") dev_build = Bool( None, allow_none=True, config=True, help="Whether to build in dev mode. Defaults to True (dev mode) if there are any locally linked extensions, else defaults to False (production mode).", ) minimize = Bool( True, config=True, help="Whether to minimize a production build (defaults to True).", ) pre_clean = Bool( False, config=True, help="Whether to clean before building (defaults to False)" ) splice_source = Bool(False, config=True, help="Splice source packages into app directory.") def start(self): app_dir = self.app_dir or get_app_dir() app_options = AppOptions( app_dir=app_dir, logger=self.log, core_config=self.core_config, splice_source=self.splice_source, ) self.log.info(f"JupyterLab {version}") with self.debug_logging(): if self.pre_clean: self.log.info(f"Cleaning {app_dir}") clean(app_options=app_options) self.log.info(f"Building in {app_dir}") try: production = None if self.dev_build is None else not self.dev_build build( name=self.name, version=self.version, app_options=app_options, production=production, minimize=self.minimize, ) except Exception as e: self.log.error(build_failure_msg) raise e clean_aliases = dict(base_aliases) clean_aliases["app-dir"] = "LabCleanApp.app_dir" ext_warn_msg = "WARNING: this will delete all of your extensions, which will need to be reinstalled" clean_flags 
= dict(base_flags) clean_flags["extensions"] = ( {"LabCleanApp": {"extensions": True}}, f"Also delete <app-dir>/extensions.\n{ext_warn_msg}", ) clean_flags["settings"] = ( {"LabCleanApp": {"settings": True}}, "Also delete <app-dir>/settings", ) clean_flags["static"] = ( {"LabCleanApp": {"static": True}}, "Also delete <app-dir>/static", ) clean_flags["all"] = ( {"LabCleanApp": {"all": True}}, f"Delete the entire contents of the app directory.\n{ext_warn_msg}", )
LabBuildApp
python
pandas-dev__pandas
pandas/tests/series/test_arrow_interface.py
{ "start": 1637, "end": 1841 }
class ____: def __init__(self, array): self.array = array def __arrow_c_array__(self, requested_schema=None): return self.array.__arrow_c_array__(requested_schema)
ArrowArrayWrapper
python
matplotlib__matplotlib
lib/matplotlib/projections/__init__.py
{ "start": 2818, "end": 4438 }
class ____:
    """A mapping of registered projection names to projection classes."""

    def __init__(self):
        self._all_projection_types = {}

    def register(self, *projections):
        """Register a new set of projections."""
        for projection in projections:
            # Each projection class advertises its lookup key via `.name`.
            self._all_projection_types[projection.name] = projection

    def get_projection_class(self, name):
        """Get a projection class from its *name*."""
        return self._all_projection_types[name]

    def get_projection_names(self):
        """Return the names of all projections currently registered."""
        return sorted(self._all_projection_types)


projection_registry = ProjectionRegistry()
projection_registry.register(
    axes.Axes,
    PolarAxes,
    AitoffAxes,
    HammerAxes,
    LambertAxes,
    MollweideAxes,
)
if Axes3D is not None:
    projection_registry.register(Axes3D)
else:
    # remove from namespace if not importable
    del Axes3D


def register_projection(cls):
    projection_registry.register(cls)


def get_projection_class(projection=None):
    """
    Get a projection class from its name.

    If *projection* is None, a standard rectilinear projection is returned.
    """
    if projection is None:
        projection = 'rectilinear'
    try:
        return projection_registry.get_projection_class(projection)
    except KeyError as err:
        raise ValueError("Unknown projection %r" % projection) from err


get_projection_names = projection_registry.get_projection_names
_docstring.interpd.register(projection_names=get_projection_names())
ProjectionRegistry
python
huggingface__transformers
src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py
{ "start": 61142, "end": 62060 }
class ____(PreTrainedModel):
    """Base PreTrainedModel wiring Phi-4 multimodal layers into the runtime."""

    config: Phi4MultimodalConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Phi4MultimodalDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": Phi4MultimodalDecoderLayer,
        "attentions": Phi4MultimodalAttention,
    }
    _version = "0.0.5"
    input_modalities = ("image", "audio", "text")

    @torch.no_grad()
    def _init_weights(self, module):
        # Standard initialization first, then zero the image-feature
        # extensor parameters for the image-embedding module.
        super()._init_weights(module)
        if isinstance(module, Phi4MultimodalImageEmbedding):
            init.zeros_(module.global_img_feature_extensor)
            init.zeros_(module.sub_img_feature_extensor)
Phi4MultimodalPreTrainedModel
python
davidhalter__parso
parso/file_io.py
{ "start": 837, "end": 1023 }
class ____(FileIO):
    """A FileIO whose content is fixed in memory rather than read from disk."""

    def __init__(self, path, content):
        super().__init__(path)
        self._content = content

    def read(self):
        # Serve the preloaded content; the filesystem is never touched.
        return self._content
KnownContentFileIO
python
tensorflow__tensorflow
tensorflow/python/keras/saving/saved_model/utils.py
{ "start": 9125, "end": 10796 }
class ____(threading.local):
  """Thread-local state controlling whether layer call traces are saved."""

  def __init__(self):
    super(SaveOptionsContext, self).__init__()
    self.save_traces = True


_save_options_context = SaveOptionsContext()


@tf_contextlib.contextmanager
def keras_option_scope(save_traces):
  saved = _save_options_context.save_traces
  try:
    _save_options_context.save_traces = save_traces
    yield
  finally:
    _save_options_context.save_traces = saved


def should_save_traces():
  """Whether to trace layer functions-can be disabled in the save_traces arg."""
  return _save_options_context.save_traces


@tf_contextlib.contextmanager
def no_automatic_dependency_tracking_scope(obj):
  """A context that disables automatic dependency tracking when assigning attrs.

  Objects that inherit from Autotrackable automatically create dependencies on
  trackable objects through attribute assignment and wrap lists/dicts in
  trackable containers.  Within this scope those mechanisms are switched off,
  so plain Python structures are assigned untracked.  Works like the decorator
  `no_automatic_dependency_tracking`, but as a scope.

  Example usage:

  ```
  model = tf.keras.Model()
  model.arr1 = []  # Creates a ListWrapper object
  with no_automatic_dependency_tracking_scope(model):
    model.arr2 = []  # Creates a regular, untracked python list
  ```

  Args:
    obj: A trackable object.

  Yields:
    a scope in which the object doesn't track dependencies.
  """
  saved = getattr(obj, '_setattr_tracking', True)
  obj._setattr_tracking = False  # pylint: disable=protected-access
  try:
    yield
  finally:
    obj._setattr_tracking = saved  # pylint: disable=protected-access
SaveOptionsContext
python
aio-libs__aiohttp
tests/test_http_parser.py
{ "start": 62650, "end": 66124 }
class ____: async def test_feed_data(self, protocol: BaseProtocol) -> None: buf = aiohttp.StreamReader(protocol, 2**16, loop=asyncio.get_running_loop()) dbuf = DeflateBuffer(buf, "deflate") dbuf.decompressor = mock.Mock() dbuf.decompressor.decompress_sync.return_value = b"line" # First byte should be b'x' in order code not to change the decoder. dbuf.feed_data(b"xxxx") assert [b"line"] == list(buf._buffer) async def test_feed_data_err(self, protocol: BaseProtocol) -> None: buf = aiohttp.StreamReader(protocol, 2**16, loop=asyncio.get_running_loop()) dbuf = DeflateBuffer(buf, "deflate") exc = ValueError() dbuf.decompressor = mock.Mock() dbuf.decompressor.decompress_sync.side_effect = exc with pytest.raises(http_exceptions.ContentEncodingError): # Should be more than 4 bytes to trigger deflate FSM error. # Should start with b'x', otherwise code switch mocked decoder. dbuf.feed_data(b"xsomedata") async def test_feed_eof(self, protocol: BaseProtocol) -> None: buf = aiohttp.StreamReader(protocol, 2**16, loop=asyncio.get_running_loop()) dbuf = DeflateBuffer(buf, "deflate") dbuf.decompressor = mock.Mock() dbuf.decompressor.flush.return_value = b"line" dbuf.feed_eof() assert [b"line"] == list(buf._buffer) assert buf._eof async def test_feed_eof_err_deflate(self, protocol: BaseProtocol) -> None: buf = aiohttp.StreamReader(protocol, 2**16, loop=asyncio.get_running_loop()) dbuf = DeflateBuffer(buf, "deflate") dbuf.decompressor = mock.Mock() dbuf.decompressor.flush.return_value = b"line" dbuf.decompressor.eof = False with pytest.raises(http_exceptions.ContentEncodingError): dbuf.feed_eof() async def test_feed_eof_no_err_gzip(self, protocol: BaseProtocol) -> None: buf = aiohttp.StreamReader(protocol, 2**16, loop=asyncio.get_running_loop()) dbuf = DeflateBuffer(buf, "gzip") dbuf.decompressor = mock.Mock() dbuf.decompressor.flush.return_value = b"line" dbuf.decompressor.eof = False dbuf.feed_eof() assert [b"line"] == list(buf._buffer) async def test_feed_eof_no_err_brotli(self, 
protocol: BaseProtocol) -> None: buf = aiohttp.StreamReader(protocol, 2**16, loop=asyncio.get_running_loop()) dbuf = DeflateBuffer(buf, "br") dbuf.decompressor = mock.Mock() dbuf.decompressor.flush.return_value = b"line" dbuf.decompressor.eof = False dbuf.feed_eof() assert [b"line"] == list(buf._buffer) @pytest.mark.skipif(zstandard is None, reason="zstandard is not installed") async def test_feed_eof_no_err_zstandard(self, protocol: BaseProtocol) -> None: buf = aiohttp.StreamReader(protocol, 2**16, loop=asyncio.get_running_loop()) dbuf = DeflateBuffer(buf, "zstd") dbuf.decompressor = mock.Mock() dbuf.decompressor.flush.return_value = b"line" dbuf.decompressor.eof = False dbuf.feed_eof() assert [b"line"] == list(buf._buffer) async def test_empty_body(self, protocol: BaseProtocol) -> None: buf = aiohttp.StreamReader(protocol, 2**16, loop=asyncio.get_running_loop()) dbuf = DeflateBuffer(buf, "deflate") dbuf.feed_eof() assert buf.at_eof()
TestDeflateBuffer
python
catalyst-team__catalyst
catalyst/contrib/layers/lama.py
{ "start": 1301, "end": 2697 }
class ____(nn.Module):
    """Attention pooling over the temporal axis of a (batch, time, features) tensor."""

    # Supported gating nonlinearities for the attention scores.
    name2activation = {
        "softmax": nn.Softmax(dim=1),
        "tanh": nn.Tanh(),
        "sigmoid": nn.Sigmoid(),
    }

    def __init__(self, in_features, activation=None, kernel_size=1, **params):
        """Build the Conv1d + activation attention head.

        Args:
            in_features: size of the feature dimension.
            activation: key into ``name2activation``; defaults to "softmax".
            kernel_size: Conv1d kernel size for the attention scores.
            **params: extra keyword arguments forwarded to ``nn.Conv1d``.
        """
        super().__init__()
        self.in_features = in_features
        act_name = activation or "softmax"
        self.attention_pooling = nn.Sequential(
            nn.Conv1d(
                in_channels=in_features,
                out_channels=1,
                kernel_size=kernel_size,
                **params,
            ),
            TemporalAttentionPooling.name2activation[act_name],
        )
        self.attention_pooling.apply(outer_init)

    def forward(self, x: torch.Tensor, mask: torch.Tensor = None) -> torch.Tensor:
        """Pool the temporal dimension with learned attention weights.

        Args:
            x: tensor of size (batch_size, history_len, feature_size)
            mask: mask to use (currently unused)

        Returns:
            pooling result of shape (batch_size, 1, feature_size)
        """
        batch_size, history_len, _ = x.shape
        flat = x.view(batch_size, history_len, -1)
        channels_first = flat.transpose(1, 2)
        weighted = (self.attention_pooling(channels_first) * channels_first).transpose(1, 2)
        return weighted.sum(1, keepdim=True)
TemporalAttentionPooling
python
walkccc__LeetCode
solutions/2742. Painting the Walls/2742-2.py
{ "start": 0, "end": 372 }
class ____: def paintWalls(self, cost: list[int], time: list[int]) -> int: MAX = 500_000_000 n = len(cost) # dp[i] := the minimum cost to paint i walls by the painters so far dp = [0] + [MAX] * n for c, t in zip(cost, time): for walls in range(n, 0, -1): dp[walls] = min(dp[walls], dp[max(walls - t - 1, 0)] + c) return dp[n]
Solution
python
great-expectations__great_expectations
great_expectations/datasource/fluent/data_asset/path/directory_asset.py
{ "start": 1648, "end": 10384 }
class ____(PathDataAsset[DatasourceT, ColumnPartitioner], ABC, Generic[DatasourceT]): """Base class for PathDataAssets which batch by combining the contents of a directory.""" data_directory: pathlib.Path @public_api def add_batch_definition_daily(self, name: str, column: str) -> BatchDefinition: """ Add a BatchDefinition, which creates a single Batch for each day in the directory. Args: name: Name of the Batch Definition. column: Column to partition on. Returns: A BatchDefinition that is partitioned daily. """ # todo: test column return self.add_batch_definition( name=name, partitioner=ColumnPartitionerDaily( method_name="partition_on_year_and_month_and_day", column_name=column ), ) @public_api def add_batch_definition_monthly(self, name: str, column: str) -> BatchDefinition: """ Add a BatchDefinition which creates a single batch for each month in the directory. Args: name: Name of the Batch Definition. column: Column to partition on. Returns: A BatchDefinition that is partitioned monthly. """ # todo: test column return self.add_batch_definition( name=name, partitioner=ColumnPartitionerMonthly( method_name="partition_on_year_and_month", column_name=column ), ) @public_api def add_batch_definition_yearly(self, name: str, column: str) -> BatchDefinition: """ Add a BatchDefinition which creates a single batch for each year in the directory. Args: name: Name of the Batch Definition. column: Column to partition on. Returns: A BatchDefinition that is partitioned yearly. 
""" # todo: test column return self.add_batch_definition( name=name, partitioner=ColumnPartitionerYearly( method_name="partition_on_year", column_name=column ), ) @public_api def add_batch_definition_whole_directory(self, name: str) -> BatchDefinition: """Add a BatchDefinition which creates a single batch for the entire directory.""" return self.add_batch_definition(name=name, partitioner=None) @override def _get_batch_definition_list( self, batch_request: BatchRequest ) -> list[LegacyBatchDefinition]: """Generate a batch definition list from a given batch request. Args: batch_request: Batch request used to generate batch definitions. Returns: List of a single batch definition. """ if batch_request.partitioner: # Currently non-sql asset partitioners do not introspect the datasource for available # batches and only return a single batch based on specified batch_identifiers. batch_identifiers = batch_request.options if not batch_identifiers.get("path"): batch_identifiers["path"] = self.data_directory batch_definition = LegacyBatchDefinition( datasource_name=self._data_connector.datasource_name, data_connector_name=_DATA_CONNECTOR_NAME, data_asset_name=self._data_connector.data_asset_name, batch_identifiers=make_batch_identifier(batch_identifiers), ) batch_definition_list = [batch_definition] else: batch_definition_list = self._data_connector.get_batch_definition_list( batch_request=batch_request ) return batch_definition_list @singledispatchmethod def _get_dataframe_partitioner(self, partitioner) -> Optional[DataframePartitioner]: ... 
@_get_dataframe_partitioner.register def _(self, partitioner: ColumnPartitionerYearly) -> DataframePartitionerYearly: return DataframePartitionerYearly(**partitioner.dict(exclude={"param_names"})) @_get_dataframe_partitioner.register def _(self, partitioner: ColumnPartitionerMonthly) -> DataframePartitionerMonthly: return DataframePartitionerMonthly(**partitioner.dict(exclude={"param_names"})) @_get_dataframe_partitioner.register def _(self, partitioner: ColumnPartitionerDaily) -> DataframePartitionerDaily: return DataframePartitionerDaily(**partitioner.dict(exclude={"param_names"})) @_get_dataframe_partitioner.register def _(self, partitioner: None) -> None: return None @override def _get_reader_options_include(self) -> set[str]: return { "data_directory", } @override def get_batch_parameters_keys( self, partitioner: Optional[ColumnPartitioner] = None, ) -> tuple[str, ...]: option_keys: tuple[str, ...] = (FILE_PATH_BATCH_SPEC_KEY,) dataframe_partitioner = self._get_dataframe_partitioner(partitioner) if dataframe_partitioner: option_keys += tuple(dataframe_partitioner.param_names) return option_keys @override def get_whole_directory_path_override( self, ) -> PathStr: return self.data_directory @override def build_batch_request( self, options: Optional[BatchParameters] = None, batch_slice: Optional[BatchSlice] = None, partitioner: Optional[ColumnPartitioner] = None, ) -> BatchRequest: if options is not None and not self._batch_parameters_are_valid( options=options, partitioner=partitioner, ): allowed_keys = set(self.get_batch_parameters_keys(partitioner=partitioner)) actual_keys = set(options.keys()) raise gx_exceptions.InvalidBatchRequestError( # noqa: TRY003 # FIXME CoP "Batch parameters should only contain keys from the following set:\n" f"{allowed_keys}\nbut your specified keys contain\n" f"{actual_keys.difference(allowed_keys)}\nwhich is not valid.\n" ) return BatchRequest( datasource_name=self.datasource.name, data_asset_name=self.name, options=options or {}, 
batch_slice=batch_slice, partitioner=partitioner, ) @override def _batch_spec_options_from_batch_request(self, batch_request: BatchRequest) -> dict: """Build a set of options for use in a batch spec from a batch request. Args: batch_request: Batch request to use to generate options. Returns: Dictionary containing batch spec options. """ get_reader_options_include: set[str] | None = self._get_reader_options_include() if not get_reader_options_include: # Set to None if empty set to include any additional `extra_kwargs` passed to `add_*_asset` # noqa: E501 # FIXME CoP get_reader_options_include = None batch_spec_options = { "reader_method": self._get_reader_method(), "reader_options": self.dict( include=get_reader_options_include, exclude=self._EXCLUDE_FROM_READER_OPTIONS, exclude_unset=True, by_alias=True, config_provider=self._datasource._config_provider, ), } partitioner_parameters = self._get_partitioner_parameters(batch_request=batch_request) if partitioner_parameters: batch_spec_options.update(partitioner_parameters) return batch_spec_options def _get_partitioner_parameters(self, batch_request: BatchRequest) -> Optional[dict]: """If a partitioner is present, add its configuration to batch parameters.""" partitioner: Optional[DataframePartitioner] = self._get_dataframe_partitioner( batch_request.partitioner ) if not partitioner: return None batch_identifiers = partitioner.batch_parameters_to_batch_spec_kwarg_identifiers( parameters=batch_request.options ) return { "partitioner_method": partitioner.method_name, "partitioner_kwargs": { **partitioner.partitioner_method_kwargs(), "batch_identifiers": batch_identifiers, }, } @override def _get_sortable_partitioner( self, partitioner: Optional[ColumnPartitioner] ) -> Optional[PartitionerSortingProtocol]: # DirectoryAssets can only ever return a single batch, so they do not require sorting. return None
DirectoryDataAsset