language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
tensorflow__tensorflow
tensorflow/python/training/saver_test.py
{ "start": 54248, "end": 70147 }
class ____(test.TestCase): def _get_test_dir(self, dirname): test_dir = os.path.join(self.get_temp_dir(), dirname) gfile.MakeDirs(test_dir) return test_dir def assertCheckpointState(self, model_checkpoint_path, all_model_checkpoint_paths, save_dir): checkpoint_state = checkpoint_management.get_checkpoint_state(save_dir) self.assertEqual(checkpoint_state.model_checkpoint_path, model_checkpoint_path) self.assertEqual(checkpoint_state.all_model_checkpoint_paths, all_model_checkpoint_paths) def testMaxToKeepEager(self): with context.eager_mode(): save_dir = self._get_test_dir("max_to_keep_eager") v = variable_v1.VariableV1(10.0, name="v") save = saver_module.Saver({"v": v}, max_to_keep=2) self.evaluate(variables.global_variables_initializer()) if not context.executing_eagerly(): self.assertEqual([], save.last_checkpoints) s1 = save.save(None, os.path.join(save_dir, "s1")) self.assertEqual([s1], save.last_checkpoints) self.assertTrue(checkpoint_management.checkpoint_exists(s1)) self.assertCheckpointState( model_checkpoint_path=s1, all_model_checkpoint_paths=[s1], save_dir=save_dir) s2 = save.save(None, os.path.join(save_dir, "s2")) self.assertEqual([s1, s2], save.last_checkpoints) self.assertTrue(checkpoint_management.checkpoint_exists(s1)) self.assertTrue(checkpoint_management.checkpoint_exists(s2)) self.assertCheckpointState( model_checkpoint_path=s2, all_model_checkpoint_paths=[s1, s2], save_dir=save_dir) s3 = save.save(None, os.path.join(save_dir, "s3")) self.assertEqual([s2, s3], save.last_checkpoints) self.assertFalse(checkpoint_management.checkpoint_exists(s1)) self.assertTrue(checkpoint_management.checkpoint_exists(s2)) self.assertTrue(checkpoint_management.checkpoint_exists(s3)) self.assertCheckpointState( model_checkpoint_path=s3, all_model_checkpoint_paths=[s2, s3], save_dir=save_dir) # Create a second helper, identical to the first. 
save2 = saver_module.Saver({"v": v}, max_to_keep=2) save2.set_last_checkpoints(save.last_checkpoints) # Exercise the first helper. # Adding s2 again (old s2 is removed first, then new s2 appended) s2 = save.save(None, os.path.join(save_dir, "s2")) self.assertEqual([s3, s2], save.last_checkpoints) self.assertFalse(checkpoint_management.checkpoint_exists(s1)) self.assertTrue(checkpoint_management.checkpoint_exists(s3)) self.assertTrue(checkpoint_management.checkpoint_exists(s2)) self.assertCheckpointState( model_checkpoint_path=s2, all_model_checkpoint_paths=[s3, s2], save_dir=save_dir) # Adding s1 (s3 should now be deleted as oldest in list) s1 = save.save(None, os.path.join(save_dir, "s1")) self.assertEqual([s2, s1], save.last_checkpoints) self.assertFalse(checkpoint_management.checkpoint_exists(s3)) self.assertTrue(checkpoint_management.checkpoint_exists(s2)) self.assertCheckpointState( model_checkpoint_path=s1, all_model_checkpoint_paths=[s2, s1], save_dir=save_dir) s2 = save2.save(None, os.path.join(save_dir, "s2")) self.assertEqual([s3, s2], save2.last_checkpoints) # Created by the first helper. self.assertTrue(checkpoint_management.checkpoint_exists(s1)) # Deleted by the first helper. self.assertFalse(checkpoint_management.checkpoint_exists(s3)) def testNonSharded(self): save_dir = self._get_test_dir("max_to_keep_non_sharded") # train.Saver is V1 only API. 
with ops_lib.Graph().as_default(), self.cached_session() as sess: v = variable_v1.VariableV1(10.0, name="v") save = saver_module.Saver({"v": v}, max_to_keep=2) self.evaluate(variables.global_variables_initializer()) self.assertEqual([], save.last_checkpoints) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s1], save.last_checkpoints) self.assertTrue(checkpoint_management.checkpoint_exists(s1)) self.assertCheckpointState( model_checkpoint_path=s1, all_model_checkpoint_paths=[s1], save_dir=save_dir) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s1, s2], save.last_checkpoints) self.assertTrue(checkpoint_management.checkpoint_exists(s1)) self.assertTrue(checkpoint_management.checkpoint_exists(s2)) self.assertCheckpointState( model_checkpoint_path=s2, all_model_checkpoint_paths=[s1, s2], save_dir=save_dir) s3 = save.save(sess, os.path.join(save_dir, "s3")) self.assertEqual([s2, s3], save.last_checkpoints) self.assertFalse(checkpoint_management.checkpoint_exists(s1)) self.assertTrue(checkpoint_management.checkpoint_exists(s2)) self.assertTrue(checkpoint_management.checkpoint_exists(s3)) self.assertCheckpointState( model_checkpoint_path=s3, all_model_checkpoint_paths=[s2, s3], save_dir=save_dir) # Create a second helper, identical to the first. save2 = saver_module.Saver(saver_def=save.as_saver_def()) save2.set_last_checkpoints(save.last_checkpoints) # Create a third helper, with the same configuration but no knowledge of # previous checkpoints. save3 = saver_module.Saver(saver_def=save.as_saver_def()) # Exercise the first helper. 
# Adding s2 again (old s2 is removed first, then new s2 appended) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s3, s2], save.last_checkpoints) self.assertFalse(checkpoint_management.checkpoint_exists(s1)) self.assertFalse( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s1))) self.assertTrue(checkpoint_management.checkpoint_exists(s3)) self.assertTrue( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s3))) self.assertTrue(checkpoint_management.checkpoint_exists(s2)) self.assertTrue( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s2))) self.assertCheckpointState( model_checkpoint_path=s2, all_model_checkpoint_paths=[s3, s2], save_dir=save_dir) # Adding s1 (s3 should now be deleted as oldest in list) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s2, s1], save.last_checkpoints) self.assertFalse(checkpoint_management.checkpoint_exists(s3)) self.assertFalse( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s3))) self.assertTrue(checkpoint_management.checkpoint_exists(s2)) self.assertTrue( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s2))) self.assertTrue(checkpoint_management.checkpoint_exists(s1)) self.assertTrue( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s1))) self.assertCheckpointState( model_checkpoint_path=s1, all_model_checkpoint_paths=[s2, s1], save_dir=save_dir) # Exercise the second helper. # Adding s2 again (old s2 is removed first, then new s2 appended) s2 = save2.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s3, s2], save2.last_checkpoints) # Created by the first helper. self.assertTrue(checkpoint_management.checkpoint_exists(s1)) self.assertTrue( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s1))) # Deleted by the first helper. 
self.assertFalse(checkpoint_management.checkpoint_exists(s3)) self.assertFalse( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s3))) self.assertTrue(checkpoint_management.checkpoint_exists(s2)) self.assertTrue( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s2))) self.assertCheckpointState( model_checkpoint_path=s2, all_model_checkpoint_paths=[s3, s2], save_dir=save_dir) # Adding s1 (s3 should now be deleted as oldest in list) s1 = save2.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s2, s1], save2.last_checkpoints) self.assertFalse(checkpoint_management.checkpoint_exists(s3)) self.assertFalse( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s3))) self.assertTrue(checkpoint_management.checkpoint_exists(s2)) self.assertTrue( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s2))) self.assertTrue(checkpoint_management.checkpoint_exists(s1)) self.assertTrue( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s1))) self.assertCheckpointState( model_checkpoint_path=s1, all_model_checkpoint_paths=[s2, s1], save_dir=save_dir) # Exercise the third helper. # Adding s2 again (but helper is unaware of previous s2) s2 = save3.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s2], save3.last_checkpoints) # Created by the first helper. self.assertTrue(checkpoint_management.checkpoint_exists(s1)) self.assertTrue( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s1))) # Deleted by the first helper. 
self.assertFalse(checkpoint_management.checkpoint_exists(s3)) self.assertFalse( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s3))) self.assertTrue(checkpoint_management.checkpoint_exists(s2)) self.assertTrue( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s2))) # Even though the file for s1 exists, this saver isn't aware of it, which # is why it doesn't end up in the checkpoint state. self.assertCheckpointState( model_checkpoint_path=s2, all_model_checkpoint_paths=[s2], save_dir=save_dir) # Adding s1 (s3 should not be deleted because helper is unaware of it) s1 = save3.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s2, s1], save3.last_checkpoints) self.assertFalse(checkpoint_management.checkpoint_exists(s3)) self.assertFalse( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s3))) self.assertTrue(checkpoint_management.checkpoint_exists(s2)) self.assertTrue( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s2))) self.assertTrue(checkpoint_management.checkpoint_exists(s1)) self.assertTrue( checkpoint_management.checkpoint_exists( checkpoint_management.meta_graph_filename(s1))) self.assertCheckpointState( model_checkpoint_path=s1, all_model_checkpoint_paths=[s2, s1], save_dir=save_dir) def testSharded(self): save_dir = self._get_test_dir("max_to_keep_sharded") with session.Session( target="", config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = variable_v1.VariableV1(111, name="v0") with sess.graph.device("/cpu:1"): v1 = variable_v1.VariableV1(222, name="v1") save = saver_module.Saver( { "v0": v0, "v1": v1 }, sharded=True, max_to_keep=2) self.evaluate(variables.global_variables_initializer()) self.assertEqual([], save.last_checkpoints) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s1], save.last_checkpoints) if save._write_version is 
saver_pb2.SaverDef.V1: self.assertEqual(2, len(gfile.Glob(s1))) else: self.assertEqual(4, len(gfile.Glob(s1 + "*"))) self.assertTrue( gfile.Exists(checkpoint_management.meta_graph_filename(s1))) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s1, s2], save.last_checkpoints) if save._write_version is saver_pb2.SaverDef.V1: self.assertEqual(2, len(gfile.Glob(s1))) else: self.assertEqual(4, len(gfile.Glob(s1 + "*"))) self.assertTrue( gfile.Exists(checkpoint_management.meta_graph_filename(s1))) if save._write_version is saver_pb2.SaverDef.V1: self.assertEqual(2, len(gfile.Glob(s2))) else: self.assertEqual(4, len(gfile.Glob(s2 + "*"))) self.assertTrue( gfile.Exists(checkpoint_management.meta_graph_filename(s2))) s3 = save.save(sess, os.path.join(save_dir, "s3")) self.assertEqual([s2, s3], save.last_checkpoints) self.assertEqual(0, len(gfile.Glob(s1 + "*"))) self.assertFalse( gfile.Exists(checkpoint_management.meta_graph_filename(s1))) if save._write_version is saver_pb2.SaverDef.V1: self.assertEqual(2, len(gfile.Glob(s2))) else: self.assertEqual(4, len(gfile.Glob(s2 + "*"))) self.assertTrue( gfile.Exists(checkpoint_management.meta_graph_filename(s2))) if save._write_version is saver_pb2.SaverDef.V1: self.assertEqual(2, len(gfile.Glob(s3))) else: self.assertEqual(4, len(gfile.Glob(s3 + "*"))) self.assertTrue( gfile.Exists(checkpoint_management.meta_graph_filename(s3))) def testNoMaxToKeep(self): save_dir = self._get_test_dir("no_max_to_keep") save_dir2 = self._get_test_dir("max_to_keep_0") with self.cached_session() as sess: v = variable_v1.VariableV1(10.0, name="v") self.evaluate(variables.global_variables_initializer()) # Test max_to_keep being None. 
save = saver_module.Saver({"v": v}, max_to_keep=None) self.assertEqual([], save.last_checkpoints) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([], save.last_checkpoints) self.assertTrue(checkpoint_management.checkpoint_exists(s1)) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([], save.last_checkpoints) self.assertTrue(checkpoint_management.checkpoint_exists(s2)) # Test max_to_keep being 0. save2 = saver_module.Saver({"v": v}, max_to_keep=0) self.assertEqual([], save2.last_checkpoints) s1 = save2.save(sess, os.path.join(save_dir2, "s1")) self.assertEqual([], save2.last_checkpoints) self.assertTrue(checkpoint_management.checkpoint_exists(s1)) s2 = save2.save(sess, os.path.join(save_dir2, "s2")) self.assertEqual([], save2.last_checkpoints) self.assertTrue(checkpoint_management.checkpoint_exists(s2)) def testNoMetaGraph(self): save_dir = self._get_test_dir("no_meta_graph") with self.cached_session() as sess: v = variable_v1.VariableV1(10.0, name="v") save = saver_module.Saver({"v": v}) self.evaluate(variables.global_variables_initializer()) s1 = save.save(sess, os.path.join(save_dir, "s1"), write_meta_graph=False) self.assertTrue(checkpoint_management.checkpoint_exists(s1)) self.assertFalse( gfile.Exists(checkpoint_management.meta_graph_filename(s1)))
MaxToKeepTest
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/constructor22.py
{ "start": 243, "end": 373 }
class ____(Protocol[T]): def a(self) -> "A[Tuple[T]]": ... def b(self) -> "A[Tuple[T]]": ... def c(self) -> "T": ...
A
python
Farama-Foundation__Gymnasium
tests/test_core.py
{ "start": 582, "end": 1599 }
class ____(Env): """Example testing environment.""" def __init__(self): """Constructor for example environment.""" self.observation_space = Box(0, 1) self.action_space = Box(0, 1) def step( self, action: ActType ) -> tuple[ObsType, float, bool, bool, dict[str, Any]]: """Steps through the environment.""" return 0, 0, False, False, {} def reset( self, *, seed: int | None = None, options: dict | None = None, ) -> tuple[ObsType, dict]: """Resets the environment.""" super().reset(seed=seed, options=options) return 0, {} @pytest.fixture def example_env(): return ExampleEnv() def test_example_env(example_env): """Tests a gymnasium environment.""" assert example_env.metadata == {"render_modes": []} assert example_env.render_mode is None assert example_env.spec is None assert example_env._np_random is None # pyright: ignore [reportPrivateUsage]
ExampleEnv
python
spyder-ide__spyder
spyder/plugins/history/plugin.py
{ "start": 667, "end": 4221 }
class ____(SpyderDockablePlugin): """ History log plugin. """ NAME = 'historylog' REQUIRES = [Plugins.Preferences, Plugins.Console] OPTIONAL = [Plugins.IPythonConsole] TABIFY = Plugins.IPythonConsole WIDGET_CLASS = HistoryWidget CONF_SECTION = NAME CONF_WIDGET_CLASS = HistoryConfigPage CONF_FILE = False # ---- Signals # ------------------------------------------------------------------------ sig_focus_changed = Signal() """ This signal is emitted when the focus of the code editor storing history changes. """ def __init__(self, parent=None, configuration=None): """Initialization.""" super().__init__(parent, configuration) self.add_history(get_conf_path('history.py')) # ---- SpyderDockablePlugin API # ------------------------------------------------------------------------ @staticmethod def get_name(): return _('History') @staticmethod def get_description(): return _('View command history for the IPython console.') @classmethod def get_icon(cls): return cls.create_icon('history') def on_initialize(self): widget = self.get_widget() widget.sig_focus_changed.connect(self.sig_focus_changed) @on_plugin_available(plugin=Plugins.Preferences) def on_preferences_available(self): preferences = self.get_plugin(Plugins.Preferences) preferences.register_plugin_preferences(self) @on_plugin_available(plugin=Plugins.Console) def on_console_available(self): console = self.get_plugin(Plugins.Console) console.sig_refreshed.connect(self.refresh) @on_plugin_available(plugin=Plugins.IPythonConsole) def on_ipyconsole_available(self): ipyconsole = self.get_plugin(Plugins.IPythonConsole) ipyconsole.sig_append_to_history_requested.connect( self.append_to_history) @on_plugin_teardown(plugin=Plugins.Preferences) def on_preferences_teardown(self): preferences = self.get_plugin(Plugins.Preferences) preferences.deregister_plugin_preferences(self) @on_plugin_teardown(plugin=Plugins.Console) def on_console_teardown(self): console = self.get_plugin(Plugins.Console) 
console.sig_refreshed.disconnect(self.refresh) @on_plugin_teardown(plugin=Plugins.IPythonConsole) def on_ipyconsole_teardown(self): ipyconsole = self.get_plugin(Plugins.IPythonConsole) ipyconsole.sig_append_to_history_requested.disconnect( self.append_to_history) def update_font(self): color_scheme = self.get_color_scheme() font = self.get_font(SpyderFontType.Monospace) self.get_widget().update_font(font, color_scheme) # ---- Public API # ------------------------------------------------------------------------ def refresh(self): """ Refresh main widget. """ self.get_widget().refresh() def add_history(self, filename): """ Create history file. Parameters ---------- filename: str History file. """ self.get_widget().add_history(filename) def append_to_history(self, filename, command): """ Append command to history file. Parameters ---------- filename: str History file. command: str Command to append to history file. """ self.get_widget().append_to_history(filename, command)
HistoryLog
python
has2k1__plotnine
plotnine/geoms/geom_count.py
{ "start": 79, "end": 598 }
class ____(geom_point): """ Plot overlapping points {usage} This is a variant [](`~plotnine.geoms.geom_point`) that counts the number of observations at each location, then maps the count to point area. It useful when you have discrete data and overplotting. Parameters ---------- {common_parameters} See Also -------- plotnine.stat_sum : The default `stat` for this `geom`. """ DEFAULT_PARAMS = {"stat": "sum", "position": "identity", "na_rm": False}
geom_count
python
lepture__authlib
authlib/oauth2/auth.py
{ "start": 1284, "end": 2392 }
class ____: """Attaches OAuth Client Information to HTTP requests. :param client_id: Client ID, which you get from client registration. :param client_secret: Client Secret, which you get from registration. :param auth_method: Client auth method for token endpoint. The supported methods for now: * client_secret_basic (default) * client_secret_post * none """ DEFAULT_AUTH_METHODS = { "client_secret_basic": encode_client_secret_basic, "client_secret_post": encode_client_secret_post, "none": encode_none, } def __init__(self, client_id, client_secret, auth_method=None): if auth_method is None: auth_method = "client_secret_basic" self.client_id = client_id self.client_secret = client_secret if auth_method in self.DEFAULT_AUTH_METHODS: auth_method = self.DEFAULT_AUTH_METHODS[auth_method] self.auth_method = auth_method def prepare(self, method, uri, headers, body): return self.auth_method(self, method, uri, headers, body)
ClientAuth
python
getsentry__sentry
src/sentry/workflow_engine/models/workflow_action_group_status.py
{ "start": 187, "end": 832 }
class ____(DefaultFieldsModel): """ Stores when a workflow action last fired for a Group. """ __relocation_scope__ = RelocationScope.Excluded workflow = FlexibleForeignKey("workflow_engine.Workflow", on_delete=models.CASCADE) action = FlexibleForeignKey("workflow_engine.Action", on_delete=models.CASCADE) group = FlexibleForeignKey("sentry.Group", db_constraint=True) class Meta: constraints = [ models.UniqueConstraint( fields=["workflow", "action", "group"], name="workflow_engine_uniq_workflow_action_group", ) ]
WorkflowActionGroupStatus
python
sympy__sympy
sympy/functions/special/polynomials.py
{ "start": 32927, "end": 36041 }
class ____(OrthogonalPolynomial): r""" ``hermite(n, x)`` gives the $n$th Hermite polynomial in $x$, $H_n(x)$. Explanation =========== The Hermite polynomials are orthogonal on $(-\infty, \infty)$ with respect to the weight $\exp\left(-x^2\right)$. Examples ======== >>> from sympy import hermite, diff >>> from sympy.abc import x, n >>> hermite(0, x) 1 >>> hermite(1, x) 2*x >>> hermite(2, x) 4*x**2 - 2 >>> hermite(n, x) hermite(n, x) >>> diff(hermite(n,x), x) 2*n*hermite(n - 1, x) >>> hermite(n, -x) (-1)**n*hermite(n, x) See Also ======== jacobi, gegenbauer, chebyshevt, chebyshevt_root, chebyshevu, chebyshevu_root, legendre, assoc_legendre, hermite_prob, laguerre, assoc_laguerre, sympy.polys.orthopolys.jacobi_poly sympy.polys.orthopolys.gegenbauer_poly sympy.polys.orthopolys.chebyshevt_poly sympy.polys.orthopolys.chebyshevu_poly sympy.polys.orthopolys.hermite_poly sympy.polys.orthopolys.hermite_prob_poly sympy.polys.orthopolys.legendre_poly sympy.polys.orthopolys.laguerre_poly References ========== .. [1] https://en.wikipedia.org/wiki/Hermite_polynomial .. [2] https://mathworld.wolfram.com/HermitePolynomial.html .. 
[3] https://functions.wolfram.com/Polynomials/HermiteH/ """ _ortho_poly = staticmethod(hermite_poly) @classmethod def eval(cls, n, x): if not n.is_Number: # Symbolic result H_n(x) # H_n(-x) ---> (-1)**n * H_n(x) if x.could_extract_minus_sign(): return S.NegativeOne**n * hermite(n, -x) # We can evaluate for some special values of x if x.is_zero: return 2**n * sqrt(S.Pi) / gamma((S.One - n)/2) elif x is S.Infinity: return S.Infinity else: # n is a given fixed integer, evaluate into polynomial if n.is_negative: raise ValueError( "The index n must be nonnegative integer (got %r)" % n) else: return cls._eval_at_order(n, x) def fdiff(self, argindex=2): if argindex == 1: # Diff wrt n raise ArgumentIndexError(self, argindex) elif argindex == 2: # Diff wrt x n, x = self.args return 2*n*hermite(n - 1, x) else: raise ArgumentIndexError(self, argindex) def _eval_rewrite_as_Sum(self, n, x, **kwargs): from sympy.concrete.summations import Sum k = Dummy("k") kern = S.NegativeOne**k / (factorial(k)*factorial(n - 2*k)) * (2*x)**(n - 2*k) return factorial(n)*Sum(kern, (k, 0, floor(n/2))) def _eval_rewrite_as_polynomial(self, n, x, **kwargs): # This function is just kept for backwards compatibility # but should not be used return self._eval_rewrite_as_Sum(n, x, **kwargs) def _eval_rewrite_as_hermite_prob(self, n, x, **kwargs): return sqrt(2)**n * hermite_prob(n, x*sqrt(2))
hermite
python
tox-dev__tox
src/tox/tox_env/python/virtual_env/package/pyproject.py
{ "start": 2148, "end": 2257 }
class ____(RuntimeError): """raised when build editable is not supported."""
BuildEditableNotSupportedError
python
keras-team__keras
keras/src/layers/convolutional/base_conv.py
{ "start": 561, "end": 17978 }
class ____(Layer): """Abstract N-D convolution layer (private, used as implementation base). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Note: layer attributes cannot be modified after the layer has been called once (except the `trainable` attribute). Args: rank: int, the rank of the convolution, e.g. 2 for 2D convolution. filters: int, the dimension of the output space (the number of filters in the convolution). kernel_size: int or tuple/list of `rank` integers, specifying the size of the convolution window. strides: int or tuple/list of `rank` integers, specifying the stride length of the convolution. If only one int is specified, the same stride size will be used for all dimensions. `strides > 1` is incompatible with `dilation_rate > 1`. padding: string, either `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input. When `padding="same"` and `strides=1`, the output has the same size as the input. data_format: string, either `"channels_last"` or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` corresponds to inputs with shape `(batch, steps, features)` while `"channels_first"` corresponds to inputs with shape `(batch, features, steps)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be `"channels_last"`. dilation_rate: int or tuple/list of `rank` integers, specifying the dilation rate to use for dilated convolution. If only one int is specified, the same dilation rate will be used for all dimensions. 
groups: A positive int specifying the number of groups in which the input is split along the channel axis. Each group is convolved separately with `filters // groups` filters. The output is the concatenation of all the `groups` results along the channel axis. Input channels and `filters` must both be divisible by `groups`. activation: Activation function. If `None`, no activation is applied. use_bias: bool, if `True`, bias will be added to the output. kernel_initializer: Initializer for the convolution kernel. If `None`, the default initializer (`"glorot_uniform"`) will be used. bias_initializer: Initializer for the bias vector. If `None`, the default initializer (`"zeros"`) will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. lora_rank: Optional integer. If set, the layer's forward pass will implement LoRA (Low-Rank Adaptation) with the provided rank. LoRA sets the layer's kernel to non-trainable and replaces it with a delta over the original kernel, obtained via multiplying two lower-rank trainable matrices. This can be useful to reduce the computation cost of fine-tuning large dense layers. You can also enable LoRA on an existing layer by calling `layer.enable_lora(rank)`. lora_alpha: Optional integer. 
If set, this parameter scales the low-rank adaptation delta (computed as the product of two lower-rank trainable matrices) during the forward pass. The delta is scaled by `lora_alpha / lora_rank`, allowing you to fine-tune the strength of the LoRA adjustment independently of `lora_rank`. """ def __init__( self, rank, filters, kernel_size, strides=1, padding="valid", data_format=None, dilation_rate=1, groups=1, activation=None, use_bias=True, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, lora_rank=None, lora_alpha=None, **kwargs, ): super().__init__(activity_regularizer=activity_regularizer, **kwargs) self.rank = rank self.filters = filters self.groups = groups self.kernel_size = standardize_tuple(kernel_size, rank, "kernel_size") self.strides = standardize_tuple(strides, rank, "strides") self.dilation_rate = standardize_tuple( dilation_rate, rank, "dilation_rate" ) self.padding = standardize_padding(padding, allow_causal=rank == 1) self.data_format = standardize_data_format(data_format) self.activation = activations.get(activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) self.lora_rank = lora_rank self.lora_alpha = lora_alpha if lora_alpha is not None else lora_rank self.lora_enabled = False self.input_spec = InputSpec(min_ndim=self.rank + 2) self.data_format = self.data_format if self.filters is not None and self.filters <= 0: raise ValueError( "Invalid value for argument `filters`. Expected a strictly " f"positive value. Received filters={self.filters}." 
) if self.groups <= 0: raise ValueError( "The number of groups must be a positive integer. " f"Received: groups={self.groups}." ) if self.filters is not None and self.filters % self.groups != 0: raise ValueError( "The number of filters must be evenly divisible by the " f"number of groups. Received: groups={self.groups}, " f"filters={self.filters}." ) if not all(self.kernel_size): raise ValueError( "The argument `kernel_size` cannot contain 0. Received " f"kernel_size={self.kernel_size}." ) if not all(self.strides): raise ValueError( "The argument `strides` cannot contains 0. Received " f"strides={self.strides}" ) if max(self.strides) > 1 and max(self.dilation_rate) > 1: raise ValueError( "`strides > 1` not supported in conjunction with " f"`dilation_rate > 1`. Received: strides={self.strides} and " f"dilation_rate={self.dilation_rate}" ) def build(self, input_shape): if self.data_format == "channels_last": channel_axis = -1 input_channel = input_shape[-1] else: channel_axis = 1 input_channel = input_shape[1] self.input_spec = InputSpec( min_ndim=self.rank + 2, axes={channel_axis: input_channel} ) if input_channel % self.groups != 0: raise ValueError( "The number of input channels must be evenly divisible by " f"the number of groups. Received groups={self.groups}, but the " f"input has {input_channel} channels (full input shape is " f"{input_shape})." ) kernel_shape = self.kernel_size + ( input_channel // self.groups, self.filters, ) # compute_output_shape contains some validation logic for the input # shape, and make sure the output shape has all positive dimensions. 
self.compute_output_shape(input_shape) self._kernel = self.add_weight( name="kernel", shape=kernel_shape, initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True, dtype=self.dtype, ) if self.use_bias: self.bias = self.add_weight( name="bias", shape=(self.filters,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True, dtype=self.dtype, ) else: self.bias = None self.built = True if self.lora_rank: self.enable_lora(self.lora_rank, lora_alpha=self.lora_alpha) @property def kernel(self): if not self.built: raise AttributeError( "You must build the layer before accessing `kernel`." ) if self.lora_enabled: return self._kernel + ( self.lora_alpha / self.lora_rank ) * ops.matmul(self.lora_kernel_a, self.lora_kernel_b) return self._kernel def convolution_op(self, inputs, kernel): return ops.conv( inputs, kernel, strides=list(self.strides), padding=self.padding, dilation_rate=self.dilation_rate, data_format=self.data_format, ) def call(self, inputs): outputs = self.convolution_op( inputs, self.kernel, ) if self.use_bias: if self.data_format == "channels_last": bias_shape = (1,) * (self.rank + 1) + (self.filters,) else: bias_shape = (1, self.filters) + (1,) * self.rank bias = ops.reshape(self.bias, bias_shape) outputs = ops.add(outputs, bias) if self.activation is not None: return self.activation(outputs) return outputs def compute_output_shape(self, input_shape): return compute_conv_output_shape( input_shape, self.filters, self.kernel_size, strides=self.strides, padding=self.padding, data_format=self.data_format, dilation_rate=self.dilation_rate, ) def enable_lora( self, rank, lora_alpha=None, a_initializer="he_uniform", b_initializer="zeros", ): if self.kernel_constraint: raise ValueError( "Lora is incompatible with kernel constraints. " "In order to enable lora on this layer, remove the " "`kernel_constraint` argument." 
) if not self.built: raise ValueError( "Cannot enable lora on a layer that isn't yet built." ) if self.lora_enabled: raise ValueError( "lora is already enabled. This can only be done once per layer." ) self._tracker.unlock() self.lora_kernel_a = self.add_weight( name="lora_kernel_a", shape=self._kernel.shape[:-1] + (rank,), initializer=initializers.get(a_initializer), regularizer=self.kernel_regularizer, ) self.lora_kernel_b = self.add_weight( name="lora_kernel_b", shape=(rank, self.filters), initializer=initializers.get(b_initializer), regularizer=self.kernel_regularizer, ) self._kernel.trainable = False self._tracker.lock() self.lora_enabled = True self.lora_rank = rank self.lora_alpha = lora_alpha if lora_alpha is not None else rank def save_own_variables(self, store): # Do nothing if the layer isn't yet built if not self.built: return target_variables = [self.kernel] if self.use_bias: target_variables.append(self.bias) for i, variable in enumerate(target_variables): store[str(i)] = variable def load_own_variables(self, store): if not self.lora_enabled: self._check_load_own_variables(store) # Do nothing if the layer isn't yet built if not self.built: return target_variables = [self._kernel] if self.use_bias: target_variables.append(self.bias) for i, variable in enumerate(target_variables): variable.assign(store[str(i)]) if self.lora_enabled: self.lora_kernel_a.assign(ops.zeros(self.lora_kernel_a.shape)) self.lora_kernel_b.assign(ops.zeros(self.lora_kernel_b.shape)) def get_config(self): config = super().get_config() config.update( { "filters": self.filters, "kernel_size": self.kernel_size, "strides": self.strides, "padding": self.padding, "data_format": self.data_format, "dilation_rate": self.dilation_rate, "groups": self.groups, "activation": activations.serialize(self.activation), "use_bias": self.use_bias, "kernel_initializer": initializers.serialize( self.kernel_initializer ), "bias_initializer": initializers.serialize( self.bias_initializer ), 
"kernel_regularizer": regularizers.serialize( self.kernel_regularizer ), "bias_regularizer": regularizers.serialize( self.bias_regularizer ), "activity_regularizer": regularizers.serialize( self.activity_regularizer ), "kernel_constraint": constraints.serialize( self.kernel_constraint ), "bias_constraint": constraints.serialize(self.bias_constraint), } ) if self.lora_rank: config["lora_rank"] = self.lora_rank config["lora_alpha"] = self.lora_alpha return config def _check_load_own_variables(self, store): all_vars = self._trainable_variables + self._non_trainable_variables if len(store.keys()) != len(all_vars): if len(all_vars) == 0 and not self.built: raise ValueError( f"Layer '{self.name}' was never built " "and thus it doesn't have any variables. " f"However the weights file lists {len(store.keys())} " "variables for this layer.\n" "In most cases, this error indicates that either:\n\n" "1. The layer is owned by a parent layer that " "implements a `build()` method, but calling the " "parent's `build()` method did NOT create the state of " f"the child layer '{self.name}'. A `build()` method " "must create ALL state for the layer, including " "the state of any children layers.\n\n" "2. You need to implement " "the `def build_from_config(self, config)` method " f"on layer '{self.name}', to specify how to rebuild " "it during loading. " "In this case, you might also want to implement the " "method that generates the build config at saving time, " "`def get_build_config(self)`. " "The method `build_from_config()` is meant " "to create the state " "of the layer (i.e. its variables) upon deserialization.", ) raise ValueError( f"Layer '{self.name}' expected {len(all_vars)} variables, " "but received " f"{len(store.keys())} variables during loading. " f"Expected: {[v.name for v in all_vars]}" )
BaseConv
python
apache__airflow
airflow-core/src/airflow/timetables/_cron.py
{ "start": 2685, "end": 7689 }
class ____: """Mixin to provide interface to work with croniter.""" def __init__(self, cron: str, timezone: str | Timezone | FixedTimezone) -> None: self._expression = cron_presets.get(cron, cron) if isinstance(timezone, str): timezone = parse_timezone(timezone) self._timezone = timezone try: # checking for more than 5 parameters in Cron and avoiding evaluation for now, # as Croniter has inconsistent evaluation with other libraries if len(croniter(self._expression).expanded) > 5: raise FormatException() self.description = self._describe_with_dom_dow_fix(self._expression) except (CroniterBadCronError, FormatException, MissingFieldException): self.description = "" def _describe_with_dom_dow_fix(self, expression: str) -> str: """ Return cron description with fix for DOM+DOW conflicts. If both DOM and DOW are restricted, explain them as OR. """ cron_fields = expression.split() if len(cron_fields) < 5: return ExpressionDescriptor( expression, casing_type=CasingTypeEnum.Sentence, use_24hour_time_format=True ).get_description() dom = cron_fields[2] dow = cron_fields[4] if dom != "*" and dow != "*": # Case: conflict → DOM OR DOW cron_fields_dom = cron_fields.copy() cron_fields_dom[4] = "*" day_of_month_desc = ExpressionDescriptor( " ".join(cron_fields_dom), casing_type=CasingTypeEnum.Sentence, use_24hour_time_format=True ).get_description() cron_fields_dow = cron_fields.copy() cron_fields_dow[2] = "*" day_of_week_desc = ExpressionDescriptor( " ".join(cron_fields_dow), casing_type=CasingTypeEnum.Sentence, use_24hour_time_format=True ).get_description() return f"{day_of_month_desc} (or) {day_of_week_desc}" # no conflict → return normal description return ExpressionDescriptor( expression, casing_type=CasingTypeEnum.Sentence, use_24hour_time_format=True ).get_description() def __eq__(self, other: object) -> bool: """ Both expression and timezone should match. This is only for testing purposes and should not be relied on otherwise. 
""" if not isinstance(other, type(self)): return NotImplemented return self._expression == other._expression and self._timezone == other._timezone def __hash__(self): return hash((self._expression, self._timezone)) @property def summary(self) -> str: return self._expression def validate(self) -> None: try: croniter(self._expression) except (CroniterBadCronError, CroniterBadDateError) as e: raise AirflowTimetableInvalid(str(e)) def _get_next(self, current: DateTime) -> DateTime: """Get the first schedule after specified time, with DST fixed.""" naive = make_naive(current, self._timezone) cron = croniter(self._expression, start_time=naive) scheduled = cron.get_next(datetime.datetime) if TYPE_CHECKING: assert isinstance(scheduled, datetime.datetime) if not _covers_every_hour(cron): return convert_to_utc(make_aware(scheduled, self._timezone)) delta = scheduled - naive return convert_to_utc(current.in_timezone(self._timezone) + delta) def _get_prev(self, current: DateTime) -> DateTime: """Get the first schedule before specified time, with DST fixed.""" naive = make_naive(current, self._timezone) cron = croniter(self._expression, start_time=naive) scheduled = cron.get_prev(datetime.datetime) if TYPE_CHECKING: assert isinstance(scheduled, datetime.datetime) if not _covers_every_hour(cron): return convert_to_utc(make_aware(scheduled, self._timezone)) delta = naive - scheduled return convert_to_utc(current.in_timezone(self._timezone) - delta) def _align_to_next(self, current: DateTime) -> DateTime: """ Get the next scheduled time. This is ``current + interval``, unless ``current`` falls right on the interval boundary, when ``current`` is returned. """ next_time = self._get_next(current) if self._get_prev(next_time) != current: return next_time return current def _align_to_prev(self, current: DateTime) -> DateTime: """ Get the prev scheduled time. This is ``current - interval``, unless ``current`` falls right on the interval boundary, when ``current`` is returned. 
""" prev_time = self._get_prev(current) if self._get_next(prev_time) != current: return prev_time return current
CronMixin
python
django__django
django/db/migrations/serializer.py
{ "start": 5816, "end": 5934 }
class ____(BaseUnorderedSequenceSerializer):
    # Serializer that renders a frozenset as source code in generated
    # migration files. Serializing the individual members (and, presumably,
    # ordering them deterministically -- verify against the base class) is
    # handled by BaseUnorderedSequenceSerializer; this subclass only supplies
    # the literal template.
    def _format(self):
        # %s is substituted by the base class with the comma-joined,
        # already-serialized member strings.
        return "frozenset([%s])"
FrozensetSerializer
python
doocs__leetcode
lcci/10.05.Sparse Array Search/Solution.py
{ "start": 0, "end": 408 }
class ____: def findString(self, words: List[str], s: str) -> int: def dfs(i: int, j: int) -> int: if i > j: return -1 mid = (i + j) >> 1 l = dfs(i, mid - 1) if l != -1: return l if words[mid] == s: return mid return dfs(mid + 1, j) return dfs(0, len(words) - 1)
Solution
python
kamyu104__LeetCode-Solutions
Python/minimum-number-of-days-to-disconnect-island.py
{ "start": 5042, "end": 7630 }
class ____(object): def minDays(self, grid): """ :type grid: List[List[int]] :rtype: int """ DIRECTIONS = [(0, 1), (1, 0), (0, -1), (-1, 0)] def floodfill(grid, i, j, lookup): stk = [(i, j)] lookup[i][j] = 1 while stk: i, j = stk.pop() for di, dj in DIRECTIONS: ni, nj = i+di, j+dj if not (0 <= ni < R and 0 <= nj < C and grid[ni][nj] and not lookup[ni][nj]): continue lookup[ni][nj] = 1 stk.append((ni, nj)) def count_islands(grid): lookup = [[0]*C for _ in xrange(R)] island_cnt = 0 for i in xrange(R): for j in xrange(C): if grid[i][j] == 0 or lookup[i][j]: continue island_cnt += 1 floodfill(grid, i, j, lookup) return island_cnt def merge(i): r, c = divmod(i, C) for dr, dc in DIRECTIONS: nr, nc = r+dr, c+dc ni = nr*C+nc if 0 <= nr < R and 0 <= nc < C and grid[nr][nc] == grid[r][c] and lookup[ni]: uf.union_set(i, ni) def check(i): r, c = divmod(i, C) if grid[r][c] == 0: return False lookup = set() for dr, dc in DIRECTIONS: nr, nc = r+dr, c+dc if 0 <= nr < R and 0 <= nc < C and grid[nr][nc] == grid[r][c]: lookup.add(uf.find_set(nr*C+nc)) return len(lookup) != 1 def dfs(left, right): if left == right: return check(left) mid = left + (right-left)//2 l1, r1, l2, r2 = left, mid, mid+1, right for _ in xrange(2): uf.snapshot() for i in xrange(l1, r1+1): lookup[i] = True merge(i) if dfs(l2, r2): return True for i in xrange(l1, r1+1): lookup[i] = False uf.rollback() l1, r1, l2, r2 = l2, r2, l1, r1 return False R, C = len(grid), len(grid[0]) if count_islands(grid) != 1: return 0 uf = PersistentUnionFind(R*C) lookup = [False]*(R*C) return 1 if dfs(0, R*C-1) else 2 # Time: O(m^2 * n^2) # Space: O(m * n) # flood fill
Solution2
python
pytorch__pytorch
torch/distributed/tensor/_op_schema.py
{ "start": 8051, "end": 10073 }
class ____(StrategyType):
    """
    TupleStrategy is a special case for operators that are fundamentally compound
    or batched such that some subset of the inputs and outputs are completely
    unrelated to some other subset.

    Generally, foreach_* ops are the most common use-case for TupleStrategy,
    because they accept lists of inputs, but operate independently on each input
    or tuple of zipped inputs.

    For example, [out_a, out_b] = torch.foreach_add([a, b], scalar): input a's
    sharding only affects out_a's sharding, independent of b and out_b.

    An example of an operator that should NOT use TupleStrategy is torch.split.
    It produces a List[Tensor] as its output, but the sharding decision of one
    output is bound together with the decision of each other output and the
    common input.
    """

    def __init__(
        self,
        children: Sequence[StrategyType],
    ) -> None:
        super().__init__()
        # One child strategy per independent input/output group.
        self.children: Sequence[StrategyType] = children

    @property
    @deprecated(
        "TupleStrategy.childs is deprecated, use TupleStrategy.children instead.",  # codespell:ignore childs
        category=FutureWarning,
    )
    def childs(self) -> Sequence[StrategyType]:  # codespell:ignore childs
        """
        Alias for children, to maintain backward compatibility.
        """
        return self.children

    def child_mesh(self, index: int) -> DeviceMesh:
        # Mesh of the index-th child; only defined for OpStrategy children,
        # which the assert below enforces.
        op_strategy = self.children[index]
        assert isinstance(op_strategy, OpStrategy)
        return op_strategy.mesh

    def __str__(self) -> str:
        child_strategies_str = ", ".join(
            [f"{str(strat)}" for idx, strat in enumerate(self.children)]
        )
        return f"TupleStrategy({child_strategies_str})"


# Make TupleStrategy transparent to pytree traversal: flatten to its children
# and rebuild from them.
try:
    register_pytree_node(
        TupleStrategy,
        lambda node: (node.children, None),
        lambda children, _: TupleStrategy(tuple(children)),
    )
except ValueError:
    # already registered TupleStrategy, skip
    pass


@dataclass
TupleStrategy
python
pydata__xarray
xarray/tests/test_indexing.py
{ "start": 36164, "end": 41175 }
class ____: def __array_namespace__(self, version=None): pass def __array_function__(self, func, types, args, kwargs): pass def as_dask_array(arr, chunks): try: import dask.array as da except ImportError: return None return da.from_array(arr, chunks=chunks) @pytest.mark.parametrize( ["array", "expected_type"], ( pytest.param( indexing.CopyOnWriteArray(np.array([1, 2])), indexing.CopyOnWriteArray, id="ExplicitlyIndexed", ), pytest.param( np.array([1, 2]), indexing.NumpyIndexingAdapter, id="numpy.ndarray" ), pytest.param( pd.Index([1, 2]), indexing.PandasIndexingAdapter, id="pandas.Index" ), pytest.param( as_dask_array(np.array([1, 2]), chunks=(1,)), indexing.DaskIndexingAdapter, id="dask.array", marks=requires_dask, ), pytest.param( ArrayWithNamespace(), indexing.ArrayApiIndexingAdapter, id="array_api" ), pytest.param( ArrayWithArrayFunction(), indexing.NdArrayLikeIndexingAdapter, id="array_like", ), pytest.param( ArrayWithNamespaceAndArrayFunction(), indexing.ArrayApiIndexingAdapter, id="array_api_with_fallback", ), ), ) def test_as_indexable(array, expected_type): actual = indexing.as_indexable(array) assert isinstance(actual, expected_type) def test_indexing_1d_object_array() -> None: items = (np.arange(3), np.arange(6)) arr = DataArray(np.array(items, dtype=object)) actual = arr[0] expected_data = np.empty((), dtype=object) expected_data[()] = items[0] expected = DataArray(expected_data) assert [actual.data.item()] == [expected.data.item()] @requires_dask def test_indexing_dask_array() -> None: import dask.array da = DataArray( np.ones(10 * 3 * 3).reshape((10, 3, 3)), dims=("time", "x", "y"), ).chunk(dict(time=-1, x=1, y=1)) with raise_if_dask_computes(): actual = da.isel(time=dask.array.from_array([9], chunks=(1,))) expected = da.isel(time=[9]) assert_identical(actual, expected) @requires_dask def test_indexing_dask_array_scalar() -> None: # GH4276 import dask.array a = dask.array.from_array(np.linspace(0.0, 1.0)) da = DataArray(a, dims="x") x_selector = 
da.argmax(dim=...) assert not isinstance(x_selector, DataArray) with raise_if_dask_computes(): actual = da.isel(x_selector) expected = da.isel(x=-1) assert_identical(actual, expected) @requires_dask def test_vectorized_indexing_dask_array() -> None: # https://github.com/pydata/xarray/issues/2511#issuecomment-563330352 darr = DataArray(data=[0.2, 0.4, 0.6], coords={"z": range(3)}, dims=("z",)) indexer = DataArray( data=np.random.randint(0, 3, 8).reshape(4, 2).astype(int), coords={"y": range(4), "x": range(2)}, dims=("y", "x"), ) expected = darr[indexer] # fails because we can't index pd.Index lazily (yet). # We could make this succeed by auto-chunking the values # and constructing a lazy index variable, and not automatically # create an index for it. with pytest.raises(ValueError, match="Cannot index with"): with raise_if_dask_computes(): darr.chunk()[indexer.chunk({"y": 2})] with pytest.raises(ValueError, match="Cannot index with"): with raise_if_dask_computes(): actual = darr[indexer.chunk({"y": 2})] with raise_if_dask_computes(): actual = darr.drop_vars("z").chunk()[indexer.chunk({"y": 2})] assert_identical(actual, expected.drop_vars("z")) with raise_if_dask_computes(): actual_variable = darr.variable.chunk()[indexer.variable.chunk({"y": 2})] assert_identical(actual_variable, expected.variable) @requires_dask def test_advanced_indexing_dask_array() -> None: # GH4663 import dask.array as da ds = Dataset( dict( a=("x", da.from_array(np.random.randint(0, 100, 100))), b=(("x", "y"), da.random.random((100, 10))), ) ) expected = ds.b.sel(x=ds.a.compute()) with raise_if_dask_computes(): actual = ds.b.sel(x=ds.a) assert_identical(expected, actual) with raise_if_dask_computes(): actual = ds.b.sel(x=ds.a.data) assert_identical(expected, actual) def test_backend_indexing_non_numpy() -> None: """This model indexing of a Zarr store that reads to GPU memory.""" array = DuckArrayWrapper(np.array([1, 2, 3])) indexed = indexing.explicit_indexing_adapter( 
indexing.BasicIndexer((slice(1),)), shape=array.shape, indexing_support=indexing.IndexingSupport.BASIC, raw_indexing_method=array.__getitem__, ) np.testing.assert_array_equal(indexed.array, np.array([1]))
ArrayWithNamespaceAndArrayFunction
python
PrefectHQ__prefect
src/integrations/prefect-sqlalchemy/tests/test_database.py
{ "start": 1569, "end": 2646 }
class ____:
    """Stand-in for a synchronous SQLAlchemy connection used in tests.

    Supports the context-manager protocol and echoes the (query, params) it
    receives back through a fake cursor, so tests can assert on exactly what
    was executed without touching a real database.
    """

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        # Returning False propagates any exception raised inside the block.
        return False

    def execute(self, query, params):
        # The fake cursor replays (query, params) so callers can verify the
        # statement text and bind values that were submitted.
        cursor_result = MagicMock()
        cursor_result.fetchall.side_effect = lambda: [
            (query, params),
        ]
        cursor_result.fetchmany.side_effect = (
            lambda size: [
                (query, params),
            ]
            * size
        )
        return cursor_result

    def commit(self):
        # No-op: there is no real transaction to commit.
        pass


@pytest.fixture()
def sqlalchemy_credentials_async():
    # Credentials mock whose engine reports itself as async-capable.
    sqlalchemy_credentials_mock = MagicMock()
    sqlalchemy_credentials_mock._driver_is_async = True
    sqlalchemy_credentials_mock.get_engine.return_value = SQLAlchemyAsyncEngineMock()
    return sqlalchemy_credentials_mock


@pytest.fixture()
def sqlalchemy_credentials_sync():
    # Credentials mock backed by the plain synchronous engine mock.
    sqlalchemy_credentials_mock = MagicMock()
    sqlalchemy_credentials_mock._driver_is_async = False
    sqlalchemy_credentials_mock.get_engine.return_value = SQLAlchemyEngineMock()
    return sqlalchemy_credentials_mock
SQLAlchemyConnectionMock
python
getsentry__sentry
src/sentry/deletions/defaults/rule.py
{ "start": 261, "end": 1797 }
class ____(ModelDeletionTask[Rule]):
    """Deletion task for issue-alert `Rule` rows and their dependent records."""

    def get_child_relations(self, instance: Rule) -> list[BaseRelation]:
        # Imports are deferred, presumably to avoid circular imports at
        # module load time -- TODO confirm.
        from sentry.models.grouprulestatus import GroupRuleStatus
        from sentry.models.rule import RuleActivity
        from sentry.models.rulefirehistory import RuleFireHistory
        from sentry.workflow_engine.models import AlertRuleDetector, AlertRuleWorkflow

        model_relations: list[BaseRelation] = [
            ModelRelation(GroupRuleStatus, {"rule_id": instance.id}),
            ModelRelation(RuleFireHistory, {"rule_id": instance.id}),
            ModelRelation(RuleActivity, {"rule_id": instance.id}),
            ModelRelation(AlertRuleDetector, {"rule_id": instance.id}),
        ]

        # If the rule is linked to a workflow-engine Workflow, schedule the
        # Workflow itself for deletion; otherwise log the missing link and
        # clean up any AlertRuleWorkflow rows keyed by this rule directly.
        alert_rule_workflow = AlertRuleWorkflow.objects.filter(rule_id=instance.id).first()
        if alert_rule_workflow:
            model_relations.append(ModelRelation(Workflow, {"id": alert_rule_workflow.workflow_id}))
        else:
            logger.error(
                "No AlertRuleWorkflow found for rule, skipping", extra={"rule_id": instance.id}
            )
            model_relations.append(ModelRelation(AlertRuleWorkflow, {"rule_id": instance.id}))

        return model_relations

    def mark_deletion_in_progress(self, instance_list: Sequence[Rule]) -> None:
        from sentry.constants import ObjectStatus

        # Flag rows as pending deletion so other code paths can ignore them
        # while the deletion task runs.
        for instance in instance_list:
            if instance.status != ObjectStatus.PENDING_DELETION:
                instance.update(status=ObjectStatus.PENDING_DELETION)
RuleDeletionTask
python
keras-team__keras
guides/making_new_layers_and_models_via_subclassing.py
{ "start": 11644, "end": 13297 }
class ____(keras.layers.Layer): def __init__(self, units=32, **kwargs): super().__init__(**kwargs) self.units = units def build(self, input_shape): self.w = self.add_weight( shape=(input_shape[-1], self.units), initializer="random_normal", trainable=True, ) self.b = self.add_weight( shape=(self.units,), initializer="random_normal", trainable=True ) def call(self, inputs): return ops.matmul(inputs, self.w) + self.b def get_config(self): config = super().get_config() config.update({"units": self.units}) return config layer = Linear(64) config = layer.get_config() print(config) new_layer = Linear.from_config(config) """ If you need more flexibility when deserializing the layer from its config, you can also override the `from_config()` class method. This is the base implementation of `from_config()`: ```python def from_config(cls, config): return cls(**config) ``` To learn more about serialization and saving, see the complete [guide to saving and serializing models](/guides/serialization_and_saving/). """ """ ## Privileged `training` argument in the `call()` method Some layers, in particular the `BatchNormalization` layer and the `Dropout` layer, have different behaviors during training and inference. For such layers, it is standard practice to expose a `training` (boolean) argument in the `call()` method. By exposing this argument in `call()`, you enable the built-in training and evaluation loops (e.g. `fit()`) to correctly use the layer in training and inference. """
Linear
python
pytorch__pytorch
test/test_utils.py
{ "start": 33374, "end": 33670 }
class ____(TestCase):
    """Sanity checks that the platform's C/C++ compiler entry points are accepted
    by torch's cpp_extension tooling."""

    def test_cpp_compiler_is_ok(self):
        # `c++` is the conventional alias for the system C++ compiler.
        self.assertTrue(torch.utils.cpp_extension.check_compiler_ok_for_platform("c++"))

    def test_cc_compiler_is_ok(self):
        # `cc` is the conventional alias for the system C compiler.
        self.assertTrue(torch.utils.cpp_extension.check_compiler_ok_for_platform("cc"))
TestCppExtensionUtils
python
ashishps1__awesome-system-design-resources
implementations/python/rate_limiting/token_bucket.py
{ "start": 13, "end": 1219 }
class ____: def __init__(self, capacity, fill_rate): self.capacity = capacity # Maximum number of tokens the bucket can hold self.fill_rate = fill_rate # Rate at which tokens are added (tokens/second) self.tokens = capacity # Current token count, start with a full bucket self.last_time = time.time() # Last time we checked the token count def allow_request(self, tokens=1): now = time.time() # Calculate how many tokens have been added since the last check time_passed = now - self.last_time self.tokens = min(self.capacity, self.tokens + time_passed * self.fill_rate) self.last_time = now # Check if we have enough tokens for this request if self.tokens >= tokens: self.tokens -= tokens return True return False # Usage example limiter = TokenBucket(capacity=10, fill_rate=1) # 10 tokens, refill 1 token per second for _ in range(15): print(limiter.allow_request()) # Will print True for the first 10 requests, then False time.sleep(0.1) # Wait a bit between requests time.sleep(5) # Wait for bucket to refill print(limiter.allow_request()) # True
TokenBucket
python
falconry__falcon
tests/test_typing.py
{ "start": 806, "end": 2182 }
class ____(falcon.asgi.Response): context_type = RichContext # NOTE(vytas): the `type: ignore` exemption is currently required if it is # desirable to actually check typing of context attributes. See also: # https://falcon.readthedocs.io/en/latest/api/typing.html#known-limitations context: RichContext # type: ignore[assignment] USERS = { 'am9objoxMjM0': ('user', UUID('51e4b478-3825-4e46-9fd7-be7b61d616dc')), 'dnl0YXM6MTIz': ('admin', UUID('5e50d2c4-1c52-42c7-b4c5-879d9bd390ee')), } def fancy_error_serializer( req: FancyRequest, resp: falcon.Response, ex: falcon.HTTPError ) -> None: resp.content_type = falcon.MEDIA_JSON resp.media = ex.to_dict() resp.media.update(fancy=True, asgi=False) def fancy_asgi_error_serializer( req: FancyAsyncRequest, resp: falcon.asgi.Response, ex: falcon.HTTPError ) -> None: resp.content_type = falcon.MEDIA_JSON resp.media = ex.to_dict() resp.media.update(fancy=True, asgi=True) def _process_auth(req: falcon.Request, resp: falcon.Response) -> None: if req.method == 'OPTIONS': return if req.auth: for key, user_role in USERS.items(): if req.auth == f'Basic {key}': req.context.role, req.context.userid = user_role break else: raise falcon.HTTPUnauthorized()
FancyAsyncResponse
python
Lightning-AI__lightning
src/lightning/pytorch/strategies/launchers/multiprocessing.py
{ "start": 12575, "end": 12775 }
class ____(NamedTuple): best_model_path: Optional[_PATH] weights_path: Optional[_PATH] trainer_state: TrainerState trainer_results: Any extra: dict[str, Any] @dataclass
_WorkerOutput
python
google__jax
jax/_src/pallas/mosaic_gpu/pipeline.py
{ "start": 5806, "end": 16265 }
class ____: start: int | jax.Array size: int | jax.Array def __eq__(self, other: _Slice) -> jax.Array: # type: ignore return lax.bitwise_and(self.start == other.start, self.size == other.size) jax.tree_util.register_dataclass( _Slice, data_fields=["start", "size"], meta_fields=[] ) def _downcast_spec( spec: gpu_core.BlockSpec | pallas_core.BlockSpec, ) -> gpu_core.BlockSpec: if isinstance(spec, gpu_core.BlockSpec): return spec return gpu_core.BlockSpec( block_shape=spec.block_shape, index_map=spec.index_map, memory_space=spec.memory_space, pipeline_mode=spec.pipeline_mode, ) def emit_pipeline( body: Callable[..., T], *, grid: pallas_core.TupleGrid, in_specs: Sequence[pallas_core.BlockSpec] = (), out_specs: Sequence[pallas_core.BlockSpec] = (), max_concurrent_steps: int = 1, init_carry: T | None = None, ): r"""Creates a function to emit a manual pipeline within a Pallas kernel. Args: body: The pipeline body function, which is called with - ``indices``: Tuple of current loop indices. - ``*input_refs``: SMEM refs for inputs. - ``*output_refs``: SMEM refs for outputs. If ``init_carry`` is provided, ``body`` receives an additional argument ``carry`` -- the carry from the previous iteration. It must then return the next carry value. grid: The grid dimensions for the pipeline. in_specs: A sequence of :class:`~jax.experimental.pallas.BlockSpec`\s for inputs. out_specs: A sequence of :class:`~jax.experimental.pallas.BlockSpec`\s for outputs. max_concurrent_steps: Maximum concurrently active pipeline stages. init_carry: Optional initial carry. If provided, ``body`` handles carry-over state between iterations, and the pipeline returns the final carry. Returns: A function that, when called with GMEM input and output refs, executes the pipeline and returns the final carry value (if ``init_carry`` was used), otherwise it returns None. 
""" in_specs = tuple(map(_downcast_spec, in_specs)) out_specs = tuple(map(_downcast_spec, out_specs)) for spec in in_specs: if spec.collective_axes: raise NotImplementedError( "BlockSpecs with collective_axes are not supported in emit_pipeline" ) for spec in out_specs: if spec.collective_axes: raise ValueError("Output BlockSpecs cannot have collective_axes") # TODO(justinfu): Factor out common code between warp-specialized and # normal pipelines. delay_release_levels = sorted({s.delay_release for s in in_specs}) or [0] if delay_release_levels and max_concurrent_steps <= delay_release_levels[0]: raise ValueError( "max_concurrent_steps must be greater than all delay_release values," f" but {max_concurrent_steps=} and {delay_release_levels=}." ) num_steps = math.prod(grid) has_dynamic_grid = not isinstance(num_steps, int) # Convert the grid to int32 explicitly to avoid dtype promotion errors. grid = tuple(jnp.asarray(g, dtype=jnp.int32) for g in grid) # Shrink ``max_concurrent_steps`` if the total number of steps is lower to # reduce the size of the refs allocated in SMEM. if not has_dynamic_grid and max_concurrent_steps > num_steps: max_concurrent_steps = cast(int, num_steps) def pipeline(*gmem_refs: state.AbstractRef): in_gmem_refs, out_gmem_refs = util.split_list(gmem_refs, [len(in_specs)]) in_smem_refs, out_smem_refs = util.split_list( [ gpu_core.SMEM( (max_concurrent_steps, *_get_block_shape(spec)), # type: ignore ref.dtype, transforms=tuple( t.batch(1) for t in getattr(spec, "transforms", ()) ), ) if _in_smem(spec) else None for spec, ref in zip(it.chain(in_specs, out_specs), gmem_refs) ], [len(in_specs)], ) num_arrivals = sum(map(_in_smem, in_specs)) return pl.run_scoped( functools.partial( scoped_pipeline, in_gmem_refs=in_gmem_refs, out_gmem_refs=out_gmem_refs, ), in_smem_refs=in_smem_refs, out_smem_refs=out_smem_refs, barrier_ref=None if num_arrivals == 0 else gpu_core.Barrier( # TODO(slebedev): Change this to arrive only once. 
num_arrivals=num_arrivals, num_barriers=max_concurrent_steps, ), ) def scoped_pipeline( *, in_gmem_refs, out_gmem_refs, in_smem_refs, out_smem_refs, barrier_ref ): in_brefs: Sequence[BufferedRef] = [ BufferedRef(spec, _is_index_invariant(spec, grid), gmem_ref, smem_ref) for spec, gmem_ref, smem_ref in zip( in_specs, in_gmem_refs, in_smem_refs ) ] out_brefs: Sequence[BufferedRef] = [ BufferedRef(spec, _is_index_invariant(spec, grid), gmem_ref, smem_ref) for spec, gmem_ref, smem_ref in zip( out_specs, out_gmem_refs, out_smem_refs ) ] # Initialize the pipeline. indices = (jnp.asarray(0, dtype=jnp.int32),) * len(grid) if has_dynamic_grid: prologue_steps = lax.min(max_concurrent_steps, num_steps) else: assert max_concurrent_steps <= num_steps prologue_steps = max_concurrent_steps def prologue(step, fetch_indices): for bref in in_brefs: bref.copy_in(step, fetch_indices, barrier_ref) return _inc_grid_by_1(fetch_indices, grid) jax.lax.fori_loop(0, prologue_steps, prologue, indices, unroll=not has_dynamic_grid) # This is true if any of the outputs need to be transferred inside the loop. smem_out_brefs = [bref for bref in out_brefs if _in_smem(bref.spec)] copies_out_in_loop = not all(bref.is_index_invariant for bref in smem_out_brefs) needs_epilogue = any(bref.is_index_invariant for bref in smem_out_brefs) # In the loop body, `max_concurrent_steps` may be larger than `num_steps` in # the dynamic grid case. This is fine, since in that case, we will never # need to fetch more data anyway. def loop_body(step, carry): slot = lax.rem(step, max_concurrent_steps) indices, fetch_index_levels, last_store_slices, prev_body_carry = carry if barrier_ref is not None: # Wait for the current GMEM->SMEM copy to complete, if any. gpu_primitives.barrier_wait(barrier_ref.at[slot]) # Wait for the previous output SMEM->GMEM copy to complete. 
if copies_out_in_loop: gpu_primitives.wait_smem_to_gmem( max_concurrent_steps - 1, wait_read_only=True ) next_body_carry = body( indices, *( bref.get_ref_for_slot(slot) for bref in it.chain(in_brefs, out_brefs) ), *(prev_body_carry,) if init_carry is not None else (), ) if copies_out_in_loop: gpu_primitives.commit_smem() # Copy the output from SMEM to GMEM. new_store_slices = last_store_slices[:] for idx, bref in enumerate(out_brefs): if bref.is_index_invariant: assert last_store_slices[idx] is None continue assert last_store_slices[idx] is not None new_store_slices[idx] = tuple( _Slice(s.start, s.size) for s in bref.compute_gmem_slice(indices) ) are_same_slices = map( lambda old, new: old == new, last_store_slices[idx], new_store_slices[idx], ) slices_changed = ~functools.reduce(lax.bitwise_and, are_same_slices) is_last_step = step == num_steps - 1 # TODO(apaszke,slebedev): This still diverges significantly from the # TPU semantics in that it will move on to the next SMEM output slice # even if it's not storing the previous one. 
bref.copy_out( slot, indices, predicate=lax.bitwise_or(slices_changed, is_last_step), ) if copies_out_in_loop: gpu_primitives.commit_smem_to_gmem_group() for delay_release, fetch_indices in zip( delay_release_levels, fetch_index_levels ): fetch_step = step + (max_concurrent_steps - delay_release) fetch_slot = lax.rem(fetch_step, max_concurrent_steps) # pylint: disable=cell-var-from-loop def do_fetch(): for bref in in_brefs: if bref.spec.delay_release == delay_release: bref.copy_in(fetch_slot, fetch_indices, barrier_ref) # pylint: enable=cell-var-from-loop jax.lax.cond( lax.bitwise_and(step >= delay_release, fetch_step < num_steps), do_fetch, lambda: None, ) next_fetch_indices_levels = [ _inc_grid_by_1(fetch_indices, grid) for fetch_indices in fetch_index_levels ] return ( _inc_grid_by_1(indices, grid), next_fetch_indices_levels, new_store_slices, next_body_carry if init_carry is not None else None, ) fetch_index_levels = [] for delay_release in delay_release_levels: fetch_indices = indices for _ in range(max_concurrent_steps - delay_release): fetch_indices = _inc_grid_by_1(fetch_indices, grid) fetch_index_levels.append(fetch_indices) # TODO(justinfu): Only store base pointer instead of all indices. last_store_slices = [ None if bref.is_index_invariant else (_Slice(-1, -1),) * len(bref.spec.block_shape) for bref in out_brefs ] last_indices, _, _, final_carry = lax.fori_loop( 0, num_steps, loop_body, (indices, fetch_index_levels, last_store_slices, init_carry), ) # Outputs invariant to the sequential axis are never written from inside the # loop. This is the only place where we store them. if not copies_out_in_loop and needs_epilogue: gpu_primitives.commit_smem() if needs_epilogue: last_slot = lax.rem(num_steps - 1, max_concurrent_steps) for bref in out_brefs: if bref.is_index_invariant: bref.copy_out(last_slot, last_indices, predicate=None) gpu_primitives.commit_smem_to_gmem_group() if smem_out_brefs: # Finalize the pipeline. 
gpu_primitives.wait_smem_to_gmem(0) return final_carry if init_carry is not None else None return pipeline
_Slice
python
getsentry__sentry
src/sentry/integrations/utils/codecov.py
{ "start": 882, "end": 4703 }
class ____(Enum): MISSING_TOKEN = "Internal Error" MISSING_GH = "Codecov access can only be enabled if the organization has a GitHub integration." MISSING_CODECOV = ( "Codecov access can only be enabled if the organization has a Codecov integration." ) def codecov_enabled(organization: Organization) -> bool: # We only need to check the organization flag since the flag will not be set if the plan-based feature flag is False. return bool(organization.flags.codecov_access) def has_codecov_integration(organization: Organization) -> tuple[bool, str | None]: """ Checks if the organization has a Codecov integration. Returns a tuple of (has_codecov_integration, error_message) """ integrations = integration_service.get_integrations( organization_id=organization.id, providers=[IntegrationProviderSlug.GITHUB.value] ) if not integrations: logger.info( "codecov.get_integrations", extra={"error": "Missing github integration", "org_id": organization.id}, ) return False, CodecovIntegrationError.MISSING_GH.value for integration in integrations: integration_installation = integration.get_installation(organization.id) if not integration_installation: continue repos = integration_installation.get_client().get_repos() if not repos: continue owner_username, _ = repos[0].get("full_name").split("/") url = CODECOV_REPOS_URL.format( service=IntegrationProviderSlug.GITHUB.value, owner_username=owner_username ) response = requests.get(url) if response.status_code == 200: logger.info( "codecov.check_integration_success", extra={"url": url, "org_id": organization.id, "status_code": 200}, ) return True, None # We found a codecov integration, so we can stop looking logger.warning( "codecov.check_integration_failed", extra={"url": url, "status_code": response.status_code, "org_id": organization.id}, ) # None of the Github Integrations had a Codecov integration return ( False, CodecovIntegrationError.MISSING_CODECOV.value, ) def get_codecov_data(repo: str, service: str, path: str) -> 
tuple[LineCoverage | None, str | None]: codecov_token = options.get("codecov.client-secret") if not codecov_token: return None, None params = repo.split("/") owner_username, repo_name = params[:2] service = "gh" if service == IntegrationProviderSlug.GITHUB.value else service path = path.lstrip("/") url = CODECOV_REPORT_URL.format( service=service, owner_username=owner_username, repo_name=repo_name, path=path, ) line_coverage, codecov_url = None, None scope = Scope.get_isolation_scope() response = requests.get( url, params={"walk_back": 10}, headers={"Authorization": f"Bearer {codecov_token}"}, timeout=CODECOV_TIMEOUT, ) response.raise_for_status() tags = { "codecov.request_url": url, "codecov.request_path": path, "codecov.http_code": response.status_code, } response_json = response.json() tags["codecov.new_endpoint"] = True line_coverage = response_json.get("line_coverage") coverage_found = line_coverage not in [None, [], [[]]] codecov_url = response_json.get("commit_file_url", "") tags.update( { "codecov.coverage_found": coverage_found, "codecov.coverage_url": codecov_url, }, ) for key, value in tags.items(): scope.set_tag(key, value) return line_coverage, codecov_url
CodecovIntegrationError
python
kubernetes-client__python
kubernetes/client/models/v1beta2_allocation_result.py
{ "start": 383, "end": 5868 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'allocation_timestamp': 'datetime', 'devices': 'V1beta2DeviceAllocationResult', 'node_selector': 'V1NodeSelector' } attribute_map = { 'allocation_timestamp': 'allocationTimestamp', 'devices': 'devices', 'node_selector': 'nodeSelector' } def __init__(self, allocation_timestamp=None, devices=None, node_selector=None, local_vars_configuration=None): # noqa: E501 """V1beta2AllocationResult - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._allocation_timestamp = None self._devices = None self._node_selector = None self.discriminator = None if allocation_timestamp is not None: self.allocation_timestamp = allocation_timestamp if devices is not None: self.devices = devices if node_selector is not None: self.node_selector = node_selector @property def allocation_timestamp(self): """Gets the allocation_timestamp of this V1beta2AllocationResult. # noqa: E501 AllocationTimestamp stores the time when the resources were allocated. This field is not guaranteed to be set, in which case that time is unknown. This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gate. # noqa: E501 :return: The allocation_timestamp of this V1beta2AllocationResult. # noqa: E501 :rtype: datetime """ return self._allocation_timestamp @allocation_timestamp.setter def allocation_timestamp(self, allocation_timestamp): """Sets the allocation_timestamp of this V1beta2AllocationResult. AllocationTimestamp stores the time when the resources were allocated. 
This field is not guaranteed to be set, in which case that time is unknown. This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gate. # noqa: E501 :param allocation_timestamp: The allocation_timestamp of this V1beta2AllocationResult. # noqa: E501 :type: datetime """ self._allocation_timestamp = allocation_timestamp @property def devices(self): """Gets the devices of this V1beta2AllocationResult. # noqa: E501 :return: The devices of this V1beta2AllocationResult. # noqa: E501 :rtype: V1beta2DeviceAllocationResult """ return self._devices @devices.setter def devices(self, devices): """Sets the devices of this V1beta2AllocationResult. :param devices: The devices of this V1beta2AllocationResult. # noqa: E501 :type: V1beta2DeviceAllocationResult """ self._devices = devices @property def node_selector(self): """Gets the node_selector of this V1beta2AllocationResult. # noqa: E501 :return: The node_selector of this V1beta2AllocationResult. # noqa: E501 :rtype: V1NodeSelector """ return self._node_selector @node_selector.setter def node_selector(self, node_selector): """Sets the node_selector of this V1beta2AllocationResult. :param node_selector: The node_selector of this V1beta2AllocationResult. 
# noqa: E501 :type: V1NodeSelector """ self._node_selector = node_selector def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1beta2AllocationResult): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1beta2AllocationResult): return True return self.to_dict() != other.to_dict()
V1beta2AllocationResult
python
keras-team__keras
keras/src/backend/jax/layer.py
{ "start": 272, "end": 308 }
class ____(BaseLayer): pass
JaxLayer
python
joke2k__faker
faker/providers/person/zh_CN/__init__.py
{ "start": 81, "end": 15274 }
class ____(PersonProvider): formats = ["{{last_name}}{{first_name}}"] first_names_male = [ "伟", "强", "磊", "洋", "勇", "军", "杰", "涛", "超", "明", "刚", "平", "辉", "鹏", "华", "飞", "鑫", "波", "斌", "宇", "浩", "凯", "健", "俊", "帆", "帅", "旭", "宁", "龙", "林", "欢", "佳", "阳", "建华", "亮", "成", "建", "峰", "建国", "建军", "晨", "瑞", "志强", "兵", "雷", "东", "博", "彬", "坤", "想", "岩", "杨", "文", "利", "楠", "红霞", "建平", ] first_names_female = [ "芳", "娜", "敏", "静", "秀英", "丽", "艳", "娟", "霞", "秀兰", "燕", "玲", "桂英", "丹", "萍", "红", "玉兰", "桂兰", "英", "梅", "莉", "秀珍", "婷", "玉梅", "玉珍", "凤英", "晶", "玉英", "颖", "雪", "慧", "红梅", "倩", "琴", "兰英", "畅", "云", "洁", "柳", "淑珍", "春梅", "海燕", "冬梅", "秀荣", "桂珍", "莹", "秀云", "桂荣", "秀梅", "丽娟", "婷婷", "玉华", "琳", "雪梅", "淑兰", "丽丽", "玉", "秀芳", "欣", "淑英", "桂芳", "丽华", "丹丹", "桂香", "淑华", "荣", "秀华", "桂芝", "小红", "金凤", "瑜", "桂花", "璐", "凤兰", ] first_names = first_names_male + first_names_female # From https://zh.wikipedia.org/wiki/%E4%B8%AD%E5%9B%BD%E5%A7%93%E6%B0%8F%E6%8E%92%E5%90%8D last_names = OrderedDict( ( ("王", 7.170), ("李", 7.000), ("张", 6.740), ("刘", 5.100), ("陈", 4.610), ("杨", 3.220), ("黄", 2.450), ("吴", 2.000), ("赵", 2.000), ("周", 1.900), ("徐", 1.450), ("孙", 1.380), ("马", 1.290), ("朱", 1.280), ("胡", 1.160), ("林", 1.130), ("郭", 1.130), ("何", 1.060), ("高", 1.000), ("罗", 0.950), ("郑", 0.930), ("梁", 0.850), ("谢", 0.760), ("宋", 0.700), ("唐", 0.690), ("许", 0.660), ("邓", 0.620), ("冯", 0.620), ("韩", 0.610), ("曹", 0.600), ("曾", 0.580), ("彭", 0.580), ("萧", 0.560), ("蔡", 0.530), ("潘", 0.520), ("田", 0.520), ("董", 0.510), ("袁", 0.500), ("于", 0.480), ("余", 0.480), ("叶", 0.480), ("蒋", 0.480), ("杜", 0.470), ("苏", 0.460), ("魏", 0.450), ("程", 0.450), ("吕", 0.450), ("丁", 0.430), ("沈", 0.410), ("任", 0.410), ("姚", 0.400), ("卢", 0.400), ("傅", 0.400), ("钟", 0.400), ("姜", 0.390), ("崔", 0.380), ("谭", 0.380), ("廖", 0.370), ("范", 0.360), ("汪", 0.360), ("陆", 0.360), ("金", 0.350), ("石", 0.340), ("戴", 0.340), ("贾", 0.330), ("韦", 0.320), ("夏", 0.320), ("邱", 0.320), ("方", 0.310), ("侯", 0.300), ("邹", 0.300), ("熊", 0.290), 
("孟", 0.290), ("秦", 0.290), ("白", 0.280), ("江", 0.280), ("阎", 0.270), ("薛", 0.260), ("尹", 0.260), ("段", 0.240), ("雷", 0.240), ("黎", 0.220), ("史", 0.210), ("龙", 0.210), ("陶", 0.210), ("贺", 0.210), ("顾", 0.200), ("毛", 0.200), ("郝", 0.200), ("龚", 0.200), ("邵", 0.200), ("万", 0.190), ("钱", 0.190), ("严", 0.190), ("赖", 0.180), ("覃", 0.180), ("洪", 0.180), ("武", 0.180), ("莫", 0.180), ("孔", 0.170), ("汤", 0.170), ("向", 0.170), ("常", 0.160), ("温", 0.160), ("康", 0.160), ("施", 0.150), ("文", 0.150), ("牛", 0.150), ("樊", 0.150), ("葛", 0.150), ("邢", 0.140), ("安", 0.130), ("齐", 0.130), ("易", 0.130), ("乔", 0.130), ("伍", 0.130), ("庞", 0.130), ("颜", 0.120), ("倪", 0.120), ("庄", 0.120), ("聂", 0.120), ("章", 0.120), ("鲁", 0.110), ("岳", 0.110), ("翟", 0.110), ("殷", 0.110), ("詹", 0.110), ("申", 0.110), ("欧", 0.110), ("耿", 0.110), ("关", 0.100), ("兰", 0.100), ("焦", 0.100), ("俞", 0.100), ("左", 0.100), ("柳", 0.100), ("甘", 0.095), ("祝", 0.090), ("包", 0.087), ("宁", 0.083), ("尚", 0.082), ("符", 0.082), ("舒", 0.082), ("阮", 0.082), ("柯", 0.080), ("纪", 0.080), ("梅", 0.079), ("童", 0.079), ("凌", 0.078), ("毕", 0.078), ("单", 0.076), ("季", 0.076), ("裴", 0.076), ("霍", 0.075), ("涂", 0.075), ("成", 0.075), ("苗", 0.075), ("谷", 0.075), ("盛", 0.074), ("曲", 0.074), ("翁", 0.073), ("冉", 0.073), ("骆", 0.073), ("蓝", 0.072), ("路", 0.072), ("游", 0.071), ("辛", 0.070), ("靳", 0.069), ("欧阳", 0.068), ("管", 0.065), ("柴", 0.065), ("蒙", 0.062), ("鲍", 0.062), ("华", 0.061), ("喻", 0.061), ("祁", 0.061), ("蒲", 0.056), ("房", 0.056), ("滕", 0.055), ("屈", 0.055), ("饶", 0.055), ("解", 0.053), ("牟", 0.053), ("艾", 0.052), ("尤", 0.052), ("阳", 0.050), ("时", 0.050), ("穆", 0.048), ("农", 0.047), ("司", 0.044), ("卓", 0.043), ("古", 0.043), ("吉", 0.043), ("缪", 0.043), ("简", 0.043), ("车", 0.043), ("项", 0.043), ("连", 0.043), ("芦", 0.042), ("麦", 0.041), ("褚", 0.041), ("娄", 0.040), ("窦", 0.040), ("戚", 0.040), ("岑", 0.039), ("景", 0.039), ("党", 0.039), ("宫", 0.039), ("费", 0.039), ("卜", 0.038), ("冷", 0.038), ("晏", 0.038), ("席", 0.036), ("卫", 0.036), ("米", 
0.035), ("柏", 0.035), ("宗", 0.034), ("瞿", 0.033), ("桂", 0.033), ("全", 0.033), ("佟", 0.033), ("应", 0.033), ("臧", 0.032), ("闵", 0.032), ("苟", 0.032), ("邬", 0.032), ("边", 0.032), ("卞", 0.032), ("姬", 0.032), ("师", 0.031), ("和", 0.031), ("仇", 0.030), ("栾", 0.030), ("隋", 0.030), ("商", 0.030), ("刁", 0.030), ("沙", 0.030), ("荣", 0.029), ("巫", 0.029), ("寇", 0.029), ("桑", 0.028), ("郎", 0.028), ("甄", 0.027), ("丛", 0.027), ("仲", 0.027), ("虞", 0.026), ("敖", 0.026), ("巩", 0.026), ("明", 0.026), ("佘", 0.025), ("池", 0.025), ("查", 0.025), ("麻", 0.025), ("苑", 0.025), ("迟", 0.024), ("邝", 0.024), ("官", 0.023), ("封", 0.023), ("谈", 0.023), ("匡", 0.023), ("鞠", 0.230), ("惠", 0.022), ("荆", 0.022), ("乐", 0.022), ("冀", 0.021), ("郁", 0.021), ("胥", 0.021), ("南", 0.021), ("班", 0.021), ("储", 0.021), ("原", 0.020), ("栗", 0.020), ("燕", 0.020), ("楚", 0.020), ("鄢", 0.020), ("劳", 0.019), ("谌", 0.019), ("奚", 0.017), ("皮", 0.017), ("粟", 0.017), ("冼", 0.017), ("蔺", 0.017), ("楼", 0.017), ("盘", 0.017), ("满", 0.016), ("闻", 0.016), ("位", 0.016), ("厉", 0.016), ("伊", 0.016), ("仝", 0.015), ("区", 0.015), ("郜", 0.015), ("海", 0.015), ("阚", 0.015), ("花", 0.015), ("权", 0.014), ("强", 0.014), ("帅", 0.014), ("屠", 0.014), ("豆", 0.014), ("朴", 0.014), ("盖", 0.014), ("练", 0.014), ("廉", 0.014), ("禹", 0.014), ("井", 0.013), ("祖", 0.013), ("漆", 0.013), ("巴", 0.013), ("丰", 0.013), ("支", 0.013), ("卿", 0.013), ("国", 0.013), ("狄", 0.013), ("平", 0.013), ("计", 0.012), ("索", 0.012), ("宣", 0.012), ("晋", 0.012), ("相", 0.012), ("初", 0.012), ("门", 0.012), ("云", 0.012), ("容", 0.012), ("敬", 0.011), ("来", 0.011), ("扈", 0.011), ("晁", 0.011), ("芮", 0.011), ("都", 0.011), ("普", 0.011), ("阙", 0.011), ("浦", 0.011), ("戈", 0.011), ("伏", 0.011), ("鹿", 0.011), ("薄", 0.011), ("邸", 0.011), ("雍", 0.010), ("辜", 0.010), ("羊", 0.010), ("阿", 0.010), ("乌", 0.010), ("母", 0.010), ("裘", 0.010), ("亓", 0.010), ("修", 0.010), ("邰", 0.010), ("赫", 0.010), ("杭", 0.010), ("况", 0.0094), ("那", 0.0093), ("宿", 0.0093), ("鲜", 0.0092), ("印", 0.0091), ("逯", 0.0091), ("隆", 
0.0090), ("茹", 0.0090), ("诸", 0.0089), ("战", 0.0088), ("慕", 0.0086), ("危", 0.0084), ("玉", 0.0084), ("银", 0.0084), ("亢", 0.0083), ("嵇", 0.0082), ("公", 0.0082), ("哈", 0.0081), ("湛", 0.0079), ("宾", 0.0077), ("戎", 0.0076), ("勾", 0.0076), ("茅", 0.0076), ("利", 0.0076), ("于", 0.0074), ("呼", 0.0074), ("居", 0.0074), ("揭", 0.0073), ("干", 0.0072), ("但", 0.0072), ("尉", 0.0071), ("冶", 0.0071), ("斯", 0.0070), ("元", 0.0069), ("束", 0.0068), ("檀", 0.0068), ("衣", 0.0067), ("信", 0.0067), ("展", 0.0067), ("阴", 0.0067), ("昝", 0.0066), ("智", 0.0065), ("幸", 0.0065), ("奉", 0.0064), ("植", 0.0064), ("衡", 0.0063), ("富", 0.0063), ("尧", 0.0060), ("闭", 0.0060), ("由", 0.0060), ) ) romanized_formats = ("{{first_romanized_name}} {{last_romanized_name}}",) # From https://en.wikipedia.org/wiki/Chinese_given_name#Common_Chinese_names, # with accents stripped first_romanized_names = ( "Chao", "Fang", "Gang", "Guiying", "Jie", "Jing", "Juan", "Jun", "Lei", "Li", "Min", "Ming", "Na", "Ping", "Qiang", "Tao", "Wei", "Xia", "Xiulan", "Xiuying", "Yang", "Yong", "Yan", ) # From https://en.wikipedia.org/wiki/List_of_common_Chinese_surnames # with accents stripped last_romanized_names = ( "Bai", "Cai", "Cao", "Chang", "Chen", "Cheng", "Cui", "Dai", "Deng", "Ding", "Dong", "Du", "Duan", "Fan", "Fang", "Feng", "Fu", "Gao", "Gong", "Gu", "Guo", "Han", "Hao", "He", "Hou", "Hu", "Huang", "Jia", "Jiang", "Jin", "Kang", "Kong", "Lai", "Lei", "Li", "Liang", "Liao", "Lin", "Liu", "Long", "Lu", "Luo", "Ma", "Mao", "Meng", "Mo", "Pan", "Peng", "Qian", "Qiao", "Qin", "Qiu", "Ren", "Shao", "Shen", "Shi", "Song", "Su", "Sun", "Tan", "Tang", "Tao", "Tian", "Wan", "Wang", "Wei", "Wen", "Wu", "Xia", "Xiang", "Xiao", "Xie", "Xiong", "Xu", "Xue", "Yan", "Yang", "Yao", "Ye", "Yi", "Yin", "Yu", "Yuan", "Zeng", "Zhang", "Zhao", "Zheng", "Zhong", "Zhou", "Zhu", "Zou", ) def romanized_name(self) -> str: """ :example: 'Chao Bai' """ pattern: str = self.random_element(self.romanized_formats) return self.generator.parse(pattern) def 
first_romanized_name(self) -> str: """ :example: 'Chao' """ return self.random_element(self.first_romanized_names) def last_romanized_name(self) -> str: """ :example: 'Chao' """ return self.random_element(self.last_romanized_names)
Provider
python
apache__airflow
airflow-core/src/airflow/api/client/local_client.py
{ "start": 1073, "end": 3930 }
class ____: """Local API client implementation.""" def __init__(self, auth=None, session: httpx.Client | None = None): self._session: httpx.Client = session or httpx.Client() if auth: self._session.auth = auth def trigger_dag( self, dag_id, run_id=None, conf=None, logical_date=None, triggering_user_name=None, replace_microseconds=True, ) -> dict | None: dag_run = trigger_dag.trigger_dag( dag_id=dag_id, triggered_by=DagRunTriggeredByType.CLI, triggering_user_name=triggering_user_name, run_id=run_id, conf=conf, logical_date=logical_date, replace_microseconds=replace_microseconds, ) if dag_run: return { "conf": dag_run.conf, "dag_id": dag_run.dag_id, "dag_run_id": dag_run.run_id, "data_interval_start": dag_run.data_interval_start, "data_interval_end": dag_run.data_interval_end, "end_date": dag_run.end_date, "last_scheduling_decision": dag_run.last_scheduling_decision, "logical_date": dag_run.logical_date, "run_type": dag_run.run_type, "start_date": dag_run.start_date, "state": dag_run.state, "triggering_user_name": dag_run.triggering_user_name, } return dag_run def delete_dag(self, dag_id): count = delete_dag.delete_dag(dag_id) return f"Removed {count} record(s)" def get_pool(self, name): pool = Pool.get_pool(pool_name=name) if not pool: raise PoolNotFound(f"Pool {name} not found") return pool.pool, pool.slots, pool.description, pool.include_deferred def get_pools(self): return [(p.pool, p.slots, p.description, p.include_deferred) for p in Pool.get_pools()] def create_pool(self, name, slots, description, include_deferred): if not (name and name.strip()): raise AirflowBadRequest("Pool name shouldn't be empty") pool_name_length = Pool.pool.property.columns[0].type.length if len(name) > pool_name_length: raise AirflowBadRequest(f"pool name cannot be more than {pool_name_length} characters") try: slots = int(slots) except ValueError: raise AirflowBadRequest(f"Bad value for `slots`: {slots}") pool = Pool.create_or_update_pool( name=name, slots=slots, 
description=description, include_deferred=include_deferred ) return pool.pool, pool.slots, pool.description def delete_pool(self, name): pool = Pool.delete_pool(name=name) return pool.pool, pool.slots, pool.description
Client
python
numba__numba
numba/core/untyped_passes.py
{ "start": 8290, "end": 9279 }
class ____(FunctionPass): _name = "with_lifting" def __init__(self): FunctionPass.__init__(self) def run_pass(self, state): """ Extract with-contexts """ main, withs = transforms.with_lifting( func_ir=state.func_ir, typingctx=state.typingctx, targetctx=state.targetctx, flags=state.flags, locals=state.locals, ) if withs: from numba.core.compiler import compile_ir, _EarlyPipelineCompletion cres = compile_ir(state.typingctx, state.targetctx, main, state.args, state.return_type, state.flags, state.locals, lifted=tuple(withs), lifted_from=None, pipeline_class=type(state.pipeline)) raise _EarlyPipelineCompletion(cres) return True @register_pass(mutates_CFG=True, analysis_only=False)
WithLifting
python
ray-project__ray
python/ray/tests/test_runtime_env_plugin.py
{ "start": 672, "end": 3100 }
class ____(RuntimeEnvPlugin): name = MY_PLUGIN_NAME env_key = "MY_PLUGIN_TEST_ENVIRONMENT_KEY" @staticmethod def validate(runtime_env: RuntimeEnv) -> str: value = runtime_env[MY_PLUGIN_NAME] if value == "fail": raise ValueError("not allowed") return value def modify_context( self, uris: List[str], runtime_env: RuntimeEnv, ctx: RuntimeEnvContext, logger: logging.Logger, ) -> None: plugin_config_dict = runtime_env[MY_PLUGIN_NAME] ctx.env_vars[MyPlugin.env_key] = str(plugin_config_dict["env_value"]) ctx.command_prefix += [ "echo", plugin_config_dict["tmp_content"], ">", plugin_config_dict["tmp_file"], "&&", ] ctx.py_executable = ( plugin_config_dict["prefix_command"] + " " + ctx.py_executable ) @pytest.mark.parametrize( "set_runtime_env_plugins", [ '[{"class":"' + MY_PLUGIN_CLASS_PATH + '"}]', ], indirect=True, ) def test_simple_env_modification_plugin(set_runtime_env_plugins, ray_start_regular): _, tmp_file_path = tempfile.mkstemp() @ray.remote def f(): import psutil with open(tmp_file_path, "r") as f: content = f.read().strip() return { "env_value": os.environ[MyPlugin.env_key], "tmp_content": content, "nice": psutil.Process().nice(), } with pytest.raises(RuntimeEnvSetupError, match="not allowed"): ray.get(f.options(runtime_env={MY_PLUGIN_NAME: "fail"}).remote()) if os.name != "nt": output = ray.get( f.options( runtime_env={ MY_PLUGIN_NAME: { "env_value": 42, "tmp_file": tmp_file_path, "tmp_content": "hello", # See https://en.wikipedia.org/wiki/Nice_(Unix) "prefix_command": "nice -n 19", } } ).remote() ) assert output == {"env_value": "42", "tmp_content": "hello", "nice": 19} MY_PLUGIN_FOR_HANG_CLASS_PATH = "ray.tests.test_runtime_env_plugin.MyPluginForHang" MY_PLUGIN_FOR_HANG_NAME = "MyPluginForHang" my_plugin_setup_times = 0 # This plugin will hang when first setup, second setup will ok
MyPlugin
python
doocs__leetcode
solution/0600-0699/0634.Find the Derangement of An Array/Solution.py
{ "start": 0, "end": 223 }
class ____: def findDerangement(self, n: int) -> int: mod = 10**9 + 7 f = [1] + [0] * n for i in range(2, n + 1): f[i] = (i - 1) * (f[i - 1] + f[i - 2]) % mod return f[n]
Solution
python
airbytehq__airbyte
airbyte-integrations/connectors/source-sftp-bulk/source_sftp_bulk/source.py
{ "start": 526, "end": 959 }
class ____(FileBasedSource): def __init__(self, catalog: Optional[ConfiguredAirbyteCatalog], config: Optional[Mapping[str, Any]], state: Optional[TState]): super().__init__( stream_reader=SourceSFTPBulkStreamReader(), spec_class=SourceSFTPBulkSpec, catalog=catalog, config=config, state=state, cursor_cls=DefaultFileBasedCursor, )
SourceSFTPBulk
python
fluentpython__example-code
attic/objects/attr_list.py
{ "start": 65, "end": 1517 }
class ____(): pass # int, str, sample_types = [object, list, Class, type(Class), type(fn)] if '-' in sys.argv: del sample_types[0] # exlude `object` sample_objs = [type_() for type_ in sample_types[:-2]] + [Class, fn] sample_oids = [id(obj) for obj in sample_objs] fmt = '{attr:17}' + '|{:8}' * len(sample_types) headings = [t.__name__ for t in sample_types] headings[headings.index('Class')] = 'instance' headings[headings.index('type')] = 'class' common_attrs = set() for obj in sample_objs: for attr_name in dir(obj): common_attrs.add(attr_name) print(fmt.format(*headings, attr='')) counter = Counter() for attr_name in sorted(common_attrs): if not attr_name.startswith('__'): continue flags = [] found = 0 for obj in sample_objs: try: attr = getattr(obj, attr_name) if type(attr) == type: flag = 'type' elif callable(attr): flag = 'method' else: flag = 'data' counter[id(obj)] += 1 found += 1 except AttributeError: flag = '' flags.append(flag) if '-' in sys.argv: include = found < len(sample_objs) else: include = found == len(sample_objs) if include: print(fmt.format(*flags, attr=attr_name)) counts = [counter[oid] for oid in sample_oids] print(fmt.format(*counts, attr='TOTALS')) print(sys.argv)
Class
python
huggingface__transformers
src/transformers/models/dpr/tokenization_dpr_fast.py
{ "start": 1140, "end": 1660 }
class ____(BertTokenizer): r""" Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library). [`DPRContextEncoderTokenizerFast`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = DPRContextEncoderTokenizer
DPRContextEncoderTokenizerFast
python
kamyu104__LeetCode-Solutions
Python/final-prices-with-a-special-discount-in-a-shop.py
{ "start": 29, "end": 361 }
class ____(object): def finalPrices(self, prices): """ :type prices: List[int] :rtype: List[int] """ stk = [] for i, p in enumerate(prices): while stk and prices[stk[-1]] >= p: prices[stk.pop()] -= p stk.append(i) return prices
Solution
python
django-haystack__django-haystack
test_haystack/elasticsearch7_tests/test_backend.py
{ "start": 47619, "end": 49493 }
class ____(TestCase): """Used to test actual implementation details of the SearchQuerySet.""" fixtures = ["bulk_data.json"] def setUp(self): super().setUp() # Wipe it clean. clear_elasticsearch_index() # Reboot the schema. self.sb = connections["elasticsearch"].get_backend() self.sb.setup() # Stow. self.old_ui = connections["elasticsearch"].get_unified_index() self.ui = UnifiedIndex() self.smmi = Elasticsearch7MockSpellingIndex() self.ui.build(indexes=[self.smmi]) connections["elasticsearch"]._index = self.ui self.sqs = SearchQuerySet("elasticsearch") self.smmi.update(using="elasticsearch") def tearDown(self): # Restore. connections["elasticsearch"]._index = self.old_ui super().tearDown() def test_spelling(self): # self.assertEqual( # self.sqs.auto_query("structurd").spelling_suggestion(), "structured" # ) self.assertEqual( self.sqs.auto_query("structurd").spelling_suggestion(), "structur" ) # self.assertEqual(self.sqs.spelling_suggestion("structurd"), "structured") self.assertEqual(self.sqs.spelling_suggestion("structurd"), "structur") # self.assertEqual( # self.sqs.auto_query("srchindex instanc").spelling_suggestion(), # "searchindex instance", # ) self.assertEqual( self.sqs.auto_query("srchindex instanc").spelling_suggestion(), "searchindex instanc", ) # self.assertEqual( # self.sqs.spelling_suggestion("srchindex instanc"), "searchindex instance" # ) self.assertEqual( self.sqs.spelling_suggestion("srchindex instanc"), "searchindex instanc" )
LiveElasticsearch7SpellingTestCase
python
pallets__flask
tests/test_helpers.py
{ "start": 6970, "end": 9596 }
class ____: def test_streaming_with_context(self, app, client): @app.route("/") def index(): def generate(): yield "Hello " yield flask.request.args["name"] yield "!" return flask.Response(flask.stream_with_context(generate())) rv = client.get("/?name=World") assert rv.data == b"Hello World!" def test_streaming_with_context_as_decorator(self, app, client): @app.route("/") def index(): @flask.stream_with_context def generate(hello): yield hello yield flask.request.args["name"] yield "!" return flask.Response(generate("Hello ")) rv = client.get("/?name=World") assert rv.data == b"Hello World!" def test_streaming_with_context_and_custom_close(self, app, client): called = [] class Wrapper: def __init__(self, gen): self._gen = gen def __iter__(self): return self def close(self): called.append(42) def __next__(self): return next(self._gen) next = __next__ @app.route("/") def index(): def generate(): yield "Hello " yield flask.request.args["name"] yield "!" return flask.Response(flask.stream_with_context(Wrapper(generate()))) rv = client.get("/?name=World") assert rv.data == b"Hello World!" assert called == [42] def test_stream_keeps_session(self, app, client): @app.route("/") def index(): flask.session["test"] = "flask" @flask.stream_with_context def gen(): yield flask.session["test"] return flask.Response(gen()) rv = client.get("/") assert rv.data == b"flask" def test_async_view(self, app, client): @app.route("/") async def index(): flask.session["test"] = "flask" @flask.stream_with_context def gen(): yield flask.session["test"] return flask.Response(gen()) # response is closed without reading stream client.get().close() # response stream is read assert client.get().text == "flask" # same as above, but with client context preservation with client: client.get().close() with client: assert client.get().text == "flask"
TestStreaming
python
numba__numba
numba/core/ir.py
{ "start": 21555, "end": 21956 }
class ____(Stmt): def __init__(self, dct, key, value, loc): assert isinstance(dct, Var) assert isinstance(key, Var) assert isinstance(value, Var) assert isinstance(loc, Loc) self.dct = dct self.key = key self.value = value self.loc = loc def __repr__(self): return '%s[%s] = %s' % (self.dct, self.key, self.value)
StoreMap
python
getsentry__sentry
src/sentry_plugins/github/webhooks/events/push.py
{ "start": 953, "end": 9210 }
class ____(Webhook): def _handle(self, event, organization_id, is_apps): authors = {} gh_username_cache: dict[str, str | None] = {} try: repo = Repository.objects.get( organization_id=organization_id, provider="github_apps" if is_apps else "github", external_id=str(event["repository"]["id"]), ) except Repository.DoesNotExist: raise Http404() # We need to track GitHub's "full_name" which is the repository slug. # This is needed to access the API since `external_id` isn't sufficient. if repo.config.get("name") != event["repository"]["full_name"]: repo.config["name"] = event["repository"]["full_name"] repo.save() for commit in event["commits"]: if not commit["distinct"]: continue if RepositoryProvider.should_ignore_commit(commit["message"]): continue author_email = commit["author"]["email"] if "@" not in author_email: author_email = f"{author_email[:65]}@localhost" # try to figure out who anonymous emails are elif is_anonymous_email(author_email): gh_username = commit["author"].get("username") # bot users don't have usernames if gh_username: external_id = get_external_id(gh_username) if gh_username in gh_username_cache: author_email = gh_username_cache[gh_username] or author_email else: try: commit_author = CommitAuthor.objects.get( external_id=external_id, organization_id=organization_id ) except CommitAuthor.DoesNotExist: commit_author = None if commit_author is not None and not is_anonymous_email( commit_author.email ): author_email = commit_author.email gh_username_cache[gh_username] = author_email else: try: with GithubPluginClient() as client: gh_user = client.request_no_auth("GET", f"/users/{gh_username}") except ApiError as exc: logger.exception(str(exc)) else: # even if we can't find a user, set to none so we # don't re-query gh_username_cache[gh_username] = None user = user_service.get_user_by_social_auth( organization_id=organization_id, provider="github", uid=gh_user["id"], ) if user is not None: author_email = user.email gh_username_cache[gh_username] = 
author_email if commit_author is not None: try: with transaction.atomic( router.db_for_write(CommitAuthor) ): commit_author.update( email=author_email, external_id=external_id ) except IntegrityError: pass if commit_author is not None: authors[author_email] = commit_author # TODO(dcramer): we need to deal with bad values here, but since # its optional, lets just throw it out for now if len(author_email) > 75: author = None elif author_email not in authors: authors[author_email] = author = CommitAuthor.objects.get_or_create( organization_id=organization_id, email=author_email, defaults={"name": commit["author"]["name"][:128]}, )[0] update_kwargs = {} if author.name != commit["author"]["name"][:128]: update_kwargs["name"] = commit["author"]["name"][:128] gh_username = commit["author"].get("username") if gh_username: external_id = get_external_id(gh_username) if author.external_id != external_id and not is_anonymous_email(author.email): update_kwargs["external_id"] = external_id if update_kwargs: try: with transaction.atomic(router.db_for_write(CommitAuthor)): author.update(**update_kwargs) except IntegrityError: pass else: author = authors[author_email] try: with transaction.atomic(router.db_for_write(Commit)): c = Commit.objects.create( repository_id=repo.id, organization_id=organization_id, key=commit["id"], message=commit["message"], author=author, date_added=parse_date(commit["timestamp"]).astimezone(timezone.utc), ) file_changes = [] for fname in commit["added"]: file_changes.append( CommitFileChange( organization_id=organization_id, commit_id=c.id, filename=fname, type="A", ) ) for fname in commit["removed"]: file_changes.append( CommitFileChange( organization_id=organization_id, commit_id=c.id, filename=fname, type="D", ) ) for fname in commit["modified"]: file_changes.append( CommitFileChange( organization_id=organization_id, commit_id=c.id, filename=fname, type="M", ) ) if file_changes: CommitFileChange.objects.bulk_create(file_changes) except 
IntegrityError: pass # https://developer.github.com/v3/activity/events/types/#pushevent def __call__(self, event, organization: Organization | None = None): is_apps = "installation" in event if organization is None: if "installation" not in event: return integration = integration_service.get_integration( external_id=event["installation"]["id"], provider="github_apps" ) if integration is None: raise Integration.DoesNotExist integration_orgs = integration_service.get_organization_integrations( integration_id=integration.id ) organizations = [org.organization_id for org in integration_orgs] else: organizations = [organization.id] for org_id in organizations: self._handle(event, org_id, is_apps)
PushEventWebhook
python
huggingface__transformers
src/transformers/utils/quantization_config.py
{ "start": 84378, "end": 87204 }
class ____(QuantizationConfigMixin): """ Configuration class for applying BitNet quantization. Args: modules_to_not_convert (`Optional[List]`, *optional*): Optionally, provides a list of full paths of `nn.Linear` weight parameters that shall not be quantized. Defaults to None. linear_class (`str`, *optional*, defaults to `"bitlinear"`): The type of linear class to use. Can be either `bitlinear` or `autobitlinear`. quantization_mode (`str`, *optional*, defaults to `"offline"`): The quantization mode to use. Can be either `online` or `offline`. In `online` mode, the weight quantization parameters are calculated dynamically during each forward pass (e.g., based on the current weight values). This can adapt to weight changes during training (Quantization-Aware Training - QAT). In `offline` mode, quantization parameters are pre-calculated *before* inference. These parameters are then fixed and loaded into the quantized model. This generally results in lower runtime overhead compared to online quantization. use_rms_norm (`bool`, *optional*, defaults to `False`): Whether to apply RMSNorm on the activations before quantization. This matches the original BitNet paper's approach of normalizing activations before quantization/packing. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon value used in the RMSNorm layer for numerical stability. kwargs (`dict[str, Any]`, *optional*): Additional keyword arguments that may be used by specific quantization backends or future versions. 
""" def __init__( self, modules_to_not_convert: list | None = None, linear_class: str = "bitlinear", quantization_mode: str = "offline", use_rms_norm: bool = False, rms_norm_eps: float | None = 1e-6, **kwargs, ): if linear_class not in ["bitlinear", "autobitlinear"]: raise ValueError(f"linear_class must be either 'bitlinear' or 'autobitlinear', but got {linear_class}") if quantization_mode not in ["online", "offline"]: raise ValueError(f"quantization_mode must be either 'online' or 'offline', but got {quantization_mode}") self.quant_method = QuantizationMethod.BITNET self.modules_to_not_convert = modules_to_not_convert self.linear_class = linear_class self.quantization_mode = quantization_mode self.use_rms_norm = use_rms_norm self.rms_norm_eps = rms_norm_eps self.post_init() def post_init(self): r""" Safety checker that arguments are correct """ @dataclass
BitNetQuantConfig
python
langchain-ai__langchain
libs/langchain/langchain_classic/chains/combine_documents/map_reduce.py
{ "start": 995, "end": 11858 }
class ____(BaseCombineDocumentsChain): """Combining documents by mapping a chain over them, then combining results. We first call `llm_chain` on each document individually, passing in the `page_content` and any other kwargs. This is the `map` step. We then process the results of that `map` step in a `reduce` step. This should likely be a ReduceDocumentsChain. Example: ```python from langchain_classic.chains import ( StuffDocumentsChain, LLMChain, ReduceDocumentsChain, MapReduceDocumentsChain, ) from langchain_core.prompts import PromptTemplate from langchain_openai import OpenAI # This controls how each document will be formatted. Specifically, # it will be passed to `format_document` - see that function for more # details. document_prompt = PromptTemplate( input_variables=["page_content"], template="{page_content}" ) document_variable_name = "context" model = OpenAI() # The prompt here should take as an input variable the # `document_variable_name` prompt = PromptTemplate.from_template("Summarize this content: {context}") llm_chain = LLMChain(llm=model, prompt=prompt) # We now define how to combine these summaries reduce_prompt = PromptTemplate.from_template( "Combine these summaries: {context}" ) reduce_llm_chain = LLMChain(llm=model, prompt=reduce_prompt) combine_documents_chain = StuffDocumentsChain( llm_chain=reduce_llm_chain, document_prompt=document_prompt, document_variable_name=document_variable_name, ) reduce_documents_chain = ReduceDocumentsChain( combine_documents_chain=combine_documents_chain, ) chain = MapReduceDocumentsChain( llm_chain=llm_chain, reduce_documents_chain=reduce_documents_chain, ) # If we wanted to, we could also pass in collapse_documents_chain # which is specifically aimed at collapsing documents BEFORE # the final call. 
prompt = PromptTemplate.from_template("Collapse this content: {context}") llm_chain = LLMChain(llm=model, prompt=prompt) collapse_documents_chain = StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, document_variable_name=document_variable_name, ) reduce_documents_chain = ReduceDocumentsChain( combine_documents_chain=combine_documents_chain, collapse_documents_chain=collapse_documents_chain, ) chain = MapReduceDocumentsChain( llm_chain=llm_chain, reduce_documents_chain=reduce_documents_chain, ) ``` """ llm_chain: LLMChain """Chain to apply to each document individually.""" reduce_documents_chain: BaseCombineDocumentsChain """Chain to use to reduce the results of applying `llm_chain` to each doc. This typically either a ReduceDocumentChain or StuffDocumentChain.""" document_variable_name: str """The variable name in the llm_chain to put the documents in. If only one variable in the llm_chain, this need not be provided.""" return_intermediate_steps: bool = False """Return the results of the map steps in the output.""" @override def get_output_schema( self, config: RunnableConfig | None = None, ) -> type[BaseModel]: if self.return_intermediate_steps: return create_model( "MapReduceDocumentsOutput", **{ self.output_key: (str, None), "intermediate_steps": (list[str], None), }, ) return super().get_output_schema(config) @property def output_keys(self) -> list[str]: """Expect input key.""" _output_keys = super().output_keys if self.return_intermediate_steps: _output_keys = [*_output_keys, "intermediate_steps"] return _output_keys model_config = ConfigDict( arbitrary_types_allowed=True, extra="forbid", ) @model_validator(mode="before") @classmethod def get_reduce_chain(cls, values: dict) -> Any: """For backwards compatibility.""" if "combine_document_chain" in values: if "reduce_documents_chain" in values: msg = ( "Both `reduce_documents_chain` and `combine_document_chain` " "cannot be provided at the same time. 
`combine_document_chain` " "is deprecated, please only provide `reduce_documents_chain`" ) raise ValueError(msg) combine_chain = values["combine_document_chain"] collapse_chain = values.get("collapse_document_chain") reduce_chain = ReduceDocumentsChain( combine_documents_chain=combine_chain, collapse_documents_chain=collapse_chain, ) values["reduce_documents_chain"] = reduce_chain del values["combine_document_chain"] values.pop("collapse_document_chain", None) return values @model_validator(mode="before") @classmethod def get_return_intermediate_steps(cls, values: dict) -> Any: """For backwards compatibility.""" if "return_map_steps" in values: values["return_intermediate_steps"] = values["return_map_steps"] del values["return_map_steps"] return values @model_validator(mode="before") @classmethod def get_default_document_variable_name(cls, values: dict) -> Any: """Get default document variable name, if not provided.""" if "llm_chain" not in values: msg = "llm_chain must be provided" raise ValueError(msg) llm_chain_variables = values["llm_chain"].prompt.input_variables if "document_variable_name" not in values: if len(llm_chain_variables) == 1: values["document_variable_name"] = llm_chain_variables[0] else: msg = ( "document_variable_name must be provided if there are " "multiple llm_chain input_variables" ) raise ValueError(msg) elif values["document_variable_name"] not in llm_chain_variables: msg = ( f"document_variable_name {values['document_variable_name']} was " f"not found in llm_chain input_variables: {llm_chain_variables}" ) raise ValueError(msg) return values @property def collapse_document_chain(self) -> BaseCombineDocumentsChain: """Kept for backward compatibility.""" if isinstance(self.reduce_documents_chain, ReduceDocumentsChain): if self.reduce_documents_chain.collapse_documents_chain: return self.reduce_documents_chain.collapse_documents_chain return self.reduce_documents_chain.combine_documents_chain msg = ( f"`reduce_documents_chain` is of type " 
f"{type(self.reduce_documents_chain)} so it does not have " f"this attribute." ) raise ValueError(msg) @property def combine_document_chain(self) -> BaseCombineDocumentsChain: """Kept for backward compatibility.""" if isinstance(self.reduce_documents_chain, ReduceDocumentsChain): return self.reduce_documents_chain.combine_documents_chain msg = ( f"`reduce_documents_chain` is of type " f"{type(self.reduce_documents_chain)} so it does not have " f"this attribute." ) raise ValueError(msg) def combine_docs( self, docs: list[Document], token_max: int | None = None, callbacks: Callbacks = None, **kwargs: Any, ) -> tuple[str, dict]: """Combine documents in a map reduce manner. Combine by mapping first chain over all documents, then reducing the results. This reducing can be done recursively if needed (if there are many documents). """ map_results = self.llm_chain.apply( # FYI - this is parallelized and so it is fast. [{self.document_variable_name: d.page_content, **kwargs} for d in docs], callbacks=callbacks, ) question_result_key = self.llm_chain.output_key result_docs = [ Document(page_content=r[question_result_key], metadata=docs[i].metadata) # This uses metadata from the docs, and the textual results from `results` for i, r in enumerate(map_results) ] result, extra_return_dict = self.reduce_documents_chain.combine_docs( result_docs, token_max=token_max, callbacks=callbacks, **kwargs, ) if self.return_intermediate_steps: intermediate_steps = [r[question_result_key] for r in map_results] extra_return_dict["intermediate_steps"] = intermediate_steps return result, extra_return_dict async def acombine_docs( self, docs: list[Document], token_max: int | None = None, callbacks: Callbacks = None, **kwargs: Any, ) -> tuple[str, dict]: """Combine documents in a map reduce manner. Combine by mapping first chain over all documents, then reducing the results. This reducing can be done recursively if needed (if there are many documents). 
""" map_results = await self.llm_chain.aapply( # FYI - this is parallelized and so it is fast. [{self.document_variable_name: d.page_content, **kwargs} for d in docs], callbacks=callbacks, ) question_result_key = self.llm_chain.output_key result_docs = [ Document(page_content=r[question_result_key], metadata=docs[i].metadata) # This uses metadata from the docs, and the textual results from `results` for i, r in enumerate(map_results) ] result, extra_return_dict = await self.reduce_documents_chain.acombine_docs( result_docs, token_max=token_max, callbacks=callbacks, **kwargs, ) if self.return_intermediate_steps: intermediate_steps = [r[question_result_key] for r in map_results] extra_return_dict["intermediate_steps"] = intermediate_steps return result, extra_return_dict @property def _chain_type(self) -> str: return "map_reduce_documents_chain"
MapReduceDocumentsChain
python
django__django
tests/i18n/forms.py
{ "start": 56, "end": 396 }
class ____(forms.Form): decimal_field = forms.DecimalField(localize=True) float_field = forms.FloatField(localize=True) date_field = forms.DateField(localize=True) datetime_field = forms.DateTimeField(localize=True) time_field = forms.TimeField(localize=True) integer_field = forms.IntegerField(localize=True)
I18nForm
python
kubernetes-client__python
kubernetes/client/models/v1_api_resource_list.py
{ "start": 383, "end": 7505 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'group_version': 'str', 'kind': 'str', 'resources': 'list[V1APIResource]' } attribute_map = { 'api_version': 'apiVersion', 'group_version': 'groupVersion', 'kind': 'kind', 'resources': 'resources' } def __init__(self, api_version=None, group_version=None, kind=None, resources=None, local_vars_configuration=None): # noqa: E501 """V1APIResourceList - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._group_version = None self._kind = None self._resources = None self.discriminator = None if api_version is not None: self.api_version = api_version self.group_version = group_version if kind is not None: self.kind = kind self.resources = resources @property def api_version(self): """Gets the api_version of this V1APIResourceList. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this V1APIResourceList. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V1APIResourceList. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V1APIResourceList. # noqa: E501 :type: str """ self._api_version = api_version @property def group_version(self): """Gets the group_version of this V1APIResourceList. # noqa: E501 groupVersion is the group and version this APIResourceList is for. # noqa: E501 :return: The group_version of this V1APIResourceList. # noqa: E501 :rtype: str """ return self._group_version @group_version.setter def group_version(self, group_version): """Sets the group_version of this V1APIResourceList. groupVersion is the group and version this APIResourceList is for. # noqa: E501 :param group_version: The group_version of this V1APIResourceList. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and group_version is None: # noqa: E501 raise ValueError("Invalid value for `group_version`, must not be `None`") # noqa: E501 self._group_version = group_version @property def kind(self): """Gets the kind of this V1APIResourceList. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V1APIResourceList. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V1APIResourceList. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1APIResourceList. 
# noqa: E501 :type: str """ self._kind = kind @property def resources(self): """Gets the resources of this V1APIResourceList. # noqa: E501 resources contains the name of the resources and if they are namespaced. # noqa: E501 :return: The resources of this V1APIResourceList. # noqa: E501 :rtype: list[V1APIResource] """ return self._resources @resources.setter def resources(self, resources): """Sets the resources of this V1APIResourceList. resources contains the name of the resources and if they are namespaced. # noqa: E501 :param resources: The resources of this V1APIResourceList. # noqa: E501 :type: list[V1APIResource] """ if self.local_vars_configuration.client_side_validation and resources is None: # noqa: E501 raise ValueError("Invalid value for `resources`, must not be `None`") # noqa: E501 self._resources = resources def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1APIResourceList): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1APIResourceList): return True return self.to_dict() != other.to_dict()
V1APIResourceList
python
explosion__spaCy
spacy/lang/zh/__init__.py
{ "start": 1303, "end": 11154 }
class ____(DummyTokenizer):
    """Chinese tokenizer supporting three segmentation backends.

    The backend is selected by ``segmenter``: ``char`` (default, one token
    per character), ``jieba``, or ``pkuseg``. The pkuseg model itself is not
    loaded in ``__init__``; it is supplied via ``initialize`` or restored by
    ``from_bytes``/``from_disk``.
    """

    def __init__(self, vocab: Vocab, segmenter: Segmenter = Segmenter.char):
        self.vocab = vocab
        # Store the plain string value so the setting serializes cleanly.
        self.segmenter = (
            segmenter.value if isinstance(segmenter, Segmenter) else segmenter
        )
        self.pkuseg_seg = None
        self.jieba_seg = None
        if self.segmenter not in Segmenter.values():
            # Unknown setting: warn and fall back to character segmentation.
            warn_msg = Warnings.W103.format(
                lang="Chinese",
                segmenter=self.segmenter,
                supported=", ".join(Segmenter.values()),
                default="'char' (character segmentation)",
            )
            warnings.warn(warn_msg)
            self.segmenter = Segmenter.char
        if self.segmenter == Segmenter.jieba:
            self.jieba_seg = try_jieba_import()

    def initialize(
        self,
        get_examples: Optional[Callable[[], Iterable[Example]]] = None,
        *,
        nlp: Optional[Language] = None,
        pkuseg_model: Optional[str] = None,
        pkuseg_user_dict: Optional[str] = "default",
    ):
        """Load the pkuseg model/user dict when the pkuseg segmenter is active.

        ``get_examples`` and ``nlp`` are accepted but unused here — presumably
        required by the shared initialize API; confirm against callers.
        """
        if self.segmenter == Segmenter.pkuseg:
            if pkuseg_user_dict is None:
                # Fall back to the model path when no user dict is given.
                pkuseg_user_dict = pkuseg_model
            self.pkuseg_seg = try_pkuseg_import(
                pkuseg_model=pkuseg_model, pkuseg_user_dict=pkuseg_user_dict
            )

    def __call__(self, text: str) -> Doc:
        """Segment ``text`` and return a Doc using the configured backend."""
        if self.segmenter == Segmenter.jieba:
            # Drop empty segments returned by jieba before alignment.
            words = list([x for x in self.jieba_seg.cut(text, cut_all=False) if x])  # type: ignore[union-attr]
            (words, spaces) = util.get_words_and_spaces(words, text)
            return Doc(self.vocab, words=words, spaces=spaces)
        elif self.segmenter == Segmenter.pkuseg:
            # pkuseg requires a loaded model (see initialize/from_disk).
            if self.pkuseg_seg is None:
                raise ValueError(Errors.E1000)
            words = self.pkuseg_seg.cut(text)
            (words, spaces) = util.get_words_and_spaces(words, text)
            return Doc(self.vocab, words=words, spaces=spaces)

        # warn if segmenter setting is not the only remaining option "char"
        if self.segmenter != Segmenter.char:
            warn_msg = Warnings.W103.format(
                lang="Chinese",
                segmenter=self.segmenter,
                supported=", ".join(Segmenter.values()),
                default="'char' (character segmentation)",
            )
            warnings.warn(warn_msg)

        # split into individual characters
        words = list(text)
        (words, spaces) = util.get_words_and_spaces(words, text)
        return Doc(self.vocab, words=words, spaces=spaces)

    def pkuseg_update_user_dict(self, words: List[str], reset: bool = False):
        """Insert ``words`` into the pkuseg user dict; optionally reset it first.

        No-op (with a warning) when the active segmenter is not pkuseg.
        """
        if self.segmenter == Segmenter.pkuseg:
            if reset:
                try:
                    import spacy_pkuseg

                    # Replace the preprocesser to clear the existing user dict.
                    self.pkuseg_seg.preprocesser = spacy_pkuseg.Preprocesser(None)  # type: ignore[attr-defined]
                except ImportError:
                    msg = (
                        "spacy_pkuseg not installed: unable to reset pkuseg "
                        "user dict. Please " + _PKUSEG_INSTALL_MSG
                    )
                    raise ImportError(msg) from None
            for word in words:
                self.pkuseg_seg.preprocesser.insert(word.strip(), "")  # type: ignore[attr-defined]
        else:
            warn_msg = Warnings.W104.format(target="pkuseg", current=self.segmenter)
            warnings.warn(warn_msg)

    def score(self, examples):
        """Score tokenization quality on a batch of Examples."""
        validate_examples(examples, "ChineseTokenizer.score")
        return Scorer.score_tokenization(examples)

    def _get_config(self) -> Dict[str, Any]:
        # Only the segmenter choice is part of the serialized config.
        return {
            "segmenter": self.segmenter,
        }

    def _set_config(self, config: Dict[str, Any] = {}) -> None:
        self.segmenter = config.get("segmenter", Segmenter.char)

    def to_bytes(self, **kwargs):
        """Serialize config plus any loaded pkuseg model/processor state."""
        pkuseg_features_b = b""
        pkuseg_weights_b = b""
        pkuseg_processors_data = None
        if self.pkuseg_seg:
            # The pkuseg API only saves to disk, so round-trip through a
            # temporary directory to obtain the raw bytes.
            with tempfile.TemporaryDirectory() as tempdir:
                self.pkuseg_seg.feature_extractor.save(tempdir)
                self.pkuseg_seg.model.save(tempdir)
                tempdir = Path(tempdir)
                with open(tempdir / "features.msgpack", "rb") as fileh:
                    pkuseg_features_b = fileh.read()
                with open(tempdir / "weights.npz", "rb") as fileh:
                    pkuseg_weights_b = fileh.read()
            # Sort the word sets for a deterministic serialization.
            pkuseg_processors_data = (
                _get_pkuseg_trie_data(self.pkuseg_seg.preprocesser.trie),
                self.pkuseg_seg.postprocesser.do_process,
                sorted(list(self.pkuseg_seg.postprocesser.common_words)),
                sorted(list(self.pkuseg_seg.postprocesser.other_words)),
            )
        serializers = {
            "cfg": lambda: srsly.json_dumps(self._get_config()),
            "pkuseg_features": lambda: pkuseg_features_b,
            "pkuseg_weights": lambda: pkuseg_weights_b,
            "pkuseg_processors": lambda: srsly.msgpack_dumps(pkuseg_processors_data),
        }
        return util.to_bytes(serializers, [])

    def from_bytes(self, data, **kwargs):
        """Restore config and (if present) the pkuseg model from bytes."""
        pkuseg_data = {"features_b": b"", "weights_b": b"", "processors_data": None}

        def deserialize_pkuseg_features(b):
            pkuseg_data["features_b"] = b

        def deserialize_pkuseg_weights(b):
            pkuseg_data["weights_b"] = b

        def deserialize_pkuseg_processors(b):
            pkuseg_data["processors_data"] = srsly.msgpack_loads(b)

        deserializers = {
            "cfg": lambda b: self._set_config(srsly.json_loads(b)),
            "pkuseg_features": deserialize_pkuseg_features,
            "pkuseg_weights": deserialize_pkuseg_weights,
            "pkuseg_processors": deserialize_pkuseg_processors,
        }
        util.from_bytes(data, deserializers, [])

        if pkuseg_data["features_b"] and pkuseg_data["weights_b"]:
            # Write the blobs back to disk because spacy_pkuseg only loads
            # models from a directory path.
            with tempfile.TemporaryDirectory() as tempdir:
                tempdir = Path(tempdir)
                with open(tempdir / "features.msgpack", "wb") as fileh:
                    fileh.write(pkuseg_data["features_b"])
                with open(tempdir / "weights.npz", "wb") as fileh:
                    fileh.write(pkuseg_data["weights_b"])
                try:
                    import spacy_pkuseg
                except ImportError:
                    raise ImportError(
                        "spacy-pkuseg not installed. To use this model, "
                        + _PKUSEG_INSTALL_MSG
                    ) from None
                self.pkuseg_seg = spacy_pkuseg.pkuseg(str(tempdir))
            if pkuseg_data["processors_data"]:
                processors_data = pkuseg_data["processors_data"]
                (user_dict, do_process, common_words, other_words) = processors_data
                self.pkuseg_seg.preprocesser = spacy_pkuseg.Preprocesser(user_dict)
                self.pkuseg_seg.postprocesser.do_process = do_process
                self.pkuseg_seg.postprocesser.common_words = set(common_words)
                self.pkuseg_seg.postprocesser.other_words = set(other_words)

        return self

    def to_disk(self, path, **kwargs):
        """Write config plus any pkuseg model/processor state under ``path``."""
        path = util.ensure_path(path)

        def save_pkuseg_model(path):
            if self.pkuseg_seg:
                if not path.exists():
                    path.mkdir(parents=True)
                self.pkuseg_seg.model.save(path)
                self.pkuseg_seg.feature_extractor.save(path)

        def save_pkuseg_processors(path):
            if self.pkuseg_seg:
                data = (
                    _get_pkuseg_trie_data(self.pkuseg_seg.preprocesser.trie),
                    self.pkuseg_seg.postprocesser.do_process,
                    sorted(list(self.pkuseg_seg.postprocesser.common_words)),
                    sorted(list(self.pkuseg_seg.postprocesser.other_words)),
                )
                srsly.write_msgpack(path, data)

        serializers = {
            "cfg": lambda p: srsly.write_json(p, self._get_config()),
            "pkuseg_model": lambda p: save_pkuseg_model(p),
            "pkuseg_processors": lambda p: save_pkuseg_processors(p),
        }
        return util.to_disk(path, serializers, [])

    def from_disk(self, path, **kwargs):
        """Load config plus any pkuseg model/processor state from ``path``."""
        path = util.ensure_path(path)

        def load_pkuseg_model(path):
            try:
                import spacy_pkuseg
            except ImportError:
                # Missing spacy_pkuseg is only fatal when pkuseg is the
                # configured segmenter; other segmenters don't need it.
                if self.segmenter == Segmenter.pkuseg:
                    raise ImportError(
                        "spacy-pkuseg not installed. To use this model, "
                        + _PKUSEG_INSTALL_MSG
                    ) from None
            if path.exists():
                self.pkuseg_seg = spacy_pkuseg.pkuseg(path)

        def load_pkuseg_processors(path):
            try:
                import spacy_pkuseg
            except ImportError:
                # NOTE(review): `self._pkuseg_install_msg` is not defined
                # anywhere in this class — this line looks like it would raise
                # AttributeError if reached; confirm against the base class /
                # consider using _PKUSEG_INSTALL_MSG as load_pkuseg_model does.
                if self.segmenter == Segmenter.pkuseg:
                    raise ImportError(self._pkuseg_install_msg) from None
            if self.segmenter == Segmenter.pkuseg:
                data = srsly.read_msgpack(path)
                (user_dict, do_process, common_words, other_words) = data
                self.pkuseg_seg.preprocesser = spacy_pkuseg.Preprocesser(user_dict)
                self.pkuseg_seg.postprocesser.do_process = do_process
                self.pkuseg_seg.postprocesser.common_words = set(common_words)
                self.pkuseg_seg.postprocesser.other_words = set(other_words)

        serializers = {
            "cfg": lambda p: self._set_config(srsly.read_json(p)),
            "pkuseg_model": lambda p: load_pkuseg_model(p),
            "pkuseg_processors": lambda p: load_pkuseg_processors(p),
        }
        util.from_disk(path, serializers, [])
ChineseTokenizer
python
spyder-ide__spyder
spyder/plugins/workingdirectory/container.py
{ "start": 1074, "end": 1223 }
class ____:
    """String identifiers for the working-directory navigation actions."""

    # History navigation
    Previous = "previous_action"
    Next = "next_action"
    # Directory selection
    Browse = "browse_action"
    Parent = "parent_action"
WorkingDirectoryActions
python
sympy__sympy
sympy/geometry/line.py
{ "start": 75378, "end": 79437 }
class ____(LinearEntity3D, Ray):
    """
    A Ray is a semi-line in the space with a source point and a direction.

    Parameters
    ==========

    p1 : Point3D
        The source of the Ray
    p2 : Point or a direction vector
    direction_ratio: Determines the direction in which the Ray propagates.


    Attributes
    ==========

    source
    xdirection
    ydirection
    zdirection

    See Also
    ========

    sympy.geometry.point.Point3D, Line3D


    Examples
    ========

    >>> from sympy import Point3D, Ray3D
    >>> r = Ray3D(Point3D(2, 3, 4), Point3D(3, 5, 0))
    >>> r
    Ray3D(Point3D(2, 3, 4), Point3D(3, 5, 0))
    >>> r.points
    (Point3D(2, 3, 4), Point3D(3, 5, 0))
    >>> r.source
    Point3D(2, 3, 4)
    >>> r.xdirection
    oo
    >>> r.ydirection
    oo
    >>> r.direction_ratio
    [1, 2, -4]

    """
    def __new__(cls, p1, pt=None, direction_ratio=(), **kwargs):
        # Accept either (LinearEntity3D,), (p1, p2), or (p1, direction_ratio=...)
        # as construction forms; anything else is rejected below.
        if isinstance(p1, LinearEntity3D):
            if pt is not None:
                raise ValueError('If p1 is a LinearEntity, pt must be None')
            p1, pt = p1.args
        else:
            p1 = Point(p1, dim=3)
        if pt is not None and len(direction_ratio) == 0:
            pt = Point(pt, dim=3)
        elif len(direction_ratio) == 3 and pt is None:
            # Build the second point by offsetting the source by the ratios.
            pt = Point3D(p1.x + direction_ratio[0], p1.y + direction_ratio[1],
                         p1.z + direction_ratio[2])
        else:
            raise ValueError(filldedent('''
                A 2nd Point or keyword "direction_ratio" must be used.
            '''))

        return LinearEntity3D.__new__(cls, p1, pt, **kwargs)

    @property
    def xdirection(self):
        """The x direction of the ray.

        Positive infinity if the ray points in the positive x direction,
        negative infinity if the ray points in the negative x direction,
        or 0 if the ray is vertical.

        See Also
        ========

        ydirection

        Examples
        ========

        >>> from sympy import Point3D, Ray3D
        >>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(0, -1, 0)
        >>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
        >>> r1.xdirection
        oo
        >>> r2.xdirection
        0

        """
        if self.p1.x < self.p2.x:
            return S.Infinity
        elif self.p1.x == self.p2.x:
            return S.Zero
        else:
            return S.NegativeInfinity

    @property
    def ydirection(self):
        """The y direction of the ray.

        Positive infinity if the ray points in the positive y direction,
        negative infinity if the ray points in the negative y direction,
        or 0 if the ray is horizontal.

        See Also
        ========

        xdirection

        Examples
        ========

        >>> from sympy import Point3D, Ray3D
        >>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(-1, -1, -1), Point3D(-1, 0, 0)
        >>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
        >>> r1.ydirection
        -oo
        >>> r2.ydirection
        0

        """
        if self.p1.y < self.p2.y:
            return S.Infinity
        elif self.p1.y == self.p2.y:
            return S.Zero
        else:
            return S.NegativeInfinity

    @property
    def zdirection(self):
        """The z direction of the ray.

        Positive infinity if the ray points in the positive z direction,
        negative infinity if the ray points in the negative z direction,
        or 0 if the ray is horizontal.

        See Also
        ========

        xdirection

        Examples
        ========

        >>> from sympy import Point3D, Ray3D
        >>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(-1, -1, -1), Point3D(-1, 0, 0)
        >>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
        >>> r1.zdirection
        -oo
        >>> r2.zdirection
        0

        """
        if self.p1.z < self.p2.z:
            return S.Infinity
        elif self.p1.z == self.p2.z:
            return S.Zero
        else:
            return S.NegativeInfinity
Ray3D
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-milvus/unit_tests/destination_test.py
{ "start": 319, "end": 3831 }
class ____(unittest.TestCase):
    """Unit tests for the Milvus destination connector.

    The embedder factory (`create_from_config`) and the indexer class
    (`MilvusIndexer`) are patched out, so no network access occurs.

    NOTE: `@patch` decorators apply bottom-up, so the decorator closest to
    the function provides the FIRST mock argument — here `MockedEmbedder`
    corresponds to the `create_from_config` patch.
    """

    def setUp(self):
        # Minimal valid connector config covering processing, embedding and
        # indexing sections; parsed once into the pydantic model for reuse.
        self.config = {
            "processing": {"text_fields": ["str_col"], "metadata_fields": [], "chunk_size": 1000},
            "embedding": {"mode": "openai", "openai_key": "mykey"},
            "indexing": {
                "host": "https://notmilvus.com",
                "collection": "test2",
                "auth": {
                    "mode": "token",
                    "token": "mytoken",
                },
                "vector_field": "vector",
                "text_field": "text",
            },
        }
        self.config_model = ConfigModel.parse_obj(self.config)
        self.logger = logging.getLogger("airbyte")

    @patch("destination_milvus.destination.MilvusIndexer")
    @patch("destination_milvus.destination.create_from_config")
    def test_check(self, MockedEmbedder, MockedMilvusIndexer):
        # Both components report no error (None) -> overall check succeeds.
        mock_embedder = Mock()
        mock_indexer = Mock()
        MockedEmbedder.return_value = mock_embedder
        MockedMilvusIndexer.return_value = mock_indexer

        mock_embedder.check.return_value = None
        mock_indexer.check.return_value = None

        destination = DestinationMilvus()
        result = destination.check(self.logger, self.config)

        self.assertEqual(result.status, Status.SUCCEEDED)
        mock_embedder.check.assert_called_once()
        mock_indexer.check.assert_called_once()

    @patch("destination_milvus.destination.MilvusIndexer")
    @patch("destination_milvus.destination.create_from_config")
    def test_check_with_errors(self, MockedEmbedder, MockedMilvusIndexer):
        # Both components fail -> status FAILED and both messages are joined
        # with a newline, embedder first.
        mock_embedder = Mock()
        mock_indexer = Mock()
        MockedEmbedder.return_value = mock_embedder
        MockedMilvusIndexer.return_value = mock_indexer

        embedder_error_message = "Embedder Error"
        indexer_error_message = "Indexer Error"

        mock_embedder.check.return_value = embedder_error_message
        mock_indexer.check.return_value = indexer_error_message

        destination = DestinationMilvus()
        result = destination.check(self.logger, self.config)

        self.assertEqual(result.status, Status.FAILED)
        self.assertEqual(result.message, f"{embedder_error_message}\n{indexer_error_message}")

        mock_embedder.check.assert_called_once()
        mock_indexer.check.assert_called_once()

    @patch("destination_milvus.destination.Writer")
    @patch("destination_milvus.destination.MilvusIndexer")
    @patch("destination_milvus.destination.create_from_config")
    def test_write(self, MockedEmbedder, MockedMilvusIndexer, MockedWriter):
        # Verify the Writer is constructed with the parsed processing config,
        # the mocked indexer/embedder and the expected defaults, and that the
        # catalog/messages are forwarded untouched.
        mock_embedder = Mock()
        mock_indexer = Mock()
        mock_writer = Mock()

        MockedEmbedder.return_value = mock_embedder
        MockedMilvusIndexer.return_value = mock_indexer
        MockedWriter.return_value = mock_writer

        mock_writer.write.return_value = []

        configured_catalog = MagicMock()
        input_messages = []

        destination = DestinationMilvus()
        # write() is a generator; list() drains it so the calls happen.
        list(destination.write(self.config, configured_catalog, input_messages))

        MockedWriter.assert_called_once_with(self.config_model.processing, mock_indexer, mock_embedder, batch_size=128, omit_raw_text=False)
        mock_writer.write.assert_called_once_with(configured_catalog, input_messages)

    def test_spec(self):
        # spec() must return a ConnectorSpecification without any patching.
        destination = DestinationMilvus()
        result = destination.spec()

        self.assertIsInstance(result, ConnectorSpecification)
TestDestinationMilvus
python
pytorch__pytorch
torch/library.py
{ "start": 1776, "end": 67668 }
class ____: """ A class to create libraries that can be used to register new operators or override operators in existing libraries from Python. A user can optionally pass in a dispatch keyname if they only want to register kernels corresponding to only one specific dispatch key. To create a library to override operators in an existing library (with name ns), set the kind to "IMPL". To create a new library (with name ns) to register new operators, set the kind to "DEF". To create a fragment of a possibly existing library to register operators (and bypass the limitation that there is only one library for a given namespace), set the kind to "FRAGMENT". Args: ns: library name kind: "DEF", "IMPL", "FRAGMENT" dispatch_key: PyTorch dispatch key (default: "") """ def __init__(self, ns, kind, dispatch_key=""): from torch.fx.operator_schemas import _SCHEMA_TO_SIGNATURE_CACHE if kind not in ("IMPL", "DEF", "FRAGMENT"): raise ValueError("Unsupported kind: ", kind) if ns in _reserved_namespaces and (kind == "DEF" or kind == "FRAGMENT"): raise ValueError( ns, " is a reserved namespace. Please try creating a library with another name.", ) frame = traceback.extract_stack(limit=2)[0] filename, lineno = frame.filename, frame.lineno self.m: Optional[Any] = torch._C._dispatch_library( kind, ns, dispatch_key, filename, lineno ) self.ns = ns self._op_defs: set[str] = set() self._op_impls: set[str] = set() self._registration_handles: list[torch._library.utils.RegistrationHandle] = [] self.kind = kind self.dispatch_key = dispatch_key # Use a finalizer to setup the "destructor" instead of __del__. # Python __del__ can lead to weird things (globals and locals may already # be gone when __del__ actually gets called!). 
finalizers help the # situation because it lets us capture references and keeps them alive weakref.finalize( self, _del_library, _impls, self._op_impls, _defs, self._op_defs, self._registration_handles, self.m, _SCHEMA_TO_SIGNATURE_CACHE, ) def __repr__(self): return f"Library(kind={self.kind}, ns={self.ns}, dispatch_key={self.dispatch_key})>" def define(self, schema, alias_analysis="", *, tags=()): r"""Defines a new operator and its semantics in the ns namespace. Args: schema: function schema to define a new operator. alias_analysis (optional): Indicates if the aliasing properties of the operator arguments can be inferred from the schema (default behavior) or not ("CONSERVATIVE"). tags (Tag | Sequence[Tag]): one or more torch.Tag to apply to this operator. Tagging an operator changes the operator's behavior under various PyTorch subsystems; please read the docs for the torch.Tag carefully before applying it. Returns: name of the operator as inferred from the schema. Example:: >>> my_lib = Library("mylib", "DEF") >>> my_lib.define("sum(Tensor self) -> Tensor") """ # This is added because we also want to disallow PURE_FUNCTION alias analysis which is a valid # AliasAnalysis type in C++ if alias_analysis not in ["", "FROM_SCHEMA", "CONSERVATIVE"]: raise RuntimeError(f"Invalid alias_analysis type {alias_analysis}") assert self.m is not None if isinstance(tags, torch.Tag): tags = (tags,) name = schema.split("(")[0] packet_name = name.split(".")[0] if "." in name else name has_preexisting_packet = hasattr(torch.ops, self.ns) and hasattr( getattr(torch.ops, self.ns), packet_name ) result = self.m.define(schema, alias_analysis, tuple(tags)) name = schema.split("(")[0] qualname = self.ns + "::" + name # If the OpOverloadPacket exists already, then this means we're adding a # new OpOverload for it. Refresh the packet to include the new OpOverload. 
if has_preexisting_packet: ns = getattr(torch.ops, self.ns) packet = getattr(ns, packet_name) torch._ops._refresh_packet(packet) self._op_defs.add(qualname) _defs.add(qualname) return result def _register_fake(self, op_name, fn, _stacklevel=1, *, allow_override=False): r"""Registers the fake impl for an operator defined in the library.""" source = torch._library.utils.get_source(_stacklevel + 1) frame = sys._getframe(_stacklevel) caller_module = inspect.getmodule(frame) # Can be none if you call register_fake from somewhere there isn't a module # (e.g. __main__) caller_module_name = None if caller_module is None else caller_module.__name__ # TODO(rzou): We're gonna need to stage this change with torchvision, # since torchvision is github first. if caller_module_name is not None and caller_module_name.startswith( "torchvision." ): caller_module_name = None qualname = f"{self.ns}::{op_name}" entry = torch._library.simple_registry.singleton.find(qualname) if caller_module_name is not None: func_to_register = _check_pystubs_once(fn, qualname, caller_module_name) else: func_to_register = fn handle = entry.fake_impl.register( func_to_register, source, lib=self, allow_override=allow_override ) self._registration_handles.append(handle) def _register_torch_dispatch_rule(self, op_name, torch_dispatch_class, fn): r"""Registers a torch_dispatch rule for the given operator and torch_dispatch_class. This allows for open registration to specify the behavior between the operator and the torch_dispatch_class without needing to modify the torch_dispatch_class or the operator directly. The torch_dispatch_class is either a Tensor subclass with `__torch_dispatch__` or a TorchDispatchMode. 
If it is a Tensor subclass, we expect fn to have the following signature: (cls, func: OpOverload, types: Tuple[type, ...], args, kwargs) -> Any If it is a TorchDispatchMode, we expect fn to have the following signature: (mode, func: OpOverload, types: Tuple[type, ...], args, kwargs) -> Any """ qualname = f"{self.ns}::{op_name}" entry = torch._library.simple_registry.singleton.find(qualname) handle = entry.torch_dispatch_rules.register(torch_dispatch_class, fn) self._registration_handles.append(handle) def _impl_with_aoti_compile(self, op_name, dispatch_key=""): r"""Register the operator to use the AOTI-compiled implementation. Args: op_name: operator name (along with the overload) or OpOverload object. dispatch_key: dispatch key that the input function should be registered for. By default, it uses the dispatch key that the library was created with. Example:: >>> my_lib = Library("aten", "IMPL") >>> my_lib._impl_with_aoti_compile("div.Tensor", "CPU") """ if dispatch_key == "": dispatch_key = self.dispatch_key # pyrefly: ignore [bad-argument-type] assert torch.DispatchKeySet(dispatch_key).has(torch._C.DispatchKey.Dense) if isinstance(op_name, str): name = op_name elif isinstance(op_name, OpOverload): name = op_name._schema.name overload_name = op_name._schema.overload_name if overload_name != "": name = name + "." 
+ overload_name else: raise RuntimeError( "_impl_with_aoti_compile should be passed either a name or an OpOverload object " "as the first argument" ) key = self.ns + "/" + name.split("::")[-1] + "/" + dispatch_key if key in _impls: # TODO: in future, add more info about where the existing function is registered (this info is # today already returned by the C++ warning when _impl_with_aoti_compile is called but we error out before that) raise RuntimeError( "This is not allowed since there's already a kernel registered from python overriding {}" "'s behavior for {} dispatch key and {} namespace.".format( name.split("::")[-1], dispatch_key, self.ns ) ) assert self.m is not None impl_fn: Callable = self.m.impl_with_aoti_compile impl_fn(self.ns, name.split("::")[-1], dispatch_key) _impls.add(key) self._op_impls.add(key) def impl( self, op_name, fn, dispatch_key="", *, with_keyset=False, allow_override=False ): r"""Registers the function implementation for an operator defined in the library. Args: op_name: operator name (along with the overload) or OpOverload object. fn: function that's the operator implementation for the input dispatch key or :func:`~fallthrough_kernel` to register a fallthrough. dispatch_key: dispatch key that the input function should be registered for. By default, it uses the dispatch key that the library was created with. with_keyset: flag controlling if the current dispatcher call keyset should be passed as the first argument to :attr:`fn` when calling. This should be used to create the appropriate keyset for redispatch calls. allow_override: Flag controlling if we want to override an existing registered kernel implementation. This is by default off, and will error you're trying to register a kernel to a dispatch key with a kernel already registered. 
Example:: >>> my_lib = Library("aten", "IMPL") >>> def div_cpu(self, other): >>> return self * (1 / other) >>> my_lib.impl("div.Tensor", div_cpu, "CPU") """ if not callable(fn): raise TypeError( f"Input function is required to be a callable but found type {type(fn)}" ) if dispatch_key == "": dispatch_key = self.dispatch_key if isinstance(op_name, str): name = op_name elif isinstance(op_name, OpOverload): name = op_name._schema.name overload_name = op_name._schema.overload_name if overload_name != "": name = name + "." + overload_name else: raise RuntimeError( "impl should be passed either a name or an OpOverload object as the first argument" ) key = self.ns + "/" + name.split("::")[-1] + "/" + dispatch_key if (not allow_override) and key in _impls: # TODO: in future, add more info about where the existing function is registered (this info is # today already returned by the C++ warning when impl is called but we error out before that) raise RuntimeError( "This is not allowed since there's already a kernel registered from python overriding {}" "'s behavior for {} dispatch key and {} namespace.".format( name.split("::")[-1], dispatch_key, self.ns ) ) if dispatch_key == "Meta": dispatcher_op_name = name if "::" not in dispatcher_op_name: dispatcher_op_name = f"{self.ns}::{dispatcher_op_name}" # Internally, we shouldn't be registering meta kernels for any operators that # have CompositeImplicitAutograd kernels. # Instead, we should be letting those decompositions run, and writing meta kernels # only for the base operators. if torch._C._dispatch_has_kernel_for_dispatch_key( dispatcher_op_name, "CompositeImplicitAutograd" ): raise RuntimeError( f"We should not register a meta kernel directly to the operator '{name}'," " because it has a CompositeImplicitAutograd kernel in core." " Instead we should let the operator decompose, and ensure that we have meta kernels" " for the base ops that it decomposes into." 
) assert self.m is not None self.m.impl( name, dispatch_key if dispatch_key != "" else "CompositeImplicitAutograd", fn, with_keyset, ) _impls.add(key) self._op_impls.add(key) def fallback(self, fn, dispatch_key="", *, with_keyset=False): r"""Registers the function implementation as the fallback for the given key. This function only works for a library with global namespace ("_"). Args: fn: function used as fallback for the given dispatch key or :func:`~fallthrough_kernel` to register a fallthrough. dispatch_key: dispatch key that the input function should be registered for. By default, it uses the dispatch key that the library was created with. with_keyset: flag controlling if the current dispatcher call keyset should be passed as the first argument to :attr:`fn` when calling. This should be used to create the appropriate keyset for redispatch calls. Example:: >>> my_lib = Library("_", "IMPL") >>> def fallback_kernel(op, *args, **kwargs): >>> # Handle all autocast ops generically >>> # ... >>> my_lib.fallback(fallback_kernel, "Autocast") """ if dispatch_key == "": dispatch_key = self.dispatch_key if self.ns != "_": raise RuntimeError( f"""Fallback can only be registered using library fragment on the global namespace "_" but it is {self.ns}""" ) assert dispatch_key != "" assert self.m is not None self.m.fallback(dispatch_key, fn, with_keyset) def _register_effectful_op(self, op_name: str, effect: Optional[EffectType]): """ Registers an effect to an operator. This is used to register an op that has side effects that is not capturable by the schema. Args: op_name: operator name (along with the overload) or OpOverload object. effect: The effect of the op. 
""" from torch._higher_order_ops.effects import ( _register_effectful_op as hoo_register_effect, ) handle = hoo_register_effect(op_name, effect) self._registration_handles.append(handle) def _destroy(self): if self.m is not None: self.m.reset() self.m = None for handle in self._registration_handles: handle.destroy() self._registration_handles.clear() global _impls _impls -= self._op_impls for name in self._op_defs: # Delete the cached torch.ops.ns.foo if it was registered. # Otherwise, accessing it leads to a segfault. # It's possible that we only registered an overload in this Library # and another library owns an alive overload. # That's OK - the next time torch.ops.ns.foo gets called, it'll be # recomputed to point at the right collection of overloads. ns, name_with_overload = name.split("::") name = name_with_overload.split(".")[0] if not hasattr(torch.ops, ns): continue namespace = getattr(torch.ops, ns) if not hasattr(namespace, name): continue delattr(namespace, name) namespace._dir.remove(name) def _del_library( captured_impls, op_impls, captured_defs, op_defs, registration_handles, m, schema_to_signature_cache, ): for op_def in op_defs: name = op_def overload_name = "" if "." in op_def: name, overload_name = op_def.split(".") if ( name, overload_name, ) in schema_to_signature_cache: del schema_to_signature_cache[(name, overload_name)] captured_impls -= op_impls captured_defs -= op_defs for handle in registration_handles: handle.destroy() if m is not None: m.reset() @contextlib.contextmanager def _scoped_library(*args, **kwargs): try: lib = Library(*args, **kwargs) yield lib finally: lib._destroy() _keep_alive: list[Library] = [] NAMELESS_SCHEMA = re.compile(r"\(.*\) -> .*") @functools.singledispatch def define(qualname, schema, *, lib=None, tags=()): r"""Defines a new operator. 
In PyTorch, defining an op (short for "operator") is a two step-process: - we need to define the op (by providing an operator name and schema) - we need to implement behavior for how the operator interacts with various PyTorch subsystems, like CPU/CUDA Tensors, Autograd, etc. This entrypoint defines the custom operator (the first step) you must then perform the second step by calling various ``impl_*`` APIs, like :func:`torch.library.impl` or :func:`torch.library.register_fake`. Args: qualname (str): The qualified name for the operator. Should be a string that looks like "namespace::name", e.g. "aten::sin". Operators in PyTorch need a namespace to avoid name collisions; a given operator may only be created once. If you are writing a Python library, we recommend the namespace to be the name of your top-level module. schema (str): The schema of the operator. E.g. "(Tensor x) -> Tensor" for an op that accepts one Tensor and returns one Tensor. It does not contain the operator name (that is passed in ``qualname``). lib (Optional[Library]): If provided, the lifetime of this operator will be tied to the lifetime of the Library object. tags (Tag | Sequence[Tag]): one or more torch.Tag to apply to this operator. Tagging an operator changes the operator's behavior under various PyTorch subsystems; please read the docs for the torch.Tag carefully before applying it. Example:: >>> import torch >>> import numpy as np >>> >>> # Define the operator >>> torch.library.define("mylib::sin", "(Tensor x) -> Tensor") >>> >>> # Add implementations for the operator >>> @torch.library.impl("mylib::sin", "cpu") >>> def f(x): >>> return torch.from_numpy(np.sin(x.numpy())) >>> >>> # Call the new operator from torch.ops. 
>>> x = torch.randn(3) >>> y = torch.ops.mylib.sin(x) >>> assert torch.allclose(y, x.sin()) """ if not isinstance(qualname, str): raise ValueError( f"define(qualname, schema): expected qualname " f"to be instance of str, got {type(qualname)}" ) namespace, name = torch._library.utils.parse_namespace(qualname) if lib is None: lib = Library(namespace, "FRAGMENT") _keep_alive.append(lib) if not NAMELESS_SCHEMA.fullmatch(schema): raise ValueError( f"define(qualname, schema, ...): expected schema " f'to look like e.g. "(Tensor x) -> Tensor" but ' f'got "{schema}"' ) lib.define(name + schema, alias_analysis="", tags=tags) @define.register def _(lib: Library, schema, alias_analysis=""): """The old torch.library.define. We're keeping this around for BC reasons """ def wrap(f): name = lib.define(schema, alias_analysis) lib.impl(name, f) return f return wrap @overload def impl( qualname: str, types: Union[str, Sequence[str]], func: None = None, *, lib: Optional[Library] = None, ) -> Callable[[Callable[..., object]], None]: ... @overload def impl( qualname: str, types: Union[str, Sequence[str]], func: Callable[..., object], *, lib: Optional[Library] = None, ) -> None: ... # Deprecated BC API @overload def impl( lib: Library, name: str, dispatch_key: str = "", ) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]: ... @functools.singledispatch def impl( qualname: str, types: Union[str, Sequence[str]], func: Optional[Callable[_P, _T]] = None, *, lib: Optional[Library] = None, ) -> object: """Register an implementation for a device type for this operator. You may pass "default" for ``types`` to register this implementation as the default implementation for ALL device types. Please only use this if the implementation truly supports all device types; for example, this is true if it is a composition of built-in PyTorch operators. This API may be used as a decorator. 
You can use nested decorators with this API provided they return a function and are placed inside this API (see Example 2). Some valid types are: "cpu", "cuda", "xla", "mps", "ipu", "xpu". Args: qualname (str): Should be a string that looks like "namespace::operator_name". types (str | Sequence[str]): The device types to register an impl to. lib (Optional[Library]): If provided, the lifetime of this registration will be tied to the lifetime of the Library object. Examples: >>> import torch >>> import numpy as np >>> # Example 1: Register function. >>> # Define the operator >>> torch.library.define("mylib::mysin", "(Tensor x) -> Tensor") >>> >>> # Add implementations for the cpu device >>> @torch.library.impl("mylib::mysin", "cpu") >>> def f(x): >>> return torch.from_numpy(np.sin(x.numpy())) >>> >>> x = torch.randn(3) >>> y = torch.ops.mylib.mysin(x) >>> assert torch.allclose(y, x.sin()) >>> >>> # Example 2: Register function with decorator. >>> def custom_decorator(func): >>> def wrapper(*args, **kwargs): >>> return func(*args, **kwargs) + 1 >>> return wrapper >>> >>> # Define the operator >>> torch.library.define("mylib::sin_plus_one", "(Tensor x) -> Tensor") >>> >>> # Add implementations for the operator >>> @torch.library.impl("mylib::sin_plus_one", "cpu") >>> @custom_decorator >>> def f(x): >>> return torch.from_numpy(np.sin(x.numpy())) >>> >>> # Call the new operator from torch.ops. >>> x = torch.randn(3) >>> >>> y1 = torch.ops.mylib.sin_plus_one(x) >>> y2 = torch.sin(x) + 1 >>> assert torch.allclose(y1, y2) """ return _impl(qualname, types, func, lib=lib, disable_dynamo=False) if not TYPE_CHECKING: @impl.register def _( lib: Library, name: str, dispatch_key: str = "" ) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]: """Legacy torch.library.impl API. 
Kept around for BC""" def wrap(f: Callable[_P, _T]) -> Callable[_P, _T]: lib.impl(name, f, dispatch_key) return f return wrap @overload def _impl( qualname: str, types: Union[str, Sequence[str]], func: None = None, *, lib: Optional[Library] = None, disable_dynamo: bool = False, ) -> Callable[[Callable[..., object]], None]: ... @overload def _impl( qualname: str, types: Union[str, Sequence[str]], func: Callable[..., object], *, lib: Optional[Library] = None, disable_dynamo: bool = False, ) -> None: ... def _impl( qualname: str, types: Union[str, Sequence[str]], func: Optional[Callable[..., object]] = None, *, lib: Optional[Library] = None, disable_dynamo: bool = False, ) -> Optional[Callable[[Callable[..., object]], None]]: # See impl() if isinstance(types, str): types = (types,) keys = set({}) for typ in types: is_dispatch_key = torch._C._parse_dispatch_key(typ) if is_dispatch_key: # We also support passing a DispatchKey to impl. Please prefer using # the higher-level torch.library APIs and only pass DispatchKey to # torch.library.impl with caution (or even better, don't use this # option and file an issue on GitHub for what you need). # We don't advertise this to users because # it is very easy to shoot yourself in the foot. 
keys.add(typ) else: keys.add(_device_type_to_key(typ)) def register_(func: Callable[..., object]) -> None: namespace, _ = torch._library.utils.parse_namespace(qualname) if lib is None: use_lib = Library(namespace, "FRAGMENT") _keep_alive.append(use_lib) else: use_lib = lib if disable_dynamo: @torch._disable_dynamo def func_no_dynamo(*args, **kwargs): return func(*args, **kwargs) for key in keys: use_lib.impl(qualname, func_no_dynamo, key) else: for key in keys: use_lib.impl(qualname, func, key) if func is None: return register_ else: register_(func) return None def _device_type_to_key(device_type: str) -> str: if device_type == "default": # This is technically not correct, because although all device_type # DispatchKeys are included in CompositeExplicitAutograd, # not everything in CompositeExplicitAutograd is associated with a # device_type. I don't really care that much about the difference. return "CompositeExplicitAutograd" return torch._C._dispatch_key_for_device(device_type) @deprecated( "`torch.library.impl_abstract` was renamed to `torch.library.register_fake`. Please use that " "instead; we will remove `torch.library.impl_abstract` in a future version of PyTorch.", category=FutureWarning, ) def impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1): r"""This API was renamed to :func:`torch.library.register_fake` in PyTorch 2.4. Please use that instead. """ if func is not None: _stacklevel = _stacklevel + 1 return register_fake(qualname, func, lib=lib, _stacklevel=_stacklevel) _op_identifier = Union[ str, "torch._ops.OpOverload", "torch._library.custom_ops.CustomOpDef" ] def register_kernel( op: _op_identifier, device_types: device_types_t, func: Optional[Callable] = None, /, *, lib: Optional[Library] = None, ): """Register an implementation for a device type for this operator. Some valid device_types are: "cpu", "cuda", "xla", "mps", "ipu", "xpu". This API may be used as a decorator. Args: op (str | OpOverload): The operator to register an impl to. 
device_types (None | str | Sequence[str]): The device_types to register an impl to. If None, we will register to all device types -- please only use this option if your implementation is truly device-type-agnostic. func (Callable): The function to register as the implementation for the given device types. lib (Optional[Library]): If provided, the lifetime of this registration Examples:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> import torch >>> from torch import Tensor >>> from torch.library import custom_op >>> import numpy as np >>> >>> # Create a custom op that works on cpu >>> @custom_op("mylib::numpy_sin", mutates_args=(), device_types="cpu") >>> def numpy_sin(x: Tensor) -> Tensor: >>> x_np = x.numpy() >>> y_np = np.sin(x_np) >>> return torch.from_numpy(y_np) >>> >>> # Add implementations for the cuda device >>> @torch.library.register_kernel("mylib::numpy_sin", "cuda") >>> def _(x): >>> x_np = x.cpu().numpy() >>> y_np = np.sin(x_np) >>> return torch.from_numpy(y_np).to(device=x.device) >>> >>> x_cpu = torch.randn(3) >>> x_cuda = x_cpu.cuda() >>> assert torch.allclose(numpy_sin(x_cpu), x_cpu.sin()) >>> assert torch.allclose(numpy_sin(x_cuda), x_cuda.sin()) """ if not isinstance( op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef) ): raise ValueError( f"register_kernel({op}): got unexpected type for op: {type(op)}" ) if isinstance(op, torch._ops.OpOverload): op = op._name opdef = _maybe_get_opdef(op) if opdef is not None: return opdef.register_kernel(device_types, func) assert isinstance(op, str) if device_types is None: device_types = "CompositeExplicitAutograd" return _impl(op, device_types, func, lib=lib, disable_dynamo=True) def register_autocast( op: _op_identifier, device_type: str, cast_inputs: _dtype, /, *, lib: Optional[Library] = None, ): r"""Register an autocast dispatch rule for this custom op. Valid `device_type` include: "cpu" and "cuda". 
Args: op (str | OpOverload): The operator to register an autocast dispatch rule to. device_type(str): Device type to use. 'cuda' or 'cpu'. The type is the same as the `type` attribute of a :class:`torch.device`. Thus, you may obtain the device type of a tensor using `Tensor.device.type`. cast_inputs (:class:`torch.dtype`): When custom op runs in an autocast-enabled region, casts incoming floating-point Tensors to the target dtype (non-floating-point Tensors are not affected), then executes custom op with autocast disabled. lib (Optional[Library]): If provided, the lifetime of this registration Examples:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> import torch >>> from torch import Tensor >>> from torch.library import custom_op >>> >>> # Create a custom op that works on cuda >>> @torch.library.custom_op("mylib::my_sin", mutates_args=()) >>> def my_sin(x: Tensor) -> Tensor: >>> return torch.sin(x) >>> >>> # Register autocast dispatch rule for the cuda device >>> torch.library.register_autocast("mylib::my_sin", "cuda", torch.float16) >>> >>> x = torch.randn(3, dtype=torch.float32, device="cuda") >>> with torch.autocast("cuda", dtype=torch.float16): >>> y = torch.ops.mylib.my_sin(x) >>> assert y.dtype == torch.float16 """ if not isinstance( op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef) ): raise ValueError( f"register_autocast({op}): got unexpected type for op: {type(op)}" ) if device_type not in ["cpu", "cuda"]: raise ValueError(f"Unknown device type: {device_type}") if isinstance(op, torch._ops.OpOverload): op = op._name opdef = _maybe_get_opdef(op) if opdef is not None: return opdef.register_autocast(device_type, cast_inputs) assert isinstance(op, str) qualname = op _op = torch._library.utils.lookup_op(qualname) namespace, opname = torch._library.utils.parse_namespace(qualname) if lib is None: lib = Library(namespace, "FRAGMENT") _keep_alive.append(lib) def _maybe_override_py_impl(op: torch._ops.OpOverload, dispatch_key): def 
inner(kernel): if op.has_kernel_for_dispatch_key(dispatch_key): op.py_kernels.pop(dispatch_key) return op.py_impl(dispatch_key)(kernel) return inner @_maybe_override_py_impl(_op, torch._C.DispatchKey.AutocastCPU) @_maybe_override_py_impl(_op, torch._C.DispatchKey.AutocastCUDA) def _autocast_py_impl(*args, **kwargs): assert len(kwargs) == 0, "Custom ops do not support kwargs yet." autocast_keyset = torch._C.DispatchKeySet( torch._C.DispatchKey.AutocastCPU ) | torch._C.DispatchKeySet(torch._C.DispatchKey.AutocastCUDA) with torch._C._ExcludeDispatchKeyGuard(autocast_keyset): return _op(*_cast(args, device_type, cast_inputs)) def kernel(_, *args, **kwargs): assert len(kwargs) == 0, "Custom ops do not support kwargs yet." return _autocast_py_impl(*args, **kwargs) if device_type == "cuda": return lib.impl(opname, kernel, "AutocastCUDA", with_keyset=True) else: # device_type is "cpu" return lib.impl(opname, kernel, "AutocastCPU", with_keyset=True) def register_fake( op: _op_identifier, func: Optional[Callable] = None, /, *, lib: Optional[Library] = None, _stacklevel: int = 1, allow_override: bool = False, ): r"""Register a FakeTensor implementation ("fake impl") for this operator. Also sometimes known as a "meta kernel", "abstract impl". An "FakeTensor implementation" specifies the behavior of this operator on Tensors that carry no data ("FakeTensor"). Given some input Tensors with certain properties (sizes/strides/storage_offset/device), it specifies what the properties of the output Tensors are. The FakeTensor implementation has the same signature as the operator. It is run for both FakeTensors and meta tensors. To write a FakeTensor implementation, assume that all Tensor inputs to the operator are regular CPU/CUDA/Meta tensors, but they do not have storage, and you are trying to return regular CPU/CUDA/Meta tensor(s) as output. 
The FakeTensor implementation must consist of only PyTorch operations (and may not directly access the storage or data of any input or intermediate Tensors). This API may be used as a decorator (see examples). For a detailed guide on custom ops, please see https://pytorch.org/tutorials/advanced/custom_ops_landing_page.html Args: op_name: Operator name (along with the overload) or OpOverload object. func: Fake tensor implementation. lib (Optional[Library]): Library to register the fake tensor to. allow_override: Flag controlling if we want to override an existing registered fake impl. This is by default off, and will error you're trying to register a fake impl to an operator that already has a fake impl. This also only applies if the custom operator was not created via torch.library.custom_op, as overriding and existing fake impl is already allowed. Examples: >>> import torch >>> import numpy as np >>> from torch import Tensor >>> >>> # Example 1: an operator without data-dependent output shape >>> @torch.library.custom_op("mylib::custom_linear", mutates_args=()) >>> def custom_linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor: >>> raise NotImplementedError("Implementation goes here") >>> >>> @torch.library.register_fake("mylib::custom_linear") >>> def _(x, weight, bias): >>> assert x.dim() == 2 >>> assert weight.dim() == 2 >>> assert bias.dim() == 1 >>> assert x.shape[1] == weight.shape[1] >>> assert weight.shape[0] == bias.shape[0] >>> assert x.device == weight.device >>> >>> return (x @ weight.t()) + bias >>> >>> with torch._subclasses.fake_tensor.FakeTensorMode(): >>> x = torch.randn(2, 3) >>> w = torch.randn(3, 3) >>> b = torch.randn(3) >>> y = torch.ops.mylib.custom_linear(x, w, b) >>> >>> assert y.shape == (2, 3) >>> >>> # Example 2: an operator with data-dependent output shape >>> @torch.library.custom_op("mylib::custom_nonzero", mutates_args=()) >>> def custom_nonzero(x: Tensor) -> Tensor: >>> x_np = x.numpy(force=True) >>> res = 
np.stack(np.nonzero(x_np), axis=1) >>> return torch.tensor(res, device=x.device) >>> >>> @torch.library.register_fake("mylib::custom_nonzero") >>> def _(x): >>> # Number of nonzero-elements is data-dependent. >>> # Since we cannot peek at the data in an fake impl, >>> # we use the ctx object to construct a new symint that >>> # represents the data-dependent size. >>> ctx = torch.library.get_ctx() >>> nnz = ctx.new_dynamic_size() >>> shape = [nnz, x.dim()] >>> result = x.new_empty(shape, dtype=torch.int64) >>> return result >>> >>> from torch.fx.experimental.proxy_tensor import make_fx >>> >>> x = torch.tensor([0, 1, 2, 3, 4, 0]) >>> trace = make_fx(torch.ops.mylib.custom_nonzero, tracing_mode="symbolic")(x) >>> trace.print_readable() >>> >>> assert torch.allclose(trace(x), torch.ops.mylib.custom_nonzero(x)) """ if not isinstance( op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef) ): raise ValueError(f"register_fake({op}): got unexpected type for op: {type(op)}") if isinstance(op, torch._ops.OpOverload): op = op._name opdef = _maybe_get_opdef(op) if opdef is not None: if func is None: return opdef.register_fake else: return opdef.register_fake(func) assert isinstance(op, str) stacklevel = _stacklevel def register(func): namespace, op_name = torch._library.utils.parse_namespace(op) if lib is None: use_lib = Library(namespace, "FRAGMENT") _keep_alive.append(use_lib) else: use_lib = lib use_lib._register_fake( op_name, func, _stacklevel=stacklevel + 1, allow_override=allow_override ) return func if func is None: return register else: stacklevel += 1 return register(func) def _register_effectful_op( op: _op_identifier, effect: Optional[EffectType], *, lib: Optional[Library] = None, ) -> None: r""" To specify that an operator has side-effects, we must register an effect type for the operator. This will prevent graph passes in torch.compile from reordering operations with the same effect type. 
Args: op_name: Operator name (along with the overload) or OpOverload object. effect: Effect type to register. None means the operator is not effectful. """ if not isinstance( op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef) ): raise ValueError( f"register_effectful_op({op}): got unexpected type for op: {type(op)}" ) if isinstance(op, torch._ops.OpOverload): op = op._name opdef = _maybe_get_opdef(op) if opdef is not None: opdef.register_effect(effect) assert isinstance(op, str) namespace, _ = torch._library.utils.parse_namespace(op) if lib is None: use_lib = Library(namespace, "FRAGMENT") _keep_alive.append(use_lib) else: use_lib = lib use_lib._register_effectful_op(op, effect) def register_autograd( op: _op_identifier, backward: Callable, /, *, setup_context: Optional[Callable] = None, lib=None, ) -> None: r"""Register a backward formula for this custom op. In order for an operator to work with autograd, you need to register a backward formula: 1. You must tell us how to compute gradients during the backward pass by providing us a "backward" function. 2. If you need any values from the forward to compute gradients, you can use `setup_context` to save values for backward. ``backward`` runs during the backward pass. It accepts ``(ctx, *grads)``: - ``grads`` is one or more gradients. The number of gradients matches the number of outputs of the operator. The ``ctx`` object is `the same ctx object <context_method_mixins>`_ used by :class:`torch.autograd.Function`. The semantics of ``backward_fn`` are the same as :meth:`torch.autograd.Function.backward`. ``setup_context(ctx, inputs, output)`` runs during the forward pass. Please save quantities needed for backward onto the ``ctx`` object via either :meth:`torch.autograd.function.FunctionCtx.save_for_backward` or assigning them as attributes of ``ctx``. 
If your custom op has kwarg-only arguments, we expect the signature of ``setup_context`` to be ``setup_context(ctx, inputs, keyword_only_inputs, output)``. Both ``setup_context_fn`` and ``backward_fn`` must be traceable. That is, they may not directly access :meth:`torch.Tensor.data_ptr` and they must not depend on or mutate global state. If you need a non-traceable backward, you can make it a separate custom_op that you call inside ``backward_fn``. If you need different autograd behavior on different devices, then we recommend creating two different custom operators, one for each device that needs different behavior, and switching between them at runtime. Examples: >>> import torch >>> import numpy as np >>> from torch import Tensor >>> >>> @torch.library.custom_op("mylib::numpy_sin", mutates_args=()) >>> def numpy_sin(x: Tensor) -> Tensor: >>> x_np = x.cpu().numpy() >>> y_np = np.sin(x_np) >>> return torch.from_numpy(y_np).to(device=x.device) >>> >>> def setup_context(ctx, inputs, output) -> Tensor: >>> x, = inputs >>> ctx.save_for_backward(x) >>> >>> def backward(ctx, grad): >>> x, = ctx.saved_tensors >>> return grad * x.cos() >>> >>> torch.library.register_autograd( ... "mylib::numpy_sin", backward, setup_context=setup_context ... ) >>> >>> x = torch.randn(3, requires_grad=True) >>> y = numpy_sin(x) >>> (grad_x,) = torch.autograd.grad(y, x, torch.ones_like(y)) >>> assert torch.allclose(grad_x, x.cos()) >>> >>> # Example with a keyword-only arg >>> @torch.library.custom_op("mylib::numpy_mul", mutates_args=()) >>> def numpy_mul(x: Tensor, *, val: float) -> Tensor: >>> x_np = x.cpu().numpy() >>> y_np = x_np * val >>> return torch.from_numpy(y_np).to(device=x.device) >>> >>> def setup_context(ctx, inputs, keyword_only_inputs, output) -> Tensor: >>> ctx.val = keyword_only_inputs["val"] >>> >>> def backward(ctx, grad): >>> return grad * ctx.val >>> >>> torch.library.register_autograd( ... "mylib::numpy_mul", backward, setup_context=setup_context ... 
) >>> >>> x = torch.randn(3, requires_grad=True) >>> y = numpy_mul(x, val=3.14) >>> (grad_x,) = torch.autograd.grad(y, x, torch.ones_like(y)) >>> assert torch.allclose(grad_x, torch.full_like(x, 3.14)) """ if not isinstance( op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef) ): raise ValueError( f"register_autograd({op}): got unexpected type for op: {type(op)}" ) if isinstance(op, torch._ops.OpOverload): op = op._name opdef = _maybe_get_opdef(op) if opdef is not None: opdef.register_autograd(backward, setup_context=setup_context) return assert isinstance(op, str) qualname = op op = torch._library.utils.lookup_op(qualname) schema = op._schema if not _library.utils.is_functional_schema(schema): raise RuntimeError( f"Cannot register autograd formula for non-functional operator " f"{op} with schema {schema}. Please create " f"a functional operator and register an autograd formula for that." ) if _library.utils.has_kwarg_only_tensors(schema): raise NotImplementedError( f"register_autograd with kwarg-only Tensor args. In the original " f"definition of the op, please make your tensors not kwarg-only. " f"Got: {schema}" ) info = _library.autograd.Info(backward, setup_context) autograd_kernel = _library.autograd.make_autograd_impl(op, info) namespace, opname = torch._library.utils.parse_namespace(qualname) if lib is None: lib = Library(namespace, "FRAGMENT") _keep_alive.append(lib) lib.impl(opname, autograd_kernel, "Autograd", with_keyset=True) def register_torch_dispatch( op: _op_identifier, torch_dispatch_class: Any, func: Optional[Callable] = None, /, *, lib: Optional[Library] = None, ): r"""Registers a torch_dispatch rule for the given operator and ``torch_dispatch_class``. This allows for open registration to specify the behavior between the operator and the ``torch_dispatch_class`` without needing to modify the ``torch_dispatch_class`` or the operator directly. 
The ``torch_dispatch_class`` is either a Tensor subclass with ``__torch_dispatch__`` or a TorchDispatchMode. If it is a Tensor subclass, we expect ``func`` to have the following signature: ``(cls, func: OpOverload, types: Tuple[type, ...], args, kwargs) -> Any`` If it is a TorchDispatchMode, we expect ``func`` to have the following signature: ``(mode, func: OpOverload, types: Tuple[type, ...], args, kwargs) -> Any`` ``args`` and ``kwargs`` will have been normalized the same way they are in ``__torch_dispatch__`` (see :ref:`torch-dispatch-calling-convention`). Examples: >>> import torch >>> >>> @torch.library.custom_op("mylib::foo", mutates_args={}) >>> def foo(x: torch.Tensor) -> torch.Tensor: >>> return x.clone() >>> >>> class MyMode(torch.utils._python_dispatch.TorchDispatchMode): >>> def __torch_dispatch__(self, func, types, args=(), kwargs=None): >>> return func(*args, **kwargs) >>> >>> @torch.library.register_torch_dispatch("mylib::foo", MyMode) >>> def _(mode, func, types, args, kwargs): >>> x, = args >>> return x + 1 >>> >>> x = torch.randn(3) >>> y = foo(x) >>> assert torch.allclose(y, x) >>> >>> with MyMode(): >>> y = foo(x) >>> assert torch.allclose(y, x + 1) """ if not isinstance( op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef) ): raise ValueError( f"register_torch_dispatch({op}): got unexpected type for op: {type(op)}" ) if isinstance(op, torch._ops.OpOverload): op = op._name opdef = _maybe_get_opdef(op) if opdef is not None: return opdef.register_torch_dispatch(torch_dispatch_class, func) assert isinstance(op, str) def register(func): namespace, op_name = torch._library.utils.parse_namespace(op) if lib is None: use_lib = Library(namespace, "FRAGMENT") _keep_alive.append(use_lib) else: use_lib = lib use_lib._register_torch_dispatch_rule(op_name, torch_dispatch_class, func) return func if func is None: return register else: return register(func) def register_vmap( op: _op_identifier, func: Optional[Callable] = None, /, *, 
lib=None, ): r"""Register a vmap implementation to support :func:`torch.vmap` for this custom op. This API may be used as a decorator (see examples). In order for an operator to work with :func:`torch.vmap`, you may need to register a vmap implementation in the following signature: ``vmap_func(info, in_dims: Tuple[Optional[int]], *args, **kwargs)``, where ``*args`` and ``**kwargs`` are the arguments and kwargs for ``op``. We do not support kwarg-only Tensor args. It specifies how do we compute the batched version of ``op`` given inputs with an additional dimension (specified by ``in_dims``). For each arg in ``args``, ``in_dims`` has a corresponding ``Optional[int]``. It is ``None`` if the arg is not a Tensor or if the arg is not being vmapped over, otherwise, it is an integer specifying what dimension of the Tensor is being vmapped over. ``info`` is a collection of additional metadata that may be helpful: ``info.batch_size`` specifies the size of the dimension being vmapped over, while ``info.randomness`` is the ``randomness`` option that was passed to :func:`torch.vmap`. The return of the function ``func`` is a tuple of ``(output, out_dims)``. Similar to ``in_dims``, ``out_dims`` should be of the same structure as ``output`` and contain one ``out_dim`` per output that specifies if the output has the vmapped dimension and what index it is in. 
Examples: >>> import torch >>> import numpy as np >>> from torch import Tensor >>> from typing import Tuple >>> >>> def to_numpy(tensor): >>> return tensor.cpu().numpy() >>> >>> lib = torch.library.Library("mylib", "FRAGMENT") >>> @torch.library.custom_op("mylib::numpy_cube", mutates_args=()) >>> def numpy_cube(x: Tensor) -> Tuple[Tensor, Tensor]: >>> x_np = to_numpy(x) >>> dx = torch.tensor(3 * x_np ** 2, device=x.device) >>> return torch.tensor(x_np ** 3, device=x.device), dx >>> >>> def numpy_cube_vmap(info, in_dims, x): >>> result = numpy_cube(x) >>> return result, (in_dims[0], in_dims[0]) >>> >>> torch.library.register_vmap(numpy_cube, numpy_cube_vmap) >>> >>> x = torch.randn(3) >>> torch.vmap(numpy_cube)(x) >>> >>> @torch.library.custom_op("mylib::numpy_mul", mutates_args=()) >>> def numpy_mul(x: Tensor, y: Tensor) -> Tensor: >>> return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device) >>> >>> @torch.library.register_vmap("mylib::numpy_mul") >>> def numpy_mul_vmap(info, in_dims, x, y): >>> x_bdim, y_bdim = in_dims >>> x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1) >>> y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1) >>> result = x * y >>> result = result.movedim(-1, 0) >>> return result, 0 >>> >>> >>> x = torch.randn(3) >>> y = torch.randn(3) >>> torch.vmap(numpy_mul)(x, y) .. note:: The vmap function should aim to preserve the semantics of the entire custom operator. That is, ``grad(vmap(op))`` should be replaceable with a ``grad(map(op))``. If your custom operator has any custom behavior in the backward pass, please keep this in mind. 
""" if not isinstance( op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef) ): raise ValueError(f"register_vmap({op}): got unexpected type for op: {type(op)}") if isinstance(op, torch._ops.OpOverload): op = op._name opdef = _maybe_get_opdef(op) if opdef is not None: return opdef.register_vmap(func) assert isinstance(op, str) qualname = op op = torch._library.utils.lookup_op(qualname) schema = op._schema if _library.utils.has_kwarg_only_tensors(schema): raise NotImplementedError( f"register_vmap with kwarg-only Tensor args. In the original " f"definition of the op, please make your tensors not kwarg-only. " f"Got: {schema}" ) def register(func): nonlocal op, lib namespace, opname = torch._library.utils.parse_namespace(qualname) if lib is None: lib = Library(namespace, "FRAGMENT") _keep_alive.append(lib) from torch._functorch.autograd_function import custom_function_call_vmap_helper from torch._functorch.pyfunctorch import retrieve_current_functorch_interpreter def wrapped_func(keyset, *args, **kwargs): interpreter = retrieve_current_functorch_interpreter() return custom_function_call_vmap_helper( interpreter, func, op, *args, **kwargs ) lib.impl(opname, wrapped_func, "FuncTorchBatched", with_keyset=True) if func is None: return register else: return register(func) # If the op was defined in C++, then we want to make sure there was an # m.set_python_module(module, ...) call and that the module is the # same as the module that called torch.library.register_fake. 
def _check_pystubs_once(func, qualname, actual_module_name): checked = False def inner(*args, **kwargs): nonlocal checked if checked: return func(*args, **kwargs) op = torch._library.utils.lookup_op(qualname) if op._defined_in_python: checked = True return func(*args, **kwargs) maybe_pystub = torch._C._dispatch_pystub( op._schema.name, op._schema.overload_name ) if maybe_pystub is None: if torch._library.utils.requires_set_python_module(): namespace = op.namespace cpp_filename = op._handle.debug() raise RuntimeError( f"Operator '{qualname}' was defined in C++ and has a Python " f"fake impl. In this situation, we require there to also be a " f'companion C++ `m.set_python_module("{actual_module_name}")` ' f"call, but we could not find one. Please add that to " f"to the top of the C++ TORCH_LIBRARY({namespace}, ...) block the " f"operator was registered in ({cpp_filename})" ) else: pystub_module = maybe_pystub[0] if actual_module_name != pystub_module: cpp_filename = op._handle.debug() raise RuntimeError( f"Operator '{qualname}' specified that its python fake impl " f"is in the Python module '{pystub_module}' but it was actually found " f"in '{actual_module_name}'. Please either move the fake impl " f"or correct the m.set_python_module call ({cpp_filename})" ) checked = True return func(*args, **kwargs) return inner # NOTE [ctx inside the fake implementation] # If a user has an operator with data-dependent output shape, then when writing # a fake implementation they must query the current ctx and use methods on the # ctx to construct a new unbacked symint. # # This is done via us setting the global_ctx_getter function every time a fake # implementation is invoked. def get_ctx() -> "torch._library.fake_impl.FakeImplCtx": """get_ctx() returns the current AbstractImplCtx object. Calling ``get_ctx()`` is only valid inside of an fake impl (see :func:`torch.library.register_fake` for more usage details. 
""" return torch._library.fake_impl.global_ctx_getter() def get_kernel( op: _op_identifier, dispatch_key: Union[str, torch.DispatchKey] ) -> torch._C._SafeKernelFunction: """Returns the computed kernel for a given operator and dispatch key. This function retrieves the kernel that would be executed for a given operator and dispatch key combination. The returned SafeKernelFunction can be used to call the kernel in a boxed fashion. The intended use case for this function is to retrieve the original kernel for a given dispatch key and then register another kernel to the same dispatch key that calls into the original kernel for certain cases. Args: op: Operator name (along with the overload) or OpOverload object Can be a string (e.g., "aten::add.Tensor"), an OpOverload, or a CustomOpDef. dispatch_key (str | torch.DispatchKey): The dispatch key to get the kernel for. Can be a string (e.g., "CPU", "CUDA") or a DispatchKey enum value. Returns: torch._C._SafeKernelFunction: A safe kernel function that can be used to call the kernel. Raises: RuntimeError: If the operator does not exist. 
Example: >>> # Get the CPU kernel for torch.add >>> kernel = torch.library.get_kernel("aten::add.Tensor", "CPU") >>> >>> # You can also use DispatchKey enum >>> kernel = torch.library.get_kernel("aten::add.Tensor", torch.DispatchKey.CPU) >>> >>> # Or use an OpOverload directly >>> kernel = torch.library.get_kernel(torch.ops.aten.add.Tensor, "CPU") >>> >>> # Example: Using get_kernel in a custom op with conditional dispatch >>> # Get the original kernel for torch.sin >>> original_sin_kernel = torch.library.get_kernel("aten::sin", "CPU") >>> >>> # If input has negative values, use original sin, otherwise return zeros >>> def conditional_sin_impl(dispatch_keys, x): >>> if (x < 0).any(): >>> return original_sin_kernel.call_boxed(dispatch_keys, x) >>> else: >>> return torch.zeros_like(x) >>> >>> lib = torch.library.Library("aten", "IMPL") >>> # with_keyset=True so the first argument to the impl is the current DispatchKeySet >>> which needs to be the first argument to ``kernel.call_boxed`` >>> lib.impl("sin", conditional_sin_impl, "CPU", with_keyset=True) >>> >>> # Test the conditional behavior >>> x_positive = torch.tensor([1.0, 2.0]) >>> x_mixed = torch.tensor([-1.0, 2.0]) >>> torch.sin(x_positive) tensor([0., 0.]) >>> torch.sin(x_mixed) tensor([-0.8415, 0.9093]) """ if not isinstance(op, (str, torch._ops.OpOverload)): raise ValueError(f"get_kernel({op}): got unexpected type for op: {type(op)}") if isinstance(op, torch._ops.OpOverload): op = op._name if isinstance(dispatch_key, str): try: dispatch_key = torch._C.DispatchKey.__members__[dispatch_key] except KeyError: raise ValueError(f"Invalid dispatch key: {dispatch_key}") from None return torch._C._dispatch_get_computed_kernel_for_dispatch_key(op, dispatch_key) _OPCHECK_DEFAULT_UTILS = ( "test_schema", "test_autograd_registration", "test_faketensor", "test_aot_dispatch_dynamic", ) def opcheck( op: Union[torch._ops.OpOverload, torch._ops.OpOverloadPacket, CustomOpDef], args: tuple[Any, ...], kwargs: Optional[dict[str, 
Any]] = None, *, test_utils: Union[str, Sequence[str]] = _OPCHECK_DEFAULT_UTILS, raise_exception: bool = True, atol=None, rtol=None, ) -> dict[str, str]: """Given an operator and some sample arguments, tests if the operator is registered correctly. That is, when you use the torch.library/TORCH_LIBRARY APIs to create a custom op, you specified metadata (e.g. mutability info) about the custom op and these APIs require that the functions you pass them satisfy certain properties (e.g. no data pointer access in the fake/meta/abstract kernel) ``opcheck`` tests these metadata and properties. Concretely, we test the following: - test_schema: If the schema matches the implementation of the operator. For example: if the schema specifies a Tensor is mutated, then we check the implementation mutates the Tensor. If the schema specifies that we return a new Tensor, then we check that the implementation returns a new Tensor (instead of an existing one or a view of an existing one). - test_autograd_registration: If the operator supports training (autograd): we check that its autograd formula is registered via torch.library.register_autograd or a manual registration to one or more DispatchKey::Autograd keys. Any other DispatchKey-based registrations may lead to undefined behavior. - test_faketensor: If the operator has a FakeTensor kernel (and if it is correct). The FakeTensor kernel is necessary ( but not sufficient) for the operator to work with PyTorch compilation APIs (torch.compile/export/FX). We check that a FakeTensor kernel (also sometimes known as a meta kernel) was registered for the operator and that it is correct. This test takes the result of running the operator on real tensors and the result of running the operator on FakeTensors and checks that they have the same Tensor metadata (sizes/strides/dtype/device/etc). - test_aot_dispatch_dynamic: If the operator has correct behavior with PyTorch compilation APIs (torch.compile/export/FX). 
This checks that the outputs (and gradients, if applicable) are the same under eager-mode PyTorch and torch.compile. This test is a superset of ``test_faketensor`` and is an e2e test; other things it tests are that the operator supports functionalization and that the backward pass (if it exists) also supports FakeTensor and functionalization. For best results, please call ``opcheck`` multiple times with a representative set of inputs. If your operator supports autograd, please use ``opcheck`` with inputs with ``requires_grad = True``; if your operator supports multiple devices (e.g. CPU and CUDA), please use ``opcheck`` with inputs on all supported devices. Args: op: The operator. Must either be a function decorated with :func:`torch.library.custom_op` or an OpOverload/OpOverloadPacket found in torch.ops.* (e.g. torch.ops.aten.sin, torch.ops.mylib.foo) args: The args to the operator kwargs: The kwargs to the operator test_utils: Tests that we should run. Default: all of them. Example: ("test_schema", "test_faketensor") raise_exception: If we should raise an exception on the first error. If False, we will return a dict with information on if each test passed or not. rtol (Optional[float]): Relative tolerance for floating point comparisons. If specified ``atol`` must also be specified. If omitted, default values based on the ``dtype`` are selected (see the table in :func:`torch.testing.assert_close`). atol (Optional[float]): Absolute tolerance for floating point comparisons. If specified ``rtol`` must also be specified. If omitted, default values based on the ``dtype`` are selected (see the table in :func:`torch.testing.assert_close`). .. warning:: opcheck and :func:`torch.autograd.gradcheck` test different things; opcheck tests if your usage of torch.library APIs is correct while :func:`torch.autograd.gradcheck` tests if your autograd formula is mathematically correct. Use both to test custom ops that support gradient computation. 
Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> @torch.library.custom_op("mylib::numpy_mul", mutates_args=()) >>> def numpy_mul(x: Tensor, y: float) -> Tensor: >>> x_np = x.numpy(force=True) >>> z_np = x_np * y >>> return torch.from_numpy(z_np).to(x.device) >>> >>> @numpy_mul.register_fake >>> def _(x, y): >>> return torch.empty_like(x) >>> >>> def setup_context(ctx, inputs, output): >>> y, = inputs >>> ctx.y = y >>> >>> def backward(ctx, grad): >>> return grad * ctx.y, None >>> >>> numpy_mul.register_autograd(backward, setup_context=setup_context) >>> >>> sample_inputs = [ >>> (torch.randn(3), 3.14), >>> (torch.randn(2, 3, device='cuda'), 2.718), >>> (torch.randn(1, 10, requires_grad=True), 1.234), >>> (torch.randn(64, 64, device='cuda', requires_grad=True), 90.18), >>> ] >>> >>> for args in sample_inputs: >>> torch.library.opcheck(numpy_mul, args) """ import torch.testing._internal.optests as optests return optests.opcheck( op, args, kwargs, test_utils=test_utils, raise_exception=raise_exception, rtol=rtol, atol=atol, )
Library
python
apache__airflow
airflow-core/src/airflow/cli/cli_parser.py
{ "start": 3563, "end": 4577 }
class ____(RichHelpFormatter): """ Custom help formatter to display help message. It displays simple commands and groups of commands in separate sections. """ def _iter_indented_subactions(self, action: Action): if isinstance(action, argparse._SubParsersAction): self._indent() subactions = action._get_subactions() action_subcommands, group_subcommands = partition( lambda d: isinstance(ALL_COMMANDS_DICT[d.dest], GroupCommand), subactions ) yield Action([], f"\n{' ':{self._current_indent}}Groups", nargs=0) self._indent() yield from group_subcommands self._dedent() yield Action([], f"\n{' ':{self._current_indent}}Commands:", nargs=0) self._indent() yield from action_subcommands self._dedent() self._dedent() else: yield from super()._iter_indented_subactions(action)
AirflowHelpFormatter
python
coleifer__peewee
tests/regressions.py
{ "start": 9348, "end": 9408 }
class ____(TestModel): id = IntegerField(primary_key=True)
A
python
spack__spack
.github/workflows/bin/format-rst.py
{ "start": 2257, "end": 2521 }
class ____(Warning): def __init__(self, path: str, line: int, message: str, diff: str): super().__init__(path, line, f"{message}\n{diff}") def __str__(self) -> str: return _warning(f"{self.path}:{self.line}: {self.message}")
CodeBlockWarning
python
pytorch__pytorch
test/higher_order_ops/test_invoke_quant.py
{ "start": 3769, "end": 3838 }
class ____(TestInvokeQuant): backend = "eager"
TestInvokeQuantEager
python
dask__distributed
distributed/comm/tcp.py
{ "start": 25492, "end": 26285 }
class ____(Backend): # I/O def get_connector(self): return self._connector_class() def get_listener(self, loc, handle_comm, deserialize, **connection_args): return self._listener_class(loc, handle_comm, deserialize, **connection_args) # Address handling def get_address_host(self, loc): return parse_host_port(loc)[0] def get_address_host_port(self, loc): return parse_host_port(loc) def resolve_address(self, loc): host, port = parse_host_port(loc) return unparse_host_port(ensure_ip(host), port) def get_local_address_for(self, loc): host, port = parse_host_port(loc) host = ensure_ip(host) local_host = get_ip(host) return unparse_host_port(local_host, None)
BaseTCPBackend
python
gevent__gevent
benchmarks/bench_local.py
{ "start": 266, "end": 301 }
class ____(glocal): pass
GLocalSub
python
tensorflow__tensorflow
tensorflow/python/training/evaluation.py
{ "start": 2677, "end": 4664 }
class ____(session_run_hook.SessionRunHook): """Run hook used by the evaluation routines to run the `eval_ops` N times.""" def __init__(self, num_evals, steps_per_run=1): """Constructs the run hook. Args: num_evals: The number of evaluations to run for. if set to None, will iterate the dataset until all inputs are exhausted. steps_per_run: Number of steps executed per run call. """ self._num_evals = num_evals self._evals_completed = None self._steps_per_run_initial_value = steps_per_run def _set_evals_completed_tensor(self, updated_eval_step): self._evals_completed = updated_eval_step def begin(self): self._steps_per_run_variable = \ basic_session_run_hooks.get_or_create_steps_per_run_variable() def after_create_session(self, session, coord): # Update number of steps to run in the first run call if self._num_evals is None: steps = self._steps_per_run_initial_value else: steps = min(self._steps_per_run_initial_value, self._num_evals) self._steps_per_run_variable.load(steps, session=session) def before_run(self, run_context): return session_run_hook.SessionRunArgs( {'evals_completed': self._evals_completed}) def after_run(self, run_context, run_values): evals_completed = run_values.results['evals_completed'] # Update number of steps to run in the next iteration if self._num_evals is None: steps = self._steps_per_run_initial_value else: steps = min(self._num_evals - evals_completed, self._steps_per_run_initial_value) self._steps_per_run_variable.load(steps, session=run_context.session) if self._num_evals is None: logging.info('Evaluation [%d]', evals_completed) else: logging.info('Evaluation [%d/%d]', evals_completed, self._num_evals) if self._num_evals is not None and evals_completed >= self._num_evals: run_context.request_stop()
_MultiStepStopAfterNEvalsHook
python
matplotlib__matplotlib
lib/matplotlib/backends/backend_ps.py
{ "start": 31557, "end": 31727 }
class ____(Enum): portrait, landscape = range(2) def swap_if_landscape(self, shape): return shape[::-1] if self.name == "landscape" else shape
_Orientation
python
numba__numba
numba/cuda/cudadecl.py
{ "start": 1879, "end": 1961 }
class ____(Cuda_array_decl): key = cuda.shared.array @register
Cuda_shared_array
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeVarTuple10.py
{ "start": 492, "end": 1674 }
class ____(Generic[DType, Unpack[Shape]]): def __abs__(self) -> Array[DType, Unpack[Shape]]: ... def __add__( self, other: Array[DType, Unpack[Shape]] ) -> Array[DType, Unpack[Shape]]: ... def process_batch_channels( x: Array[Batch, Unpack[tuple[Any, ...]], Channels], ) -> None: ... def expect_variadic_array1(x: Array[Batch, Unpack[Shape]]) -> tuple[Unpack[Shape]]: ... def expect_variadic_array2(x: Array[Batch, Unpack[tuple[Any, ...]]]) -> None: ... def expect_precise_array(x: Array[Batch, Height, Width, Channels]) -> None: ... def func1(x: Array[Batch, Height, Width, Channels]): process_batch_channels(x) expect_precise_array(x) def func2(y: Array[Batch, Channels]): process_batch_channels(y) # This should generate an error because the type args don't match. expect_precise_array(y) def func3(z: Array[Batch]): # This should generate an error because Channels is missing process_batch_channels(z) def func4(y: Array[Any, Unpack[tuple[Any, ...]]]): reveal_type(y, expected_text="Array[Any, *tuple[Any, ...]]") expect_variadic_array1(y) expect_variadic_array2(y) expect_precise_array(y)
Array
python
django__django
tests/generic_views/test_edit.py
{ "start": 8964, "end": 14887 }
class ____(TestCase): @classmethod def setUpTestData(cls): cls.author = Author.objects.create( pk=1, # Required for OneAuthorUpdate. name="Randall Munroe", slug="randall-munroe", ) def test_update_post(self): res = self.client.get("/edit/author/%d/update/" % self.author.pk) self.assertEqual(res.status_code, 200) self.assertIsInstance(res.context["form"], forms.ModelForm) self.assertEqual(res.context["object"], self.author) self.assertEqual(res.context["author"], self.author) self.assertTemplateUsed(res, "generic_views/author_form.html") self.assertEqual(res.context["view"].get_form_called_count, 1) # Modification with both POST and PUT (browser compatible) res = self.client.post( "/edit/author/%d/update/" % self.author.pk, {"name": "Randall Munroe (xkcd)", "slug": "randall-munroe"}, ) self.assertEqual(res.status_code, 302) self.assertRedirects(res, "/list/authors/") self.assertQuerySetEqual( Author.objects.values_list("name", flat=True), ["Randall Munroe (xkcd)"] ) def test_update_invalid(self): res = self.client.post( "/edit/author/%d/update/" % self.author.pk, {"name": "A" * 101, "slug": "randall-munroe"}, ) self.assertEqual(res.status_code, 200) self.assertTemplateUsed(res, "generic_views/author_form.html") self.assertEqual(len(res.context["form"].errors), 1) self.assertQuerySetEqual(Author.objects.all(), [self.author]) self.assertEqual(res.context["view"].get_form_called_count, 1) def test_update_with_object_url(self): a = Artist.objects.create(name="Rene Magritte") res = self.client.post( "/edit/artists/%d/update/" % a.pk, {"name": "Rene Magritte"} ) self.assertEqual(res.status_code, 302) self.assertRedirects(res, "/detail/artist/%d/" % a.pk) self.assertQuerySetEqual(Artist.objects.all(), [a]) def test_update_with_redirect(self): res = self.client.post( "/edit/author/%d/update/redirect/" % self.author.pk, {"name": "Randall Munroe (author of xkcd)", "slug": "randall-munroe"}, ) self.assertEqual(res.status_code, 302) self.assertRedirects(res, 
"/edit/authors/create/") self.assertQuerySetEqual( Author.objects.values_list("name", flat=True), ["Randall Munroe (author of xkcd)"], ) def test_update_with_interpolated_redirect(self): res = self.client.post( "/edit/author/%d/update/interpolate_redirect/" % self.author.pk, {"name": "Randall Munroe (author of xkcd)", "slug": "randall-munroe"}, ) self.assertQuerySetEqual( Author.objects.values_list("name", flat=True), ["Randall Munroe (author of xkcd)"], ) self.assertEqual(res.status_code, 302) pk = Author.objects.first().pk self.assertRedirects(res, "/edit/author/%d/update/" % pk) # Also test with escaped chars in URL res = self.client.post( "/edit/author/%d/update/interpolate_redirect_nonascii/" % self.author.pk, {"name": "John Doe", "slug": "john-doe"}, ) self.assertEqual(res.status_code, 302) pk = Author.objects.get(name="John Doe").pk self.assertRedirects(res, "/%C3%A9dit/author/{}/update/".format(pk)) def test_update_with_special_properties(self): res = self.client.get("/edit/author/%d/update/special/" % self.author.pk) self.assertEqual(res.status_code, 200) self.assertIsInstance(res.context["form"], views.AuthorForm) self.assertEqual(res.context["object"], self.author) self.assertEqual(res.context["thingy"], self.author) self.assertNotIn("author", res.context) self.assertTemplateUsed(res, "generic_views/form.html") res = self.client.post( "/edit/author/%d/update/special/" % self.author.pk, {"name": "Randall Munroe (author of xkcd)", "slug": "randall-munroe"}, ) self.assertEqual(res.status_code, 302) self.assertRedirects(res, "/detail/author/%d/" % self.author.pk) self.assertQuerySetEqual( Author.objects.values_list("name", flat=True), ["Randall Munroe (author of xkcd)"], ) def test_update_without_redirect(self): msg = ( "No URL to redirect to. Either provide a url or define a " "get_absolute_url method on the Model." 
) with self.assertRaisesMessage(ImproperlyConfigured, msg): self.client.post( "/edit/author/%d/update/naive/" % self.author.pk, {"name": "Randall Munroe (author of xkcd)", "slug": "randall-munroe"}, ) def test_update_get_object(self): res = self.client.get("/edit/author/update/") self.assertEqual(res.status_code, 200) self.assertIsInstance(res.context["form"], forms.ModelForm) self.assertIsInstance(res.context["view"], View) self.assertEqual(res.context["object"], self.author) self.assertEqual(res.context["author"], self.author) self.assertTemplateUsed(res, "generic_views/author_form.html") # Modification with both POST and PUT (browser compatible) res = self.client.post( "/edit/author/update/", {"name": "Randall Munroe (xkcd)", "slug": "randall-munroe"}, ) self.assertEqual(res.status_code, 302) self.assertRedirects(res, "/list/authors/") self.assertQuerySetEqual( Author.objects.values_list("name", flat=True), ["Randall Munroe (xkcd)"] ) @override_settings(ROOT_URLCONF="generic_views.urls")
UpdateViewTests
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/postgresql/pg8000.py
{ "start": 4620, "end": 4685 }
class ____(_PGNumericCommon, sqltypes.Numeric): pass
_PGNumeric
python
allegroai__clearml
clearml/backend_interface/task/repo/detectors.py
{ "start": 898, "end": 7527 }
class ____(object): """Base class for repository detection""" """ Commands are represented using the result class, where each attribute contains the command used to obtain the value of the same attribute in the actual result. """ _fallback = "_fallback" _remote = "_remote" @classmethod def _get_logger(cls) -> logging.Logger: return get_logger("Repository Detection") @attr.s class Commands(object): """ " Repository information as queried by a detector""" url = attr.ib(default=None, type=list) branch = attr.ib(default=None, type=list) commit = attr.ib(default=None, type=list) root = attr.ib(default=None, type=list) status = attr.ib(default=None, type=list) diff = attr.ib(default=None, type=list) modified = attr.ib(default=None, type=list) # alternative commands branch_fallback = attr.ib(default=None, type=list) diff_fallback = attr.ib(default=None, type=list) # remote commands commit_remote = attr.ib(default=None, type=list) diff_remote = attr.ib(default=None, type=list) diff_fallback_remote = attr.ib(default=None, type=list) def __init__(self, type_name: str, name: str = None) -> None: self.type_name = type_name self.name = name or type_name def _get_commands(self) -> "Detector.Commands": """Returns a RepoInfo instance containing a command for each info attribute""" return self.Commands() def _get_command_output( self, path: str, name: str, command: list, commands: "Detector.Commands" = None, strip: bool = True, ) -> str: """Run a command and return its output""" try: return get_command_output(command, path, strip=strip) except (CalledProcessError, UnicodeDecodeError) as ex: if not name.endswith(self._fallback): fallback_command = attr.asdict(commands or self._get_commands()).get(name + self._fallback) if fallback_command: try: return get_command_output(fallback_command, path, strip=strip) except (CalledProcessError, UnicodeDecodeError): pass self._get_logger().warning("Can't get {} information for {} repo in {}".format(name, self.type_name, path)) # full details 
only in debug self._get_logger().debug( "Can't get {} information for {} repo in {}: {}".format(name, self.type_name, path, str(ex)) ) return "" def _get_info( self, path: str, include_diff: bool = False, diff_from_remote: bool = False, ) -> Result: """ Get repository information. :param path: Path to repository :param include_diff: Whether to include the diff command's output (if available) :param diff_from_remote: Whether to store the remote diff/commit based on the remote commit (not local commit) :return: RepoInfo instance """ path = str(path) commands = self._get_commands() if not include_diff: commands.diff = None # skip the local commands if diff_from_remote and commands: for name, command in attr.asdict(commands).items(): if name.endswith(self._remote) and command: setattr(commands, name[: -len(self._remote)], None) info = Result( **{ name: self._get_command_output(path, name, command, commands=commands, strip=bool(name != "diff")) for name, command in attr.asdict(commands).items() if command and not name.endswith(self._fallback) and not name.endswith(self._remote) } ) if diff_from_remote and commands: for name, command in attr.asdict(commands).items(): if name.endswith(self._remote) and command: setattr(commands, name[: -len(self._remote)], command + [info.branch]) info = attr.assoc( info, **{ name[: -len(self._remote)]: self._get_command_output( path, name[: -len(self._remote)], command + [info.branch], commands=commands, strip=not name.startswith("diff"), ) for name, command in attr.asdict(commands).items() if command and (name.endswith(self._remote) and not name[: -len(self._remote)].endswith(self._fallback)) }, ) # make sure we match the modified with the git remote diff state info.modified = bool(info.diff) return info def _post_process_info(self, info: Result) -> Result: # check if there are uncommitted changes in the current repository return info def get_info( self, path: str, include_diff: bool = False, diff_from_remote: bool = False, ) -> Result: 
""" Get repository information. :param path: Path to repository :param include_diff: Whether to include the diff command's output (if available) :param diff_from_remote: Whether to store the remote diff/commit based on the remote commit (not local commit) :return: RepoInfo instance """ info = self._get_info(path, include_diff, diff_from_remote=diff_from_remote) return self._post_process_info(info) def _is_repo_type(self, script_path: Path) -> bool: try: with open(os.devnull, "wb") as devnull: return ( call( [self.type_name, "status"], stderr=devnull, stdout=devnull, cwd=str(script_path), ) == 0 ) except CalledProcessError: self._get_logger().warning("Can't get {} status".format(self.type_name)) except (OSError, EnvironmentError, IOError): # File not found or can't be executed pass return False def exists(self, script_path: str) -> bool: """ Test whether the given script resides in a repository type represented by this plugin. """ return self._is_repo_type(script_path)
Detector
python
ray-project__ray
python/ray/tests/test_actor_out_of_order.py
{ "start": 1023, "end": 2430 }
class ____: @pytest.fixture(scope="class", autouse=True) def start_ray_cluster(self): ray.init() yield ray.shutdown() def test_options_with_in_order_async_actor_raises_error(self): @ray.remote class Actor: async def method(self): pass with pytest.raises(ValueError): Actor.options(allow_out_of_order_execution=False).remote() def test_remote_with_in_order_concurrent_actor_raises_error(self): class Actor: async def method(self): pass with pytest.raises(ValueError): ray.remote(allow_out_of_order_execution=False)(Actor).remote() def test_options_with_in_order_multi_threaded_actor_raises_error(self): @ray.remote(max_concurrency=2) class Actor: pass with pytest.raises(ValueError): Actor.options(allow_out_of_order_execution=False).remote() def test_remote_with_in_order_multi_threaded_actor_raises_error(self): class Actor: pass with pytest.raises(ValueError): ray.remote(max_concurrency=2, allow_out_of_order_execution=False)( Actor ).remote() if __name__ == "__main__": # Test suite is timing out. Disable on windows for now. sys.exit(pytest.main(["-sv", __file__]))
TestAllowOutOfOrderExecutionValidation
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/event/base.py
{ "start": 2303, "end": 2727 }
class ____(Generic[_ET]): __slots__ = () _instance_cls: Optional[Type[_ET]] def _join(self, other: _DispatchCommon[_ET]) -> _JoinedDispatcher[_ET]: raise NotImplementedError() def __getattr__(self, name: str) -> _InstanceLevelDispatch[_ET]: raise NotImplementedError() @property def _events(self) -> Type[_HasEventsDispatch[_ET]]: raise NotImplementedError()
_DispatchCommon
python
sphinx-doc__sphinx
sphinx/transforms/references.py
{ "start": 366, "end": 886 }
class ____(DanglingReferences): """DanglingReferences transform which does not output info messages.""" def apply(self, **kwargs: Any) -> None: try: reporter = self.document.reporter report_level = reporter.report_level # suppress INFO level messages for a while reporter.report_level = max(reporter.WARNING_LEVEL, reporter.report_level) super().apply() finally: reporter.report_level = report_level
SphinxDanglingReferences
python
PrefectHQ__prefect
tests/server/orchestration/api/test_block_documents.py
{ "start": 29204, "end": 45962 }
class ____: async def test_update_block_document_data(self, session, client, block_schemas): block_document = await models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( name="test-update-data", data=dict(x=1), block_schema_id=block_schemas[1].id, block_type_id=block_schemas[1].block_type_id, ), ) await session.commit() response = await client.patch( f"/block_documents/{block_document.id}", json=BlockDocumentUpdate( data=dict(x=2), ).model_dump(mode="json", exclude_unset=True), ) assert response.status_code == status.HTTP_204_NO_CONTENT updated_block_document = await models.block_documents.read_block_document_by_id( session, block_document_id=block_document.id ) assert updated_block_document.data == dict(x=2) @pytest.mark.parametrize("new_data", [{"x": 4}, {}]) async def test_update_block_document_data_without_merging_existing_data( self, session, client, block_schemas, new_data ): block_document = await models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( name="test-update-data", data=dict(x=1, y=2, z=3), block_schema_id=block_schemas[1].id, block_type_id=block_schemas[1].block_type_id, ), ) await session.commit() response = await client.patch( f"/block_documents/{block_document.id}", json=BlockDocumentUpdate( data=new_data, merge_existing_data=False, ).model_dump(mode="json", exclude_unset=True), ) assert response.status_code == status.HTTP_204_NO_CONTENT updated_block_document = await models.block_documents.read_block_document_by_id( session, block_document_id=block_document.id ) assert updated_block_document.data == new_data async def test_partial_update_block_document_data( self, session, client, block_schemas ): block_document = await models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( name="test-update-data", data=dict(x=1, y=2, z=3), block_schema_id=block_schemas[1].id, 
block_type_id=block_schemas[1].block_type_id, ), ) await session.commit() response = await client.patch( f"/block_documents/{block_document.id}", json=BlockDocumentUpdate( data=dict(y=99), ).model_dump(mode="json", exclude_unset=True), ) assert response.status_code == status.HTTP_204_NO_CONTENT updated_block_document = await models.block_documents.read_block_document_by_id( session, block_document_id=block_document.id ) assert updated_block_document.data == dict(x=1, y=99, z=3) async def test_update_anonymous_block_document_data( self, session, client, block_schemas ): block_document = await models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( data=dict(x=1), block_schema_id=block_schemas[1].id, block_type_id=block_schemas[1].block_type_id, is_anonymous=True, ), ) await session.commit() response = await client.patch( f"/block_documents/{block_document.id}", json=BlockDocumentUpdate( data=dict(x=2), ).model_dump(mode="json", exclude_unset=True), ) assert response.status_code == status.HTTP_204_NO_CONTENT updated_block_document = await models.block_documents.read_block_document_by_id( session, block_document_id=block_document.id ) assert updated_block_document.data == dict(x=2) async def test_update_nested_block_document_data( self, session, client, block_schemas ): inner_block_document = await models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( name="test-update-nested-block", data=dict(x=1), block_schema_id=block_schemas[1].id, block_type_id=block_schemas[1].block_type_id, ), ) outer_block_document = await models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( name="test-update-nested-block", data={ "b": {"$ref": {"block_document_id": inner_block_document.id}}, "z": "zzzzz", }, block_schema_id=block_schemas[3].id, block_type_id=block_schemas[3].block_type_id, ), ) await session.commit() 
block_document_before_update = ( await models.block_documents.read_block_document_by_id( session, block_document_id=outer_block_document.id ) ) assert block_document_before_update.data == { "b": {"x": 1}, "z": "zzzzz", } assert block_document_before_update.block_document_references == { "b": { "block_document": { "id": inner_block_document.id, "name": inner_block_document.name, "block_type": inner_block_document.block_type, "is_anonymous": False, "block_document_references": {}, } } } response = await client.patch( f"/block_documents/{inner_block_document.id}", json=BlockDocumentUpdate( data=dict(x=4), ).model_dump(mode="json", exclude_unset=True), ) assert response.status_code == status.HTTP_204_NO_CONTENT block_document_after_update = ( await models.block_documents.read_block_document_by_id( session, block_document_id=outer_block_document.id ) ) assert block_document_after_update.data == { "b": {"x": 4}, "z": "zzzzz", } assert block_document_after_update.block_document_references == { "b": { "block_document": { "id": inner_block_document.id, "name": inner_block_document.name, "block_type": inner_block_document.block_type, "is_anonymous": False, "block_document_references": {}, } } } async def test_update_nested_block_document_reference( self, session, client, block_schemas ): inner_block_document = await models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( name="test-update-nested-block", data=dict(x=1), block_schema_id=block_schemas[1].id, block_type_id=block_schemas[1].block_type_id, ), ) outer_block_document = await models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( name="test-update-nested-block", data={ "b": {"$ref": {"block_document_id": inner_block_document.id}}, "z": "zzzzz", }, block_schema_id=block_schemas[3].id, block_type_id=block_schemas[3].block_type_id, ), ) await session.commit() block_document_before_update = ( await 
models.block_documents.read_block_document_by_id( session, block_document_id=outer_block_document.id ) ) assert block_document_before_update.data == { "b": {"x": 1}, "z": "zzzzz", } assert block_document_before_update.block_document_references == { "b": { "block_document": { "id": inner_block_document.id, "name": inner_block_document.name, "block_type": inner_block_document.block_type, "is_anonymous": False, "block_document_references": {}, } } } new_inner_block_document = await models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( name="this-is-a-new-inner-block", data=dict(x=1000), block_schema_id=block_schemas[1].id, block_type_id=block_schemas[1].block_type_id, ), ) await session.commit() response = await client.patch( f"/block_documents/{outer_block_document.id}", json=BlockDocumentUpdate( data={ "b": {"$ref": {"block_document_id": new_inner_block_document.id}}, "z": "zzzzz", }, ).model_dump(mode="json", exclude_unset=True), ) assert response.status_code == status.HTTP_204_NO_CONTENT block_document_after_update = ( await models.block_documents.read_block_document_by_id( session, block_document_id=outer_block_document.id ) ) assert block_document_after_update.data == { "b": { "x": 1000, }, "z": "zzzzz", } assert block_document_after_update.block_document_references == { "b": { "block_document": { "id": new_inner_block_document.id, "name": new_inner_block_document.name, "block_type": new_inner_block_document.block_type, "is_anonymous": False, "block_document_references": {}, } } } async def test_update_with_faulty_block_document_reference( self, session, client, block_schemas ): inner_block_document = await models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( name="test-update-nested-block", data=dict(x=1), block_schema_id=block_schemas[1].id, block_type_id=block_schemas[1].block_type_id, ), ) outer_block_document = await 
models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( name="test-update-nested-block", data={ "b": {"$ref": {"block_document_id": inner_block_document.id}}, "z": "zzzzz", }, block_schema_id=block_schemas[3].id, block_type_id=block_schemas[3].block_type_id, ), ) await session.commit() response = await client.patch( f"/block_documents/{outer_block_document.id}", json=BlockDocumentUpdate( data={ "b": {"$ref": {"block_document_id": uuid4()}}, "z": "zzzzz", }, ).model_dump(mode="json", exclude_unset=True), ) assert response.status_code == status.HTTP_409_CONFLICT async def test_update_with_missing_block_document_reference_id( self, session, client, block_schemas ): inner_block_document = await models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( name="test-update-nested-block", data=dict(x=1), block_schema_id=block_schemas[1].id, block_type_id=block_schemas[1].block_type_id, ), ) outer_block_document = await models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( name="test-update-nested-block", data={ "b": {"$ref": {"block_document_id": inner_block_document.id}}, "z": "zzzzz", }, block_schema_id=block_schemas[3].id, block_type_id=block_schemas[3].block_type_id, ), ) await session.commit() response = await client.patch( f"/block_documents/{outer_block_document.id}", json=BlockDocumentUpdate( data={ "b": {"$ref": {}}, "z": "zzzzz", }, ).model_dump(mode="json", exclude_unset=True), ) assert response.status_code == status.HTTP_400_BAD_REQUEST async def test_update_nonsense_block_document(self, client): """Regression test for an issue we observed in Cloud where a client made requests for /block_documents/null""" response = await client.patch( "/block_documents/not-even", json=BlockDocumentUpdate( data={ "b": {"$ref": {}}, "z": "zzzzz", }, ).model_dump(mode="json", exclude_unset=True), ) assert response.status_code == 
status.HTTP_404_NOT_FOUND async def test_update_nested_block_document_reference_through_removing( self, session, client, block_schemas ): inner_block_document = await models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( name="test-update-nested-block", data=dict(x=1), block_schema_id=block_schemas[1].id, block_type_id=block_schemas[1].block_type_id, ), ) outer_block_document = await models.block_documents.create_block_document( session, block_document=schemas.actions.BlockDocumentCreate( name="test-update-nested-block", data={ "b": {"$ref": {"block_document_id": inner_block_document.id}}, "z": "zzzzz", }, block_schema_id=block_schemas[3].id, block_type_id=block_schemas[3].block_type_id, ), ) await session.commit() block_document_before_update = ( await models.block_documents.read_block_document_by_id( session, block_document_id=outer_block_document.id ) ) assert block_document_before_update.data == { "b": {"x": 1}, "z": "zzzzz", } assert block_document_before_update.block_document_references == { "b": { "block_document": { "id": inner_block_document.id, "name": inner_block_document.name, "block_type": inner_block_document.block_type, "is_anonymous": False, "block_document_references": {}, } } } response = await client.patch( f"/block_documents/{outer_block_document.id}", json=BlockDocumentUpdate( data={ "b": {}, # removes block document refs "z": "zzzzz", }, merge_existing_data=False, ).model_dump(mode="json", exclude_unset=True), ) assert response.status_code == status.HTTP_204_NO_CONTENT block_document_after_update = ( await models.block_documents.read_block_document_by_id( session, block_document_id=outer_block_document.id ) ) assert block_document_after_update.data == { "b": {}, "z": "zzzzz", } assert block_document_after_update.block_document_references == {}
TestUpdateBlockDocument
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_reload_repository_location.py
{ "start": 8438, "end": 8977 }
class ____(ReadonlyGraphQLContextTestMatrix): def test_reload_repository_permission_failure(self, graphql_context): result = execute_dagster_graphql( graphql_context, RELOAD_REPOSITORY_LOCATION_QUERY, {"repositoryLocationName": main_repo_location_name()}, ) assert result assert result.data assert result.data["reloadRepositoryLocation"] assert result.data["reloadRepositoryLocation"]["__typename"] == "UnauthorizedError"
TestReloadRepositoriesReadOnly
python
Netflix__metaflow
metaflow/plugins/argo/exit_hooks.py
{ "start": 3751, "end": 4892 }
class ____(Hook): # Warning: terrible hack to workaround a bug in Argo Workflow where the # templates listed above do not execute unless there is an # explicit exit hook. as and when this bug is patched, we should # remove this effectively no-op template. # Note: We use the Http template because changing this to an actual no-op container had the side-effect of # leaving LifecycleHooks in a pending state even when they have finished execution. def __init__( self, url, headers=None, body=None, ): self.template = _Template("exit-hook-hack") http = _HttpSpec("GET").url(url) if headers is not None: for header, value in headers.items(): http.header(header, value) if body is not None: http.body(json.dumps(body)) http.success_condition("true == true") self.template.http(http) self.lifecycle_hooks = [] # add an expressionless lifecycle hook self.lifecycle_hooks.append(_LifecycleHook("exit").template("exit-hook-hack"))
ExitHookHack
python
readthedocs__readthedocs.org
readthedocs/telemetry/models.py
{ "start": 3447, "end": 3735 }
class ____(TimeStampedModel): class Meta: verbose_name_plural = "Build data" indexes = [ # Speeds up `delete_old_build_data` task. models.Index(fields=["created"]), ] data = models.JSONField() objects = BuildDataManager()
BuildData
python
langchain-ai__langchain
libs/langchain_v1/tests/unit_tests/agents/test_response_format.py
{ "start": 1833, "end": 2930 }
class ____(TypedDict): city: str country: str location_json_schema = { "type": "object", "properties": { "city": {"type": "string", "description": "The city name"}, "country": {"type": "string", "description": "The country name"}, }, "title": "location_schema", "required": ["city", "country"], } @tool def get_weather() -> str: """Get the weather.""" return "The weather is sunny and 75°F." @tool def get_location() -> str: """Get the current location.""" return "You are in New York, USA." # Standardized test data WEATHER_DATA = {"temperature": 75.0, "condition": "sunny"} LOCATION_DATA = {"city": "New York", "country": "USA"} # Standardized expected responses EXPECTED_WEATHER_PYDANTIC = WeatherBaseModel(**WEATHER_DATA) EXPECTED_WEATHER_DATACLASS = WeatherDataclass(**WEATHER_DATA) EXPECTED_WEATHER_DICT: WeatherTypedDict = {"temperature": 75.0, "condition": "sunny"} EXPECTED_LOCATION = LocationResponse(**LOCATION_DATA) EXPECTED_LOCATION_DICT: LocationTypedDict = {"city": "New York", "country": "USA"}
LocationTypedDict
python
lepture__authlib
authlib/integrations/requests_client/oauth2_session.py
{ "start": 464, "end": 1133 }
class ____(AuthBase, TokenAuth): """Sign requests for OAuth 2.0, currently only bearer token is supported.""" def ensure_active_token(self): if self.client and not self.client.ensure_active_token(self.token): raise InvalidTokenError() def __call__(self, req): self.ensure_active_token() try: req.url, req.headers, req.body = self.prepare( req.url, req.headers, req.body ) except KeyError as error: description = f"Unsupported token_type: {str(error)}" raise UnsupportedTokenTypeError(description=description) from error return req
OAuth2Auth
python
dagster-io__dagster
python_modules/libraries/dagster-postgres/dagster_postgres/run_storage/run_storage.py
{ "start": 1267, "end": 9757 }
class ____(SqlRunStorage, ConfigurableClass): """Postgres-backed run storage. Users should not directly instantiate this class; it is instantiated by internal machinery when ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file. To use Postgres for all of the components of your instance storage, you can add the following block to your ``dagster.yaml``: .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg.yaml :caption: dagster.yaml :lines: 1-8 :language: YAML If you are configuring the different storage components separately and are specifically configuring your run storage to use Postgres, you can add a block such as the following to your ``dagster.yaml``: .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg-legacy.yaml :caption: dagster.yaml :lines: 1-10 :language: YAML Note that the fields in this config are :py:class:`~dagster.StringSource` and :py:class:`~dagster.IntSource` and can be configured from environment variables. 
""" def __init__( self, postgres_url: str, should_autocreate_tables: bool = True, inst_data: Optional[ConfigurableClassData] = None, ): self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData) self.postgres_url = postgres_url self.should_autocreate_tables = check.bool_param( should_autocreate_tables, "should_autocreate_tables" ) # Default to not holding any connections open to prevent accumulating connections per DagsterInstance self._engine = create_engine( self.postgres_url, isolation_level="AUTOCOMMIT", poolclass=db_pool.NullPool, ) self._index_migration_cache = {} # Stamp and create tables if the main table does not exist (we can't check alembic # revision because alembic config may be shared with other storage classes) if self.should_autocreate_tables: table_names = retry_pg_connection_fn(lambda: db.inspect(self._engine).get_table_names()) if "runs" not in table_names: retry_pg_creation_fn(self._init_db) self.migrate() self.optimize() elif "instance_info" not in table_names: InstanceInfo.create(self._engine) super().__init__() def _init_db(self) -> None: with self.connect() as conn: with conn.begin(): RunStorageSqlMetadata.create_all(conn) # This revision may be shared by any other dagster storage classes using the same DB stamp_alembic_rev(pg_alembic_config(__file__), conn) def optimize_for_webserver( self, statement_timeout: int, pool_recycle: int, max_overflow: int ) -> None: # When running in dagster-webserver, hold an open connection and set statement_timeout kwargs = { "isolation_level": "AUTOCOMMIT", "pool_size": 1, "pool_recycle": pool_recycle, "max_overflow": max_overflow, } existing_options = self._engine.url.query.get("options") if existing_options: kwargs["connect_args"] = {"options": existing_options} self._engine = create_engine(self.postgres_url, **kwargs) event.listen( self._engine, "connect", lambda connection, _: set_pg_statement_timeout(connection, statement_timeout), ) @property def inst_data(self) -> 
Optional[ConfigurableClassData]: return self._inst_data @classmethod def config_type(cls) -> UserConfigSchema: return pg_config() @classmethod def from_config_value( # pyright: ignore[reportIncompatibleMethodOverride] cls, inst_data: Optional[ConfigurableClassData], config_value: PostgresStorageConfig ): return PostgresRunStorage( inst_data=inst_data, postgres_url=pg_url_from_config(config_value), should_autocreate_tables=config_value.get("should_autocreate_tables", True), ) @staticmethod def create_clean_storage( postgres_url: str, should_autocreate_tables: bool = True ) -> "PostgresRunStorage": engine = create_engine( postgres_url, isolation_level="AUTOCOMMIT", poolclass=db_pool.NullPool ) try: RunStorageSqlMetadata.drop_all(engine) finally: engine.dispose() return PostgresRunStorage(postgres_url, should_autocreate_tables) def connect(self) -> ContextManager[Connection]: return create_pg_connection(self._engine) def upgrade(self) -> None: with self.connect() as conn: run_alembic_upgrade(pg_alembic_config(__file__), conn) def has_built_index(self, migration_name: str) -> bool: if migration_name not in self._index_migration_cache: self._index_migration_cache[migration_name] = super().has_built_index(migration_name) return self._index_migration_cache[migration_name] def mark_index_built(self, migration_name: str) -> None: super().mark_index_built(migration_name) if migration_name in self._index_migration_cache: del self._index_migration_cache[migration_name] def add_daemon_heartbeat(self, daemon_heartbeat: DaemonHeartbeat) -> None: with self.connect() as conn: # insert or update if already present, using postgres specific on_conflict conn.execute( db_dialects.postgresql.insert(DaemonHeartbeatsTable) .values( timestamp=datetime_from_timestamp(daemon_heartbeat.timestamp), daemon_type=daemon_heartbeat.daemon_type, daemon_id=daemon_heartbeat.daemon_id, body=serialize_value(daemon_heartbeat), ) .on_conflict_do_update( index_elements=[DaemonHeartbeatsTable.c.daemon_type], 
set_={ "timestamp": datetime_from_timestamp(daemon_heartbeat.timestamp), "daemon_id": daemon_heartbeat.daemon_id, "body": serialize_value(daemon_heartbeat), }, ) .returning( # required because sqlalchemy might by default return the declared primary key, # which might not exist DaemonHeartbeatsTable.c.daemon_type, ) ) def set_cursor_values(self, pairs: Mapping[str, str]) -> None: check.mapping_param(pairs, "pairs", key_type=str, value_type=str) # pg specific on_conflict_do_update insert_stmt = db_dialects.postgresql.insert(KeyValueStoreTable).values( [{"key": k, "value": v} for k, v in pairs.items()] ) upsert_stmt = insert_stmt.on_conflict_do_update( index_elements=[ KeyValueStoreTable.c.key, ], set_={"value": insert_stmt.excluded.value}, ).returning( # required because sqlalchemy might by default return the declared primary key, # which might not exist KeyValueStoreTable.c.key ) with self.connect() as conn: conn.execute(upsert_stmt) def _add_snapshot(self, snapshot_id: str, snapshot_obj, snapshot_type: SnapshotType) -> str: with self.connect() as conn: snapshot_insert = ( db_dialects.postgresql.insert(SnapshotsTable) .values( snapshot_id=snapshot_id, snapshot_body=zlib.compress(serialize_value(snapshot_obj).encode("utf-8")), snapshot_type=snapshot_type.value, ) .on_conflict_do_nothing() ) conn.execute(snapshot_insert) return snapshot_id def alembic_version(self) -> AlembicVersion: alembic_config = pg_alembic_config(__file__) with self.connect() as conn: return check_alembic_revision(alembic_config, conn)
PostgresRunStorage
python
mlflow__mlflow
mlflow/genai/labeling/labeling.py
{ "start": 1097, "end": 7931 }
class ____: """A session for labeling items in the review app. .. note:: This functionality is only available in Databricks. Please run `pip install mlflow[databricks]` to use it. """ def __init__( self, *, name: str, assigned_users: list[str], agent: str | None, label_schemas: list[str], labeling_session_id: str, mlflow_run_id: str, review_app_id: str, experiment_id: str, url: str, enable_multi_turn_chat: bool, custom_inputs: dict[str, Any] | None, ): self._name = name self._assigned_users = assigned_users self._agent = agent self._label_schemas = label_schemas self._labeling_session_id = labeling_session_id self._mlflow_run_id = mlflow_run_id self._review_app_id = review_app_id self._experiment_id = experiment_id self._url = url self._enable_multi_turn_chat = enable_multi_turn_chat self._custom_inputs = custom_inputs @property def name(self) -> str: """The name of the labeling session.""" return self._name @property def assigned_users(self) -> list[str]: """The users assigned to label items in the session.""" return self._assigned_users @property def agent(self) -> str | None: """The agent used to generate responses for the items in the session.""" return self._agent @property def label_schemas(self) -> list[str]: """The label schemas used in the session.""" return self._label_schemas @property def labeling_session_id(self) -> str: """The unique identifier of the labeling session.""" return self._labeling_session_id @property def mlflow_run_id(self) -> str: """The MLflow run ID associated with the session.""" return self._mlflow_run_id @property def review_app_id(self) -> str: """The review app ID associated with the session.""" return self._review_app_id @property def experiment_id(self) -> str: """The experiment ID associated with the session.""" return self._experiment_id @property def url(self) -> str: """The URL of the labeling session in the review app.""" return self._url @property def enable_multi_turn_chat(self) -> bool: """Whether multi-turn chat is 
enabled for the session.""" return self._enable_multi_turn_chat @property def custom_inputs(self) -> dict[str, Any] | None: """Custom inputs used in the session.""" return self._custom_inputs def _get_store(self): """ Get a labeling store instance. This method is defined in order to avoid circular imports. """ from mlflow.genai.labeling.stores import _get_labeling_store return _get_labeling_store() def add_dataset( self, dataset_name: str, record_ids: list[str] | None = None ) -> "LabelingSession": """Add a dataset to the labeling session. .. note:: This functionality is only available in Databricks. Please run `pip install mlflow[databricks]` to use it. Args: dataset_name: The name of the dataset. record_ids: Optional. The individual record ids to be added to the session. If not provided, all records in the dataset will be added. Returns: LabelingSession: The updated labeling session. """ store = self._get_store() return store.add_dataset_to_session(self, dataset_name, record_ids) def add_traces( self, traces: Union[Iterable[Trace], Iterable[str], "pd.DataFrame"], ) -> "LabelingSession": """Add traces to the labeling session. .. note:: This functionality is only available in Databricks. Please run `pip install mlflow[databricks]` to use it. Args: traces: Can be either: a) a pandas DataFrame with a 'trace' column. The 'trace' column should contain either `mlflow.entities.Trace` objects or their json string representations. b) an iterable of `mlflow.entities.Trace` objects. c) an iterable of json string representations of `mlflow.entities.Trace` objects. Returns: LabelingSession: The updated labeling session. 
""" import pandas as pd if isinstance(traces, pd.DataFrame): if "trace" not in traces.columns: raise MlflowException( "traces must have a 'trace' column like the result of mlflow.search_traces()", error_code=INVALID_PARAMETER_VALUE, ) traces = traces["trace"].to_list() trace_list: list[Trace] = [] for trace in traces: if isinstance(trace, str): trace_list.append(Trace.from_json(trace)) elif isinstance(trace, Trace): trace_list.append(trace) elif trace is None: raise MlflowException( "trace cannot be None. Must be mlflow.entities.Trace or its json string " "representation.", error_code=INVALID_PARAMETER_VALUE, ) else: raise MlflowException( f"Expected mlflow.entities.Trace or json string, got {type(trace).__name__}", error_code=INVALID_PARAMETER_VALUE, ) store = self._get_store() return store.add_traces_to_session(self, trace_list) def sync(self, to_dataset: str) -> None: """Sync the traces and expectations from the labeling session to a dataset. .. note:: This functionality is only available in Databricks. Please run `pip install mlflow[databricks]` to use it. Args: to_dataset: The name of the dataset to sync traces and expectations to. """ store = self._get_store() return store.sync_session_expectations(self, to_dataset) def set_assigned_users(self, assigned_users: list[str]) -> "LabelingSession": """Set the assigned users for the labeling session. .. note:: This functionality is only available in Databricks. Please run `pip install mlflow[databricks]` to use it. Args: assigned_users: The list of users to assign to the session. Returns: LabelingSession: The updated labeling session. """ store = self._get_store() return store.set_session_assigned_users(self, assigned_users)
LabelingSession
python
dagster-io__dagster
python_modules/automation/automation_tests/dagster_dev_tests/test_bk_build_status.py
{ "start": 745, "end": 9619 }
class ____: """Test cases for the bk-build-status command.""" def test_successful_build_status_with_build_number(self): """Test successful build status retrieval with explicit build number.""" # Mock job data jobs = [ create_mock_job("Unit Tests", "passed"), create_mock_job("Integration Tests", "running"), create_mock_job("Linting", "failed"), ] mock_response_data = create_mock_buildkite_response(jobs) with patch("automation.dagster_dev.commands.bk_build_status.requests.get") as mock_get: mock_get.return_value = mock_requests_get(mock_response_data) # Set environment variable with patch.dict(os.environ, {"BUILDKITE_API_TOKEN": "test-token"}): from automation.dagster_dev.commands.bk_build_status import bk_build_status runner = CliRunner() result = runner.invoke(bk_build_status, ["12345"]) assert result.exit_code == 0 assert "Build Status Summary for dagster/dagster-dagster #12345" in result.output assert "Passed Jobs (1 total)" in result.output assert "Unit Tests" in result.output assert "Running Jobs (1 total)" in result.output assert "Integration Tests" in result.output assert "Failed Jobs (1 total)" in result.output assert "Linting" in result.output assert "Action Required" in result.output def test_successful_build_status_json_output(self): """Test JSON output format.""" jobs = [ create_mock_job("Unit Tests", "passed", "job-1"), create_mock_job("Integration Tests", "running", "job-2"), ] mock_response_data = create_mock_buildkite_response(jobs) with patch("automation.dagster_dev.commands.bk_build_status.requests.get") as mock_get: mock_get.return_value = mock_requests_get(mock_response_data) with patch.dict(os.environ, {"BUILDKITE_API_TOKEN": "test-token"}): from automation.dagster_dev.commands.bk_build_status import bk_build_status runner = CliRunner() result = runner.invoke(bk_build_status, ["12345", "--json"]) assert result.exit_code == 0 # Parse JSON output output_data = json.loads(result.output) assert output_data["build"]["number"] == "12345" assert 
output_data["summary"]["passed"] == 1 assert output_data["summary"]["running"] == 1 assert output_data["summary"]["failed"] == 0 assert output_data["status"] == "running" assert len(output_data["jobs"]["passed"]) == 1 assert len(output_data["jobs"]["running"]) == 1 def test_build_status_auto_detect_from_pr(self): """Test build status with auto-detected build number from PR.""" jobs = [create_mock_job("Unit Tests", "passed")] mock_response_data = create_mock_buildkite_response(jobs) with ( patch("automation.dagster_dev.commands.bk_build_status.requests.get") as mock_get, patch( "automation.dagster_dev.commands.bk_build_status.get_latest_build_for_pr" ) as mock_get_build, ): mock_get.return_value = mock_requests_get(mock_response_data) mock_get_build.return_value = "54321" with patch.dict(os.environ, {"BUILDKITE_API_TOKEN": "test-token"}): from automation.dagster_dev.commands.bk_build_status import bk_build_status runner = CliRunner() result = runner.invoke(bk_build_status, []) assert result.exit_code == 0 assert "detecting from current PR" in result.output assert "Build Status Summary for dagster/dagster-dagster #54321" in result.output mock_get_build.assert_called_once() def test_missing_buildkite_token(self): """Test error when BUILDKITE_API_TOKEN is not set.""" with patch.dict(os.environ, {}, clear=True): from automation.dagster_dev.commands.bk_build_status import bk_build_status runner = CliRunner() result = runner.invoke(bk_build_status, ["12345"]) assert result.exit_code == 1 assert "BUILDKITE_API_TOKEN environment variable not set" in result.output def test_api_request_failure(self): """Test handling of Buildkite API request failure.""" import requests with patch("automation.dagster_dev.commands.bk_build_status.requests.get") as mock_get: mock_get.side_effect = requests.exceptions.RequestException("Network error") with patch.dict(os.environ, {"BUILDKITE_API_TOKEN": "test-token"}): from automation.dagster_dev.commands.bk_build_status import bk_build_status 
runner = CliRunner() result = runner.invoke(bk_build_status, ["12345"]) assert result.exit_code == 1 assert "Error calling Buildkite API" in result.output def test_api_json_decode_error(self): """Test handling of invalid JSON response.""" with patch("automation.dagster_dev.commands.bk_build_status.requests.get") as mock_get: mock_response = Mock() mock_response.json.side_effect = json.JSONDecodeError("Invalid JSON", "", 0) mock_response.raise_for_status.return_value = None mock_get.return_value = mock_response with patch.dict(os.environ, {"BUILDKITE_API_TOKEN": "test-token"}): from automation.dagster_dev.commands.bk_build_status import bk_build_status runner = CliRunner() result = runner.invoke(bk_build_status, ["12345"]) assert result.exit_code == 1 assert "Error parsing JSON response" in result.output def test_custom_org_and_pipeline(self): """Test with custom organization and pipeline.""" jobs = [create_mock_job("Test Job", "passed")] mock_response_data = create_mock_buildkite_response(jobs) with patch("automation.dagster_dev.commands.bk_build_status.requests.get") as mock_get: mock_get.return_value = mock_requests_get(mock_response_data) with patch.dict(os.environ, {"BUILDKITE_API_TOKEN": "test-token"}): from automation.dagster_dev.commands.bk_build_status import bk_build_status runner = CliRunner() result = runner.invoke( bk_build_status, ["12345", "--org", "myorg", "--pipeline", "mypipeline"] ) assert result.exit_code == 0 assert "Build Status Summary for myorg/mypipeline #12345" in result.output # Verify correct API endpoint was called mock_get.assert_called_once() called_url = mock_get.call_args[0][0] assert "myorg" in called_url assert "mypipeline" in called_url def test_all_jobs_passed(self): """Test successful build with all jobs passed.""" jobs = [ create_mock_job("Unit Tests", "passed"), create_mock_job("Integration Tests", "passed"), ] mock_response_data = create_mock_buildkite_response(jobs) with 
patch("automation.dagster_dev.commands.bk_build_status.requests.get") as mock_get: mock_get.return_value = mock_requests_get(mock_response_data) with patch.dict(os.environ, {"BUILDKITE_API_TOKEN": "test-token"}): from automation.dagster_dev.commands.bk_build_status import bk_build_status runner = CliRunner() result = runner.invoke(bk_build_status, ["12345"]) assert result.exit_code == 0 assert "Build completed successfully!" in result.output assert "(none)" in result.output # For running and failed jobs def test_no_jobs_in_build(self): """Test build with no jobs.""" mock_response_data = create_mock_buildkite_response([]) with patch("automation.dagster_dev.commands.bk_build_status.requests.get") as mock_get: mock_get.return_value = mock_requests_get(mock_response_data) with patch.dict(os.environ, {"BUILDKITE_API_TOKEN": "test-token"}): from automation.dagster_dev.commands.bk_build_status import bk_build_status runner = CliRunner() result = runner.invoke(bk_build_status, ["12345"]) assert result.exit_code == 0 assert "0 total active jobs" in result.output
TestBkBuildStatus
python
huggingface__transformers
tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py
{ "start": 1440, "end": 5167 }
class ____: def __init__( self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2, attn_implementation="eager", ): self.parent = parent self.batch_size = batch_size self.patch_size = patch_size self.max_length = max_length self.num_mel_bins = num_mel_bins self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.frequency_stride = frequency_stride self.time_stride = time_stride self.attn_implementation = attn_implementation # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1 num_patches = frequency_out_dimension * time_out_dimension self.seq_length = num_patches + 2 def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, input_values, labels def get_config(self): return ASTConfig( patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, 
num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, attn_implementation=self.attn_implementation, ) def create_and_check_model(self, config, input_values, labels): model = ASTModel(config=config) model.to(torch_device) model.eval() result = model(input_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_values, labels, ) = config_and_inputs inputs_dict = {"input_values": input_values} return config, inputs_dict @require_torch
ASTModelTester
python
numba__numba
numba/tests/test_debug.py
{ "start": 3636, "end": 4986 }
class ____(FunctionDebugTestBase): def test_dump_bytecode(self): with override_config('DUMP_BYTECODE', True): out = self.compile_simple_nopython() self.check_debug_output(out, ['bytecode']) def test_dump_ir(self): with override_config('DUMP_IR', True): out = self.compile_simple_nopython() self.check_debug_output(out, ['ir']) def test_dump_cfg(self): with override_config('DUMP_CFG', True): out = self.compile_simple_nopython() self.check_debug_output(out, ['cfg']) def test_dump_llvm(self): with override_config('DUMP_LLVM', True): out = self.compile_simple_nopython() self.check_debug_output(out, ['llvm']) def test_dump_func_opt_llvm(self): with override_config('DUMP_FUNC_OPT', True): out = self.compile_simple_nopython() self.check_debug_output(out, ['func_opt_llvm']) def test_dump_optimized_llvm(self): with override_config('DUMP_OPTIMIZED', True): out = self.compile_simple_nopython() self.check_debug_output(out, ['optimized_llvm']) def test_dump_assembly(self): with override_config('DUMP_ASSEMBLY', True): out = self.compile_simple_nopython() self.check_debug_output(out, ['assembly'])
TestFunctionDebugOutput
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_cond_format13.py
{ "start": 315, "end": 1428 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("cond_format04.xlsx") def test_create_file(self): """Test the creation of an XlsxWriter file with conditional formatting.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() format1 = workbook.add_format({"num_format": 2, "dxf_index": 1}) format2 = workbook.add_format({"num_format": "0.000", "dxf_index": 0}) worksheet.write("A1", 10) worksheet.write("A2", 20) worksheet.write("A3", 30) worksheet.write("A4", 40) options = { "type": "cell", "format": format1, "criteria": ">", "value": 2, } worksheet.conditional_format("A1", options) options["criteria"] = "<" options["value"] = 8 options["format"] = format2 worksheet.conditional_format("A2", options) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
docker__docker-py
docker/models/configs.py
{ "start": 537, "end": 1845 }
class ____(Collection): """Configs on the Docker server.""" model = Config def create(self, **kwargs): obj = self.client.api.create_config(**kwargs) obj.setdefault("Spec", {})["Name"] = kwargs.get("name") return self.prepare_model(obj) create.__doc__ = APIClient.create_config.__doc__ def get(self, config_id): """ Get a config. Args: config_id (str): Config ID. Returns: (:py:class:`Config`): The config. Raises: :py:class:`docker.errors.NotFound` If the config does not exist. :py:class:`docker.errors.APIError` If the server returns an error. """ return self.prepare_model(self.client.api.inspect_config(config_id)) def list(self, **kwargs): """ List configs. Similar to the ``docker config ls`` command. Args: filters (dict): Server-side list filtering options. Returns: (list of :py:class:`Config`): The configs. Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ resp = self.client.api.configs(**kwargs) return [self.prepare_model(obj) for obj in resp]
ConfigCollection
python
huggingface__transformers
src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py
{ "start": 6050, "end": 13316 }
class ____(Qwen2_5_VLPreTrainedModel): config: Qwen2_5_VLVisionConfig _no_split_modules = ["Qwen2_5_VLVisionBlock"] def __init__(self, config, *inputs, **kwargs) -> None: super().__init__(config, *inputs, **kwargs) self.spatial_merge_size = config.spatial_merge_size self.patch_size = config.patch_size self.fullatt_block_indexes = config.fullatt_block_indexes self.window_size = config.window_size self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size self.patch_embed = Qwen2_5_VisionPatchEmbed( patch_size=config.patch_size, temporal_patch_size=config.temporal_patch_size, in_channels=config.in_channels, embed_dim=config.hidden_size, ) head_dim = config.hidden_size // config.num_heads self.rotary_pos_emb = Qwen2_5_VisionRotaryEmbedding(head_dim // 2) self.blocks = nn.ModuleList([Qwen2_5_VLVisionBlock(config) for _ in range(config.depth)]) self.merger = Qwen2_5_VLPatchMerger( dim=config.out_hidden_size, context_dim=config.hidden_size, spatial_merge_size=config.spatial_merge_size, ) self.gradient_checkpointing = False def rot_pos_emb(self, grid_thw): pos_ids = [] for t, h, w in grid_thw: hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w) hpos_ids = hpos_ids.reshape( h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size, ) hpos_ids = hpos_ids.permute(0, 2, 1, 3) hpos_ids = hpos_ids.flatten() wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1) wpos_ids = wpos_ids.reshape( h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size, ) wpos_ids = wpos_ids.permute(0, 2, 1, 3) wpos_ids = wpos_ids.flatten() pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)) pos_ids = torch.cat(pos_ids, dim=0) max_grid_size = grid_thw[:, 1:].max() rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size) rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) return rotary_pos_emb def get_window_index(self, grid_thw): window_index: list = [] 
cu_window_seqlens: list = [0] window_index_id = 0 vit_merger_window_size = self.window_size // self.spatial_merge_size // self.patch_size for grid_t, grid_h, grid_w in grid_thw: llm_grid_h, llm_grid_w = ( grid_h // self.spatial_merge_size, grid_w // self.spatial_merge_size, ) index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(grid_t, llm_grid_h, llm_grid_w) pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100) index_padded = index_padded.reshape( grid_t, num_windows_h, vit_merger_window_size, num_windows_w, vit_merger_window_size, ) index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape( grid_t, num_windows_h * num_windows_w, vit_merger_window_size, vit_merger_window_size, ) seqlens = (index_padded != -100).sum([2, 3]).reshape(-1) index_padded = index_padded.reshape(-1) index_new = index_padded[index_padded != -100] window_index.append(index_new + window_index_id) cu_seqlens_tmp = seqlens.cumsum(0) * self.spatial_merge_unit + cu_window_seqlens[-1] cu_window_seqlens.extend(cu_seqlens_tmp.tolist()) window_index_id += (grid_t * llm_grid_h * llm_grid_w).item() window_index = torch.cat(window_index, dim=0) return window_index, cu_window_seqlens def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor: """ Args: hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`): The final hidden states of the model. grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`): The temporal, height and width of feature shape of each image in LLM. Returns: `torch.Tensor`: hidden_states. 
""" hidden_states = self.patch_embed(hidden_states) rotary_pos_emb = self.rot_pos_emb(grid_thw) window_index, cu_window_seqlens = self.get_window_index(grid_thw) cu_window_seqlens = torch.tensor( cu_window_seqlens, device=hidden_states.device, dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, ) cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens) seq_len, _ = hidden_states.size() hidden_states = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) hidden_states = hidden_states[window_index, :, :] hidden_states = hidden_states.reshape(seq_len, -1) rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) rotary_pos_emb = rotary_pos_emb[window_index, :, :] rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1) emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1) position_embeddings = (emb.cos(), emb.sin()) cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum( dim=0, # Select dtype based on the following factors: # - FA2 requires that cu_seqlens_q must have dtype int32 # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw # See https://github.com/huggingface/transformers/pull/34852 for more information dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, ) cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0) for layer_num, blk in enumerate(self.blocks): if layer_num in self.fullatt_block_indexes: cu_seqlens_now = cu_seqlens else: cu_seqlens_now = cu_window_seqlens hidden_states = blk( hidden_states, cu_seqlens=cu_seqlens_now, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.merger(hidden_states) reverse_indices = torch.argsort(window_index) hidden_states = hidden_states[reverse_indices, :] return hidden_states
Qwen2_5_VisionTransformerPretrainedModel
python
mahmoud__boltons
boltons/urlutils.py
{ "start": 14740, "end": 15588 }
class ____: """The ``cachedproperty`` is used similar to :class:`property`, except that the wrapped method is only called once. This is commonly used to implement lazy attributes. After the property has been accessed, the value is stored on the instance itself, using the same name as the cachedproperty. This allows the cache to be cleared with :func:`delattr`, or through manipulating the object's ``__dict__``. """ def __init__(self, func): self.__doc__ = getattr(func, '__doc__') self.func = func def __get__(self, obj, objtype=None): if obj is None: return self value = obj.__dict__[self.func.__name__] = self.func(obj) return value def __repr__(self): cn = self.__class__.__name__ return f'<{cn} func={self.func}>'
cachedproperty
python
tiangolo__fastapi
docs_src/header_param_models/tutorial003_an_py310.py
{ "start": 116, "end": 424 }
class ____(BaseModel): host: str save_data: bool if_modified_since: str | None = None traceparent: str | None = None x_tag: list[str] = [] @app.get("/items/") async def read_items( headers: Annotated[CommonHeaders, Header(convert_underscores=False)], ): return headers
CommonHeaders
python
PrefectHQ__prefect
src/prefect/server/schemas/responses.py
{ "start": 5284, "end": 11575 }
class ____(ORMBaseModel): name: str = Field( default_factory=lambda: generate_slug(2), description=( "The name of the flow run. Defaults to a random slug if not specified." ), examples=["my-flow-run"], ) flow_id: UUID = Field(default=..., description="The id of the flow being run.") state_id: Optional[UUID] = Field( default=None, description="The id of the flow run's current state." ) deployment_id: Optional[UUID] = Field( default=None, description=( "The id of the deployment associated with this flow run, if available." ), ) deployment_version: Optional[str] = Field( default=None, description="The version of the deployment associated with this flow run.", examples=["1.0"], ) work_queue_id: Optional[UUID] = Field( default=None, description="The id of the run's work pool queue." ) work_queue_name: Optional[str] = Field( default=None, description="The work queue that handled this flow run." ) flow_version: Optional[str] = Field( default=None, description="The version of the flow executed in this flow run.", examples=["1.0"], ) parameters: Dict[str, Any] = Field( default_factory=dict, description="Parameters for the flow run." ) idempotency_key: Optional[str] = Field( default=None, description=( "An optional idempotency key for the flow run. Used to ensure the same flow" " run is not created multiple times." ), ) context: Dict[str, Any] = Field( default_factory=dict, description="Additional context for the flow run.", examples=[{"my_var": "my_val"}], ) empirical_policy: FlowRunPolicy = Field( default_factory=FlowRunPolicy, ) tags: List[str] = Field( default_factory=list, description="A list of tags on the flow run", examples=[["tag-1", "tag-2"]], ) labels: KeyValueLabelsField parent_task_run_id: Optional[UUID] = Field( default=None, description=( "If the flow run is a subflow, the id of the 'dummy' task in the parent" " flow used to track subflow state." 
), ) state_type: Optional[schemas.states.StateType] = Field( default=None, description="The type of the current flow run state." ) state_name: Optional[str] = Field( default=None, description="The name of the current flow run state." ) run_count: int = Field( default=0, description="The number of times the flow run was executed." ) expected_start_time: Optional[DateTime] = Field( default=None, description="The flow run's expected start time.", ) next_scheduled_start_time: Optional[DateTime] = Field( default=None, description="The next time the flow run is scheduled to start.", ) start_time: Optional[DateTime] = Field( default=None, description="The actual start time." ) end_time: Optional[DateTime] = Field( default=None, description="The actual end time." ) total_run_time: datetime.timedelta = Field( default=datetime.timedelta(0), description=( "Total run time. If the flow run was executed multiple times, the time of" " each run will be summed." ), ) estimated_run_time: datetime.timedelta = Field( default=datetime.timedelta(0), description="A real-time estimate of the total run time.", ) estimated_start_time_delta: datetime.timedelta = Field( default=datetime.timedelta(0), description="The difference between actual and expected start time.", ) auto_scheduled: bool = Field( default=False, description="Whether or not the flow run was automatically scheduled.", ) infrastructure_document_id: Optional[UUID] = Field( default=None, description="The block document defining infrastructure to use this flow run.", ) infrastructure_pid: Optional[str] = Field( default=None, description="The id of the flow run as returned by an infrastructure block.", ) created_by: Optional[CreatedBy] = Field( default=None, description="Optional information about the creator of this flow run.", ) work_pool_id: Optional[UUID] = Field( default=None, description="The id of the flow run's work pool.", ) work_pool_name: Optional[str] = Field( default=None, description="The name of the flow run's work 
pool.", examples=["my-work-pool"], ) state: Optional[schemas.states.State] = Field( default=None, description="The current state of the flow run." ) job_variables: Optional[Dict[str, Any]] = Field( default=None, description="Variables used as overrides in the base job template", ) @classmethod def model_validate( cls: Type[Self], obj: Any, *, strict: Optional[bool] = None, from_attributes: Optional[bool] = None, context: Optional[dict[str, Any]] = None, ) -> Self: response = super().model_validate(obj) if from_attributes: if obj.work_queue: response.work_queue_id = obj.work_queue.id response.work_queue_name = obj.work_queue.name if obj.work_queue.work_pool: response.work_pool_id = obj.work_queue.work_pool.id response.work_pool_name = obj.work_queue.work_pool.name return response def __eq__(self, other: Any) -> bool: """ Check for "equality" to another flow run schema Estimates times are rolling and will always change with repeated queries for a flow run so we ignore them during equality checks. """ if isinstance(other, FlowRunResponse): exclude_fields = {"estimated_run_time", "estimated_start_time_delta"} return self.model_dump(exclude=exclude_fields) == other.model_dump( exclude=exclude_fields ) return super().__eq__(other)
FlowRunResponse
python
pydata__xarray
xarray/tests/test_dask.py
{ "start": 11573, "end": 29756 }
class ____(DaskTestCase): def assertLazyAndIdentical(self, expected, actual): self.assertLazyAnd(expected, actual, assert_identical) def assertLazyAndAllClose(self, expected, actual): self.assertLazyAnd(expected, actual, assert_allclose) def assertLazyAndEqual(self, expected, actual): self.assertLazyAnd(expected, actual, assert_equal) @pytest.fixture(autouse=True) def setUp(self): self.values = np.random.randn(4, 6) self.data = da.from_array(self.values, chunks=(2, 2)) self.eager_array = DataArray( self.values, coords={"x": range(4)}, dims=("x", "y"), name="foo" ) self.lazy_array = DataArray( self.data, coords={"x": range(4)}, dims=("x", "y"), name="foo" ) def test_chunk(self) -> None: test_cases: list[ tuple[int | str | dict[str, Any], tuple[tuple[int, ...], ...]] ] = [ ({}, ((2, 2), (2, 2, 2))), (3, ((3, 1), (3, 3))), ({"x": 3, "y": 3}, ((3, 1), (3, 3))), ({"x": 3}, ((3, 1), (2, 2, 2))), ({"x": (3, 1)}, ((3, 1), (2, 2, 2))), ({"x": "16B"}, ((1, 1, 1, 1), (2, 2, 2))), ("16B", ((1, 1, 1, 1), (1,) * 6)), ("16MB", ((4,), (6,))), ] for chunks, expected in test_cases: # Test DataArray rechunked = self.lazy_array.chunk(chunks) assert rechunked.chunks == expected self.assertLazyAndIdentical(self.eager_array, rechunked) expected_chunksizes = dict(zip(self.lazy_array.dims, expected, strict=True)) assert rechunked.chunksizes == expected_chunksizes # Test Dataset lazy_dataset = self.lazy_array.to_dataset() eager_dataset = self.eager_array.to_dataset() expected_chunksizes = dict(zip(lazy_dataset.dims, expected, strict=True)) rechunked = lazy_dataset.chunk(chunks) # type: ignore[assignment] # Dataset.chunks has a different return type to DataArray.chunks - see issue #5843 assert rechunked.chunks == expected_chunksizes self.assertLazyAndIdentical(eager_dataset, rechunked) assert rechunked.chunksizes == expected_chunksizes def test_rechunk(self): chunked = self.eager_array.chunk({"x": 2}).chunk({"y": 2}) assert chunked.chunks == ((2,) * 2, (2,) * 3) 
self.assertLazyAndIdentical(self.lazy_array, chunked) def test_new_chunk(self): chunked = self.eager_array.chunk() assert chunked.data.name.startswith("xarray-<this-array>") def test_lazy_dataset(self): lazy_ds = Dataset({"foo": (("x", "y"), self.data)}) assert isinstance(lazy_ds.foo.variable.data, da.Array) def test_lazy_array(self): u = self.eager_array v = self.lazy_array self.assertLazyAndAllClose(u, v) self.assertLazyAndAllClose(-u, -v) self.assertLazyAndAllClose(u.T, v.T) self.assertLazyAndAllClose(u.mean(), v.mean()) self.assertLazyAndAllClose(1 + u, 1 + v) actual = xr.concat([v[:2], v[2:]], "x") self.assertLazyAndAllClose(u, actual) def test_compute(self): u = self.eager_array v = self.lazy_array assert dask.is_dask_collection(v) (v2,) = dask.compute(v + 1) assert not dask.is_dask_collection(v2) assert ((u + 1).data == v2.data).all() def test_persist(self): u = self.eager_array v = self.lazy_array + 1 (v2,) = dask.persist(v) assert v is not v2 assert len(v2.__dask_graph__()) < len(v.__dask_graph__()) assert v2.__dask_keys__() == v.__dask_keys__() assert dask.is_dask_collection(v) assert dask.is_dask_collection(v2) self.assertLazyAndAllClose(u + 1, v) self.assertLazyAndAllClose(u + 1, v2) def test_concat_loads_variables(self): # Test that concat() computes not-in-memory variables at most once # and loads them in the output, while leaving the input unaltered. d1 = build_dask_array("d1") c1 = build_dask_array("c1") d2 = build_dask_array("d2") c2 = build_dask_array("c2") d3 = build_dask_array("d3") c3 = build_dask_array("c3") # Note: c is a non-index coord. # Index coords are loaded by IndexVariable.__init__. 
ds1 = Dataset(data_vars={"d": ("x", d1)}, coords={"c": ("x", c1)}) ds2 = Dataset(data_vars={"d": ("x", d2)}, coords={"c": ("x", c2)}) ds3 = Dataset(data_vars={"d": ("x", d3)}, coords={"c": ("x", c3)}) assert kernel_call_count == 0 out = xr.concat( [ds1, ds2, ds3], dim="n", data_vars="different", coords="different", compat="equals", ) # each kernel is computed exactly once assert kernel_call_count == 6 # variables are loaded in the output assert isinstance(out["d"].data, np.ndarray) assert isinstance(out["c"].data, np.ndarray) out = xr.concat([ds1, ds2, ds3], dim="n", data_vars="all", coords="all") # no extra kernel calls assert kernel_call_count == 6 assert isinstance(out["d"].data, dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) out = xr.concat([ds1, ds2, ds3], dim="n", data_vars=["d"], coords=["c"]) # no extra kernel calls assert kernel_call_count == 6 assert isinstance(out["d"].data, dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) out = xr.concat([ds1, ds2, ds3], dim="n", data_vars=[], coords=[]) # variables are loaded once as we are validating that they're identical assert kernel_call_count == 12 assert isinstance(out["d"].data, np.ndarray) assert isinstance(out["c"].data, np.ndarray) out = xr.concat( [ds1, ds2, ds3], dim="n", data_vars="different", coords="different", compat="identical", ) # compat=identical doesn't do any more kernel calls than compat=equals assert kernel_call_count == 18 assert isinstance(out["d"].data, np.ndarray) assert isinstance(out["c"].data, np.ndarray) # When the test for different turns true halfway through, # stop computing variables as it would not have any benefit ds4 = Dataset(data_vars={"d": ("x", [2.0])}, coords={"c": ("x", [2.0])}) out = xr.concat( [ds1, ds2, ds4, ds3], dim="n", data_vars="different", coords="different", compat="equals", ) # the variables of ds1 and ds2 were computed, but those of ds3 didn't assert kernel_call_count == 22 assert isinstance(out["d"].data, 
dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) # the data of ds1 and ds2 was loaded into numpy and then # concatenated to the data of ds3. Thus, only ds3 is computed now. out.compute() assert kernel_call_count == 24 # Finally, test that originals are unaltered assert ds1["d"].data is d1 assert ds1["c"].data is c1 assert ds2["d"].data is d2 assert ds2["c"].data is c2 assert ds3["d"].data is d3 assert ds3["c"].data is c3 # now check that concat() is correctly using dask name equality to skip loads out = xr.concat( [ds1, ds1, ds1], dim="n", data_vars="different", coords="different", compat="equals", ) assert kernel_call_count == 24 # variables are not loaded in the output assert isinstance(out["d"].data, dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) out = xr.concat( [ds1, ds1, ds1], dim="n", data_vars=[], coords=[], compat="identical" ) assert kernel_call_count == 24 # variables are not loaded in the output assert isinstance(out["d"].data, dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) out = xr.concat( [ds1, ds2.compute(), ds3], dim="n", data_vars="all", coords="different", compat="identical", ) # c1,c3 must be computed for comparison since c2 is numpy; # d2 is computed too assert kernel_call_count == 28 out = xr.concat( [ds1, ds2.compute(), ds3], dim="n", data_vars="all", coords="all", compat="identical", ) # no extra computes assert kernel_call_count == 30 # Finally, test that originals are unaltered assert ds1["d"].data is d1 assert ds1["c"].data is c1 assert ds2["d"].data is d2 assert ds2["c"].data is c2 assert ds3["d"].data is d3 assert ds3["c"].data is c3 def test_groupby(self): u = self.eager_array v = self.lazy_array expected = u.groupby("x").mean(...) with raise_if_dask_computes(): actual = v.groupby("x").mean(...) 
self.assertLazyAndAllClose(expected, actual) def test_rolling(self): u = self.eager_array v = self.lazy_array expected = u.rolling(x=2).mean() with raise_if_dask_computes(): actual = v.rolling(x=2).mean() self.assertLazyAndAllClose(expected, actual) @pytest.mark.parametrize("func", ["first", "last"]) def test_groupby_first_last(self, func): method = operator.methodcaller(func) u = self.eager_array v = self.lazy_array for coords in [u.coords, v.coords]: coords["ab"] = ("x", ["a", "a", "b", "b"]) expected = method(u.groupby("ab")) with raise_if_dask_computes(): actual = method(v.groupby("ab")) self.assertLazyAndAllClose(expected, actual) with raise_if_dask_computes(): actual = method(v.groupby("ab")) self.assertLazyAndAllClose(expected, actual) def test_reindex(self): u = self.eager_array.assign_coords(y=range(6)) v = self.lazy_array.assign_coords(y=range(6)) kwargs_list: list[dict[str, Any]] = [ {"x": [2, 3, 4]}, {"x": [1, 100, 2, 101, 3]}, {"x": [2.5, 3, 3.5], "y": [2, 2.5, 3]}, ] for kwargs in kwargs_list: expected = u.reindex(**kwargs) actual = v.reindex(**kwargs) self.assertLazyAndAllClose(expected, actual) def test_to_dataset_roundtrip(self): u = self.eager_array v = self.lazy_array expected = u.assign_coords(x=u["x"]) self.assertLazyAndEqual(expected, v.to_dataset("x").to_dataarray("x")) def test_merge(self): def duplicate_and_merge(array): return xr.merge([array, array.rename("bar")]).to_dataarray() expected = duplicate_and_merge(self.eager_array) actual = duplicate_and_merge(self.lazy_array) self.assertLazyAndEqual(expected, actual) def test_ufuncs(self): u = self.eager_array v = self.lazy_array self.assertLazyAndAllClose(np.sin(u), np.sin(v)) def test_where_dispatching(self): a = np.arange(10) b = a > 3 x = da.from_array(a, 5) y = da.from_array(b, 5) expected = DataArray(a).where(b) self.assertLazyAndEqual(expected, DataArray(a).where(y)) self.assertLazyAndEqual(expected, DataArray(x).where(b)) self.assertLazyAndEqual(expected, DataArray(x).where(y)) def 
test_simultaneous_compute(self): ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk() count = [0] def counting_get(*args, **kwargs): count[0] += 1 return dask.get(*args, **kwargs) ds.load(scheduler=counting_get) assert count[0] == 1 def test_duplicate_dims(self): data = np.random.normal(size=(4, 4)) with pytest.warns(UserWarning, match="Duplicate dimension"): arr = DataArray(data, dims=("x", "x")) with pytest.warns(UserWarning, match="Duplicate dimension"): chunked_array = arr.chunk({"x": 2}) assert chunked_array.chunks == ((2, 2), (2, 2)) assert chunked_array.chunksizes == {"x": (2, 2)} def test_stack(self): data = da.random.normal(size=(2, 3, 4), chunks=(1, 3, 4)) arr = DataArray(data, dims=("w", "x", "y")) stacked = arr.stack(z=("x", "y")) z = pd.MultiIndex.from_product( [list(range(3)), list(range(4))], names=["x", "y"] ) expected = DataArray(data.reshape(2, -1), {"z": z}, dims=["w", "z"]) assert stacked.data.chunks == expected.data.chunks self.assertLazyAndEqual(expected, stacked) def test_dot(self): eager = self.eager_array.dot(self.eager_array[0]) lazy = self.lazy_array.dot(self.lazy_array[0]) self.assertLazyAndAllClose(eager, lazy) def test_dataarray_repr(self): data = build_dask_array("data") nonindex_coord = build_dask_array("coord") a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)}) expected = dedent( f"""\ <xarray.DataArray 'data' (x: 1)> Size: 8B {data!r} Coordinates: y (x) int64 8B dask.array<chunksize=(1,), meta=np.ndarray> Dimensions without coordinates: x""" ) assert expected == repr(a) assert kernel_call_count == 0 # should not evaluate dask array def test_dataset_repr(self): data = build_dask_array("data") nonindex_coord = build_dask_array("coord") ds = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)}) expected = dedent( """\ <xarray.Dataset> Size: 16B Dimensions: (x: 1) Coordinates: y (x) int64 8B dask.array<chunksize=(1,), meta=np.ndarray> Dimensions without coordinates: x Data 
variables: a (x) int64 8B dask.array<chunksize=(1,), meta=np.ndarray>""" ) assert expected == repr(ds) assert kernel_call_count == 0 # should not evaluate dask array def test_dataarray_pickle(self): # Test that pickling/unpickling converts the dask backend # to numpy in neither the data variable nor the non-index coords data = build_dask_array("data") nonindex_coord = build_dask_array("coord") a1 = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)}) a1.compute() assert not a1._in_memory assert not a1.coords["y"]._in_memory assert kernel_call_count == 2 a2 = pickle.loads(pickle.dumps(a1)) assert kernel_call_count == 2 assert_identical(a1, a2) assert not a1._in_memory assert not a2._in_memory assert not a1.coords["y"]._in_memory assert not a2.coords["y"]._in_memory def test_dataset_pickle(self): # Test that pickling/unpickling converts the dask backend # to numpy in neither the data variables nor the non-index coords data = build_dask_array("data") nonindex_coord = build_dask_array("coord") ds1 = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)}) ds1.compute() assert not ds1["a"]._in_memory assert not ds1["y"]._in_memory assert kernel_call_count == 2 ds2 = pickle.loads(pickle.dumps(ds1)) assert kernel_call_count == 2 assert_identical(ds1, ds2) assert not ds1["a"]._in_memory assert not ds2["a"]._in_memory assert not ds1["y"]._in_memory assert not ds2["y"]._in_memory def test_dataarray_getattr(self): # ipython/jupyter does a long list of getattr() calls to when trying to # represent an object. # Make sure we're not accidentally computing dask variables. 
data = build_dask_array("data") nonindex_coord = build_dask_array("coord") a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)}) with suppress(AttributeError): _ = a.NOTEXIST assert kernel_call_count == 0 def test_dataset_getattr(self): # Test that pickling/unpickling converts the dask backend # to numpy in neither the data variables nor the non-index coords data = build_dask_array("data") nonindex_coord = build_dask_array("coord") ds = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)}) with suppress(AttributeError): _ = ds.NOTEXIST assert kernel_call_count == 0 def test_values(self): # Test that invoking the values property does not convert the dask # backend to numpy a = DataArray([1, 2]).chunk() assert not a._in_memory assert a.values.tolist() == [1, 2] assert not a._in_memory def test_from_dask_variable(self): # Test array creation from Variable with dask backend. # This is used e.g. in broadcast() a = DataArray(self.lazy_array.variable, coords={"x": range(4)}, name="foo") self.assertLazyAndIdentical(self.lazy_array, a) @requires_pint def test_tokenize_duck_dask_array(self): import pint unit_registry = pint.UnitRegistry() q = unit_registry.Quantity(self.data, unit_registry.meter) data_array = xr.DataArray( data=q, coords={"x": range(4)}, dims=("x", "y"), name="foo" ) token = dask.base.tokenize(data_array) post_op = data_array + 5 * unit_registry.meter assert dask.base.tokenize(data_array) != dask.base.tokenize(post_op) # Immutability check assert dask.base.tokenize(data_array) == token
TestDataArrayAndDataset
python
huggingface__transformers
src/transformers/models/dpr/tokenization_dpr_fast.py
{ "start": 1660, "end": 7064 }
class ____(BertTokenizer): r""" Constructs a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library). [`DPRQuestionEncoderTokenizerFast`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = DPRQuestionEncoderTokenizer DPRSpanPrediction = collections.namedtuple( "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"] ) DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"]) CUSTOM_DPR_READER_DOCSTRING = r""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `list[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `list[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `list[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). 
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. 
return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `dict[str, list[list[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
DPRQuestionEncoderTokenizerFast
python
walkccc__LeetCode
solutions/1101. The Earliest Moment When Everyone Become Friends/1101.py
{ "start": 609, "end": 907 }
class ____: def earliestAcq(self, logs: list[list[int]], n: int) -> int: uf = UnionFind(n) # Sort `logs` by timestamp. logs.sort(key=lambda x: x[0]) for timestamp, x, y in logs: uf.unionByRank(x, y) if uf.getCount() == 1: return timestamp return -1
Solution
python
pyca__cryptography
tests/x509/test_x509.py
{ "start": 242035, "end": 243941 }
class ____: def test_eq(self): oid1 = x509.ObjectIdentifier("2.999.1") oid2 = x509.ObjectIdentifier("2.999.1") assert oid1 == oid2 def test_ne(self): oid1 = x509.ObjectIdentifier("2.999.1") assert oid1 != x509.ObjectIdentifier("2.999.2") assert oid1 != object() def test_comparison(self): oid1 = x509.ObjectIdentifier("2.999.1") oid2 = x509.ObjectIdentifier("2.999.2") with pytest.raises(TypeError): oid1 < oid2 # type: ignore[operator] def test_repr(self): oid = x509.ObjectIdentifier("2.5.4.3") assert repr(oid) == "<ObjectIdentifier(oid=2.5.4.3, name=commonName)>" oid = x509.ObjectIdentifier("2.999.1") assert repr(oid) == "<ObjectIdentifier(oid=2.999.1, name=Unknown OID)>" def test_name_property(self): oid = x509.ObjectIdentifier("2.5.4.3") assert oid._name == "commonName" oid = x509.ObjectIdentifier("2.999.1") assert oid._name == "Unknown OID" def test_too_short(self): with pytest.raises(ValueError): x509.ObjectIdentifier("1") def test_invalid_input(self): with pytest.raises(ValueError): x509.ObjectIdentifier("notavalidform") def test_invalid_node1(self): with pytest.raises(ValueError): x509.ObjectIdentifier("7.1.37") def test_invalid_node2(self): with pytest.raises(ValueError): x509.ObjectIdentifier("1.50.200") def test_valid(self): x509.ObjectIdentifier("0.35.200") x509.ObjectIdentifier("1.39.999") x509.ObjectIdentifier("2.5.29.3") x509.ObjectIdentifier("2.999.37.5.22.8") x509.ObjectIdentifier(f"2.25.{2**128 - 1}") def test_oid_arc_too_large(self): with pytest.raises(ValueError): x509.ObjectIdentifier(f"2.25.{2**128}")
TestObjectIdentifier
python
getsentry__sentry
src/sentry/integrations/source_code_management/repo_trees.py
{ "start": 705, "end": 1019 }
class ____(NamedTuple): repo: RepoAndBranch files: Sequence[str] # Tasks which hit the API multiple connection errors should give up. MAX_CONNECTION_ERRORS = 10 # When the number of remaining API requests is less than this value, it will # fall back to the cache. MINIMUM_REQUESTS_REMAINING = 200
RepoTree
python
huggingface__transformers
tests/test_image_processing_common.py
{ "start": 33561, "end": 37055 }
class ____: # this mixin adds a test to assert that usages of the # to-be-deprecated `AnnotionFormat` continue to be # supported for the time being def test_processor_can_use_legacy_annotation_format(self): image_processor_dict = self.image_processor_tester.prepare_image_processor_dict() fixtures_path = pathlib.Path(__file__).parent / "fixtures" / "tests_samples" / "COCO" with open(fixtures_path / "coco_annotations.txt") as f: detection_target = json.loads(f.read()) detection_annotations = {"image_id": 39769, "annotations": detection_target} detection_params = { "images": Image.open(fixtures_path / "000000039769.png"), "annotations": detection_annotations, "return_tensors": "pt", } with open(fixtures_path / "coco_panoptic_annotations.txt") as f: panoptic_target = json.loads(f.read()) panoptic_annotations = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": panoptic_target} masks_path = pathlib.Path(fixtures_path / "coco_panoptic") panoptic_params = { "images": Image.open(fixtures_path / "000000039769.png"), "annotations": panoptic_annotations, "return_tensors": "pt", "masks_path": masks_path, } test_cases = [ ("coco_detection", detection_params), ("coco_panoptic", panoptic_params), (AnnotionFormat.COCO_DETECTION, detection_params), (AnnotionFormat.COCO_PANOPTIC, panoptic_params), (AnnotationFormat.COCO_DETECTION, detection_params), (AnnotationFormat.COCO_PANOPTIC, panoptic_params), ] def _compare(a, b) -> None: if isinstance(a, (dict, BatchFeature)): self.assertEqual(a.keys(), b.keys()) for k, v in a.items(): _compare(v, b[k]) elif isinstance(a, list): self.assertEqual(len(a), len(b)) for idx in range(len(a)): _compare(a[idx], b[idx]) elif isinstance(a, torch.Tensor): torch.testing.assert_close(a, b, rtol=1e-3, atol=1e-3) elif isinstance(a, str): self.assertEqual(a, b) for annotation_format, params in test_cases: with self.subTest(annotation_format): image_processor_params = {**image_processor_dict, **{"format": annotation_format}} 
image_processor_first = self.image_processing_class(**image_processor_params) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(tmpdirname) image_processor_second = self.image_processing_class.from_pretrained(tmpdirname) # check the 'format' key exists and that the dicts of the # first and second processors are equal self.assertIn("format", image_processor_first.to_dict().keys()) self.assertEqual(image_processor_second.to_dict(), image_processor_first.to_dict()) # perform encoding using both processors and compare # the resulting BatchFeatures first_encoding = image_processor_first(**params) second_encoding = image_processor_second(**params) _compare(first_encoding, second_encoding)
AnnotationFormatTestMixin
python
Textualize__textual
src/textual/css/_style_properties.py
{ "start": 35460, "end": 37859 }
class ____: """Descriptor for getting and set style flag properties (e.g. ``bold italic underline``).""" def __set_name__(self, owner: StylesBase, name: str) -> None: self.name = name def __get__( self, obj: StylesBase, objtype: type[StylesBase] | None = None ) -> Style: """Get the ``Style``. Args: obj: The ``Styles`` object. objtype: The ``Styles`` class. Returns: The ``Style`` object. """ return obj.get_rule(self.name, Style.null()) # type: ignore[return-value] def __set__(self, obj: StylesBase, style_flags: Style | str | None) -> None: """Set the style using a style flag string. Args: obj: The ``Styles`` object. style_flags: The style flags to set as a string. For example, ``"bold italic"``. Raises: StyleValueError: If the value is an invalid style flag. """ _rich_traceback_omit = True if style_flags is None: if obj.clear_rule(self.name): obj.refresh(children=True) elif isinstance(style_flags, Style): if obj.set_rule(self.name, style_flags): obj.refresh(children=True) else: words = [word.strip() for word in style_flags.split(" ")] valid_word = VALID_STYLE_FLAGS.__contains__ for word in words: if not valid_word(word): raise StyleValueError( f"unknown word {word!r} in style flags", help_text=style_flags_property_help_text( self.name, word, context="inline" ), ) try: style = Style.parse(style_flags) except rich.errors.StyleSyntaxError as error: if "none" in words and len(words) > 1: raise StyleValueError( "cannot mix 'none' with other style flags", help_text=style_flags_property_help_text( self.name, " ".join(words), context="inline" ), ) from None raise error from None if obj.set_rule(self.name, style): obj.refresh(children=True)
StyleFlagsProperty
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/overrides.py
{ "start": 3011, "end": 3109 }
class ____(AnalyzeAllOverrides): def return_source(self): pass
AnalyzeAllOverridesChild1
python
pydata__xarray
xarray/coding/cftime_offsets.py
{ "start": 19407, "end": 20168 }
class ____(YearOffset): _freq = "YE" _day_option = "end" _default_month = 12 def onOffset(self, date) -> bool: """Check if the given date is in the set of possible dates created using a length-one version of this offset class.""" return date.day == date.daysinmonth and date.month == self.month def rollforward(self, date): """Roll date forward to nearest end of year""" if self.onOffset(date): return date else: return date + YearEnd(month=self.month) def rollback(self, date): """Roll date backward to nearest end of year""" if self.onOffset(date): return date else: return date - YearEnd(month=self.month)
YearEnd
python
marshmallow-code__marshmallow
src/marshmallow/validate.py
{ "start": 12652, "end": 15054 }
class ____(Validator): """Validator which succeeds if the value passed to it has a length between a minimum and maximum. Uses len(), so it can work for strings, lists, or anything with length. :param min: The minimum length. If not provided, minimum length will not be checked. :param max: The maximum length. If not provided, maximum length will not be checked. :param equal: The exact length. If provided, maximum and minimum length will not be checked. :param error: Error message to raise in case of a validation error. Can be interpolated with `{input}`, `{min}` and `{max}`. """ message_min = "Shorter than minimum length {min}." message_max = "Longer than maximum length {max}." message_all = "Length must be between {min} and {max}." message_equal = "Length must be {equal}." def __init__( self, min: int | None = None, # noqa: A002 max: int | None = None, # noqa: A002 *, equal: int | None = None, error: str | None = None, ): if equal is not None and any([min, max]): raise ValueError( "The `equal` parameter was provided, maximum or " "minimum parameter must not be provided." ) self.min = min self.max = max self.error = error self.equal = equal def _repr_args(self) -> str: return f"min={self.min!r}, max={self.max!r}, equal={self.equal!r}" def _format_error(self, value: _SizedT, message: str) -> str: return (self.error or message).format( input=value, min=self.min, max=self.max, equal=self.equal ) def __call__(self, value: _SizedT) -> _SizedT: length = len(value) if self.equal is not None: if length != self.equal: raise ValidationError(self._format_error(value, self.message_equal)) return value if self.min is not None and length < self.min: message = self.message_min if self.max is None else self.message_all raise ValidationError(self._format_error(value, message)) if self.max is not None and length > self.max: message = self.message_max if self.min is None else self.message_all raise ValidationError(self._format_error(value, message)) return value
Length
python
PrefectHQ__prefect
src/prefect/events/schemas/automations.py
{ "start": 7864, "end": 9592 }
class ____(PrefectBaseModel): """Defines a subset of the Trigger subclass, which is specific to Metric automations, that specify the query configurations and breaching conditions for the Automation""" name: PrefectMetric = Field( ..., description="The name of the metric to query.", ) threshold: float = Field( ..., description=( "The threshold value against which we'll compare the query result." ), ) operator: MetricTriggerOperator = Field( ..., description=( "The comparative operator (LT / LTE / GT / GTE) used to compare " "the query result against the threshold value." ), ) range: timedelta = Field( timedelta(seconds=300), # defaults to 5 minutes description=( "The lookback duration (seconds) for a metric query. This duration is " "used to determine the time range over which the query will be executed. " "The minimum value is 300 seconds (5 minutes)." ), ) firing_for: timedelta = Field( timedelta(seconds=300), # defaults to 5 minutes description=( "The duration (seconds) for which the metric query must breach " "or resolve continuously before the state is updated and the " "automation is triggered. " "The minimum value is 300 seconds (5 minutes)." ), ) @field_validator("range", "firing_for") def enforce_minimum_range(cls, value: timedelta) -> timedelta: if value < timedelta(seconds=300): raise ValueError("The minimum range is 300 seconds (5 minutes)") return value
MetricTriggerQuery
python
apache__airflow
helm-tests/tests/helm_tests/airflow_aux/test_configmap.py
{ "start": 914, "end": 10873 }
class ____: """Tests configmaps.""" def test_single_annotation(self): docs = render_chart( values={ "airflowConfigAnnotations": {"key": "value"}, }, show_only=["templates/configmaps/configmap.yaml"], ) annotations = jmespath.search("metadata.annotations", docs[0]) assert annotations.get("key") == "value" def test_multiple_annotations(self): docs = render_chart( values={ "airflowConfigAnnotations": {"key": "value", "key-two": "value-two"}, }, show_only=["templates/configmaps/configmap.yaml"], ) annotations = jmespath.search("metadata.annotations", docs[0]) assert annotations.get("key") == "value" assert annotations.get("key-two") == "value-two" @pytest.mark.parametrize( ("af_version", "secret_key", "secret_key_name", "expected"), [ ("3.0.0", None, None, False), ("2.2.0", None, None, True), ("2.2.0", "foo", None, False), ("2.2.0", None, "foo", False), ("2.1.3", None, None, False), ("2.1.3", "foo", None, False), ], ) def test_default_airflow_local_settings(self, af_version, secret_key, secret_key_name, expected): docs = render_chart( values={ "airflowVersion": af_version, "webserverSecretKey": secret_key, "webserverSecretKeySecretName": secret_key_name, }, show_only=["templates/configmaps/configmap.yaml"], ) if expected: assert ( "Usage of a dynamic webserver secret key detected" in jmespath.search('data."airflow_local_settings.py"', docs[0]).strip() ) else: assert jmespath.search('data."airflow_local_settings.py"', docs[0]).strip() == "" def test_airflow_local_settings(self): docs = render_chart( values={"airflowLocalSettings": "# Well hello {{ .Release.Name }}!"}, show_only=["templates/configmaps/configmap.yaml"], ) assert ( jmespath.search('data."airflow_local_settings.py"', docs[0]).strip() == "# Well hello release-name!" 
) def test_kerberos_config_available_with_celery_executor(self): docs = render_chart( values={ "executor": "CeleryExecutor", "kerberos": {"enabled": True, "config": "krb5\ncontent"}, }, show_only=["templates/configmaps/configmap.yaml"], ) assert jmespath.search('data."krb5.conf"', docs[0]) == "krb5\ncontent" @pytest.mark.parametrize( ("executor", "af_version", "should_be_created"), [ ("KubernetesExecutor", "1.10.11", False), ("KubernetesExecutor", "1.10.12", True), ("KubernetesExecutor", "2.0.0", True), ("CeleryExecutor", "1.10.11", False), ("CeleryExecutor", "2.0.0", False), ("CeleryExecutor,KubernetesExecutor", "2.0.0", True), ("CeleryExecutor,KubernetesExecutor", "1.10.11", False), ], ) def test_pod_template_created(self, executor, af_version, should_be_created): docs = render_chart( values={ "executor": executor, "airflowVersion": af_version, }, show_only=["templates/configmaps/configmap.yaml"], ) keys = jmespath.search("data", docs[0]).keys() if should_be_created: assert "pod_template_file.yaml" in keys else: assert "pod_template_file.yaml" not in keys def test_pod_template_is_templated(self): docs = render_chart( values={ "executor": "KubernetesExecutor", "podTemplate": """ apiVersion: v1 kind: Pod metadata: name: example-name labels: mylabel: {{ .Release.Name }} """, }, show_only=["templates/configmaps/configmap.yaml"], ) pod_template_file = jmespath.search('data."pod_template_file.yaml"', docs[0]) assert "mylabel: release-name" in pod_template_file def test_default_flower_url_prefix(self): docs = render_chart( values={ "executor": "CeleryExecutor", }, show_only=["templates/configmaps/configmap.yaml"], ) expected = "flower_url_prefix = " cfg = jmespath.search('data."airflow.cfg"', docs[0]) assert expected in cfg.splitlines() def test_overridedn_flower_url_prefix(self): docs = render_chart( values={"executor": "CeleryExecutor", "ingress": {"flower": {"path": "/overridden-path"}}}, show_only=["templates/configmaps/configmap.yaml"], ) expected = 
"flower_url_prefix = /overridden-path" cfg = jmespath.search('data."airflow.cfg"', docs[0]) assert expected in cfg.splitlines() @pytest.mark.parametrize( ("dag_values", "expected_default_dag_folder"), [ ( {"gitSync": {"enabled": True}}, "/opt/airflow/dags/repo/tests/dags", ), ( {"persistence": {"enabled": True}}, "/opt/airflow/dags", ), ( {"mountPath": "/opt/airflow/dags/custom", "gitSync": {"enabled": True}}, "/opt/airflow/dags/custom/repo/tests/dags", ), ( { "mountPath": "/opt/airflow/dags/custom", "gitSync": {"enabled": True, "subPath": "mysubPath"}, }, "/opt/airflow/dags/custom/repo/mysubPath", ), ( {"mountPath": "/opt/airflow/dags/custom", "persistence": {"enabled": True}}, "/opt/airflow/dags/custom", ), ], ) def test_expected_default_dag_folder(self, dag_values, expected_default_dag_folder): docs = render_chart( values={"dags": dag_values}, show_only=["templates/configmaps/configmap.yaml"], ) cfg = jmespath.search('data."airflow.cfg"', docs[0]) expected_folder_config = f"dags_folder = {expected_default_dag_folder}" assert expected_folder_config in cfg.splitlines() @pytest.mark.parametrize( ("airflow_version", "enabled"), [ ("2.10.4", False), ("3.0.0", True), ], ) def test_default_standalone_dag_processor_by_airflow_version(self, airflow_version, enabled): docs = render_chart( values={"airflowVersion": airflow_version}, show_only=["templates/configmaps/configmap.yaml"], ) cfg = jmespath.search('data."airflow.cfg"', docs[0]) expected_line = f"standalone_dag_processor = {enabled}" assert expected_line in cfg.splitlines() @pytest.mark.parametrize( ("airflow_version", "enabled"), [ ("2.10.4", False), ("2.10.4", True), ("3.0.0", False), ("3.0.0", True), ], ) def test_standalone_dag_processor_explicit(self, airflow_version, enabled): docs = render_chart( values={ "airflowVersion": airflow_version, "config": {"scheduler": {"standalone_dag_processor": enabled}}, }, show_only=["templates/configmaps/configmap.yaml"], ) cfg = jmespath.search('data."airflow.cfg"', 
docs[0]) expected_line = f"standalone_dag_processor = {str(enabled).lower()}" assert expected_line in cfg.splitlines() @pytest.mark.parametrize( ("airflow_version", "base_url", "execution_api_server_url", "expected_execution_url"), [ ( "3.0.0", None, None, "http://release-name-api-server:8080/execution/", ), ( "2.9.0", None, None, None, ), ( "3.0.0", "http://example.com", None, "http://release-name-api-server:8080/execution/", ), ( "3.0.0", "http://example.com/airflow", None, "http://release-name-api-server:8080/airflow/execution/", ), ( "3.0.0", "http://example.com/airflow", "http://service:9090/execution/", "http://service:9090/execution/", ), ], ) def test_execution_api_server_url( self, airflow_version, base_url, execution_api_server_url, expected_execution_url ): config_overrides = {} if base_url: config_overrides["api"] = {"base_url": base_url} if execution_api_server_url: config_overrides["core"] = {"execution_api_server_url": execution_api_server_url} configmap = render_chart( values={"airflowVersion": airflow_version, "config": config_overrides}, show_only=["templates/configmaps/configmap.yaml"], ) config = jmespath.search('data."airflow.cfg"', configmap[0]) assert config is not None, "Configmap data for airflow.cfg should not be None" assert len(config) > 0, "Configmap data for airflow.cfg should not be empty" if expected_execution_url is not None: assert f"\nexecution_api_server_url = {expected_execution_url}\n" in config else: assert "execution_api_server_url" not in config, ( "execution_api_server_url should not be set for Airflow 2.x versions" )
TestConfigmap
python
getsentry__sentry
src/sentry/utils/locking/backends/migration.py
{ "start": 559, "end": 3688 }
class ____(LockBackend): """ Backend class intended for controlled migrations of locks from one backend to another. Example use in combination with runtime option: def selector_func(key, routing_key, backend_new, backend_old): if int(hashlib.md5("{key}{routing_key}".encode("utf8")).hexdigest(), 16) % 100 <= options.get( "migrate.locks", 0 ): return backend_old return backend_new backend = MigrationLockBackend( backend_new_config={ "path": "sentry.utils.locking.backends.redis.RedisLockBackend", "options": {"cluster": "new-cluster"}, }, backend_old_config={ "path": "sentry.utils.locking.backends.redis.RedisLockBackend", "options": {"cluster": "old-cluster"}, }, selector_func_path="python.path.to.selector_func", ) locks = LockManager(backend) This example setup allows to move portion of keys, based on the value of the runtime option, to use the new Redis cluster or revert to the old one. """ def __init__( self, backend_new_config: ServiceOptions, backend_old_config: ServiceOptions, selector_func_path: str | SelectorFncType | None = None, ): self.backend_new = build_instance_from_options_of_type(LockBackend, backend_new_config) self.backend_old = build_instance_from_options_of_type(LockBackend, backend_old_config) self.selector_func: SelectorFncType = ( resolve_callable(selector_func_path) if selector_func_path else _default_selector_func ) def _get_backend(self, key: str, routing_key: str | int | None) -> LockBackend: return self.selector_func( key, routing_key, self.backend_new, self.backend_old, ) def acquire(self, key: str, duration: int, routing_key: str | None = None) -> None: backend = self._get_backend(key=key, routing_key=routing_key) # in case new backend is selected for the key, make sure it's not held # by the old backend if backend != self.backend_old and self.backend_old.locked( key=key, routing_key=routing_key ): raise Exception(f"Could not set key: {key!r}") return backend.acquire(key=key, duration=duration, routing_key=routing_key) def release(self, 
key: str, routing_key: str | None = None) -> None: backend = self._get_backend(key=key, routing_key=routing_key) try: (self.backend_new if backend == self.backend_old else self.backend_old).release( key=key, routing_key=routing_key ) except Exception: pass backend.release(key=key, routing_key=routing_key) def locked(self, key: str, routing_key: str | None = None) -> bool: return self.backend_old.locked(key=key, routing_key=routing_key) or self.backend_new.locked( key=key, routing_key=routing_key )
MigrationLockBackend
python
dask__distributed
distributed/worker_memory.py
{ "start": 20601, "end": 21214 }
class ____: name: str def __set_name__(self, owner: type, name: str) -> None: self.name = name def __get__(self, instance: Nanny | Worker | None, owner: type) -> Any: if instance is None: # This is triggered by Sphinx return None # pragma: nocover _warn_deprecated(instance, self.name) return getattr(instance.memory_manager, self.name) def __set__(self, instance: Nanny | Worker, value: Any) -> None: _warn_deprecated(instance, self.name) setattr(instance.memory_manager, self.name, value)
DeprecatedMemoryManagerAttribute
python
kubernetes-client__python
kubernetes/client/models/v1_ingress_class.py
{ "start": 383, "end": 6580 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'kind': 'str', 'metadata': 'V1ObjectMeta', 'spec': 'V1IngressClassSpec' } attribute_map = { 'api_version': 'apiVersion', 'kind': 'kind', 'metadata': 'metadata', 'spec': 'spec' } def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501 """V1IngressClass - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._kind = None self._metadata = None self._spec = None self.discriminator = None if api_version is not None: self.api_version = api_version if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata if spec is not None: self.spec = spec @property def api_version(self): """Gets the api_version of this V1IngressClass. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this V1IngressClass. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V1IngressClass. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V1IngressClass. # noqa: E501 :type: str """ self._api_version = api_version @property def kind(self): """Gets the kind of this V1IngressClass. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V1IngressClass. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V1IngressClass. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1IngressClass. # noqa: E501 :type: str """ self._kind = kind @property def metadata(self): """Gets the metadata of this V1IngressClass. # noqa: E501 :return: The metadata of this V1IngressClass. # noqa: E501 :rtype: V1ObjectMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """Sets the metadata of this V1IngressClass. :param metadata: The metadata of this V1IngressClass. # noqa: E501 :type: V1ObjectMeta """ self._metadata = metadata @property def spec(self): """Gets the spec of this V1IngressClass. # noqa: E501 :return: The spec of this V1IngressClass. # noqa: E501 :rtype: V1IngressClassSpec """ return self._spec @spec.setter def spec(self, spec): """Sets the spec of this V1IngressClass. :param spec: The spec of this V1IngressClass. 
# noqa: E501 :type: V1IngressClassSpec """ self._spec = spec def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1IngressClass): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1IngressClass): return True return self.to_dict() != other.to_dict()
V1IngressClass
python
pytorch__pytorch
torch/testing/_internal/distributed/rpc/rpc_test.py
{ "start": 2752, "end": 3680 }
class ____: def __init__(self, world_size): self.world_size = world_size def get_worker_infos(self): return { WorkerInfo(name=worker_name(rank), id=rank) for rank in range(self.world_size) } def _stub_construct_rpc_backend_options_handler(**kwargs): return mock.Mock() # RpcBackendOptions. def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options): return StubRpcAgent(world_size=world_size) def set_value(value): VALUE_FUTURE.set_result(value) def wait_for_value_future(): return VALUE_FUTURE.result() def set_and_check_done(value): VALUE_FUTURE.set_result(value) return DONE_FUTURE.result() # it is used to test python user defined function over rpc # classes and functions are used to test python user defined class and # methods over rpc TensorClass = namedtuple("TensorClass", ["tensors"])
StubRpcAgent
python
pennersr__django-allauth
allauth/mfa/recovery_codes/internal/auth.py
{ "start": 218, "end": 3684 }
class ____: def __init__(self, instance: Authenticator) -> None: self.instance = instance @classmethod def activate(cls, user) -> "RecoveryCodes": instance = Authenticator.objects.filter( user=user, type=Authenticator.Type.RECOVERY_CODES ).first() if instance: return cls(instance) instance = Authenticator( user=user, type=Authenticator.Type.RECOVERY_CODES, data={ "seed": encrypt(cls.generate_seed()), "used_mask": 0, }, ) instance.save() return cls(instance) @classmethod def generate_seed(self) -> str: key = secrets.token_hex(40) return key def _get_migrated_codes(self) -> Optional[List[str]]: codes = self.instance.data.get("migrated_codes") if codes is not None: return [decrypt(code) for code in codes] return None def generate_codes(self) -> List[str]: migrated_codes = self._get_migrated_codes() if migrated_codes is not None: return migrated_codes ret = [] seed = decrypt(self.instance.data["seed"]) h = hmac.new(key=seed.encode("ascii"), msg=None, digestmod=sha1) byte_count = min(app_settings.RECOVERY_CODE_DIGITS // 2, h.digest_size) for i in range(app_settings.RECOVERY_CODE_COUNT): h.update((f"{i:3},").encode("utf-8")) value = int.from_bytes( h.digest()[:byte_count], byteorder="big", signed=False ) value %= 10**app_settings.RECOVERY_CODE_DIGITS fmt_value = str(value).zfill(app_settings.RECOVERY_CODE_DIGITS) ret.append(fmt_value) return ret def _is_code_used(self, i: int) -> bool: used_mask = self.instance.data["used_mask"] return bool(used_mask & (1 << i)) def _mark_code_used(self, i: int) -> None: used_mask = self.instance.data["used_mask"] used_mask |= 1 << i self.instance.data["used_mask"] = used_mask self.instance.save() def get_unused_codes(self) -> List[str]: migrated_codes = self._get_migrated_codes() if migrated_codes is not None: return migrated_codes ret = [] for i, code in enumerate(self.generate_codes()): if self._is_code_used(i): continue ret.append(code) return ret def _validate_migrated_code(self, code: str) -> Optional[bool]: migrated_codes = 
self._get_migrated_codes() if migrated_codes is None: return None try: idx = migrated_codes.index(code) except ValueError: return False else: migrated_codes = self.instance.data["migrated_codes"] assert isinstance(migrated_codes, list) # nosec migrated_codes.pop(idx) self.instance.data["migrated_codes"] = migrated_codes self.instance.save() return True def validate_code(self, code: str) -> bool: ret = self._validate_migrated_code(code) if ret is not None: return ret for i, c in enumerate(self.generate_codes()): if self._is_code_used(i): continue if code == c: self._mark_code_used(i) return True return False
RecoveryCodes
python
huggingface__transformers
src/transformers/models/univnet/modeling_univnet.py
{ "start": 1698, "end": 3581 }
class ____(nn.Module): """ Implementation of the residual block for the kernel predictor network inside each location variable convolution block (LVCBlock). Parameters: config: (`UnivNetConfig`): Config for the `UnivNetModel` model. """ def __init__( self, config: UnivNetConfig, ): super().__init__() self.channels = config.model_in_channels self.kernel_size = config.kernel_predictor_conv_size self.dropout_prob = config.kernel_predictor_dropout self.leaky_relu_slope = config.leaky_relu_slope padding = (self.kernel_size - 1) // 2 self.dropout = nn.Dropout(self.dropout_prob) self.conv1 = nn.Conv1d(self.channels, self.channels, self.kernel_size, padding=padding, bias=True) self.conv2 = nn.Conv1d(self.channels, self.channels, self.kernel_size, padding=padding, bias=True) def forward(self, hidden_states: torch.FloatTensor): # hidden_states should have shape (batch_size, channels, seq_length) residual = hidden_states hidden_states = self.dropout(hidden_states) hidden_states = self.conv1(hidden_states) hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = self.conv2(hidden_states) hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) return hidden_states + residual def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.conv1) weight_norm(self.conv2) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.conv1) nn.utils.remove_weight_norm(self.conv2)
UnivNetKernelPredictorResidualBlock