language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
scikit-learn__scikit-learn
asv_benchmarks/benchmarks/linear_model.py
{ "start": 5553, "end": 6606 }
class ____(Predictor, Estimator, Benchmark): """ Benchmarks for Lasso. """ param_names = ["representation", "precompute"] params = (["dense", "sparse"], [True, False]) def setup_cache(self): super().setup_cache() def make_data(self, params): representation, precompute = params if representation == "dense": data = _synth_regression_dataset(n_samples=1000000, n_features=100) else: data = _synth_regression_sparse_dataset( n_samples=50000, n_features=5000, density=0.01 ) return data def make_estimator(self, params): representation, precompute = params estimator = Lasso(precompute=precompute, alpha=0.001, random_state=0) return estimator def make_scorers(self): make_gen_reg_scorers(self) def skip(self, params): representation, precompute = params if representation == "sparse" and precompute is False: return True return False
LassoBenchmark
python
django-compressor__django-compressor
compressor/tests/test_filters.py
{ "start": 7916, "end": 8431 }
class ____(TestCase): def test_csscompressor_filter(self): content = """/*! * django-compressor * Copyright (c) 2009-2014 Django Compressor authors */ p { background: rgb(51,102,153) url('../../images/image.gif'); } """ output = """/*! * django-compressor * Copyright (c) 2009-2014 Django Compressor authors */p{background:#369 url('../../images/image.gif')}""" self.assertEqual(output, CSSCompressorFilter(content).output())
CSSCompressorTestCase
python
pennersr__django-allauth
allauth/socialaccount/providers/snapchat/provider.py
{ "start": 305, "end": 445 }
class ____(ProviderAccount): def get_user_data(self): return self.account.extra_data.get("data", {}).get("me", {})
SnapchatAccount
python
spyder-ide__spyder
spyder/plugins/editor/widgets/status.py
{ "start": 430, "end": 848 }
class ____(StatusBarWidget): """Status bar widget for current file read/write mode.""" ID = "read_write_status" def update_readonly(self, readonly): """Update read/write file status.""" value = "R" if readonly else "RW" self.set_value(value.ljust(3)) def get_tooltip(self): """Return localized tool tip for widget.""" return _("File permissions")
ReadWriteStatus
python
hyperopt__hyperopt
hyperopt/mongoexp.py
{ "start": 4942, "end": 5082 }
class ____(Exception): """Raised when the search program tries to change the bandit attached to an experiment. """
DomainSwapError
python
pytorch__pytorch
torch/fx/experimental/proxy_tensor.py
{ "start": 72084, "end": 72185 }
class ____: def reset_proxy_mapping(self, base: Module, path: str) -> None: pass
_AttrProxy
python
kamyu104__LeetCode-Solutions
Python/compute-alternating-sum.py
{ "start": 37, "end": 278 }
class ____(object): def alternatingSum(self, nums): """ :type nums: List[int] :rtype: int """ return sum(nums[i] for i in xrange(0, len(nums), 2))-sum(nums[i] for i in xrange(1, len(nums), 2))
Solution
python
apache__airflow
providers/fab/tests/unit/fab/auth_manager/schemas/test_role_and_permission_schema.py
{ "start": 2477, "end": 3889 }
class ____: def test_serialize(self, minimal_app_for_auth_api): with minimal_app_for_auth_api.app_context(): role1 = create_role( minimal_app_for_auth_api, name="Test1", permissions=[ (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION), ], ) role2 = create_role( minimal_app_for_auth_api, name="Test2", permissions=[ (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG), ], ) instance = RoleCollection([role1, role2], total_entries=2) deserialized = role_collection_schema.dump(instance) assert deserialized == { "roles": [ { "name": "Test1", "actions": [{"resource": {"name": "Connections"}, "action": {"name": "can_create"}}], }, { "name": "Test2", "actions": [{"resource": {"name": "DAGs"}, "action": {"name": "can_edit"}}], }, ], "total_entries": 2, } delete_role(minimal_app_for_auth_api, "Test1") delete_role(minimal_app_for_auth_api, "Test2")
TestRoleCollectionSchema
python
getsentry__sentry
tests/sentry/metrics/test_datadog.py
{ "start": 314, "end": 2118 }
class ____(TestCase): def setUp(self) -> None: self.backend = DatadogMetricsBackend(prefix="sentrytest.") @patch("datadog.threadstats.base.ThreadStats.increment") def test_incr(self, mock_incr: MagicMock) -> None: self.backend.incr("foo", instance="bar") mock_incr.assert_called_once_with( "sentrytest.foo", 1, sample_rate=1, tags=["instance:bar"], host=get_hostname(hostname_from_config=True), ) @patch("datadog.threadstats.base.ThreadStats.timing") def test_timing(self, mock_timing: MagicMock) -> None: self.backend.timing("foo", 30, instance="bar") mock_timing.assert_called_once_with( "sentrytest.foo", 30, sample_rate=1, tags=["instance:bar"], host=get_hostname(hostname_from_config=True), ) @patch("datadog.threadstats.base.ThreadStats.gauge") def test_gauge(self, mock_gauge: MagicMock) -> None: self.backend.gauge("foo", 5, instance="bar") mock_gauge.assert_called_once_with( "sentrytest.foo", 5, sample_rate=1, tags=["instance:bar"], host=get_hostname(hostname_from_config=True), ) @patch("datadog.threadstats.base.ThreadStats.event") def test_event(self, mock_event: MagicMock) -> None: self.backend.event("foo", "bar", instance="baz") mock_event.assert_called_once_with( title="foo", message="bar", alert_type=None, aggregation_key=None, source_type_name=None, priority=None, tags=["instance:baz"], hostname=get_hostname(hostname_from_config=True), )
DatadogMetricsBackendTest
python
tensorflow__tensorflow
tensorflow/python/checkpoint/checkpoint_test.py
{ "start": 6197, "end": 6905 }
class ____(saver_lib.BaseSaverBuilder.SaveableObject): def __init__(self, primary_variable, mirrored_variable, name): self._primary_variable = primary_variable self._mirrored_variable = mirrored_variable tensor = self._primary_variable.read_value() spec = saver_lib.BaseSaverBuilder.SaveSpec( tensor=tensor, slice_spec="", name=name) super().__init__(tensor, [spec], name) def restore(self, restored_tensors, restored_shapes): """Restore the same value into both variables.""" tensor, = restored_tensors return control_flow_ops.group( self._primary_variable.assign(tensor), self._mirrored_variable.assign(tensor))
_MirroringSaveable
python
sympy__sympy
sympy/plotting/series.py
{ "start": 75088, "end": 78722 }
class ____(SurfaceBaseSeries): """Representation for a 3D surface consisting of three parametric SymPy expressions and a range.""" is_parametric = True def __init__(self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v, label="", **kwargs): super().__init__(**kwargs) self.expr_x = expr_x if callable(expr_x) else sympify(expr_x) self.expr_y = expr_y if callable(expr_y) else sympify(expr_y) self.expr_z = expr_z if callable(expr_z) else sympify(expr_z) self.expr = (self.expr_x, self.expr_y, self.expr_z) self.ranges = [var_start_end_u, var_start_end_v] self.color_func = kwargs.get("color_func", lambda x, y, z, u, v: z) self._set_surface_label(label) self._post_init() @property def var_u(self): return self.ranges[0][0] @property def var_v(self): return self.ranges[1][0] @property def start_u(self): try: return float(self.ranges[0][1]) except TypeError: return self.ranges[0][1] @property def end_u(self): try: return float(self.ranges[0][2]) except TypeError: return self.ranges[0][2] @property def start_v(self): try: return float(self.ranges[1][1]) except TypeError: return self.ranges[1][1] @property def end_v(self): try: return float(self.ranges[1][2]) except TypeError: return self.ranges[1][2] @property def nb_of_points_u(self): return self.n[0] @nb_of_points_u.setter def nb_of_points_u(self, v): n = self.n self.n = [v, n[1:]] @property def nb_of_points_v(self): return self.n[1] @nb_of_points_v.setter def nb_of_points_v(self, v): n = self.n self.n = [n[0], v, n[2]] def __str__(self): return self._str_helper( "parametric cartesian surface: (%s, %s, %s) for" " %s over %s and %s over %s" % ( str(self.expr_x), str(self.expr_y), str(self.expr_z), str(self.var_u), str((self.start_u, self.end_u)), str(self.var_v), str((self.start_v, self.end_v)), )) def get_parameter_meshes(self): return self.get_data()[3:] def get_meshes(self): """Return the x,y,z coordinates for plotting the surface. This function is available for back-compatibility purposes. 
Consider using ``get_data()`` instead. """ return self.get_data()[:3] def get_data(self): """Return arrays of coordinates for plotting. Returns ======= x : np.ndarray [n2 x n1] x-coordinates. y : np.ndarray [n2 x n1] y-coordinates. z : np.ndarray [n2 x n1] z-coordinates. mesh_u : np.ndarray [n2 x n1] Discretized u range. mesh_v : np.ndarray [n2 x n1] Discretized v range. """ np = import_module('numpy') results = self._evaluate() # mask out complex values for i, r in enumerate(results): _re, _im = np.real(r), np.imag(r) _re[np.invert(np.isclose(_im, np.zeros_like(_im)))] = np.nan results[i] = _re # TODO: remove this x, y, z = results[2:] self._xlim = (np.amin(x), np.amax(x)) self._ylim = (np.amin(y), np.amax(y)) self._zlim = (np.amin(z), np.amax(z)) return self._apply_transform(*results[2:], *results[:2]) ### Contours
ParametricSurfaceSeries
python
pallets__click
src/click/_compat.py
{ "start": 14039, "end": 18693 }
class ____: def __init__(self, f: t.IO[t.Any], tmp_filename: str, real_filename: str) -> None: self._f = f self._tmp_filename = tmp_filename self._real_filename = real_filename self.closed = False @property def name(self) -> str: return self._real_filename def close(self, delete: bool = False) -> None: if self.closed: return self._f.close() os.replace(self._tmp_filename, self._real_filename) self.closed = True def __getattr__(self, name: str) -> t.Any: return getattr(self._f, name) def __enter__(self) -> _AtomicFile: return self def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, tb: TracebackType | None, ) -> None: self.close(delete=exc_type is not None) def __repr__(self) -> str: return repr(self._f) def strip_ansi(value: str) -> str: return _ansi_re.sub("", value) def _is_jupyter_kernel_output(stream: t.IO[t.Any]) -> bool: while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)): stream = stream._stream return stream.__class__.__module__.startswith("ipykernel.") def should_strip_ansi( stream: t.IO[t.Any] | None = None, color: bool | None = None ) -> bool: if color is None: if stream is None: stream = sys.stdin return not isatty(stream) and not _is_jupyter_kernel_output(stream) return not color # On Windows, wrap the output streams with colorama to support ANSI # color codes. # NOTE: double check is needed so mypy does not analyze this on Linux if sys.platform.startswith("win") and WIN: from ._winconsole import _get_windows_console_stream def _get_argv_encoding() -> str: import locale return locale.getpreferredencoding() _ansi_stream_wrappers: cabc.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary() def auto_wrap_for_ansi(stream: t.TextIO, color: bool | None = None) -> t.TextIO: """Support ANSI color and style codes on Windows by wrapping a stream with colorama. 
""" try: cached = _ansi_stream_wrappers.get(stream) except Exception: cached = None if cached is not None: return cached import colorama strip = should_strip_ansi(stream, color) ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip) rv = t.cast(t.TextIO, ansi_wrapper.stream) _write = rv.write def _safe_write(s: str) -> int: try: return _write(s) except BaseException: ansi_wrapper.reset_all() raise rv.write = _safe_write # type: ignore[method-assign] try: _ansi_stream_wrappers[stream] = rv except Exception: pass return rv else: def _get_argv_encoding() -> str: return getattr(sys.stdin, "encoding", None) or sys.getfilesystemencoding() def _get_windows_console_stream( f: t.TextIO, encoding: str | None, errors: str | None ) -> t.TextIO | None: return None def term_len(x: str) -> int: return len(strip_ansi(x)) def isatty(stream: t.IO[t.Any]) -> bool: try: return stream.isatty() except Exception: return False def _make_cached_stream_func( src_func: t.Callable[[], t.TextIO | None], wrapper_func: t.Callable[[], t.TextIO], ) -> t.Callable[[], t.TextIO | None]: cache: cabc.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary() def func() -> t.TextIO | None: stream = src_func() if stream is None: return None try: rv = cache.get(stream) except Exception: rv = None if rv is not None: return rv rv = wrapper_func() try: cache[stream] = rv except Exception: pass return rv return func _default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin) _default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout) _default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr) binary_streams: cabc.Mapping[str, t.Callable[[], t.BinaryIO]] = { "stdin": get_binary_stdin, "stdout": get_binary_stdout, "stderr": get_binary_stderr, } text_streams: cabc.Mapping[str, t.Callable[[str | None, str | None], t.TextIO]] = { "stdin": get_text_stdin, "stdout": get_text_stdout, "stderr": get_text_stderr, }
_AtomicFile
python
kamyu104__LeetCode-Solutions
Python/paint-house-iv.py
{ "start": 42, "end": 939 }
class ____(object): def minCost(self, n, cost): """ :type n: int :type cost: List[List[int]] :rtype: int """ l = len(cost[0]) dp = [[0]*l for i in xrange(l)] for k in xrange(n//2): new_dp = [[float("inf")]*l for i in xrange(l)] for i in xrange(l): for j in xrange(l): if j == i: continue for ni in xrange(l): if ni == i: continue for nj in xrange(l): if nj == j or ni == nj: continue new_dp[ni][nj] = min(new_dp[ni][nj], dp[i][j]+cost[k][ni]+cost[~k][nj]) dp = new_dp return min(dp[i][j] for i in xrange(l) for j in xrange(l) if i != j)
Solution
python
huggingface__transformers
src/transformers/models/cohere2/modeling_cohere2.py
{ "start": 9327, "end": 12661 }
class ____(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Cohere2Config, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None self.sliding_window = config.sliding_window if layer_type == "sliding_attention" else None self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear( config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias ) def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings if self.sliding_window is not None: query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if 
past_key_values is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=self.sliding_window, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights
Cohere2Attention
python
airbytehq__airbyte
airbyte-integrations/connectors/source-google-ads/unit_tests/test_streams.py
{ "start": 2861, "end": 3488 }
class ____(MockGoogleAds): def send_request(self, query: str, customer_id: str, login_customer_id: str = "none"): self.count += 1 if self.count == 1: return mock_response_fails_1() else: return mock_response_fails_2() def mock_response_fails_one_date(): yield [ {"segments.date": "2021-01-03", "click_view.gclid": "3"}, {"segments.date": "2021-01-03", "click_view.gclid": "4"}, {"segments.date": "2021-01-03", "click_view.gclid": "5"}, {"segments.date": "2021-01-03", "click_view.gclid": "6"}, ] raise exception
MockGoogleAdsFails
python
pytorch__pytorch
torch/cuda/_sanitizer.py
{ "start": 19968, "end": 22374 }
class ____(TorchDispatchMode): def __init__(self) -> None: self.event_handler = EventHandler() torch._C._activate_gpu_trace() gpu_trace.register_callback_for_event_creation( self.event_handler._handle_event_creation ) gpu_trace.register_callback_for_event_deletion( self.event_handler._handle_event_deletion ) gpu_trace.register_callback_for_event_record( self.event_handler._handle_event_record ) gpu_trace.register_callback_for_event_wait( self.event_handler._handle_event_wait ) gpu_trace.register_callback_for_memory_allocation( self.event_handler._handle_memory_allocation ) gpu_trace.register_callback_for_memory_deallocation( self.event_handler._handle_memory_deallocation ) gpu_trace.register_callback_for_stream_creation( self.event_handler._handle_stream_creation ) gpu_trace.register_callback_for_device_synchronization( self.event_handler._handle_device_synchronization ) gpu_trace.register_callback_for_stream_synchronization( self.event_handler._handle_stream_synchronization ) gpu_trace.register_callback_for_event_synchronization( self.event_handler._handle_event_synchronization ) def __torch_dispatch__(self, func, types, args=(), kwargs=None): if kwargs is None: kwargs = {} is_factory = bool(FACTORY_FUNCTION_REGEX.match(func._schema.name)) argument_handler = ArgumentHandler() argument_handler.parse_inputs(func._schema, args, kwargs, is_factory=is_factory) outputs = func(*args, **kwargs) argument_handler.parse_outputs(func._schema, outputs, is_factory=is_factory) errors = self.event_handler._handle_kernel_launch( torch.cuda.current_stream().cuda_stream, argument_handler.dataptrs_read - argument_handler.dataptrs_written, argument_handler.dataptrs_written, argument_handler.outputs, func._schema, argument_handler.tensor_aliases, ) if errors: for error in errors: print(error, file=sys.stderr) raise CUDASanitizerErrors(errors) return outputs
CUDASanitizerDispatchMode
python
Textualize__textual
tests/test_freeze.py
{ "start": 129, "end": 246 }
class ____(Screen): def compose(self): yield Header() yield Input() yield Footer()
MyScreen
python
eth-brownie__brownie
brownie/utils/docopt.py
{ "start": 7892, "end": 9850 }
class ____(_Pattern): """Branch/inner node of a pattern tree.""" def __init__(self, *children) -> None: self.children = list(children) def match(self, left: list[_Pattern], collected: list[_Pattern] | None = None) -> Any: raise NotImplementedError # pragma: no cover def fix(self) -> _BranchPattern: self.fix_identities() self.fix_repeating_arguments() return self def fix_identities(self, uniq: list | None = None) -> None: """Make pattern-tree tips point to same object if they are equal.""" flattened = self.flat() uniq = list(set(flattened)) if uniq is None else uniq for i, child in enumerate(self.children): if not hasattr(child, "children"): assert child in uniq self.children[i] = uniq[uniq.index(child)] else: child.fix_identities(uniq) return None def fix_repeating_arguments(self) -> _BranchPattern: """Fix elements that should accumulate/increment values.""" for case in map(list, (child.children for child in _transform(self).children)): for e in (child for child in case if case.count(child) > 1): if type(e) is _Argument or type(e) is _Option and e.argcount: if e.value is None: e.value = [] elif type(e.value) is not list: e.value = cast(str, e.value) e.value = e.value.split() if type(e) is _Command or type(e) is _Option and e.argcount == 0: e.value = 0 return self def __repr__(self) -> str: return f'{self.__class__.__name__}({", ".join(map(repr, self.children))}' def flat(self, *types) -> Any: if type(self) in types: return [self] return sum((child.flat(*types) for child in self.children), [])
_BranchPattern
python
doocs__leetcode
solution/2900-2999/2912.Number of Ways to Reach Destination in the Grid/Solution.py
{ "start": 0, "end": 601 }
class ____: def numberOfWays( self, n: int, m: int, k: int, source: List[int], dest: List[int] ) -> int: mod = 10**9 + 7 a, b, c, d = 1, 0, 0, 0 for _ in range(k): aa = ((n - 1) * b + (m - 1) * c) % mod bb = (a + (n - 2) * b + (m - 1) * d) % mod cc = (a + (m - 2) * c + (n - 1) * d) % mod dd = (b + c + (n - 2) * d + (m - 2) * d) % mod a, b, c, d = aa, bb, cc, dd if source[0] == dest[0]: return a if source[1] == dest[1] else c return b if source[1] == dest[1] else d
Solution
python
pytest-dev__pytest-asyncio
docs/reference/markers/module_scoped_loop_strict_mode_example.py
{ "start": 313, "end": 450 }
class ____: async def test_this_runs_in_same_loop(self): global loop assert asyncio.get_running_loop() is loop
TestClassA
python
skorch-dev__skorch
skorch/history.py
{ "start": 152, "end": 3511 }
class ____: """Special placeholder since ``None`` is a valid value.""" def _not_none(items): """Whether the item is a placeholder or contains a placeholder.""" if not isinstance(items, (tuple, list)): items = (items,) return all(item is not _none for item in items) def _getitem_list_list(items, keys, tuple_=False): """Ugly but efficient extraction of multiple values from a list of items. """ filtered = [] for item in items: row = [] for key in keys: try: row.append(item[key]) except KeyError: break else: # no break if row: filtered.append(tuple(row) if tuple_ else row) if items and not filtered: return _none return filtered def _getitem_list_tuple(items, keys): return _getitem_list_list(items, keys, tuple_=True) def _getitem_list_str(items, key): filtered = [] for item in items: try: filtered.append(item[key]) except KeyError: continue if items and not filtered: return _none return filtered def _getitem_dict_list(item, keys): return [item.get(key, _none) for key in keys] def _getitem_dict_tuple(item, keys): return tuple(item.get(key, _none) for key in keys) def _getitem_dict_str(item, key): return item.get(key, _none) def _get_getitem_method(items, key): """Return method to extract values from items. For the given type of items and type of keys, find the correct method to extract the values. By calling this only once per items, we can save a lot of type checking, which can be slow if there are a lot of epochs and a lot of items. However, we now make the assumption that the type of items doesn't change (we know that the key doesn't change). This should always be true, except if something really weird happens. 
We are multi-dispatching based on the following possibilities: * history[0, 'foo', :10]: get a list of items * history[0, 'foo', 0]: get a dict * history[0, 'foo', :, 'bar']: key is a str * history[0, 'foo', :, ('bar', 'baz')]: key is list/tuple of str """ if isinstance(items, list): if isinstance(key, list): return _getitem_list_list if isinstance(key, tuple): return _getitem_list_tuple if isinstance(key, str): return _getitem_list_str raise TypeError("History access with given types not supported") if isinstance(items, dict): if isinstance(key, list): return _getitem_dict_list if isinstance(key, tuple): return _getitem_dict_tuple if isinstance(key, str): return _getitem_dict_str raise TypeError("History access with given types not supported") def _unpack_index(i): """Unpack index and return exactly four elements. If index is more shallow than 4, return None for trailing dimensions. If index is deeper than 4, raise a KeyError. """ if len(i) > 4: raise KeyError( "Tried to index history with {} indices but only " "4 indices are possible.".format(len(i))) # fill trailing indices with None i_e, k_e, i_b, k_b = i + tuple([None] * (4 - len(i))) return i_e, k_e, i_b, k_b
_none
python
scikit-learn__scikit-learn
sklearn/linear_model/_base.py
{ "start": 13105, "end": 15047 }
class ____: """Mixin for converting coef_ to and from CSR format. L1-regularizing estimators should inherit this. """ def densify(self): """ Convert coefficient matrix to dense array format. Converts the ``coef_`` member (back) to a numpy.ndarray. This is the default format of ``coef_`` and is required for fitting, so calling this method is only required on models that have previously been sparsified; otherwise, it is a no-op. Returns ------- self Fitted estimator. """ msg = "Estimator, %(name)s, must be fitted before densifying." check_is_fitted(self, msg=msg) if sp.issparse(self.coef_): self.coef_ = self.coef_.toarray() return self def sparsify(self): """ Convert coefficient matrix to sparse format. Converts the ``coef_`` member to a scipy.sparse matrix, which for L1-regularized models can be much more memory- and storage-efficient than the usual numpy.ndarray representation. The ``intercept_`` member is not converted. Returns ------- self Fitted estimator. Notes ----- For non-sparse models, i.e. when there are not many zeros in ``coef_``, this may actually *increase* memory usage, so use this method with care. A rule of thumb is that the number of zero elements, which can be computed with ``(coef_ == 0).sum()``, must be more than 50% for this to provide significant benefits. After calling this method, further fitting with the partial_fit method (if any) will not work until you call densify. """ msg = "Estimator, %(name)s, must be fitted before sparsifying." check_is_fitted(self, msg=msg) self.coef_ = sp.csr_matrix(self.coef_) return self
SparseCoefMixin
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/output/vt100.py
{ "start": 7372, "end": 11468 }
class ____(Dict[Attrs, str]): """ Cache for VT100 escape codes. It maps (fgcolor, bgcolor, bold, underline, strike, italic, blink, reverse, hidden, dim) tuples to VT100 escape sequences. :param true_color: When True, use 24bit colors instead of 256 colors. """ def __init__(self, color_depth: ColorDepth) -> None: self.color_depth = color_depth def __missing__(self, attrs: Attrs) -> str: ( fgcolor, bgcolor, bold, underline, strike, italic, blink, reverse, hidden, dim, ) = attrs parts: list[str] = [] parts.extend(self._colors_to_code(fgcolor or "", bgcolor or "")) if bold: parts.append("1") if dim: parts.append("2") if italic: parts.append("3") if blink: parts.append("5") if underline: parts.append("4") if reverse: parts.append("7") if hidden: parts.append("8") if strike: parts.append("9") if parts: result = "\x1b[0;" + ";".join(parts) + "m" else: result = "\x1b[0m" self[attrs] = result return result def _color_name_to_rgb(self, color: str) -> tuple[int, int, int]: "Turn 'ffffff', into (0xff, 0xff, 0xff)." try: rgb = int(color, 16) except ValueError: raise else: r = (rgb >> 16) & 0xFF g = (rgb >> 8) & 0xFF b = rgb & 0xFF return r, g, b def _colors_to_code(self, fg_color: str, bg_color: str) -> Iterable[str]: """ Return a tuple with the vt100 values that represent this color. """ # When requesting ANSI colors only, and both fg/bg color were converted # to ANSI, ensure that the foreground and background color are not the # same. (Unless they were explicitly defined to be the same color.) fg_ansi = "" def get(color: str, bg: bool) -> list[int]: nonlocal fg_ansi table = BG_ANSI_COLORS if bg else FG_ANSI_COLORS if not color or self.color_depth == ColorDepth.DEPTH_1_BIT: return [] # 16 ANSI colors. (Given by name.) elif color in table: return [table[color]] # RGB colors. (Defined as 'ffffff'.) else: try: rgb = self._color_name_to_rgb(color) except ValueError: return [] # When only 16 colors are supported, use that. 
if self.color_depth == ColorDepth.DEPTH_4_BIT: if bg: # Background. if fg_color != bg_color: exclude = [fg_ansi] else: exclude = [] code, name = _16_bg_colors.get_code(rgb, exclude=exclude) return [code] else: # Foreground. code, name = _16_fg_colors.get_code(rgb) fg_ansi = name return [code] # True colors. (Only when this feature is enabled.) elif self.color_depth == ColorDepth.DEPTH_24_BIT: r, g, b = rgb return [(48 if bg else 38), 2, r, g, b] # 256 RGB colors. else: return [(48 if bg else 38), 5, _256_colors[rgb]] result: list[int] = [] result.extend(get(fg_color, False)) result.extend(get(bg_color, True)) return map(str, result) def _get_size(fileno: int) -> tuple[int, int]: """ Get the size of this pseudo terminal. :param fileno: stdout.fileno() :returns: A (rows, cols) tuple. """ size = os.get_terminal_size(fileno) return size.lines, size.columns
_EscapeCodeCache
python
getsentry__sentry
src/sentry/objectstore/__init__.py
{ "start": 399, "end": 3698 }
class ____(MetricsBackend):
    """Objectstore metrics backend that forwards to Sentry's internal metrics."""

    def increment(
        self,
        name: str,
        value: int | float = 1,
        tags: Tags | None = None,
    ) -> None:
        """Increments a counter metric; fractional values are truncated to int."""
        sentry_metrics.incr(name, int(value), tags=tags)

    def gauge(self, name: str, value: int | float, tags: Tags | None = None) -> None:
        """
        Sets a gauge metric to the given value.
        """
        sentry_metrics.gauge(name, value, tags=tags)

    def distribution(
        self,
        name: str,
        value: int | float,
        tags: Tags | None = None,
        unit: str | None = None,
    ) -> None:
        """Records a value in a distribution metric."""
        sentry_metrics.distribution(name, value, tags=tags, unit=unit)


# Lazily-constructed singleton client shared by all attachment sessions.
_ATTACHMENTS_CLIENT: Client | None = None
_ATTACHMENTS_USECASE = Usecase("attachments", expiration_policy=TimeToLive(timedelta(days=30)))


def get_attachments_session(org: int, project: int) -> Session:
    """Returns an objectstore session scoped to the attachments usecase.

    The underlying ``Client`` is created on first use from the
    ``objectstore.config`` option and cached at module level.
    """
    global _ATTACHMENTS_CLIENT
    if not _ATTACHMENTS_CLIENT:
        # Imported lazily to avoid pulling in the options store at module
        # import time.
        from sentry import options as options_store

        options = options_store.get("objectstore.config")
        _ATTACHMENTS_CLIENT = Client(
            options["base_url"],
            metrics_backend=SentryMetricsBackend(),
        )

    return _ATTACHMENTS_CLIENT.session(_ATTACHMENTS_USECASE, org=org, project=project)


# Tri-state: None = not yet probed, True/False = result of the `docker ps` probe.
_IS_SYMBOLICATOR_CONTAINER: bool | None = None


def get_symbolicator_url(session: Session, key: str) -> str:
    """
    Gets the URL that Symbolicator shall use to access the object at the given key in Objectstore.

    In prod, this is simply the `object_url` returned by `objectstore_client`,
    as both Sentry and Symbolicator will talk to Objectstore using the same hostname.

    While in development or testing, we might need to replace the hostname,
    depending on how Symbolicator is running.
    This function runs a `docker ps` to automatically return the correct URL in the following 2 cases:
    - Symbolicator running in Docker (possibly via `devservices`) -- this mirrors `sentry`'s CI.
      If this is detected, we replace Objectstore's hostname with the one reachable in the Docker network.
      Note that this approach doesn't work if Objectstore is running both locally and in Docker,
      as we'll always rewrite the URL to the Docker one, so Sentry and Symbolicator
      might attempt to talk to 2 different Objectstores.
    - Symbolicator running locally -- this mirrors `symbolicator`'s CI.
      In this case, we don't need to rewrite the URL.
    """
    global _IS_SYMBOLICATOR_CONTAINER  # Cached to avoid running `docker ps` multiple times

    url = session.object_url(key)
    # Production: never rewrite.
    if not (settings.IS_DEV or in_test_environment()):
        return url

    if _IS_SYMBOLICATOR_CONTAINER is None:
        try:
            docker_ps = subprocess.run(
                ["docker", "ps", "--format", "{{.Names}}"], capture_output=True, text=True
            )
            _IS_SYMBOLICATOR_CONTAINER = "symbolicator" in docker_ps.stdout
        except Exception:
            # `docker` missing or failing: assume Symbolicator runs locally.
            _IS_SYMBOLICATOR_CONTAINER = False

    if not _IS_SYMBOLICATOR_CONTAINER:
        return url

    # Swap the hostname for the in-Docker-network name, preserving the port.
    replacement = "objectstore"
    parsed = urlparse(url)
    if parsed.port:
        replacement += f":{parsed.port}"
    updated = parsed._replace(netloc=replacement)
    return urlunparse(updated)
SentryMetricsBackend
python
google__pytype
pytype/tests/test_errors2.py
{ "start": 15879, "end": 16713 }
class ____(test_base.BaseTest):
  """Test matrix operations."""

  def test_matmul(self):
    # Neither str nor int defines __matmul__/__rmatmul__, so `@` must be
    # reported as unsupported-operands, naming both missing methods.
    errors = self.CheckWithErrors("""
      def f():
        return 'foo' @ 3  # unsupported-operands[e]
    """)
    self.assertErrorSequences(
        errors,
        {
            "e": [
                "@",
                "str",
                "int",
                "'__matmul__' on ",
                "str",
                "'__rmatmul__' on ",
                "int",
            ]
        },
    )

  def test_imatmul(self):
    # `v @= 3` falls back to A.__imatmul__, which only accepts A, so the
    # augmented assignment with an int must be flagged.
    errors = self.CheckWithErrors("""
      class A:
        def __imatmul__(self, x: "A"):
          pass
      def f():
        v = A()
        v @= 3  # unsupported-operands[e]
    """)
    self.assertErrorSequences(
        errors, {"e": ["@", "A", "int", "__imatmul__ on A", "A"]}
    )
MatrixOperationsTest
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeVarDefaultClass2.py
{ "start": 3941, "end": 3985 }
class ____(Generic[T1, *Ts1, T2]): ...  # Generic over (TypeVar, unpacked TypeVarTuple, TypeVar) -- exercises TypeVar-default ordering with a variadic in the middle; behavior defined by the checker under test
ClassTB
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/sensors/tasks.py
{ "start": 1249, "end": 3541 }
class ____(BaseSensorOperator):
    """
    Sensor that succeeds once a Cloud Tasks queue contains no tasks.

    Each poke lists the tasks currently in the queue and returns ``True``
    only when that listing comes back empty.

    :param location: the Google Cloud region of the queue (templated)
    :param project_id: the Google Cloud project ID for the subscription (templated)
    :param queue_name: name of the queue whose emptiness is being sensed (templated)
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        "project_id",
        "location",
        "queue_name",
        "gcp_conn_id",
        "impersonation_chain",
    )

    def __init__(
        self,
        *,
        location: str,
        project_id: str = PROVIDE_PROJECT_ID,
        queue_name: str | None = None,
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Plain attribute storage; all values may contain templates that
        # Airflow renders before poke() runs.
        self.project_id = project_id
        self.location = location
        self.queue_name = queue_name
        self.impersonation_chain = impersonation_chain
        self.gcp_conn_id = gcp_conn_id

    def poke(self, context: Context) -> bool:
        """Return True when the queue currently holds zero tasks."""
        hook = CloudTasksHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        # TODO uncomment page_size once https://issuetracker.google.com/issues/155978649?pli=1 gets fixed
        tasks = hook.list_tasks(
            location=self.location,
            queue_name=self.queue_name,
            # page_size=1
        )
        queue_is_empty = len(tasks) == 0
        self.log.info("tasks exhausted in cloud task queue?: %s", queue_is_empty)
        return queue_is_empty
TaskQueueEmptySensor
python
spyder-ide__spyder
external-deps/python-lsp-server/pylsp/workspace.py
{ "start": 19625, "end": 21826 }
class ____:
    """An open notebook document: an ordered list of cells plus metadata."""

    def __init__(
        self, uri, notebook_type, workspace, cells=None, version=None, metadata=None
    ) -> None:
        self.uri = uri
        self.notebook_type = notebook_type
        self.workspace = workspace
        self.version = version
        # Fall back to fresh containers when the caller passes None/empty.
        self.cells = cells or []
        self.metadata = metadata or {}
        self._lock = RLock()

    def __str__(self):
        return "Notebook with URI '%s'" % str(self.uri)

    def add_cells(self, new_cells: list, start: int) -> None:
        """Insert *new_cells* (in order) at index *start*, mutating in place."""
        for offset, cell in enumerate(new_cells):
            self.cells.insert(start + offset, cell)

    def remove_cells(self, start: int, delete_count: int) -> None:
        """Drop *delete_count* cells beginning at index *start*."""
        self.cells[start : start + delete_count] = []

    def cell_data(self):
        """Extract current cell data.

        Returns a dict (ordered by cell position) where the key is the cell
        uri and the value is a dict with line_start, line_end, and source
        attributes.
        """
        data = {}
        first_line = 0
        for cell in self.cells:
            uri = cell["document"]
            document = self.workspace.get_cell_document(uri)
            line_count = document.line_count
            data[uri] = {
                "line_start": first_line,
                "line_end": first_line + line_count - 1,
                "source": document.source,
            }
            first_line += line_count
        return data

    @lock
    def jedi_names(
        self,
        up_to_cell_uri: Optional[str] = None,
        all_scopes=False,
        definitions=True,
        references=False,
    ):
        """
        Get the names in the notebook up to a certain cell.

        Parameters
        ----------
        up_to_cell_uri: str, optional
            The cell uri to stop at. If None, all cells are considered.
        """
        collected = set()
        for cell in self.cells:
            uri = cell["document"]
            document = self.workspace.get_cell_document(uri)
            collected |= document.jedi_names(all_scopes, definitions, references)
            if uri == up_to_cell_uri:
                break
        return {name.name for name in collected}
Notebook
python
walkccc__LeetCode
solutions/1760. Minimum Limit of Balls in a Bag/1760.py
{ "start": 0, "end": 392 }
class ____: def minimumSize(self, nums: list[int], maxOperations: int) -> int: def numOperations(m: int) -> int: """Returns the number of operations required to make m penalty.""" return sum((num - 1) // m for num in nums) l = 1 r = max(nums) return bisect.bisect_left( range(l, r), True, key=lambda m: numOperations(m) <= maxOperations) + l
Solution
python
django__django
django/db/models/functions/math.py
{ "start": 1819, "end": 2045 }
class ____(Transform):
    # Rounds up to the nearest integer via SQL CEILING(); registered as the
    # `ceil` lookup name on expressions.
    function = "CEILING"
    lookup_name = "ceil"

    def as_oracle(self, compiler, connection, **extra_context):
        # Oracle spells this function CEIL rather than CEILING.
        return super().as_sql(compiler, connection, function="CEIL", **extra_context)
Ceil
python
pyparsing__pyparsing
tests/test_unit.py
{ "start": 393562, "end": 394240 }
class ____(Test02_WithoutPackrat):
    """
    rerun Test2 tests, now with unbounded packrat cache
    """

    def test000_assert_packrat_status(self):
        # Guard test (numbered 000 so it runs first): confirm that packrat
        # memoization is actually enabled with the unbounded cache before the
        # inherited Test02 cases execute.
        print("Packrat enabled:", ParserElement._packratEnabled)
        print(
            "Packrat cache:",
            type(ParserElement.packrat_cache).__name__,
            getattr(ParserElement.packrat_cache, "size", "- no size attribute -"),
        )
        self.assertTrue(ParserElement._packratEnabled, "packrat not enabled")
        self.assertEqual(
            "_UnboundedCache",
            type(ParserElement.packrat_cache).__name__,
            msg="incorrect cache type",
        )
Test08_WithUnboundedPackrat
python
PrefectHQ__prefect
tests/server/schemas/test_core.py
{ "start": 2496, "end": 4035 }
class ____:
    # Minimal stand-in for the pre-2.1.2 FlowRunPolicy schema, used to check
    # round-tripping between old and new field names.
    class OldFlowRunPolicy(PrefectBaseModel):
        # Schemas ignore extras during normal execution, but raise errors during tests if not explicitly ignored.
        model_config = ConfigDict(extra="ignore")

        max_retries: int = 0
        retry_delay_seconds: float = 0

    async def test_flow_run_policy_is_backwards_compatible(self):
        """
        In version 2.1.1 and prior, the FlowRunPolicy schema required two
        properties, `max_retries` and `retry_delay_seconds`. These properties
        are deprecated.

        This test ensures old clients can load new FlowRunPolicySchemas. It
        can be removed when the corresponding properties are removed.
        """
        empty_new_policy = schemas.core.FlowRunPolicy()

        # should not raise an error
        self.OldFlowRunPolicy(**empty_new_policy.model_dump())

    async def test_flow_run_policy_populates_new_properties_from_deprecated(self):
        """
        In version 2.1.1 and prior, the FlowRunPolicy schema required two
        properties, `max_retries` and `retry_delay_seconds`. These properties
        are deprecated.

        This test ensures new servers correctly parse old FlowRunPolicySchemas.
        It can be removed when the corresponding properties are removed.
        """
        old_policy = self.OldFlowRunPolicy(max_retries=1, retry_delay_seconds=2)

        new_policy = schemas.core.FlowRunPolicy(**old_policy.model_dump())

        # Deprecated names must populate the replacement fields.
        assert new_policy.retries == 1
        assert new_policy.retry_delay == 2
TestFlowRunPolicy
python
sqlalchemy__sqlalchemy
test/orm/declarative/test_mixin.py
{ "start": 2276, "end": 54782 }
class DeclarativeMixinTest(DeclarativeTestBase):
    """Tests for mixin classes used with the declarative system.

    Covers column/``declared_attr`` propagation from mixins and abstract
    bases, ``__tablename__`` / ``__table_args__`` / ``__mapper_args__``
    inheritance and overriding, and interaction with single- and
    joined-table inheritance.
    """

    @testing.combinations("generate_base", "subclass", argnames="base_type")
    def test_init_subclass_works(self, registry, base_type):
        reg = registry
        if base_type == "generate_base":

            class Base:
                def __init_subclass__(cls):
                    cls.id = Column(Integer, primary_key=True)

            Base = registry.generate_base(cls=Base)
        elif base_type == "subclass":

            class Base(DeclarativeBase):
                registry = reg

                def __init_subclass__(cls):
                    cls.id = Column(Integer, primary_key=True)

                    # hmmm what do we think of this.  if DeclarativeBase
                    # used a full metaclass approach we wouldn't need this.
                    super().__init_subclass__()

        else:
            assert False

        class Foo(Base):
            __tablename__ = "foo"
            name = Column(String)

        self.assert_compile(select(Foo), "SELECT foo.name, foo.id FROM foo")

    @testing.variation("base_type", ["generate_base", "subclass"])
    @testing.variation("attrname", ["table", "tablename"])
    @testing.variation("position", ["base", "abstract"])
    @testing.variation("assert_no_extra_cols", [True, False])
    def test_declared_attr_on_base(
        self, registry, base_type, attrname, position, assert_no_extra_cols
    ):
        """test #11509"""

        if position.abstract:
            if base_type.generate_base:
                SuperBase = registry.generate_base()

                class Base(SuperBase):
                    __abstract__ = True

                    if attrname.table:

                        @declared_attr.directive
                        def __table__(cls):
                            return Table(
                                cls.__name__,
                                cls.registry.metadata,
                                Column("id", Integer, primary_key=True),
                            )

                    elif attrname.tablename:

                        @declared_attr.directive
                        def __tablename__(cls):
                            return cls.__name__

                    else:
                        attrname.fail()

            elif base_type.subclass:

                class SuperBase(DeclarativeBase):
                    pass

                class Base(SuperBase):
                    __abstract__ = True

                    if attrname.table:

                        @declared_attr.directive
                        def __table__(cls):
                            return Table(
                                cls.__name__,
                                cls.registry.metadata,
                                Column("id", Integer, primary_key=True),
                            )

                    elif attrname.tablename:

                        @declared_attr.directive
                        def __tablename__(cls):
                            return cls.__name__

                    else:
                        attrname.fail()

            else:
                base_type.fail()
        else:
            if base_type.generate_base:

                class Base:
                    if attrname.table:

                        @declared_attr.directive
                        def __table__(cls):
                            return Table(
                                cls.__name__,
                                cls.registry.metadata,
                                Column("id", Integer, primary_key=True),
                            )

                    elif attrname.tablename:

                        @declared_attr.directive
                        def __tablename__(cls):
                            return cls.__name__

                    else:
                        attrname.fail()

                Base = registry.generate_base(cls=Base)
            elif base_type.subclass:

                class Base(DeclarativeBase):
                    if attrname.table:

                        @declared_attr.directive
                        def __table__(cls):
                            return Table(
                                cls.__name__,
                                cls.registry.metadata,
                                Column("id", Integer, primary_key=True),
                            )

                    elif attrname.tablename:

                        @declared_attr.directive
                        def __tablename__(cls):
                            return cls.__name__

                    else:
                        attrname.fail()

            else:
                base_type.fail()

        if attrname.table and assert_no_extra_cols:
            with expect_raises_message(
                sa.exc.ArgumentError,
                "Can't add additional column 'data' when specifying __table__",
            ):

                class MyNopeClass(Base):
                    data = Column(String)

            return

        class MyClass(Base):
            if attrname.tablename:
                id = Column(Integer, primary_key=True)  # noqa: A001

        class MyOtherClass(Base):
            if attrname.tablename:
                id = Column(Integer, primary_key=True)  # noqa: A001

        t = Table(
            "my_override",
            Base.metadata,
            Column("id", Integer, primary_key=True),
        )

        class MyOverrideClass(Base):
            __table__ = t

        Base.registry.configure()

        # __table__ was assigned
        assert isinstance(MyClass.__dict__["__table__"], schema.Table)
        assert isinstance(MyOtherClass.__dict__["__table__"], schema.Table)

        eq_(MyClass.__table__.name, "MyClass")
        eq_(MyClass.__table__.c.keys(), ["id"])

        eq_(MyOtherClass.__table__.name, "MyOtherClass")
        eq_(MyOtherClass.__table__.c.keys(), ["id"])

        is_(MyOverrideClass.__table__, t)

    def test_simple_wbase(self):
        class MyMixin:
            id = Column(
                Integer, primary_key=True, test_needs_autoincrement=True
            )

            def foo(self):
                return "bar" + str(self.id)

        class MyModel(Base, MyMixin):
            __tablename__ = "test"
            name = Column(String(100), nullable=False, index=True)

        Base.metadata.create_all(testing.db)
        session = fixture_session()
        session.add(MyModel(name="testing"))
        session.flush()
        session.expunge_all()
        obj = session.query(MyModel).one()
        eq_(obj.id, 1)
        eq_(obj.name, "testing")
        eq_(obj.foo(), "bar1")

    def test_simple_wdecorator(self):
        class MyMixin:
            id = Column(
                Integer, primary_key=True, test_needs_autoincrement=True
            )

            def foo(self):
                return "bar" + str(self.id)

        @mapper_registry.mapped
        class MyModel(MyMixin):
            __tablename__ = "test"
            name = Column(String(100), nullable=False, index=True)

        Base.metadata.create_all(testing.db)
        session = fixture_session()
        session.add(MyModel(name="testing"))
        session.flush()
        session.expunge_all()
        obj = session.query(MyModel).one()
        eq_(obj.id, 1)
        eq_(obj.name, "testing")
        eq_(obj.foo(), "bar1")

    @uses_deprecated(
        "The declarative_mixin decorator was used only by the now removed "
        "mypy plugin so it has no longer any use and can be safely removed."
    )
    def test_declarative_mixin_decorator(self):
        @declarative_mixin
        class MyMixin:
            id = Column(
                Integer, primary_key=True, test_needs_autoincrement=True
            )

            def foo(self):
                return "bar" + str(self.id)

        @mapper_registry.mapped
        class MyModel(MyMixin):
            __tablename__ = "test"
            name = Column(String(100), nullable=False, index=True)

        Base.metadata.create_all(testing.db)
        session = fixture_session()
        session.add(MyModel(name="testing"))
        session.flush()
        session.expunge_all()
        obj = session.query(MyModel).one()
        eq_(obj.id, 1)
        eq_(obj.name, "testing")
        eq_(obj.foo(), "bar1")

    @testing.combinations(Column, mapped_column, argnames="_column")
    def test_unique_column(self, _column):
        class MyMixin:
            id = _column(Integer, primary_key=True)
            value = _column(String, unique=True)

        class MyModel(Base, MyMixin):
            __tablename__ = "test"

        assert MyModel.__table__.c.value.unique

    @testing.combinations(Column, mapped_column, argnames="_column")
    def test_hierarchical_bases_wbase(self, _column):
        class MyMixinParent:
            id = _column(
                Integer, primary_key=True, test_needs_autoincrement=True
            )

            def foo(self):
                return "bar" + str(self.id)

        class MyMixin(MyMixinParent):
            baz = _column(String(100), nullable=False, index=True)

        class MyModel(Base, MyMixin):
            __tablename__ = "test"
            name = _column(String(100), nullable=False, index=True)

        Base.metadata.create_all(testing.db)
        session = fixture_session()
        session.add(MyModel(name="testing", baz="fu"))
        session.flush()
        session.expunge_all()
        obj = session.query(MyModel).one()
        eq_(obj.id, 1)
        eq_(obj.name, "testing")
        eq_(obj.foo(), "bar1")
        eq_(obj.baz, "fu")

    @testing.combinations(Column, mapped_column, argnames="_column")
    def test_hierarchical_bases_wdecorator(self, _column):
        class MyMixinParent:
            id = _column(
                Integer, primary_key=True, test_needs_autoincrement=True
            )

            def foo(self):
                return "bar" + str(self.id)

        class MyMixin(MyMixinParent):
            baz = _column(String(100), nullable=False, index=True)

        @mapper_registry.mapped
        class MyModel(MyMixin):
            __tablename__ = "test"
            name = Column(String(100), nullable=False, index=True)

        Base.metadata.create_all(testing.db)
        session = fixture_session()
        session.add(MyModel(name="testing", baz="fu"))
        session.flush()
        session.expunge_all()
        obj = session.query(MyModel).one()
        eq_(obj.id, 1)
        eq_(obj.name, "testing")
        eq_(obj.foo(), "bar1")
        eq_(obj.baz, "fu")

    @testing.combinations(Column, mapped_column, argnames="_column")
    def test_mixin_overrides_wbase(self, _column):
        """test a mixin that overrides a column on a superclass."""

        class MixinA:
            foo = _column(String(50))

        class MixinB(MixinA):
            foo = _column(Integer)

        class MyModelA(Base, MixinA):
            __tablename__ = "testa"
            id = _column(Integer, primary_key=True)

        class MyModelB(Base, MixinB):
            __tablename__ = "testb"
            id = _column(Integer, primary_key=True)

        eq_(MyModelA.__table__.c.foo.type.__class__, String)
        eq_(MyModelB.__table__.c.foo.type.__class__, Integer)

    def test_mixin_overrides_wdecorator(self):
        """test a mixin that overrides a column on a superclass."""

        class MixinA:
            foo = Column(String(50))

        class MixinB(MixinA):
            foo = Column(Integer)

        @mapper_registry.mapped
        class MyModelA(MixinA):
            __tablename__ = "testa"
            id = Column(Integer, primary_key=True)

        @mapper_registry.mapped
        class MyModelB(MixinB):
            __tablename__ = "testb"
            id = Column(Integer, primary_key=True)

        eq_(MyModelA.__table__.c.foo.type.__class__, String)
        eq_(MyModelB.__table__.c.foo.type.__class__, Integer)

    def test_same_base_multiple_times(self):
        class User(Base):
            __tablename__ = "user"

            id = Column(Integer, primary_key=True)
            name = Column(String)
            surname = Column(String)

        class SpecialUser(User):
            __abstract__ = True

        class ConvenienceStuff(User):
            __abstract__ = True

            def fullname(self):
                return self.name + " " + self.surname

        class Manager(SpecialUser, ConvenienceStuff, User):
            __tablename__ = "manager"

            id = Column(Integer, ForeignKey("user.id"), primary_key=True)
            title = Column(String)

        eq_(Manager.__table__.name, "manager")

    def test_same_base_multiple_metadata(self):
        m1 = MetaData()
        m2 = MetaData()

        class B1(Base):
            __abstract__ = True
            metadata = m1

        class B2(Base):
            __abstract__ = True
            metadata = m2

            def fullname(self):
                return self.name + " " + self.surname

        class User(B1):
            __tablename__ = "user"

            id = Column(Integer, primary_key=True)
            name = Column(String)
            surname = Column(String)

        class AD(B1):
            __tablename__ = "address"

            id = Column(Integer, primary_key=True)

        class OtherUser(B2):
            __tablename__ = "user"

            id = Column(Integer, primary_key=True)
            username = Column(String)

        class BUser(Base):
            __tablename__ = "user"

            id = Column(Integer, primary_key=True)
            login = Column(String)

        eq_(set(m1.tables), {"user", "address"})
        eq_(set(m2.tables), {"user"})
        eq_(set(Base.registry.metadata.tables), {"user"})

        eq_(Base.registry.metadata.tables["user"].c.keys(), ["id", "login"])
        eq_(m1.tables["user"].c.keys(), ["id", "name", "surname"])
        eq_(m2.tables["user"].c.keys(), ["id", "username"])

    def test_same_registry_multiple_metadata(self):
        m1 = MetaData()
        m2 = MetaData()

        reg = registry()

        class B1:
            metadata = m1

        class B2:
            metadata = m2

            def fullname(self):
                return self.name + " " + self.surname

        @reg.mapped
        class User(B1):
            __tablename__ = "user"

            id = Column(Integer, primary_key=True)
            name = Column(String)
            surname = Column(String)

        @reg.mapped
        class AD(B1):
            __tablename__ = "address"

            id = Column(Integer, primary_key=True)

        @reg.mapped
        class OtherUser(B2):
            __tablename__ = "user"

            id = Column(Integer, primary_key=True)
            username = Column(String)

        @reg.mapped
        class BUser:
            __tablename__ = "user"

            id = Column(Integer, primary_key=True)
            login = Column(String)

        eq_(set(m1.tables), {"user", "address"})
        eq_(set(m2.tables), {"user"})
        eq_(set(reg.metadata.tables), {"user"})

        eq_(reg.metadata.tables["user"].c.keys(), ["id", "login"])
        eq_(m1.tables["user"].c.keys(), ["id", "name", "surname"])
        eq_(m2.tables["user"].c.keys(), ["id", "username"])

    @testing.combinations(Column, mapped_column, argnames="_column")
    @testing.combinations("strname", "colref", "objref", argnames="fk_type")
    def test_fk_mixin(self, decl_base, fk_type, _column):
        class Bar(decl_base):
            __tablename__ = "bar"

            id = _column(Integer, primary_key=True)

        if fk_type == "strname":
            fk = ForeignKey("bar.id")
        elif fk_type == "colref":
            fk = ForeignKey(Bar.__table__.c.id)
        elif fk_type == "objref":
            fk = ForeignKey(Bar.id)
        else:
            assert False

        class MyMixin:
            foo = _column(Integer, fk)

        class A(MyMixin, decl_base):
            __tablename__ = "a"

            id = _column(Integer, primary_key=True)

        class B(MyMixin, decl_base):
            __tablename__ = "b"

            id = _column(Integer, primary_key=True)

        is_true(A.__table__.c.foo.references(Bar.__table__.c.id))
        is_true(B.__table__.c.foo.references(Bar.__table__.c.id))

        fka = list(A.__table__.c.foo.foreign_keys)[0]
        fkb = list(A.__table__.c.foo.foreign_keys)[0]
        is_not(fka, fkb)

    @testing.combinations(Column, mapped_column, argnames="_column")
    def test_fk_mixin_self_referential_error(self, decl_base, _column):
        class MyMixin:
            id = _column(Integer, primary_key=True)
            foo = _column(Integer, ForeignKey(id))

        with expect_raises_message(
            sa.exc.InvalidRequestError,
            "Columns with foreign keys to non-table-bound columns "
            "must be declared as @declared_attr",
        ):

            class A(MyMixin, decl_base):
                __tablename__ = "a"

    @testing.combinations(Column, mapped_column, argnames="_column")
    def test_fk_mixin_self_referential_declared_attr(self, decl_base, _column):
        class MyMixin:
            id = _column(Integer, primary_key=True)

            @declared_attr
            def foo(cls):
                return _column(Integer, ForeignKey(cls.id))

        class A(MyMixin, decl_base):
            __tablename__ = "a"

        class B(MyMixin, decl_base):
            __tablename__ = "b"

        # each subclass gets its own self-referential FK to its own table.
        # (an exact duplicate of this assertion block was removed; it was
        # a copy-paste artifact and asserted the same conditions twice)
        is_true(A.__table__.c.foo.references(A.__table__.c.id))
        is_true(B.__table__.c.foo.references(B.__table__.c.id))

        fka = list(A.__table__.c.foo.foreign_keys)[0]
        fkb = list(A.__table__.c.foo.foreign_keys)[0]
        is_not(fka, fkb)

    def test_not_allowed(self):
        class MyRelMixin:
            foo = relationship("Bar")

        def go():
            class MyModel(Base, MyRelMixin):
                __tablename__ = "foo"

        assert_raises(sa.exc.InvalidRequestError, go)

        class MyDefMixin:
            foo = deferred(Column("foo", String))

        def go():
            class MyModel(Base, MyDefMixin):
                __tablename__ = "foo"

        assert_raises(sa.exc.InvalidRequestError, go)

        class MyCPropMixin:
            foo = column_property(Column("foo", String))

        def go():
            class MyModel(Base, MyCPropMixin):
                __tablename__ = "foo"

        assert_raises(sa.exc.InvalidRequestError, go)

    def test_table_name_inherited(self):
        class MyMixin:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()

            id = Column(Integer, primary_key=True)

        class MyModel(Base, MyMixin):
            pass

        eq_(MyModel.__table__.name, "mymodel")

    def test_classproperty_still_works(self):
        class MyMixin:
            @classproperty
            def __tablename__(cls):
                return cls.__name__.lower()

            id = Column(Integer, primary_key=True)

        class MyModel(Base, MyMixin):
            __tablename__ = "overridden"

        eq_(MyModel.__table__.name, "overridden")

    def test_table_name_not_inherited(self):
        class MyMixin:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()

            id = Column(Integer, primary_key=True)

        class MyModel(Base, MyMixin):
            __tablename__ = "overridden"

        eq_(MyModel.__table__.name, "overridden")

    def test_table_name_inheritance_order(self):
        class MyMixin1:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower() + "1"

        class MyMixin2:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower() + "2"

        class MyModel(Base, MyMixin1, MyMixin2):
            id = Column(Integer, primary_key=True)

        eq_(MyModel.__table__.name, "mymodel1")

    def test_table_name_dependent_on_subclass(self):
        class MyHistoryMixin:
            @declared_attr
            def __tablename__(cls):
                return cls.parent_name + "_changelog"

        class MyModel(Base, MyHistoryMixin):
            parent_name = "foo"
            id = Column(Integer, primary_key=True)

        eq_(MyModel.__table__.name, "foo_changelog")

    def test_table_args_inherited(self):
        class MyMixin:
            __table_args__ = {"mysql_engine": "InnoDB"}

        class MyModel(Base, MyMixin):
            __tablename__ = "test"
            id = Column(Integer, primary_key=True)

        eq_(MyModel.__table__.kwargs, {"mysql_engine": "InnoDB"})

    def test_table_args_inherited_descriptor(self):
        class MyMixin:
            @declared_attr
            def __table_args__(cls):
                return {"info": cls.__name__}

        class MyModel(Base, MyMixin):
            __tablename__ = "test"
            id = Column(Integer, primary_key=True)

        eq_(MyModel.__table__.info, "MyModel")

    def test_table_args_inherited_single_table_inheritance(self):
        class MyMixin:
            __table_args__ = {"mysql_engine": "InnoDB"}

        class General(Base, MyMixin):
            __tablename__ = "test"
            id = Column(Integer, primary_key=True)
            type_ = Column(String(50))
            # NOTE(review): "__mapper__args" is misspelled
            # ("__mapper_args__"), so polymorphic_on is never applied here;
            # preserved as-is since the test passes either way — confirm
            # intent upstream.
            __mapper__args = {"polymorphic_on": type_}

        class Specific(General):
            __mapper_args__ = {"polymorphic_identity": "specific"}

        assert Specific.__table__ is General.__table__
        eq_(General.__table__.kwargs, {"mysql_engine": "InnoDB"})

    def test_columns_single_table_inheritance(self):
        """Test a column on a mixin with an alternate attribute name,
        mapped to a superclass and single-table inheritance subclass.

        The superclass table gets the column, the subclass shares
        the MapperProperty.

        """

        class MyMixin:
            foo = Column("foo", Integer)
            bar = Column("bar_newname", Integer)

        class General(Base, MyMixin):
            __tablename__ = "test"
            id = Column(Integer, primary_key=True)
            type_ = Column(String(50))
            # NOTE(review): misspelled "__mapper__args" — see companion
            # note in test_table_args_inherited_single_table_inheritance.
            __mapper__args = {"polymorphic_on": type_}

        class Specific(General):
            __mapper_args__ = {"polymorphic_identity": "specific"}

        assert General.bar.prop.columns[0] is General.__table__.c.bar_newname
        assert len(General.bar.prop.columns) == 1
        assert Specific.bar.prop is General.bar.prop

    @testing.skip_if(
        lambda: testing.against("oracle"),
        "Test has an empty insert in it at the moment",
    )
    @testing.combinations(Column, mapped_column, argnames="_column")
    def test_columns_single_inheritance_conflict_resolution(self, _column):
        """Test that a declared_attr can return the existing column and
        it will be ignored.  this allows conditional columns to be added.

        See [ticket:2472].

        """

        class Person(Base):
            __tablename__ = "person"
            id = _column(Integer, primary_key=True)

        class Mixin:
            @declared_attr
            def target_id(cls):
                return cls.__table__.c.get(
                    "target_id", _column(Integer, ForeignKey("other.id"))
                )

            @declared_attr
            def target(cls):
                return relationship("Other")

        class Engineer(Mixin, Person):
            """single table inheritance"""

        class Manager(Mixin, Person):
            """single table inheritance"""

        class Other(Base):
            __tablename__ = "other"
            id = _column(Integer, primary_key=True)

        is_(
            Engineer.target_id.property.columns[0],
            Person.__table__.c.target_id,
        )
        is_(
            Manager.target_id.property.columns[0], Person.__table__.c.target_id
        )

        # do a brief round trip on this
        Base.metadata.create_all(testing.db)
        session = fixture_session()
        o1, o2 = Other(), Other()
        session.add_all(
            [Engineer(target=o1), Manager(target=o2), Manager(target=o1)]
        )
        session.commit()
        eq_(session.query(Engineer).first().target, o1)

    @testing.combinations(Column, mapped_column, argnames="_column")
    def test_columns_joined_table_inheritance(self, _column):
        """Test a column on a mixin with an alternate attribute name,
        mapped to a superclass and joined-table inheritance subclass.

        Both tables get the column, in the case of the subclass the two
        columns are joined under one MapperProperty.

        """

        class MyMixin:
            foo = _column("foo", Integer)
            bar = _column("bar_newname", Integer)

        class General(Base, MyMixin):
            __tablename__ = "test"
            id = _column(Integer, primary_key=True)
            type_ = _column(String(50))
            __mapper_args__ = {"polymorphic_on": type_}

        class Specific(General):
            __tablename__ = "sub"
            id = _column(Integer, ForeignKey("test.id"), primary_key=True)
            __mapper_args__ = {"polymorphic_identity": "specific"}

        assert General.bar.prop.columns[0] is General.__table__.c.bar_newname
        assert len(General.bar.prop.columns) == 1
        assert Specific.bar.prop is General.bar.prop
        eq_(len(Specific.bar.prop.columns), 1)
        assert Specific.bar.prop.columns[0] is General.__table__.c.bar_newname

    def test_column_join_checks_superclass_type(self):
        """Test that the logic which joins subclass props to those
        of the superclass checks that the superclass property is a
        column.

        """

        class General(Base):
            __tablename__ = "test"
            id = Column(Integer, primary_key=True)
            general_id = Column(Integer, ForeignKey("test.id"))
            type_ = relationship("General")

        class Specific(General):
            __tablename__ = "sub"
            id = Column(Integer, ForeignKey("test.id"), primary_key=True)
            type_ = Column("foob", String(50))

        assert isinstance(General.type_.property, sa.orm.RelationshipProperty)
        assert Specific.type_.property.columns[0] is Specific.__table__.c.foob

    def test_column_join_checks_subclass_type(self):
        """Test that the logic which joins subclass props to those
        of the superclass checks that the subclass property is a
        column.

        """

        def go():
            class General(Base):
                __tablename__ = "test"
                id = Column(Integer, primary_key=True)
                type_ = Column("foob", Integer)

            class Specific(General):
                __tablename__ = "sub"
                id = Column(Integer, ForeignKey("test.id"), primary_key=True)
                specific_id = Column(Integer, ForeignKey("sub.id"))
                type_ = relationship("Specific")

        assert_raises_message(
            sa.exc.ArgumentError, "column 'foob' conflicts with property", go
        )

    def test_table_args_overridden(self):
        class MyMixin:
            __table_args__ = {"mysql_engine": "Foo"}

        class MyModel(Base, MyMixin):
            __tablename__ = "test"
            __table_args__ = {"mysql_engine": "InnoDB"}
            id = Column(Integer, primary_key=True)

        eq_(MyModel.__table__.kwargs, {"mysql_engine": "InnoDB"})

    @testing.teardown_events(orm_events.MapperEvents)
    def test_declare_first_mixin(self):
        canary = mock.Mock()

        class MyMixin:
            @classmethod
            def __declare_first__(cls):
                canary.declare_first__(cls)

            @classmethod
            def __declare_last__(cls):
                canary.declare_last__(cls)

        class MyModel(Base, MyMixin):
            __tablename__ = "test"
            id = Column(Integer, primary_key=True)

        configure_mappers()

        eq_(
            canary.mock_calls,
            [
                mock.call.declare_first__(MyModel),
                mock.call.declare_last__(MyModel),
            ],
        )

    @testing.teardown_events(orm_events.MapperEvents)
    def test_declare_first_base(self):
        canary = mock.Mock()

        class MyMixin:
            @classmethod
            def __declare_first__(cls):
                canary.declare_first__(cls)

            @classmethod
            def __declare_last__(cls):
                canary.declare_last__(cls)

        class Base(MyMixin):
            pass

        Base = declarative_base(cls=Base)

        class MyModel(Base):
            __tablename__ = "test"
            id = Column(Integer, primary_key=True)

        configure_mappers()

        eq_(
            canary.mock_calls,
            [
                mock.call.declare_first__(MyModel),
                mock.call.declare_last__(MyModel),
            ],
        )

    @testing.teardown_events(orm_events.MapperEvents)
    def test_declare_first_direct(self):
        canary = mock.Mock()

        class MyOtherModel(Base):
            __tablename__ = "test2"
            id = Column(Integer, primary_key=True)

            @classmethod
            def __declare_first__(cls):
                canary.declare_first__(cls)

            @classmethod
            def __declare_last__(cls):
                canary.declare_last__(cls)

        configure_mappers()

        eq_(
            canary.mock_calls,
            [
                mock.call.declare_first__(MyOtherModel),
                mock.call.declare_last__(MyOtherModel),
            ],
        )

    def test_mapper_args_declared_attr(self):
        class ComputedMapperArgs:
            @declared_attr
            def __mapper_args__(cls):
                if cls.__name__ == "Person":
                    return {"polymorphic_on": cls.discriminator}
                else:
                    return {"polymorphic_identity": cls.__name__}

        class Person(Base, ComputedMapperArgs):
            __tablename__ = "people"
            id = Column(Integer, primary_key=True)
            discriminator = Column("type", String(50))

        class Engineer(Person):
            pass

        configure_mappers()
        assert class_mapper(Person).polymorphic_on is Person.__table__.c.type
        eq_(class_mapper(Engineer).polymorphic_identity, "Engineer")

    def test_mapper_args_declared_attr_two(self):
        # same as test_mapper_args_declared_attr, but we repeat
        # ComputedMapperArgs on both classes for no apparent reason.

        class ComputedMapperArgs:
            @declared_attr
            def __mapper_args__(cls):
                if cls.__name__ == "Person":
                    return {"polymorphic_on": cls.discriminator}
                else:
                    return {"polymorphic_identity": cls.__name__}

        class Person(Base, ComputedMapperArgs):
            __tablename__ = "people"
            id = Column(Integer, primary_key=True)
            discriminator = Column("type", String(50))

        class Engineer(Person, ComputedMapperArgs):
            pass

        configure_mappers()
        assert class_mapper(Person).polymorphic_on is Person.__table__.c.type
        eq_(class_mapper(Engineer).polymorphic_identity, "Engineer")

    def test_table_args_composite(self):
        class MyMixin1:
            __table_args__ = {"info": {"baz": "bob"}}

        class MyMixin2:
            __table_args__ = {"info": {"foo": "bar"}}

        class MyModel(Base, MyMixin1, MyMixin2):
            __tablename__ = "test"

            @declared_attr
            def __table_args__(self):
                info = {}
                args = dict(info=info)
                info.update(MyMixin1.__table_args__["info"])
                info.update(MyMixin2.__table_args__["info"])
                return args

            id = Column(Integer, primary_key=True)

        eq_(MyModel.__table__.info, {"foo": "bar", "baz": "bob"})

    def test_mapper_args_inherited(self):
        class MyMixin:
            __mapper_args__ = {"always_refresh": True}

        class MyModel(Base, MyMixin):
            __tablename__ = "test"
            id = Column(Integer, primary_key=True)

        eq_(MyModel.__mapper__.always_refresh, True)

    def test_mapper_args_inherited_descriptor(self):
        class MyMixin:
            @declared_attr
            def __mapper_args__(cls):
                # tenuous, but illustrates the problem!

                if cls.__name__ == "MyModel":
                    return dict(always_refresh=True)
                else:
                    return dict(always_refresh=False)

        class MyModel(Base, MyMixin):
            __tablename__ = "test"
            id = Column(Integer, primary_key=True)

        eq_(MyModel.__mapper__.always_refresh, True)

    def test_mapper_args_polymorphic_on_inherited(self):
        class MyMixin:
            type_ = Column(String(50))
            __mapper_args__ = {"polymorphic_on": type_}

        class MyModel(Base, MyMixin):
            __tablename__ = "test"
            id = Column(Integer, primary_key=True)

        col = MyModel.__mapper__.polymorphic_on
        eq_(col.name, "type_")
        assert col.table is not None

    def test_mapper_args_overridden(self):
        class MyMixin:
            __mapper_args__ = dict(always_refresh=True)

        class MyModel(Base, MyMixin):
            __tablename__ = "test"
            __mapper_args__ = dict(always_refresh=False)
            id = Column(Integer, primary_key=True)

        eq_(MyModel.__mapper__.always_refresh, False)

    def test_mapper_args_composite(self):
        class MyMixin1:
            type_ = Column(String(50))
            __mapper_args__ = {"polymorphic_on": type_}

        class MyMixin2:
            __mapper_args__ = {"always_refresh": True}

        class MyModel(Base, MyMixin1, MyMixin2):
            __tablename__ = "test"

            @declared_attr
            def __mapper_args__(cls):
                args = {}
                args.update(MyMixin1.__mapper_args__)
                args.update(MyMixin2.__mapper_args__)
                if cls.__name__ != "MyModel":
                    args.pop("polymorphic_on")
                    args["polymorphic_identity"] = cls.__name__

                return args

            id = Column(Integer, primary_key=True)

        class MySubModel(MyModel):
            pass

        eq_(MyModel.__mapper__.polymorphic_on.name, "type_")
        assert MyModel.__mapper__.polymorphic_on.table is not None
        eq_(MyModel.__mapper__.always_refresh, True)
        eq_(MySubModel.__mapper__.always_refresh, True)
        eq_(MySubModel.__mapper__.polymorphic_identity, "MySubModel")

    def test_mapper_args_property(self):
        class MyModel(Base):
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()

            @declared_attr
            def __table_args__(cls):
                return {"mysql_engine": "InnoDB"}

            @declared_attr
            def __mapper_args__(cls):
                args = {}
                args["polymorphic_identity"] = cls.__name__
                return args

            id = Column(Integer, primary_key=True)

        class MySubModel(MyModel):
            id = Column(Integer, ForeignKey("mymodel.id"), primary_key=True)

        class MySubModel2(MyModel):
            __tablename__ = "sometable"
            id = Column(Integer, ForeignKey("mymodel.id"), primary_key=True)

        eq_(MyModel.__mapper__.polymorphic_identity, "MyModel")
        eq_(MySubModel.__mapper__.polymorphic_identity, "MySubModel")
        eq_(MyModel.__table__.kwargs["mysql_engine"], "InnoDB")
        eq_(MySubModel.__table__.kwargs["mysql_engine"], "InnoDB")
        eq_(MySubModel2.__table__.kwargs["mysql_engine"], "InnoDB")
        eq_(MyModel.__table__.name, "mymodel")
        eq_(MySubModel.__table__.name, "mysubmodel")

    def test_mapper_args_custom_base(self):
        """test the @declared_attr approach from a custom base."""

        class Base:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()

            @declared_attr
            def __table_args__(cls):
                return {"mysql_engine": "InnoDB"}

            @declared_attr
            def id(self):
                return Column(Integer, primary_key=True)

        Base = declarative_base(cls=Base)

        class MyClass(Base):
            pass

        class MyOtherClass(Base):
            pass

        eq_(MyClass.__table__.kwargs["mysql_engine"], "InnoDB")
        eq_(MyClass.__table__.name, "myclass")
        eq_(MyOtherClass.__table__.name, "myotherclass")
        assert MyClass.__table__.c.id.table is MyClass.__table__
        assert MyOtherClass.__table__.c.id.table is MyOtherClass.__table__

    def test_single_table_no_propagation(self):
        class IdColumn:
            id = Column(Integer, primary_key=True)

        class Generic(Base, IdColumn):
            __tablename__ = "base"
            discriminator = Column("type", String(50))
            __mapper_args__ = dict(polymorphic_on=discriminator)
            value = Column(Integer())

        class Specific(Generic):
            __mapper_args__ = dict(polymorphic_identity="specific")

        assert Specific.__table__ is Generic.__table__
        eq_(list(Generic.__table__.c.keys()), ["type", "value", "id"])
        assert (
            class_mapper(Specific).polymorphic_on is Generic.__table__.c.type
        )
        eq_(class_mapper(Specific).polymorphic_identity, "specific")

    def test_joined_table_propagation(self):
        class CommonMixin:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()

            __table_args__ = {"mysql_engine": "InnoDB"}
            timestamp = mapped_column(Integer)
            id = Column(Integer, primary_key=True)

        class Generic(Base, CommonMixin):
            discriminator = Column("python_type", String(50))
            __mapper_args__ = dict(polymorphic_on=discriminator)

        class Specific(Generic):
            __mapper_args__ = dict(polymorphic_identity="specific")
            id = Column(Integer, ForeignKey("generic.id"), primary_key=True)

        eq_(Generic.__table__.name, "generic")
        eq_(Specific.__table__.name, "specific")
        eq_(
            list(Generic.__table__.c.keys()),
            ["python_type", "timestamp", "id"],
        )
        eq_(list(Specific.__table__.c.keys()), ["id"])
        eq_(Generic.__table__.kwargs, {"mysql_engine": "InnoDB"})
        eq_(Specific.__table__.kwargs, {"mysql_engine": "InnoDB"})

    def test_some_propagation(self):
        class CommonMixin:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()

            __table_args__ = {"mysql_engine": "InnoDB"}
            timestamp = Column(Integer)

        class BaseType(Base, CommonMixin):
            discriminator = Column("type", String(50))
            __mapper_args__ = dict(polymorphic_on=discriminator)
            id = Column(Integer, primary_key=True)
            value = Column(Integer())

        class Single(BaseType):
            __tablename__ = None
            __mapper_args__ = dict(polymorphic_identity="type1")

        class Joined(BaseType):
            __mapper_args__ = dict(polymorphic_identity="type2")
            id = Column(Integer, ForeignKey("basetype.id"), primary_key=True)

        eq_(BaseType.__table__.name, "basetype")
        eq_(
            list(BaseType.__table__.c.keys()),
            ["type", "id", "value", "timestamp"],
        )
        eq_(BaseType.__table__.kwargs, {"mysql_engine": "InnoDB"})
        assert Single.__table__ is BaseType.__table__
        eq_(Joined.__table__.name, "joined")
        eq_(list(Joined.__table__.c.keys()), ["id"])
        eq_(Joined.__table__.kwargs, {"mysql_engine": "InnoDB"})

    def test_col_copy_vs_declared_attr_joined_propagation(self):
        class Mixin:
            a = Column(Integer)

            @declared_attr
            def b(cls):
                return Column(Integer)

        class A(Mixin, Base):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True)

        class B(A):
            __tablename__ = "b"
            id = Column(Integer, ForeignKey("a.id"), primary_key=True)

        assert "a" in A.__table__.c
        assert "b" in A.__table__.c
        assert "a" not in B.__table__.c
        assert "b" not in B.__table__.c

    def test_col_copy_vs_declared_attr_joined_propagation_newname(self):
        class Mixin:
            a = Column("a1", Integer)

            @declared_attr
            def b(cls):
                return Column("b1", Integer)

        class A(Mixin, Base):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True)

        class B(A):
            __tablename__ = "b"
            id = Column(Integer, ForeignKey("a.id"), primary_key=True)

        assert "a1" in A.__table__.c
        assert "b1" in A.__table__.c
        assert "a1" not in B.__table__.c
        assert "b1" not in B.__table__.c

    def test_col_copy_vs_declared_attr_single_propagation(self):
        class Mixin:
            a = Column(Integer)

            @declared_attr
            def b(cls):
                return Column(Integer)

        class A(Mixin, Base):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True)

        class B(A):
            pass

        assert "a" in A.__table__.c
        assert "b" in A.__table__.c

    def test_non_propagating_mixin(self):
        class NoJoinedTableNameMixin:
            @declared_attr
            def __tablename__(cls):
                if has_inherited_table(cls):
                    return None
                return cls.__name__.lower()

        class BaseType(Base, NoJoinedTableNameMixin):
            discriminator = Column("type", String(50))
            __mapper_args__ = dict(polymorphic_on=discriminator)
            id = Column(Integer, primary_key=True)
            value = Column(Integer())

        class Specific(BaseType):
            __mapper_args__ = dict(polymorphic_identity="specific")

        eq_(BaseType.__table__.name, "basetype")
        eq_(list(BaseType.__table__.c.keys()), ["type", "id", "value"])
        assert Specific.__table__ is BaseType.__table__
        assert (
            class_mapper(Specific).polymorphic_on is BaseType.__table__.c.type
        )
        eq_(class_mapper(Specific).polymorphic_identity, "specific")

    def test_non_propagating_mixin_used_for_joined(self):
        class TableNameMixin:
            @declared_attr
            def __tablename__(cls):
                if (
                    has_inherited_table(cls)
                    and TableNameMixin not in cls.__bases__
                ):
                    return None
                return cls.__name__.lower()

        class BaseType(Base, TableNameMixin):
            discriminator = Column("type", String(50))
            __mapper_args__ = dict(polymorphic_on=discriminator)
            id = Column(Integer, primary_key=True)
            value = Column(Integer())

        class Specific(BaseType, TableNameMixin):
            __mapper_args__ = dict(polymorphic_identity="specific")
            id = Column(Integer, ForeignKey("basetype.id"), primary_key=True)

        eq_(BaseType.__table__.name, "basetype")
        eq_(list(BaseType.__table__.c.keys()), ["type", "id", "value"])
        eq_(Specific.__table__.name, "specific")
        eq_(list(Specific.__table__.c.keys()), ["id"])

    def test_single_back_propagate(self):
        class ColumnMixin:
            timestamp = Column(Integer)

        class BaseType(Base):
            __tablename__ = "foo"
            discriminator = Column("type", String(50))
            __mapper_args__ = dict(polymorphic_on=discriminator)
            id = Column(Integer, primary_key=True)

        class Specific(BaseType, ColumnMixin):
            __mapper_args__ = dict(polymorphic_identity="specific")

        eq_(list(BaseType.__table__.c.keys()), ["type", "id", "timestamp"])

    def test_table_in_model_and_same_column_in_mixin(self):
        class ColumnMixin:
            data = Column(Integer)

        class Model(Base, ColumnMixin):
            __table__ = Table(
                "foo",
                Base.metadata,
                Column("data", Integer),
                Column("id", Integer, primary_key=True),
            )

        model_col = Model.__table__.c.data
        mixin_col = ColumnMixin.data
        assert model_col is not mixin_col
        eq_(model_col.name, "data")
        assert model_col.type.__class__ is mixin_col.type.__class__

    def test_table_in_model_and_different_named_column_in_mixin(self):
        class ColumnMixin:
            tada = Column(Integer)

        def go():
            class Model(Base, ColumnMixin):
                __table__ = Table(
                    "foo",
                    Base.metadata,
                    Column("data", Integer),
                    Column("id", Integer, primary_key=True),
                )

                foo = relationship("Dest")

        assert_raises_message(
            sa.exc.ArgumentError,
            "Can't add additional column 'tada' when specifying __table__",
            go,
        )

    def test_table_in_model_and_different_named_alt_key_column_in_mixin(self):
        # here, the __table__ has a column 'tada'.  We disallow
        # the add of the 'foobar' column, even though it's
        # keyed to 'tada'.

        class ColumnMixin:
            tada = Column("foobar", Integer)

        def go():
            class Model(Base, ColumnMixin):
                __table__ = Table(
                    "foo",
                    Base.metadata,
                    Column("data", Integer),
                    Column("tada", Integer),
                    Column("id", Integer, primary_key=True),
                )

                foo = relationship("Dest")

        assert_raises_message(
            sa.exc.ArgumentError,
            "Can't add additional column 'foobar' when "
            "specifying __table__",
            go,
        )

    def test_table_in_model_overrides_different_typed_column_in_mixin(self):
        class ColumnMixin:
            data = Column(String)

        class Model(Base, ColumnMixin):
            __table__ = Table(
                "foo",
                Base.metadata,
                Column("data", Integer),
                Column("id", Integer, primary_key=True),
            )

        model_col = Model.__table__.c.data
        mixin_col = ColumnMixin.data
        assert model_col is not mixin_col
        eq_(model_col.name, "data")
        assert model_col.type.__class__ is Integer

    def test_mixin_column_ordering(self):
        class Foo:
            col1 = Column(Integer)
            col3 = Column(Integer)

        class Bar:
            col2 = Column(Integer)
            col4 = Column(Integer)

        class Model(Base, Foo, Bar):
            id = Column(Integer, primary_key=True)
            __tablename__ = "model"

        eq_(
            list(Model.__table__.c.keys()),
            ["id", "col1", "col3", "col2", "col4"],
        )

    def test_honor_class_mro_one(self):
        class HasXMixin:
            @declared_attr
            def x(self):
                return Column(Integer)

        class Parent(HasXMixin, Base):
            __tablename__ = "parent"
            id = Column(Integer, primary_key=True)

        class Child(Parent):
            __tablename__ = "child"
            id = Column(Integer, ForeignKey("parent.id"), primary_key=True)

        assert "x" not in Child.__table__.c

    def test_honor_class_mro_two(self):
        class HasXMixin:
            @declared_attr
            def x(self):
                return Column(Integer)

        class Parent(HasXMixin, Base):
            __tablename__ = "parent"
            id = Column(Integer, primary_key=True)

            def x(self):
                return "hi"

        class C(Parent):
            __tablename__ = "c"
            id = Column(Integer, ForeignKey("parent.id"), primary_key=True)

        assert C().x() == "hi"

    def test_arbitrary_attrs_one(self):
        class HasMixin:
            @declared_attr
            def some_attr(cls):
                return cls.__name__ + "SOME ATTR"

        class Mapped(HasMixin, Base):
            __tablename__ = "t"
            id = Column(Integer, primary_key=True)

        eq_(Mapped.some_attr, "MappedSOME ATTR")
        eq_(Mapped.__dict__["some_attr"], "MappedSOME ATTR")

    def test_arbitrary_attrs_two(self):
        from sqlalchemy.ext.associationproxy import association_proxy

        class FilterA(Base):
            __tablename__ = "filter_a"
            id = Column(Integer(), primary_key=True)
            parent_id = Column(Integer(), ForeignKey("type_a.id"))
            filter = Column(String())

            def __init__(self, filter_, **kw):
                self.filter = filter_

        class FilterB(Base):
            __tablename__ = "filter_b"
            id = Column(Integer(), primary_key=True)
            parent_id = Column(Integer(), ForeignKey("type_b.id"))
            filter = Column(String())

            def __init__(self, filter_, **kw):
                self.filter = filter_

        class FilterMixin:
            @declared_attr
            def _filters(cls):
                return relationship(
                    cls.filter_class, cascade="all,delete,delete-orphan"
                )

            @declared_attr
            def filters(cls):
                return association_proxy("_filters", "filter")

        class TypeA(Base, FilterMixin):
            __tablename__ = "type_a"
            filter_class = FilterA
            id = Column(Integer(), primary_key=True)

        class TypeB(Base, FilterMixin):
            __tablename__ = "type_b"
            filter_class = FilterB
            id = Column(Integer(), primary_key=True)

        TypeA(filters=["foo"])
        TypeB(filters=["foo"])

    def test_arbitrary_attrs_three(self):
        class Mapped(Base):
            __tablename__ = "t"
            id = Column(Integer, primary_key=True)

            @declared_attr
            def some_attr(cls):
                return cls.__name__ + "SOME ATTR"

        eq_(Mapped.some_attr, "MappedSOME ATTR")
        eq_(Mapped.__dict__["some_attr"], "MappedSOME ATTR")

    def test_arbitrary_attrs_doesnt_apply_to_abstract_declared_attr(self):
        names = ["name1", "name2", "name3"]

        class SomeAbstract(Base):
            __abstract__ = True

            @declared_attr
            def some_attr(cls):
                return names.pop(0)

        class M1(SomeAbstract):
            __tablename__ = "t1"
            id = Column(Integer, primary_key=True)

        class M2(SomeAbstract):
            __tablename__ = "t2"
            id = Column(Integer, primary_key=True)

        eq_(M1.__dict__["some_attr"], "name1")
        eq_(M2.__dict__["some_attr"], "name2")

    def test_arbitrary_attrs_doesnt_apply_to_prepare_nocascade(self):
        names = ["name1", "name2", "name3"]

        class SomeAbstract(Base):
            __tablename__ = "t0"
            __no_table__ = True

            # used by AbstractConcreteBase
            _sa_decl_prepare_nocascade = True

            id = Column(Integer, primary_key=True)

            @declared_attr
            def some_attr(cls):
                return names.pop(0)

        class M1(SomeAbstract):
            __tablename__ = "t1"
            id = Column(Integer, primary_key=True)

        class M2(SomeAbstract):
            __tablename__ = "t2"
            id = Column(Integer, primary_key=True)

        eq_(M1.some_attr, "name2")
        eq_(M2.some_attr, "name3")
        eq_(M1.__dict__["some_attr"], "name2")
        eq_(M2.__dict__["some_attr"], "name3")
        assert isinstance(SomeAbstract.__dict__["some_attr"], declared_attr)
DeclarativeMixinTest
python
redis__redis-py
tests/test_multidb/test_healthcheck.py
{ "start": 436, "end": 2117 }
class ____: def test_policy_returns_true_for_all_successful_probes(self): mock_hc1 = Mock(spec=HealthCheck) mock_hc2 = Mock(spec=HealthCheck) mock_hc1.check_health.return_value = True mock_hc2.check_health.return_value = True mock_db = Mock(spec=Database) policy = HealthyAllPolicy(3, 0.01) assert policy.execute([mock_hc1, mock_hc2], mock_db) assert mock_hc1.check_health.call_count == 3 assert mock_hc2.check_health.call_count == 3 def test_policy_returns_false_on_first_failed_probe(self): mock_hc1 = Mock(spec=HealthCheck) mock_hc2 = Mock(spec=HealthCheck) mock_hc1.check_health.side_effect = [True, True, False] mock_hc2.check_health.return_value = True mock_db = Mock(spec=Database) policy = HealthyAllPolicy(3, 0.01) assert not policy.execute([mock_hc1, mock_hc2], mock_db) assert mock_hc1.check_health.call_count == 3 assert mock_hc2.check_health.call_count == 0 def test_policy_raise_unhealthy_database_exception(self): mock_hc1 = Mock(spec=HealthCheck) mock_hc2 = Mock(spec=HealthCheck) mock_hc1.check_health.side_effect = [True, True, ConnectionError] mock_hc2.check_health.return_value = True mock_db = Mock(spec=Database) policy = HealthyAllPolicy(3, 0.01) with pytest.raises(UnhealthyDatabaseException, match="Unhealthy database"): policy.execute([mock_hc1, mock_hc2], mock_db) assert mock_hc1.check_health.call_count == 3 assert mock_hc2.check_health.call_count == 0 @pytest.mark.onlynoncluster
TestHealthyAllPolicy
python
scipy__scipy
scipy/fftpack/tests/test_real_transforms.py
{ "start": 8406, "end": 8547 }
class ____(_TestDCTIBase): def setup_method(self): self.rdt = np.float64 self.dec = 10 self.type = 1
TestDCTIDouble
python
chroma-core__chroma
chromadb/test/property/test_collections.py
{ "start": 622, "end": 12327 }
class ____(RuleBasedStateMachine): collections: Bundle[strategies.ExternalCollection] _model: Dict[str, Optional[types.CollectionMetadata]] collections = Bundle("collections") def __init__(self, client: ClientAPI): super().__init__() self._model = {} self.client = client @initialize() def initialize(self) -> None: self.client.reset() self._model = {} @rule(target=collections, coll=strategies.collections()) def create_coll( self, coll: strategies.ExternalCollection ) -> MultipleResults[strategies.ExternalCollection]: # Metadata can either be None or a non-empty dict if coll.name in self.model or ( coll.metadata is not None and len(coll.metadata) == 0 ): with pytest.raises(Exception): c = self.client.create_collection( name=coll.name, metadata=coll.metadata, # type: ignore[arg-type] embedding_function=coll.embedding_function, ) return multiple() c = self.client.create_collection( name=coll.name, metadata=coll.metadata, # type: ignore[arg-type] embedding_function=coll.embedding_function, ) self.set_model(coll.name, coll.metadata) # type: ignore[arg-type] assert c.name == coll.name check_metadata(self.model[coll.name], c.metadata) return multiple(coll) @rule(coll=collections) def get_coll(self, coll: strategies.ExternalCollection) -> None: if coll.name in self.model: c = self.client.get_collection(name=coll.name) assert c.name == coll.name check_metadata(self.model[coll.name], c.metadata) else: with pytest.raises(Exception): self.client.get_collection(name=coll.name) @rule(coll=consumes(collections)) def delete_coll(self, coll: strategies.ExternalCollection) -> None: if coll.name in self.model: with invariants.collection_deleted(self.client, coll.name): self.client.delete_collection(name=coll.name) self.delete_from_model(coll.name) else: with pytest.raises(Exception): self.client.delete_collection(name=coll.name) with pytest.raises(Exception): self.client.get_collection(name=coll.name) @rule() def list_collections(self) -> None: colls = self.client.list_collections() 
assert len(colls) == len(self.model) for c in colls: assert c.name in self.model # @rule for list_collections with limit and offset @rule( limit=st.integers(min_value=1, max_value=5), offset=st.integers(min_value=0, max_value=5), ) def list_collections_with_limit_offset(self, limit: int, offset: int) -> None: colls = self.client.list_collections(limit=limit, offset=offset) total_collections = self.client.count_collections() # get all collections all_colls = self.client.list_collections() # manually slice the collections based on the given limit and offset man_colls = all_colls[offset : offset + limit] # given limit and offset, make various assertions regarding the total number of collections if limit + offset > total_collections: assert len(colls) == max(total_collections - offset, 0) # assert that our manually sliced collections are the same as the ones returned by the API assert colls == man_colls else: assert len(colls) == limit @rule( target=collections, new_metadata=st.one_of(st.none(), strategies.collection_metadata), coll=st.one_of(consumes(collections), strategies.collections()), ) def get_or_create_coll( self, coll: strategies.ExternalCollection, new_metadata: Optional[types.Metadata], ) -> MultipleResults[strategies.ExternalCollection]: # Cases for get_or_create # Case 0 # new_metadata is none, coll is an existing collection # get_or_create should return the existing collection with existing metadata # Case 1 # new_metadata is none, coll is a new collection # get_or_create should create a new collection with the metadata of None # Case 2 # new_metadata is not none, coll is an existing collection # get_or_create should return the existing collection with the original metadata # Case 3 # new_metadata is not none, coll is a new collection # get_or_create should create a new collection with the new metadata if new_metadata is not None and len(new_metadata) == 0: with pytest.raises(Exception): c = self.client.get_or_create_collection( name=coll.name, 
metadata=new_metadata, # type: ignore[arg-type] embedding_function=coll.embedding_function, ) return multiple() # Update model if coll.name not in self.model: # Handles case 1 and 3 coll.metadata = new_metadata self.set_model(coll.name, coll.metadata) # type: ignore[arg-type] # Update API c = self.client.get_or_create_collection( name=coll.name, metadata=new_metadata, # type: ignore[arg-type] embedding_function=coll.embedding_function, ) # Check that model and API are in sync assert c.name == coll.name check_metadata(self.model[coll.name], c.metadata) return multiple(coll) @rule( target=collections, coll=consumes(collections), new_metadata=strategies.collection_metadata, new_name=st.one_of(st.none(), strategies.collection_name()), ) def modify_coll( self, coll: strategies.ExternalCollection, new_metadata: types.Metadata, new_name: Optional[str], ) -> MultipleResults[strategies.ExternalCollection]: if coll.name not in self.model: with pytest.raises(Exception): c = self.client.get_collection(name=coll.name) return multiple() c = self.client.get_collection(name=coll.name) _metadata: Optional[Mapping[str, Any]] = self.model[coll.name] _name: str = coll.name if new_metadata is not None: # Can't set metadata to an empty dict if len(new_metadata) == 0: with pytest.raises(Exception): c = self.client.get_or_create_collection( name=coll.name, metadata=new_metadata, # type: ignore[arg-type] embedding_function=coll.embedding_function, ) return multiple() coll.metadata = new_metadata _metadata = new_metadata if new_name is not None: if new_name in self.model and new_name != coll.name: with pytest.raises(Exception): c.modify(metadata=new_metadata, name=new_name) # type: ignore[arg-type] return multiple() self.delete_from_model(coll.name) coll.name = new_name _name = new_name self.set_model(_name, _metadata) # type: ignore[arg-type] c.modify(metadata=_metadata, name=_name) # type: ignore[arg-type] c = self.client.get_collection(name=coll.name) assert c.name == coll.name 
check_metadata(self.model[coll.name], c.metadata) return multiple(coll) def set_model( self, name: str, metadata: Optional[types.CollectionMetadata], ) -> None: model = self.model model[name] = metadata def delete_from_model(self, name: str) -> None: model = self.model del model[name] @property def model(self) -> Dict[str, Optional[types.CollectionMetadata]]: return self._model def test_collections(caplog: pytest.LogCaptureFixture, client: ClientAPI) -> None: caplog.set_level(logging.ERROR) run_state_machine_as_test(lambda: CollectionStateMachine(client)) # type: ignore # Below are tests that have failed in the past. If your test fails, please add # it to protect against regressions in the test harness itself. If you need # help doing so, talk to anton. def test_previously_failing_one(client: ClientAPI) -> None: state = CollectionStateMachine(client) state.initialize() # I don't know why the typechecker is red here. This code is correct and is # pulled from the logs. (v1,) = state.get_or_create_coll( # type: ignore[misc] coll=strategies.ExternalCollection( name="jjn2yjLW1zp2T", metadata=None, embedding_function=hashing_embedding_function(dtype=numpy.float32, dim=863), # type: ignore[arg-type] ), new_metadata=None, ) (v6,) = state.get_or_create_coll( # type: ignore[misc] coll=strategies.ExternalCollection( name="jjn2yjLW1zp2T", metadata=None, embedding_function=hashing_embedding_function(dtype=numpy.float32, dim=863), # type: ignore[arg-type] ), new_metadata=None, ) state.modify_coll( coll=v1, new_metadata={"7": -1281, "fGe": -0.0, "K5j": "im"}, new_name=None ) state.modify_coll(coll=v6, new_metadata=None, new_name=None) # https://github.com/chroma-core/chroma/commit/cf476d70f0cebb7c87cb30c7172ba74d6ea175cd#diff-e81868b665d149bb315d86890dea6fc6a9fc9fc9ea3089aa7728142b54f622c5R210 def test_previously_failing_two(client: ClientAPI) -> None: state = CollectionStateMachine(client) state.initialize() (v13,) = state.get_or_create_coll( # type: ignore[misc] 
coll=strategies.ExternalCollection( name="C1030", metadata={}, embedding_function=hashing_embedding_function(dim=2, dtype=numpy.float32), # type: ignore[arg-type] ), new_metadata=None, ) (v15,) = state.modify_coll( # type: ignore[misc] coll=v13, new_metadata={ "0": "10", "40": "0", "p1nviWeL7fO": "qN", "7b": "YS", "VYWq4LEMWjCo": True, }, new_name="OF5F0MzbQg", ) state.get_or_create_coll( coll=strategies.ExternalCollection( name="VS0QGh", metadata={ "h": 5.681951615025145e-227, "A1": 61126, "uhUhLEEMfeC_kN": 2147483647, "weF": "pSP", "B3DSaP": False, "6H533K": 1.192092896e-07, }, embedding_function=hashing_embedding_function( # type: ignore[arg-type] dim=1915, dtype=numpy.float32 ), ), new_metadata={ "xVW09xUpDZA": 31734, "g": 1.1, "n1dUTalF-MY": -1000000.0, "y": "G3EtXTZ", "ugXZ_hK": 5494, }, ) v17 = state.modify_coll( # noqa: F841 coll=v15, new_metadata={"L35J2S": "K0l026"}, new_name="Ai1" ) v18 = state.get_or_create_coll(coll=v13, new_metadata=None) # noqa: F841 state.get_or_create_coll( coll=strategies.ExternalCollection( name="VS0QGh", metadata=None, embedding_function=hashing_embedding_function(dim=326, dtype=numpy.float16), # type: ignore[arg-type] ), new_metadata=None, )
CollectionStateMachine
python
PrefectHQ__prefect
tests/logging/test_logs_subscriber.py
{ "start": 3939, "end": 22682 }
class ____: token: Optional[str] hard_auth_failure: bool refuse_any_further_connections: bool hard_disconnect_after: Optional[str] # log id outgoing_logs: list[Log] def __init__(self): self.hard_auth_failure = False self.refuse_any_further_connections = False self.hard_disconnect_after = None self.outgoing_logs = [] @pytest.fixture def log_recorder() -> LogRecorder: return LogRecorder() @pytest.fixture def log_puppeteer() -> LogPuppeteer: return LogPuppeteer() @pytest.fixture async def logs_server( unused_tcp_port: int, log_recorder: LogRecorder, log_puppeteer: LogPuppeteer ): from starlette.status import WS_1008_POLICY_VIOLATION from websockets.asyncio.server import Server, ServerConnection, serve server: Server async def handler(socket: ServerConnection) -> None: assert socket.request path = socket.request.path log_recorder.connections += 1 if log_puppeteer.refuse_any_further_connections: raise ValueError("nope") log_recorder.path = path if path.endswith("/logs/out"): await outgoing_logs(socket) async def outgoing_logs(socket: ServerConnection): # 1. authentication auth_message = json.loads(await socket.recv()) assert auth_message["type"] == "auth" log_recorder.token = auth_message["token"] if log_puppeteer.token != log_recorder.token: if not log_puppeteer.hard_auth_failure: await socket.send( json.dumps({"type": "auth_failure", "reason": "nope"}) ) await socket.close(WS_1008_POLICY_VIOLATION) return await socket.send(json.dumps({"type": "auth_success"})) # 2. filter filter_message = json.loads(await socket.recv()) assert filter_message["type"] == "filter" log_recorder.filter = LogFilter.model_validate(filter_message["filter"]) # 3. 
send logs for log in log_puppeteer.outgoing_logs: await socket.send( json.dumps( { "type": "log", "log": log.model_dump(mode="json"), } ) ) if log_puppeteer.hard_disconnect_after == str(log.id): log_puppeteer.hard_disconnect_after = None raise ValueError("zonk") async with serve(handler, host="localhost", port=unused_tcp_port) as server: yield server @pytest.fixture def logs_api_url(logs_server, unused_tcp_port: int) -> str: return f"http://localhost:{unused_tcp_port}" @pytest.fixture def logs_cloud_api_url(logs_server, unused_tcp_port: int) -> str: return f"http://localhost:{unused_tcp_port}/accounts/A/workspaces/W" @pytest.fixture(autouse=True) def api_setup(logs_cloud_api_url: str): with temporary_settings( updates={ PREFECT_API_URL: logs_cloud_api_url, PREFECT_API_KEY: "my-token", PREFECT_API_AUTH_STRING: "my-token", # For base subscriber } ): yield async def test_subscriber_can_connect_with_defaults( Subscriber: Type[PrefectLogsSubscriber], socket_path: str, token: Optional[str], example_log_1: Log, example_log_2: Log, log_recorder: LogRecorder, log_puppeteer: LogPuppeteer, ): # For base subscriber (token=None), it will use auth string "my-token" # For cloud subscriber (token="my-token"), it will use the provided token expected_token = token or "my-token" log_puppeteer.token = expected_token log_puppeteer.outgoing_logs = [example_log_1, example_log_2] async with Subscriber() as subscriber: async for log in subscriber: log_recorder.logs.append(log) assert log_recorder.connections == 1 assert log_recorder.path == socket_path assert log_recorder.logs == [example_log_1, example_log_2] assert log_recorder.token == expected_token assert subscriber._filter assert log_recorder.filter == subscriber._filter async def test_cloud_subscriber_complains_without_api_url_and_key( CloudSubscriber: Type[PrefectCloudLogsSubscriber], socket_path: str, token: Optional[str], example_log_1: Log, example_log_2: Log, log_recorder: LogRecorder, log_puppeteer: LogPuppeteer, ): with 
temporary_settings(updates={PREFECT_API_KEY: "", PREFECT_API_URL: ""}): with pytest.raises(ValueError, match="must be provided or set"): CloudSubscriber() async def test_subscriber_can_connect_and_receive_one_log( Subscriber: Type[PrefectLogsSubscriber], socket_path: str, token: Optional[str], example_log_1: Log, example_log_2: Log, log_recorder: LogRecorder, log_puppeteer: LogPuppeteer, ): expected_token = token or "my-token" log_puppeteer.token = expected_token log_puppeteer.outgoing_logs = [example_log_1, example_log_2] filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO)) async with Subscriber( filter=filter, reconnection_attempts=0, ) as subscriber: async for log in subscriber: log_recorder.logs.append(log) assert log_recorder.connections == 1 assert log_recorder.path == socket_path assert log_recorder.logs == [example_log_1, example_log_2] assert log_recorder.token == expected_token assert log_recorder.filter == filter async def test_subscriber_specifying_negative_reconnects_gets_error( Subscriber: Type[PrefectLogsSubscriber], socket_path: str, token: Optional[str], example_log_1: Log, example_log_2: Log, log_recorder: LogRecorder, log_puppeteer: LogPuppeteer, ): expected_token = token or "my-token" log_puppeteer.token = expected_token log_puppeteer.outgoing_logs = [example_log_1, example_log_2] filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO)) with pytest.raises(ValueError, match="non-negative"): Subscriber( filter=filter, reconnection_attempts=-1, ) assert log_recorder.connections == 0 async def test_subscriber_raises_on_invalid_auth_with_soft_denial( CloudSubscriber: Type[PrefectCloudLogsSubscriber], socket_path: str, token: Optional[str], logs_cloud_api_url: str, example_log_1: Log, example_log_2: Log, log_recorder: LogRecorder, log_puppeteer: LogPuppeteer, ): log_puppeteer.token = "my-token" log_puppeteer.outgoing_logs = [example_log_1, example_log_2] filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO)) with pytest.raises(Exception, 
match="Unable to authenticate"): subscriber = CloudSubscriber( logs_cloud_api_url, "bogus", filter=filter, reconnection_attempts=0, ) await subscriber.__aenter__() assert log_recorder.connections == 1 assert log_recorder.path == socket_path assert log_recorder.token == "bogus" assert log_recorder.logs == [] async def test_cloud_subscriber_raises_on_invalid_auth_with_hard_denial( CloudSubscriber: Type[PrefectCloudLogsSubscriber], socket_path: str, token: Optional[str], logs_cloud_api_url: str, example_log_1: Log, example_log_2: Log, log_recorder: LogRecorder, log_puppeteer: LogPuppeteer, ): log_puppeteer.hard_auth_failure = True log_puppeteer.token = "my-token" log_puppeteer.outgoing_logs = [example_log_1, example_log_2] filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO)) with pytest.raises(Exception, match="Unable to authenticate"): subscriber = CloudSubscriber( logs_cloud_api_url, "bogus", filter=filter, reconnection_attempts=0, ) await subscriber.__aenter__() assert log_recorder.connections == 1 assert log_recorder.path == socket_path assert log_recorder.token == "bogus" assert log_recorder.logs == [] async def test_subscriber_reconnects_on_hard_disconnects( Subscriber: Type[PrefectLogsSubscriber], socket_path: str, token: Optional[str], example_log_1: Log, example_log_2: Log, log_recorder: LogRecorder, log_puppeteer: LogPuppeteer, ): expected_token = token or "my-token" log_puppeteer.token = expected_token log_puppeteer.outgoing_logs = [example_log_1, example_log_2] log_puppeteer.hard_disconnect_after = str(example_log_1.id) filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO)) async with Subscriber( filter=filter, reconnection_attempts=2, ) as subscriber: async for log in subscriber: log_recorder.logs.append(log) assert log_recorder.connections == 2 assert log_recorder.logs == [example_log_1, example_log_2] async def test_subscriber_gives_up_after_so_many_attempts( Subscriber: Type[PrefectLogsSubscriber], socket_path: str, token: Optional[str], 
example_log_1: Log, example_log_2: Log, log_recorder: LogRecorder, log_puppeteer: LogPuppeteer, ): expected_token = token or "my-token" log_puppeteer.token = expected_token log_puppeteer.outgoing_logs = [example_log_1, example_log_2] log_puppeteer.hard_disconnect_after = str(example_log_1.id) filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO)) with pytest.raises(ConnectionClosedError): async with Subscriber( filter=filter, reconnection_attempts=4, ) as subscriber: async for log in subscriber: log_puppeteer.refuse_any_further_connections = True log_recorder.logs.append(log) assert log_recorder.connections == 1 + 4 async def test_subscriber_skips_duplicate_logs( Subscriber: Type[PrefectLogsSubscriber], socket_path: str, token: Optional[str], example_log_1: Log, example_log_2: Log, log_recorder: LogRecorder, log_puppeteer: LogPuppeteer, ): expected_token = token or "my-token" log_puppeteer.token = expected_token log_puppeteer.outgoing_logs = [example_log_1, example_log_1, example_log_2] filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO)) async with Subscriber(filter=filter) as subscriber: async for log in subscriber: log_recorder.logs.append(log) assert log_recorder.logs == [example_log_1, example_log_2] def test_http_to_ws_conversion(): """Test HTTP to WebSocket URL conversion utility""" from prefect.logging.clients import http_to_ws assert http_to_ws("http://example.com/api") == "ws://example.com/api" assert http_to_ws("https://example.com/api/") == "wss://example.com/api" assert http_to_ws("https://example.com/api/v1/") == "wss://example.com/api/v1" def test_logs_out_socket_from_api_url(): """Test log WebSocket URL construction""" from prefect.logging.clients import logs_out_socket_from_api_url assert ( logs_out_socket_from_api_url("http://example.com/api") == "ws://example.com/api/logs/out" ) assert ( logs_out_socket_from_api_url("https://example.com/api/") == "wss://example.com/api/logs/out" ) def test_get_api_url_and_key_missing_values(): """Test 
_get_api_url_and_key error handling""" from prefect.logging.clients import _get_api_url_and_key with temporary_settings({PREFECT_API_URL: None, PREFECT_API_KEY: None}): with pytest.raises(ValueError, match="must be provided or set"): _get_api_url_and_key(None, None) with pytest.raises(ValueError, match="must be provided or set"): _get_api_url_and_key("http://example.com", None) with pytest.raises(ValueError, match="must be provided or set"): _get_api_url_and_key(None, "my-key") def test_get_api_url_and_key_success(): """Test _get_api_url_and_key with valid values""" from prefect.logging.clients import _get_api_url_and_key url, key = _get_api_url_and_key("http://example.com", "my-key") assert url == "http://example.com" assert key == "my-key" def test_subscriber_auth_token_missing_error(): """Test authentication error when no token is available""" from prefect.logging.clients import PrefectLogsSubscriber with temporary_settings({PREFECT_API_AUTH_STRING: None}): subscriber = PrefectLogsSubscriber("http://example.com") subscriber._api_key = None subscriber._auth_token = None # The auth check logic should fail when there's no token auth_token = subscriber._api_key or subscriber._auth_token assert auth_token is None # Verify that no token is available # This test validates that the subscriber correctly identifies missing tokens # The actual connection would fail with ValueError during _reconnect() async def test_subscriber_connection_closed_gracefully_stops_iteration(): """Test that ConnectionClosedOK gracefully stops iteration""" from unittest.mock import AsyncMock from websockets.exceptions import ConnectionClosedOK from prefect.logging.clients import PrefectLogsSubscriber subscriber = PrefectLogsSubscriber("http://example.com") subscriber._websocket = AsyncMock() subscriber._websocket.recv.side_effect = ConnectionClosedOK(None, None, None) with pytest.raises(StopAsyncIteration): await subscriber.__anext__() def test_subscriber_sleep_logic(): """Test that sleep logic 
is correct (without actually sleeping)""" # Just test that the sleep would be called correctly # The actual sleep is in the reconnection loop and depends on attempt number # For attempts > 2, sleep(1) should be called # This is tested by the condition: if i > 2: await asyncio.sleep(1) assert 3 > 2 # This would trigger sleep on attempt 3 assert 4 > 2 # This would trigger sleep on attempt 4 assert 1 <= 2 # This would NOT trigger sleep on attempt 1 assert 2 <= 2 # This would NOT trigger sleep on attempt 2 async def test_subscriber_auth_with_none_token(): """Test that authentication works when auth token is None (Prefect server)""" from prefect.logging.clients import PrefectLogsSubscriber with temporary_settings({PREFECT_API_AUTH_STRING: None}): subscriber = PrefectLogsSubscriber("http://example.com") subscriber._api_key = None subscriber._auth_token = None # Mock the websocket connection to succeed mock_connect = AsyncMock() mock_websocket = AsyncMock() # Create a mock pong that can be awaited class MockPong: def __await__(self): return iter([None]) mock_websocket.ping.return_value = MockPong() # Mock auth success response mock_websocket.recv.return_value = orjson.dumps( {"type": "auth_success"} ).decode() mock_connect.__aenter__.return_value = mock_websocket mock_connect.__aexit__ = AsyncMock() subscriber._connect = mock_connect # Should not raise ValueError - None tokens are valid for Prefect server await subscriber._reconnect() # Verify auth message was sent with None token # _reconnect sends two messages: auth first, then filter assert mock_websocket.send.call_count == 2 auth_call = mock_websocket.send.call_args_list[0] auth_message = orjson.loads(auth_call[0][0]) assert auth_message["type"] == "auth" assert auth_message["token"] is None async def test_subscriber_auth_with_empty_token(): """Test that authentication works when auth token is empty string""" from prefect.logging.clients import PrefectLogsSubscriber with temporary_settings({PREFECT_API_AUTH_STRING: 
""}): subscriber = PrefectLogsSubscriber("http://example.com") subscriber._api_key = None subscriber._auth_token = "" # Mock the websocket connection to succeed mock_connect = AsyncMock() mock_websocket = AsyncMock() # Create a mock pong that can be awaited class MockPong: def __await__(self): return iter([None]) mock_websocket.ping.return_value = MockPong() # Mock auth success response mock_websocket.recv.return_value = orjson.dumps( {"type": "auth_success"} ).decode() mock_connect.__aenter__.return_value = mock_websocket mock_connect.__aexit__ = AsyncMock() subscriber._connect = mock_connect # Should not raise ValueError - empty tokens are valid await subscriber._reconnect() # Verify auth message was sent with empty token assert mock_websocket.send.call_count == 2 auth_call = mock_websocket.send.call_args_list[0] auth_message = orjson.loads(auth_call[0][0]) assert auth_message["type"] == "auth" assert auth_message["token"] == "" async def test_subscriber_auth_with_falsy_tokens(): """Test authentication with various falsy token values""" from prefect.logging.clients import PrefectLogsSubscriber falsy_values = [None, ""] # Only test string-compatible falsy values for falsy_token in falsy_values: with temporary_settings({PREFECT_API_AUTH_STRING: falsy_token}): subscriber = PrefectLogsSubscriber("http://example.com") subscriber._api_key = None subscriber._auth_token = falsy_token # Mock the websocket connection to succeed mock_connect = AsyncMock() mock_websocket = AsyncMock() # Create a mock pong that can be awaited class MockPong: def __await__(self): return iter([None]) mock_websocket.ping.return_value = MockPong() # Mock auth success response mock_websocket.recv.return_value = orjson.dumps( {"type": "auth_success"} ).decode() mock_connect.__aenter__.return_value = mock_websocket mock_connect.__aexit__ = AsyncMock() subscriber._connect = mock_connect # Should not raise ValueError - all falsy tokens should be sent await subscriber._reconnect() # Verify auth message 
was sent with the falsy token assert mock_websocket.send.call_count == 2 auth_call = mock_websocket.send.call_args_list[0] auth_message = orjson.loads(auth_call[0][0]) assert auth_message["type"] == "auth" assert auth_message["token"] == falsy_token
LogPuppeteer
python
gevent__gevent
src/gevent/tests/test__hub.py
{ "start": 4018, "end": 12343 }
class ____(greentest.TestCase): def _reset_hub(self): hub = get_hub() try: del hub.exception_stream except AttributeError: pass if hub._threadpool is not None: hub.threadpool.join() hub.threadpool.kill() del hub.threadpool def setUp(self): super(TestPeriodicMonitoringThread, self).setUp() self.monitor_thread = gevent.config.monitor_thread gevent.config.monitor_thread = True from gevent.monkey import get_original self.lock = get_original('threading', 'Lock')() self.monitor_fired = 0 self.monitored_hubs = set() self._reset_hub() def tearDown(self): hub = get_hub() if not self.monitor_thread and hub.periodic_monitoring_thread: # If it was true, nothing to do. If it was false, tear things down. hub.periodic_monitoring_thread.kill() hub.periodic_monitoring_thread = None gevent.config.monitor_thread = self.monitor_thread self.monitored_hubs = None self._reset_hub() super(TestPeriodicMonitoringThread, self).tearDown() def _monitor(self, hub): with self.lock: self.monitor_fired += 1 if self.monitored_hubs is not None: self.monitored_hubs.add(hub) def test_config(self): self.assertEqual(0.1, gevent.config.max_blocking_time) def _run_monitoring_threads(self, monitor, kill=True): self.assertTrue(monitor.should_run) from threading import Condition cond = Condition() cond.acquire() def monitor_cond(_hub): cond.acquire() cond.notify_all() cond.release() if kill: # Only run once. Especially helpful on PyPy, where # formatting stacks is expensive. 
monitor.kill() monitor.add_monitoring_function(monitor_cond, 0.01) cond.wait() cond.release() monitor.add_monitoring_function(monitor_cond, None) @greentest.ignores_leakcheck def test_kill_removes_trace(self): from greenlet import gettrace hub = get_hub() hub.start_periodic_monitoring_thread() self.assertIsNotNone(gettrace()) hub.periodic_monitoring_thread.kill() self.assertIsNone(gettrace()) @greentest.ignores_leakcheck def test_blocking_this_thread(self): hub = get_hub() stream = hub.exception_stream = NativeStrIO() monitor = hub.start_periodic_monitoring_thread() self.assertIsNotNone(monitor) basic_monitor_func_count = 1 if get_this_psutil_process() is not None: # psutil is installed basic_monitor_func_count += 1 self.assertEqual(basic_monitor_func_count, len(monitor.monitoring_functions())) monitor.add_monitoring_function(self._monitor, 0.1) self.assertEqual(basic_monitor_func_count + 1, len(monitor.monitoring_functions())) self.assertEqual(self._monitor, monitor.monitoring_functions()[-1].function) self.assertEqual(0.1, monitor.monitoring_functions()[-1].period) # We must make sure we have switched greenlets at least once, # otherwise we can't detect a failure. 
gevent.sleep(hub.loop.approx_timer_resolution) assert hub.exception_stream is stream try: time.sleep(0.3) # Thrice the default self._run_monitoring_threads(monitor) finally: monitor.add_monitoring_function(self._monitor, None) self.assertEqual(basic_monitor_func_count, len(monitor._monitoring_functions)) assert hub.exception_stream is stream monitor.kill() del hub.exception_stream self.assertGreaterEqual(self.monitor_fired, 1) data = stream.getvalue() self.assertIn('appears to be blocked', data) self.assertIn('PeriodicMonitoringThread', data) def _prep_worker_thread(self): hub = get_hub() threadpool = hub.threadpool worker_hub = threadpool.apply(get_hub) assert hub is not worker_hub stream = NativeStrIO() # It does not have a monitoring thread yet self.assertIsNone(worker_hub.periodic_monitoring_thread) # So switch to it and give it one by letting it run. # XXX: Python 3.10 appears to have made some changes in the memory model. # Specifically, reading values from the background that are set in the # background hub *from this thread* is flaky. It takes them awhile to show up. # Really, that's correct and expected from a standard C point of view, as we # don't insert any memory barriers or things like that. It just always used to # work in the past. So now, rather than read them directly, we need to read them # from the background thread itself. The same, apparently, goes for # writing. # Need to figure out what exactly the change was. def task(): get_hub().exception_stream = stream gevent.sleep(0.01) mon = get_hub().periodic_monitoring_thread mon.add_monitoring_function(self._monitor, 0.1) return mon worker_monitor = threadpool.apply(task) self.assertIsNotNone(worker_monitor) return worker_hub, stream, worker_monitor @greentest.ignores_leakcheck def test_blocking_threadpool_thread_task_queue(self): # A threadpool thread spends much of its time # blocked on the native Lock object. 
Unless we take # care, if that thread had created a hub, it will constantly # be reported as blocked. worker_hub, stream, worker_monitor = self._prep_worker_thread() # Now wait until the monitoring threads have run. self._run_monitoring_threads(worker_monitor) worker_monitor.kill() # We did run the monitor in the worker thread, but it # did NOT report itself blocked by the worker thread sitting there. with self.lock: self.assertIn(worker_hub, self.monitored_hubs) self.assertEqual(stream.getvalue(), '') @greentest.ignores_leakcheck def test_blocking_threadpool_thread_one_greenlet(self): # If the background threadpool thread has no other greenlets to run # and never switches, then even if it has a hub # we don't report it blocking. The threadpool is *meant* to run # tasks that block. hub = get_hub() threadpool = hub.threadpool worker_hub, stream, worker_monitor = self._prep_worker_thread() task = threadpool.spawn(time.sleep, 0.3) # Now wait until the monitoring threads have run. self._run_monitoring_threads(worker_monitor) # and be sure the task ran task.get() worker_monitor.kill() # We did run the monitor in the worker thread, but it # did NOT report itself blocked by the worker thread with self.lock: self.assertIn(worker_hub, self.monitored_hubs) self.assertEqual(stream.getvalue(), '') @greentest.ignores_leakcheck def test_blocking_threadpool_thread_multi_greenlet(self): # If the background threadpool thread ever switches # greenlets, monitoring goes into affect. hub = get_hub() threadpool = hub.threadpool worker_hub, stream, worker_monitor = self._prep_worker_thread() def task(): g = gevent.spawn(time.sleep, 0.7) g.join() task = threadpool.spawn(task) # Now wait until the monitoring threads have run. 
self._run_monitoring_threads(worker_monitor, kill=False) # and be sure the task ran task.get() worker_monitor.kill() # We did run the monitor in the worker thread, and it # DID report itself blocked by the worker thread self.assertIn(worker_hub, self.monitored_hubs) data = stream.getvalue() self.assertIn('appears to be blocked', data) self.assertIn('PeriodicMonitoringThread', data)
TestPeriodicMonitoringThread
python
automl__auto-sklearn
test/test_pipeline/components/feature_preprocessing/test_select_rates_classification.py
{ "start": 302, "end": 4623 }
class ____(unittest.TestCase): def test_default_configuration(self): transformation, original = _test_preprocessing(SelectClassificationRates) self.assertEqual(transformation.shape[0], original.shape[0]) self.assertEqual(transformation.shape[1], 3) self.assertFalse((transformation == 0).all()) transformation, original = _test_preprocessing( SelectClassificationRates, make_sparse=True ) self.assertTrue(scipy.sparse.issparse(transformation)) self.assertEqual(transformation.shape[0], original.shape[0]) self.assertEqual(transformation.shape[1], int(original.shape[1] / 2)) # Custom preprocessing test to check if clipping to zero works X_train, Y_train, X_test, Y_test = get_dataset(dataset="digits") original_X_train = X_train.copy() ss = sklearn.preprocessing.StandardScaler() X_train = ss.fit_transform(X_train) configuration_space = ( SelectClassificationRates.get_hyperparameter_search_space() ) default = configuration_space.get_default_configuration() preprocessor = SelectClassificationRates( random_state=1, **{ hp_name: default[hp_name] for hp_name in default if default[hp_name] is not None }, ) transformer = preprocessor.fit(X_train, Y_train) transformation, original = transformer.transform(X_train), original_X_train self.assertEqual(transformation.shape[0], original.shape[0]) # I don't know why it's 52 here and not 32 which would be half of the # number of features. 
Seems to be related to a runtime warning raised # by sklearn self.assertEqual(transformation.shape[1], 52) def test_preprocessing_dtype(self): # Dense # np.float32 X_train, Y_train, X_test, Y_test = get_dataset("iris") self.assertEqual(X_train.dtype, np.float32) configuration_space = ( SelectClassificationRates.get_hyperparameter_search_space() ) default = configuration_space.get_default_configuration() preprocessor = SelectClassificationRates( random_state=1, **{hp_name: default[hp_name] for hp_name in default} ) preprocessor.fit(X_train, Y_train) Xt = preprocessor.transform(X_train) self.assertEqual(Xt.dtype, np.float32) # np.float64 X_train, Y_train, X_test, Y_test = get_dataset("iris") X_train = X_train.astype(np.float64) configuration_space = ( SelectClassificationRates.get_hyperparameter_search_space() ) default = configuration_space.get_default_configuration() preprocessor = SelectClassificationRates( random_state=1, **{hp_name: default[hp_name] for hp_name in default} ) preprocessor.fit(X_train, Y_train) Xt = preprocessor.transform(X_train) self.assertEqual(Xt.dtype, np.float64) # Sparse # np.float32 X_train, Y_train, X_test, Y_test = get_dataset("iris", make_sparse=True) self.assertEqual(X_train.dtype, np.float32) configuration_space = ( SelectClassificationRates.get_hyperparameter_search_space() ) default = configuration_space.get_default_configuration() preprocessor = SelectClassificationRates( random_state=1, **{hp_name: default[hp_name] for hp_name in default} ) preprocessor.fit(X_train, Y_train) Xt = preprocessor.transform(X_train) self.assertEqual(Xt.dtype, np.float32) # np.float64 X_train, Y_train, X_test, Y_test = get_dataset("iris", make_sparse=True) X_train = X_train.astype(np.float64) configuration_space = ( SelectClassificationRates.get_hyperparameter_search_space() ) default = configuration_space.get_default_configuration() preprocessor = SelectClassificationRates( random_state=1, **{hp_name: default[hp_name] for hp_name in default} ) 
preprocessor.fit(X_train, Y_train) Xt = preprocessor.transform(X_train) self.assertEqual(Xt.dtype, np.float64)
SelectClassificationRatesComponentTest
python
numpy__numpy
numpy/distutils/fcompiler/nv.py
{ "start": 81, "end": 1541 }
class ____(FCompiler): """ NVIDIA High Performance Computing (HPC) SDK Fortran Compiler https://developer.nvidia.com/hpc-sdk Since august 2020 the NVIDIA HPC SDK includes the compilers formerly known as The Portland Group compilers, https://www.pgroup.com/index.htm. See also `numpy.distutils.fcompiler.pg`. """ compiler_type = 'nv' description = 'NVIDIA HPC SDK' version_pattern = r'\s*(nvfortran|.+ \(aka nvfortran\)) (?P<version>[\d.-]+).*' executables = { 'version_cmd': ["<F90>", "-V"], 'compiler_f77': ["nvfortran"], 'compiler_fix': ["nvfortran", "-Mfixed"], 'compiler_f90': ["nvfortran"], 'linker_so': ["<F90>"], 'archiver': ["ar", "-cr"], 'ranlib': ["ranlib"] } pic_flags = ['-fpic'] module_dir_switch = '-module ' module_include_switch = '-I' def get_flags(self): opt = ['-Minform=inform', '-Mnosecond_underscore'] return self.pic_flags + opt def get_flags_opt(self): return ['-fast'] def get_flags_debug(self): return ['-g'] def get_flags_linker_so(self): return ["-shared", '-fpic'] def runtime_library_dir_option(self, dir): return '-R%s' % dir if __name__ == '__main__': from distutils import log log.set_verbosity(2) from numpy.distutils import customized_fcompiler print(customized_fcompiler(compiler='nv').get_version())
NVHPCFCompiler
python
python-openxml__python-docx
src/docx/enum/table.py
{ "start": 116, "end": 1627 }
class ____(BaseXmlEnum): """Alias: **WD_ALIGN_VERTICAL** Specifies the vertical alignment of text in one or more cells of a table. Example:: from docx.enum.table import WD_ALIGN_VERTICAL table = document.add_table(3, 3) table.cell(0, 0).vertical_alignment = WD_ALIGN_VERTICAL.BOTTOM MS API name: `WdCellVerticalAlignment` https://msdn.microsoft.com/en-us/library/office/ff193345.aspx """ TOP = (0, "top", "Text is aligned to the top border of the cell.") """Text is aligned to the top border of the cell.""" CENTER = (1, "center", "Text is aligned to the center of the cell.") """Text is aligned to the center of the cell.""" BOTTOM = (3, "bottom", "Text is aligned to the bottom border of the cell.") """Text is aligned to the bottom border of the cell.""" BOTH = ( 101, "both", "This is an option in the OpenXml spec, but not in Word itself. It's not" " clear what Word behavior this setting produces. If you find out please" " let us know and we'll update this documentation. Otherwise, probably best" " to avoid this option.", ) """This is an option in the OpenXml spec, but not in Word itself. It's not clear what Word behavior this setting produces. If you find out please let us know and we'll update this documentation. Otherwise, probably best to avoid this option. """ WD_ALIGN_VERTICAL = WD_CELL_VERTICAL_ALIGNMENT
WD_CELL_VERTICAL_ALIGNMENT
python
mozilla__bleach
bleach/_vendor/parse.py
{ "start": 5322, "end": 6307 }
class ____(object): """Shared methods for the parsed result objects containing a netloc element""" __slots__ = () @property def username(self): return self._userinfo[0] @property def password(self): return self._userinfo[1] @property def hostname(self): hostname = self._hostinfo[0] if not hostname: return None # Scoped IPv6 address may have zone info, which must not be lowercased # like http://[fe80::822a:a8ff:fe49:470c%tESt]:1234/keys separator = '%' if isinstance(hostname, str) else b'%' hostname, percent, zone = hostname.partition(separator) return hostname.lower() + percent + zone @property def port(self): port = self._hostinfo[1] if port is not None: port = int(port, 10) if not ( 0 <= port <= 65535): raise ValueError("Port out of range 0-65535") return port
_NetlocResultMixinBase
python
dagster-io__dagster
python_modules/libraries/dagster-msteams/dagster_msteams/utils.py
{ "start": 42, "end": 414 }
class ____(NamedTuple): text: str url: str def build_message_with_link( is_legacy_webhook: bool, text: str, link: Optional[MSTeamsHyperlink] ) -> str: if link: if is_legacy_webhook: return f"{text} <a href='{link.url}'>{link.text}</a>" else: return f"{text} [{link.text}]({link.url})" return text
MSTeamsHyperlink
python
huggingface__transformers
tests/models/roc_bert/test_modeling_roc_bert.py
{ "start": 1524, "end": 19096 }
class ____: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, pronunciation_vocab_size=99, shape_vocab_size=99, pronunciation_embed_dim=32, shape_embed_dim=32, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.pronunciation_vocab_size = pronunciation_vocab_size self.shape_vocab_size = shape_vocab_size self.pronunciation_embed_dim = pronunciation_embed_dim self.shape_embed_dim = shape_embed_dim self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_shape_ids = ids_tensor([self.batch_size, self.seq_length], self.shape_vocab_size) input_pronunciation_ids = ids_tensor([self.batch_size, self.seq_length], self.pronunciation_vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, 
self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return ( config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def get_config(self): return RoCBertConfig( vocab_size=self.vocab_size, shape_vocab_size=self.shape_vocab_size, pronunciation_vocab_size=self.pronunciation_vocab_size, shape_embed_dim=self.shape_embed_dim, pronunciation_embed_dim=self.pronunciation_embed_dim, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, 
config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = RoCBertModel(config=config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, ) result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, token_type_ids=token_type_ids, ) result = model(input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = RoCBertModel(config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, 
): model = RoCBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = RoCBertForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_shape_tokens = ids_tensor((self.batch_size, 3), config.shape_vocab_size) next_pronunciation_tokens = ids_tensor((self.batch_size, 3), config.pronunciation_vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_input_shape_ids = torch.cat([input_shape_ids, next_shape_tokens], dim=-1) next_input_pronunciation_ids = torch.cat([input_pronunciation_ids, next_pronunciation_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, input_shape_ids=next_input_shape_ids, input_pronunciation_ids=next_input_pronunciation_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, 
encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, input_shape_ids=next_shape_tokens, input_pronunciation_ids=next_pronunciation_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_question_answering( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = RoCBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_labels = self.num_labels model = RoCBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids=input_shape_ids, 
input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_labels = self.num_labels model = RoCBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_choices = self.num_choices model = RoCBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_inputs_shape_ids = input_shape_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_inputs_pronunciation_ids = ( input_pronunciation_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() ) multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, input_shape_ids=multiple_choice_inputs_shape_ids, input_pronunciation_ids=multiple_choice_inputs_pronunciation_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, 
(self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "input_shape_ids": input_shape_ids, "input_pronunciation_ids": input_pronunciation_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict def create_and_check_for_pretraining( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = RoCBertForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids, input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, attack_input_ids=input_ids, attack_input_shape_ids=input_shape_ids, attack_input_pronunciation_ids=input_pronunciation_ids, attack_attention_mask=input_mask, attack_token_type_ids=token_type_ids, labels_input_ids=token_labels, labels_input_shape_ids=input_shape_ids, labels_input_pronunciation_ids=input_pronunciation_ids, labels_attention_mask=input_mask, labels_token_type_ids=token_type_ids, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) @require_torch
RoCBertModelTester
python
pydata__xarray
xarray/tests/test_dataarray.py
{ "start": 234504, "end": 251141 }
class ____(TestReduce): def test_argmin_dim( self, x: np.ndarray, minindices_x: dict[str, np.ndarray], minindices_y: dict[str, np.ndarray], minindices_z: dict[str, np.ndarray], minindices_xy: dict[str, np.ndarray], minindices_xz: dict[str, np.ndarray], minindices_yz: dict[str, np.ndarray], minindices_xyz: dict[str, np.ndarray], maxindices_x: dict[str, np.ndarray], maxindices_y: dict[str, np.ndarray], maxindices_z: dict[str, np.ndarray], maxindices_xy: dict[str, np.ndarray], maxindices_xz: dict[str, np.ndarray], maxindices_yz: dict[str, np.ndarray], maxindices_xyz: dict[str, np.ndarray], nanindices_x: dict[str, np.ndarray], nanindices_y: dict[str, np.ndarray], nanindices_z: dict[str, np.ndarray], nanindices_xy: dict[str, np.ndarray], nanindices_xz: dict[str, np.ndarray], nanindices_yz: dict[str, np.ndarray], nanindices_xyz: dict[str, np.ndarray], ) -> None: ar = xr.DataArray( x, dims=["x", "y", "z"], coords={ "x": np.arange(x.shape[0]) * 4, "y": 1 - np.arange(x.shape[1]), "z": 2 + 3 * np.arange(x.shape[2]), }, attrs=self.attrs, ) for inds in [ minindices_x, minindices_y, minindices_z, minindices_xy, minindices_xz, minindices_yz, minindices_xyz, ]: if np.array([np.isnan(i) for i in inds.values()]).any(): with pytest.raises(ValueError): ar.argmin(dim=list(inds)) return result0 = ar.argmin(dim=["x"]) assert isinstance(result0, dict) expected0 = { key: xr.DataArray(value, dims=("y", "z"), attrs=self.attrs) for key, value in minindices_x.items() } for key in expected0: assert_identical(result0[key].drop_vars(["y", "z"]), expected0[key]) result1 = ar.argmin(dim=["y"]) assert isinstance(result1, dict) expected1 = { key: xr.DataArray(value, dims=("x", "z"), attrs=self.attrs) for key, value in minindices_y.items() } for key in expected1: assert_identical(result1[key].drop_vars(["x", "z"]), expected1[key]) result2 = ar.argmin(dim=["z"]) assert isinstance(result2, dict) expected2 = { key: xr.DataArray(value, dims=("x", "y"), attrs=self.attrs) for key, value in 
minindices_z.items() } for key in expected2: assert_identical(result2[key].drop_vars(["x", "y"]), expected2[key]) result3 = ar.argmin(dim=("x", "y")) assert isinstance(result3, dict) expected3 = { key: xr.DataArray(value, dims=("z"), attrs=self.attrs) for key, value in minindices_xy.items() } for key in expected3: assert_identical(result3[key].drop_vars("z"), expected3[key]) result4 = ar.argmin(dim=("x", "z")) assert isinstance(result4, dict) expected4 = { key: xr.DataArray(value, dims=("y"), attrs=self.attrs) for key, value in minindices_xz.items() } for key in expected4: assert_identical(result4[key].drop_vars("y"), expected4[key]) result5 = ar.argmin(dim=("y", "z")) assert isinstance(result5, dict) expected5 = { key: xr.DataArray(value, dims=("x"), attrs=self.attrs) for key, value in minindices_yz.items() } for key in expected5: assert_identical(result5[key].drop_vars("x"), expected5[key]) result6 = ar.argmin(...) assert isinstance(result6, dict) expected6 = { key: xr.DataArray(value, attrs=self.attrs) for key, value in minindices_xyz.items() } for key in expected6: assert_identical(result6[key], expected6[key]) minindices_x = { key: xr.where( nanindices_x[key] == None, # noqa: E711 minindices_x[key], nanindices_x[key], ) for key in minindices_x } expected7 = { key: xr.DataArray(value, dims=("y", "z"), attrs=self.attrs) for key, value in minindices_x.items() } result7 = ar.argmin(dim=["x"], skipna=False) assert isinstance(result7, dict) for key in expected7: assert_identical(result7[key].drop_vars(["y", "z"]), expected7[key]) minindices_y = { key: xr.where( nanindices_y[key] == None, # noqa: E711 minindices_y[key], nanindices_y[key], ) for key in minindices_y } expected8 = { key: xr.DataArray(value, dims=("x", "z"), attrs=self.attrs) for key, value in minindices_y.items() } result8 = ar.argmin(dim=["y"], skipna=False) assert isinstance(result8, dict) for key in expected8: assert_identical(result8[key].drop_vars(["x", "z"]), expected8[key]) minindices_z = { key: 
xr.where( nanindices_z[key] == None, # noqa: E711 minindices_z[key], nanindices_z[key], ) for key in minindices_z } expected9 = { key: xr.DataArray(value, dims=("x", "y"), attrs=self.attrs) for key, value in minindices_z.items() } result9 = ar.argmin(dim=["z"], skipna=False) assert isinstance(result9, dict) for key in expected9: assert_identical(result9[key].drop_vars(["x", "y"]), expected9[key]) minindices_xy = { key: xr.where( nanindices_xy[key] == None, # noqa: E711 minindices_xy[key], nanindices_xy[key], ) for key in minindices_xy } expected10 = { key: xr.DataArray(value, dims="z", attrs=self.attrs) for key, value in minindices_xy.items() } result10 = ar.argmin(dim=("x", "y"), skipna=False) assert isinstance(result10, dict) for key in expected10: assert_identical(result10[key].drop_vars("z"), expected10[key]) minindices_xz = { key: xr.where( nanindices_xz[key] == None, # noqa: E711 minindices_xz[key], nanindices_xz[key], ) for key in minindices_xz } expected11 = { key: xr.DataArray(value, dims="y", attrs=self.attrs) for key, value in minindices_xz.items() } result11 = ar.argmin(dim=("x", "z"), skipna=False) assert isinstance(result11, dict) for key in expected11: assert_identical(result11[key].drop_vars("y"), expected11[key]) minindices_yz = { key: xr.where( nanindices_yz[key] == None, # noqa: E711 minindices_yz[key], nanindices_yz[key], ) for key in minindices_yz } expected12 = { key: xr.DataArray(value, dims="x", attrs=self.attrs) for key, value in minindices_yz.items() } result12 = ar.argmin(dim=("y", "z"), skipna=False) assert isinstance(result12, dict) for key in expected12: assert_identical(result12[key].drop_vars("x"), expected12[key]) minindices_xyz = { key: xr.where( nanindices_xyz[key] == None, # noqa: E711 minindices_xyz[key], nanindices_xyz[key], ) for key in minindices_xyz } expected13 = { key: xr.DataArray(value, attrs=self.attrs) for key, value in minindices_xyz.items() } result13 = ar.argmin(..., skipna=False) assert isinstance(result13, dict) 
for key in expected13: assert_identical(result13[key], expected13[key]) def test_argmax_dim( self, x: np.ndarray, minindices_x: dict[str, np.ndarray], minindices_y: dict[str, np.ndarray], minindices_z: dict[str, np.ndarray], minindices_xy: dict[str, np.ndarray], minindices_xz: dict[str, np.ndarray], minindices_yz: dict[str, np.ndarray], minindices_xyz: dict[str, np.ndarray], maxindices_x: dict[str, np.ndarray], maxindices_y: dict[str, np.ndarray], maxindices_z: dict[str, np.ndarray], maxindices_xy: dict[str, np.ndarray], maxindices_xz: dict[str, np.ndarray], maxindices_yz: dict[str, np.ndarray], maxindices_xyz: dict[str, np.ndarray], nanindices_x: dict[str, np.ndarray], nanindices_y: dict[str, np.ndarray], nanindices_z: dict[str, np.ndarray], nanindices_xy: dict[str, np.ndarray], nanindices_xz: dict[str, np.ndarray], nanindices_yz: dict[str, np.ndarray], nanindices_xyz: dict[str, np.ndarray], ) -> None: ar = xr.DataArray( x, dims=["x", "y", "z"], coords={ "x": np.arange(x.shape[0]) * 4, "y": 1 - np.arange(x.shape[1]), "z": 2 + 3 * np.arange(x.shape[2]), }, attrs=self.attrs, ) for inds in [ maxindices_x, maxindices_y, maxindices_z, maxindices_xy, maxindices_xz, maxindices_yz, maxindices_xyz, ]: if np.array([np.isnan(i) for i in inds.values()]).any(): with pytest.raises(ValueError): ar.argmax(dim=list(inds)) return result0 = ar.argmax(dim=["x"]) assert isinstance(result0, dict) expected0 = { key: xr.DataArray(value, dims=("y", "z"), attrs=self.attrs) for key, value in maxindices_x.items() } for key in expected0: assert_identical(result0[key].drop_vars(["y", "z"]), expected0[key]) result1 = ar.argmax(dim=["y"]) assert isinstance(result1, dict) expected1 = { key: xr.DataArray(value, dims=("x", "z"), attrs=self.attrs) for key, value in maxindices_y.items() } for key in expected1: assert_identical(result1[key].drop_vars(["x", "z"]), expected1[key]) result2 = ar.argmax(dim=["z"]) assert isinstance(result2, dict) expected2 = { key: xr.DataArray(value, dims=("x", "y"), 
attrs=self.attrs) for key, value in maxindices_z.items() } for key in expected2: assert_identical(result2[key].drop_vars(["x", "y"]), expected2[key]) result3 = ar.argmax(dim=("x", "y")) assert isinstance(result3, dict) expected3 = { key: xr.DataArray(value, dims=("z"), attrs=self.attrs) for key, value in maxindices_xy.items() } for key in expected3: assert_identical(result3[key].drop_vars("z"), expected3[key]) result4 = ar.argmax(dim=("x", "z")) assert isinstance(result4, dict) expected4 = { key: xr.DataArray(value, dims=("y"), attrs=self.attrs) for key, value in maxindices_xz.items() } for key in expected4: assert_identical(result4[key].drop_vars("y"), expected4[key]) result5 = ar.argmax(dim=("y", "z")) assert isinstance(result5, dict) expected5 = { key: xr.DataArray(value, dims=("x"), attrs=self.attrs) for key, value in maxindices_yz.items() } for key in expected5: assert_identical(result5[key].drop_vars("x"), expected5[key]) result6 = ar.argmax(...) assert isinstance(result6, dict) expected6 = { key: xr.DataArray(value, attrs=self.attrs) for key, value in maxindices_xyz.items() } for key in expected6: assert_identical(result6[key], expected6[key]) maxindices_x = { key: xr.where( nanindices_x[key] == None, # noqa: E711 maxindices_x[key], nanindices_x[key], ) for key in maxindices_x } expected7 = { key: xr.DataArray(value, dims=("y", "z"), attrs=self.attrs) for key, value in maxindices_x.items() } result7 = ar.argmax(dim=["x"], skipna=False) assert isinstance(result7, dict) for key in expected7: assert_identical(result7[key].drop_vars(["y", "z"]), expected7[key]) maxindices_y = { key: xr.where( nanindices_y[key] == None, # noqa: E711 maxindices_y[key], nanindices_y[key], ) for key in maxindices_y } expected8 = { key: xr.DataArray(value, dims=("x", "z"), attrs=self.attrs) for key, value in maxindices_y.items() } result8 = ar.argmax(dim=["y"], skipna=False) assert isinstance(result8, dict) for key in expected8: assert_identical(result8[key].drop_vars(["x", "z"]), 
expected8[key]) maxindices_z = { key: xr.where( nanindices_z[key] == None, # noqa: E711 maxindices_z[key], nanindices_z[key], ) for key in maxindices_z } expected9 = { key: xr.DataArray(value, dims=("x", "y"), attrs=self.attrs) for key, value in maxindices_z.items() } result9 = ar.argmax(dim=["z"], skipna=False) assert isinstance(result9, dict) for key in expected9: assert_identical(result9[key].drop_vars(["x", "y"]), expected9[key]) maxindices_xy = { key: xr.where( nanindices_xy[key] == None, # noqa: E711 maxindices_xy[key], nanindices_xy[key], ) for key in maxindices_xy } expected10 = { key: xr.DataArray(value, dims="z", attrs=self.attrs) for key, value in maxindices_xy.items() } result10 = ar.argmax(dim=("x", "y"), skipna=False) assert isinstance(result10, dict) for key in expected10: assert_identical(result10[key].drop_vars("z"), expected10[key]) maxindices_xz = { key: xr.where( nanindices_xz[key] == None, # noqa: E711 maxindices_xz[key], nanindices_xz[key], ) for key in maxindices_xz } expected11 = { key: xr.DataArray(value, dims="y", attrs=self.attrs) for key, value in maxindices_xz.items() } result11 = ar.argmax(dim=("x", "z"), skipna=False) assert isinstance(result11, dict) for key in expected11: assert_identical(result11[key].drop_vars("y"), expected11[key]) maxindices_yz = { key: xr.where( nanindices_yz[key] == None, # noqa: E711 maxindices_yz[key], nanindices_yz[key], ) for key in maxindices_yz } expected12 = { key: xr.DataArray(value, dims="x", attrs=self.attrs) for key, value in maxindices_yz.items() } result12 = ar.argmax(dim=("y", "z"), skipna=False) assert isinstance(result12, dict) for key in expected12: assert_identical(result12[key].drop_vars("x"), expected12[key]) maxindices_xyz = { key: xr.where( nanindices_xyz[key] == None, # noqa: E711 maxindices_xyz[key], nanindices_xyz[key], ) for key in maxindices_xyz } expected13 = { key: xr.DataArray(value, attrs=self.attrs) for key, value in maxindices_xyz.items() } result13 = ar.argmax(..., 
skipna=False) assert isinstance(result13, dict) for key in expected13: assert_identical(result13[key], expected13[key])
TestReduce3D
python
getsentry__sentry
src/sentry/search/events/fields.py
{ "start": 32719, "end": 33571 }
class ____(FunctionArg): def __init__(self, name: str, start: float | None, end: float | None): super().__init__(name) self.start = start self.end = end def normalize( self, value: str, params: ParamsType, combinator: Combinator | None ) -> float | None: try: normalized_value = float(value) except ValueError: raise InvalidFunctionArgument(f"{value} is not a number") if self.start and normalized_value < self.start: raise InvalidFunctionArgument( f"{normalized_value:g} must be greater than or equal to {self.start:g}" ) elif self.end and normalized_value >= self.end: raise InvalidFunctionArgument(f"{normalized_value:g} must be less than {self.end:g}") return normalized_value
NumberRange
python
ray-project__ray
python/ray/llm/tests/common/cloud/test_s3_filesystem.py
{ "start": 15784, "end": 19531 }
class ____: """Integration tests for S3FileSystem (requires actual S3 access).""" def test_list_subfolders_real_s3(self): """Test listing subfolders from real S3 bucket.""" # Test listing subfolders in the parent directory which has actual subfolders folders = S3FileSystem.list_subfolders( "s3://anonymous@air-example-data/rayllm-ossci/" ) # Verify we get expected subfolders assert isinstance(folders, list) assert "meta-Llama-3.2-1B-Instruct" in folders assert len(folders) > 0 def test_get_file_real_s3(self): """Test getting a file from real S3 bucket.""" # Test getting a small config file content = S3FileSystem.get_file( "s3://anonymous@air-example-data/rayllm-ossci/meta-Llama-3.2-1B-Instruct/config.json" ) assert content is not None assert isinstance(content, str) # Verify it's valid JSON config = json.loads(content) assert "model_type" in config or "vocab_size" in config def test_download_files_with_exclusion(self): """Test downloading files with exclusion filter (exclude safetensors files).""" with tempfile.TemporaryDirectory() as tempdir: # Download files excluding safetensors S3FileSystem.download_files( tempdir, "s3://anonymous@air-example-data/rayllm-ossci/meta-Llama-3.2-1B-Instruct/", suffixes_to_exclude=[".safetensors"], ) # Get list of downloaded files downloaded_files = set() for root, dirs, files in os.walk(tempdir): for file in files: rel_path = os.path.relpath(os.path.join(root, file), tempdir) downloaded_files.add(rel_path) # Verify safetensors file is excluded assert ( "model.safetensors" not in downloaded_files ), "safetensors file should be excluded" # Verify other files are downloaded assert "config.json" in downloaded_files assert "tokenizer.json" in downloaded_files assert len(downloaded_files) > 0 def test_download_files_with_inclusion(self): """Test downloading files with inclusion filter (include only .json files).""" with tempfile.TemporaryDirectory() as tempdir: # Download only .json files S3FileSystem.download_files( tempdir, 
"s3://anonymous@air-example-data/rayllm-ossci/meta-Llama-3.2-1B-Instruct/", substrings_to_include=[".json"], ) # Get list of downloaded files downloaded_files = set() for root, dirs, files in os.walk(tempdir): for file in files: rel_path = os.path.relpath(os.path.join(root, file), tempdir) downloaded_files.add(rel_path) # Verify only .json files are downloaded expected_json_files = { "config.json", "generation_config.json", "special_tokens_map.json", "tokenizer.json", "tokenizer_config.json", } assert ( downloaded_files == expected_json_files ), f"Expected {expected_json_files}, got {downloaded_files}" # Verify non-json files are excluded assert "model.safetensors" not in downloaded_files assert "README.md" not in downloaded_files assert "LICENSE.txt" not in downloaded_files if __name__ == "__main__": sys.exit(pytest.main(["-v", __file__]))
TestS3FileSystemIntegration
python
fastapi__sqlmodel
docs_src/tutorial/update/tutorial002.py
{ "start": 100, "end": 1860 }
class ____(SQLModel, table=True): id: Optional[int] = Field(default=None, primary_key=True) name: str = Field(index=True) secret_name: str age: Optional[int] = Field(default=None, index=True) sqlite_file_name = "database.db" sqlite_url = f"sqlite:///{sqlite_file_name}" engine = create_engine(sqlite_url, echo=True) def create_db_and_tables(): SQLModel.metadata.create_all(engine) def create_heroes(): hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson") hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador") hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48) hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32) hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35) hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36) hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93) with Session(engine) as session: session.add(hero_1) session.add(hero_2) session.add(hero_3) session.add(hero_4) session.add(hero_5) session.add(hero_6) session.add(hero_7) session.commit() def update_heroes(): with Session(engine) as session: statement = select(Hero).where(Hero.name == "Spider-Boy") # (1)! results = session.exec(statement) # (2)! hero = results.one() # (3)! print("Hero:", hero) # (4)! hero.age = 16 # (5)! session.add(hero) # (6)! session.commit() # (7)! session.refresh(hero) # (8)! print("Updated hero:", hero) # (9)! def main(): create_db_and_tables() create_heroes() update_heroes() if __name__ == "__main__": main()
Hero
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/mpileaks/package.py
{ "start": 217, "end": 1413 }
class ____(Package): """Mpileaks is a mock package that passes audits""" homepage = "http://www.spack.llnl.gov" url = "http://www.spack.llnl.gov/mpileaks-1.0.tar.gz" version("2.3", sha256="2e34cc4505556d1c1f085758e26f2f8eea0972db9382f051b2dcfb1d7d9e1825") version("2.2", sha256="2e34cc4505556d1c1f085758e26f2f8eea0972db9382f051b2dcfb1d7d9e1825") version("2.1", sha256="2e34cc4505556d1c1f085758e26f2f8eea0972db9382f051b2dcfb1d7d9e1825") version("1.0", sha256="2e34cc4505556d1c1f085758e26f2f8eea0972db9382f051b2dcfb1d7d9e1825") variant("debug", default=False, description="Debug variant") variant("opt", default=False, description="Optimized variant") variant("shared", default=True, description="Build shared library") variant("static", default=True, description="Build static library") depends_on("mpi") depends_on("callpath") depends_on("c", type="build") # Will be used to try raising an exception libs = None def install(self, spec, prefix): touch(prefix.mpileaks) mkdirp(prefix.man) def setup_run_environment(self, env: EnvironmentModifications) -> None: env.set("FOOBAR", self.name)
Mpileaks
python
django__django
django/contrib/postgres/search.py
{ "start": 2360, "end": 3020 }
class ____(Expression): def __init__(self, config): super().__init__() if not hasattr(config, "resolve_expression"): config = Value(config) self.config = config @classmethod def from_parameter(cls, config): if config is None or isinstance(config, cls): return config return cls(config) def get_source_expressions(self): return [self.config] def set_source_expressions(self, exprs): (self.config,) = exprs def as_sql(self, compiler, connection): sql, params = compiler.compile(self.config) return "%s::regconfig" % sql, params
SearchConfig
python
pytorch__pytorch
test/test_overrides.py
{ "start": 8061, "end": 11854 }
class ____(DiagonalTensor): """A subclass of ``DiagonalTensor`` to test custom dispatch This class tests semantics for defining ``__torch_function__`` on a subclass of another class that defines ``__torch_function__``. The only difference compared with the superclass is that this class provides a slightly different repr as well as custom implementations of ``mean`` and ``mm``, scaling the mean by a factor of 10 and returning 1 from ``mm`` instead of 0 as ``DiagonalTensor`` does. """ handled_functions = HANDLED_FUNCTIONS_SUB_DIAGONAL def __repr__(self): return f"SubDiagonalTensor(N={self._N}, value={self._i})" @implements_sub_diagonal(torch.mean) def sub_diagonal_mean(mat): return 10 * float(mat._i) / mat._N @implements_sub_diagonal(bar) def sub_diagonal_bar(mat): return 0 @implements_sub_diagonal(torch.mm) def sub_diagonal_mm(mat1, mat2): return 1 @implements_sub_diagonal(torch.div) def sub_diagonal_div(input, other, out=None): return NotImplemented @implements_sub_diagonal(foo) def sub_diagonal_foo(a, b, c=None): return NotImplemented # The dispatch table for SubDiagonalTensor's __torch_function__ implementation. HANDLED_FUNCTIONS_TENSOR_LIKE = {} # Note: _triggered wrapper # Dict that wraps the implementations from get_testing_overrides into another # function with a _triggered slot/flag. The triggered flag is set when the # implementation is called. 
WRAPPED_TRIGGERED_IMPLS = {} def triggered_wrapper(f): @functools.wraps(f) def wrapped(*args, **kwargs): wrapped._triggered = True return f(*args, **kwargs) wrapped._triggered = False return wrapped def implements_tensor_like(torch_function): "Register a torch function override for TensorLike" @functools.wraps(torch_function) def decorator(func): HANDLED_FUNCTIONS_TENSOR_LIKE[torch_function] = func return func return decorator def generate_tensor_like_torch_implementations(): untested_funcs = [] testing_overrides = get_testing_overrides() # test/test_cpp_api_parity.py monkeypatches torch.nn to have a new # function sample_functional. Depending on what order you run pytest # collection, this may trigger the error here. This is a hack to fix # the problem. A more proper fix is to make the "not tested" check # a test on its own, and to make sure the monkeypatch is only installed # for the span of the relevant test (and deleted afterwards) testing_ignore = {"sample_functional", "autocast"} for namespace, funcs in get_overridable_functions().items(): for func in funcs: if func not in testing_overrides and func.__name__ not in testing_ignore: untested_funcs.append(f"{namespace}.{func.__name__}") msg = ( "The following functions are not tested for __torch_function__ " "support, please ensure there is an entry in the dict returned by " "torch.overrides.get_testing_overrides for this function or if a " "__torch_function__ override does not make sense, add an entry to " "the tuple returned by torch._overrides.get_ignored_functions.\n\n{}" ) assert len(untested_funcs) == 0, msg.format(pprint.pformat(untested_funcs)) for func, override in testing_overrides.items(): # decorate the overrides with implements_tensor_like if it's not a # torch.Tensor method wrapped = triggered_wrapper(override) # See note: "_triggered wrapper" WRAPPED_TRIGGERED_IMPLS[func] = wrapped if is_tensor_method_or_property(func): implements_sub(func)(wrapped) else: implements_tensor_like(func)(wrapped) 
generate_tensor_like_torch_implementations()
SubDiagonalTensor
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-airbyte-salesforce/llama_index/readers/airbyte_salesforce/base.py
{ "start": 126, "end": 722 }
class ____(AirbyteCDKReader): """ AirbyteSalesforceReader reader. Retrieve documents from Salesforce Args: config: The config object for the salesforce source. """ def __init__( self, config: Mapping[str, Any], record_handler: Optional[RecordHandler] = None, ) -> None: """Initialize with parameters.""" import source_salesforce super().__init__( source_class=source_salesforce.SourceSalesforce, config=config, record_handler=record_handler, )
AirbyteSalesforceReader
python
pypa__warehouse
warehouse/oidc/forms/_core.py
{ "start": 635, "end": 4402 }
class ____: # Attributes that must be provided by subclasses _user: User _check_project_name: typing.Callable[[str], None] _route_url: typing.Callable[..., str] project_name = wtforms.StringField( validators=[ wtforms.validators.InputRequired(message=_("Specify project name")), wtforms.validators.Regexp( PROJECT_NAME_RE, message=_("Invalid project name") ), ] ) def validate_project_name(self, field: wtforms.Field) -> None: project_name = field.data try: self._check_project_name(project_name) except ProjectNameUnavailableInvalidError: raise wtforms.validators.ValidationError(_("Invalid project name")) except ProjectNameUnavailableExistingError as e: # If the user owns the existing project, the error message includes a # link to the project settings that the user can modify. if self._user in e.existing_project.owners: # Mixin doesn't inherit from wtforms.Form but composed classes do url_params = {name: value for name, value in self.data.items() if value} # type: ignore[attr-defined] # noqa: E501 url_params["provider"] = {self.provider} url = self._route_url( "manage.project.settings.publishing", project_name=project_name, _query=url_params, ) # We mark the error message as safe, so that the HTML hyperlink is # not escaped by Jinja raise wtforms.validators.ValidationError( markupsafe.Markup( _( "This project already exists: use the project's " "publishing settings <a href='${url}'>here</a> to " "create a Trusted Publisher for it.", mapping={"url": url}, ) ) ) else: raise wtforms.validators.ValidationError( _("This project already exists.") ) except ProjectNameUnavailableProhibitedError: raise wtforms.validators.ValidationError( _("This project name isn't allowed") ) except ProjectNameUnavailableSimilarError: raise wtforms.validators.ValidationError( _("This project name is too similar to an existing project") ) except ProjectNameUnavailableStdlibError: raise wtforms.validators.ValidationError( _( "This project name isn't allowed (conflict with the Python" " standard 
library module name)" ) ) # TODO: Cover with testing and remove pragma except ProjectNameUnavailableTypoSquattingError as exc: # pragma: no cover # TODO: raise with an appropriate message when we're ready to implement # or combine with `ProjectNameUnavailableSimilarError` # TODO: This is an attempt at structlog, since `request.log` isn't in scope. # We should be able to use `log` instead, but doesn't have the same output log.error( "Typo-squatting error raised but not handled in form validation", check_name=exc.check_name, existing_project_name=exc.existing_project_name, ) pass @property def provider(self) -> str: # pragma: no cover # Only concrete subclasses are constructed. raise NotImplementedError
PendingPublisherMixin
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_display_units09.py
{ "start": 315, "end": 1205 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_display_units09.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "column"}) chart.axis_ids = [56159232, 61364096] data = [ [10000000, 20000000, 30000000, 20000000, 10000000], ] worksheet.write_column(0, 0, data[0]) chart.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart.set_y_axis({"display_units": "billions", "display_units_visible": 0}) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
apache__airflow
providers/cloudant/src/airflow/providers/cloudant/cloudant_fake.py
{ "start": 822, "end": 1026 }
class ____: """Phony class to pass mypy when real class is not imported.""" def __init__(self, authenticator): pass def set_service_url(self, service_url: str): pass
CloudantV1
python
allegroai__clearml
clearml/backend_api/services/v2_9/tasks.py
{ "start": 202067, "end": 221343 }
class ____(Response): """ Response of tasks.get_all endpoint. :param tasks: List of tasks :type tasks: Sequence[Task] """ _service = "tasks" _action = "get_all" _version = "2.9" _schema = { "definitions": { "artifact": { "properties": { "content_size": { "description": "Raw data length in bytes", "type": "integer", }, "display_data": { "description": "User-defined list of key/value pairs, sorted", "items": {"items": {"type": "string"}, "type": "array"}, "type": "array", }, "hash": { "description": "Hash of entire raw data", "type": "string", }, "key": {"description": "Entry key", "type": "string"}, "mode": { "default": "output", "description": "System defined input/output indication", "enum": ["input", "output"], "type": "string", }, "timestamp": { "description": "Epoch time when artifact was created", "type": "integer", }, "type": {"description": "System defined type", "type": "string"}, "type_data": { "$ref": "#/definitions/artifact_type_data", "description": "Additional fields defined by the system", }, "uri": {"description": "Raw data location", "type": "string"}, }, "required": ["key", "type"], "type": "object", }, "artifact_type_data": { "properties": { "content_type": { "description": "System defined raw data content type", "type": ["string", "null"], }, "data_hash": { "description": "Hash of raw data, without any headers or descriptive parts", "type": ["string", "null"], }, "preview": { "description": "Description or textual data", "type": ["string", "null"], }, }, "type": "object", }, "configuration_item": { "properties": { "description": { "description": "The parameter description. Optional", "type": ["string", "null"], }, "name": { "description": "Name of the parameter. Should be unique", "type": ["string", "null"], }, "type": { "description": "Type of the parameter. 
Optional", "type": ["string", "null"], }, "value": { "description": "Value of the parameter", "type": ["string", "null"], }, }, "type": "object", }, "execution": { "properties": { "artifacts": { "description": "Task artifacts", "items": {"$ref": "#/definitions/artifact"}, "type": ["array", "null"], }, "docker_cmd": { "description": "Command for running docker script for the execution of the task", "type": ["string", "null"], }, "framework": { "description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ", "type": ["string", "null"], }, "model": { "description": "Execution input model ID Not applicable for Register (Import) tasks", "type": ["string", "null"], }, "model_desc": { "additionalProperties": True, "description": "Json object representing the Model descriptors", "type": ["object", "null"], }, "model_labels": { "additionalProperties": {"type": "integer"}, "description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks", "type": ["object", "null"], }, "parameters": { "additionalProperties": True, "description": "Json object containing the Task parameters", "type": ["object", "null"], }, "queue": { "description": "Queue ID where task was queued.", "type": ["string", "null"], }, }, "type": "object", }, "last_metrics_event": { "properties": { "max_value": { "description": "Maximum value reported", "type": ["number", "null"], }, "metric": { "description": "Metric name", "type": ["string", "null"], }, "min_value": { "description": "Minimum value reported", "type": ["number", "null"], }, "value": { "description": "Last value reported", "type": ["number", "null"], }, "variant": { "description": "Variant name", "type": ["string", "null"], }, }, "type": "object", }, "last_metrics_variants": { "additionalProperties": {"$ref": "#/definitions/last_metrics_event"}, "description": "Last 
metric events, one for each variant hash", "type": "object", }, "output": { "properties": { "destination": { "description": "Storage id. This is where output files will be stored.", "type": ["string", "null"], }, "error": { "description": "Last error text", "type": ["string", "null"], }, "model": {"description": "Model id.", "type": ["string", "null"]}, "result": { "description": "Task result. Values: 'success', 'failure'", "type": ["string", "null"], }, }, "type": "object", }, "params_item": { "properties": { "description": { "description": "The parameter description. Optional", "type": ["string", "null"], }, "name": { "description": "Name of the parameter. The combination of section and name should be unique", "type": ["string", "null"], }, "section": { "description": "Section that the parameter belongs to", "type": ["string", "null"], }, "type": { "description": "Type of the parameter. Optional", "type": ["string", "null"], }, "value": { "description": "Value of the parameter", "type": ["string", "null"], }, }, "type": "object", }, "script": { "properties": { "binary": { "default": "python", "description": "Binary to use when running the script", "type": ["string", "null"], }, "branch": { "description": "Repository branch id If not provided and tag not provided, default repository branch is used.", "type": ["string", "null"], }, "diff": { "description": "Uncommitted changes found in the repository when task was run", "type": ["string", "null"], }, "entry_point": { "description": "Path to execute within the repository", "type": ["string", "null"], }, "repository": { "description": "Name of the repository where the script is located", "type": ["string", "null"], }, "requirements": { "description": "A JSON object containing requirements strings by key", "type": ["object", "null"], }, "tag": { "description": "Repository tag", "type": ["string", "null"], }, "version_num": { "description": "Version (changeset) number. 
Optional (default is head version) Unused if tag is provided.", "type": ["string", "null"], }, "working_dir": { "description": "Path to the folder from which to run the script Default - root folder of repository", "type": ["string", "null"], }, }, "type": "object", }, "section_params": { "additionalProperties": {"$ref": "#/definitions/params_item"}, "description": "Task section params", "type": "object", }, "task": { "properties": { "comment": { "description": "Free text comment", "type": ["string", "null"], }, "company": { "description": "Company ID", "type": ["string", "null"], }, "completed": { "description": "Task end time (UTC)", "format": "date-time", "type": ["string", "null"], }, "configuration": { "additionalProperties": {"$ref": "#/definitions/configuration_item"}, "description": "Task configuration params", "type": ["object", "null"], }, "created": { "description": "Task creation time (UTC) ", "format": "date-time", "type": ["string", "null"], }, "execution": { "description": "Task execution params", "oneOf": [ {"$ref": "#/definitions/execution"}, {"type": "null"}, ], }, "hyperparams": { "additionalProperties": {"$ref": "#/definitions/section_params"}, "description": "Task hyper params per section", "type": ["object", "null"], }, "id": {"description": "Task id", "type": ["string", "null"]}, "last_iteration": { "description": "Last iteration reported for this task", "type": ["integer", "null"], }, "last_metrics": { "additionalProperties": {"$ref": "#/definitions/last_metrics_variants"}, "description": "Last metric variants (hash to events), one for each metric hash", "type": ["object", "null"], }, "last_update": { "description": "Last time this task was created, updated, changed or events for this task were reported", "format": "date-time", "type": ["string", "null"], }, "last_worker": { "description": "ID of last worker that handled the task", "type": ["string", "null"], }, "last_worker_report": { "description": "Last time a worker reported while working 
on this task", "format": "date-time", "type": ["string", "null"], }, "name": {"description": "Task Name", "type": ["string", "null"]}, "output": { "description": "Task output params", "oneOf": [{"$ref": "#/definitions/output"}, {"type": "null"}], }, "parent": { "description": "Parent task id", "type": ["string", "null"], }, "project": { "description": "Project ID of the project to which this task is assigned", "type": ["string", "null"], }, "published": { "description": "Last status change time", "format": "date-time", "type": ["string", "null"], }, "script": { "description": "Script info", "oneOf": [{"$ref": "#/definitions/script"}, {"type": "null"}], }, "started": { "description": "Task start time (UTC)", "format": "date-time", "type": ["string", "null"], }, "status": { "description": "", "oneOf": [ {"$ref": "#/definitions/task_status_enum"}, {"type": "null"}, ], }, "status_changed": { "description": "Last status change time", "format": "date-time", "type": ["string", "null"], }, "status_message": { "description": "free text string representing info about the status", "type": ["string", "null"], }, "status_reason": { "description": "Reason for last status change", "type": ["string", "null"], }, "system_tags": { "description": "System tags list. This field is reserved for system use, please don't use it.", "items": {"type": "string"}, "type": ["array", "null"], }, "tags": { "description": "User-defined tags list", "items": {"type": "string"}, "type": ["array", "null"], }, "type": { "description": "Type of task. 
Values: 'training', 'testing'", "oneOf": [ {"$ref": "#/definitions/task_type_enum"}, {"type": "null"}, ], }, "user": { "description": "Associated user id", "type": ["string", "null"], }, }, "type": "object", }, "task_status_enum": { "enum": [ "created", "queued", "in_progress", "stopped", "published", "publishing", "closed", "failed", "completed", "unknown", ], "type": "string", }, "task_type_enum": { "enum": [ "training", "testing", "inference", "data_processing", "application", "monitor", "controller", "optimizer", "service", "qc", "custom", ], "type": "string", }, }, "properties": { "tasks": { "description": "List of tasks", "items": {"$ref": "#/definitions/task"}, "type": ["array", "null"], } }, "type": "object", } def __init__(self, tasks: Optional[List[Any]] = None, **kwargs: Any) -> None: super(GetAllResponse, self).__init__(**kwargs) self.tasks = tasks @schema_property("tasks") def tasks(self) -> Optional[List[Any]]: return self._property_tasks @tasks.setter def tasks(self, value: Optional[List[Any]]) -> None: if value is None: self._property_tasks = None return self.assert_isinstance(value, "tasks", (list, tuple)) if any((isinstance(v, dict) for v in value)): value = [Task.from_dict(v) if isinstance(v, dict) else v for v in value] else: self.assert_isinstance(value, "tasks", Task, is_array=True) self._property_tasks = value
GetAllResponse
python
mlflow__mlflow
tests/resources/mlflow-test-plugin/mlflow_test_plugin/dummy_backend.py
{ "start": 244, "end": 600 }
class ____(SubmittedRun): """ A run that just does nothing """ def __init__(self, run_id): self._run_id = run_id def wait(self): return True def get_status(self): return RunStatus.FINISHED def cancel(self): pass @property def run_id(self): return self._run_id
DummySubmittedRun
python
doocs__leetcode
solution/0500-0599/0542.01 Matrix/Solution.py
{ "start": 0, "end": 686 }
class ____: def updateMatrix(self, mat: List[List[int]]) -> List[List[int]]: m, n = len(mat), len(mat[0]) ans = [[-1] * n for _ in range(m)] q = deque() for i, row in enumerate(mat): for j, x in enumerate(row): if x == 0: ans[i][j] = 0 q.append((i, j)) dirs = (-1, 0, 1, 0, -1) while q: i, j = q.popleft() for a, b in pairwise(dirs): x, y = i + a, j + b if 0 <= x < m and 0 <= y < n and ans[x][y] == -1: ans[x][y] = ans[i][j] + 1 q.append((x, y)) return ans
Solution
python
django-import-export__django-import-export
tests/core/tests/test_resources/test_import_export.py
{ "start": 18704, "end": 19853 }
class ____(TestCase): """ If a custom field is declared, import should skip setting an attribute if the Field declaration has no attribute name. # 1874 """ class _EBookResource(ModelResource): published = Field(column_name="published") class Meta: model = EBook fields = ("id", "name", "published") def setUp(self): super().setUp() self.resource = DeclaredFieldWithNoAttributeTestCase._EBookResource() @patch("import_export.resources.logger") def test_import_with_no_attribute(self, mock_logger): self.assertEqual(0, EBook.objects.count()) dataset = tablib.Dataset( *[(1, "Moonraker", "1955-04-05")], headers=["id", "name", "published"] ) self.resource.import_data(dataset, raise_errors=True) self.assertEqual(1, EBook.objects.count()) self.assertIsNone(EBook.objects.first().published) mock_logger.debug.assert_any_call( "skipping field '<import_export.fields.Field: published>' " "- field attribute is not defined" )
DeclaredFieldWithNoAttributeTestCase
python
ethereum__web3.py
tests/core/method-class/test_method.py
{ "start": 9860, "end": 10031 }
class ____(Exception): pass def return_exception_raising_formatter(_method): def formatter(_params): raise Success() return compose(formatter)
Success
python
geekcomputers__Python
swap.py
{ "start": 0, "end": 1998 }
class ____: """ A class to perform swapping of two values. Methods: ------- swap_tuple_unpacking(self): Swaps the values of x and y using a tuple unpacking method. swap_temp_variable(self): Swaps the values of x and y using a temporary variable. swap_arithmetic_operations(self): Swaps the values of x and y using arithmetic operations. """ def __init__(self, x, y): """ Initialize the Swapper class with two values. Parameters: ---------- x : int The first value to be swapped. y : int The second value to be swapped. """ if not isinstance(x, (int, float)) or not isinstance(y, (float, int)): raise ValueError("Both x and y should be integers.") self.x = x self.y = y def display_values(self, message): print(f"{message} x: {self.x}, y: {self.y}") def swap_tuple_unpacking(self): """ Swaps the values of x and y using a tuple unpacking method. """ self.display_values("Before swapping") self.x, self.y = self.y, self.x self.display_values("After swapping") def swap_temp_variable(self): """ Swaps the values of x and y using a temporary variable. """ self.display_values("Before swapping") temp = self.x self.x = self.y self.y = temp self.display_values("After swapping") def swap_arithmetic_operations(self): """ Swaps the values of x and y using arithmetic operations. """ self.display_values("Before swapping") self.x = self.x - self.y self.y = self.x + self.y self.x = self.y - self.x self.display_values("After swapping") print("Example 1:") swapper1 = Swapper(5, 10) swapper1.swap_tuple_unpacking() print() print("Example 2:") swapper2 = Swapper(100, 200) swapper2.swap_temp_variable() print()
Swapper
python
graphql-python__graphene
graphene/types/tests/test_definition.py
{ "start": 514, "end": 674 }
class ____(ObjectType): id = String() name = String() pic = Field(Image, width=Int(), height=Int()) recent_article = Field(lambda: Article)
Author
python
jina-ai__jina
tests/unit/orchestrate/pods/test_pod.py
{ "start": 440, "end": 2284 }
class ____(BaseExecutor): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # pod/pod-specific assert os.environ['key1'] == 'value1' assert os.environ['key2'] == 'value2' # inherit from parent process assert os.environ['key_parent'] == 'value3' def test_pod_runtime_env_setting(fake_env): args = _generate_pod_args( [ '--uses', 'EnvChecker1', '--env', 'key1=value1', '--env', 'key2=value2', ] ) with Pod(args): pass # should not affect the main process assert 'key1' not in os.environ assert 'key2' not in os.environ assert 'key_parent' in os.environ @pytest.mark.parametrize( 'protocol, expected', [ ('grpc', 'GRPCGateway'), ('websocket', 'WebSocketGateway'), ('http', 'HTTPGateway'), ], ) def test_gateway_args(protocol, expected): args = set_gateway_parser().parse_args( [ '--host', 'jina-custom-gateway', '--port', '23456', '--protocol', protocol, ] ) p = Pod(args) assert p.args.uses == expected @pytest.mark.parametrize( 'protocol, expected', [ ('grpc', 'GRPCGateway'), ('websocket', 'WebSocketGateway'), ('http', 'HTTPGateway'), ], ) def test_gateway_runtimes(protocol, expected): args = set_gateway_parser().parse_args( [ '--graph-description', '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}', '--deployments-addresses', '{"pod0": ["0.0.0.0:1234"]}', '--protocol', protocol, ] ) with Pod(args) as p: assert p.args.uses == expected
EnvChecker1
python
scipy__scipy
benchmarks/benchmarks/signal.py
{ "start": 4810, "end": 5259 }
class ____(Benchmark): def setup(self): self.system = signal.lti(1.0, [1, 0, 1]) self.t = np.arange(0, 100, 0.5) self.u = np.sin(2 * self.t) def time_lsim(self): signal.lsim(self.system, self.u, self.t) def time_step(self): signal.step(self.system, T=self.t) def time_impulse(self): signal.impulse(self.system, T=self.t) def time_bode(self): signal.bode(self.system)
LTI
python
getsentry__sentry
tests/sentry/models/test_environment.py
{ "start": 111, "end": 1265 }
class ____(TestCase): def test_simple(self) -> None: project = self.create_project() with pytest.raises(Environment.DoesNotExist): Environment.get_for_organization_id(project.organization_id, "prod") env = Environment.get_or_create(project=project, name="prod") assert env.name == "prod" assert env.projects.first().id == project.id env2 = Environment.get_or_create(project=project, name="prod") assert env2.id == env.id with self.assertNumQueries(0): assert Environment.get_for_organization_id(project.organization_id, "prod").id == env.id @pytest.mark.parametrize( "val,expected", [ ("42", True), ("ok", True), ("production", True), ("deadbeef", True), ("staging.0.1.company", True), ("valid_under", True), ("spaces ok", True), ("no/slashes", False), ("no\nnewlines", False), ("no\rcarriage", False), ("no\fform-feed", False), ], ) def test_valid_name(val: str, expected: bool) -> None: assert Environment.is_valid_name(val) == expected
GetOrCreateTest
python
huggingface__transformers
src/transformers/models/dots1/modular_dots1.py
{ "start": 2726, "end": 2942 }
class ____(DeepseekV3DecoderLayer): def __init__(self, config: Dots1Config, layer_idx: int): super().__init__(config, layer_idx) self.attention_type = config.layer_types[layer_idx]
Dots1DecoderLayer
python
jina-ai__jina
jina/serve/runtimes/gateway/streamer.py
{ "start": 1131, "end": 19242 }
class ____: """ Wrapper object to be used in a Custom Gateway. Naming to be defined """ def __init__( self, graph_representation: Dict, executor_addresses: Dict[str, Union[str, List[str]]], graph_conditions: Dict = {}, deployments_metadata: Dict[str, Dict[str, str]] = {}, deployments_no_reduce: List[str] = [], timeout_send: Optional[float] = None, retries: int = 0, compression: Optional[str] = None, runtime_name: str = 'custom gateway', prefetch: int = 0, logger: Optional['JinaLogger'] = None, metrics_registry: Optional['CollectorRegistry'] = None, meter: Optional['Meter'] = None, aio_tracing_client_interceptors: Optional[Sequence['ClientInterceptor']] = None, tracing_client_interceptor: Optional['OpenTelemetryClientInterceptor'] = None, grpc_channel_options: Optional[list] = None, ): """ :param graph_representation: A dictionary describing the topology of the Deployments. 2 special nodes are expected, the name `start-gateway` and `end-gateway` to determine the nodes that receive the very first request and the ones whose response needs to be sent back to the client. All the nodes with no outgoing nodes will be considered to be floating, and they will be "flagged" so that the user can ignore their tasks and not await them. :param executor_addresses: dictionary JSON with the input addresses of each Deployment. Each Executor can have one single address or a list of addrresses for each Executor :param graph_conditions: Dictionary stating which filtering conditions each Executor in the graph requires to receive Documents. :param deployments_metadata: Dictionary with the metadata of each Deployment. Each executor deployment can have a list of key-value pairs to provide information associated with the request to the deployment. :param deployments_no_reduce: list of Executor disabling the built-in merging mechanism. 
:param timeout_send: Timeout to be considered when sending requests to Executors :param retries: Number of retries to try to make successfull sendings to Executors :param compression: The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, check https://grpc.github.io/grpc/python/grpc.html#compression. :param runtime_name: Name to be used for monitoring. :param prefetch: How many Requests are processed from the Client at the same time. :param logger: Optional logger that can be used for logging :param metrics_registry: optional metrics registry for prometheus used if we need to expose metrics :param meter: optional OpenTelemetry meter that can provide instruments for collecting metrics :param aio_tracing_client_interceptors: Optional list of aio grpc tracing server interceptors. :param tracing_client_interceptor: Optional gprc tracing server interceptor. :param grpc_channel_options: Optional gprc channel options. """ self.logger = logger or JinaLogger(self.__class__.__name__) self.topology_graph = TopologyGraph( graph_representation=graph_representation, graph_conditions=graph_conditions, deployments_metadata=deployments_metadata, deployments_no_reduce=deployments_no_reduce, timeout_send=timeout_send, retries=retries, logger=logger, ) self.runtime_name = runtime_name self.aio_tracing_client_interceptors = aio_tracing_client_interceptors self.tracing_client_interceptor = tracing_client_interceptor self._executor_addresses = executor_addresses self._connection_pool = self._create_connection_pool( executor_addresses, compression, metrics_registry, meter, logger, aio_tracing_client_interceptors, tracing_client_interceptor, grpc_channel_options, ) request_handler = AsyncRequestResponseHandler( metrics_registry, meter, runtime_name, logger ) self._single_doc_request_handler = ( request_handler.handle_single_document_request( graph=self.topology_graph, connection_pool=self._connection_pool ) ) self._streamer = 
RequestStreamer( request_handler=request_handler.handle_request( graph=self.topology_graph, connection_pool=self._connection_pool ), result_handler=request_handler.handle_result(), prefetch=prefetch, logger=logger, ) self._endpoints_models_map = None self._streamer.Call = self._streamer.stream def _create_connection_pool( self, deployments_addresses, compression, metrics_registry, meter, logger, aio_tracing_client_interceptors, tracing_client_interceptor, grpc_channel_options=None, ): # add the connections needed connection_pool = GrpcConnectionPool( runtime_name=self.runtime_name, logger=logger, compression=compression, metrics_registry=metrics_registry, meter=meter, aio_tracing_client_interceptors=aio_tracing_client_interceptors, tracing_client_interceptor=tracing_client_interceptor, channel_options=grpc_channel_options, ) for deployment_name, addresses in deployments_addresses.items(): for address in addresses: connection_pool.add_connection( deployment=deployment_name, address=address, head=True ) return connection_pool def rpc_stream(self, *args, **kwargs): """ stream requests from client iterator and stream responses back. :param args: positional arguments to be passed to inner RequestStreamer :param kwargs: keyword arguments to be passed to inner RequestStreamer :return: An iterator over the responses from the Executors """ return self._streamer.stream(*args, **kwargs) def rpc_stream_doc(self, *args, **kwargs): """ stream requests from client iterator and stream responses back. 
:param args: positional arguments to be passed to inner RequestStreamer :param kwargs: keyword arguments to be passed to inner RequestStreamer :return: An iterator over the responses from the Executors """ return self._single_doc_request_handler(*args, **kwargs) async def _get_endpoints_input_output_models(self, is_cancel): """ Return a Dictionary with endpoints as keys and values as a dictionary of input and output schemas and names taken from the endpoints proto endpoint of Executors. :param is_cancel: event signal to show that you should stop trying """ # The logic should be to get the response of all the endpoints protos schemas from all the nodes. Then do a # logic that for every endpoint fom every Executor computes what is the input and output schema seen by the # Flow. self._endpoints_models_map = ( await self._streamer._get_endpoints_input_output_models( self.topology_graph, self._connection_pool, is_cancel ) ) def _validate_flow_docarray_compatibility(self): """ This method aims to validate that the input-output docarray models of Executors are good """ self.topology_graph._validate_flow_docarray_compatibility() async def stream( self, docs: DocumentArray, request_size: int = 100, return_results: bool = False, exec_endpoint: Optional[str] = None, target_executor: Optional[str] = None, parameters: Optional[Dict] = None, results_in_order: bool = False, return_type: Type[DocumentArray] = DocumentArray, ) -> AsyncIterator[Tuple[Union[DocumentArray, 'Request'], 'ExecutorError']]: """ stream Documents and yield Documents or Responses and unpacked Executor error if any. :param docs: The Documents to be sent to all the Executors :param request_size: The amount of Documents to be put inside a single request. 
:param return_results: If set to True, the generator will yield Responses and not `DocumentArrays` :param exec_endpoint: The Executor endpoint to which to send the Documents :param target_executor: A regex expression indicating the Executors that should receive the Request :param parameters: Parameters to be attached to the Requests :param results_in_order: return the results in the same order as the request_iterator :param return_type: the DocumentArray type to be returned. By default, it is `DocumentArray`. :yield: tuple of Documents or Responses and unpacked error from Executors if any """ async for result in self.stream_docs( docs=docs, request_size=request_size, return_results=True, # force return Responses exec_endpoint=exec_endpoint, target_executor=target_executor, parameters=parameters, results_in_order=results_in_order, return_type=return_type, ): error = None if jina_pb2.StatusProto.ERROR == result.status.code: exception = result.status.exception error = ExecutorError( name=exception.name, args=exception.args, stacks=exception.stacks, executor=exception.executor, ) if return_results: yield result, error else: yield result.data.docs, error async def stream_doc( self, doc: 'Document', return_results: bool = False, exec_endpoint: Optional[str] = None, target_executor: Optional[str] = None, parameters: Optional[Dict] = None, request_id: Optional[str] = None, return_type: Type[DocumentArray] = DocumentArray, ) -> AsyncIterator[Tuple[Union[DocumentArray, 'Request'], 'ExecutorError']]: """ stream Documents and yield Documents or Responses and unpacked Executor error if any. 
:param doc: The Documents to be sent to all the Executors :param return_results: If set to True, the generator will yield Responses and not `DocumentArrays` :param exec_endpoint: The Executor endpoint to which to send the Documents :param target_executor: A regex expression indicating the Executors that should receive the Request :param parameters: Parameters to be attached to the Requests :param request_id: Request ID to add to the request streamed to Executor. Only applicable if request_size is equal or less to the length of the docs :param return_type: the DocumentArray type to be returned. By default, it is `DocumentArray`. :yield: tuple of Documents or Responses and unpacked error from Executors if any """ req = SingleDocumentRequest() req.document_cls = doc.__class__ req.data.doc = doc if request_id: req.header.request_id = request_id if exec_endpoint: req.header.exec_endpoint = exec_endpoint if target_executor: req.header.target_executor = target_executor if parameters: req.parameters = parameters async for result in self.rpc_stream_doc(request=req, return_type=return_type): error = None if jina_pb2.StatusProto.ERROR == result.status.code: exception = result.status.exception error = ExecutorError( name=exception.name, args=exception.args, stacks=exception.stacks, executor=exception.executor, ) if return_results: yield result, error else: yield result.data.doc, error async def stream_docs( self, docs: DocumentArray, request_size: int = 100, return_results: bool = False, exec_endpoint: Optional[str] = None, target_executor: Optional[str] = None, parameters: Optional[Dict] = None, results_in_order: bool = False, request_id: Optional[str] = None, return_type: Type[DocumentArray] = DocumentArray, ): """ stream documents and stream responses back. :param docs: The Documents to be sent to all the Executors :param request_size: The amount of Documents to be put inside a single request. 
:param return_results: If set to True, the generator will yield Responses and not `DocumentArrays` :param exec_endpoint: The Executor endpoint to which to send the Documents :param target_executor: A regex expression indicating the Executors that should receive the Request :param parameters: Parameters to be attached to the Requests :param results_in_order: return the results in the same order as the request_iterator :param request_id: Request ID to add to the request streamed to Executor. Only applicable if request_size is equal or less to the length of the docs :param return_type: the DocumentArray type to be returned. By default, it is `DocumentArray`. :yield: Yields DocumentArrays or Responses from the Executors """ request_id = request_id if len(docs) <= request_size else None def _req_generator(): if not docarray_v2: for docs_batch in docs.batch(batch_size=request_size, shuffle=False): req = DataRequest() req.data.docs = docs_batch if request_id: req.header.request_id = request_id if exec_endpoint: req.header.exec_endpoint = exec_endpoint if target_executor: req.header.target_executor = target_executor if parameters: req.parameters = parameters yield req else: from docarray import BaseDoc def batch(iterable, n=1): l = len(iterable) for ndx in range(0, l, n): yield iterable[ndx : min(ndx + n, l)] if len(docs) > 0: for docs_batch in batch(docs, n=request_size): req = DataRequest() req.document_array_cls = DocList[docs_batch.doc_type] req.data.docs = docs_batch if request_id: req.header.request_id = request_id if exec_endpoint: req.header.exec_endpoint = exec_endpoint if target_executor: req.header.target_executor = target_executor if parameters: req.parameters = parameters yield req else: req = DataRequest() req.document_array_cls = DocList[BaseDoc] req.data.docs = DocList[BaseDoc]() if request_id: req.header.request_id = request_id if exec_endpoint: req.header.exec_endpoint = exec_endpoint if target_executor: req.header.target_executor = target_executor if 
parameters: req.parameters = parameters yield req async for resp in self.rpc_stream( request_iterator=_req_generator(), results_in_order=results_in_order, return_type=return_type, ): if return_results: yield resp else: yield resp.docs async def close(self): """ Gratefully closes the object making sure all the floating requests are taken care and the connections are closed gracefully """ await self._streamer.wait_floating_requests_end() await self._connection_pool.close() Call = rpc_stream async def process_single_data( self, request: DataRequest, context=None ) -> DataRequest: """Implements request and response handling of a single DataRequest :param request: DataRequest from Client :param context: grpc context :return: response DataRequest """ return await self._streamer.process_single_data(request, context) @staticmethod def get_streamer(): """ Return a streamer object based on the current environment context. The streamer object is contructed using runtime arguments stored in the `JINA_STREAMER_ARGS` environment variable. If this method is used outside a Jina context (process not controlled/orchestrated by jina), this method will raise an error. The streamer object does not have tracing/instrumentation capabilities. :return: Returns an instance of `GatewayStreamer` """ if 'JINA_STREAMER_ARGS' in os.environ: args_dict = json.loads(os.environ['JINA_STREAMER_ARGS']) return GatewayStreamer(**args_dict) else: raise OSError('JINA_STREAMER_ARGS environment variable is not set') @staticmethod def _set_env_streamer_args(**kwargs): os.environ['JINA_STREAMER_ARGS'] = json.dumps(kwargs)
GatewayStreamer
python
airbytehq__airbyte
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py
{ "start": 51939, "end": 52630 }
class ____(BaseModel): type: Literal["DefaultPaginator"] pagination_strategy: Union[CursorPagination, CustomPaginationStrategy, OffsetIncrement, PageIncrement] = Field( ..., description="Strategy defining how records are paginated.", title="Pagination Strategy", ) decoder: Optional[JsonDecoder] = Field( None, description="Component decoding the response so records can be extracted.", title="Decoder", ) page_size_option: Optional[RequestOption] = None page_token_option: Optional[Union[RequestOption, RequestPath]] = None parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
DefaultPaginator
python
readthedocs__readthedocs.org
readthedocs/projects/views/private.py
{ "start": 37868, "end": 38041 }
class ____(EnvironmentVariableMixin, DeleteViewWithMessage): success_message = _("Environment variable deleted") http_method_names = ["post"]
EnvironmentVariableDelete
python
more-itertools__more-itertools
tests/test_recipes.py
{ "start": 7290, "end": 7750 }
class ____(TestCase): """Tests for ``flatten()``""" def test_basic_usage(self): """ensure list of lists is flattened one level""" f = [[0, 1, 2], [3, 4, 5]] self.assertEqual(list(range(6)), list(mi.flatten(f))) def test_single_level(self): """ensure list of lists is flattened only one level""" f = [[0, [1, 2]], [[3, 4], 5]] self.assertEqual([0, [1, 2], [3, 4], 5], list(mi.flatten(f)))
FlattenTests
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/instigation.py
{ "start": 6184, "end": 6854 }
class ____(DynamicPartitionsRequestMixin, graphene.ObjectType): class Meta: # pyright: ignore[reportIncompatibleVariableOverride] name = "DynamicPartitionRequest" def __init__( self, dynamic_partition_request: Union[ AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest ], ): super().__init__() self._dynamic_partitions_request = dynamic_partition_request def get_dynamic_partitions_request( self, ) -> Union[ AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest, ]: return self._dynamic_partitions_request
GrapheneDynamicPartitionsRequest
python
apache__airflow
providers/google/tests/unit/google/cloud/links/test_base_link.py
{ "start": 3010, "end": 3627 }
class ____(GoogleCloudBaseOperator): operator_extra_links = (GoogleLink(),) def __init__(self, project_id: str, location: str, cluster_id: str, **kwargs): super().__init__(**kwargs) self.project_id = project_id self.location = location self.cluster_id = cluster_id @property def extra_links_params(self) -> dict[str, Any]: return { "project_id": self.project_id, "cluster_id": self.cluster_id, "location": self.location, } def execute(self, context) -> Any: GoogleLink.persist(context=context)
MyOperator
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 562518, "end": 562865 }
class ____(sgqlc.types.Type): """Autogenerated return type of DeleteTeamDiscussion""" __schema__ = github_schema __field_names__ = ("client_mutation_id",) client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation."""
DeleteTeamDiscussionPayload
python
OmkarPathak__pygorithm
tests/test_sorting.py
{ "start": 3596, "end": 3780 }
class ____(unittest.TestCase, TestSortingAlgorithm): inplace = False alph_support = True @staticmethod def sort(arr): return bucket_sort.sort(arr)
TestBucketSort
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/engine/mock.py
{ "start": 904, "end": 4156 }
class ____: def __init__(self, dialect: Dialect, execute: Callable[..., Any]): self._dialect = dialect self._execute_impl = execute engine: Engine = cast(Any, property(lambda s: s)) dialect: Dialect = cast(Any, property(attrgetter("_dialect"))) name: str = cast(Any, property(lambda s: s._dialect.name)) def connect(self, **kwargs: Any) -> MockConnection: return self def schema_for_object(self, obj: HasSchemaAttr) -> Optional[str]: return obj.schema def execution_options(self, **kw: Any) -> MockConnection: return self def _run_ddl_visitor( self, visitorcallable: Type[InvokeDDLBase], element: Visitable, **kwargs: Any, ) -> None: kwargs["checkfirst"] = False visitorcallable( dialect=self.dialect, connection=self, **kwargs ).traverse_single(element) def execute( self, obj: Executable, parameters: Optional[_CoreAnyExecuteParams] = None, execution_options: Optional[CoreExecuteOptionsParameter] = None, ) -> Any: return self._execute_impl(obj, parameters) def create_mock_engine( url: Union[str, URL], executor: Any, **kw: Any ) -> MockConnection: """Create a "mock" engine used for echoing DDL. This is a utility function used for debugging or storing the output of DDL sequences as generated by :meth:`_schema.MetaData.create_all` and related methods. The function accepts a URL which is used only to determine the kind of dialect to be used, as well as an "executor" callable function which will receive a SQL expression object and parameters, which can then be echoed or otherwise printed. The executor's return value is not handled, nor does the engine allow regular string statements to be invoked, and is therefore only useful for DDL that is sent to the database without receiving any results. 
E.g.:: from sqlalchemy import create_mock_engine def dump(sql, *multiparams, **params): print(sql.compile(dialect=engine.dialect)) engine = create_mock_engine("postgresql+psycopg2://", dump) metadata.create_all(engine, checkfirst=False) :param url: A string URL which typically needs to contain only the database backend name. :param executor: a callable which receives the arguments ``sql``, ``*multiparams`` and ``**params``. The ``sql`` parameter is typically an instance of :class:`.ExecutableDDLElement`, which can then be compiled into a string using :meth:`.ExecutableDDLElement.compile`. .. versionadded:: 1.4 - the :func:`.create_mock_engine` function replaces the previous "mock" engine strategy used with :func:`_sa.create_engine`. .. seealso:: :ref:`faq_ddl_as_string` """ # create url.URL object u = _url.make_url(url) dialect_cls = u.get_dialect() dialect_args = {} # consume dialect arguments from kwargs for k in util.get_cls_kwargs(dialect_cls): if k in kw: dialect_args[k] = kw.pop(k) # create dialect dialect = dialect_cls(**dialect_args) return MockConnection(dialect, executor)
MockConnection
python
rushter__MLAlgorithms
mla/neuralnet/layers/basic.py
{ "start": 1052, "end": 2076 }
class ____(Layer, ParamMixin): def __init__(self, output_dim, parameters=None): """A fully connected layer. Parameters ---------- output_dim : int """ self._params = parameters self.output_dim = output_dim self.last_input = None if parameters is None: self._params = Parameters() def setup(self, x_shape): self._params.setup_weights((x_shape[1], self.output_dim)) def forward_pass(self, X): self.last_input = X return self.weight(X) def weight(self, X): W = np.dot(X, self._params["W"]) return W + self._params["b"] def backward_pass(self, delta): dW = np.dot(self.last_input.T, delta) db = np.sum(delta, axis=0) # Update gradient values self._params.update_grad("W", dW) self._params.update_grad("b", db) return np.dot(delta, self._params["W"].T) def shape(self, x_shape): return x_shape[0], self.output_dim
Dense
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeNarrowingLiteralMember1.py
{ "start": 459, "end": 2568 }
class ____: kind: Literal[1, 2, 3] def eq_obj1(c: Union[A, B]): if c.kind == "A": reveal_type(c, expected_text="A") else: reveal_type(c, expected_text="B") def is_obj1_1(c: Union[A, B]): if c.kind is "A": reveal_type(c, expected_text="A | B") else: reveal_type(c, expected_text="A | B") def is_obj1_2(c: Union[A, B]): if c.is_a is False: reveal_type(c, expected_text="B") else: reveal_type(c, expected_text="A") def eq_obj2(c: Union[A, B]): if c.kind != "A": reveal_type(c, expected_text="B") else: reveal_type(c, expected_text="A") def is_obj2(c: Union[A, B]): if c.kind is not "A": reveal_type(c, expected_text="A | B") else: reveal_type(c, expected_text="A | B") def eq_obj3(c: Union[A, B, C]): if c.kind == "A": reveal_type(c, expected_text="A | C") else: reveal_type(c, expected_text="B | C") def is_obj3(c: Union[A, B, C]): if c.kind is "A": reveal_type(c, expected_text="A | B | C") else: reveal_type(c, expected_text="A | B | C") def eq_obj4(c: Union[A, B]): if c.d == 1: reveal_type(c, expected_text="A") elif c.d == 3: reveal_type(c, expected_text="A | B") def is_obj4(c: Union[A, B]): if c.d is 1: reveal_type(c, expected_text="A | B") elif c.d is 3: reveal_type(c, expected_text="A | B") def eq_obj5(d: D): if d.kind == 1: reveal_type(d, expected_text="D") elif d.kind == 2: reveal_type(d, expected_text="D") def is_obj5(d: D): if d.kind is 1: reveal_type(d, expected_text="D") elif d.kind is 2: reveal_type(d, expected_text="D") def eq_class2(c: Union[type[A], type[B]]): if c.kind_class == "A": reveal_type(c, expected_text="type[A]") else: reveal_type(c, expected_text="type[B]") def is_class2(c: Union[type[A], type[B]]): if c.kind_class is "A": reveal_type(c, expected_text="type[A] | type[B]") else: reveal_type(c, expected_text="type[A] | type[B]")
D
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typedDictReadOnly2.py
{ "start": 2560, "end": 2608 }
class ____(TypedDict): a: NotRequired[int]
TD9
python
networkx__networkx
networkx/classes/tests/test_filters.py
{ "start": 39, "end": 5851 }
class ____: def test_no_filter(self): nf = nx.filters.no_filter assert nf() assert nf(1) assert nf(2, 1) def test_hide_nodes(self): f = nx.classes.filters.hide_nodes([1, 2, 3]) assert not f(1) assert not f(2) assert not f(3) assert f(4) assert f(0) assert f("a") pytest.raises(TypeError, f, 1, 2) pytest.raises(TypeError, f) def test_show_nodes(self): f = nx.classes.filters.show_nodes([1, 2, 3]) assert f(1) assert f(2) assert f(3) assert not f(4) assert not f(0) assert not f("a") pytest.raises(TypeError, f, 1, 2) pytest.raises(TypeError, f) def test_hide_edges(self): factory = nx.classes.filters.hide_edges f = factory([(1, 2), (3, 4)]) assert not f(1, 2) assert not f(3, 4) assert not f(4, 3) assert f(2, 3) assert f(0, -1) assert f("a", "b") pytest.raises(TypeError, f, 1, 2, 3) pytest.raises(TypeError, f, 1) pytest.raises(TypeError, f) pytest.raises(TypeError, factory, [1, 2, 3]) pytest.raises(ValueError, factory, [(1, 2, 3)]) def test_show_edges(self): factory = nx.classes.filters.show_edges f = factory([(1, 2), (3, 4)]) assert f(1, 2) assert f(3, 4) assert f(4, 3) assert not f(2, 3) assert not f(0, -1) assert not f("a", "b") pytest.raises(TypeError, f, 1, 2, 3) pytest.raises(TypeError, f, 1) pytest.raises(TypeError, f) pytest.raises(TypeError, factory, [1, 2, 3]) pytest.raises(ValueError, factory, [(1, 2, 3)]) def test_hide_diedges(self): factory = nx.classes.filters.hide_diedges f = factory([(1, 2), (3, 4)]) assert not f(1, 2) assert not f(3, 4) assert f(4, 3) assert f(2, 3) assert f(0, -1) assert f("a", "b") pytest.raises(TypeError, f, 1, 2, 3) pytest.raises(TypeError, f, 1) pytest.raises(TypeError, f) pytest.raises(TypeError, factory, [1, 2, 3]) pytest.raises(ValueError, factory, [(1, 2, 3)]) def test_show_diedges(self): factory = nx.classes.filters.show_diedges f = factory([(1, 2), (3, 4)]) assert f(1, 2) assert f(3, 4) assert not f(4, 3) assert not f(2, 3) assert not f(0, -1) assert not f("a", "b") pytest.raises(TypeError, f, 1, 2, 3) pytest.raises(TypeError, 
f, 1) pytest.raises(TypeError, f) pytest.raises(TypeError, factory, [1, 2, 3]) pytest.raises(ValueError, factory, [(1, 2, 3)]) def test_hide_multiedges(self): factory = nx.classes.filters.hide_multiedges f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) assert not f(1, 2, 0) assert not f(1, 2, 1) assert f(1, 2, 2) assert f(3, 4, 0) assert not f(3, 4, 1) assert not f(4, 3, 1) assert f(4, 3, 0) assert f(2, 3, 0) assert f(0, -1, 0) assert f("a", "b", 0) pytest.raises(TypeError, f, 1, 2, 3, 4) pytest.raises(TypeError, f, 1, 2) pytest.raises(TypeError, f, 1) pytest.raises(TypeError, f) pytest.raises(TypeError, factory, [1, 2, 3]) pytest.raises(ValueError, factory, [(1, 2)]) pytest.raises(ValueError, factory, [(1, 2, 3, 4)]) def test_show_multiedges(self): factory = nx.classes.filters.show_multiedges f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) assert f(1, 2, 0) assert f(1, 2, 1) assert not f(1, 2, 2) assert not f(3, 4, 0) assert f(3, 4, 1) assert f(4, 3, 1) assert not f(4, 3, 0) assert not f(2, 3, 0) assert not f(0, -1, 0) assert not f("a", "b", 0) pytest.raises(TypeError, f, 1, 2, 3, 4) pytest.raises(TypeError, f, 1, 2) pytest.raises(TypeError, f, 1) pytest.raises(TypeError, f) pytest.raises(TypeError, factory, [1, 2, 3]) pytest.raises(ValueError, factory, [(1, 2)]) pytest.raises(ValueError, factory, [(1, 2, 3, 4)]) def test_hide_multidiedges(self): factory = nx.classes.filters.hide_multidiedges f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) assert not f(1, 2, 0) assert not f(1, 2, 1) assert f(1, 2, 2) assert f(3, 4, 0) assert not f(3, 4, 1) assert f(4, 3, 1) assert f(4, 3, 0) assert f(2, 3, 0) assert f(0, -1, 0) assert f("a", "b", 0) pytest.raises(TypeError, f, 1, 2, 3, 4) pytest.raises(TypeError, f, 1, 2) pytest.raises(TypeError, f, 1) pytest.raises(TypeError, f) pytest.raises(TypeError, factory, [1, 2, 3]) pytest.raises(ValueError, factory, [(1, 2)]) pytest.raises(ValueError, factory, [(1, 2, 3, 4)]) def test_show_multidiedges(self): factory = 
nx.classes.filters.show_multidiedges f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) assert f(1, 2, 0) assert f(1, 2, 1) assert not f(1, 2, 2) assert not f(3, 4, 0) assert f(3, 4, 1) assert not f(4, 3, 1) assert not f(4, 3, 0) assert not f(2, 3, 0) assert not f(0, -1, 0) assert not f("a", "b", 0) pytest.raises(TypeError, f, 1, 2, 3, 4) pytest.raises(TypeError, f, 1, 2) pytest.raises(TypeError, f, 1) pytest.raises(TypeError, f) pytest.raises(TypeError, factory, [1, 2, 3]) pytest.raises(ValueError, factory, [(1, 2)]) pytest.raises(ValueError, factory, [(1, 2, 3, 4)])
TestFilterFactory
python
graphql-python__graphene
graphene/types/schema.py
{ "start": 2176, "end": 14476 }
class ____(dict): def __init__( self, query=None, mutation=None, subscription=None, types=None, auto_camelcase=True, ): assert_valid_root_type(query) assert_valid_root_type(mutation) assert_valid_root_type(subscription) if types is None: types = [] for type_ in types: assert is_graphene_type(type_) self.auto_camelcase = auto_camelcase create_graphql_type = self.add_type self.query = create_graphql_type(query) if query else None self.mutation = create_graphql_type(mutation) if mutation else None self.subscription = create_graphql_type(subscription) if subscription else None self.types = [create_graphql_type(graphene_type) for graphene_type in types] def add_type(self, graphene_type): if inspect.isfunction(graphene_type): graphene_type = graphene_type() if isinstance(graphene_type, List): return GraphQLList(self.add_type(graphene_type.of_type)) if isinstance(graphene_type, NonNull): return GraphQLNonNull(self.add_type(graphene_type.of_type)) try: name = graphene_type._meta.name except AttributeError: raise TypeError(f"Expected Graphene type, but received: {graphene_type}.") graphql_type = self.get(name) if graphql_type: return graphql_type if issubclass(graphene_type, ObjectType): graphql_type = self.create_objecttype(graphene_type) elif issubclass(graphene_type, InputObjectType): graphql_type = self.create_inputobjecttype(graphene_type) elif issubclass(graphene_type, Interface): graphql_type = self.create_interface(graphene_type) elif issubclass(graphene_type, Scalar): graphql_type = self.create_scalar(graphene_type) elif issubclass(graphene_type, Enum): graphql_type = self.create_enum(graphene_type) elif issubclass(graphene_type, Union): graphql_type = self.construct_union(graphene_type) else: raise TypeError(f"Expected Graphene type, but received: {graphene_type}.") self[name] = graphql_type return graphql_type @staticmethod def create_scalar(graphene_type): # We have a mapping to the original GraphQL types # so there are no collisions. 
_scalars = { String: GraphQLString, Int: GraphQLInt, Float: GraphQLFloat, Boolean: GraphQLBoolean, ID: GraphQLID, } if graphene_type in _scalars: return _scalars[graphene_type] return GrapheneScalarType( graphene_type=graphene_type, name=graphene_type._meta.name, description=graphene_type._meta.description, serialize=getattr(graphene_type, "serialize", None), parse_value=getattr(graphene_type, "parse_value", None), parse_literal=getattr(graphene_type, "parse_literal", None), ) @staticmethod def create_enum(graphene_type): values = {} for name, value in graphene_type._meta.enum.__members__.items(): description = getattr(value, "description", None) # if the "description" attribute is an Enum, it is likely an enum member # called description, not a description property if isinstance(description, PyEnum): description = None if not description and callable(graphene_type._meta.description): description = graphene_type._meta.description(value) deprecation_reason = getattr(value, "deprecation_reason", None) if isinstance(deprecation_reason, PyEnum): deprecation_reason = None if not deprecation_reason and callable( graphene_type._meta.deprecation_reason ): deprecation_reason = graphene_type._meta.deprecation_reason(value) values[name] = GraphQLEnumValue( value=value, description=description, deprecation_reason=deprecation_reason, ) type_description = ( graphene_type._meta.description(None) if callable(graphene_type._meta.description) else graphene_type._meta.description ) return GrapheneEnumType( graphene_type=graphene_type, values=values, name=graphene_type._meta.name, description=type_description, ) def create_objecttype(self, graphene_type): create_graphql_type = self.add_type def interfaces(): interfaces = [] for graphene_interface in graphene_type._meta.interfaces: interface = create_graphql_type(graphene_interface) assert interface.graphene_type == graphene_interface interfaces.append(interface) return interfaces if graphene_type._meta.possible_types: is_type_of = 
partial( is_type_of_from_possible_types, graphene_type._meta.possible_types ) else: is_type_of = graphene_type.is_type_of return GrapheneObjectType( graphene_type=graphene_type, name=graphene_type._meta.name, description=graphene_type._meta.description, fields=partial(self.create_fields_for_type, graphene_type), is_type_of=is_type_of, interfaces=interfaces, ) def create_interface(self, graphene_type): resolve_type = ( partial( self.resolve_type, graphene_type.resolve_type, graphene_type._meta.name ) if graphene_type.resolve_type else None ) def interfaces(): interfaces = [] for graphene_interface in graphene_type._meta.interfaces: interface = self.add_type(graphene_interface) assert interface.graphene_type == graphene_interface interfaces.append(interface) return interfaces return GrapheneInterfaceType( graphene_type=graphene_type, name=graphene_type._meta.name, description=graphene_type._meta.description, fields=partial(self.create_fields_for_type, graphene_type), interfaces=interfaces, resolve_type=resolve_type, ) def create_inputobjecttype(self, graphene_type): return GrapheneInputObjectType( graphene_type=graphene_type, name=graphene_type._meta.name, description=graphene_type._meta.description, out_type=graphene_type._meta.container, fields=partial( self.create_fields_for_type, graphene_type, is_input_type=True ), ) def construct_union(self, graphene_type): create_graphql_type = self.add_type def types(): union_types = [] for graphene_objecttype in graphene_type._meta.types: object_type = create_graphql_type(graphene_objecttype) assert object_type.graphene_type == graphene_objecttype union_types.append(object_type) return union_types resolve_type = ( partial( self.resolve_type, graphene_type.resolve_type, graphene_type._meta.name ) if graphene_type.resolve_type else None ) return GrapheneUnionType( graphene_type=graphene_type, name=graphene_type._meta.name, description=graphene_type._meta.description, types=types, resolve_type=resolve_type, ) def get_name(self, 
name): if self.auto_camelcase: return to_camel_case(name) return name def create_fields_for_type(self, graphene_type, is_input_type=False): create_graphql_type = self.add_type fields = {} for name, field in graphene_type._meta.fields.items(): if isinstance(field, Dynamic): field = get_field_as(field.get_type(self), _as=Field) if not field: continue field_type = create_graphql_type(field.type) if is_input_type: _field = GraphQLInputField( field_type, default_value=field.default_value, out_name=name, description=field.description, deprecation_reason=field.deprecation_reason, ) else: args = {} for arg_name, arg in field.args.items(): arg_type = create_graphql_type(arg.type) processed_arg_name = arg.name or self.get_name(arg_name) args[processed_arg_name] = GraphQLArgument( arg_type, out_name=arg_name, description=arg.description, default_value=arg.default_value, deprecation_reason=arg.deprecation_reason, ) subscribe = field.wrap_subscribe( self.get_function_for_type( graphene_type, f"subscribe_{name}", name, field.default_value ) ) # If we are in a subscription, we use (by default) an # identity-based resolver for the root, rather than the # default resolver for objects/dicts. 
if subscribe: field_default_resolver = identity_resolve elif issubclass(graphene_type, ObjectType): default_resolver = ( graphene_type._meta.default_resolver or get_default_resolver() ) field_default_resolver = partial( default_resolver, name, field.default_value ) else: field_default_resolver = None resolve = field.wrap_resolve( self.get_function_for_type( graphene_type, f"resolve_{name}", name, field.default_value ) or field_default_resolver ) _field = GraphQLField( field_type, args=args, resolve=resolve, subscribe=subscribe, deprecation_reason=field.deprecation_reason, description=field.description, ) field_name = field.name or self.get_name(name) fields[field_name] = _field return fields def get_function_for_type(self, graphene_type, func_name, name, default_value): """Gets a resolve or subscribe function for a given ObjectType""" if not issubclass(graphene_type, ObjectType): return resolver = getattr(graphene_type, func_name, None) if not resolver: # If we don't find the resolver in the ObjectType class, then try to # find it in each of the interfaces interface_resolver = None for interface in graphene_type._meta.interfaces: if name not in interface._meta.fields: continue interface_resolver = getattr(interface, func_name, None) if interface_resolver: break resolver = interface_resolver # Only if is not decorated with classmethod if resolver: return get_unbound_function(resolver) def resolve_type(self, resolve_type_func, type_name, root, info, _type): type_ = resolve_type_func(root, info) if inspect.isclass(type_) and issubclass(type_, ObjectType): return type_._meta.name return_type = self[type_name] return default_type_resolver(root, info, return_type)
TypeMap
python
kamyu104__LeetCode-Solutions
Python/maximum-total-subarray-value-ii.py
{ "start": 1479, "end": 3338 }
class ____(object): def maxTotalValue(self, nums, k): """ :type nums: List[int] :type k: int :rtype: int """ # RMQ - Sparse Table # Template: https://github.com/kamyu104/GoogleCodeJam-Farewell-Rounds/blob/main/Round%20D/genetic_sequences2.py3 # Time: ctor: O(NlogN) * O(fn) # query: O(fn) # Space: O(NlogN) class SparseTable(object): def __init__(self, arr, fn): self.fn = fn self.bit_length = [0] n = len(arr) k = n.bit_length()-1 # log2_floor(n) for i in xrange(k+1): self.bit_length.extend(i+1 for _ in xrange(min(1<<i, (n+1)-len(self.bit_length)))) self.st = [[0]*n for _ in xrange(k+1)] self.st[0] = arr[:] for i in xrange(1, k+1): # Time: O(NlogN) * O(fn) for j in xrange((n-(1<<i))+1): self.st[i][j] = fn(self.st[i-1][j], self.st[i-1][j+(1<<(i-1))]) def query(self, L, R): # Time: O(fn) i = self.bit_length[R-L+1]-1 # log2_floor(R-L+1) return self.fn(self.st[i][L], self.st[i][R-(1<<i)+1]) rmq_min = SparseTable(nums, min) rmq_max = SparseTable(nums, max) max_heap = [(-(rmq_max.query(i, len(nums)-1)-rmq_min.query(i, len(nums)-1)), (i, len(nums)-1)) for i in xrange(len(nums))] heapq.heapify(max_heap) result = 0 for _ in xrange(k): v, (i, j) = heappop(max_heap) result += -v if i <= j-1: heapq.heappush(max_heap, (-(rmq_max.query(i, j-1)-rmq_min.query(i, j-1)), (i, j-1))) return result # Time: O((n + k) * logn) # Space: O(n) import heapq # heap, segment tree
Solution2
python
doocs__leetcode
solution/2400-2499/2448.Minimum Cost to Make Array Equal/Solution.py
{ "start": 0, "end": 546 }
class ____: def minCost(self, nums: List[int], cost: List[int]) -> int: arr = sorted(zip(nums, cost)) n = len(arr) f = [0] * (n + 1) g = [0] * (n + 1) for i in range(1, n + 1): a, b = arr[i - 1] f[i] = f[i - 1] + a * b g[i] = g[i - 1] + b ans = inf for i in range(1, n + 1): a = arr[i - 1][0] l = a * g[i - 1] - f[i - 1] r = f[n] - f[i] - a * (g[n] - g[i]) ans = min(ans, l + r) return ans
Solution
python
django__django
django/contrib/admin/widgets.py
{ "start": 1933, "end": 2310 }
class ____(DateTimeWidgetContextMixin, forms.DateInput): class Media: js = [ "admin/js/calendar.js", "admin/js/admin/DateTimeShortcuts.js", ] def __init__(self, attrs=None, format=None): attrs = {"class": "vDateField", "size": "10", **(attrs or {})} super().__init__(attrs=attrs, format=format)
BaseAdminDateWidget
python
tensorflow__tensorflow
tensorflow/python/autograph/operators/conditional_expressions_test.py
{ "start": 1223, "end": 2238 }
class ____(test.TestCase): def test_tensor(self): self.assertEqual(self.evaluate(_basic_expr(constant_op.constant(True))), 1) self.assertEqual(self.evaluate(_basic_expr(constant_op.constant(False))), 2) def test_tensor_mismatched_type(self): # tf.function required because eager cond degenerates to Python if. @def_function.function def test_fn(): conditional_expressions.if_exp( constant_op.constant(True), lambda: 1.0, lambda: 2, 'expr_repr') with self.assertRaisesRegex( TypeError, "'expr_repr' has dtype float32 in the main.*int32 in the else"): test_fn() def test_python(self): self.assertEqual(self.evaluate(_basic_expr(True)), 1) self.assertEqual(self.evaluate(_basic_expr(False)), 2) self.assertEqual( conditional_expressions.if_exp(True, lambda: 1, lambda: 2, ''), 1) self.assertEqual( conditional_expressions.if_exp(False, lambda: 1, lambda: 2, ''), 2) if __name__ == '__main__': test.main()
IfExpTest
python
great-expectations__great_expectations
great_expectations/exceptions/exceptions.py
{ "start": 12274, "end": 12339 }
class ____(DatasourceError): pass
DatasourceInitializationError
python
ZoranPandovski__al-go-rithms
data_structures/Diameter_Of_Binary_Tree/Diameter_Of_Binary_Tree.py
{ "start": 361, "end": 520 }
class ____: def __init__(self, val=0, left=None, right=None): self.val = val self.left = left self.right = right
TreeNode
python
python__mypy
mypy/nodes.py
{ "start": 88443, "end": 90154 }
class ____(SymbolNode, Expression): """Base class for TypeVarExpr, ParamSpecExpr and TypeVarTupleExpr. Note that they are constructed by the semantic analyzer. """ __slots__ = ("_name", "_fullname", "upper_bound", "default", "variance", "is_new_style") _name: str _fullname: str # Upper bound: only subtypes of upper_bound are valid as values. By default # this is 'object', meaning no restriction. upper_bound: mypy.types.Type # Default: used to resolve the TypeVar if the default is not explicitly given. # By default this is 'AnyType(TypeOfAny.from_omitted_generics)'. See PEP 696. default: mypy.types.Type # Variance of the type variable. Invariant is the default. # TypeVar(..., covariant=True) defines a covariant type variable. # TypeVar(..., contravariant=True) defines a contravariant type # variable. variance: int def __init__( self, name: str, fullname: str, upper_bound: mypy.types.Type, default: mypy.types.Type, variance: int = INVARIANT, is_new_style: bool = False, line: int = -1, ) -> None: super().__init__(line=line) self._name = name self._fullname = fullname self.upper_bound = upper_bound self.default = default self.variance = variance self.is_new_style = is_new_style @property def name(self) -> str: return self._name @property def fullname(self) -> str: return self._fullname # All types that are both SymbolNodes and Expressions. # Use when common children of them are needed. SYMBOL_NODE_EXPRESSION_TYPES: Final = (TypeVarLikeExpr,)
TypeVarLikeExpr
python
jazzband__django-oauth-toolkit
tests/test_models.py
{ "start": 958, "end": 1129 }
class ____(TestCase): @classmethod def setUpTestData(cls): cls.user = UserModel.objects.create_user("test_user", "test@example.com", "123456")
BaseTestModels
python
squidfunk__mkdocs-material
material/plugins/search/plugin.py
{ "start": 1673, "end": 6156 }
class ____(BasePlugin[SearchConfig]): # Initialize plugin def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Initialize incremental builds self.is_dirty = False self.is_dirtyreload = False # Initialize search index cache self.search_index_prev = None # Determine whether we're serving the site def on_startup(self, *, command, dirty): self.is_dirty = dirty # Initialize plugin def on_config(self, config): if not self.config.enabled: return # Retrieve default value for language if not self.config.lang: self.config.lang = [self._translate( config, "search.config.lang" )] # Retrieve default value for separator if not self.config.separator: self.config.separator = self._translate( config, "search.config.separator" ) # Retrieve default value for pipeline if self.config.pipeline is None: self.config.pipeline = list(filter(len, re.split( r"\s*,\s*", self._translate(config, "search.config.pipeline") ))) # Validate field configuration validator = SubConfig(SearchFieldConfig) for config in self.config.fields.values(): validator.run_validation(config) # Merge with default fields if "title" not in self.config.fields: self.config.fields["title"] = { "boost": 1e3 } if "text" not in self.config.fields: self.config.fields["text"] = { "boost": 1e0 } if "tags" not in self.config.fields: self.config.fields["tags"] = { "boost": 1e6 } # Initialize search index self.search_index = SearchIndex(**self.config) # Set jieba dictionary, if given if self.config.jieba_dict: path = os.path.normpath(self.config.jieba_dict) if os.path.isfile(path): jieba.set_dictionary(path) log.debug(f"Loading jieba dictionary: {path}") else: log.warning( f"Configuration error for 'search.jieba_dict': " f"'{self.config.jieba_dict}' does not exist." 
) # Set jieba user dictionary, if given if self.config.jieba_dict_user: path = os.path.normpath(self.config.jieba_dict_user) if os.path.isfile(path): jieba.load_userdict(path) log.debug(f"Loading jieba user dictionary: {path}") else: log.warning( f"Configuration error for 'search.jieba_dict_user': " f"'{self.config.jieba_dict_user}' does not exist." ) # Add page to search index def on_page_context(self, context, *, page, config, nav): if not self.config.enabled: return # Index page self.search_index.add_entry_from_context(page) page.content = re.sub( r"\s?data-search-\w+=\"[^\"]+\"", "", page.content ) # Generate search index def on_post_build(self, *, config): if not self.config.enabled: return # Write search index base = os.path.join(config.site_dir, "search") path = os.path.join(base, "search_index.json") # Generate and write search index to file data = self.search_index.generate_search_index(self.search_index_prev) utils.write_file(data.encode("utf-8"), path) # Persist search index for repeated invocation if self.is_dirty: self.search_index_prev = self.search_index # Determine whether we're running under dirty reload def on_serve(self, server, *, config, builder): self.is_dirtyreload = self.is_dirty # ------------------------------------------------------------------------- # Translate the given placeholder value def _translate(self, config, value): env = config.theme.get_env() # Load language template and return translation for placeholder language = "partials/language.html" template = env.get_template(language, None, { "config": config }) return template.module.t(value) # ----------------------------------------------------------------------------- # Search index with support for additional fields
SearchPlugin
python
numpy__numpy
numpy/f2py/tests/test_modules.py
{ "start": 106, "end": 510 }
class ____(util.F2PyTest): sources = [ util.getpath( "tests", "src", "modules", "gh26920", "two_mods_with_one_public_routine.f90" ) ] # we filter the only public function mod2 only = ["mod1_func1", ] def test_gh26920(self): # if it compiles and can be loaded, things are fine pass @pytest.mark.slow
TestModuleFilterPublicEntities
python
apache__airflow
airflow-core/src/airflow/api_fastapi/core_api/datamodels/dag_run.py
{ "start": 1487, "end": 1683 }
class ____(str, Enum): """Enum for DAG Run states when updating a DAG Run.""" QUEUED = DagRunState.QUEUED SUCCESS = DagRunState.SUCCESS FAILED = DagRunState.FAILED
DAGRunPatchStates
python
coleifer__peewee
tests/sqlite.py
{ "start": 50576, "end": 52532 }
class ____(TestFullTextSearch): database = SqliteExtDatabase(':memory:', c_extensions=CYTHON_EXTENSION) def test_c_extensions(self): self.assertTrue(self.database._c_extensions) self.assertTrue(Post._meta.database._c_extensions) def test_bm25f(self): def assertResults(term, expected): query = MultiColumn.search_bm25f(term, [1.0, 0, 0, 0], True) self.assertEqual( [(mc.c4, round(mc.score, 2)) for mc in query], expected) self._create_multi_column() MultiColumn.create(c1='aaaaa fffff', c4=5) assertResults('aaaaa', [(5, -0.76), (1, -0.62)]) assertResults('fffff', [(5, -0.76), (3, -0.65)]) assertResults('eeeee', [(2, -2.13)]) # No column specified, use the first text field. query = MultiColumn.search_bm25f('aaaaa OR fffff', [1., 3., 0, 0], 1) self.assertEqual([(mc.c4, round(mc.score, 2)) for mc in query], [ (1, -14.18), (5, -12.01), (3, -11.48)]) def test_lucene(self): for message in self.messages: Document.create(message=message) def assertResults(term, expected, sort_cleaned=False): query = Document.search_lucene(term, with_score=True) cleaned = [ (round(doc.score, 3), ' '.join(doc.message.split()[:2])) for doc in query] if sort_cleaned: cleaned = sorted(cleaned) self.assertEqual(cleaned, expected) assertResults('things', [ (-0.166, 'Faith has'), (-0.137, 'Be faithful')]) assertResults('faith', [ (0.036, 'All who'), (0.042, 'Faith has'), (0.047, 'A faith'), (0.049, 'Be faithful'), (0.049, 'Faith consists')], sort_cleaned=True) @skip_unless(FTS5Model.fts5_installed(), 'requires fts5')
TestFullTextSearchCython
python
ray-project__ray
python/ray/serve/_private/benchmarks/streaming/_grpc/test_server_pb2_grpc.py
{ "start": 5229, "end": 8850 }
class ____(object): """Missing associated documentation comment in .proto file.""" @staticmethod def Unary( request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None, ): return grpc.experimental.unary_unary( request, target, "/GRPCTestServer/Unary", backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Request.SerializeToString, backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Response.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, ) @staticmethod def ClientStreaming( request_iterator, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None, ): return grpc.experimental.stream_unary( request_iterator, target, "/GRPCTestServer/ClientStreaming", backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Request.SerializeToString, backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Response.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, ) @staticmethod def ServerStreaming( request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None, ): return grpc.experimental.unary_stream( request, target, "/GRPCTestServer/ServerStreaming", backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Request.SerializeToString, backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Response.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, ) @staticmethod def BidiStreaming( request_iterator, target, options=(), 
channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None, ): return grpc.experimental.stream_stream( request_iterator, target, "/GRPCTestServer/BidiStreaming", backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Request.SerializeToString, backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Response.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, )
GRPCTestServer
python
pypa__hatch
tests/cli/fmt/test_fmt.py
{ "start": 10793, "end": 14679 }
class ____: def test_only_linter(self, hatch, temp_dir, config_file, env_run, mocker, platform, defaults_file_stable): config_file.model.template.plugins["default"]["tests"] = False config_file.save() project_name = "My.App" with temp_dir.as_cwd(): result = hatch("new", project_name) assert result.exit_code == 0, result.output project_path = temp_dir / "my-app" data_path = temp_dir / "data" data_path.mkdir() with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}): result = hatch("fmt", "--linter") assert result.exit_code == 0, result.output assert not result.output root_data_path = data_path / "env" / ".internal" / "hatch-static-analysis" / ".config" config_dir = next(root_data_path.iterdir()) default_config = config_dir / "ruff_defaults.toml" user_config = config_dir / "pyproject.toml" user_config_path = platform.join_command_args([str(user_config)]) assert env_run.call_args_list == [ mocker.call(f"ruff check --config {user_config_path} --fix .", shell=True), ] assert default_config.read_text() == defaults_file_stable old_contents = (project_path / "pyproject.toml").read_text() config_path = str(default_config).replace("\\", "\\\\") assert ( user_config.read_text() == f"""\ {old_contents} [tool.ruff] extend = "{config_path}\"""" ) def test_only_formatter(self, hatch, temp_dir, config_file, env_run, mocker, platform, defaults_file_stable): config_file.model.template.plugins["default"]["tests"] = False config_file.save() project_name = "My.App" with temp_dir.as_cwd(): result = hatch("new", project_name) assert result.exit_code == 0, result.output project_path = temp_dir / "my-app" data_path = temp_dir / "data" data_path.mkdir() with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}): result = hatch("fmt", "--formatter") assert result.exit_code == 0, result.output assert not result.output root_data_path = data_path / "env" / ".internal" / "hatch-static-analysis" / ".config" config_dir = next(root_data_path.iterdir()) default_config = 
config_dir / "ruff_defaults.toml" user_config = config_dir / "pyproject.toml" user_config_path = platform.join_command_args([str(user_config)]) assert env_run.call_args_list == [ mocker.call(f"ruff format --config {user_config_path} .", shell=True), ] assert default_config.read_text() == defaults_file_stable old_contents = (project_path / "pyproject.toml").read_text() config_path = str(default_config).replace("\\", "\\\\") assert ( user_config.read_text() == f"""\ {old_contents} [tool.ruff] extend = "{config_path}\"""" ) @pytest.mark.usefixtures("env_run") def test_select_multiple(self, hatch, helpers, temp_dir, config_file): config_file.model.template.plugins["default"]["tests"] = False config_file.save() project_name = "My.App" with temp_dir.as_cwd(): result = hatch("new", project_name) assert result.exit_code == 0, result.output project_path = temp_dir / "my-app" data_path = temp_dir / "data" data_path.mkdir() with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}): result = hatch("fmt", "--linter", "--formatter") assert result.exit_code == 1, result.output assert result.output == helpers.dedent( """ Cannot specify both --linter and --formatter """ )
TestComponents
python
pytorch__pytorch
test/dynamo/test_modules.py
{ "start": 8593, "end": 9171 }
class ____(torch.nn.Module): def __init__(self) -> None: super().__init__() self.layers = torch.nn.ModuleList( [ torch.nn.Linear(10, 10), torch.nn.ReLU(), torch.nn.Linear(10, 10), torch.nn.ReLU(), ] ) def __getitem__(self, idx: int): return self.layers[idx] def __len__(self) -> int: return len(self.layers) def forward(self, x): for i in range(len(self)): x = self[i](x) return x
CustomGetItemModuleList
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 573987, "end": 574288 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("cursor", "node") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") node = sgqlc.types.Field("Ref", graphql_name="node")
RefEdge
python
apache__airflow
devel-common/src/tests_common/test_utils/azure_system_helpers.py
{ "start": 3966, "end": 5784 }
class ____(SystemTest): """Base class for Azure system tests.""" @classmethod def create_share(cls, share_name: str, azure_fileshare_conn_id: str): hook = AzureFileShareHook(azure_fileshare_conn_id=azure_fileshare_conn_id) hook.create_share(share_name) @classmethod def delete_share(cls, share_name: str, azure_fileshare_conn_id: str): hook = AzureFileShareHook(azure_fileshare_conn_id=azure_fileshare_conn_id) hook.delete_share(share_name=share_name) @classmethod def create_directory(cls, share_name: str, azure_fileshare_conn_id: str, directory: str): hook = AzureFileShareHook( azure_fileshare_conn_id=azure_fileshare_conn_id, share_name=share_name, directory_path=directory ) hook.create_directory() @classmethod def upload_file_from_string( cls, string_data: str, share_name: str, azure_fileshare_conn_id: str, file_name: str, ): hook = AzureFileShareHook( azure_fileshare_conn_id=azure_fileshare_conn_id, share_name=share_name, file_path=file_name ) hook.load_data(string_data=string_data) @classmethod def prepare_share(cls, share_name: str, azure_fileshare_conn_id: str, file_name: str, directory: str): """Create share with a file in given directory. If directory is None, file is in root dir.""" hook = AzureFileShareHook( azure_fileshare_conn_id=azure_fileshare_conn_id, share_name=share_name, directory_path=directory, file_path=file_name, ) hook.create_share(share_name) hook.create_directory() string_data = "".join(random.choices(string.ascii_letters, k=1024)) hook.load_data(string_data)
AzureSystemTest
python
numpy__numpy
benchmarks/benchmarks/bench_core.py
{ "start": 6242, "end": 6330 }
class ____(Benchmark): def time_indices(self): np.indices((1000, 500))
Indices
python
getsentry__sentry
tests/sentry/incidents/subscription_processor/test_subscription_processor_base.py
{ "start": 8110, "end": 10206 }
class ____(ProcessUpdateBaseClass): def test_uses_stored_last_update_value(self) -> None: stored_timestamp = timezone.now() + timedelta(minutes=10) store_detector_last_update(self.metric_detector, self.project.id, stored_timestamp) processor = SubscriptionProcessor(self.sub) old_update_message = self.build_subscription_update( self.sub, value=self.critical_threshold + 1, time_delta=timedelta(minutes=5) ) with ( self.feature(["organizations:incidents", "organizations:performance-view"]), self.capture_on_commit_callbacks(execute=True), ): result = processor.process_update(old_update_message) assert result is False def test_no_detector_returns_false_without_exception(self) -> None: with self.tasks(): snuba_query = create_snuba_query( query_type=SnubaQuery.Type.ERROR, dataset=Dataset.Events, query="", aggregate="count()", time_window=timedelta(minutes=1), resolution=timedelta(minutes=1), environment=self.environment, event_types=[ SnubaQueryEventType.EventType.ERROR, SnubaQueryEventType.EventType.DEFAULT, ], ) subscription_without_detector = create_snuba_subscription( project=self.project, subscription_type=INCIDENTS_SNUBA_SUBSCRIPTION_TYPE, snuba_query=snuba_query, ) processor = SubscriptionProcessor(subscription_without_detector) assert processor.detector is None message = self.build_subscription_update(subscription_without_detector, value=100) with ( self.feature(["organizations:incidents", "organizations:performance-view"]), self.capture_on_commit_callbacks(execute=True), ): result = processor.process_update(message) assert result is False
TestSubscriptionProcessorLastUpdate
python
tensorflow__tensorflow
tensorflow/tools/proto_splitter/split_graph_def.py
{ "start": 9922, "end": 10945 }
class ____(SplitBasedOnSize): """Splits the FunctionDef message type.""" def build_chunks(self) -> int: """Splits the proto, and returns the size of the chunks created.""" size_diff = 0 # First check if the entire FunctionDef can be split into a separate chunk. # We do this before the `RepeatedMessageSplitter`, which is costly because # it iterates through every `node_def`. if _GREEDY_SPLIT(self.proto_size) and not _ABOVE_MAX_SIZE(self.proto_size): size_diff += LargeMessageSplitter( self._proto, self.proto_size, parent_splitter=self, fields_in_parent=[], ).build_chunks() if _ABOVE_MAX_SIZE(self.proto_size): # Split FunctionDefLibrary.function.node_def size_diff += RepeatedMessageSplitter( self._proto, "node_def", [ConstantNodeDefSplitter, LargeMessageSplitter], parent_splitter=self, fields_in_parent=[], ).build_chunks() return size_diff
FunctionDefSplitter