language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
sympy__sympy
sympy/polys/polyoptions.py
{ "start": 7889, "end": 8660 }
class ____(Option, metaclass=OptionType): """``wrt`` option to polynomial manipulation functions. """ option = 'wrt' requires: list[str] = [] excludes: list[str] = [] _re_split = re.compile(r"\s*,\s*|\s+") @classmethod def preprocess(cls, wrt): if isinstance(wrt, Basic): return [str(wrt)] elif isinstance(wrt, str): wrt = wrt.strip() if wrt.endswith(','): raise OptionError('Bad input: missing parameter.') if not wrt: return [] return list(cls._re_split.split(wrt)) elif hasattr(wrt, '__getitem__'): return list(map(str, wrt)) else: raise OptionError("invalid argument for 'wrt' option")
Wrt
python
getsentry__sentry
src/sentry/preprod/api/endpoints/size_analysis/project_preprod_size_analysis_compare.py
{ "start": 1436, "end": 19354 }
class ____(PreprodArtifactEndpoint): owner = ApiOwner.EMERGE_TOOLS publish_status = { "GET": ApiPublishStatus.EXPERIMENTAL, "POST": ApiPublishStatus.EXPERIMENTAL, } permission_classes = (ProjectPreprodArtifactPermission,) def get( self, request: Request, project: Project, head_artifact_id: int, base_artifact_id: int, head_artifact: PreprodArtifact, base_artifact: PreprodArtifact, ) -> HttpResponseBase: """ Get size analysis comparison results for a preprod artifact ```````````````````````````````````````````````````` Get size analysis comparison results for a preprod artifact. :pparam string organization_id_or_slug: the id or slug of the organization the artifact belongs to. :pparam string project_id_or_slug: the id or slug of the project to retrieve the artifact from. :pparam string head_artifact_id: the ID of the head preprod artifact to get size analysis comparison for. :pparam string base_artifact_id: the ID of the base preprod artifact to get size analysis comparison for. :auth: required """ analytics.record( PreprodArtifactApiSizeAnalysisCompareGetEvent( organization_id=project.organization_id, project_id=project.id, user_id=request.user.id, head_artifact_id=str(head_artifact_id), base_artifact_id=str(base_artifact_id), ) ) if not features.has( "organizations:preprod-frontend-routes", project.organization, actor=request.user ): return Response({"error": "Feature not enabled"}, status=403) logger.info( "preprod.size_analysis.compare.api.get", extra={"head_artifact_id": head_artifact_id, "base_artifact_id": base_artifact_id}, ) if head_artifact.project.id != project.id: return Response({"error": "Project not found"}, status=404) head_size_metrics_qs = PreprodArtifactSizeMetrics.objects.filter( preprod_artifact_id__in=[head_artifact.id], preprod_artifact__project=project, ).select_related("preprod_artifact") head_size_metrics = list(head_size_metrics_qs) if len(head_size_metrics) == 0: return Response( {"detail": f"Head PreprodArtifact with id {head_artifact_id} 
has no size metrics."}, status=404, ) if base_artifact.project.id != project.id: return Response({"error": "Project not found"}, status=404) base_size_metrics_qs = PreprodArtifactSizeMetrics.objects.filter( preprod_artifact_id__in=[base_artifact.id], preprod_artifact__project=project, ).select_related("preprod_artifact") base_size_metrics = list(base_size_metrics_qs) if len(base_size_metrics) == 0: return Response( {"detail": f"Base PreprodArtifact with id {base_artifact_id} has no size metrics."}, status=404, ) head_metrics_map = build_size_metrics_map(head_size_metrics) base_metrics_map = build_size_metrics_map(base_size_metrics) comparisons: list[SizeAnalysisComparison] = [] for key, head_metric in head_metrics_map.items(): base_metric = base_metrics_map.get(key) if not base_metric: logger.info( "preprod.size_analysis.compare.api.get.no_matching_base_metric", extra={"head_metric_id": head_metric.id}, ) # No matching base metric, so we can't compare comparisons.append( SizeAnalysisComparison( head_size_metric_id=head_metric.id, base_size_metric_id=None, metrics_artifact_type=head_metric.metrics_artifact_type, identifier=head_metric.identifier, state=PreprodArtifactSizeComparison.State.FAILED, comparison_id=None, error_code="NO_BASE_METRIC", error_message="No matching base artifact size metric found.", ) ) continue logger.info( "preprod.size_analysis.compare.api.get.metrics", extra={"head_metric": head_metric, "base_metric": base_metric}, ) # Try to find a comparison object try: comparison_obj = PreprodArtifactSizeComparison.objects.get( head_size_analysis_id=head_metric.id, base_size_analysis_id=base_metric.id, ) except PreprodArtifactSizeComparison.DoesNotExist: logger.info( "preprod.size_analysis.compare.api.get.no_comparison_obj", extra={"head_metric_id": head_metric.id, "base_metric_id": base_metric.id}, ) continue logger.info( "preprod.size_analysis.compare.api.get.comparison_obj", extra={"comparison_obj": comparison_obj}, ) if comparison_obj.state == 
PreprodArtifactSizeComparison.State.SUCCESS: comparisons.append( SizeAnalysisComparison( head_size_metric_id=head_metric.id, base_size_metric_id=base_metric.id, metrics_artifact_type=head_metric.metrics_artifact_type, identifier=head_metric.identifier, state=PreprodArtifactSizeComparison.State.SUCCESS, comparison_id=comparison_obj.id, error_code=None, error_message=None, ) ) elif comparison_obj.state == PreprodArtifactSizeComparison.State.FAILED: comparisons.append( SizeAnalysisComparison( head_size_metric_id=head_metric.id, base_size_metric_id=base_metric.id, metrics_artifact_type=head_metric.metrics_artifact_type, identifier=head_metric.identifier, state=PreprodArtifactSizeComparison.State.FAILED, comparison_id=comparison_obj.id, error_code=( str(comparison_obj.error_code) if comparison_obj.error_code is not None else None ), error_message=comparison_obj.error_message, ) ) else: # Still processing or pending comparisons.append( SizeAnalysisComparison( head_size_metric_id=head_metric.id, base_size_metric_id=base_metric.id, metrics_artifact_type=head_metric.metrics_artifact_type, identifier=head_metric.identifier, state=PreprodArtifactSizeComparison.State.PROCESSING, comparison_id=comparison_obj.id, error_code=None, error_message=None, ) ) logger.info( "preprod.size_analysis.compare.api.get.success", extra={ "head_artifact_id": head_artifact_id, "base_artifact_id": base_artifact_id, "comparisons": len(comparisons), }, ) head_build_details = transform_preprod_artifact_to_build_details(head_artifact) base_build_details = transform_preprod_artifact_to_build_details(base_artifact) response = SizeAnalysisCompareGETResponse( head_build_details=head_build_details, base_build_details=base_build_details, comparisons=comparisons, ) return Response(response.dict()) def post( self, request: Request, project: Project, head_artifact_id: int, base_artifact_id: int, head_artifact: PreprodArtifact, base_artifact: PreprodArtifact, ) -> HttpResponseBase: """ Trigger size analysis 
comparison for a preprod artifact ```````````````````````````````````````````````````` Trigger size analysis comparison for a preprod artifact. Will run comparisons async for all size metrics. :pparam string organization_id_or_slug: the id or slug of the organization the artifact belongs to. :pparam string project_id_or_slug: the id or slug of the project to retrieve the artifact from. :pparam string head_artifact_id: the ID of the head preprod artifact to trigger size analysis comparison for. :pparam string base_artifact_id: the ID of the base preprod artifact to trigger size analysis comparison for. :auth: required """ analytics.record( PreprodArtifactApiSizeAnalysisComparePostEvent( organization_id=project.organization_id, project_id=project.id, user_id=request.user.id, head_artifact_id=str(head_artifact_id), base_artifact_id=str(base_artifact_id), ) ) if not features.has( "organizations:preprod-frontend-routes", project.organization, actor=request.user ): return Response({"error": "Feature not enabled"}, status=403) logger.info( "preprod.size_analysis.compare.api.post", extra={"head_artifact_id": head_artifact_id, "base_artifact_id": base_artifact_id}, ) if head_artifact.build_configuration != base_artifact.build_configuration: return Response( {"error": "Head and base build configurations must be the same."}, status=400 ) head_size_metrics_qs = PreprodArtifactSizeMetrics.objects.filter( preprod_artifact_id__in=[head_artifact.id], preprod_artifact__project=project, ).select_related("preprod_artifact") if head_size_metrics_qs.count() == 0: return Response( {"detail": f"Head PreprodArtifact with id {head_artifact_id} has no size metrics."}, status=404, ) # Check if any of the size metrics are not completed if ( head_size_metrics_qs.filter( state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED ).count() == 0 ): body = SizeAnalysisComparePOSTResponse( status="processing", message=f"Head PreprodArtifact with id {head_artifact_id} has no completed size metrics 
yet. Size analysis may still be processing. Please try again later.", ) return Response( body.dict(), status=202, # Accepted, processing not complete ) base_size_metrics_qs = PreprodArtifactSizeMetrics.objects.filter( preprod_artifact_id__in=[base_artifact.id], preprod_artifact__project=project, ).select_related("preprod_artifact") if base_size_metrics_qs.count() == 0: return Response( {"detail": f"Base PreprodArtifact with id {base_artifact_id} has no size metrics."}, status=404, ) if ( base_size_metrics_qs.filter( state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED ).count() == 0 ): body = SizeAnalysisComparePOSTResponse( status="processing", message=f"Base PreprodArtifact with id {base_artifact_id} has no completed size metrics yet. Size analysis may still be processing. Please try again later.", ) return Response( body.dict(), status=202, # Accepted, processing not complete ) head_size_metrics = list(head_size_metrics_qs) base_size_metrics = list(base_size_metrics_qs) # Check if the size metrics can be compared if not can_compare_size_metrics(head_size_metrics, base_size_metrics): return Response( {"detail": "Head and base size metrics cannot be compared."}, status=400, ) existing_comparisons = PreprodArtifactSizeComparison.objects.filter( head_size_analysis__in=head_size_metrics, base_size_analysis__in=base_size_metrics, ) if existing_comparisons.exists(): # Build SizeAnalysisComparison models for each existing comparison comparison_models = [] for comparison in existing_comparisons: comparison_models.append( SizeAnalysisComparison( head_size_metric_id=comparison.head_size_analysis.id, base_size_metric_id=comparison.base_size_analysis.id, metrics_artifact_type=comparison.head_size_analysis.metrics_artifact_type, identifier=comparison.head_size_analysis.identifier, state=comparison.state, comparison_id=( comparison.id if comparison.state == PreprodArtifactSizeComparison.State.SUCCESS else None ), error_code=( str(comparison.error_code) if 
comparison.state == PreprodArtifactSizeComparison.State.FAILED and comparison.error_code is not None else None ), error_message=( comparison.error_message if comparison.state == PreprodArtifactSizeComparison.State.FAILED else None ), ) ) body = SizeAnalysisComparePOSTResponse( status="exists", message="A comparison already exists for the head and base size metrics.", comparisons=comparison_models, ) return Response(body.dict(), status=200) logger.info( "preprod.size_analysis.compare.api.post.creating_pending_comparisons", extra={"head_artifact_id": head_artifact.id, "base_artifact_id": base_artifact.id}, ) # Create PENDING comparison records for each matching head/base metric pair head_metrics_map = build_size_metrics_map(head_size_metrics) base_metrics_map = build_size_metrics_map(base_size_metrics) created_comparisons = [] with transaction.atomic(router.db_for_write(PreprodArtifactSizeComparison)): for key, head_metric in head_metrics_map.items(): base_metric = base_metrics_map.get(key) if base_metric: comparison = PreprodArtifactSizeComparison.objects.create( head_size_analysis=head_metric, base_size_analysis=base_metric, organization_id=project.organization_id, state=PreprodArtifactSizeComparison.State.PENDING, ) comparison.save() created_comparisons.append( SizeAnalysisComparison( head_size_metric_id=head_metric.id, base_size_metric_id=base_metric.id, metrics_artifact_type=head_metric.metrics_artifact_type, identifier=head_metric.identifier, state=PreprodArtifactSizeComparison.State.PENDING, comparison_id=None, error_code=None, error_message=None, ) ) logger.info( "preprod.size_analysis.compare.api.post.running_comparison", extra={ "head_artifact_id": head_artifact.id, "base_artifact_id": base_artifact.id, "pending_comparisons_count": len(created_comparisons), }, ) manual_size_analysis_comparison.apply_async( kwargs={ "project_id": project.id, "org_id": project.organization_id, "head_artifact_id": head_artifact.id, "base_artifact_id": base_artifact.id, } ) 
logger.info( "preprod.size_analysis.compare.api.post.success", extra={ "head_artifact_id": head_artifact_id, "base_artifact_id": base_artifact_id, "created_comparisons_count": len(created_comparisons), }, ) body = SizeAnalysisComparePOSTResponse( status="created", message="Comparison records created and processing started.", comparisons=created_comparisons, ) return Response(body.dict(), status=200)
ProjectPreprodArtifactSizeAnalysisCompareEndpoint
python
ray-project__ray
python/ray/tests/test_runtime_env_packaging.py
{ "start": 22853, "end": 24672 }
class ____: """Test ABFSS protocol implementation.""" def test_abfss_protocol_handler_with_invalid_uris(self, tmp_path): """Test that ABFSS protocol handler raises ValueError for invalid URIs.""" import unittest.mock as mock invalid_uris = [ "abfss://@account.dfs.core.windows.net/file.zip", # Empty container name "abfss://container@.dfs.core.windows.net/file.zip", # Empty account name "abfss://container@account.blob.core.windows.net/file.zip", # Wrong endpoint "abfss://container@account.core.windows.net/file.zip", # Missing .dfs "abfss://account.dfs.core.windows.net/file.zip", # Missing container@ "abfss://container", # Missing @ and hostname "abfss://", # Empty netloc ] dest_file = tmp_path / "test_download.zip" # Mock adlfs and azure.identity modules in sys.modules to avoid import errors in CI import sys mock_adlfs_module = mock.MagicMock() mock_azure_identity_module = mock.MagicMock() with mock.patch.dict( sys.modules, { "adlfs": mock_adlfs_module, "azure": mock.MagicMock(), "azure.identity": mock_azure_identity_module, }, ): # Setup the mocks (though they won't be called due to validation failures) mock_filesystem = mock.Mock() mock_adlfs_module.AzureBlobFileSystem.return_value = mock_filesystem mock_filesystem.open.return_value = mock.Mock() for invalid_uri in invalid_uris: with pytest.raises(ValueError, match="Invalid ABFSS URI format"): Protocol.ABFSS.download_remote_uri(invalid_uri, str(dest_file))
TestAbfssProtocol
python
pandas-dev__pandas
asv_bench/benchmarks/ctors.py
{ "start": 718, "end": 1860 }
class ____: param_names = ["data_fmt", "with_index", "dtype"] params = [ [ no_change, list, list_of_str, gen_of_str, arr_dict, list_of_tuples, gen_of_tuples, list_of_lists, list_of_tuples_with_none, list_of_lists_with_none, ], [False, True], ["float", "int"], ] # Generators get exhausted on use, so run setup before every call number = 1 repeat = (3, 250, 10) def setup(self, data_fmt, with_index, dtype): if data_fmt in (gen_of_str, gen_of_tuples) and with_index: raise NotImplementedError( "Series constructors do not support using generators with indexes" ) N = 10**4 if dtype == "float": arr = np.random.randn(N) else: arr = np.arange(N) self.data = data_fmt(arr) self.index = np.arange(N) if with_index else None def time_series_constructor(self, data_fmt, with_index, dtype): Series(self.data, index=self.index)
SeriesConstructors
python
scipy__scipy
scipy/stats/tests/test_stats.py
{ "start": 395926, "end": 400816 }
class ____: @pytest.mark.parametrize('axis', [None, 1, -1, (-2, 2)]) @pytest.mark.parametrize('keepdims', [False, True]) @pytest.mark.parametrize('correction', [0, 1]) @pytest.mark.parametrize('nan_policy', ['propagate', 'omit']) def test_xp_var_basic(self, xp, axis, keepdims, correction, nan_policy): rng = np.random.default_rng(90359458245906) x = rng.random((3, 4, 5)) var_ref = np.var if nan_policy == 'omit': nan_mask = rng.random(size=x.shape) > 0.5 x[nan_mask] = np.nan var_ref = np.nanvar x_xp = xp.asarray(x) res = _xp_var(x_xp, axis=axis, keepdims=keepdims, correction=correction, nan_policy=nan_policy) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "Degrees of freedom <= 0 for slice", RuntimeWarning) ref = var_ref(x, axis=axis, keepdims=keepdims, ddof=correction) xp_assert_close(res, xp.asarray(ref)) def test_special_cases(self, xp): # correction too big res = _xp_var(xp.asarray([1., 2.]), correction=3) xp_assert_close(res, xp.asarray(xp.nan)) def test_nan_policy(self, xp): x = xp.arange(10.) mask = (x == 3) x = xp.where(mask, xp.nan, x) # `nan_policy='propagate'` is the default, and the result is NaN res1 = _xp_var(x) res2 = _xp_var(x, nan_policy='propagate') ref = xp.asarray(xp.nan) xp_assert_equal(res1, ref) xp_assert_equal(res2, ref) # `nan_policy='omit'` omits NaNs in `x` res = _xp_var(x, nan_policy='omit') ref = xp.var(x[~mask]) xp_assert_close(res, ref) @skip_xp_backends(eager_only=True) def test_nan_policy_warns(self, xp): x = xp.arange(10.) x = xp.where(x == 3, xp.nan, x) # Check for warning if omitting NaNs causes empty slice message = 'After omitting NaNs...' 
with pytest.warns(RuntimeWarning, match=message): res = _xp_var(x * np.nan, nan_policy='omit') ref = xp.asarray(xp.nan) xp_assert_equal(res, ref) @skip_xp_backends(eager_only=True) def test_nan_policy_raise(self, xp): # nan_policy='raise' raises an error when NaNs are present message = 'The input contains nan values' with pytest.raises(ValueError, match=message): _xp_var(xp.asarray([1, 2, xp.nan]), nan_policy='raise') def test_empty(self, xp): message = 'One or more sample arguments is too small...' with pytest.warns(SmallSampleWarning, match=message): res = _xp_var(xp.asarray([])) ref = xp.asarray(xp.nan) xp_assert_equal(res, ref) message = "All axis-slices of one or more sample arguments..." with pytest.warns(SmallSampleWarning, match=message): res = _xp_var(xp.asarray([[]]), axis=1) ref = xp.asarray([xp.nan]) xp_assert_equal(res, ref) res = _xp_var(xp.asarray([[]]), axis=0) ref = xp.asarray([]) xp_assert_equal(res, ref) @pytest.mark.filterwarnings( "ignore:overflow encountered in reduce:RuntimeWarning" ) # Overflow occurs for float32 input def test_dtype(self, xp): max = xp.finfo(xp.float32).max x_np = np.asarray([max, max/2], dtype=np.float32) x_xp = xp.asarray(x_np) res = _xp_var(x_xp) ref = np.var(x_np) np.testing.assert_equal(ref, np.inf) xp_assert_close(res, xp.asarray(ref)) # correct result is returned if `float64` is used res = _xp_var(x_xp, dtype=xp.float64) ref = xp.asarray(np.var(np.asarray(x_np, dtype=np.float64))) xp_assert_close(res, ref) def test_integer(self, xp): # integer inputs are converted to the appropriate float x = xp.arange(10) y = xp.arange(10.) 
xp_assert_equal(_xp_var(x), _xp_var(y)) def test_complex_gh22404(self, xp): rng = np.random.default_rng(90359458245906) x, y = rng.random((2, 20)) res = _xp_var(xp.asarray(x + y*1j)) ref = np.var(x + y*1j) xp_assert_close(res, xp.asarray(ref), check_dtype=False) def test_chk_asarray(xp): rng = np.random.default_rng(2348923425434) x0 = rng.random(size=(2, 3, 4)) x = xp.asarray(x0) axis = 1 x_out, axis_out = _chk_asarray(x, axis=axis, xp=xp) xp_assert_equal(x_out, xp.asarray(x0)) assert_equal(axis_out, axis) axis = None x_out, axis_out = _chk_asarray(x, axis=axis, xp=xp) xp_assert_equal(x_out, xp.asarray(x0.ravel())) assert_equal(axis_out, 0) axis = 2 x_out, axis_out = _chk_asarray(x[0, 0, 0], axis=axis, xp=xp) xp_assert_equal(x_out, xp.asarray(np.atleast_1d(x0[0, 0, 0]))) assert_equal(axis_out, axis)
TestXP_Var
python
Textualize__textual
tests/snapshot_tests/snapshot_apps/scroll_visible_margin.py
{ "start": 137, "end": 686 }
class ____(App): CSS = """ Container { height: auto; margin-top: 8; border: solid red; } """ def compose(self) -> ComposeResult: with VerticalScroll(): with Container(): for index in range(1, 51): yield Button(f"Hello, world! ({index})", id=f"b{index}") def key_x(self): button_twenty = self.query_one("#b26") button_twenty.scroll_visible() app = ScrollVisibleMargin() if __name__ == "__main__": app.run()
ScrollVisibleMargin
python
tensorflow__tensorflow
tensorflow/python/ops/numpy_ops/np_array_ops.py
{ "start": 53964, "end": 67733 }
class ____(enum.Enum): UPDATE = 0 ADD = 1 MIN = 2 MAX = 3 def _slice_helper(tensor, slice_spec, update_method=None, updates=None): """Helper function for __getitem__ and _with_index_update_helper. This function collects the indices in `slice_spec` into two buckets, which we can call "idx1" and "idx2" here. idx1 is intended for `strided_slice`, idx2 `gather`. They also correspond to "basic indices" and "advanced indices" in numpy. This function supports both reading and writing at the indices. The reading path can be summarized as `gather(stride_slice(tensor, idx1), idx2)`. The writing path can be summarized as `strided_slice_update(tensor, idx1, scatter(strided_slice(tensor, idx1), idx2, updates))`. (`gather` here means `tf.gather` or `tf.gather_nd`; `scatter` here means `tf.tensor_scatter_update`.) The writing path is inefficient because it needs to first read out a portion (probably much larger than `updates`) of `tensor` using `strided_slice`, update it, and then write the portion back. An alternative approach is to only use `scatter`, which amounts to using the indexing mechanism of gather/scatter to implement strided_slice/strided_slice_update. This is feasible for XLA Gather/Scatter because they support spans (e.g. `2:5`) in indices (as begin/end pairs), but not TF gather/scatter because they don't support spans (except those that cover entire dimensions, i.e. `:`). If we materialize spans into individual indices, the size of the index tensor would explode. (Note that XLA Gather/Scatter have a similar problem for stride > 1 because they don't support strides. Indices such as `1:2:8` will need to be materialized into individual indices such as [1, 3, 5, 7].) Args: tensor: the tensor to be read from or write into. slice_spec: the indices. update_method: (optional) a member of `_UpdateMethod`, indicating how to update the values (replacement, add, etc.). `None` indicates just reading. updates: (optional) the new values to write into `tensor`. 
It must have the same dtype as `tensor`. Returns: The result of reading (if `update_method` is `None`) or the updated `tensor` after writing. """ begin, end, strides = [], [], [] new_axis_mask, shrink_axis_mask = 0, 0 begin_mask, end_mask = 0, 0 ellipsis_mask = 0 advanced_indices = [] shrink_indices = [] for index, s in enumerate(slice_spec): if isinstance(s, slice): if s.start is not None: begin.append(_as_index(s.start)[0]) else: begin.append(0) begin_mask |= 1 << index if s.stop is not None: end.append(_as_index(s.stop)[0]) else: end.append(0) end_mask |= 1 << index if s.step is not None: strides.append(_as_index(s.step)[0]) else: strides.append(1) elif s is Ellipsis: begin.append(0) end.append(0) strides.append(1) ellipsis_mask |= 1 << index elif s is array_ops.newaxis: begin.append(0) end.append(0) strides.append(1) new_axis_mask |= 1 << index else: s, is_scalar = _as_index(s, False) if is_scalar: begin.append(s) end.append(s + 1) strides.append(1) shrink_axis_mask |= 1 << index shrink_indices.append(index) else: begin.append(0) end.append(0) strides.append(1) begin_mask |= 1 << index end_mask |= 1 << index advanced_indices.append((index, s, ellipsis_mask != 0)) # stack possibly involves no tensors, so we must use op_scope correct graph. 
with ops.name_scope( None, 'strided_slice', [tensor] + begin + end + strides, skip_on_eager=False, ) as name: if begin: packed_begin, packed_end, packed_strides = ( array_ops_stack.stack(begin), array_ops_stack.stack(end), array_ops_stack.stack(strides), ) if ( packed_begin.dtype == dtypes.int64 or packed_end.dtype == dtypes.int64 or packed_strides.dtype == dtypes.int64 ): if packed_begin.dtype != dtypes.int64: packed_begin = math_ops.cast(packed_begin, dtypes.int64) if packed_end.dtype != dtypes.int64: packed_end = math_ops.cast(packed_end, dtypes.int64) if packed_strides.dtype != dtypes.int64: packed_strides = math_ops.cast(packed_strides, dtypes.int64) else: var_empty = constant_op.constant([], dtype=dtypes.int32) packed_begin = packed_end = packed_strides = var_empty if update_method == _UpdateMethod.UPDATE and not advanced_indices: return array_ops.tensor_strided_slice_update( tensor, packed_begin, packed_end, packed_strides, updates, begin_mask=begin_mask, end_mask=end_mask, shrink_axis_mask=shrink_axis_mask, new_axis_mask=new_axis_mask, ellipsis_mask=ellipsis_mask, name=name, ) else: # TODO(b/164251540): Find a better way to support update that does not # involve one read + two writes. if updates is not None: original_tensor = tensor # TODO(agarwal): set_shape on tensor to set rank. tensor = array_ops.strided_slice( tensor, packed_begin, packed_end, packed_strides, begin_mask=begin_mask, end_mask=end_mask, shrink_axis_mask=shrink_axis_mask, new_axis_mask=new_axis_mask, ellipsis_mask=ellipsis_mask, name=name, ) if not advanced_indices: if update_method is None: return tensor assert update_method != _UpdateMethod.UPDATE # TF lacks TensorStridedSliceAdd and alike, so we need to do # read+add+update. 
if update_method == _UpdateMethod.ADD: update_op = math_ops.add elif update_method == _UpdateMethod.MIN: update_op = math_ops.minimum elif update_method == _UpdateMethod.MAX: update_op = math_ops.maximum return array_ops.tensor_strided_slice_update( original_tensor, packed_begin, packed_end, packed_strides, update_op(tensor, updates), begin_mask=begin_mask, end_mask=end_mask, shrink_axis_mask=shrink_axis_mask, new_axis_mask=new_axis_mask, ellipsis_mask=ellipsis_mask, name=name + '_2', ) advanced_indices_map = {} for index, data, had_ellipsis in advanced_indices: if had_ellipsis: num_shrink = len([x for x in shrink_indices if x > index]) dim = index - len(slice_spec) + num_shrink else: num_shrink = len([x for x in shrink_indices if x < index]) dim = index - num_shrink advanced_indices_map[dim] = data dims = sorted(advanced_indices_map.keys()) dims_contiguous = True if len(dims) > 1: if dims[0] < 0 and dims[-1] >= 0: # not all same sign dims_contiguous = False else: for i in range(len(dims) - 1): if dims[i] + 1 != dims[i + 1]: dims_contiguous = False break indices = [advanced_indices_map[x] for x in dims] indices = _promote_dtype(*indices) indices = np_utils.tf_broadcast(*indices) stacked_indices = array_ops_stack.stack(indices, axis=-1) # Skip the contiguous-dims optimization for update because there is no # tf.*scatter* op that supports the `axis` argument. if not dims_contiguous or updates is not None: if range(len(dims)) != dims: tensor = moveaxis(tensor, dims, range(len(dims))) tensor_shape_prefix = array_ops.shape( tensor, out_type=stacked_indices.dtype )[: len(dims)] stacked_indices = array_ops.where_v2( stacked_indices < 0, stacked_indices + tensor_shape_prefix, stacked_indices, ) if updates is None: return array_ops.gather_nd(tensor, stacked_indices) else: # We only need to move-axis `updates` in the contiguous case becausce # only in this case the result dimensions of advanced indexing are in # the middle of `updates`. 
In the non-contiguous case, those dimensions # are always at the front. if dims_contiguous: # TODO(wangpeng): Support unknown rank (e.g. by partially flattening # `updates`) if stacked_indices.shape.rank is None: raise NotImplementedError( 'Rank of the advanced indices must currently be known' ) batch_size = stacked_indices.shape.rank - 1 batch_start = dims[0] if batch_start < 0: batch_start += len(dims) - batch_size def range_(start, length): return range(start, start + length) updates = moveaxis( updates, range_(batch_start, batch_size), range(batch_size) ) if update_method == _UpdateMethod.UPDATE: update_op = array_ops.tensor_scatter_update elif update_method == _UpdateMethod.ADD: update_op = array_ops.tensor_scatter_add elif update_method == _UpdateMethod.MIN: update_op = array_ops.tensor_scatter_min elif update_method == _UpdateMethod.MAX: update_op = array_ops.tensor_scatter_max tensor = update_op(tensor, stacked_indices, updates) if range(len(dims)) != dims: tensor = moveaxis(tensor, range(len(dims)), dims) return array_ops.tensor_strided_slice_update( original_tensor, packed_begin, packed_end, packed_strides, tensor, begin_mask=begin_mask, end_mask=end_mask, shrink_axis_mask=shrink_axis_mask, new_axis_mask=new_axis_mask, ellipsis_mask=ellipsis_mask, name=name + '_2', ) # Note that gather_nd does not support gathering from inside the array. # To avoid shuffling data back and forth, we transform the indices and # do a gather instead. 
rank = np_utils._maybe_static(array_ops.rank(tensor)) # pylint: disable=protected-access dims = [(x + rank if x < 0 else x) for x in dims] shape_tensor = array_ops.shape(tensor) dim_sizes = array_ops.gather(shape_tensor, dims) if len(dims) == 1: stacked_indices = indices[0] stacked_indices = math_ops.cast(stacked_indices, dtypes.int32) stacked_indices = array_ops.where_v2( stacked_indices < 0, stacked_indices + dim_sizes, stacked_indices ) axis = dims[0] if len(dims) > 1: index_scaling = math_ops.cumprod(dim_sizes, reverse=True, exclusive=True) def _tensordot(a, b): # TODO(b/168657656): This function should be replaced by # tensordot(axis=1) once MatMul has int32 XLA kernel. b = array_ops.broadcast_to(b, array_ops.shape(a)) return math_ops.reduce_sum(a * b, axis=-1) stacked_indices = _tensordot(stacked_indices, index_scaling) flat_shape = array_ops.concat( [shape_tensor[:axis], [-1], shape_tensor[axis + len(dims) :]], axis=0 ) tensor = array_ops.reshape(tensor, flat_shape) return array_ops.gather(tensor, stacked_indices, axis=axis) def _as_spec_tuple(slice_spec): """Convert slice_spec to tuple.""" if isinstance(slice_spec, (list, tuple)) and not isinstance( slice_spec, np.ndarray ): is_index = True for s in slice_spec: if s is None or s is Ellipsis or isinstance(s, (list, tuple, slice)): is_index = False break elif isinstance(s, (np_arrays.ndarray, np.ndarray)) and s.ndim != 0: is_index = False break if not is_index: return tuple(slice_spec) return (slice_spec,) def _getitem(self, slice_spec): """Implementation of ndarray.__getitem__.""" if ( isinstance(slice_spec, bool) or ( isinstance(slice_spec, core_tf_types.Tensor) and slice_spec.dtype == dtypes.bool ) or ( isinstance(slice_spec, (np.ndarray, np_arrays.ndarray)) and slice_spec.dtype == np.bool_ ) ): return array_ops.boolean_mask(tensor=self, mask=slice_spec) if not isinstance(slice_spec, tuple): slice_spec = _as_spec_tuple(slice_spec) result_t = _slice_helper(self, slice_spec) return result_t def 
_with_index_update_helper(update_method, a, slice_spec, updates): """Implementation of ndarray._with_index_*.""" if ( isinstance(slice_spec, bool) or ( isinstance(slice_spec, core_tf_types.Tensor) and slice_spec.dtype == dtypes.bool ) or ( isinstance(slice_spec, (np.ndarray, np_arrays.ndarray)) and slice_spec.dtype == np.bool_ ) ): slice_spec = nonzero(slice_spec) if not isinstance(slice_spec, tuple): slice_spec = _as_spec_tuple(slice_spec) a_dtype = a.dtype a, updates = _promote_dtype_binary(a, updates) result_t = _slice_helper(a, slice_spec, update_method, updates) return result_t.astype(a_dtype) setattr(np_arrays.ndarray, '_numpy_style_getitem', _getitem) setattr( np_arrays.ndarray, '_with_index_update', functools.partial(_with_index_update_helper, _UpdateMethod.UPDATE), ) setattr( np_arrays.ndarray, '_with_index_add', functools.partial(_with_index_update_helper, _UpdateMethod.ADD), ) setattr( np_arrays.ndarray, '_with_index_min', functools.partial(_with_index_update_helper, _UpdateMethod.MIN), ) setattr( np_arrays.ndarray, '_with_index_max', functools.partial(_with_index_update_helper, _UpdateMethod.MAX), )
_UpdateMethod
python
getsentry__sentry-python
sentry_sdk/integrations/opentelemetry/propagator.py
{ "start": 812, "end": 3720 }
class ____(TextMapPropagator): """ Propagates tracing headers for Sentry's tracing system in a way OTel understands. """ def extract(self, carrier, context=None, getter=default_getter): # type: (CarrierT, Optional[Context], Getter[CarrierT]) -> Context if context is None: context = get_current() sentry_trace = getter.get(carrier, SENTRY_TRACE_HEADER_NAME) if not sentry_trace: return context sentrytrace = extract_sentrytrace_data(sentry_trace[0]) if not sentrytrace: return context context = set_value(SENTRY_TRACE_KEY, sentrytrace, context) trace_id, span_id = sentrytrace["trace_id"], sentrytrace["parent_span_id"] span_context = SpanContext( trace_id=int(trace_id, 16), # type: ignore span_id=int(span_id, 16), # type: ignore # we simulate a sampled trace on the otel side and leave the sampling to sentry trace_flags=TraceFlags(TraceFlags.SAMPLED), is_remote=True, ) baggage_header = getter.get(carrier, BAGGAGE_HEADER_NAME) if baggage_header: baggage = Baggage.from_incoming_header(baggage_header[0]) else: # If there's an incoming sentry-trace but no incoming baggage header, # for instance in traces coming from older SDKs, # baggage will be empty and frozen and won't be populated as head SDK. 
baggage = Baggage(sentry_items={}) baggage.freeze() context = set_value(SENTRY_BAGGAGE_KEY, baggage, context) span = NonRecordingSpan(span_context) modified_context = trace.set_span_in_context(span, context) return modified_context def inject(self, carrier, context=None, setter=default_setter): # type: (CarrierT, Optional[Context], Setter[CarrierT]) -> None if context is None: context = get_current() current_span = trace.get_current_span(context) current_span_context = current_span.get_span_context() if not current_span_context.is_valid: return span_id = trace.format_span_id(current_span_context.span_id) span_map = SentrySpanProcessor().otel_span_map sentry_span = span_map.get(span_id, None) if not sentry_span: return setter.set(carrier, SENTRY_TRACE_HEADER_NAME, sentry_span.to_traceparent()) if sentry_span.containing_transaction: baggage = sentry_span.containing_transaction.get_baggage() if baggage: baggage_data = baggage.serialize() if baggage_data: setter.set(carrier, BAGGAGE_HEADER_NAME, baggage_data) @property def fields(self): # type: () -> Set[str] return {SENTRY_TRACE_HEADER_NAME, BAGGAGE_HEADER_NAME}
SentryPropagator
python
python__mypy
mypyc/test-data/fixtures/ir.py
{ "start": 1640, "end": 1799 }
class ____: def __init__(self, o: object) -> None: ... def __or__(self, o: object) -> Any: ... __name__ : str __annotations__: Dict[str, Any]
type
python
joke2k__faker
tests/providers/test_ssn.py
{ "start": 39669, "end": 40066 }
class ____(unittest.TestCase): def setUp(self): self.fake = Faker("th_TH") Faker.seed(0) def test_ssn(self): for _ in range(100): assert re.search(r"^[1-8]-[1-9]\d{3}-\d{5}-\d{2}-\d$", self.fake.ssn()) def test_vat_id(self): for _ in range(100): assert re.search(r"^[1-8]-[1-9]\d{3}-\d{5}-\d{2}-\d$", self.fake.vat_id())
TestThTH
python
scipy__scipy
scipy/special/tests/test_ndtr.py
{ "start": 209, "end": 475 }
class ____: def test_zero(self): assert sc.ndtri(0.5) == 0.0 def test_asymptotes(self): assert_equal(sc.ndtri([0.0, 1.0]), [-np.inf, np.inf]) def test_outside_of_domain(self): assert all(np.isnan(sc.ndtri([-1.5, 1.5])))
TestNdtri
python
dagster-io__dagster
examples/docs_snippets/docs_snippets_tests/snippet_checks/guides/dg/test_using_env.py
{ "start": 2689, "end": 20494 }
class ____(Enum): LOCAL = "localDeploymentScope" BRANCH = "allBranchDeploymentsScope" FULL = "fullDeploymentScope" def mock_gql_for_list_env( location_name: str, secrets: dict[str, set[EnvVarScope]], ) -> None: scope_vars_by_name = { name: { "fullDeploymentScope": False, "allBranchDeploymentsScope": False, "localDeploymentScope": False, **{scope.value: True for scope in scopes}, } for name, scopes in secrets.items() } mock_gql_mutation( gql.GET_SECRETS_FOR_SCOPES_QUERY_NO_VALUE, json_data={ "data": { "secretsOrError": { "secrets": [ { "secretName": name, "locationNames": [location_name], **scope_vars, } for name, scope_vars in scope_vars_by_name.items() ] } } }, expected_variables={ "locationName": location_name, "scopes": { "fullDeploymentScope": True, "allBranchDeploymentsScope": True, "localDeploymentScope": True, }, }, ) def mock_gql_for_pull_env( location_name: str, secrets: dict[str, set[EnvVarScope]], ) -> None: scope_vars_by_name = { name: { "fullDeploymentScope": False, "allBranchDeploymentsScope": False, "localDeploymentScope": False, **{scope.value: True for scope in scopes}, } for name, scopes in secrets.items() } mock_gql_mutation( gql.SECRETS_QUERY, json_data={ "data": { "secretsOrError": { "secrets": [ { "secretName": name, "locationNames": [location_name], "secretValue": "...", **scope_vars, } for name, scope_vars in scope_vars_by_name.items() ] } } }, expected_variables={ "onlyViewable": True, "scopes": { "localDeploymentScope": True, }, }, ) def mock_gql_for_create_env( location_name: str, secret_name: str, secret_value: str, scopes: set[EnvVarScope] ) -> None: scope_vars = { "fullDeploymentScope": False, "allBranchDeploymentsScope": False, "localDeploymentScope": False, **{scope.value: True for scope in scopes}, } mock_gql_mutation( gql.GET_SECRETS_FOR_SCOPES_QUERY, json_data={"data": {"secretsOrError": {"secrets": []}}}, expected_variables={ "locationName": location_name, "scopes": scope_vars, "secretName": secret_name, }, ) mock_gql_mutation( 
gql.CREATE_OR_UPDATE_SECRET_FOR_SCOPES_MUTATION, json_data={ "data": { "createOrUpdateSecretForScopes": { "secret": { "secretName": secret_name, "locationNames": [location_name], **scope_vars, } } } }, expected_variables={ "locationName": location_name, "scopes": scope_vars, "secretName": secret_name, "secretValue": secret_value, }, ) @responses.activate def test_component_docs_using_env( update_snippets: bool, mock_graphql_server: str ) -> None: with isolated_snippet_generation_environment( should_update_snippets=update_snippets, snapshot_base_dir=SNIPPETS_DIR, global_snippet_replace_regexes=[ MASK_EDITABLE_DAGSTER, MASK_INGESTION, _MASK_EMPTY_WARNINGS, MASK_VENV, ], ) as context: with ExitStack() as stack: context.run_command_and_snippet_output( cmd="create-dagster project ingestion --use-editable-dagster", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-dg-init.txt", snippet_replace_regex=[ (r"Using CPython.*?(?:\n(?!\n).*)*\n\n", "...venv creation...\n"), # Kind of a hack, this appears after you enter "y" at the prompt, but when # we simulate the input we don't get the newline we get in terminal so we # slide it in here. (r"Running `uv sync`\.\.\.", "\nRunning `uv sync`..."), ("create-dagster", "uvx create-dagster@latest"), ], input_str="y\n", ignore_output=True, ) context.run_command_and_snippet_output( cmd="cd ingestion && source .venv/bin/activate", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-activate-venv.txt", ignore_output=True, ) # Activate the virtual environment after creating it-- executing the above `source # .venv/bin/activate` command does not actually activate the virtual environment # across subsequent command invocations in this test. 
stack.enter_context(activate_venv(".venv")) context.run_command_and_snippet_output( cmd=f"uv add --editable '{EDITABLE_DIR / 'dagster-sling'!s}'", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-uv-add-sling.txt", ignore_output=True, print_cmd="uv add dagster-sling", ) context.run_command_and_snippet_output( cmd="dg list components", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-dg-list-components.txt", ) # Scaffold dbt project components context.run_command_and_snippet_output( cmd="dg scaffold defs dagster_sling.SlingReplicationCollectionComponent ingest_to_snowflake", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-dg-scaffold-sling.txt", ) context.run_command_and_snippet_output( cmd=textwrap.dedent(""" curl -O https://raw.githubusercontent.com/dbt-labs/jaffle-shop-classic/refs/heads/main/seeds/raw_customers.csv """).strip(), snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-curl.txt", ignore_output=True, ) context.create_file( file_path=Path("src") / "ingestion" / "defs" / "ingest_files" / "replication.yaml", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-replication.yaml", contents=textwrap.dedent( """ source: LOCAL target: SNOWFLAKE defaults: mode: full-refresh object: "{stream_table}" streams: file://raw_customers.csv: object: "sandbox.raw_customers" """, ).strip(), ) # Add Snowflake connection context.create_file( file_path=Path("src") / "ingestion" / "defs" / "ingest_files" / "defs.yaml", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-defs.yaml", contents=format_multiline(""" type: dagster_sling.SlingReplicationCollectionComponent attributes: connections: SNOWFLAKE: type: snowflake account: "{{ env.SNOWFLAKE_ACCOUNT }}" user: "{{ env.SNOWFLAKE_USER }}" password: "{{ env.SNOWFLAKE_PASSWORD }}" database: "{{ env.SNOWFLAKE_DATABASE }}" replications: - path: replication.yaml """), ) context.run_command_and_snippet_output( cmd="dg check yaml --validate-requirements", 
snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-dg-component-check.txt", snippet_replace_regex=[ MASK_INGESTION, ], expect_error=True, ) # Add Snowflake connection context.create_file( file_path=Path("src") / "ingestion" / "defs" / "ingest_files" / "defs.yaml", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-component-with-env-deps.yaml", contents=format_multiline(""" type: dagster_sling.SlingReplicationCollectionComponent attributes: connections: SNOWFLAKE: type: snowflake account: "{{ env.SNOWFLAKE_ACCOUNT }}" user: "{{ env.SNOWFLAKE_USER }}" password: "{{ env.SNOWFLAKE_PASSWORD }}" database: "{{ env.SNOWFLAKE_DATABASE }}" replications: - path: replication.yaml requirements: env: - SNOWFLAKE_ACCOUNT - SNOWFLAKE_USER - SNOWFLAKE_PASSWORD - SNOWFLAKE_DATABASE """), ) context.run_command_and_snippet_output( cmd="dg check yaml", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-dg-component-check-fixed.txt", ) context.run_command_and_snippet_output( cmd="dg list env", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-dg-list-env.txt", snippet_replace_regex=[MASK_INGESTION, REMOVE_EXCESS_DESCRIPTION_ROW], ) context.run_command_and_snippet_output( cmd=textwrap.dedent(""" echo 'SNOWFLAKE_ACCOUNT=...' >> .env echo 'SNOWFLAKE_USER=...' >> .env echo 'SNOWFLAKE_PASSWORD=...' 
>> .env echo "SNOWFLAKE_DATABASE=sandbox" >> .env """).strip(), snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-inject-env.txt", ) context.run_command_and_snippet_output( cmd="dg list env", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-dg-list-env.txt", snippet_replace_regex=[MASK_INGESTION, REMOVE_EXCESS_DESCRIPTION_ROW], ) Path(os.environ["DG_CLI_CONFIG"]).write_text( f""" [cli.telemetry] enabled = false [cli.plus] organization = "hooli" url = "{mock_graphql_server}" user_token = "test" default_deployment = "prod" """ ) mock_gql_for_list_env( location_name="ingestion", secrets={}, ) context.run_command_and_snippet_output( cmd="dg list env", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-dg-env-list.txt", ) mock_gql_for_create_env( location_name="ingestion", secret_name="SNOWFLAKE_ACCOUNT", secret_value="...", scopes={EnvVarScope.LOCAL}, ) mock_gql_for_create_env( location_name="ingestion", secret_name="SNOWFLAKE_USER", secret_value="...", scopes={EnvVarScope.LOCAL}, ) mock_gql_for_create_env( location_name="ingestion", secret_name="SNOWFLAKE_PASSWORD", secret_value="...", scopes={EnvVarScope.LOCAL}, ) mock_gql_for_create_env( location_name="ingestion", secret_name="SNOWFLAKE_DATABASE", secret_value="sandbox", scopes={EnvVarScope.LOCAL}, ) context.run_command_and_snippet_output( cmd=textwrap.dedent(""" dg plus create env SNOWFLAKE_ACCOUNT --from-local-env --scope local && dg plus create env SNOWFLAKE_USER --from-local-env --scope local && dg plus create env SNOWFLAKE_PASSWORD --from-local-env --scope local && dg plus create env SNOWFLAKE_DATABASE --from-local-env --scope local """).strip(), snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-dg-plus-env-add.txt", ) mock_gql_for_list_env( location_name="ingestion", secrets={ "SNOWFLAKE_USER": {EnvVarScope.LOCAL}, "SNOWFLAKE_PASSWORD": {EnvVarScope.LOCAL}, "SNOWFLAKE_DATABASE": {EnvVarScope.LOCAL}, "SNOWFLAKE_ACCOUNT": {EnvVarScope.LOCAL}, }, ) 
context.run_command_and_snippet_output( cmd="dg list env", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-dg-env-list.txt", ) mock_gql_for_pull_env( location_name="ingestion", secrets={ "SNOWFLAKE_USER": {EnvVarScope.LOCAL}, "SNOWFLAKE_PASSWORD": {EnvVarScope.LOCAL}, "SNOWFLAKE_DATABASE": {EnvVarScope.LOCAL}, "SNOWFLAKE_ACCOUNT": {EnvVarScope.LOCAL}, }, ) context.run_command_and_snippet_output( cmd="dg plus pull env", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-dg-env-pull.txt", ) mock_gql_for_create_env( location_name="ingestion", secret_name="SNOWFLAKE_ACCOUNT", secret_value="...", scopes={EnvVarScope.BRANCH, EnvVarScope.FULL}, ) mock_gql_for_create_env( location_name="ingestion", secret_name="SNOWFLAKE_USER", secret_value="...", scopes={EnvVarScope.BRANCH, EnvVarScope.FULL}, ) mock_gql_for_create_env( location_name="ingestion", secret_name="SNOWFLAKE_PASSWORD", secret_value="...", scopes={EnvVarScope.BRANCH, EnvVarScope.FULL}, ) mock_gql_for_create_env( location_name="ingestion", secret_name="SNOWFLAKE_DATABASE", secret_value="production", scopes={EnvVarScope.BRANCH, EnvVarScope.FULL}, ) context.run_command_and_snippet_output( cmd=textwrap.dedent(""" dg plus create env SNOWFLAKE_ACCOUNT ... --scope branch --scope full && dg plus create env SNOWFLAKE_USER ... --scope branch --scope full && dg plus create env SNOWFLAKE_PASSWORD ... 
--scope branch --scope full && dg plus create env SNOWFLAKE_DATABASE production --scope branch --scope full """).strip(), snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-dg-plus-env-add.txt", ) mock_gql_for_list_env( location_name="ingestion", secrets={ "SNOWFLAKE_USER": { EnvVarScope.LOCAL, EnvVarScope.BRANCH, EnvVarScope.FULL, }, "SNOWFLAKE_PASSWORD": { EnvVarScope.LOCAL, EnvVarScope.BRANCH, EnvVarScope.FULL, }, "SNOWFLAKE_DATABASE": { EnvVarScope.LOCAL, EnvVarScope.BRANCH, EnvVarScope.FULL, }, "SNOWFLAKE_ACCOUNT": { EnvVarScope.LOCAL, EnvVarScope.BRANCH, EnvVarScope.FULL, }, }, ) context.run_command_and_snippet_output( cmd="dg list env", snippet_path=SNIPPETS_DIR / f"{context.get_next_snip_number()}-dg-env-list.txt", )
EnvVarScope
python
mlflow__mlflow
mlflow/telemetry/events.py
{ "start": 4745, "end": 4813 }
class ____(Event): name: str = "create_dataset"
CreateDatasetEvent
python
tensorflow__tensorflow
tensorflow/python/eager/polymorphic_function/atomic_function.py
{ "start": 2131, "end": 2746 }
class ____: """Specifies additional configuration for an AtomicFunction call.""" # Used by ACD to identify the CollectiveManager this function is scoped in. collective_manager_ids_used: List[int] = dataclasses.field( default_factory=list ) # Used by ACD to list Ops/Tensors/Callables that must be called in advance. control_captures: List[Any] = dataclasses.field(default_factory=list) # Determines what kind of partitioned call is used for this function. is_stateful: bool = False # Maps the (scope_id, name) in runtime to associated AtomicFunctions. RUNTIME_FUNCTION_REFS = {}
CallOptions
python
google__jax
jax/_src/pallas/core.py
{ "start": 2923, "end": 3207 }
class ____(dtypes.ExtendedDType): name: str _rules = AbstractSemaphoreTyRules def __repr__(self) -> str: return self.name def __eq__(self, other): return self.__class__ == other.__class__ def __hash__(self) -> int: return hash(self.__class__)
AbstractSemaphoreTy
python
jd__tenacity
tenacity/__init__.py
{ "start": 4104, "end": 4214 }
class ____(Exception): """Always retry the executed function when raised.""" NO_RESULT = object()
TryAgain
python
pytorch__pytorch
test/torch_np/numpy_tests/core/test_shape_base.py
{ "start": 8175, "end": 15959 }
class ____(TestCase): def test_out_and_dtype_simple(self): # numpy raises TypeError on both out=... and dtype=... a, b, out = np.ones(3), np.ones(4), np.ones(3 + 4) with pytest.raises(TypeError): np.concatenate((a, b), out=out, dtype=float) def test_returns_copy(self): a = np.eye(3) b = np.concatenate([a]) b[0, 0] = 2 assert b[0, 0] != a[0, 0] def test_exceptions(self): # test axis must be in bounds for ndim in [1, 2, 3]: a = np.ones((1,) * ndim) np.concatenate((a, a), axis=0) # OK assert_raises((IndexError, np.AxisError), np.concatenate, (a, a), axis=ndim) assert_raises( (IndexError, np.AxisError), np.concatenate, (a, a), axis=-(ndim + 1) ) # Scalars cannot be concatenated assert_raises((RuntimeError, ValueError), concatenate, (0,)) assert_raises((RuntimeError, ValueError), concatenate, (np.array(0),)) # dimensionality must match assert_raises( (RuntimeError, ValueError), # assert_raises_regex( # ValueError, # r"all the input arrays must have same number of dimensions, but " # r"the array at index 0 has 1 dimension\(s\) and the array at " # r"index 1 has 2 dimension\(s\)", np.concatenate, (np.zeros(1), np.zeros((1, 1))), ) # test shapes must match except for concatenation axis a = np.ones((1, 2, 3)) b = np.ones((2, 2, 3)) axis = list(range(3)) for _ in range(3): np.concatenate((a, b), axis=axis[0]) # OK # assert_raises_regex( assert_raises( (RuntimeError, ValueError), # "all the input array dimensions except for the concatenation axis " # "must match exactly, but along dimension {}, the array at " # "index 0 has size 1 and the array at index 1 has size 2" # .format(i), np.concatenate, (a, b), axis=axis[1], ) assert_raises( (RuntimeError, ValueError), np.concatenate, (a, b), axis=axis[2] ) a = np.moveaxis(a, -1, 0) b = np.moveaxis(b, -1, 0) axis.append(axis.pop(0)) # No arrays to concatenate raises ValueError assert_raises(ValueError, concatenate, ()) def test_concatenate_axis_None(self): a = np.arange(4, dtype=np.float64).reshape((2, 2)) b = list(range(3)) r = 
np.concatenate((a, a), axis=None) assert r.dtype == a.dtype assert r.ndim == 1 r = np.concatenate((a, b), axis=None) assert r.size == a.size + len(b) assert r.dtype == a.dtype out = np.zeros(a.size + len(b)) r = np.concatenate((a, b), axis=None) rout = np.concatenate((a, b), axis=None, out=out) assert out is rout assert np.all(r == rout) @xpassIfTorchDynamo_np # (reason="concatenate(x, axis=None) relies on x being a sequence") def test_large_concatenate_axis_None(self): # When no axis is given, concatenate uses flattened versions. # This also had a bug with many arrays (see gh-5979). x = np.arange(1, 100) r = np.concatenate(x, None) assert np.all(x == r) # This should probably be deprecated: r = np.concatenate(x, 100) # axis is >= MAXDIMS assert_array_equal(x, r) def test_concatenate(self): # Test concatenate function # One sequence returns unmodified (but as array) # XXX: a single argument; relies on an ndarray being a sequence r4 = list(range(4)) # assert_array_equal(concatenate((r4,)), r4) # # Any sequence # assert_array_equal(concatenate((tuple(r4),)), r4) # assert_array_equal(concatenate((array(r4),)), r4) # 1D default concatenation r3 = list(range(3)) assert_array_equal(concatenate((r4, r3)), r4 + r3) # Mixed sequence types assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3) assert_array_equal(concatenate((array(r4), r3)), r4 + r3) # Explicit axis specification assert_array_equal(concatenate((r4, r3), 0), r4 + r3) # Including negative assert_array_equal(concatenate((r4, r3), -1), r4 + r3) # 2D a23 = array([[10, 11, 12], [13, 14, 15]]) a13 = array([[0, 1, 2]]) res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]]) assert_array_equal(concatenate((a23, a13)), res) assert_array_equal(concatenate((a23, a13), 0), res) assert_array_equal(concatenate((a23.T, a13.T), 1), res.T) assert_array_equal(concatenate((a23.T, a13.T), -1), res.T) # Arrays much match shape assert_raises((RuntimeError, ValueError), concatenate, (a23.T, a13.T), 0) # 3D res = np.arange(2 * 3 * 
7).reshape((2, 3, 7)) a0 = res[..., :4] a1 = res[..., 4:6] a2 = res[..., 6:] assert_array_equal(concatenate((a0, a1, a2), 2), res) assert_array_equal(concatenate((a0, a1, a2), -1), res) assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T) out = res.copy() rout = concatenate((a0, a1, a2), 2, out=out) assert_(out is rout) assert_equal(res, rout) @skip(reason="concat, arrays, sequence") @skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython") def test_operator_concat(self): import operator a = array([1, 2]) b = array([3, 4]) n = [1, 2] assert_raises(TypeError, operator.concat, a, b) assert_raises(TypeError, operator.concat, a, n) assert_raises(TypeError, operator.concat, n, a) assert_raises(TypeError, operator.concat, a, 1) assert_raises(TypeError, operator.concat, 1, a) def test_bad_out_shape(self): a = array([1, 2]) b = array([3, 4]) assert_raises(ValueError, concatenate, (a, b), out=np.empty(5)) assert_raises(ValueError, concatenate, (a, b), out=np.empty((4, 1))) assert_raises(ValueError, concatenate, (a, b), out=np.empty((1, 4))) concatenate((a, b), out=np.empty(4)) @parametrize("axis", [None, 0]) @parametrize( "out_dtype", ["c8", "f4", "f8", "i8"] ) # torch does not have ">f8", "S4" @parametrize("casting", ["no", "equiv", "safe", "same_kind", "unsafe"]) def test_out_and_dtype(self, axis, out_dtype, casting): # Compare usage of `out=out` with `dtype=out.dtype` out = np.empty(4, dtype=out_dtype) to_concat = (array([1.1, 2.2]), array([3.3, 4.4])) if not np.can_cast(to_concat[0], out_dtype, casting=casting): with assert_raises(TypeError): concatenate(to_concat, out=out, axis=axis, casting=casting) with assert_raises(TypeError): concatenate(to_concat, dtype=out.dtype, axis=axis, casting=casting) else: res_out = concatenate(to_concat, out=out, axis=axis, casting=casting) res_dtype = concatenate( to_concat, dtype=out.dtype, axis=axis, casting=casting ) assert res_out is out assert_array_equal(out, res_dtype) assert res_dtype.dtype == 
out_dtype with assert_raises(TypeError): concatenate(to_concat, out=out, dtype=out_dtype, axis=axis) @instantiate_parametrized_tests
TestConcatenate
python
sympy__sympy
sympy/matrices/expressions/applyfunc.py
{ "start": 292, "end": 6751 }
class ____(MatrixExpr): r""" Apply function to a matrix elementwise without evaluating. Examples ======== It can be created by calling ``.applyfunc(<function>)`` on a matrix expression: >>> from sympy import MatrixSymbol >>> from sympy.matrices.expressions.applyfunc import ElementwiseApplyFunction >>> from sympy import exp >>> X = MatrixSymbol("X", 3, 3) >>> X.applyfunc(exp) Lambda(_d, exp(_d)).(X) Otherwise using the class constructor: >>> from sympy import eye >>> expr = ElementwiseApplyFunction(exp, eye(3)) >>> expr Lambda(_d, exp(_d)).(Matrix([ [1, 0, 0], [0, 1, 0], [0, 0, 1]])) >>> expr.doit() Matrix([ [E, 1, 1], [1, E, 1], [1, 1, E]]) Notice the difference with the real mathematical functions: >>> exp(eye(3)) Matrix([ [E, 0, 0], [0, E, 0], [0, 0, E]]) """ def __new__(cls, function, expr): expr = _sympify(expr) if not expr.is_Matrix: raise ValueError("{} must be a matrix instance.".format(expr)) if expr.shape == (1, 1): # Check if the function returns a matrix, in that case, just apply # the function instead of creating an ElementwiseApplyFunc object: ret = function(expr) if isinstance(ret, MatrixExpr): return ret if not isinstance(function, (FunctionClass, Lambda)): d = Dummy('d') function = Lambda(d, function(d)) function = sympify(function) if not isinstance(function, (FunctionClass, Lambda)): raise ValueError( "{} should be compatible with SymPy function classes." 
.format(function)) if 1 not in function.nargs: raise ValueError( '{} should be able to accept 1 arguments.'.format(function)) if not isinstance(function, Lambda): d = Dummy('d') function = Lambda(d, function(d)) obj = MatrixExpr.__new__(cls, function, expr) return obj @property def function(self): return self.args[0] @property def expr(self): return self.args[1] @property def shape(self): return self.expr.shape def doit(self, **hints): deep = hints.get("deep", True) expr = self.expr if deep: expr = expr.doit(**hints) function = self.function if isinstance(function, Lambda) and function.is_identity: # This is a Lambda containing the identity function. return expr if isinstance(expr, MatrixBase): return expr.applyfunc(self.function) elif isinstance(expr, ElementwiseApplyFunction): return ElementwiseApplyFunction( lambda x: self.function(expr.function(x)), expr.expr ).doit(**hints) else: return self def _entry(self, i, j, **kwargs): return self.function(self.expr._entry(i, j, **kwargs)) def _get_function_fdiff(self): d = Dummy("d") function = self.function(d) fdiff = function.diff(d) if isinstance(fdiff, Function): fdiff = type(fdiff) else: fdiff = Lambda(d, fdiff) return fdiff def _eval_derivative(self, x): from sympy.matrices.expressions.hadamard import hadamard_product dexpr = self.expr.diff(x) fdiff = self._get_function_fdiff() return hadamard_product( dexpr, ElementwiseApplyFunction(fdiff, self.expr) ) def _eval_derivative_matrix_lines(self, x): from sympy.matrices.expressions.special import Identity from sympy.tensor.array.expressions.array_expressions import ArrayContraction from sympy.tensor.array.expressions.array_expressions import ArrayDiagonal from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct fdiff = self._get_function_fdiff() lr = self.expr._eval_derivative_matrix_lines(x) ewdiff = ElementwiseApplyFunction(fdiff, self.expr) if 1 in x.shape: # Vector: iscolumn = self.shape[1] == 1 for i in lr: if iscolumn: ptr1 = 
i.first_pointer ptr2 = Identity(self.shape[1]) else: ptr1 = Identity(self.shape[0]) ptr2 = i.second_pointer subexpr = ExprBuilder( ArrayDiagonal, [ ExprBuilder( ArrayTensorProduct, [ ewdiff, ptr1, ptr2, ] ), (0, 2) if iscolumn else (1, 4) ], validator=ArrayDiagonal._validate ) i._lines = [subexpr] i._first_pointer_parent = subexpr.args[0].args i._first_pointer_index = 1 i._second_pointer_parent = subexpr.args[0].args i._second_pointer_index = 2 else: # Matrix case: for i in lr: ptr1 = i.first_pointer ptr2 = i.second_pointer newptr1 = Identity(ptr1.shape[1]) newptr2 = Identity(ptr2.shape[1]) subexpr = ExprBuilder( ArrayContraction, [ ExprBuilder( ArrayTensorProduct, [ptr1, newptr1, ewdiff, ptr2, newptr2] ), (1, 2, 4), (5, 7, 8), ], validator=ArrayContraction._validate ) i._first_pointer_parent = subexpr.args[0].args i._first_pointer_index = 1 i._second_pointer_parent = subexpr.args[0].args i._second_pointer_index = 4 i._lines = [subexpr] return lr def _eval_transpose(self): from sympy.matrices.expressions.transpose import Transpose return self.func(self.function, Transpose(self.expr).doit())
ElementwiseApplyFunction
python
astropy__astropy
astropy/extern/ply/lex.py
{ "start": 2416, "end": 2689 }
class ____(object): def __str__(self): return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos) def __repr__(self): return str(self) # This object is a stand-in for a logging object created by the # logging module.
LexToken
python
sqlalchemy__sqlalchemy
test/orm/test_generative.py
{ "start": 539, "end": 5002 }
class ____(fixtures.MappedTest): run_inserts = "once" run_deletes = None @classmethod def define_tables(cls, metadata): Table( "foo", metadata, Column( "id", Integer, normalize_sequence(config, sa.Sequence("foo_id_seq")), primary_key=True, ), Column("bar", Integer), Column("range", Integer), ) @classmethod def fixtures(cls): rows = tuple([(i, i % 10) for i in range(100)]) foo_data = (("bar", "range"),) + rows return dict(foo=foo_data) @classmethod def setup_mappers(cls): foo = cls.tables.foo class Foo(cls.Basic): pass cls.mapper_registry.map_imperatively(Foo, foo) def test_selectby(self): Foo = self.classes.Foo res = fixture_session().query(Foo).filter_by(range=5) assert res.order_by(Foo.bar)[0].bar == 5 assert res.order_by(sa.desc(Foo.bar))[0].bar == 95 def test_slice(self): Foo = self.classes.Foo sess = fixture_session() query = sess.query(Foo).order_by(Foo.id) orig = query.all() assert query[1] == orig[1] assert list(query[10:20]) == orig[10:20] assert list(query[10:]) == orig[10:] assert list(query[:10]) == orig[:10] assert list(query[:10]) == orig[:10] assert list(query[5:5]) == orig[5:5] assert list(query[10:40:3]) == orig[10:40:3] # negative slices and indexes are deprecated and are tested # in test_query.py and test_deprecations.py assert query[10:20][5] == orig[10:20][5] def test_aggregate(self): foo, Foo = self.tables.foo, self.classes.Foo sess = fixture_session() query = sess.query(Foo) assert query.count() == 100 assert sess.query(func.min(foo.c.bar)).filter( foo.c.bar < 30 ).one() == (0,) assert sess.query(func.max(foo.c.bar)).filter( foo.c.bar < 30 ).one() == (29,) eq_( query.filter(foo.c.bar < 30) .with_entities(sa.func.max(foo.c.bar)) .scalar(), 29, ) @testing.fails_if( lambda: testing.against("mysql+mysqldb") and testing.db.dialect.dbapi.version_info[:4] == (1, 2, 1, "gamma"), "unknown incompatibility", ) def test_aggregate_1(self): foo = self.tables.foo query = fixture_session().query(func.sum(foo.c.bar)) assert query.filter(foo.c.bar < 30).one() 
== (435,) @testing.fails_on( "mssql", "AVG produces an average as the original column type on mssql.", ) def test_aggregate_2(self): foo = self.tables.foo query = fixture_session().query(func.avg(foo.c.bar)) avg = query.filter(foo.c.bar < 30).one()[0] eq_(float(round(avg, 1)), 14.5) @testing.fails_on( "mssql", "AVG produces an average as the original column type on mssql.", ) def test_aggregate_3(self): foo, Foo = self.tables.foo, self.classes.Foo query = fixture_session().query(Foo) avg_f = ( query.filter(foo.c.bar < 30) .with_entities(sa.func.avg(foo.c.bar)) .scalar() ) eq_(float(round(avg_f, 1)), 14.5) avg_o = ( query.filter(foo.c.bar < 30) .with_entities(sa.func.avg(foo.c.bar)) .scalar() ) eq_(float(round(avg_o, 1)), 14.5) def test_filter(self): Foo = self.classes.Foo query = fixture_session().query(Foo) assert query.count() == 100 assert query.filter(Foo.bar < 30).count() == 30 res2 = query.filter(Foo.bar < 30).filter(Foo.bar > 10) assert res2.count() == 19 def test_order_by(self): Foo = self.classes.Foo query = fixture_session().query(Foo) assert query.order_by(Foo.bar)[0].bar == 0 assert query.order_by(sa.desc(Foo.bar))[0].bar == 99 def test_offset_order_by(self): Foo = self.classes.Foo query = fixture_session().query(Foo) assert list(query.order_by(Foo.bar).offset(10))[0].bar == 10 def test_offset(self): Foo = self.classes.Foo query = fixture_session().query(Foo) assert len(list(query.limit(10))) == 10
GenerativeQueryTest
python
apache__airflow
task-sdk/src/airflow/sdk/api/datamodels/_generated.py
{ "start": 10857, "end": 11153 }
class ____(BaseModel): """ Variable schema for responses with fields that are needed for Runtime. """ model_config = ConfigDict( extra="forbid", ) key: Annotated[str, Field(title="Key")] value: Annotated[str | None, Field(title="Value")] = None
VariableResponse
python
ansible__ansible
lib/ansible/plugins/inventory/__init__.py
{ "start": 12678, "end": 12885 }
class ____(_BaseInventoryPlugin): """ Parses a File based Inventory Source""" TYPE = 'storage' def __init__(self): super(BaseFileInventoryPlugin, self).__init__()
BaseFileInventoryPlugin
python
pypa__warehouse
warehouse/accounts/interfaces.py
{ "start": 582, "end": 643 }
class ____(RecoveryCodeException): pass
InvalidRecoveryCode
python
apache__airflow
providers/google/tests/unit/google/cloud/transfers/test_gdrive_to_gcs.py
{ "start": 1363, "end": 2598 }
class ____: @mock.patch("airflow.providers.google.cloud.transfers.gdrive_to_gcs.GCSHook") @mock.patch("airflow.providers.google.cloud.transfers.gdrive_to_gcs.GoogleDriveHook") def test_execute(self, mock_gdrive_hook, mock_gcs_hook): context = {} op = GoogleDriveToGCSOperator( task_id="test_task", folder_id=FOLDER_ID, file_name=FILE_NAME, drive_id=DRIVE_ID, bucket_name=BUCKET, object_name=OBJECT, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) meta = {"id": "123xyz"} mock_gdrive_hook.return_value.get_file_id.return_value = meta op.execute(context) mock_gdrive_hook.return_value.get_file_id.assert_called_once_with( folder_id=FOLDER_ID, file_name=FILE_NAME, drive_id=DRIVE_ID ) mock_gdrive_hook.return_value.download_file.assert_called_once_with( file_id=meta["id"], file_handle=mock.ANY ) mock_gcs_hook.return_value.provide_file_and_upload.assert_called_once_with( bucket_name=BUCKET, object_name=OBJECT ) assert op.dry_run() is None
TestGoogleDriveToGCSOperator
python
sympy__sympy
sympy/core/function.py
{ "start": 3066, "end": 3181 }
class ____(TypeError): '''Raised when a Lambda is created with an invalid signature''' pass
BadSignatureError
python
encode__django-rest-framework
rest_framework/relations.py
{ "start": 15146, "end": 15857 }
class ____(HyperlinkedRelatedField): """ A read-only field that represents the identity URL for an object, itself. This is in contrast to `HyperlinkedRelatedField` which represents the URL of relationships to other objects. """ def __init__(self, view_name=None, **kwargs): assert view_name is not None, 'The `view_name` argument is required.' kwargs['read_only'] = True kwargs['source'] = '*' super().__init__(view_name, **kwargs) def use_pk_only_optimization(self): # We have the complete object instance already. We don't need # to run the 'only get the pk for this relationship' code. return False
HyperlinkedIdentityField
python
Lightning-AI__lightning
src/lightning/pytorch/callbacks/spike.py
{ "start": 266, "end": 1154 }
class ____(FabricSpikeDetection, Callback): @torch.no_grad() def on_train_batch_end( # type: ignore self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs: Union[torch.Tensor, Mapping[str, torch.Tensor]], batch: Any, batch_idx: int, ) -> None: if isinstance(outputs, torch.Tensor): loss = outputs.detach() elif isinstance(outputs, Mapping): loss = outputs["loss"].detach() else: raise TypeError(f"outputs have to be of type torch.Tensor or Mapping, got {type(outputs).__qualname__}") if self.exclude_batches_path is None: self.exclude_batches_path = os.path.join(trainer.default_root_dir, "skip_batches.json") return FabricSpikeDetection.on_train_batch_end(self, trainer, loss, batch, batch_idx) # type: ignore
SpikeDetection
python
python-attrs__attrs
tests/test_next_gen.py
{ "start": 10595, "end": 12824 }
class ____: """ Tests for __attrs_props__ in define-style classes. """ def test_define_props_custom(self): """ define() sets __attrs_props__ with custom parameters. """ @attrs.define( slots=False, frozen=True, order=True, unsafe_hash=True, init=True, repr=True, eq=True, match_args=False, kw_only=True, cache_hash=True, str=True, ) class C: x: int assert ( ClassProps( is_exception=False, is_slotted=False, is_frozen=True, kw_only=ClassProps.KeywordOnly.YES, added_init=True, added_repr=True, added_eq=True, added_ordering=True, hashability=ClassProps.Hashability.HASHABLE_CACHED, added_match_args=False, has_weakref_slot=True, collected_fields_by_mro=True, added_str=True, added_pickling=False, # because slots=False on_setattr_hook=None, field_transformer=None, ) == C.__attrs_props__ ) def test_define_props_defaults(self): """ frozen() sets default __attrs_props__ values. """ @attrs.frozen class C: x: int assert ( ClassProps( is_exception=False, is_slotted=True, is_frozen=True, added_init=True, added_repr=True, added_eq=True, added_ordering=False, hashability=ClassProps.Hashability.HASHABLE, # b/c frozen added_match_args=True, kw_only=ClassProps.KeywordOnly.NO, has_weakref_slot=True, collected_fields_by_mro=True, added_str=False, added_pickling=True, on_setattr_hook=None, field_transformer=None, ) == C.__attrs_props__ )
TestProps
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_password_is_not_leaked.py
{ "start": 876, "end": 1884 }
class ____(ColumnMapMetricProvider): # This is the id string that will be used to reference your metric. condition_metric_name = "column_values.password_is_not_leaked" # This method implements the core logic for the PandasExecutionEngine @column_condition_partial(engine=PandasExecutionEngine) def _pandas(cls, column, **kwargs): return column.apply(lambda x: is_password_not_leaked(x)) # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine # @column_condition_partial(engine=SqlAlchemyExecutionEngine) # def _sqlalchemy(cls, column, _dialect, **kwargs): # raise NotImplementedError # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine # @column_condition_partial(engine=SparkDFExecutionEngine) # def _spark(cls, column, **kwargs): # raise NotImplementedError # This class defines the Expectation itself
ColumnValuesPasswordIsNotLeaked
python
celery__celery
celery/concurrency/asynpool.py
{ "start": 13912, "end": 51870 }
class ____(_pool.Pool): """AsyncIO Pool (no threads).""" ResultHandler = ResultHandler Worker = Worker #: Set by :meth:`register_with_event_loop` after running the first time. _registered_with_event_loop = False def WorkerProcess(self, worker): worker = super().WorkerProcess(worker) worker.dead = False return worker def __init__(self, processes=None, synack=False, sched_strategy=None, proc_alive_timeout=None, *args, **kwargs): self.sched_strategy = SCHED_STRATEGIES.get(sched_strategy, sched_strategy) processes = self.cpu_count() if processes is None else processes self.synack = synack # create queue-pairs for all our processes in advance. self._queues = { self.create_process_queues(): None for _ in range(processes) } # inqueue fileno -> process mapping self._fileno_to_inq = {} # outqueue fileno -> process mapping self._fileno_to_outq = {} # synqueue fileno -> process mapping self._fileno_to_synq = {} # We keep track of processes that haven't yet # sent a WORKER_UP message. If a process fails to send # this message within _proc_alive_timeout we terminate it # and hope the next process will recover. self._proc_alive_timeout = ( PROC_ALIVE_TIMEOUT if proc_alive_timeout is None else proc_alive_timeout ) self._waiting_to_start = set() # denormalized set of all inqueues. self._all_inqueues = set() # Set of fds being written to (busy) self._active_writes = set() # Set of active co-routines currently writing jobs. self._active_writers = set() # Set of fds that are busy (executing task) self._busy_workers = set() self._mark_worker_as_available = self._busy_workers.discard # Holds jobs waiting to be written to child processes. self.outbound_buffer = deque() self.write_stats = Counter() super().__init__(processes, *args, **kwargs) for proc in self._pool: # create initial mappings, these will be updated # as processes are recycled, or found lost elsewhere. 
self._fileno_to_outq[proc.outqR_fd] = proc self._fileno_to_synq[proc.synqW_fd] = proc self.on_soft_timeout = getattr( self._timeout_handler, 'on_soft_timeout', noop, ) self.on_hard_timeout = getattr( self._timeout_handler, 'on_hard_timeout', noop, ) def _create_worker_process(self, i): worker_before_create_process.send(sender=self) gc.collect() # Issue #2927 return super()._create_worker_process(i) def _event_process_exit(self, hub, proc): # This method is called whenever the process sentinel is readable. self._untrack_child_process(proc, hub) self.maintain_pool() def _track_child_process(self, proc, hub): """Helper method determines appropriate fd for process.""" try: fd = proc._sentinel_poll except AttributeError: # we need to duplicate the fd here to carefully # control when the fd is removed from the process table, # as once the original fd is closed we cannot unregister # the fd from epoll(7) anymore, causing a 100% CPU poll loop. fd = proc._sentinel_poll = os.dup(proc._popen.sentinel) # Safely call hub.add_reader for the determined fd iterate_file_descriptors_safely( [fd], None, hub.add_reader, self._event_process_exit, hub, proc) def _untrack_child_process(self, proc, hub): if proc._sentinel_poll is not None: fd, proc._sentinel_poll = proc._sentinel_poll, None hub.remove(fd) os.close(fd) def register_with_event_loop(self, hub): """Register the async pool with the current event loop.""" self._result_handler.register_with_event_loop(hub) self.handle_result_event = self._result_handler.handle_event self._create_timelimit_handlers(hub) self._create_process_handlers(hub) self._create_write_handlers(hub) # Add handler for when a process exits (calls maintain_pool) [self._track_child_process(w, hub) for w in self._pool] # Handle_result_event is called whenever one of the # result queues are readable. 
iterate_file_descriptors_safely( self._fileno_to_outq, self._fileno_to_outq, hub.add_reader, self.handle_result_event, '*fd*') # Timers include calling maintain_pool at a regular interval # to be certain processes are restarted. for handler, interval in self.timers.items(): hub.call_repeatedly(interval, handler) # Add on_poll_start to the event loop only once to prevent duplication # when the Consumer restarts due to a connection error. if not self._registered_with_event_loop: hub.on_tick.add(self.on_poll_start) self._registered_with_event_loop = True def _create_timelimit_handlers(self, hub): """Create handlers used to implement time limits.""" call_later = hub.call_later trefs = self._tref_for_id = WeakValueDictionary() def on_timeout_set(R, soft, hard): if soft: trefs[R._job] = call_later( soft, self._on_soft_timeout, R._job, soft, hard, hub, ) elif hard: trefs[R._job] = call_later( hard, self._on_hard_timeout, R._job, ) self.on_timeout_set = on_timeout_set def _discard_tref(job): try: tref = trefs.pop(job) tref.cancel() del tref except (KeyError, AttributeError): pass # out of scope self._discard_tref = _discard_tref def on_timeout_cancel(R): _discard_tref(R._job) self.on_timeout_cancel = on_timeout_cancel def _on_soft_timeout(self, job, soft, hard, hub): # only used by async pool. if hard: self._tref_for_id[job] = hub.call_later( hard - soft, self._on_hard_timeout, job, ) try: result = self._cache[job] except KeyError: pass # job ready else: self.on_soft_timeout(result) finally: if not hard: # remove tref self._discard_tref(job) def _on_hard_timeout(self, job): # only used by async pool. 
try: result = self._cache[job] except KeyError: pass # job ready else: self.on_hard_timeout(result) finally: # remove tref self._discard_tref(job) def on_job_ready(self, job, i, obj, inqW_fd): self._mark_worker_as_available(inqW_fd) def _create_process_handlers(self, hub): """Create handlers called on process up/down, etc.""" add_reader, remove_reader, remove_writer = ( hub.add_reader, hub.remove_reader, hub.remove_writer, ) cache = self._cache all_inqueues = self._all_inqueues fileno_to_inq = self._fileno_to_inq fileno_to_outq = self._fileno_to_outq fileno_to_synq = self._fileno_to_synq busy_workers = self._busy_workers handle_result_event = self.handle_result_event process_flush_queues = self.process_flush_queues waiting_to_start = self._waiting_to_start def verify_process_alive(proc): proc = proc() # is a weakref if (proc is not None and proc._is_alive() and proc in waiting_to_start): assert proc.outqR_fd in fileno_to_outq assert fileno_to_outq[proc.outqR_fd] is proc assert proc.outqR_fd in hub.readers error('Timed out waiting for UP message from %r', proc) os.kill(proc.pid, 9) def on_process_up(proc): """Called when a process has started.""" # If we got the same fd as a previous process then we'll also # receive jobs in the old buffer, so we need to reset the # job._write_to and job._scheduled_for attributes used to recover # message boundaries when processes exit. infd = proc.inqW_fd for job in cache.values(): if job._write_to and job._write_to.inqW_fd == infd: job._write_to = proc if job._scheduled_for and job._scheduled_for.inqW_fd == infd: job._scheduled_for = proc fileno_to_outq[proc.outqR_fd] = proc # maintain_pool is called whenever a process exits. self._track_child_process(proc, hub) assert not isblocking(proc.outq._reader) # handle_result_event is called when the processes outqueue is # readable. 
add_reader(proc.outqR_fd, handle_result_event, proc.outqR_fd) waiting_to_start.add(proc) hub.call_later( self._proc_alive_timeout, verify_process_alive, ref(proc), ) self.on_process_up = on_process_up def _remove_from_index(obj, proc, index, remove_fun, callback=None): # this remove the file descriptors for a process from # the indices. we have to make sure we don't overwrite # another processes fds, as the fds may be reused. try: fd = obj.fileno() except OSError: return try: if index[fd] is proc: # fd hasn't been reused so we can remove it from index. index.pop(fd, None) except KeyError: pass else: remove_fun(fd) if callback is not None: callback(fd) return fd def on_process_down(proc): """Called when a worker process exits.""" if getattr(proc, 'dead', None): return process_flush_queues(proc) _remove_from_index( proc.outq._reader, proc, fileno_to_outq, remove_reader, ) if proc.synq: _remove_from_index( proc.synq._writer, proc, fileno_to_synq, remove_writer, ) inq = _remove_from_index( proc.inq._writer, proc, fileno_to_inq, remove_writer, callback=all_inqueues.discard, ) if inq: busy_workers.discard(inq) self._untrack_child_process(proc, hub) waiting_to_start.discard(proc) self._active_writes.discard(proc.inqW_fd) remove_writer(proc.inq._writer) remove_reader(proc.outq._reader) if proc.synqR_fd: remove_reader(proc.synq._reader) if proc.synqW_fd: self._active_writes.discard(proc.synqW_fd) remove_reader(proc.synq._writer) self.on_process_down = on_process_down def _create_write_handlers(self, hub, pack=pack, dumps=_pickle.dumps, protocol=HIGHEST_PROTOCOL): """Create handlers used to write data to child processes.""" fileno_to_inq = self._fileno_to_inq fileno_to_synq = self._fileno_to_synq outbound = self.outbound_buffer pop_message = outbound.popleft put_message = outbound.append all_inqueues = self._all_inqueues active_writes = self._active_writes active_writers = self._active_writers busy_workers = self._busy_workers diff = all_inqueues.difference add_writer = 
hub.add_writer hub_add, hub_remove = hub.add, hub.remove mark_write_fd_as_active = active_writes.add mark_write_gen_as_active = active_writers.add mark_worker_as_busy = busy_workers.add write_generator_done = active_writers.discard get_job = self._cache.__getitem__ write_stats = self.write_stats is_fair_strategy = self.sched_strategy == SCHED_STRATEGY_FAIR revoked_tasks = worker_state.revoked getpid = os.getpid precalc = {ACK: self._create_payload(ACK, (0,)), NACK: self._create_payload(NACK, (0,))} def _put_back(job, _time=time.time): # puts back at the end of the queue if job._terminated is not None or \ job.correlation_id in revoked_tasks: if not job._accepted: job._ack(None, _time(), getpid(), None) job._set_terminated(job._terminated) else: # XXX linear lookup, should find a better way, # but this happens rarely and is here to protect against races. if job not in outbound: outbound.appendleft(job) self._put_back = _put_back # called for every event loop iteration, and if there # are messages pending this will schedule writing one message # by registering the 'schedule_writes' function for all currently # inactive inqueues (not already being written to) # consolidate means the event loop will merge them # and call the callback once with the list writable fds as # argument. Using this means we minimize the risk of having # the same fd receive every task if the pipe read buffer is not # full. 
def on_poll_start(): # Determine which io descriptors are not busy inactive = diff(active_writes) # Determine hub_add vs hub_remove strategy conditional if is_fair_strategy: # outbound buffer present and idle workers exist add_cond = outbound and len(busy_workers) < len(all_inqueues) else: # default is add when data exists in outbound buffer add_cond = outbound if add_cond: # calling hub_add vs hub_remove iterate_file_descriptors_safely( inactive, all_inqueues, hub_add, None, WRITE | ERR, consolidate=True) else: iterate_file_descriptors_safely( inactive, all_inqueues, hub.remove_writer) self.on_poll_start = on_poll_start def on_inqueue_close(fd, proc): # Makes sure the fd is removed from tracking when # the connection is closed, this is essential as fds may be reused. busy_workers.discard(fd) try: if fileno_to_inq[fd] is proc: fileno_to_inq.pop(fd, None) active_writes.discard(fd) all_inqueues.discard(fd) except KeyError: pass self.on_inqueue_close = on_inqueue_close self.hub_remove = hub_remove def schedule_writes(ready_fds, total_write_count=None): if not total_write_count: total_write_count = [0] # Schedule write operation to ready file descriptor. # The file descriptor is writable, but that does not # mean the process is currently reading from the socket. # The socket is buffered so writable simply means that # the buffer can accept at least 1 byte of data. # This means we have to cycle between the ready fds. 
# the first version used shuffle, but this version # using `total_writes % ready_fds` is about 30% faster # with many processes, and also leans more towards fairness # in write stats when used with many processes # [XXX On macOS, this may vary depending # on event loop implementation (i.e, select/poll vs epoll), so # have to test further] num_ready = len(ready_fds) for _ in range(num_ready): ready_fd = ready_fds[total_write_count[0] % num_ready] total_write_count[0] += 1 if ready_fd in active_writes: # already writing to this fd continue if is_fair_strategy and ready_fd in busy_workers: # worker is already busy with another task continue if ready_fd not in all_inqueues: hub.remove_writer(ready_fd) continue try: job = pop_message() except IndexError: # no more messages, remove all inactive fds from the hub. # this is important since the fds are always writable # as long as there's 1 byte left in the buffer, and so # this may create a spinloop where the event loop # always wakes up. for inqfd in diff(active_writes): hub.remove_writer(inqfd) break else: if not job._accepted: # job not accepted by another worker try: # keep track of what process the write operation # was scheduled for. proc = job._scheduled_for = fileno_to_inq[ready_fd] except KeyError: # write was scheduled for this fd but the process # has since exited and the message must be sent to # another process. put_message(job) continue cor = _write_job(proc, ready_fd, job) job._writer = ref(cor) mark_write_gen_as_active(cor) mark_write_fd_as_active(ready_fd) mark_worker_as_busy(ready_fd) # Try to write immediately, in case there's an error. try: next(cor) except StopIteration: pass except OSError as exc: if exc.errno != errno.EBADF: raise else: add_writer(ready_fd, cor) hub.consolidate_callback = schedule_writes def send_job(tup): # Schedule writing job request for when one of the process # inqueues are writable. 
body = dumps(tup, protocol=protocol) body_size = len(body) header = pack('>I', body_size) # index 1,0 is the job ID. job = get_job(tup[1][0]) job._payload = memoryview(header), memoryview(body), body_size put_message(job) self._quick_put = send_job def on_not_recovering(proc, fd, job, exc): logger.exception( 'Process inqueue damaged: %r %r: %r', proc, proc.exitcode, exc) if proc._is_alive(): proc.terminate() hub.remove(fd) self._put_back(job) def _write_job(proc, fd, job): # writes job to the worker process. # Operation must complete if more than one byte of data # was written. If the broker connection is lost # and no data was written the operation shall be canceled. header, body, body_size = job._payload errors = 0 try: # job result keeps track of what process the job is sent to. job._write_to = proc send = proc.send_job_offset Hw = Bw = 0 # write header while Hw < 4: try: Hw += send(header, Hw) except Exception as exc: # pylint: disable=broad-except if getattr(exc, 'errno', None) not in UNAVAIL: raise # suspend until more data errors += 1 if errors > 100: on_not_recovering(proc, fd, job, exc) raise StopIteration() yield else: errors = 0 # write body while Bw < body_size: try: Bw += send(body, Bw) except Exception as exc: # pylint: disable=broad-except if getattr(exc, 'errno', None) not in UNAVAIL: raise # suspend until more data errors += 1 if errors > 100: on_not_recovering(proc, fd, job, exc) raise StopIteration() yield else: errors = 0 finally: hub.remove_writer(fd) write_stats[proc.index] += 1 # message written, so this fd is now available active_writes.discard(fd) write_generator_done(job._writer()) # is a weakref def send_ack(response, pid, job, fd): # Only used when synack is enabled. # Schedule writing ack response for when the fd is writable. 
msg = Ack(job, fd, precalc[response]) callback = promise(write_generator_done) cor = _write_ack(fd, msg, callback=callback) mark_write_gen_as_active(cor) mark_write_fd_as_active(fd) callback.args = (cor,) add_writer(fd, cor) self.send_ack = send_ack def _write_ack(fd, ack, callback=None): # writes ack back to the worker if synack enabled. # this operation *MUST* complete, otherwise # the worker process will hang waiting for the ack. header, body, body_size = ack[2] try: try: proc = fileno_to_synq[fd] except KeyError: # process died, we can safely discard the ack at this # point. raise StopIteration() send = proc.send_syn_offset Hw = Bw = 0 # write header while Hw < 4: try: Hw += send(header, Hw) except Exception as exc: # pylint: disable=broad-except if getattr(exc, 'errno', None) not in UNAVAIL: raise yield # write body while Bw < body_size: try: Bw += send(body, Bw) except Exception as exc: # pylint: disable=broad-except if getattr(exc, 'errno', None) not in UNAVAIL: raise # suspend until more data yield finally: if callback: callback() # message written, so this fd is now available active_writes.discard(fd) def flush(self): if self._state == TERMINATE: return # cancel all tasks that haven't been accepted so that NACK is sent # if synack is enabled. if self.synack: for job in self._cache.values(): if not job._accepted: job._cancel() # clear the outgoing buffer as the tasks will be redelivered by # the broker anyway. if self.outbound_buffer: self.outbound_buffer.clear() self.maintain_pool() try: # ...but we must continue writing the payloads we already started # to keep message boundaries. # The messages may be NACK'ed later if synack is enabled. if self._state == RUN: # flush outgoing buffers intervals = fxrange(0.01, 0.1, 0.01, repeatlast=True) # TODO: Rewrite this as a dictionary comprehension once we drop support for Python 3.7 # This dict comprehension requires the walrus operator which is only available in 3.8. 
owned_by = {} for job in self._cache.values(): writer = _get_job_writer(job) if writer is not None: owned_by[writer] = job if not self._active_writers: self._cache.clear() else: while self._active_writers: writers = list(self._active_writers) for gen in writers: if (gen.__name__ == '_write_job' and gen_not_started(gen)): # hasn't started writing the job so can # discard the task, but we must also remove # it from the Pool._cache. try: job = owned_by[gen] except KeyError: pass else: # removes from Pool._cache job.discard() self._active_writers.discard(gen) else: try: job = owned_by[gen] except KeyError: pass else: job_proc = job._write_to if job_proc._is_alive(): self._flush_writer(job_proc, gen) job.discard() # workers may have exited in the meantime. self.maintain_pool() sleep(next(intervals)) # don't busyloop finally: self.outbound_buffer.clear() self._active_writers.clear() self._active_writes.clear() self._busy_workers.clear() def _flush_writer(self, proc, writer): fds = {proc.inq._writer} try: while fds: if not proc._is_alive(): break # process exited readable, writable, again = _select( writers=fds, err=fds, timeout=0.5, ) if not again and (writable or readable): try: next(writer) except (StopIteration, OSError, EOFError): break finally: self._active_writers.discard(writer) def get_process_queues(self): """Get queues for a new process. Here we'll find an unused slot, as there should always be one available when we start a new process. """ return next(q for q, owner in self._queues.items() if owner is None) def on_grow(self, n): """Grow the pool by ``n`` processes.""" diff = max(self._processes - len(self._queues), 0) if diff: self._queues.update({ self.create_process_queues(): None for _ in range(diff) }) def on_shrink(self, n): """Shrink the pool by ``n`` processes.""" def create_process_queues(self): """Create new in, out, etc. 
queues, returned as a tuple.""" # NOTE: Pipes must be set O_NONBLOCK at creation time (the original # fd), otherwise it won't be possible to change the flags until # there's an actual reader/writer on the other side. inq = _SimpleQueue(wnonblock=True) outq = _SimpleQueue(rnonblock=True) synq = None assert isblocking(inq._reader) assert not isblocking(inq._writer) assert not isblocking(outq._reader) assert isblocking(outq._writer) if self.synack: synq = _SimpleQueue(wnonblock=True) assert isblocking(synq._reader) assert not isblocking(synq._writer) return inq, outq, synq def on_process_alive(self, pid): """Called when receiving the :const:`WORKER_UP` message. Marks the process as ready to receive work. """ try: proc = next(w for w in self._pool if w.pid == pid) except StopIteration: return logger.warning('process with pid=%s already exited', pid) assert proc.inqW_fd not in self._fileno_to_inq assert proc.inqW_fd not in self._all_inqueues self._waiting_to_start.discard(proc) self._fileno_to_inq[proc.inqW_fd] = proc self._fileno_to_synq[proc.synqW_fd] = proc self._all_inqueues.add(proc.inqW_fd) def on_job_process_down(self, job, pid_gone): """Called for each job when the process assigned to it exits.""" if job._write_to and not job._write_to._is_alive(): # job was partially written self.on_partial_read(job, job._write_to) elif job._scheduled_for and not job._scheduled_for._is_alive(): # job was only scheduled to be written to this process, # but no data was sent so put it back on the outbound_buffer. self._put_back(job) def on_job_process_lost(self, job, pid, exitcode): """Called when the process executing job' exits. This happens when the process job' was assigned to exited by mysterious means (error exitcodes and signals). 
""" self.mark_as_worker_lost(job, exitcode) def human_write_stats(self): if self.write_stats is None: return 'N/A' vals = list(self.write_stats.values()) total = sum(vals) def per(v, total): return f'{(float(v) / total) if v else 0:.2f}' return { 'total': total, 'avg': per(total / len(self.write_stats) if total else 0, total), 'all': ', '.join(per(v, total) for v in vals), 'raw': ', '.join(map(str, vals)), 'strategy': SCHED_STRATEGY_TO_NAME.get( self.sched_strategy, self.sched_strategy, ), 'inqueues': { 'total': len(self._all_inqueues), 'active': len(self._active_writes), } } def _process_cleanup_queues(self, proc): """Called to clean up queues after process exit.""" if not proc.dead: try: self._queues[self._find_worker_queues(proc)] = None except (KeyError, ValueError): pass @staticmethod def _stop_task_handler(task_handler): """Called at shutdown to tell processes that we're shutting down.""" for proc in task_handler.pool: try: setblocking(proc.inq._writer, 1) except OSError: pass else: try: proc.inq.put(None) except OSError as exc: if exc.errno != errno.EBADF: raise def create_result_handler(self): return super().create_result_handler( fileno_to_outq=self._fileno_to_outq, on_process_alive=self.on_process_alive, ) def _process_register_queues(self, proc, queues): """Mark new ownership for ``queues`` to update fileno indices.""" assert queues in self._queues b = len(self._queues) self._queues[queues] = proc assert b == len(self._queues) def _find_worker_queues(self, proc): """Find the queues owned by ``proc``.""" try: return next(q for q, owner in self._queues.items() if owner == proc) except StopIteration: raise ValueError(proc) def _setup_queues(self): # this is only used by the original pool that used a shared # queue for all processes. self._quick_put = None # these attributes are unused by this class, but we'll still # have to initialize them for compatibility. 
self._inqueue = self._outqueue = \ self._quick_get = self._poll_result = None def process_flush_queues(self, proc): """Flush all queues. Including the outbound buffer, so that all tasks that haven't been started will be discarded. In Celery this is called whenever the transport connection is lost (consumer restart), and when a process is terminated. """ resq = proc.outq._reader on_state_change = self._result_handler.on_state_change fds = {resq} while fds and not resq.closed and self._state != TERMINATE: readable, _, _ = _select(fds, None, fds, timeout=0.01) if readable: try: task = resq.recv() except (OSError, EOFError) as exc: _errno = getattr(exc, 'errno', None) if _errno == errno.EINTR: continue elif _errno == errno.EAGAIN: break elif _errno not in UNAVAIL: debug('got %r while flushing process %r', exc, proc, exc_info=1) break else: if task is None: debug('got sentinel while flushing process %r', proc) break else: on_state_change(task) else: break def on_partial_read(self, job, proc): """Called when a job was partially written to exited child.""" # worker terminated by signal: # we cannot reuse the sockets again, because we don't know if # the process wrote/read anything from them, and if so we cannot # restore the message boundaries. if not job._accepted: # job was not acked, so find another worker to send it to. self._put_back(job) writer = _get_job_writer(job) if writer: self._active_writers.discard(writer) del writer if not proc.dead: proc.dead = True # Replace queues to avoid reuse before = len(self._queues) try: queues = self._find_worker_queues(proc) if self.destroy_queues(queues, proc): self._queues[self.create_process_queues()] = None except ValueError: pass assert len(self._queues) == before def destroy_queues(self, queues, proc): """Destroy queues that can no longer be used. This way they can be replaced by new usable sockets. 
""" assert not proc._is_alive() self._waiting_to_start.discard(proc) removed = 1 try: self._queues.pop(queues) except KeyError: removed = 0 try: self.on_inqueue_close(queues[0]._writer.fileno(), proc) except OSError: pass for queue in queues: if queue: for sock in (queue._reader, queue._writer): if not sock.closed: self.hub_remove(sock) try: sock.close() except OSError: pass return removed def _create_payload(self, type_, args, dumps=_pickle.dumps, pack=pack, protocol=HIGHEST_PROTOCOL): body = dumps((type_, args), protocol=protocol) size = len(body) header = pack('>I', size) return header, body, size @classmethod def _set_result_sentinel(cls, _outqueue, _pool): # unused pass def _help_stuff_finish_args(self): # Pool._help_stuff_finished is a classmethod so we have to use this # trick to modify the arguments passed to it. return (self._pool,) @classmethod def _help_stuff_finish(cls, pool): # pylint: disable=arguments-differ debug( 'removing tasks from inqueue until task handler finished', ) fileno_to_proc = {} inqR = set() for w in pool: try: fd = w.inq._reader.fileno() inqR.add(fd) fileno_to_proc[fd] = w except OSError: pass while inqR: readable, _, again = _select(inqR, timeout=0.5) if again: continue if not readable: break for fd in readable: fileno_to_proc[fd].inq._reader.recv() sleep(0) @property def timers(self): return {self.maintain_pool: 5.0}
AsynPool
python
google__jax
tests/pallas/mosaic_gpu_test.py
{ "start": 169947, "end": 170051 }
class ____( PipelineTest, lowering_semantics=plgpu.LoweringSemantics.Warpgroup ): ...
PipelineWGTest
python
huggingface__transformers
src/transformers/models/shieldgemma2/convert_shieldgemma2_weights_orbax_to_hf.py
{ "start": 14732, "end": 19420 }
class ____: state_tree: dict[str, torch.Tensor] config: ShieldGemma2Config def convert( shieldgemma_checkpoint_path: str, gemma_checkpoint_path: str, config: ShieldGemma2Config, target_dtype: torch.dtype, ) -> ConversionResult: """Loads Orbax checkpoint from `input_path` and converts it to HF tree.""" checkpointer = obc.PyTreeCheckpointer() sg2_ckpt = checkpointer.restore(shieldgemma_checkpoint_path) g3_ckpt = checkpointer.restore(gemma_checkpoint_path) hf_tree: dict[str, torch.Tensor] = {} def update_tree(path: str, weights: np.ndarray) -> None: torch_tensor = torch.from_numpy(weights.astype("float32")).type(target_dtype) logging.info( "%s converted shape=%s with dtype=%s", path, weights.shape, torch_tensor.dtype, ) hf_tree[f"model.{path}"] = torch_tensor for paths, value in tree.flatten_with_path(g3_ckpt): if paths[0].startswith("SigLiPFromPatches_"): path, weights = convert_siglip_weight(config=config.vision_config, paths=paths, weights=value) update_tree(path, weights) for paths, value in tree.flatten_with_path(sg2_ckpt): for path, weights in convert_transformer_weights(config=config.text_config, paths=paths, weights=value): update_tree(path, weights) hf_tree["model.language_model.lm_head.weight"] = hf_tree["model.language_model.model.embed_tokens.weight"] return ConversionResult(state_tree=hf_tree, config=config) def main(*args): del args dtype = getattr(torch, PRECISION.value) output_path = OUTPUT_PATH.value tokenizer = GemmaTokenizerFast( TOKENIZER_PATH.value, extra_special_tokens={ "image_token": "<image_soft_token>", # Should be ID=262_144 "boi_token": "<start_of_image>", # Should be ID=255_999 "eoi_token": "<end_of_image>", # Should be ID=256_000 }, ) yes_token_index, no_token_index = torch.tensor(tokenizer(["Yes", "No"])["input_ids"])[:, 1].numpy() config = ShieldGemma2Config( yes_token_index=int(yes_token_index), no_token_index=int(no_token_index), text_config=Gemma3TextConfig( vocab_size=262_208, hidden_size=2560, intermediate_size=2560 * 8 // 2, 
num_attention_heads=8, head_dim=256, num_hidden_layers=34, num_key_value_heads=4, sliding_window=1024, rope_parameters={"rope_type": "linear", "factor": 8.0}, # used for global RoPE only rope_theta=1_000_000, rope_local_base_freq=10_000, attn_logit_softcapping=None, query_pre_attn_scalar=256, max_position_embeddings=8192, ), vision_config={ "hidden_size": 1152, "intermediate_size": 4304, "num_hidden_layers": 27, "num_attention_heads": 16, "num_channels": 3, "image_size": 896, "patch_size": 14, "hidden_act": "gelu_pytorch_tanh", "layer_norm_eps": 1e-6, "attention_dropout": 0.0, "vision_use_head": False, }, ) config.save_pretrained(output_path) image_processor = Gemma3ImageProcessor( image_seq_length=256, image_mean=(0.5,) * 3, image_std=(0.5,) * 3, size={"height": 896, "width": 896}, resample=PILImageResampling.BILINEAR, ) processor = ShieldGemma2Processor( image_processor=image_processor, tokenizer=tokenizer, policy_definitions=_SHIELDGEMMA2_POLICIES, ) tokenizer.chat_template = _CHAT_TEMPLATE processor.chat_template = _CHAT_TEMPLATE processor.save_pretrained(output_path) logging.info("Saved Shieldgemma2Processor to %s", output_path) del processor del tokenizer logging.info("Converting Shieldgemma2 @ %s", dtype) result = convert(_SHIELDGEMMA_CHECKPOINT_PATH.value, _GEMMA_CHECKPOINT_PATH.value, config, dtype) logging.info("Converted Shieldgemma2 state tree from Orbax to Hugging Face.") with accelerate.init_empty_weights(): model = ShieldGemma2ForImageClassification(config=config) model.load_state_dict(result.state_tree, assign=True, strict=True) model.config.dtype = dtype logging.info("Loaded Shieldgemma2 in Hugging Face Transformers.") model.save_pretrained(output_path, safe_serialization=True) logging.info("Saved Shieldgemma2 to SafeTensors in %s", output_path) del model del result if __name__ == "__main__": app.run(main)
ConversionResult
python
PrefectHQ__prefect
src/prefect/server/schemas/statuses.py
{ "start": 445, "end": 677 }
class ____(AutoEnum): """Enumeration of deployment statuses.""" READY = AutoEnum.auto() NOT_READY = AutoEnum.auto() def in_kebab_case(self) -> str: return self.value.lower().replace("_", "-")
DeploymentStatus
python
gevent__gevent
src/gevent/tests/test__core_async.py
{ "start": 197, "end": 761 }
class ____(greentest.TestCase): def test(self): hub = gevent.get_hub() watcher = hub.loop.async_() # BWC for <3.7: This should still be an attribute assert hasattr(hub.loop, 'async') gevent.spawn_later(0.1, thread.start_new_thread, watcher.send, ()) start = time.time() with gevent.Timeout(1.0): # Large timeout for appveyor hub.wait(watcher) print('Watcher %r reacted after %.6f seconds' % (watcher, time.time() - start - 0.1)) if __name__ == '__main__': greentest.main()
Test
python
great-expectations__great_expectations
tests/actions/test_core_actions.py
{ "start": 13737, "end": 19852 }
class ____: @pytest.mark.unit def test_run(self, checkpoint_result: CheckpointResult): action = MicrosoftTeamsNotificationAction(name="my_action", teams_webhook="test") with mock.patch.object(Session, "post") as mock_post: action.run(checkpoint_result=checkpoint_result) mock_post.assert_called_once() body = mock_post.call_args.kwargs["json"]["attachments"][0]["content"]["body"] assert len(body) == 5 # Assert header assert "Success" in body[0]["columns"][1]["items"][0]["text"] # Assert first validation assert body[1]["text"] == "Validation Result (1 of 2) ✅" assert body[2]["facts"] == [ {"title": "Data Asset name: ", "value": "--"}, {"title": "Suite name: ", "value": SUITE_A}, { "title": "Run name: ", "value": "prod_20240401", }, { "title": "Summary:", "value": "*3* of *3* Expectations were met", }, ] # Assert second validation assert body[3]["text"] == "Validation Result (2 of 2) ✅" assert body[4]["facts"] == [ {"title": "Data Asset name: ", "value": "--"}, {"title": "Suite name: ", "value": SUITE_B}, { "title": "Run name: ", "value": "prod_20240402", }, { "title": "Summary:", "value": "*2* of *2* Expectations were met", }, ] @pytest.mark.unit def test_run_webhook_substitution(self, checkpoint_result: CheckpointResult): config_provider = project_manager.get_config_provider() assert isinstance(config_provider, mock.Mock) # noqa: TID251 # just using for the instance compare MS_TEAMS_WEBHOOK_VAR = "${ms_teams_webhook}" MS_TEAMS_WEBHOOK_VALUE = "https://my_org.webhook.office.com/webhookb2/abc" action = MicrosoftTeamsNotificationAction( name="my_action", teams_webhook=MS_TEAMS_WEBHOOK_VAR, ) config_from_uncommitted_config = {MS_TEAMS_WEBHOOK_VAR: MS_TEAMS_WEBHOOK_VALUE} config_provider.substitute_config.side_effect = lambda key: config_from_uncommitted_config[ key ] with mock.patch.object(Session, "post") as mock_send_notification: action.run(checkpoint_result=checkpoint_result) mock_send_notification.assert_called_once_with(url=MS_TEAMS_WEBHOOK_VALUE, json=mock.ANY) 
@pytest.mark.integration @pytest.mark.parametrize( "notify_on, expected_notification", [ ("all", True), ("success", True), ("failure", False), ("critical", False), ("warning", False), ("info", False), ], ) def test_run_integration_success_with_severity_filtering( self, notify_on: str, expected_notification: bool, checkpoint_result: CheckpointResult, ): """ Test that notify_on filtering works with successful checkpoint results. For this test, we are using a successful checkpoint result, so we expect a notification for the "all" and "success" cases only. """ # Necessary to retrieve config provider gx.get_context(mode="ephemeral") action = MicrosoftTeamsNotificationAction( name="test-action", teams_webhook="${GX_MS_TEAMS_WEBHOOK}", # Set as a secret in GH Actions notify_on=notify_on, ) result = action.run(checkpoint_result=checkpoint_result) if expected_notification: assert result == { "microsoft_teams_notification_result": "Microsoft Teams notification succeeded." } else: assert result == {"microsoft_teams_notification_result": None} @pytest.mark.integration @pytest.mark.parametrize( "notify_on, expected_notification", [ ("all", True), ("success", False), ("failure", True), ("critical", False), ("warning", True), ("info", False), ], ) def test_run_integration_failure_with_severity_filtering( self, notify_on: str, expected_notification: bool, checkpoint_result_with_failure: CheckpointResult, ): """ Test that notify_on filtering works with failed checkpoint results. For this test, we are using a failed checkpoint result with WARNING-level severity, so we expect a notification for the "all", "failure" and "warning" cases only. 
""" # Necessary to retrieve config provider gx.get_context(mode="ephemeral") action = MicrosoftTeamsNotificationAction( name="test-action", teams_webhook="${GX_MS_TEAMS_WEBHOOK}", # Set as a secret in GH Actions notify_on=notify_on, ) result = action.run(checkpoint_result=checkpoint_result_with_failure) if expected_notification: assert result == { "microsoft_teams_notification_result": "Microsoft Teams notification succeeded." } else: assert result == {"microsoft_teams_notification_result": None} @pytest.mark.integration def test_run_integration_failure( self, checkpoint_result: CheckpointResult, caplog, ): # Necessary to retrieve config provider gx.get_context(mode="ephemeral") action = MicrosoftTeamsNotificationAction( name="test-action", teams_webhook="https://fake.office.com/fake", ) with caplog.at_level(logging.WARNING): result = action.run(checkpoint_result=checkpoint_result) assert result == {"microsoft_teams_notification_result": None} assert caplog.records[-1].message.startswith("Failed to connect to Microsoft Teams webhook")
TestMicrosoftTeamsNotificationAction
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_combined01.py
{ "start": 315, "end": 1223 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_combined01.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart1 = workbook.add_chart({"type": "column"}) chart1.axis_ids = [84882560, 84884096] data = [ [2, 7, 3, 6, 2], [20, 25, 10, 10, 20], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) chart1.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart1.add_series({"values": "=Sheet1!$B$1:$B$5"}) worksheet.insert_chart("E9", chart1) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
mitmproxy__pdoc
test/testdata/misc.py
{ "start": 1960, "end": 2469 }
class ____(Base): def __init__(self): super().__init__() def foo(self): pass @classmethod def bar(cls): pass @staticmethod def baz(): pass @property def qux(self): return @cached_property def quux(self): return quuux: int = 42 # Testing that an attribute that is only annotated does not trigger a "submodule not found" warning. only_annotated: int # Testing that a private class in __all__ is displayed
Child
python
numba__numba
numba/cuda/tests/cudapy/test_caching.py
{ "start": 18393, "end": 19082 }
class ____(CUDATestCase): # For tests of miscellaneous CUDACodeLibrary behaviour that we wish to # explicitly check def test_cannot_serialize_unfinalized(self): # The CUDA codegen failes to import under the simulator, so we cannot # import it at the top level from numba.cuda.codegen import CUDACodeLibrary # Usually a CodeLibrary requires a real CodeGen, but since we don't # interact with it, anything will do codegen = object() name = 'library' cl = CUDACodeLibrary(codegen, name) with self.assertRaisesRegex(RuntimeError, 'Cannot pickle unfinalized'): cl._reduce_states()
TestCUDACodeLibrary
python
huggingface__transformers
src/transformers/models/clap/modeling_clap.py
{ "start": 76319, "end": 79346 }
class ____(ClapPreTrainedModel): config: ClapAudioConfig main_input_name = "input_features" input_modalities = "audio" def __init__(self, config: ClapAudioConfig): super().__init__(config) self.audio_model = ClapAudioModel(config) self.audio_projection = ClapProjectionLayer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.audio_model.audio_encoder.patch_embed.proj @can_return_tuple @auto_docstring def forward( self, input_features: Optional[torch.FloatTensor] = None, is_longer: Optional[torch.BoolTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ClapAudioModelOutput]: r""" is_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*): Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance the features. Examples: ```python >>> from datasets import load_dataset >>> from transformers import ClapAudioModelWithProjection, ClapProcessor >>> model = ClapAudioModelWithProjection.from_pretrained("laion/clap-htsat-fused") >>> processor = ClapProcessor.from_pretrained("laion/clap-htsat-fused") >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> inputs = processor(audio=audio_sample, return_tensors="pt") >>> outputs = model(**inputs) >>> audio_embeds = outputs.audio_embeds ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) audio_outputs = self.audio_model( input_features=input_features, is_longer=is_longer, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=True, ) pooled_output = audio_outputs[1] if not return_dict else audio_outputs.pooler_output audio_embeds = self.audio_projection(pooled_output) return ClapAudioModelOutput( audio_embeds=audio_embeds, last_hidden_state=audio_outputs.last_hidden_state, attentions=audio_outputs.attentions, hidden_states=audio_outputs.hidden_states, ) __all__ = [ "ClapModel", "ClapPreTrainedModel", "ClapTextModel", "ClapTextModelWithProjection", "ClapAudioModel", "ClapAudioModelWithProjection", ]
ClapAudioModelWithProjection
python
openai__openai-python
src/openai/types/responses/response_computer_tool_call_param.py
{ "start": 4341, "end": 5082 }
class ____(TypedDict, total=False): id: Required[str] """The unique ID of the computer call.""" action: Required[Action] """A click action.""" call_id: Required[str] """An identifier used when responding to the tool call with output.""" pending_safety_checks: Required[Iterable[PendingSafetyCheck]] """The pending safety checks for the computer call.""" status: Required[Literal["in_progress", "completed", "incomplete"]] """The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populated when items are returned via API. """ type: Required[Literal["computer_call"]] """The type of the computer call. Always `computer_call`."""
ResponseComputerToolCallParam
python
Textualize__textual
docs/examples/styles/grid.py
{ "start": 65, "end": 582 }
class ____(App): CSS_PATH = "grid.tcss" def compose(self): yield Static("Grid cell 1\n\nrow-span: 3;\ncolumn-span: 2;", id="static1") yield Static("Grid cell 2", id="static2") yield Static("Grid cell 3", id="static3") yield Static("Grid cell 4", id="static4") yield Static("Grid cell 5", id="static5") yield Static("Grid cell 6", id="static6") yield Static("Grid cell 7", id="static7") if __name__ == "__main__": app = GridApp() app.run()
GridApp
python
pypa__setuptools
setuptools/command/bdist_wheel.py
{ "start": 4027, "end": 22247 }
class ____(Command): description = "create a wheel distribution" supported_compressions = { "stored": ZIP_STORED, "deflated": ZIP_DEFLATED, } user_options = [ ("bdist-dir=", "b", "temporary directory for creating the distribution"), ( "plat-name=", "p", "platform name to embed in generated filenames " f"[default: {get_platform(None)}]", ), ( "keep-temp", "k", "keep the pseudo-installation tree around after " "creating the distribution archive", ), ("dist-dir=", "d", "directory to put final built distributions in"), ("skip-build", None, "skip rebuilding everything (for testing/debugging)"), ( "relative", None, "build the archive using relative paths [default: false]", ), ( "owner=", "u", "Owner name used when creating a tar file [default: current user]", ), ( "group=", "g", "Group name used when creating a tar file [default: current group]", ), ("universal", None, "*DEPRECATED* make a universal wheel [default: false]"), ( "compression=", None, f"zipfile compression (one of: {', '.join(supported_compressions)}) [default: 'deflated']", ), ( "python-tag=", None, f"Python implementation compatibility tag [default: '{python_tag()}']", ), ( "build-number=", None, "Build number for this particular version. " "As specified in PEP-0427, this must start with a digit. " "[default: None]", ), ( "py-limited-api=", None, "Python tag (cp32|cp33|cpNN) for abi3 wheel tag [default: false]", ), ( "dist-info-dir=", None, "directory where a pre-generated dist-info can be found (e.g. 
as a " "result of calling the PEP517 'prepare_metadata_for_build_wheel' " "method)", ), ] boolean_options = ["keep-temp", "skip-build", "relative", "universal"] def initialize_options(self) -> None: self.bdist_dir: str | None = None self.data_dir = "" self.plat_name: str | None = None self.plat_tag: str | None = None self.format = "zip" self.keep_temp = False self.dist_dir: str | None = None self.dist_info_dir = None self.egginfo_dir: str | None = None self.root_is_pure: bool | None = None self.skip_build = False self.relative = False self.owner = None self.group = None self.universal = False self.compression: str | int = "deflated" self.python_tag = python_tag() self.build_number: str | None = None self.py_limited_api: str | Literal[False] = False self.plat_name_supplied = False def finalize_options(self) -> None: if not self.bdist_dir: bdist_base = self.get_finalized_command("bdist").bdist_base self.bdist_dir = os.path.join(bdist_base, "wheel") if self.dist_info_dir is None: egg_info = cast(egg_info_cls, self.distribution.get_command_obj("egg_info")) egg_info.ensure_finalized() # needed for correct `wheel_dist_name` self.data_dir = self.wheel_dist_name + ".data" self.plat_name_supplied = bool(self.plat_name) need_options = ("dist_dir", "plat_name", "skip_build") self.set_undefined_options("bdist", *zip(need_options, need_options)) self.root_is_pure = not ( self.distribution.has_ext_modules() or self.distribution.has_c_libraries() ) self._validate_py_limited_api() # Support legacy [wheel] section for setting universal wheel = self.distribution.get_option_dict("wheel") if "universal" in wheel: # pragma: no cover # please don't define this in your global configs log.warn("The [wheel] section is deprecated. 
Use [bdist_wheel] instead.") val = wheel["universal"][1].strip() if val.lower() in ("1", "true", "yes"): self.universal = True if self.universal: SetuptoolsDeprecationWarning.emit( "bdist_wheel.universal is deprecated", """ With Python 2.7 end-of-life, support for building universal wheels (i.e., wheels that support both Python 2 and Python 3) is being obviated. Please discontinue using this option, or if you still need it, file an issue with pypa/setuptools describing your use case. """, due_date=(2025, 8, 30), # Introduced in 2024-08-30 ) if self.build_number is not None and not self.build_number[:1].isdigit(): raise ValueError("Build tag (build-number) must start with a digit.") def _validate_py_limited_api(self) -> None: if not self.py_limited_api: return if not re.match(PY_LIMITED_API_PATTERN, self.py_limited_api): raise ValueError(f"py-limited-api must match '{PY_LIMITED_API_PATTERN}'") if sysconfig.get_config_var("Py_GIL_DISABLED"): raise ValueError( f"`py_limited_api={self.py_limited_api!r}` not supported. " "`Py_LIMITED_API` is currently incompatible with " "`Py_GIL_DISABLED`. " "See https://github.com/python/cpython/issues/111506." ) @property def wheel_dist_name(self) -> str: """Return distribution full name with - replaced with _""" components = [ safer_name(self.distribution.get_name()), safer_version(self.distribution.get_version()), ] if self.build_number: components.append(self.build_number) return "-".join(components) def get_tag(self) -> tuple[str, str, str]: # bdist sets self.plat_name if unset, we should only use it for purepy # wheels if the user supplied it. 
if self.plat_name_supplied and self.plat_name: plat_name = self.plat_name elif self.root_is_pure: plat_name = "any" else: # macosx contains system version in platform name so need special handle if self.plat_name and not self.plat_name.startswith("macosx"): plat_name = self.plat_name else: # on macosx always limit the platform name to comply with any # c-extension modules in bdist_dir, since the user can specify # a higher MACOSX_DEPLOYMENT_TARGET via tools like CMake # on other platforms, and on macosx if there are no c-extension # modules, use the default platform name. plat_name = get_platform(self.bdist_dir) if _is_32bit_interpreter(): if plat_name in ("linux-x86_64", "linux_x86_64"): plat_name = "linux_i686" if plat_name in ("linux-aarch64", "linux_aarch64"): # TODO armv8l, packaging pull request #690 => this did not land # in pip/packaging yet plat_name = "linux_armv7l" plat_name = ( plat_name.lower().replace("-", "_").replace(".", "_").replace(" ", "_") ) if self.root_is_pure: if self.universal: impl = "py2.py3" else: impl = self.python_tag tag = (impl, "none", plat_name) else: impl_name = tags.interpreter_name() impl_ver = tags.interpreter_version() impl = impl_name + impl_ver # We don't work on CPython 3.1, 3.0. 
if self.py_limited_api and (impl_name + impl_ver).startswith("cp3"): impl = self.py_limited_api abi_tag = "abi3" else: abi_tag = str(get_abi_tag()).lower() tag = (impl, abi_tag, plat_name) # issue gh-374: allow overriding plat_name supported_tags = [ (t.interpreter, t.abi, plat_name) for t in tags.sys_tags() ] assert tag in supported_tags, ( f"would build wheel with unsupported tag {tag}" ) return tag def run(self): build_scripts = self.reinitialize_command("build_scripts") build_scripts.executable = "python" build_scripts.force = True build_ext = self.reinitialize_command("build_ext") build_ext.inplace = False if not self.skip_build: self.run_command("build") install = self.reinitialize_command("install", reinit_subcommands=True) install.root = self.bdist_dir install.compile = False install.skip_build = self.skip_build install.warn_dir = False # A wheel without setuptools scripts is more cross-platform. # Use the (undocumented) `no_ep` option to setuptools' # install_scripts command to avoid creating entry point scripts. install_scripts = self.reinitialize_command("install_scripts") install_scripts.no_ep = True # Use a custom scheme for the archive, because we have to decide # at installation time which scheme to use. for key in ("headers", "scripts", "data", "purelib", "platlib"): setattr(install, "install_" + key, os.path.join(self.data_dir, key)) basedir_observed = "" if os.name == "nt": # win32 barfs if any of these are ''; could be '.'? 
# (distutils.command.install:change_roots bug) basedir_observed = os.path.normpath(os.path.join(self.data_dir, "..")) self.install_libbase = self.install_lib = basedir_observed setattr( install, "install_purelib" if self.root_is_pure else "install_platlib", basedir_observed, ) log.info(f"installing to {self.bdist_dir}") self.run_command("install") impl_tag, abi_tag, plat_tag = self.get_tag() archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}" if not self.relative: archive_root = self.bdist_dir else: archive_root = os.path.join( self.bdist_dir, self._ensure_relative(install.install_base) ) self.set_undefined_options("install_egg_info", ("target", "egginfo_dir")) distinfo_dirname = ( f"{safer_name(self.distribution.get_name())}-" f"{safer_version(self.distribution.get_version())}.dist-info" ) distinfo_dir = os.path.join(self.bdist_dir, distinfo_dirname) if self.dist_info_dir: # Use the given dist-info directly. log.debug(f"reusing {self.dist_info_dir}") shutil.copytree(self.dist_info_dir, distinfo_dir) # Egg info is still generated, so remove it now to avoid it getting # copied into the wheel. _shutil.rmtree(self.egginfo_dir) else: # Convert the generated egg-info into dist-info. 
self.egg2dist(self.egginfo_dir, distinfo_dir) self.write_wheelfile(distinfo_dir) # Make the archive if not os.path.exists(self.dist_dir): os.makedirs(self.dist_dir) wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl") with WheelFile(wheel_path, "w", self._zip_compression()) as wf: wf.write_files(archive_root) # Add to 'Distribution.dist_files' so that the "upload" command works getattr(self.distribution, "dist_files", []).append(( "bdist_wheel", f"{sys.version_info.major}.{sys.version_info.minor}", wheel_path, )) if not self.keep_temp: log.info(f"removing {self.bdist_dir}") if not self.dry_run: _shutil.rmtree(self.bdist_dir) def write_wheelfile( self, wheelfile_base: str, generator: str = f"setuptools ({__version__})" ) -> None: from email.message import Message msg = Message() msg["Wheel-Version"] = "1.0" # of the spec msg["Generator"] = generator msg["Root-Is-Purelib"] = str(self.root_is_pure).lower() if self.build_number is not None: msg["Build"] = self.build_number # Doesn't work for bdist_wininst impl_tag, abi_tag, plat_tag = self.get_tag() for impl in impl_tag.split("."): for abi in abi_tag.split("."): for plat in plat_tag.split("."): msg["Tag"] = "-".join((impl, abi, plat)) wheelfile_path = os.path.join(wheelfile_base, "WHEEL") log.info(f"creating {wheelfile_path}") with open(wheelfile_path, "wb") as f: BytesGenerator(f, maxheaderlen=0).flatten(msg) def _ensure_relative(self, path: str) -> str: # copied from dir_util, deleted drive, path = os.path.splitdrive(path) if path[0:1] == os.sep: path = drive + path[1:] return path @property def license_paths(self) -> Iterable[str]: if setuptools_major_version >= 57: # Setuptools has resolved any patterns to actual file names return self.distribution.metadata.license_files or () files = set[str]() metadata = self.distribution.get_option_dict("metadata") if setuptools_major_version >= 42: # Setuptools recognizes the license_files option but does not do globbing patterns = cast(Sequence[str], 
self.distribution.metadata.license_files) else: # Prior to those, wheel is entirely responsible for handling license files if "license_files" in metadata: patterns = metadata["license_files"][1].split() else: patterns = () if "license_file" in metadata: warnings.warn( 'The "license_file" option is deprecated. Use "license_files" instead.', DeprecationWarning, stacklevel=2, ) files.add(metadata["license_file"][1]) if not files and not patterns and not isinstance(patterns, list): patterns = ("LICEN[CS]E*", "COPYING*", "NOTICE*", "AUTHORS*") for pattern in patterns: for path in iglob(pattern): if path.endswith("~"): log.debug( f'ignoring license file "{path}" as it looks like a backup' ) continue if path not in files and os.path.isfile(path): log.info( f'adding license file "{path}" (matched pattern "{pattern}")' ) files.add(path) return files def egg2dist(self, egginfo_path: str, distinfo_path: str) -> None: """Convert an .egg-info directory into a .dist-info directory""" def adios(p: str) -> None: """Appropriately delete directory, file or link.""" if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p): _shutil.rmtree(p) elif os.path.exists(p): os.unlink(p) adios(distinfo_path) if not os.path.exists(egginfo_path): # There is no egg-info. This is probably because the egg-info # file/directory is not named matching the distribution name used # to name the archive file. Check for this case and report # accordingly. 
import glob pat = os.path.join(os.path.dirname(egginfo_path), "*.egg-info") possible = glob.glob(pat) err = f"Egg metadata expected at {egginfo_path} but not found" if possible: alt = os.path.basename(possible[0]) err += f" ({alt} found - possible misnamed archive file?)" raise ValueError(err) # .egg-info is a directory pkginfo_path = os.path.join(egginfo_path, "PKG-INFO") # ignore common egg metadata that is useless to wheel shutil.copytree( egginfo_path, distinfo_path, ignore=lambda x, y: { "PKG-INFO", "requires.txt", "SOURCES.txt", "not-zip-safe", }, ) # delete dependency_links if it is only whitespace dependency_links_path = os.path.join(distinfo_path, "dependency_links.txt") with open(dependency_links_path, encoding="utf-8") as dependency_links_file: dependency_links = dependency_links_file.read().strip() if not dependency_links: adios(dependency_links_path) metadata_path = os.path.join(distinfo_path, "METADATA") shutil.copy(pkginfo_path, metadata_path) licenses_folder_path = os.path.join(distinfo_path, "licenses") for license_path in self.license_paths: safe_path = _safe_license_file(license_path) dist_info_license_path = os.path.join(licenses_folder_path, safe_path) os.makedirs(os.path.dirname(dist_info_license_path), exist_ok=True) shutil.copy(license_path, dist_info_license_path) adios(egginfo_path) def _zip_compression(self) -> int: if ( isinstance(self.compression, int) and self.compression in self.supported_compressions.values() ): return self.compression compression = self.supported_compressions.get(str(self.compression)) if compression is not None: return compression raise ValueError(f"Unsupported compression: {self.compression!r}")
bdist_wheel
python
tornadoweb__tornado
tornado/test/auth_test.py
{ "start": 22088, "end": 22326 }
class ____(RequestHandler): def get(self): assert self.get_argument("access_token") == "fake-access-token" # return a fake user self.finish({"name": "Foo", "email": "foo@example.com"})
GoogleOAuth2UserinfoHandler
python
apache__airflow
airflow-core/src/airflow/serialization/serialized_objects.py
{ "start": 159066, "end": 160182 }
class ____(AssetWatcher): """JSON serializable representation of an asset watcher.""" trigger: dict @cache def _has_kubernetes(attempt_import: bool = False) -> bool: """ Check if kubernetes libraries are available. :param attempt_import: If true, attempt to import kubernetes libraries if not already loaded. If False, only check if already in sys.modules (avoids expensive import). :return: True if kubernetes libraries are available, False otherwise. """ # Check if kubernetes is already imported before triggering expensive import if "kubernetes.client" not in sys.modules and not attempt_import: return False # Loading kube modules is expensive, so delay it until the last moment try: from kubernetes.client import models as k8s from airflow.providers.cncf.kubernetes.pod_generator import PodGenerator globals()["k8s"] = k8s globals()["PodGenerator"] = PodGenerator return True except ImportError: return False AssetT = TypeVar("AssetT", bound=BaseAsset, covariant=True)
SerializedAssetWatcher
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_B.py
{ "start": 3526, "end": 4726 }
class BiggsExp03(Benchmark):
    r"""
    BiggsExp03 objective function.

    The BiggsExp03 [1]_ global optimization problem is a multimodal
    minimization problem defined as follows

    .. math::

        \begin{matrix}\
        f_{\text{BiggsExp03}}(x) = \sum_{i=1}^{10}
        (e^{-t_i x_1} - x_3e^{-t_i x_2} - y_i)^2\\
        t_i = 0.1i\\
        y_i = e^{-t_i} - 5e^{-10 t_i}\\
        \end{matrix}

    with :math:`x_i \in [0, 20]` for :math:`i = 1, 2, 3`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10, 5]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=3):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([0] * 3, [20] * 3))
        self.global_optimum = [[1., 10., 5.]]
        self.fglob = 0

    def fun(self, x, *args):
        self.nfev += 1

        # Sample points t_i = 0.1 i and reference values y_i.
        t = 0.1 * arange(1., 11.)
        y = exp(-t) - 5 * exp(-10 * t)
        # Sum of squared residuals of the two-exponential model.
        residuals = exp(-t * x[0]) - x[2] * exp(-t * x[1]) - y
        return sum(residuals ** 2)
BiggsExp03
python
pallets__flask
src/flask/wrappers.py
{ "start": 8110, "end": 9406 }
class Response(ResponseBase):
    """The response object used by default in Flask.

    Behaves like the Werkzeug response object but defaults to an HTML
    mimetype.  You rarely create this yourself, since
    :meth:`~flask.Flask.make_response` does it for you.  To substitute a
    different response type, subclass this and assign your subclass to
    :attr:`~flask.Flask.response_class`.

    .. versionchanged:: 1.0
        JSON support is added to the response, like the request. This is
        useful when testing to get the test client response data as JSON.

    .. versionchanged:: 1.0
        Added :attr:`max_cookie_size`.
    """

    default_mimetype: str | None = "text/html"

    json_module = json

    autocorrect_location_header = False

    @property
    def max_cookie_size(self) -> int:  # type: ignore
        """Read-only view of the :data:`MAX_COOKIE_SIZE` config key.

        See :attr:`~werkzeug.wrappers.Response.max_cookie_size` in
        Werkzeug's docs.
        """
        if not current_app:
            # Outside an app context, fall back to Werkzeug's default.
            return super().max_cookie_size

        return current_app.config["MAX_COOKIE_SIZE"]  # type: ignore[no-any-return]
Response
python
openai__openai-python
src/openai/_client.py
{ "start": 39867, "end": 44645 }
class ____: _client: AsyncOpenAI def __init__(self, client: AsyncOpenAI) -> None: self._client = client @cached_property def completions(self) -> completions.AsyncCompletionsWithStreamingResponse: from .resources.completions import AsyncCompletionsWithStreamingResponse return AsyncCompletionsWithStreamingResponse(self._client.completions) @cached_property def chat(self) -> chat.AsyncChatWithStreamingResponse: from .resources.chat import AsyncChatWithStreamingResponse return AsyncChatWithStreamingResponse(self._client.chat) @cached_property def embeddings(self) -> embeddings.AsyncEmbeddingsWithStreamingResponse: from .resources.embeddings import AsyncEmbeddingsWithStreamingResponse return AsyncEmbeddingsWithStreamingResponse(self._client.embeddings) @cached_property def files(self) -> files.AsyncFilesWithStreamingResponse: from .resources.files import AsyncFilesWithStreamingResponse return AsyncFilesWithStreamingResponse(self._client.files) @cached_property def images(self) -> images.AsyncImagesWithStreamingResponse: from .resources.images import AsyncImagesWithStreamingResponse return AsyncImagesWithStreamingResponse(self._client.images) @cached_property def audio(self) -> audio.AsyncAudioWithStreamingResponse: from .resources.audio import AsyncAudioWithStreamingResponse return AsyncAudioWithStreamingResponse(self._client.audio) @cached_property def moderations(self) -> moderations.AsyncModerationsWithStreamingResponse: from .resources.moderations import AsyncModerationsWithStreamingResponse return AsyncModerationsWithStreamingResponse(self._client.moderations) @cached_property def models(self) -> models.AsyncModelsWithStreamingResponse: from .resources.models import AsyncModelsWithStreamingResponse return AsyncModelsWithStreamingResponse(self._client.models) @cached_property def fine_tuning(self) -> fine_tuning.AsyncFineTuningWithStreamingResponse: from .resources.fine_tuning import AsyncFineTuningWithStreamingResponse return 
AsyncFineTuningWithStreamingResponse(self._client.fine_tuning) @cached_property def vector_stores(self) -> vector_stores.AsyncVectorStoresWithStreamingResponse: from .resources.vector_stores import AsyncVectorStoresWithStreamingResponse return AsyncVectorStoresWithStreamingResponse(self._client.vector_stores) @cached_property def beta(self) -> beta.AsyncBetaWithStreamingResponse: from .resources.beta import AsyncBetaWithStreamingResponse return AsyncBetaWithStreamingResponse(self._client.beta) @cached_property def batches(self) -> batches.AsyncBatchesWithStreamingResponse: from .resources.batches import AsyncBatchesWithStreamingResponse return AsyncBatchesWithStreamingResponse(self._client.batches) @cached_property def uploads(self) -> uploads.AsyncUploadsWithStreamingResponse: from .resources.uploads import AsyncUploadsWithStreamingResponse return AsyncUploadsWithStreamingResponse(self._client.uploads) @cached_property def responses(self) -> responses.AsyncResponsesWithStreamingResponse: from .resources.responses import AsyncResponsesWithStreamingResponse return AsyncResponsesWithStreamingResponse(self._client.responses) @cached_property def realtime(self) -> realtime.AsyncRealtimeWithStreamingResponse: from .resources.realtime import AsyncRealtimeWithStreamingResponse return AsyncRealtimeWithStreamingResponse(self._client.realtime) @cached_property def conversations(self) -> conversations.AsyncConversationsWithStreamingResponse: from .resources.conversations import AsyncConversationsWithStreamingResponse return AsyncConversationsWithStreamingResponse(self._client.conversations) @cached_property def evals(self) -> evals.AsyncEvalsWithStreamingResponse: from .resources.evals import AsyncEvalsWithStreamingResponse return AsyncEvalsWithStreamingResponse(self._client.evals) @cached_property def containers(self) -> containers.AsyncContainersWithStreamingResponse: from .resources.containers import AsyncContainersWithStreamingResponse return 
AsyncContainersWithStreamingResponse(self._client.containers) @cached_property def videos(self) -> videos.AsyncVideosWithStreamingResponse: from .resources.videos import AsyncVideosWithStreamingResponse return AsyncVideosWithStreamingResponse(self._client.videos) Client = OpenAI AsyncClient = AsyncOpenAI
AsyncOpenAIWithStreamedResponse
python
jmcnamara__XlsxWriter
xlsxwriter/test/styles/test_styles05.py
{ "start": 380, "end": 6362 }
class ____(unittest.TestCase): """ Test assembling a complete Styles file. """ def test_assemble_xml_file(self): """Tests for diagonal border styles.""" self.maxDiff = None fh = StringIO() style = Styles() style._set_filehandle(fh) workbook = Workbook() workbook.add_format({"left": 1}) workbook.add_format({"right": 1}) workbook.add_format({"top": 1}) workbook.add_format({"bottom": 1}) workbook.add_format({"diag_type": 1, "diag_border": 1}) workbook.add_format({"diag_type": 2, "diag_border": 1}) workbook.add_format({"diag_type": 3}) # Test default border. workbook._set_default_xf_indices() workbook._prepare_format_properties() style._set_style_properties( [ workbook.xf_formats, workbook.palette, workbook.font_count, workbook.num_formats, workbook.border_count, workbook.fill_count, workbook.custom_colors, workbook.dxf_formats, workbook.has_comments, ] ) style._assemble_xml_file() workbook.fileclosed = 1 exp = _xml_to_list( """ <?xml version="1.0" encoding="UTF-8" standalone="yes"?> <styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"> <fonts count="1"> <font> <sz val="11"/> <color theme="1"/> <name val="Calibri"/> <family val="2"/> <scheme val="minor"/> </font> </fonts> <fills count="2"> <fill> <patternFill patternType="none"/> </fill> <fill> <patternFill patternType="gray125"/> </fill> </fills> <borders count="8"> <border> <left/> <right/> <top/> <bottom/> <diagonal/> </border> <border> <left style="thin"> <color auto="1"/> </left> <right/> <top/> <bottom/> <diagonal/> </border> <border> <left/> <right style="thin"> <color auto="1"/> </right> <top/> <bottom/> <diagonal/> </border> <border> <left/> <right/> <top style="thin"> <color auto="1"/> </top> <bottom/> <diagonal/> </border> <border> <left/> <right/> <top/> <bottom style="thin"> <color auto="1"/> </bottom> <diagonal/> </border> <border diagonalUp="1"> <left/> <right/> <top/> <bottom/> <diagonal style="thin"> <color auto="1"/> </diagonal> </border> <border diagonalDown="1"> <left/> 
<right/> <top/> <bottom/> <diagonal style="thin"> <color auto="1"/> </diagonal> </border> <border diagonalUp="1" diagonalDown="1"> <left/> <right/> <top/> <bottom/> <diagonal style="thin"> <color auto="1"/> </diagonal> </border> </borders> <cellStyleXfs count="1"> <xf numFmtId="0" fontId="0" fillId="0" borderId="0"/> </cellStyleXfs> <cellXfs count="8"> <xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/> <xf numFmtId="0" fontId="0" fillId="0" borderId="1" xfId="0" applyBorder="1"/> <xf numFmtId="0" fontId="0" fillId="0" borderId="2" xfId="0" applyBorder="1"/> <xf numFmtId="0" fontId="0" fillId="0" borderId="3" xfId="0" applyBorder="1"/> <xf numFmtId="0" fontId="0" fillId="0" borderId="4" xfId="0" applyBorder="1"/> <xf numFmtId="0" fontId="0" fillId="0" borderId="5" xfId="0" applyBorder="1"/> <xf numFmtId="0" fontId="0" fillId="0" borderId="6" xfId="0" applyBorder="1"/> <xf numFmtId="0" fontId="0" fillId="0" borderId="7" xfId="0" applyBorder="1"/> </cellXfs> <cellStyles count="1"> <cellStyle name="Normal" xfId="0" builtinId="0"/> </cellStyles> <dxfs count="0"/> <tableStyles count="0" defaultTableStyle="TableStyleMedium9" defaultPivotStyle="PivotStyleLight16"/> </styleSheet> """ ) got = _xml_to_list(fh.getvalue()) self.assertEqual(exp, got)
TestAssembleStyles
python
getsentry__sentry
src/sentry/api/serializers/models/event.py
{ "start": 18511, "end": 19912 }
class IssueEventSerializer(SqlFormatEventSerializer):
    """
    Adds release, user report, sdk updates, and perf issue info to the event.
    """

    def get_attrs(
        self,
        item_list: Sequence[Event | GroupEvent],
        user: User | RpcUser | AnonymousUser,
        **kwargs,
    ):
        is_public = kwargs.pop("is_public", False)
        return super().get_attrs(item_list, user, is_public=is_public, **kwargs)

    def _get_sdk_updates(self, obj):
        # Suggested SDK upgrades derived from the event's recorded SDK state.
        setup_state = SdkSetupState.from_event_json(obj.data)
        return list(get_suggested_updates(setup_state))

    def _get_resolved_with(self, obj: Event) -> list[str]:
        # Collect the distinct symbolication methods seen across all frames
        # of every stacktrace in the event.
        methods = set()
        for stacktrace in find_stacktraces_in_data(obj.data):
            for frame in stacktrace.get_frames():
                data = frame.get("data")
                if data is not None:
                    methods.add(data.get("resolved_with"))
        return list(methods)

    def serialize(self, obj, attrs, user, **kwargs) -> IssueEventSerializerResponse:
        base = super().serialize(obj, attrs, user, **kwargs)
        return {
            **base,
            "userReport": self._get_user_report(user, obj),
            "sdkUpdates": self._get_sdk_updates(obj),
            "resolvedWith": self._get_resolved_with(obj),
        }
IssueEventSerializer
python
wandb__wandb
wandb/vendor/pygments/lexers/parsers.py
{ "start": 23008, "end": 23668 }
class AntlrActionScriptLexer(DelegatingLexer):
    """
    `ANTLR`_ with ActionScript Target

    .. versionadded:: 1.1
    """

    name = 'ANTLR With ActionScript Target'
    aliases = ['antlr-as', 'antlr-actionscript']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        # Imported lazily to avoid a hard dependency at module import time.
        from pygments.lexers.actionscript import ActionScriptLexer
        super(AntlrActionScriptLexer, self).__init__(ActionScriptLexer,
                                                     AntlrLexer, **options)

    def analyse_text(text):
        # Match only if the grammar both looks like ANTLR and declares
        # ``language = ActionScript;``.
        return AntlrLexer.analyse_text(text) and \
            re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M)
AntlrActionScriptLexer
python
sphinx-doc__sphinx
sphinx/extension.py
{ "start": 495, "end": 3192 }
class Extension:
    # Record describing one loaded Sphinx extension and its declared metadata.
    def __init__(self, name: str, module: Any, **kwargs: Any) -> None:
        self.name = name
        self.module = module
        # NOTE: metadata aliases kwargs, so the pops below also remove
        # 'version' / 'parallel_*_safe' from ``self.metadata``.
        self.metadata: ExtensionMetadata = kwargs  # type: ignore[assignment]
        self.version = kwargs.pop('version', 'unknown version')

        # The extension supports parallel read or not. The default value
        # is ``None``. It means the extension does not tell the status.
        # It will be warned on parallel reading.
        self.parallel_read_safe = kwargs.pop('parallel_read_safe', None)

        # The extension supports parallel write or not. The default value
        # is ``True``. Sphinx writes parallelly documents even if
        # the extension does not tell its status.
        self.parallel_write_safe = kwargs.pop('parallel_write_safe', True)


def verify_needs_extensions(app: Sphinx, config: Config) -> None:
    """Check that extensions mentioned in :confval:`needs_extensions` satisfy the
    version requirement, and warn if an extension is not loaded.

    Warns if an extension in :confval:`needs_extension` is not loaded.

    :raises VersionRequirementError: if the version of an extension in
    :confval:`needs_extension` is unknown or older than the required version.
    """
    if config.needs_extensions is None:
        return

    for extname, reqversion in config.needs_extensions.items():
        extension = app.extensions.get(extname)
        if extension is None:
            logger.warning(
                __(
                    'The %s extension is required by needs_extensions settings, '
                    'but it is not loaded.'
                ),
                extname,
            )
            continue

        fulfilled = True
        if extension.version == 'unknown version':
            # Without a version the requirement cannot be proven satisfied.
            fulfilled = False
        else:
            try:
                if Version(reqversion) > Version(extension.version):
                    fulfilled = False
            except InvalidVersion:
                # Non-PEP 440 version strings: fall back to plain string
                # comparison rather than rejecting the extension outright.
                if reqversion > extension.version:
                    fulfilled = False

        if not fulfilled:
            raise VersionRequirementError(
                __(
                    'This project needs the extension %s at least in '
                    'version %s and therefore cannot be built with '
                    'the loaded version (%s).'
                )
                % (extname, reqversion, extension.version)
            )


def setup(app: Sphinx) -> ExtensionMetadata:
    # High priority (800) so the check runs early among config-inited handlers.
    app.connect('config-inited', verify_needs_extensions, priority=800)

    return {
        'version': 'builtin',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
Extension
python
mlflow__mlflow
mlflow/gateway/schemas/completions.py
{ "start": 613, "end": 1157 }
class CompletionsUsage(ResponseModel):
    """Token accounting reported with a completions response."""

    # Tokens consumed by the prompt; None when the provider omits usage data.
    prompt_tokens: int | None = None
    # Tokens in the generated completion; None when omitted.
    completion_tokens: int | None = None
    # Total token count (presumably prompt + completion; not enforced here).
    total_tokens: int | None = None


# Example payload attached to the response model's schema config
# (shown in generated API docs).
_RESPONSE_PAYLOAD_EXTRA_SCHEMA = {
    "example": {
        "id": "cmpl-123",
        "object": "text_completion",
        "created": 1589478378,
        "model": "gpt-4",
        "choices": [
            {"text": "Hello! I am an AI Assistant!", "index": 0, "finish_reason": "length"}
        ],
        "usage": {"prompt_tokens": 5, "completion_tokens": 7, "total_tokens": 12},
    }
}
CompletionsUsage
python
tensorflow__tensorflow
tensorflow/python/keras/initializers/initializers_v2.py
{ "start": 1285, "end": 3572 }
class Initializer(object):
  """Base class for all Keras initializers.

  An initializer is a callable that produces a tensor of a requested shape
  and dtype.  Concrete subclasses implement `__call__` with the signature:

  ```python
  def __call__(self, shape, dtype=None, **kwargs):
    # returns a tensor of shape `shape` and dtype `dtype`
    # containing values drawn from a distribution of your choice.
  ```

  To support serialization -- just like any Keras object -- a subclass may
  also implement `get_config` and the class method `from_config`.
  For example, a random normal initializer:

  ```python
  import tensorflow as tf

  class ExampleRandomNormal(tf.keras.initializers.Initializer):

    def __init__(self, mean, stddev):
      self.mean = mean
      self.stddev = stddev

    def __call__(self, shape, dtype=None, **kwargs):
      return tf.random.normal(
          shape, mean=self.mean, stddev=self.stddev, dtype=dtype)

    def get_config(self):  # To support serialization
      return {"mean": self.mean, "stddev": self.stddev}
  ```

  `from_config` need not be overridden when, as above, the constructor
  arguments match the keys of the config returned by `get_config`; the
  default `from_config` simply calls `cls(**config)`.
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor object initialized as specified by the initializer.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor.
      **kwargs: Additional keyword arguments.
    """
    # Subclasses must provide the actual sampling logic.
    raise NotImplementedError

  def get_config(self):
    """Returns the initializer's configuration as a JSON-serializable dict.

    Returns:
      A JSON-serializable Python dict.
    """
    # Base initializer carries no state.
    return {}

  @classmethod
  def from_config(cls, config):
    """Instantiates an initializer from a configuration dictionary.

    Example:

    ```python
    initializer = RandomUniform(-1, 1)
    config = initializer.get_config()
    initializer = RandomUniform.from_config(config)
    ```

    Args:
      config: A Python dictionary, the output of `get_config`.

    Returns:
      A `tf.keras.initializers.Initializer` instance.
    """
    # Drop a stray 'dtype' key (mutates the caller's dict) -- constructors
    # of initializers do not accept it.
    config.pop('dtype', None)
    return cls(**config)
Initializer
python
huggingface__transformers
src/transformers/models/maskformer/modeling_maskformer_swin.py
{ "start": 18614, "end": 19467 }
class MaskFormerSwinAttention(nn.Module):
    """Windowed self-attention followed by its output projection."""

    def __init__(self, config, dim, num_heads, window_size):
        super().__init__()
        # Submodule order preserved so parameter/state-dict layout is stable.
        self.self = MaskFormerSwinSelfAttention(config, dim, num_heads, window_size)
        self.output = MaskFormerSwinSelfOutput(config, dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> tuple[torch.Tensor]:
        self_outputs = self.self(hidden_states, attention_mask, output_attentions)
        projected = self.output(self_outputs[0], hidden_states)
        # Pass through attention probabilities when the caller requested them.
        return (projected,) + self_outputs[1:]


# Copied from transformers.models.swin.modeling_swin.SwinIntermediate with Swin->MaskFormerSwin
MaskFormerSwinAttention
python
pytorch__pytorch
torch/distributed/checkpoint/planner.py
{ "start": 1835, "end": 2243 }
class ReadItem:
    """One element of a load plan: copy a ``lengths``-sized region from
    checkpoint storage into the destination state_dict tensor."""

    # Read Item
    type: LoadItemType

    # Index into the state_dict
    dest_index: MetadataIndex
    # Offsets into destination tensor
    dest_offsets: torch.Size

    # Index into the checkpoint
    storage_index: MetadataIndex
    # Offset into the checkpoint data
    storage_offsets: torch.Size

    # Size of the hypercube to copy
    lengths: torch.Size


@dataclass(frozen=True)
ReadItem
python
google__jax
tests/stack_test.py
{ "start": 773, "end": 1539 }
class StackTest(jtu.JaxTestCase):
  # Stack here is a functional data structure: push/pop return new stacks
  # rather than mutating in place.

  def test_empty(self):
    # A freshly created stack reports empty.
    stack = Stack.create(7, jnp.zeros((), jnp.int32))
    self.assertTrue(stack.empty())

  def test_pushes_and_pops(self):
    # LIFO order: values come back most-recently-pushed first.
    stack = Stack.create(7, jnp.zeros((), jnp.int32))
    stack = stack.push(jnp.int32(7))
    self.assertFalse(stack.empty())
    stack = stack.push(jnp.int32(8))
    self.assertFalse(stack.empty())
    x, stack = stack.pop()
    self.assertFalse(stack.empty())
    self.assertEqual(8, x)
    stack = stack.push(jnp.int32(9))
    x, stack = stack.pop()
    self.assertFalse(stack.empty())
    self.assertEqual(9, x)
    x, stack = stack.pop()
    self.assertTrue(stack.empty())
    self.assertEqual(7, x)


if __name__ == '__main__':
  absltest.main(testLoader=jtu.JaxTestLoader())
StackTest
python
getsentry__sentry
src/sentry/workflow_engine/handlers/condition/event_frequency_query_handlers.py
{ "start": 14864, "end": 18173 }
class PercentSessionsQueryHandler(BaseEventFrequencyQueryHandler):
    """Computes issue event counts as a percentage of project sessions."""

    intervals: ClassVar[dict[str, tuple[str, timedelta]]] = PERCENT_INTERVALS

    def get_session_count(
        self, project_id: int, environment_id: int | None, start: datetime, end: datetime
    ) -> int:
        # Session count for the last hour, cached for 10 minutes per
        # (project, environment) pair to avoid hammering release health.
        cache_key = f"r.c.spc:{project_id}-{environment_id}"
        session_count_last_hour = cache.get(cache_key)
        if session_count_last_hour is None:
            with options_override({"consistent": False}):
                session_count_last_hour = release_health.backend.get_project_sessions_count(
                    project_id=project_id,
                    environment_id=environment_id,
                    rollup=60,
                    start=end - timedelta(minutes=60),
                    end=end,
                )
            cache.set(cache_key, session_count_last_hour, 600)
        return session_count_last_hour

    def get_session_interval(self, session_count: int, duration: timedelta) -> int | None:
        # Scale the hourly session count down to the query interval; returns
        # None when there are too few sessions for a meaningful percentage.
        if session_count >= MIN_SESSIONS_TO_FIRE:
            interval_in_minutes = duration.total_seconds() // 60
            return int(session_count / (60 / interval_in_minutes))
        return None

    def batch_query(
        self,
        groups: list[GroupValues],
        start: datetime,
        end: datetime,
        environment_id: int | None,
        filters: list[QueryFilter] | None = None,
    ) -> QueryResult:
        batch_percents: QueryResult = {}
        category_group_ids = self.get_group_ids_by_category(groups)
        project_id = self.get_value_from_groups(groups, "project_id")

        # No project or too little session data: every group scores 0.
        if not project_id:
            return {group["id"]: 0 for group in groups}

        session_count_last_hour = self.get_session_count(project_id, environment_id, start, end)
        duration = end - start  # recalculated duration
        avg_sessions_in_interval = self.get_session_interval(session_count_last_hour, duration)

        if not avg_sessions_in_interval:
            return {group["id"]: 0 for group in groups}

        organization_id = self.get_value_from_groups(groups, "project__organization_id")
        if not organization_id:
            # Without an org id we cannot query tsdb; return the empty result.
            return batch_percents

        for category, issue_ids in category_group_ids.items():
            # We do not have sessions for non-error issue types
            if category != GroupCategory.ERROR:
                for group_id in issue_ids:
                    batch_percents[group_id] = 0
                continue
            model = get_issue_tsdb_group_model(
                category
            )  # InvalidFilter should not be raised for errors
            results = self.get_chunked_result(
                tsdb_function=tsdb.backend.get_sums,
                model=model,
                group_ids=issue_ids,
                organization_id=organization_id,
                start=start,
                end=end,
                environment_id=environment_id,
                referrer_suffix="wf_batch_alert_event_frequency_percent",
                filters=filters,
                group_on_time=False,
            )
            for group_id, count in results.items():
                # Events per interval as a percent of expected sessions,
                # rounded to 4 decimal places before scaling.
                percent: float = 100 * round(count / avg_sessions_in_interval, 4)
                batch_percents[group_id] = percent

        return batch_percents
PercentSessionsQueryHandler
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/unnecessaryContains1.py
{ "start": 1101, "end": 1315 }
class Enum1(Enum):
    # Corpus sample: the `in` / `not in` membership tests below are the
    # subject under test (unnecessary-contains diagnostics) -- keep as-is.
    a = "a"
    b = "b"
    c = "c"

    @property
    def is_ab(self):
        return self in (Enum1.a, Enum1.b)

    @property
    def is_c(self):
        return self not in (Enum1.a, Enum1.b)
Enum1
python
viewflow__viewflow
viewflow/workflow/exceptions.py
{ "start": 81, "end": 142 }
class FlowLockFailed(Exception):
    """Raised when a lock over a flow process could not be acquired."""
FlowLockFailed
python
huggingface__transformers
src/transformers/models/sam/modeling_sam.py
{ "start": 18021, "end": 23339 }
class ____(nn.Module): def __init__(self, config: SamMaskDecoderConfig): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_multimask_outputs = config.num_multimask_outputs self.num_mask_tokens = config.num_multimask_outputs + 1 self.iou_token = nn.Embedding(1, self.hidden_size) self.mask_tokens = nn.Embedding(self.num_mask_tokens, self.hidden_size) self.transformer = SamTwoWayTransformer(config) # should we create a new class for this? self.upscale_conv1 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 4, kernel_size=2, stride=2) self.upscale_conv2 = nn.ConvTranspose2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=2, stride=2) self.upscale_layer_norm = SamLayerNorm(self.hidden_size // 4, data_format="channels_first") self.activation = nn.GELU() mlps_list = [] for _ in range(self.num_mask_tokens): mlps_list += [SamFeedForward(self.hidden_size, self.hidden_size, self.hidden_size // 8, 3)] self.output_hypernetworks_mlps = nn.ModuleList(mlps_list) self.iou_prediction_head = SamFeedForward( self.hidden_size, config.iou_head_hidden_dim, self.num_mask_tokens, config.iou_head_depth ) def forward( self, image_embeddings: torch.Tensor, image_positional_embeddings: torch.Tensor, sparse_prompt_embeddings: torch.Tensor, dense_prompt_embeddings: torch.Tensor, multimask_output: bool, attention_similarity: Optional[torch.Tensor] = None, target_embedding: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor, torch.Tensor]: """ Predict masks given image and prompt embeddings. Args: image_embeddings (`torch.Tensor`): the embeddings from the image encoder image_positional_embedding (`torch.Tensor`): positional encoding with the shape of image_embeddings sparse_prompt_embeddings (`torch.Tensor`): The embeddings of the points and boxes dense_prompt_embeddings (`torch.Tensor`): the embeddings of the mask inputs multimask_output (bool): Whether to return multiple masks or a single mask. 
""" batch_size, num_channels, height, width = image_embeddings.shape point_batch_size = sparse_prompt_embeddings.shape[1] if sparse_prompt_embeddings is not None else 1 # Concatenate output tokens output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0) output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1) if sparse_prompt_embeddings is not None: tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=2) else: tokens = output_tokens point_embeddings = tokens.to(self.iou_token.weight.dtype) # Expand per-image data in batch direction to be per-point image_embeddings = image_embeddings + dense_prompt_embeddings image_embeddings = image_embeddings.repeat_interleave(point_batch_size, 0) image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0) # Run the transformer, image_positional_embedding are consumed point_embedding, image_embeddings = self.transformer( point_embeddings=point_embeddings, image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, attention_similarity=attention_similarity, target_embedding=target_embedding, ) iou_token_out = point_embedding[:, :, 0, :] mask_tokens_out = point_embedding[:, :, 1 : (1 + self.num_mask_tokens), :] # Upscale mask embeddings and predict masks using the mask tokens image_embeddings = image_embeddings.transpose(2, 3).reshape( batch_size * point_batch_size, num_channels, height, width ) upscaled_embedding = self.upscale_conv1(image_embeddings) upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding)) upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding)) hyper_in_list = [] for i in range(self.num_mask_tokens): current_mlp = self.output_hypernetworks_mlps[i] hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])] hyper_in = torch.stack(hyper_in_list, dim=2) _, num_channels, height, width = upscaled_embedding.shape upscaled_embedding = 
upscaled_embedding.reshape(batch_size, point_batch_size, num_channels, height * width) masks = (hyper_in @ upscaled_embedding).reshape(batch_size, point_batch_size, -1, height, width) # Generate mask quality predictions iou_pred = self.iou_prediction_head(iou_token_out) # Select the correct mask or masks for output if multimask_output: mask_slice = slice(1, None) else: mask_slice = slice(0, 1) masks = masks[:, :, mask_slice, :, :] iou_pred = iou_pred[:, :, mask_slice] return masks, iou_pred
SamMaskDecoder
python
tensorflow__tensorflow
tensorflow/python/types/distribute.py
{ "start": 6718, "end": 7296 }
class ____(DistributedValues): """Holds a distributed value: a map from replica id to unsynchronized values. `PerReplica` values exist on the worker devices, with a different value for each replica. They can be produced many ways, often by iterating through a distributed dataset returned by `tf.distribute.Strategy.experimental_distribute_dataset` and `tf.distribute.Strategy.distribute_datasets_from_function`. They are also the typical result returned by `tf.distribute.Strategy.run`. """ @tf_export("types.experimental.distributed.Mirrored", v1=[])
PerReplica
python
charliermarsh__ruff
crates/ty_python_semantic/resources/corpus/sub_exprs_not_found_in_evaluate_expr_compare.py
{ "start": 134, "end": 284 }
class C:
    # Corpus sample: exercises sub-expression resolution inside compare /
    # boolean expressions (`self.a > other.b or self.b`).  The undeclared
    # attributes are intentional -- do not "fix" them.
    def f(self, other: "C"):
        if self.a > other.b or self.b:
            return False
        if self:
            return True


C().a
C
python
huggingface__transformers
src/transformers/models/hiera/modeling_hiera.py
{ "start": 40072, "end": 44352 }
class ____(nn.Module): def __init__(self, config: HieraConfig): super().__init__() num_features = int(config.embed_dim * config.embed_dim_multiplier ** (len(config.depths) - 1)) tokens_spatial_shape = [i // s for i, s in zip(config.image_size, config.patch_stride)] self.tokens_spatial_shape_final = [ i // s ** (config.num_query_pool) for i, s in zip(tokens_spatial_shape, config.query_stride) ] self.mask_unit_spatial_shape_final = [ i // s ** (config.num_query_pool) for i, s in zip(config.masked_unit_size, config.query_stride) ] self.decoder_embeddings = nn.Linear(num_features, config.decoder_hidden_size) self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size)) self.decoder_position_embeddings = nn.Parameter( torch.zeros(1, math.prod(self.tokens_spatial_shape_final), config.decoder_hidden_size) ) self.decoder_block = HieraStage( config=config, hidden_size=config.decoder_hidden_size, hidden_size_output=config.decoder_hidden_size, num_heads=config.decoder_num_heads, depth=config.decoder_depth, use_mask_unit_attn=False, drop_path=[0.0] * config.decoder_depth, query_stride=[1] * config.decoder_depth, window_size=0, ) self.decoder_norm = nn.LayerNorm(config.decoder_hidden_size, eps=config.layer_norm_eps) # patch stride of prediction self.pred_stride = config.patch_stride[-1] * (config.query_stride[-1] ** config.num_query_pool) pred_dim = (self.pred_stride ** len(config.query_stride)) * config.num_channels self.decoder_pred = nn.Linear(config.decoder_hidden_size, pred_dim) def forward( self, encoder_hidden_states: torch.Tensor, bool_masked_pos: torch.BoolTensor, output_attentions: bool = False, ) -> tuple[torch.Tensor, torch.BoolTensor]: # Embed tokens hidden_states = self.decoder_embeddings(encoder_hidden_states) # Combine visible and bool_masked_pos tokens # hidden_states : [batch_size, num_mask_units_visible, *mask_unit_spatial_shape_final, decoder_hidden_size] # bool_masked_pos: [batch_size, num_mask_units] mask_unit_height, mask_unit_width, 
decoder_hidden_size = hidden_states.shape[2:] batch_size, num_mask_units = bool_masked_pos.shape decoder_hidden_states = torch.zeros( batch_size, num_mask_units, mask_unit_height, mask_unit_width, decoder_hidden_size, device=hidden_states.device, dtype=hidden_states.dtype, ) mask_tokens = self.mask_token.view(1, 1, 1, 1, -1) bool_masked_pos = bool_masked_pos.reshape(batch_size, num_mask_units, 1, 1, 1) bool_masked_pos = bool_masked_pos.expand(-1, -1, mask_unit_height, mask_unit_width, decoder_hidden_size) decoder_hidden_states[bool_masked_pos] = hidden_states.flatten() decoder_hidden_states = ( 1 - bool_masked_pos.float() ) * mask_tokens + bool_masked_pos.float() * decoder_hidden_states # Get back spatial order hidden_states = undo_windowing( decoder_hidden_states, self.tokens_spatial_shape_final, self.mask_unit_spatial_shape_final, ) bool_masked_pos = undo_windowing( bool_masked_pos[..., 0:1], self.tokens_spatial_shape_final, self.mask_unit_spatial_shape_final, ) # Flatten hidden_states = hidden_states.reshape(hidden_states.shape[0], -1, hidden_states.shape[-1]) bool_masked_pos = bool_masked_pos.view(hidden_states.shape[0], -1) # Add pos embed hidden_states = hidden_states + self.decoder_position_embeddings # Apply decoder blocks hidden_states, attn_weights = self.decoder_block(hidden_states, output_attentions=output_attentions) hidden_states = self.decoder_norm(hidden_states) # Predictor projection hidden_states = self.decoder_pred(hidden_states) return hidden_states, bool_masked_pos
HieraDecoder
python
keras-team__keras
keras/src/ops/linalg.py
{ "start": 2878, "end": 3579 }
class Det(Operation):
    # Symbolic-graph node for the determinant op.
    def call(self, x):
        return _det(x)

    def compute_output_spec(self, x):
        _assert_2d(x)
        _assert_square(x)
        # Determinant collapses the trailing two (matrix) axes.
        return KerasTensor(x.shape[:-2], x.dtype)


@keras_export(["keras.ops.det", "keras.ops.linalg.det"])
def det(x):
    """Computes the determinant of a square tensor.

    Args:
        x: Input tensor of shape `(..., M, M)`.

    Returns:
        A tensor of shape `(...,)` representing the determinant of `x`.
    """
    # Symbolic inputs build a graph node; concrete inputs compute eagerly.
    if any_symbolic_tensors((x,)):
        return Det().symbolic_call(x)
    return _det(x)


def _det(x):
    # Eager-path implementation shared by `det` and `Det.call`.
    x = backend.convert_to_tensor(x)
    _assert_2d(x)
    _assert_square(x)
    return backend.linalg.det(x)
Det
python
spack__spack
lib/spack/spack/mirrors/utils.py
{ "start": 4824, "end": 5864 }
class ____: def __init__(self, spec): self.present = Counter() self.new = Counter() self.errors = Counter() self.spec = spec self.added_resources = set() self.existing_resources = set() def finalize(self): if self.spec: if self.added_resources: self.new[self.spec] = len(self.added_resources) if self.existing_resources: self.present[self.spec] = len(self.existing_resources) self.added_resources = set() self.existing_resources = set() def already_existed(self, resource): # If an error occurred after caching a subset of a spec's # resources, a secondary attempt may consider them already added if resource not in self.added_resources: self.existing_resources.add(resource) def added(self, resource): self.added_resources.add(resource) def error(self): if self.spec: self.errors[self.spec] += 1
MirrorStatsForOneSpec
python
apache__airflow
providers/telegram/src/airflow/providers/telegram/hooks/telegram.py
{ "start": 1028, "end": 6935 }
class ____(BaseHook): """ This hook allows you to post messages to Telegram using the telegram python-telegram-bot library. The library can be found here: https://github.com/python-telegram-bot/python-telegram-bot It accepts both telegram bot API token directly or connection that has telegram bot API token. If both supplied, token parameter will be given precedence, otherwise 'password' field in the connection from telegram_conn_id will be used. chat_id can also be provided in the connection using 'host' field in connection. Following is the details of a telegram_connection: name: 'telegram-connection-name' conn_type: 'telegram' password: 'TELEGRAM_TOKEN' (optional) host: 'chat_id' (optional) Examples: .. code-block:: python # Create hook telegram_hook = TelegramHook(telegram_conn_id="telegram_default") telegram_hook = TelegramHook() # will use telegram_default # or telegram_hook = TelegramHook(telegram_conn_id='telegram_default', chat_id='-1xxx') # or telegram_hook = TelegramHook(token='xxx:xxx', chat_id='-1xxx') # Call method from telegram bot client telegram_hook.send_message(None, {"text": "message", "chat_id": "-1xxx"}) # or telegram_hook.send_message(None', {"text": "message"}) :param telegram_conn_id: connection that optionally has Telegram API token in the password field :param token: optional telegram API token :param chat_id: optional chat_id of the telegram chat/channel/group """ conn_name_attr = "telegram_conn_id" default_conn_name = "telegram_default" conn_type = "telegram" hook_name = "Telegram" def __init__( self, telegram_conn_id: str | None = default_conn_name, token: str | None = None, chat_id: str | None = None, ) -> None: super().__init__() self.token = self.__get_token(token, telegram_conn_id) self.chat_id = self.__get_chat_id(chat_id, telegram_conn_id) self.connection = self.get_conn() @classmethod def get_ui_field_behaviour(cls) -> dict[str, Any]: """Return custom field behaviour.""" return { "hidden_fields": ["schema", "extra", "login", 
"port", "extra"], "relabeling": {}, } def get_conn(self) -> telegram.Bot: """ Return the telegram bot client. :return: telegram bot client """ return telegram.Bot(self.token) def __get_token(self, token: str | None, telegram_conn_id: str | None) -> str: """ Return the telegram API token. :param token: telegram API token :param telegram_conn_id: telegram connection name :return: telegram API token """ if token is not None: return token if telegram_conn_id is not None: conn = self.get_connection(telegram_conn_id) if not conn.password: raise AirflowException("Missing token(password) in Telegram connection") return conn.password raise AirflowException("Cannot get token: No valid Telegram connection supplied.") def __get_chat_id(self, chat_id: str | None, telegram_conn_id: str | None) -> str | None: """ Return the telegram chat ID for a chat/channel/group. :param chat_id: optional chat ID :param telegram_conn_id: telegram connection name :return: telegram chat ID """ if chat_id is not None: return chat_id if telegram_conn_id is not None: conn = self.get_connection(telegram_conn_id) return conn.host return None @tenacity.retry( retry=tenacity.retry_if_exception_type(telegram.error.TelegramError), stop=tenacity.stop_after_attempt(5), wait=tenacity.wait_fixed(1), ) def send_message(self, api_params: dict) -> None: """ Send the message to a telegram channel or chat. :param api_params: params for telegram_instance.send_message. 
It can also be used to override chat_id """ kwargs: dict[str, Any] = { "parse_mode": telegram.constants.ParseMode.HTML, "disable_web_page_preview": True, } if self.chat_id is not None: kwargs["chat_id"] = self.chat_id kwargs.update(api_params) if "text" not in kwargs or kwargs["text"] is None: raise AirflowException("'text' must be provided for telegram message") if kwargs.get("chat_id") is None: raise AirflowException("'chat_id' must be provided for telegram message") response = asyncio.run(self.connection.send_message(**kwargs)) self.log.debug(response) @tenacity.retry( retry=tenacity.retry_if_exception_type(telegram.error.TelegramError), stop=tenacity.stop_after_attempt(5), wait=tenacity.wait_fixed(1), ) def send_file(self, api_params: dict) -> None: """ Send the file to a telegram channel or chat. :param api_params: params for telegram_instance.send_document. It can also be used to override chat_id """ kwargs: dict[str, Any] = {} if self.chat_id is not None: kwargs["chat_id"] = self.chat_id kwargs.update(api_params) if "file" not in kwargs or kwargs["file"] is None: raise AirflowException( "'file' parameter must be provided for sending a Telegram document message" ) kwargs["document"] = kwargs.pop("file") # rename 'file' to 'document' if kwargs.get("chat_id") is None: raise AirflowException("'chat_id' must be provided for telegram document message") response = asyncio.run(self.connection.send_document(**kwargs)) self.log.debug(response)
TelegramHook
python
numba__numba
numba/cuda/tests/cudapy/test_extending.py
{ "start": 2712, "end": 4118 }
class ____(CUDATestCase): def test_attributes(self): @cuda.jit def f(r, x): iv = Interval(x[0], x[1]) r[0] = iv.lo r[1] = iv.hi x = np.asarray((1.5, 2.5)) r = np.zeros_like(x) f[1, 1](r, x) np.testing.assert_equal(r, x) def test_property(self): @cuda.jit def f(r, x): iv = Interval(x[0], x[1]) r[0] = iv.width x = np.asarray((1.5, 2.5)) r = np.zeros(1) f[1, 1](r, x) np.testing.assert_allclose(r[0], x[1] - x[0]) def test_extension_type_as_arg(self): @cuda.jit def f(r, x): iv = Interval(x[0], x[1]) r[0] = interval_width(iv) x = np.asarray((1.5, 2.5)) r = np.zeros(1) f[1, 1](r, x) np.testing.assert_allclose(r[0], x[1] - x[0]) def test_extension_type_as_retvalue(self): @cuda.jit def f(r, x): iv1 = Interval(x[0], x[1]) iv2 = Interval(x[2], x[3]) iv_sum = sum_intervals(iv1, iv2) r[0] = iv_sum.lo r[1] = iv_sum.hi x = np.asarray((1.5, 2.5, 3.0, 4.0)) r = np.zeros(2) f[1, 1](r, x) expected = np.asarray((x[0] + x[2], x[1] + x[3])) np.testing.assert_allclose(r, expected) if __name__ == '__main__': unittest.main()
TestExtending
python
gevent__gevent
src/gevent/libuv/watcher.py
{ "start": 23847, "end": 26776 }
class ____(_base.TimerMixin, watcher): _watcher_callback_name = '_gevent_timer_callback0' # In libuv, timer callbacks continue running while any timer is # expired, including newly added timers. Newly added non-zero # timers (especially of small duration) can be seen to be expired # if the loop time is updated while we are in a timer callback. # This can lead to us being stuck running timers for a terribly # long time, which is not good. So default to not updating the # time. # Also, newly-added timers of 0 duration can *also* stall the # loop, because they'll be seen to be expired immediately. # Updating the time can prevent that, *if* there was already a # timer for a longer duration scheduled. # To mitigate the above problems, our loop implementation turns # zero duration timers into check watchers instead using OneShotCheck. # This ensures the loop cycles. Of course, the 'again' method does # nothing on them and doesn't exist. In practice that's not an issue. _again = False def _watcher_ffi_init(self, args): self._watcher_init(self.loop.ptr, self._watcher) self._after, self._repeat = args if self._after and self._after < 0.001: import warnings # XXX: The stack level is hard to determine, could be getting here # through a number of different ways. 
warnings.warn("libuv only supports millisecond timer resolution; " "all times less will be set to 1 ms", stacklevel=6) # The alternative is to effectively pass in int(0.1) == 0, which # means no sleep at all, which leads to excessive wakeups self._after = 0.001 if self._repeat and self._repeat < 0.001: import warnings warnings.warn("libuv only supports millisecond timer resolution; " "all times less will be set to 1 ms", stacklevel=6) self._repeat = 0.001 def _watcher_ffi_start(self): if self._again: libuv.uv_timer_again(self._watcher) else: try: self._watcher_start(self._watcher, self._watcher_callback, int(self._after * 1000), int(self._repeat * 1000)) except ValueError: # in case of non-ints in _after/_repeat raise TypeError() def again(self, callback, *args, **kw): if not self.active: # If we've never been started, this is the same as starting us. # libuv makes the distinction, libev doesn't. self.start(callback, *args, **kw) return self._again = True try: self.start(callback, *args, **kw) finally: del self._again
timer
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 999533, "end": 999905 }
class ____(sgqlc.types.Type): """An edge in a connection.""" __schema__ = github_schema __field_names__ = ("cursor", "node") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") """A cursor for use in pagination.""" node = sgqlc.types.Field("Team", graphql_name="node") """The item at the end of the edge."""
TeamEdge
python
pexpect__pexpect
examples/chess.py
{ "start": 1387, "end": 4500 }
class ____: def __init__(self, engine = "/usr/local/bin/gnuchess -a -h 1"): self.child = pexpect.spawn (engine) self.term = ANSI.ANSI () self.child.expect ('Chess') if self.child.after != 'Chess': raise IOError('incompatible chess program') self.term.process_list (self.before) self.term.process_list (self.after) self.last_computer_move = '' def read_until_cursor (self, r,c): while 1: self.child.read(1, 60) self.term.process (c) if self.term.cur_r == r and self.term.cur_c == c: return 1 def do_first_move (self, move): self.child.expect ('Your move is') self.child.sendline (move) self.term.process_list (self.before) self.term.process_list (self.after) return move def do_move (self, move): self.read_until_cursor (19,60) self.child.sendline (move) return move def get_first_computer_move (self): self.child.expect ('My move is') self.child.expect (REGEX_MOVE) return self.child.after def get_computer_move (self): print('Here') i = self.child.expect ([r'\[17;59H', r'\[17;58H']) print(i) if i == 0: self.child.expect (REGEX_MOVE) if len(self.child.after) < 4: self.child.after = self.child.after + self.last_computer_move[3] if i == 1: self.child.expect (REGEX_MOVE_PART) self.child.after = self.last_computer_move[0] + self.child.after print('', self.child.after) self.last_computer_move = self.child.after return self.child.after def switch (self): self.child.sendline ('switch') def set_depth (self, depth): self.child.sendline ('depth') self.child.expect ('depth=') self.child.sendline ('%d' % depth) def quit(self): self.child.sendline ('quit') import sys print('Starting...') white = Chess() white.child.echo = 1 white.child.expect ('Your move is') white.set_depth(2) white.switch() move_white = white.get_first_computer_move() print('first move white:', move_white) white.do_move ('e7e5') move_white = white.get_computer_move() print('move white:', move_white) white.do_move ('f8c5') move_white = white.get_computer_move() print('move white:', move_white) white.do_move ('b8a6') 
move_white = white.get_computer_move() print('move white:', move_white) sys.exit(1) black = Chess() white = Chess() white.child.expect ('Your move is') white.switch() move_white = white.get_first_computer_move() print('first move white:', move_white) black.do_first_move (move_white) move_black = black.get_first_computer_move() print('first move black:', move_black) white.do_move (move_black) done = 0 while not done: move_white = white.get_computer_move() print('move white:', move_white) black.do_move (move_white) move_black = black.get_computer_move() print('move black:', move_black) white.do_move (move_black) print('tail of loop') g.quit()
Chess
python
sqlalchemy__sqlalchemy
test/ext/test_associationproxy.py
{ "start": 2370, "end": 2407 }
class ____(set): pass
SetCollection
python
kamyu104__LeetCode-Solutions
Python/left-and-right-sum-differences.py
{ "start": 42, "end": 371 }
class ____(object): def leftRigthDifference(self, nums): """ :type nums: List[int] :rtype: List[int] """ total = sum(nums) result = [] curr = 0 for x in nums: curr += x result.append(abs((curr-x)-(total-curr))) return result
Solution
python
huggingface__transformers
src/transformers/models/blt/modeling_blt.py
{ "start": 52382, "end": 57942 }
class ____(BltPreTrainedModel, GenerationMixin): config: BltConfig _can_compile_fullgraph = False base_model_prefix = "model" _tied_weights_keys = {"model.local_encoder.embed_tokens.weight": "lm_head.weight"} def __init__(self, config: BltConfig): super().__init__(config.get_text_config()) self.text_config = config.get_text_config() self.vocab_size = config.vocab_size self.model = BltModel(config) self.lm_head = nn.Linear(config.decoder_config.hidden_size, config.vocab_size, bias=False) self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, cross_attention_states: Optional[torch.LongTensor] = None, # Keep for compatibility cross_attention_mask: Optional[torch.LongTensor] = None, full_text_row_masked_out_mask: Optional[tuple[torch.Tensor, torch.Tensor]] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, CausalLMOutputWithPast]: r""" cross_attention_states (`torch.FloatTensor`, *optional*): Output of the vision model, used for cross-attention. This tensor contains the processed image features that the language model will attend to. cross_attention_mask (`torch.Tensor` of shape `(batch_size, seq_length, max_num_images, max_num_tiles)`, *optional*): Cross-attention mask to control the interaction between text tokens and image tiles. This 4D tensor defines which image tiles each text token should attend to. 
For each text token (in seq_length): - 1 indicates the token **should attend** to the corresponding image tile - 0 indicates the token **should not attend** to the corresponding image tile full_text_row_masked_out_mask (`tuple[torch.Tensor, torch.Tensor]`, *optional*): A tuple containing two tensors that mask out rows in the cross-attention mechanism: - The first tensor has shape `(batch_size, 1, seq_length, 1)` and contains values of 0 or 1. A value of 0 indicates that the corresponding text token's entire row in the cross-attention matrix should be masked out (all image tokens ignored). - The second tensor has the same shape and is used internally to apply the masking during the forward pass of cross-attention layers. This mask is derived from the cross_attention_mask and is used to handle cases where a text token should not attend to any image token. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, BltForCausalLM >>> model = BltForCausalLM.from_pretrained("Llama-3.2-11B-Vision") >>> tokenizer = AutoTokenizer.from_pretrained("Llama-3.2-11B-Vision") >>> prompt = "If I had to write a haiku, it would be:" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=40, do_sample=True, temperature=0.6) >>> result = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] >>> print(result) If I had to write a haiku, it would be: "Snowflakes gently fall" - simple, yet peaceful. 
I love the idea of snowflakes gently falling, each one ``` """ # Call parent forward but exclude cross_attention_states from model call outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, cross_attention_mask=cross_attention_mask, full_text_row_masked_out_mask=full_text_row_masked_out_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]).float() loss = None if labels is not None: loss = self.loss_function(logits, labels, self.vocab_size, **kwargs) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = ["BltPreTrainedModel", "BltModel", "BltPatcher", "BltForCausalLM"]
BltForCausalLM
python
cython__cython
Cython/Compiler/Nodes.py
{ "start": 207442, "end": 207538 }
class ____(GeneratorDefNode): gen_type_name = 'Coroutine' is_coroutine = True
AsyncDefNode
python
anthropics__anthropic-sdk-python
src/anthropic/resources/messages/messages.py
{ "start": 108349, "end": 108844 }
class ____: def __init__(self, messages: AsyncMessages) -> None: self._messages = messages self.create = async_to_streamed_response_wrapper( messages.create, ) self.count_tokens = async_to_streamed_response_wrapper( messages.count_tokens, ) @cached_property def batches(self) -> AsyncBatchesWithStreamingResponse: return AsyncBatchesWithStreamingResponse(self._messages.batches)
AsyncMessagesWithStreamingResponse
python
great-expectations__great_expectations
contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/metrics/data_profiler_metrics/data_profiler_table_column_list.py
{ "start": 651, "end": 3302 }
class ____(DataProfilerProfileMetricProvider): metric_name = "data_profiler.table_column_list" value_keys = ( "profile_path", "profile_report_filtering_key", "profile_report_accepted_filtering_values", ) @metric_value(engine=PandasExecutionEngine) def _pandas( cls, execution_engine, metric_domain_kwargs, metric_value_kwargs, metrics, runtime_configuration, ): profile_report_filtering_key = metric_value_kwargs["profile_report_filtering_key"] profile_report_accepted_filtering_values = metric_value_kwargs[ "profile_report_accepted_filtering_values" ] profile_report_column_data_stats: dict = metrics["data_profiler.table_column_infos"] profile_report_column_names: List[str] = list(profile_report_column_data_stats.keys()) profile_report_column_names = get_dbms_compatible_column_names( column_names=profile_report_column_names, batch_columns_list=metrics["table.columns"], ) profile_report_filtered_column_names: list = [] for col in profile_report_column_names: if ( metrics["data_profiler.table_column_infos"][col][profile_report_filtering_key] in profile_report_accepted_filtering_values ): profile_report_filtered_column_names.append(col) return profile_report_filtered_column_names @classmethod def _get_evaluation_dependencies( cls, metric: MetricConfiguration, configuration: Optional[ExpectationConfiguration] = None, execution_engine: Optional[ExecutionEngine] = None, runtime_configuration: Optional[dict] = None, ): dependencies: dict = super()._get_evaluation_dependencies( metric=metric, configuration=configuration, execution_engine=execution_engine, runtime_configuration=runtime_configuration, ) table_domain_kwargs: dict = { k: v for k, v in metric.metric_domain_kwargs.items() if k != "column" } dependencies["data_profiler.table_column_infos"] = MetricConfiguration( metric_name="data_profiler.table_column_infos", metric_domain_kwargs={}, metric_value_kwargs=metric.metric_value_kwargs, ) dependencies["table.columns"] = MetricConfiguration( metric_name="table.columns", 
metric_domain_kwargs=table_domain_kwargs, metric_value_kwargs=None, ) return dependencies
DataProfilerTableColumnList
python
mwaskom__seaborn
tests/_core/test_subplots.py
{ "start": 98, "end": 1682 }
class ____: def test_both_facets_and_wrap(self): err = "Cannot wrap facets when specifying both `col` and `row`." facet_spec = {"wrap": 3, "variables": {"col": "a", "row": "b"}} with pytest.raises(RuntimeError, match=err): Subplots({}, facet_spec, {}) def test_cross_xy_pairing_and_wrap(self): err = "Cannot wrap subplots when pairing on both `x` and `y`." pair_spec = {"wrap": 3, "structure": {"x": ["a", "b"], "y": ["y", "z"]}} with pytest.raises(RuntimeError, match=err): Subplots({}, {}, pair_spec) def test_col_facets_and_x_pairing(self): err = "Cannot facet the columns while pairing on `x`." facet_spec = {"variables": {"col": "a"}} pair_spec = {"structure": {"x": ["x", "y"]}} with pytest.raises(RuntimeError, match=err): Subplots({}, facet_spec, pair_spec) def test_wrapped_columns_and_y_pairing(self): err = "Cannot wrap the columns while pairing on `y`." facet_spec = {"variables": {"col": "a"}, "wrap": 2} pair_spec = {"structure": {"y": ["x", "y"]}} with pytest.raises(RuntimeError, match=err): Subplots({}, facet_spec, pair_spec) def test_wrapped_x_pairing_and_facetd_rows(self): err = "Cannot wrap the columns while faceting the rows." facet_spec = {"variables": {"row": "a"}} pair_spec = {"structure": {"x": ["x", "y"]}, "wrap": 2} with pytest.raises(RuntimeError, match=err): Subplots({}, facet_spec, pair_spec)
TestSpecificationChecks
python
PrefectHQ__prefect
src/prefect/client/schemas/filters.py
{ "start": 22975, "end": 23237 }
class ____(PrefectBaseModel): """Filter by `BlockSchema.capabilities`""" any_: Optional[List[str]] = Field( default=None, examples=[["2.0.0", "2.1.0"]], description="A list of block schema versions.", )
BlockSchemaFilterVersion
python
google__python-fire
fire/test_components.py
{ "start": 2490, "end": 2665 }
class ____: # pylint: disable=old-style-class,no-init def double(self, count=0): return 2 * count def triple(self, count=0): return 3 * count
OldStyleWithDefaults
python
pytorch__pytorch
test/quantization/eager/test_equalize_eager.py
{ "start": 348, "end": 8158 }
class ____(QuantizationTestCase): def checkChannelsEqualized(self, tensor1, tensor2, output_axis, input_axis): """Checks the channel ranges of tensor1, tensor2 are the same, which is an indication that equalization has been applied correctly """ output_channel_tensor1 = _equalize.channel_range(tensor1, output_axis) input_channel_tensor2 = _equalize.channel_range(tensor2, input_axis) # ensuring the channels ranges of tensor1's input is the same as # tensor2's output self.assertEqual(output_channel_tensor1, input_channel_tensor2) def getModule(self, model, name): """Given the name is a submodule to a model, return the submodule""" curr = model name = name.split(".") for subname in name: curr = curr._modules[subname] return curr def test_cross_layer_equalization(self): """applies _equalize.cross_layer_equalization on two modules and checks to make sure channels ranges are equivalent """ module1 = nn.Conv2d(3, 4, 2) module2 = nn.Linear(4, 4) module1_output_channel_axis = 0 module2_input_channel_axis = 1 _equalize.cross_layer_equalization(module1, module2) mod_tensor1, mod_tensor2 = module1.weight, module2.weight self.checkChannelsEqualized( mod_tensor1, mod_tensor2, module1_output_channel_axis, module2_input_channel_axis, ) def test_converged(self): """Sanity checks on _equalize.converged working identical modules should return true modules with high difference in weights should return false """ module1 = nn.Linear(3, 3) module2 = nn.Linear(3, 3) module1.weight = nn.parameter.Parameter(torch.ones(module1.weight.size())) module2.weight = nn.parameter.Parameter(torch.zeros(module1.weight.size())) # input is a dictionary dictionary_1 = {"linear1": module1} dictionary_2 = {"linear1": module2} self.assertTrue(_equalize.converged(dictionary_1, dictionary_1, 1e-6)) self.assertFalse(_equalize.converged(dictionary_1, dictionary_2, 1e-6)) def test_equalize(self): """First checks to see if _equalize.equalize can handle multiple pair modules as input then checks correctness of the 
function by ensuring the equalized and unequalized versions of the model yield the same output given the same input """ class ChainModule(nn.Module): def __init__(self) -> None: super().__init__() self.linear1 = nn.Linear(3, 4) self.linear2 = nn.Linear(4, 5) self.linear3 = nn.Linear(5, 6) def forward(self, x): x = self.linear1(x) x = self.linear2(x) x = self.linear3(x) return x chain1 = ChainModule() chain2 = copy.deepcopy(chain1) _equalize.equalize( chain1, [["linear1", "linear2"], ["linear2", "linear3"]], 1e-6 ) linear1 = self.getModule(chain1, "linear1") linear2 = self.getModule(chain1, "linear2") linear3 = self.getModule(chain1, "linear3") self.checkChannelsEqualized(linear1.weight, linear2.weight, 0, 1) self.checkChannelsEqualized(linear2.weight, linear3.weight, 0, 1) input = torch.randn(20, 3) self.assertEqual(chain1(input), chain2(input)) def test_equalize_fused_convrelu(self): """Checks to see if eager mode equalization supports fused ConvReLU2d models A model with 3 ConvReLU2d is constructed. Next, the conv2d and relu layers are fused together and adjacent conv2d layers have cross-layer equalization applied. 
Finally, we ensure that the channels have been equalized and that the equalized and unequalized versions of the model yield the same output given the same input """ class M(nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(3, 3, 1).to(dtype=torch.float) self.relu1 = nn.ReLU(inplace=False).to(dtype=torch.float) self.conv2 = nn.Conv2d(3, 3, 1).to(dtype=torch.float) self.relu2 = nn.ReLU(inplace=False).to(dtype=torch.float) self.conv3 = nn.Conv2d(3, 3, 1).to(dtype=torch.float) self.relu3 = nn.ReLU(inplace=False).to(dtype=torch.float) def forward(self, x): x = self.conv1(x) x = self.relu1(x) x = self.conv2(x) x = self.relu2(x) x = self.conv3(x) x = self.relu3(x) return x model = M() fused_model1 = fuse_modules( model, [["conv1", "relu1"], ["conv2", "relu2"], ["conv3", "relu3"]] ) fused_model2 = copy.deepcopy(fused_model1) _equalize.equalize(fused_model1, [["conv1", "conv2"], ["conv2", "conv3"]], 1e-6) conv1 = self.getModule(fused_model1, "conv1")[0] conv2 = self.getModule(fused_model1, "conv2")[0] conv3 = self.getModule(fused_model1, "conv3")[0] self.checkChannelsEqualized(conv1.weight, conv2.weight, 0, 1) self.checkChannelsEqualized(conv2.weight, conv3.weight, 0, 1) input = torch.randn(3, 3, 1, 1) self.assertEqual(fused_model1(input), fused_model2(input)) self.assertEqual(fused_model1(input), model(input)) def test_equalize_fused_linearrelu(self): """Checks to see if eager mode equalization supports fused LinearReLU models A model with 3 LinearReLU is constructed. Next, the linear and relu layers are fused together and adjacent linear layers have cross-layer equalization applied. 
Finally, we ensure that the channels have been equalized and that the equalized and unequalized versions of the model yield the same output given the same input """ class M(nn.Module): def __init__(self) -> None: super().__init__() self.linear1 = nn.Linear(3, 4) self.relu1 = nn.ReLU(inplace=False).to(dtype=torch.float) self.linear2 = nn.Linear(4, 5) self.relu2 = nn.ReLU(inplace=False).to(dtype=torch.float) self.linear3 = nn.Linear(5, 6) self.relu3 = nn.ReLU(inplace=False).to(dtype=torch.float) def forward(self, x): x = self.linear1(x) x = self.relu1(x) x = self.linear2(x) x = self.relu2(x) x = self.linear3(x) x = self.relu3(x) return x model = M() fused_model1 = fuse_modules( model, [["linear1", "relu1"], ["linear2", "relu2"], ["linear3", "relu3"]] ) fused_model2 = copy.deepcopy(fused_model1) _equalize.equalize( fused_model1, [["linear1", "linear2"], ["linear2", "linear3"]], 1e-6 ) linear1 = self.getModule(fused_model1, "linear1")[0] linear2 = self.getModule(fused_model1, "linear2")[0] linear3 = self.getModule(fused_model1, "linear3")[0] self.checkChannelsEqualized(linear1.weight, linear2.weight, 0, 1) self.checkChannelsEqualized(linear2.weight, linear3.weight, 0, 1) input = torch.randn(20, 3) self.assertEqual(fused_model1(input), fused_model2(input)) self.assertEqual(fused_model1(input), model(input)) if __name__ == "__main__": raise_on_run_directly("test/test_quantization.py")
TestEqualizeEager
python
cython__cython
tests/run/methodmangling_T5.py
{ "start": 2135, "end": 2736 }
class ____(object): """ >>> ut = _UnderscoreTest() >>> '__x' in dir(ut) False >>> '_UnderscoreTest__x' in dir(ut) True >>> ut._UnderscoreTest__x 1 >>> ut.get() 1 >>> ut._UnderscoreTest__UnderscoreNested().ret1() 1 >>> ut._UnderscoreTest__UnderscoreNested.__name__ '__UnderscoreNested' >>> ut._UnderscoreTest__prop 1 """ __x = 1 def get(self): return self.__x class __UnderscoreNested(object): def ret1(self): return 1 @property def __prop(self): return self.__x
_UnderscoreTest
python
encode__django-rest-framework
tests/test_generics.py
{ "start": 537, "end": 705 }
class ____(RESTFrameworkModel): text = models.CharField(max_length=100) slug = models.SlugField(max_length=32) # Model for regression test for #285
SlugBasedModel
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 138040, "end": 138567 }
class ____(VegaLiteSchema): """ AxisResolveMap schema wrapper. Parameters ---------- x : :class:`ResolveMode`, Literal['independent', 'shared'] y : :class:`ResolveMode`, Literal['independent', 'shared'] """ _schema = {"$ref": "#/definitions/AxisResolveMap"} def __init__( self, x: Optional[SchemaBase | ResolveMode_T] = Undefined, y: Optional[SchemaBase | ResolveMode_T] = Undefined, **kwds, ): super().__init__(x=x, y=y, **kwds)
AxisResolveMap
python
allegroai__clearml
clearml/backend_api/services/v2_23/frames.py
{ "start": 68789, "end": 86495 }
class ____(NonStrictDataModel): """ :param id: Frame id :type id: str :param augmentation: List of augmentations :type augmentation: Sequence[Augmentation] :param timestamp: Frame's offset in milliseconds, used primarily for video content. Used for the default frames sorting as the secondary key (with the primary key being 'context_id'). For images, this value should typically be 0. If not set, value is filled from the timestamp of the first source. We recommend using this field only in cases concerning the default sorting behavior. :type timestamp: int :param dataset: Frame's dataset version :type dataset: DatasetVersion :param saved: Last time frame was saved (timestamp) :type saved: int :param saved_in_version: Last version this frame was saved in (version ID) :type saved_in_version: str :param updated: Last time frame was saved (timestamp) :type updated: int :param updated_in_version: Last version this frame was updated in (version ID) :type updated_in_version: str :param rois: Frame regions of interest :type rois: Sequence[Roi] :param labels_size: Number of labels returned :type labels_size: int :param rule_name: Name of the filtering rule according to which this frame was provided (if applicable) :type rule_name: str :param video_gop: Video encoding GOP value for the source of this frame. Only valid for video frames :type video_gop: float :param is_key_frame: Is this a key frame (only applicable in frames who'se src is a video) :type is_key_frame: bool :param key_frame: ID of the key frame that this frame belongs to :type key_frame: str :param meta: Additional metadata dictionary for the frame. Please note that using this field effectively defines a schema (dictionary structure and types used as values) - frames within the same dataset cannot use conflicting schemas for this field (see documentation for more details). :type meta: dict :param blob: Raw data (blob) for the frame :type blob: str :param meta_blob: Non searchable metadata dictionary for the frame. 
The fields in this object cannot be searched by and are not added to the frame schema :type meta_blob: dict :param new_ver: Newer version of this frame, if asked to merge :type new_ver: Frame :param label_rule_counts: The number of matched roi per lable rule :type label_rule_counts: dict :param sources: Sources of this frame :type sources: Sequence[Source] :param context_id: Context ID. Used for the default frames sorting. If not set then it is filled from the uri of the first source. :type context_id: str """ _schema = { "properties": { "augmentation": { "description": "List of augmentations", "items": {"$ref": "#/definitions/augmentation"}, "type": ["array", "null"], }, "blob": { "description": "Raw data (blob) for the frame", "type": ["string", "null"], }, "context_id": { "description": ( "Context ID. Used for the default frames sorting. If not set then it is filled from the " "uri of the first source." ), "type": ["string", "null"], }, "dataset": { "description": "Frame's dataset version", "oneOf": [{"$ref": "#/definitions/dataset_version"}, {"type": "null"}], }, "id": {"description": "Frame id", "type": ["string", "null"]}, "is_key_frame": { "description": "Is this a key frame (only applicable in frames who'se src is a video)", "type": ["boolean", "null"], }, "key_frame": { "description": "ID of the key frame that this frame belongs to", "type": ["string", "null"], }, "label_rule_counts": { "additionalProperties": True, "description": "The number of matched roi per lable rule", "type": ["object", "null"], }, "labels_size": { "description": "Number of labels returned", "type": ["integer", "null"], }, "meta": { "additionalProperties": True, "description": ( "Additional metadata dictionary for the frame. Please note that using this field effectively" " defines a schema (dictionary structure and types used as values) - frames within the same dataset" " cannot use conflicting schemas for this field (see documentation for more details)." 
), "type": ["object", "null"], }, "meta_blob": { "additionalProperties": True, "description": ( "Non searchable metadata dictionary for the frame. The fields in this object cannot be searched by" " and are not added to the frame schema" ), "type": ["object", "null"], }, "new_ver": { "description": "Newer version of this frame, if asked to merge", "oneOf": [{"$ref": "#/definitions/frame"}, {"type": "null"}], }, "rois": { "description": "Frame regions of interest", "items": {"$ref": "#/definitions/roi"}, "type": ["array", "null"], }, "rule_name": { "description": "Name of the filtering rule according to which this frame was provided (if applicable)", "type": ["string", "null"], }, "saved": { "description": "Last time frame was saved (timestamp)", "type": ["integer", "null"], }, "saved_in_version": { "description": "Last version this frame was saved in (version ID)", "type": ["string", "null"], }, "sources": { "description": "Sources of this frame", "items": {"$ref": "#/definitions/source"}, "type": ["array", "null"], }, "timestamp": { "description": ( "Frame's offset in milliseconds, used primarily for video content. Used for the default frames" " sorting as the secondary key (with the primary key being 'context_id'). For images, this value" " should typically be 0. If not set, value is filled from the timestamp of the first source. We" " recommend using this field only in cases concerning the default sorting behavior." ), "type": ["integer", "null"], }, "updated": { "description": "Last time frame was saved (timestamp)", "type": ["integer", "null"], }, "updated_in_version": { "description": "Last version this frame was updated in (version ID)", "type": ["string", "null"], }, "video_gop": { "description": "Video encoding GOP value for the source of this frame. 
Only valid for video frames", "type": ["number", "null"], }, }, "type": "object", } def __init__( self, id=None, augmentation=None, timestamp=None, dataset=None, saved=None, saved_in_version=None, updated=None, updated_in_version=None, rois=None, labels_size=None, rule_name=None, video_gop=None, is_key_frame=None, key_frame=None, meta=None, blob=None, meta_blob=None, new_ver=None, label_rule_counts=None, sources=None, context_id=None, **kwargs ): super(Frame, self).__init__(**kwargs) self.id = id self.augmentation = augmentation self.timestamp = timestamp self.dataset = dataset self.saved = saved self.saved_in_version = saved_in_version self.updated = updated self.updated_in_version = updated_in_version self.rois = rois self.labels_size = labels_size self.rule_name = rule_name self.video_gop = video_gop self.is_key_frame = is_key_frame self.key_frame = key_frame self.meta = meta self.blob = blob self.meta_blob = meta_blob self.new_ver = new_ver self.label_rule_counts = label_rule_counts self.sources = sources self.context_id = context_id @schema_property("id") def id(self): return self._property_id @id.setter def id(self, value): if value is None: self._property_id = None return self.assert_isinstance(value, "id", six.string_types) self._property_id = value @schema_property("augmentation") def augmentation(self): return self._property_augmentation @augmentation.setter def augmentation(self, value): if value is None: self._property_augmentation = None return self.assert_isinstance(value, "augmentation", (list, tuple)) if any(isinstance(v, dict) for v in value): value = [ Augmentation.from_dict(v) if isinstance(v, dict) else v for v in value ] else: self.assert_isinstance(value, "augmentation", Augmentation, is_array=True) self._property_augmentation = value @schema_property("timestamp") def timestamp(self): return self._property_timestamp @timestamp.setter def timestamp(self, value): if value is None: self._property_timestamp = None return if isinstance(value, 
float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "timestamp", six.integer_types) self._property_timestamp = value @schema_property("dataset") def dataset(self): return self._property_dataset @dataset.setter def dataset(self, value): if value is None: self._property_dataset = None return if isinstance(value, dict): value = DatasetVersion.from_dict(value) else: self.assert_isinstance(value, "dataset", DatasetVersion) self._property_dataset = value @schema_property("saved") def saved(self): return self._property_saved @saved.setter def saved(self, value): if value is None: self._property_saved = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "saved", six.integer_types) self._property_saved = value @schema_property("saved_in_version") def saved_in_version(self): return self._property_saved_in_version @saved_in_version.setter def saved_in_version(self, value): if value is None: self._property_saved_in_version = None return self.assert_isinstance(value, "saved_in_version", six.string_types) self._property_saved_in_version = value @schema_property("updated") def updated(self): return self._property_updated @updated.setter def updated(self, value): if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value @schema_property("updated_in_version") def updated_in_version(self): return self._property_updated_in_version @updated_in_version.setter def updated_in_version(self, value): if value is None: self._property_updated_in_version = None return self.assert_isinstance(value, "updated_in_version", six.string_types) self._property_updated_in_version = value @schema_property("rois") def rois(self): return self._property_rois @rois.setter def rois(self, value): if value is None: self._property_rois = None return 
self.assert_isinstance(value, "rois", (list, tuple)) if any(isinstance(v, dict) for v in value): value = [Roi.from_dict(v) if isinstance(v, dict) else v for v in value] else: self.assert_isinstance(value, "rois", Roi, is_array=True) self._property_rois = value @schema_property("labels_size") def labels_size(self): return self._property_labels_size @labels_size.setter def labels_size(self, value): if value is None: self._property_labels_size = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "labels_size", six.integer_types) self._property_labels_size = value @schema_property("rule_name") def rule_name(self): return self._property_rule_name @rule_name.setter def rule_name(self, value): if value is None: self._property_rule_name = None return self.assert_isinstance(value, "rule_name", six.string_types) self._property_rule_name = value @schema_property("video_gop") def video_gop(self): return self._property_video_gop @video_gop.setter def video_gop(self, value): if value is None: self._property_video_gop = None return self.assert_isinstance(value, "video_gop", six.integer_types + (float,)) self._property_video_gop = value @schema_property("is_key_frame") def is_key_frame(self): return self._property_is_key_frame @is_key_frame.setter def is_key_frame(self, value): if value is None: self._property_is_key_frame = None return self.assert_isinstance(value, "is_key_frame", (bool,)) self._property_is_key_frame = value @schema_property("key_frame") def key_frame(self): return self._property_key_frame @key_frame.setter def key_frame(self, value): if value is None: self._property_key_frame = None return self.assert_isinstance(value, "key_frame", six.string_types) self._property_key_frame = value @schema_property("meta") def meta(self): return self._property_meta @meta.setter def meta(self, value): if value is None: self._property_meta = None return self.assert_isinstance(value, "meta", (dict,)) self._property_meta 
= value @schema_property("blob") def blob(self): return self._property_blob @blob.setter def blob(self, value): if value is None: self._property_blob = None return self.assert_isinstance(value, "blob", six.string_types) self._property_blob = value @schema_property("meta_blob") def meta_blob(self): return self._property_meta_blob @meta_blob.setter def meta_blob(self, value): if value is None: self._property_meta_blob = None return self.assert_isinstance(value, "meta_blob", (dict,)) self._property_meta_blob = value @schema_property("new_ver") def new_ver(self): return self._property_new_ver @new_ver.setter def new_ver(self, value): if value is None: self._property_new_ver = None return if isinstance(value, dict): value = Frame.from_dict(value) else: self.assert_isinstance(value, "new_ver", Frame) self._property_new_ver = value @schema_property("label_rule_counts") def label_rule_counts(self): return self._property_label_rule_counts @label_rule_counts.setter def label_rule_counts(self, value): if value is None: self._property_label_rule_counts = None return self.assert_isinstance(value, "label_rule_counts", (dict,)) self._property_label_rule_counts = value @schema_property("sources") def sources(self): return self._property_sources @sources.setter def sources(self, value): if value is None: self._property_sources = None return self.assert_isinstance(value, "sources", (list, tuple)) if any(isinstance(v, dict) for v in value): value = [Source.from_dict(v) if isinstance(v, dict) else v for v in value] else: self.assert_isinstance(value, "sources", Source, is_array=True) self._property_sources = value @schema_property("context_id") def context_id(self): return self._property_context_id @context_id.setter def context_id(self, value): if value is None: self._property_context_id = None return self.assert_isinstance(value, "context_id", six.string_types) self._property_context_id = value
Frame
python
wandb__wandb
wandb/vendor/gql-0.2.0/wandb_gql/client.py
{ "start": 249, "end": 563 }
class ____(Exception): """Custom exception thrown when retry logic fails""" def __init__(self, retries_count, last_exception): message = "Failed %s retries: %s" % (retries_count, last_exception) super(RetryError, self).__init__(message) self.last_exception = last_exception
RetryError
python
huggingface__transformers
src/transformers/models/deberta/modeling_deberta.py
{ "start": 1268, "end": 2042 }
class ____(nn.Module): """LayerNorm module (epsilon inside the square root).""" def __init__(self, size, eps=1e-12): super().__init__() self.weight = nn.Parameter(torch.ones(size)) self.bias = nn.Parameter(torch.zeros(size)) self.variance_epsilon = eps def forward(self, hidden_states): input_type = hidden_states.dtype hidden_states = hidden_states.float() mean = hidden_states.mean(-1, keepdim=True) variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True) hidden_states = (hidden_states - mean) / torch.sqrt(variance + self.variance_epsilon) hidden_states = hidden_states.to(input_type) y = self.weight * hidden_states + self.bias return y
DebertaLayerNorm
python
conda__conda
conda/common/configuration.py
{ "start": 22982, "end": 25886 }
class ____(LoadedParameter): """LoadedParameter type that holds a map (i.e. dict) of LoadedParameters.""" _type = frozendict def __init__( self, name, value, element_type, key_flag, value_flags, validation=None ): """ Args: value (Mapping): Map of string keys to LoadedParameter values. element_type (Parameter): The Parameter type that is held in value. value_flags (Mapping): Map of priority value flags. """ self._element_type = element_type super().__init__(name, value, key_flag, value_flags, validation) def collect_errors(self, instance, typed_value, source="<<merged>>"): errors = super().collect_errors(instance, typed_value, self.value) # recursively validate the values in the map if isinstance(self.value, Mapping): for key, value in self.value.items(): errors.extend(value.collect_errors(instance, typed_value[key], source)) return errors def merge(self, parameters: Sequence[MapLoadedParameter]) -> MapLoadedParameter: # get all values up to and including first important_match # but if no important_match, then all matches are important_matches parameters = LoadedParameter._first_important_matches(parameters) # ensure all parameter values are Mappings for parameter in parameters: if not isinstance(parameter.value, Mapping): raise InvalidTypeError( self.name, parameter.value, parameter.source, parameter.value.__class__.__name__, self._type.__name__, ) # map keys with final values, # first key has higher precedence than later ones final_map = { key: value for parameter in reversed(parameters) for key, value in parameter.value.items() if parameter.value_flags.get(key) == ParameterFlag.final } # map each value by recursively calling merge on any entries with the same key, # last key has higher precedence than earlier ones grouped_map = {} for parameter in parameters: for key, value in parameter.value.items(): grouped_map.setdefault(key, []).append(value) merged_map = { key: values[0].merge(values) for key, values in grouped_map.items() } # update merged_map with 
final_map values merged_value = frozendict({**merged_map, **final_map}) # create new parameter for the merged values return MapLoadedParameter( self._name, merged_value, self._element_type, self.key_flag, self.value_flags, validation=self._validation, )
MapLoadedParameter
python
numba__numba
numba/tests/test_fancy_indexing.py
{ "start": 12745, "end": 19713 }
class ____(MemoryLeakMixin, TestCase): # Every case has exactly one, one-dimensional array, # otherwise it's not fancy indexing. shape = (5, 6, 7, 8, 9, 10) indexing_cases = [ # Slices + Integers (slice(4, 5), 3, np.array([0, 1, 3, 4, 2]), 1), (3, np.array([0,1,3,4,2]), slice(None), slice(4)), # Ellipsis + Integers (Ellipsis, 1, np.array([0,1,3,4,2])), (np.array([0,1,3,4,2]), 3, Ellipsis), # Ellipsis + Slices + Integers (Ellipsis, 1, np.array([0,1,3,4,2]), 3, slice(1,5)), (np.array([0,1,3,4,2]), 3, Ellipsis, slice(1,5)), # Boolean Arrays + Integers (slice(4, 5), 3, np.array([True, False, True, False, True, False, False]), 1), (3, np.array([True, False, True, False, True, False]), slice(None), slice(4)), ] def setUp(self): super().setUp() self.rng = np.random.default_rng(1) def generate_random_indices(self): N = min(self.shape) slice_choices = [slice(None, None, None), slice(1, N - 1, None), slice(0, None, 2), slice(N - 1, None, -2), slice(-N + 1, -1, None), slice(-1, -N, -2), slice(0, N - 1, None), slice(-1, -N, -2) ] integer_choices = list(np.arange(N)) indices = [] # Generate K random slice cases. The value of K is arbitrary, the intent is # to create plenty of variation. 
K = 20 for _ in range(K): array_idx = self.rng.integers(0, 5, size=15) # Randomly select 4 slices from our list curr_idx = self.rng.choice(slice_choices, size=4).tolist() # Replace one of the slice with the array index _array_idx = self.rng.choice(4) curr_idx[_array_idx] = array_idx indices.append(tuple(curr_idx)) # Generate K random integer cases for _ in range(K): array_idx = self.rng.integers(0, 5, size=15) # Randomly select 4 integers from our list curr_idx = self.rng.choice(integer_choices, size=4).tolist() # Replace one of the slice with the array index _array_idx = self.rng.choice(4) curr_idx[_array_idx] = array_idx indices.append(tuple(curr_idx)) # Generate K random ellipsis cases for _ in range(K): array_idx = self.rng.integers(0, 5, size=15) # Randomly select 4 slices from our list curr_idx = self.rng.choice(slice_choices, size=4).tolist() # Generate two seperate random indices, replace one with # array and second with Ellipsis _array_idx = self.rng.choice(4, size=2, replace=False) curr_idx[_array_idx[0]] = array_idx curr_idx[_array_idx[1]] = Ellipsis indices.append(tuple(curr_idx)) # Generate K random boolean cases for _ in range(K): array_idx = self.rng.integers(0, 5, size=15) # Randomly select 4 slices from our list curr_idx = self.rng.choice(slice_choices, size=4).tolist() # Replace one of the slice with the boolean array index _array_idx = self.rng.choice(4) bool_arr_shape = self.shape[_array_idx] curr_idx[_array_idx] = np.array( self.rng.choice(2, size=bool_arr_shape), dtype=bool ) indices.append(tuple(curr_idx)) return indices def check_getitem_indices(self, arr_shape, index): @njit def numba_get_item(array, idx): return array[idx] arr = np.random.randint(0, 11, size=arr_shape) get_item = numba_get_item.py_func orig_base = arr.base or arr expected = get_item(arr, index) got = numba_get_item(arr, index) # Sanity check: In advanced indexing, the result is always a copy. 
self.assertIsNot(expected.base, orig_base) # Note: Numba may not return the same array strides and # contiguity as NumPy self.assertEqual(got.shape, expected.shape) self.assertEqual(got.dtype, expected.dtype) np.testing.assert_equal(got, expected) # Check a copy was *really* returned by Numba self.assertFalse(np.may_share_memory(got, expected)) def check_setitem_indices(self, arr_shape, index): @njit def set_item(array, idx, item): array[idx] = item arr = np.random.randint(0, 11, size=arr_shape) src = arr[index] expected = np.zeros_like(arr) got = np.zeros_like(arr) set_item.py_func(expected, index, src) set_item(got, index, src) # Note: Numba may not return the same array strides and # contiguity as NumPy self.assertEqual(got.shape, expected.shape) self.assertEqual(got.dtype, expected.dtype) np.testing.assert_equal(got, expected) def test_getitem(self): # Cases with a combination of integers + other objects indices = self.indexing_cases.copy() # Cases with permutations of either integers or objects indices += self.generate_random_indices() for idx in indices: with self.subTest(idx=idx): self.check_getitem_indices(self.shape, idx) def test_setitem(self): # Cases with a combination of integers + other objects indices = self.indexing_cases.copy() # Cases with permutations of either integers or objects indices += self.generate_random_indices() for idx in indices: with self.subTest(idx=idx): self.check_setitem_indices(self.shape, idx) def test_unsupported_condition_exceptions(self): err_idx_cases = [ # Cases with multi-dimensional indexing array ('Multi-dimensional indices are not supported.', (0, 3, np.array([[1, 2], [2, 3]]))), # Cases with more than one indexing array ('Using more than one non-scalar array index is unsupported.', (0, 3, np.array([1, 2]), np.array([1, 2]))), # Cases with more than one indexing subspace # (The subspaces here are separated by slice(None)) ("Using more than one indexing subspace is unsupported." 
+ \ " An indexing subspace is a group of one or more consecutive" + \ " indices comprising integer or array types.", (0, np.array([1, 2]), slice(None), 3, 4)) ] for err, idx in err_idx_cases: with self.assertRaises(TypingError) as raises: self.check_getitem_indices(self.shape, idx) self.assertIn( err, str(raises.exception) ) if __name__ == '__main__': unittest.main()
TestFancyIndexingMultiDim
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-bagel/llama_index/readers/bagel/base.py
{ "start": 1531, "end": 5591 }
class ____(BaseReader): """Reader for Bagel files.""" def __init__(self, collection_name: str) -> None: """ Initialize BagelReader. Args: collection_name: Name of the collection to load from. Returns: None """ try: import bagel except ImportError: raise ImportError( "`bagel` package not found, please run `pip install bagel`" ) from bagel.config import Settings if not collection_name: raise ValueError("collection_name cannot be empty") self.collection_name = collection_name server_settings = Settings( bagel_api_impl="rest", bagel_server_host="api.bageldb.ai" ) self.client = bagel.Client(server_settings) self._collection = self.client.get_cluster(collection_name) def create_documents(self, results: Any) -> Any: """ Create documents from the results. Args: results: Results from the query. Returns: List of documents. """ documents = [] # create a list of results all_results = list( zip( results["ids"][0], results["documents"][0], results["embeddings"][0], results["metadatas"][0], ) ) # iterate through the results for result in all_results: # create a Llama Document document = Document( doc_id=result[0], text=result[1], embedding=result[2], metadata=result[3], ) documents.append(document) return documents def load_data( self, query_vector: Optional[OneOrMany[Embedding]] = None, query_texts: Optional[OneOrMany[Doc]] = None, limit: int = 10, where: Optional[Where] = None, where_document: Optional[WhereDocument] = None, include: Include = ["metadatas", "documents", "embeddings", "distances"], ) -> Any: """ Get the top n_results documents for provided query_embeddings or query_texts. Args: query_embeddings: The embeddings to get the closes neighbors of. Optional. query_texts: The document texts to get the closes neighbors of. Optional. n_results: The number of neighbors to return for each query. Optional. where: A Where type dict used to filter results by. Optional. where_document: A WhereDocument type dict used to filter. Optional. 
include: A list of what to include in the results. Optional. Returns: Llama Index Document(s) with the closest embeddings to the query_embeddings or query_texts. """ # get the results from the collection # If neither query_embeddings nor query_texts are provided, # or both are provided, raise an error if (query_vector is None and query_texts is None) or ( query_vector is not None and query_texts is not None ): raise ValueError( "You must provide either embeddings or texts to find, but not both" ) if where is None: where = {} if where_document is None: where_document = {} results = self._collection.find( query_embeddings=query_vector, query_texts=query_texts, n_results=limit, where=where, where_document=where_document, include=include, ) # check if there are results if not results: raise ValueError("No results found") # check if there are embeddings or documents if not results["embeddings"] and not results["documents"]: raise ValueError("No embeddings or documents found") # create documents from the results return self.create_documents(results)
BagelReader
python
allegroai__clearml
clearml/backend_api/services/v2_23/models.py
{ "start": 117931, "end": 120959 }
class ____(Request): """ Publish models :param ids: IDs of the models to publish :type ids: Sequence[str] :param force_publish_task: Publish the associated tasks (if exist) even if they are not in the 'stopped' state. Optional, the default value is False. :type force_publish_task: bool :param publish_tasks: Indicates that the associated tasks (if exist) should be published. Optional, the default value is True. :type publish_tasks: bool """ _service = "models" _action = "publish_many" _version = "2.23" _schema = { "definitions": {}, "properties": { "force_publish_task": { "description": "Publish the associated tasks (if exist) even if they are not in the 'stopped' state. Optional, the default value is False.", "type": "boolean", }, "ids": { "description": "IDs of the models to publish", "items": {"type": "string"}, "type": "array", }, "publish_tasks": { "description": "Indicates that the associated tasks (if exist) should be published. Optional, the default value is True.", "type": "boolean", }, }, "required": ["ids"], "type": "object", } def __init__( self, ids: List[str], force_publish_task: Optional[bool] = None, publish_tasks: Optional[bool] = None, **kwargs: Any ) -> None: super(PublishManyRequest, self).__init__(**kwargs) self.ids = ids self.force_publish_task = force_publish_task self.publish_tasks = publish_tasks @schema_property("ids") def ids(self) -> List[str]: return self._property_ids @ids.setter def ids(self, value: List[str]) -> None: if value is None: self._property_ids = None return self.assert_isinstance(value, "ids", (list, tuple)) self.assert_isinstance(value, "ids", six.string_types, is_array=True) self._property_ids = value @schema_property("force_publish_task") def force_publish_task(self) -> Optional[bool]: return self._property_force_publish_task @force_publish_task.setter def force_publish_task(self, value: Optional[bool]) -> None: if value is None: self._property_force_publish_task = None return self.assert_isinstance(value, 
"force_publish_task", (bool,)) self._property_force_publish_task = value @schema_property("publish_tasks") def publish_tasks(self) -> Optional[bool]: return self._property_publish_tasks @publish_tasks.setter def publish_tasks(self, value: Optional[bool]) -> None: if value is None: self._property_publish_tasks = None return self.assert_isinstance(value, "publish_tasks", (bool,)) self._property_publish_tasks = value
PublishManyRequest
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/model_query_return_annotation_extends.py
{ "start": 1297, "end": 1681 }
class ____: pass def test3_alarm1() -> Optional[Test3_C1]: return _test_source() def test3_alarm2() -> Optional[Test3_C2]: return _test_source() def test3_alarm3() -> Optional[Test3_C3]: return _test_source() def test3_alarm4() -> Optional[Test3_C4]: return _test_source() def test3_noalarm1() -> Union[Test3_C5, Test3_C1]: return _test_source()
Test3_C5
python
tiangolo__fastapi
docs_src/additional_responses/tutorial002.py
{ "start": 130, "end": 628 }
class ____(BaseModel): id: str value: str app = FastAPI() @app.get( "/items/{item_id}", response_model=Item, responses={ 200: { "content": {"image/png": {}}, "description": "Return the JSON item or an image.", } }, ) async def read_item(item_id: str, img: Union[bool, None] = None): if img: return FileResponse("image.png", media_type="image/png") else: return {"id": "foo", "value": "there goes my hero"}
Item
python
Textualize__textual
src/textual/containers.py
{ "start": 6073, "end": 6280 }
class ____(Widget): """A container which aligns children on the Y axis.""" DEFAULT_CSS = """ Middle { align-vertical: middle; width: auto; height: 1fr; } """
Middle
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/overloadImpl1.py
{ "start": 3803, "end": 4350 }
class ____(Generic[T]): ... @overload def func14(target: Callable[..., Awaitable[T]]) -> ClassG[T]: ... @overload def func14(target: Callable[..., T]) -> ClassG[T]: ... def func14( target: Callable[..., Awaitable[T]] | Callable[..., T], ) -> ClassG[T]: ... @overload def func15(client_id: str, client_secret: str, /) -> None: ... @overload def func15(client_id: str, client_secret: str) -> None: ... # This should generate an error because some of the keyword arguments are not present. def func15(*creds: str) -> None: pass
ClassG
python
bokeh__bokeh
src/bokeh/models/axes.py
{ "start": 2745, "end": 8379 }
class ____(GuideRenderer): ''' A base class that defines common properties for all axis types. ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) dimension = Either(Auto, Enum(0, 1), default="auto", help=""" This allows to override the inferred dimensions in contexts that support this. This property has no effect when an axes is used as a frame axis. .. note:: This property is experimental and may change at any point. """) face = Either(Auto, Enum("front", "back"))(default="auto", help=""" The direction toward which the axis will face. .. note:: This property is experimental and may change at any point. """) bounds = Either(Auto, Tuple(Float, Float), Tuple(Datetime, Datetime), help=""" Bounds for the rendered axis. If unset, the axis will span the entire plot in the given dimension. """) ticker = Instance(Ticker, help=""" A Ticker to use for computing locations of axis components. The property may also be passed a sequence of floating point numbers as a shorthand for creating and configuring a ``FixedTicker``, e.g. the following code .. code-block:: python from bokeh.plotting import figure p = figure() p.xaxis.ticker = [10, 20, 37.4] is equivalent to: .. code-block:: python from bokeh.plotting import figure from bokeh.models import FixedTicker p = figure() p.xaxis.ticker = FixedTicker(ticks=[10, 20, 37.4]) """).accepts(Seq(Float), lambda ticks: FixedTicker(ticks=ticks)) formatter = Instance(TickFormatter, help=""" A ``TickFormatter`` to use for formatting the visual appearance of ticks. """) axis_label = Nullable(TextLike, help=""" A text or LaTeX notation label for the axis, displayed parallel to the axis rule. """) axis_label_standoff = Int(default=5, help=""" The distance in pixels that the axis labels should be offset from the tick labels or axis. 
""") axis_label_standoff_mode = Enum(AxisLabelStandoffMode, default="tick_labels", help=""" The reference point for the distance of the ``axis_label_standoff``. """) axis_label_orientation = Either(Enum(LabelOrientation), Float)(default="parallel", help=""" What direction the axis label text should be oriented. If a number is supplied, the angle of the text is measured from horizontal. """) axis_label_align = Enum(Align, default="center", help=""" The alignment of axis label along the axis. """) axis_label_props = Include(ScalarTextProps, prefix="axis_label", help=""" The {prop} of the axis label. """) axis_label_text_font_size = Override(default="13px") axis_label_text_font_style = Override(default="italic") major_label_standoff = Int(default=5, help=""" The distance in pixels that the major tick labels should be offset from the associated ticks. """) major_label_orientation = Either(Enum(LabelOrientation), Float)(default="horizontal", help=""" What direction the major label text should be oriented. If a number is supplied, the angle of the text is measured from horizontal. """) major_label_overrides = Dict(Either(Float, String), TextLike, default={}, help=""" Provide explicit tick label values for specific tick locations that override normal formatting. """) major_label_policy = Instance(LabelingPolicy, default=InstanceDefault(AllLabels), help=""" Allows to filter out labels, e.g. declutter labels to avoid overlap. """) major_label_props = Include(ScalarTextProps, prefix="major_label", help=""" The {prop} of the major tick labels. """) major_label_text_align = Override(default="center") major_label_text_baseline = Override(default="alphabetic") major_label_text_font_size = Override(default="11px") axis_props = Include(ScalarLineProps, prefix="axis", help=""" The {prop} of the axis line. """) major_tick_props = Include(ScalarLineProps,prefix="major_tick", help=""" The {prop} of the major ticks. 
""") major_tick_in = Int(default=2, help=""" The distance in pixels that major ticks should extend into the main plot area. """) major_tick_out = Int(default=6, help=""" The distance in pixels that major ticks should extend out of the main plot area. """) minor_tick_props = Include(ScalarLineProps, prefix="minor_tick", help=""" The {prop} of the minor ticks. """) minor_tick_in = Int(default=0, help=""" The distance in pixels that minor ticks should extend into the main plot area. """) minor_tick_out = Int(default=4, help=""" The distance in pixels that major ticks should extend out of the main plot area. """) fixed_location = Either(Null, Float, Factor, help=""" Set to specify a fixed coordinate location to draw the axis. The direction of ticks and major labels is determined by the side panel that the axis belongs to. .. note:: Axes labels are suppressed when axes are positioned at fixed locations inside the central plot area. """) background_fill_props = Include(ScalarFillProps, prefix="background", help=""" The {prop} of the axis background. """) background_hatch_props = Include(ScalarHatchProps, prefix="background", help=""" The {prop} of the axis background. """) background_fill_color = Override(default=None) @abstract
Axis
python
huggingface__transformers
examples/pytorch/text-classification/run_classification.py
{ "start": 7964, "end": 32736 }
class ____: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `hf auth login` (stored in `~/.huggingface`)." ) }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." 
) }, ) ignore_mismatched_sizes: bool = field( default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, ) def get_label_list(raw_dataset, split="train") -> list[str]: """Get the list of labels from a multi-label dataset""" if isinstance(raw_dataset[split]["label"][0], list): label_list = [label for sample in raw_dataset[split]["label"] for label in sample] label_list = list(set(label_list)) else: label_list = raw_dataset[split].unique("label") # we will treat the label list as a list of string instead of int, consistent with model.config.label2id label_list = [str(label) for label in label_list] return label_list def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files, or specify a dataset name # to load from huggingface/datasets. In ether case, you can specify a the key of the column(s) containing the text and # the key of the column containing the label. If multiple columns are specified for the text, they will be joined together # for the actual text value. # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # Try print some info about the dataset logger.info(f"Dataset loaded: {raw_datasets}") logger.info(raw_datasets) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. 
data_files = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file if training_args.do_predict: if data_args.test_file is not None: train_extension = data_args.train_file.split(".")[-1] test_extension = data_args.test_file.split(".")[-1] assert test_extension == train_extension, ( "`test_file` should have the same extension (csv or json) as `train_file`." ) data_files["test"] = data_args.test_file else: raise ValueError("Need either a dataset name or a test file for `do_predict`.") for key in data_files: logger.info(f"load a local file for {key}: {data_files[key]}") if data_args.train_file.endswith(".csv"): # Loading a dataset from local csv files raw_datasets = load_dataset( "csv", data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) else: # Loading a dataset from local json files raw_datasets = load_dataset( "json", data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets. 
if data_args.remove_splits is not None: for split in data_args.remove_splits.split(","): logger.info(f"removing split {split}") raw_datasets.pop(split) if data_args.train_split_name is not None: logger.info(f"using {data_args.train_split_name} as train set") raw_datasets["train"] = raw_datasets[data_args.train_split_name] raw_datasets.pop(data_args.train_split_name) if data_args.validation_split_name is not None: logger.info(f"using {data_args.validation_split_name} as validation set") raw_datasets["validation"] = raw_datasets[data_args.validation_split_name] raw_datasets.pop(data_args.validation_split_name) if data_args.test_split_name is not None: logger.info(f"using {data_args.test_split_name} as test set") raw_datasets["test"] = raw_datasets[data_args.test_split_name] raw_datasets.pop(data_args.test_split_name) if data_args.remove_columns is not None: for split in raw_datasets: for column in data_args.remove_columns.split(","): logger.info(f"removing column {column} from split {split}") raw_datasets[split] = raw_datasets[split].remove_columns(column) if data_args.label_column_name is not None and data_args.label_column_name != "label": for key in raw_datasets: raw_datasets[key] = raw_datasets[key].rename_column(data_args.label_column_name, "label") # Trying to have good defaults here, don't hesitate to tweak to your needs. 
is_regression = ( raw_datasets["train"].features["label"].dtype in ["float32", "float64"] if data_args.do_regression is None else data_args.do_regression ) is_multi_label = False if is_regression: label_list = None num_labels = 1 # regression requires float as label type, let's cast it if needed for split in raw_datasets: if raw_datasets[split].features["label"].dtype not in ["float32", "float64"]: logger.warning( f"Label type for {split} set to float32, was {raw_datasets[split].features['label'].dtype}" ) features = raw_datasets[split].features features.update({"label": Value("float32")}) try: raw_datasets[split] = raw_datasets[split].cast(features) except TypeError as error: logger.error( f"Unable to cast {split} set to float32, please check the labels are correct, or maybe try with --do_regression=False" ) raise error else: # classification if raw_datasets["train"].features["label"].dtype == "list": # multi-label classification is_multi_label = True logger.info("Label type is list, doing multi-label classification") # Trying to find the number of labels in a multi-label classification task # We have to deal with common cases that labels appear in the training set but not in the validation/test set. # So we build the label list from the union of labels in train/val/test. 
label_list = get_label_list(raw_datasets, split="train") for split in ["validation", "test"]: if split in raw_datasets: val_or_test_labels = get_label_list(raw_datasets, split=split) diff = set(val_or_test_labels).difference(set(label_list)) if len(diff) > 0: # add the labels that appear in val/test but not in train, throw a warning logger.warning( f"Labels {diff} in {split} set but not in training set, adding them to the label list" ) label_list += list(diff) # if label is -1, we throw a warning and remove it from the label list for label in label_list: if label == -1: logger.warning("Label -1 found in label list, removing it.") label_list.remove(label) label_list.sort() num_labels = len(label_list) if num_labels <= 1: raise ValueError("You need more than one label to do classification.") # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task="text-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) if is_regression: config.problem_type = "regression" logger.info("setting problem type to regression") elif is_multi_label: config.problem_type = "multi_label_classification" logger.info("setting problem type to multi label classification") else: config.problem_type = "single_label_classification" logger.info("setting problem type to single label classification") tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) model = 
AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, ) # Padding strategy if data_args.pad_to_max_length: padding = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # for training ,we will update the config with label infos, # if do_train is not set, we will use the label infos in the config if training_args.do_train and not is_regression: # classification, training label_to_id = {v: i for i, v in enumerate(label_list)} # update config with label infos if model.config.label2id != label_to_id: logger.warning( "The label2id key in the model config.json is not equal to the label2id key of this " "run. You can ignore this if you are doing finetuning." ) model.config.label2id = label_to_id model.config.id2label = {id: label for label, id in label_to_id.items()} elif not is_regression: # classification, but not training logger.info("using label infos in the model config") logger.info(f"label2id: {model.config.label2id}") label_to_id = model.config.label2id else: # regression label_to_id = None if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." 
) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def multi_labels_to_ids(labels: list[str]) -> list[float]: ids = [0.0] * len(label_to_id) # BCELoss requires float as target type for label in labels: ids[label_to_id[label]] = 1.0 return ids def preprocess_function(examples): if data_args.text_column_names is not None: text_column_names = data_args.text_column_names.split(",") # join together text columns into "sentence" column examples["sentence"] = examples[text_column_names[0]] for column in text_column_names[1:]: for i in range(len(examples[column])): examples["sentence"][i] += data_args.text_column_delimiter + examples[column][i] # Tokenize the texts result = tokenizer(examples["sentence"], padding=padding, max_length=max_seq_length, truncation=True) if label_to_id is not None and "label" in examples: if is_multi_label: result["label"] = [multi_labels_to_ids(l) for l in examples["label"]] else: result["label"] = [(label_to_id[str(l)] if l != -1 else -1) for l in examples["label"]] return result # Running the preprocessing pipeline on all the datasets with training_args.main_process_first(desc="dataset map pre-processing"): raw_datasets = raw_datasets.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset.") train_dataset = raw_datasets["train"] if data_args.shuffle_train_dataset: logger.info("Shuffling the training dataset") train_dataset = train_dataset.shuffle(seed=data_args.shuffle_seed) if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: if "test" not in raw_datasets 
and "test_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation or test dataset if validation is not defined.") else: logger.warning("Validation dataset not found. Falling back to test dataset for validation.") eval_dataset = raw_datasets["test"] else: eval_dataset = raw_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = raw_datasets["test"] # remove label column if it exists if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") if data_args.metric_name is not None: metric = ( evaluate.load(data_args.metric_name, config_name="multilabel", cache_dir=model_args.cache_dir) if is_multi_label else evaluate.load(data_args.metric_name, cache_dir=model_args.cache_dir) ) logger.info(f"Using metric {data_args.metric_name} for evaluation.") else: if is_regression: metric = evaluate.load("mse", cache_dir=model_args.cache_dir) logger.info("Using mean squared error (mse) as regression score, you can use --metric_name to overwrite.") else: if is_multi_label: metric = evaluate.load("f1", config_name="multilabel", cache_dir=model_args.cache_dir) logger.info( "Using multilabel F1 for multi-label classification task, you can use --metric_name to overwrite." 
) else: metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir) logger.info("Using accuracy as classification score, you can use --metric_name to overwrite.") def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions if is_regression: preds = np.squeeze(preds) result = metric.compute(predictions=preds, references=p.label_ids) elif is_multi_label: preds = np.array([np.where(p > 0, 1, 0) for p in preds]) # convert logits to multi-hot encoding # Micro F1 is commonly used in multi-label classification result = metric.compute(predictions=preds, references=p.label_ids, average="micro") else: preds = np.argmax(preds, axis=1) result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result # Data collator will default to DataCollatorWithPadding when the tokenizer is passed to Trainer, so we change it if # we already did the padding. 
if data_args.pad_to_max_length: data_collator = default_data_collator elif training_args.fp16: data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) else: data_collator = None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, processing_class=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate(eval_dataset=eval_dataset) max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Predict ***") # Removing the `label` columns if exists because it might contains -1 and Trainer won't like that. if "label" in predict_dataset.features: predict_dataset = predict_dataset.remove_columns("label") predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions if is_regression: predictions = np.squeeze(predictions) elif is_multi_label: # Convert logits to multi-hot encoding. 
We compare the logits to 0 instead of 0.5, because the sigmoid is not applied. # You can also pass `preprocess_logits_for_metrics=lambda logits, labels: nn.functional.sigmoid(logits)` to the Trainer # and set p > 0.5 below (less efficient in this case) predictions = np.array([np.where(p > 0, 1, 0) for p in predictions]) else: predictions = np.argmax(predictions, axis=1) output_predict_file = os.path.join(training_args.output_dir, "predict_results.txt") if trainer.is_world_process_zero(): with open(output_predict_file, "w") as writer: logger.info("***** Predict results *****") writer.write("index\tprediction\n") for index, item in enumerate(predictions): if is_regression: writer.write(f"{index}\t{item:3.3f}\n") elif is_multi_label: # recover from multi-hot encoding item = [label_list[i] for i in range(len(item)) if item[i] == 1] writer.write(f"{index}\t{item}\n") else: item = label_list[item] writer.write(f"{index}\t{item}\n") logger.info(f"Predict results saved at {output_predict_file}") kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
ModelArguments
python
sqlalchemy__sqlalchemy
test/typing/plain_files/orm/relationship.py
{ "start": 986, "end": 1242 }
class ____(Base): __tablename__ = "group" id: Mapped[int] = mapped_column(primary_key=True) name: Mapped[str] = mapped_column() addresses_style_one_anno_only: Mapped[List["User"]] addresses_style_two_anno_only: Mapped[Set["User"]]
Group
python
spack__spack
lib/spack/spack/vendor/ruamel/yaml/constructor.py
{ "start": 28327, "end": 38288 }
class ____(SafeConstructor): def construct_python_str(self, node): # type: (Any) -> Any return self.construct_scalar(node) def construct_python_unicode(self, node): # type: (Any) -> Any return self.construct_scalar(node) def construct_python_bytes(self, node): # type: (Any) -> Any try: value = self.construct_scalar(node).encode('ascii') except UnicodeEncodeError as exc: raise ConstructorError( None, None, _F('failed to convert base64 data into ascii: {exc!s}', exc=exc), node.start_mark, ) try: return base64.decodebytes(value) except binascii.Error as exc: raise ConstructorError( None, None, _F('failed to decode base64 data: {exc!s}', exc=exc), node.start_mark, ) def construct_python_long(self, node): # type: (Any) -> int val = self.construct_yaml_int(node) return val def construct_python_complex(self, node): # type: (Any) -> Any return complex(self.construct_scalar(node)) def construct_python_tuple(self, node): # type: (Any) -> Any return tuple(self.construct_sequence(node)) def find_python_module(self, name, mark): # type: (Any, Any) -> Any if not name: raise ConstructorError( 'while constructing a Python module', mark, 'expected non-empty name appended to the tag', mark, ) try: __import__(name) except ImportError as exc: raise ConstructorError( 'while constructing a Python module', mark, _F('cannot find module {name!r} ({exc!s})', name=name, exc=exc), mark, ) return sys.modules[name] def find_python_name(self, name, mark): # type: (Any, Any) -> Any if not name: raise ConstructorError( 'while constructing a Python object', mark, 'expected non-empty name appended to the tag', mark, ) if '.' 
in name: lname = name.split('.') lmodule_name = lname lobject_name = [] # type: List[Any] while len(lmodule_name) > 1: lobject_name.insert(0, lmodule_name.pop()) module_name = '.'.join(lmodule_name) try: __import__(module_name) # object_name = '.'.join(object_name) break except ImportError: continue else: module_name = builtins_module lobject_name = [name] try: __import__(module_name) except ImportError as exc: raise ConstructorError( 'while constructing a Python object', mark, _F( 'cannot find module {module_name!r} ({exc!s})', module_name=module_name, exc=exc, ), mark, ) module = sys.modules[module_name] object_name = '.'.join(lobject_name) obj = module while lobject_name: if not hasattr(obj, lobject_name[0]): raise ConstructorError( 'while constructing a Python object', mark, _F( 'cannot find {object_name!r} in the module {module_name!r}', object_name=object_name, module_name=module.__name__, ), mark, ) obj = getattr(obj, lobject_name.pop(0)) return obj def construct_python_name(self, suffix, node): # type: (Any, Any) -> Any value = self.construct_scalar(node) if value: raise ConstructorError( 'while constructing a Python name', node.start_mark, _F('expected the empty value, but found {value!r}', value=value), node.start_mark, ) return self.find_python_name(suffix, node.start_mark) def construct_python_module(self, suffix, node): # type: (Any, Any) -> Any value = self.construct_scalar(node) if value: raise ConstructorError( 'while constructing a Python module', node.start_mark, _F('expected the empty value, but found {value!r}', value=value), node.start_mark, ) return self.find_python_module(suffix, node.start_mark) def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False): # type: (Any, Any, Any, Any, bool) -> Any if not args: args = [] if not kwds: kwds = {} cls = self.find_python_name(suffix, node.start_mark) if newobj and isinstance(cls, type): return cls.__new__(cls, *args, **kwds) else: return cls(*args, **kwds) def 
set_python_instance_state(self, instance, state): # type: (Any, Any) -> None if hasattr(instance, '__setstate__'): instance.__setstate__(state) else: slotstate = {} # type: Dict[Any, Any] if isinstance(state, tuple) and len(state) == 2: state, slotstate = state if hasattr(instance, '__dict__'): instance.__dict__.update(state) elif state: slotstate.update(state) for key, value in slotstate.items(): setattr(instance, key, value) def construct_python_object(self, suffix, node): # type: (Any, Any) -> Any # Format: # !!python/object:module.name { ... state ... } instance = self.make_python_instance(suffix, node, newobj=True) self.recursive_objects[node] = instance yield instance deep = hasattr(instance, '__setstate__') state = self.construct_mapping(node, deep=deep) self.set_python_instance_state(instance, state) def construct_python_object_apply(self, suffix, node, newobj=False): # type: (Any, Any, bool) -> Any # Format: # !!python/object/apply # (or !!python/object/new) # args: [ ... arguments ... ] # kwds: { ... keywords ... } # state: ... state ... # listitems: [ ... listitems ... ] # dictitems: { ... dictitems ... } # or short format: # !!python/object/apply [ ... arguments ... ] # The difference between !!python/object/apply and !!python/object/new # is how an object is created, check make_python_instance for details. 
if isinstance(node, SequenceNode): args = self.construct_sequence(node, deep=True) kwds = {} # type: Dict[Any, Any] state = {} # type: Dict[Any, Any] listitems = [] # type: List[Any] dictitems = {} # type: Dict[Any, Any] else: value = self.construct_mapping(node, deep=True) args = value.get('args', []) kwds = value.get('kwds', {}) state = value.get('state', {}) listitems = value.get('listitems', []) dictitems = value.get('dictitems', {}) instance = self.make_python_instance(suffix, node, args, kwds, newobj) if bool(state): self.set_python_instance_state(instance, state) if bool(listitems): instance.extend(listitems) if bool(dictitems): for key in dictitems: instance[key] = dictitems[key] return instance def construct_python_object_new(self, suffix, node): # type: (Any, Any) -> Any return self.construct_python_object_apply(suffix, node, newobj=True) Constructor.add_constructor('tag:yaml.org,2002:python/none', Constructor.construct_yaml_null) Constructor.add_constructor('tag:yaml.org,2002:python/bool', Constructor.construct_yaml_bool) Constructor.add_constructor('tag:yaml.org,2002:python/str', Constructor.construct_python_str) Constructor.add_constructor( 'tag:yaml.org,2002:python/unicode', Constructor.construct_python_unicode ) Constructor.add_constructor( 'tag:yaml.org,2002:python/bytes', Constructor.construct_python_bytes ) Constructor.add_constructor('tag:yaml.org,2002:python/int', Constructor.construct_yaml_int) Constructor.add_constructor('tag:yaml.org,2002:python/long', Constructor.construct_python_long) Constructor.add_constructor('tag:yaml.org,2002:python/float', Constructor.construct_yaml_float) Constructor.add_constructor( 'tag:yaml.org,2002:python/complex', Constructor.construct_python_complex ) Constructor.add_constructor('tag:yaml.org,2002:python/list', Constructor.construct_yaml_seq) Constructor.add_constructor( 'tag:yaml.org,2002:python/tuple', Constructor.construct_python_tuple ) Constructor.add_constructor('tag:yaml.org,2002:python/dict', 
Constructor.construct_yaml_map) Constructor.add_multi_constructor( 'tag:yaml.org,2002:python/name:', Constructor.construct_python_name ) Constructor.add_multi_constructor( 'tag:yaml.org,2002:python/module:', Constructor.construct_python_module ) Constructor.add_multi_constructor( 'tag:yaml.org,2002:python/object:', Constructor.construct_python_object ) Constructor.add_multi_constructor( 'tag:yaml.org,2002:python/object/apply:', Constructor.construct_python_object_apply ) Constructor.add_multi_constructor( 'tag:yaml.org,2002:python/object/new:', Constructor.construct_python_object_new )
Constructor
python
scipy__scipy
scipy/sparse/tests/test_base.py
{ "start": 222332, "end": 223038 }
class ____(_NonCanonicalMixin): def _arg1_for_noncanonical(self, M, sorted_indices=None): """Return non-canonical constructor arg1 equivalent to M""" data, row, col = _same_sum_duplicate(M.data, M.row, M.col) return data, (row, col) def _insert_explicit_zero(self, M, i, j): M.data = np.r_[M.data.dtype.type(0), M.data] M.row = np.r_[M.row.dtype.type(i), M.row] M.col = np.r_[M.col.dtype.type(j), M.col] return M def test_setdiag_noncanonical(self): m = self.spcreator(np.eye(3)) m.sum_duplicates() m.setdiag([3, 2], k=1) m.sum_duplicates() assert_(np.all(np.diff(m.col) >= 0))
COONonCanonicalMixin
python
h5py__h5py
h5py/tests/test_attrs_data.py
{ "start": 9898, "end": 10291 }
class ____(BaseAttrs): """ Ensure failed attribute writes don't leave garbage behind. """ def test_write(self): """ ValueError on string write wipes out attribute """ s = b"Hello\x00Hello" with self.assertRaises(ValueError): self.f.attrs["x"] = s with self.assertRaises(KeyError): self.f.attrs["x"]
TestWriteException