Columns (in record order; each record below lists its fields in this order):
  language    string (1 distinct value)
  repo        string (346 distinct values)
  path        string (length 6 to 201)
  class_span  dict with "start"/"end" offsets of the class in the source file
  source      string (length 21 to 2.38M); the class definition with its name masked as ____
  target      string (length 1 to 96); the masked class name
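A minimal sketch of how one record of this dump can be consumed, assuming each record is exposed as a plain Python dict with the columns listed above; the record literal is abridged from the first row below (falconry__falcon), and everything else follows directly from the schema.

# Minimal sketch: reconstructing the original class from one record.
# Assumes records are dicts keyed by the column names above; the
# "source" value here is abridged from the first row of this dump.
record = {
    "language": "python",
    "repo": "falconry__falcon",
    "path": "tests/test_inspect.py",
    "class_span": {"start": 1678, "end": 9629},  # offsets of the class in the file
    "source": "class ____:\n    def test_empty_app(self, asgi): ...",  # abridged
    "target": "TestInspectApp",
}

# The task implied by the schema: predict `target` from `source`, where the
# class name has been replaced with the four-underscore mask.
restored = record["source"].replace("____", record["target"], 1)
assert restored.startswith("class TestInspectApp:")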
python
falconry__falcon
tests/test_inspect.py
{ "start": 1678, "end": 9629 }
class ____:
    def test_empty_app(self, asgi):
        ai = inspect.inspect_app(get_app(asgi, False))

        assert ai.routes == []
        assert ai.middleware.middleware_tree.request == []
        assert ai.middleware.middleware_tree.resource == []
        assert ai.middleware.middleware_tree.response == []
        assert ai.middleware.middleware_classes == []
        assert ai.middleware.independent is True
        assert ai.static_routes == []
        assert ai.sinks == []
        assert len(ai.error_handlers) == 4 if asgi else 3
        assert ai.asgi is asgi

    def test_dependent_middleware(self, asgi):
        app = get_app(asgi, cors=False, independent_middleware=False)
        ai = inspect.inspect_app(app)
        assert ai.middleware.independent is False

    def test_app(self, asgi):
        ai = inspect.inspect_app(make_app_async() if asgi else make_app())

        assert len(ai.routes) == 3
        assert len(ai.middleware.middleware_tree.request) == 2
        assert len(ai.middleware.middleware_tree.resource) == 1
        assert len(ai.middleware.middleware_tree.response) == 3
        assert len(ai.middleware.middleware_classes) == 3
        assert len(ai.static_routes) == 2
        assert len(ai.sinks) == 2
        assert len(ai.error_handlers) == 5 if asgi else 4
        assert ai.asgi is asgi

    def check_route(self, asgi, r, p, cn, ml, fnt):
        assert isinstance(r, inspect.RouteInfo)
        assert r.path == p
        if asgi:
            cn += 'Async'
        assert r.class_name == cn
        assert '_inspect_fixture.py' in r.source_info

        for m in r.methods:
            assert isinstance(m, inspect.RouteMethodInfo)
            internal = '_inspect_fixture.py' not in m.source_info
            assert m.internal is internal
            if not internal:
                assert m.method in ml
                assert '_inspect_fixture.py' in m.source_info
                assert m.function_name == fnt.format(m.method).lower()

    def test_routes(self, asgi):
        routes = inspect.inspect_routes(make_app_async() if asgi else make_app())

        self.check_route(
            asgi, routes[0], '/foo', 'MyResponder', ['GET', 'POST', 'DELETE'], 'on_{}'
        )
        self.check_route(
            asgi,
            routes[1],
            '/foo/{id}',
            'MyResponder',
            ['GET', 'PUT', 'DELETE'],
            'on_{}_id',
        )
        self.check_route(
            asgi, routes[2], '/bar', 'OtherResponder', ['POST'], 'on_{}_id'
        )

    def test_routes_empty_paths(self, asgi):
        app = get_app(asgi)
        r = i_f.MyResponderAsync() if asgi else i_f.MyResponder()
        app.add_route('/foo/bar/baz', r)
        routes = inspect.inspect_routes(app)
        assert len(routes) == 1
        self.check_route(
            asgi,
            routes[0],
            '/foo/bar/baz',
            'MyResponder',
            ['GET', 'POST', 'DELETE'],
            'on_{}',
        )

    def test_static_routes(self, asgi):
        routes = inspect.inspect_static_routes(make_app_async() if asgi else make_app())
        assert all(isinstance(sr, inspect.StaticRouteInfo) for sr in routes)
        assert routes[-1].prefix == '/fal/'
        assert routes[-1].directory == os.path.abspath('falcon')
        assert routes[-1].fallback_filename is None
        assert routes[-2].prefix == '/tes/'
        assert routes[-2].directory == str(HERE)
        assert routes[-2].fallback_filename.endswith('conftest.py')

    def test_sink(self, asgi):
        sinks = inspect.inspect_sinks(make_app_async() if asgi else make_app())
        assert all(isinstance(s, inspect.SinkInfo) for s in sinks)
        assert sinks[-1].prefix == '/sink_fn'
        assert sinks[-1].name == 'sinkFn'
        if not asgi:
            assert '_inspect_fixture.py' in sinks[-1].source_info
        assert sinks[-2].prefix == '/sink_cls'
        assert sinks[-2].name == 'SinkClass'
        if not asgi:
            assert '_inspect_fixture.py' in sinks[-2].source_info

    @pytest.mark.skipif(sys.version_info < (3, 6), reason='dict order is not stable')
    def test_error_handler(self, asgi):
        errors = inspect.inspect_error_handlers(
            make_app_async() if asgi else make_app()
        )
        assert all(isinstance(e, inspect.ErrorHandlerInfo) for e in errors)
        assert errors[-1].error == 'RuntimeError'
        assert (
            errors[-1].name == 'my_error_handler_async' if asgi else 'my_error_handler'
        )
        assert '_inspect_fixture.py' in errors[-1].source_info
        assert errors[-1].internal is False
        for eh in errors[:-1]:
            assert eh.internal
            assert eh.error in (
                'WebSocketDisconnected',
                'Exception',
                'HTTPStatus',
                'HTTPError',
            )

    def test_middleware(self, asgi):
        mi = inspect.inspect_middleware(make_app_async() if asgi else make_app())

        def test(m, cn, ml, inte):
            assert isinstance(m, inspect.MiddlewareClassInfo)
            assert m.name == cn
            if inte:
                assert '_inspect_fixture.py' not in m.source_info
            else:
                assert '_inspect_fixture.py' in m.source_info

            for mm in m.methods:
                assert isinstance(mm, inspect.MiddlewareMethodInfo)
                if inte:
                    assert '_inspect_fixture.py' not in mm.source_info
                else:
                    assert '_inspect_fixture.py' in mm.source_info
                assert mm.function_name in ml

        test(
            mi.middleware_classes[0],
            'CORSMiddleware',
            ['process_response_async'] if asgi else ['process_response'],
            True,
        )
        test(
            mi.middleware_classes[1],
            'MyMiddlewareAsync' if asgi else 'MyMiddleware',
            ['process_request', 'process_resource', 'process_response'],
            False,
        )
        test(
            mi.middleware_classes[2],
            'OtherMiddlewareAsync' if asgi else 'OtherMiddleware',
            ['process_request', 'process_resource', 'process_response'],
            False,
        )

    def test_middleware_tree(self, asgi):
        mi = inspect.inspect_middleware(make_app_async() if asgi else make_app())

        def test(tl, names, cls):
            for t, n, c in zip(tl, names, cls):
                assert isinstance(t, inspect.MiddlewareTreeItemInfo)
                assert t.name == n
                assert t.class_name == c

        assert isinstance(mi.middleware_tree, inspect.MiddlewareTreeInfo)

        test(
            mi.middleware_tree.request,
            ['process_request'] * 2,
            [n + 'Async' if asgi else n for n in ['MyMiddleware', 'OtherMiddleware']],
        )
        test(
            mi.middleware_tree.resource,
            ['process_resource'],
            ['MyMiddlewareAsync' if asgi else 'MyMiddleware'],
        )
        test(
            mi.middleware_tree.response,
            [
                'process_response',
                'process_response',
                'process_response_async' if asgi else 'process_response',
            ],
            [
                'OtherMiddlewareAsync' if asgi else 'OtherMiddleware',
                'MyMiddlewareAsync' if asgi else 'MyMiddleware',
                'CORSMiddleware',
            ],
        )


def test_route_method_info_suffix():
    ri = inspect.RouteMethodInfo('foo', '', 'on_get', False)
    assert ri.suffix == ''
    ri = inspect.RouteMethodInfo('foo', '', 'on_get_suffix', False)
    assert ri.suffix == 'suffix'
    ri = inspect.RouteMethodInfo('foo', '', 'on_get_multiple_underscores_suffix', False)
    assert ri.suffix == 'multiple_underscores_suffix'
    ri = inspect.RouteMethodInfo('foo', '', 'some_other_fn_name', False)
    assert ri.suffix == ''
TestInspectApp
python
ansible__ansible
test/units/module_utils/common/test_utils.py
{ "start": 287, "end": 1783 }
class ____:
    class Base:
        pass

    class BranchI(Base):
        pass

    class BranchII(Base):
        pass

    class BranchIA(BranchI):
        pass

    class BranchIB(BranchI):
        pass

    class BranchIIA(BranchII):
        pass

    class BranchIIB(BranchII):
        pass

    class MultipleInheritanceBase:
        pass

    class MultipleInheritanceBranchI(MultipleInheritanceBase):
        pass

    class MultipleInheritanceBranchII(MultipleInheritanceBase):
        pass

    class MultipleInheritanceChild(MultipleInheritanceBranchI, MultipleInheritanceBranchII):
        pass

    def test_bottom_level(self):
        assert get_all_subclasses(self.BranchIIB) == set()

    def test_one_inheritance(self):
        assert set(get_all_subclasses(self.BranchII)) == set([self.BranchIIA, self.BranchIIB])

    def test_toplevel(self):
        assert set(get_all_subclasses(self.Base)) == set([self.BranchI, self.BranchII, self.BranchIA, self.BranchIB, self.BranchIIA, self.BranchIIB])

    def test_multiple_inheritance(self) -> None:
        assert get_all_subclasses(self.MultipleInheritanceBase) == {self.MultipleInheritanceBranchI, self.MultipleInheritanceBranchII, self.MultipleInheritanceChild}
TestGetAllSubclasses
python
huggingface__transformers
src/transformers/models/distilbert/modeling_distilbert.py
{ "start": 16819, "end": 21693 }
class ____(DistilBertPreTrainedModel):
    _tied_weights_keys = {"vocab_projector.weight": "distilbert.embeddings.word_embeddings.weight"}

    def __init__(self, config: PreTrainedConfig):
        super().__init__(config)

        self.activation = get_activation(config.activation)

        self.distilbert = DistilBertModel(config)
        self.vocab_transform = nn.Linear(config.dim, config.dim)
        self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
        self.vocab_projector = nn.Linear(config.dim, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

        self.mlm_loss_fct = nn.CrossEntropyLoss()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embedding matrix. If position embeddings are learned, increasing the size
                will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
                end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
                size will add correct vectors at the end following the position encoding algorithm, whereas reducing
                the size will remove vectors from the end.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    def get_output_embeddings(self) -> nn.Module:
        return self.vocab_projector

    def set_output_embeddings(self, new_embeddings: nn.Module):
        self.vocab_projector = new_embeddings

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[MaskedLMOutput, tuple[torch.Tensor, ...]]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        dlbrt_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            position_ids=position_ids,
            return_dict=True,
            **kwargs,
        )
        hidden_states = dlbrt_output[0]  # (bs, seq_length, dim)
        prediction_logits = self.vocab_transform(hidden_states)  # (bs, seq_length, dim)
        prediction_logits = self.activation(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_layer_norm(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_projector(prediction_logits)  # (bs, seq_length, vocab_size)

        mlm_loss = None
        if labels is not None:
            mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))

        return MaskedLMOutput(
            loss=mlm_loss,
            logits=prediction_logits,
            hidden_states=dlbrt_output.hidden_states,
            attentions=dlbrt_output.attentions,
        )


@auto_docstring(
    custom_intro="""
    DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """
)
DistilBertForMaskedLM
python
tensorflow__tensorflow
tensorflow/python/distribute/combinations_test.py
{ "start": 8286, "end": 9497 }
class ____(test.TestCase, parameterized.TestCase):

  def setUp(self):
    super().setUp()
    if combinations.in_main_process():
      num_gpus = combinations.env().total_phsyical_gpus
      if num_gpus != 2 and num_gpus != 4:
        self.skipTest("requires 2 or 4 GPUs")

  # Test cases are annotated with required_gpus only for them to run in gpu
  # targets, otherwise they will be skipped.

  @combinations.generate(
      combinations.combine(num_workers=2, required_gpus=1, share_gpu=True))
  def testShareGPU(self):
    self.assertLen(context.context().list_physical_devices("GPU"),
                   combinations.env().total_phsyical_gpus)

  @combinations.generate(combinations.combine(num_workers=2, required_gpus=1))
  def testShareGPUByDefault(self):
    self.assertLen(context.context().list_physical_devices("GPU"),
                   combinations.env().total_phsyical_gpus)

  @combinations.generate(
      combinations.combine(num_workers=2, required_gpus=1, share_gpu=False))
  def testNotShareGPU(self):
    self.assertLen(context.context().list_physical_devices("GPU"),
                   combinations.env().total_phsyical_gpus / 2)


if __name__ == "__main__":
  test_util.main()
ShareGPUTest
python
PyCQA__pylint
tests/functional/i/invalid/invalid_metaclass.py
{ "start": 823, "end": 890 }
class ____(metaclass=2):  # [invalid-metaclass]
    pass
ThirdInvalid
python
numba__numba
numba/tests/test_tracing.py
{ "start": 1013, "end": 1596 }
class ____(object):

    @tracing.trace
    @classmethod
    def class_method(cls):
        pass

    @tracing.trace
    @staticmethod
    def static_method():
        pass

    __test = None

    def _test_get(self):
        return self.__test

    def _test_set(self, value):
        self.__test = value

    test = tracing.trace(property(_test_get, _test_set))

    @tracing.trace
    def method(self, some, other='value', *args, **kwds):
        pass

    def __repr__(self):
        """Generate a deterministic string for testing."""
        return '<Class instance>'
Class
python
Lightning-AI__lightning
tests/tests_pytorch/test_cli.py
{ "start": 26311, "end": 27833 }
class ____(BoringModel):
    def __init__(
        self,
        optimizer: OptimizerCallable = torch.optim.Adam,
    ):
        super().__init__()
        self.save_hyperparameters()
        self.optimizer = optimizer

    def configure_optimizers(self):
        optimizer = self.optimizer(self.parameters())
        return {"optimizer": optimizer}


def test_lightning_cli_link_arguments_subcommands_nested_target(cleandir):
    class MyLightningCLI(LightningCLI):
        def add_arguments_to_parser(self, parser):
            parser.link_arguments(
                "data.num_classes",
                "model.init_args.optimizer.init_args.num_classes",
                apply_on="instantiate",
            )

    cli_args = [
        "fit",
        "--data.batch_size=12",
        "--trainer.max_epochs=1",
        "--model=tests_pytorch.test_cli.DeepLinkTargetModel",
        "--model.optimizer=tests_pytorch.test_cli.CustomAdam",
    ]

    with mock.patch("sys.argv", ["any.py"] + cli_args):
        cli = MyLightningCLI(
            DeepLinkTargetModel,
            BoringDataModuleBatchSizeAndClasses,
            subclass_mode_model=True,
            auto_configure_optimizers=False,
        )

    hparams_path = Path(cli.trainer.log_dir) / "hparams.yaml"
    assert hparams_path.is_file()
    hparams = yaml.safe_load(hparams_path.read_text())

    assert hparams["optimizer"]["class_path"] == "tests_pytorch.test_cli.CustomAdam"
    assert hparams["optimizer"]["init_args"]["num_classes"] == 5
DeepLinkTargetModel
python
wandb__wandb
wandb/vendor/pygments/lexers/configs.py
{ "start": 19823, "end": 20636 }
class ____(RegexLexer):
    """
    Lexer for `Docker <http://docker.io>`_ configuration files.

    .. versionadded:: 2.0
    """
    name = 'Docker'
    aliases = ['docker', 'dockerfile']
    filenames = ['Dockerfile', '*.docker']
    mimetypes = ['text/x-dockerfile-config']

    _keywords = (r'(?:FROM|MAINTAINER|CMD|EXPOSE|ENV|ADD|ENTRYPOINT|'
                 r'VOLUME|WORKDIR)')

    flags = re.IGNORECASE | re.MULTILINE
    tokens = {
        'root': [
            (r'^(ONBUILD)(\s+)(%s)\b' % (_keywords,),
             bygroups(Name.Keyword, Whitespace, Keyword)),
            (r'^(%s)\b(.*)' % (_keywords,), bygroups(Keyword, String)),
            (r'#.*', Comment),
            (r'RUN', Keyword),  # Rest of line falls through
            (r'(.*\\\n)*.+', using(BashLexer)),
        ],
    }
DockerLexer
python
pandas-dev__pandas
asv_bench/benchmarks/tslibs/offsets.py
{ "start": 1183, "end": 1582 }
class ____:
    params = offset_objs
    param_names = ["offset"]

    def setup(self, offset):
        self.dates = [
            datetime(2016, m, d)
            for m in [10, 11, 12]
            for d in [1, 2, 3, 28, 29, 30, 31]
            if not (m == 11 and d == 31)
        ]

    def time_on_offset(self, offset):
        for date in self.dates:
            offset.is_on_offset(date)
OnOffset
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/constructor28.py
{ "start": 979, "end": 1346 }
class ____(Generic[S]):
    @overload
    def __new__(cls, item: S, /) -> ClassD[S]: ...
    @overload
    def __new__(cls, item: S, __item2: S, /) -> ClassD[tuple[S, S]]: ...
    def __new__(cls, *items: Any) -> Any: ...

    def __call__(self, obj: Any) -> Any: ...


func3(ClassD(""), ClassD(""))


def func4(a: Iterable[tuple[str, ...]]):
    zip(a, zip(*a))
ClassD
python
getsentry__sentry
src/sentry/api/endpoints/event_attachments.py
{ "start": 619, "end": 2567 }
class ____(ProjectEndpoint):
    owner = ApiOwner.OWNERS_INGEST
    publish_status = {
        "GET": ApiPublishStatus.PRIVATE,
    }

    def get(self, request: Request, project, event_id) -> Response:
        """
        Retrieve attachments for an event
        `````````````````````````````````

        :pparam string organization_id_or_slug: the id or slug of the organization the
                                                issues belong to.
        :pparam string project_id_or_slug: the id or slug of the project the event
                                           belongs to.
        :pparam string event_id: the id of the event.
        :auth: required
        """
        if not features.has(
            "organizations:event-attachments", project.organization, actor=request.user
        ):
            return self.respond(status=404)

        event = eventstore.backend.get_event_by_id(project.id, event_id)
        if event is None:
            return self.respond({"detail": "Event not found"}, status=404)

        queryset = EventAttachment.objects.filter(project_id=project.id, event_id=event.event_id)

        query = request.GET.get("query")
        if query:
            tokens = tokenize_query(query)
            for key, value in tokens.items():
                if key == "query":
                    value_s = " ".join(value)
                    queryset = queryset.filter(name__icontains=value_s)
                elif key == "is":
                    value_s = " ".join(value)
                    if value_s == "screenshot":
                        queryset = event_attachment_screenshot_filter(queryset)
                else:
                    queryset = queryset.none()

        return self.paginate(
            request=request,
            queryset=queryset,
            order_by="name",
            on_results=lambda x: serialize(x, request.user),
            paginator_cls=OffsetPaginator,
        )
EventAttachmentsEndpoint
python
huggingface__transformers
tests/models/convnextv2/test_modeling_convnextv2.py
{ "start": 4813, "end": 10287 }
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ConvNextV2 does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"image-feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_resize_embeddings = False
    has_attentions = False
    test_torch_exportable = True

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=ConvNextV2Config,
            has_text_modality=False,
            hidden_size=37,
            common_properties=["hidden_sizes", "num_channels"],
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            self.skipTest(reason="ModelTester is not set to test training")

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            self.skipTest(reason="ModelTester is not set to test training")

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model_name = "facebook/convnextv2-tiny-1k-224"
        model = ConvNextV2Model.from_pretrained(model_name)
        self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
ConvNextV2ModelTest
python
getsentry__sentry
tests/sentry/core/endpoints/scim/test_scim_team_index.py
{ "start": 7857, "end": 10043 }
class ____(SCIMTestCase):
    endpoint = "sentry-api-0-organization-scim-team-index"
    method = "post"

    def setUp(self) -> None:
        super().setUp()
        self.post_data = {
            "schemas": ["urn:ietf:params:scim:schemas:core:2.0:Group"],
            "displayName": "Test SCIMv2",
            "members": [],
        }

    @patch("sentry.core.endpoints.scim.teams.metrics")
    def test_scim_team_index_create(self, mock_metrics: MagicMock) -> None:
        with receivers_raise_on_send():
            response = self.get_success_response(
                self.organization.slug, **self.post_data, status_code=201
            )
        team_id = response.data["id"]
        assert response.data == {
            "schemas": ["urn:ietf:params:scim:schemas:core:2.0:Group"],
            "id": team_id,
            "displayName": "Test SCIMv2",
            "members": [],
            "meta": {"resourceType": "Group"},
        }
        assert Team.objects.filter(id=team_id).exists()
        assert Team.objects.get(id=team_id).slug == "test-scimv2"
        assert Team.objects.get(id=team_id).name == "Test SCIMv2"
        assert Team.objects.get(id=team_id).idp_provisioned
        assert len(Team.objects.get(id=team_id).member_set) == 0
        mock_metrics.incr.assert_called_with(
            "sentry.scim.team.provision",
        )

    def test_scim_team_no_duplicate_names(self) -> None:
        self.create_team(organization=self.organization, name=self.post_data["displayName"])
        response = self.get_error_response(
            self.organization.slug, **self.post_data, status_code=409
        )
        assert response.data["detail"] == "A team with this slug already exists."

    def test_scim_team_invalid_numeric_slug(self) -> None:
        invalid_post_data = {**self.post_data, "displayName": "1234"}
        response = self.get_error_response(
            self.organization.slug, **invalid_post_data, status_code=400
        )
        assert response.data["slug"][0] == (
            "Enter a valid slug consisting of lowercase letters, numbers, underscores or "
            "hyphens. It cannot be entirely numeric."
        )
SCIMIndexCreateTest
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/external.py
{ "start": 19897, "end": 20151 }
class ____(graphene.Union):
    class Meta:
        types = (
            GrapheneRepositoryConnection,
            GrapheneRepositoryNotFoundError,
            GraphenePythonError,
        )
        name = "RepositoriesOrError"
GrapheneRepositoriesOrError
python
getsentry__sentry
fixtures/safe_migrations_apps/good_flow_delete_field_pending_with_fk_constraint_app/models.py
{ "start": 166, "end": 322 }
class ____(models.Model):
    field = models.IntegerField(default=0, null=False)
    fk_table = FlexibleForeignKey(FkTable, null=True, db_index=False)
TestTable
python
allegroai__clearml
clearml/backend_api/services/v2_23/tasks.py
{ "start": 166208, "end": 168717 }
class ____(Response):
    """
    Response of tasks.completed endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    :param published: Number of tasks published (0 or 1)
    :type published: int
    """

    _service = "tasks"
    _action = "completed"
    _version = "2.23"
    _schema = {
        "definitions": {},
        "properties": {
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "published": {
                "description": "Number of tasks published (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, fields=None, published=None, **kwargs):
        super(CompletedResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields
        self.published = published

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is None:
            self._property_updated = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        if value is None:
            self._property_fields = None
            return
        self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value

    @schema_property("published")
    def published(self):
        return self._property_published

    @published.setter
    def published(self, value):
        if value is None:
            self._property_published = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "published", six.integer_types)
        self._property_published = value
CompletedResponse
python
Lightning-AI__lightning
tests/tests_pytorch/plugins/test_async_checkpoint.py
{ "start": 215, "end": 1884 }
class ____(CheckpointIO):
    def __init__(self) -> None:
        self.saved: Optional[dict[str, Any]] = None

    def save_checkpoint(self, checkpoint: dict[str, Any], path: str, storage_options: Optional[Any] = None) -> None:
        # Simulate some delay to increase race window
        time.sleep(0.05)
        # Store the received checkpoint object (not a deep copy) to inspect tensor values
        self.saved = checkpoint

    def load_checkpoint(self, path: str, map_location: Optional[Any] = None) -> dict[str, Any]:
        raise NotImplementedError

    def remove_checkpoint(self, path: str) -> None:
        pass


@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_async_checkpoint_should_snapshot_values_before_mutation():
    base = _CaptureCheckpointIO()
    async_io = AsyncCheckpointIO(checkpoint_io=base)

    # a tensor that we will mutate after scheduling the save
    t = torch.tensor([0.0])
    ckpt = {"w": t}

    # schedule async save
    async_io.save_checkpoint(ckpt, path="unused")

    # mutate immediately afterward to mimic training thread stepping params
    t.add_(1.0)

    # ensure background thread finished
    async_io.teardown()

    assert base.saved is not None, "Async save did not run"

    # EXPECTATION: AsyncCheckpointIO should have captured value 0.0 (pre-mutation)
    # CURRENT BEHAVIOR (bug): it captures 1.0 because the dict holds references
    assert torch.allclose(base.saved["w"], torch.tensor([0.0])), (
        "AsyncCheckpointIO must snapshot the checkpoint (clone tensors) on the main thread "
        "to avoid races with parameter mutation; got mutated value instead"
    )
_CaptureCheckpointIO
python
django__django
django/contrib/messages/test.py
{ "start": 121, "end": 421 }
class ____:
    def assertMessages(self, response, expected_messages, *, ordered=True):
        request_messages = list(get_messages(response.wsgi_request))
        assertion = self.assertEqual if ordered else self.assertCountEqual
        assertion(request_messages, expected_messages)
MessagesTestMixin
python
tensorflow__tensorflow
tensorflow/tools/tensorflow_builder/compat_checker/compat_checker_test.py
{ "start": 1810, "end": 4074 }
class ____(unittest.TestCase):

  def setUp(self):
    """Set up test."""
    super(CompatCheckerTest, self).setUp()
    self.test_file = os.path.join(PATH_TO_DIR, "test_config.ini")

  def testWithUserConfigInRange(self):
    """Test a set of configs that are supported.

    Testing with the following combination should always return `success`:
      [1] A set of configurations that are supported and/or compatible.
      [2] `.ini` config file with proper formatting.
    """
    # Initialize compatibility checker.
    self.compat_checker = compat_checker.ConfigCompatChecker(
        USER_CONFIG_IN_RANGE, self.test_file)
    # Compatibility check should succeed.
    self.assertTrue(self.compat_checker.check_compatibility())
    # Make sure no warning or error messages are recorded.
    self.assertFalse(len(self.compat_checker.error_msg))
    # Make sure total # of successes match total # of configs.
    cnt = len(list(USER_CONFIG_IN_RANGE.keys()))
    self.assertEqual(len(self.compat_checker.successes), cnt)

  def testWithUserConfigNotInRange(self):
    """Test a set of configs that are NOT supported.

    Testing with the following combination should always return `failure`:
      [1] A set of configurations that are NOT supported and/or compatible.
      [2] `.ini` config file with proper formatting.
    """
    self.compat_checker = compat_checker.ConfigCompatChecker(
        USER_CONFIG_NOT_IN_RANGE, self.test_file)
    # Compatibility check should fail.
    self.assertFalse(self.compat_checker.check_compatibility())
    # Check error and warning messages.
    err_msg_list = self.compat_checker.failures
    self.assertTrue(len(err_msg_list))
    # Make sure total # of failures match total # of configs.
    cnt = len(list(USER_CONFIG_NOT_IN_RANGE.keys()))
    self.assertEqual(len(err_msg_list), cnt)

  def testWithUserConfigMissing(self):
    """Test a set of configs that are empty or missing specification."""
    self.compat_checker = compat_checker.ConfigCompatChecker(
        USER_CONFIG_MISSING, self.test_file)
    # With missing specification in config file, the check should
    # always fail.
    self.assertFalse(self.compat_checker.check_compatibility())


if __name__ == "__main__":
  unittest.main()
CompatCheckerTest
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/resolved_asset_deps.py
{ "start": 520, "end": 9321 }
class ____: """An asset can depend on another asset without specifying the full asset key for the upstream asset, if the name and groups match. ResolvedAssetDependencies maps these flexible dependencies to precise key-based dependencies. """ def __init__( self, assets_defs: Iterable[AssetsDefinition], source_assets: Iterable[SourceAsset] ): self._deps_by_assets_def_id = resolve_assets_def_deps(assets_defs, source_assets) def get_resolved_upstream_asset_keys( self, assets_def: AssetsDefinition, asset_key: AssetKey ) -> AbstractSet[AssetKey]: resolved_keys_by_unresolved_key = self._deps_by_assets_def_id.get(id(assets_def), {}) return { resolved_keys_by_unresolved_key.get(unresolved_dep.asset_key, unresolved_dep.asset_key) for unresolved_dep in assets_def.specs_by_key[asset_key].deps } def get_resolved_asset_key_for_input( self, assets_def: AssetsDefinition, input_name: str ) -> AssetKey: unresolved_asset_key_for_input = assets_def.node_keys_by_input_name[input_name] return self._deps_by_assets_def_id.get(id(assets_def), {}).get( unresolved_asset_key_for_input, unresolved_asset_key_for_input ) def resolve_similar_asset_names( target_asset_key: AssetKey, asset_keys: Iterable[AssetKey], ) -> Sequence[AssetKey]: """Given a target asset key (an upstream dependency which we can't find), produces a list of similar asset keys from the list of asset definitions. We use this list to produce a helpful error message that can help users debug their asset dependencies. """ similar_names: list[AssetKey] = [] target_asset_key_split = ("/".join(target_asset_key.path)).split("/") for asset_key in asset_keys: *target_asset_key_prefix, target_asset_key_name = target_asset_key.path *asset_key_prefix, asset_key_name = asset_key.path try: from rapidfuzz import fuzz is_similar_name = bool( fuzz.ratio(asset_key_name, target_asset_key_name, score_cutoff=80) ) is_similar_prefix = bool( fuzz.ratio( " ".join(asset_key_prefix), " ".join(target_asset_key_prefix), score_cutoff=80, ) ) except ImportError: from difflib import get_close_matches is_similar_name = bool( get_close_matches(asset_key_name, [target_asset_key_name], cutoff=0.8) ) is_similar_prefix = bool( get_close_matches( " ".join(asset_key_prefix), [" ".join(target_asset_key_prefix)], cutoff=0.8 ) ) # Whether the asset key or upstream key has the same prefix and a similar # name # e.g. [snowflake, elementl, key] and [snowflake, elementl, ey] is_same_prefix_similar_name = ( asset_key_prefix == target_asset_key_prefix and is_similar_name ) # Whether the asset key or upstream key has a similar prefix and the same # name # e.g. [snowflake, elementl, key] and [nowflake, elementl, key] is_similar_prefix_same_name = asset_key_name == target_asset_key_name and is_similar_prefix # Whether the asset key or upstream key has one more prefix component than # the other, and the same name # e.g. 
[snowflake, elementl, key] and [snowflake, elementl, prod, key] is_off_by_one_prefix_component_same_name = ( asset_key.path[-1] == target_asset_key.path[-1] and len(set(asset_key.path).symmetric_difference(set(target_asset_key.path))) == 1 and max(len(asset_key.path), len(target_asset_key.path)) > 1 ) # If the asset key provided has no prefix and the upstream key has # the same name but a prefix of any length no_prefix_but_is_match_with_prefix = ( len(target_asset_key.path) == 1 and asset_key.path[-1] == target_asset_key.path[-1] ) matches_slashes_turned_to_prefix_gaps = asset_key.path == target_asset_key_split if ( is_same_prefix_similar_name or is_similar_prefix_same_name or is_off_by_one_prefix_component_same_name or no_prefix_but_is_match_with_prefix or matches_slashes_turned_to_prefix_gaps ): similar_names.append(asset_key) return sorted(similar_names, key=lambda key: key.to_string()) def resolve_assets_def_deps( assets_defs: Iterable[AssetsDefinition], source_assets: Iterable[SourceAsset] ) -> Mapping[int, Mapping[AssetKey, AssetKey]]: """For each AssetsDefinition, resolves its inputs to upstream asset keys. Matches based on either of two criteria: - The input asset key exactly matches an asset key. - The input asset key has one component, that component matches the final component of an asset key, and they're both in the same asset group. The returned dictionary only contains entries for assets definitions with group-resolved asset dependencies. """ group_names_by_key: dict[AssetKey, str] = {} for assets_def in assets_defs: for spec in assets_def.specs: group_names_by_key[spec.key] = check.not_none(spec.group_name) for source_asset in source_assets: group_names_by_key[source_asset.key] = source_asset.group_name all_asset_keys = group_names_by_key.keys() asset_keys_by_group_and_name: dict[tuple[str, str], list[AssetKey]] = defaultdict(list) for key, group in group_names_by_key.items(): asset_keys_by_group_and_name[(group, key.path[-1])].append(key) warned = False result: dict[int, Mapping[AssetKey, AssetKey]] = {} for assets_def in assets_defs: # If all keys have the same group name, use that group_names = {spec.group_name for spec in assets_def.specs} group_name = next(iter(group_names)) if len(group_names) == 1 else None resolved_keys_by_unresolved_key: dict[AssetKey, AssetKey] = {} for input_name, upstream_key in assets_def.keys_by_input_name.items(): group_and_upstream_name = (group_name, upstream_key.path[-1]) matching_asset_keys = asset_keys_by_group_and_name.get( cast("tuple[str, str]", group_and_upstream_name) ) if upstream_key in all_asset_keys: pass elif ( group_name is not None and len(upstream_key.path) == 1 and matching_asset_keys and len(matching_asset_keys) == 1 and matching_asset_keys[0] not in assets_def.keys ): resolved_key = matching_asset_keys[0] resolved_keys_by_unresolved_key[upstream_key] = resolved_key if not warned: beta_warning( f"Asset {next(iter(assets_def.keys)).to_string()}'s dependency" f" '{upstream_key.path[-1]}' was resolved to upstream asset" f" {resolved_key.to_string()}, because the name matches and they're in the" " same group. This is a beta functionality that may change in a" " future release" ) warned = True elif not assets_def.node_def.input_def_named(input_name).dagster_type.is_nothing: msg = ( f"Input asset '{upstream_key.to_string()}' for asset " f"'{next(iter(assets_def.keys)).to_string()}' is not " "produced by any of the provided asset ops and is not one of the provided " "sources." 
) similar_names = resolve_similar_asset_names( upstream_key, list( itertools.chain.from_iterable(asset_def.keys for asset_def in assets_defs) ), ) if similar_names: # Arbitrarily limit to 10 similar names to avoid a huge error message subset_similar_names = similar_names[:10] similar_to_string = ", ".join( similar.to_string() for similar in subset_similar_names ) msg += f" Did you mean one of the following?\n\t{similar_to_string}" raise DagsterInvalidDefinitionError(msg) if resolved_keys_by_unresolved_key: result[id(assets_def)] = resolved_keys_by_unresolved_key return result
ResolvedAssetDependencies
python
redis__redis-py
redis/asyncio/client.py
{ "start": 2184, "end": 2377 }
class ____(Protocol):
    async def __call__(self, response: Any, **kwargs): ...


ResponseCallbackT = Union[ResponseCallbackProtocol, AsyncResponseCallbackProtocol]
AsyncResponseCallbackProtocol
python
wandb__wandb
tests/fixtures/mock_wandb_log.py
{ "start": 1340, "end": 3096 }
class ____: """Helper to test wandb.term*() calls. See the `mock_wandb_log` fixture. """ def __init__( self, termlog: unittest.mock.MagicMock, termwarn: unittest.mock.MagicMock, termerror: unittest.mock.MagicMock, ): self._termlog = termlog self._termwarn = termwarn self._termerror = termerror def assert_logged(self, msg: str) -> None: """Raise if no message passed to termlog() contains msg_re.""" self._assert_logged(self._termlog, msg) def assert_warned(self, msg: str) -> None: """Raise if no message passed to termwarn() contains msg_re.""" self._assert_logged(self._termwarn, msg) def assert_errored(self, msg: str) -> None: """Raise if no message passed to termerror() contains msg_re.""" self._assert_logged(self._termerror, msg) def _assert_logged( self, termfunc: unittest.mock.MagicMock, expected_msg: str, ) -> None: messages = list(self._logs(termfunc)) for msg in messages: if expected_msg in msg: return else: messages_pretty = textwrap.indent("\n".join(messages), "> ") raise AssertionError(f"{expected_msg!r} not in any of\n{messages_pretty}") def _logs(self, termfunc: unittest.mock.MagicMock) -> Iterable[str]: # All the term*() functions have a similar API: the message is the # first argument, which may also be passed as a keyword argument called # "string". for call in termfunc.call_args_list: if "string" in call.kwargs: yield call.kwargs["string"] else: yield call.args[0]
MockWandbLog
python
ansible__ansible
test/lib/ansible_test/_internal/commands/sanity/__init__.py
{ "start": 27899, "end": 32948 }
class ____(metaclass=abc.ABCMeta):
    """Sanity test base class."""

    ansible_only = False

    def __init__(self, name: t.Optional[str] = None) -> None:
        if not name:
            name = self.__class__.__name__
            name = re.sub(r'Test$', '', name)  # drop Test suffix
            name = re.sub(r'(.)([A-Z][a-z]+)', r'\1-\2', name).lower()  # use dashes instead of capitalization

        self.name = name
        self.enabled = True

        # Optional error codes represent errors which spontaneously occur without changes to the content under test, such as those based on the current date.
        # Because these errors can be unpredictable they behave differently than normal error codes:
        #   * They are not reported by default. The `--enable-optional-errors` option must be used to display these errors.
        #   * They cannot be ignored. This is done to maintain the integrity of the ignore system.
        self.optional_error_codes: set[str] = set()

    @property
    def error_code(self) -> t.Optional[str]:
        """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
        return None

    @property
    def can_ignore(self) -> bool:
        """True if the test supports ignore entries."""
        return True

    @property
    def can_skip(self) -> bool:
        """True if the test supports skip entries."""
        return not self.all_targets and not self.no_targets

    @property
    def all_targets(self) -> bool:
        """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets."""
        return False

    @property
    def no_targets(self) -> bool:
        """True if the test does not use test targets. Mutually exclusive with all_targets."""
        return False

    @property
    def include_directories(self) -> bool:
        """True if the test targets should include directories."""
        return False

    @property
    def include_symlinks(self) -> bool:
        """True if the test targets should include symlinks."""
        return False

    @property
    def supported_python_versions(self) -> t.Optional[tuple[str, ...]]:
        """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
        return CONTROLLER_PYTHON_VERSIONS

    def origin_hook(self, args: SanityConfig) -> None:
        """This method is called on the origin, before the test runs or delegation occurs."""

    def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:  # pylint: disable=unused-argument
        """Return the given list of test targets, filtered to include only those relevant for the test."""
        if self.no_targets:
            return []

        raise NotImplementedError('Sanity test "%s" must implement "filter_targets" or set "no_targets" to True.' % self.name)

    def filter_targets_by_version(self, args: SanityConfig, targets: list[TestTarget], python_version: str) -> list[TestTarget]:
        """Return the given list of test targets, filtered to include only those relevant for the test, taking into account the Python version."""
        del args  # args is not used here, but derived classes may make use of it
        del python_version  # python_version is not used here, but derived classes may make use of it

        targets = self.filter_targets(targets)

        return targets

    @staticmethod
    def filter_remote_targets(targets: list[TestTarget]) -> list[TestTarget]:
        """Return a filtered list of the given targets, including only those that require support for remote-only Python versions."""
        targets = [target for target in targets if (
            is_subdir(target.path, data_context().content.module_path) or
            is_subdir(target.path, data_context().content.module_utils_path) or
            is_subdir(target.path, data_context().content.unit_module_path) or
            is_subdir(target.path, data_context().content.unit_module_utils_path) or
            # include modules/module_utils within integration test library directories
            re.search('^%s/.*/library/' % re.escape(data_context().content.integration_targets_path), target.path) or
            # special handling for content in ansible-core
            (data_context().content.is_ansible and (
                # utility code that runs in target environments and requires support for remote-only Python versions
                is_subdir(target.path, 'test/lib/ansible_test/_util/target/') or
                # integration test support modules/module_utils continue to require support for remote-only Python versions
                re.search('^test/support/integration/.*/(modules|module_utils)/', target.path) or
                # collection loader requires support for remote-only Python versions
                re.search('^lib/ansible/utils/collection_loader/', target.path)
            ))
        )]

        return targets
SanityTest
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/collective_ops_test.py
{ "start": 54185, "end": 55868 }
class ____(test.TestCase, parameterized.TestCase):

  def setUp(self):
    _setup_context()
    super().setUp()

  def testInvalidGroupKey(self, collective_op, device, communication):
    dev0 = '/device:%s:0' % device
    group_size = 2
    group_key = [100]
    instance_key = 100
    in_tensor = constant_op.constant([1.])

    with self.assertRaises(errors.InvalidArgumentError):
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            ordering_token=create_ordering_token(),
            communication_hint=communication)

  def testInvalidGroupSize(self, collective_op, device, communication):
    dev0 = '/device:%s:0' % device
    group_size = -2
    group_key = 100
    instance_key = 100
    in_tensor = constant_op.constant([1.])

    with self.assertRaises(errors.InvalidArgumentError):
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            ordering_token=create_ordering_token(),
            communication_hint=communication)

  def testInvalidInstanceKey(self, collective_op, device, communication):
    dev0 = '/device:%s:0' % device
    group_size = 2
    group_key = 100
    instance_key = [100]
    in_tensor = constant_op.constant([1.])

    with self.assertRaises(errors.InvalidArgumentError):
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            ordering_token=create_ordering_token(),
            communication_hint=communication)
InvalidInputTest
python
google__jax
jax/_src/core.py
{ "start": 151295, "end": 152472 }
class ____:
  def __init__(self, trace_ref):
    self._trace_ref = trace_ref

  def __eq__(self, other):
    if isinstance(other, OpaqueTraceState):
      return self._trace_ref == other._trace_ref
    else:
      return False


def get_opaque_trace_state(convention=None):
  del convention
  return OpaqueTraceState(trace_ctx.trace._weakref)

def nonempty_axis_env() -> bool:
  return bool(trace_ctx.axis_env.axis_sizes)

def unsafe_am_i_under_a_jit() -> bool:
  return 'DynamicJaxprTrace' in str(unsafe_get_trace_stack(trace_ctx.trace))

def unsafe_am_i_under_a_vmap() -> bool:
  return 'BatchTrace' in str(unsafe_get_trace_stack(trace_ctx.trace))

# TODO(douglam): deprecate/delete
def find_top_trace(_):
  return unsafe_get_current_trace()

def unsafe_get_current_trace():
  return trace_ctx.trace

def unsafe_get_trace_stack(trace):
  if hasattr(trace, "parent_trace"):
    return unsafe_get_trace_stack(trace.parent_trace) + [trace]
  else:
    return [trace]

def unsafe_get_axis_names() -> list[Any]:
  return list(trace_ctx.axis_env.axis_sizes)

# TODO(douglam): deprecate/delete
def axis_frame(axis_name):
  return trace_ctx.axis_env.axis_size(axis_name)
OpaqueTraceState
python
sqlalchemy__sqlalchemy
test/dialect/postgresql/test_types.py
{ "start": 186217, "end": 186915 }
class ____:
    _col_type = TSTZRANGE
    _col_str = "TSTZRANGE"

    def tstzs(self):
        tz = datetime.timezone(-datetime.timedelta(hours=5, minutes=30))
        return (
            datetime.datetime(2013, 3, 23, 14, 30, tzinfo=tz),
            datetime.datetime(2013, 3, 30, 23, 30, tzinfo=tz),
        )

    def _data_str(self):
        l, r = self.tstzs()
        return f"[{l},{r})"

    def _data_obj(self):
        return Range(*self.tstzs())

    _epsilon = datetime.timedelta(days=1)

    def _step_value_up(self, value):
        return value + datetime.timedelta(days=1)

    def _step_value_down(self, value):
        return value - datetime.timedelta(days=1)
_DateTimeTZRangeTests
python
allegroai__clearml
clearml/utilities/requests_toolbelt/multipart/encoder.py
{ "start": 286, "end": 364 }
class ____(Exception):
    """File not supported error."""
FileNotSupportedError
python
django-import-export__django-import-export
tests/core/tests/admin_integration/test_action_export.py
{ "start": 9206, "end": 14812 }
class ____(AdminTestMixin, TestCase):
    """
    Test cases for issue #2097: Admin filters are lost during export actions.

    Tests that admin changelist filters are properly preserved when exporting
    selected items through the export action using the AuthorBirthdayListFilter.
    """

    def setUp(self):
        super().setUp()
        # Create authors from different eras to test the AuthorBirthdayListFilter
        self.old_author1 = Author.objects.create(
            name="Old Author 1", birthday=date(1850, 1, 1)
        )
        self.old_author2 = Author.objects.create(
            name="Old Author 2", birthday=date(1880, 6, 15)
        )
        self.new_author1 = Author.objects.create(
            name="New Author 1", birthday=date(1950, 3, 10)
        )
        self.new_author2 = Author.objects.create(
            name="New Author 2", birthday=date(1970, 12, 25)
        )

        # Create books with authors from different eras
        self.old_book1 = Book.objects.create(name="Old Book 1", author=self.old_author1)
        self.old_book2 = Book.objects.create(name="Old Book 2", author=self.old_author2)
        self.new_book1 = Book.objects.create(name="New Book 1", author=self.new_author1)
        self.new_book2 = Book.objects.create(name="New Book 2", author=self.new_author2)

        # fields payload for `BookResource` - for `SelectableFieldsExportForm`
        self.resource_fields_payload = {
            "bookresource_id": True,
            "bookresource_name": True,
            "bookresource_author": True,
        }

    def test_export_action_preserves_admin_filters(self):
        """
        Test that admin filters are preserved when exporting selected items.

        This reproduces issue #2097 where applied filters are lost during export.
        Uses the AuthorBirthdayListFilter to test filter preservation with books.

        The issue occurs when:
        1. User applies filters in admin changelist (authors born before 1900)
        2. User selects items from filtered results
        3. User chooses "Export selected items" action
        4. The export URL loses the filter context, causing unfiltered export
        """
        # Step 1: Simulate POST action with AuthorBirthdayListFilter applied
        data = {
            "action": ["export_admin_action"],
            "_selected_action": [
                str(self.old_book1.id),
                str(self.old_book2.id),
            ],
        }

        # Add filter parameters to simulate applied admin filters
        filter_params = "?birthday=before"
        url_with_filters = self.core_book_url + filter_params

        # Make the request with filters applied
        response = self._post_url_response(url_with_filters, data)

        # Should get an export form
        self.assertIn("form", response.context)

        # Check that export_url preserves the filter parameters
        export_url = response.context.get("export_url", "")
        self.assertIn(
            "birthday=before",
            export_url,
            f"Export URL should preserve AuthorBirthdayListFilter parameters. "
            f"Got URL: '{export_url}'. Filter preservation is working!",
        )

    def test_export_action_filter_preservation_end_to_end(self):
        """
        Test the complete filter preservation workflow from action to final export.

        This test follows the complete flow: action -> form -> export with filters.
        """
        # Step 1: First trigger the export action with filters
        action_data = {
            "action": ["export_admin_action"],
            "_selected_action": [
                str(self.old_book1.id),
                str(self.old_book2.id),
            ],
        }

        # POST to changelist with filter to get export form
        filter_params = "?birthday=before"
        url_with_filters = self.core_book_url + filter_params
        action_response = self._post_url_response(url_with_filters, action_data)

        # Should get an export form with preserved filter URL
        self.assertIn("form", action_response.context)
        export_url = action_response.context.get("export_url", "")
        self.assertIn("birthday=before", export_url)

        # Step 2: Now submit the export form to the preserved URL
        export_data = {
            "format": "0",
            "export_items": [str(self.old_book1.id), str(self.old_book2.id)],
            **self.resource_fields_payload,
        }
        self._prepend_form_prefix(export_data)

        # POST to the export URL that should have preserved filters
        # Suppress the deprecation warning for get_valid_export_item_pks
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                message=r"The 'get_valid_export_item_pks\(\)' method",
                category=DeprecationWarning,
            )
            final_response = self._post_url_response(export_url, export_data)

        # Should get CSV export that respects the filter context
        self.assertEqual(final_response["Content-Type"], "text/csv")
        content = final_response.content.decode()

        # Verify the export contains the expected filtered data
        lines = content.strip().split("\n")
        if len(lines) > 1:
            data_lines = lines[1:]  # Remove header
            # Should only contain the 2 selected books
            self.assertEqual(
                len(data_lines),
                2,
                f"Filter preservation working! Expected 2 books, got {len(data_lines)}",
            )
TestExportFilterPreservation
python
PyCQA__pylint
tests/functional/t/too/too_many_ancestors_ignored_parents.py
{ "start": 523, "end": 555 }
class ____(F):
    """1 parent"""
E
python
realpython__materials
wordcount/tests/task_02.py
{ "start": 708, "end": 1782 }
class ____:
    def test_reports_zeros_on_an_empty_stream(self, wc):
        assert_equals(b"0 0 0\n", wc())

    def test_handles_a_short_word_without_trailing_newline(self, wc):
        assert_equals_if(b"0 1 5\n", wc(stdin=b"caffe"))

    def test_handles_a_short_word_with_trailing_newline(self, wc):
        assert_equals_if(b"1 1 6\n", wc(stdin=b"caffe\n"))

    def test_delimits_words_on_whitespace(self, wc):
        assert_equals_if(
            expected=b"1 1 9\n",
            actual=wc(stdin=b"back-end\n"),
            message="Pay attention to punctuation and special characters.",
        )

    def test_handles_linux_newline(self, wc):
        r"""Handles the Linux newline (\n)"""
        assert_equals_if(b"1 2 7\n", wc(stdin=b"hot\ntea"))

    def test_handles_macos_newline(self, wc):
        r"""Handles the macOS newline (\r)"""
        assert_equals_if(b"0 2 7\n", wc(stdin=b"hot\rtea"))

    def test_handles_windows_newline(self, wc):
        r"""Handles the Windows newline (\r\n)"""
        assert_equals_if(b"1 2 8\n", wc(stdin=b"hot\r\ntea"))
Test
python
pytorch__pytorch
test/inductor/test_decompose_mem_bound_mm.py
{ "start": 655, "end": 984 }
class ____(torch.nn.Module):
    def __init__(
        self, n_input: int, n_output: int, has_bias: bool, device=GPU_TYPE
    ) -> None:
        super().__init__()
        self.linear = torch.nn.Linear(n_input, n_output, bias=has_bias)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.linear(x)
MyModule
python
scikit-learn__scikit-learn
sklearn/tests/test_base.py
{ "start": 2122, "end": 2188 }
class ____(DiamondOverwriteTag):
    pass
InheritDiamondOverwriteTag
python
pypa__warehouse
tests/unit/subscriptions/test_services.py
{ "start": 996, "end": 2801 }
class ____:
    def test_verify_service(self):
        assert verifyClass(IBillingService, StripeBillingService)

    def test_basic_init(self):
        api = pretend.stub()
        billing_service = StripeBillingService(
            api=api,
            publishable_key="secret_to_everybody",
            webhook_secret="keep_it_secret_keep_it_safe",
            domain="tests",
        )

        assert billing_service.api is api
        assert billing_service.publishable_key == "secret_to_everybody"
        assert billing_service.webhook_secret == "keep_it_secret_keep_it_safe"
        assert billing_service.domain == "tests"

    def test_create_service(self):
        # Reload stripe to reset the global stripe.api_key to default.
        importlib.reload(stripe)
        request = pretend.stub(
            registry=pretend.stub(
                settings={
                    "billing.api_base": "http://localhost:12111",
                    "billing.api_version": "2020-08-27",
                    "billing.secret_key": "sk_test_123",
                    "billing.publishable_key": "pk_test_123",
                    "billing.webhook_key": "whsec_123",
                    "billing.domain": "tests",
                }
            )
        )
        billing_service = StripeBillingService.create_service(None, request)
        # Assert api_base isn't overwritten with mock service even if we try
        assert not billing_service.api.api_base == "http://localhost:12111"
        assert billing_service.api.api_version == "2020-08-27"
        assert billing_service.api.api_key == "sk_test_123"
        assert billing_service.publishable_key == "pk_test_123"
        assert billing_service.webhook_secret == "whsec_123"
        assert billing_service.domain == "tests"
TestStripeBillingService
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 1144816, "end": 1145091 }
class ____(VegaLiteSchema):
    """ScaleInvalidDataShowAsstrokeDash schema wrapper."""

    _schema = {"$ref": '#/definitions/ScaleInvalidDataShowAs<"strokeDash">'}

    def __init__(self, *args, **kwds):
        super().__init__(*args, **kwds)
ScaleInvalidDataShowAsstrokeDash
python
davidhalter__parso
parso/tree.py
{ "start": 15614, "end": 16153 }
class ____(Leaf):
    """
    A leaf that is either completely invalid in a language (like `$` in
    Python) or is invalid at that position. Like the star in `1 +* 1`.
    """
    __slots__ = ('token_type',)
    type = 'error_leaf'

    def __init__(self, token_type, value, start_pos, prefix=''):
        super().__init__(value, start_pos, prefix)
        self.token_type = token_type

    def __repr__(self):
        return "<%s: %s:%s, %s>" % \
            (type(self).__name__, self.token_type, repr(self.value), self.start_pos)
ErrorLeaf
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 122675, "end": 123068 }
class ____(sgqlc.types.Input): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("field", "direction") field = sgqlc.types.Field( sgqlc.types.non_null(ProjectV2OrderField), graphql_name="field" ) direction = sgqlc.types.Field( sgqlc.types.non_null(OrderDirection), graphql_name="direction" )
ProjectV2Order
python
airbytehq__airbyte
airbyte-integrations/connectors/source-tiktok-marketing/components.py
{ "start": 3703, "end": 4218 }
class ____(RecordTransformation): empty_value = "-" def transform( self, record: Mapping[str, Any], config: Optional[Config] = None, stream_state: Optional[StreamState] = None, stream_slice: Optional[StreamSlice] = None, ) -> Mapping[str, Any]: for metric_key, metric_value in record.get("metrics", {}).items(): if metric_value == self.empty_value: record["metrics"][metric_key] = None return record
TransformEmptyMetrics
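The transformation nulls out the `"-"` placeholder in place and returns the mutated record. A quick illustrative call (record contents invented):

```python
# Invented record; the connector represents empty metrics as "-".
record = {"metrics": {"spend": "-", "clicks": "12"}}
TransformEmptyMetrics().transform(record)
print(record["metrics"])  # {'spend': None, 'clicks': '12'}
```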
python
dagster-io__dagster
python_modules/dagster/dagster/_core/execution/api.py
{ "start": 34228, "end": 39240 }
class ____: """Utility class to consolidate execution logic. This is a class and not a function because, e.g., in constructing a `scoped_pipeline_context` for `JobExecutionResult`, we need to pull out the `pipeline_context` after we're done yielding events. This broadly follows a pattern we make use of in other places, cf. `dagster._utils.EventGenerationManager`. """ def __init__( self, execution_plan: ExecutionPlan, iterator: Callable[..., Iterator[DagsterEvent]], execution_context_manager: ExecutionContextManager[Any], ): self.execution_plan = check.inst_param(execution_plan, "execution_plan", ExecutionPlan) self.iterator = check.callable_param(iterator, "iterator") self.execution_context_manager = check.inst_param( execution_context_manager, "execution_context_manager", ExecutionContextManager ) self.job_context = None def __iter__(self) -> Iterator[DagsterEvent]: # Since interrupts can't be raised at arbitrary points safely, delay them until designated # checkpoints during the execution. # To be maximally certain that interrupts are always caught during an execution process, # you can safely add an additional `with capture_interrupts()` at the very beginning of the # process that performs the execution. with capture_interrupts(): yield from self.execution_context_manager.prepare_context() self.job_context = self.execution_context_manager.get_context() generator_closed = False try: if self.job_context: # False if we had a pipeline init failure yield from self.iterator( execution_plan=self.execution_plan, job_context=self.job_context, ) except GeneratorExit: # Shouldn't happen, but avoid runtime-exception in case this generator gets GC-ed # (see https://amir.rachum.com/blog/2017/03/03/generator-cleanup/). generator_closed = True raise finally: for event in self.execution_context_manager.shutdown_context(): if not generator_closed: yield event def _check_execute_job_args( job_arg: Union[JobDefinition, IJob], run_config: Optional[Mapping[str, object]], tags: Optional[Mapping[str, str]], op_selection: Optional[Sequence[str]] = None, ) -> tuple[ IJob, Optional[Mapping], Mapping[str, str], Optional[AbstractSet[str]], Optional[Sequence[str]], ]: ijob = InMemoryJob(job_arg) if isinstance(job_arg, JobDefinition) else job_arg job_def = job_arg if isinstance(job_arg, JobDefinition) else job_arg.get_definition() run_config = check.opt_mapping_param(run_config, "run_config") tags = check.opt_mapping_param(tags, "tags", key_type=str) check.opt_sequence_param(op_selection, "op_selection", of_type=str) tags = merge_dicts(job_def.run_tags, tags) # generate job subset from the given op_selection if op_selection: ijob = ijob.get_subset(op_selection=op_selection) return ( ijob, run_config, tags, ijob.resolved_op_selection, op_selection, ) def _resolve_reexecute_step_selection( instance: DagsterInstance, job: IJob, run_config: Optional[Mapping], parent_dagster_run: DagsterRun, step_selection: Sequence[str], ) -> ExecutionPlan: if parent_dagster_run.op_selection: job = job.get_subset(op_selection=parent_dagster_run.op_selection) state = KnownExecutionState.build_for_reexecution(instance, parent_dagster_run) parent_plan = create_execution_plan( job, parent_dagster_run.run_config, known_state=state, ) step_keys_to_execute = parse_step_selection(parent_plan.get_all_step_deps(), step_selection) return create_execution_plan( job, run_config, step_keys_to_execute=list(step_keys_to_execute), known_state=state.update_for_step_selection(step_keys_to_execute), tags=parent_dagster_run.tags, ) def 
_job_with_repository_load_data( job_arg: Union[JobDefinition, IJob], ) -> tuple[Union[JobDefinition, IJob], Optional[RepositoryLoadData]]: """For ReconstructableJob, generate and return any required RepositoryLoadData, alongside a ReconstructableJob with this repository load data baked in. """ if isinstance(job_arg, ReconstructableJob): # Unless this ReconstructableJob already has repository_load_data attached, this will # force the repository_load_data to be computed from scratch. repository_load_data = job_arg.repository.get_definition().repository_load_data return job_arg.with_repository_load_data(repository_load_data), repository_load_data return job_arg, None
ExecuteRunWithPlanIterable
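The class above exists so that setup events, body events, and teardown events all flow through one generator, with teardown guaranteed by the `finally` block. A toy, Dagster-free sketch of the same prepare/iterate/shutdown pattern (every name here is invented):

```python
from typing import Callable, Iterator

class ToyContextManager:
    # Invented stand-in for ExecutionContextManager: setup may emit events,
    # and teardown events must still flow after the body finishes or raises.
    def prepare_context(self) -> Iterator[str]:
        yield "engine_start"
        self._ctx = {"run_id": "toy"}

    def get_context(self) -> dict:
        return self._ctx

    def shutdown_context(self) -> Iterator[str]:
        yield "engine_stop"

def run(iterator: Callable[[dict], Iterator[str]],
        manager: ToyContextManager) -> Iterator[str]:
    yield from manager.prepare_context()
    ctx = manager.get_context()
    try:
        yield from iterator(ctx)
    finally:
        # Teardown events are emitted even if iteration raised.
        yield from manager.shutdown_context()

events = list(run(lambda ctx: iter(["step_1", "step_2"]), ToyContextManager()))
print(events)  # ['engine_start', 'step_1', 'step_2', 'engine_stop']
```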
python
scipy__scipy
scipy/signal/tests/test_filter_design.py
{ "start": 64774, "end": 69079 }
class ____: """Tests for function `signal.bilinear`. """ def test_exceptions(self): """Raise all exceptions in `bilinear()`. """ with pytest.raises(ValueError, match="Parameter a is not .*"): bilinear(1., np.array([[1, 2, 3]])) with pytest.raises(ValueError, match="Parameter b is not .*"): bilinear(np.ones((2,3)), 1. ) @pytest.mark.xfail(DEFAULT_F32, reason="wrong answer with torch/float32") def test_basic(self, xp): # reference output values computed with sympy b = [0.14879732743343033] a = [1, 0.54552236880522209, 0.14879732743343033] b, a = map(xp.asarray, (b, a)) b_zref = xp.asarray( [0.08782128175913713, 0.17564256351827426, 0.08782128175913713] ) a_zref = xp.asarray( [1.0, -1.0047722097030667, 0.3560573367396151] ) b_z, a_z = bilinear(b, a, 0.5) xp_assert_close_nulp(b_z, b_zref) xp_assert_close_nulp(a_z, a_zref) b = xp.asarray([1, 0, 0.17407467530697837]) a = xp.asarray([1, 0.18460575326152251, 0.17407467530697837]) b_zref = xp.asarray( [0.8641286432189045, -1.2157757001964216, 0.8641286432189045] ) a_zref = xp.asarray([1.0, -1.2157757001964216, 0.7282572864378091]) b_z, a_z = bilinear(b, a, 0.5) xp_assert_close_nulp(b_z, xp.asarray(b_zref)) xp_assert_close_nulp(a_z, xp.asarray(a_zref)) @pytest.mark.xfail(DEFAULT_F32, reason="wrong answer with torch/float32") @xfail_xp_backends("cupy", reason="https://github.com/cupy/cupy/issues/9404") def test_ignore_leading_zeros(self, xp): # regression for gh-6606 # results shouldn't change when leading zeros are added to # input numerator or denominator b = [0.14879732743343033] a = [1, 0.54552236880522209, 0.14879732743343033] b, a = map(xp.asarray, (b, a)) b_zref = xp.asarray( [0.08782128175913713, 0.17564256351827426, 0.08782128175913713] ) a_zref = xp.asarray([1.0, -1.0047722097030667, 0.3560573367396151]) for lzn, lzd in product(range(4), range(4)): b_z, a_z = bilinear(xpx.pad(b, (lzn, 0), xp=xp), xpx.pad(a, (lzd, 0), xp=xp), 0.5) xp_assert_close_nulp(b_z, b_zref) xp_assert_close_nulp(a_z, a_zref) @pytest.mark.xfail(DEFAULT_F32, reason="wrong answer with torch/float32") @xfail_xp_backends("cupy", reason="complex inputs not supported") def test_complex(self, xp): # reference output values computed with sympy # this is an elliptical filter, 5Hz width, centered at +50Hz: # z, p, k = signal.ellip(2, 0.5, 20, 2*np.pi*5/2, output='zpk', analog=True) # z = z.astype(complex) + 2j * np.pi * 50 # p = p.astype(complex) + 2j * np.pi * 50 # b, a = signal.zpk2tf(z, p, k) b = [(0.09999999999999991+0j), -62.831853071795805j, (-9505.857007071314+0j)] a = [(1+0j), (21.09511000343942-628.3185307179587j), (-98310.74322875646-6627.2242613473845j)] b, a = map(xp.asarray, (b, a)) # sample frequency fs = 1000 b_zref = [(0.09905575106715676-0.00013441423112828688j), (-0.18834281923181084-0.06032810039049478j), (0.08054306669414343+0.05766172295523972j)] a_zref = [(1+0j), (-1.8839476369292854-0.606808151331815j), (0.7954687330018285+0.5717459398142481j)] b_zref, a_zref = map(xp.asarray, (b_zref, a_zref)) b_z, a_z = bilinear(b, a, fs) # the 3 ulp difference determined from testing xp_assert_close_nulp(b_z, b_zref, nulp=3) xp_assert_close_nulp(a_z, a_zref, nulp=3) def test_fs_validation(self): b = [0.14879732743343033] a = [1, 0.54552236880522209, 0.14879732743343033] with pytest.raises(ValueError, match="Sampling.*single scalar"): bilinear(b, a, fs=np.array([10, 20])) with pytest.raises(ValueError, match="Sampling.*be none"): bilinear(b, a, fs=None) @make_xp_test_case(lp2lp_zpk)
TestBilinear
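For orientation, a worked call on the classic first-order analog low-pass H(s) = 1/(s + 1); the coefficients here illustrate the call shape and are not taken from the tests:

```python
from scipy.signal import bilinear

# Discretize H(s) = 1 / (s + 1) at fs = 10 Hz via the bilinear transform.
b_z, a_z = bilinear([1.0], [1.0, 1.0], fs=10)
print(b_z, a_z)  # a_z[0] is normalized to 1, matching the references above
```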
python
pytorch__pytorch
test/dynamo/test_subclasses.py
{ "start": 6482, "end": 6927 }
class ____(torch.Tensor): @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): if func == torch.Tensor.sigmoid: return super().__torch_function__(torch.Tensor.exp, types, args, kwargs) return super().__torch_function__(func, types, args, kwargs) # Wrapper subclass with two inner tensors: data and scale # data has same shape as outer, and scale has single dim size
SigmoidToExpSubclass
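A quick hypothetical check (not part of the test file): tensor method calls route through `__torch_function__`, so calling `.sigmoid()` on the subclass actually computes `exp`:

```python
import torch

t = torch.zeros(3).as_subclass(SigmoidToExpSubclass)
print(t.sigmoid())  # tensor([1., 1., 1.]) -- exp(0), not sigmoid(0) == 0.5
```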
python
tensorflow__tensorflow
tensorflow/python/keras/layers/pooling.py
{ "start": 42054, "end": 43953 }
class ____(GlobalPooling2D): """Global max pooling operation for spatial data. Examples: >>> input_shape = (2, 4, 5, 3) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.GlobalMaxPool2D()(x) >>> print(y.shape) (2, 3) Args: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". keepdims: A boolean, whether to keep the spatial dimensions or not. If `keepdims` is `False` (default), the rank of the tensor is reduced for spatial dimensions. If `keepdims` is `True`, the spatial dimensions are retained with length 1. The behavior is the same as for `tf.reduce_max` or `np.max`. Input shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`. Output shape: - If `keepdims`=False: 2D tensor with shape `(batch_size, channels)`. - If `keepdims`=True: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, 1, 1, channels)` - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, 1, 1)` """ def call(self, inputs): if self.data_format == 'channels_last': return backend.max(inputs, axis=[1, 2], keepdims=self.keepdims) else: return backend.max(inputs, axis=[2, 3], keepdims=self.keepdims)
GlobalMaxPooling2D
python
rapidsai__cudf
python/cudf/cudf/core/index.py
{ "start": 113405, "end": 152857 }
class ____(Index): """ Immutable, ordered and sliceable sequence of datetime64 data, represented internally as int64. Parameters ---------- data : array-like (1-dimensional), optional Optional datetime-like data to construct index with. copy : bool Make a copy of input. freq : str, optional Frequency of the DatetimeIndex tz : pytz.timezone or dateutil.tz.tzfile This is not yet supported ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' This is not yet supported name : object Name to be stored in the index. dayfirst : bool, default False If True, parse dates in data with the day first order. This is not yet supported yearfirst : bool, default False If True parse dates in data with the year first order. This is not yet supported Attributes ---------- year month day hour minute second microsecond nanosecond date time dayofyear day_of_year weekday quarter freq Methods ------- ceil floor round tz_convert tz_localize Returns ------- DatetimeIndex Examples -------- >>> import cudf >>> cudf.DatetimeIndex([1, 2, 3, 4], name="a") DatetimeIndex(['1970-01-01 00:00:00.000000001', '1970-01-01 00:00:00.000000002', '1970-01-01 00:00:00.000000003', '1970-01-01 00:00:00.000000004'], dtype='datetime64[ns]', name='a') """ timestamp_to_timedelta = { plc.DataType(plc.TypeId.TIMESTAMP_NANOSECONDS): plc.DataType( plc.TypeId.DURATION_NANOSECONDS ), plc.DataType(plc.TypeId.TIMESTAMP_MICROSECONDS): plc.DataType( plc.TypeId.DURATION_MICROSECONDS ), plc.DataType(plc.TypeId.TIMESTAMP_SECONDS): plc.DataType( plc.TypeId.DURATION_SECONDS ), plc.DataType(plc.TypeId.TIMESTAMP_MILLISECONDS): plc.DataType( plc.TypeId.DURATION_MILLISECONDS ), } _allowed = ( "days", "hours", "minutes", "seconds", "milliseconds", "microseconds", "nanoseconds", ) MONTHLY_PERIODS = { pd.Timedelta("28 days"), pd.Timedelta("29 days"), pd.Timedelta("30 days"), pd.Timedelta("31 days"), } YEARLY_PERIODS = { pd.Timedelta("365 days"), pd.Timedelta("366 days"), } @_performance_tracking def __init__( self, data=None, freq=None, tz=None, normalize: bool = False, closed=None, ambiguous: Literal["raise"] = "raise", dayfirst: bool = False, yearfirst: bool = False, dtype=None, copy: bool = False, name=None, nan_as_null=no_default, ): self._freq = None # we should be more strict on what we accept here but # we'd have to go and figure out all the semantics around # pandas dtindex creation first. For now, # just make sure we handle np.datetime64 arrays # and then just dispatch upstream was_pd_index = isinstance(data, pd.DatetimeIndex) if tz is not None: raise NotImplementedError("tz is not yet supported") if normalize is not False: warnings.warn( "The 'normalize' keyword is " "deprecated and will be removed in a future version. ", FutureWarning, ) raise NotImplementedError("normalize == True is not yet supported") if closed is not None: warnings.warn( "The 'closed' keyword is " "deprecated and will be removed in a future version. 
", FutureWarning, ) raise NotImplementedError("closed is not yet supported") if ambiguous != "raise": raise NotImplementedError("ambiguous is not yet supported") if dayfirst is not False: raise NotImplementedError("dayfirst == True is not yet supported") if yearfirst is not False: raise NotImplementedError("yearfirst == True is not yet supported") if freq is None: if isinstance(data, type(self)): freq = data.freq if was_pd_index and data.freq is not None: freq = data.freq.freqstr name = _getdefault_name(data, name=name) data = as_column(data) if data.dtype.kind == "b": raise ValueError( "Boolean data cannot be converted to a DatetimeIndex" ) if dtype is not None: dtype = cudf.dtype(dtype) if dtype.kind != "M": raise TypeError("dtype must be a datetime type") elif not isinstance(data.dtype, pd.DatetimeTZDtype): data = data.astype(dtype) elif data.dtype.kind != "M": # nanosecond default matches pandas data = data.astype(np.dtype("datetime64[ns]")) if copy: data = data.copy() SingleColumnFrame.__init__( self, ColumnAccessor({name: data}, verify=False) ) self._freq = _validate_freq(freq) # existing pandas index needs no additional validation if self._freq is not None and not was_pd_index: unique_vals = self.to_series().diff().unique() if self._freq == cudf.DateOffset(months=1): possible = pd.Series(list(self.MONTHLY_PERIODS | {pd.NaT})) if unique_vals.isin(possible).sum() != len(unique_vals): raise ValueError("No unique frequency found") elif self._freq == cudf.DateOffset(years=1): possible = pd.Series(list(self.YEARLY_PERIODS | {pd.NaT})) if unique_vals.isin(possible).sum() != len(unique_vals): raise ValueError("No unique frequency found") else: if len(unique_vals) > 2 or ( len(unique_vals) == 2 and unique_vals[1] != self._freq._maybe_as_fast_pandas_offset() ): raise ValueError("No unique frequency found") @_performance_tracking def serialize(self): header, frames = super().serialize() if self.freq is not None: header["freq"] = { "kwds": self.freq.kwds, } else: header["freq"] = None return header, frames @classmethod @_performance_tracking def deserialize(cls, header, frames): obj = super().deserialize(header, frames) if (header_payload := header.get("freq")) is not None: freq = cudf.DateOffset(**header_payload["kwds"]) else: freq = None obj._freq = _validate_freq(freq) return obj @_performance_tracking def _copy_type_metadata(self: Self, other: Self) -> Self: super()._copy_type_metadata(other) self._freq = _validate_freq(other._freq) return self @classmethod def _from_data( cls, data: MutableMapping, name: Any = no_default, freq: Any = None ): result = super()._from_data(data, name) result._freq = _validate_freq(freq) return result @classmethod @_performance_tracking def _from_column( cls, column: ColumnBase, *, name: Hashable = None, freq: Any = None ) -> Self: if column.dtype.kind != "M": raise ValueError("column must have a datetime type.") result = super()._from_column(column, name=name) result._freq = _validate_freq(freq) return result def __getitem__(self, index): value = super().__getitem__(index) if cudf.get_option("mode.pandas_compatible") and isinstance( value, np.datetime64 ): return pd.Timestamp(value) return value def find_label_range(self, loc: slice) -> slice: # For indexing, try to interpret slice arguments as datetime-convertible if any( not (val is None or isinstance(val, (str, datetime.datetime))) for val in (loc.start, loc.stop) ): raise TypeError( "Can only slice DatetimeIndex with a string or datetime objects" ) new_slice = slice( pd.to_datetime(loc.start) if loc.start 
is not None else None, pd.to_datetime(loc.stop) if loc.stop is not None else None, loc.step, ) return super().find_label_range(new_slice) @_performance_tracking def copy(self, name=None, deep=False): idx_copy = super().copy(name=name, deep=deep) return idx_copy._copy_type_metadata(self) def as_unit(self, unit: str, round_ok: bool = True) -> Self: """ Convert to a dtype with the given unit resolution. Currently not implemented. Parameters ---------- unit : {'s', 'ms', 'us', 'ns'} round_ok : bool, default True If False and the conversion requires rounding, raise ValueError. """ raise NotImplementedError("as_unit is currently not implemented") def mean(self, *, skipna: bool = True, axis: int | None = 0): return self._column.mean(skipna=skipna) def std(self, *, skipna: bool = True, axis: int | None = 0, ddof: int = 1): return self._column.std(skipna=skipna, ddof=ddof) def strftime(self, date_format: str) -> Index: """ Convert to Index using specified date_format. Return an Index of formatted strings specified by date_format, which supports the same string format as the python standard library. Parameters ---------- date_format : str Date format string (e.g. "%Y-%m-%d"). """ return Index._from_column( self._column.strftime(date_format), name=self.name ) @cached_property def _constructor(self): return DatetimeIndex @cached_property def inferred_type(self) -> str: return "datetime64" @cached_property def asi8(self) -> cupy.ndarray: return self._column.astype(np.dtype(np.int64)).values @property def inferred_freq(self) -> DateOffset | MonthEnd | YearEnd | None: if self._freq: return self._freq plc_col = self._column.to_pylibcudf(mode="read") shifted = plc.copying.shift( plc_col, 1, plc.Scalar.from_py(None, dtype=plc_col.type()) ) diff = plc.binaryop.binary_operation( plc_col, shifted, plc.binaryop.BinaryOperator.SUB, self.timestamp_to_timedelta[plc_col.type()], ) offset = plc.Column( diff.type(), diff.size() - 1, diff.data(), diff.null_mask(), diff.null_count(), 1, diff.children(), ) uniques = plc.stream_compaction.stable_distinct( plc.Table([offset]), [0], plc.stream_compaction.DuplicateKeepOption.KEEP_FIRST, plc.types.NullEquality.EQUAL, plc.types.NanEquality.ALL_EQUAL, ).columns()[0] if uniques.size() <= 4: # inspect a small host copy for special cases uniques_host = uniques.to_arrow().to_pylist() if uniques.size() == 1: # base case of a fixed frequency freq = uniques_host[0] # special case of YS-JAN, YS-FEB, etc # 365 days is allowable, but if it's the first of the month, pandas # has a special freq for it, which would take more work to determine # same with specifically 7 day intervals, where pandas has a unique # frequency depending on the day of the week corresponding to the days if freq == pd.Timedelta("365 days") and not self.is_year_end.all(): raise NotImplementedError("Can't infer anchored year start") elif freq == pd.Timedelta("7 days"): raise NotImplementedError("Can't infer anchored week") assert isinstance(freq, pd.Timedelta) # pacify mypy cmps = freq.components kwds = {} for component in self._allowed: if (c := getattr(cmps, component)) != 0: kwds[component] = c return cudf.DateOffset(**kwds) # maximum unique count supported is months with 4 unique lengths # bail above that for now elif 1 < uniques.size() <= 4: # length between 1 and 4, small host copy if all(x in self.YEARLY_PERIODS for x in uniques_host): # Could be year end or could be an anchored year end if self.is_year_end.all(): return cudf.DateOffset._from_freqstr("YE-DEC") else: raise NotImplementedError() elif all(x in 
self.MONTHLY_PERIODS for x in uniques_host): if self.is_month_end.all(): return cudf.DateOffset._from_freqstr("ME") else: raise NotImplementedError else: return None return None def _get_slice_frequency(self, slc=None): if slc.step in (1, None): # no change in freq return self._freq if slc == slice(None, None, None): return self._freq else: if slc: # fastpath: don't introspect new_freq = slc.step * pd.Timedelta( self._freq._maybe_as_fast_pandas_offset() ) return cudf.DateOffset._from_freqstr( pd.tseries.frequencies.to_offset(new_freq).freqstr ) else: return self.inferred_freq @property def freq(self) -> DateOffset | None: return self._freq @freq.setter def freq(self, value) -> None: raise NotImplementedError("Setting freq is currently not supported.") @property def freqstr(self) -> str: raise NotImplementedError("freqstr is currently not implemented") @property def resolution(self) -> str: """ Returns day, hour, minute, second, millisecond or microsecond """ raise NotImplementedError("resolution is currently not implemented") @property def unit(self) -> str: return self._column.time_unit @property def tz(self) -> tzinfo | None: """ Return the timezone. Returns ------- datetime.tzinfo or None Returns None when the array is tz-naive. """ return self._column.tz @property def tzinfo(self) -> tzinfo | None: """ Alias for tz attribute """ return self.tz def to_pydatetime(self) -> np.ndarray: """ Return an ndarray of ``datetime.datetime`` objects. Returns ------- numpy.ndarray An ndarray of ``datetime.datetime`` objects. """ return self.to_pandas().to_pydatetime() def to_julian_date(self) -> Index: return Index._from_column( self._column.to_julian_date(), name=self.name ) def to_period(self, freq) -> pd.PeriodIndex: return self.to_pandas().to_period(freq=freq) def normalize(self) -> Self: """ Convert times to midnight. """ return type(self)._from_column( self._column.normalize(), name=self.name ) @cached_property def time(self) -> np.ndarray: """ Returns numpy array of ``datetime.time`` objects. The time part of the Timestamps. """ return self.to_pandas().time @cached_property def timetz(self) -> np.ndarray: """ Returns numpy array of ``datetime.time`` objects with timezones. The time part of the Timestamps. """ return self.to_pandas().timetz @cached_property def date(self) -> np.ndarray: """ Returns numpy array of python ``datetime.date`` objects. Namely, the date part of Timestamps without time and timezone information. """ return self.to_pandas().date @property def is_month_start(self) -> cupy.ndarray: """ Booleans indicating if dates are the first day of the month. """ # .is_month_start is already a cached_property return self._column.is_month_start.values @property def is_month_end(self) -> cupy.ndarray: """ Booleans indicating if dates are the last day of the month. """ # .is_month_end is already a cached_property return self._column.is_month_end.values @property def is_quarter_end(self) -> cupy.ndarray: """ Booleans indicating if dates are the last day of the quarter. """ # .is_quarter_end is already a cached_property return self._column.is_quarter_end.values @property def is_quarter_start(self) -> cupy.ndarray: """ Booleans indicating if dates are the start day of the quarter. """ # .is_quarter_start is already a cached_property return self._column.is_quarter_start.values @property def is_year_end(self) -> cupy.ndarray: """ Booleans indicating if dates are the last day of the year. 
""" # .is_year_end is already a cached_property return self._column.is_year_end.values @property def is_year_start(self) -> cupy.ndarray: """ Booleans indicating if dates are the first day of the year. """ # .is_year_start is already a cached_property return self._column.is_year_start.values @property def is_normalized(self) -> bool: """ Returns True if all of the dates are at midnight ("no time") """ # .is_normalized is already a cached_property return self._column.is_normalized @property def days_in_month(self) -> Index: """ Get the total number of days in the month that the date falls on. """ # .days_in_month is already a cached_property return Index._from_column(self._column.days_in_month, name=self.name) daysinmonth = days_in_month @property def day_of_week(self) -> Index: """ Get the day of week that the date falls on. """ # .day_of_week is already a cached_property return Index._from_column(self._column.day_of_week, name=self.name) @property @_performance_tracking def year(self) -> Index: """ The year of the datetime. Examples -------- >>> import cudf >>> import pandas as pd >>> datetime_index = cudf.Index(pd.date_range("2000-01-01", ... periods=3, freq="Y")) >>> datetime_index DatetimeIndex(['2000-12-31', '2001-12-31', '2002-12-31'], dtype='datetime64[ns]', freq='YE-DEC') >>> datetime_index.year Index([2000, 2001, 2002], dtype='int16') """ # .year is already a cached_property return Index._from_column(self._column.year, name=self.name) @property @_performance_tracking def month(self) -> Index: """ The month as January=1, December=12. Examples -------- >>> import cudf >>> import pandas as pd >>> datetime_index = cudf.Index(pd.date_range("2000-01-01", ... periods=3, freq="M")) >>> datetime_index DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31'], dtype='datetime64[ns]', freq='ME') >>> datetime_index.month Index([1, 2, 3], dtype='int16') """ # .month is already a cached_property return Index._from_column(self._column.month, name=self.name) @property @_performance_tracking def day(self) -> Index: """ The day of the datetime. Examples -------- >>> import pandas as pd >>> import cudf >>> datetime_index = cudf.Index(pd.date_range("2000-01-01", ... periods=3, freq="D")) >>> datetime_index DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'], dtype='datetime64[ns]', freq='D') >>> datetime_index.day Index([1, 2, 3], dtype='int16') """ # .day is already a cached_property return Index._from_column(self._column.day, name=self.name) @property @_performance_tracking def hour(self) -> Index: """ The hours of the datetime. Examples -------- >>> import pandas as pd >>> import cudf >>> datetime_index = cudf.Index(pd.date_range("2000-01-01", ... periods=3, freq="h")) >>> datetime_index DatetimeIndex(['2000-01-01 00:00:00', '2000-01-01 01:00:00', '2000-01-01 02:00:00'], dtype='datetime64[ns]', freq='h') >>> datetime_index.hour Index([0, 1, 2], dtype='int16') """ # .hour is already a cached_property return Index._from_column(self._column.hour, name=self.name) @property @_performance_tracking def minute(self) -> Index: """ The minutes of the datetime. Examples -------- >>> import pandas as pd >>> import cudf >>> datetime_index = cudf.Index(pd.date_range("2000-01-01", ... 
periods=3, freq="T")) >>> datetime_index DatetimeIndex(['2000-01-01 00:00:00', '2000-01-01 00:01:00', '2000-01-01 00:02:00'], dtype='datetime64[ns]', freq='min') >>> datetime_index.minute Index([0, 1, 2], dtype='int16') """ # .minute is already a cached_property return Index._from_column(self._column.minute, name=self.name) @property @_performance_tracking def second(self) -> Index: """ The seconds of the datetime. Examples -------- >>> import pandas as pd >>> import cudf >>> datetime_index = cudf.Index(pd.date_range("2000-01-01", ... periods=3, freq="s")) >>> datetime_index DatetimeIndex(['2000-01-01 00:00:00', '2000-01-01 00:00:01', '2000-01-01 00:00:02'], dtype='datetime64[ns]', freq='s') >>> datetime_index.second Index([0, 1, 2], dtype='int16') """ # .second is already a cached_property return Index._from_column(self._column.second, name=self.name) @property @_performance_tracking def microsecond(self) -> Index: """ The microseconds of the datetime. Examples -------- >>> import pandas as pd >>> import cudf >>> datetime_index = cudf.Index(pd.date_range("2000-01-01", ... periods=3, freq="us")) >>> datetime_index DatetimeIndex([ '2000-01-01 00:00:00', '2000-01-01 00:00:00.000001', '2000-01-01 00:00:00.000002'], dtype='datetime64[ns]', freq='us') >>> datetime_index.microsecond Index([0, 1, 2], dtype='int16') """ # .microsecond is already a cached_property return Index._from_column(self._column.microsecond, name=self.name) @property @_performance_tracking def nanosecond(self) -> Index: """ The nanoseconds of the datetime. Examples -------- >>> import pandas as pd >>> import cudf >>> datetime_index = cudf.Index(pd.date_range("2000-01-01", ... periods=3, freq="ns")) >>> datetime_index DatetimeIndex([ '2000-01-01 00:00:00', '2000-01-01 00:00:00.000000001', '2000-01-01 00:00:00.000000002'], dtype='datetime64[ns]', freq='ns') >>> datetime_index.nanosecond Index([0, 1, 2], dtype='int16') """ # .nanosecond is already a cached_property return Index._from_column(self._column.nanosecond, name=self.name) @property @_performance_tracking def weekday(self) -> Index: """ The day of the week with Monday=0, Sunday=6. Examples -------- >>> import pandas as pd >>> import cudf >>> datetime_index = cudf.Index(pd.date_range("2016-12-31", ... "2017-01-08", freq="D")) >>> datetime_index DatetimeIndex(['2016-12-31', '2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04', '2017-01-05', '2017-01-06', '2017-01-07', '2017-01-08'], dtype='datetime64[ns]', freq='D') >>> datetime_index.weekday Index([5, 6, 0, 1, 2, 3, 4, 5, 6], dtype='int16') """ # .weekday is already a cached_property return Index._from_column(self._column.weekday, name=self.name) @property @_performance_tracking def dayofweek(self) -> Index: """ The day of the week with Monday=0, Sunday=6. Examples -------- >>> import pandas as pd >>> import cudf >>> datetime_index = cudf.Index(pd.date_range("2016-12-31", ... "2017-01-08", freq="D")) >>> datetime_index DatetimeIndex(['2016-12-31', '2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04', '2017-01-05', '2017-01-06', '2017-01-07', '2017-01-08'], dtype='datetime64[ns]', freq='D') >>> datetime_index.dayofweek Index([5, 6, 0, 1, 2, 3, 4, 5, 6], dtype='int16') """ # .weekday is already a cached_property return Index._from_column(self._column.weekday, name=self.name) @property @_performance_tracking def dayofyear(self) -> Index: """ The day of the year, from 1-365 in non-leap years and from 1-366 in leap years. 
Examples -------- >>> import pandas as pd >>> import cudf >>> datetime_index = cudf.Index(pd.date_range("2016-12-31", ... "2017-01-08", freq="D")) >>> datetime_index DatetimeIndex(['2016-12-31', '2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04', '2017-01-05', '2017-01-06', '2017-01-07', '2017-01-08'], dtype='datetime64[ns]', freq='D') >>> datetime_index.dayofyear Index([366, 1, 2, 3, 4, 5, 6, 7, 8], dtype='int16') """ # .day_of_year is already a cached_property return Index._from_column(self._column.day_of_year, name=self.name) @property @_performance_tracking def day_of_year(self) -> Index: """ The day of the year, from 1-365 in non-leap years and from 1-366 in leap years. Examples -------- >>> import pandas as pd >>> import cudf >>> datetime_index = cudf.Index(pd.date_range("2016-12-31", ... "2017-01-08", freq="D")) >>> datetime_index DatetimeIndex(['2016-12-31', '2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04', '2017-01-05', '2017-01-06', '2017-01-07', '2017-01-08'], dtype='datetime64[ns]', freq='D') >>> datetime_index.day_of_year Index([366, 1, 2, 3, 4, 5, 6, 7, 8], dtype='int16') """ # .day_of_year is already a cached_property return Index._from_column(self._column.day_of_year, name=self.name) @property @_performance_tracking def is_leap_year(self) -> cupy.ndarray: """ Boolean indicator if the date belongs to a leap year. A leap year is a year, which has 366 days (instead of 365) including 29th of February as an intercalary day. Leap years are years which are multiples of four with the exception of years divisible by 100 but not by 400. Returns ------- ndarray Booleans indicating if dates belong to a leap year. """ # .is_leap_year is already a cached_property res = self._column.is_leap_year.fillna(False) return cupy.asarray(res) @property @_performance_tracking def quarter(self) -> Index: """ Integer indicator for which quarter of the year the date belongs in. There are 4 quarters in a year. With the first quarter being from January - March, second quarter being April - June, third quarter being July - September and fourth quarter being October - December. Returns ------- Index Integer indicating which quarter the date belongs to. Examples -------- >>> import cudf >>> gIndex = cudf.DatetimeIndex(["2020-05-31 08:00:00", ... "1999-12-31 18:40:00"]) >>> gIndex.quarter Index([2, 4], dtype='int8') """ # .quarter is already a cached_property return Index._from_column( self._column.quarter.astype(np.dtype(np.int8)) ) @_performance_tracking def day_name(self, locale: str | None = None) -> Index: """ Return the day names. Currently supports English locale only. Examples -------- >>> import cudf >>> datetime_index = cudf.date_range("2016-12-31", "2017-01-08", freq="D") >>> datetime_index DatetimeIndex(['2016-12-31', '2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04', '2017-01-05', '2017-01-06', '2017-01-07', '2017-01-08'], dtype='datetime64[ns]', freq='D') >>> datetime_index.day_name() Index(['Saturday', 'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'], dtype='object') """ day_names = self._column.get_day_names(locale) return Index._from_column(day_names, name=self.name) @_performance_tracking def month_name(self, locale: str | None = None) -> Index: """ Return the month names. Currently supports English locale only. 
Examples -------- >>> import cudf >>> datetime_index = cudf.date_range("2017-12-30", periods=6, freq='W') >>> datetime_index DatetimeIndex(['2017-12-30', '2018-01-06', '2018-01-13', '2018-01-20', '2018-01-27', '2018-02-03'], dtype='datetime64[ns]', freq='7D') >>> datetime_index.month_name() Index(['December', 'January', 'January', 'January', 'January', 'February'], dtype='object') """ month_names = self._column.get_month_names(locale) return Index._from_column(month_names, name=self.name) @_performance_tracking def isocalendar(self) -> DataFrame: """ Returns a DataFrame with the year, week, and day calculated according to the ISO 8601 standard. Returns ------- DataFrame with columns year, week and day Examples -------- >>> gIndex = cudf.DatetimeIndex(["2020-05-31 08:00:00", ... "1999-12-31 18:40:00"]) >>> gIndex.isocalendar() year week day 2020-05-31 08:00:00 2020 22 7 1999-12-31 18:40:00 1999 52 5 """ ca = ColumnAccessor(self._column.isocalendar(), verify=False) return cudf.DataFrame._from_data(ca, index=self) @_performance_tracking def to_pandas( self, *, nullable: bool = False, arrow_type: bool = False ) -> pd.DatetimeIndex: result = super().to_pandas(nullable=nullable, arrow_type=arrow_type) if not arrow_type and self._freq is not None: result.freq = self._freq._maybe_as_fast_pandas_offset() return result def _is_boolean(self) -> bool: return False @_performance_tracking def ceil(self, freq: str) -> Self: """ Perform ceil operation on the data to the specified freq. Parameters ---------- freq : str One of ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"]. Must be a fixed frequency like 'S' (second) not 'ME' (month end). See `frequency aliases <https://pandas.pydata.org/docs/\ user_guide/timeseries.html#timeseries-offset-aliases>`__ for more details on these aliases. Returns ------- DatetimeIndex Index of the same type for a DatetimeIndex Examples -------- >>> import cudf >>> gIndex = cudf.DatetimeIndex([ ... "2020-05-31 08:05:42", ... "1999-12-31 18:40:30", ... ]) >>> gIndex.ceil("T") DatetimeIndex(['2020-05-31 08:06:00', '1999-12-31 18:41:00'], dtype='datetime64[ns]') """ return type(self)._from_column(self._column.ceil(freq), name=self.name) @_performance_tracking def floor(self, freq: str) -> Self: """ Perform floor operation on the data to the specified freq. Parameters ---------- freq : str One of ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"]. Must be a fixed frequency like 'S' (second) not 'ME' (month end). See `frequency aliases <https://pandas.pydata.org/docs/\ user_guide/timeseries.html#timeseries-offset-aliases>`__ for more details on these aliases. Returns ------- DatetimeIndex Index of the same type for a DatetimeIndex Examples -------- >>> import cudf >>> gIndex = cudf.DatetimeIndex([ ... "2020-05-31 08:59:59", ... "1999-12-31 18:44:59", ... ]) >>> gIndex.floor("T") DatetimeIndex(['2020-05-31 08:59:00', '1999-12-31 18:44:00'], dtype='datetime64[ns]') """ return type(self)._from_column( self._column.floor(freq), name=self.name ) @_performance_tracking def round(self, freq: str) -> Self: """ Perform round operation on the data to the specified freq. Parameters ---------- freq : str One of ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"]. Must be a fixed frequency like 'S' (second) not 'ME' (month end). See `frequency aliases <https://pandas.pydata.org/docs/\ user_guide/timeseries.html#timeseries-offset-aliases>`__ for more details on these aliases. Returns ------- DatetimeIndex Index containing rounded datetimes. 
Examples -------- >>> import cudf >>> dt_idx = cudf.Index([ ... "2001-01-01 00:04:45", ... "2001-01-01 00:04:58", ... "2001-01-01 00:05:04", ... ], dtype="datetime64[ns]") >>> dt_idx DatetimeIndex(['2001-01-01 00:04:45', '2001-01-01 00:04:58', '2001-01-01 00:05:04'], dtype='datetime64[ns]') >>> dt_idx.round('H') DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01'], dtype='datetime64[ns]') >>> dt_idx.round('T') DatetimeIndex(['2001-01-01 00:05:00', '2001-01-01 00:05:00', '2001-01-01 00:05:00'], dtype='datetime64[ns]') """ return type(self)._from_column( self._column.round(freq), name=self.name ) def tz_localize( self, tz: str | None, ambiguous: Literal["NaT"] = "NaT", nonexistent: Literal["NaT"] = "NaT", ) -> Self: """ Localize timezone-naive data to timezone-aware data. Parameters ---------- tz : str Timezone to convert timestamps to. Returns ------- DatetimeIndex containing timezone aware timestamps. Examples -------- >>> import cudf >>> import pandas as pd >>> tz_naive = cudf.date_range('2018-03-01 09:00', periods=3, freq='D') >>> tz_aware = tz_naive.tz_localize("America/New_York") >>> tz_aware DatetimeIndex(['2018-03-01 09:00:00-05:00', '2018-03-02 09:00:00-05:00', '2018-03-03 09:00:00-05:00'], dtype='datetime64[ns, America/New_York]', freq='D') Ambiguous or nonexistent datetimes are converted to NaT. >>> s = cudf.to_datetime(cudf.Series(['2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.dt.tz_localize("CET") 0 2018-10-28 01:20:00.000000000 1 NaT 2 2018-10-28 03:46:00.000000000 dtype: datetime64[ns, CET] Notes ----- 'NaT' is currently the only supported option for the ``ambiguous`` and ``nonexistent`` arguments. Any ambiguous or nonexistent timestamps are converted to 'NaT'. """ result_col = self._column.tz_localize(tz, ambiguous, nonexistent) return DatetimeIndex._from_column( result_col, name=self.name, freq=self._freq ) def tz_convert(self, tz: str | None) -> Self: """ Convert tz-aware datetimes from one time zone to another. Parameters ---------- tz : str Time zone for time. Corresponding timestamps would be converted to this time zone of the Datetime Array/Index. A `tz` of None will convert to UTC and remove the timezone information. Returns ------- DatetimeIndex containing timestamps corresponding to the timezone `tz`. Examples -------- >>> import cudf >>> dti = cudf.date_range('2018-03-01 09:00', periods=3, freq='D') >>> dti = dti.tz_localize("America/New_York") >>> dti DatetimeIndex(['2018-03-01 09:00:00-05:00', '2018-03-02 09:00:00-05:00', '2018-03-03 09:00:00-05:00'], dtype='datetime64[ns, America/New_York]', freq='D') >>> dti.tz_convert("Europe/London") DatetimeIndex(['2018-03-01 14:00:00+00:00', '2018-03-02 14:00:00+00:00', '2018-03-03 14:00:00+00:00'], dtype='datetime64[ns, Europe/London]') """ result_col = self._column.tz_convert(tz) return DatetimeIndex._from_column(result_col, name=self.name) def repeat(self, repeats, axis=None) -> Self: res = super().repeat(repeats, axis=axis) res._freq = None return res
DatetimeIndex
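The `inferred_freq` property above works by first-differencing the timestamps and counting distinct deltas. The same idea sketched with plain pandas on the host (not the libcudf path used in the class):

```python
import pandas as pd

# Diff-and-distinct: a single unique delta means a fixed frequency.
idx = pd.DatetimeIndex(["2024-01-01", "2024-01-02", "2024-01-03"])
diffs = idx.to_series().diff().dropna().unique()
if len(diffs) == 1:
    print(pd.tseries.frequencies.to_offset(pd.Timedelta(diffs[0])))  # <Day>
```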
python
django-import-export__django-import-export
tests/core/tests/admin_integration/test_action_export.py
{ "start": 714, "end": 9206 }
class ____(AdminTestMixin, TestCase): def setUp(self): super().setUp() self.cat1 = Category.objects.create(name="Cat 1") self.cat2 = Category.objects.create(name="Cat 2") # fields payload for `CategoryResource` - # to export using `SelectableFieldsExportForm` self.resource_fields_payload = { "categoryresource_id": True, "categoryresource_name": True, } def _check_export_response(self, response): self.assertContains(response, self.cat1.name, status_code=200) self.assertNotContains(response, self.cat2.name, status_code=200) self.assertTrue(response.has_header("Content-Disposition")) date_str = datetime.now().strftime("%Y-%m-%d") self.assertEqual( response["Content-Disposition"], f'attachment; filename="Category-{date_str}.csv"', ) @override_settings(IMPORT_EXPORT_SKIP_ADMIN_ACTION_EXPORT_UI=True) def test_export_skips_export_ui_page(self): data = { "action": ["export_admin_action"], "_selected_action": [str(self.cat1.id)], } response = self._post_url_response(self.category_change_url, data) self._check_export_response(response) def test_export_displays_ui_select_page(self): data = { "action": ["export_admin_action"], "_selected_action": [str(self.cat1.id)], } response = self._post_url_response(self.category_change_url, data) self.assertIn("form", response.context) export_form = response.context["form"] data = export_form.initial self.assertEqual([self.cat1.id], data["export_items"]) self.assertIn("Export 1 selected item.", response.content.decode()) def test_export_displays_ui_select_page_multiple_items(self): data = { "action": ["export_admin_action"], "_selected_action": [str(self.cat1.id), str(self.cat2.id)], } response = self._post_url_response(self.category_change_url, data) self.assertIn("form", response.context) export_form = response.context["form"] data = export_form.initial self.assertEqual( sorted([self.cat1.id, self.cat2.id]), sorted(data["export_items"]) ) self.assertIn("Export 2 selected items.", response.content.decode()) def test_action_export_model_with_custom_PK(self): # issue 1800 cat = UUIDCategory.objects.create(name="UUIDCategory") data = { "action": ["export_admin_action"], "_selected_action": [str(cat.pk)], } response = self._post_url_response(self.uuid_category_change_url, data) self.assertIn("form", response.context) export_form = response.context["form"] data = export_form.initial self.assertEqual([cat.pk], data["export_items"]) self.assertIn("Export 1 selected item.", response.content.decode()) def test_export_post(self): # create a POST request with data selected from the 'action' export data = { "format": "0", "export_items": [str(self.cat1.id)], **self.resource_fields_payload, } self._prepend_form_prefix(data) date_str = datetime.now().strftime("%Y-%m-%d") with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) response = self._post_url_response(self.category_export_url, data) self.assertTrue(response.has_header("Content-Disposition")) self.assertEqual(response["Content-Type"], "text/csv") self.assertEqual( response["Content-Disposition"], f'attachment; filename="Category-{date_str}.csv"', ) target_str = f"id,name\r\n{self.cat1.id},Cat 1\r\n" self.assertEqual(target_str.encode(), response.content) def test_export_admin_action(self): with mock.patch( "core.admin.CategoryAdmin.export_admin_action" ) as mock_export_admin_action: response = self.client.post( self.category_change_url, { "action": "export_admin_action", "index": "0", "selected_across": "0", "_selected_action": "0", }, ) self.assertTrue(200 <= 
response.status_code <= 399) mock_export_admin_action.assert_called() def test_export_admin_action_with_restricted_pks(self): data = { "format": "0", "export_items": [str(self.cat1.id)], **self.resource_fields_payload, } self._prepend_form_prefix(data) # mock queryset to return a different set of pks than what's submitted with mock.patch("core.admin.CategoryAdmin.get_queryset") as mock_get_queryset: mock_queryset = mock.MagicMock() mock_queryset.values_list.return_value = [ 999 ] # Different pk than submitted mock_get_queryset.return_value = mock_queryset response = self._post_url_response(self.category_export_url, data) self.assertIn( "Select a valid choice. " f"{self.cat1.id} is not one of the available choices.", response.content.decode(), ) def _perform_export_action_calls_modeladmin_get_queryset_test(self, data): # Issue #1864 # ModelAdmin's get_queryset should be used in the ModelAdmin mixins with ( mock.patch( "core.admin.CategoryAdmin.get_queryset" ) as mock_modeladmin_get_queryset, mock.patch( "import_export.admin.ExportMixin.get_data_for_export" ) as mock_get_data_for_export, ): mock_queryset = mock.MagicMock(name="MockQuerySet") mock_queryset.filter.return_value = mock_queryset mock_queryset.order_by.return_value = mock_queryset # Mock values_list for the new queryset.values_list("pk", flat=True) call mock_queryset.values_list.return_value = [self.cat1.id] mock_modeladmin_get_queryset.return_value = mock_queryset self._post_url_response(self.category_export_url, data) mock_modeladmin_get_queryset.assert_called() mock_get_data_for_export.assert_called() args, kwargs = mock_get_data_for_export.call_args mock_get_data_for_export.assert_called_with( args[0], mock_queryset, **kwargs ) def test_export_action_calls_modeladmin_get_queryset(self): # Issue #1864 # Test with specific export items data = { "format": "0", "export_items": [str(self.cat1.id)], **self.resource_fields_payload, } self._prepend_form_prefix(data) with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) self._perform_export_action_calls_modeladmin_get_queryset_test(data) def test_export_action_calls_modeladmin_get_queryset_all_items(self): # Issue #1864 # Test without specific export items data = { "format": "0", **self.resource_fields_payload, } self._prepend_form_prefix(data) self._perform_export_action_calls_modeladmin_get_queryset_test(data) @override_settings(IMPORT_EXPORT_SKIP_ADMIN_EXPORT_UI=True) def test_export_action_calls_modeladmin_get_queryset_skip_export_ui(self): # Issue #1864 # Test with specific export items and skip UI data = { "format": "0", "export_items": [str(self.cat1.id)], **self.resource_fields_payload, } self._perform_export_action_calls_modeladmin_get_queryset_test(data) def test_get_export_data_raises_PermissionDenied_when_no_export_permission_assigned( self, ): request = MagicMock(spec=HttpRequest) class TestMixin(ExportMixin): model = Book def has_export_permission(self, request): return False m = TestMixin() with self.assertRaises(PermissionDenied): m.get_export_data("0", request, Book.objects.none())
ExportActionAdminIntegrationTest
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/operators/test_glue_databrew.py
{ "start": 1359, "end": 3558 }
class ____: def test_init(self): op = GlueDataBrewStartJobOperator( task_id="task_test", job_name=JOB_NAME, aws_conn_id="fake-conn-id", region_name="eu-central-1", verify="/spam/egg.pem", botocore_config={"read_timeout": 42}, ) assert op.hook.client_type == "databrew" assert op.hook.resource_type is None assert op.hook.aws_conn_id == "fake-conn-id" assert op.hook._region_name == "eu-central-1" assert op.hook._verify == "/spam/egg.pem" assert op.hook._config is not None assert op.hook._config.read_timeout == 42 op = GlueDataBrewStartJobOperator(task_id="fake_task_id", job_name=JOB_NAME) assert op.hook.aws_conn_id == "aws_default" assert op.hook._region_name is None assert op.hook._verify is None assert op.hook._config is None @mock.patch.object(GlueDataBrewHook, "conn") @mock.patch.object(GlueDataBrewHook, "get_waiter") def test_start_job_wait_for_completion(self, mock_hook_get_waiter, mock_conn): TEST_RUN_ID = "12345" operator = GlueDataBrewStartJobOperator( task_id="task_test", job_name=JOB_NAME, wait_for_completion=True, aws_conn_id="aws_default" ) mock_conn.start_job_run(mock.MagicMock(), return_value=TEST_RUN_ID) operator.execute(None) mock_hook_get_waiter.assert_called_once_with("job_complete") @mock.patch.object(GlueDataBrewHook, "conn") @mock.patch.object(GlueDataBrewHook, "get_waiter") def test_start_job_no_wait(self, mock_hook_get_waiter, mock_conn): TEST_RUN_ID = "12345" operator = GlueDataBrewStartJobOperator( task_id="task_test", job_name=JOB_NAME, wait_for_completion=False, aws_conn_id="aws_default" ) mock_conn.start_job_run(mock.MagicMock(), return_value=TEST_RUN_ID) operator.execute(None) mock_hook_get_waiter.assert_not_called() def test_template_fields(self): operator = GlueDataBrewStartJobOperator(task_id="fake_task_id", job_name=JOB_NAME) validate_template_fields(operator)
TestGlueDataBrewOperator
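One detail worth flagging in the tests above: `mock_conn.start_job_run(mock.MagicMock(), return_value=TEST_RUN_ID)` calls the mock rather than configuring it (any call on a `MagicMock` succeeds, so the tests pass regardless). The conventional way to stub a return value looks like this:

```python
from unittest import mock

conn = mock.MagicMock()
conn.start_job_run.return_value = {"RunId": "12345"}
assert conn.start_job_run(Name="job")["RunId"] == "12345"
```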
python
getsentry__sentry
tests/sentry/services/eventstore/test_query_preprocessing.py
{ "start": 395, "end": 2729 }
class ____(TestCase): def setUp(self) -> None: self.g1 = self.create_group(id=1) self.g2 = self.create_group(id=2) self.g3 = self.create_group(id=3) self.g4 = self.create_group(id=4) self.gr31 = GroupRedirect.objects.create( id=10001, organization_id=self.g1.project.organization_id, group_id=self.g1.id, previous_group_id=self.g3.id, date_added=datetime.now(UTC) - timedelta(hours=4), ) self.gr21 = GroupRedirect.objects.create( id=10002, organization_id=self.g1.project.organization_id, group_id=self.g1.id, previous_group_id=self.g2.id, date_added=datetime.now(UTC) - timedelta(hours=1), ) def test_get_all_merged_group_ids(self) -> None: assert get_all_merged_group_ids([self.g1.id]) == {self.g1.id, self.g2.id, self.g3.id} assert get_all_merged_group_ids([self.g2.id]) == {self.g1.id, self.g2.id} assert get_all_merged_group_ids([self.g3.id]) == {self.g1.id, self.g3.id} assert get_all_merged_group_ids([self.g2.id, self.g3.id]) == { self.g1.id, self.g2.id, self.g3.id, } def test_threshold(self) -> None: group = self.create_group(id=128) i = 999 local_threshold = 200 for _ in range(local_threshold + 100): old = self.create_group(id=i) i += 1 GroupRedirect.objects.create( organization_id=group.project.organization_id, group_id=group.id, previous_group_id=old.id, date_added=datetime.now(UTC) - timedelta(hours=1), ) assert len(get_all_merged_group_ids([group.id], local_threshold)) <= local_threshold + 2 def test_cache(self) -> None: from django.core.cache import cache cache.set( _build_group_redirect_by_group_id_cache_key(self.g1.id), {(self.g2.id, self.gr21.date_added), (self.g3.id, self.gr31.date_added)}, ) res = _try_get_from_cache({self.g1.id, self.g4.id}) assert res == ( {(self.g2.id, self.gr21.date_added), (self.g3.id, self.gr31.date_added)}, {self.g4.id}, )
TestQueryPreprocessing
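The assertions pin down an asymmetric closure over `GroupRedirect` rows: a merge target collects everything merged into it, while a merged-away group collects its target but not its siblings (note `[g2]` yields `{g1, g2}`, not `g3`). A hand-rolled sketch consistent with those assertions, not the Sentry implementation:

```python
def merged_group_ids(seeds, redirects):
    # redirects: set of (previous_group_id, group_id) pairs,
    # meaning "previous was merged into group".
    result, frontier = set(seeds), set(seeds)
    while frontier:  # walk forward to merge targets
        frontier = {g for p, g in redirects if p in frontier} - result
        result |= frontier
    frontier = set(seeds)
    while frontier:  # walk backward to groups merged into the seeds
        frontier = {p for p, g in redirects if g in frontier} - result
        result |= frontier
    return result

redirects = {(3, 1), (2, 1)}
assert merged_group_ids([1], redirects) == {1, 2, 3}
assert merged_group_ids([2], redirects) == {1, 2}
```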
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/many_virtual_consumer/package.py
{ "start": 216, "end": 771 }
class ____(Package): """Package that depends on many virtual packages""" homepage = "http://www.example.com/" url = "http://www.example.com/2.0.tar.gz" version("1.0", md5="abcdef1234567890abcdef1234567890") depends_on("mpi") depends_on("lapack") # This directive is an example of imposing a constraint on a # dependency if that dependency is in the DAG. This pattern # is mainly used with virtual providers. depends_on("low-priority-provider@1.0", when="^[virtuals=mpi,lapack] low-priority-provider")
ManyVirtualConsumer
python
huggingface__transformers
src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py
{ "start": 95967, "end": 96032 }
class ____(MimiLayerScale): pass
Qwen3OmniMoeCode2WavLayerScale
python
dagster-io__dagster
python_modules/dagster/dagster_tests/logging_tests/test_logging.py
{ "start": 6715, "end": 6866 }
class ____(logging.Formatter): def format(self, record): record.msg = "I was formatted" return super().format(record)
CustomFormatter
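Wiring the formatter into a handler shows the overwrite in action (logger name invented):

```python
import logging

handler = logging.StreamHandler()
handler.setFormatter(CustomFormatter("%(message)s"))
logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.warning("original message")  # emits "I was formatted"
```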
python
tensorflow__tensorflow
tensorflow/python/keras/callbacks.py
{ "start": 99857, "end": 104246 }
class ____(Callback): """Reduce learning rate when a metric has stopped improving. Models often benefit from reducing the learning rate by a factor of 2-10 once learning stagnates. This callback monitors a quantity and if no improvement is seen for a 'patience' number of epochs, the learning rate is reduced. Example: ```python reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.001) model.fit(X_train, Y_train, callbacks=[reduce_lr]) ``` Args: monitor: quantity to be monitored. factor: factor by which the learning rate will be reduced. `new_lr = lr * factor`. patience: number of epochs with no improvement after which learning rate will be reduced. verbose: int. 0: quiet, 1: update messages. mode: one of `{'auto', 'min', 'max'}`. In `'min'` mode, the learning rate will be reduced when the quantity monitored has stopped decreasing; in `'max'` mode it will be reduced when the quantity monitored has stopped increasing; in `'auto'` mode, the direction is automatically inferred from the name of the monitored quantity. min_delta: threshold for measuring the new optimum, to only focus on significant changes. cooldown: number of epochs to wait before resuming normal operation after lr has been reduced. min_lr: lower bound on the learning rate. """ def __init__(self, monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', min_delta=1e-4, cooldown=0, min_lr=0, **kwargs): super(ReduceLROnPlateau, self).__init__() self.monitor = monitor if factor >= 1.0: raise ValueError('ReduceLROnPlateau ' 'does not support a factor >= 1.0.') if 'epsilon' in kwargs: min_delta = kwargs.pop('epsilon') logging.warning('`epsilon` argument is deprecated and ' 'will be removed, use `min_delta` instead.') self.factor = factor self.min_lr = min_lr self.min_delta = min_delta self.patience = patience self.verbose = verbose self.cooldown = cooldown self.cooldown_counter = 0 # Cooldown counter. self.wait = 0 self.best = 0 self.mode = mode self.monitor_op = None self._reset() def _reset(self): """Resets wait counter and cooldown counter. """ if self.mode not in ['auto', 'min', 'max']: logging.warning('Learning rate reduction mode %s is unknown, ' 'fallback to auto mode.', self.mode) self.mode = 'auto' if (self.mode == 'min' or (self.mode == 'auto' and 'acc' not in self.monitor)): self.monitor_op = lambda a, b: np.less(a, b - self.min_delta) self.best = np.inf else: self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta) self.best = -np.inf self.cooldown_counter = 0 self.wait = 0 def on_train_begin(self, logs=None): self._reset() def on_epoch_end(self, epoch, logs=None): logs = logs or {} logs['lr'] = backend.get_value(self.model.optimizer.lr) current = logs.get(self.monitor) if current is None: logging.warning('Learning rate reduction is conditioned on metric `%s` ' 'which is not available. Available metrics are: %s', self.monitor, ','.join(list(logs.keys()))) else: if self.in_cooldown(): self.cooldown_counter -= 1 self.wait = 0 if self.monitor_op(current, self.best): self.best = current self.wait = 0 elif not self.in_cooldown(): self.wait += 1 if self.wait >= self.patience: old_lr = backend.get_value(self.model.optimizer.lr) if old_lr > np.float32(self.min_lr): new_lr = old_lr * self.factor new_lr = max(new_lr, self.min_lr) backend.set_value(self.model.optimizer.lr, new_lr) if self.verbose > 0: print('\nEpoch %05d: ReduceLROnPlateau reducing learning ' 'rate to %s.' 
% (epoch + 1, new_lr)) self.cooldown_counter = self.cooldown self.wait = 0 def in_cooldown(self): return self.cooldown_counter > 0
ReduceLROnPlateau
python
sqlalchemy__sqlalchemy
test/dialect/postgresql/test_reflection.py
{ "start": 2555, "end": 4733 }
class ____( ReflectionFixtures, fixtures.TablesTest, AssertsExecutionResults ): """Test reflection on foreign tables""" __requires__ = ("postgresql_test_dblink",) __only_on__ = "postgresql >= 9.3" __sparse_driver_backend__ = True @classmethod def define_tables(cls, metadata): from sqlalchemy.testing import config dblink = config.file_config.get( "sqla_testing", "postgres_test_db_link" ) Table( "testtable", metadata, Column("id", Integer, primary_key=True), Column("data", String(30)), ) for ddl in [ "CREATE SERVER test_server FOREIGN DATA WRAPPER postgres_fdw " "OPTIONS (dbname 'test', host '%s')" % dblink, "CREATE USER MAPPING FOR public \ SERVER test_server options (user 'scott', password 'tiger')", "CREATE FOREIGN TABLE test_foreigntable ( " " id INT, " " data VARCHAR(30) " ") SERVER test_server OPTIONS (table_name 'testtable')", ]: sa.event.listen(metadata, "after_create", sa.DDL(ddl)) for ddl in [ "DROP FOREIGN TABLE test_foreigntable", "DROP USER MAPPING FOR public SERVER test_server", "DROP SERVER test_server", ]: sa.event.listen(metadata, "before_drop", sa.DDL(ddl)) def test_foreign_table_is_reflected(self, connection): metadata = MetaData() table = Table("test_foreigntable", metadata, autoload_with=connection) eq_( set(table.columns.keys()), {"id", "data"}, "Columns of reflected foreign table didn't equal expected columns", ) def test_get_foreign_table_names(self, inspect_fixture): inspector, conn = inspect_fixture ft_names = inspector.get_foreign_table_names() eq_(ft_names, ["test_foreigntable"]) def test_get_table_names_no_foreign(self, connection): inspector = inspect(connection) names = inspector.get_table_names() eq_(names, ["testtable"])
ForeignTableReflectionTest
python
ray-project__ray
release/nightly_tests/multimodal_inference_benchmarks/document_embedding/daft_main.py
{ "start": 2084, "end": 3945 }
class ____: def __init__(self): from sentence_transformers import SentenceTransformer device = "cuda" if torch.cuda.is_available() else "cpu" self.model = SentenceTransformer(EMBED_MODEL_ID, device=device) self.model.compile() def __call__(self, text_col): if len(text_col) == 0: return [] embeddings = self.model.encode( text_col.to_pylist(), convert_to_tensor=True, # torch_dtype=torch.bfloat16, ) return embeddings.cpu().numpy() start_time = time.time() df = daft.read_parquet(INPUT_PATH) df = df.where(daft.col("file_name").str.endswith(".pdf")) df = df.with_column("pdf_bytes", df["uploaded_pdf_path"].url.download()) pages_struct_type = daft.DataType.struct( fields={"text": daft.DataType.string(), "page_number": daft.DataType.int32()} ) df = df.with_column( "pages", df["pdf_bytes"].apply( extract_text_from_parsed_pdf, return_dtype=daft.DataType.list(pages_struct_type), ), ) df = df.explode("pages") df = df.with_columns( {"page_text": col("pages")["text"], "page_number": col("pages")["page_number"]} ) df = df.where(daft.col("page_text").not_null()) chunks_struct_type = daft.DataType.struct( fields={"text": daft.DataType.string(), "chunk_id": daft.DataType.int32()} ) df = df.with_column( "chunks", df["page_text"].apply(chunk, return_dtype=daft.DataType.list(chunks_struct_type)), ) df = df.explode("chunks") df = df.with_columns( {"chunk": col("chunks")["text"], "chunk_id": col("chunks")["chunk_id"]} ) df = df.where(daft.col("chunk").not_null()) df = df.with_column("embedding", Embedder(df["chunk"])) df = df.select("uploaded_pdf_path", "page_number", "chunk_id", "chunk", "embedding") df.write_parquet(OUTPUT_PATH) print("Runtime:", time.time() - start_time)
Embedder
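The UDF above is a thin wrapper around `SentenceTransformer.encode`. A minimal standalone version of that step (the model id below is illustrative, since `EMBED_MODEL_ID` is defined outside this span):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")  # placeholder id
vecs = model.encode(["first chunk", "second chunk"], convert_to_tensor=True)
print(vecs.shape)  # (2, embedding_dim); .cpu().numpy() matches what the UDF returns
```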
python
ray-project__ray
python/ray/tune/examples/mnist_pytorch.py
{ "start": 530, "end": 5044 }
class ____(nn.Module): def __init__(self): super(ConvNet, self).__init__() self.conv1 = nn.Conv2d(1, 3, kernel_size=3) self.fc = nn.Linear(192, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 3)) x = x.view(-1, 192) x = self.fc(x) return F.log_softmax(x, dim=1) def train_func(model, optimizer, train_loader, device=None): device = device or torch.device("cpu") model.train() for batch_idx, (data, target) in enumerate(train_loader): if batch_idx * len(data) > EPOCH_SIZE: return data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() def test_func(model, data_loader, device=None): device = device or torch.device("cpu") model.eval() correct = 0 total = 0 with torch.no_grad(): for batch_idx, (data, target) in enumerate(data_loader): if batch_idx * len(data) > TEST_SIZE: break data, target = data.to(device), target.to(device) outputs = model(data) _, predicted = torch.max(outputs.data, 1) total += target.size(0) correct += (predicted == target).sum().item() return correct / total def get_data_loaders(batch_size=64): mnist_transforms = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] ) # We add FileLock here because multiple workers will want to # download data, and this may cause overwrites since # DataLoader is not threadsafe. with FileLock(os.path.expanduser("~/data.lock")): train_loader = torch.utils.data.DataLoader( datasets.MNIST( "~/data", train=True, download=True, transform=mnist_transforms ), batch_size=batch_size, shuffle=True, ) test_loader = torch.utils.data.DataLoader( datasets.MNIST( "~/data", train=False, download=True, transform=mnist_transforms ), batch_size=batch_size, shuffle=True, ) return train_loader, test_loader def train_mnist(config): should_checkpoint = config.get("should_checkpoint", False) use_cuda = torch.cuda.is_available() device = torch.device("cuda" if use_cuda else "cpu") train_loader, test_loader = get_data_loaders() model = ConvNet().to(device) optimizer = optim.SGD( model.parameters(), lr=config["lr"], momentum=config["momentum"] ) while True: train_func(model, optimizer, train_loader, device) acc = test_func(model, test_loader, device) metrics = {"mean_accuracy": acc} # Report metrics (and possibly a checkpoint) if should_checkpoint: with tempfile.TemporaryDirectory() as tempdir: torch.save(model.state_dict(), os.path.join(tempdir, "model.pt")) tune.report(metrics, checkpoint=Checkpoint.from_directory(tempdir)) else: tune.report(metrics) if __name__ == "__main__": parser = argparse.ArgumentParser(description="PyTorch MNIST Example") parser.add_argument( "--cuda", action="store_true", default=False, help="Enables GPU training" ) parser.add_argument( "--smoke-test", action="store_true", help="Finish quickly for testing" ) args, _ = parser.parse_known_args() ray.init(num_cpus=2 if args.smoke_test else None) # for early stopping sched = AsyncHyperBandScheduler() resources_per_trial = {"cpu": 2, "gpu": int(args.cuda)} # set this for GPUs tuner = tune.Tuner( tune.with_resources(train_mnist, resources=resources_per_trial), tune_config=tune.TuneConfig( metric="mean_accuracy", mode="max", scheduler=sched, num_samples=1 if args.smoke_test else 50, ), run_config=tune.RunConfig( name="exp", stop={ "mean_accuracy": 0.98, "training_iteration": 5 if args.smoke_test else 100, }, ), param_space={ "lr": tune.loguniform(1e-4, 1e-2), "momentum": tune.uniform(0.1, 0.9), }, ) results = tuner.fit() print("Best config is:", 
results.get_best_result().config) assert not results.errors
ConvNet
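The magic number 192 in `ConvNet` follows from the layer shapes: a 3x3 conv takes a 28x28 MNIST image to 26x26, `max_pool2d` with kernel 3 (stride defaults to the kernel size) takes that to 8x8, and 3 output channels give 3 * 8 * 8 = 192. A quick shape check:

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 28, 28)                # one MNIST-sized image
conv = torch.nn.Conv2d(1, 3, kernel_size=3)  # same layer as ConvNet.conv1
out = F.max_pool2d(F.relu(conv(x)), 3)
print(out.shape)                 # torch.Size([1, 3, 8, 8])
print(out.view(-1, 192).shape)   # torch.Size([1, 192]) -- matches self.fc's in_features
```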
python
apache__airflow
airflow-core/tests/unit/utils/test_trigger_rule.py
{ "start": 890, "end": 1979 }
class ____: def test_valid_trigger_rules(self): assert TriggerRule.is_valid(TriggerRule.ALL_SUCCESS) assert TriggerRule.is_valid(TriggerRule.ALL_FAILED) assert TriggerRule.is_valid(TriggerRule.ALL_DONE) assert TriggerRule.is_valid(TriggerRule.ALL_SKIPPED) assert TriggerRule.is_valid(TriggerRule.ONE_SUCCESS) assert TriggerRule.is_valid(TriggerRule.ONE_FAILED) assert TriggerRule.is_valid(TriggerRule.ONE_DONE) assert TriggerRule.is_valid(TriggerRule.NONE_FAILED) assert TriggerRule.is_valid(TriggerRule.NONE_SKIPPED) assert TriggerRule.is_valid(TriggerRule.ALWAYS) assert TriggerRule.is_valid(TriggerRule.NONE_FAILED_MIN_ONE_SUCCESS) assert TriggerRule.is_valid(TriggerRule.ALL_DONE_SETUP_SUCCESS) assert TriggerRule.is_valid(TriggerRule.ALL_DONE_MIN_ONE_SUCCESS) assert len(TriggerRule.all_triggers()) == 13 with pytest.raises(ValueError, match="'NOT_EXIST_TRIGGER_RULE' is not a valid TriggerRule"): TriggerRule("NOT_EXIST_TRIGGER_RULE")
TestTriggerRule
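For reference, the API exercised by the test is straightforward; the import path below is the conventional `airflow.utils.trigger_rule` and may differ across Airflow versions:

```python
from airflow.utils.trigger_rule import TriggerRule

assert TriggerRule.is_valid(TriggerRule.ONE_FAILED)
print(len(TriggerRule.all_triggers()))  # 13, per the test above

try:
    TriggerRule("NOT_EXIST_TRIGGER_RULE")
except ValueError as exc:
    print(exc)  # "'NOT_EXIST_TRIGGER_RULE' is not a valid TriggerRule"
```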
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1252677, "end": 1252944 }
class ____(sgqlc.types.Type, Node, AuditEntry, OauthApplicationAuditEntryData, OrganizationAuditEntryData):
    """Audit log entry for an org.oauth_app_access_approved event."""

    __schema__ = github_schema
    __field_names__ = ()
OrgOauthAppAccessApprovedAuditEntry
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/postgresql/_psycopg_common.py
{ "start": 804, "end": 1886 }
class ____(sqltypes.NumericCommon): def bind_processor(self, dialect): return None def result_processor(self, dialect, coltype): if self.asdecimal: if coltype in _FLOAT_TYPES: return processors.to_decimal_processor_factory( decimal.Decimal, self._effective_decimal_return_scale ) elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: # psycopg returns Decimal natively for 1700 return None else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype ) else: if coltype in _FLOAT_TYPES: # psycopg returns float natively for 701 return None elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: return processors.to_float else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype )
_PsycopgNumericCommon
python
dagster-io__dagster
python_modules/dagster/dagster/_core/scheduler/instigation.py
{ "start": 32404, "end": 33254 }
class ____(Generic[T_EntityKey]): id: int serialized_evaluation_body: str evaluation_id: int timestamp: float key: T_EntityKey @classmethod def from_db_row(cls, row) -> "AutoMaterializeAssetEvaluationRecord": return AutoMaterializeAssetEvaluationRecord( id=row["id"], serialized_evaluation_body=row["asset_evaluation_body"], evaluation_id=row["evaluation_id"], timestamp=utc_datetime_from_naive(row["create_timestamp"]).timestamp(), key=entity_key_from_db_string(row["asset_key"]), ) def get_evaluation_with_run_ids(self) -> AutomationConditionEvaluationWithRunIds[T_EntityKey]: return deserialize_value( self.serialized_evaluation_body, AutomationConditionEvaluationWithRunIds )
AutoMaterializeAssetEvaluationRecord
python
pytorch__pytorch
torch/nn/modules/activation.py
{ "start": 9663, "end": 10724 }
class ____(Module): r"""Applies the Hardsigmoid function element-wise. Hardsigmoid is defined as: .. math:: \text{Hardsigmoid}(x) = \begin{cases} 0 & \text{if~} x \le -3, \\ 1 & \text{if~} x \ge +3, \\ x / 6 + 1 / 2 & \text{otherwise} \end{cases} Args: inplace: can optionally do the operation in-place. Default: ``False`` Shape: - Input: :math:`(*)`, where :math:`*` means any number of dimensions. - Output: :math:`(*)`, same shape as the input. .. image:: ../scripts/activation_images/Hardsigmoid.png Examples:: >>> m = nn.Hardsigmoid() >>> input = torch.randn(2) >>> output = m(input) """ __constants__ = ["inplace"] inplace: bool def __init__(self, inplace: bool = False) -> None: super().__init__() self.inplace = inplace def forward(self, input: Tensor) -> Tensor: """ Runs the forward pass. """ return F.hardsigmoid(input, self.inplace)
Hardsigmoid
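The piecewise definition in the docstring reduces to a single clamp, which is easy to check against the functional form:

```python
import torch
import torch.nn.functional as F

x = torch.linspace(-4.0, 4.0, 9)
manual = torch.clamp(x / 6 + 0.5, min=0.0, max=1.0)  # the piecewise formula above
assert torch.allclose(F.hardsigmoid(x), manual)
print(manual)  # 0 below -3, 1 above +3, linear in between
```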
python
pytorch__pytorch
tools/experimental/torchfuzz/operators/registry.py
{ "start": 1699, "end": 7355 }
class ____: """Registry for managing operator instances.""" def __init__(self): """Initialize the registry with default operators.""" self._operators: dict[str, Operator] = {} self._register_default_operators() def _register_default_operators(self): """Register the default set of operators.""" # Individual tensor pointwise operators (preferred) self.register(AddOperator()) self.register(MulOperator()) self.register(SubOperator()) self.register(DivOperator()) self.register(ClampOperator()) self.register(CumsumOperator()) # Individual scalar pointwise operators (preferred) self.register(ScalarAddOperator()) self.register(ScalarMulOperator()) self.register(ScalarSubOperator()) self.register(ScalarDivOperator()) # Leaf Input operators self.register(ConstantOperator()) self.register(ArgOperator()) # # Data-dependent operators self.register(NonzeroOperator()) self.register(MaskedSelectOperator()) self.register(GatherOperator()) self.register(IndexSelectOperator()) self.register(ArgsortOperator()) self.register(ItemOperator()) self.register(UniqueOperator()) # Tensor layout operators self.register(ViewOperator()) self.register(ReshapeOperator()) self.register(FlattenOperator()) self.register(SqueezeOperator()) self.register(UnsqueezeOperator()) self.register(CatOperator()) self.register(StackOperator()) self.register(ChunkOperator()) # Matrix multiplication operators self.register(MMOperator()) self.register(AddmmOperator()) self.register(BmmOperator()) self.register(MatmulOperator()) # Neural network functional operators self.register(EmbeddingOperator()) self.register(LinearOperator()) self.register(ScaledDotProductAttentionOperator()) self.register(MultiHeadAttentionForwardOperator()) # Activation functions self.register(ReLUOperator()) self.register(LeakyReLUOperator()) self.register(ELUOperator()) self.register(GELUOperator()) self.register(SiLUOperator()) self.register(SigmoidOperator()) self.register(TanhOperator()) self.register(SoftmaxOperator()) # Normalization layers self.register(LayerNormOperator()) self.register(RMSNormOperator()) self.register(BatchNormOperator()) self.register(GroupNormOperator()) # Regularization self.register(DropoutOperator()) def register(self, operator: Operator): """Register an operator in the registry.""" self._operators[operator.name] = operator def get(self, op_name: str) -> Operator | None: """Get an operator by name.""" # Handle special arg_ operations by mapping them to the ArgOperator if op_name.startswith("arg_"): return self._operators.get("arg") return self._operators.get(op_name) def list_operators(self) -> dict[str, Operator]: """List all registered operators.""" return self._operators.copy() # Global registry instance _global_registry = OperatorRegistry() def get_operator(op_name: str) -> Operator | None: """Get an operator from the global registry.""" return _global_registry.get(op_name) def register_operator(operator: Operator): """Register an operator in the global registry.""" _global_registry.register(operator) def list_operators() -> dict[str, Operator]: """List all operators in the global registry.""" return _global_registry.list_operators() def set_operator_weight(op_name: str, weight: float) -> None: """Set the selection weight for a specific operator. 
Args: op_name: The registered operator name (e.g., "add", "arg") OR fully-qualified torch op (e.g., "torch.nn.functional.relu", "torch.matmul") weight: New relative selection weight (must be > 0) """ if weight <= 0: raise ValueError("Operator weight must be > 0") # Try by registry key op = _global_registry.get(op_name) if op is not None: op.weight = float(weight) return # Fallback: try to locate by fully-qualified torch op name for candidate in _global_registry.list_operators().values(): if getattr(candidate, "torch_op_name", None) == op_name: candidate.weight = float(weight) return raise KeyError(f"Operator '{op_name}' not found by registry name or torch op name") def set_operator_weights(weights: dict[str, float]) -> None: """Bulk-update operator weights from a mapping of name -> weight.""" for name, w in weights.items(): set_operator_weight(name, w) def set_operator_weight_by_torch_op(torch_op_name: str, weight: float) -> None: """Set operator weight by fully-qualified torch op name.""" if weight <= 0: raise ValueError("Operator weight must be > 0") for candidate in _global_registry.list_operators().values(): if getattr(candidate, "torch_op_name", None) == torch_op_name: candidate.weight = float(weight) return raise KeyError(f"Torch op '{torch_op_name}' not found in registry") def set_operator_weights_by_torch_op(weights: dict[str, float]) -> None: """Bulk-update weights by fully-qualified torch op names.""" for name, w in weights.items(): set_operator_weight_by_torch_op(name, w)
OperatorRegistry
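A short usage sketch of the weight helpers defined above. The registry keys `"matmul"` and `"mm"` are assumptions about the corresponding operators' `name` attributes, and `"arg"` is assumed to be `ArgOperator`'s registered name (as the `get()` fallback implies); the fully-qualified torch op string is the one from the module's own docstring:

```python
# Bias fuzzing toward matmul-heavy programs (keys assumed to match Operator.name).
set_operator_weights({"matmul": 3.0, "mm": 2.0})

# The same idea via a fully-qualified torch op name when the registry key is unknown.
set_operator_weight_by_torch_op("torch.nn.functional.relu", 0.5)

# "arg_*" names all resolve to the shared ArgOperator instance.
op = get_operator("arg_0")
assert op is get_operator("arg_17")
```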
python
ipython__ipython
IPython/core/prefilter.py
{ "start": 22287, "end": 24931 }
class ____(PrefilterHandler): handler_name = Unicode('auto') esc_strings = List([ESC_PAREN, ESC_QUOTE, ESC_QUOTE2]) def handle(self, line_info): """Handle lines which can be auto-executed, quoting if requested.""" line = line_info.line ifun = line_info.ifun the_rest = line_info.the_rest esc = line_info.esc continue_prompt = line_info.continue_prompt obj = line_info.ofind(self.shell).obj # This should only be active for single-line input! if continue_prompt: return line force_auto = isinstance(obj, IPyAutocall) # User objects sometimes raise exceptions on attribute access other # than AttributeError (we've seen it in the past), so it's safest to be # ultra-conservative here and catch all. try: auto_rewrite = obj.rewrite except Exception: auto_rewrite = True if esc == ESC_QUOTE: # Auto-quote splitting on whitespace newcmd = '%s("%s")' % (ifun,'", "'.join(the_rest.split()) ) elif esc == ESC_QUOTE2: # Auto-quote whole string newcmd = '%s("%s")' % (ifun,the_rest) elif esc == ESC_PAREN: newcmd = '%s(%s)' % (ifun,",".join(the_rest.split())) else: # Auto-paren. if force_auto: # Don't rewrite if it is already a call. do_rewrite = not the_rest.startswith('(') else: if not the_rest: # We only apply it to argument-less calls if the autocall # parameter is set to 2. do_rewrite = (self.shell.autocall >= 2) elif the_rest.startswith('[') and hasattr(obj, '__getitem__'): # Don't autocall in this case: item access for an object # which is BOTH callable and implements __getitem__. do_rewrite = False else: do_rewrite = True # Figure out the rewritten command if do_rewrite: if the_rest.endswith(';'): newcmd = '%s(%s);' % (ifun.rstrip(),the_rest[:-1]) else: newcmd = '%s(%s)' % (ifun.rstrip(), the_rest) else: normal_handler = self.prefilter_manager.get_handler_by_name('normal') return normal_handler.handle(line_info) # Display the rewritten call if auto_rewrite: self.shell.auto_rewrite_input(newcmd) return newcmd
AutoHandler
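Concretely, with IPython's standard escape characters (`/` for ESC_PAREN, `,` for ESC_QUOTE, `;` for ESC_QUOTE2), the branches above rewrite input roughly as follows. A comment-only sketch, since the escape constants themselves are defined elsewhere:

```python
# Assuming `f` is callable and autocall is enabled:
#
#   /f 1 2     ->  f(1,2)        # ESC_PAREN: whitespace-split args joined with commas
#   ,f a b     ->  f("a", "b")   # ESC_QUOTE: each whitespace-split token quoted
#   ;f a b c   ->  f("a b c")    # ESC_QUOTE2: the whole remainder quoted once
#   f 1        ->  f(1)          # plain auto-paren (autocall >= 2 for no-arg calls)
```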
python
django__django
tests/auth_tests/test_mixins.py
{ "start": 795, "end": 866 }
class ____(AlwaysFalseMixin, EmptyResponseView): pass
AlwaysFalseView
python
Textualize__textual
tests/snapshot_tests/snapshot_apps/fr_with_min.py
{ "start": 164, "end": 1129 }
class ____(App[None]): CSS = """ Horizontal { width: 1fr; } Vertical { width: 1fr; background: blue; min-width: 20; } #scroll1 { width: 1fr; background: $panel; } #scroll2 { width: 2fr; background: $panel; } #scroll1 Static, #scroll2 Static { width: 1fr; content-align: center middle; background: $boost; } """ def compose(self) -> ComposeResult: yield Header() with Horizontal(): yield Vertical() with VerticalScroll(id="scroll1"): for n in range(100): yield Static(f"This is content number {n}") with VerticalScroll(id="scroll2"): for n in range(100): yield Static(f"This is content number {n}") yield Footer() if __name__ == "__main__": ScreenSplitApp().run()
ScreenSplitApp
python
ray-project__ray
python/ray/data/_internal/datasource/json_datasource.py
{ "start": 745, "end": 6390 }
class ____(FileBasedDatasource): """JSON datasource, for reading and writing JSON and JSONL files.""" def __init__( self, paths: Union[str, List[str]], *, arrow_json_args: Optional[Dict[str, Any]] = None, **file_based_datasource_kwargs, ): from pyarrow import json super().__init__(paths, **file_based_datasource_kwargs) if arrow_json_args is None: arrow_json_args = {} self.read_options = arrow_json_args.pop( "read_options", json.ReadOptions(use_threads=False) ) self.arrow_json_args = arrow_json_args def _read_with_pyarrow_read_json(self, buffer: "pyarrow.lib.Buffer"): """Read with PyArrow JSON reader, trying to auto-increase the read block size in the case of the read object straddling block boundaries.""" import pyarrow as pa import pyarrow.json as pajson # When reading large files, the default block size configured in PyArrow can be # too small, resulting in the following error: `pyarrow.lib.ArrowInvalid: # straddling object straddles two block boundaries (try to increase block # size?)`. More information on this issue can be found here: # https://github.com/apache/arrow/issues/25674 # The read will be retried with geometrically increasing block size # until the size reaches `DataContext.get_current().target_max_block_size`. # The initial block size will start at the PyArrow default block size # or it can be manually set through the `read_options` parameter as follows. # >>> import pyarrow.json as pajson # >>> block_size = 10 << 20 # Set block size to 10MB # >>> ray.data.read_json( # doctest: +SKIP # ... "s3://anonymous@ray-example-data/log.json", # ... read_options=pajson.ReadOptions(block_size=block_size) # ... ) init_block_size = self.read_options.block_size max_block_size = DataContext.get_current().target_max_block_size while True: try: yield pajson.read_json( io.BytesIO(buffer), read_options=self.read_options, **self.arrow_json_args, ) self.read_options.block_size = init_block_size break except pa.ArrowInvalid as e: if "straddling object straddles two block boundaries" in str(e): if ( max_block_size is None or self.read_options.block_size < max_block_size ): # Increase the block size in case it was too small. logger.debug( f"JSONDatasource read failed with " f"block_size={self.read_options.block_size}. Retrying with " f"block_size={self.read_options.block_size * 2}." ) self.read_options.block_size *= 2 else: raise pa.ArrowInvalid( f"{e} - Auto-increasing block size to " f"{self.read_options.block_size} bytes failed. " f"Please try manually increasing the block size through " f"the `read_options` parameter to a larger size. " f"For example: `read_json(..., read_options=" f"pyarrow.json.ReadOptions(block_size=10 << 25))`" f"More information on this issue can be found here: " f"https://github.com/apache/arrow/issues/25674" ) else: # unrelated error, simply reraise raise e def _read_with_python_json(self, buffer: "pyarrow.lib.Buffer"): """Fallback method to read JSON files with Python's native json.load(), in case the default pyarrow json reader fails.""" import json import pyarrow as pa # Check if the buffer is empty if buffer.size == 0: return parsed_json = json.load(io.BytesIO(buffer)) try: yield pa.Table.from_pylist(parsed_json) except AttributeError as e: # For PyArrow < 7.0.0, `pa.Table.from_pylist()` is not available. # Construct a dict from the list and call # `pa.Table.from_pydict()` instead. 
assert "no attribute 'from_pylist'" in str(e), str(e) from collections import defaultdict dct = defaultdict(list) for row in parsed_json: for k, v in row.items(): dct[k].append(v) yield pyarrow_table_from_pydict(dct) # TODO(ekl) The PyArrow JSON reader doesn't support streaming reads. def _read_stream(self, f: "pyarrow.NativeFile", path: str): import pyarrow as pa buffer: pa.lib.Buffer = f.read_buffer() try: yield from self._read_with_pyarrow_read_json(buffer) except pa.ArrowInvalid as e: # If read with PyArrow fails, try falling back to native json.load(). logger.warning( f"Error reading with pyarrow.json.read_json(). " f"Falling back to native json.load(), which may be slower. " f"PyArrow error was:\n{e}" ) yield from self._read_with_python_json(buffer)
ArrowJSONDatasource
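The straddling-object workaround above is a generic retry-with-geometric-growth pattern. A minimal standalone sketch, with an illustrative function name and limits:

```python
import io
import pyarrow.json as pajson

def read_json_growing(buf: bytes, block_size: int = 1 << 20,
                      max_block_size: int = 1 << 27):
    """Retry pyarrow's JSON reader, doubling block_size on straddling errors."""
    while True:
        try:
            return pajson.read_json(
                io.BytesIO(buf),
                read_options=pajson.ReadOptions(block_size=block_size),
            )
        except Exception as exc:
            if "straddling object" in str(exc) and block_size < max_block_size:
                block_size *= 2  # a JSON object crossed a block boundary; retry bigger
            else:
                raise
```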
python
plotly__plotly.py
plotly/graph_objs/scatter/_fillpattern.py
{ "start": 233, "end": 15287 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "scatter" _path_str = "scatter.fillpattern" _valid_props = { "bgcolor", "bgcolorsrc", "fgcolor", "fgcolorsrc", "fgopacity", "fillmode", "path", "pathsrc", "shape", "shapesrc", "size", "sizesrc", "solidity", "soliditysrc", } @property def bgcolor(self): """ When there is no colorscale sets the color of background pattern fill. Defaults to a `marker.color` background when `fillmode` is "overlay". Otherwise, defaults to a transparent background. The 'bgcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["bgcolor"] @bgcolor.setter def bgcolor(self, val): self["bgcolor"] = val @property def bgcolorsrc(self): """ Sets the source reference on Chart Studio Cloud for `bgcolor`. The 'bgcolorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["bgcolorsrc"] @bgcolorsrc.setter def bgcolorsrc(self, val): self["bgcolorsrc"] = val @property def fgcolor(self): """ When there is no colorscale sets the color of foreground pattern fill. Defaults to a `marker.color` background when `fillmode` is "replace". Otherwise, defaults to dark grey or white to increase contrast with the `bgcolor`. The 'fgcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["fgcolor"] @fgcolor.setter def fgcolor(self, val): self["fgcolor"] = val @property def fgcolorsrc(self): """ Sets the source reference on Chart Studio Cloud for `fgcolor`. The 'fgcolorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["fgcolorsrc"] @fgcolorsrc.setter def fgcolorsrc(self, val): self["fgcolorsrc"] = val @property def fgopacity(self): """ Sets the opacity of the foreground pattern fill. Defaults to a 0.5 when `fillmode` is "overlay". Otherwise, defaults to 1. The 'fgopacity' property is a number and may be specified as: - An int or float in the interval [0, 1] Returns ------- int|float """ return self["fgopacity"] @fgopacity.setter def fgopacity(self, val): self["fgopacity"] = val @property def fillmode(self): """ Determines whether `marker.color` should be used as a default to `bgcolor` or a `fgcolor`. The 'fillmode' property is an enumeration that may be specified as: - One of the following enumeration values: ['replace', 'overlay'] Returns ------- Any """ return self["fillmode"] @fillmode.setter def fillmode(self, val): self["fillmode"] = val @property def path(self): """ Sets a custom path for pattern fill. Use with no `shape` or `solidity`, provide an SVG path string for the regions of the square from (0,0) to (`size`,`size`) to color. 
The 'path' property is a string and must be specified as: - A string - A number that will be converted to a string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray """ return self["path"] @path.setter def path(self, val): self["path"] = val @property def pathsrc(self): """ Sets the source reference on Chart Studio Cloud for `path`. The 'pathsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["pathsrc"] @pathsrc.setter def pathsrc(self, val): self["pathsrc"] = val @property def shape(self): """ Sets the shape of the pattern fill. By default, no pattern is used for filling the area. The 'shape' property is an enumeration that may be specified as: - One of the following enumeration values: ['', '/', '\\', 'x', '-', '|', '+', '.'] - A tuple, list, or one-dimensional numpy array of the above Returns ------- Any|numpy.ndarray """ return self["shape"] @shape.setter def shape(self, val): self["shape"] = val @property def shapesrc(self): """ Sets the source reference on Chart Studio Cloud for `shape`. The 'shapesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["shapesrc"] @shapesrc.setter def shapesrc(self, val): self["shapesrc"] = val @property def size(self): """ Sets the size of unit squares of the pattern fill in pixels, which corresponds to the interval of repetition of the pattern. The 'size' property is a number and may be specified as: - An int or float in the interval [0, inf] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray """ return self["size"] @size.setter def size(self, val): self["size"] = val @property def sizesrc(self): """ Sets the source reference on Chart Studio Cloud for `size`. The 'sizesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["sizesrc"] @sizesrc.setter def sizesrc(self, val): self["sizesrc"] = val @property def solidity(self): """ Sets the solidity of the pattern fill. Solidity is roughly the fraction of the area filled by the pattern. Solidity of 0 shows only the background color without pattern and solidty of 1 shows only the foreground color without pattern. The 'solidity' property is a number and may be specified as: - An int or float in the interval [0, 1] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray """ return self["solidity"] @solidity.setter def solidity(self, val): self["solidity"] = val @property def soliditysrc(self): """ Sets the source reference on Chart Studio Cloud for `solidity`. The 'soliditysrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["soliditysrc"] @soliditysrc.setter def soliditysrc(self, val): self["soliditysrc"] = val @property def _prop_descriptions(self): return """\ bgcolor When there is no colorscale sets the color of background pattern fill. Defaults to a `marker.color` background when `fillmode` is "overlay". Otherwise, defaults to a transparent background. bgcolorsrc Sets the source reference on Chart Studio Cloud for `bgcolor`. fgcolor When there is no colorscale sets the color of foreground pattern fill. Defaults to a `marker.color` background when `fillmode` is "replace". Otherwise, defaults to dark grey or white to increase contrast with the `bgcolor`. 
fgcolorsrc Sets the source reference on Chart Studio Cloud for `fgcolor`. fgopacity Sets the opacity of the foreground pattern fill. Defaults to a 0.5 when `fillmode` is "overlay". Otherwise, defaults to 1. fillmode Determines whether `marker.color` should be used as a default to `bgcolor` or a `fgcolor`. path Sets a custom path for pattern fill. Use with no `shape` or `solidity`, provide an SVG path string for the regions of the square from (0,0) to (`size`,`size`) to color. pathsrc Sets the source reference on Chart Studio Cloud for `path`. shape Sets the shape of the pattern fill. By default, no pattern is used for filling the area. shapesrc Sets the source reference on Chart Studio Cloud for `shape`. size Sets the size of unit squares of the pattern fill in pixels, which corresponds to the interval of repetition of the pattern. sizesrc Sets the source reference on Chart Studio Cloud for `size`. solidity Sets the solidity of the pattern fill. Solidity is roughly the fraction of the area filled by the pattern. Solidity of 0 shows only the background color without pattern and solidty of 1 shows only the foreground color without pattern. soliditysrc Sets the source reference on Chart Studio Cloud for `solidity`. """ def __init__( self, arg=None, bgcolor=None, bgcolorsrc=None, fgcolor=None, fgcolorsrc=None, fgopacity=None, fillmode=None, path=None, pathsrc=None, shape=None, shapesrc=None, size=None, sizesrc=None, solidity=None, soliditysrc=None, **kwargs, ): """ Construct a new Fillpattern object Sets the pattern within the marker. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.scatter.Fillpattern` bgcolor When there is no colorscale sets the color of background pattern fill. Defaults to a `marker.color` background when `fillmode` is "overlay". Otherwise, defaults to a transparent background. bgcolorsrc Sets the source reference on Chart Studio Cloud for `bgcolor`. fgcolor When there is no colorscale sets the color of foreground pattern fill. Defaults to a `marker.color` background when `fillmode` is "replace". Otherwise, defaults to dark grey or white to increase contrast with the `bgcolor`. fgcolorsrc Sets the source reference on Chart Studio Cloud for `fgcolor`. fgopacity Sets the opacity of the foreground pattern fill. Defaults to a 0.5 when `fillmode` is "overlay". Otherwise, defaults to 1. fillmode Determines whether `marker.color` should be used as a default to `bgcolor` or a `fgcolor`. path Sets a custom path for pattern fill. Use with no `shape` or `solidity`, provide an SVG path string for the regions of the square from (0,0) to (`size`,`size`) to color. pathsrc Sets the source reference on Chart Studio Cloud for `path`. shape Sets the shape of the pattern fill. By default, no pattern is used for filling the area. shapesrc Sets the source reference on Chart Studio Cloud for `shape`. size Sets the size of unit squares of the pattern fill in pixels, which corresponds to the interval of repetition of the pattern. sizesrc Sets the source reference on Chart Studio Cloud for `size`. solidity Sets the solidity of the pattern fill. Solidity is roughly the fraction of the area filled by the pattern. Solidity of 0 shows only the background color without pattern and solidty of 1 shows only the foreground color without pattern. soliditysrc Sets the source reference on Chart Studio Cloud for `solidity`. 
Returns ------- Fillpattern """ super().__init__("fillpattern") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.scatter.Fillpattern constructor must be a dict or an instance of :class:`plotly.graph_objs.scatter.Fillpattern`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("bgcolor", arg, bgcolor) self._set_property("bgcolorsrc", arg, bgcolorsrc) self._set_property("fgcolor", arg, fgcolor) self._set_property("fgcolorsrc", arg, fgcolorsrc) self._set_property("fgopacity", arg, fgopacity) self._set_property("fillmode", arg, fillmode) self._set_property("path", arg, path) self._set_property("pathsrc", arg, pathsrc) self._set_property("shape", arg, shape) self._set_property("shapesrc", arg, shapesrc) self._set_property("size", arg, size) self._set_property("sizesrc", arg, sizesrc) self._set_property("solidity", arg, solidity) self._set_property("soliditysrc", arg, soliditysrc) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Fillpattern
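Typical use from the public API, assuming a recent plotly with fill-pattern support; note the pattern only renders when the trace actually has a fill:

```python
import plotly.graph_objects as go

fig = go.Figure(
    go.Scatter(
        y=[1, 3, 2, 4],
        fill="tozeroy",  # fillpattern applies to the filled region
        fillpattern=dict(shape="/", size=8, solidity=0.4, fgcolor="gray"),
    )
)
fig.show()
```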
python
apache__airflow
providers/snowflake/src/airflow/providers/snowflake/decorators/snowpark.py
{ "start": 1138, "end": 5219 }
class ____(DecoratedOperator, SnowparkOperator):
    """
    Wraps a Python callable that contains Snowpark code and captures args/kwargs when called for execution.

    :param snowflake_conn_id: Reference to :ref:`Snowflake connection id<howto/connection:snowflake>`
    :param python_callable: A reference to an object that is callable
    :param op_args: a list of positional arguments that will get unpacked when
        calling your callable
    :param op_kwargs: a dictionary of keyword arguments that will get unpacked in your function
    :param warehouse: name of warehouse (will overwrite any warehouse defined in the connection's extra JSON)
    :param database: name of database (will overwrite database defined in connection)
    :param schema: name of schema (will overwrite schema defined in connection)
    :param role: name of role (will overwrite any role defined in connection's extra JSON)
    :param authenticator: authenticator for Snowflake.
        'snowflake' (default) to use the internal Snowflake authenticator
        'externalbrowser' to authenticate using your web browser and
        Okta, ADFS or any other SAML 2.0-compliant identity provider
        (IdP) that has been defined for your account
        'https://<your_okta_account_name>.okta.com' to authenticate
        through native Okta.
    :param session_parameters: You can set session-level parameters at
        the time you connect to Snowflake
    :param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
        multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
    """

    custom_operator_name = "@task.snowpark"

    def __init__(
        self,
        *,
        snowflake_conn_id: str = "snowflake_default",
        python_callable: Callable,
        op_args: Sequence | None = None,
        op_kwargs: dict | None = None,
        warehouse: str | None = None,
        database: str | None = None,
        role: str | None = None,
        schema: str | None = None,
        authenticator: str | None = None,
        session_parameters: dict | None = None,
        **kwargs,
    ) -> None:
        kwargs_to_upstream = {
            "python_callable": python_callable,
            "op_args": op_args,
            "op_kwargs": op_kwargs,
        }
        super().__init__(
            kwargs_to_upstream=kwargs_to_upstream,
            snowflake_conn_id=snowflake_conn_id,
            python_callable=python_callable,
            op_args=op_args,
            # airflow.decorators.base.DecoratedOperator checks if the functions are bindable, so we have to
            # add an artificial value to pass the validation if there is a keyword argument named `session`
            # in the signature of the python callable. The real value is determined at runtime.
            op_kwargs=inject_session_into_op_kwargs(python_callable, op_kwargs, None)
            if op_kwargs is not None
            else op_kwargs,
            warehouse=warehouse,
            database=database,
            role=role,
            schema=schema,
            authenticator=authenticator,
            session_parameters=session_parameters,
            **kwargs,
        )


def snowpark_task(
    python_callable: Callable | None = None,
    multiple_outputs: bool | None = None,
    **kwargs,
) -> TaskDecorator:
    """
    Wrap a function that contains Snowpark code into an Airflow operator.

    Accepts kwargs for operator kwarg. Can be reused in a single DAG.

    :param python_callable: Function to decorate
    :param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
        multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
    """
    return task_decorator_factory(
        python_callable=python_callable,
        multiple_outputs=multiple_outputs,
        decorated_operator_class=_SnowparkDecoratedOperator,
        **kwargs,
    )
_SnowparkDecoratedOperator
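A hypothetical DAG snippet using the decorator. The `session` parameter is not supplied by the caller but injected at runtime (see `inject_session_into_op_kwargs` above), and the Snowpark calls are illustrative:

```python
from airflow.decorators import task

@task.snowpark(snowflake_conn_id="snowflake_default", warehouse="COMPUTE_WH")
def top_rows(table: str, session=None):
    # `session` is a snowflake.snowpark.Session provided by the operator at runtime.
    return session.table(table).limit(10).to_pandas()
```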
python
cython__cython
Cython/Compiler/FlowControl.py
{ "start": 2596, "end": 2668 }
class ____: def __init__(self): self.stats = []
AssignmentList
python
Textualize__textual
docs/examples/styles/align_all.py
{ "start": 120, "end": 894 }
class ____(App): """App that illustrates all alignments.""" CSS_PATH = "align_all.tcss" def compose(self) -> ComposeResult: yield Container(Label("left top"), id="left-top") yield Container(Label("center top"), id="center-top") yield Container(Label("right top"), id="right-top") yield Container(Label("left middle"), id="left-middle") yield Container(Label("center middle"), id="center-middle") yield Container(Label("right middle"), id="right-middle") yield Container(Label("left bottom"), id="left-bottom") yield Container(Label("center bottom"), id="center-bottom") yield Container(Label("right bottom"), id="right-bottom") if __name__ == "__main__": AlignAllApp().run()
AlignAllApp
python
streamlit__streamlit
lib/tests/streamlit/external/langchain/capturing_callback_handler.py
{ "start": 1046, "end": 1687 }
class ____: ON_LLM_START = "on_llm_start" ON_LLM_NEW_TOKEN = "on_llm_new_token" ON_LLM_END = "on_llm_end" ON_LLM_ERROR = "on_llm_error" ON_TOOL_START = "on_tool_start" ON_TOOL_END = "on_tool_end" ON_TOOL_ERROR = "on_tool_error" ON_TEXT = "on_text" ON_CHAIN_START = "on_chain_start" ON_CHAIN_END = "on_chain_end" ON_CHAIN_ERROR = "on_chain_error" ON_AGENT_ACTION = "on_agent_action" ON_AGENT_FINISH = "on_agent_finish" # We use TypedDict, rather than NamedTuple, so that we avoid serializing a # custom class with pickle. All of this class's members should be basic Python types.
CallbackType
python
ray-project__ray
python/ray/data/_internal/execution/interfaces/op_runtime_metrics.py
{ "start": 1600, "end": 3949 }
class ____: """Metadata for a metric. Args: name: The name of the metric. description: A human-readable description of the metric, also used as the chart description on the Ray Data dashboard. metrics_group: The group of the metric, used to organize metrics into groups in 'StatsActor' and on the Ray Data dashboard. map_only: Whether the metric is only measured for 'MapOperators'. """ name: str description: str metrics_group: str metrics_type: MetricsType metrics_args: Dict[str, Any] # TODO: Let's refactor this parameter so it isn't tightly coupled with a specific # operator type (MapOperator). map_only: bool = False internal_only: bool = False # do not expose this metric to the user def metric_field( *, description: str, metrics_group: str, metrics_type: MetricsType = MetricsType.Gauge, metrics_args: Dict[str, Any] = None, map_only: bool = False, internal_only: bool = False, # do not expose this metric to the user **field_kwargs, ): """A dataclass field that represents a metric.""" metadata = field_kwargs.get("metadata", {}) metadata[_IS_FIELD_METRIC_KEY] = True metadata[_METRIC_FIELD_DESCRIPTION_KEY] = description metadata[_METRIC_FIELD_METRICS_GROUP_KEY] = metrics_group metadata[_METRIC_FIELD_METRICS_TYPE_KEY] = metrics_type metadata[_METRIC_FIELD_METRICS_ARGS_KEY] = metrics_args or {} metadata[_METRIC_FIELD_IS_MAP_ONLY_KEY] = map_only return field(metadata=metadata, **field_kwargs) def metric_property( *, description: str, metrics_group: str, metrics_type: MetricsType = MetricsType.Gauge, metrics_args: Dict[str, Any] = None, map_only: bool = False, internal_only: bool = False, # do not expose this metric to the user ): """A property that represents a metric.""" def wrap(func): metric = MetricDefinition( name=func.__name__, description=description, metrics_group=metrics_group, metrics_type=metrics_type, metrics_args=(metrics_args or {}), map_only=map_only, internal_only=internal_only, ) _METRICS.append(metric) return property(func) return wrap @dataclass
MetricDefinition
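A hypothetical dataclass using `metric_field` above; extra kwargs such as `default` flow through `**field_kwargs` to `dataclasses.field`, and `metrics_type` defaults to `MetricsType.Gauge`:

```python
from dataclasses import dataclass

@dataclass
class ExampleMetrics:  # illustrative, not a class from the source
    rows_read: int = metric_field(
        default=0,
        description="Rows read by the operator so far.",
        metrics_group="inputs",
    )
    udf_time: float = metric_field(
        default=0.0,
        description="Total seconds spent in the UDF.",
        metrics_group="tasks",
        map_only=True,  # only measured for MapOperators
    )
```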
python
pytorch__pytorch
torch/_dynamo/side_effects.py
{ "start": 2907, "end": 53504 }
class ____: """ Maintain records of mutations and provide methods to apply them during code generation. Handles tracking and applying side effects during PyTorch Dynamo compilation, maintaining Python semantics by managing mutations, attribute modifications, and other side effects that occur during program execution. Key responsibilities: - Tracks mutations to Python objects, lists, and dictionaries that need to be applied after an FX graph is run. - Manages attribute modifications and deletions - Handles tensor hooks and backward pass state - Tracks cell variable mutations and global variable changes - Ensures correct ordering and application of side effects after graph execution This ensures that optimized code behaves identically to the original Python code with respect to object mutations and other side effects. """ id_to_variable: dict[int, VariableTracker] store_attr_mutations: dict[VariableTracker, dict[str, VariableTracker]] keepalive: list[Any] def __init__( self, output_graph: "OutputGraph", id_to_variable: Optional[dict[int, VariableTracker]] = None, store_attr_mutations: Optional[ dict[VariableTracker, dict[str, VariableTracker]] ] = None, keepalive: Optional[list[Any]] = None, save_for_backward: Optional[ list[tuple[AutogradFunctionContextVariable, list[VariableTracker]]] ] = None, tensor_hooks: Optional[ dict[ int, tuple[ "variables.TensorVariable", VariableTracker, "variables.RemovableHandleVariable", str, ], ] ] = None, ) -> None: super().__init__() self.output_graph_weakref = weakref.ref(output_graph) self.id_to_variable = id_to_variable or {} self.store_attr_mutations = store_attr_mutations or {} self.keepalive = keepalive or [] self.save_for_backward = save_for_backward or [] self.tensor_hooks = tensor_hooks or {} # Used by MappingProxyVariable to graph break in case of any mutated # dict self._has_existing_dict_mutation = False # Track Compiled Autograd final callbacks that must be called at the end of Compiled Autograd backward graph. # Only applicable if this graph is created from Dynamo tracing in Compiled Autograd. self.ca_final_callbacks_var: Optional[ListVariable] = None # Tracks VariableTracker objects whose mutations can be skipped. # For normal mutated variables, Dynamo generates code to replay/reconstruct # the mutations after graph execution. However, variables in this set have # their mutations ignored - the mutations happen during # execution but don't need to be replayed in the generated code. # Used for temporary mutations in contexts like torch.func.functional_call, # where module parameters/buffers are modified but later restored. 
        self.ignore_mutation_on_these_variables: set[VariableTracker] = set()

    def ignore_mutations_on(self, var: VariableTracker) -> None:
        """Mutations to this variable will be executed but not tracked,
        typically used for temporary mutations that are later restored."""
        self.ignore_mutation_on_these_variables.add(var)

    def stop_ignoring_mutations_on(self, var: VariableTracker) -> None:
        """Remove a variable from the skip mutation set, restoring normal mutation tracking."""
        if var in self.ignore_mutation_on_these_variables:
            self.ignore_mutation_on_these_variables.remove(var)

    def __eq__(self, other: object) -> bool:
        assert isinstance(other, SideEffects)
        # NB: do NOT test keepalive
        return (
            self.id_to_variable == other.id_to_variable
            and self.store_attr_mutations == other.store_attr_mutations
            and self.save_for_backward == other.save_for_backward
            and self.tensor_hooks == other.tensor_hooks
        )

    def diff(self, other: "SideEffects") -> Optional[str]:
        if self.id_to_variable != other.id_to_variable:
            sk_itv = self.id_to_variable.keys()
            ok_itv = other.id_to_variable.keys()
            if sk_itv != ok_itv:
                return f"id_to_variable keys: {sk_itv} != {ok_itv}"
            # Feel free to augment this with more fancy diffing logic
            # if needed for debugging
            return "id_to_variable: unknown diff"
        elif self.store_attr_mutations != other.store_attr_mutations:
            sk_sam = self.store_attr_mutations.keys()
            ok_sam = other.store_attr_mutations.keys()
            if sk_sam != ok_sam:
                return f"store_attr_mutations keys: {sk_sam} != {ok_sam}"
            return "store_attr_mutations: unknown diff"
        elif self.save_for_backward != other.save_for_backward:
            return "save_for_backward"
        elif self.tensor_hooks != other.tensor_hooks:
            return "tensor_hooks"
        else:
            return None

    def clone(self) -> "SideEffects":
        """Create a shallow copy"""
        ref = self.output_graph_weakref()
        assert ref is not None
        return self.__class__(
            output_graph=ref,
            id_to_variable=dict(self.id_to_variable),
            store_attr_mutations={
                k: dict(v) for k, v in self.store_attr_mutations.items()
            },
            keepalive=list(self.keepalive),
            save_for_backward=self.save_for_backward,
            tensor_hooks=self.tensor_hooks,
        )

    def __contains__(self, item: Any) -> bool:
        return id(item) in self.id_to_variable

    def __getitem__(self, item: Any) -> VariableTracker:
        return self.id_to_variable[id(item)]

    def should_allow_side_effects_under_checkpoint(self) -> bool:
        output_graph = self.output_graph_weakref()
        return bool(
            output_graph
            and output_graph.current_tx.output.current_tracer.under_activation_checkpoint
            and (
                output_graph.current_tx.output.current_tracer.allow_side_effects_under_checkpoint
                or torch._dynamo.config.skip_fwd_side_effects_in_bwd_under_checkpoint
            )
        )

    def should_allow_externally_visible_side_effects_in_subtracer(self) -> bool:
        output_graph = self.output_graph_weakref()
        return bool(
            output_graph
            and output_graph.current_tx.output.current_tracer.unsafe_allow_externally_visible_side_effects
        )

    def is_reconstructing_generator(self) -> bool:
        output_graph = self.output_graph_weakref()
        return bool(
            output_graph
            and output_graph.current_tx.output.current_tracer.is_reconstructing_generator
        )

    def check_allowed_side_effect(self, item: VariableTracker) -> bool:
        from torch._dynamo.variables.misc import AutogradFunctionContextVariable

        # People do things like self.dim = dim inside autograd.Function.
        # These are benign.
if isinstance(item, AutogradFunctionContextVariable): return True if self.should_allow_externally_visible_side_effects_in_subtracer(): return True if self.should_allow_side_effects_under_checkpoint(): return True if self.is_reconstructing_generator(): # This is missing the case where one mutates a tensor. See # test_generator.py::test_reconstruct_generator_tensor_mutation raise SideEffectsError( "Cannot reconstruct a generator with variable mutations. " "Dynamo needs to fully exhaust the generator, which may cause " "unintended variable modifications." ) assert item.mutation_type is not None if not is_side_effect_safe(item.mutation_type): # TODO plumb HOP information here unimplemented( gb_type="HigherOrderOperator: Mutating a variable not in the current scope (SideEffects)", context="", explanation="This is not supported.", hints=[], ) return False def store_attr( self, item: VariableTracker, name: str, value: VariableTracker ) -> None: assert self.is_attribute_mutation(item) self.check_allowed_side_effect(item) if item not in self.store_attr_mutations: self.store_attr_mutations[item] = {} self.store_attr_mutations[item][name] = value def load_attr( self, item: VariableTracker, name: str, deleted_ok: bool = False, check: bool = False, ) -> VariableTracker: if check: assert self.is_attribute_mutation(item) result = self.store_attr_mutations[item][name] if not deleted_ok and isinstance(result, variables.DeletedVariable): unimplemented( gb_type="Attempted to read a deleted variable", context=f"item: {item}, name: {name}", explanation="", hints=[*graph_break_hints.USER_ERROR], ) return result def store_cell(self, cellvar: VariableTracker, value: VariableTracker) -> None: if cellvar.is_immutable(): unimplemented( gb_type="Write to immutable cell", context=f"cellvar: {cellvar}, value: {value}", explanation="Dynamo doesn't support writing to immutable/sourceless cell variables.", hints=[*graph_break_hints.DIFFICULT], ) assert isinstance(cellvar, variables.CellVariable) assert isinstance(value, variables.VariableTracker) self.store_attr(cellvar, "cell_contents", value) def load_cell(self, cellvar: VariableTracker) -> VariableTracker: assert isinstance(cellvar, variables.CellVariable) if self.has_pending_mutation_of_attr(cellvar, "cell_contents"): return self.load_attr(cellvar, "cell_contents", check=False) if cellvar.pre_existing_contents: return cellvar.pre_existing_contents unimplemented( gb_type="Read uninitialized cell", context=str(cellvar), explanation="Attempted to read a cell variable that has not been populated yet.", hints=[*graph_break_hints.USER_ERROR], ) def load_global(self, gvar: VariableTracker, name: str) -> VariableTracker: assert isinstance(gvar, variables.VariableTracker) return self.load_attr(gvar, name) def store_global( self, gvar: VariableTracker, name: str, value: VariableTracker ) -> None: assert isinstance(gvar, variables.VariableTracker) assert isinstance(value, variables.VariableTracker) self.store_attr(gvar, name, value) @staticmethod def cls_supports_mutation_side_effects(cls: type) -> bool: return inspect.getattr_static(cls, "__getattribute__", None) in ( object.__getattribute__, dict.__getattribute__, set.__getattribute__, frozenset.__getattribute__, int.__getattribute__, str.__getattribute__, list.__getattribute__, tuple.__getattribute__, BaseException.__getattribute__, ) def is_attribute_mutation(self, item: VariableTracker) -> bool: return isinstance(item.mutation_type, AttributeMutation) def has_pending_mutation(self, item: VariableTracker) -> bool: return 
self.is_attribute_mutation(item) and bool( self.store_attr_mutations.get(item) ) def has_pending_mutation_of_attr(self, item: VariableTracker, name: str) -> bool: return self.is_attribute_mutation( item ) and name in self.store_attr_mutations.get(item, ()) def is_modified(self, item: VariableTracker) -> bool: if item.is_immutable(): return False if isinstance(item.mutation_type, (AttributeMutationNew, ValueMutationNew)): return True if isinstance(item, variables.UserDefinedObjectVariable): # Checks if the underlying dict or tuple vt has been modified return item in self.store_attr_mutations or item.is_underlying_vt_modified( self ) if self.is_attribute_mutation(item): return item in self.store_attr_mutations assert item.mutation_type is not None return item.mutation_type.is_modified # type: ignore[attr-defined] def _track_obj( self, item: Any, variable: VariableTracker, mutation_type_cls: type = ValueMutationExisting, ) -> VariableTracker: """Start tracking an existing or new variable for mutation""" if id(item) in self.id_to_variable: raise AssertionError( f"{variable} is already tracked for mutation. This could be " "because you are not using VariableBuilder to construct " "the variable tracker. " f"Source of new object: {variable.source}. " f"Source of previously tracked object: {self.id_to_variable[id(item)].source}." ) variable.mutation_type = mutation_type_cls() self.id_to_variable[id(item)] = variable self.keepalive.append(item) return variable track_mutable = _track_obj def track_object_existing( self, item: Any, variable: VariableTracker, ) -> VariableTracker: return self._track_obj( item, variable, mutation_type_cls=AttributeMutationExisting, ) def track_object_new( self, cls_source: Source, user_cls: Any, variable_cls: Any, options: dict[str, Any], ) -> VariableTracker: if user_cls is torch.autograd.function.FunctionCtx: with warnings.catch_warnings(record=True): obj = torch.autograd.Function() else: obj = object_new(user_cls) variable = variable_cls( obj, mutation_type=AttributeMutationNew(cls_source), **options, ) self.id_to_variable[id(obj)] = variable self.keepalive.append(obj) return variable def get_variable_cls(self, user_cls: type) -> type: from torch.overrides import TorchFunctionMode from .variables.ctx_manager import GenericContextWrappingVariable from .variables.torch_function import TorchFunctionModeVariable from .variables.user_defined import is_forbidden_context_manager variable_cls: type[variables.UserDefinedObjectVariable] = ( variables.UserDefinedObjectVariable ) if issubclass( user_cls, TorchFunctionMode ) and TorchFunctionModeVariable.is_supported_torch_function_mode(user_cls): variable_cls = TorchFunctionModeVariable elif ( hasattr(user_cls, "__enter__") and hasattr(user_cls, "__exit__") and not is_forbidden_context_manager(user_cls) ): variable_cls = GenericContextWrappingVariable elif issubclass(user_cls, torch.nn.Module): variable_cls = variables.UnspecializedNNModuleVariable elif issubclass(user_cls, (dict, collections.OrderedDict)): variable_cls = variables.UserDefinedDictVariable elif issubclass(user_cls, (set, frozenset)): variable_cls = variables.UserDefinedSetVariable elif issubclass(user_cls, tuple): variable_cls = variables.UserDefinedTupleVariable elif issubclass(user_cls, list): variable_cls = variables.UserDefinedListVariable elif issubclass(user_cls, MutableMapping): variable_cls = variables.MutableMappingVariable elif is_frozen_dataclass(user_cls): variable_cls = FrozenDataClassVariable elif issubclass(user_cls, BaseException): variable_cls = 
variables.UserDefinedExceptionObjectVariable assert issubclass(variable_cls, variables.UserDefinedObjectVariable) return variable_cls def get_example_value( self, base_cls_vt: VariableTracker, cls_vt: VariableTracker, init_args: list[VariableTracker], ) -> Any: user_cls = cls_vt.value # type: ignore[attr-defined] if issubclass(user_cls, torch.nn.Module): # TODO(anijain2305) - Is it possible to remove this specialization? obj = nn_module_new(user_cls) else: if isinstance(base_cls_vt, variables.BuiltinVariable): base_cls = base_cls_vt.fn elif isinstance(base_cls_vt, variables.UserDefinedClassVariable): base_cls = base_cls_vt.value else: raise RuntimeError(f"Unexpected base_cls_vt {base_cls_vt}") assert variables.UserDefinedClassVariable.is_supported_new_method( base_cls.__new__ ) # TODO(anijain2305) - Consider adding get_example_value method to # each VT to get an example value for all args. As we expand the # scope to other __new__ methods, we might need to call __new__ with # init_args (like functools.partial) # init_args = [arg.get_example_value() for arg in init_args] # obj = base_cls.__new__(user_cls, *init_args) obj = base_cls.__new__(user_cls) return obj def track_new_user_defined_object( self, base_cls_vt: VariableTracker, cls_vt: VariableTracker, init_args: list[VariableTracker], ) -> VariableTracker: """ Creates a UserDefinedObjectVariable (or its subclass) variable tracker and mark it for attribute mutation tracking. Also records the variable trackers to call __new__ method on reconstruction. Roughly, the reconstruction looks like this base_cls_vt.__new__(user_cls, *init_args) """ cls_source = cls_vt.source user_cls = cls_vt.value # type: ignore[attr-defined] variable_cls = self.get_variable_cls(user_cls) obj = self.get_example_value(base_cls_vt, cls_vt, init_args) variable = variable_cls( obj, cls_source=cls_vt.source, base_cls_vt=base_cls_vt, init_args=init_args, mutation_type=AttributeMutationNew(cls_source), ) self.id_to_variable[id(obj)] = variable self.keepalive.append(obj) return variable def track_cell_new( self, ) -> VariableTracker: obj = object() variable = variables.CellVariable( mutation_type=AttributeMutationNew(), ) self.id_to_variable[id(obj)] = variable self.keepalive.append(obj) return variable def track_cell_existing( self, source: Optional[Source], cell: CellType, contents: VariableTracker ) -> VariableTracker: variable = variables.CellVariable( # We don't support mutation to cell without source because we need # source to properly codegen the mutations. mutation_type=None if source is None else AttributeMutationExisting(), pre_existing_contents=contents, source=source, ) self.id_to_variable[id(cell)] = variable self.keepalive.append(cell) return variable def track_global_existing(self, source: Source, item: Any) -> VariableTracker: variable = variables.NewGlobalVariable( mutation_type=AttributeMutationExisting(), source=source, ) self.id_to_variable[id(item)] = variable self.keepalive.append(item) return variable def track_save_for_backward( self, ctx: VariableTracker, args: list[VariableTracker] ) -> None: assert isinstance(ctx, variables.AutogradFunctionContextVariable) self.save_for_backward.append((ctx, args)) def track_runahead_tensor_and_symvar_side_effects( self, other: "SideEffects" ) -> None: # In higher order ops we want to keep track of tensors seen in the # speculate_subgraph so that we don't lift them again as a new input in # other speculate_subgraph or in the root tracer. 
for other_item in other.keepalive: other_id = id(other_item) other_variable = other.id_to_variable[other_id] if other_id not in self.id_to_variable and isinstance( other_variable, (variables.TensorVariable, variables.SymNodeVariable) ): self.track_object_existing(other_item, other_variable) def prune_dead_object_new(self, tx: "InstructionTranslatorBase") -> None: # Avoid VT cycles from e.g., recursive function. visited: set[VariableTracker] = set() live_new_objects: set[VariableTracker] = set() def visit(var: VariableTracker) -> None: if var in visited: return visited.add(var) # Object may have been mutated, store this mutation. if isinstance(var.mutation_type, AttributeMutationNew): live_new_objects.add(var) # It's possible that we have mutated the value of this variable # to be another one. The new value is in store_attr_mutations. # Also recurse through the new value to detect alive AttributeMutationNew. if var in self.store_attr_mutations: VariableTracker.visit( visit, # noqa: F821 self.store_attr_mutations[var], ) def is_live(var: VariableTracker) -> bool: if isinstance(var.mutation_type, AttributeMutationNew): return var in live_new_objects return True pre_existing_vars = [ var for var in self.id_to_variable.values() if not isinstance(var.mutation_type, AttributeMutationNew) ] # The only live side effects come from returns (tx.stack), any intermediates # during a graph break (tx.symbolic_locals), and mutation on pre-existing variables. # Recursively visit Variables and see if any of them have been mutated. init_live_vars = [] # gather stack/symbolic_locals for all tx's up the chain cur_tx: Optional[InstructionTranslatorBase] = tx while cur_tx is not None: init_live_vars.extend([cur_tx.stack, cur_tx.symbolic_locals]) if cur_tx.parent is not None: # for non-root tx'es, also keep the cells/freevars alive so they get codegen'd properly # TODO see if we could prune dead cells - cell pruning information needs to be forwarded # to the resume function creation as well. assert cur_tx.post_prune_cell_and_freevars is not None init_live_vars.append(cur_tx.post_prune_cell_and_freevars) cur_tx = cur_tx.parent VariableTracker.visit( visit, # TODO track from all possible sources. init_live_vars + [ pre_existing_vars, tx.output.backward_state, self.tensor_hooks, ], ) # Manually release the self-referential function, which indirectly # captures certain `VariableTracker` and affects parts of PT test/logic # that are sensitive to when certain objects get released. del visit # NB: cell variable handling is tricky. # cell variables must stay alive if any NestedUserFunctionVariable # are live. "visit"-ing the NestedUserFunctionVariable visits # the .closures field, from which we will see if we need to keep # any mutations to cell variables alive. 
self.id_to_variable = { k: v for k, v in self.id_to_variable.items() if is_live(v) } self.store_attr_mutations = { k: v for k, v in self.store_attr_mutations.items() if is_live(k) } def mutation(self, var: VariableTracker) -> None: if var in self.ignore_mutation_on_these_variables: return self.check_allowed_side_effect(var) if isinstance(var.mutation_type, ValueMutationExisting): var.mutation_type.is_modified = True if ( var.source and isinstance(var, variables.ConstDictVariable) and not isinstance(var, variables.SetVariable) ): self._has_existing_dict_mutation = True def has_existing_dict_mutation(self) -> bool: return self._has_existing_dict_mutation def _get_modified_vars(self) -> list[VariableTracker]: return [var for var in self.id_to_variable.values() if self.is_modified(var)] def codegen_save_tempvars(self, cg: PyCodegen) -> None: # We must codegen modified VT to their source by default, so that # mutation and aliasing are properly accounted for. # # Since newly constructed objects don't have a source, we manually # codegen their construction and store them to a newly assigned local # source. Note that `ValueMutationNew` isn't tracked by SideEffects. for var in self._get_modified_vars(): if not isinstance(var.mutation_type, AttributeMutationNew): assert var.source is not None continue if isinstance(var, variables.CellVariable): # Cells created in the root frame are created either by # `MAKE_CELL` or by them being in `co_cellvars`, so we only emit # `make_cell` for the non-root-frame cells here. # TODO generalize this so we never need to call `make_cell`. if var.local_name is None: cg.add_push_null( lambda: cg.load_import_from(utils.__name__, "make_cell") ) cg.extend_output(create_call_function(0, False)) cg.add_cache(var) var.source = TempLocalSource(cg.tempvars[var]) # type: ignore[attr-defined] elif var.source is None: # pyrefly: ignore [bad-assignment] var.source = LocalCellSource(var.local_name) elif isinstance(var, variables.TensorVariable): # NOTE: for historical reasons we never assigned local sources # to newly constructed tensor object, so we keep it that way. # They are always loaded from output of the fx graph, so one can # think of it as having a "OutputGraphSource" for codegen # purposes. # # However, tensor subclass objects are different, because the # reconstruction logic in `PyCodegen` loads the data tensor from # graph output and then calls `as_subclass`, meaning we must # assign a source to it to ensure we only reconstruct one # subclass instance. if isinstance( var, variables.torch_function.TensorWithTFOverrideVariable ): # Don't codegen from temp source assigned from the 1st pass. cg(var, allow_cache=False) cg.add_cache(var) # `add_cache` generates STORE and consumes TOS, but we never # cleared it. 
TODO move this call into `add_cache` cg.clear_tos() var.source = TempLocalSource(cg.tempvars[var]) elif isinstance(var, variables.AutogradFunctionContextVariable): unimplemented( gb_type="AutogradFunctionContextVariable escaped Dynamo-traced region", context="", explanation="We cannot reconstruct a torch.autograd.Function's context object.", hints=[], ) else: # Reconstruct the bytecode for # base_cls.__new__(user_cls, *args) if isinstance(var, variables.UserDefinedObjectVariable): def load_new_method() -> None: # pyrefly: ignore [missing-attribute] assert var.base_cls_vt is not None cg(var.base_cls_vt) # type: ignore[attr-defined] cg.extend_output([cg.create_load_attr("__new__")]) cg.add_push_null(load_new_method) else: cg.add_push_null( lambda: cg.load_import_from(utils.__name__, "object_new") ) assert var.mutation_type.cls_source is not None cg(var.mutation_type.cls_source) # Generate the args to the __new__ method for arg in var.init_args: # type: ignore[attr-defined] cg(arg) # Call the __new__ method cg.extend_output(create_call_function(1 + len(var.init_args), False)) # type: ignore[attr-defined] cg.add_cache(var) var.source = TempLocalSource(cg.tempvars[var]) for ctx, args in self.save_for_backward: cg(ctx.source) cg.load_method("save_for_backward") for arg in args: cg(arg) cg.extend_output( [ *create_call_method(len(args)), create_instruction("POP_TOP"), ] ) def register_hook( self, tensor: "variables.TensorVariable", hook: VariableTracker, handle: "variables.RemovableHandleVariable", name: str, ) -> None: assert isinstance(tensor, variables.TensorVariable) assert isinstance(hook, variables.VariableTracker) assert ( isinstance(handle, variables.RemovableHandleVariable) and handle.is_mutable() ) assert hasattr(torch.Tensor, name) idx = len(self.tensor_hooks.keys()) # duplicate index possible because of self.remove_hook() while idx in self.tensor_hooks: idx += 1 self.tensor_hooks[idx] = (tensor, hook, handle, name) assert not handle.idx handle.idx = idx def remove_hook(self, idx: int) -> None: del self.tensor_hooks[idx] def codegen_hooks(self, cg: PyCodegen) -> None: for ( tensor, hook, handle, name, ) in self.tensor_hooks.values(): # Note: [On tensor.register_hook] # # register_hook on a tensor, AKA backward hooks, have slightly nuanced differences in how they are implemented # when it comes to hooks on objects with sources (inputs, params) vs objects without sources (intermediaries). # # For tensors with a source, we bypass direct inclusion of register_hook calls in the graph. # Instead, these are tracked and stashed as a global variable, enabling their association with tensors in # the residuals. During dynamo's frame creation, these hooks are invoked seamlessly on known reconstructible/fetch-able # tensors. Because a source indicates knowledge of this object outside the torch compile region, and # because we are running residuals firmly before .backward() can be run, it is sound to invoke # `register_hook` on a known tensor. # # For tensors without a source, we support a limited subset of hooks. Global functions only, and # compiled_autograd must be enabled or we will graph break. # # Handling the Handle: When a user retains the register_hook result in a handle, we intercept the # STORE_FAST operation to record the user-designated local variable name. This ensures the reconstructed # bytecode retains this name. If no handle is defined, we simply pop the generated value to keep the # stack intact. 
# # Dynamo Tensor Hooks Workflow: # - Functions passed to register_hook are lifted globally. # - For tensors with sources: # - In the "side_effects" phase of codegen, we iterate over tensors with hooks to: # - Generate the tensor. # - Issue a register_hook call on the tensor, linking to the globally stored function. # - Incorporate a handle if one was established in the eager phase. # - For tensors without sources: # - We don't generate any instructions for registering a hook. # - Handles from intermediary hooks are NYI. # - We produce a call function that utilizes the trace_wrapped higher order op, closing over it. # - We then manually insert the call function above into the graph. # - The handle's exact user-specified name, "user_code_variable_name", is discerned and associated during STORE_FAST. assert tensor.source, "Hooks on non input tensors NYI - should not get here" def gen_fn() -> None: cg(tensor) cg.extend_output([cg.create_load_attr(name)]) cg.add_push_null(gen_fn) cg(hook) cg.extend_output(create_call_function(1, False)) # Adding the handle to the cache means RemovableHandleVariable().reconstruct() will # be associated with the return value of register_hook(). This consumes the top of stack. cg.add_cache(handle) def get_ca_final_callbacks_var(self) -> "variables.ListVariable": from .variables.base import ValueMutationNew if self.ca_final_callbacks_var is None: self.ca_final_callbacks_var = variables.ListVariable( [], mutation_type=ValueMutationNew() ) return self.ca_final_callbacks_var def codegen_update_mutated(self, cg: PyCodegen) -> None: suffixes = [] for var in self._get_modified_vars(): if isinstance(var, variables.ListVariable): # old[:] = new cg(var, allow_cache=False) # Don't codegen via source cg(var.source) # type: ignore[attr-defined] cg.extend_output( [ cg.create_load_const(None), cg.create_load_const(None), create_instruction("BUILD_SLICE", arg=2), ] ) suffixes.append([create_instruction("STORE_SUBSCR")]) elif isinstance(var, variables.lists.DequeVariable): # For limited maxlen, the order of operations matters for side # effect, but we currently don't track the order, so no support. if not ( isinstance(var.maxlen, variables.ConstantVariable) and var.maxlen.value is None ): unimplemented( gb_type="Side effect on existing deque with limited maxlen", context="", explanation="This is not supported.", hints=[ "Don't use a deque with `maxlen` specified.", ], ) # old.extend(new), this runs last cg(var.source) cg.load_method("extend") cg(var, allow_cache=False) # Don't codegen via source suffixes.append( [ *create_call_method(1), create_instruction("POP_TOP"), ] ) # old.clear(), this runs first cg(var.source) cg.load_method("clear") suffixes.append( [ *create_call_method(0), create_instruction("POP_TOP"), ] ) elif isinstance(var, variables.ConstDictVariable): # Reconstruct works as follows: # (1) Skip codegen if there are no new items # (2) codegen(...) 
each pair of key/value # (3) create a new dictionary with the pairs of key/values above # (4) clear the original dictionary # + only if a key was removed from the input dict # (5) update the original dictionary with the dict created in (2) if var.has_new_items(): cg(var.source) # type: ignore[attr-defined] cg.load_method("update") cg(var, allow_cache=False) # Don't codegen via source if var.should_reconstruct_all: cg(var.source) # type: ignore[attr-defined] cg.load_method("clear") suffixes.append( [ *create_call_method(1), # update create_instruction("POP_TOP"), ] ) if var.should_reconstruct_all: # clear will appear before "update" as the suffixes are # applied in reverse order. suffixes.append( [ *create_call_method(0), # clear create_instruction("POP_TOP"), ] ) elif isinstance( var, variables.torch_function.TorchFunctionModeStackVariable ): # Needed in the finally block for stack restoration cg.add_push_null( lambda: cg.load_import_from( utils.__name__, "get_torch_function_mode_stack" ) ) cg.call_function(0, False) name = variables.torch_function.get_prev_stack_var_name() cg.code_options["co_varnames"] += (name,) cg.append_output(create_instruction("STORE_FAST", argval=name)) cg.add_push_null( lambda: cg.load_import_from( utils.__name__, "set_torch_function_mode_stack" ) ) cg.foreach(var.symbolic_stack) cg.append_output( create_instruction("BUILD_LIST", arg=len(var.symbolic_stack)) ) cg.call_function(1, False) cg.append_output(create_instruction("POP_TOP")) elif isinstance(var, variables.CellVariable) and var.local_name is not None: # Emit more readable and performant bytecode. # TODO generalize this for cells created during inlining. if var in self.store_attr_mutations: contents_var = self.load_cell(var) cg(contents_var) suffixes.append([cg.create_store_deref(var.local_name)]) elif self.is_attribute_mutation(var): if isinstance( var, variables.UserDefinedDictVariable, # pyrefly: ignore [bad-argument-type] ) and self.is_modified(var._dict_vt): # Do dict related update manually here. The store_attr # mutations will be applied later. varname_map = {} for name in _manual_dict_setitem.__code__.co_varnames: varname_map[name] = cg.tx.output.new_var() try: mro_index = type(var.value).__mro__.index( collections.OrderedDict ) except ValueError: mro_index = type(var.value).__mro__.index(dict) cg.extend_output( [ create_instruction("LOAD_CONST", argval=mro_index), create_instruction( "STORE_FAST", argval=varname_map["mro_index"] ), ] ) cg(var.source) # type: ignore[attr-defined] cg.extend_output( [ create_instruction( "STORE_FAST", argval=varname_map["dict_to"] ) ] ) # pyrefly: ignore [bad-argument-type] cg(var._dict_vt, allow_cache=False) # Don't codegen via source cg.extend_output( [ create_instruction( "STORE_FAST", argval=varname_map["dict_from"] ) ] ) dict_update_insts = bytecode_from_template( _manual_dict_setitem, varname_map=varname_map ) suffixes.append( [ *dict_update_insts, create_instruction("POP_TOP"), ] ) elif isinstance( var, variables.UserDefinedListVariable, # pyrefly: ignore [bad-argument-type] ) and self.is_modified(var._list_vt): # Update the list to the updated items. Be careful in # calling the list methods and not the overridden methods. 
varname_map = {} for name in _manual_list_update.__code__.co_varnames: varname_map[name] = cg.tx.output.new_var() cg(var.source) # type: ignore[attr-defined] cg.extend_output( [ create_instruction( "STORE_FAST", argval=varname_map["list_to"] ) ] ) # pyrefly: ignore [bad-argument-type] cg(var._list_vt, allow_cache=False) # Don't codegen via source cg.extend_output( [ create_instruction( "STORE_FAST", argval=varname_map["list_from"] ) ] ) list_update_insts = bytecode_from_template( _manual_list_update, varname_map=varname_map ) suffixes.append( [ *list_update_insts, create_instruction("POP_TOP"), ] ) # Applying mutations involves two steps: 1) Push all # reconstructed objects onto the stack. 2) Call STORE_ATTR to # apply the mutations. # # Dynamo must ensure that mutations are applied in the same # order as in the original program. Therefore, two reverse # operations occur below. # # The first reverse operation concerns `suffixes`. We apply # suffixes in reverse order due to the way Python handles the # stack. In Step 1, we push all reconstructed objects onto the # stack, but the item at the top of the stack refers to the last # attribute in the mutation order. If not fixed, this will apply # the mutations of attributes in the reverse order. To account # for this reversal, we iterate through the mutable attributes # in reverse order. for name, value in reversed( self.store_attr_mutations.get(var, {}).items() ): if isinstance(var, variables.NewGlobalVariable): cg.tx.output.update_co_names(name) cg(value) assert isinstance(var.source, GlobalSource) # type: ignore[attr-defined] suffixes.append( [create_instruction("STORE_GLOBAL", argval=name)] ) elif isinstance(value, variables.DeletedVariable): if isinstance( var.mutation_type, AttributeMutationExisting ) and hasattr(getattr(var, "value", None), name): cg.tx.output.update_co_names(name) cg(var.source) suffixes.append( [create_instruction("DELETE_ATTR", argval=name)] ) elif isinstance( var, variables.UserDefinedObjectVariable ) and var.should_skip_descriptor_setter(name): cg.add_push_null( lambda: cg.load_import_from( utils.__name__, "object_setattr_ignore_descriptor" ) ) cg(var.source) # type: ignore[attr-defined] cg(variables.ConstantVariable(name)) cg(value) suffixes.append( [ *create_call_function(3, False), create_instruction("POP_TOP"), ] ) elif ( isinstance(var, variables.UserDefinedObjectVariable) and var.needs_slow_setattr() ): # __setattr__ is defined on this object, so call object.__setattr__ directly cg.load_import_from("builtins", "object") cg.load_method("__setattr__") cg(var.source) # type: ignore[attr-defined] cg(variables.ConstantVariable(name)) cg(value) suffixes.append( [*create_call_method(3), create_instruction("POP_TOP")] ) else: cg.tx.output.update_co_names(name) cg(value) cg(var) suffixes.append([create_instruction("STORE_ATTR", argval=name)]) elif isinstance(var, variables.ListIteratorVariable): for _ in range(var.index): cg.add_push_null( lambda: cg.load_import_from(utils.__name__, "iter_next") ) cg(var.source) # type: ignore[attr-defined] cg.call_function(1, False) cg.pop_top() elif isinstance(var, variables.RandomVariable): # set correct random seed state def gen_fn() -> None: cg(var.source) # type: ignore[attr-defined] cg.load_attr("setstate") cg.add_push_null(gen_fn) cg(var.wrap_state(var.random.getstate())) suffixes.append( [ *create_call_function(1, False), # setstate create_instruction("POP_TOP"), ] ) else: raise AssertionError(type(var)) # do all the actual mutations at the very end to handle dependencies for 
suffix in reversed(suffixes): cg.extend_output(suffix) def is_empty(self) -> bool: return not ( any(map(self.is_modified, self.id_to_variable.values())) or self.tensor_hooks or self.save_for_backward ) def clear(self) -> None: self.keepalive.clear() self.id_to_variable.clear() @contextlib.contextmanager def allow_side_effects_under_checkpoint( tx: "InstructionTranslatorBase", ) -> Generator[None, None, None]: assert tx.output.current_tracer.under_activation_checkpoint orig_val = tx.output.current_tracer.allow_side_effects_under_checkpoint try: tx.output.current_tracer.allow_side_effects_under_checkpoint = True yield finally: tx.output.current_tracer.allow_side_effects_under_checkpoint = orig_val @contextlib.contextmanager def allow_externally_visible_side_effects_in_subtracer( tx: "InstructionTranslatorBase", ) -> Generator[None, None, None]: orig_val = tx.output.current_tracer.unsafe_allow_externally_visible_side_effects try: tx.output.current_tracer.unsafe_allow_externally_visible_side_effects = True yield finally: tx.output.current_tracer.unsafe_allow_externally_visible_side_effects = orig_val @contextlib.contextmanager def disallow_side_effects_in_generator( tx: "InstructionTranslatorBase", ) -> Generator[None, None, None]: orig_val = tx.output.current_tracer.is_reconstructing_generator try: tx.output.current_tracer.is_reconstructing_generator = True yield finally: tx.output.current_tracer.is_reconstructing_generator = orig_val
SideEffects
python
kamyu104__LeetCode-Solutions
Python/sell-diminishing-valued-colored-balls.py
{ "start": 88, "end": 925 }
class ____(object): def maxProfit(self, inventory, orders): """ :type inventory: List[int] :type orders: int :rtype: int """ MOD = 10**9+7 def check(inventory, orders, x): return count(inventory, x) > orders def count(inventory, x): return sum(count-x+1 for count in inventory if count >= x) left, right = 1, max(inventory) while left <= right: mid = left + (right-left)//2 if not check(inventory, orders, mid): right = mid-1 else: left = mid+1 # assert(orders-count(inventory, left) >= 0) return (sum((left+cnt)*(cnt-left+1)//2 for cnt in inventory if cnt >= left) + (left-1)*(orders-count(inventory, left))) % MOD
Solution
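A quick worked trace of the binary-search-on-price above, using the classic [2, 5] inventory with 4 orders; the print call is illustrative and not part of the original file:

# count(3) == 3 <= 4 < count(2) == 5, so the search stops with left == 3.
# profit = (3+5)*3//2      (sell the 5-ball at prices 5, 4, 3)
#        + (3-1)*(4-3)     (one leftover order sold at price 2)
#        = 12 + 2 = 14
print(Solution().maxProfit([2, 5], 4))  # expected: 14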
python
getsentry__sentry
src/sentry/api/endpoints/seer_models.py
{ "start": 1037, "end": 1158 }
class ____(APIException): status_code = 502 default_detail = "Failed to fetch models from Seer"
SeerConnectionError
python
rapidsai__cudf
python/cudf/cudf/io/parquet.py
{ "start": 60113, "end": 69831 }
class ____: """ ParquetWriter lets you incrementally write out a Parquet file from a series of cudf tables Parameters ---------- filepath_or_buffer : str, io.IOBase, os.PathLike, or list File path or buffer to write to. The argument may also correspond to a list of file paths or buffers. index : bool or None, default None If ``True``, include a dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, index(es) other than RangeIndex will be saved as columns. compression : {'snappy', 'ZSTD', 'ZLIB', 'LZ4', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. statistics : {'ROWGROUP', 'PAGE', 'COLUMN', 'NONE'}, default 'ROWGROUP' Level at which column statistics should be included in file. row_group_size_bytes: int, default ``uint64 max`` Maximum size of each row group of the output. By default, a virtually infinite size equal to ``uint64 max`` will be used. row_group_size_rows: int, default 1000000 Maximum number of rows of each row group of the output. By default, 1000000 (10^6 rows) will be used. max_page_size_bytes: int, default 524288 Maximum uncompressed size of each page of the output. By default, 524288 (512KB) will be used. max_page_size_rows: int, default 20000 Maximum number of rows of each page of the output. By default, 20000 will be used. max_dictionary_size: int, default 1048576 Maximum size of the dictionary page for each output column chunk. Dictionary encoding for column chunks that exceed this limit will be disabled. By default, 1048576 (1MB) will be used. use_dictionary : bool, default True If ``True``, enable dictionary encoding for Parquet page data subject to ``max_dictionary_size`` constraints. If ``False``, disable dictionary encoding for Parquet page data. store_schema : bool, default False If ``True``, enable computing and writing arrow schema to Parquet file footer's key-value metadata section for faithful round-tripping. 
See Also -------- cudf.io.parquet.write_parquet """ def __init__( self, filepath_or_buffer, index: bool | None = None, compression: Literal["snappy", "ZSTD", "ZLIB", "LZ4", None] = "snappy", statistics: Literal["ROWGROUP", "PAGE", "COLUMN", "NONE"] = "ROWGROUP", row_group_size_bytes: int = int(np.iinfo(np.uint64).max), row_group_size_rows: int = 1000000, max_page_size_bytes: int = 524288, max_page_size_rows: int = 20000, max_dictionary_size: int = 1048576, use_dictionary: bool = True, store_schema: bool = False, ): filepaths_or_buffers = ( list(filepath_or_buffer) if is_list_like(filepath_or_buffer) else [filepath_or_buffer] ) self.sink = plc.io.SinkInfo(filepaths_or_buffers) self.statistics = statistics self.compression = compression self.index = index self.initialized = False self.row_group_size_bytes = row_group_size_bytes self.row_group_size_rows = row_group_size_rows self.max_page_size_bytes = max_page_size_bytes self.max_page_size_rows = max_page_size_rows self.max_dictionary_size = max_dictionary_size self.use_dictionary = use_dictionary self.write_arrow_schema = store_schema def write_table(self, table, partitions_info=None) -> None: """Writes a single table to the file""" if not self.initialized: self._initialize_chunked_state( table, num_partitions=len(partitions_info) if partitions_info else 1, ) if self.index is not False and ( table.index.name is not None or isinstance(table.index, MultiIndex) ): columns = itertools.chain(table.index._columns, table._columns) plc_table = plc.Table( [col.to_pylibcudf(mode="read") for col in columns] ) else: plc_table = plc.Table( [col.to_pylibcudf(mode="read") for col in table._columns] ) self.writer.write(plc_table, partitions_info) def close(self, metadata_file_path=None) -> np.ndarray | None: if not self.initialized: return None column_chunks_file_paths = [] if metadata_file_path is not None: if is_list_like(metadata_file_path): column_chunks_file_paths = list(metadata_file_path) else: column_chunks_file_paths = [metadata_file_path] blob = self.writer.close(column_chunks_file_paths) if metadata_file_path is not None: return np.asarray(blob.obj) return None def __enter__(self) -> Self: return self def __exit__(self, *args) -> None: self.close() def _initialize_chunked_state( self, table, num_partitions: int = 1 ) -> None: """Prepares all the values required to build the chunked_parquet_writer_options and creates a writer """ # Set the table_metadata num_index_cols_meta = 0 plc_table = plc.Table( [col.to_pylibcudf(mode="read") for col in table._columns] ) self.tbl_meta = plc.io.types.TableInputMetadata(plc_table) if self.index is not False: if isinstance(table.index, MultiIndex): plc_table = plc.Table( [ col.to_pylibcudf(mode="read") for col in itertools.chain( table.index._columns, table._columns ) ] ) self.tbl_meta = plc.io.types.TableInputMetadata(plc_table) for level, idx_name in enumerate(table.index.names): self.tbl_meta.column_metadata[level].set_name(idx_name) num_index_cols_meta = len(table.index.names) else: if table.index.name is not None: plc_table = plc.Table( [ col.to_pylibcudf(mode="read") for col in itertools.chain( table.index._columns, table._columns ) ] ) self.tbl_meta = plc.io.types.TableInputMetadata(plc_table) self.tbl_meta.column_metadata[0].set_name(table.index.name) num_index_cols_meta = 1 for i, name in enumerate(table._column_names, num_index_cols_meta): self.tbl_meta.column_metadata[i].set_name(name) _set_col_metadata( table[name]._column, self.tbl_meta.column_metadata[i], ) index = False if isinstance(table.index, 
RangeIndex) else self.index user_data = [ {"pandas": ioutils.generate_pandas_metadata(table, index)} ] * num_partitions comp_type = _get_comp_type(self.compression) stat_freq = _get_stat_freq(self.statistics) dict_policy = ( plc.io.types.DictionaryPolicy.ADAPTIVE if self.use_dictionary else plc.io.types.DictionaryPolicy.NEVER ) options = ( plc.io.parquet.ChunkedParquetWriterOptions.builder(self.sink) .metadata(self.tbl_meta) .key_value_metadata(user_data) .compression(comp_type) .stats_level(stat_freq) .row_group_size_bytes(self.row_group_size_bytes) .row_group_size_rows(self.row_group_size_rows) .max_page_size_bytes(self.max_page_size_bytes) .max_page_size_rows(self.max_page_size_rows) .max_dictionary_size(self.max_dictionary_size) .write_arrow_schema(self.write_arrow_schema) .build() ) options.set_dictionary_policy(dict_policy) self.writer = plc.io.parquet.ChunkedParquetWriter.from_options(options) self.initialized = True def _parse_bytes(s: str) -> int: """Parse byte string to numbers Utility function vendored from Dask. >>> _parse_bytes('100') 100 >>> _parse_bytes('100 MB') 100000000 >>> _parse_bytes('100M') 100000000 >>> _parse_bytes('5kB') 5000 >>> _parse_bytes('5.4 kB') 5400 >>> _parse_bytes('1kiB') 1024 >>> _parse_bytes('1e6') 1000000 >>> _parse_bytes('1e6 kB') 1000000000 >>> _parse_bytes('MB') 1000000 >>> _parse_bytes(123) 123 >>> _parse_bytes('5 foos') Traceback (most recent call last): ... ValueError: Could not interpret 'foos' as a byte unit """ if isinstance(s, (int, float)): return int(s) s = s.replace(" ", "") if not any(char.isdigit() for char in s): s = "1" + s for i in range(len(s) - 1, -1, -1): if not s[i].isalpha(): break index = i + 1 prefix = s[:index] suffix = s[index:] try: n = float(prefix) except ValueError as e: raise ValueError( "Could not interpret '%s' as a number" % prefix ) from e try: multiplier = BYTE_SIZES[suffix.lower()] except KeyError as e: raise ValueError( "Could not interpret '%s' as a byte unit" % suffix ) from e result = n * multiplier return int(result)
ParquetWriter
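A minimal usage sketch for the chunked writer above, assuming cudf is installed and the class is bound to the name ParquetWriter; the output path is hypothetical:

import cudf

df = cudf.DataFrame({"a": [1, 2, 3]})
with ParquetWriter("out.parquet") as writer:  # hypothetical file name
    writer.write_table(df)  # first call initializes the chunked writer state
    writer.write_table(df)  # later calls append to the same Parquet file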
python
lepture__authlib
tests/flask/test_oauth2/test_authorization_code_iss_parameter.py
{ "start": 614, "end": 2669 }
class ____(_IssuerParameter): def get_issuer(self) -> str: return "https://auth.test" @pytest.fixture(autouse=True) def server(server): server.register_grant(AuthorizationCodeGrant) return server @pytest.fixture(autouse=True) def client(client, db): client.set_client_metadata( { "redirect_uris": ["https://client.test"], "scope": "profile address", "token_endpoint_auth_method": "client_secret_basic", "response_types": ["code"], "grant_types": ["authorization_code"], } ) db.session.add(client) db.session.commit() return client def test_rfc9207_enabled_success(test_client, server): """Check that when RFC9207 is implemented, the authorization response has an ``iss`` parameter.""" server.register_extension(IssuerParameter()) url = authorize_url + "&state=bar" rv = test_client.post(url, data={"user_id": "1"}) assert "iss=https%3A%2F%2Fauth.test" in rv.location def test_rfc9207_disabled_success_no_iss(test_client): """Check that when RFC9207 is not implemented, the authorization response contains no ``iss`` parameter.""" url = authorize_url + "&state=bar" rv = test_client.post(url, data={"user_id": "1"}) assert "iss=" not in rv.location def test_rfc9207_enabled_error(test_client, server): """Check that when RFC9207 is implemented, the authorization response has an ``iss`` parameter, even when an error is returned.""" server.register_extension(IssuerParameter()) rv = test_client.post(authorize_url) assert "error=access_denied" in rv.location assert "iss=https%3A%2F%2Fauth.test" in rv.location def test_rfc9207_disabled_error_no_iss(test_client): """Check that when RFC9207 is not implemented, the authorization response contains no ``iss`` parameter, even when an error is returned.""" rv = test_client.post(authorize_url) assert "error=access_denied" in rv.location assert "iss=" not in rv.location
IssuerParameter
python
dagster-io__dagster
examples/docs_projects/project_ml/src/project_ml/defs/resources.py
{ "start": 679, "end": 1994 }
class ____(ModelStoreResource): """Local file system model storage.""" models_path: str = "./models" def save_model(self, model_data: dict[str, Any], model_name: str): """Save model data to local filesystem.""" os.makedirs(self.models_path, exist_ok=True) model_path = os.path.join(self.models_path, f"{model_name}.pkl") with open(model_path, "wb") as f: pickle.dump(model_data, f) def load_model(self, model_name: str) -> dict[str, Any]: """Load model data from local filesystem.""" model_path = os.path.join(self.models_path, f"{model_name}.pkl") with open(model_path, "rb") as f: return pickle.load(f) def list_models(self) -> list[str]: """List available models, sorted by modification time (newest first).""" if not os.path.exists(self.models_path): return [] model_files = [f for f in os.listdir(self.models_path) if f.endswith(".pkl")] # Sort by modification time, newest first model_files.sort( key=lambda x: os.path.getmtime(os.path.join(self.models_path, x)), reverse=True, ) # Return just the model names without extension return [os.path.splitext(f)[0] for f in model_files]
LocalModelStoreResource
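A small usage sketch of the resource above; the model name and path are hypothetical, and it assumes the Dagster resource base class accepts its fields as keyword arguments (standard for pydantic-backed ConfigurableResource subclasses):

store = LocalModelStoreResource(models_path="/tmp/models")  # hypothetical path
store.save_model({"weights": [0.1, 0.2]}, "mnist_v1")       # hypothetical model
assert store.list_models() == ["mnist_v1"]
assert store.load_model("mnist_v1") == {"weights": [0.1, 0.2]}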
python
coleifer__peewee
tests/shortcuts.py
{ "start": 1852, "end": 1954 }
class ____(TestModel): id = IntegerField(primary_key=True) basket = ForeignKeyField(Basket)
Item
python
pyparsing__pyparsing
pyparsing/core.py
{ "start": 149348, "end": 150775 }
class ____(PositionToken): r"""Matches if current position is at the logical beginning of a line (after skipping whitespace) within the parse string Example: .. testcode:: test = '''\ AAA this line AAA and this line AAA and even this line B AAA but definitely not this line ''' for t in (LineStart() + 'AAA' + rest_of_line).search_string(test): print(t) prints: .. testoutput:: ['AAA', ' this line'] ['AAA', ' and this line'] ['AAA', ' and even this line'] """ def __init__(self) -> None: super().__init__() self.leave_whitespace() self.orig_whiteChars = set() | self.whiteChars self.whiteChars.discard("\n") self.skipper = Empty().set_whitespace_chars(self.whiteChars) self.set_name("start of line") def preParse(self, instring: str, loc: int) -> int: if loc == 0: return loc ret = self.skipper.preParse(instring, loc) if "\n" in self.orig_whiteChars: while instring[ret : ret + 1] == "\n": ret = self.skipper.preParse(instring, ret + 1) return ret def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: if col(loc, instring) == 1: return loc, [] raise ParseException(instring, loc, self.errmsg, self)
LineStart
python
boto__boto3
boto3/crt.py
{ "start": 3818, "end": 7254 }
class ____: """ This wrapper keeps track of our underlying CRT client, the lock used to acquire it and the region we've used to instantiate the client. Due to limitations in the existing CRT interfaces, we can only make calls in a single region, and redirects are not supported. We track the region to ensure we don't use the CRT client when a successful request cannot be made. """ def __init__(self, crt_client, process_lock, region, cred_provider): self.crt_client = crt_client self.process_lock = process_lock self.region = region self.cred_provider = cred_provider def is_crt_compatible_request(client, crt_s3_client): """ The boto3 client must use the same signing region and credentials as the CRT_S3_CLIENT singleton. Otherwise, fall back to the classic client. """ if crt_s3_client is None: return False boto3_creds = client._get_credentials() if boto3_creds is None: return False is_same_identity = compare_identity( boto3_creds.get_frozen_credentials(), crt_s3_client.cred_provider ) is_same_region = client.meta.region_name == crt_s3_client.region return is_same_region and is_same_identity def compare_identity(boto3_creds, crt_s3_creds): try: crt_creds = crt_s3_creds() except botocore.exceptions.NoCredentialsError: return False is_matching_identity = ( boto3_creds.access_key == crt_creds.access_key_id and boto3_creds.secret_key == crt_creds.secret_access_key and boto3_creds.token == crt_creds.session_token ) return is_matching_identity def _validate_crt_transfer_config(config): if config is None: return # CRT client can also be configured via `AUTO_RESOLVE_TRANSFER_CLIENT` # but it predates this validation. We only validate against CRT client # configured via `CRT_TRANSFER_CLIENT` to preserve compatibility. if config.preferred_transfer_client != CRT_TRANSFER_CLIENT: return invalid_crt_args = [] for param in config.DEFAULTS.keys(): val = config.get_deep_attr(param) if ( param not in _ALLOWED_CRT_TRANSFER_CONFIG_OPTIONS and val is not config.UNSET_DEFAULT ): invalid_crt_args.append(param) if len(invalid_crt_args) > 0: raise InvalidCrtTransferConfigError( "The following transfer config options are invalid " "when preferred_transfer_client is set to crt: " f"{', '.join(invalid_crt_args)}" ) def create_crt_transfer_manager(client, config): """Create a CRTTransferManager for optimized data transfer.""" crt_s3_client = get_crt_s3_client(client, config) if is_crt_compatible_request(client, crt_s3_client): crt_transfer_manager_kwargs = { 'crt_s3_client': crt_s3_client.crt_client, 'crt_request_serializer': BOTOCORE_CRT_SERIALIZER, } if TRANSFER_CONFIG_SUPPORTS_CRT: _validate_crt_transfer_config(config) crt_transfer_manager_kwargs['config'] = config if not TRANSFER_CONFIG_SUPPORTS_CRT and config: logger.warning( 'Using TransferConfig with CRT client requires ' 's3transfer >= 0.16.0, configured values will be ignored.' ) return CRTTransferManager(**crt_transfer_manager_kwargs) return None
CRTS3Client
python
joke2k__faker
faker/providers/color/uz_UZ/__init__.py
{ "start": 98, "end": 1940 }
class ____(ColorProvider): """Implement color provider for ``uz_UZ`` locale.""" # Source: https://uz.wiktionary.org/wiki/Vikilug%E2%80%98at:Ranglar all_colors = OrderedDict( ( ("Akvamarin", "#7FFFD4"), ("Anor", "#800000"), ("Apelsin", "#FFA000"), ("Bej", "#F5F5DC"), ("Binafsha", "#8B00FF"), ("Bodom", "#FFEBCD"), ("Bordo rang", "#800000"), ("Doimiy sariq", "#FFBF00"), ("Hantal", "#120A8F"), ("Havo rang", "#000080"), ("Indigo", "#4B0082"), ("Jigar rang", "#964B00"), ("Kul", "#808080"), ("Kumush", "#C0C0C0"), ("Koʻk", "#0000FF"), ("Kremi", "#FFFDD0"), ("Magenta", "#FF00FF"), ("Malina", "#DC143C"), ("Marjon", "#FF7F50"), ("Moshrang", "#C3B091"), ("Oq", "#FFFFFF"), ("Oxra", "#CC7722"), ("Oltin", "#FFD700"), ("Pushti", "#FFC0CB"), ("Qizil", "#FF0000"), ("Qizgʻish binafsharang", "#E0B0FF"), ("Qora", "#000000"), ("Qizil-sariq", "#FF8C69"), ("Samoviy", "#87CEFF"), ("Sariq", "#FFFF00"), ("Siyohrang", "#660099"), ("Sepya", "#705714"), ("Siena", "#FF8247"), ("Suv", "#00FFFF"), ("Terrakota", "#E2725B"), ("Turkuaz", "#30D5C8"), ("Ultramarin", "#120A8F"), ("Yashil", "#00FF00"), ("Zumrad", "#50C878"), ) ) safe_colors = ( "Oq", "Qora", "Yashil", "Ko'k", "Qizil", "Sariq", "Pushti", "Olov", "Qaymoq", "Laym", "Kumush", "Kulrang", )
Provider
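For context, locale providers like this one are consumed through the standard Faker entry point; a minimal sketch (output is random, the shown value is only one possibility):

from faker import Faker

fake = Faker("uz_UZ")
fake.color_name()       # e.g. "Yashil", drawn from all_colors
fake.safe_color_name()  # drawn from safe_colors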
python
pytorch__pytorch
test/test_dataloader.py
{ "start": 112179, "end": 112639 }
class ____(TestCase): def setUp(self): super().setUp() self.dataset = StringDataset() @unittest.skipIf(not TEST_CUDA, "CUDA unavailable") def test_shuffle_pin_memory(self): loader = DataLoader( self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True ) for s, n in loader: self.assertIsInstance(s[0], str) self.assertTrue(n.is_pinned())
TestStringDataLoader
python
mlflow__mlflow
mlflow/genai/utils/enum_utils.py
{ "start": 34, "end": 303 }
class ____(EnumMeta): """Metaclass for Enum classes that allows to check if a value is a valid member of the Enum.""" def __contains__(cls, item): try: cls(item) except ValueError: return False return True
MetaEnum
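A short sketch of what the metaclass enables; the Color enum here is hypothetical:

from enum import Enum

class Color(Enum, metaclass=MetaEnum):
    RED = "red"

"red" in Color   # True: Color("red") succeeds
"blue" in Color  # False: Color("blue") raises ValueError, caught by __contains__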
python
google__jax
tests/state_test.py
{ "start": 38464, "end": 41309 }
class ____(NamedTuple): index_param: IndexParam ref_bdim: int | None non_slice_idx_bdims: tuple[int | None, ...] slice_bdim: int bat_ref_aval: shaped_array_ref bat_ref_shape: Shape bat_non_slice_idx_avals: tuple[core.ShapedArray, ...] bat_non_slice_idx_shapes: tuple[Shape, ...] bat_slice_aval: core.ShapedArray bat_slice_shape: Shape def maybe_tuple_insert(t: tuple[Any, ...], idx: int | None, val: Any) -> tuple[Any, ...]: if idx is None: return t return tuple_insert(t, idx, val) @hps.composite def vmappable_index_params(draw, *, op_type: str): axis_size = draw(hps.integers(min_value=1, max_value=7), label='axis_size') index_param: IndexParam = draw(index_params()) non_slice_idx_bdims = tuple( draw(hps.one_of( hps.none(), hps.integers(min_value=0, max_value=len(index_param.idx_shape)))) for b in index_param.indexed_dims if b) bat_non_slice_idx_shapes = tuple( maybe_tuple_insert(index_param.idx_shape, idx_bdim, axis_size) for idx_bdim in non_slice_idx_bdims) if op_type == "swap": # In a swap, the ref *must* be batched ref_bdim = draw(hps.integers(min_value=0, max_value=len(index_param.ref_shape))) if any(idx_bdim is not None for idx_bdim in non_slice_idx_bdims): # If it's a swap, if indices are batched, val must be batched. slice_bdim = draw(hps.integers( min_value=0, max_value=len(index_param.slice_shape))) else: slice_bdim = draw(hps.one_of(hps.none(), hps.integers( min_value=0, max_value=len(index_param.slice_shape)))) elif op_type == "get": # In a get, the indices must be batched or ref is batched if all(idx_bdim is None for idx_bdim in non_slice_idx_bdims): ref_bdim = draw(hps.integers(min_value=0, max_value=len(index_param.ref_shape))) else: ref_bdim = draw(hps.one_of(hps.none(), hps.integers(min_value=0, max_value=len(index_param.ref_shape)))) slice_bdim = draw(hps.integers( min_value=0, max_value=len(index_param.slice_shape))) bat_ref_shape = maybe_tuple_insert(index_param.ref_shape, ref_bdim, axis_size) bat_ref_aval = shaped_array_ref(bat_ref_shape, np.float32) bat_non_slice_idx_avals = tuple( core.ShapedArray(shape, np.int32) for shape in bat_non_slice_idx_shapes) bat_slice_shape = maybe_tuple_insert(index_param.slice_shape, slice_bdim, axis_size) bat_slice_aval = core.ShapedArray(bat_slice_shape, np.float32) return VmappableIndexParam(index_param, ref_bdim, non_slice_idx_bdims, slice_bdim, bat_ref_aval, bat_ref_shape, bat_non_slice_idx_avals, bat_non_slice_idx_shapes, bat_slice_aval, bat_slice_shape)
VmappableIndexParam
python
sympy__sympy
sympy/integrals/transforms.py
{ "start": 47783, "end": 49441 }
class ____(HankelTypeTransform): """ Class representing unevaluated Hankel transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute Hankel transforms, see the :func:`hankel_transform` docstring. """ _name = 'Hankel' def hankel_transform(f, r, k, nu, **hints): r""" Compute the Hankel transform of `f`, defined as .. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r. Explanation =========== If the transform cannot be computed in closed form, this function returns an unevaluated :class:`HankelTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. Examples ======== >>> from sympy import hankel_transform, inverse_hankel_transform >>> from sympy import exp >>> from sympy.abc import r, k, m, nu, a >>> ht = hankel_transform(1/r**m, r, k, nu) >>> ht 2*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/(2**m*gamma(m/2 + nu/2)) >>> inverse_hankel_transform(ht, k, r, nu) r**(-m) >>> ht = hankel_transform(exp(-a*r), r, k, 0) >>> ht a/(k**3*(a**2/k**2 + 1)**(3/2)) >>> inverse_hankel_transform(ht, k, r, 0) exp(-a*r) See Also ======== fourier_transform, inverse_fourier_transform sine_transform, inverse_sine_transform cosine_transform, inverse_cosine_transform inverse_hankel_transform mellin_transform, laplace_transform """ return HankelTransform(f, r, k, nu).doit(**hints)
HankelTransform
python
ijl__orjson
test/test_indent.py
{ "start": 153, "end": 3578 }
class ____: def test_equivalent(self): """ OPT_INDENT_2 is equivalent to indent=2 """ obj = {"a": "b", "c": {"d": True}, "e": [1, 2]} assert orjson.dumps(obj, option=orjson.OPT_INDENT_2) == json.dumps( obj, indent=2, ).encode("utf-8") def test_sort(self): obj = {"b": 1, "a": 2} assert ( orjson.dumps(obj, option=orjson.OPT_INDENT_2 | orjson.OPT_SORT_KEYS) == b'{\n "a": 2,\n "b": 1\n}' ) def test_non_str(self): obj = {1: 1, "a": 2} assert ( orjson.dumps(obj, option=orjson.OPT_INDENT_2 | orjson.OPT_NON_STR_KEYS) == b'{\n "1": 1,\n "a": 2\n}' ) def test_options(self): obj = { 1: 1, "b": True, "a": datetime.datetime(1970, 1, 1), } assert ( orjson.dumps( obj, option=orjson.OPT_INDENT_2 | orjson.OPT_SORT_KEYS | orjson.OPT_NON_STR_KEYS | orjson.OPT_NAIVE_UTC, ) == b'{\n "1": 1,\n "a": "1970-01-01T00:00:00+00:00",\n "b": true\n}' ) def test_empty(self): obj = [{}, [[[]]], {"key": []}] ref = b'[\n {},\n [\n [\n []\n ]\n ],\n {\n "key": []\n }\n]' assert orjson.dumps(obj, option=orjson.OPT_INDENT_2) == ref def test_list_max(self): fixture = b"".join( (b"".join(b"[" for _ in range(255)), b"".join(b"]" for _ in range(255))), ) obj = orjson.loads(fixture) serialized = orjson.dumps( obj, option=orjson.OPT_INDENT_2, ) assert orjson.loads(serialized) == obj def test_dict_max(self): fixture = {"key": None} target = fixture for _ in range(253): target["key"] = {"key": None} # type:ignore target = target["key"] # type: ignore serialized = orjson.dumps( fixture, option=orjson.OPT_INDENT_2, ) assert orjson.loads(serialized) == fixture def test_twitter_pretty(self): """ twitter.json pretty """ obj = read_fixture_obj("twitter.json.xz") assert orjson.dumps(obj, option=orjson.OPT_INDENT_2) == json.dumps( obj, indent=2, ensure_ascii=False, ).encode("utf-8") def test_github_pretty(self): """ github.json pretty """ obj = read_fixture_obj("github.json.xz") assert orjson.dumps(obj, option=orjson.OPT_INDENT_2) == json.dumps( obj, indent=2, ensure_ascii=False, ).encode("utf-8") def test_canada_pretty(self): """ canada.json pretty """ obj = read_fixture_obj("canada.json.xz") assert orjson.dumps(obj, option=orjson.OPT_INDENT_2) == json.dumps( obj, indent=2, ensure_ascii=False, ).encode("utf-8") def test_citm_catalog_pretty(self): """ citm_catalog.json pretty """ obj = read_fixture_obj("citm_catalog.json.xz") assert orjson.dumps(obj, option=orjson.OPT_INDENT_2) == json.dumps( obj, indent=2, ensure_ascii=False, ).encode("utf-8")
TestIndentedOutput
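For reference, the equivalence these tests pin down, assuming orjson is installed:

import orjson

orjson.dumps({"a": "b"}, option=orjson.OPT_INDENT_2)
# b'{\n  "a": "b"\n}'  -- the same bytes as json.dumps({"a": "b"}, indent=2).encode()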
python
lazyprogrammer__machine_learning_examples
cnn_class2/tf_resnet_convblock.py
{ "start": 482, "end": 1285 }
class ____: def __init__(self, d, mi, mo, stride=2, padding='VALID'): self.W = tf.Variable(init_filter(d, mi, mo, stride)) self.b = tf.Variable(np.zeros(mo, dtype=np.float32)) self.stride = stride self.padding = padding def forward(self, X): X = tf.nn.conv2d( X, self.W, strides=[1, self.stride, self.stride, 1], padding=self.padding ) X = X + self.b return X def copyFromKerasLayers(self, layer): # only 1 layer to copy from W, b = layer.get_weights() op1 = self.W.assign(W) op2 = self.b.assign(b) self.session.run((op1, op2)) # def copyFromWeights(self, W, b): # op1 = self.W.assign(W) # op2 = self.b.assign(b) # self.session.run((op1, op2)) def get_params(self): return [self.W, self.b]
ConvLayer
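A shape-level usage sketch for the layer above; it assumes TF1-style graph mode (matching the class) and the module's init_filter helper, which is defined elsewhere in the same file:

layer = ConvLayer(d=3, mi=3, mo=64, stride=2, padding='VALID')
X = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
Y = layer.forward(X)  # Y has shape (None, 111, 111, 64): floor((224-3)/2)+1 = 111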
python
doocs__leetcode
solution/2200-2299/2287.Rearrange Characters to Make Target String/Solution.py
{ "start": 0, "end": 194 }
class ____: def rearrangeCharacters(self, s: str, target: str) -> int: cnt1 = Counter(s) cnt2 = Counter(target) return min(cnt1[c] // v for c, v in cnt2.items())
Solution
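A worked example of the counting logic, using the well-known test strings; note the class body assumes Counter is already imported at module scope:

from collections import Counter  # needed if running the class standalone

# s = "ilovecodingonleetcode", target = "code"
# cnt1: c=2, o=4, d=2, e=4 (other letters ignored); cnt2: c=o=d=e=1
# min(2//1, 4//1, 2//1, 4//1) == 2, so two copies of "code" can be formed
print(Solution().rearrangeCharacters("ilovecodingonleetcode", "code"))  # 2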
python
encode__starlette
starlette/datastructures.py
{ "start": 10162, "end": 12024 }
class ____(ImmutableMultiDict[Any, Any]): def __setitem__(self, key: Any, value: Any) -> None: self.setlist(key, [value]) def __delitem__(self, key: Any) -> None: self._list = [(k, v) for k, v in self._list if k != key] del self._dict[key] def pop(self, key: Any, default: Any = None) -> Any: self._list = [(k, v) for k, v in self._list if k != key] return self._dict.pop(key, default) def popitem(self) -> tuple[Any, Any]: key, value = self._dict.popitem() self._list = [(k, v) for k, v in self._list if k != key] return key, value def poplist(self, key: Any) -> list[Any]: values = [v for k, v in self._list if k == key] self.pop(key) return values def clear(self) -> None: self._dict.clear() self._list.clear() def setdefault(self, key: Any, default: Any = None) -> Any: if key not in self: self._dict[key] = default self._list.append((key, default)) return self[key] def setlist(self, key: Any, values: list[Any]) -> None: if not values: self.pop(key, None) else: existing_items = [(k, v) for (k, v) in self._list if k != key] self._list = existing_items + [(key, value) for value in values] self._dict[key] = values[-1] def append(self, key: Any, value: Any) -> None: self._list.append((key, value)) self._dict[key] = value def update( self, *args: MultiDict | Mapping[Any, Any] | list[tuple[Any, Any]], **kwargs: Any, ) -> None: value = MultiDict(*args, **kwargs) existing_items = [(k, v) for (k, v) in self._list if k not in value.keys()] self._list = existing_items + value.multi_items() self._dict.update(value)
MultiDict
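A brief sketch of the mutation semantics above, assuming starlette's ImmutableMultiDict base provides getlist:

md = MultiDict([("k", "1")])
md.append("k", "2")
md.getlist("k")        # ["1", "2"]
md.setlist("k", ["3"])
md["k"]                # "3" (setlist keeps only the last value in _dict)
md.pop("k")            # "3"; also removes every "k" entry from the backing list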
python
encode__django-rest-framework
tests/test_throttling.py
{ "start": 12626, "end": 12972 }
class ____(XffTestingBase): def test_accepts_request_under_limit(self): self.config_proxy(0) assert self.view(self.request).status_code == 200 def test_denies_request_over_limit(self): self.config_proxy(0) self.view(self.request) assert self.view(self.request).status_code == 429
IdWithXffBasicTests
python
jmcnamara__XlsxWriter
xlsxwriter/test/worksheet/test_encode_password.py
{ "start": 301, "end": 1972 }
class ____(unittest.TestCase): """ Test the Worksheet _encode_password() methods. """ def setUp(self): self.fh = StringIO() self.worksheet = Worksheet() self.worksheet._set_filehandle(self.fh) def test__encode_password(self): """Test the _encode_password() function""" tests = [ ("password", "83AF"), ("This is a longer phrase", "D14E"), ("0", "CE2A"), ("01", "CEED"), ("012", "CF7C"), ("0123", "CC4B"), ("01234", "CACA"), ("012345", "C789"), ("0123456", "DC88"), ("01234567", "EB87"), ("012345678", "9B86"), ("0123456789", "FF84"), ("01234567890", "FF86"), ("012345678901", "EF87"), ("0123456789012", "AF8A"), ("01234567890123", "EF90"), ("012345678901234", "EFA5"), ("0123456789012345", "EFD0"), ("01234567890123456", "EF09"), ("012345678901234567", "EEB2"), ("0123456789012345678", "ED33"), ("01234567890123456789", "EA14"), ("012345678901234567890", "E615"), ("0123456789012345678901", "FE96"), ("01234567890123456789012", "CC97"), ("012345678901234567890123", "AA98"), ("0123456789012345678901234", "FA98"), ("01234567890123456789012345", "D298"), ("0123456789012345678901234567890", "D2D3"), ] for password, exp in tests: got = self.worksheet._encode_password(password) self.assertEqual(exp, got)
TestEncodePassword
python
huggingface__transformers
tests/utils/test_hf_argparser.py
{ "start": 1890, "end": 2019 }
class ____: foo: BasicEnum = "toto" def __post_init__(self): self.foo = BasicEnum(self.foo) @dataclass
EnumExample
python
tensorflow__tensorflow
tensorflow/python/distribute/input_lib.py
{ "start": 54358, "end": 56861 }
class ____(object): """Iterator for a single `tf.data.Dataset`.""" def __init__(self, dataset, worker, devices, options=None): """Create iterator for the `dataset` to fetch data to worker's `devices`. A `MultiDeviceIterator` or `OwnedMultiDeviceIterator` is used to prefetch input to the devices on the given worker. Args: dataset: A `tf.data.Dataset` instance. worker: Worker on which ops should be created. devices: Distribute data from `dataset` to these devices. options: Options for distributing the input. """ self._dataset = dataset self._worker = worker self._devices = devices self._element_spec = dataset.element_spec self._options = options self._make_iterator() def _make_iterator(self): raise NotImplementedError("must be implemented in descendants") def _format_data_list_with_options(self, data_list): """Change the data into a list type if required. The OwnedMultiDeviceIterator returns the list data type, while the PER_REPLICA iterator (when used with prefetch disabled) returns without the enclosed list. This is to fix the inconsistency. Args: data_list: data_list Returns: list """ if (self._options and self._options.experimental_replication_mode == InputReplicationMode.PER_REPLICA and not self._options.experimental_fetch_to_device): return [data_list] else: return data_list def get_next(self, device, name=None): """Get next element for the given device.""" del name with ops.device(self._worker): if _should_use_multi_device_iterator(self._options): return self._iterator.get_next(device) else: return self._iterator.get_next() def get_next_as_list(self, name=None): """Get next element from the underlying iterator. Runs the iterator get_next() within a device scope. Since this doesn't use get_next_as_optional(), it is considerably faster than get_next_as_optional_list(), but it raises EOFError if any of the devices doesn't get any data. Args: name: not used. Returns: A list consisting of the next data from each device. """ del name with ops.device(self._worker): return self._format_data_list_with_options(self._iterator.get_next()) def get_next_as_optional_list(self): with ops.device(self._worker): return self._format_data_list_with_options( self._iterator.get_next_as_optional())
_SingleWorkerDatasetIteratorBase
python
astropy__astropy
astropy/visualization/wcsaxes/tests/test_wcsapi.py
{ "start": 20247, "end": 24618 }
class ____(BaseLowLevelWCS): pixel_dim = 2 @property def pixel_n_dim(self): return self.pixel_dim @property def world_n_dim(self): return 5 @property def world_axis_physical_types(self): return [ "em.freq", "time", "pos.eq.ra", "pos.eq.dec", "phys.polarization.stokes", ] @property def world_axis_units(self): return ["Hz", "day", "deg", "deg", ""] @property def world_axis_names(self): return ["Frequency", "", "RA", "DEC", ""] def pixel_to_world_values(self, *pixel_arrays): pixel_arrays = (list(pixel_arrays) * 3)[:-1] # make list have 5 elements return [ np.asarray(pix) * scale for pix, scale in zip(pixel_arrays, [10, 0.2, 0.4, 0.39, 2]) ] def world_to_pixel_values(self, *world_arrays): world_arrays = world_arrays[:2] # make list have 2 elements return [ np.asarray(world) / scale for world, scale in zip(world_arrays, [10, 0.2]) ] @property def world_axis_object_components(self): return [ ("freq", 0, "value"), ("time", 0, "mjd"), ("celestial", 0, "spherical.lon.degree"), ("celestial", 1, "spherical.lat.degree"), ("stokes", 0, "value"), ] @property def world_axis_object_classes(self): return { "celestial": (SkyCoord, (), {"unit": "deg"}), "time": (Time, (), {"format": "mjd"}), "freq": (Quantity, (), {"unit": "Hz"}), "stokes": (Quantity, (), {"unit": "one"}), } def test_edge_axes(): # Check that axes on the edge of a spherical projection are shown properly # (see https://github.com/astropy/astropy/issues/10441) shape = [180, 360] data = np.random.rand(*shape) header = { "wcsaxes": 2, "crpix1": 180.5, "crpix2": 90.5, "cdelt1": 1.0, "cdelt2": 1.0, "cunit1": "deg", "cunit2": "deg", "ctype1": "CRLN-CAR", "ctype2": "CRLT-CAR", "crval1": 0.0, "crval2": 0.0, "lonpole": 0.0, "latpole": 90.0, } wcs = WCS(header) fig = Figure() canvas = FigureCanvasAgg(fig) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=wcs) ax.imshow(data, origin="lower") # By default the x- and y- axes should be drawn lon = ax.coords[0] lat = ax.coords[1] canvas.draw() np.testing.assert_equal( lon._ticks.world["b"], np.array([90.0, 180.0, 180.0, 270.0, 0.0]) ) np.testing.assert_equal( lat._ticks.world["l"], np.array([-90.0, -60.0, -30.0, 0.0, 30.0, 60.0, 90.0]) ) def test_coord_meta_wcsapi(): wcs = LowLevelWCS5D() wcs.pixel_dim = 5 _, coord_meta = transform_coord_meta_from_wcs( wcs, RectangularFrame, slices=[0, 0, "x", "y", 0] ) assert coord_meta["name"] == [ ("em.freq", "Frequency"), "time", ("pos.eq.ra", "RA"), ("pos.eq.dec", "DEC"), "phys.polarization.stokes", ] assert coord_meta["type"] == ["scalar", "scalar", "longitude", "latitude", "scalar"] assert coord_meta["wrap"] == [None, None, None, None, None] assert coord_meta["unit"] == [ u.Unit("Hz"), u.Unit("d"), u.Unit("deg"), u.Unit("deg"), u.one, ] assert coord_meta["visible"] == [True, True, True, True, True] assert coord_meta["format_unit"] == [ u.Unit("Hz"), u.Unit("d"), u.Unit("hourangle"), u.Unit("deg"), u.one, ] assert coord_meta["default_axislabel_position"] == ["#", "#", "#", "#", "#"] assert coord_meta["default_ticklabel_position"] == ["#", "#", "#", "#", "#"] assert coord_meta["default_ticks_position"] == ["#", "#", "#", "#", "#"] assert coord_meta["default_axis_label"] == [ "Frequency", "time", "RA", "DEC", "phys.polarization.stokes", ] @figure_test def test_wcsapi_5d_with_names(): # Test for plotting image and also setting values of ticks fig = Figure(figsize=(6, 6)) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=LowLevelWCS5D()) ax.set_xlim(-0.5, 148.5) ax.set_ylim(-0.5, 148.5) return fig
LowLevelWCS5D
python
ansible__ansible
lib/ansible/playbook/role/definition.py
{ "start": 1545, "end": 9400 }
class ____(Base, Conditional, Taggable, CollectionSearch):

    role = NonInheritableFieldAttribute(isa='string')

    def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None, collection_list=None):

        super(RoleDefinition, self).__init__()

        self._play = play
        self._variable_manager = variable_manager
        self._loader = loader

        self._role_path = None
        self._role_collection = None
        self._role_basedir = role_basedir
        self._role_params = dict()
        self._collection_list = collection_list

    # def __repr__(self):
    #     return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>')

    @staticmethod
    def load(data, variable_manager=None, loader=None):
        raise AnsibleError("not implemented")

    def preprocess_data(self, ds):
        # role names that are simply numbers can be parsed by PyYAML
        # as integers even when quoted, so turn it into a string type
        if isinstance(ds, int):
            ds = "%s" % ds

        if not isinstance(ds, dict) and not isinstance(ds, str):
            raise AnsibleAssertionError()

        if isinstance(ds, dict):
            ds = super(RoleDefinition, self).preprocess_data(ds)

        # save the original ds for use later
        self._ds = ds

        # the new, cleaned datastructure, which will have legacy items reduced
        # to a standard structure suitable for the attributes of the task
        # class; copy any tagged data to preserve things like origin
        new_ds = AnsibleTagHelper.tag_copy(ds, {})

        # first we pull the role name out of the data structure,
        # and then use that to determine the role path (which may
        # result in a new role name, if it was a file path)
        role_name = self._load_role_name(ds)
        (role_name, role_path) = self._load_role_path(role_name)

        # next, we split the role params out from the valid role
        # attributes and update the new datastructure with that
        # result and the role name
        if isinstance(ds, dict):
            (new_role_def, role_params) = self._split_role_params(ds)
            new_ds |= new_role_def
            self._role_params = role_params

        # set the role name in the new ds
        new_ds['role'] = role_name

        # we store the role path internally
        self._role_path = role_path

        # and return the cleaned-up data structure
        return new_ds

    def _load_role_name(self, ds):
        """
        Returns the role name (either the role: or name: field) from the
        role definition, or (when the role definition is a simple string),
        just that string
        """

        if isinstance(ds, str):
            return ds

        role_name = ds.get('role', ds.get('name'))
        if not role_name or not isinstance(role_name, str):
            raise AnsibleError('role definitions must contain a role name', obj=ds)

        # if we have the required datastructures, and if the role_name
        # contains a variable, try and template it now
        if self._variable_manager:
            all_vars = self._variable_manager.get_vars(play=self._play)
            templar = TemplateEngine(loader=self._loader, variables=all_vars)
            role_name = templar.template(role_name)

        return role_name

    def _load_role_path(self, role_name):
        """
        the 'role', as specified in the ds (or as a bare string), can either
        be a simple name or a full path. If it is a full path, we use the
        basename as the role name, otherwise we take the name as-given and
        append it to the default role path
        """

        # create a templar class to template the dependency names, in
        # case they contain variables
        if self._variable_manager is not None:
            all_vars = self._variable_manager.get_vars(play=self._play)
        else:
            all_vars = dict()

        templar = TemplateEngine(loader=self._loader, variables=all_vars)
        role_name = templar.template(role_name)

        role_tuple = None

        # try to load as a collection-based role first
        if self._collection_list or AnsibleCollectionRef.is_valid_fqcr(role_name):
            role_tuple = _get_collection_role_path(role_name, self._collection_list)

        if role_tuple:
            # we found it, stash collection data and return the name/path tuple
            self._role_collection = role_tuple[2]
            return role_tuple[0:2]

        # We didn't find a collection role, look in defined role paths
        # FUTURE: refactor this to be callable from internal so we can properly order
        # ansible.legacy searches with the collections keyword

        # we always start the search for roles in the base directory of the playbook
        role_search_paths = [
            os.path.join(self._loader.get_basedir(), u'roles'),
        ]

        # also search in the configured roles path
        if C.DEFAULT_ROLES_PATH:
            role_search_paths.extend(C.DEFAULT_ROLES_PATH)

        # next, append the roles basedir, if it was set, so we can
        # search relative to that directory for dependent roles
        if self._role_basedir:
            role_search_paths.append(self._role_basedir)

        # finally as a last resort we look in the current basedir as set
        # in the loader (which should be the playbook dir itself) but without
        # the roles/ dir appended
        role_search_paths.append(self._loader.get_basedir())

        # now iterate through the possible paths and return the first one we find
        for path in role_search_paths:
            path = templar.template(path)
            role_path = unfrackpath(os.path.join(path, role_name))
            if self._loader.path_exists(role_path):
                return (role_name, role_path)

        # if not found elsewhere try to extract path from name
        role_path = unfrackpath(role_name)
        if self._loader.path_exists(role_path):
            role_name = os.path.basename(role_name)
            return (role_name, role_path)

        searches = (self._collection_list or []) + role_search_paths
        raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(searches)), obj=self._ds)

    def _split_role_params(self, ds):
        """
        Splits any random role params off from the role spec and store
        them in a dictionary of params for parsing later
        """

        role_def = dict()
        role_params = dict()
        base_attribute_names = frozenset(self.fattributes)

        for (key, value) in ds.items():
            # use the list of FieldAttribute values to determine what is and is not
            # an extra parameter for this role (or sub-class of this role)
            # FIXME: hard-coded list of exception key names here corresponds to the
            #        connection fields in the Base class. There may need to be some
            #        other mechanism where we exclude certain kinds of field attributes,
            #        or make this list more automatic in some way so we don't have to
            #        remember to update it manually.
            if key not in base_attribute_names:
                # this key does not match a field attribute, so it must be a role param
                role_params[key] = value
            else:
                # this is a field attribute, so copy it over directly
                role_def[key] = value

        return (role_def, role_params)

    def get_role_params(self):
        return self._role_params.copy()

    def get_role_path(self):
        return self._role_path

    def get_name(self, include_role_fqcn=True):
        if include_role_fqcn:
            return '.'.join(x for x in (self._role_collection, self.role) if x)
        return self.role
RoleDefinition
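The interesting logic above is the search order built by _load_role_path. A standalone sketch of that ordering (the helper name and parameters are hypothetical, not Ansible's API):

    import os

    def candidate_role_dirs(playbook_basedir, configured_roles_path=(), role_basedir=None):
        """Mirror the search order used by _load_role_path above (a sketch)."""
        paths = [os.path.join(playbook_basedir, "roles")]  # 1. <playbook>/roles
        paths.extend(configured_roles_path)                # 2. C.DEFAULT_ROLES_PATH entries
        if role_basedir:
            paths.append(role_basedir)                     # 3. basedir of a dependent role
        paths.append(playbook_basedir)                     # 4. last resort: playbook dir itself
        return paths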
python
lxml__lxml
src/lxml/tests/test_classlookup.py
{ "start": 334, "end": 3000 }
class ____(HelperTestCase):
    """Basic tests for element proxy behaviour.
    """
    etree = etree

    def test_proxy_reuse(self):
        root = etree.XML('<a><b><c/></b></a>')
        b = root.find('b')
        self.assertTrue(b is root[0])

    def test_proxy_reuse_after_gc(self):
        root = etree.XML('<a><b><c/></b></a>')
        b = root.find('b')
        self.assertTrue(self.etree.iselement(b))
        gc.collect()
        self.assertTrue(b is root[0])

    def test_proxy_reuse_after_del_root(self):
        root = etree.XML('<a><b><c/></b></a>')
        b = root.find('b')
        self.assertTrue(self.etree.iselement(b))
        c = b.find('c')
        self.assertTrue(self.etree.iselement(c))
        del root
        gc.collect()
        self.assertTrue(b[0] is c)

    def test_proxy_hashing(self):
        root = etree.XML('<a><b><c/></b></a>')
        old_elements = set(root.iter())
        elements = root.iter()
        del root
        gc.collect()

        missing = len(old_elements)
        self.assertEqual(3, missing)
        for new in elements:
            for old in old_elements:
                if old == new:
                    self.assertTrue(old is new)
                    missing -= 1
                    break
            else:
                self.assertTrue(False, "element '%s' is missing" % new.tag)
        self.assertEqual(0, missing)

    def test_element_base(self):
        el = self.etree.ElementBase()
        self.assertEqual('ElementBase', el.tag)
        root = self.etree.ElementBase()
        root.append(el)
        self.assertEqual('ElementBase', root[0].tag)

    def test_element_base_children(self):
        el = self.etree.ElementBase(etree.ElementBase())
        self.assertEqual('ElementBase', el.tag)
        self.assertEqual(1, len(el))
        self.assertEqual('ElementBase', el[0].tag)

        root = self.etree.ElementBase()
        root.append(el)
        self.assertEqual('ElementBase', root[0].tag)
        self.assertEqual('ElementBase', root[0][0].tag)

    def test_comment_base(self):
        el = self.etree.CommentBase('some text')
        self.assertEqual(self.etree.Comment, el.tag)
        self.assertEqual('some text', el.text)
        root = self.etree.Element('root')
        root.append(el)
        self.assertEqual('some text', root[0].text)

    def test_pi_base(self):
        el = self.etree.PIBase('the target', 'some text')
        self.assertEqual(self.etree.ProcessingInstruction, el.tag)
        self.assertEqual('some text', el.text)
        root = self.etree.Element('root')
        root.append(el)
        self.assertEqual('some text', root[0].text)
ProxyTestCase
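The proxy tests above hinge on lxml handing back the same Python wrapper for the same underlying C node. A minimal standalone sketch of that behaviour (requires lxml; mirrors test_proxy_reuse):

    from lxml import etree
    import gc

    root = etree.XML('<a><b><c/></b></a>')
    b = root.find('b')
    gc.collect()
    assert b is root[0]  # same proxy object, not merely an equal one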
python
django__django
tests/admin_changelist/models.py
{ "start": 301, "end": 521 }
class ____(models.Model):
    parent = models.ForeignKey(Parent, models.SET_NULL, editable=False, null=True)
    name = models.CharField(max_length=30, blank=True)
    age = models.IntegerField(null=True, blank=True)
Child
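For illustration, the models.SET_NULL choice above means deleting a Parent nulls the pointer instead of cascading the delete. A hedged sketch (assumes a Django test context and that Parent can be created without required arguments):

    parent = Parent.objects.create()
    child = Child.objects.create(parent=parent, name="c")
    parent.delete()
    child.refresh_from_db()
    assert child.parent is None  # SET_NULL, not CASCADE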
python
tornadoweb__tornado
tornado/template.py
{ "start": 25557, "end": 26250 }
class ____(Exception):
    """Raised for template syntax errors.

    ``ParseError`` instances have ``filename`` and ``lineno`` attributes
    indicating the position of the error.

    .. versionchanged:: 4.3
       Added ``filename`` and ``lineno`` attributes.
    """

    def __init__(
        self, message: str, filename: Optional[str] = None, lineno: int = 0
    ) -> None:
        self.message = message
        # The names "filename" and "lineno" are chosen for consistency
        # with python SyntaxError.
        self.filename = filename
        self.lineno = lineno

    def __str__(self) -> str:
        return "%s at %s:%d" % (self.message, self.filename, self.lineno)
ParseError
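A small sketch of catching the error above (uses tornado's public API; the template string is just one way to trigger a parse failure):

    from tornado.template import Template, ParseError

    try:
        Template("{% end %}", name="bad.html")  # unmatched {% end %}
    except ParseError as e:
        print(e)                     # "... at bad.html:1", per __str__ above
        print(e.filename, e.lineno)  # attributes added in Tornado 4.3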
python
sympy__sympy
sympy/solvers/ode/single.py
{ "start": 37868, "end": 41325 }
class ____(SinglePatternODESolver):
    r"""
    Solves 2nd order Liouville differential equations.

    The general form of a Liouville ODE is

    .. math:: \frac{d^2 y}{dx^2} + g(y) \left(\!
                \frac{dy}{dx}\!\right)^2 + h(x)
                \frac{dy}{dx}\text{.}

    The general solution is:

        >>> from sympy import Function, dsolve, Eq, pprint, diff
        >>> from sympy.abc import x
        >>> f, g, h = map(Function, ['f', 'g', 'h'])
        >>> genform = Eq(diff(f(x),x,x) + g(f(x))*diff(f(x),x)**2 +
        ...             h(x)*diff(f(x),x), 0)
        >>> pprint(genform)
                          2                    2
                /d       \         d          d
        g(f(x))*|--(f(x))|  + h(x)*--(f(x)) + ---(f(x)) = 0
                \dx      /         dx           2
                                              dx
        >>> pprint(dsolve(genform, f(x), hint='Liouville_Integral'))
                                          f(x)
                  /                     /
                 |                     |
                 |     /               |     /
                 |    |                |    |
                 |  - | h(x) dx        |    | g(y) dy
                 |    |                |    |
                 |   /                 |   /
        C1 + C2* | e            dx +  | e            dy = 0
                 |                     |
                /                     /

    Examples
    ========

    >>> from sympy import Function, dsolve, Eq, pprint
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(diff(f(x), x, x) + diff(f(x), x)**2/f(x) +
    ... diff(f(x), x)/x, f(x), hint='Liouville'))
               ________________           ________________
    [f(x) = -\/ C1 + C2*log(x) , f(x) = \/ C1 + C2*log(x) ]

    References
    ==========

    - Goldstein and Braun, "Advanced Methods for the Solution of
      Differential Equations", pp. 98
    - https://www.maplesoft.com/support/help/Maple/view.aspx?path=odeadvisor/Liouville

    # indirect doctest

    """
    hint = "Liouville"
    has_integral = True
    order = [2]

    def _wilds(self, f, x, order):
        d = Wild('d', exclude=[f(x).diff(x), f(x).diff(x, 2)])
        e = Wild('e', exclude=[f(x).diff(x)])
        k = Wild('k', exclude=[f(x).diff(x)])
        return d, e, k

    def _equation(self, fx, x, order):
        # Liouville ODE in the form
        # f(x).diff(x, 2) + g(f(x))*(f(x).diff(x))**2 + h(x)*f(x).diff(x)
        # See Goldstein and Braun, "Advanced Methods for the Solution of
        # Differential Equations", pg. 98
        d, e, k = self.wilds()
        return d*fx.diff(x, 2) + e*fx.diff(x)**2 + k*fx.diff(x)

    def _verify(self, fx):
        d, e, k = self.wilds_match()
        self.y = Dummy('y')
        x = self.ode_problem.sym
        self.g = simplify(e/d).subs(fx, self.y)
        self.h = simplify(k/d).subs(fx, self.y)
        if self.y in self.h.free_symbols or x in self.g.free_symbols:
            return False
        return True

    def _get_general_solution(self, *, simplify_flag: bool = True):
        d, e, k = self.wilds_match()
        fx = self.ode_problem.func
        x = self.ode_problem.sym
        C1, C2 = self.ode_problem.get_numbered_constants(num=2)
        int = Integral(exp(Integral(self.g, self.y)), (self.y, None, fx))
        gen_sol = Eq(int + C1*Integral(exp(-Integral(self.h, x)), x) + C2, 0)

        return [gen_sol]
Liouville
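A quick way to sanity-check the solver above is to feed dsolve's output back through checkodesol; a sketch using the docstring's own example:

    from sympy import Function, checkodesol, diff, dsolve
    from sympy.abc import x

    f = Function('f')
    ode = diff(f(x), x, x) + diff(f(x), x)**2/f(x) + diff(f(x), x)/x
    solutions = dsolve(ode, f(x), hint='Liouville')
    # checkodesol returns (True, 0) when a solution satisfies the ODE
    assert all(checkodesol(ode, sol)[0] for sol in solutions)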
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py
{ "start": 4836, "end": 5036 }
class ____(ParentD):
    def f(self):
        def x():
            __class__ = 1
            super  # Python injects __class__ into scope

        builtins.super(ChildD10, self).f()  # Must be ignored
ChildD10
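Illustrative background for this fixture: mentioning super or __class__ in a method body makes the compiler provide an implicit __class__ closure cell, and the nested function above rebinds that name, so rewriting builtins.super(ChildD10, self) to zero-argument super() could change behaviour. The cell mechanics in plain Python:

    class Demo:
        def which(self):
            # The compiler injects a __class__ cell into any method that
            # mentions __class__ or super, pointing at the defining class.
            return __class__

    assert Demo().which() is Demo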
python
donnemartin__interactive-coding-challenges
sorting_searching/rotated_array_search/test_search_sorted_array.py
{ "start": 18, "end": 745 }
class ____(unittest.TestCase):

    def test_search_sorted_array(self):
        array = Array()
        self.assertRaises(TypeError, array.search_sorted_array, None)
        self.assertEqual(array.search_sorted_array([3, 1, 2], 0), None)
        self.assertEqual(array.search_sorted_array([3, 1, 2], 0), None)
        data = [10, 12, 14, 1, 3, 5, 6, 7, 8, 9]
        self.assertEqual(array.search_sorted_array(data, val=1), 3)
        data = [1, 1, 2, 1, 1, 1, 1, 1, 1, 1]
        self.assertEqual(array.search_sorted_array(data, val=2), 2)
        print('Success: test_search_sorted_array')


def main():
    test = TestArray()
    test.test_search_sorted_array()


if __name__ == '__main__':
    main()
TestArray
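One standard way to implement the function these tests exercise is a modified binary search that decides which half is sorted at each step. A sketch (not the challenge's reference solution; the duplicate-heavy second test case would additionally need a linear fallback when arr[lo] == arr[mid] == arr[hi]):

    def search_rotated(arr, val):
        if arr is None:
            raise TypeError
        lo, hi = 0, len(arr) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if arr[mid] == val:
                return mid
            if arr[lo] <= arr[mid]:              # left half is sorted
                if arr[lo] <= val < arr[mid]:
                    hi = mid - 1
                else:
                    lo = mid + 1
            else:                                # right half is sorted
                if arr[mid] < val <= arr[hi]:
                    lo = mid + 1
                else:
                    hi = mid - 1
        return None

    assert search_rotated([10, 12, 14, 1, 3, 5, 6, 7, 8, 9], 1) == 3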
python
tensorflow__tensorflow
tensorflow/python/framework/extension_type_field_test.py
{ "start": 1489, "end": 5637 }
class ____(test_util.TensorFlowTestCase, parameterized.TestCase):

  @parameterized.parameters([
      # Without default values:
      ('x', int),
      ('f', float),
      ('t', tensor.Tensor),
      # With default values:
      ('x', int, 33),
      ('y', float, 33.8),
      ('t', tensor.Tensor, [[1, 2], [3, 4]]),
      ('t', tensor.Tensor, lambda: constant_op.constant([[1, 2], [3, 4]])),
      ('r', ragged_tensor.RaggedTensor,
       lambda: ragged_factory_ops.constant([[1, 2], [3]])),
      ('seq', typing.Tuple[typing.Union[int, float], ...], (33, 12.8, 9, 0)),
      ('seq', typing.Tuple[typing.Union[int, float], ...], [33, 12.8, 9, 0],
       (33, 12.8, 9, 0)),
      ('seq', _TUPLE[typing.Union[int, float], ...], (33, 12.8, 9, 0)),
      ('seq', _TUPLE[typing.Union[int, float], ...], (33, 12.8, 9, 0)),
      ('s', tensor_shape.TensorShape, [1, 2],
       tensor_shape.TensorShape([1, 2])),
      ('dtype', dtypes.DType, np.int32, dtypes.int32),
  ])
  def testConstruction(
      self,
      name,
      value_type,
      default=extension_type_field.ExtensionTypeField.NO_DEFAULT,
      converted_default=None):
    if callable(default):
      default = default()  # deferred construction (contains tensor)
    field = extension_type_field.ExtensionTypeField(name, value_type, default)
    if converted_default is not None:
      default = converted_default
    self.assertEqual(field.name, name)
    self.assertEqual(field.value_type, value_type)
    if isinstance(default, (tensor.Tensor, ragged_tensor.RaggedTensor)):
      self.assertAllEqual(field.default, default)
    else:
      self.assertEqual(field.default, default)

  @parameterized.parameters([
      ('i', int, 8.3, "default value for i: expected 'int', got 'float'"),
      ('f', float, 8, "default value for f: expected 'float', got 'int'"),
      ('x', int, 'hello world',
       "default value for x: expected 'int', got 'str'"),
      ('seq', typing.Tuple[typing.Union[int, float], ...], [33, 12.8, 'zero'],
       (r'default value for seq\[2\]: expected '
        r"typing.Union\[int, float\], got 'str'")),
      ('seq', _TUPLE[typing.Union[int, float], ...], [33, 12.8, 'zero'],
       (r'default value for seq\[2\]: expected '
        r"typing.Union\[int, float\], got 'str'")),
      ('t', tensor.TensorSpec(None, dtypes.int32),
       lambda: constant_op.constant(0.0),
       'Unsupported type annotation TensorSpec.*'),
      ('x', dict, {}, "In field 'x': Unsupported type annotation 'dict'"),
      ('y', typing.Union[int, list], 3,
       "In field 'y': Unsupported type annotation 'list'"),
      ('z', typing.Mapping[tensor.Tensor, int], {},
       "In field 'z': Mapping had a key 'Tensor' with type 'type'"),
  ])
  def testConstructionError(self, name, value_type, default, error):
    if callable(default):
      default = default()  # deferred construction (contains tensor)
    with self.assertRaisesRegex(TypeError, error):
      extension_type_field.ExtensionTypeField(name, value_type, default)

  @parameterized.parameters([
      ("ExtensionTypeField(name='i', value_type=<class 'int'>, "
       'default=ExtensionTypeField.NO_DEFAULT)', 'i', int),
      ("ExtensionTypeField(name='x', value_type=typing.Tuple"
       '[typing.Union[int, str], ...], default=ExtensionTypeField.NO_DEFAULT)',
       'x', typing.Tuple[typing.Union[int, str], ...]),
      ("ExtensionTypeField(name='j', value_type=<class 'int'>, default=3)",
       'j', int, 3),
  ])
  def testRepr(self,
               expected,
               name,
               value_type,
               default=extension_type_field.ExtensionTypeField.NO_DEFAULT):
    field = extension_type_field.ExtensionTypeField(name, value_type, default)
    self.assertEqual(repr(field), expected)

  @parameterized.parameters([
      ('Spec', True),
      ('_type_spec', True),
      ('self', True),
      ('x', False),
      ('_tf_extension_type_foo_bar', True),
  ])
  def testIsReservedName(self, name, expected):
    self.assertEqual(
        extension_type_field.ExtensionTypeField.is_reserved_name(name),
        expected)
ExtensionTypeFieldTest
python
ray-project__ray
python/ray/data/collate_fn.py
{ "start": 6339, "end": 9251 }
class ____(ArrowBatchCollateFn):
    """Default collate function for converting Arrow batches to PyTorch tensors."""

    _DEFAULT_NUM_WORKERS = env_integer(
        "RAY_DATA_DEFAULT_COLLATE_FN_THREADPOOL_MAX_WORKERS",
        4,
    )

    def __init__(
        self,
        dtypes: Optional[Union["torch.dtype", Dict[str, "torch.dtype"]]] = None,
        device: Optional["TorchDeviceType"] = None,
        pin_memory: bool = False,
        num_workers: int = _DEFAULT_NUM_WORKERS,
    ):
        """Initialize the collate function.

        Args:
            dtypes: The torch dtype(s) for the created tensor(s); if None, the
                dtype will be inferred from the tensor data.
            device: The device on which the tensor should be placed. Can be a
                string (e.g. "cpu", "cuda:0") or a torch.device object.
            pin_memory: Whether to pin the memory of the created tensors.
            num_workers: Number of worker threads for parallel tensor
                conversion. Defaults to
                `RAY_DATA_DEFAULT_COLLATE_FN_THREADPOOL_MAX_WORKERS`.
        """
        import torch

        super().__init__()
        self.dtypes = dtypes
        if isinstance(device, (str, int)):
            self.device = torch.device(device)
        else:
            self.device = device
        self.pin_memory = pin_memory
        self.num_workers = num_workers
        self._threadpool: Optional[ThreadPoolExecutor] = None

    def __del__(self):
        """Clean up threadpool on destruction."""
        if getattr(self, "_threadpool", None):
            self._threadpool.shutdown(wait=False)

    def __call__(
        self, batch: "pyarrow.Table"
    ) -> Union[Dict[str, "torch.Tensor"], Dict[str, List["torch.Tensor"]]]:
        """Convert an Arrow batch to PyTorch tensors.

        Args:
            batch: PyArrow Table to convert.

        Returns:
            Dictionary mapping column names to tensors, or to lists of
            tensors when chunked arrays are not combined.
        """
        from ray.air._internal.torch_utils import (
            arrow_batch_to_tensors,
        )

        if self.num_workers > 0 and self._threadpool is None:
            self._threadpool = ThreadPoolExecutor(max_workers=self.num_workers)

        # For GPU transfer, we can skip combining the chunked arrays: each
        # chunked array can be converted to its numpy format, then to tensors,
        # and the resulting list of tensors transferred to the GPU directly.
        # For CPU transfer, however, we need to combine the chunked arrays
        # first before converting to numpy format and then to tensors.
        combine_chunks = self.device is not None and self.device.type == "cpu"
        return arrow_batch_to_tensors(
            batch,
            dtypes=self.dtypes,
            combine_chunks=combine_chunks,
            pin_memory=self.pin_memory,
            threadpool=self._threadpool,
        )
DefaultCollateFn
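A hedged usage sketch for the class above (assumes Ray Data and torch are installed; the column name is made up):

    import torch

    collate_fn = DefaultCollateFn(
        dtypes={"image": torch.float32},  # per-column dtype override
        device="cuda:0",                  # tensors are created for this device
        pin_memory=True,
    )
    # Typically passed to batch iteration, e.g.:
    #   for batch in ds.iter_torch_batches(collate_fn=collate_fn): ...
    # where each batch maps column names to tensors.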