language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
huggingface__transformers
tests/trainer/test_trainer_seq2seq.py
{ "start": 1031, "end": 8457 }
class ____(TestCasePlus):
    """Integration tests for ``Seq2SeqTrainer``.

    Covers three scenarios visible below: a tiny end-to-end fine-tuning run of a
    bert2bert encoder-decoder on CNN/DailyMail, evaluation with
    ``num_return_sequences > 1``, and early failure on an invalid
    ``GenerationConfig``. The first two are decorated ``@slow`` and download
    models/datasets from the Hub.
    """

    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        """Smoke-test a short summarization fine-tuning run with Seq2SeqTrainer."""
        # Tiny encoder-decoder so the test stays cheap; tokenizer comes from the
        # full bert-base vocab (bert-tiny shares it).
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        # BERT has no EOS token; reuse [SEP] for that role and [CLS] as the
        # decoder start token.
        tokenizer.eos_token_id = tokenizer.sep_token_id
        bert2bert.generation_config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("abisee/cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("abisee/cnn_dailymail", "3.0.0", split="validation[:1%]")

        # Keep only a handful of examples — this is a smoke test, not training.
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            """Tokenize articles/highlights into fixed-length model inputs."""
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # Replace pad tokens in the labels with -100 (the conventional
            # "ignore" index) so padding does not contribute to the loss.
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            # padding="max_length" above should guarantee these exact lengths.
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            """Exact-match accuracy between decoded predictions and labels."""
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum(int(pred_str[i] == label_str[i]) for i in range(len(pred_str))) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            eval_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
            report_to="none",
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            processing_class=tokenizer,
        )

        # start training
        trainer.train()

    @slow
    @require_torch
    def test_return_sequences(self):
        # Tests that the number of generated sequences is correct when num_return_sequences > 1
        # and essentially ensuring that `accelerator.gather()` is used instead of `gather_for_metrics`
        INPUT_COLUMN = "question"
        TARGET_COLUMN = "answer"
        MAX_INPUT_LENGTH = 256
        MAX_TARGET_LENGTH = 256

        dataset = datasets.load_dataset("openai/gsm8k", "main", split="train[:38]")
        model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small")
        tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small")
        data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="pt", padding="longest")
        gen_config = GenerationConfig.from_pretrained(
            "google-t5/t5-small", max_length=None, min_length=None, max_new_tokens=256, min_new_tokens=1, num_beams=5
        )

        training_args = Seq2SeqTrainingArguments(".", predict_with_generate=True, report_to="none")

        trainer = Seq2SeqTrainer(
            model=model,
            args=training_args,
            processing_class=tokenizer,
            data_collator=data_collator,
            # compute_metrics receives (predictions, labels); the prediction
            # count is what this test asserts on below (as "eval_samples").
            compute_metrics=lambda x: {"samples": x[0].shape[0]},
        )

        def prepare_data(examples):
            # Remove pairs where at least one record is none
            inputs = examples[INPUT_COLUMN]
            targets = examples[TARGET_COLUMN]

            model_inputs = tokenizer(inputs, max_length=MAX_INPUT_LENGTH, truncation=True)
            labels = tokenizer(text_target=targets, max_length=MAX_TARGET_LENGTH, truncation=True)
            model_inputs["labels"] = labels["input_ids"]

            return model_inputs

        prepared_dataset = dataset.map(prepare_data, batched=True, remove_columns=[INPUT_COLUMN, TARGET_COLUMN])
        dataset_len = len(prepared_dataset)  # 38

        # Evaluate with 3, 2 and 1 return sequences: the reported sample count
        # must scale linearly with num_return_sequences.
        for num_return_sequences in range(3, 0, -1):
            gen_config.num_return_sequences = num_return_sequences
            metrics = trainer.evaluate(eval_dataset=prepared_dataset, generation_config=gen_config)
            assert metrics["eval_samples"] == dataset_len * num_return_sequences, (
                f"Got {metrics['eval_samples']}, expected: {dataset_len * num_return_sequences}"
            )

    @require_torch
    def test_bad_generation_config_fail_early(self):
        # Tests that a bad generation config causes the trainer to fail early
        model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small")
        tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small")
        data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="pt", padding="longest")
        gen_config = GenerationConfig(do_sample=False, top_p=0.9)  # bad: top_p is not compatible with do_sample=False
        training_args = Seq2SeqTrainingArguments(
            ".", predict_with_generate=True, generation_config=gen_config, report_to="none"
        )
        # Trainer construction itself must raise — not the first generate() call.
        with self.assertRaises(ValueError) as exc:
            _ = Seq2SeqTrainer(
                model=model,
                args=training_args,
                processing_class=tokenizer,
                data_collator=data_collator,
                compute_metrics=lambda x: {"samples": x[0].shape[0]},
            )
        self.assertIn("Fix these issues to train your model", str(exc.exception))
Seq2seqTrainerTester
python
scrapy__scrapy
scrapy/crawler.py
{ "start": 1608, "end": 11776 }
class ____:
    """Binds a spider class to a settings object and drives one crawl.

    Holds the per-crawl objects (stats, log formatter, extensions, engine,
    spider instance) and exposes ``crawl()`` / ``crawl_async()`` to run the
    crawl once, plus ``get_*`` accessors for run-time component instances.
    """

    def __init__(
        self,
        spidercls: type[Spider],
        settings: dict[str, Any] | Settings | None = None,
        init_reactor: bool = False,
    ):
        if isinstance(spidercls, Spider):
            raise ValueError("The spidercls argument must be a class, not an object")

        if isinstance(settings, dict) or settings is None:
            settings = Settings(settings)

        self.spidercls: type[Spider] = spidercls
        # Copy so the caller's Settings object is never mutated; the spider
        # class then gets a chance to layer its custom_settings on top.
        self.settings: Settings = settings.copy()
        self.spidercls.update_settings(self.settings)
        self._update_root_log_handler()

        self.addons: AddonManager = AddonManager(self)
        self.signals: SignalManager = SignalManager(self)

        self._init_reactor: bool = init_reactor
        self.crawling: bool = False
        self._started: bool = False

        # Populated lazily: most of these are built in _apply_settings() or
        # when the crawl actually starts.
        self.extensions: ExtensionManager | None = None
        self.stats: StatsCollector | None = None
        self.logformatter: LogFormatter | None = None
        self.request_fingerprinter: RequestFingerprinterProtocol | None = None
        self.spider: Spider | None = None
        self.engine: ExecutionEngine | None = None

    def _update_root_log_handler(self) -> None:
        # Re-install only if a handler exists; never install one from scratch.
        if get_scrapy_root_handler() is not None:
            # scrapy root handler already installed: update it with new settings
            install_scrapy_root_handler(self.settings)

    def _apply_settings(self) -> None:
        """Build settings-derived components and freeze the settings.

        Idempotent: a frozen settings object means this already ran.
        """
        if self.settings.frozen:
            return

        self.addons.load_settings(self.settings)
        self.stats = load_object(self.settings["STATS_CLASS"])(self)

        lf_cls: type[LogFormatter] = load_object(self.settings["LOG_FORMATTER"])
        self.logformatter = lf_cls.from_crawler(self)

        self.request_fingerprinter = build_from_crawler(
            load_object(self.settings["REQUEST_FINGERPRINTER_CLASS"]),
            self,
        )

        reactor_class: str = self.settings["TWISTED_REACTOR"]
        event_loop: str = self.settings["ASYNCIO_EVENT_LOOP"]
        if self._init_reactor:
            # this needs to be done after the spider settings are merged,
            # but before something imports twisted.internet.reactor
            if reactor_class:
                install_reactor(reactor_class, event_loop)
            else:
                from twisted.internet import reactor  # noqa: F401
        if reactor_class:
            verify_installed_reactor(reactor_class)
            if is_asyncio_reactor_installed() and event_loop:
                verify_installed_asyncio_event_loop(event_loop)

        if self._init_reactor or reactor_class:
            log_reactor_info()

        self.extensions = ExtensionManager.from_crawler(self)
        # Freeze after extensions are built so nothing mutates settings later.
        self.settings.freeze()

        d = dict(overridden_settings(self.settings))
        logger.info(
            "Overridden settings:\n%(settings)s", {"settings": pprint.pformat(d)}
        )

    # Cannot use @deferred_f_from_coro_f because that relies on the reactor
    # being installed already, which is done within _apply_settings(), inside
    # this method.
    @inlineCallbacks
    def crawl(self, *args: Any, **kwargs: Any) -> Generator[Deferred[Any], Any, None]:
        """Start the crawler by instantiating its spider class with the given
        *args* and *kwargs* arguments, while setting the execution engine in
        motion. Should be called only once.

        Return a deferred that is fired when the crawl is finished.
        """
        if self.crawling:
            raise RuntimeError("Crawling already taking place")
        if self._started:
            raise RuntimeError(
                "Cannot run Crawler.crawl() more than once on the same instance."
            )
        self.crawling = self._started = True

        try:
            self.spider = self._create_spider(*args, **kwargs)
            self._apply_settings()
            self._update_root_log_handler()
            self.engine = self._create_engine()
            yield deferred_from_coro(self.engine.open_spider_async())
            yield deferred_from_coro(self.engine.start_async())
        except Exception:
            # On any failure, make sure an already-created engine is closed
            # before re-raising.
            self.crawling = False
            if self.engine is not None:
                yield deferred_from_coro(self.engine.close_async())
            raise

    async def crawl_async(self, *args: Any, **kwargs: Any) -> None:
        """Start the crawler by instantiating its spider class with the given
        *args* and *kwargs* arguments, while setting the execution engine in
        motion. Should be called only once.

        .. versionadded:: VERSION

        Complete when the crawl is finished.

        This function requires
        :class:`~twisted.internet.asyncioreactor.AsyncioSelectorReactor` to be
        installed.
        """
        if not is_asyncio_available():
            raise RuntimeError("Crawler.crawl_async() requires AsyncioSelectorReactor.")
        if self.crawling:
            raise RuntimeError("Crawling already taking place")
        if self._started:
            raise RuntimeError(
                "Cannot run Crawler.crawl_async() more than once on the same instance."
            )
        self.crawling = self._started = True

        try:
            # Same sequence as crawl(), but awaiting the engine coroutines
            # directly instead of going through Deferreds.
            self.spider = self._create_spider(*args, **kwargs)
            self._apply_settings()
            self._update_root_log_handler()
            self.engine = self._create_engine()
            await self.engine.open_spider_async()
            await self.engine.start_async()
        except Exception:
            self.crawling = False
            if self.engine is not None:
                await self.engine.close_async()
            raise

    def _create_spider(self, *args: Any, **kwargs: Any) -> Spider:
        # Spiders are created through from_crawler so they get a crawler ref.
        return self.spidercls.from_crawler(self, *args, **kwargs)

    def _create_engine(self) -> ExecutionEngine:
        return ExecutionEngine(self, lambda _: self.stop_async())

    def stop(self) -> Deferred[None]:
        """Start a graceful stop of the crawler and return a deferred that is
        fired when the crawler is stopped."""
        warnings.warn(
            "Crawler.stop() is deprecated, use stop_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.stop_async())

    async def stop_async(self) -> None:
        """Start a graceful stop of the crawler and complete when the crawler
        is stopped.

        .. versionadded:: VERSION
        """
        # No-op unless a crawl is in progress; flips the flag first so a
        # concurrent stop request does not stop the engine twice.
        if self.crawling:
            self.crawling = False
            assert self.engine
            if self.engine.running:
                await self.engine.stop_async()

    @staticmethod
    def _get_component(
        component_class: type[_T], components: Iterable[Any]
    ) -> _T | None:
        # First component that is an instance of component_class (or subclass).
        for component in components:
            if isinstance(component, component_class):
                return component
        return None

    def get_addon(self, cls: type[_T]) -> _T | None:
        """Return the run-time instance of an :ref:`add-on <topics-addons>` of
        the specified class or a subclass, or ``None`` if none is found.

        .. versionadded:: 2.12
        """
        return self._get_component(cls, self.addons.addons)

    def get_downloader_middleware(self, cls: type[_T]) -> _T | None:
        """Return the run-time instance of a :ref:`downloader middleware
        <topics-downloader-middleware>` of the specified class or a subclass,
        or ``None`` if none is found.

        .. versionadded:: 2.12

        This method can only be called after the crawl engine has been created,
        e.g. at signals :signal:`engine_started` or :signal:`spider_opened`.
        """
        if not self.engine:
            raise RuntimeError(
                "Crawler.get_downloader_middleware() can only be called after "
                "the crawl engine has been created."
            )
        return self._get_component(cls, self.engine.downloader.middleware.middlewares)

    def get_extension(self, cls: type[_T]) -> _T | None:
        """Return the run-time instance of an :ref:`extension
        <topics-extensions>` of the specified class or a subclass, or ``None``
        if none is found.

        .. versionadded:: 2.12

        This method can only be called after the extension manager has been
        created, e.g. at signals :signal:`engine_started` or
        :signal:`spider_opened`.
        """
        if not self.extensions:
            raise RuntimeError(
                "Crawler.get_extension() can only be called after the "
                "extension manager has been created."
            )
        return self._get_component(cls, self.extensions.middlewares)

    def get_item_pipeline(self, cls: type[_T]) -> _T | None:
        """Return the run-time instance of a :ref:`item pipeline
        <topics-item-pipeline>` of the specified class or a subclass, or
        ``None`` if none is found.

        .. versionadded:: 2.12

        This method can only be called after the crawl engine has been created,
        e.g. at signals :signal:`engine_started` or :signal:`spider_opened`.
        """
        if not self.engine:
            raise RuntimeError(
                "Crawler.get_item_pipeline() can only be called after the "
                "crawl engine has been created."
            )
        return self._get_component(cls, self.engine.scraper.itemproc.middlewares)

    def get_spider_middleware(self, cls: type[_T]) -> _T | None:
        """Return the run-time instance of a :ref:`spider middleware
        <topics-spider-middleware>` of the specified class or a subclass, or
        ``None`` if none is found.

        .. versionadded:: 2.12

        This method can only be called after the crawl engine has been created,
        e.g. at signals :signal:`engine_started` or :signal:`spider_opened`.
        """
        if not self.engine:
            raise RuntimeError(
                "Crawler.get_spider_middleware() can only be called after the "
                "crawl engine has been created."
            )
        return self._get_component(cls, self.engine.scraper.spidermw.middlewares)
Crawler
python
PyCQA__pylint
doc/data/messages/u/useless-parent-delegation/bad.py
{ "start": 0, "end": 73 }
class ____:
    """Documentation example: a base class whose method subclasses may
    (uselessly) delegate to."""

    def eat(self, food):
        """Announce on stdout that *food* is being eaten."""
        message = f"Eating {food}"
        print(message)
Animal
python
Lightning-AI__lightning
src/lightning/fabric/strategies/dp.py
{ "start": 1254, "end": 4148 }
class ____(ParallelStrategy):
    """Implements data-parallel training in a single process, i.e., the model gets replicated to each device and
    each gets a split of the data."""

    def __init__(
        self,
        accelerator: Optional[Accelerator] = None,
        parallel_devices: Optional[list[torch.device]] = None,
        checkpoint_io: Optional[CheckpointIO] = None,
        precision: Optional[Precision] = None,
    ):
        # Single-process strategy: no cluster environment is involved.
        super().__init__(
            accelerator=accelerator,
            parallel_devices=parallel_devices,
            cluster_environment=None,
            checkpoint_io=checkpoint_io,
            precision=precision,
        )

    @property
    @override
    def root_device(self) -> torch.device:
        # The first parallel device hosts the "root" replica.
        devices = self.parallel_devices
        assert devices is not None
        return devices[0]

    @property
    @override
    def distributed_sampler_kwargs(self) -> None:
        # DataParallel splits batches itself; no distributed sampler needed.
        return None

    @override
    def setup_module(self, module: Module) -> DataParallel:
        """Wraps the given model into a :class:`~torch.nn.DataParallel` module."""
        return DataParallel(module=module, device_ids=self.parallel_devices)

    @override
    def module_to_device(self, module: Module) -> None:
        module.to(self.root_device)

    @override
    def batch_to_device(self, batch: Any, device: Optional[torch.device] = None) -> Any:
        # DataParallel handles the transfer of batch to the device
        return batch

    @override
    def all_reduce(
        self, collection: TReduce, group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = "mean"
    ) -> TReduce:
        # Reduce every tensor in the collection to its mean, computed in
        # float32 and cast back to the tensor's original dtype.
        return apply_to_collection(
            collection, Tensor, lambda t: t.float().mean().to(t.dtype)
        )

    @override
    def barrier(self, *args: Any, **kwargs: Any) -> None:
        # Single process: nothing to synchronize.
        pass

    @override
    def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:
        # Single process: the object is already everywhere.
        return obj

    @override
    def reduce_boolean_decision(self, decision: bool, all: bool = True) -> bool:
        return decision

    @override
    def get_module_state_dict(self, module: Module) -> dict[str, Union[Any, Tensor]]:
        # Unwrap the DataParallel shell so checkpoints use plain-module keys.
        inner = module.module if isinstance(module, DataParallel) else module
        return super().get_module_state_dict(inner)

    @override
    def load_module_state_dict(
        self, module: Module, state_dict: dict[str, Union[Any, Tensor]], strict: bool = True
    ) -> None:
        # Mirror get_module_state_dict: load into the wrapped module.
        inner = module.module if isinstance(module, DataParallel) else module
        super().load_module_state_dict(module=inner, state_dict=state_dict, strict=strict)

    @classmethod
    @override
    def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None:
        strategy_registry.register("dp", cls, description=cls.__name__)
DataParallelStrategy
python
tensorflow__tensorflow
tensorflow/python/saved_model/nested_structure_coder.py
{ "start": 1641, "end": 3687 }
class ____(Exception):
    """Error raised when a coder cannot encode an object."""


def register_codec(x):
    """Registers a codec to use for encoding/decoding.

    Args:
        x: The codec object to register. The object must implement can_encode,
            do_encode, can_decode, and do_decode. See the various _*Codec
            classes for examples.
    """
    _codecs.append(x)


def _get_encoders():
    """Return (can_encode, do_encode) pairs for all registered codecs."""
    return [(codec.can_encode, codec.do_encode) for codec in _codecs]


def _get_decoders():
    """Return (can_decode, do_decode) pairs for all registered codecs."""
    return [(codec.can_decode, codec.do_decode) for codec in _codecs]


def _map_structure(pyobj, coders):
    """Apply the first matching coder to pyobj, recursing via the same coders."""
    recurse = functools.partial(_map_structure, coders=coders)
    # Iterate through the codecs in the reverse order they were registered in,
    # as the most specific codec should be checked first.
    for accepts, transform in reversed(coders):
        if accepts(pyobj):
            return transform(pyobj, recurse)
    raise NotEncodableError(
        f"No encoder for object {str(pyobj)} of type {type(pyobj)}.")


@tf_export("__internal__.saved_model.encode_structure", v1=[])
def encode_structure(nested_structure):
    """Encodes nested structures composed of encodable types into a proto.

    Args:
        nested_structure: Structure to encode.

    Returns:
        Encoded proto.

    Raises:
        NotEncodableError: For values for which there are no encoders.
    """
    return _map_structure(nested_structure, _get_encoders())


def can_encode(nested_structure):
    """Determines whether a nested structure can be encoded into a proto.

    Args:
        nested_structure: Structure to encode.

    Returns:
        True if the nested structured can be encoded.
    """
    try:
        encode_structure(nested_structure)
    except NotEncodableError:
        return False
    else:
        return True


@tf_export("__internal__.saved_model.decode_proto", v1=[])
def decode_proto(proto):
    """Decodes proto representing a nested structure.

    Args:
        proto: Proto to decode.

    Returns:
        Decoded structure.

    Raises:
        NotEncodableError: For values for which there are no encoders.
    """
    return _map_structure(proto, _get_decoders())
NotEncodableError
python
pytorch__pytorch
test/inductor/test_loop_ordering.py
{ "start": 35878, "end": 40441 }
class ____(TestCase):
    """Inductor tiling tests: checks which `*numel` range variables appear in
    the generated Triton code for various input layout combinations, plus
    direct unit tests of `tiling_utils` analysis helpers."""

    def T(self, layout: str):
        """Build a 4-D test tensor in the requested memory layout.

        Args:
            layout: "cont" (contiguous), "T" (transposed strides on dims 1/2),
                or "NHWC" (channels-last memory format).
        """
        SIZE_A = 128
        SIZE_B = 256
        SIZE_C = 512
        if layout == "cont":
            return torch.rand(SIZE_A, SIZE_B, SIZE_C, device=GPU_TYPE).unsqueeze(0)
        elif layout == "T":
            # contiguous() between the two transposes materializes the
            # permuted strides, so the final tensor is non-contiguous.
            return (
                torch.rand(SIZE_A, SIZE_B, SIZE_C, device=GPU_TYPE)
                .transpose(1, 2)
                .contiguous()
                .transpose(1, 2)
                .unsqueeze(0)
            )
        else:
            assert layout == "NHWC"
            return torch.rand([1, SIZE_A, SIZE_B, SIZE_C], device=GPU_TYPE).to(
                memory_format=torch.channels_last
            )

    @parametrize("a", layouts)
    @parametrize("b", layouts)
    def test_pointwise(self, a, b):
        """Mismatched layouts should produce a 2-D tiling (ynumel present)."""
        def foo(x, y):
            return x + y

        x, y = self.T(a), self.T(b)
        res, code = run_and_get_code(torch.compile(foo), x, y)
        if a != b:
            FileCheck().check("ynumel").run(code[0])
        else:
            # same layout: a 1-D kernel suffices, no y dimension expected
            FileCheck().check_not("ynumel").run(code[0])
        self.assertEqual(res, foo(x, y))

    def test_tiled_reduction(self):
        """Reduction over permuted inputs should tile x, y and a reduction dim."""
        def f(a, b):
            return (a * b).sum(dim=-1)

        N = 512
        inps = (
            torch.randn(N, N, N, device=GPU_TYPE).permute(2, 1, 0),
            torch.randn(N, N, N, device=GPU_TYPE).permute(1, 2, 0),
        )
        f_c = torch.compile(f)
        out, code = run_and_get_code(f_c, *inps)
        FileCheck().check_dag("xnumel = 512").check_dag("ynumel = 512").check_dag(
            "rnumel"
        ).run(code[0])
        # loose tolerances: large float reduction accumulates rounding error
        self.assertEqual(out, f(*inps), atol=0.001, rtol=0.04)

    def test_3d_pointwise(self):
        """Three mutually-incompatible layouts should yield a 3-D (x/y/z) tiling."""
        inps = (self.T("cont"), self.T("T"), self.T("NHWC"))

        def f(x, y, z):
            return x + y + z

        f_c = torch.compile(f)
        out, code = run_and_get_code(f_c, *inps)
        FileCheck().check_dag("znumel").check_dag("ynumel").check_dag("xnumel").run(
            code[0]
        )
        self.assertEqual(out, f(*inps))

    def test_cat(self):
        # test unwrapping Identity
        def f(x, y):
            return torch.cat((x, y)) + 1

        x = self.T("cont")
        y = self.T("T")
        inps = (x, y)
        f_c = torch.compile(f)
        out, code = run_and_get_code(f_c, *inps)
        FileCheck().check_dag("ynumel").check_dag("xnumel").run(code[0])
        self.assertEqual(out, f(*inps))

    def test_penalized_small_dim(self):
        """A tiny broadcast dim should not trigger 2-D tiling."""
        x = torch.rand([2000, 1], device=GPU_TYPE)
        y = torch.rand([4, 1], device=GPU_TYPE).T

        # don't tile when it doesn't affect total coalesced mem accesses much
        def f(x, y):
            return x + y

        inps = (x, y)
        f_c = torch.compile(f)
        out, code = run_and_get_code(f_c, *inps)
        FileCheck().check_not("ynumel").check_dag("xnumel").run(code[0])
        self.assertEqual(out, f(*inps))

    def test_mutation_deps(self):
        """In-place add: coalescing analysis should see one read and one write."""
        def f(x):
            return x.add_(1)

        x = self.T("cont")

        from torch._inductor import tiling_utils

        def fn(nodes):
            # post-fusion hook: inspect the single fused node's memory analysis
            self.assertTrue(len(nodes) == 1)
            coalesce_analysis = tiling_utils.analyze_memory_coalescing(nodes[0])
            assert coalesce_analysis is not None
            reads = coalesce_analysis.norm_read_writes.reads
            writes = coalesce_analysis.norm_read_writes.writes
            self.assertTrue(len(reads) == 1 and len(writes) == 1)
            self.assertEqual(
                list(coalesce_analysis.norm_read_writes.reads.values()),
                [OrderedSet(("arg0_1",))],
            )
            self.assertEqual(
                list(coalesce_analysis.norm_read_writes.writes.values()),
                [OrderedSet(("buf1",))],
            )
            return nodes

        with torch._inductor.config.patch(_post_fusion_custom_pass=fn), torch.no_grad():
            torch.compile(f)(x)

    def test_find_broadcast_var(self):
        """Test broadcast variable detection for tiling improvements."""
        from torch._inductor import tiling_utils

        i, j, k = sympy.symbols("i j k", integer=True)

        # Test broadcast pattern detection: FloorDiv creates broadcast
        result = tiling_utils.find_broadcast_var(
            FloorDiv(i, 10), {i: 100, j: 50, k: 20}
        )
        self.assertEqual(result, i)

        # Test non-broadcast: linear access pattern
        result = tiling_utils.find_broadcast_var(i + j * 10, {i: 10, j: 8, k: 20})
        self.assertEqual(result, None)
TestTiling
python
walkccc__LeetCode
solutions/53. Maximum Subarray/53.py
{ "start": 0, "end": 265 }
class ____:
    def maxSubArray(self, nums: list[int]) -> int:
        """Return the maximum sum over all non-empty contiguous subarrays.

        Kadane's algorithm in its O(1)-space form: `current` is the best sum
        of a subarray ending at the present index (either extend the previous
        run or restart at the current element), and `best` tracks the global
        maximum. Replaces the original O(n) `dp` list with two scalars while
        producing identical results.

        Args:
            nums: Non-empty list of integers (LeetCode 53 guarantees >= 1
                element; an empty list raises IndexError, as before).

        Returns:
            The largest achievable contiguous-subarray sum (may be negative
            when every element is negative).
        """
        best = current = nums[0]
        for num in nums[1:]:
            # Extend the running subarray or start fresh, whichever is larger.
            current = max(num, current + num)
            best = max(best, current)
        return best
Solution
python
lepture__authlib
authlib/oidc/discovery/models.py
{ "start": 128, "end": 12442 }
class ____(AuthorizationServerMetadata): REGISTRY_KEYS = [ "issuer", "authorization_endpoint", "token_endpoint", "jwks_uri", "registration_endpoint", "scopes_supported", "response_types_supported", "response_modes_supported", "grant_types_supported", "token_endpoint_auth_methods_supported", "service_documentation", "ui_locales_supported", "op_policy_uri", "op_tos_uri", # added by OpenID "token_endpoint_auth_signing_alg_values_supported", "acr_values_supported", "subject_types_supported", "id_token_signing_alg_values_supported", "id_token_encryption_alg_values_supported", "id_token_encryption_enc_values_supported", "userinfo_signing_alg_values_supported", "userinfo_encryption_alg_values_supported", "userinfo_encryption_enc_values_supported", "request_object_signing_alg_values_supported", "request_object_encryption_alg_values_supported", "request_object_encryption_enc_values_supported", "display_values_supported", "claim_types_supported", "claims_supported", "claims_locales_supported", "claims_parameter_supported", "request_parameter_supported", "request_uri_parameter_supported", "require_request_uri_registration", # not defined by OpenID # 'revocation_endpoint', # 'revocation_endpoint_auth_methods_supported', # 'revocation_endpoint_auth_signing_alg_values_supported', # 'introspection_endpoint', # 'introspection_endpoint_auth_methods_supported', # 'introspection_endpoint_auth_signing_alg_values_supported', # 'code_challenge_methods_supported', ] def validate_jwks_uri(self): # REQUIRED in OpenID Connect jwks_uri = self.get("jwks_uri") if jwks_uri is None: raise ValueError('"jwks_uri" is required') return super().validate_jwks_uri() def validate_acr_values_supported(self): """OPTIONAL. JSON array containing a list of the Authentication Context Class References that this OP supports. """ validate_array_value(self, "acr_values_supported") def validate_subject_types_supported(self): """REQUIRED. 
JSON array containing a list of the Subject Identifier types that this OP supports. Valid types include pairwise and public. """ # 1. REQUIRED values = self.get("subject_types_supported") if values is None: raise ValueError('"subject_types_supported" is required') # 2. JSON array if not isinstance(values, list): raise ValueError('"subject_types_supported" MUST be JSON array') # 3. Valid types include pairwise and public valid_types = {"pairwise", "public"} if not valid_types.issuperset(set(values)): raise ValueError('"subject_types_supported" contains invalid values') def validate_id_token_signing_alg_values_supported(self): """REQUIRED. JSON array containing a list of the JWS signing algorithms (alg values) supported by the OP for the ID Token to encode the Claims in a JWT [JWT]. The algorithm RS256 MUST be included. The value none MAY be supported, but MUST NOT be used unless the Response Type used returns no ID Token from the Authorization Endpoint (such as when using the Authorization Code Flow). """ # 1. REQUIRED values = self.get("id_token_signing_alg_values_supported") if values is None: raise ValueError('"id_token_signing_alg_values_supported" is required') # 2. JSON array if not isinstance(values, list): raise ValueError( '"id_token_signing_alg_values_supported" MUST be JSON array' ) # 3. The algorithm RS256 MUST be included if "RS256" not in values: raise ValueError( '"RS256" MUST be included in "id_token_signing_alg_values_supported"' ) def validate_id_token_encryption_alg_values_supported(self): """OPTIONAL. JSON array containing a list of the JWE encryption algorithms (alg values) supported by the OP for the ID Token to encode the Claims in a JWT. """ validate_array_value(self, "id_token_encryption_alg_values_supported") def validate_id_token_encryption_enc_values_supported(self): """OPTIONAL. JSON array containing a list of the JWE encryption algorithms (enc values) supported by the OP for the ID Token to encode the Claims in a JWT. 
""" validate_array_value(self, "id_token_encryption_enc_values_supported") def validate_userinfo_signing_alg_values_supported(self): """OPTIONAL. JSON array containing a list of the JWS signing algorithms (alg values) [JWA] supported by the UserInfo Endpoint to encode the Claims in a JWT. The value none MAY be included. """ validate_array_value(self, "userinfo_signing_alg_values_supported") def validate_userinfo_encryption_alg_values_supported(self): """OPTIONAL. JSON array containing a list of the JWE encryption algorithms (alg values) [JWA] supported by the UserInfo Endpoint to encode the Claims in a JWT. """ validate_array_value(self, "userinfo_encryption_alg_values_supported") def validate_userinfo_encryption_enc_values_supported(self): """OPTIONAL. JSON array containing a list of the JWE encryption algorithms (enc values) [JWA] supported by the UserInfo Endpoint to encode the Claims in a JWT. """ validate_array_value(self, "userinfo_encryption_enc_values_supported") def validate_request_object_signing_alg_values_supported(self): """OPTIONAL. JSON array containing a list of the JWS signing algorithms (alg values) supported by the OP for Request Objects, which are described in Section 6.1 of OpenID Connect Core 1.0. These algorithms are used both when the Request Object is passed by value (using the request parameter) and when it is passed by reference (using the request_uri parameter). Servers SHOULD support none and RS256. """ values = self.get("request_object_signing_alg_values_supported") if not values: return if not isinstance(values, list): raise ValueError( '"request_object_signing_alg_values_supported" MUST be JSON array' ) def validate_request_object_encryption_alg_values_supported(self): """OPTIONAL. JSON array containing a list of the JWE encryption algorithms (alg values) supported by the OP for Request Objects. These algorithms are used both when the Request Object is passed by value and when it is passed by reference. 
""" validate_array_value(self, "request_object_encryption_alg_values_supported") def validate_request_object_encryption_enc_values_supported(self): """OPTIONAL. JSON array containing a list of the JWE encryption algorithms (enc values) supported by the OP for Request Objects. These algorithms are used both when the Request Object is passed by value and when it is passed by reference. """ validate_array_value(self, "request_object_encryption_enc_values_supported") def validate_display_values_supported(self): """OPTIONAL. JSON array containing a list of the display parameter values that the OpenID Provider supports. These values are described in Section 3.1.2.1 of OpenID Connect Core 1.0. """ values = self.get("display_values_supported") if not values: return if not isinstance(values, list): raise ValueError('"display_values_supported" MUST be JSON array') valid_values = {"page", "popup", "touch", "wap"} if not valid_values.issuperset(set(values)): raise ValueError('"display_values_supported" contains invalid values') def validate_claim_types_supported(self): """OPTIONAL. JSON array containing a list of the Claim Types that the OpenID Provider supports. These Claim Types are described in Section 5.6 of OpenID Connect Core 1.0. Values defined by this specification are normal, aggregated, and distributed. If omitted, the implementation supports only normal Claims. """ values = self.get("claim_types_supported") if not values: return if not isinstance(values, list): raise ValueError('"claim_types_supported" MUST be JSON array') valid_values = {"normal", "aggregated", "distributed"} if not valid_values.issuperset(set(values)): raise ValueError('"claim_types_supported" contains invalid values') def validate_claims_supported(self): """RECOMMENDED. JSON array containing a list of the Claim Names of the Claims that the OpenID Provider MAY be able to supply values for. Note that for privacy or other reasons, this might not be an exhaustive list. 
""" validate_array_value(self, "claims_supported") def validate_claims_locales_supported(self): """OPTIONAL. Languages and scripts supported for values in Claims being returned, represented as a JSON array of BCP47 [RFC5646] language tag values. Not all languages and scripts are necessarily supported for all Claim values. """ validate_array_value(self, "claims_locales_supported") def validate_claims_parameter_supported(self): """OPTIONAL. Boolean value specifying whether the OP supports use of the claims parameter, with true indicating support. If omitted, the default value is false. """ _validate_boolean_value(self, "claims_parameter_supported") def validate_request_parameter_supported(self): """OPTIONAL. Boolean value specifying whether the OP supports use of the request parameter, with true indicating support. If omitted, the default value is false. """ _validate_boolean_value(self, "request_parameter_supported") def validate_request_uri_parameter_supported(self): """OPTIONAL. Boolean value specifying whether the OP supports use of the request_uri parameter, with true indicating support. If omitted, the default value is true. """ _validate_boolean_value(self, "request_uri_parameter_supported") def validate_require_request_uri_registration(self): """OPTIONAL. Boolean value specifying whether the OP requires any request_uri values used to be pre-registered using the request_uris registration parameter. Pre-registration is REQUIRED when the value is true. If omitted, the default value is false. """ _validate_boolean_value(self, "require_request_uri_registration") @property def claim_types_supported(self): # If omitted, the implementation supports only normal Claims return self.get("claim_types_supported", ["normal"]) @property def claims_parameter_supported(self): # If omitted, the default value is false. return self.get("claims_parameter_supported", False) @property def request_parameter_supported(self): # If omitted, the default value is false. 
return self.get("request_parameter_supported", False) @property def request_uri_parameter_supported(self): # If omitted, the default value is true. return self.get("request_uri_parameter_supported", True) @property def require_request_uri_registration(self): # If omitted, the default value is false. return self.get("require_request_uri_registration", False) def _validate_boolean_value(metadata, key): if key not in metadata: return if metadata[key] not in (True, False): raise ValueError(f'"{key}" MUST be boolean')
OpenIDProviderMetadata
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeVarTuple1.py
{ "start": 959, "end": 1606 }
class ____(Generic[_Xs]): ... # This should generate an error. x: list[_Xs] = [] # This should generate an error. y: _Xs = () # This should generate an error because of the name mismatch. BadName = TypeVarTuple("Ts1") # This should generate TypeVarTuple cannot have constraints. Ts2 = TypeVarTuple("Ts2", int, str) # This should generate TypeVarTuple cannot be covariant. Ts3 = TypeVarTuple("Ts3", covariant=True) # This should generate TypeVarTuple cannot be contravariant. Ts4 = TypeVarTuple("Ts4", contravariant=True) # This should generate TypeVarTuple does not accept other keyword arguments. Ts5 = TypeVarTuple("Ts5", other=True)
ClassB
python
davidhalter__jedi
jedi/inference/lazy_value.py
{ "start": 96, "end": 384 }
class ____: def __init__(self, data, min=1, max=1): self.data = data self.min = min self.max = max def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.data) def infer(self): raise NotImplementedError
AbstractLazyValue
python
getsentry__sentry
src/sentry/monitors/endpoints/organization_monitor_processing_errors_index.py
{ "start": 928, "end": 2077 }
class ____(OrganizationEndpoint): publish_status = { "GET": ApiPublishStatus.PRIVATE, } owner = ApiOwner.CRONS @extend_schema( operation_id="Retrieve checkin processing errors for an Organization", parameters=[ GlobalParams.ORG_ID_OR_SLUG, ], responses={ 200: inline_sentry_response_serializer( "CheckinProcessingError", list[CheckinProcessingErrorData] ), 401: RESPONSE_UNAUTHORIZED, 403: RESPONSE_FORBIDDEN, 404: RESPONSE_NOT_FOUND, }, ) def get(self, request: AuthenticatedHttpRequest, organization: Organization) -> Response: """ Retrieves checkin processing errors for an organization """ projects = self.get_projects(request, organization) paginator = SequencePaginator(list(enumerate(get_errors_for_projects(projects)))) return self.paginate( request=request, paginator=paginator, on_results=lambda results: serialize(results, request.user), )
OrganizationMonitorProcessingErrorsIndexEndpoint
python
ansible__ansible
test/lib/ansible_test/_internal/commands/integration/filters.py
{ "start": 758, "end": 5537 }
class ____[THostConfig: HostConfig](metaclass=abc.ABCMeta): """Base class for target filters.""" def __init__(self, args: IntegrationConfig, configs: list[THostConfig], controller: bool) -> None: self.args = args self.configs = configs self.controller = controller self.host_type = 'controller' if controller else 'target' # values which are not host specific self.include_targets = args.include self.allow_root = args.allow_root self.allow_destructive = args.allow_destructive @property def config(self) -> THostConfig: """The configuration to filter. Only valid when there is a single config.""" if len(self.configs) != 1: raise Exception() return self.configs[0] def skip( self, skip: str, reason: str, targets: list[IntegrationTarget], exclude: set[str], override: t.Optional[list[str]] = None, ) -> None: """Apply the specified skip rule to the given targets by updating the provided exclude list.""" if skip.startswith('skip/'): skipped = [target.name for target in targets if skip in target.skips and (not override or target.name not in override)] else: skipped = [target.name for target in targets if f'{skip}/' in target.aliases and (not override or target.name not in override)] self.apply_skip(f'"{skip}"', reason, skipped, exclude) def apply_skip(self, marked: str, reason: str, skipped: list[str], exclude: set[str]) -> None: """Apply the provided skips to the given exclude list.""" if not skipped: return exclude.update(skipped) display.warning(f'Excluding {self.host_type} tests marked {marked} {reason}: {", ".join(skipped)}') def filter_profiles[THostProfile: HostProfile](self, profiles: list[THostProfile], target: IntegrationTarget) -> list[THostProfile]: """Filter the list of profiles, returning only those which are not skipped for the given target.""" del target return profiles def filter_targets(self, targets: list[IntegrationTarget], exclude: set[str]) -> None: """Filter the list of targets, adding any which this host profile cannot support to the provided exclude 
list.""" if self.controller and self.args.host_settings.controller_fallback and targets: affected_targets = [target.name for target in targets] reason = self.args.host_settings.controller_fallback.reason if reason == FallbackReason.ENVIRONMENT: exclude.update(affected_targets) display.warning(f'Excluding {self.host_type} tests since a fallback controller is in use: {", ".join(affected_targets)}') elif reason == FallbackReason.PYTHON: display.warning(f'Some {self.host_type} tests may be redundant since a fallback python is in use: {", ".join(affected_targets)}') if not self.allow_destructive and not self.config.is_managed: override_destructive = set(target for target in self.include_targets if target.startswith('destructive/')) override = [target.name for target in targets if override_destructive & set(target.aliases)] self.skip('destructive', 'which require --allow-destructive or prefixing with "destructive/" to run on unmanaged hosts', targets, exclude, override) if not self.args.allow_disabled: override_disabled = set(target for target in self.args.include if target.startswith('disabled/')) override = [target.name for target in targets if override_disabled & set(target.aliases)] self.skip('disabled', 'which require --allow-disabled or prefixing with "disabled/"', targets, exclude, override) if not self.args.allow_unsupported: override_unsupported = set(target for target in self.args.include if target.startswith('unsupported/')) override = [target.name for target in targets if override_unsupported & set(target.aliases)] self.skip('unsupported', 'which require --allow-unsupported or prefixing with "unsupported/"', targets, exclude, override) if not self.args.allow_unstable: override_unstable = set(target for target in self.args.include if target.startswith('unstable/')) if self.args.allow_unstable_changed: override_unstable |= set(self.args.metadata.change_description.focused_targets or []) override = [target.name for target in targets if override_unstable & 
set(target.aliases)] self.skip('unstable', 'which require --allow-unstable or prefixing with "unstable/"', targets, exclude, override)
TargetFilter
python
RaRe-Technologies__gensim
gensim/similarities/termsim.py
{ "start": 1912, "end": 3073 }
class ____(TermSimilarityIndex): """ Retrieves most similar terms for a given term under the hypothesis that the similarities between distinct terms are uniform. Parameters ---------- dictionary : :class:`~gensim.corpora.dictionary.Dictionary` A dictionary that specifies the considered terms. term_similarity : float, optional The uniform similarity between distinct terms. See Also -------- :class:`~gensim.similarities.termsim.SparseTermSimilarityMatrix` A sparse term similarity matrix built using a term similarity index. Notes ----- This class is mainly intended for testing SparseTermSimilarityMatrix and other classes that depend on the TermSimilarityIndex. """ def __init__(self, dictionary, term_similarity=0.5): self.dictionary = sorted(dictionary.items()) self.term_similarity = term_similarity def most_similar(self, t1, topn=10): for __, (t2_index, t2) in zip(range(topn), ( (t2_index, t2) for t2_index, t2 in self.dictionary if t2 != t1)): yield (t2, self.term_similarity)
UniformTermSimilarityIndex
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI019_0.py
{ "start": 3237, "end": 3467 }
class ____: ... # Our stable-mode logic uses heuristics and thinks this is a `TypeVar` # because `self` and the return annotation use the same name as their annotation, # but our preview-mode logic is smarter about this.
_NotATypeVar
python
SmileyChris__easy-thumbnails
easy_thumbnails/conf.py
{ "start": 2748, "end": 10613 }
class ____(AppSettings): """ These default settings for easy-thumbnails can be specified in your Django project's settings module to alter the behaviour of easy-thumbnails. """ THUMBNAIL_DEBUG = False """ If this boolean setting is set to ``True``, display errors creating a thumbnail when using the :ref:`thumbnail_tag` rather than failing silently. """ THUMBNAIL_DEFAULT_STORAGE = ( 'easy_thumbnails.storage.ThumbnailFileSystemStorage') """ (DEPRECATED) The default Django storage for *saving* generated thumbnails. """ THUMBNAIL_DEFAULT_STORAGE_ALIAS = 'easy_thumbnails' """ Django 4.2+: The default Django storage name for *saving* generated thumbnails. """ THUMBNAIL_MEDIA_ROOT = '' """ Used by easy-thumbnail's default storage to locate where thumbnails are stored on the file system. If not provided, Django's standard ``MEDIA_ROOT`` setting is used. """ THUMBNAIL_MEDIA_URL = '' """ Used by easy-thumbnail's default storage to build the absolute URL for a generated thumbnail. If not provided, Django's standard ``MEDIA_URL`` setting is used. """ THUMBNAIL_BASEDIR = '' """ Save thumbnail images to a directory directly off ``MEDIA_ROOT``, still keeping the relative directory structure of the source image. For example, using the ``{% thumbnail "photos/1.jpg" 150x150 %}`` tag with a ``THUMBNAIL_BASEDIR`` of ``'thumbs'`` would result in the following thumbnail filename:: MEDIA_ROOT + 'thumbs/photos/1_jpg_150x150_q85.jpg' """ THUMBNAIL_SUBDIR = '' """ Save thumbnail images to a sub-directory relative to the source image. For example, using the ``{% thumbnail "photos/1.jpg" 150x150 %}`` tag with a ``THUMBNAIL_SUBDIR`` of ``'thumbs'`` would result in the following thumbnail filename:: MEDIA_ROOT + 'photos/thumbs/1_jpg_150x150_q85.jpg' """ THUMBNAIL_PREFIX = '' """ Prepend thumbnail filenames with the specified prefix. 
For example, using the ``{% thumbnail "photos/1.jpg" 150x150 %}`` tag with a ``THUMBNAIL_PREFIX`` of ``'thumbs_'`` would result in the following thumbnail filename:: MEDIA_ROOT + 'photos/thumbs_1_jpg_150x150_q85.jpg' """ THUMBNAIL_QUALITY = 85 """ The default quality level for JPG images on a scale from 1 (worst) to 95 (best). Technically, values up to 100 are allowed, but this is not recommended. """ THUMBNAIL_PROGRESSIVE = 100 """ Use progressive JPGs for thumbnails where either dimension is at least this many pixels. For example, a 90x90 image will be saved as a baseline JPG while a 728x90 image will be saved as a progressive JPG. Set to ``False`` to never use progressive encoding. """ THUMBNAIL_EXTENSION = 'jpg' """ The type of image to save thumbnails with no transparency layer as. Note that changing the extension will most likely cause the ``THUMBNAIL_QUALITY`` setting to have no effect. """ THUMBNAIL_PRESERVE_EXTENSIONS = None """ To preserve specific extensions, for instance if you always want to create lossless PNG thumbnails from PNG sources, you can specify these extensions using this setting, for example:: THUMBNAIL_PRESERVE_EXTENSIONS = ['png'] All extensions should be lowercase. Instead of a tuple, you can also set this to ``True`` in order to always preserve the original extension. """ THUMBNAIL_TRANSPARENCY_EXTENSION = 'png' """ The type of image to save thumbnails with a transparency layer (e.g. GIFs or transparent PNGs). """ THUMBNAIL_NAMER = 'easy_thumbnails.namers.default' """ The function used to generate the filename for thumbnail images. Four namers are included in easy_thumbnails: ``easy_thumbnails.namers.default`` Descriptive filename containing the source and options like ``source.jpg.100x100_q80_crop_upscale.jpg``. ``easy_thumbnails.namers.hashed`` Short hashed filename like ``1xedFtqllFo9.jpg``. ``easy_thumbnails.namers.alias`` Filename based on ``THUMBNAIL_ALIASES`` dictionary key like ``source.jpg.medium_large.jpg``. 
``easy_thumbnails.namers.source_hashed`` Filename with source hashed, size, then options hashed like ``1xedFtqllFo9_100x100_QHCa6G1l.jpg``. To write a custom namer, always catch all other keyword arguments arguments (with \\*\\*kwargs). You have access to the following arguments: ``thumbnailer``, ``source_filename``, ``thumbnail_extension`` (does *not* include the ``'.'``), ``thumbnail_options``, ``prepared_options``. The ``thumbnail_options`` are a copy of the options dictionary used to build the thumbnail, ``prepared_options`` is a list of options prepared as text, and excluding options that shouldn't be included in the filename. """ THUMBNAIL_PROCESSORS = ( 'easy_thumbnails.processors.colorspace', 'easy_thumbnails.processors.autocrop', 'easy_thumbnails.processors.scale_and_crop', 'easy_thumbnails.processors.filters', 'easy_thumbnails.processors.background', ) """ Defaults to:: THUMBNAIL_PROCESSORS = ( 'easy_thumbnails.processors.colorspace', 'easy_thumbnails.processors.autocrop', 'easy_thumbnails.processors.scale_and_crop', 'easy_thumbnails.processors.filters', 'easy_thumbnails.processors.background', ) The :doc:`processors` through which the source image is run when you create a thumbnail. The order of the processors is the order in which they are sequentially called to process the image. """ THUMBNAIL_SOURCE_GENERATORS = ( 'easy_thumbnails.source_generators.pil_image', 'easy_thumbnails.source_generators.vil_image', ) """ The :doc:`source_generators` through which the base image is created from the source file. The order of the processors is the order in which they are sequentially tried. """ THUMBNAIL_CHECK_CACHE_MISS = False """ If this boolean setting is set to ``True``, and a thumbnail cannot be found in the database tables, we ask the storage if it has the thumbnail. If it does we add the row in the database, and we don't need to generate the thumbnail. 
Switch this to True if your easy_thumbnails_thumbnail table has been wiped but your storage still has the thumbnail files. """ THUMBNAIL_ALIASES = None """ A dictionary of predefined alias options for different targets. See the :ref:`usage documentation <thumbnail-aliases>` for details. """ THUMBNAIL_DEFAULT_OPTIONS = None """ Set this to a dictionary of options to provide as the default for all thumbnail calls. For example, to make all images greyscale:: THUMBNAIL_DEFAULT_OPTIONS = {'bw': True} """ THUMBNAIL_CACHE_DIMENSIONS = False """ Save thumbnail dimensions to the database. When using remote storage backends it can be a slow process to get image dimensions for a thumbnailed file. This option will store them in the database to be recalled quickly when required. Note: the old method still works as a fall back. """ THUMBNAIL_WIDGET_OPTIONS = {'size': (80, 80)} """ Default options for the :class:`easy_thumbnails.widgets.ImageClearableFileInput` widget. """ THUMBNAIL_IMAGE_SAVE_OPTIONS = { 'JPEG': { 'quality': 85, }, 'WEBP': { 'quality': 85, }, } """ Allows customising Image.save parameters based on format, for example: `{'WEBP': {'method': 6}}` """ settings = Settings()
Settings
python
tornadoweb__tornado
tornado/test/circlerefs_test.py
{ "start": 3140, "end": 7321 }
class ____(unittest.TestCase): def test_known_leak(self): # Construct a known leak scenario to make sure the test harness works. class C: def __init__(self, name): self.name = name self.a: typing.Optional[C] = None self.b: typing.Optional[C] = None self.c: typing.Optional[C] = None def __repr__(self): return f"name={self.name}" with self.assertRaises(AssertionError) as cm: with assert_no_cycle_garbage(): # a and b form a reference cycle. c is not part of the cycle, # but it cannot be GC'd while a and b are alive. a = C("a") b = C("b") c = C("c") a.b = b a.c = c b.a = a b.c = c del a, b self.assertIn("Circular", str(cm.exception)) # Leading spaces ensure we only catch these at the beginning of a line, meaning they are a # cycle participant and not simply the contents of a locals dict or similar container. (This # depends on the formatting above which isn't ideal but this test evolved from a # command-line script) Note that the behavior here changed in python 3.11; in newer pythons # locals are handled a bit differently and the test passes without the spaces. self.assertIn(" name=a", str(cm.exception)) self.assertIn(" name=b", str(cm.exception)) self.assertNotIn(" name=c", str(cm.exception)) async def run_handler(self, handler_class): app = web.Application( [ (r"/", handler_class), ] ) socket, port = tornado.testing.bind_unused_port() server = tornado.httpserver.HTTPServer(app) server.add_socket(socket) client = httpclient.AsyncHTTPClient() with assert_no_cycle_garbage(): # Only the fetch (and the corresponding server-side handler) # are being tested for cycles. In particular, the Application # object has internal cycles (as of this writing) which we don't # care to fix since in real world usage the Application object # is effectively a global singleton. 
await client.fetch(f"http://127.0.0.1:{port}/") client.close() server.stop() socket.close() def test_sync_handler(self): class Handler(web.RequestHandler): def get(self): self.write("ok\n") asyncio.run(self.run_handler(Handler)) def test_finish_exception_handler(self): class Handler(web.RequestHandler): def get(self): raise web.Finish("ok\n") asyncio.run(self.run_handler(Handler)) def test_coro_handler(self): class Handler(web.RequestHandler): @gen.coroutine def get(self): yield asyncio.sleep(0.01) self.write("ok\n") asyncio.run(self.run_handler(Handler)) def test_async_handler(self): class Handler(web.RequestHandler): async def get(self): await asyncio.sleep(0.01) self.write("ok\n") asyncio.run(self.run_handler(Handler)) def test_run_on_executor(self): # From https://github.com/tornadoweb/tornado/issues/2620 # # When this test was introduced it found cycles in IOLoop.add_future # and tornado.concurrent.chain_future. import concurrent.futures with concurrent.futures.ThreadPoolExecutor(1) as thread_pool: class Factory: executor = thread_pool @tornado.concurrent.run_on_executor def run(self): return None factory = Factory() async def main(): # The cycle is not reported on the first call. It's not clear why. for i in range(2): await factory.run() with assert_no_cycle_garbage(): asyncio.run(main())
CircleRefsTest
python
ray-project__ray
python/ray/train/v2/_internal/execution/controller/state.py
{ "start": 3617, "end": 3845 }
class ____(TrainControllerState): def __init__(self, scaling_decision: ScalingDecision): super().__init__(state_type=TrainControllerStateType.SCHEDULING) self.scaling_decision = scaling_decision
SchedulingState
python
spack__spack
lib/spack/spack/vendor/jinja2/nodes.py
{ "start": 33705, "end": 34073 }
class ____(Stmt): """Modifies the eval context. For each option that should be modified, a :class:`Keyword` has to be added to the :attr:`options` list. Example to change the `autoescape` setting:: EvalContextModifier(options=[Keyword('autoescape', Const(True))]) """ fields = ("options",) options: t.List[Keyword]
EvalContextModifier
python
tensorflow__tensorflow
tensorflow/python/distribute/multi_worker_test_base_test.py
{ "start": 986, "end": 3185 }
class ____(test.TestCase): def setUp(self): super(MultiProcessClusterTest, self).setUp() self._cluster = multi_worker_test_base.create_multi_process_cluster( num_workers=2, num_ps=1, has_chief=True, rpc_layer="grpc") remote.connect_to_cluster( self._cluster.cluster_resolver.cluster_spec(), protocol="grpc") context.ensure_initialized() def testClusterIsAlive(self): self.assertTrue(context.check_alive("/job:worker/replica:0/task:0")) self.assertTrue(context.check_alive("/job:worker/replica:0/task:1")) self.assertTrue(context.check_alive("/job:ps/replica:0/task:0")) self.assertTrue(context.check_alive("/job:chief/replica:0/task:0")) def testKillAndStartTask(self): self.assertTrue(context.check_alive("/job:worker/replica:0/task:0")) # It is not allowed to start a task before killing it. with self.assertRaises(ValueError): self._cluster.start_task("worker", 0) self._cluster.kill_task("worker", 0) self.assertFalse(context.check_alive("/job:worker/replica:0/task:0")) # The task is already killed. with self.assertRaises(ValueError): self._cluster.kill_task("worker", 0) self._cluster.start_task("worker", 0) # Without a call to update_server_def, the next check_alive will return # False. Alternatively sleeping for 2 seconds here also works. 
context.context().update_server_def(context.get_server_def()) self.assertTrue(context.check_alive("/job:worker/replica:0/task:0")) def testStop(self): self._cluster.stop() self.assertFalse(context.check_alive("/job:worker/replica:0/task:0")) self.assertFalse(context.check_alive("/job:worker/replica:0/task:1")) self.assertFalse(context.check_alive("/job:ps/replica:0/task:0")) self.assertFalse(context.check_alive("/job:chief/replica:0/task:0")) def testClusterResolverProperty(self): cluster_spec = self._cluster.cluster_resolver.cluster_spec().as_dict() self.assertEqual(len(cluster_spec["worker"]), 2) self.assertEqual(len(cluster_spec["ps"]), 1) self.assertEqual(len(cluster_spec["chief"]), 1) if __name__ == "__main__": multi_process_runner.test_main()
MultiProcessClusterTest
python
aio-libs__aiohttp
aiohttp/web_exceptions.py
{ "start": 6162, "end": 6223 }
class ____(HTTPMove): status_code = 300
HTTPMultipleChoices
python
huggingface__transformers
src/transformers/models/prophetnet/configuration_prophetnet.py
{ "start": 865, "end": 8920 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`ProphetNetModel`]. It is used to instantiate a ProphetNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ProphetNet [microsoft/prophetnet-large-uncased](https://huggingface.co/microsoft/prophetnet-large-uncased) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the ProphetNET model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ProphetNetModel`]. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. num_encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. num_encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the `intermediate` (often named feed-forward) layer in decoder. num_decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. 
num_decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. add_cross_attention (`bool`, *optional*, defaults to `True`): Whether cross-attention layers should be added to the model. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether this is an encoder/decoder model. pad_token_id (`int`, *optional*, defaults to 1) Padding token id. bos_token_id (`int`, *optional*, defaults to 0) Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2) End of stream token id. ngram (`int`, *optional*, defaults to 2) Number of future tokens to predict. Set to 1 to be same as traditional Language model to predict next first token. num_buckets (`int`, *optional*, defaults to 32) The number of buckets to use for each attention layer. This is for relative position calculation. See the [T5 paper](see https://huggingface.co/papers/1910.10683) for more details. relative_max_distance (`int`, *optional*, defaults to 128) Relative distances greater than this number will be put into the last same bucket. This is for relative position calculation. See the [T5 paper](see https://huggingface.co/papers/1910.10683) for more details. disable_ngram_loss (`bool`, *optional*, defaults to `False`): Whether be trained predicting only the next first token. 
eps (`float`, *optional*, defaults to 0.0): Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). """ model_type = "prophetnet" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "num_encoder_attention_heads", } def __init__( self, activation_dropout: Optional[float] = 0.1, activation_function: Optional[Union[str, Callable]] = "gelu", vocab_size: Optional[int] = 30522, hidden_size: Optional[int] = 1024, encoder_ffn_dim: Optional[int] = 4096, num_encoder_layers: Optional[int] = 12, num_encoder_attention_heads: Optional[int] = 16, decoder_ffn_dim: Optional[int] = 4096, num_decoder_layers: Optional[int] = 12, num_decoder_attention_heads: Optional[int] = 16, attention_dropout: Optional[float] = 0.1, dropout: Optional[float] = 0.1, max_position_embeddings: Optional[int] = 512, init_std: Optional[float] = 0.02, is_encoder_decoder: Optional[bool] = True, add_cross_attention: Optional[bool] = True, decoder_start_token_id: Optional[int] = 0, ngram: Optional[int] = 2, num_buckets: Optional[int] = 32, relative_max_distance: Optional[int] = 128, disable_ngram_loss: Optional[bool] = False, eps: Optional[float] = 0.0, use_cache: Optional[bool] = True, pad_token_id: Optional[int] = 0, bos_token_id: Optional[int] = 1, eos_token_id: Optional[int] = 2, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.encoder_ffn_dim = encoder_ffn_dim self.num_encoder_layers = num_encoder_layers self.num_encoder_attention_heads = num_encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.num_decoder_layers = num_decoder_layers self.num_decoder_attention_heads = num_decoder_attention_heads self.max_position_embeddings = max_position_embeddings self.init_std = init_std # Normal(0, this parameter) 
self.activation_function = activation_function # parameters for prophetnet self.ngram = ngram self.num_buckets = num_buckets self.relative_max_distance = relative_max_distance self.disable_ngram_loss = disable_ngram_loss self.eps = eps # 3 Types of Dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.dropout = dropout self.use_cache = use_cache super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs, ) @property def num_hidden_layers(self) -> int: return self.num_encoder_layers @num_hidden_layers.setter def num_hidden_layers(self, value): raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and" " `num_decoder_layers`." ) __all__ = ["ProphetNetConfig"]
ProphetNetConfig
python
fluentpython__example-code-2e
10-dp-1class-func/classic_strategy.py
{ "start": 1468, "end": 2043 }
class ____(NamedTuple): # the Context customer: Customer cart: Sequence[LineItem] promotion: Optional['Promotion'] = None def total(self) -> Decimal: totals = (item.total() for item in self.cart) return sum(totals, start=Decimal(0)) def due(self) -> Decimal: if self.promotion is None: discount = Decimal(0) else: discount = self.promotion.discount(self) return self.total() - discount def __repr__(self): return f'<Order total: {self.total():.2f} due: {self.due():.2f}>'
Order
python
scipy__scipy
scipy/io/matlab/_mio4.py
{ "start": 10200, "end": 15037 }
class ____(MatFileReader): ''' Reader for Mat4 files ''' @docfiller def __init__(self, mat_stream, *args, **kwargs): ''' Initialize matlab 4 file reader %(matstream_arg)s %(load_args)s ''' super().__init__(mat_stream, *args, **kwargs) self._matrix_reader = None def guess_byte_order(self): self.mat_stream.seek(0) mopt = read_dtype(self.mat_stream, np.dtype('i4')) self.mat_stream.seek(0) if mopt == 0: return '<' if mopt < 0 or mopt > 5000: # Number must have been byteswapped return SYS_LITTLE_ENDIAN and '>' or '<' # Not byteswapped return SYS_LITTLE_ENDIAN and '<' or '>' def initialize_read(self): ''' Run when beginning read of variables Sets up readers from parameters in `self` ''' self.dtypes = convert_dtypes(mdtypes_template, self.byte_order) self._matrix_reader = VarReader4(self) def read_var_header(self): ''' Read and return header, next position Parameters ---------- None Returns ------- header : object object that can be passed to self.read_var_array, and that has attributes ``name`` and ``is_global`` next_position : int position in stream of next variable ''' hdr = self._matrix_reader.read_header() # Fast product for large (>2GB) arrays. remaining_bytes = reduce(mul, hdr.dims, np.int64(hdr.dtype.itemsize)) if hdr.is_complex and not hdr.mclass == mxSPARSE_CLASS: remaining_bytes *= 2 next_position = self.mat_stream.tell() + remaining_bytes return hdr, next_position def read_var_array(self, header, process=True): ''' Read array, given `header` Parameters ---------- header : header object object with fields defining variable header process : {True, False}, optional If True, apply recursive post-processing during loading of array. Returns ------- arr : array array with post-processing applied or not according to `process`. 
''' return self._matrix_reader.array_from_header(header, process) def get_variables(self, variable_names=None): ''' get variables from stream as dictionary Parameters ---------- variable_names : None or str or sequence of str, optional variable name, or sequence of variable names to get from Mat file / file stream. If None, then get all variables in file. ''' if isinstance(variable_names, str): variable_names = [variable_names] elif variable_names is not None: variable_names = list(variable_names) self.mat_stream.seek(0) # set up variable reader self.initialize_read() mdict = {} while not self.end_of_stream(): hdr, next_position = self.read_var_header() name = 'None' if hdr.name is None else hdr.name.decode('latin1') if variable_names is not None and name not in variable_names: self.mat_stream.seek(next_position) continue mdict[name] = self.read_var_array(hdr) self.mat_stream.seek(next_position) if variable_names is not None: variable_names.remove(name) if len(variable_names) == 0: break return mdict def list_variables(self): ''' list variables from stream ''' self.mat_stream.seek(0) # set up variable reader self.initialize_read() vars = [] while not self.end_of_stream(): hdr, next_position = self.read_var_header() name = 'None' if hdr.name is None else hdr.name.decode('latin1') shape = self._matrix_reader.shape_from_header(hdr) info = mclass_info.get(hdr.mclass, 'unknown') vars.append((name, shape, info)) self.mat_stream.seek(next_position) return vars def arr_to_2d(arr, oned_as='row'): ''' Make ``arr`` exactly two dimensional If `arr` has more than 2 dimensions, raise a ValueError Parameters ---------- arr : array oned_as : {'row', 'column'}, optional Whether to reshape 1-D vectors as row vectors or column vectors. 
See documentation for ``matdims`` for more detail Returns ------- arr2d : array 2-D version of the array ''' dims = matdims(arr, oned_as) if len(dims) > 2: raise ValueError('Matlab 4 files cannot save arrays with more than ' '2 dimensions') return arr.reshape(dims)
MatFile4Reader
python
fluentpython__example-code
17-futures/countries/flags3_asyncio.py
{ "start": 361, "end": 3511 }
class ____(Exception): def __init__(self, country_code): self.country_code = country_code # BEGIN FLAGS3_ASYNCIO @asyncio.coroutine def http_get(url): res = yield from aiohttp.request('GET', url) if res.status == 200: ctype = res.headers.get('Content-type', '').lower() if 'json' in ctype or url.endswith('json'): data = yield from res.json() # <1> else: data = yield from res.read() # <2> return data elif res.status == 404: raise web.HTTPNotFound() else: raise aiohttp.errors.HttpProcessingError( code=res.status, message=res.reason, headers=res.headers) @asyncio.coroutine def get_country(base_url, cc): url = '{}/{cc}/metadata.json'.format(base_url, cc=cc.lower()) metadata = yield from http_get(url) # <3> return metadata['country'] @asyncio.coroutine def get_flag(base_url, cc): url = '{}/{cc}/{cc}.gif'.format(base_url, cc=cc.lower()) return (yield from http_get(url)) # <4> @asyncio.coroutine def download_one(cc, base_url, semaphore, verbose): try: with (yield from semaphore): # <5> image = yield from get_flag(base_url, cc) with (yield from semaphore): country = yield from get_country(base_url, cc) except web.HTTPNotFound: status = HTTPStatus.not_found msg = 'not found' except Exception as exc: raise FetchError(cc) from exc else: country = country.replace(' ', '_') filename = '{}-{}.gif'.format(country, cc) loop = asyncio.get_event_loop() loop.run_in_executor(None, save_flag, image, filename) status = HTTPStatus.ok msg = 'OK' if verbose and msg: print(cc, msg) return Result(status, cc) # END FLAGS3_ASYNCIO @asyncio.coroutine def downloader_coro(cc_list, base_url, verbose, concur_req): counter = collections.Counter() semaphore = asyncio.Semaphore(concur_req) to_do = [download_one(cc, base_url, semaphore, verbose) for cc in sorted(cc_list)] to_do_iter = asyncio.as_completed(to_do) if not verbose: to_do_iter = tqdm.tqdm(to_do_iter, total=len(cc_list)) for future in to_do_iter: try: res = yield from future except FetchError as exc: country_code = exc.country_code try: 
error_msg = exc.__cause__.args[0] except IndexError: error_msg = exc.__cause__.__class__.__name__ if verbose and error_msg: msg = '*** Error for {}: {}' print(msg.format(country_code, error_msg)) status = HTTPStatus.error else: status = res.status counter[status] += 1 return counter def download_many(cc_list, base_url, verbose, concur_req): loop = asyncio.get_event_loop() coro = downloader_coro(cc_list, base_url, verbose, concur_req) counts = loop.run_until_complete(coro) loop.close() return counts if __name__ == '__main__': main(download_many, DEFAULT_CONCUR_REQ, MAX_CONCUR_REQ)
FetchError
python
huggingface__transformers
src/transformers/models/arcee/modeling_arcee.py
{ "start": 21917, "end": 22206 }
class ____(GenericForTokenClassification, ArceePreTrainedModel): pass __all__ = [ "ArceeForCausalLM", "ArceeForQuestionAnswering", "ArceeForSequenceClassification", "ArceeForTokenClassification", "ArceeModel", "ArceePreTrainedModel", ]
ArceeForTokenClassification
python
kamyu104__LeetCode-Solutions
Python/accounts-merge.py
{ "start": 158, "end": 659 }
class ____(object): def __init__(self): self.set = [] def get_id(self): self.set.append(len(self.set)) return len(self.set)-1 def find_set(self, x): if self.set[x] != x: self.set[x] = self.find_set(self.set[x]) # path compression. return self.set[x] def union_set(self, x, y): x_root, y_root = map(self.find_set, (x, y)) if x_root != y_root: self.set[min(x_root, y_root)] = max(x_root, y_root)
UnionFind
python
etianen__django-reversion
tests/test_app/tests/test_models.py
{ "start": 2379, "end": 3085 }
class ____(TestModelMixin, TestBase): databases = {"default", "mysql", "postgres"} def testGetForObjectDb(self): with reversion.create_revision(using="postgres"): obj = TestModel.objects.create() self.assertEqual(Version.objects.get_for_object(obj).count(), 0) self.assertEqual(Version.objects.using("postgres").get_for_object(obj).count(), 1) def testGetForObjectDbMySql(self): with reversion.create_revision(using="mysql"): obj = TestModel.objects.create() self.assertEqual(Version.objects.get_for_object(obj).count(), 0) self.assertEqual(Version.objects.using("mysql").get_for_object(obj).count(), 1)
GetForObjectDbTest
python
tensorflow__tensorflow
tensorflow/python/checkpoint/checkpoint_test.py
{ "start": 2468, "end": 2665 }
class ____(autotrackable.AutoTrackable): def __init__(self): super().__init__() self.a_variable = trackable_utils.add_variable( self, name="a_variable", shape=[])
NonLayerTrackable
python
mwaskom__seaborn
seaborn/external/version.py
{ "start": 1687, "end": 3240 }
class ____: def __repr__(self) -> str: return "-Infinity" def __hash__(self) -> int: return hash(repr(self)) def __lt__(self, other: object) -> bool: return True def __le__(self, other: object) -> bool: return True def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) def __ne__(self, other: object) -> bool: return not isinstance(other, self.__class__) def __gt__(self, other: object) -> bool: return False def __ge__(self, other: object) -> bool: return False def __neg__(self: object) -> InfinityType: return Infinity NegativeInfinity = NegativeInfinityType() # Vendored from https://github.com/pypa/packaging/blob/main/packaging/version.py InfiniteTypes = Union[InfinityType, NegativeInfinityType] PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] SubLocalType = Union[InfiniteTypes, int, str] LocalType = Union[ NegativeInfinityType, Tuple[ Union[ SubLocalType, Tuple[SubLocalType, str], Tuple[NegativeInfinityType, SubLocalType], ], ..., ], ] CmpKey = Tuple[ int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType ] LegacyCmpKey = Tuple[int, Tuple[str, ...]] VersionComparisonMethod = Callable[ [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool ] _Version = collections.namedtuple( "_Version", ["epoch", "release", "dev", "pre", "post", "local"] )
NegativeInfinityType
python
jazzband__django-simple-history
simple_history/tests/models.py
{ "start": 15099, "end": 15186 }
class ____(models.Model): code = models.CharField(max_length=15, unique=True)
Country
python
catalyst-team__catalyst
catalyst/metrics/_auc.py
{ "start": 527, "end": 7276 }
class ____(ICallbackLoaderMetric): """AUC metric, Args: compute_on_call: if True, computes and returns metric value during metric call compute_per_class_metrics: boolean flag to compute per-class metrics (default: SETTINGS.compute_per_class_metrics or False). prefix: metric prefix suffix: metric suffix .. warning:: This metric is under API improvement. Examples: .. code-block:: python import torch from catalyst import metrics scores = torch.tensor([ [0.9, 0.1], [0.1, 0.9], ]) targets = torch.tensor([ [1, 0], [0, 1], ]) metric = metrics.AUCMetric() # for efficient statistics storage metric.reset(num_batches=1, num_samples=len(scores)) metric.update(scores, targets) metric.compute() # ( # tensor([1., 1.]) # per class # 1.0, # micro # 1.0, # macro # 1.0 # weighted # ) metric.compute_key_value() # { # 'auc': 1.0, # 'auc/_micro': 1.0, # 'auc/_macro': 1.0, # 'auc/_weighted': 1.0 # 'auc/class_00': 1.0, # 'auc/class_01': 1.0, # } metric.reset(num_batches=1, num_samples=len(scores)) metric(scores, targets) # ( # tensor([1., 1.]) # per class # 1.0, # micro # 1.0, # macro # 1.0 # weighted # ) .. 
code-block:: python import torch from torch.utils.data import DataLoader, TensorDataset from catalyst import dl # sample data num_samples, num_features, num_classes = int(1e4), int(1e1), 4 X = torch.rand(num_samples, num_features) y = (torch.rand(num_samples,) * num_classes).to(torch.int64) # pytorch loaders dataset = TensorDataset(X, y) loader = DataLoader(dataset, batch_size=32, num_workers=1) loaders = {"train": loader, "valid": loader} # model, criterion, optimizer, scheduler model = torch.nn.Linear(num_features, num_classes) criterion = torch.nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters()) scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2]) # model training runner = dl.SupervisedRunner( input_key="features", output_key="logits", target_key="targets", loss_key="loss", ) runner.train( model=model, criterion=criterion, optimizer=optimizer, scheduler=scheduler, loaders=loaders, logdir="./logdir", num_epochs=3, valid_loader="valid", valid_metric="accuracy03", minimize_valid_metric=False, verbose=True, callbacks=[ dl.AccuracyCallback( input_key="logits", target_key="targets", num_classes=num_classes ), dl.PrecisionRecallF1SupportCallback( input_key="logits", target_key="targets", num_classes=num_classes ), dl.AUCCallback(input_key="logits", target_key="targets"), ], ) .. note:: Please follow the `minimal examples`_ sections for more use cases. .. 
_`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505 """ def __init__( self, compute_on_call: bool = True, compute_per_class_metrics: bool = SETTINGS.compute_per_class_metrics, prefix: str = None, suffix: str = None, ): """Init.""" super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix) self.metric_name = f"{self.prefix}auc{self.suffix}" self._ddp_backend = None self.compute_per_class_metrics = compute_per_class_metrics self.scores = [] self.targets = [] self.reset(0, 0) def reset(self, num_batches, num_samples) -> None: """Resets all fields""" self._ddp_backend = get_backend() self.scores = [] self.targets = [] def update(self, scores: torch.Tensor, targets: torch.Tensor) -> None: """Updates metric value with statistics for new data. Args: scores: tensor with scores targets: tensor with targets """ self.scores.append(scores.cpu().detach()) self.targets.append(targets.cpu().detach()) def compute(self) -> Tuple[torch.Tensor, float, float, float]: """Computes the AUC metric based on saved statistics.""" targets = torch.cat(self.targets) scores = torch.cat(self.scores) # ddp hotfix, could be done better # but metric must handle DDP on it's own if self._ddp_backend == "xla": # if you have "RuntimeError: Aborted: Session XXX is not found" here # please, ask Google for a more powerful TPU setup ;) device = get_device() scores = xm.all_gather(scores.to(device)).cpu().detach() targets = xm.all_gather(targets.to(device)).cpu().detach() elif self._ddp_backend == "ddp": scores = torch.cat(all_gather(scores)) targets = torch.cat(all_gather(targets)) scores, targets, _, _ = process_multilabel_components( outputs=scores, targets=targets ) per_class = auc(scores=scores, targets=targets) micro = binary_auc(scores=scores.view(-1), targets=targets.view(-1))[0] macro = per_class.mean().item() weights = targets.sum(axis=0) / len(targets) weighted = (per_class * weights).sum().item() if 
self.compute_per_class_metrics: return per_class, micro, macro, weighted else: return [], micro, macro, weighted def compute_key_value(self) -> Dict[str, float]: """Computes the AUC metric and returns key-value results.""" per_class_auc, micro_auc, macro_auc, weighted_auc = self.compute() output = { f"{self.metric_name}/class_{i:02d}": value.item() for i, value in enumerate(per_class_auc) } output[f"{self.metric_name}/_micro"] = micro_auc output[self.metric_name] = macro_auc output[f"{self.metric_name}/_macro"] = macro_auc output[f"{self.metric_name}/_weighted"] = weighted_auc return output __all__ = ["AUCMetric"]
AUCMetric
python
bokeh__bokeh
src/bokeh/models/grids.py
{ "start": 1825, "end": 4520 }
class ____(GuideRenderer): ''' Display horizontal or vertical grid lines at locations given by a supplied ``Ticker``. ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) dimension = Int(0, help=""" Which dimension the Axis Grid lines will intersect. The x-axis is dimension 0 (vertical Grid lines) and the y-axis is dimension 1 (horizontal Grid lines). """) bounds = Either(Auto, Tuple(Float, Float), help=""" Bounds for the rendered grid lines. By default, a grid will look for a corresponding axis to ask for bounds. If one cannot be found, the grid will span the entire visible range. """) cross_bounds = Either(Auto, Tuple(Float, Float), help=""" Bounds for the rendered grid lines in the orthogonal direction. By default, a grid will span the entire visible range. """) axis = Nullable(Instance(Axis), help=""" An Axis to delegate ticking to. If the ticker property is None, then the Grid will use the ticker on the specified axis for computing where to draw grid lines. Otherwise, it ticker is not None, it will take precedence over any Axis. """) ticker = Nullable(Instance(Ticker), help=""" A Ticker to use for computing locations for the Grid lines. """).accepts(Seq(Float), lambda ticks: FixedTicker(ticks=ticks)) grid_props = Include(ScalarLineProps, prefix="grid", help=""" The {prop} of the Grid lines. """) grid_line_color = Override(default='#e5e5e5') minor_grid_props = Include(ScalarLineProps, prefix="minor_grid", help=""" The {prop} of the minor Grid lines. """) minor_grid_line_color = Override(default=None) band_fill_props = Include(ScalarFillProps, prefix="band", help=""" The {prop} of alternating bands between Grid lines. """) band_fill_alpha = Override(default=0) band_fill_color = Override(default=None) band_hatch_props = Include(ScalarHatchProps, prefix="band", help=""" The {prop} of alternating bands between Grid lines. 
""") level = Override(default="underlay") #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
Grid
python
astropy__astropy
astropy/extern/configobj/validate.py
{ "start": 13328, "end": 13718 }
class ____(VdtValueError): """The value supplied was of the correct type, but was too big.""" def __init__(self, value): """ >>> raise VdtValueTooBigError('1') Traceback (most recent call last): VdtValueTooBigError: the value "1" is too big. """ ValidateError.__init__(self, 'the value "%s" is too big.' % (value,))
VdtValueTooBigError
python
ray-project__ray
doc/source/_ext/queryparamrefs.py
{ "start": 867, "end": 4145 }
class ____(SphinxDirective): """Sphinx directive to insert a reference with query parameters.""" required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True has_content = True option_spec = { "parameters": directives.unchanged_required, "classes": directives.class_option, "ref-type": ( lambda choice: directives.choice(choice, ["any", "ref", "doc", "myst"]) ), } def run(self): ref_type = self.options.get("ref-type", "any") content = self.content.data reftarget = directives.uri(self.arguments[0]) return [ URLQueryParamRefNode( { "docname": self.env.docname, "parameters": self.options.get("parameters", None), "classes": self.options.get("classes", []), "reftarget": reftarget, "refdocname": self.env.docname, "refdomain": "std" if ref_type in {"ref", "doc"} else "", "reftype": ref_type, "refexplicit": content if content else reftarget, "refwarn": True, } ) ] def on_doctree_resolved(app, doctree, docname): """Replace URLQueryParamRefNode instances with real references. Any text that lives inside a URLQueryParamRefNode is parsed as usual. Args: app: Sphinx application doctree: Doctree which has just been resolved docname: Name of the document containing the reference nodes """ parser = Parser() for node in doctree.traverse(URLQueryParamRefNode): tmp_node = utils.new_document( "Content nested under URLQueryParamRefNode", frontend.OptionParser(components=[Parser]).get_default_values(), ) text = "\n".join(node.rawsource["refexplicit"]) # Parse all child RST as usual, then append any parsed nodes to the # reference node. 
parser.parse(text, tmp_node) ref_node = nodes.reference( rawsource=text, text="", ) for child in tmp_node.children: ref_node.append(child) # Pass all URLQueryParamRefNode attributes to ref node; # possibly not necessary for key, value in node.rawsource.items(): ref_node[key] = value # Need to update refuri of ref node to include URL query parameters ref_node["refuri"] = ( app.builder.get_relative_uri( docname, node.rawsource["reftarget"], ) + node.rawsource["parameters"] ) # Need to wrap the node in a paragraph for Sphinx to build wrapper = WrapperNode() wrapper["class"] = "query-param-ref-wrapper" wrapper += ref_node node.replace_self([wrapper]) def setup(app): app.add_directive("query-param-ref", URLQueryParamRefDirective) app.connect("doctree-resolved", on_doctree_resolved) app.add_node(WrapperNode, html=(WrapperNode.visit, WrapperNode.depart)) return { "version": "0.1", "parallel_read_safe": True, "parallel_write_safe": True, }
URLQueryParamRefDirective
python
huggingface__transformers
src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py
{ "start": 121781, "end": 122362 }
class ____(nn.Module): def __init__( self, in_channels, out_channels, kernel_size, dilation, ): super().__init__() self.conv = nn.Conv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, dilation=dilation, padding="same", padding_mode="reflect", ) self.activation = nn.ReLU() def forward(self, hidden_states: torch.Tensor): return self.activation(self.conv(hidden_states))
TimeDelayNetBlock
python
kubernetes-client__python
kubernetes/client/models/v1_resource_quota_spec.py
{ "start": 383, "end": 5460 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'hard': 'dict(str, str)', 'scope_selector': 'V1ScopeSelector', 'scopes': 'list[str]' } attribute_map = { 'hard': 'hard', 'scope_selector': 'scopeSelector', 'scopes': 'scopes' } def __init__(self, hard=None, scope_selector=None, scopes=None, local_vars_configuration=None): # noqa: E501 """V1ResourceQuotaSpec - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._hard = None self._scope_selector = None self._scopes = None self.discriminator = None if hard is not None: self.hard = hard if scope_selector is not None: self.scope_selector = scope_selector if scopes is not None: self.scopes = scopes @property def hard(self): """Gets the hard of this V1ResourceQuotaSpec. # noqa: E501 hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ # noqa: E501 :return: The hard of this V1ResourceQuotaSpec. # noqa: E501 :rtype: dict(str, str) """ return self._hard @hard.setter def hard(self, hard): """Sets the hard of this V1ResourceQuotaSpec. hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ # noqa: E501 :param hard: The hard of this V1ResourceQuotaSpec. # noqa: E501 :type: dict(str, str) """ self._hard = hard @property def scope_selector(self): """Gets the scope_selector of this V1ResourceQuotaSpec. # noqa: E501 :return: The scope_selector of this V1ResourceQuotaSpec. 
# noqa: E501 :rtype: V1ScopeSelector """ return self._scope_selector @scope_selector.setter def scope_selector(self, scope_selector): """Sets the scope_selector of this V1ResourceQuotaSpec. :param scope_selector: The scope_selector of this V1ResourceQuotaSpec. # noqa: E501 :type: V1ScopeSelector """ self._scope_selector = scope_selector @property def scopes(self): """Gets the scopes of this V1ResourceQuotaSpec. # noqa: E501 A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects. # noqa: E501 :return: The scopes of this V1ResourceQuotaSpec. # noqa: E501 :rtype: list[str] """ return self._scopes @scopes.setter def scopes(self, scopes): """Sets the scopes of this V1ResourceQuotaSpec. A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects. # noqa: E501 :param scopes: The scopes of this V1ResourceQuotaSpec. # noqa: E501 :type: list[str] """ self._scopes = scopes def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1ResourceQuotaSpec): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, 
V1ResourceQuotaSpec): return True return self.to_dict() != other.to_dict()
V1ResourceQuotaSpec
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/simple_web/base.py
{ "start": 271, "end": 3071 }
class ____(BasePydanticReader): """ Simple web page reader. Reads pages from the web. Args: html_to_text (bool): Whether to convert HTML to text. Requires `html2text` package. metadata_fn (Optional[Callable[[str], Dict]]): A function that takes in a URL and returns a dictionary of metadata. Default is None. """ is_remote: bool = True html_to_text: bool _metadata_fn: Optional[Callable[[str], Dict]] = PrivateAttr() _timeout: Optional[int] = PrivateAttr() _fail_on_error: bool = PrivateAttr() def __init__( self, html_to_text: bool = False, metadata_fn: Optional[Callable[[str], Dict]] = None, timeout: Optional[int] = 60, fail_on_error: bool = False, ) -> None: """Initialize with parameters.""" try: import html2text # noqa except ImportError: raise ImportError( "`html2text` package not found, please run `pip install html2text`" ) super().__init__(html_to_text=html_to_text) self._metadata_fn = metadata_fn self._timeout = timeout self._fail_on_error = fail_on_error @classmethod def class_name(cls) -> str: return "SimpleWebPageReader" def load_data(self, urls: List[str]) -> List[Document]: """ Load data from the input directory. Args: urls (List[str]): List of URLs to scrape. Returns: List[Document]: List of documents. """ if not isinstance(urls, list): raise ValueError("urls must be a list of strings.") documents = [] for url in urls: try: response = requests.get(url, headers=None, timeout=self._timeout) except Exception: if self._fail_on_error: raise continue response_text = response.text if response.status_code != 200 and self._fail_on_error: raise ValueError( f"Error fetching page from {url}. 
server returned status:" f" {response.status_code} and response {response_text}" ) if self.html_to_text: import html2text response_text = html2text.html2text(response_text) metadata: Dict = {"url": url} if self._metadata_fn is not None: metadata = self._metadata_fn(url) if "url" not in metadata: metadata["url"] = url documents.append( Document(text=response_text, id_=str(uuid.uuid4()), metadata=metadata) ) return documents
SimpleWebPageReader
python
pytorch__pytorch
torch/_inductor/utils.py
{ "start": 55056, "end": 77375 }
class ____(DeferredLineBase): """At end of codegen return `line if `pred_fn() else None`""" def __init__(self, pred_fn: Callable[[], bool], line: str): super().__init__(line) self.pred_fn = pred_fn def __call__(self) -> str | None: return self.line if self.pred_fn() else None def _new_line(self, line: str) -> DelayMaybeLine: return DelayMaybeLine(self.pred_fn, line) @functools.cache def is_big_gpu(index_or_device: Union[int, torch.device] = 0) -> bool: if isinstance(index_or_device, torch.device): device = index_or_device else: device = torch.device(get_gpu_type(), index_or_device) prop = DeviceProperties.create(device) # SM logic is not relevant to ROCm gpus # Arbitrarily skipping the older models if torch.version.hip: assert prop.major is not None if prop.major < 9 or prop.major == 10: log.warning("GPU arch does not support max_autotune_gemm mode usage") return False return True min_sms = 16 if device.type == "xpu" else 68 # 3080 avail_sms = prop.multi_processor_count if avail_sms < min_sms: log.warning( "Not enough SMs to use max_autotune_gemm mode", extra={"min_sms": min_sms, "avail_sms": avail_sms}, ) return False return True @functools.lru_cache def get_max_num_sms() -> int: if torch.xpu.is_available(): return torch.xpu.get_device_properties().gpu_subslice_count return torch.cuda.get_device_properties("cuda").multi_processor_count @functools.lru_cache def using_b200() -> bool: """Returns true if the device is a NVIDIA B200, otherwise returns false.""" if not torch.cuda.is_available(): return False # compute capability 10.0 or 10.0a is NVIDIA B200 device_properties = torch.cuda.get_device_properties(torch.cuda.current_device()) return device_properties.major == 10 def get_num_sms() -> int: """Handle experimental carveout if set otherwise return hardware SM count""" # TODO we need to properly guard on this global if torch.xpu.is_available(): return get_max_num_sms() carveout = torch._C._get_sm_carveout_experimental() return get_max_num_sms() - (carveout if 
carveout is not None else 0) def get_tma_workspace_arg( num_tma_descriptors: int, device: torch.device, num_programs: Optional[int] = None, ) -> WorkspaceArg: """Builds and returns a WorkspaceArg for the device side TMA workspace buffer.""" from .codegen.common import WorkspaceArg, WorkspaceZeroMode if num_programs is None: num_programs = get_num_sms() zero_mode = WorkspaceZeroMode.from_bool(False) size = num_programs * num_tma_descriptors * TMA_DESCRIPTOR_SIZE return WorkspaceArg( count=size, zero_mode=zero_mode, device=device, outer_name=WorkspaceArg.unique_name(), ) def _use_template_for_gpu( layout: Layout, allowed_layout_dtypes: list[torch.dtype] ) -> bool: if layout.dtype not in allowed_layout_dtypes: log.debug( "Not using template since dtype %s is not in allowed layout dtypes %s", layout.dtype, allowed_layout_dtypes, ) return ( is_gpu(layout.device.type) and layout.dtype in allowed_layout_dtypes and is_big_gpu(layout.device) ) def _use_autotune_backend(backend: str) -> bool: return backend.upper() in [ x.strip() for x in config.max_autotune_gemm_backends.upper().split(",") ] def _use_conv_autotune_backend(backend: str) -> bool: return backend.upper() in [ x.strip() for x in config.max_autotune_conv_backends.upper().split(",") ] def use_triton_template( layout: Layout, *, enable_int32: bool = False, enable_float8: bool = False, check_max_autotune: bool = True, ) -> bool: from .codegen.common import BackendFeature, has_backend_feature layout_dtypes = [torch.float16, torch.bfloat16, torch.float32] if enable_int32: layout_dtypes = [torch.float16, torch.bfloat16, torch.float32, torch.int32] if enable_float8: layout_dtypes.extend([torch.float8_e4m3fn, torch.float8_e5m2]) return ( ( ( is_gpu(layout.device.type) and _use_template_for_gpu(layout, layout_dtypes) ) or (layout.device.type == "cpu" and layout.dtype in layout_dtypes) ) # some callers handle max-autotune checking externally and (config.max_autotune or config.max_autotune_gemm or not check_max_autotune) 
and _use_autotune_backend("TRITON") and has_backend_feature(layout.device, BackendFeature.TRITON_TEMPLATES) ) def can_use_tma( *matrices: IRNode, output_layout: Optional[Layout] = None, add_guards: bool = False ) -> bool: """ Return True iff *all* supplied tensors satisfy the CUDA-12.9 TMA constraints that Triton relies on today. * https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__TENSOR__MEMORY.html A tensor is accepted when: * 2 ≤ rank ≤ 5 * dtype ∈ {FP16, BF16, FP8-E4M3FN} * Every logical size ≥ 2 * Base pointer 16-byte aligned * All "outer" dims have 16-byte aligned strides * The “inner” dim has stride 1 (contiguous) * For FP8 tensors, inner dim ≥ 32 """ from torch.utils._triton import has_triton_tma_device from .virtualized import V def _aligned(expr_bytes: Union[int, sympy.Expr]) -> bool: return V.graph.sizevars.statically_known_multiple_of(expr_bytes, TMA_ALIGNMENT) def _is_tma_compatible_layout(layout: Optional[Layout]) -> bool: if layout is None: return True sizes = layout.size strides = layout.stride dtype = layout.dtype # Verify the output is 16-byte aligned if not _aligned(layout.offset): return False return _is_tma_compatible(sizes, strides, dtype, allow_float32=True) def _is_tma_compatible_matrix(m: IRNode) -> bool: sizes = m.get_size() strides = m.get_stride() dtype = m.get_dtype() # Base pointer 16-byte aligned if m.get_name() in V.graph.unaligned_buffers: return False return _is_tma_compatible(sizes, strides, dtype, allow_float32=False) def _is_tma_compatible( sizes: Sequence[sympy.Expr], strides: Sequence[_IntLike], dtype: torch.dtype, allow_float32: bool, ) -> bool: rank = len(sizes) itemsize = dtype.itemsize # 2 ≤ rank ≤ 5 if rank < 2 or rank > 5: return False # dtype ∈ {FP16, BF16, FP8-E4M3FN} if dtype not in (torch.float16, torch.bfloat16, torch.float8_e4m3fn) and ( not allow_float32 or dtype != torch.float32 ): return False if add_guards: sizes_i = V.graph.sizevars.guard_int_seq(sizes) strides_i = 
V.graph.sizevars.guard_int_seq(strides) else: sizes_i = [V.graph.sizevars.symbolic_hint(s) for s in sizes] strides_i = [V.graph.sizevars.symbolic_hint(st) for st in strides] # Every logical size ≥ 2 if any(not V.graph.sizevars.statically_known_geq(s, 2) for s in sizes_i): return False # Find the single contiguous (“inner”) dim inner = [ i for i, st in enumerate(strides_i) if V.graph.sizevars.statically_known_equals(st, 1) ] if len(inner) != 1: return False inner_idx = inner[0] # All "outer" dims must have 16-byte aligned strides for i, st in enumerate(strides_i): if i == inner_idx: continue if not _aligned(st * itemsize): return False # Inner dim byte width must still be a multiple of 16 B inner_dim = sizes_i[inner_idx] if not _aligned(inner_dim * itemsize): return False # FP8 special case: inner ≥ 32 if dtype == torch.float8_e4m3fn and not V.graph.sizevars.statically_known_geq( inner_dim, 32 ): return False return True return ( has_triton_tma_device() and all(_is_tma_compatible_matrix(m) for m in matrices) and _is_tma_compatible_layout(output_layout) ) def use_triton_tma_template( *matrices: IRNode, output_layout: Layout, add_guards: bool = False ) -> bool: layout = output_layout if config.triton.enable_template_tma_store else None return ( all(len(m.get_size()) == 2 for m in matrices) and can_use_tma(*matrices, output_layout=layout, add_guards=add_guards) and config.triton.enable_persistent_tma_matmul ) def use_triton_blackwell_tma_template( *matrices: IRNode, output_layout: Layout, add_guards: bool = False ) -> bool: if not use_triton_tma_template( *matrices, output_layout=output_layout, add_guards=add_guards ): return False from torch.utils._triton import has_triton_tensor_descriptor_host_tma from .codegen.cuda.cuda_env import is_datacenter_blackwell_arch # Blackwell template require the tensor descriptor API, not the experimental API. 
return has_triton_tensor_descriptor_host_tma() and is_datacenter_blackwell_arch() @functools.lru_cache(maxsize=1) def ensure_cute_available() -> bool: """Check if CuTeDSL is importable; cache the result for reuse. Call ensure_cute_available.cache_clear() after installing CuTeDSL in the same interpreter to retry the import. """ try: return importlib.util.find_spec("cutlass.cute") is not None except ImportError: return False def use_blackwell_cutedsl_grouped_mm( mat_a: Any, mat_b: Any, layout: Layout, a_is_2d: bool, b_is_2d: bool, offs: Optional[Any], bias: Optional[Any], scale_result: Optional[Any], ) -> bool: """ Returns True if we can use the blackwell kernel for grouped mm. Required conditions: 1. CuTeDSL backend is enabled 2. CuTeDSL is available 3. We are on a blackwell arch 4. The dtype is bf16 5. Max autotune or max autotune gemm is enabled 6. A, B, and the output are 16B aligned 7. We are not using dynamic shapes 8. A is 2d 9. B is 3d 10. Offsets are provided 11. Bias and Scale are not provided """ if not ensure_cute_available(): return False if not _use_autotune_backend("CUTEDSL"): return False from .codegen.cuda.cuda_env import is_datacenter_blackwell_arch if not is_gpu(layout.device.type): return False if not is_datacenter_blackwell_arch(): return False layout_dtypes = [torch.bfloat16] if not _use_template_for_gpu(layout, layout_dtypes): return False if not (config.max_autotune or config.max_autotune_gemm): return False # Checks for 16B ptr and stride alignment if not can_use_tma(mat_a, mat_b, output_layout=layout): return False if any(is_dynamic(x) for x in [mat_a, mat_b]): return False if not a_is_2d or b_is_2d: return False if offs is None: return False if bias is not None or scale_result is not None: return False return True def use_cutlass_template(layout: Layout, m: int, n: int, k: int) -> bool: from .virtualized import V gemm_size = V.graph.sizevars.size_hint(m * n * k, fallback=-1) if gemm_size <= 0 or gemm_size < 
config.cuda.cutlass_backend_min_gemm_size: return False from .codegen.cuda.cutlass_utils import try_import_cutlass # Do not use cutlass template on ROCm if torch.version.hip: return False # output dtype # FP32 not supported: https://github.com/pytorch/pytorch/issues/145952 layout_dtypes = [torch.float16, torch.bfloat16, torch.int32] res = ( _use_template_for_gpu(layout, layout_dtypes) and (config.max_autotune or config.max_autotune_gemm) and _use_autotune_backend("CUTLASS") ) if res: if not try_import_cutlass(): log.warning( "Failed to import CUTLASS lib. Please check whether " "_inductor.config.cuda.cutlass_dir %s is set correctly. " "Skipping CUTLASS backend for now.", config.cuda.cutlass_dir, ) return False return res def _use_cutlass_for_op(op_name: str) -> bool: """Check if CUTLASS should be used for the given operation.""" enabled_ops = config.cuda.cutlass_enabled_ops.upper() if enabled_ops == "ALL": return True return op_name.upper() in [x.strip() for x in enabled_ops.split(",")] _IntLike: TypeAlias = Union[int, sympy.Expr] @functools.cache def use_decompose_k_choice( m: _IntLike, n: _IntLike, k: _IntLike, threshold_multiple: int = 1 ) -> bool: from torch._inductor.virtualized import V decompose_k_threshold = config.triton.decompose_k_threshold * threshold_multiple return ( not torch.version.hip and V.graph.sizevars.statically_known_true( sympy.And( sympy.Ge(k, decompose_k_threshold * m), sympy.Ge(k, decompose_k_threshold * n), ) ) and not V.graph.aot_mode # TODO: Support AOTI for decomposeK and not V.graph.cpp_wrapper and config.triton.num_decompose_k_splits > 0 ) @functools.cache def use_contiguous(m: _IntLike, n: _IntLike, k: _IntLike) -> bool: """ Check if we should use the contiguous subgraph transform. This transform makes the second matrix contiguous before the matmul. 
""" contiguous_threshold = config.rocm.contiguous_threshold # Similar conditions to decompose_k but for contiguous transform from torch._inductor.virtualized import V return ( bool(torch.version.hip) # Only relevant on AMD and V.graph.sizevars.statically_known_true( sympy.And( sympy.Ge(k, contiguous_threshold * m), sympy.Ge(k, contiguous_threshold * n), ) ) and not V.graph.aot_mode and not V.graph.cpp_wrapper ) @functools.cache def get_k_splits(m: _IntLike, n: _IntLike, k: _IntLike) -> list[int]: # To limit compile time k_splits_limit = config.triton.num_decompose_k_splits # Hand-tuned default_k_splits = [16, 32, 64, 128, 256] # If k is a sympy expression, we can't do any splitting if isinstance(k, sympy.Expr) and not k.is_number: return default_k_splits elif k_splits_limit == 0: return [] if (isinstance(m, sympy.Expr) and not m.is_number) or ( isinstance(n, sympy.Expr) and not n.is_number ): max_k_split = 256 else: max_k_split = min(k // m, k // n) min_k_split = 2 # Get all divisors of k, k has to be divisible by kPart divisors = sympy.divisors(k) divisors = [ divisor for divisor in divisors if divisor <= max_k_split and divisor >= min_k_split ] pow_of_2_divisors, mul_of_32_divisors, rest_of_splits = [], [], [] for d in divisors: kPart = k // d # Smaller than 128 might not even fit in a single tile, BLOCK_K can be 128 if kPart < 128: continue # Power of 2 divisors are best performing, conform to hardware if (kPart & kPart - 1) == 0 and kPart >= 128: pow_of_2_divisors.append(d) # Else check if creates a multiple of 32 elif kPart % 32 == 0: mul_of_32_divisors.append(d) # otherwise, take the smallest values else: rest_of_splits.append(d) if config.max_autotune_gemm_search_space == "EXHAUSTIVE": return pow_of_2_divisors + mul_of_32_divisors + rest_of_splits best_splits = pow_of_2_divisors + mul_of_32_divisors + rest_of_splits # Otherwise, conform results to k_splits_limit return best_splits[:k_splits_limit] @functools.cache def _rocm_native_device_arch_name(device: 
str) -> str: return torch.cuda.get_device_properties(device).gcnArchName @functools.cache def try_import_ck_lib() -> tuple[ Optional[str], Callable[[], list[Any]], Callable[[], list[Any]], type[Any] ]: try: import ck4inductor # type: ignore[import] from ck4inductor.universal_gemm.gen_instances import ( # type: ignore[import] gen_ops_library, gen_ops_preselected, ) from ck4inductor.universal_gemm.op import ( # type: ignore[import] CKGemmOperation, ) package_dirname = os.path.dirname(ck4inductor.__file__) except ImportError: def gen_ops_library() -> list[Any]: return [] def gen_ops_preselected() -> list[Any]: return [] class CKGemmOperation: # type: ignore[no-redef] pass package_dirname = None return package_dirname, gen_ops_library, gen_ops_preselected, CKGemmOperation def use_ck_template(layout: Layout) -> bool: # config knobs check 1 if not (config.max_autotune or config.max_autotune_gemm): return False # platform check if not torch.version.hip: return False # tensors must be on GPU if layout.device.type != "cuda": return False # hardware check # if config arch list is not specified, get the native arch from the device properties native_arch = _rocm_native_device_arch_name(layout.device) requested_archs = {k.split(":")[0]: k for k in config.rocm.arch} or { native_arch.split(":")[0]: native_arch } requested_supported_archs = [ requested_archs[k] for k in requested_archs.keys() & config.rocm.ck_supported_arch ] if not requested_supported_archs: return False # supported input dtypes if layout.dtype not in [torch.float16, torch.bfloat16, torch.float32]: return False ck_package_dirname, _, _, _ = try_import_ck_lib() if not ck_package_dirname: log.warning("Please pip install Composable Kernel package") return False config.rocm.ck_dir = ck_package_dirname return True def use_ck_gemm_template(layout: Layout, m: int, n: int, k: int) -> bool: from .virtualized import V return ( _use_autotune_backend("CK") and use_ck_template(layout) and V.graph.sizevars.size_hint(m * n * k, 
fallback=-1) > 0 ) def use_ck_tile_gemm_template(layout: Layout, m: int, n: int, k: int) -> bool: from .virtualized import V return ( _use_autotune_backend("CKTILE") and use_ck_template(layout) and V.graph.sizevars.size_hint(m * n * k, fallback=-1) > 0 ) def use_ck_conv_template(layout: Layout) -> bool: return _use_conv_autotune_backend("CK") and use_ck_template(layout) def _use_template_for_cpu(layout: Layout) -> bool: return ( config.max_autotune or config.max_autotune_gemm ) and layout.device.type == "cpu" def use_cpp_bmm_template( layout: Layout, mat1: Union[ReinterpretView, Buffer], mat2: IRNode ) -> bool: from .ir import Layout assert isinstance(mat1.layout, Layout) # In certain scenarios, such as when the first stride is 0, the entire tensor may not be contiguous. # But the 2D matrix within each batch can still be contiguous, allowing us to apply max autotune. # So here we specifically check for contiguity within the 2D matrix of each batch. mat1_size = mat1.layout.size mat1_stride = mat1.layout.stride mat1_each_batch_is_contiguous = ( _use_template_for_cpu(layout) and mat1.get_dtype() == torch.float32 and (len(mat1_size) == 3) and (len(mat1_stride) == 3) and (mat1_stride[1] == mat1_size[2]) and (mat1_stride[2] == 1) ) return use_cpp_gemm_template(layout, mat1, mat2, require_constant_mat2=False) and ( mat1.layout.is_contiguous() or mat1_each_batch_is_contiguous ) def use_cpp_gemm_template( layout: Layout, mat1: IRNode, mat2: IRNode, mat2_transposed: bool = False, require_constant_mat2: bool = True, is_woq_int4: bool = False, q_group_size: Optional[int] = None, ) -> bool: from . 
import ir from .codegen.cpp_micro_gemm import create_micro_gemm from .codegen.cpp_utils import get_gemm_template_output_and_compute_dtype from .kernel.mm_common import mm_args if not _use_template_for_cpu(layout) or not _use_autotune_backend("CPP"): return False if not config.cpp.weight_prepack: return False int8_gemm = mat1.get_dtype() in [torch.uint8, torch.int8] layout_dtypes = [torch.float32, torch.bfloat16, torch.half, torch.uint8] m, n, k, layout, mat1, mat2 = mm_args( mat1, mat2, out_dtype=layout.dtype if int8_gemm else None, mat2_transposed=mat2_transposed, use_4x2_dim=is_woq_int4, ) # TODO(jgong5): support dynamic shapes for n or k if has_free_symbols((n, k)): return False if isinstance(mat2, ir.BaseView): mat2 = mat2.unwrap_view() output_dtype, _ = get_gemm_template_output_and_compute_dtype(mat1.get_dtype()) micro_gemm = create_micro_gemm( "micro_gemm", m, n, k, input_dtype=mat1.get_dtype(), input2_dtype=mat2.get_dtype(), output_dtype=output_dtype, num_threads=parallel_num_threads(), use_ref=not is_woq_int4, q_group_size=q_group_size, ) def is_last_dim_stride1(x: IRNode) -> bool: x.freeze_layout() return x.get_stride()[-1] == 1 return ( layout.dtype in layout_dtypes and micro_gemm is not None and is_last_dim_stride1(mat1) # TODO(jgong5): support transposed input and isinstance(mat2, ir.StorageBox) and (mat2.is_module_buffer() or not require_constant_mat2) ) def use_aten_gemm_kernels() -> bool: return not ( config.max_autotune or config.max_autotune_gemm ) or _use_autotune_backend("ATEN")
DelayMaybeLine
python
doocs__leetcode
solution/1200-1299/1268.Search Suggestions System/Solution.py
{ "start": 0, "end": 733 }
class ____: def __init__(self): self.children: List[Union[Trie, None]] = [None] * 26 self.v: List[int] = [] def insert(self, w, i): node = self for c in w: idx = ord(c) - ord('a') if node.children[idx] is None: node.children[idx] = Trie() node = node.children[idx] if len(node.v) < 3: node.v.append(i) def search(self, w): node = self ans = [[] for _ in range(len(w))] for i, c in enumerate(w): idx = ord(c) - ord('a') if node.children[idx] is None: break node = node.children[idx] ans[i] = node.v return ans
Trie
python
huggingface__transformers
tests/models/oneformer/test_processing_oneformer.py
{ "start": 1948, "end": 6984 }
class ____: def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=False, ignore_index=255, max_seq_length=77, task_seq_length=77, model_repo="shi-labs/oneformer_ade20k_swin_tiny", class_info_file="ade20k_panoptic.json", num_text=10, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.max_seq_length = max_seq_length self.task_seq_length = task_seq_length self.class_info_file = class_info_file self.metadata = prepare_metadata(class_info_file) self.num_text = num_text self.model_repo = model_repo # for the post_process_functions self.batch_size = 2 self.num_queries = 10 self.num_classes = 10 self.height = 3 self.width = 4 self.num_labels = num_labels self.do_reduce_labels = do_reduce_labels self.ignore_index = ignore_index def prepare_processor_dict(self): image_processor_dict = { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } image_processor = OneFormerImageProcessor(**image_processor_dict) tokenizer = CLIPTokenizer.from_pretrained(self.model_repo) return { "image_processor": image_processor, "tokenizer": tokenizer, "max_seq_length": self.max_seq_length, "task_seq_length": self.task_seq_length, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the 
expected height and width when providing images to OneFormerProcessor, assuming do_resize is set to True with a scalar size. It also provides the expected sequence length for the task_inputs and text_list_input. """ if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size elif isinstance(image, np.ndarray): h, w = image.shape[0], image.shape[1] else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width, expected_sequence_length = self.get_expected_values([image]) expected_values.append((expected_height, expected_width, expected_sequence_length)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] expected_sequence_length = self.max_seq_length return expected_height, expected_width, expected_sequence_length def get_fake_oneformer_outputs(self): return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)), ) def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision
OneFormerProcessorTester
python
django__django
tests/queries/models.py
{ "start": 4329, "end": 4451 }
class ____(models.Model): y = models.ForeignKey("LoopY", models.CASCADE) class Meta: ordering = ["y"]
LoopX
python
joke2k__faker
faker/providers/phone_number/en_US/__init__.py
{ "start": 49, "end": 1610 }
class ____(PhoneNumberProvider): formats = ( # Standard 10-digit phone number formats "$##$######", "$##$######", "$##-$##-####", "$##-$##-####", # Optional 10-digit local phone number format "($##)$##-####", "($##)$##-####", # Non-standard 10-digit phone number format "$##.$##.####", "$##.$##.####", # Standard 10-digit phone number format with extensions "$##-$##-####x###", "$##-$##-####x####", "$##-$##-####x#####", # Optional 10-digit local phone number format with extensions "($##)$##-####x###", "($##)$##-####x####", "($##)$##-####x#####", # Non-standard 10-digit phone number format with extensions "$##.$##.####x###", "$##.$##.####x####", "$##.$##.####x#####", # Standard 11-digit phone number format "+1-$##-$##-####", "001-$##-$##-####", # Standard 11-digit phone number format with extensions "+1-$##-$##-####x###", "+1-$##-$##-####x####", "+1-$##-$##-####x#####", "001-$##-$##-####x###", "001-$##-$##-####x####", "001-$##-$##-####x#####", ) basic_formats = ( # basic 10-digit phone number format with no extensions "$##$######", "$##-$##-####", "($##)$##-####", ) def basic_phone_number(self) -> str: pattern: str = self.random_element(self.basic_formats) return self.numerify(self.generator.parse(pattern))
Provider
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/selectable.py
{ "start": 6103, "end": 6278 }
class ____(BindParameter[int]): inherit_cache = True @property def _limit_offset_value(self) -> Optional[int]: return self.effective_value
_OffsetLimitParam
python
matplotlib__matplotlib
galleries/examples/event_handling/looking_glass.py
{ "start": 890, "end": 1863 }
class ____: def __init__(self): fig.canvas.mpl_connect('button_press_event', self.on_press) fig.canvas.mpl_connect('button_release_event', self.on_release) fig.canvas.mpl_connect('motion_notify_event', self.on_move) self.x0, self.y0 = circ.center self.pressevent = None def on_press(self, event): if event.inaxes != ax: return if not circ.contains(event)[0]: return self.pressevent = event def on_release(self, event): self.pressevent = None self.x0, self.y0 = circ.center def on_move(self, event): if self.pressevent is None or event.inaxes != self.pressevent.inaxes: return dx = event.xdata - self.pressevent.xdata dy = event.ydata - self.pressevent.ydata circ.center = self.x0 + dx, self.y0 + dy line.set_clip_path(circ) fig.canvas.draw() handler = EventHandler() plt.show()
EventHandler
python
apache__airflow
providers/arangodb/src/airflow/providers/arangodb/hooks/arangodb.py
{ "start": 1411, "end": 7831 }
class ____(BaseHook): """ Interact with ArangoDB. Performs a connection to ArangoDB and retrieves client. :param arangodb_conn_id: Reference to :ref:`ArangoDB connection id <howto/connection:arangodb>`. """ conn_name_attr = "arangodb_conn_id" default_conn_name = "arangodb_default" conn_type = "arangodb" hook_name = "ArangoDB" def __init__(self, arangodb_conn_id: str = default_conn_name, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self.arangodb_conn_id = arangodb_conn_id @cached_property def client(self) -> ArangoDBClient: """Initiates a new ArangoDB connection (cached).""" return ArangoDBClient(hosts=self.hosts) @cached_property def db_conn(self) -> StandardDatabase: """Connect to an ArangoDB database and return the database API wrapper.""" return self.client.db(name=self.database, username=self.username, password=self.password) @cached_property def _conn(self) -> Connection: return self.get_connection(self.arangodb_conn_id) # type: ignore[return-value] @property def hosts(self) -> list[str]: if not self._conn.host: raise AirflowException(f"No ArangoDB Host(s) provided in connection: {self.arangodb_conn_id!r}.") return self._conn.host.split(",") @property def database(self) -> str: if not self._conn.schema: raise AirflowException(f"No ArangoDB Database provided in connection: {self.arangodb_conn_id!r}.") return self._conn.schema @property def username(self) -> str: if not self._conn.login: raise AirflowException(f"No ArangoDB Username provided in connection: {self.arangodb_conn_id!r}.") return self._conn.login @property def password(self) -> str: return self._conn.password or "" def get_conn(self) -> ArangoDBClient: """Initiate a new ArangoDB connection (cached).""" return self.client def query(self, query, **kwargs) -> Cursor: """ Create an ArangoDB session and execute the AQL query in the session. 
:param query: AQL query """ try: if self.db_conn: result = self.db_conn.aql.execute(query, **kwargs) if not isinstance(result, Cursor): raise AirflowException("Failed to execute AQLQuery, expected result to be of type Cursor") return result raise AirflowException( f"Failed to execute AQLQuery, error connecting to database: {self.database}" ) except AQLQueryExecuteError as error: raise AirflowException(f"Failed to execute AQLQuery, error: {error}") def create_collection(self, name): if not self.db_conn.has_collection(name): self.log.info("Collection '%s' does not exist. Creating a new collection.", name) self.db_conn.create_collection(name) return True self.log.info("Collection already exists: %s", name) return False def delete_collection(self, name): if self.db_conn.has_collection(name): self.db_conn.delete_collection(name) return True self.log.info("Collection does not exist: %s", name) return False def create_database(self, name): if not self.db_conn.has_database(name): self.db_conn.create_database(name) return True self.log.info("Database already exists: %s", name) return False def create_graph(self, name): if not self.db_conn.has_graph(name): self.db_conn.create_graph(name) return True self.log.info("Graph already exists: %s", name) return False def insert_documents(self, collection_name, documents): if not self.db_conn.has_collection(collection_name): self.create_collection(collection_name) try: collection = self.db_conn.collection(collection_name) collection.insert_many(documents, silent=True) except DocumentInsertError as e: self.log.error("Failed to insert documents: %s", str(e)) raise def update_documents(self, collection_name, documents): if not self.db_conn.has_collection(collection_name): raise AirflowException(f"Collection does not exist: {collection_name}") try: collection = self.db_conn.collection(collection_name) collection.update_many(documents, silent=True) except DocumentUpdateError as e: self.log.error("Failed to update documents: %s", str(e)) 
raise def replace_documents(self, collection_name, documents): if not self.db_conn.has_collection(collection_name): raise AirflowException(f"Collection does not exist: {collection_name}") try: collection = self.db_conn.collection(collection_name) collection.replace_many(documents, silent=True) except DocumentReplaceError as e: self.log.error("Failed to replace documents: %s", str(e)) raise def delete_documents(self, collection_name, documents): if not self.db_conn.has_collection(collection_name): raise AirflowException(f"Collection does not exist: {collection_name}") try: collection = self.db_conn.collection(collection_name) collection.delete_many(documents, silent=True) except DocumentDeleteError as e: self.log.error("Failed to delete documents: %s", str(e)) raise @classmethod def get_ui_field_behaviour(cls) -> dict[str, Any]: return { "hidden_fields": ["port", "extra"], "relabeling": { "host": "ArangoDB Host URL or comma separated list of URLs (coordinators in a cluster)", "schema": "ArangoDB Database", "login": "ArangoDB Username", "password": "ArangoDB Password", }, "placeholders": { "host": 'eg."http://127.0.0.1:8529" or "http://127.0.0.1:8529,http://127.0.0.1:8530"' " (coordinators in a cluster)", "schema": "_system", "login": "root", "password": "password", }, }
ArangoDBHook
python
readthedocs__readthedocs.org
readthedocs/rtd_tests/tests/test_api_permissions.py
{ "start": 157, "end": 2337 }
class ____(TestCase): def get_request(self, method, is_admin): request = Mock() request.method = method request.user.is_staff = is_admin return request def assertAllow(self, handler, method, is_admin, obj=None): if obj is None: self.assertTrue( handler.has_permission( request=self.get_request(method, is_admin=is_admin), view=None, ) ) else: self.assertTrue( handler.has_object_permission( request=self.get_request(method, is_admin=is_admin), view=None, obj=obj, ) ) def assertDisallow(self, handler, method, is_admin, obj=None): if obj is None: self.assertFalse( handler.has_permission( request=self.get_request(method, is_admin=is_admin), view=None, ) ) else: self.assertFalse( handler.has_object_permission( request=self.get_request(method, is_admin=is_admin), view=None, obj=obj, ) ) def test_read_only_permission(self): handler = ReadOnlyPermission() assertAllow = partial(self.assertAllow, handler, obj=None) assertDisallow = partial(self.assertDisallow, handler, obj=None) assertAllow("GET", is_admin=False) assertAllow("HEAD", is_admin=False) assertAllow("OPTIONS", is_admin=False) assertDisallow("DELETE", is_admin=False) assertDisallow("PATCH", is_admin=False) assertDisallow("POST", is_admin=False) assertDisallow("PUT", is_admin=False) assertAllow("GET", is_admin=True) assertAllow("HEAD", is_admin=True) assertAllow("OPTIONS", is_admin=True) assertDisallow("DELETE", is_admin=True) assertDisallow("PATCH", is_admin=True) assertDisallow("POST", is_admin=True) assertDisallow("PUT", is_admin=True)
APIRestrictedPermissionTests
python
urllib3__urllib3
test/with_dummyserver/test_connectionpool.py
{ "start": 1244, "end": 7696 }
class ____(SocketDummyServerTestCase): def test_timeout_float(self) -> None: block_event = Event() ready_event = self.start_basic_handler(block_send=block_event, num=2) with HTTPConnectionPool(self.host, self.port, retries=False) as pool: wait_for_socket(ready_event) with pytest.raises(ReadTimeoutError): pool.request("GET", "/", timeout=SHORT_TIMEOUT) block_event.set() # Release block # Shouldn't raise this time wait_for_socket(ready_event) block_event.set() # Pre-release block pool.request("GET", "/", timeout=LONG_TIMEOUT) def test_conn_closed(self) -> None: block_event = Event() self.start_basic_handler(block_send=block_event, num=1) with HTTPConnectionPool( self.host, self.port, timeout=SHORT_TIMEOUT, retries=False ) as pool: conn = pool._get_conn() pool._put_conn(conn) try: with pytest.raises(ReadTimeoutError): pool.urlopen("GET", "/") if not conn.is_closed: with pytest.raises(socket.error): conn.sock.recv(1024) # type: ignore[attr-defined] finally: pool._put_conn(conn) block_event.set() def test_timeout(self) -> None: # Requests should time out when expected block_event = Event() ready_event = self.start_basic_handler(block_send=block_event, num=3) # Pool-global timeout short_timeout = Timeout(read=SHORT_TIMEOUT) with HTTPConnectionPool( self.host, self.port, timeout=short_timeout, retries=False ) as pool: wait_for_socket(ready_event) block_event.clear() with pytest.raises(ReadTimeoutError): pool.request("GET", "/") block_event.set() # Release request # Request-specific timeouts should raise errors with HTTPConnectionPool( self.host, self.port, timeout=short_timeout, retries=False ) as pool: wait_for_socket(ready_event) now = time.perf_counter() with pytest.raises(ReadTimeoutError): pool.request("GET", "/", timeout=LONG_TIMEOUT) delta = time.perf_counter() - now message = "timeout was pool-level SHORT_TIMEOUT rather than request-level LONG_TIMEOUT" if platform.system() == "Windows": # Adjust tolerance for floating-point comparison on Windows to # avoid 
flakiness in CI #3413 assert delta >= (LONG_TIMEOUT - 1e-3), message else: assert delta >= (LONG_TIMEOUT - 1e-5), message block_event.set() # Release request # Timeout passed directly to request should raise a request timeout wait_for_socket(ready_event) with pytest.raises(ReadTimeoutError): pool.request("GET", "/", timeout=SHORT_TIMEOUT) block_event.set() # Release request def test_connect_timeout(self) -> None: url = "/" host, port = TARPIT_HOST, 80 timeout = Timeout(connect=SHORT_TIMEOUT) # Pool-global timeout with HTTPConnectionPool(host, port, timeout=timeout) as pool: conn = pool._get_conn() with pytest.raises(ConnectTimeoutError): pool._make_request(conn, "GET", url) # Retries retries = Retry(connect=0) with pytest.raises(MaxRetryError): pool.request("GET", url, retries=retries) # Request-specific connection timeouts big_timeout = Timeout(read=LONG_TIMEOUT, connect=LONG_TIMEOUT) with HTTPConnectionPool(host, port, timeout=big_timeout, retries=False) as pool: conn = pool._get_conn() with pytest.raises(ConnectTimeoutError): pool._make_request(conn, "GET", url, timeout=timeout) pool._put_conn(conn) with pytest.raises(ConnectTimeoutError): pool.request("GET", url, timeout=timeout) def test_total_applies_connect(self) -> None: host, port = TARPIT_HOST, 80 timeout = Timeout(total=None, connect=SHORT_TIMEOUT) with HTTPConnectionPool(host, port, timeout=timeout) as pool: conn = pool._get_conn() try: with pytest.raises(ConnectTimeoutError): pool._make_request(conn, "GET", "/") finally: conn.close() timeout = Timeout(connect=3, read=5, total=SHORT_TIMEOUT) with HTTPConnectionPool(host, port, timeout=timeout) as pool: conn = pool._get_conn() try: with pytest.raises(ConnectTimeoutError): pool._make_request(conn, "GET", "/") finally: conn.close() def test_total_timeout(self) -> None: block_event = Event() ready_event = self.start_basic_handler(block_send=block_event, num=2) wait_for_socket(ready_event) # This will get the socket to raise an EAGAIN on the read timeout = 
Timeout(connect=3, read=SHORT_TIMEOUT) with HTTPConnectionPool( self.host, self.port, timeout=timeout, retries=False ) as pool: with pytest.raises(ReadTimeoutError): pool.request("GET", "/") block_event.set() wait_for_socket(ready_event) block_event.clear() # The connect should succeed and this should hit the read timeout timeout = Timeout(connect=3, read=5, total=SHORT_TIMEOUT) with HTTPConnectionPool( self.host, self.port, timeout=timeout, retries=False ) as pool: with pytest.raises(ReadTimeoutError): pool.request("GET", "/") def test_create_connection_timeout(self) -> None: self.start_basic_handler(block_send=Event(), num=0) # needed for self.port timeout = Timeout(connect=SHORT_TIMEOUT, total=LONG_TIMEOUT) with HTTPConnectionPool( TARPIT_HOST, self.port, timeout=timeout, retries=False ) as pool: conn = pool._new_conn() with pytest.raises(ConnectTimeoutError): conn.connect()
TestConnectionPoolTimeouts
python
dagster-io__dagster
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/asset.py
{ "start": 504, "end": 689 }
class ____(BaseModel): """Latest asset materialization information.""" timestamp: Optional[float] run_id: Optional[str] partition: Optional[str]
DgApiAssetMaterialization
python
kamyu104__LeetCode-Solutions
Python/satisfiability-of-equality-equations.py
{ "start": 487, "end": 1141 }
class ____(object): def equationsPossible(self, equations): """ :type equations: List[str] :rtype: bool """ union_find = UnionFind(26) for eqn in equations: x = ord(eqn[0]) - ord('a') y = ord(eqn[3]) - ord('a') if eqn[1] == '=': union_find.union_set(x, y) for eqn in equations: x = ord(eqn[0]) - ord('a') y = ord(eqn[3]) - ord('a') if eqn[1] == '!': if union_find.find_set(x) == union_find.find_set(y): return False return True # Time: O(n) # Space: O(1)
Solution
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/contrib/regular_languages/regex_parser.py
{ "start": 1845, "end": 2109 }
class ____(Node): """ Regular expression. """ def __init__(self, regex: str) -> None: re.compile(regex) # Validate self.regex = regex def __repr__(self) -> str: return f"{self.__class__.__name__}(/{self.regex}/)"
Regex
python
plotly__plotly.py
plotly/graph_objs/layout/shape/_legendgrouptitle.py
{ "start": 235, "end": 2978 }
class ____(_BaseLayoutHierarchyType): _parent_path_str = "layout.shape" _path_str = "layout.shape.legendgrouptitle" _valid_props = {"font", "text"} @property def font(self): """ Sets this legend group's title font. The 'font' property is an instance of Font that may be specified as: - An instance of :class:`plotly.graph_objs.layout.shape.legendgrouptitle.Font` - A dict of string/value properties that will be passed to the Font constructor Returns ------- plotly.graph_objs.layout.shape.legendgrouptitle.Font """ return self["font"] @font.setter def font(self, val): self["font"] = val @property def text(self): """ Sets the title of the legend group. The 'text' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["text"] @text.setter def text(self, val): self["text"] = val @property def _prop_descriptions(self): return """\ font Sets this legend group's title font. text Sets the title of the legend group. """ def __init__(self, arg=None, font=None, text=None, **kwargs): """ Construct a new Legendgrouptitle object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.layout.shape.L egendgrouptitle` font Sets this legend group's title font. text Sets the title of the legend group. 
Returns ------- Legendgrouptitle """ super().__init__("legendgrouptitle") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.layout.shape.Legendgrouptitle constructor must be a dict or an instance of :class:`plotly.graph_objs.layout.shape.Legendgrouptitle`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("font", arg, font) self._set_property("text", arg, text) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Legendgrouptitle
python
pytorch__pytorch
test/inductor/test_aot_inductor_custom_ops.py
{ "start": 17589, "end": 18095 }
class ____(AOTICustomOpTestCase): device = "cpu" device_type = "cpu" check_model = check_model check_model_with_multiple_inputs = check_model_with_multiple_inputs code_check_count = code_check_count allow_stack_allocation = False use_minimal_arrayref_interface = False copy_tests( AOTInductorTestsTemplate, AOTInductorTestABICompatibleCpu, "cpu", CPU_TEST_FAILURES, ) @unittest.skipIf(sys.platform == "darwin", "No CUDA on MacOS")
AOTInductorTestABICompatibleCpu
python
explosion__spaCy
spacy/lang/en/__init__.py
{ "start": 372, "end": 592 }
class ____(BaseDefaults): tokenizer_exceptions = TOKENIZER_EXCEPTIONS infixes = TOKENIZER_INFIXES lex_attr_getters = LEX_ATTRS syntax_iterators = SYNTAX_ITERATORS stop_words = STOP_WORDS
EnglishDefaults
python
scipy__scipy
scipy/optimize/tests/test_trustregion_exact.py
{ "start": 1968, "end": 4556 }
class ____: def test_for_already_singular_leading_submatrix(self): # Define test matrix A. # Note that the leading 2x2 submatrix is singular. A = np.array([[1, 2, 3], [2, 4, 5], [3, 5, 6]]) # Get Cholesky from lapack functions cholesky, = get_lapack_funcs(('potrf',), (A,)) # Compute Cholesky Decomposition c, k = cholesky(A, lower=False, overwrite_a=False, clean=True) delta, v = singular_leading_submatrix(A, c, k) A[k-1, k-1] += delta # Check if the leading submatrix is singular. assert_array_almost_equal(det(A[:k, :k]), 0) # Check if `v` fulfil the specified properties quadratic_term = np.dot(v, np.dot(A, v)) assert_array_almost_equal(quadratic_term, 0) def test_for_simetric_indefinite_matrix(self): # Define test matrix A. # Note that the leading 5x5 submatrix is indefinite. A = np.asarray([[1, 2, 3, 7, 8], [2, 5, 5, 9, 0], [3, 5, 11, 1, 2], [7, 9, 1, 7, 5], [8, 0, 2, 5, 8]]) # Get Cholesky from lapack functions cholesky, = get_lapack_funcs(('potrf',), (A,)) # Compute Cholesky Decomposition c, k = cholesky(A, lower=False, overwrite_a=False, clean=True) delta, v = singular_leading_submatrix(A, c, k) A[k-1, k-1] += delta # Check if the leading submatrix is singular. assert_array_almost_equal(det(A[:k, :k]), 0) # Check if `v` fulfil the specified properties quadratic_term = np.dot(v, np.dot(A, v)) assert_array_almost_equal(quadratic_term, 0) def test_for_first_element_equal_to_zero(self): # Define test matrix A. # Note that the leading 2x2 submatrix is singular. 
A = np.array([[0, 3, 11], [3, 12, 5], [11, 5, 6]]) # Get Cholesky from lapack functions cholesky, = get_lapack_funcs(('potrf',), (A,)) # Compute Cholesky Decomposition c, k = cholesky(A, lower=False, overwrite_a=False, clean=True) delta, v = singular_leading_submatrix(A, c, k) A[k-1, k-1] += delta # Check if the leading submatrix is singular assert_array_almost_equal(det(A[:k, :k]), 0) # Check if `v` fulfil the specified properties quadratic_term = np.dot(v, np.dot(A, v)) assert_array_almost_equal(quadratic_term, 0)
TestSingularLeadingSubmatrix
python
tensorflow__tensorflow
tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py
{ "start": 33967, "end": 35194 }
class ____(test.TestCase): def _validateTopK(self, inputs, k, expected_values, expected_indices): np_expected_values = np.array(expected_values) np_expected_indices = np.array(expected_indices) with self.cached_session(use_gpu=True) as _: values_op, indices_op = nn_ops.top_k(inputs, k) self.assertShapeEqual(np_expected_values, values_op) self.assertShapeEqual(np_expected_indices, indices_op) self.assertAllClose(np_expected_values, values_op) def testTop1(self): inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]] self._validateTopK(inputs, 1, [[0.4], [0.3]], [[3], [1]]) def testTop2(self): inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.4, 0.2]] self._validateTopK(inputs, 2, [[0.4, 0.3], [0.4, 0.3]], [[3, 1], [2, 1]]) def testTop3(self): k = 5 inputs = np.random.permutation(np.linspace(0, 100, 6140, dtype=np.float32)) indices = np.argsort(-inputs)[:k] values = -np.sort(-inputs)[:k] self._validateTopK(inputs, k, values, indices) def testTensorK(self): inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.4, 0.2]] k = constant_op.constant(2) self._validateTopK(inputs, k, [[0.4, 0.3], [0.4, 0.3]], [[3, 1], [2, 1]])
TopKTest
python
networkx__networkx
networkx/classes/digraph.py
{ "start": 452, "end": 1807 }
class ____: """Data Descriptor class that syncs and resets cached properties adj and succ The cached properties `adj` and `succ` are reset whenever `_adj` or `_succ` are set to new objects. In addition, the attributes `_succ` and `_adj` are synced so these two names point to the same object. Warning: most of the time, when ``G._adj`` is set, ``G._pred`` should also be set to maintain a valid data structure. They share datadicts. This object sits on a class and ensures that any instance of that class clears its cached properties "succ" and "adj" whenever the underlying instance attributes "_succ" or "_adj" are set to a new object. It only affects the set process of the obj._adj and obj._succ attribute. All get/del operations act as they normally would. For info on Data Descriptors see: https://docs.python.org/3/howto/descriptor.html """ def __set__(self, obj, value): od = obj.__dict__ od["_adj"] = value od["_succ"] = value # reset cached properties props = [ "adj", "succ", "edges", "out_edges", "degree", "out_degree", "in_degree", ] for prop in props: if prop in od: del od[prop]
_CachedPropertyResetterAdjAndSucc
python
doocs__leetcode
solution/1700-1799/1726.Tuple with Same Product/Solution.py
{ "start": 0, "end": 302 }
class ____: def tupleSameProduct(self, nums: List[int]) -> int: cnt = defaultdict(int) for i in range(1, len(nums)): for j in range(i): x = nums[i] * nums[j] cnt[x] += 1 return sum(v * (v - 1) // 2 for v in cnt.values()) << 3
Solution
python
airbytehq__airbyte
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py
{ "start": 66294, "end": 67767 }
class ____(BaseModel): type: Literal["ParentStreamConfig"] parent_key: str = Field( ..., description="The primary key of records from the parent stream that will be used during the retrieval of records for the current substream. This parent identifier field is typically a characteristic of the child records being extracted from the source API.", examples=["id", "{{ config['parent_record_id'] }}"], title="Parent Key", ) stream: DeclarativeStream = Field(..., description="Reference to the parent stream.", title="Parent Stream") partition_field: str = Field( ..., description="While iterating over parent records during a sync, the parent_key value can be referenced by using this field.", examples=["parent_id", "{{ config['parent_partition_field'] }}"], title="Current Parent Key Value Identifier", ) request_option: Optional[RequestOption] = Field( None, description="A request option describing where the parent key value should be injected into and under what field name if applicable.", title="Request Option", ) incremental_dependency: Optional[bool] = Field( False, description="Indicates whether the parent stream should be read incrementally based on updates in the child stream.", title="Incremental Dependency", ) parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
ParentStreamConfig
python
allegroai__clearml
clearml/backend_api/services/v2_13/events.py
{ "start": 42347, "end": 44305 }
class ____(Response): """ Response of events.debug_images endpoint. :param metrics: Debug image events grouped by task metrics and iterations :type metrics: Sequence[dict] :param scroll_id: Scroll ID for getting more results :type scroll_id: str """ _service = "events" _action = "debug_images" _version = "2.13" _schema = { "definitions": {}, "properties": { "metrics": { "description": "Debug image events grouped by task metrics and iterations", "items": {"type": "object"}, "type": ["array", "null"], }, "scroll_id": { "description": "Scroll ID for getting more results", "type": ["string", "null"], }, }, "type": "object", } def __init__(self, metrics: Optional[List[dict]] = None, scroll_id: Optional[str] = None, **kwargs: Any) -> None: super(DebugImagesResponse, self).__init__(**kwargs) self.metrics = metrics self.scroll_id = scroll_id @schema_property("metrics") def metrics(self) -> Optional[List[dict]]: return self._property_metrics @metrics.setter def metrics(self, value: Optional[List[dict]]) -> None: if value is None: self._property_metrics = None return self.assert_isinstance(value, "metrics", (list, tuple)) self.assert_isinstance(value, "metrics", (dict,), is_array=True) self._property_metrics = value @schema_property("scroll_id") def scroll_id(self) -> Optional[str]: return self._property_scroll_id @scroll_id.setter def scroll_id(self, value: Optional[str]) -> None: if value is None: self._property_scroll_id = None return self.assert_isinstance(value, "scroll_id", six.string_types) self._property_scroll_id = value
DebugImagesResponse
python
Lightning-AI__lightning
src/lightning/pytorch/callbacks/lr_finder.py
{ "start": 1012, "end": 4850 }
class ____(Callback): """The ``LearningRateFinder`` callback enables the user to do a range test of good initial learning rates, to reduce the amount of guesswork in picking a good starting learning rate. .. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature. Args: min_lr: Minimum learning rate to investigate max_lr: Maximum learning rate to investigate num_training_steps: Number of learning rates to test mode: Search strategy to update learning rate after each batch: - ``'exponential'`` (default): Increases the learning rate exponentially. - ``'linear'``: Increases the learning rate linearly. early_stop_threshold: Threshold for stopping the search. If the loss at any point is larger than early_stop_threshold*best_loss then the search is stopped. To disable, set to None. update_attr: Whether to update the learning rate attribute or not. attr_name: Name of the attribute which stores the learning rate. The names 'learning_rate' or 'lr' get automatically detected. Otherwise, set the name here. Example:: # Customize LearningRateFinder callback to run at different epochs. # This feature is useful while fine-tuning models. from lightning.pytorch.callbacks import LearningRateFinder class FineTuneLearningRateFinder(LearningRateFinder): def __init__(self, milestones, *args, **kwargs): super().__init__(*args, **kwargs) self.milestones = milestones def on_fit_start(self, *args, **kwargs): return def on_train_epoch_start(self, trainer, pl_module): if trainer.current_epoch in self.milestones or trainer.current_epoch == 0: self.lr_find(trainer, pl_module) trainer = Trainer(callbacks=[FineTuneLearningRateFinder(milestones=(5, 10))]) trainer.fit(...) Raises: MisconfigurationException: If learning rate/lr in ``model`` or ``model.hparams`` isn't overridden, or if you are using more than one optimizer. 
""" SUPPORTED_MODES = ("linear", "exponential") def __init__( self, min_lr: float = 1e-8, max_lr: float = 1, num_training_steps: int = 100, mode: str = "exponential", early_stop_threshold: Optional[float] = 4.0, update_attr: bool = True, attr_name: str = "", ) -> None: mode = mode.lower() if mode not in self.SUPPORTED_MODES: raise ValueError(f"`mode` should be either of {self.SUPPORTED_MODES}") self._min_lr = min_lr self._max_lr = max_lr self._num_training_steps = num_training_steps self._mode = mode self._early_stop_threshold = early_stop_threshold self._update_attr = update_attr self._attr_name = attr_name self._early_exit = False self.optimal_lr: Optional[_LRFinder] = None def lr_find(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: with isolate_rng(): self.optimal_lr = _lr_find( trainer, pl_module, min_lr=self._min_lr, max_lr=self._max_lr, num_training=self._num_training_steps, mode=self._mode, early_stop_threshold=self._early_stop_threshold, update_attr=self._update_attr, attr_name=self._attr_name, ) if self._early_exit: raise _TunerExitException() @override def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: self.lr_find(trainer, pl_module)
LearningRateFinder
python
weaviate__weaviate-python-client
weaviate/collections/classes/filters.py
{ "start": 13791, "end": 13982 }
class ____(_FilterByTime): def __init__(self, target: Optional[_TargetRefs] = None) -> None: self._target = target self._property = "_creationTimeUnix"
_FilterByCreationTime
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/hooks/opensearch_serverless.py
{ "start": 891, "end": 1543 }
class ____(AwsBaseHook): """ Interact with the Amazon OpenSearch Serverless API. Provide thin wrapper around :external+boto3:py:class:`boto3.client("opensearchserverless") <OpenSearchServiceServerless.Client>`. Additional arguments (such as ``aws_conn_id``) may be specified and are passed down to the underlying AwsBaseHook. .. seealso:: - :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook` """ client_type = "opensearchserverless" def __init__(self, *args, **kwargs) -> None: kwargs["client_type"] = self.client_type super().__init__(*args, **kwargs)
OpenSearchServerlessHook
python
python__mypy
mypyc/ir/ops.py
{ "start": 52446, "end": 53820 }
class ____(RegisterOp): """A no-op operation that ensures source values aren't freed. This is sometimes useful to avoid decref when a reference is still being held but not seen by the compiler. A typical use case is like this (C-like pseudocode): ptr = &x.item r = *ptr keep_alive x # x must not be freed here # x may be freed here If we didn't have "keep_alive x", x could be freed immediately after taking the address of 'item', resulting in a read after free on the second line. If 'steal' is true, the value is considered to be stolen at this op, i.e. it won't be decref'd. You need to ensure that the value is freed otherwise, perhaps by using borrowing followed by Unborrow. Be careful with steal=True -- this can cause memory leaks. """ error_kind = ERR_NEVER def __init__(self, src: list[Value], *, steal: bool = False) -> None: assert src self.src = src self.steal = steal def sources(self) -> list[Value]: return self.src.copy() def stolen(self) -> list[Value]: if self.steal: return self.src.copy() return [] def set_sources(self, new: list[Value]) -> None: self.src = new[:] def accept(self, visitor: OpVisitor[T]) -> T: return visitor.visit_keep_alive(self) @final
KeepAlive
python
tensorflow__tensorflow
tensorflow/python/distribute/strategy_common_test.py
{ "start": 27371, "end": 28585 }
class ____(test.TestCase, parameterized.TestCase): @combinations.generate( combinations.combine( strategy=[strategy_combinations.multi_worker_mirrored_2x1_cpu] + strategy_combinations.all_strategies, mode=['eager'])) def testClusterResolverProperty(self, strategy): # CollectiveAllReduceStrategy and TPUStrategy must have a cluster resolver. # `None` otherwise. resolver = strategy.cluster_resolver if (not isinstance(strategy, CollectiveAllReduceStrategy) and not strategy_test_lib.is_tpu_strategy(strategy)): self.assertIsNone(resolver) return with strategy.scope(): self.assertIs(strategy.cluster_resolver, resolver) self.assertTrue(hasattr(resolver, 'cluster_spec')) self.assertTrue(hasattr(resolver, 'master')) self.assertTrue(hasattr(resolver, 'num_accelerators')) self.assertTrue(hasattr(resolver, 'task_id')) self.assertTrue(hasattr(resolver, 'task_type')) if isinstance(strategy, CollectiveAllReduceStrategy): self.assertEqual(resolver.task_id, 0) self.assertAllInSet(resolver.task_type, ['chief', 'worker']) if __name__ == '__main__': test_util.main()
StrategyClusterResolverTest
python
ray-project__ray
doc/source/ray-core/doc_code/runtime_env_example.py
{ "start": 1983, "end": 2036 }
class ____: pass # __per_task_per_actor_end__
MyClass
python
pytorch__pytorch
test/jit/test_union_pep604.py
{ "start": 553, "end": 33389 }
class ____(JitTestCase): """ This class tests the functionality of `Union`. Note: It's important to be able to refine the type of a `Union` to one of its internal types. Currently, there are differences in the way Python expects `isinstance` checks and the way TorchScript expects `isinstance` checks. This means that we can't use `checkScript` in our test cases because either the eager mode or the script mode wouldn't run! So, some test cases have separate but equivalent functions to emulate `checkScript`. """ def test_check_union_annotation(self): def test_func(a: int | float, b: Optional[int]): return 0 scripted_func = torch.jit.script(test_func) graph_rep = str(scripted_func.graph) code_rep = str(scripted_func.code) # TS graph IR for Union should be annotated as Union() FileCheck().check("Union(").check("int?").run(graph_rep) # Serialized code for Union should be annotated as Union[] FileCheck().check("Union[").check("Optional[int]").run(code_rep) self.checkScript(test_func, (5, 6)) # this shouldn't error out torch._C.parse_ir(str(scripted_func.graph)) def test_union_with_scalar_values(self): def fn(x: int | float) -> str: return "foo" self.checkScript(fn, (1,)) self.checkScript(fn, (1.0,)) scripted = torch.jit.script(fn) with self.assertRaisesRegex( RuntimeError, "Expected a member of" r" Union\[float, int\] but " "instead found type str", ): scripted("1") def test_union_with_collections(self): def fn(x: Dict[str, int] | List[int]) -> str: return "foo" self.checkScript(fn, ({"foo": 1, "bar": 2, "baz": 3},)) self.checkScript(fn, ([1, 2, 3],)) scripted = torch.jit.script(fn) with self.assertRaisesRegex( RuntimeError, "Expected a member of" r" Union\[List\[int\], Dict\[str, " r"int\]\] but instead found type " r"Dict\[str, str\]", ): scripted({"foo": "bar", "baz": "qux"}) with self.assertRaisesRegex( RuntimeError, "Expected a member of" r" Union\[List\[int\], Dict\[str, " r"int\]\] but instead found type " r"List\[str\]", ): scripted(["foo", "bar", "baz"]) with 
self.assertRaisesRegex( RuntimeError, "Expected a member of" r" Union\[List\[int\], Dict\[str, " r"int\]\] but instead found type " "str", ): scripted("1") def test_union_with_enum(self): class Color(Enum): RED = 1 GREEN = 2 make_global(Color) def fn(x: str | Color) -> str: return "foo" self.checkScript(fn, (Color.RED,)) self.checkScript(fn, ("red",)) scripted = torch.jit.script(fn) with self.assertRaisesRegex( RuntimeError, "Expected a member of" r" Union\[__torch__.jit.test_union_pep604." r"Color, str\] but instead found " "type int", ): scripted(1) def test_union_in_class_constructor(self): @torch.jit.script # noqa: B903 class A: # noqa: B903 def __init__(self, x: int | str) -> None: self.x = x def fn(x: str | int) -> A: return A(x) self.assertEqual(fn("foo").x, "foo") self.assertEqual(fn(1).x, 1) scripted = torch.jit.script(fn) with self.assertRaisesRegex( RuntimeError, "Expected a member of" r" Union\[int, str\] but instead " r"found type List\[str\]", ): scripted(["foo", "bar", "baz"]) def test_union_return_type(self): def fn(x: int) -> int | str: return "foo" self.checkScript(fn, (1,)) def test_union_as_annotation(self): def fn() -> int | str: x: int | str = "foo" return x self.checkScript(fn, ()) def test_union_as_annotation_in_typed_container(self): def fn() -> None: l: List[int | str] = [] u1: int | str = "foo" u2: int | str = 1 l.append(u1) l.append(u2) self.checkScript(fn, ()) def test_union_as_annotation_py2(self): def fn(): # type: () -> int | str x: int | str = "foo" return x self.checkScript(fn, ()) def test_union_as_internal_tuple_type(self): def fn(): t: Tuple[int | str, int | str] = (1, "foo") return t self.checkScript(fn, ()) def test_union_variable_can_be_reassigned(self): @torch.jit.script def aux1(i: int): return int(i**2) @torch.jit.script def aux2(s: str): return s + s def fn() -> int | str: x: int | str = "foo" i: int = 1 x = i y: int = aux1(x) z: str = aux2(str(y)) x = z return x self.checkScript(fn, ()) def 
test_union_does_not_replace_existing_annotated_type(self): def fn(): x: List[int] = [1, 2, 3] x.append("foo") return x with self.assertRaisesRegex(RuntimeError, "Could not match type str"): scripted = torch.jit.script(fn) scripted() def test_union_does_not_replace_existing_annotated_type_union(self): def fn(): x: List[int | str] = [1, "foo", 3] x.append(2.0) return x with self.assertRaisesRegex(RuntimeError, "Could not match type float"): scripted = torch.jit.script(fn) scripted() def test_union_does_not_replace_existing_annotated_type_empty_container(self): def fn(): x: List[int] = [] x.append("foo") return x with self.assertRaisesRegex(RuntimeError, "Could not match type str"): scripted = torch.jit.script(fn) scripted() def test_unions_of_unions_are_flattened(self): @torch.jit.script def fn(x: (int | str) | float) -> str: return "foo" s = fn.graph FileCheck().check("x : Union(float, int, str)").run(s) def test_unions_of_a_single_argument_vanish(self): @torch.jit.script def fn(x: Union[int]) -> str: return "foo" s = fn.graph FileCheck().check("x : int").run(s) def test_union_redundant_arguments_are_skipped(self): @torch.jit.script def fn(x: int | str | int) -> str: return "foo" s = fn.graph FileCheck().check("x : Union(int, str)").run(s) def test_union_redundant_arguments_are_skipped_optional(self): @torch.jit.script def fn(x: int | Optional[float] | Optional[int]) -> str: return "foo" s = fn.graph FileCheck().check("x : Union(float, int, NoneType)").run(s) def test_union_redundant_arguments_are_skipped_subtyping(self): @torch.jit.script def fn(x: str | Tuple[Optional[int], int] | Tuple[int, int]) -> str: return "foo" s = fn.graph FileCheck().check("x : Union((int?, int), str)").run(s) def test_union_redundant_arguments_are_skipped_container(self): @torch.jit.script def fn(x: List[str] | List[float] | List[str]) -> str: return "foo" s = fn.graph FileCheck().check("x : Union(float[], str[])").run(s) def test_union_argument_order_is_ignored(self): @torch.jit.script 
def fn1(x: int | str) -> str: return "foo" @torch.jit.script def fn2(x: str | int) -> str: return "foo" for s in (fn1.graph, fn2.graph): FileCheck().check("x : Union(int, str)").run(s) def test_union_argument_order_is_ignored_container(self): @torch.jit.script def fn1(x: List[str] | List[int]) -> str: return "foo" @torch.jit.script def fn2(x: List[int] | List[str]) -> str: return "foo" for s in (fn1.graph, fn2.graph): FileCheck().check("x : Union(int[], str[])").run(s) def test_union_T_None_is_equivalent_to_optional_T(self): @torch.jit.script def inner(x: int | None) -> int: if x is not None: return x else: return 5 @torch.jit.script def fn1() -> int: a: Optional[int] = 5 b: Optional[int] = None a_ = inner(a) b_ = inner(b) return a_ + b_ self.assertEqual(fn1(), 10) @torch.jit.script def inner2(x: Optional[int]) -> int: if x is not None: return x else: return 5 @torch.jit.script def fn2() -> int: a: int | None = 5 b: int | None = None a_ = inner(a) b_ = inner(b) return a_ + b_ self.assertEqual(fn2(), 10) @unittest.expectedFailure def test_union_optional_of_union_return(self): @torch.jit.script def fn() -> None | str | int: y: Optional[int | str] = "foo" return y @unittest.expectedFailure def test_union_optional_of_union_is_flattened(self): @torch.jit.script def fn(flag: int) -> str | int | None: y: int | str | None = "foo" if flag == 0: x: Optional[int | str] = y elif flag == 1: x: Optional[int | str] = 1 else: x: Optional[int | str] = None return x # Can't use `checkScript` because it will flag the fact that # the original code has `Optional[Union[int, str]]` but the # saved/loaded code has `Union[int, NoneType, str]` (even # though this is exactly what we want) self.assertEqual(fn(0), "foo") self.assertEqual(fn(1), 1) self.assertEqual(fn(2), None) buffer = io.BytesIO() torch.jit.save(fn, buffer) buffer = io.BytesIO(buffer.getvalue()) l = torch.jit.load(buffer) s = l.code FileCheck().check("Union[int, NoneType, str]").check( "Union[int, NoneType, str]" ).run(s) def 
test_union_subclasses_larger_union(self): def fn() -> int | str | torch.Tensor: x: int | str = "foo" return x self.checkScript(fn, ()) # TODO: We would like to eventually support this. The issue is being # tracked at https://github.com/pytorch/pytorch/issues/58167 def test_union_as_dict_key(self): def fn(): x: Dict[int | str, str] = {} x["foo"] = "bar" x[1] = 2 return x[1] with self.assertRaisesRegex( RuntimeError, "only int, float, complex, Tensor, device and string keys are supported", ): torch.jit.script(fn) def test_union_as_dict_value(self): def fn(): x: Dict[str, int | str] = {} x["foo"] = "bar" x["baz"] = 2 return x["baz"] self.checkScript(fn, ()) def test_union_module_with_union_instance_variable(self): class M(torch.nn.Module): x: int | str def __init__(self, x: int | str): super().__init__() self.x: int | str = x def forward(self, y: int | str): self.x = y return self.x self.checkModule( M( 2, ), (1,), ) self.checkModule(M("bar"), ("foo",)) def test_union_module_with_union_class_variable(self): class M(torch.nn.Module): x: int | str = "foo" def __init__(self, y: int): super().__init__() x = y def forward(self, z: str): x = z return x self.checkModule(M(1), ("foo",)) def test_union_type_refinement(self): def fn(x: int | str) -> str: if isinstance(x, str): z = x + "bar" return x else: return "baz" self.checkScript(fn, ("foo",)) self.checkScript(fn, (1,)) def test_union_type_refinement_union_rhs(self): def fn(x: int) -> str: if torch.jit.isinstance(x, int | str): return "bar" else: return "baz" self.checkScript(fn, (1,)) def test_union_type_refinement_tuple_rhs(self): def fn(x: int | float | List[str]) -> str: if isinstance(x, (int, float)): if isinstance(x, int): return str(x) else: return "foo" else: if len(x): return x[0] else: return "bar" self.checkScript(fn, (1,)) self.checkScript(fn, (1.0,)) self.checkScript(fn, (["a", "b", "c"],)) def test_union_type_refinement_tuple_rhs_noncontained_type(self): def fn(x: int | List[str]) -> str: if isinstance(x, 
(int, float)): y = x + x return str(y) else: if len(x): return x[0] else: return "bar" self.checkScript(fn, (1,)) self.checkScript(fn, (["a", "b", "c"],)) def test_union_type_refinement_tuple_rhs_union(self): @torch.jit.script def fn(x: int) -> str: if torch.jit.isinstance(x, (int | str, float)): y = x + x return str(y) else: return "foo" # TODO: There's currently an unrelated bug in # `torch.jit.isinstance` that makes it fail for tuple literals. # Posted here: https://github.com/pytorch/pytorch/issues/60095 # Change `assertEqual` to `checkScript` when the bug is fixed self.assertEqual(fn(1), "2") def test_union_type_refinement_statically_false(self): @torch.jit.script def fn(x: int) -> str: if torch.jit.isinstance(x, (str | float, List[str], str)): z = x + "foo" return z else: return "bar" s = fn.graph # Check that we don't have any branching statements FileCheck().check_not("block0()").check_not("block1()").run(s) def test_union_type_refinement_statically_true(self): @torch.jit.script def fn(x: List[int] | int) -> List[int] | int: if not torch.jit.isinstance(x, (int, List[int])): return x else: l = [1, 2, 3] y: List[int] | int = l return y s = fn.graph # Check that we don't have any branching statements FileCheck().check_not("block0()").check_not("block1()").run(s) def test_union_type_refinement_partial_static_refinement_tuple_rhs(self): def fn(x: List[int] | int) -> int: if torch.jit.isinstance(x, (int, float, str)): # We should know that `x` is an `int` here z = x + 1 return z else: return 100 self.checkScript(fn, ([1, 2, 3],)) self.checkScript(fn, (1,)) def test_union_type_refinement_partial_static_refinement_union_rhs(self): def fn(x: List[int] | int) -> int: if torch.jit.isinstance(x, int | float | str): # We should know that `x` is an `int` here z = x + 1 return z else: return 100 self.checkScript(fn, ([1, 2, 3],)) self.checkScript(fn, (1,)) def test_union_type_refinement_internal_declaration(self): def fn(flag: bool) -> str: x: int | str | None = None if 
flag: y = "foo" else: y = 1 if isinstance(x, str): return x else: return "bar" self.checkScript(fn, (True,)) self.checkScript(fn, (False,)) def test_union_branching_with_union_return_and_homogenous_types(self): def fn(x: int) -> int | str: if x % 2: return "foo" else: return "bar" self.checkScript(fn, (1,)) self.checkScript(fn, (8,)) def test_union_branching_does_not_autoinfer_undeclared_union(self): def fn(x: int) -> str: if x % 2: y = "foo" else: y = x if isinstance(y, str): return y else: return "bar" with self.assertRaisesRegex( RuntimeError, "y is set to type str in the true branch and type int in the false branch", ): torch.jit.script(fn) def test_union_branching_does_not_widen_existing_inferred_type(self): def fn(x: int) -> str: y = "foo" if x % 2: y = "bar" else: y = x if isinstance(y, str): return y else: return "baz" with self.assertRaisesRegex( RuntimeError, "previously had type str but is now being assigned to a value of type int", ): torch.jit.script(fn) def test_union_schema_matching_on_internal_type(self): def fn(x: List[int] | Dict[str, int]) -> int: if torch.jit.isinstance(x, List[int]): return x[0] else: return list(x.values())[0] self.checkScript(fn, ([1, 2, 3],)) self.checkScript(fn, ({"foo": 1, "bar": 2, "baz": 3},)) def test_union_subtractive_refinement(self): def fn(x: List[int] | int) -> int: if not isinstance(x, int): x.append(1) return x[0] else: return x self.checkScript(fn, (1,)) self.checkScript(fn, ([1, 2, 3],)) def test_union_subtractive_refinement_with_container(self): def fn(x: List[int] | int) -> int: if not torch.jit.isinstance(x, List[int]): return x else: x.append(1) return x[0] self.checkScript(fn, (1,)) self.checkScript(fn, ([1, 2, 3],)) def test_union_memory_aliasing(self): def fn(): x: List[torch.Tensor] = [] z: List[Optional[List[torch.Tensor]]] = [] z.append(x) x_alias = z[0] if torch.jit.isinstance(x_alias, List[torch.Tensor]): x_alias.append(torch.tensor(3)) return x self.checkScript(fn, ()) def 
test_union_serialization_preserves_type_annotations(self): # This function will fail after being torch.jit.save'd and # torch.jit.load'd if the type annotations aren't preserved # for Union during serialization. We need the `Union[str, int]` # annotation to make sure that `y` is typed as a Union instead # of as a str in one branch and an int in the other def fn(x: int) -> str: if x % 2: y: str | int = "bar" else: y: str | int = x if isinstance(y, str): return y else: return "baz" self.checkScript(fn, (1,)) self.checkScript(fn, (8,)) def _assert_passes(self, template: str, ann: str, lhs: str): code = template.format(ann=ann, lhs=lhs) self.checkScript(code, (), name="fn") def _assert_raises(self, template: str, ann: str, lhs: str, msg: str): code = template.format(ann=ann, lhs=lhs) with self.assertRaisesRegex(RuntimeError, msg): cu = torch.jit.CompilationUnit(code, _frames_up=1) string_frontend = getattr(cu, "fn") # noqa: B009 def test_union_with_list_assignment(self): template = dedent( """ def fn(): x: {ann} = {lhs} if torch.jit.isinstance(x, List[torch.Tensor]): x.append(torch.tensor(3)) return x """ ) lhs = { "list_literal_empty": "[]", "list_literal_of_tensor": "[torch.arange(3), torch.arange(5)]", "list_literal_of_str": '["foo", "bar", "baz"]', "list_literal_of_mixed": "[torch.arange(5), 1]", "list_comprehension_of_tensor": "[torch.add(x, 1) for x in [torch.arange(3), torch.arange(5)]]", "list_comprehension_of_str": '[x + "!" 
for x in ["foo", "bar", "baz"]]', "list_comprehension_of_mixed": "[torch.add(1, x) for x in [torch.arange(5), 1]]", } """ List[str] | List[torch.Tensor] """ self._assert_raises( template, "List[str] | List[torch.Tensor]", lhs["list_literal_empty"], "there are multiple possible List type candidates in the Union annotation", ) self._assert_passes( template, "List[str] | List[torch.Tensor]", lhs["list_literal_of_tensor"] ) self._assert_passes( template, "List[str] | List[torch.Tensor]", lhs["list_literal_of_str"] ) self._assert_raises( template, "List[str] | List[torch.Tensor]", lhs["list_literal_of_mixed"], "none of those types match the types of the given list elements", ) self._assert_passes( template, "List[str] | List[torch.Tensor]", lhs["list_comprehension_of_tensor"], ) self._assert_passes( template, "List[str] | List[torch.Tensor]", lhs["list_comprehension_of_str"] ) # TODO: Support mixed list comprehensions self._assert_raises( template, "List[str] | List[torch.Tensor]", lhs["list_comprehension_of_mixed"], "Arguments for call are not valid", ) """ int | torch.Tensor """ self._assert_raises( template, "int | torch.Tensor", lhs["list_literal_empty"], "Expected an Union type annotation with an inner List type", ) self._assert_raises( template, "int | torch.Tensor", lhs["list_literal_of_tensor"], "Expected an Union type annotation with an inner List type", ) self._assert_raises( template, "int | torch.Tensor", lhs["list_comprehension_of_tensor"], "Expected an Union type annotation with an inner List type", ) """ List[torch.Tensor] | int """ self._assert_passes( template, "List[torch.Tensor] | int", lhs["list_literal_empty"] ) self._assert_passes( template, "List[torch.Tensor] | int", lhs["list_literal_of_tensor"] ) self._assert_raises( template, "List[torch.Tensor] | int", lhs["list_literal_of_str"], r"List type annotation `List\[Tensor\]` did " "not match the types of the given list " "elements", ) self._assert_raises( template, "List[torch.Tensor] | int", 
lhs["list_literal_of_mixed"], r"List type annotation `List\[Tensor\]` did " "not match the types of the given list " "elements", ) self._assert_passes( template, "List[torch.Tensor] | int", lhs["list_comprehension_of_tensor"] ) self._assert_raises( template, "List[torch.Tensor] | int", lhs["list_comprehension_of_str"], r"List type annotation `List\[Tensor\]` did " "not match the types of the given list " "elements", ) # TODO(@ansley): Support mixed list comprehensions self._assert_raises( template, "List[torch.Tensor] | int", lhs["list_comprehension_of_mixed"], "Arguments for call are not valid", ) def test_union_with_dict_assignment(self): template = dedent( """ def fn(): x: {ann} = {lhs} if torch.jit.isinstance(x, Dict[str, torch.Tensor]): x["foo"] = torch.tensor(3) return x """ ) lhs = { "dict_literal_empty": "{}", "dict_literal_of_str_tensor": '{"foo" : torch.arange(3), "bar" : torch.arange(5)}', "dict_literal_of_str_int": '{"foo" : 1, "bar" : 2}', "dict_literal_of_mixed": '{"foo" : torch.arange(3), "bar" : 2}', "dict_comprehension_of_str_tensor": '{x : torch.add(y, 1) for x, y in \ zip(["foo", "bar"], [torch.arange(3), torch.arange(5)])}', "dict_comprehension_of_str_int": '{x : torch.add(y, 1) for x, y in \ zip(["foo", "bar"], [1, 2]}', "dict_comprehension_of_mixed": '{x : torch.add(y, 1) for x, y in \ zip(["foo", "bar"], [torch.arange(3), 2])}', "dict_keyword": "dict(foo=torch.arange(3), baz=torch.arange(5))", "dict_keyword_with_iterable": 'dict([("foo", torch.arange(3)), ("bar", torch.arange(5))])', "dict_keyword_with_empty_iterable": "dict([])", "dict_keyword_with_internal_aggregate_function": 'dict(zip(["foo", "bar"], [torch.arange(3), torch.arange(5)])', "dict_keyword_with_mapping": 'dict({"foo" : torch.arange(3), "bar" : torch.arange(5)})', "dict_keyword_with_mapping_and_kwargs": 'dict({"foo" : torch.arange(3), "bar" : torch.arange(5)}, baz=torch.arange(7))', } """ Dict[str, torch.Tensor] | Dict[str, int] """ self._assert_raises( template, "List[str] | 
List[torch.Tensor]", lhs["dict_literal_empty"], "Expected an Union type annotation with an inner Dict type", ) self._assert_passes( template, "Dict[str, torch.Tensor] | Dict[str, int]", lhs["dict_literal_of_str_tensor"], ) self._assert_passes( template, "Dict[str, torch.Tensor] | Dict[str, int]", lhs["dict_literal_of_str_int"], ) self._assert_raises( template, "Dict[str, torch.Tensor] | Dict[str, int]", lhs["dict_literal_of_mixed"], "none of those dict types can hold the types of the given keys and values", ) # TODO: String frontend does not support tuple unpacking # https://github.com/pytorch/pytorch/issues/64096 # self._assert_passes(template, "Dict[str, torch.Tensor] | Dict[str, int]", # lhs["dict_comprehension_of_str_tensor"]) # self._assert_passes(template, "Dict[str, torch.Tensor] | Dict[str, int]", # lhs["dict_comprehension_of_str_int"]) # self._assert_raises(template, "Dict[str, torch.Tensor] | Dict[str, int]", # lhs["dict_comprehension_of_mixed"], # "foobar") # self._assert_passes(template, # "Dict[str, torch.Tensor] | Dict[str, int]", # lhs["dict_keyword_with_internal_aggregate_function"]) # TODO(@ansley): Follow-up project needed for full type # inference with dict keyword (supported for dict comprehension # and dict literal already; should not be a blocker for anyone) self._assert_raises( template, "Dict[str, torch.Tensor] | Dict[str, int]", lhs["dict_keyword"], "full type inference is not yet supported", ) self._assert_raises( template, "Dict[str, torch.Tensor] | Dict[str, int]", lhs["dict_keyword_with_iterable"], "full type inference is not yet supported", ) self._assert_raises( template, "Dict[str, torch.Tensor] | Dict[str, int]", lhs["dict_keyword_with_empty_iterable"], "full type inference is not yet supported", ) self._assert_raises( template, "Dict[str, torch.Tensor] | Dict[str, int]", lhs["dict_keyword_with_mapping"], "full type inference is not yet supported", ) self._assert_raises( template, "Dict[str, torch.Tensor] | Dict[str, int]", 
lhs["dict_keyword_with_mapping_and_kwargs"], "full type inference is not yet supported", ) """ int | torch.Tensor """ self._assert_raises( template, "int | torch.Tensor", lhs["dict_literal_empty"], "Expected an Union type annotation with an inner Dict type", ) self._assert_raises( template, "int | torch.Tensor", lhs["dict_literal_of_str_tensor"], "Expected an Union type annotation with an inner Dict type", ) # See above--string frontend does not support tuple unpacking # self._assert_raises(template, "int | torch.Tensor", # lhs["dict_comprehension_of_tensor"], # "foobar") """ Dict[str, torch.Tensor] | int """ self._assert_passes( template, "Dict[str, torch.Tensor] | int", lhs["dict_literal_empty"] ) self._assert_passes( template, "Dict[str, torch.Tensor] | int", lhs["dict_literal_of_str_tensor"] ) self._assert_raises( template, "Dict[str, torch.Tensor] | int", lhs["dict_literal_of_str_int"], "Type annotation was inferred to be " r"`Dict\[str, Tensor\]`, but the type of " "values given by the dict literal is", ) self._assert_raises( template, "Dict[str, torch.Tensor] | int", lhs["dict_literal_of_mixed"], "Type annotation was inferred to be " r"`Dict\[str, Tensor\]`, but the type of " "values given by the dict literal is", ) self._assert_passes( template, "Dict[str, torch.Tensor] | int", lhs["dict_keyword"] ) self._assert_passes( template, "Dict[str, torch.Tensor] | int", lhs["dict_keyword_with_iterable"] ) self._assert_passes( template, "Dict[str, torch.Tensor] | int", lhs["dict_keyword_with_empty_iterable"], ) self._assert_passes( template, "Dict[str, torch.Tensor] | int", lhs["dict_keyword_with_mapping"] ) self._assert_passes( template, "Dict[str, torch.Tensor] | int", lhs["dict_keyword_with_mapping_and_kwargs"], ) # See above--string frontend does not support tuple unpacking # self._assert_passes(template, # "Dict[str, torch.Tensor] | int", # lhs["dict_keyword_with_internal_aggregate_function"]) # # self._assert_passes(template, # "Dict[str, torch.Tensor] | int", 
# lhs["dict_comprehension_of_str_tensor"]) # self._assert_raises(template, # "Dict[str, torch.Tensor] | int", # lhs["dict_comprehension_of_str_int"], # "foobar") # self._assert_raises(template, # "Dict[str, torch.Tensor] | int", # lhs["dict_comprehension_of_mixed"], # "foobar") if __name__ == "__main__": raise_on_run_directly("test/test_jit.py")
TestUnion
python
sphinx-doc__sphinx
sphinx/domains/cpp/_ast.py
{ "start": 53121, "end": 55349 }
class ____(ASTBase): is_anonymous: ClassVar[Literal[False]] = False def __eq__(self, other: object) -> bool: raise NotImplementedError(repr(self)) def __hash__(self) -> int: raise NotImplementedError(repr(self)) def is_anon(self) -> bool: return self.is_anonymous def is_operator(self) -> bool: return True def get_id(self, version: int) -> str: raise NotImplementedError def _describe_identifier( self, signode: TextElement, identnode: TextElement, env: BuildEnvironment, symbol: Symbol, ) -> None: """Render the prefix into signode, and the last part into identnode.""" raise NotImplementedError def describe_signature( self, signode: TextElement, mode: str, env: BuildEnvironment, prefix: str, templateArgs: str, symbol: Symbol, ) -> None: verify_description_mode(mode) if mode == 'lastIsName': main_name = addnodes.desc_name() self._describe_identifier(main_name, main_name, env, symbol) signode += main_name elif mode == 'markType': target_text = prefix + str(self) + templateArgs pnode = addnodes.pending_xref( '', refdomain='cpp', reftype='identifier', reftarget=target_text, modname=None, classname=None, ) pnode['cpp:parent_key'] = symbol.get_lookup_key() # Render the identifier part, but collapse it into a string # and make that the a link to this operator. # E.g., if it is 'operator SomeType', then 'SomeType' becomes # a link to the operator, not to 'SomeType'. container = nodes.literal() self._describe_identifier(signode, container, env, symbol) txt = container.astext() pnode += addnodes.desc_name(txt, txt) signode += pnode else: add_name = addnodes.desc_addname() self._describe_identifier(add_name, add_name, env, symbol) signode += add_name
ASTOperator
python
uqfoundation__dill
dill/tests/test_recursive.py
{ "start": 2469, "end": 4182 }
class ____(object): def __init__(self): super(obj4, self).__init__() a = self class obj5(object): def __init__(self): super(obj5, self).__init__() self.a = a self.b = obj5() def test_circular_reference(): assert copy(obj4()) obj4_copy = dill.loads(dill.dumps(obj4())) assert type(obj4_copy) is type(obj4_copy).__init__.__closure__[0].cell_contents assert type(obj4_copy.b) is type(obj4_copy.b).__init__.__closure__[0].cell_contents def f(): def g(): return g return g def test_function_cells(): assert copy(f()) def fib(n): assert n >= 0 if n <= 1: return n else: return fib(n-1) + fib(n-2) def test_recursive_function(): global fib fib2 = copy(fib, recurse=True) fib3 = copy(fib) fib4 = fib del fib assert fib2(5) == 5 for _fib in (fib3, fib4): try: _fib(5) except Exception: # This is expected to fail because fib no longer exists pass else: raise AssertionError("Function fib shouldn't have been found") fib = fib4 def collection_function_recursion(): d = {} def g(): return d d['g'] = g return g def test_collection_function_recursion(): g = copy(collection_function_recursion()) assert g()['g'] is g if __name__ == '__main__': with warnings.catch_warnings(): warnings.simplefilter('error') test_super() test_partial() test_partials() test_circular_reference() test_function_cells() test_recursive_function() test_collection_function_recursion()
obj4
python
fluentpython__example-code
attic/concurrency/wikipedia/orig/potd.py
{ "start": 398, "end": 3059 }
class ____(Exception): '''No Picture of the Day found for {day}''' def build_page_url(iso_date): return POTD_BASE_URL + iso_date def fetch(url): response = requests.get(url) return response def extract_image_url(html): re_image = r'src="(//upload\..*?)"' image_url = re.search(re_image, html) return 'http:' + image_url.group(1) def format_date(year, month, day): return '{year}-{month:02d}-{day:02d}'.format(**locals()) def list_days_of_month(year, month): lastday = calendar.monthrange(year, month)[1] days = [format_date(year, month, day) for day in range(1, lastday + 1)] return days def build_save_path(iso_date, url): head, filename = os.path.split(url) return os.path.join(SAVE_DIR, iso_date+'_'+filename) def save_one(iso_date, verbose): page_url = build_page_url(iso_date) response = fetch(page_url) if response.status_code != 200: msg = NoPictureForDate.__doc__.format(day=iso_date) raise NoPictureForDate(msg) img_url = extract_image_url(response.text) response = fetch(img_url) path = build_save_path(iso_date, img_url) if verbose: print('saving: '+path) with io.open(path, 'wb') as fp: fp.write(response.content) return len(response.content) def save_month(year_month, verbose): year, month = [int(s) for s in year_month.split('-')] total_size = 0 img_count = 0 dates = list_days_of_month(year, month) for date in dates: try: total_size += save_one(date, verbose) img_count += 1 except NoPictureForDate: continue return img_count, total_size def main(save_one=save_one, save_month=save_month): """Get "Picture of The Day" from English Wikipedia for a given date or month""" parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument('date', help='year, month and (optional) day in YYYY-MM-DD format') parser.add_argument('-q', '--max_qty', type=int, help='maximum number of files to download') parser.add_argument('-v', '--verbose', action='store_true', help='display progress information') args = parser.parse_args() t0 = time.time() if len(args.date) == 
len('YYYY-MM-DD'): img_count = 1 total_size = save_one(args.date, args.verbose) else: img_count, total_size = save_month(args.date, args.verbose) elapsed = time.time() - t0 print("images: %3d | total size: %6.1f Kbytes | elapsed time: %3ds" % (img_count, total_size/1024.0, elapsed)) if __name__ == '__main__': main()
NoPictureForDate
python
encode__starlette
starlette/middleware/gzip.py
{ "start": 4928, "end": 5899 }
class ____(IdentityResponder): content_encoding = "gzip" def __init__(self, app: ASGIApp, minimum_size: int, compresslevel: int = 9) -> None: super().__init__(app, minimum_size) self.gzip_buffer = io.BytesIO() self.gzip_file = gzip.GzipFile(mode="wb", fileobj=self.gzip_buffer, compresslevel=compresslevel) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: with self.gzip_buffer, self.gzip_file: await super().__call__(scope, receive, send) def apply_compression(self, body: bytes, *, more_body: bool) -> bytes: self.gzip_file.write(body) if not more_body: self.gzip_file.close() body = self.gzip_buffer.getvalue() self.gzip_buffer.seek(0) self.gzip_buffer.truncate() return body async def unattached_send(message: Message) -> NoReturn: raise RuntimeError("send awaitable not set") # pragma: no cover
GZipResponder
python
apache__airflow
airflow-core/src/airflow/security/kerberos.py
{ "start": 1886, "end": 7035 }
class ____(Enum): """ Defines modes for running airflow kerberos. :return: None. """ STANDARD = "standard" ONE_TIME = "one-time" def get_kerberos_principal(principal: str | None) -> str: """Retrieve Kerberos principal. Fallback to principal from Airflow configuration if not provided.""" return principal or conf.get_mandatory_value("kerberos", "principal").replace("_HOST", get_hostname()) def renew_from_kt(principal: str | None, keytab: str, exit_on_fail: bool = True): """ Renew kerberos token from keytab. :param principal: principal :param keytab: keytab file :return: None """ # The config is specified in seconds. But we ask for that same amount in # minutes to give ourselves a large renewal buffer. renewal_lifetime = f"{conf.getint('kerberos', 'reinit_frequency')}m" cmd_principal = get_kerberos_principal(principal) if conf.getboolean("kerberos", "forwardable"): forwardable = "-f" else: forwardable = "-F" if conf.getboolean("kerberos", "include_ip"): include_ip = "-a" else: include_ip = "-A" cmdv: list[str] = [ conf.get_mandatory_value("kerberos", "kinit_path"), forwardable, include_ip, "-r", renewal_lifetime, "-k", # host ticket "-t", keytab, # specify keytab "-c", conf.get_mandatory_value("kerberos", "ccache"), # specify credentials cache cmd_principal, ] log.info("Re-initialising kerberos from keytab: %s", " ".join(shlex.quote(f) for f in cmdv)) with subprocess.Popen( cmdv, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, bufsize=-1, universal_newlines=True, ) as subp: subp.wait() if subp.returncode != 0: log.error( "Couldn't reinit from keytab! `kinit` exited with %s.\n%s\n%s", subp.returncode, "\n".join(subp.stdout.readlines() if subp.stdout else []), "\n".join(subp.stderr.readlines() if subp.stderr else []), ) if exit_on_fail: sys.exit(subp.returncode) else: return subp.returncode if detect_conf_var(): # (From: HUE-640). Kerberos clock have seconds level granularity. Make sure we # renew the ticket after the initial valid time. 
time.sleep(1.5) ret = perform_krb181_workaround(cmd_principal) if exit_on_fail and ret != 0: sys.exit(ret) else: return ret return 0 def perform_krb181_workaround(principal: str): """ Workaround for Kerberos 1.8.1. :param principal: principal name :return: None """ cmdv: list[str] = [ conf.get_mandatory_value("kerberos", "kinit_path"), "-c", conf.get_mandatory_value("kerberos", "ccache"), "-R", ] # Renew ticket_cache log.info("Renewing kerberos ticket to work around kerberos 1.8.1: %s", " ".join(cmdv)) ret = subprocess.call(cmdv, close_fds=True) if ret != 0: principal = f"{principal or conf.get('kerberos', 'principal')}/{get_hostname()}" ccache = conf.get("kerberos", "ccache") log.error( "Couldn't renew kerberos ticket in order to work around Kerberos 1.8.1 issue. Please check that " "the ticket for '%s' is still renewable:\n $ kinit -f -c %s\nIf the 'renew until' date is the " "same as the 'valid starting' date, the ticket cannot be renewed. Please check your KDC " "configuration, and the ticket renewal policy (maxrenewlife) for the '%s' and `krbtgt' " "principals.", principal, ccache, principal, ) return ret @cache def detect_conf_var() -> bool: """ Autodetect the Kerberos ticket configuration. Return true if the ticket cache contains "conf" information as is found in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the Sun Java Krb5LoginModule in Java6, so we need to take an action to work around it. """ ticket_cache = conf.get_mandatory_value("kerberos", "ccache") with open(ticket_cache, "rb") as file: # Note: this file is binary, so we check against a bytearray. return b"X-CACHECONF:" in file.read() def run(principal: str | None, keytab: str, mode: KerberosMode = KerberosMode.STANDARD): """ Run the kerberos renewer. 
:param principal: principal name :param keytab: keytab file :param mode: mode to run the airflow kerberos in :return: None """ if not keytab: log.warning("Keytab renewer not starting, no keytab configured") sys.exit(0) log.info("Using airflow kerberos with mode: %s", mode.value) if mode == KerberosMode.STANDARD: while True: renew_from_kt(principal, keytab) time.sleep(conf.getint("kerberos", "reinit_frequency")) elif mode == KerberosMode.ONE_TIME: renew_from_kt(principal, keytab)
KerberosMode
python
huggingface__transformers
src/transformers/models/rembert/modeling_rembert.py
{ "start": 44401, "end": 46824 }
class ____(RemBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.rembert = RemBertModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.rembert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring
RemBertForTokenClassification
python
getsentry__sentry
src/sentry/rules/conditions/event_attribute.py
{ "start": 9720, "end": 10142 }
class ____(AttributeHandler): minimum_path_length = 2 @classmethod def _handle(cls, path: list[str], event: GroupEvent) -> list[str]: if path[1] not in ("id", "ip_address", "email", "username"): return [] result = getattr(event.interfaces.get("user", {}), path[1], None) return [result] if result is not None else [] @attribute_registry.register("http")
UserAttributeHandler
python
getsentry__sentry
tests/sentry/notifications/api/endpoints/test_user_notification_settings_providers.py
{ "start": 4084, "end": 5935 }
class ____(UserNotificationSettingsProvidersBaseTest): method = "PUT" def setUp(self) -> None: super().setUp() self.login_as(self.user) def test_simple(self) -> None: response = self.get_success_response( "me", user_id=self.user.id, scope_type="organization", scope_identifier=self.organization.id, type="alerts", status_code=status.HTTP_201_CREATED, value="always", providers=["slack"], ) assert NotificationSettingProvider.objects.filter( user_id=self.user.id, scope_type=NotificationScopeEnum.ORGANIZATION.value, scope_identifier=self.organization.id, type=NotificationSettingEnum.ISSUE_ALERTS.value, value=NotificationSettingsOptionEnum.ALWAYS.value, provider=ExternalProviderEnum.SLACK.value, ).exists() assert len(response.data) == 3 def test_invalid_scope_type(self) -> None: response = self.get_error_response( "me", user_id=self.user.id, scope_type="project", scope_identifier=self.project.id, type="alerts", status_code=status.HTTP_400_BAD_REQUEST, providers=["slack"], ) assert response.data["scopeType"] == ["Invalid scope type"] def test_invalid_provider(self) -> None: response = self.get_error_response( "me", user_id=self.user.id, scope_type="organization", scope_identifier=self.organization.id, type="alerts", status_code=status.HTTP_400_BAD_REQUEST, providers=["github"], ) assert response.data["providers"] == ["Invalid provider"]
UserNotificationSettingsProvidersPutTest
python
langchain-ai__langchain
libs/langchain/langchain_classic/chains/qa_with_sources/loading.py
{ "start": 1129, "end": 7833 }
class ____(Protocol): """Interface for loading the combine documents chain.""" def __call__( self, llm: BaseLanguageModel, **kwargs: Any, ) -> BaseCombineDocumentsChain: """Callable to load the combine documents chain.""" def _load_map_rerank_chain( llm: BaseLanguageModel, *, prompt: BasePromptTemplate = MAP_RERANK_PROMPT, verbose: bool = False, document_variable_name: str = "context", rank_key: str = "score", answer_key: str = "answer", **kwargs: Any, ) -> MapRerankDocumentsChain: llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose) return MapRerankDocumentsChain( llm_chain=llm_chain, rank_key=rank_key, answer_key=answer_key, document_variable_name=document_variable_name, **kwargs, ) def _load_stuff_chain( llm: BaseLanguageModel, *, prompt: BasePromptTemplate = stuff_prompt.PROMPT, document_prompt: BasePromptTemplate = stuff_prompt.EXAMPLE_PROMPT, document_variable_name: str = "summaries", verbose: bool | None = None, **kwargs: Any, ) -> StuffDocumentsChain: llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose) return StuffDocumentsChain( llm_chain=llm_chain, document_variable_name=document_variable_name, document_prompt=document_prompt, verbose=verbose, **kwargs, ) def _load_map_reduce_chain( llm: BaseLanguageModel, *, question_prompt: BasePromptTemplate = map_reduce_prompt.QUESTION_PROMPT, combine_prompt: BasePromptTemplate = map_reduce_prompt.COMBINE_PROMPT, document_prompt: BasePromptTemplate = map_reduce_prompt.EXAMPLE_PROMPT, combine_document_variable_name: str = "summaries", map_reduce_document_variable_name: str = "context", collapse_prompt: BasePromptTemplate | None = None, reduce_llm: BaseLanguageModel | None = None, collapse_llm: BaseLanguageModel | None = None, verbose: bool | None = None, token_max: int = 3000, **kwargs: Any, ) -> MapReduceDocumentsChain: map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose) _reduce_llm = reduce_llm or llm reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, 
verbose=verbose) combine_documents_chain = StuffDocumentsChain( llm_chain=reduce_chain, document_variable_name=combine_document_variable_name, document_prompt=document_prompt, verbose=verbose, ) if collapse_prompt is None: collapse_chain = None if collapse_llm is not None: msg = ( "collapse_llm provided, but collapse_prompt was not: please " "provide one or stop providing collapse_llm." ) raise ValueError(msg) else: _collapse_llm = collapse_llm or llm collapse_chain = StuffDocumentsChain( llm_chain=LLMChain( llm=_collapse_llm, prompt=collapse_prompt, verbose=verbose, ), document_variable_name=combine_document_variable_name, document_prompt=document_prompt, ) reduce_documents_chain = ReduceDocumentsChain( combine_documents_chain=combine_documents_chain, collapse_documents_chain=collapse_chain, token_max=token_max, verbose=verbose, ) return MapReduceDocumentsChain( llm_chain=map_chain, reduce_documents_chain=reduce_documents_chain, document_variable_name=map_reduce_document_variable_name, verbose=verbose, **kwargs, ) def _load_refine_chain( llm: BaseLanguageModel, *, question_prompt: BasePromptTemplate = refine_prompts.DEFAULT_TEXT_QA_PROMPT, refine_prompt: BasePromptTemplate = refine_prompts.DEFAULT_REFINE_PROMPT, document_prompt: BasePromptTemplate = refine_prompts.EXAMPLE_PROMPT, document_variable_name: str = "context_str", initial_response_name: str = "existing_answer", refine_llm: BaseLanguageModel | None = None, verbose: bool | None = None, **kwargs: Any, ) -> RefineDocumentsChain: initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose) _refine_llm = refine_llm or llm refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose) return RefineDocumentsChain( initial_llm_chain=initial_chain, refine_llm_chain=refine_chain, document_variable_name=document_variable_name, initial_response_name=initial_response_name, document_prompt=document_prompt, verbose=verbose, **kwargs, ) @deprecated( since="0.2.13", removal="1.0", message=( 
"This function is deprecated. Refer to this guide on retrieval and question " "answering with sources: " "https://python.langchain.com/docs/how_to/qa_sources/" "\nSee also the following migration guides for replacements " "based on `chain_type`:\n" "stuff: https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain\n" "map_reduce: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain\n" "refine: https://python.langchain.com/docs/versions/migrating_chains/refine_chain\n" "map_rerank: https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain\n" ), ) def load_qa_with_sources_chain( llm: BaseLanguageModel, chain_type: str = "stuff", verbose: bool | None = None, # noqa: FBT001 **kwargs: Any, ) -> BaseCombineDocumentsChain: """Load a question answering with sources chain. Args: llm: Language Model to use in the chain. chain_type: Type of document combining chain to use. Should be one of "stuff", "map_reduce", "refine" and "map_rerank". verbose: Whether chains should be run in verbose mode or not. Note that this applies to all chains that make up the final chain. **kwargs: Additional keyword arguments. Returns: A chain to use for question answering with sources. """ loader_mapping: Mapping[str, LoadingCallable] = { "stuff": _load_stuff_chain, "map_reduce": _load_map_reduce_chain, "refine": _load_refine_chain, "map_rerank": _load_map_rerank_chain, } if chain_type not in loader_mapping: msg = ( f"Got unsupported chain type: {chain_type}. " f"Should be one of {loader_mapping.keys()}" ) raise ValueError(msg) _func: LoadingCallable = loader_mapping[chain_type] return _func(llm, verbose=verbose, **kwargs)
LoadingCallable
python
PyCQA__bandit
tests/unit/formatters/test_html.py
{ "start": 370, "end": 5746 }
class ____(testtools.TestCase): def setUp(self): super().setUp() conf = config.BanditConfig() self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.manager.out_file = self.tmp_fname def test_report_with_skipped(self): self.manager.skipped = [("abc.py", "File is bad")] with open(self.tmp_fname, "w") as tmp_file: b_html.report(self.manager, tmp_file, bandit.LOW, bandit.LOW) with open(self.tmp_fname) as f: soup = bs4.BeautifulSoup(f.read(), "html.parser") skipped = soup.find_all("div", id="skipped")[0] self.assertEqual(1, len(soup.find_all("div", id="skipped"))) self.assertIn("abc.py", skipped.text) self.assertIn("File is bad", skipped.text) @mock.patch("bandit.core.issue.Issue.get_code") @mock.patch("bandit.core.manager.BanditManager.get_issue_list") def test_report_contents(self, get_issue_list, get_code): self.manager.metrics.data["_totals"] = {"loc": 1000, "nosec": 50} issue_a = _get_issue_instance(severity=bandit.LOW) issue_a.fname = "abc.py" issue_a.test = "AAAAAAA" issue_a.text = "BBBBBBB" issue_a.confidence = "CCCCCCC" # don't need to test severity, it determines the color which we're # testing separately issue_b = _get_issue_instance(severity=bandit.MEDIUM) issue_c = _get_issue_instance(severity=bandit.HIGH) issue_x = _get_issue_instance() get_code.return_value = "some code" issue_y = _get_issue_instance() get_issue_list.return_value = collections.OrderedDict( [ (issue_a, [issue_x, issue_y]), (issue_b, [issue_x]), (issue_c, [issue_y]), ] ) with open(self.tmp_fname, "w") as tmp_file: b_html.report(self.manager, tmp_file, bandit.LOW, bandit.LOW) with open(self.tmp_fname) as f: soup = bs4.BeautifulSoup(f.read(), "html.parser") self.assertEqual("1000", soup.find_all("span", id="loc")[0].text) self.assertEqual("50", soup.find_all("span", id="nosec")[0].text) issue1 = soup.find_all("div", id="issue-0")[0] issue2 = soup.find_all("div", id="issue-1")[0] issue3 = soup.find_all("div", id="issue-2")[0] # make sure the 
class has been applied properly self.assertEqual( 1, len(issue1.find_all("div", {"class": "issue-sev-low"})) ) self.assertEqual( 1, len(issue2.find_all("div", {"class": "issue-sev-medium"})) ) self.assertEqual( 1, len(issue3.find_all("div", {"class": "issue-sev-high"})) ) # issue1 has a candidates section with 2 candidates in it self.assertEqual( 1, len(issue1.find_all("div", {"class": "candidates"})) ) self.assertEqual( 2, len(issue1.find_all("div", {"class": "candidate"})) ) # issue2 doesn't have candidates self.assertEqual( 0, len(issue2.find_all("div", {"class": "candidates"})) ) self.assertEqual( 0, len(issue2.find_all("div", {"class": "candidate"})) ) # issue1 doesn't have code issue 2 and 3 do self.assertEqual(0, len(issue1.find_all("div", {"class": "code"}))) self.assertEqual(1, len(issue2.find_all("div", {"class": "code"}))) self.assertEqual(1, len(issue3.find_all("div", {"class": "code"}))) # issue2 code and issue1 first candidate have code element1 = issue1.find_all("div", {"class": "candidate"}) self.assertIn("some code", element1[0].text) element2 = issue2.find_all("div", {"class": "code"}) self.assertIn("some code", element2[0].text) # make sure correct things are being output in issues self.assertIn("AAAAAAA:", issue1.text) self.assertIn("BBBBBBB", issue1.text) self.assertIn("CCCCCCC", issue1.text) self.assertIn("abc.py", issue1.text) self.assertIn("Line number: 1", issue1.text) @mock.patch("bandit.core.issue.Issue.get_code") @mock.patch("bandit.core.manager.BanditManager.get_issue_list") def test_escaping(self, get_issue_list, get_code): self.manager.metrics.data["_totals"] = {"loc": 1000, "nosec": 50} marker = "<tag in code>" issue_a = _get_issue_instance() issue_x = _get_issue_instance() get_code.return_value = marker get_issue_list.return_value = {issue_a: [issue_x]} with open(self.tmp_fname, "w") as tmp_file: b_html.report(self.manager, tmp_file, bandit.LOW, bandit.LOW) with open(self.tmp_fname) as f: contents = f.read() self.assertNotIn(marker, 
contents) def _get_issue_instance( severity=bandit.MEDIUM, cwe=123, confidence=bandit.MEDIUM ): new_issue = issue.Issue(severity, cwe, confidence, "Test issue") new_issue.fname = "code.py" new_issue.test = "bandit_plugin" new_issue.lineno = 1 return new_issue
HtmlFormatterTests
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/translate.py
{ "start": 21535, "end": 24836 }
class ____(GoogleCloudBaseOperator): """ Get a list of native Google Cloud Translation datasets in a project. Get project's list of `native` translation datasets, using API V3. For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:TranslateDatasetsListOperator`. :param project_id: ID of the Google Cloud project where dataset is located. If not provided default project_id is used. :param location: The location of the project. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. :param gcp_conn_id: The connection ID to use connecting to Google Cloud. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). 
""" template_fields: Sequence[str] = ( "location", "project_id", "gcp_conn_id", "impersonation_chain", ) operator_extra_links = (TranslationDatasetsListLink(),) def __init__( self, *, project_id: str = PROVIDE_PROJECT_ID, location: str, metadata: Sequence[tuple[str, str]] = (), timeout: float | _MethodDefault = DEFAULT, retry: Retry | _MethodDefault = DEFAULT, gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.project_id = project_id self.location = location self.metadata = metadata self.timeout = timeout self.retry = retry self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context: Context): hook = TranslateHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, ) project_id = self.project_id or hook.project_id TranslationDatasetsListLink.persist( context=context, project_id=project_id, ) self.log.info("Requesting datasets list") results_pager = hook.list_datasets( location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) result_ids = [] for ds_item in results_pager: ds_data = type(ds_item).to_dict(ds_item) ds_id = hook.extract_object_id(ds_data) result_ids.append(ds_id) self.log.info("Fetching the datasets list complete.") return result_ids
TranslateDatasetsListOperator
python
tensorflow__tensorflow
tensorflow/python/distribute/multi_process_runner.py
{ "start": 30614, "end": 31577 }
class ____(multi_process_lib.Process): """A modified `multiprocessing.Process` that can set up environment variables.""" # TODO(crccw): consider moving other logics in _ProcFunc to _Process. def __init__(self, test_env, **kwargs): super(_Process, self).__init__(**kwargs) self._test_env = test_env self._actual_run = getattr(self, 'run') self.run = self._run_with_setenv def _run_with_setenv(self): # We need to set environment variables before doing anything because # setenv() is not thread-safe. test_env = self._test_env if test_env.grpc_fail_fast is not None: os.environ['GRPC_FAIL_FAST'] = str(test_env.grpc_fail_fast) if test_env.visible_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = ','.join( [str(i) for i in test_env.visible_gpus]) _set_tf_config(test_env.task_type, test_env.task_id, test_env.cluster_spec, test_env.rpc_layer) return self._actual_run()
_Process
python
readthedocs__readthedocs.org
readthedocs/projects/views/private.py
{ "start": 24737, "end": 24791 }
class ____(WebHookMixin, ListView): pass
WebHookList
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol7.py
{ "start": 196, "end": 251 }
class ____(Protocol): name: str @runtime_checkable
P1
python
django__django
tests/template_tests/test_engine.py
{ "start": 3602, "end": 5050 }
class ____(SimpleTestCase): def test_origin(self): engine = Engine(dirs=[TEMPLATE_DIR], debug=True) template = engine.get_template("index.html") self.assertEqual(template.origin.template_name, "index.html") def test_loader_priority(self): """ #21460 -- The order of template loader works. """ loaders = [ "django.template.loaders.filesystem.Loader", "django.template.loaders.app_directories.Loader", ] engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders) template = engine.get_template("priority/foo.html") self.assertEqual(template.render(Context()), "priority\n") def test_cached_loader_priority(self): """ The order of template loader works. Refs #21460. """ loaders = [ ( "django.template.loaders.cached.Loader", [ "django.template.loaders.filesystem.Loader", "django.template.loaders.app_directories.Loader", ], ), ] engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders) template = engine.get_template("priority/foo.html") self.assertEqual(template.render(Context()), "priority\n") template = engine.get_template("priority/foo.html") self.assertEqual(template.render(Context()), "priority\n")
LoaderTests
python
facebook__pyre-check
scripts/tests/shape_type_coverage_test.py
{ "start": 4275, "end": 6018 }
class ____(unittest.TestCase): def assert_is_tensor(self, parametric: ParametricType) -> None: self.assertTrue(_is_tensor(parametric)) def assert_is_not_tensor(self, parametric: ParametricType) -> None: self.assertFalse(_is_tensor(parametric)) def test_is_tensor(self) -> None: self.assert_is_tensor(ParametricType("torch.Tensor", [])) self.assert_is_tensor( ParametricType( "torch.Tensor", [ "torch.float32", "typing_extensions.Literal[5]", "typing_extensions.Literal[2]", ], ) ) self.assert_is_tensor( ParametricType("torch.Tensor", ["torch.float32", "int", "int"]) ) self.assert_is_tensor( ParametricType( "torch.Tensor", ["torch.float32", "typing_extensions.Literal[5]", "int"] ) ) self.assert_is_tensor( ParametricType( "torch.Tensor", [ "torch.float32", "typing_extensions.Literal[5]", "pyre_extensions.IntExpression[5 + 3(N1//2)]", "Variable[N (bound to int)]", "*Tuple[Variable[N (bound to int)], typing_extensions.Literal[5], \ pyre_extensions.IntExpression[2N - 5]]", ], ) ) self.assert_is_tensor(ParametricType("torch.Tensor", [])) def test_is_not_tensor(self) -> None: self.assert_is_not_tensor(ParametricType("torch.TensorLike", [])) self.assert_is_not_tensor(ParametricType("typing_extensions.Literal", ["5"]))
IsTensorTest
python
pola-rs__polars
py-polars/src/polars/io/cloud/credential_provider/_builder.py
{ "start": 7956, "end": 17214 }
class ____(CredentialProviderBuilderImpl): def __init__(self, cls: Any, **kw: Any) -> None: self.cls = cls self.kw = kw self._cache_key: NoPickleOption[bytes] = NoPickleOption() def __call__(self) -> CredentialProviderFunction | None: # This is used for credential_provider="auto", which allows for # ImportErrors. try: return _build_with_cache( self.get_or_init_cache_key, lambda: self.cls(**self.kw), ) except ImportError as e: if verbose(): eprint(f"failed to auto-initialize {self.provider_repr}: {e!r}") return None def get_or_init_cache_key(self) -> bytes: cache_key = self._cache_key.get() if cache_key is None: cache_key = self.get_cache_key_impl() self._cache_key.set(cache_key) if verbose(): eprint(f"{self!r}: AutoInit cache key: {cache_key.hex()}") return cache_key def get_cache_key_impl(self) -> bytes: import hashlib import pickle hash = hashlib.sha256(pickle.dumps(self)) return hash.digest()[:16] @property def provider_repr(self) -> str: return self.cls.__name__ DEFAULT_CREDENTIAL_PROVIDER: CredentialProviderFunction | Literal["auto"] | None = ( "auto" ) def _init_credential_provider_builder( credential_provider: CredentialProviderFunction | CredentialProviderBuilder | Literal["auto"] | None, source: Any, storage_options: dict[str, Any] | None, caller_name: str, ) -> CredentialProviderBuilder | None: def f() -> CredentialProviderBuilder | None: # Note: The behavior of this function should depend only on the function # parameters. Any environment-specific behavior should take place inside # instantiated credential providers. from polars.io.cloud._utils import ( _first_scan_path, _get_path_scheme, _is_aws_cloud, _is_azure_cloud, _is_gcp_cloud, ) if credential_provider is None: return None if isinstance(credential_provider, CredentialProviderBuilder): # This happens when the catalog client auto-inits and passes it to # scan/write_delta, which calls us again. 
return credential_provider if credential_provider != "auto": msg = f"the `credential_provider` parameter of `{caller_name}` is considered unstable." issue_unstable_warning(msg) return CredentialProviderBuilder.from_initialized_provider( credential_provider ) if DEFAULT_CREDENTIAL_PROVIDER is None: return None if (first_scan_path := _first_scan_path(source)) is None: return None if (scheme := _get_path_scheme(first_scan_path)) is None: return None def get_default_credential_provider() -> CredentialProviderBuilder | None: return ( CredentialProviderBuilder.from_initialized_provider( DEFAULT_CREDENTIAL_PROVIDER ) if DEFAULT_CREDENTIAL_PROVIDER != "auto" else None ) if _is_azure_cloud(scheme): tenant_id = None storage_account = None if storage_options is not None: for k, v in storage_options.items(): k = k.lower() # https://docs.rs/object_store/latest/object_store/azure/enum.AzureConfigKey.html if k in { "azure_storage_tenant_id", "azure_storage_authority_id", "azure_tenant_id", "azure_authority_id", "tenant_id", "authority_id", }: tenant_id = v elif k in {"azure_storage_account_name", "account_name"}: storage_account = v elif k in {"azure_use_azure_cli", "use_azure_cli"}: continue elif k in OBJECT_STORE_CLIENT_OPTIONS: continue else: # We assume some sort of access key was given, so we # just dispatch to the rust side. 
return None storage_account = ( # Prefer the one embedded in the path CredentialProviderAzure._extract_adls_uri_storage_account( str(first_scan_path) ) or storage_account ) if (default := get_default_credential_provider()) is not None: return default return CredentialProviderBuilder( AutoInit( CredentialProviderAzure, tenant_id=tenant_id, _storage_account=storage_account, ) ) elif _is_aws_cloud(scheme=scheme, first_scan_path=str(first_scan_path)): region = None profile = None default_region = None unhandled_key = None has_endpoint_url = False if storage_options is not None: for k, v in storage_options.items(): k = k.lower() # https://docs.rs/object_store/latest/object_store/aws/enum.AmazonS3ConfigKey.html if k in {"aws_region", "region"}: region = v elif k in {"aws_default_region", "default_region"}: default_region = v elif k in {"aws_profile", "profile"}: profile = v elif k in { "aws_endpoint", "aws_endpoint_url", "endpoint", "endpoint_url", }: has_endpoint_url = True elif k in {"aws_request_payer", "request_payer"}: continue elif k in OBJECT_STORE_CLIENT_OPTIONS: continue else: # We assume this is some sort of access key unhandled_key = k if unhandled_key is not None: if profile is not None: msg = ( "unsupported: cannot combine aws_profile with " f"{unhandled_key} in storage_options" ) raise ValueError(msg) if ( unhandled_key is None and (default := get_default_credential_provider()) is not None ): return default return CredentialProviderBuilder( AutoInit( CredentialProviderAWS, profile_name=profile, region_name=region or default_region, _auto_init_unhandled_key=unhandled_key, _storage_options_has_endpoint_url=has_endpoint_url, ) ) elif _is_gcp_cloud(scheme): token = None unhandled_key = None if storage_options is not None: for k, v in storage_options.items(): k = k.lower() # https://docs.rs/object_store/latest/object_store/gcp/enum.GoogleConfigKey.html if k in {"token", "bearer_token"}: token = v elif k in { "google_bucket", "google_bucket_name", "bucket", 
"bucket_name", }: continue elif k in OBJECT_STORE_CLIENT_OPTIONS: continue else: # We assume some sort of access key was given, so we # just dispatch to the rust side. unhandled_key = k if unhandled_key is not None: if token is not None: msg = ( "unsupported: cannot combine token with " f"{unhandled_key} in storage_options" ) raise ValueError(msg) return None if token is not None: return CredentialProviderBuilder( InitializedCredentialProvider(UserProvidedGCPToken(token)) ) if (default := get_default_credential_provider()) is not None: return default return CredentialProviderBuilder(AutoInit(CredentialProviderGCP)) return None credential_provider_init = f() if verbose(): eprint(f"_init_credential_provider_builder(): {credential_provider_init = !r}") return credential_provider_init
AutoInit
python
allegroai__clearml
clearml/backend_interface/task/populate.py
{ "start": 383, "end": 32861 }
class ____(object): _VCS_SSH_REGEX = ( "^" "(?:(?P<user>{regular}*?)@)?" "(?P<host>{regular}*?)" ":" "(?P<path>{regular}.*)?" "$".format(regular=r"[^/@:#]") ) def __init__( self, project_name: Optional[str] = None, task_name: Optional[str] = None, task_type: Optional[str] = None, repo: Optional[str] = None, branch: Optional[str] = None, commit: Optional[str] = None, script: Optional[str] = None, working_directory: Optional[str] = None, module: Optional[str] = None, packages: Optional[Union[bool, Sequence[str]]] = None, requirements_file: Optional[Union[str, Path]] = None, docker: Optional[str] = None, docker_args: Optional[str] = None, docker_bash_setup_script: Optional[str] = None, output_uri: Optional[str] = None, base_task_id: Optional[str] = None, add_task_init_call: bool = True, force_single_script_file: bool = False, raise_on_missing_entries: bool = False, verbose: bool = False, binary: Optional[str] = None, detect_repository: bool = True ) -> None: """ Create a new Task from an existing code base. If the code does not already contain a call to Task.init, pass add_task_init_call=True, and the code will be patched in remote execution (i.e. when executed by `clearml-agent` :param project_name: Set the project name for the task. Required if base_task_id is None. :param task_name: Set the name of the remote task. Required if base_task_id is None. :param task_type: Optional, The task type to be created. 
Supported values: 'training', 'testing', 'inference', 'data_processing', 'application', 'monitor', 'controller', 'optimizer', 'service', 'qc', 'custom' :param repo: Remote URL for the repository to use, OR path to local copy of the git repository Example: 'https://github.com/allegroai/clearml.git' or '~/project/repo' :param branch: Select specific repository branch/tag (implies the latest commit from the branch) :param commit: Select specific commit id to use (default: latest commit, or when used with local repository matching the local commit id) :param script: Specify the entry point script for the remote execution. When used in tandem with remote git repository the script should be a relative path inside the repository, for example: './source/train.py' . When used with local repository path it supports a direct path to a file inside the local repository itself, for example: '~/project/source/train.py' :param module: If specified instead of executing `script`, a module named `module` is executed. Implies script is empty. Module can contain multiple argument for execution, for example: module="my.module arg1 arg2" :param working_directory: Working directory to launch the script from. Default: repository root folder. Relative to repo root or local folder. :param packages: Manually specify a list of required packages. Example: ["tqdm>=2.1", "scikit-learn"] or `True` to automatically create requirements based on locally installed packages (repository must be local). Pass an empty string to not install any packages (not even from the repository) :param requirements_file: Specify requirements.txt file to install when setting the session. If not provided, the requirements.txt from the repository will be used. 
:param docker: Select the docker image to be executed in by the remote session :param docker_args: Add docker arguments, pass a single string :param docker_bash_setup_script: Add bash script to be executed inside the docker before setting up the Task's environment :param output_uri: Optional, set the Tasks's output_uri (Storage destination). examples: 's3://bucket/folder', 'https://server/' , 'gs://bucket/folder', 'azure://bucket', '/folder/' :param base_task_id: Use a pre-existing task in the system, instead of a local repo/script. Essentially clones an existing task and overrides arguments/requirements. :param add_task_init_call: If True, a 'Task.init()' call is added to the script entry point in remote execution. :param force_single_script_file: If True, do not auto-detect local repository :param raise_on_missing_entries: If True, raise ValueError on missing entries when populating :param verbose: If True, print verbose logging :param binary: Binary used to launch the entry point :param detect_repository: If True, detect the repository if no repository has been specified. If False, don't detect repository under any circumstance. 
Ignored if `repo` is specified """ if repo and len(urlparse(repo).scheme) <= 1 and not re.compile(self._VCS_SSH_REGEX).match(repo): folder = repo repo = None else: folder = None if script and module: raise ValueError("Entry point script or module need to be specified not both") if raise_on_missing_entries and not base_task_id: if not script and not module: raise ValueError("Entry point script not provided") if not repo and not folder and (script and not Path(script).is_file()): raise ValueError("Script file '{}' could not be found".format(script)) if raise_on_missing_entries and commit and branch: raise ValueError( "Specify either a branch/tag or specific commit id, not both (either --commit or --branch)" ) if raise_on_missing_entries and not folder and working_directory and working_directory.startswith("/"): raise ValueError("working directory '{}', must be relative to repository root") if requirements_file and not Path(requirements_file).is_file(): raise ValueError("requirements file could not be found '{}'") self.folder = folder self.commit = commit self.branch = branch self.repo = repo self.script = script self.module = module self.cwd = working_directory assert not packages or isinstance(packages, (tuple, list, bool)) if isinstance(packages, bool): self.packages = True if packages else None elif packages: self.packages = list(packages) else: self.packages = packages self.requirements_file = Path(requirements_file) if requirements_file else None self.base_task_id = base_task_id self.docker = dict(image=docker, args=docker_args, bash_script=docker_bash_setup_script) self.add_task_init_call = add_task_init_call self.project_name = project_name self.task_name = task_name self.task_type = task_type self.output_uri = output_uri self.task = None self.force_single_script_file = bool(force_single_script_file) self.raise_on_missing_entries = raise_on_missing_entries self.verbose = verbose self.binary = binary self.detect_repository = detect_repository def 
create_task(self, dry_run: bool = False) -> Union[Task, Dict]: """ Create the new populated Task :param dry_run: Optional, If True, do not create an actual Task, instead return the Task definition as dict :return: newly created Task object """ local_entry_file = None repo_info = None stand_alone_script_outside_repo = False entry_point = "" # populate from local repository / script if self.folder or (self.script and Path(self.script).is_file() and not self.repo): self.folder = os.path.expandvars(os.path.expanduser(self.folder)) if self.folder else None self.script = os.path.expandvars(os.path.expanduser(self.script)) if self.script else None self.cwd = os.path.expandvars(os.path.expanduser(self.cwd)) if self.cwd else None if self.module: entry_point = "-m {}".format(self.module) # we must have a folder if we are here local_entry_file = self.folder.rstrip("/") + "/." else: if Path(self.script).is_file(): entry_point = self.script else: entry_point = (Path(self.folder) / self.script).as_posix() entry_point = os.path.abspath(entry_point) try: if entry_point and Path(entry_point).is_file() and self.folder and Path(self.folder).is_dir(): # make sure we raise exception if this is outside the local repo folder entry_point = (Path(entry_point) / (Path(entry_point).relative_to(self.folder))).as_posix() except ValueError: entry_point = self.folder stand_alone_script_outside_repo = True if not os.path.isfile(entry_point) and not stand_alone_script_outside_repo: if ( not Path(self.script).is_absolute() and not Path(self.cwd).is_absolute() and (Path(self.folder) / self.cwd / self.script).is_file() ): entry_point = (Path(self.folder) / self.cwd / self.script).as_posix() elif ( Path(self.cwd).is_absolute() and not Path(self.script).is_absolute() and (Path(self.cwd) / self.script).is_file() ): entry_point = (Path(self.cwd) / self.script).as_posix() else: raise ValueError("Script entrypoint file '{}' could not be found".format(entry_point)) local_entry_file = entry_point if 
self.detect_repository: repo_info, requirements = ScriptInfo.get( filepaths=[local_entry_file], log=getLogger(), create_requirements=self.packages is True, uncommitted_from_remote=True, detect_jupyter_notebook=False, add_missing_installed_packages=True, detailed_req_report=False, force_single_script=self.force_single_script_file, ) else: repo_info, requirements = None, None if stand_alone_script_outside_repo: # if we have a standalone script and a local repo we skip[ the local diff and store it local_entry_file = Path(self.script).as_posix() if self.detect_repository: a_create_requirements = self.packages is True a_repo_info, a_requirements = ScriptInfo.get( filepaths=[Path(self.script).as_posix()], log=getLogger(), create_requirements=a_create_requirements, uncommitted_from_remote=True, detect_jupyter_notebook=False, add_missing_installed_packages=True, detailed_req_report=False, force_single_script=True, ) if repo_info.script["diff"]: print( "Warning: local git repo diff is ignored, " "storing only the standalone script form {}".format(self.script) ) repo_info.script["diff"] = a_repo_info.script["diff"] or "" repo_info.script["entry_point"] = a_repo_info.script["entry_point"] if a_create_requirements: repo_info.script["requirements"] = a_repo_info.script.get("requirements") or {} # check if we have no repository and no requirements raise error if ( self.raise_on_missing_entries and (self.requirements_file is None and self.packages is None) and not self.repo and (not repo_info or not repo_info.script or not repo_info.script.get("repository")) and (not entry_point or not entry_point.endswith(".sh")) ): raise ValueError("Standalone script detected '{}', but no requirements provided".format(self.script)) if dry_run: task = None task_state: dict = dict( name=self.task_name, project=Task.get_project_id(self.project_name), type=str(self.task_type or Task.TaskTypes.training), ) if self.output_uri is not None: task_state["output"] = dict(destination=self.output_uri) else: 
task_state = dict(script={}) if self.base_task_id: if self.verbose: print("Cloning task {}".format(self.base_task_id)) task = Task.clone( source_task=self.base_task_id, project=Task.get_project_id(self.project_name), ) self._set_output_uri(task) else: # noinspection PyProtectedMember task = Task._create( task_name=self.task_name, project_name=self.project_name, task_type=self.task_type or Task.TaskTypes.training, ) self._set_output_uri(task) # if there is nothing to populate, return if not any( [ self.folder, self.commit, self.branch, self.repo, self.script, self.module, self.cwd, self.packages, self.requirements_file, self.base_task_id, ] + (list(self.docker.values())) ): return task # clear the script section task_state["script"] = {} if repo_info: task_state["script"]["repository"] = repo_info.script["repository"] task_state["script"]["version_num"] = repo_info.script["version_num"] task_state["script"]["branch"] = repo_info.script["branch"] task_state["script"]["diff"] = repo_info.script["diff"] or "" task_state["script"]["working_dir"] = repo_info.script["working_dir"] task_state["script"]["entry_point"] = repo_info.script["entry_point"] task_state["script"]["binary"] = self.binary or ( "/bin/bash" if ( (repo_info.script["entry_point"] or "").lower().strip().endswith(".sh") and not (repo_info.script["entry_point"] or "").lower().strip().startswith("-m ") ) else repo_info.script["binary"] ) task_state["script"]["requirements"] = repo_info.script.get("requirements") or {} if self.cwd: cwd = self.cwd if not Path(cwd).is_absolute(): # cwd should be relative to the repo_root, but we need the full path # (repo_root + cwd) in order to resolve the entry point cwd = os.path.normpath((Path(repo_info.script["repo_root"]) / self.cwd).as_posix()) if not Path(cwd).is_dir(): # we need to leave it as is, we have no idea, and this is a repo cwd = self.cwd elif not Path(cwd).is_dir(): # we were passed an absolute dir and it does not exist raise ValueError("Working directory 
'{}' could not be found".format(cwd)) if self.module: entry_point = "-m {}".format(self.module) elif stand_alone_script_outside_repo: # this should be relative and the temp file we generated entry_point = repo_info.script["entry_point"] else: entry_point = os.path.normpath( Path(repo_info.script["repo_root"]) / repo_info.script["working_dir"] / repo_info.script["entry_point"] ) # resolve entry_point relative to the current working directory if Path(cwd).is_absolute(): entry_point = Path(entry_point).relative_to(cwd).as_posix() else: entry_point = repo_info.script["entry_point"] # restore cwd - make it relative to the repo_root again if Path(cwd).is_absolute(): # now cwd is relative again cwd = Path(cwd).relative_to(repo_info.script["repo_root"]).as_posix() # make sure we always have / (never \\) if platform == "win32": entry_point = entry_point.replace("\\", "/") if entry_point else "" cwd = cwd.replace("\\", "/") if cwd else "" task_state["script"]["entry_point"] = entry_point or "" task_state["script"]["working_dir"] = cwd or "." elif self.repo: cwd = "/".join([p for p in (self.cwd or ".").split("/") if p and p != "."]) # normalize backslashes and remove first one if self.module: entry_point = "-m {}".format(self.module) else: entry_point = "/".join([p for p in self.script.split("/") if p and p != "."]) if cwd and entry_point.startswith(cwd + "/"): entry_point = entry_point[len(cwd) + 1 :] task_state["script"]["repository"] = self.repo task_state["script"]["version_num"] = self.commit or None task_state["script"]["branch"] = self.branch or None task_state["script"]["diff"] = "" task_state["script"]["working_dir"] = cwd or "." 
task_state["script"]["entry_point"] = entry_point or "" if ( self.script and Path(self.script).is_file() and (self.force_single_script_file or Path(self.script).is_absolute()) ): self.force_single_script_file = True create_requirements = self.packages is True repo_info, requirements = ScriptInfo.get( filepaths=[Path(self.script).as_posix()], log=getLogger(), create_requirements=create_requirements, uncommitted_from_remote=True, detect_jupyter_notebook=False, add_missing_installed_packages=True, detailed_req_report=False, force_single_script=True, ) task_state["script"]["binary"] = self.binary or ( "/bin/bash" if ( (repo_info.script["entry_point"] or "").lower().strip().endswith(".sh") and not (repo_info.script["entry_point"] or "").lower().strip().startswith("-m ") ) else repo_info.script["binary"] ) task_state["script"]["diff"] = repo_info.script["diff"] or "" task_state["script"]["entry_point"] = repo_info.script["entry_point"] if create_requirements: task_state["script"]["requirements"] = repo_info.script.get("requirements") or {} else: if self.binary: task_state["script"]["binary"] = self.binary elif ( entry_point and entry_point.lower().strip().endswith(".sh") and not entry_point.lower().strip().startswith("-m") ): task_state["script"]["binary"] = "/bin/bash" else: # standalone task task_state["script"]["entry_point"] = ( self.script if self.script else ("-m {}".format(self.module) if self.module else "") ) task_state["script"]["working_dir"] = "." # update requirements reqs = [] if self.requirements_file: with open(self.requirements_file.as_posix(), "rt") as f: reqs = [line.strip() for line in f.readlines()] if self.packages and self.packages is not True: reqs += self.packages if self.packages == "" and len(reqs) == 0: reqs = [""] if reqs: # make sure we have clearml. 
clearml_found = False for line in reqs: if line.strip().startswith("#"): continue package = reduce(lambda a, b: a.split(b)[0], "#;@=~<>[", line).strip() if package == "clearml": clearml_found = True break if not clearml_found and reqs != [""]: reqs.append("clearml") task_state["script"]["requirements"] = {"pip": "\n".join(reqs)} elif not self.repo and repo_info and not repo_info.script.get("requirements"): # we are in local mode, make sure we have "requirements.txt" it is a must reqs_txt_file = Path(repo_info.script["repo_root"]) / "requirements.txt" poetry_toml_file = Path(repo_info.script["repo_root"]) / "pyproject.toml" if self.raise_on_missing_entries and not reqs_txt_file.is_file() and not poetry_toml_file.is_file(): raise ValueError( "requirements.txt not found [{}] " "Use --requirements or --packages".format(reqs_txt_file.as_posix()) ) if self.add_task_init_call: script_entry = ( "/" + task_state["script"].get("working_dir", ".") + "/" + task_state["script"]["entry_point"] ) if platform == "win32": script_entry = os.path.normpath(script_entry).replace("\\", "/") else: script_entry = os.path.abspath(script_entry) idx_a = 0 lines = None # find the right entry for the patch if we have a local file (basically after __future__ if ( local_entry_file and not stand_alone_script_outside_repo and not self.module and str(local_entry_file).lower().endswith(".py") ): with open(local_entry_file, "rt") as f: lines = f.readlines() future_found = self._locate_future_import(lines) if future_found >= 0: idx_a = future_found + 1 task_init_patch = "" if ( (self.repo or task_state.get("script", {}).get("repository")) and not self.force_single_script_file and not stand_alone_script_outside_repo ): # if we do not have requirements, add clearml to the requirements.txt if not reqs: task_init_patch += ( "diff --git a/requirements.txt b/requirements.txt\n" "--- a/requirements.txt\n" "+++ b/requirements.txt\n" "@@ -0,0 +1,1 @@\n" "+clearml\n" ) # Add Task.init call if not self.module 
and script_entry and str(script_entry).lower().endswith(".py"): task_init_patch += ( "diff --git a{script_entry} b{script_entry}\n" "--- a{script_entry}\n" "+++ b{script_entry}\n" "@@ -{idx_a},0 +{idx_b},4 @@\n" "+try: from allegroai import Task\n" "+except ImportError: from clearml import Task\n" '+(__name__ != "__main__") or Task.init()\n' "+\n".format(script_entry=script_entry, idx_a=idx_a, idx_b=idx_a + 1) ) elif self.module: # if we are here, do nothing pass elif local_entry_file and lines: # if we are here it means we do not have a git diff, but a single script file init_lines = [ "try: from allegroai import Task\n", "except ImportError: from clearml import Task\n", '(__name__ != "__main__") or Task.init()\n\n', ] task_state["script"]["diff"] = "".join(lines[:idx_a] + init_lines + lines[idx_a:]) # no need to add anything, we patched it. task_init_patch = "" elif str(script_entry or "").lower().endswith(".py"): # Add Task.init call # if we are here it means we do not have a git diff, but a single script file task_init_patch += ( "try: from allegroai import Task\n" "except ImportError: from clearml import Task\n" '(__name__ != "__main__") or Task.init()\n\n' ) task_state["script"]["diff"] = task_init_patch + task_state["script"].get("diff", "") task_init_patch = "" # make sure we add the diff at the end of the current diff task_state["script"]["diff"] = task_state["script"].get("diff", "") if task_state["script"]["diff"] and not task_state["script"]["diff"].endswith("\n"): task_state["script"]["diff"] += "\n" task_state["script"]["diff"] += task_init_patch # set base docker image if provided if self.docker: if dry_run: task_state["container"] = dict( image=self.docker.get("image") or "", arguments=self.docker.get("args") or "", setup_shell_script=self.docker.get("bash_script") or "", ) else: task.set_base_docker( docker_image=self.docker.get("image"), docker_arguments=self.docker.get("args"), docker_setup_bash_script=self.docker.get("bash_script"), ) if 
self.verbose: if task_state["script"].get("repository"): repo_details = { k: v for k, v in task_state["script"].items() if v and k not in ("diff", "requirements", "binary") } print("Repository Detected\n{}".format(json.dumps(repo_details, indent=2))) else: print("Standalone script detected\n Script: {}".format(self.script)) if task_state["script"].get("requirements") and task_state["script"]["requirements"].get("pip"): print( "Requirements:{}{}".format( "\n Using requirements.txt: {}".format(self.requirements_file.as_posix()) if self.requirements_file else "", "\n {}Packages: {}".format( "Additional " if self.requirements_file else "", self.packages, ) if self.packages else "", ) ) if self.docker: print("Base docker image: {}".format(self.docker)) if dry_run: return task_state # update the Task task.update_task(task_state) self.task = task return task def _set_output_uri(self, task: Task) -> None: if self.output_uri is not None: try: task.output_uri = self.output_uri except ValueError: getLogger().warning('Could not verify permission for output_uri: "{}"'.format(self.output_uri)) # do not verify the output uri (it might not be valid when we are creating the Task) task.storage_uri = self.output_uri def update_task_args( self, args: Optional[Union[Sequence[str], Sequence[Tuple[str, str]]]] = None, ) -> (): """ Update the newly created Task argparse Arguments If called before Task created, used for argument verification :param args: Arguments to pass to the remote execution, list of string pairs (argument, value) or list of strings '<argument>=<value>'. 
Example: ['lr=0.003', (batch_size, 64)] """ if not args: return # check args are in format <key>=<value> args_list = [] for a in args: if isinstance(a, (list, tuple)): assert len(a) == 2 args_list.append(a) continue try: parts = a.split("=", 1) assert len(parts) == 2 args_list.append(parts) except Exception: raise ValueError("Failed parsing argument '{}', arguments must be in '<key>=<value>' format") if not self.task: return task_params = self.task.get_parameters() args_list = {"Args/{}".format(k): v for k, v in args_list} task_params.update(args_list) self.task.set_parameters(task_params) def get_id(self) -> Optional[str]: """ :return: Return the created Task id (str) """ return self.task.id if self.task else None @staticmethod def _locate_future_import(lines: List[str]) -> int: """ :param lines: string lines of a python file :return: line index of the last __future_ import. return -1 if no __future__ was found """ # skip over the first two lines, they are ours # then skip over empty or comment lines lines = [ (i, line.split("#", 1)[0].rstrip()) for i, line in enumerate(lines) if line.strip("\r\n\t ") and not line.strip().startswith("#") ] # remove triple quotes ' """ ' nested_c = -1 skip_lines = [] for i, line_pair in enumerate(lines): for _ in line_pair[1].split('"""')[1:]: if nested_c >= 0: skip_lines.extend(list(range(nested_c, i + 1))) nested_c = -1 else: nested_c = i # now select all the lines = [pair for i, pair in enumerate(lines) if i not in skip_lines] from_future = re.compile(r"^from[\s]*__future__[\s]*") import_future = re.compile(r"^import[\s]*__future__[\s]*") # test if we have __future__ import found_index = -1 for a_i, (_, a_line) in enumerate(lines): if found_index >= a_i: continue if from_future.match(a_line) or import_future.match(a_line): found_index = a_i # check the last import block i, line = lines[found_index] # wither we have \\ character at the end of the line or the line is indented parenthesized_lines = "(" in line and ")" not in line 
while line.endswith("\\") or parenthesized_lines: found_index += 1 i, line = lines[found_index] if ")" in line: break else: break return found_index if found_index < 0 else lines[found_index][0]
CreateAndPopulate
python
pypa__warehouse
tests/unit/cache/origin/test_fastly.py
{ "start": 21042, "end": 21993 }
class ____: def test_purge_key_prints(self, capsys, metrics): purge_key = pretend.stub(delay=pretend.stub()) request = pretend.stub( registry=pretend.stub( settings={ "origin_cache.api_endpoint": "https://api.example.com", "origin_cache.api_key": "the api key", "origin_cache.service_id": "the service id", } ), task=lambda f: purge_key, ) cacher = fastly.NullFastlyCache.create_service(None, request) cacher.purge_key("one", metrics=metrics) captured = capsys.readouterr() expected = """ Origin cache purge issued: * URL: 'https://api.example.com/service/the service id/purge/one' * Headers: {'Accept': 'application/json', 'Fastly-Key': 'the api key', 'Fastly-Soft-Purge': '1'} """ # noqa assert captured.out.strip() == expected.strip()
TestNullFastlyCache
python
matplotlib__matplotlib
tools/triage_tests.py
{ "start": 2760, "end": 7752 }
class ____(QtWidgets.QDialog): """ The main dialog window. """ def __init__(self, entries): super().__init__() self.entries = entries self.current_entry = -1 self.current_thumbnail = -1 event_filter = EventFilter(self) self.installEventFilter(event_filter) # The list of files on the left-hand side. self.filelist = QtWidgets.QListWidget() self.filelist.setMinimumWidth(400) for entry in entries: self.filelist.addItem(entry.display) self.filelist.currentRowChanged.connect(self.set_entry) thumbnails_box = QtWidgets.QWidget() thumbnails_layout = QtWidgets.QVBoxLayout() self.thumbnails = [] for i, name in enumerate(('test', 'expected', 'diff')): thumbnail = Thumbnail(self, i, name) thumbnails_layout.addWidget(thumbnail) self.thumbnails.append(thumbnail) thumbnails_box.setLayout(thumbnails_layout) images_layout = QtWidgets.QVBoxLayout() images_box = QtWidgets.QWidget() self.image_display = QtWidgets.QLabel() self.image_display.setAlignment( QtCore.Qt.AlignmentFlag.AlignHCenter | QtCore.Qt.AlignmentFlag.AlignVCenter) self.image_display.setMinimumSize(800, 600) images_layout.addWidget(self.image_display, 6) images_box.setLayout(images_layout) buttons_box = QtWidgets.QWidget() buttons_layout = QtWidgets.QHBoxLayout() accept_button = QtWidgets.QPushButton("Accept (A)") accept_button.clicked.connect(self.accept_test) buttons_layout.addWidget(accept_button) reject_button = QtWidgets.QPushButton("Reject (R)") reject_button.clicked.connect(self.reject_test) buttons_layout.addWidget(reject_button) buttons_box.setLayout(buttons_layout) images_layout.addWidget(buttons_box) main_layout = QtWidgets.QHBoxLayout() main_layout.addWidget(self.filelist, 1) main_layout.addWidget(thumbnails_box, 1) main_layout.addWidget(images_box, 3) self.setLayout(main_layout) self.setWindowTitle("matplotlib test triager") self.set_entry(0) def set_entry(self, index): if self.current_entry == index: return self.current_entry = index entry = self.entries[index] self.pixmaps = [] for fname, thumbnail in 
zip(entry.thumbnails, self.thumbnails): pixmap = QtGui.QPixmap(os.fspath(fname)) scaled_pixmap = pixmap.scaled( thumbnail.size(), QtCore.Qt.AspectRatioMode.KeepAspectRatio, QtCore.Qt.TransformationMode.SmoothTransformation) thumbnail.image.setPixmap(scaled_pixmap) self.pixmaps.append(scaled_pixmap) self.set_large_image(0) self.filelist.setCurrentRow(self.current_entry) def set_large_image(self, index): self.thumbnails[self.current_thumbnail].setFrameShape( QtWidgets.QFrame.Shape.NoFrame) self.current_thumbnail = index pixmap = QtGui.QPixmap(os.fspath( self.entries[self.current_entry] .thumbnails[self.current_thumbnail])) self.image_display.setPixmap(pixmap) self.thumbnails[self.current_thumbnail].setFrameShape( QtWidgets.QFrame.Shape.Box) def accept_test(self): entry = self.entries[self.current_entry] if entry.status == 'autogen': print('Cannot accept autogenerated test cases.') return entry.accept() self.filelist.currentItem().setText( self.entries[self.current_entry].display) # Auto-move to the next entry self.set_entry(min((self.current_entry + 1), len(self.entries) - 1)) def reject_test(self): entry = self.entries[self.current_entry] if entry.status == 'autogen': print('Cannot reject autogenerated test cases.') return entry.reject() self.filelist.currentItem().setText( self.entries[self.current_entry].display) # Auto-move to the next entry self.set_entry(min((self.current_entry + 1), len(self.entries) - 1)) def keyPressEvent(self, e): if e.key() == QtCore.Qt.Key.Key_Left: self.set_large_image((self.current_thumbnail - 1) % 3) elif e.key() == QtCore.Qt.Key.Key_Right: self.set_large_image((self.current_thumbnail + 1) % 3) elif e.key() == QtCore.Qt.Key.Key_Up: self.set_entry(max(self.current_entry - 1, 0)) elif e.key() == QtCore.Qt.Key.Key_Down: self.set_entry(min(self.current_entry + 1, len(self.entries) - 1)) elif e.key() == QtCore.Qt.Key.Key_A: self.accept_test() elif e.key() == QtCore.Qt.Key.Key_R: self.reject_test() else: super().keyPressEvent(e)
Dialog
python
PyCQA__pylint
tests/checkers/unittest_unicode/unittest_bidirectional_unicode.py
{ "start": 563, "end": 3403 }
class ____(pylint.testutils.CheckerTestCase): CHECKER_CLASS = pylint.checkers.unicode.UnicodeChecker checker: pylint.checkers.unicode.UnicodeChecker def test_finds_bidirectional_unicode_that_currently_not_parsed(self) -> None: """Test an example from https://github.com/nickboucher/trojan-source/tree/main/Python that is currently not working Python but producing a syntax error. So we test this to make sure it stays like this """ test_file = UNICODE_TESTS / "invisible_function.txt" with pytest.raises(astroid.AstroidSyntaxError): astroid.MANAGER.ast_from_string(test_file.read_text("utf-8")) with pytest.raises(AssertionError): # The following errors are not risen at the moment, # But we keep this in order to allow writing the test fast, if # the condition above isn't met anymore. module = FakeNode(test_file.read_bytes()) with self.assertAddsMessages( pylint.testutils.MessageTest( msg_id="bidirectional-unicode", confidence=pylint.interfaces.HIGH, # node=module, line=6, end_line=10, col_offset=0, end_col_offset=17, ), pylint.testutils.MessageTest( msg_id="bidirectional-unicode", confidence=pylint.interfaces.HIGH, line=10, # node=module, end_line=10, col_offset=0, end_col_offset=20, ), ): self.checker.process_module(cast(nodes.Module, module)) @pytest.mark.parametrize( "bad_string, codec", [ pytest.param( char, codec, id=f"{unicodedata.name(char)}_{codec}".replace(" ", "_"), ) for char, codec in itertools.product( pylint.checkers.unicode.BIDI_UNICODE, ("utf-8", "utf-16le", "utf-16be", "utf-32le", "utf-32be"), ) ], ) def test_find_bidi_string(self, bad_string: str, codec: str) -> None: """Ensure that all Bidirectional strings are detected. Tests also UTF-16 and UTF-32. """ expected = pylint.testutils.MessageTest( msg_id="bidirectional-unicode", confidence=pylint.interfaces.HIGH, line=1, # node=module, end_line=1, col_offset=0, end_col_offset=3, ) with self.assertAddsMessages(expected): self.checker._check_bidi_chars(f"# {bad_string}".encode(codec), 1, codec)
TestBidirectionalUnicodeChecker
python
dagster-io__dagster
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/defs_state.py
{ "start": 691, "end": 7051 }
class ____: status: Literal["refreshing", "done", "failed"] management_type: Annotated[ "DefsStateManagementType", ImportFrom("dagster_shared.serdes.objects.models.defs_state_info"), ] error: Optional[Exception] = None # For updating: start_time tracks when it began # For completed: duration tracks final elapsed time start_time: float = 0.0 duration: Optional[float] = None def raise_component_state_refresh_errors(statuses: dict[str, ComponentStateRefreshStatus]) -> None: """Raises an error if any of the component state refreshes failed.""" errors = [ (key, status.error) for key, status in statuses.items() if status.status == "failed" and status.error ] if errors: click.echo("\n" + click.style("Detailed error information:", fg="red", bold=True)) for key, error in errors: click.echo( f" {click.style(key, fg='white', bold=True)}: {click.style(str(error), fg='red')}" ) raise errors[0][1] def _get_components_to_refresh( component_tree: "ComponentTree", defs_state_keys: Optional[set[str]], management_types: set["DefsStateManagementType"], ) -> list["StateBackedComponent"]: from dagster.components.component.state_backed_component import StateBackedComponent state_backed_components = component_tree.get_all_components(of_type=StateBackedComponent) selected_components = [ component for component in state_backed_components if component.defs_state_config.management_type in management_types ] # Filter by defs state keys if specified if defs_state_keys is None: return selected_components selected_components = [ component for component in selected_components if component.defs_state_config.key in defs_state_keys ] missing_defs_keys = defs_state_keys - { component.defs_state_config.key for component in selected_components } if missing_defs_keys: click.echo("Error: The following defs state keys were not found:") for key in sorted(missing_defs_keys): click.echo(f" {key}") click.echo("Available defs state keys:") for key in sorted( [component.defs_state_config.key for component in 
state_backed_components] ): click.echo(f" {key}") exit_with_error("One or more specified defs state keys were not found.") return selected_components async def _refresh_state_for_component( component: "StateBackedComponent", statuses: dict[str, ComponentStateRefreshStatus], project_root: Path, ) -> None: """Refreshes the state of a component and tracks its state in the statuses dictionary as it progresses.""" key = component.defs_state_config.key try: await component.refresh_state(project_root) error = None except Exception as e: error = e statuses[key] = replace( statuses[key], duration=time.time() - statuses[key].start_time, status="done" if error is None else "failed", error=error, ) async def _refresh_state_for_components( defs_state_storage: "DefsStateStorage", components: list["StateBackedComponent"], statuses: dict[str, ComponentStateRefreshStatus], project_root: Path, ) -> Optional["DefsStateInfo"]: await asyncio.gather( *[ _refresh_state_for_component(component, statuses, project_root) for component in components ] ) return defs_state_storage.get_latest_defs_state_info() def get_updated_defs_state_info_task_and_statuses( project_path: Path, defs_state_storage: "DefsStateStorage", management_types: set["DefsStateManagementType"], defs_state_keys: Optional[set[str]] = None, ) -> tuple[asyncio.Task[Optional["DefsStateInfo"]], dict[str, ComponentStateRefreshStatus]]: """Creates an asyncio.Task that will refresh the defs state for all selected components within the specified project path. Can be used in place of `get_updated_defs_state_info_and_statuses` in cases where the caller wants to do other work (e.g. display progress) while the task is running. 
""" from dagster.components.core.component_tree import ComponentTree from dagster_shared.utils.warnings import disable_dagster_warnings with disable_dagster_warnings(): component_tree = ComponentTree.for_project(project_path) components_to_refresh = _get_components_to_refresh( component_tree, defs_state_keys, management_types ) # in some cases, multiple components may share the same defs state key. in these cases, it is assumed that # the refresh_state method for each component of the same key will be identical, so we choose an arbitrary one deduplicated_components: dict[str, StateBackedComponent] = {} for component in components_to_refresh: key = component.defs_state_config.key if key not in deduplicated_components: deduplicated_components[key] = component # shared dictionary to be used for all subtasks statuses = { key: ComponentStateRefreshStatus( status="refreshing", management_type=component.defs_state_config.management_type, start_time=time.time(), ) for key, component in deduplicated_components.items() } refresh_task = asyncio.create_task( _refresh_state_for_components( defs_state_storage, list(deduplicated_components.values()), statuses, project_path ) ) return refresh_task, statuses async def get_updated_defs_state_info_and_statuses( project_path: Path, defs_state_storage: "DefsStateStorage", management_types: set["DefsStateManagementType"], defs_state_keys: Optional[set[str]] = None, ) -> tuple[Optional["DefsStateInfo"], dict[str, ComponentStateRefreshStatus]]: """Refreshes the defs state for all selected components within the specified project path, and returns the updated defs state info and statuses. """ task, statuses = get_updated_defs_state_info_task_and_statuses( project_path, defs_state_storage, management_types, defs_state_keys ) await task return task.result(), statuses
ComponentStateRefreshStatus
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/automation_condition_sensor_definition.py
{ "start": 3278, "end": 11505 }
class ____(SensorDefinition, IHasInternalInit): """Targets a set of assets and repeatedly evaluates all the AutomationConditions on all of those assets to determine which to request runs for. Args: name: The name of the sensor. target (Union[str, Sequence[str], Sequence[AssetKey], Sequence[Union[AssetsDefinition, SourceAsset]], AssetSelection]): A selection of assets to evaluate AutomationConditions of and request runs for. tags (Optional[Mapping[str, str]]): A set of key-value tags that annotate the sensor and can be used for searching and filtering in the UI. run_tags (Optional[Mapping[str, Any]]): Tags that will be automatically attached to runs launched by this sensor. metadata (Optional[Mapping[str, object]]): A set of metadata entries that annotate the sensor. Values will be normalized to typed `MetadataValue` objects. default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default status can be overridden from the Dagster UI or via the GraphQL API. minimum_interval_seconds (Optional[int]): The frequency at which to try to evaluate the sensor. The actual interval will be longer if the sensor evaluation takes longer than the provided interval. description (Optional[str]): A human-readable description of the sensor. emit_backfills (bool): If set to True, will emit a backfill on any tick where more than one partition of any single asset is requested, rather than individual runs. Defaults to True. use_user_code_server (bool): (Beta) If set to True, this sensor will be evaluated in the user code server, rather than the AssetDaemon. This enables evaluating custom AutomationCondition subclasses, and ensures that the condition definitions will remain in sync with your user code version, eliminating version skew. Note: currently a maximum of 500 assets or checks may be targeted at a time by a sensor that has this value set. 
default_condition (Optional[AutomationCondition]): (Beta) If provided, this condition will be used for any selected assets or asset checks which do not have an automation condition defined. Requires `use_user_code_server` to be set to `True`. Examples: .. code-block:: python import dagster as dg # automation condition sensor that defaults to running defs1 = dg.Definitions( assets=..., sensors=[ dg.AutomationConditionSensorDefinition( name="automation_condition_sensor", target=dg.AssetSelection.all(), default_status=dg.DefaultSensorStatus.RUNNING, ), ] ) # one automation condition sensor per group defs2 = dg.Definitions( assets=..., sensors=[ dg.AutomationConditionSensorDefinition( name="raw_data_automation_condition_sensor", target=dg.AssetSelection.groups("raw_data"), ), dg.AutomationConditionSensorDefinition( name="ml_automation_condition_sensor", target=dg.AssetSelection.groups("machine_learning"), ), ] ) """ def __init__( self, name: str, *, target: CoercibleToAssetSelection, tags: Optional[Mapping[str, str]] = None, run_tags: Optional[Mapping[str, Any]] = None, default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED, minimum_interval_seconds: Optional[int] = None, description: Optional[str] = None, metadata: Optional[RawMetadataMapping] = None, emit_backfills: bool = True, use_user_code_server: bool = False, default_condition: Optional[AutomationCondition] = None, ): self._use_user_code_server = use_user_code_server check.bool_param(emit_backfills, "allow_backfills") self._default_condition = check.opt_inst_param( default_condition, "default_condition", AutomationCondition ) check.param_invariant( not (self._default_condition and not self._use_user_code_server), "default_condition", "Setting a `default_condition` for a non-user-code AutomationConditionSensorDefinition is not supported.", ) self._run_tags = normalize_tags(run_tags) self._sensor_target = target self._emit_backfills = emit_backfills # only store this value in the metadata if it's True 
if emit_backfills: metadata = {**(metadata or {}), EMIT_BACKFILLS_METADATA_KEY: True} super().__init__( name=check_valid_name(name), job_name=None, evaluation_fn=partial(_evaluate, self) if self._use_user_code_server else not_supported, minimum_interval_seconds=minimum_interval_seconds, description=description, job=None, jobs=None, default_status=default_status, required_resource_keys=None, asset_selection=target, tags=tags, metadata=metadata, ) @property def run_tags(self) -> Mapping[str, str]: return self._run_tags @property def asset_selection(self) -> AssetSelection: return cast("AssetSelection", super().asset_selection) @property def emit_backfills(self) -> bool: return EMIT_BACKFILLS_METADATA_KEY in self.metadata @property def default_condition(self) -> Optional[AutomationCondition]: return self._default_condition @property def sensor_type(self) -> SensorType: return SensorType.AUTOMATION if self._use_user_code_server else SensorType.AUTO_MATERIALIZE @staticmethod def dagster_internal_init( # type: ignore *, name: str, target: CoercibleToAssetSelection, tags: Optional[Mapping[str, str]], run_tags: Optional[Mapping[str, Any]], default_status: DefaultSensorStatus, minimum_interval_seconds: Optional[int], description: Optional[str], metadata: Optional[RawMetadataMapping], emit_backfills: bool, use_user_code_server: bool, default_condition: Optional[AutomationCondition], ) -> "AutomationConditionSensorDefinition": return AutomationConditionSensorDefinition( name=name, target=target, tags=tags, run_tags=run_tags, default_status=default_status, minimum_interval_seconds=minimum_interval_seconds, description=description, metadata=metadata, emit_backfills=emit_backfills, use_user_code_server=use_user_code_server, default_condition=default_condition, ) def with_attributes( self, *, jobs: Optional[Sequence[ExecutableDefinition]] = None, metadata: Optional[RawMetadataMapping] = None, ) -> "AutomationConditionSensorDefinition": """Returns a copy of this sensor with the 
attributes replaced. Note: jobs parameter is ignored for AutomationConditionSensorDefinition as it doesn't use jobs. """ return AutomationConditionSensorDefinition.dagster_internal_init( name=self.name, target=self._sensor_target, tags=self._tags, run_tags=self._run_tags, default_status=self.default_status, minimum_interval_seconds=self.minimum_interval_seconds, description=self.description, metadata=metadata if metadata is not None else self._metadata, emit_backfills=self._emit_backfills, use_user_code_server=self._use_user_code_server, default_condition=self._default_condition, )
AutomationConditionSensorDefinition
python
marshmallow-code__marshmallow
src/marshmallow/fields.py
{ "start": 59111, "end": 60446 }
class ____(String): """An URL field. :param default: Default value for the field if the attribute is not set. :param relative: Whether to allow relative URLs. :param absolute: Whether to allow absolute URLs. :param require_tld: Whether to reject non-FQDN hostnames. :param schemes: Valid schemes. By default, ``http``, ``https``, ``ftp``, and ``ftps`` are allowed. :param kwargs: The same keyword arguments that :class:`String` receives. """ #: Default error messages. default_error_messages = {"invalid": "Not a valid URL."} def __init__( self, *, relative: bool = False, absolute: bool = True, schemes: types.StrSequenceOrSet | None = None, require_tld: bool = True, **kwargs: Unpack[_BaseFieldKwargs], ): super().__init__(**kwargs) self.relative = relative self.absolute = absolute self.require_tld = require_tld # Insert validation into self.validators so that multiple errors can be stored. validator = validate.URL( relative=self.relative, absolute=self.absolute, schemes=schemes, require_tld=self.require_tld, error=self.error_messages["invalid"], ) self.validators.insert(0, validator)
Url
python
getsentry__sentry
src/sentry/snuba/outcomes.py
{ "start": 2252, "end": 2729 }
class ____(Field): def get_snuba_columns(self, raw_groupby: Sequence[str] | None = None) -> list[str]: return ["quantity"] def extract_from_row( self, row: Mapping[str, Any] | None, group: Mapping[str, Any] | None = None ) -> int: if row is None: return 0 return int(row["quantity"]) def select_params(self, dataset: Dataset) -> Function: return Function("sum", [Column("quantity")], "quantity")
QuantityField
python
getsentry__sentry
tests/sentry/monitors/test_models.py
{ "start": 606, "end": 7131 }
class ____(TestCase): def test_next_run_crontab(self) -> None: ts = datetime(2019, 1, 1, 1, 10, 20, tzinfo=timezone.utc) monitor = Monitor( config={ "schedule_type": ScheduleType.CRONTAB, "schedule": "* * * * *", "checkin_margin": None, "max_runtime": None, } ) monitor_environment = MonitorEnvironment(monitor=monitor, last_checkin=ts) # XXX: Seconds are removed as we clamp to the minute assert monitor_environment.monitor.get_next_expected_checkin(ts) == datetime( 2019, 1, 1, 1, 11, tzinfo=timezone.utc ) assert monitor_environment.monitor.get_next_expected_checkin_latest(ts) == datetime( 2019, 1, 1, 1, 12, tzinfo=timezone.utc ) monitor.config["schedule"] = "*/5 * * * *" assert monitor_environment.monitor.get_next_expected_checkin(ts) == datetime( 2019, 1, 1, 1, 15, tzinfo=timezone.utc ) assert monitor_environment.monitor.get_next_expected_checkin_latest(ts) == datetime( 2019, 1, 1, 1, 16, tzinfo=timezone.utc ) def test_next_run_latest_crontab_with_margin(self) -> None: ts = datetime(2019, 1, 1, 1, 10, 20, tzinfo=timezone.utc) monitor = Monitor( config={ "schedule_type": ScheduleType.CRONTAB, "schedule": "* * * * *", "checkin_margin": 5, "max_runtime": None, } ) monitor_environment = MonitorEnvironment(monitor=monitor, last_checkin=ts) # XXX: Seconds are removed as we clamp to the minute assert monitor_environment.monitor.get_next_expected_checkin(ts) == datetime( 2019, 1, 1, 1, 11, tzinfo=timezone.utc ) assert monitor_environment.monitor.get_next_expected_checkin_latest(ts) == datetime( 2019, 1, 1, 1, 16, tzinfo=timezone.utc ) def test_next_run_crontab_with_timezone(self) -> None: ts = datetime(2019, 1, 1, 1, 10, 20, tzinfo=timezone.utc) monitor = Monitor( config={ "schedule_type": ScheduleType.CRONTAB, "schedule": "0 12 * * *", "timezone": "UTC", "checkin_margin": None, "max_runtime": None, }, ) monitor_environment = MonitorEnvironment(monitor=monitor, last_checkin=ts) # XXX: Seconds are removed as we clamp to the minute assert 
monitor_environment.monitor.get_next_expected_checkin(ts) == datetime( 2019, 1, 1, 12, 00, tzinfo=timezone.utc ) # Europe/Berlin == UTC+01:00. # the run should be represented 1 hours earlier in UTC time monitor.config["timezone"] = "Europe/Berlin" assert monitor_environment.monitor.get_next_expected_checkin(ts) == datetime( 2019, 1, 1, 11, 00, tzinfo=timezone.utc ) def test_next_run_interval(self) -> None: ts = datetime(2019, 1, 1, 1, 10, 20, tzinfo=timezone.utc) monitor = Monitor( config={ "schedule": [1, "month"], "schedule_type": ScheduleType.INTERVAL, "checkin_margin": None, "max_runtime": None, }, ) monitor_environment = MonitorEnvironment(monitor=monitor, last_checkin=ts) # XXX: Seconds are removed as we clamp to the minute. assert monitor_environment.monitor.get_next_expected_checkin(ts) == datetime( 2019, 2, 1, 1, 10, 0, tzinfo=timezone.utc ) def test_save_defaults_slug_to_name(self) -> None: monitor = Monitor.objects.create( organization_id=self.organization.id, project_id=self.project.id, name="My Awesome Monitor", config={ "schedule": [1, "month"], "schedule_type": ScheduleType.INTERVAL, "checkin_margin": None, "max_runtime": None, }, ) assert monitor.slug == "my-awesome-monitor" def test_save_defaults_slug_unique(self) -> None: monitor = Monitor.objects.create( organization_id=self.organization.id, project_id=self.project.id, name="My Awesome Monitor", slug="my-awesome-monitor", config={ "schedule": [1, "month"], "schedule_type": ScheduleType.INTERVAL, "checkin_margin": None, "max_runtime": None, }, ) assert monitor.slug == "my-awesome-monitor" # Create another monitor with the same name monitor = Monitor.objects.create( organization_id=self.organization.id, project_id=self.project.id, name="My Awesome Monitor", config={ "schedule": [1, "month"], "schedule_type": ScheduleType.INTERVAL, "checkin_margin": None, "max_runtime": None, }, ) assert monitor.slug.startswith("my-awesome-monitor-") @override_settings(MAX_MONITORS_PER_ORG=2) def 
test_monitor_organization_limit(self) -> None: for i in range(settings.MAX_MONITORS_PER_ORG): Monitor.objects.create( organization_id=self.organization.id, project_id=self.project.id, name=f"Unicron-{i}", slug=f"unicron-{i}", config={ "schedule": [1, "month"], "schedule_type": ScheduleType.INTERVAL, "checkin_margin": None, "max_runtime": None, }, ) with pytest.raises( MonitorLimitsExceeded, match=f"You may not exceed {settings.MAX_MONITORS_PER_ORG} monitors per organization", ): Monitor.objects.create( organization_id=self.organization.id, project_id=self.project.id, name=f"Unicron-{settings.MAX_MONITORS_PER_ORG}", slug=f"unicron-{settings.MAX_MONITORS_PER_ORG}", config={ "schedule": [1, "month"], "schedule_type": ScheduleType.INTERVAL, "checkin_margin": None, "max_runtime": None, }, )
MonitorTestCase
python
pytest-dev__pytest
testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookup_classlevel.py
{ "start": 79, "end": 254 }
class ____: @pytest.fixture def something(self, request): return request.instance def test_method(self, something): assert something is self
TestClass
python
vyperlang__vyper
vyper/venom/function.py
{ "start": 395, "end": 643 }
class ____: name: str index: int # needed? offset: int # needed? size: int # needed? id_: int call_site_var: Optional[IRVariable] # needed? func_var: IRVariable addr_var: Optional[IRVariable] # needed?
IRParameter
python
pytest-dev__pytest
testing/python/metafunc.py
{ "start": 63702, "end": 73615 }
class ____: """#308""" def test_simple_mark(self, pytester: Pytester) -> None: s = """ import pytest @pytest.mark.foo @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.param(1, 3, marks=pytest.mark.bar), (2, 3), ]) def test_increment(n, expected): assert n + 1 == expected """ items = pytester.getitems(s) assert len(items) == 3 for item in items: assert "foo" in item.keywords assert "bar" not in items[0].keywords assert "bar" in items[1].keywords assert "bar" not in items[2].keywords def test_select_based_on_mark(self, pytester: Pytester) -> None: s = """ import pytest @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.param(2, 3, marks=pytest.mark.foo), (3, 4), ]) def test_increment(n, expected): assert n + 1 == expected """ pytester.makepyfile(s) rec = pytester.inline_run("-m", "foo") passed, skipped, fail = rec.listoutcomes() assert len(passed) == 1 assert len(skipped) == 0 assert len(fail) == 0 def test_simple_xfail(self, pytester: Pytester) -> None: s = """ import pytest @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.param(1, 3, marks=pytest.mark.xfail), (2, 3), ]) def test_increment(n, expected): assert n + 1 == expected """ pytester.makepyfile(s) reprec = pytester.inline_run() # xfail is skip?? 
reprec.assertoutcome(passed=2, skipped=1) def test_simple_xfail_single_argname(self, pytester: Pytester) -> None: s = """ import pytest @pytest.mark.parametrize("n", [ 2, pytest.param(3, marks=pytest.mark.xfail), 4, ]) def test_isEven(n): assert n % 2 == 0 """ pytester.makepyfile(s) reprec = pytester.inline_run() reprec.assertoutcome(passed=2, skipped=1) def test_xfail_with_arg(self, pytester: Pytester) -> None: s = """ import pytest @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.param(1, 3, marks=pytest.mark.xfail("True")), (2, 3), ]) def test_increment(n, expected): assert n + 1 == expected """ pytester.makepyfile(s) reprec = pytester.inline_run() reprec.assertoutcome(passed=2, skipped=1) def test_xfail_with_kwarg(self, pytester: Pytester) -> None: s = """ import pytest @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.param(1, 3, marks=pytest.mark.xfail(reason="some bug")), (2, 3), ]) def test_increment(n, expected): assert n + 1 == expected """ pytester.makepyfile(s) reprec = pytester.inline_run() reprec.assertoutcome(passed=2, skipped=1) def test_xfail_with_arg_and_kwarg(self, pytester: Pytester) -> None: s = """ import pytest @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.param(1, 3, marks=pytest.mark.xfail("True", reason="some bug")), (2, 3), ]) def test_increment(n, expected): assert n + 1 == expected """ pytester.makepyfile(s) reprec = pytester.inline_run() reprec.assertoutcome(passed=2, skipped=1) @pytest.mark.parametrize("strict", [True, False]) def test_xfail_passing_is_xpass(self, pytester: Pytester, strict: bool) -> None: s = f""" import pytest m = pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict}) @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.param(2, 3, marks=m), (3, 4), ]) def test_increment(n, expected): assert n + 1 == expected """ pytester.makepyfile(s) reprec = pytester.inline_run() passed, failed = (2, 1) if strict else (3, 0) 
reprec.assertoutcome(passed=passed, failed=failed) def test_parametrize_called_in_generate_tests(self, pytester: Pytester) -> None: s = """ import pytest def pytest_generate_tests(metafunc): passingTestData = [(1, 2), (2, 3)] failingTestData = [(1, 3), (2, 2)] testData = passingTestData + [pytest.param(*d, marks=pytest.mark.xfail) for d in failingTestData] metafunc.parametrize(("n", "expected"), testData) def test_increment(n, expected): assert n + 1 == expected """ pytester.makepyfile(s) reprec = pytester.inline_run() reprec.assertoutcome(passed=2, skipped=2) def test_parametrize_ID_generation_string_int_works( self, pytester: Pytester ) -> None: """#290""" pytester.makepyfile( """ import pytest @pytest.fixture def myfixture(): return 'example' @pytest.mark.parametrize( 'limit', (0, '0')) def test_limit(limit, myfixture): return """ ) reprec = pytester.inline_run() reprec.assertoutcome(passed=2) @pytest.mark.parametrize("strict", [True, False]) def test_parametrize_marked_value(self, pytester: Pytester, strict: bool) -> None: s = f""" import pytest @pytest.mark.parametrize(("n", "expected"), [ pytest.param( 2,3, marks=pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict}), ), pytest.param( 2,3, marks=[pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict})], ), ]) def test_increment(n, expected): assert n + 1 == expected """ pytester.makepyfile(s) reprec = pytester.inline_run() passed, failed = (0, 2) if strict else (2, 0) reprec.assertoutcome(passed=passed, failed=failed) def test_pytest_make_parametrize_id(self, pytester: Pytester) -> None: pytester.makeconftest( """ def pytest_make_parametrize_id(config, val): return str(val * 2) """ ) pytester.makepyfile( """ import pytest @pytest.mark.parametrize("x", range(2)) def test_func(x): pass """ ) result = pytester.runpytest("-v") result.stdout.fnmatch_lines(["*test_func*0*PASS*", "*test_func*2*PASS*"]) def test_pytest_make_parametrize_id_with_argname(self, 
pytester: Pytester) -> None: pytester.makeconftest( """ def pytest_make_parametrize_id(config, val, argname): return str(val * 2 if argname == 'x' else val * 10) """ ) pytester.makepyfile( """ import pytest @pytest.mark.parametrize("x", range(2)) def test_func_a(x): pass @pytest.mark.parametrize("y", [1]) def test_func_b(y): pass """ ) result = pytester.runpytest("-v") result.stdout.fnmatch_lines( ["*test_func_a*0*PASS*", "*test_func_a*2*PASS*", "*test_func_b*10*PASS*"] ) def test_parametrize_positional_args(self, pytester: Pytester) -> None: pytester.makepyfile( """ import pytest @pytest.mark.parametrize("a", [1], False) def test_foo(a): pass """ ) result = pytester.runpytest() result.assert_outcomes(passed=1) def test_parametrize_iterator(self, pytester: Pytester) -> None: pytester.makepyfile( """ import itertools import pytest id_parametrize = pytest.mark.parametrize( ids=("param%d" % i for i in itertools.count()) ) @id_parametrize('y', ['a', 'b']) def test1(y): pass @id_parametrize('y', ['a', 'b']) def test2(y): pass @pytest.mark.parametrize("a, b", [(1, 2), (3, 4)], ids=itertools.count()) def test_converted_to_str(a, b): pass """ ) result = pytester.runpytest("-vv", "-s") result.stdout.fnmatch_lines( [ "test_parametrize_iterator.py::test1[param0] PASSED", "test_parametrize_iterator.py::test1[param1] PASSED", "test_parametrize_iterator.py::test2[param0] PASSED", "test_parametrize_iterator.py::test2[param1] PASSED", "test_parametrize_iterator.py::test_converted_to_str[0] PASSED", "test_parametrize_iterator.py::test_converted_to_str[1] PASSED", "*= 6 passed in *", ] )
TestMarkersWithParametrization
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/event/base.py
{ "start": 7264, "end": 11061 }
class ____(Generic[_ET]): _dispatch_target: Optional[Type[_ET]] """class which will receive the .dispatch collection""" dispatch: _Dispatch[_ET] """reference back to the _Dispatch class. Bidirectional against _Dispatch._events """ if typing.TYPE_CHECKING: def __getattr__(self, name: str) -> _InstanceLevelDispatch[_ET]: ... def __init_subclass__(cls) -> None: """Intercept new Event subclasses and create associated _Dispatch classes.""" cls._create_dispatcher_class(cls.__name__, cls.__bases__, cls.__dict__) @classmethod def _accept_with( cls, target: Union[_ET, Type[_ET]], identifier: str ) -> Optional[Union[_ET, Type[_ET]]]: raise NotImplementedError() @classmethod def _listen( cls, event_key: _EventKey[_ET], *, propagate: bool = False, insert: bool = False, named: bool = False, asyncio: bool = False, ) -> None: raise NotImplementedError() @staticmethod def _set_dispatch( klass: Type[_HasEventsDispatch[_ET]], dispatch_cls: Type[_Dispatch[_ET]], ) -> _Dispatch[_ET]: # This allows an Events subclass to define additional utility # methods made available to the target via # "self.dispatch._events.<utilitymethod>" # @staticmethod to allow easy "super" calls while in a metaclass # constructor. klass.dispatch = dispatch_cls(None) dispatch_cls._events = klass return klass.dispatch @classmethod def _create_dispatcher_class( cls, classname: str, bases: Tuple[type, ...], dict_: Mapping[str, Any] ) -> None: """Create a :class:`._Dispatch` class corresponding to an :class:`.Events` class.""" # there's all kinds of ways to do this, # i.e. make a Dispatch class that shares the '_listen' method # of the Event class, this is the straight monkeypatch. 
if hasattr(cls, "dispatch"): dispatch_base = cls.dispatch.__class__ else: dispatch_base = _Dispatch event_names = [k for k in dict_ if _is_event_name(k)] dispatch_cls = cast( "Type[_Dispatch[_ET]]", type( "%sDispatch" % classname, (dispatch_base,), {"__slots__": event_names}, ), ) dispatch_cls._event_names = event_names dispatch_inst = cls._set_dispatch(cls, dispatch_cls) for k in dispatch_cls._event_names: setattr(dispatch_inst, k, _ClsLevelDispatch(cls, dict_[k])) _registrars[k].append(cls) for super_ in dispatch_cls.__bases__: if issubclass(super_, _Dispatch) and super_ is not _Dispatch: for ls in super_._events.dispatch._event_descriptors: setattr(dispatch_inst, ls.name, ls) dispatch_cls._event_names.append(ls.name) if getattr(cls, "_dispatch_target", None): dispatch_target_cls = cls._dispatch_target assert dispatch_target_cls is not None if ( hasattr(dispatch_target_cls, "__slots__") and "_slots_dispatch" in dispatch_target_cls.__slots__ ): dispatch_target_cls.dispatch = slots_dispatcher(cls) else: dispatch_target_cls.dispatch = dispatcher(cls) klass = type( "Joined%s" % dispatch_cls.__name__, (_JoinedDispatcher,), {"__slots__": event_names}, ) dispatch_cls._joined_dispatch_cls = klass # establish pickle capability by adding it to this module globals()[klass.__name__] = klass
_HasEventsDispatch
python
kamyu104__LeetCode-Solutions
Python/the-kth-factor-of-n.py
{ "start": 754, "end": 1197 }
class ____(object): def kthFactor(self, n, k): """ :type n: int :type k: int :rtype: int """ result = [] i = 1 while i*i <= n: if not n%i: if i*i != n: result.append(i) k -= 1 if not k: return i i += 1 return -1 if k > len(result) else n//result[-k]
Solution2
python
ray-project__ray
rllib/core/rl_module/multi_rl_module.py
{ "start": 19511, "end": 31918 }
class ____: """A utility spec class to make it constructing MultiRLModules easier. Users can extend this class to modify the behavior of base class. For example to share neural networks across the modules, the build method can be overridden to create the shared module first and then pass it to custom module classes that would then use it as a shared module. """ #: The class of the MultiRLModule to construct. By default, #: this is the base `MultiRLModule` class. multi_rl_module_class: Type[MultiRLModule] = MultiRLModule #: Optional global observation space for the MultiRLModule. #: Useful for shared network components that live only inside the MultiRLModule #: and don't have their own ModuleID and own RLModule within #: `self._rl_modules`. observation_space: Optional[gym.Space] = None #: Optional global action space for the MultiRLModule. Useful for #: shared network components that live only inside the MultiRLModule and don't #: have their own ModuleID and own RLModule within `self._rl_modules`. action_space: Optional[gym.Space] = None #: An optional global inference_only flag. If not set (None by #: default), considers the MultiRLModule to be inference_only=True, only if all #: submodules also have their own inference_only flags set to True. inference_only: Optional[bool] = None # TODO (sven): Once we support MultiRLModules inside other MultiRLModules, we would # need this flag in here as well, but for now, we'll leave it out for simplicity. # learner_only: bool = False #: An optional global model_config dict. Useful to configure shared #: network components that only live inside the MultiRLModule and don't have #: their own ModuleID and own RLModule within `self._rl_modules`. model_config: Optional[dict] = None #: The module specs for each individual module. It can be either #: an RLModuleSpec used for all module_ids or a dictionary mapping from module #: IDs to RLModuleSpecs for each individual module. 
rl_module_specs: Union[RLModuleSpec, Dict[ModuleID, RLModuleSpec]] = None # TODO (sven): Deprecate these in favor of using the pure Checkpointable APIs for # loading and saving state. load_state_path: Optional[str] = None modules_to_load: Optional[Set[ModuleID]] = None # Deprecated: Do not use anymore. module_specs: Optional[Union[RLModuleSpec, Dict[ModuleID, RLModuleSpec]]] = None def __post_init__(self): if self.module_specs is not None: deprecation_warning( old="MultiRLModuleSpec(module_specs=..)", new="MultiRLModuleSpec(rl_module_specs=..)", error=True, ) if self.rl_module_specs is None: raise ValueError( "Module_specs cannot be None. It should be either a " "RLModuleSpec or a dictionary mapping from module IDs to " "RLModuleSpecs for each individual module." ) self.module_specs = self.rl_module_specs # Figure out global inference_only setting. # If not provided (None), only if all submodules are # inference_only, this MultiRLModule will be inference_only. self.inference_only = ( self.inference_only if self.inference_only is not None else all(spec.inference_only for spec in self.rl_module_specs.values()) ) @OverrideToImplementCustomLogic def build(self, module_id: Optional[ModuleID] = None) -> RLModule: """Builds either the MultiRLModule or a (single) sub-RLModule under `module_id`. Args: module_id: Optional ModuleID of a single RLModule to be built. If None (default), builds the MultiRLModule. Returns: The built RLModule if `module_id` is provided, otherwise the built MultiRLModule. """ self._check_before_build() # ModuleID provided, return single-agent RLModule. if module_id: return self.rl_module_specs[module_id].build() # Return MultiRLModule. 
try: module = self.multi_rl_module_class( observation_space=self.observation_space, action_space=self.action_space, inference_only=self.inference_only, model_config=( dataclasses.asdict(self.model_config) if dataclasses.is_dataclass(self.model_config) else self.model_config ), rl_module_specs=self.rl_module_specs, ) # Older custom model might still require the old `MultiRLModuleConfig` under # the `config` arg. except AttributeError as e: if self.multi_rl_module_class is not MultiRLModule: multi_rl_module_config = self.get_rl_module_config() module = self.multi_rl_module_class(multi_rl_module_config) else: raise e return module def add_modules( self, module_specs: Dict[ModuleID, RLModuleSpec], override: bool = True, ) -> None: """Add new module specs to the spec or updates existing ones. Args: module_specs: The mapping for the module_id to the single-agent module specs to be added to this multi-agent module spec. override: Whether to override the existing module specs if they already exist. If False, they are only updated. """ if self.rl_module_specs is None: self.rl_module_specs = {} for module_id, module_spec in module_specs.items(): if override or module_id not in self.rl_module_specs: # Disable our `inference_only` as soon as any single-agent module has # `inference_only=False`. if not module_spec.inference_only: self.inference_only = False self.rl_module_specs[module_id] = module_spec else: self.rl_module_specs[module_id].update(module_spec) def remove_modules(self, module_ids: Union[ModuleID, Collection[ModuleID]]) -> None: """Removes the provided ModuleIDs from this MultiRLModuleSpec. Args: module_ids: Collection of the ModuleIDs to remove from this spec. """ for module_id in force_list(module_ids): self.rl_module_specs.pop(module_id, None) @classmethod def from_module(self, module: MultiRLModule) -> "MultiRLModuleSpec": """Creates a MultiRLModuleSpec from a MultiRLModule. Args: module: The MultiRLModule to create the spec from. 
Returns: The MultiRLModuleSpec. """ # we want to get the spec of the underlying unwrapped module that way we can # easily reconstruct it. The only wrappers that we expect to support today are # wrappers that allow us to do distributed training. Those will be added back # by the learner if necessary. rl_module_specs = { module_id: RLModuleSpec.from_module(rl_module.unwrapped()) for module_id, rl_module in module._rl_modules.items() } multi_rl_module_class = module.__class__ return MultiRLModuleSpec( multi_rl_module_class=multi_rl_module_class, observation_space=module.observation_space, action_space=module.action_space, inference_only=module.inference_only, model_config=module.model_config, rl_module_specs=rl_module_specs, ) def _check_before_build(self): if not isinstance(self.rl_module_specs, dict): raise ValueError( f"When build() is called on {self.__class__}, the `rl_module_specs` " "attribute should be a dictionary mapping ModuleIDs to " "RLModuleSpecs for each individual RLModule." 
) def to_dict(self) -> Dict[str, Any]: """Converts the MultiRLModuleSpec to a dictionary.""" return { "multi_rl_module_class": serialize_type(self.multi_rl_module_class), "observation_space": gym_space_to_dict(self.observation_space), "action_space": gym_space_to_dict(self.action_space), "inference_only": self.inference_only, "model_config": self.model_config, "rl_module_specs": { module_id: rl_module_spec.to_dict() for module_id, rl_module_spec in self.rl_module_specs.items() }, } @classmethod def from_dict(cls, d) -> "MultiRLModuleSpec": """Creates a MultiRLModuleSpec from a dictionary.""" return MultiRLModuleSpec( multi_rl_module_class=deserialize_type(d["multi_rl_module_class"]), observation_space=gym_space_from_dict(d.get("observation_space")), action_space=gym_space_from_dict(d.get("action_space")), model_config=d.get("model_config"), inference_only=d["inference_only"], rl_module_specs={ module_id: RLModuleSpec.from_dict(rl_module_spec) for module_id, rl_module_spec in ( d.get("rl_module_specs", d.get("module_specs")).items() ) }, ) def update( self, other: Union["MultiRLModuleSpec", RLModuleSpec], override: bool = False, ) -> None: """Updates this spec with the other spec. Traverses this MultiRLModuleSpec's module_specs and updates them with the module specs from the `other` (Multi)RLModuleSpec. Args: other: The other spec to update this spec with. override: Whether to override the existing module specs if they already exist. If False, they are only updated. """ if isinstance(other, RLModuleSpec): # Disable our `inference_only` as soon as any single-agent module has # `inference_only=False`. 
if not other.inference_only: self.inference_only = False for mid, spec in self.rl_module_specs.items(): self.rl_module_specs[mid].update(other, override=False) elif isinstance(other.module_specs, dict): self.add_modules(other.rl_module_specs, override=override) else: assert isinstance(other, MultiRLModuleSpec) if not self.rl_module_specs: self.inference_only = other.inference_only self.rl_module_specs = other.rl_module_specs else: if not other.inference_only: self.inference_only = False self.rl_module_specs.update(other.rl_module_specs) def as_multi_rl_module_spec(self) -> "MultiRLModuleSpec": """Returns self in order to match `RLModuleSpec.as_multi_rl_module_spec()`.""" return self def __contains__(self, item) -> bool: """Returns whether the given `item` (ModuleID) is present in self.""" return item in self.rl_module_specs def __getitem__(self, item) -> RLModuleSpec: """Returns the RLModuleSpec under the ModuleID.""" return self.rl_module_specs[item] @Deprecated( new="MultiRLModule(*, module_specs={module1: [RLModuleSpec], " "module2: [RLModuleSpec], ..}, inference_only=..)", error=True, ) def get_multi_rl_module_config(self): pass @Deprecated(new="MultiRLModuleSpec.as_multi_rl_module_spec()", error=True) def as_multi_agent(self): pass @Deprecated(new="MultiRLModuleSpec.get_multi_rl_module_config", error=True) def get_marl_config(self, *args, **kwargs): pass @Deprecated( new="MultiRLModule(*, observation_space=.., action_space=.., ....)", error=False, ) def get_rl_module_config(self): return MultiRLModuleConfig( inference_only=self.inference_only, modules=self.rl_module_specs, ) @Deprecated( new="MultiRLModule(*, rl_module_specs={module1: [RLModuleSpec], " "module2: [RLModuleSpec], ..}, inference_only=..)", error=False, ) @dataclasses.dataclass
MultiRLModuleSpec
python
ray-project__ray
release/ray_release/tests/test_state_machine.py
{ "start": 2441, "end": 2674 }
class ____: def create_build(self, *args, **kwargs): return { "number": 1, "jobs": [{"id": "1"}], } def list_all_for_pipeline(self, *args, **kwargs): return []
MockBuildkiteBuild
python
py-pdf__pypdf
pypdf/_encryption.py
{ "start": 4351, "end": 19669 }
class ____: @staticmethod def compute_key( password: bytes, rev: int, key_size: int, o_entry: bytes, P: int, id1_entry: bytes, metadata_encrypted: bool, ) -> bytes: """ Algorithm 2: Computing an encryption key. a) Pad or truncate the password string to exactly 32 bytes. If the password string is more than 32 bytes long, use only its first 32 bytes; if it is less than 32 bytes long, pad it by appending the required number of additional bytes from the beginning of the following padding string: < 28 BF 4E 5E 4E 75 8A 41 64 00 4E 56 FF FA 01 08 2E 2E 00 B6 D0 68 3E 80 2F 0C A9 FE 64 53 69 7A > That is, if the password string is n bytes long, append the first 32 - n bytes of the padding string to the end of the password string. If the password string is empty (zero-length), meaning there is no user password, substitute the entire padding string in its place. b) Initialize the MD5 hash function and pass the result of step (a) as input to this function. c) Pass the value of the encryption dictionary’s O entry to the MD5 hash function. ("Algorithm 3: Computing the encryption dictionary’s O (owner password) value" shows how the O value is computed.) d) Convert the integer value of the P entry to a 32-bit unsigned binary number and pass these bytes to the MD5 hash function, low-order byte first. e) Pass the first element of the file’s file identifier array (the value of the ID entry in the document’s trailer dictionary; see Table 15) to the MD5 hash function. f) (Security handlers of revision 4 or greater) If document metadata is not being encrypted, pass 4 bytes with the value 0xFFFFFFFF to the MD5 hash function. g) Finish the hash. h) (Security handlers of revision 3 or greater) Do the following 50 times: Take the output from the previous MD5 hash and pass the first n bytes of the output as input into a new MD5 hash, where n is the number of bytes of the encryption key as defined by the value of the encryption dictionary’s Length entry. 
i) Set the encryption key to the first n bytes of the output from the final MD5 hash, where n shall always be 5 for security handlers of revision 2 but, for security handlers of revision 3 or greater, shall depend on the value of the encryption dictionary’s Length entry. Args: password: The encryption secret as a bytes-string rev: The encryption revision (see PDF standard) key_size: The size of the key in bytes o_entry: The owner entry P: A set of flags specifying which operations shall be permitted when the document is opened with user access. If bit 2 is set to 1, all other bits are ignored and all operations are permitted. If bit 2 is set to 0, permission for operations are based on the values of the remaining flags defined in Table 24. id1_entry: metadata_encrypted: A boolean indicating if the metadata is encrypted. Returns: The u_hash digest of length key_size """ a = _padding(password) u_hash = hashlib.md5(a) u_hash.update(o_entry) u_hash.update(struct.pack("<I", P)) u_hash.update(id1_entry) if rev >= 4 and not metadata_encrypted: u_hash.update(b"\xff\xff\xff\xff") u_hash_digest = u_hash.digest() length = key_size // 8 if rev >= 3: for _ in range(50): u_hash_digest = hashlib.md5(u_hash_digest[:length]).digest() return u_hash_digest[:length] @staticmethod def compute_O_value_key(owner_password: bytes, rev: int, key_size: int) -> bytes: """ Algorithm 3: Computing the encryption dictionary’s O (owner password) value. a) Pad or truncate the owner password string as described in step (a) of "Algorithm 2: Computing an encryption key". If there is no owner password, use the user password instead. b) Initialize the MD5 hash function and pass the result of step (a) as input to this function. c) (Security handlers of revision 3 or greater) Do the following 50 times: Take the output from the previous MD5 hash and pass it as input into a new MD5 hash. 
d) Create an RC4 encryption key using the first n bytes of the output from the final MD5 hash, where n shall always be 5 for security handlers of revision 2 but, for security handlers of revision 3 or greater, shall depend on the value of the encryption dictionary’s Length entry. e) Pad or truncate the user password string as described in step (a) of "Algorithm 2: Computing an encryption key". f) Encrypt the result of step (e), using an RC4 encryption function with the encryption key obtained in step (d). g) (Security handlers of revision 3 or greater) Do the following 19 times: Take the output from the previous invocation of the RC4 function and pass it as input to a new invocation of the function; use an encryption key generated by taking each byte of the encryption key obtained in step (d) and performing an XOR (exclusive or) operation between that byte and the single-byte value of the iteration counter (from 1 to 19). h) Store the output from the final invocation of the RC4 function as the value of the O entry in the encryption dictionary. Args: owner_password: rev: The encryption revision (see PDF standard) key_size: The size of the key in bytes Returns: The RC4 key """ a = _padding(owner_password) o_hash_digest = hashlib.md5(a).digest() if rev >= 3: for _ in range(50): o_hash_digest = hashlib.md5(o_hash_digest).digest() return o_hash_digest[: key_size // 8] @staticmethod def compute_O_value(rc4_key: bytes, user_password: bytes, rev: int) -> bytes: """ See :func:`compute_O_value_key`. Args: rc4_key: user_password: rev: The encryption revision (see PDF standard) Returns: The RC4 encrypted """ a = _padding(user_password) rc4_enc = rc4_encrypt(rc4_key, a) if rev >= 3: for i in range(1, 20): key = bytes(x ^ i for x in rc4_key) rc4_enc = rc4_encrypt(key, rc4_enc) return rc4_enc @staticmethod def compute_U_value(key: bytes, rev: int, id1_entry: bytes) -> bytes: """ Algorithm 4: Computing the encryption dictionary’s U (user password) value. 
(Security handlers of revision 2) a) Create an encryption key based on the user password string, as described in "Algorithm 2: Computing an encryption key". b) Encrypt the 32-byte padding string shown in step (a) of "Algorithm 2: Computing an encryption key", using an RC4 encryption function with the encryption key from the preceding step. c) Store the result of step (b) as the value of the U entry in the encryption dictionary. Args: key: rev: The encryption revision (see PDF standard) id1_entry: Returns: The value """ if rev <= 2: return rc4_encrypt(key, _PADDING) """ Algorithm 5: Computing the encryption dictionary’s U (user password) value. (Security handlers of revision 3 or greater) a) Create an encryption key based on the user password string, as described in "Algorithm 2: Computing an encryption key". b) Initialize the MD5 hash function and pass the 32-byte padding string shown in step (a) of "Algorithm 2: Computing an encryption key" as input to this function. c) Pass the first element of the file’s file identifier array (the value of the ID entry in the document’s trailer dictionary; see Table 15) to the hash function and finish the hash. d) Encrypt the 16-byte result of the hash, using an RC4 encryption function with the encryption key from step (a). e) Do the following 19 times: Take the output from the previous invocation of the RC4 function and pass it as input to a new invocation of the function; use an encryption key generated by taking each byte of the original encryption key obtained in step (a) and performing an XOR (exclusive or) operation between that byte and the single-byte value of the iteration counter (from 1 to 19). f) Append 16 bytes of arbitrary padding to the output from the final invocation of the RC4 function and store the 32-byte result as the value of the U entry in the encryption dictionary. 
""" u_hash = hashlib.md5(_PADDING) u_hash.update(id1_entry) rc4_enc = rc4_encrypt(key, u_hash.digest()) for i in range(1, 20): rc4_key = bytes(x ^ i for x in key) rc4_enc = rc4_encrypt(rc4_key, rc4_enc) return _padding(rc4_enc) @staticmethod def verify_user_password( user_password: bytes, rev: int, key_size: int, o_entry: bytes, u_entry: bytes, P: int, id1_entry: bytes, metadata_encrypted: bool, ) -> bytes: """ Algorithm 6: Authenticating the user password. a) Perform all but the last step of "Algorithm 4: Computing the encryption dictionary’s U (user password) value (Security handlers of revision 2)" or "Algorithm 5: Computing the encryption dictionary’s U (user password) value (Security handlers of revision 3 or greater)" using the supplied password string. b) If the result of step (a) is equal to the value of the encryption dictionary’s U entry (comparing on the first 16 bytes in the case of security handlers of revision 3 or greater), the password supplied is the correct user password. The key obtained in step (a) (that is, in the first step of "Algorithm 4: Computing the encryption dictionary’s U (user password) value (Security handlers of revision 2)" or "Algorithm 5: Computing the encryption dictionary’s U (user password) value (Security handlers of revision 3 or greater)") shall be used to decrypt the document. Args: user_password: The user password as a bytes stream rev: The encryption revision (see PDF standard) key_size: The size of the key in bytes o_entry: The owner entry u_entry: The user entry P: A set of flags specifying which operations shall be permitted when the document is opened with user access. If bit 2 is set to 1, all other bits are ignored and all operations are permitted. If bit 2 is set to 0, permission for operations are based on the values of the remaining flags defined in Table 24. id1_entry: metadata_encrypted: A boolean indicating if the metadata is encrypted. 
Returns: The key """ key = AlgV4.compute_key( user_password, rev, key_size, o_entry, P, id1_entry, metadata_encrypted ) u_value = AlgV4.compute_U_value(key, rev, id1_entry) if rev >= 3: u_value = u_value[:16] u_entry = u_entry[:16] if u_value != u_entry: key = b"" return key @staticmethod def verify_owner_password( owner_password: bytes, rev: int, key_size: int, o_entry: bytes, u_entry: bytes, P: int, id1_entry: bytes, metadata_encrypted: bool, ) -> bytes: """ Algorithm 7: Authenticating the owner password. a) Compute an encryption key from the supplied password string, as described in steps (a) to (d) of "Algorithm 3: Computing the encryption dictionary’s O (owner password) value". b) (Security handlers of revision 2 only) Decrypt the value of the encryption dictionary’s O entry, using an RC4 encryption function with the encryption key computed in step (a). (Security handlers of revision 3 or greater) Do the following 20 times: Decrypt the value of the encryption dictionary’s O entry (first iteration) or the output from the previous iteration (all subsequent iterations), using an RC4 encryption function with a different encryption key at each iteration. The key shall be generated by taking the original key (obtained in step (a)) and performing an XOR (exclusive or) operation between each byte of the key and the single-byte value of the iteration counter (from 19 to 0). c) The result of step (b) purports to be the user password. Authenticate this user password using "Algorithm 6: Authenticating the user password". If it is correct, the password supplied is the correct owner password. Args: owner_password: rev: The encryption revision (see PDF standard) key_size: The size of the key in bytes o_entry: The owner entry u_entry: The user entry P: A set of flags specifying which operations shall be permitted when the document is opened with user access. If bit 2 is set to 1, all other bits are ignored and all operations are permitted. 
If bit 2 is set to 0, permission for operations are based on the values of the remaining flags defined in Table 24. id1_entry: metadata_encrypted: A boolean indicating if the metadata is encrypted. Returns: bytes """ rc4_key = AlgV4.compute_O_value_key(owner_password, rev, key_size) if rev <= 2: user_password = rc4_decrypt(rc4_key, o_entry) else: user_password = o_entry for i in range(19, -1, -1): key = bytes(x ^ i for x in rc4_key) user_password = rc4_decrypt(key, user_password) return AlgV4.verify_user_password( user_password, rev, key_size, o_entry, u_entry, P, id1_entry, metadata_encrypted, )
AlgV4