language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
django__django
django/contrib/redirects/migrations/0002_alter_redirect_new_path_help_text.py
{ "start": 43, "end": 631 }
class ____(migrations.Migration): dependencies = [ ("redirects", "0001_initial"), ] operations = [ migrations.AlterField( model_name="redirect", name="new_path", field=models.CharField( blank=True, help_text=( "This can be either an absolute path (as above) or a full " "URL starting with a scheme such as “https://”." ), max_length=200, verbose_name="redirect to", ), ), ]
Migration
python
catalyst-team__catalyst
examples/recsys/macridvae.py
{ "start": 3862, "end": 6748 }
class ____(dl.Runner): def on_loader_start(self, runner): super().on_loader_start(runner) self.meters = { key: metrics.AdditiveMetric(compute_on_call=False) for key in ["loss_ae", "loss_kld", "loss"] } def handle_batch(self, batch): x = batch["inputs"] x_true = batch["targets"] x_recon, mu, logvar = self.model(x) anneal = min( self.hparams["anneal_cap"], self.batch_step / self.hparams["total_anneal_steps"], ) loss_ae = -torch.mean(torch.sum(F.log_softmax(x_recon, 1) * x, -1)) loss_kld = None for i in range(self.model.kfac): loss_kld_k = -0.5 * torch.mean( torch.sum(1 + logvar[i] - mu[i].pow(2) - logvar[i].exp(), dim=1) ) # loss_kld_k = -0.5 * torch.mean(torch.sum(1 + logvar[i] - logvar[i].exp(), dim=1)) loss_kld = loss_kld_k if (loss_kld is None) else (loss_kld + loss_kld_k) loss = loss_ae + anneal * loss_kld self.batch.update({"logits": x_recon, "inputs": x, "targets": x_true}) self.batch_metrics.update( {"loss_ae": loss_ae, "loss_kld": loss_kld, "loss": loss} ) for key in ["loss_ae", "loss_kld", "loss"]: self.meters[key].update(self.batch_metrics[key].item(), self.batch_size) def on_loader_end(self, runner): for key in ["loss_ae", "loss_kld", "loss"]: self.loader_metrics[key] = self.meters[key].compute()[0] super().on_loader_end(runner) if __name__ == "__main__": set_global_seed(42) train_dataset = MovieLens(root=".", train=True, download=True) test_dataset = MovieLens(root=".", train=False, download=True) loaders = { "train": DataLoader(train_dataset, batch_size=32, collate_fn=collate_fn_train), "valid": DataLoader(test_dataset, batch_size=32, collate_fn=collate_fn_valid), } item_num = len(train_dataset[0]) model = MacridVAE([item_num, 600, 200]) optimizer = optim.Adam(model.parameters(), lr=0.001) engine = dl.Engine() hparams = { "anneal_cap": 0.2, "total_anneal_steps": 6000, } callbacks = [ dl.NDCGCallback("logits", "targets", [20, 50, 100]), dl.MAPCallback("logits", "targets", [20, 50, 100]), dl.MRRCallback("logits", "targets", [20, 50, 100]), 
dl.HitrateCallback("logits", "targets", [20, 50, 100]), dl.BackwardCallback("loss"), dl.OptimizerCallback("loss", accumulation_steps=1), ] runner = RecSysRunner() runner.train( model=model, optimizer=optimizer, engine=engine, hparams=hparams, loaders=loaders, num_epochs=15, verbose=True, timeit=False, callbacks=callbacks, logdir="./logs_macridvae", )
RecSysRunner
python
django__django
tests/template_tests/test_callables.py
{ "start": 123, "end": 6218 }
class ____(TestCase): @classmethod def setUpClass(cls): cls.engine = Engine() super().setUpClass() def test_callable(self): class Doodad: def __init__(self, value): self.num_calls = 0 self.value = value def __call__(self): self.num_calls += 1 return {"the_value": self.value} my_doodad = Doodad(42) c = Context({"my_doodad": my_doodad}) # We can't access ``my_doodad.value`` in the template, because # ``my_doodad.__call__`` will be invoked first, yielding a dictionary # without a key ``value``. t = self.engine.from_string("{{ my_doodad.value }}") self.assertEqual(t.render(c), "") # We can confirm that the doodad has been called self.assertEqual(my_doodad.num_calls, 1) # But we can access keys on the dict that's returned # by ``__call__``, instead. t = self.engine.from_string("{{ my_doodad.the_value }}") self.assertEqual(t.render(c), "42") self.assertEqual(my_doodad.num_calls, 2) def test_alters_data(self): class Doodad: alters_data = True def __init__(self, value): self.num_calls = 0 self.value = value def __call__(self): self.num_calls += 1 return {"the_value": self.value} my_doodad = Doodad(42) c = Context({"my_doodad": my_doodad}) # Since ``my_doodad.alters_data`` is True, the template system will not # try to call our doodad but will use string_if_invalid t = self.engine.from_string("{{ my_doodad.value }}") self.assertEqual(t.render(c), "") t = self.engine.from_string("{{ my_doodad.the_value }}") self.assertEqual(t.render(c), "") # Double-check that the object was really never called during the # template rendering. 
self.assertEqual(my_doodad.num_calls, 0) def test_alters_data_propagation(self): class GrandParentLeft(AltersData): def my_method(self): return 42 my_method.alters_data = True class ParentLeft(GrandParentLeft): def change_alters_data_method(self): return 63 change_alters_data_method.alters_data = True def sub_non_callable_method(self): return 64 sub_non_callable_method.alters_data = True class ParentRight(AltersData): def other_method(self): return 52 other_method.alters_data = True class Child(ParentLeft, ParentRight): def my_method(self): return 101 def other_method(self): return 102 def change_alters_data_method(self): return 103 change_alters_data_method.alters_data = False sub_non_callable_method = 104 class GrandChild(Child): pass child = Child() self.assertIs(child.my_method.alters_data, True) self.assertIs(child.other_method.alters_data, True) self.assertIs(child.change_alters_data_method.alters_data, False) grand_child = GrandChild() self.assertIs(grand_child.my_method.alters_data, True) self.assertIs(grand_child.other_method.alters_data, True) self.assertIs(grand_child.change_alters_data_method.alters_data, False) c = Context({"element": grand_child}) t = self.engine.from_string("{{ element.my_method }}") self.assertEqual(t.render(c), "") t = self.engine.from_string("{{ element.other_method }}") self.assertEqual(t.render(c), "") t = self.engine.from_string("{{ element.change_alters_data_method }}") self.assertEqual(t.render(c), "103") t = self.engine.from_string("{{ element.sub_non_callable_method }}") self.assertEqual(t.render(c), "104") def test_do_not_call(self): class Doodad: do_not_call_in_templates = True def __init__(self, value): self.num_calls = 0 self.value = value def __call__(self): self.num_calls += 1 return {"the_value": self.value} my_doodad = Doodad(42) c = Context({"my_doodad": my_doodad}) # Since ``my_doodad.do_not_call_in_templates`` is True, the template # system will not try to call our doodad. 
We can access its attributes # as normal, and we don't have access to the dict that it returns when # called. t = self.engine.from_string("{{ my_doodad.value }}") self.assertEqual(t.render(c), "42") t = self.engine.from_string("{{ my_doodad.the_value }}") self.assertEqual(t.render(c), "") # Double-check that the object was really never called during the # template rendering. self.assertEqual(my_doodad.num_calls, 0) def test_do_not_call_and_alters_data(self): # If we combine ``alters_data`` and ``do_not_call_in_templates``, the # ``alters_data`` attribute will not make any difference in the # template system's behavior. class Doodad: do_not_call_in_templates = True alters_data = True def __init__(self, value): self.num_calls = 0 self.value = value def __call__(self): self.num_calls += 1 return {"the_value": self.value} my_doodad = Doodad(42) c = Context({"my_doodad": my_doodad}) t = self.engine.from_string("{{ my_doodad.value }}") self.assertEqual(t.render(c), "42") t = self.engine.from_string("{{ my_doodad.the_value }}") self.assertEqual(t.render(c), "") # Double-check that the object was really never called during the # template rendering. self.assertEqual(my_doodad.num_calls, 0)
CallableVariablesTests
python
wandb__wandb
wandb/vendor/graphql-core-1.1/wandb_graphql/language/location.py
{ "start": 47, "end": 746 }
class ____(object): __slots__ = 'line', 'column' def __init__(self, line, column): self.line = line self.column = column def __repr__(self): return '<SourceLocation line={} column={}>'.format(self.line, self.column) def __eq__(self, other): return ( isinstance(other, SourceLocation) and self.line == other.line and self.column == other.column ) def get_location(source, position): lines = source.body[:position].splitlines() if lines: line = len(lines) column = len(lines[-1]) + 1 else: line = 1 column = 1 return SourceLocation(line, column)
SourceLocation
python
huggingface__transformers
src/transformers/models/hubert/modeling_hubert.py
{ "start": 4547, "end": 5294 }
class ____(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states
HubertNoLayerNormConvLayer
python
getsentry__sentry
src/sentry/releases/endpoints/organization_release_file_details.py
{ "start": 553, "end": 694 }
class ____(serializers.Serializer): name = serializers.CharField(max_length=200, required=True) @region_silo_endpoint
ReleaseFileSerializer
python
getsentry__sentry
src/social_auth/backends/asana.py
{ "start": 950, "end": 2448 }
class ____(BaseOAuth2): """Asana OAuth authentication mechanism""" AUTHORIZATION_URL = ASANA_AUTHORIZATION_URL ACCESS_TOKEN_URL = ASANA_TOKEN_EXCHANGE_URL AUTH_BACKEND = AsanaBackend SETTINGS_KEY_NAME = "ASANA_CLIENT_ID" SETTINGS_SECRET_NAME = "ASANA_CLIENT_SECRET" REDIRECT_STATE = False def user_data(self, access_token, *args, **kwargs): """Loads user data from service""" headers = {"Authorization": f"Bearer {access_token}"} try: resp = requests.get(ASANA_USER_DETAILS_URL, headers=headers) resp.raise_for_status() return resp.json()["data"] except ValueError: return None def auth_complete(self, *args, **kwargs): """Completes logging process, must return user instance""" self.process_error(self.data) params = self.auth_complete_params(self.validate_state()) response = requests.post(self.ACCESS_TOKEN_URL, data=params, headers=self.auth_headers()) if response.status_code == 400: raise AuthCanceled(self) response.raise_for_status() try: response_json = response.json() except (ValueError, KeyError): raise AuthUnknownError(self) response_json.pop("data") self.process_error(response_json) return self.do_auth(response_json["access_token"], response=response_json, *args, **kwargs) # Backend definition BACKENDS = {"asana": AsanaAuth}
AsanaAuth
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/nn_ops/embedding_ops_test.py
{ "start": 53879, "end": 55264 }
class ____(test.TestCase): @test_util.run_deprecated_v1 def testCint32Cpu(self): with self.session(use_gpu=False): indices = [ ops.convert_to_tensor([0, 1, 4, 6]), ops.convert_to_tensor([2, 3, 5]) ] values = [ ops.convert_to_tensor([12, 23, 34, 45]), ops.convert_to_tensor([1, 2, 3]) ] self.assertAllEqual( data_flow_ops.parallel_dynamic_stitch(indices, values), [12, 23, 1, 2, 34, 3, 45]) @test_util.run_deprecated_v1 def testInt32Cpu(self): with self.session(use_gpu=False): indices = [ ops.convert_to_tensor([0, 1, 5, 6, 7]), ops.convert_to_tensor([2, 4, 3]) ] values = [ ops.convert_to_tensor([12, 23, 34, 45, 56]), ops.convert_to_tensor([1, 3, 2]) ] self.assertAllEqual( data_flow_ops.parallel_dynamic_stitch(indices, values), [12, 23, 1, 2, 3, 34, 45, 56]) @test_util.run_deprecated_v1 def testSimple(self): with self.session(use_gpu=False): indices = [ops.convert_to_tensor([0, 1]), ops.convert_to_tensor([2, 3])] values = [ops.convert_to_tensor([2, 3]), ops.convert_to_tensor([1, 1])] self.assertAllEqual( data_flow_ops.parallel_dynamic_stitch(indices, values), [2, 3, 1, 1]) if __name__ == "__main__": test.main()
ParallelDynamicStitchOpTest
python
allegroai__clearml
clearml/automation/trigger.py
{ "start": 477, "end": 3379 }
class ____(BaseScheduleJob): _only_fields = {"id", "name", "last_update", "last_change"} _update_field = None _change_field = None project = attrib(default=None, type=str) match_name = attrib(default=None, type=str) tags = attrib(default=None, type=list) required_tags = attrib(default=None, type=list) add_tag = attrib(default=None, type=str) last_update = attrib(default=None, type=datetime, converter=datetime_from_isoformat) # remember the previous state of Ids answering the specific query # any new object.id returned that is not in the list, is a new event # we store a dict of {object_id: datetime} # allowing us to ignore repeating object updates triggering multiple times _triggered_instances = attrib(type=dict, default=None) def build_query(self, ref_time: datetime, client: APIClient = None) -> dict: server_supports_datetime_or_query = client and ( (client.session.feature_set == "basic" and client.session.check_min_server_version("1.16.3")) or (client.session.feature_set != "basic" and client.session.check_min_server_version("3.22.6")) ) query = { "name": self.match_name or None, "project": [self.project] if self.project else None, "tags": ((self.tags or []) + (self.required_tags or [])) or None, } if not server_supports_datetime_or_query: query[self._update_field] = ">{}".format(ref_time.isoformat() if ref_time else self.last_update.isoformat()) else: query["_or_"] = { "fields": [self._update_field, self._change_field], "datetime": [">{}".format(ref_time.isoformat() if ref_time else self.last_update.isoformat())], } return query def verify(self) -> None: super(BaseTrigger, self).verify() if self.tags and (not isinstance(self.tags, (list, tuple)) or not all(isinstance(s, str) for s in self.tags)): raise ValueError("Tags must be a list of strings: {}".format(self.tags)) if self.required_tags and ( not isinstance(self.required_tags, (list, tuple)) or not all(isinstance(s, str) for s in self.required_tags) ): raise ValueError("Required tags must be a list of 
strings: {}".format(self.required_tags)) if self.project and not isinstance(self.project, str): raise ValueError("Project must be a string: {}".format(self.project)) if self.match_name and not isinstance(self.match_name, str): raise ValueError("Match name must be a string: {}".format(self.match_name)) def get_key(self) -> Optional[str]: return getattr(self, "_key", None) def get_ref_time(self, obj: Any) -> datetime: return max(getattr(obj, self._update_field, 0), getattr(obj, self._change_field, 0)) @attrs
BaseTrigger
python
google__flatbuffers
python/flatbuffers/flexbuffers.py
{ "start": 14731, "end": 15727 }
class ____(Vector): """Data accessor for the encoded map bytes.""" @staticmethod def CompareKeys(a, b): if isinstance(a, Ref): a = a.AsKeyBytes if isinstance(b, Ref): b = b.AsKeyBytes return a < b def __getitem__(self, key): if isinstance(key, int): return super().__getitem__(key) index = _BinarySearch(self.Keys, key.encode('ascii'), self.CompareKeys) if index != -1: return super().__getitem__(index) raise KeyError(key) @property def Keys(self): byte_width = _Unpack( U, self._buf[-2 * self._byte_width : -self._byte_width] ) buf = self._buf.Indirect(-3 * self._byte_width, self._byte_width) return TypedVector(buf, byte_width, Type.KEY) @property def Values(self): return Vector(self._buf, self._byte_width) @property def Value(self): return {k.Value: v.Value for k, v in zip(self.Keys, self.Values)} def __repr__(self): return 'Map(%s, size=%d)' % (self._buf, len(self))
Map
python
dagster-io__dagster
python_modules/dagster/dagster/components/core/component_tree.py
{ "start": 1619, "end": 1727 }
class ____(Exception): pass @record( checked=False, # cant handle ModuleType )
ComponentTreeException
python
pytorch__pytorch
torch/_inductor/fuzzer.py
{ "start": 19862, "end": 37524 }
class ____: """ This tool makes it easy to search through config state-space with a minimal reproduction or test, either for debugging or just bug hunting. It has two entry points: - bisect, which randomly flips configs and tries to find the minimal reproduction upon failure. - fuzz_n_tuple, which tries every combination of n configs. This grows quickly as a function of n, so beware. bisect is recommended, but fuzz_n_tuple can give you peace of mind that a new config will compose with every other config. The main interface is a function factory that will return Callables to be torch.compiled. This function factory should return a test function when it's called. Said test function returns a boolean, which determines whether the ConfigFuzzer considers it a successful run or not. Throwing an exception from within the function will be considered a failure as well. # Example usage: ```python import torch._inductor.config as cfg def create_simple_test_model_gpu() -> FactoryOutputType: batch_size = 32 seq_length = 50 hidden_size = 768 def test_fn() -> bool: inp = torch.randn(batch_size, seq_length, hidden_size, device="cuda") weight = torch.randn(hidden_size, hidden_size, device="cuda") matmul_output = inp @ weight final_output = torch.nn.LayerNorm(hidden_size, device="cuda")(matmul_output) return True return test_fn fuzzer = ConfigFuzzer(cfg, create_simple_test_model_gpu, seed=2) # Test every pair of configs: results = fuzzer.fuzz_n_tuple(n, max_combinations=10000000) visualize_results(n, results) # Test random configs with bisection: ret = fuzzer.bisect(num_attempts=10) # reproduce a failing config fuzzer.reproduce( [{"triton.autotune_pointwise": ..., "coordinate_descent_tuning": ...}] ) ``` The list of known failures on inductor config are: cpp_wrapper, triton_debug_sync_graph cpp_wrapper, triton_debug_sync_kernel cpp_wrapper, disable_cpp_codegen combo_kernels, benchmark_combo_kernel, profile_bandwidth, profile_bandwidth_regex trace.enabled, trace.save_real_tensors """ 
sample: SamplingType default: ConfigType def __init__( self, config_module: ConfigModule, test_model_fn_factory: FactoryType, seed: int, default: Optional[ConfigType] = None, sm: SamplingMethod = SamplingMethod.TOGGLE, test_timeout: int = 3600, ): """ Args: config_module: The module containing the configs to fuzz test_model_fn_factory: Function that returns a test model, which runs and returns True if successful, or the outputs if they should be compared with eager seed: Randomness seed. default: Default values for the config. Inductor has preset based on know failures. sm: How type value samples are generated, default TOGGLE. test_timeout: max time a test can take. """ self.seed = seed self.test_timeout = test_timeout self.detailed_results: dict[ComboType, dict[str, Any]] = {} self.config_module = config_module self.test_model_fn_factory = test_model_fn_factory self.fields: dict[str, _ConfigEntry] = self.config_module._config self.sample = SamplingMethod.dispatch(sm) if default is None: if self.config_module.__name__ in MODULE_DEFAULTS: self.default = MODULE_DEFAULTS[self.config_module.__name__] else: raise ValueError("No default passed to ConfigFuzzer.") else: self.default = default def __repr__(self) -> str: return ( f"ConfigFuzzer(config_module={self.config_module}, " f"test_model_fn_factor={self.test_model_fn_factory}, seed={self.seed}, default={self.default})" ) def _set_config(self, field_name: str, value: Any) -> None: """Set a config value in the module.""" setattr(self.config_module, field_name, value) def _reset_configs(self) -> None: """Reset all configs to their default values.""" for field_name, field_obj in self.fields.items(): self._set_config(field_name, field_obj.default) def new_config(self) -> ConfigType: """creates a new config from the default""" ret = { name: val if val != DEFAULT else self.fields[name].default for name, val in self.default.items() } return ret def reproduce(self, configs: Sequence[ConfigType]) -> ResultType: """entrypoint to 
reproduce any failure""" results = ResultType() for conf in configs: self._reproduce_single_helper(conf, results) return results def _reproduce_single_helper(self, conf: ConfigType, results: ResultType) -> None: print(f"Starting repro of {conf}") new_config = self.new_config() new_config.update(conf) self.test_config(results, new_config) print(f"Status of {conf}:\n{results.lookup(tuple(conf.keys()))}") def reproduce_single(self, config: ConfigType) -> ResultType: results = ResultType() self._reproduce_single_helper(config, results) return results def _fuzz_helper(self, results: ResultType, combo: ComboType) -> Status: print(combo) if st := results.lookup(combo): # we already processed this config return st config = self.new_config() skip = False for field_name in combo: if field_name in config: # don't break here because we need to build the config dict skip = True if field_name.startswith("_"): skip = True field = self.fields[field_name] value = self.sample(field_name, field.value_type, field.default) config[field_name] = value if skip: results.set(combo, Status.SKIPPED) return Status.SKIPPED return self.test_config(results, config) def fuzz_n_tuple(self, n: int, max_combinations: int = 1000) -> ResultType: """ Test every combination of n configs. returns a dict of this shape: {(config-1, config-2... 
config-n): status} """ results = ResultType() print(f"Starting {n}-tuple testing with seed {self.seed}") random.seed(self.seed) for combo in itertools.combinations(self.fields, n): st = self._fuzz_helper(results, combo) if st != Status.SKIPPED: max_combinations -= 1 if max_combinations <= 0: print("Reached maximum combinations limit") break return results def save_state(self, filename: str = "fuzzer_state.pkl") -> None: """Save the current fuzzer state to a file""" with open(filename, "wb") as f: pickle.dump( {"results": self.results, "detailed_results": self.detailed_results}, f ) def load_state(self, filename: str = "fuzzer_state.pkl") -> None: """Load fuzzer state from a file""" with open(filename, "rb") as f: state = pickle.load(f) self.results = state["results"] self.detailed_results = state.get("detailed_results", {}) def timeout_handler(self, signum: int, frame: Optional[FrameType]) -> None: raise TimeoutError("Test execution timed out") def test_config(self, results: ResultType, config: ConfigType) -> Status: """ Tests a config by calling the function produced by the factory function. 
""" original_handler = signal.signal(signal.SIGALRM, self.timeout_handler) signal.alarm(self.test_timeout) print(f"Testing config {config}") config_tuple = tuple(config.keys()) if ret := results.lookup(config_tuple): signal.signal(signal.SIGALRM, original_handler) return ret def print_config() -> None: for field, value in config.items(): print(f"{field} = {value}") def get_error_info(exc: Exception) -> dict[str, Any]: return { "exception": str(exc), "traceback": traceback.format_exc(), "config": config.copy(), } def handle_return( message: str, return_status: Status, print_traceback: bool, exc: Optional[Exception], ) -> Status: signal.signal(signal.SIGALRM, original_handler) print(f"{message} with config combination:") print_config() if exc: self.detailed_results[config_tuple] = get_error_info(exc) if print_traceback: traceback.print_exc() results.set(config_tuple, return_status) return return_status # reset config torch._dynamo.reset() self._reset_configs() for name, value in config.items(): self._set_config(name, value) # try running eager test_model_fn = self.test_model_fn_factory() try: test_model_fn() except Exception as exc: return handle_return( "Eager exception", Status.FAILED_RUN_EAGER_EXCEPTION, True, exc ) # try compilation try: test_model_fn2 = self.test_model_fn_factory() comp = torch.compile(test_model_fn2, backend="inductor") except Exception as exc: return handle_return( "Exception compiling", Status.FAILED_COMPILE, True, exc ) # try running compiled try: compile_result = comp() except Exception as exc: return handle_return( "Exception running compiled", Status.FAILED_RUN_COMPILE_EXCEPTION, True, exc, ) # bool return value means don't compare with eager if not compile_result: return handle_return( "Function returned False", Status.FAILED_RUN_RETURN, False, None ) else: return handle_return("Function succeeded", Status.PASSED, False, None) def bisect(self, num_attempts: int = 100, p: float = 0.5) -> list[ConfigType]: """ Test configs and bisect to 
minimal failing configuration. """ print(f"Starting random testing with bisection, seed {self.seed}, and p {p}") random.seed(self.seed) self._reset_configs() results = ResultType() ret: list[ConfigType] = [] for attempt in range(num_attempts): print(f"Random attempt {attempt + 1}/{num_attempts}") config = self.new_config() for field_name, config_entry in self.fields.items(): if ( field_name not in config and not field_name.startswith("_") and "TESTING_ONLY" not in field_name and random.random() < p ): value = self.sample( field_name, config_entry.value_type, config_entry.default ) config[field_name] = value status = self.test_config(results, config) if status not in OrderedSet([Status.PASSED, Status.SKIPPED]): if minimal_failing_config := self._bisect_failing_config( results, config ): print(f"Minimum failing config: {minimal_failing_config}") ret.append(minimal_failing_config) return ret def _bisect_failing_config( self, results: ResultType, failing_config: ConfigType ) -> Optional[ConfigType]: return self._bisect_failing_config_helper(results, list(failing_config.items())) def _bisect_failing_config_helper( self, results: ResultType, failing_config: list[tuple[str, Any]] ) -> Optional[ConfigType]: """ Bisect a failing configuration to find minimal set of configs that cause failure. Splits it into halves, then fourths, then tries dropping configs one-by-one. 
""" print(f"bisecting config: {failing_config}") if not failing_config: return None def test(x: list[tuple[str, Any]]) -> Status: d = dict(x) result = self.test_config(results, d) return result if len(failing_config) <= 1: return dict(failing_config) if test(failing_config).failing() else None random.shuffle(failing_config) mid = len(failing_config) // 2 first_half = failing_config[:mid] second_half = failing_config[mid:] if test(first_half).failing(): return self._bisect_failing_config_helper(results, first_half) if test(second_half).failing(): return self._bisect_failing_config_helper(results, second_half) if len(failing_config) >= 8: low = len(failing_config) // 4 high = mid + low quart1 = failing_config[low:] if test(quart1).failing(): return self._bisect_failing_config_helper(results, quart1) quart2 = failing_config[:low] + second_half if test(quart2).failing(): return self._bisect_failing_config_helper(results, quart2) quart3 = first_half + failing_config[:high] if test(quart3).failing(): return self._bisect_failing_config_helper(results, quart3) quart4 = failing_config[high:] if test(quart4).failing(): return self._bisect_failing_config_helper(results, quart4) # try dropping one value at a time for i in range(len(failing_config)): new_list = [x for j, x in enumerate(failing_config) if j != i] if test(new_list).failing(): return self._bisect_failing_config_helper(results, new_list) # we have the minimal set return dict(failing_config) def visualize_results( n: int, results: ResultType, filename: str = "results.html" ) -> None: """ Creates an HTML document representing the results of running the fuzzer with fuzz_n_tuple, with n = 2. 
""" # TODO support more dimensions assert n == 2 assert len(results) > 0 input_set: OrderedSet[str] = OrderedSet({}) for key in results.keys(): # noqa: SIM118 input_set.add(key[0]) input_set.add(key[1]) input_list = sorted(input_set) # Start the HTML content html_content = """ <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title> Fuzzer Visualization</title> <style> table { border-collapse: collapse; width: 50%; margin: 20px auto; } th, td { border: 1px solid #ddd; padding: 8px; text-align: center; } th { background-color: #f2f2f2; } .skipped { background-color: yellow; } .passed { background-color: green; color: white; } .failed { background-color: red; color: white; } </style> </head> <body> <h2 style="text-align: center;">Fuzzer Visualization</h2> <table> <thead> """ html_content += "<tr><th>\\</th>" for col_name in input_list: col = "<br>".join(col_name) html_content += f"<th>{col}</th>" html_content += "</tr></thead><tbody>" # Add table rows for row_name in input_list: html_content += f"<tr><th>{row_name}</th>" for col_name in input_list: # Determine the status class for the cell status_enum = results.lookup((row_name, col_name)) status_class = "" status_val = "" if status_enum == Status.SKIPPED: status_class = "skipped" status_val = "-" elif status_enum == Status.PASSED: status_class = "passed" status_val = "O" elif status_enum == Status.FAILED_RUN_EAGER_EXCEPTION: status_class = "failed" status_val = "e" elif status_enum == Status.FAILED_RUN_COMPILE_EXCEPTION: status_class = "failed" status_val = "E" elif status_enum == Status.FAILED_RUN_RETURN: status_class = "failed" status_val = "R" elif status_enum == Status.FAILED_COMPILE: status_class = "failed" status_val = "C" else: status_class = "skipped" status_val = "-" html_content += f'<td class="{status_class}">{status_val}</td>' html_content += "</tr>" html_content += """ </tbody> </table> </body> </html> """ with 
open(filename, "w") as file: file.write(html_content)
ConfigFuzzer
python
getsentry__sentry
tests/sentry/core/endpoints/scim/test_scim_user_details.py
{ "start": 1162, "end": 1927 }
class ____(APITestCase): def setUp(self) -> None: super().setUp() self.login_as(user=self.user) def test_cant_use_scim(self) -> None: url = reverse("sentry-api-0-organization-scim-member-index", args=[self.organization.slug]) response = self.client.get(url) assert response.status_code == 403 def test_cant_use_scim_even_with_authprovider(self) -> None: with assume_test_silo_mode(SiloMode.CONTROL): AuthProvider.objects.create(organization_id=self.organization.id, provider="dummy") url = reverse("sentry-api-0-organization-scim-member-index", args=[self.organization.slug]) response = self.client.get(url) assert response.status_code == 403
SCIMMemberTestsPermissions
python
pyinstaller__pyinstaller
PyInstaller/exceptions.py
{ "start": 1140, "end": 1448 }
class ____(SystemExit): def __init__(self, message): super().__init__( f"ERROR: Bytecode encryption was removed in PyInstaller v6.0. {message}" " For the rationale and alternatives see https://github.com/pyinstaller/pyinstaller/pull/6999" )
RemovedCipherFeatureError
python
gevent__gevent
src/greentest/3.11/test_signal.py
{ "start": 24306, "end": 27750 }
class ____(unittest.TestCase): def readpipe_interrupted(self, interrupt): """Perform a read during which a signal will arrive. Return True if the read is interrupted by the signal and raises an exception. Return False if it returns normally. """ # use a subprocess to have only one thread, to have a timeout on the # blocking read and to not touch signal handling in this process code = """if 1: import errno import os import signal import sys interrupt = %r r, w = os.pipe() def handler(signum, frame): 1 / 0 signal.signal(signal.SIGALRM, handler) if interrupt is not None: signal.siginterrupt(signal.SIGALRM, interrupt) print("ready") sys.stdout.flush() # run the test twice try: for loop in range(2): # send a SIGALRM in a second (during the read) signal.alarm(1) try: # blocking call: read from a pipe without data os.read(r, 1) except ZeroDivisionError: pass else: sys.exit(2) sys.exit(3) finally: os.close(r) os.close(w) """ % (interrupt,) with spawn_python('-c', code) as process: try: # wait until the child process is loaded and has started first_line = process.stdout.readline() stdout, stderr = process.communicate(timeout=support.SHORT_TIMEOUT) except subprocess.TimeoutExpired: process.kill() return False else: stdout = first_line + stdout exitcode = process.wait() if exitcode not in (2, 3): raise Exception("Child error (exit code %s): %r" % (exitcode, stdout)) return (exitcode == 3) def test_without_siginterrupt(self): # If a signal handler is installed and siginterrupt is not called # at all, when that signal arrives, it interrupts a syscall that's in # progress. interrupted = self.readpipe_interrupted(None) self.assertTrue(interrupted) def test_siginterrupt_on(self): # If a signal handler is installed and siginterrupt is called with # a true value for the second argument, when that signal arrives, it # interrupts a syscall that's in progress. 
interrupted = self.readpipe_interrupted(True) self.assertTrue(interrupted) @support.requires_resource('walltime') def test_siginterrupt_off(self): # If a signal handler is installed and siginterrupt is called with # a false value for the second argument, when that signal arrives, it # does not interrupt a syscall that's in progress. interrupted = self.readpipe_interrupted(False) self.assertFalse(interrupted) @unittest.skipIf(sys.platform == "win32", "Not valid on Windows") @unittest.skipUnless(hasattr(signal, 'getitimer') and hasattr(signal, 'setitimer'), "needs signal.getitimer() and signal.setitimer()")
SiginterruptTest
python
pytorch__pytorch
torch/_inductor/codegen/cpp.py
{ "start": 229420, "end": 234693 }
class ____: """ A loop-nest-like structure. It is built with the `build` method as a loop nest and then will perform loop-tiling at some depth. A typical case is for vectorization, where we typically do loop-tiling at the innermost loop level. A more complicated case is when we do 2D tiling at both the innermost and outer levels. """ loops: Optional[list[LoopLevel]] = None kernel: Optional[CppKernel] = None @staticmethod def build(kernel: CppKernel): """Build a LoopNest with the given `kernel` as the leaf""" itervars = kernel.itervars ranges = kernel.ranges reduction_depth = kernel.reduction_depth assert reduction_depth is not None loops: Optional[list[LoopLevel]] = None for loop_idx, (var, size) in enumerate(zip(itervars, ranges)): loop = LoopLevel(var, size) if not loops: loops = [loop] else: loops.append(loop) if loop_idx >= reduction_depth: loop.is_reduction = kernel.is_reduction loop_nest = LoopNest(loops) return loop_nest def __bool__(self): return bool(self.loops) @cache_on_self def max_parallel_depth(self): """ Maximal allowed depth for parallelism: All reduction or non-reduction levels. When the range of the first inner loop beyond the maximum parallel depth is much larger than the range of all outer loops within the maximum parallel depth, change the starting depth of parallelism to the first inner loop and recalculate the maximum parallel depth. 
""" if self.loops is None: return ParallelDepth(parallel_depth=0, start_depth=0) start_depth = 0 max_depth = 0 is_reduction = self.loops[0].is_reduction num_steps = sympy.Integer(1) for loop in self.loops: if loop.is_reduction != is_reduction: break num_steps = num_steps * FloorDiv(loop.size, loop.steps) max_depth += 1 def get_simd_vec_depth(loops): # Return the first loop level which is simd_vec for i, loop in enumerate(loops): if loop.simd_vec: return i return None simd_vec_depth = get_simd_vec_depth(self.loops) def has_scalar_kernel(loop_nest: LoopNest): assert isinstance(loop_nest.kernel, CppKernelProxy) return any( not isinstance(kernel, CppVecKernel) for kernel in loop_nest.kernel.kernels ) # When the number of steps of the first inner loop is much larger than the number of steps of # all outer loops, change `start_depth` to the first inner loop and recalculate `max_depth`. if ( max_depth < len(self.loops) and isinstance(num_steps, sympy.Integer) and isinstance(self.loops[max_depth].size, sympy.Integer) and num_steps * 300 < FloorDiv(self.loops[max_depth].size, self.loops[max_depth].steps) and not ( # Disable parallel reduction under the vec loop simd_vec_depth is not None and max_depth > simd_vec_depth and self.loops[max_depth].is_reduction and has_scalar_kernel(self) ) ): start_depth = max_depth max_depth = 0 is_reduction = self.loops[start_depth].is_reduction for i in range(start_depth, len(self.loops)): if self.loops[i].is_reduction != is_reduction: break max_depth += 1 return ParallelDepth(parallel_depth=max_depth, start_depth=start_depth) def mark_parallel(self, par_depth): assert par_depth.parallel_depth <= self.max_parallel_depth().parallel_depth, ( "Parallel depth cannot exceed the maximal allowed parallel depth" ) assert self.loops is not None assert len(self.loops) >= par_depth.parallel_depth loop = self.loops[par_depth.start_depth] loop.parallel = par_depth.parallel_depth if loop.is_reduction: # pyrefly: ignore [bad-assignment] 
metrics.parallel_reduction_count += 1 for i in range(par_depth.start_depth + 1, par_depth.parallel_depth): self.loops[i].collapsed = True def tile(self, depth, factor): """ Do loop-tiling at the `depth` level with `factor`. for (x0 = 0; x0 < x0_end; x0++) -> for (x0 = 0; x0 < x0_end; x0 += factor) See details in Note [tiled_size]. """ assert self.loops self.loops[depth] = self.loops[depth].tile(factor) return self.loops[depth] def get_kernel(self) -> CppKernel: assert self.kernel return self.kernel def set_kernel(self, kernel): self.kernel = kernel def from_loop_level(self, level: int): assert self.loops assert len(self.loops) >= level loops = None if level == len(self.loops) else self.loops[level:] return LoopNest(loops, self.kernel)
LoopNest
python
allegroai__clearml
clearml/utilities/gpu/gpustat.py
{ "start": 4755, "end": 24836 }
class ____(object): global_processes = {} _initialized = False _device_count = None _gpu_device_info = {} _mig_device_info = {} def __init__( self, gpu_list: List[GPUStat], driver_version: Optional[str] = None, driver_cuda_version: Optional[str] = None, ) -> None: self.gpus = gpu_list # attach additional system information self.hostname = platform.node() self.query_time = datetime.now() self.driver_version = driver_version self.driver_cuda_version = driver_cuda_version @staticmethod def clean_processes() -> None: for pid in list(GPUStatCollection.global_processes.keys()): if not psutil.pid_exists(pid): del GPUStatCollection.global_processes[pid] @staticmethod def _new_query_amd( shutdown: bool = False, per_process_stats: bool = False, get_driver_info: bool = False, ) -> "GPUStatCollection": initialized = False if not GPUStatCollection._initialized: R.smi_initialize() GPUStatCollection._initialized = True initialized = True def get_gpu_info(index: int) -> dict: def amd_query_processes() -> List[R.rsmi_process_info_t]: num_procs = c_uint32() ret = R.rocm_lib.rsmi_compute_process_info_get(None, byref(num_procs)) if R.rsmi_ret_ok(ret): buff_sz = num_procs.value + 10 proc_info = (R.rsmi_process_info_t * buff_sz)() ret = R.rocm_lib.rsmi_compute_process_info_get(byref(proc_info), byref(num_procs)) proc_info_list = [proc_info[i] for i in range(num_procs.value)] if R.rsmi_ret_ok(ret) else [] result_proc_info_list = [] # query VRAM usage explicitly, as rsmi_compute_process_info_get # doesn't actually return VRAM usage for proc_info in proc_info_list: vram_query_proc_info = R.rsmi_process_info_t() ret = R.rocm_lib.rsmi_compute_process_info_by_pid_get( int(proc_info.process_id), byref(vram_query_proc_info) ) if R.rsmi_ret_ok(ret): proc_info.vram_usage = vram_query_proc_info.vram_usage result_proc_info_list.append(proc_info) return result_proc_info_list return [] def get_fan_speed() -> float: fan_level = c_int64() fan_max = c_int64() sensor_ind = c_uint32(0) ret = 
R.rocm_lib.rsmi_dev_fan_speed_get(index, sensor_ind, byref(fan_level)) if not R.rsmi_ret_ok(ret, log_error=False): return None ret = R.rocm_lib.rsmi_dev_fan_speed_max_get(index, sensor_ind, byref(fan_max)) if not R.rsmi_ret_ok(ret, log_error=False): return None if fan_level.value <= 0 or fan_max <= 0: return None return float(fan_level.value) / float(fan_max.value) def get_process_info(comp_process: R.rsmi_process_info_t) -> dict: process = {} pid = comp_process.process_id # skip global_processes caching because PID querying seems to be inconsistent atm # if pid not in GPUStatCollection.global_processes: # GPUStatCollection.global_processes[pid] = psutil.Process(pid=pid) process["pid"] = pid try: process["gpu_memory_usage"] = comp_process.vram_usage // MB except Exception: pass return process if not GPUStatCollection._gpu_device_info.get(index): uuid = R.smi_get_device_id(index) name = R.smi_get_device_name(index) GPUStatCollection._gpu_device_info[index] = (name, uuid) name, uuid = GPUStatCollection._gpu_device_info[index] temperature = None # TODO: fetch temperature. 
It should be possible fan_speed = get_fan_speed() try: memory_total = R.smi_get_device_memory_total(index) except Exception: memory_total = None try: memory_used = R.smi_get_device_memory_used(index) except Exception: memory_used = None try: utilization = R.smi_get_device_utilization(index) except Exception: utilization = None try: power = R.smi_get_device_average_power(index) except Exception: power = None power_limit = None # TODO: find a way to fetch this processes = [] if per_process_stats: try: comp_processes = amd_query_processes() except Exception: comp_processes = [] for comp_process in comp_processes: try: process = get_process_info(comp_process) except psutil.NoSuchProcess: # skip process caching for now pass else: processes.append(process) gpu_info = { "index": index, "uuid": uuid, "name": name, "temperature.gpu": temperature if temperature is not None else 0, "fan.speed": fan_speed if fan_speed is not None else 0, "utilization.gpu": utilization if utilization is not None else 100, "power.draw": power if power is not None else 0, "enforced.power.limit": power_limit if power_limit is not None else 0, # Convert bytes into MBytes "memory.used": memory_used // MB if memory_used is not None else 0, "memory.total": memory_total // MB if memory_total is not None else 100, "processes": None if (processes and all(p is None for p in processes)) else processes, } if per_process_stats: GPUStatCollection.clean_processes() return gpu_info gpu_list = [] if GPUStatCollection._device_count is None: GPUStatCollection._device_count = R.smi_get_device_count() for index in range(GPUStatCollection._device_count): gpu_info = get_gpu_info(index) gpu_stat = GPUStat(gpu_info) gpu_list.append(gpu_stat) if shutdown and initialized: R.smi_shutdown() GPUStatCollection._initialized = False # noinspection PyProtectedMember driver_version = GPUStatCollection._get_amd_driver_version() if get_driver_info else None return GPUStatCollection(gpu_list, driver_version=driver_version, 
driver_cuda_version=None) @staticmethod def _get_amd_driver_version() -> Optional[str]: # make sure the program doesn't crash with something like a SEGFAULT when querying the driver version try: process = subprocess.Popen(["rocm-smi", "--showdriverversion", "--json"], stdout=subprocess.PIPE) out, _ = process.communicate() return json.loads(out)["system"]["Driver version"] except Exception: try: process = subprocess.Popen( [ sys.executable, "-c", "from clearml.utilities.gpu.pyrsmi import smi_get_kernel_version, smi_initialize; " + "smi_initialize(); " + "print(smi_get_kernel_version())", ] ) out, _ = process.communicate() return out.strip() except Exception: return None @staticmethod def _running_in_amd_env() -> bool: # noinspection PyProtectedMember return bool(R._find_lib_rocm()) @staticmethod def _new_query_nvidia( shutdown: bool = False, per_process_stats: bool = False, get_driver_info: bool = False, ) -> "GPUStatCollection": """Query the information of all the GPUs on local machine""" initialized = False if not GPUStatCollection._initialized: N.nvmlInit() GPUStatCollection._initialized = True initialized = True def _decode(b: bytes) -> str: if isinstance(b, bytes): return b.decode() # for python3, to unicode return b def get_gpu_info(index: int, handle: Any, is_mig: bool = False) -> dict: """Get one GPU information specified by nvml handle""" def get_process_info(nv_process: Any) -> dict: """Get the process information of specific pid""" process = {} if nv_process.pid not in GPUStatCollection.global_processes: GPUStatCollection.global_processes[nv_process.pid] = psutil.Process(pid=nv_process.pid) process["pid"] = nv_process.pid # noinspection PyBroadException try: # ps_process = GPUStatCollection.global_processes[nv_process.pid] # we do not actually use these, so no point in collecting them # process['username'] = ps_process.username() # # cmdline returns full path; # # as in `ps -o comm`, get short cmdnames. 
# _cmdline = ps_process.cmdline() # if not _cmdline: # # sometimes, zombie or unknown (e.g. [kworker/8:2H]) # process['command'] = '?' # process['full_command'] = ['?'] # else: # process['command'] = os.path.basename(_cmdline[0]) # process['full_command'] = _cmdline # process['cpu_percent'] = ps_process.cpu_percent() # process['cpu_memory_usage'] = \ # round((ps_process.memory_percent() / 100.0) * # psutil.virtual_memory().total) # Bytes to MBytes process["gpu_memory_usage"] = nv_process.usedGpuMemory // MB except Exception: # insufficient permissions pass return process device_info = GPUStatCollection._mig_device_info if is_mig else GPUStatCollection._gpu_device_info if not device_info.get(index): name = _decode(N.nvmlDeviceGetName(handle)) uuid = _decode(N.nvmlDeviceGetUUID(handle)) device_info[index] = (name, uuid) name, uuid = device_info[index] try: temperature = N.nvmlDeviceGetTemperature(handle, N.NVML_TEMPERATURE_GPU) except N.NVMLError: temperature = None # Not supported try: fan_speed = N.nvmlDeviceGetFanSpeed(handle) except N.NVMLError: fan_speed = None # Not supported try: memory = N.nvmlDeviceGetMemoryInfo(handle) # in Bytes except N.NVMLError: memory = None # Not supported try: utilization = N.nvmlDeviceGetUtilizationRates(handle) except N.NVMLError: utilization = None # Not supported try: power = N.nvmlDeviceGetPowerUsage(handle) except N.NVMLError: power = None try: power_limit = N.nvmlDeviceGetEnforcedPowerLimit(handle) except N.NVMLError: power_limit = None try: nv_comp_processes = N.nvmlDeviceGetComputeRunningProcesses(handle) except N.NVMLError: nv_comp_processes = None # Not supported try: nv_graphics_processes = N.nvmlDeviceGetGraphicsRunningProcesses(handle) except N.NVMLError: nv_graphics_processes = None # Not supported if not per_process_stats or (nv_comp_processes is None and nv_graphics_processes is None): processes = None else: processes = [] nv_comp_processes = nv_comp_processes or [] nv_graphics_processes = nv_graphics_processes or [] 
for nv_process in nv_comp_processes + nv_graphics_processes: try: process = get_process_info(nv_process) except psutil.NoSuchProcess: # TODO: add some reminder for NVML broken context # e.g. nvidia-smi reset or reboot the system process = None processes.append(process) # we do not actually use these, so no point in collecting them # # TODO: Do not block if full process info is not requested # time.sleep(0.1) # for process in processes: # pid = process['pid'] # cache_process = GPUStatCollection.global_processes[pid] # process['cpu_percent'] = cache_process.cpu_percent() index = N.nvmlDeviceGetIndex(handle) gpu_info = { "index": index, "uuid": uuid, "name": name, "temperature.gpu": temperature, "fan.speed": fan_speed, "utilization.gpu": utilization.gpu if utilization else None, "power.draw": power // 1000 if power is not None else None, "enforced.power.limit": power_limit // 1000 if power_limit is not None else None, # Convert bytes into MBytes "memory.used": memory.used // MB if memory else None, "memory.total": memory.total // MB if memory else None, "processes": None if (processes and all(p is None for p in processes)) else processes, } if per_process_stats: GPUStatCollection.clean_processes() return gpu_info # 1. 
get the list of gpu and status gpu_list = [] if GPUStatCollection._device_count is None: GPUStatCollection._device_count = N.nvmlDeviceGetCount() for index in range(GPUStatCollection._device_count): handle = N.nvmlDeviceGetHandleByIndex(index) gpu_info = get_gpu_info(index, handle) mig_cnt = 0 # noinspection PyBroadException try: mig_cnt = N.nvmlDeviceGetMaxMigDeviceCount(handle) except Exception: pass if mig_cnt <= 0: gpu_list.append(GPUStat(gpu_info)) continue got_mig_info = False for mig_index in range(mig_cnt): # noinspection PyBroadException try: mig_handle = N.nvmlDeviceGetMigDeviceHandleByIndex(handle, mig_index) mig_info = get_gpu_info(mig_index, mig_handle, is_mig=True) mig_info["mig_name"] = mig_info["name"] mig_info["name"] = gpu_info["name"] mig_info["mig_index"] = mig_info["index"] mig_info["mig_uuid"] = mig_info["uuid"] mig_info["index"] = gpu_info["index"] mig_info["uuid"] = gpu_info["uuid"] mig_info["temperature.gpu"] = gpu_info["temperature.gpu"] mig_info["fan.speed"] = gpu_info["fan.speed"] gpu_list.append(GPUStat(mig_info)) got_mig_info = True except Exception: pass if not got_mig_info: gpu_list.append(GPUStat(gpu_info)) # 2. additional info (driver version, etc). 
if get_driver_info: try: driver_version = _decode(N.nvmlSystemGetDriverVersion()) except N.NVMLError: driver_version = None # N/A # noinspection PyBroadException try: cuda_driver_version = str(N.nvmlSystemGetCudaDriverVersion()) except BaseException: # noinspection PyBroadException try: cuda_driver_version = str(N.nvmlSystemGetCudaDriverVersion_v2()) except BaseException: cuda_driver_version = None if cuda_driver_version: try: cuda_driver_version = "{}.{}".format( int(cuda_driver_version) // 1000, (int(cuda_driver_version) % 1000) // 10, ) except (ValueError, TypeError): pass else: driver_version = None cuda_driver_version = None # no need to shutdown: if shutdown and initialized: N.nvmlShutdown() GPUStatCollection._initialized = False return GPUStatCollection( gpu_list, driver_version=driver_version, driver_cuda_version=cuda_driver_version, ) @staticmethod def new_query( shutdown: bool = False, per_process_stats: bool = False, get_driver_info: bool = False, ) -> "GPUStatCollection": # noinspection PyProtectedMember if GPUStatCollection._running_in_amd_env(): # noinspection PyProtectedMember return GPUStatCollection._new_query_amd( shutdown=shutdown, per_process_stats=per_process_stats, get_driver_info=get_driver_info, ) else: # noinspection PyProtectedMember return GPUStatCollection._new_query_nvidia( shutdown=shutdown, per_process_stats=per_process_stats, get_driver_info=get_driver_info, ) def __len__(self) -> int: return len(self.gpus) def __iter__(self) -> Iterator[GPUStat]: return iter(self.gpus) def __getitem__(self, index: int) -> GPUStat: return self.gpus[index] def __repr__(self) -> str: s = "GPUStatCollection(host=%s, [\n" % self.hostname s += "\n".join(" " + str(g) for g in self.gpus) s += "\n])" return s # --- Printing Functions --- def jsonify(self) -> dict: return { "hostname": self.hostname, "query_time": self.query_time, "gpus": [g.jsonify() for g in self], } def print_json(self, fp: TextIO = sys.stdout) -> None: def date_handler(obj: Any) -> str: 
if hasattr(obj, "isoformat"): return obj.isoformat() else: raise TypeError(type(obj)) o = self.jsonify() json.dump(o, fp, indent=4, separators=(",", ": "), default=date_handler) fp.write("\n") fp.flush() def new_query( shutdown: bool = False, per_process_stats: bool = False, get_driver_info: bool = False, ) -> GPUStatCollection: """ Obtain a new GPUStatCollection instance by querying nvidia-smi to get the list of GPUs and running process information. """ return GPUStatCollection.new_query( shutdown=shutdown, per_process_stats=per_process_stats, get_driver_info=get_driver_info, )
GPUStatCollection
python
anthropics__anthropic-sdk-python
src/anthropic/lib/foundry.py
{ "start": 2607, "end": 2977 }
class ____(AsyncBeta): @cached_property @override def messages(self) -> AsyncBetaMessages: # type: ignore[override] """Return beta messages resource instance with excluded unsupported endpoints.""" return AsyncBetaFoundryMessages(self._client) # ==============================================================================
AsyncBetaFoundry
python
Textualize__textual
src/textual/demo/widgets.py
{ "start": 4076, "end": 4954 }
class ____(containers.VerticalGroup): """Demonstrates DataTables.""" DEFAULT_CLASSES = "column" DATATABLES_MD = """\ ## Datatables A fully-featured DataTable, with cell, row, and columns cursors. Cells may be individually styled, and may include Rich renderables. **Tip:** Focus the table and press `ctrl+a` """ DEFAULT_CSS = """ DataTable { height: 16 !important; &.-maximized { height: auto !important; } } """ def compose(self) -> ComposeResult: yield Markdown(self.DATATABLES_MD) with containers.Center(): yield DataTable(fixed_columns=1) def on_mount(self) -> None: ROWS = list(csv.reader(io.StringIO(MOVIES))) table = self.query_one(DataTable) table.add_columns(*ROWS[0]) table.add_rows(ROWS[1:])
Datatables
python
PrefectHQ__prefect
tests/test_task_worker.py
{ "start": 4406, "end": 6540 }
class ____: async def test_serve_basic_sync_task(self, foo_task, mock_task_worker_start): await serve(foo_task) mock_task_worker_start.assert_called_once() task_run_future = foo_task.apply_async((42,)) assert isinstance(task_run_future, PrefectDistributedFuture) assert task_run_future.state.is_scheduled() async def test_serve_basic_async_task(self, async_foo_task, mock_task_worker_start): await serve(async_foo_task) mock_task_worker_start.assert_called_once() task_run_future = async_foo_task.apply_async((42,)) assert isinstance(task_run_future, PrefectDistributedFuture) assert task_run_future.state.is_scheduled() async def test_task_worker_can_execute_a_single_async_single_task_run( async_foo_task, prefect_client, events_pipeline ): task_worker = TaskWorker(async_foo_task) task_run_future = async_foo_task.apply_async((42,)) task_run = await prefect_client.read_task_run(task_run_future.task_run_id) await task_worker.execute_task_run(task_run) await events_pipeline.process_events() updated_task_run = await prefect_client.read_task_run(task_run_future.task_run_id) assert updated_task_run.state.is_completed() assert await updated_task_run.state.result() == 42 async def test_task_worker_can_execute_a_single_sync_single_task_run( foo_task, prefect_client, events_pipeline ): task_worker = TaskWorker(foo_task) task_run_future = foo_task.apply_async((42,)) task_run = await prefect_client.read_task_run(task_run_future.task_run_id) await task_worker.execute_task_run(task_run) await events_pipeline.process_events() updated_task_run = await prefect_client.read_task_run(task_run_future.task_run_id) assert updated_task_run.state.is_completed() assert await updated_task_run.state.result() == 42 def test_task_worker_cannot_be_instantiated_outside_of_async_context(foo_task): with pytest.raises( RuntimeError, match="TaskWorker must be initialized within an async context." ): TaskWorker(foo_task).start()
TestServe
python
pdm-project__pdm
src/pdm/cli/commands/fix/__init__.py
{ "start": 310, "end": 3197 }
class ____(BaseCommand): """Fix the project problems according to the latest version of PDM""" def add_arguments(self, parser: argparse.ArgumentParser) -> None: parser.add_argument("problem", nargs="?", help="Fix the specific problem, or all if not given") parser.add_argument("--dry-run", action="store_true", help="Only show the problems") @staticmethod def find_problems(project: Project) -> list[tuple[str, BaseFixer]]: """Get the problems in the project""" problems: list[tuple[str, BaseFixer]] = [] for fixer in Command.get_fixers(project): if fixer.check(): problems.append((fixer.identifier, fixer)) return problems @staticmethod def check_problems(project: Project, strict: bool = True) -> None: """Check the problems in the project""" problems = Command.find_problems(project) if not problems: return breaking = False project.core.ui.warn("The following problems are found in your project:") for name, fixer in problems: project.core.ui.echo(f" [b]{name}[/]: {fixer.get_message()}", err=True) if fixer.breaking: breaking = True extra_option = " -g" if project.is_global else "" project.core.ui.echo( f"Run [success]pdm fix{extra_option}[/] to fix all or [success]pdm fix{extra_option} <name>[/]" " to fix individual problem.", err=True, ) if breaking and strict: raise SystemExit(1) @staticmethod def get_fixers(project: Project) -> list[BaseFixer]: """Return a list of fixers to check, the order matters""" return [ProjectConfigFixer(project), PackageTypeFixer(project), LockStrategyFixer(project)] def handle(self, project: Project, options: argparse.Namespace) -> None: if options.dry_run: return self.check_problems(project) problems = self.find_problems(project) if options.problem: fixer = next((fixer for name, fixer in problems if name == options.problem), None) if not fixer: raise PdmUsageError( f"The problem doesn't exist: [success]{options.problem}[/], " f"possible values are {[p[0] for p in problems]}", ) project.core.ui.echo(f"Fixing [success]{fixer.identifier}[/]...", 
end=" ") fixer.fix() project.core.ui.echo(f"[success]{Emoji.SUCC}[/]") return if not problems: project.core.ui.echo("No problem is found, nothing to fix.") return for name, fixer in problems: project.core.ui.echo(f"Fixing [success]{name}[/]...", end=" ") fixer.fix() project.core.ui.echo(f"[success]{Emoji.SUCC}[/]")
Command
python
apache__thrift
lib/py/test/test_sslsocket.py
{ "start": 3984, "end": 15110 }
class ____(unittest.TestCase): def _server_socket(self, **kwargs): return TSSLServerSocket(port=0, **kwargs) @contextmanager def _connectable_client(self, server, expect_failure=False, path=None, **client_kwargs): acc = ServerAcceptor(server, expect_failure) try: acc.start() acc.await_listening() host, port = ('localhost', acc.port) if path is None else (None, None) client = TSSLSocket(host, port, unix_socket=path, **client_kwargs) yield acc, client finally: acc.close() def _assert_connection_failure(self, server, path=None, **client_args): logging.disable(logging.CRITICAL) try: with self._connectable_client(server, True, path=path, **client_args) as (acc, client): # We need to wait for a connection failure, but not too long. 20ms is a tunable # compromise between test speed and stability client.setTimeout(20) with self._assert_raises(TTransportException): client.open() client.write(b"hello") client.read(5) # b"there" finally: logging.disable(logging.NOTSET) def _assert_raises(self, exc): if sys.hexversion >= 0x020700F0: return self.assertRaises(exc) else: return AssertRaises(exc) def _assert_connection_success(self, server, path=None, **client_args): with self._connectable_client(server, path=path, **client_args) as (acc, client): try: self.assertFalse(client.isOpen()) client.open() self.assertTrue(client.isOpen()) client.write(b"hello") self.assertEqual(client.read(5), b"there") self.assertTrue(acc.client is not None) finally: client.close() # deprecated feature def test_deprecation(self): with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', category=DeprecationWarning, module=self.__module__) TSSLSocket('localhost', 0, validate=True, ca_certs=SERVER_CERT) self.assertEqual(len(w), 1) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', category=DeprecationWarning, module=self.__module__) # Deprecated signature # def __init__(self, host='localhost', port=9090, validate=True, ca_certs=None, keyfile=None, 
certfile=None, unix_socket=None, ciphers=None): TSSLSocket('localhost', 0, True, SERVER_CERT, CLIENT_KEY, CLIENT_CERT, None, TEST_CIPHERS) self.assertEqual(len(w), 7) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', category=DeprecationWarning, module=self.__module__) # Deprecated signature # def __init__(self, host=None, port=9090, certfile='cert.pem', unix_socket=None, ciphers=None): TSSLServerSocket(None, 0, SERVER_PEM, None, TEST_CIPHERS) self.assertEqual(len(w), 3) # deprecated feature def test_set_cert_reqs_by_validate(self): with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', category=DeprecationWarning, module=self.__module__) c1 = TSSLSocket('localhost', 0, validate=True, ca_certs=SERVER_CERT) self.assertEqual(c1.cert_reqs, ssl.CERT_REQUIRED) c1 = TSSLSocket('localhost', 0, validate=False) self.assertEqual(c1.cert_reqs, ssl.CERT_NONE) self.assertEqual(len(w), 2) # deprecated feature def test_set_validate_by_cert_reqs(self): with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', category=DeprecationWarning, module=self.__module__) c1 = TSSLSocket('localhost', 0, cert_reqs=ssl.CERT_NONE) self.assertFalse(c1.validate) c2 = TSSLSocket('localhost', 0, cert_reqs=ssl.CERT_REQUIRED, ca_certs=SERVER_CERT) self.assertTrue(c2.validate) c3 = TSSLSocket('localhost', 0, cert_reqs=ssl.CERT_OPTIONAL, ca_certs=SERVER_CERT) self.assertTrue(c3.validate) self.assertEqual(len(w), 3) def test_unix_domain_socket(self): if platform.system() == 'Windows': print('skipping test_unix_domain_socket') return fd, path = tempfile.mkstemp() os.close(fd) os.unlink(path) try: server = self._server_socket(unix_socket=path, keyfile=SERVER_KEY, certfile=SERVER_CERT) self._assert_connection_success(server, path=path, cert_reqs=ssl.CERT_NONE) finally: os.unlink(path) def test_server_cert(self): server = self._server_socket(keyfile=SERVER_KEY, certfile=SERVER_CERT) self._assert_connection_success(server, 
cert_reqs=ssl.CERT_REQUIRED, ca_certs=SERVER_CERT) server = self._server_socket(keyfile=SERVER_KEY, certfile=SERVER_CERT) # server cert not in ca_certs self._assert_connection_failure(server, cert_reqs=ssl.CERT_REQUIRED, ca_certs=CLIENT_CERT) server = self._server_socket(keyfile=SERVER_KEY, certfile=SERVER_CERT) self._assert_connection_success(server, cert_reqs=ssl.CERT_NONE) def test_set_server_cert(self): server = self._server_socket(keyfile=SERVER_KEY, certfile=CLIENT_CERT) with self._assert_raises(Exception): server.certfile = 'foo' with self._assert_raises(Exception): server.certfile = None server.certfile = SERVER_CERT self._assert_connection_success(server, cert_reqs=ssl.CERT_REQUIRED, ca_certs=SERVER_CERT) def test_client_cert(self): if not _match_has_ipaddress: print('skipping test_client_cert') return server = self._server_socket( cert_reqs=ssl.CERT_REQUIRED, keyfile=SERVER_KEY, certfile=SERVER_CERT, ca_certs=CLIENT_CERT) self._assert_connection_failure(server, cert_reqs=ssl.CERT_NONE, certfile=SERVER_CERT, keyfile=SERVER_KEY) server = self._server_socket( cert_reqs=ssl.CERT_REQUIRED, keyfile=SERVER_KEY, certfile=SERVER_CERT, ca_certs=CLIENT_CA) self._assert_connection_failure(server, cert_reqs=ssl.CERT_NONE, certfile=CLIENT_CERT_NO_IP, keyfile=CLIENT_KEY_NO_IP) server = self._server_socket( cert_reqs=ssl.CERT_REQUIRED, keyfile=SERVER_KEY, certfile=SERVER_CERT, ca_certs=CLIENT_CA) self._assert_connection_success(server, cert_reqs=ssl.CERT_NONE, certfile=CLIENT_CERT, keyfile=CLIENT_KEY) server = self._server_socket( cert_reqs=ssl.CERT_OPTIONAL, keyfile=SERVER_KEY, certfile=SERVER_CERT, ca_certs=CLIENT_CA) self._assert_connection_success(server, cert_reqs=ssl.CERT_NONE, certfile=CLIENT_CERT, keyfile=CLIENT_KEY) def test_ciphers(self): server = self._server_socket(keyfile=SERVER_KEY, certfile=SERVER_CERT, ciphers=TEST_CIPHERS) self._assert_connection_success(server, ca_certs=SERVER_CERT, ciphers=TEST_CIPHERS) if not TSSLSocket._has_ciphers: # unittest.skip 
is not available for Python 2.6 print('skipping test_ciphers') return server = self._server_socket(keyfile=SERVER_KEY, certfile=SERVER_CERT) self._assert_connection_failure(server, ca_certs=SERVER_CERT, ciphers='NULL') server = self._server_socket(keyfile=SERVER_KEY, certfile=SERVER_CERT, ciphers=TEST_CIPHERS) self._assert_connection_failure(server, ca_certs=SERVER_CERT, ciphers='NULL') def test_ssl2_and_ssl3_disabled(self): if not hasattr(ssl, 'PROTOCOL_SSLv3'): print('PROTOCOL_SSLv3 is not available') else: server = self._server_socket(keyfile=SERVER_KEY, certfile=SERVER_CERT) self._assert_connection_failure(server, ca_certs=SERVER_CERT, ssl_version=ssl.PROTOCOL_SSLv3) server = self._server_socket(keyfile=SERVER_KEY, certfile=SERVER_CERT, ssl_version=ssl.PROTOCOL_SSLv3) self._assert_connection_failure(server, ca_certs=SERVER_CERT) if not hasattr(ssl, 'PROTOCOL_SSLv2'): print('PROTOCOL_SSLv2 is not available') else: server = self._server_socket(keyfile=SERVER_KEY, certfile=SERVER_CERT) self._assert_connection_failure(server, ca_certs=SERVER_CERT, ssl_version=ssl.PROTOCOL_SSLv2) server = self._server_socket(keyfile=SERVER_KEY, certfile=SERVER_CERT, ssl_version=ssl.PROTOCOL_SSLv2) self._assert_connection_failure(server, ca_certs=SERVER_CERT) def test_newer_tls(self): if not TSSLSocket._has_ssl_context: # unittest.skip is not available for Python 2.6 print('skipping test_newer_tls') return if not hasattr(ssl, 'PROTOCOL_TLSv1_2'): print('PROTOCOL_TLSv1_2 is not available') else: server = self._server_socket(keyfile=SERVER_KEY, certfile=SERVER_CERT, ssl_version=ssl.PROTOCOL_TLSv1_2) self._assert_connection_success(server, ca_certs=SERVER_CERT, ssl_version=ssl.PROTOCOL_TLSv1_2) if not hasattr(ssl, 'PROTOCOL_TLSv1_1'): print('PROTOCOL_TLSv1_1 is not available') else: server = self._server_socket(keyfile=SERVER_KEY, certfile=SERVER_CERT, ssl_version=ssl.PROTOCOL_TLSv1_1) self._assert_connection_success(server, ca_certs=SERVER_CERT, ssl_version=ssl.PROTOCOL_TLSv1_1) if not 
hasattr(ssl, 'PROTOCOL_TLSv1_1') or not hasattr(ssl, 'PROTOCOL_TLSv1_2'): print('PROTOCOL_TLSv1_1 and/or PROTOCOL_TLSv1_2 is not available') else: server = self._server_socket(keyfile=SERVER_KEY, certfile=SERVER_CERT, ssl_version=ssl.PROTOCOL_TLSv1_2) self._assert_connection_failure(server, ca_certs=SERVER_CERT, ssl_version=ssl.PROTOCOL_TLSv1_1) def test_ssl_context(self): if not TSSLSocket._has_ssl_context: # unittest.skip is not available for Python 2.6 print('skipping test_ssl_context') return server_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) server_context.load_cert_chain(SERVER_CERT, SERVER_KEY) server_context.load_verify_locations(CLIENT_CA) server_context.verify_mode = ssl.CERT_REQUIRED server = self._server_socket(ssl_context=server_context) client_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) client_context.load_cert_chain(CLIENT_CERT, CLIENT_KEY) client_context.load_verify_locations(SERVER_CERT) client_context.verify_mode = ssl.CERT_REQUIRED self._assert_connection_success(server, ssl_context=client_context) # Add a dummy test because starting from python 3.12, if all tests in a test # file are skipped that's considered an error.
TSSLSocketTest
python
walkccc__LeetCode
solutions/2965. Find Missing and Repeated Values/2965.py
{ "start": 0, "end": 269 }
class ____: def findMissingAndRepeatedValues(self, grid: list[list[int]]) -> list[int]: count = [1] + [0] * len(grid)**2 # padding for 1-indexed for row in grid: for num in row: count[num] += 1 return [count.index(2), count.index(0)]
Solution
python
kamyu104__LeetCode-Solutions
Python/minimum-number-of-buckets-required-to-collect-rainwater-from-houses.py
{ "start": 29, "end": 619 }
class ____(object): def minimumBuckets(self, street): """ :type street: str :rtype: int """ result = 0 street = list(street) for i, c in enumerate(street): if c != 'H' or (i and street[i-1] == 'B'): continue if i+1 < len(street) and street[i+1] == '.': street[i+1] = 'B' result += 1 elif i and street[i-1] == '.': street[i-1] = 'B' result += 1 else: return -1 return result
Solution
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeNarrowingTypeIs1.py
{ "start": 1042, "end": 1186 }
class ____: pass E = C[T] | D def func5(x: E[T]) -> None: if type(x) is C: reveal_type(x, expected_text="C[T@func5]") @final
D
python
PyCQA__pylint
tests/functional/ext/no_self_use/no_self_use.py
{ "start": 2569, "end": 2804 }
class ____(ABC): """Don't emit no-self-use for abstract methods.""" @abstractmethod def a(self): pass def b(self): raise NotImplementedError def c(self): pass # pass counts as abstract
Foo1
python
pennersr__django-allauth
tests/apps/account/test_signup.py
{ "start": 832, "end": 2712 }
class ____(TestCase): @override_settings( ACCOUNT_SIGNUP_EMAIL_ENTER_TWICE=True, ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE=True, ) def test_custom_form_field_order(self): expected_field_order = [ "email", "email2", "password1", "password2", "username", "last_name", "first_name", ] class TestSignupForm(forms.Form): first_name = forms.CharField(max_length=30) last_name = forms.CharField(max_length=30) field_order = expected_field_order class CustomSignupForm(SignupForm, TestSignupForm): # ACCOUNT_SIGNUP_FORM_CLASS is only abided by when the # BaseSignupForm definition is loaded the first time on Django # startup. @override_settings() has therefore no effect. pass form = CustomSignupForm() self.assertEqual(list(form.fields.keys()), expected_field_order) def test_user_class_attribute(self): from django.contrib.auth import get_user_model from django.db.models.query_utils import DeferredAttribute class CustomSignupForm(SignupForm): # ACCOUNT_SIGNUP_FORM_CLASS is only abided by when the # BaseSignupForm definition is loaded the first time on Django # startup. @override_settings() has therefore no effect. pass User = get_user_model() data = { "username": "username", "email": "user@example.com", "password1": "very-secret", "password2": "very-secret", } form = CustomSignupForm(data, email_required=True) assert isinstance(User.username, DeferredAttribute) form.is_valid() assert isinstance(User.username, DeferredAttribute)
CustomSignupFormTests
python
tiangolo__fastapi
docs_src/pydantic_v1_in_v2/tutorial004_an_py310.py
{ "start": 143, "end": 360 }
class ____(BaseModel): name: str description: str | None = None size: float app = FastAPI() @app.post("/items/") async def create_item(item: Annotated[Item, Body(embed=True)]) -> Item: return item
Item
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/string_conversion.py
{ "start": 403, "end": 968 }
class ____: def __repr__(self): return request.GET["tainted"] def str_is_tainted(): s = StrIsTainted() eval(str(s)) def repr_is_tainted(): r = ReprIsTainted() eval(repr(r)) def str_falls_back_to_repr(): r = ReprIsTainted() eval(str(r)) def implicit_str(): s = StrIsTainted() eval(f"prefix{s}suffix") # noqa: P204 def implicit_repr(): r = ReprIsTainted() eval(f"prefix{r}suffix") # noqa: P204 def explicit_str(): s = StrIsTainted() eval(f"prefix{s.__str__()}suffix") # noqa: P204
ReprIsTainted
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/partitions/mapping/multi/base.py
{ "start": 1164, "end": 12531 }
class ____(ABC): @abstractmethod def get_dimension_dependencies( self, upstream_partitions_def: PartitionsDefinition, downstream_partitions_def: PartitionsDefinition, ) -> Sequence[DimensionDependency]: ... def get_partitions_def( self, partitions_def: PartitionsDefinition, dimension_name: Optional[str] ) -> PartitionsDefinition: if isinstance(partitions_def, MultiPartitionsDefinition): if not isinstance(dimension_name, str): check.failed("Expected dimension_name to be a string") return partitions_def.get_partitions_def_for_dimension(dimension_name) return partitions_def def _get_dependency_partitions_subset( self, a_partitions_def: PartitionsDefinition, a_partitions_subset: PartitionsSubset, b_partitions_def: PartitionsDefinition, a_upstream_of_b: bool, ) -> Union[UpstreamPartitionsResult, PartitionsSubset]: """Given two partitions definitions a_partitions_def and b_partitions_def that have a dependency relationship (a_upstream_of_b is True if a_partitions_def is upstream of b_partitions_def), and a_partition_keys, a list of partition keys in a_partitions_def, returns a list of partition keys in the partitions definition b_partitions_def that are dependencies of the partition keys in a_partition_keys. 
""" a_partition_keys_by_dimension = defaultdict(set) if isinstance(a_partitions_def, MultiPartitionsDefinition): for partition_key in a_partitions_subset.get_partition_keys(): key = a_partitions_def.get_partition_key_from_str(partition_key) for dimension_name, key in key.keys_by_dimension.items(): a_partition_keys_by_dimension[dimension_name].add(key) else: for partition_key in a_partitions_subset.get_partition_keys(): a_partition_keys_by_dimension[None].add(partition_key) # Maps the dimension name and key of a partition in a_partitions_def to the list of # partition keys in b_partitions_def that are dependencies of that partition dep_b_keys_by_a_dim_and_key: dict[Optional[str], dict[Optional[str], list[str]]] = ( defaultdict(lambda: defaultdict(list)) ) required_but_nonexistent_upstream_partitions = set() b_dimension_partitions_def_by_name: dict[Optional[str], PartitionsDefinition] = ( { dimension.name: dimension.partitions_def for dimension in b_partitions_def.partitions_defs } if isinstance(b_partitions_def, MultiPartitionsDefinition) else {None: b_partitions_def} ) if a_upstream_of_b: # a_partitions_def is upstream of b_partitions_def, so we need to map the # dimension names of a_partitions_def to the corresponding dependent dimensions of # b_partitions_def a_dim_to_dependency_b_dim = { dimension_mapping.upstream_dimension_name: ( dimension_mapping.downstream_dimension_name, dimension_mapping.partition_mapping, ) for dimension_mapping in self.get_dimension_dependencies( a_partitions_def, b_partitions_def ) } for a_dim_name, keys in a_partition_keys_by_dimension.items(): if a_dim_name in a_dim_to_dependency_b_dim: ( b_dim_name, dimension_mapping, ) = a_dim_to_dependency_b_dim[a_dim_name] a_dimension_partitions_def = self.get_partitions_def( a_partitions_def, a_dim_name ) b_dimension_partitions_def = self.get_partitions_def( b_partitions_def, b_dim_name ) for key in keys: # if downstream dimension mapping exists, for a given key, get the list of # downstream 
partition keys that are dependencies of that key dep_b_keys_by_a_dim_and_key[a_dim_name][key] = list( dimension_mapping.get_downstream_partitions_for_partitions( a_dimension_partitions_def.empty_subset().with_partition_keys( [key] ), a_dimension_partitions_def, b_dimension_partitions_def, ).get_partition_keys() ) else: # a_partitions_def is downstream of b_partitions_def, so we need to map the # dimension names of a_partitions_def to the corresponding dependency dimensions of # b_partitions_def a_dim_to_dependency_b_dim = { dimension_mapping.downstream_dimension_name: ( dimension_mapping.upstream_dimension_name, dimension_mapping.partition_mapping, ) for dimension_mapping in self.get_dimension_dependencies( b_partitions_def, a_partitions_def ) } for a_dim_name, keys in a_partition_keys_by_dimension.items(): if a_dim_name in a_dim_to_dependency_b_dim: ( b_dim_name, partition_mapping, ) = a_dim_to_dependency_b_dim[a_dim_name] a_dimension_partitions_def = self.get_partitions_def( a_partitions_def, a_dim_name ) b_dimension_partitions_def = self.get_partitions_def( b_partitions_def, b_dim_name ) for key in keys: mapped_partitions_result = ( partition_mapping.get_upstream_mapped_partitions_result_for_partitions( a_dimension_partitions_def.empty_subset().with_partition_keys( [key] ), a_dimension_partitions_def, b_dimension_partitions_def, ) ) dep_b_keys_by_a_dim_and_key[a_dim_name][key] = list( mapped_partitions_result.partitions_subset.get_partition_keys() ) # enumerating partition keys since the two subsets might be from different # asset keys required_but_nonexistent_upstream_partitions.update( set( mapped_partitions_result.required_but_nonexistent_subset.get_partition_keys() ) ) b_partition_keys = set() mapped_a_dim_names = a_dim_to_dependency_b_dim.keys() mapped_b_dim_names = [mapping[0] for mapping in a_dim_to_dependency_b_dim.values()] unmapped_b_dim_names = list( set(b_dimension_partitions_def_by_name.keys()) - set(mapped_b_dim_names) ) for key in 
a_partitions_subset.get_partition_keys(): for b_key_values in itertools.product( *( [ dep_b_keys_by_a_dim_and_key[dim_name][ ( cast("MultiPartitionsDefinition", a_partitions_def) .get_partition_key_from_str(key) .keys_by_dimension[dim_name] if dim_name else key ) ] for dim_name in mapped_a_dim_names ] ), *[ b_dimension_partitions_def_by_name[dim_name].get_partition_keys() for dim_name in unmapped_b_dim_names ], ): b_partition_keys.add( MultiPartitionKey( { cast("str", (mapped_b_dim_names + unmapped_b_dim_names)[i]): key for i, key in enumerate(b_key_values) } ) if len(b_key_values) > 1 else b_key_values[0] # type: ignore ) mapped_subset = b_partitions_def.empty_subset().with_partition_keys(b_partition_keys) if a_upstream_of_b: return mapped_subset else: return UpstreamPartitionsResult( partitions_subset=mapped_subset, required_but_nonexistent_subset=DefaultPartitionsSubset( required_but_nonexistent_upstream_partitions ), ) def get_upstream_mapped_partitions_result_for_partitions( self, downstream_partitions_subset: Optional[PartitionsSubset], downstream_partitions_def: Optional[PartitionsDefinition], upstream_partitions_def: PartitionsDefinition, current_time: Optional[datetime] = None, dynamic_partitions_store: Optional["DynamicPartitionsStore"] = None, ) -> UpstreamPartitionsResult: with partition_loading_context(current_time, dynamic_partitions_store): if downstream_partitions_subset is None: check.failed("downstream asset is not partitioned") result = self._get_dependency_partitions_subset( check.not_none(downstream_partitions_def), downstream_partitions_subset, cast("MultiPartitionsDefinition", upstream_partitions_def), a_upstream_of_b=False, ) if not isinstance(result, UpstreamPartitionsResult): check.failed("Expected UpstreamPartitionsResult") return result def get_downstream_partitions_for_partitions( self, upstream_partitions_subset: PartitionsSubset, upstream_partitions_def: PartitionsDefinition, downstream_partitions_def: PartitionsDefinition, 
current_time: Optional[datetime] = None, dynamic_partitions_store: Optional["DynamicPartitionsStore"] = None, ) -> PartitionsSubset: with partition_loading_context(current_time, dynamic_partitions_store): if upstream_partitions_subset is None: check.failed("upstream asset is not partitioned") result = self._get_dependency_partitions_subset( upstream_partitions_def, upstream_partitions_subset, cast("MultiPartitionsDefinition", downstream_partitions_def), a_upstream_of_b=True, ) if isinstance(result, UpstreamPartitionsResult): check.failed("Expected PartitionsSubset") return result @whitelist_for_serdes
BaseMultiPartitionMapping
python
pypa__warehouse
warehouse/organizations/models.py
{ "start": 6145, "end": 6281 }
class ____(enum.StrEnum): GitHub = "github" GitLab = "gitlab" Google = "google" ActiveState = "activestate"
OIDCIssuerType
python
redis__redis-py
redis/lock.py
{ "start": 282, "end": 12760 }
class ____: """ A shared, distributed Lock. Using Redis for locking allows the Lock to be shared across processes and/or machines. It's left to the user to resolve deadlock issues and make sure multiple clients play nicely together. """ lua_release = None lua_extend = None lua_reacquire = None # KEYS[1] - lock name # ARGV[1] - token # return 1 if the lock was released, otherwise 0 LUA_RELEASE_SCRIPT = """ local token = redis.call('get', KEYS[1]) if not token or token ~= ARGV[1] then return 0 end redis.call('del', KEYS[1]) return 1 """ # KEYS[1] - lock name # ARGV[1] - token # ARGV[2] - additional milliseconds # ARGV[3] - "0" if the additional time should be added to the lock's # existing ttl or "1" if the existing ttl should be replaced # return 1 if the locks time was extended, otherwise 0 LUA_EXTEND_SCRIPT = """ local token = redis.call('get', KEYS[1]) if not token or token ~= ARGV[1] then return 0 end local expiration = redis.call('pttl', KEYS[1]) if not expiration then expiration = 0 end if expiration < 0 then return 0 end local newttl = ARGV[2] if ARGV[3] == "0" then newttl = ARGV[2] + expiration end redis.call('pexpire', KEYS[1], newttl) return 1 """ # KEYS[1] - lock name # ARGV[1] - token # ARGV[2] - milliseconds # return 1 if the locks time was reacquired, otherwise 0 LUA_REACQUIRE_SCRIPT = """ local token = redis.call('get', KEYS[1]) if not token or token ~= ARGV[1] then return 0 end redis.call('pexpire', KEYS[1], ARGV[2]) return 1 """ def __init__( self, redis, name: str, timeout: Optional[Number] = None, sleep: Number = 0.1, blocking: bool = True, blocking_timeout: Optional[Number] = None, thread_local: bool = True, raise_on_release_error: bool = True, ): """ Create a new Lock instance named ``name`` using the Redis client supplied by ``redis``. ``timeout`` indicates a maximum life for the lock in seconds. By default, it will remain locked until release() is called. 
``timeout`` can be specified as a float or integer, both representing the number of seconds to wait. ``sleep`` indicates the amount of time to sleep in seconds per loop iteration when the lock is in blocking mode and another client is currently holding the lock. ``blocking`` indicates whether calling ``acquire`` should block until the lock has been acquired or to fail immediately, causing ``acquire`` to return False and the lock not being acquired. Defaults to True. Note this value can be overridden by passing a ``blocking`` argument to ``acquire``. ``blocking_timeout`` indicates the maximum amount of time in seconds to spend trying to acquire the lock. A value of ``None`` indicates continue trying forever. ``blocking_timeout`` can be specified as a float or integer, both representing the number of seconds to wait. ``thread_local`` indicates whether the lock token is placed in thread-local storage. By default, the token is placed in thread local storage so that a thread only sees its token, not a token set by another thread. Consider the following timeline: time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds. thread-1 sets the token to "abc" time: 1, thread-2 blocks trying to acquire `my-lock` using the Lock instance. time: 5, thread-1 has not yet completed. redis expires the lock key. time: 5, thread-2 acquired `my-lock` now that it's available. thread-2 sets the token to "xyz" time: 6, thread-1 finishes its work and calls release(). if the token is *not* stored in thread local storage, then thread-1 would see the token value as "xyz" and would be able to successfully release the thread-2's lock. ``raise_on_release_error`` indicates whether to raise an exception when the lock is no longer owned when exiting the context manager. By default, this is True, meaning an exception will be raised. If False, the warning will be logged and the exception will be suppressed. In some use cases it's necessary to disable thread local storage. 
For example, if you have code where one thread acquires a lock and passes that lock instance to a worker thread to release later. If thread local storage isn't disabled in this case, the worker thread won't see the token set by the thread that acquired the lock. Our assumption is that these cases aren't common and as such default to using thread local storage. """ self.redis = redis self.name = name self.timeout = timeout self.sleep = sleep self.blocking = blocking self.blocking_timeout = blocking_timeout self.thread_local = bool(thread_local) self.raise_on_release_error = raise_on_release_error self.local = threading.local() if self.thread_local else SimpleNamespace() self.local.token = None self.register_scripts() def register_scripts(self) -> None: cls = self.__class__ client = self.redis if cls.lua_release is None: cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT) if cls.lua_extend is None: cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT) if cls.lua_reacquire is None: cls.lua_reacquire = client.register_script(cls.LUA_REACQUIRE_SCRIPT) def __enter__(self) -> "Lock": if self.acquire(): return self raise LockError( "Unable to acquire lock within the time specified", lock_name=self.name, ) def __exit__( self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: try: self.release() except LockError: if self.raise_on_release_error: raise logger.warning( "Lock was unlocked or no longer owned when exiting context manager." ) def acquire( self, sleep: Optional[Number] = None, blocking: Optional[bool] = None, blocking_timeout: Optional[Number] = None, token: Optional[str] = None, ): """ Use Redis to hold a shared, distributed lock named ``name``. Returns True once the lock is acquired. If ``blocking`` is False, always return immediately. If the lock was acquired, return True, otherwise return False. 
``blocking_timeout`` specifies the maximum number of seconds to wait trying to acquire the lock. ``token`` specifies the token value to be used. If provided, token must be a bytes object or a string that can be encoded to a bytes object with the default encoding. If a token isn't specified, a UUID will be generated. """ if sleep is None: sleep = self.sleep if token is None: token = uuid.uuid1().hex.encode() else: encoder = self.redis.get_encoder() token = encoder.encode(token) if blocking is None: blocking = self.blocking if blocking_timeout is None: blocking_timeout = self.blocking_timeout stop_trying_at = None if blocking_timeout is not None: stop_trying_at = mod_time.monotonic() + blocking_timeout while True: if self.do_acquire(token): self.local.token = token return True if not blocking: return False next_try_at = mod_time.monotonic() + sleep if stop_trying_at is not None and next_try_at > stop_trying_at: return False mod_time.sleep(sleep) def do_acquire(self, token: str) -> bool: if self.timeout: # convert to milliseconds timeout = int(self.timeout * 1000) else: timeout = None if self.redis.set(self.name, token, nx=True, px=timeout): return True return False def locked(self) -> bool: """ Returns True if this key is locked by any process, otherwise False. """ return self.redis.get(self.name) is not None def owned(self) -> bool: """ Returns True if this key is locked by this lock, otherwise False. 
""" stored_token = self.redis.get(self.name) # need to always compare bytes to bytes # TODO: this can be simplified when the context manager is finished if stored_token and not isinstance(stored_token, bytes): encoder = self.redis.get_encoder() stored_token = encoder.encode(stored_token) return self.local.token is not None and stored_token == self.local.token def release(self) -> None: """ Releases the already acquired lock """ expected_token = self.local.token if expected_token is None: raise LockError( "Cannot release a lock that's not owned or is already unlocked.", lock_name=self.name, ) self.local.token = None self.do_release(expected_token) def do_release(self, expected_token: str) -> None: if not bool( self.lua_release(keys=[self.name], args=[expected_token], client=self.redis) ): raise LockNotOwnedError( "Cannot release a lock that's no longer owned", lock_name=self.name, ) def extend(self, additional_time: Number, replace_ttl: bool = False) -> bool: """ Adds more time to an already acquired lock. ``additional_time`` can be specified as an integer or a float, both representing the number of seconds to add. ``replace_ttl`` if False (the default), add `additional_time` to the lock's existing ttl. If True, replace the lock's ttl with `additional_time`. 
""" if self.local.token is None: raise LockError("Cannot extend an unlocked lock", lock_name=self.name) if self.timeout is None: raise LockError("Cannot extend a lock with no timeout", lock_name=self.name) return self.do_extend(additional_time, replace_ttl) def do_extend(self, additional_time: Number, replace_ttl: bool) -> bool: additional_time = int(additional_time * 1000) if not bool( self.lua_extend( keys=[self.name], args=[self.local.token, additional_time, "1" if replace_ttl else "0"], client=self.redis, ) ): raise LockNotOwnedError( "Cannot extend a lock that's no longer owned", lock_name=self.name, ) return True def reacquire(self) -> bool: """ Resets a TTL of an already acquired lock back to a timeout value. """ if self.local.token is None: raise LockError("Cannot reacquire an unlocked lock", lock_name=self.name) if self.timeout is None: raise LockError( "Cannot reacquire a lock with no timeout", lock_name=self.name, ) return self.do_reacquire() def do_reacquire(self) -> bool: timeout = int(self.timeout * 1000) if not bool( self.lua_reacquire( keys=[self.name], args=[self.local.token, timeout], client=self.redis ) ): raise LockNotOwnedError( "Cannot reacquire a lock that's no longer owned", lock_name=self.name, ) return True
Lock
python
bottlepy__bottle
test/test_plugins.py
{ "start": 5538, "end": 7526 }
class ____(tools.ServerTestBase): def setUp(self): super(TestPluginAPI, self).setUp() @self.app.route('/', test='plugin.cfg') def test(**args): return ', '.join('%s:%s' % (k,v) for k,v in args.items()) def test_callable(self): def plugin(func): def wrapper(*a, **ka): return func(test='me', *a, **ka) + '; tail' return wrapper self.app.install(plugin) self.assertBody('test:me; tail', '/') def test_apply(self): class Plugin(object): def apply(self, func, route): def wrapper(*a, **ka): return func(test=route.config['test'], *a, **ka) + '; tail' return wrapper def __call__(self, func): raise AssertionError("Plugins must not be called "\ "if they implement 'apply'") self.app.install(Plugin()) self.assertBody('test:plugin.cfg; tail', '/') def test_instance_method_wrapper(self): class Plugin(object): api=2 def apply(self, callback, route): return self.b def b(self): return "Hello" self.app.install(Plugin()) self.assertBody('Hello', '/') def test_setup(self): class Plugin(object): def __call__(self, func): return func def setup(self, app): self.app = app plugin = self.app.install(Plugin()) self.assertEqual(getattr(plugin, 'app', None), self.app) def test_close(self): class Plugin(object): def __call__(self, func): return func def close(self): self.closed = True plugin = self.app.install(Plugin()) plugin2 = self.app.install(Plugin()) self.app.uninstall(plugin) self.assertTrue(getattr(plugin, 'closed', False)) self.app.close() self.assertTrue(getattr(plugin2, 'closed', False))
TestPluginAPI
python
numba__numba
numba/core/datamodel/models.py
{ "start": 34555, "end": 34775 }
class ____(PrimitiveModel): def __init__(self, dmm, fe_type): be_type = ir.IntType(64) super(NPDatetimeModel, self).__init__(dmm, fe_type, be_type) @register_default(types.ArrayIterator)
NPDatetimeModel
python
networkx__networkx
networkx/classes/tests/test_coreviews.py
{ "start": 7833, "end": 9171 }
class ____(TestUnionAdjacency): # nbr->key->data def setup_method(self): dd = {"color": "blue", "weight": 1.2} self.kd = {7: {}, "ekey": {}, 9: {"color": 1}} self.s = {3: self.kd, 0: {7: dd}, 1: {}, 2: {"key": {"color": 1}}} self.p = {3: {}, 0: {3: dd}, 1: {}, 2: {1: {"span": 2}}} self.adjview = nx.classes.coreviews.UnionMultiInner(self.s, self.p) def test_len(self): assert len(self.adjview) == len(self.s.keys() | self.p.keys()) == 4 def test_getitem(self): assert self.adjview[1] is not self.s[1] assert self.adjview[0][7] is self.adjview[0][3] assert self.adjview[2]["key"]["color"] == 1 assert self.adjview[2][1]["span"] == 2 pytest.raises(KeyError, self.adjview.__getitem__, 4) pytest.raises(KeyError, self.adjview[1].__getitem__, "key") def test_copy(self): avcopy = self.adjview.copy() assert avcopy[0] == self.adjview[0] assert avcopy[0] is not self.adjview[0] avcopy[2][1]["width"] = 8 assert avcopy[2] != self.adjview[2] self.adjview[2][1]["width"] = 8 assert avcopy[2] == self.adjview[2] del self.adjview[2][1]["width"] assert not hasattr(self.adjview, "__setitem__") assert hasattr(avcopy, "__setitem__")
TestUnionMultiInner
python
redis__redis-py
tests/test_asyncio/test_command_policies.py
{ "start": 567, "end": 2972 }
class ____: async def test_resolve(self): zcount_policy = CommandPolicies( request_policy=RequestPolicy.DEFAULT_KEYED, response_policy=ResponsePolicy.DEFAULT_KEYED, ) rpoplpush_policy = CommandPolicies( request_policy=RequestPolicy.DEFAULT_KEYED, response_policy=ResponsePolicy.DEFAULT_KEYED, ) dynamic_resolver = AsyncDynamicPolicyResolver( { "core": { "zcount": zcount_policy, "rpoplpush": rpoplpush_policy, } } ) assert await dynamic_resolver.resolve("zcount") == zcount_policy assert await dynamic_resolver.resolve("rpoplpush") == rpoplpush_policy with pytest.raises( ValueError, match="Wrong command or module name: foo.bar.baz" ): await dynamic_resolver.resolve("foo.bar.baz") assert await dynamic_resolver.resolve("foo.bar") is None assert await dynamic_resolver.resolve("core.foo") is None # Test that policy fallback correctly static_resolver = AsyncStaticPolicyResolver() with_fallback_dynamic_resolver = dynamic_resolver.with_fallback(static_resolver) resolved_policies = await with_fallback_dynamic_resolver.resolve("ft.aggregate") assert resolved_policies.request_policy == RequestPolicy.DEFAULT_KEYLESS assert resolved_policies.response_policy == ResponsePolicy.DEFAULT_KEYLESS # Extended chain with one more resolver foo_bar_policy = CommandPolicies( request_policy=RequestPolicy.DEFAULT_KEYLESS, response_policy=ResponsePolicy.DEFAULT_KEYLESS, ) another_dynamic_resolver = AsyncDynamicPolicyResolver( { "foo": { "bar": foo_bar_policy, } } ) with_fallback_static_resolver = static_resolver.with_fallback( another_dynamic_resolver ) with_double_fallback_dynamic_resolver = dynamic_resolver.with_fallback( with_fallback_static_resolver ) assert ( await with_double_fallback_dynamic_resolver.resolve("foo.bar") == foo_bar_policy ) @pytest.mark.onlycluster @pytest.mark.asyncio @skip_if_server_version_lt("8.0.0")
TestBasePolicyResolver
python
astropy__astropy
astropy/coordinates/tests/test_pickle.py
{ "start": 1258, "end": 2247 }
class ____(coord.ICRS): default_representation = coord.PhysicsSphericalRepresentation @pytest.mark.parametrize( "frame", [ coord.SkyOffsetFrame(origin=coord.ICRS(0 * u.deg, 0 * u.deg)), coord.SkyOffsetFrame( 5 * u.deg, 10 * u.deg, origin=coord.Galactic(2 * u.deg, -3 * u.deg) ), coord.SkyOffsetFrame( 5 * u.deg, 10 * u.deg, 10 * u.pc, origin=coord.Galactic(2 * u.deg, -3 * u.deg), representation_type=coord.PhysicsSphericalRepresentation, ), coord.SkyOffsetFrame( 5 * u.deg, 10 * u.deg, 0 * u.pc, origin=_CustomICRS(2 * u.deg, 3 * u.deg, 1 * u.pc), ), ], ) def test_skyoffset_pickle(pickle_protocol, frame): # noqa: F811 """ This is a regression test for issue #9249: https://github.com/astropy/astropy/issues/9249 """ check_pickling_recovery(frame, pickle_protocol)
_CustomICRS
python
simplejson__simplejson
simplejson/tests/test_fail.py
{ "start": 3504, "end": 6454 }
class ____(TestCase): def test_failures(self): for idx, doc in enumerate(JSONDOCS): idx = idx + 1 if idx in SKIPS: json.loads(doc) continue try: json.loads(doc) except json.JSONDecodeError: pass else: self.fail("Expected failure for fail%d.json: %r" % (idx, doc)) def test_array_decoder_issue46(self): # http://code.google.com/p/simplejson/issues/detail?id=46 for doc in [u'[,]', '[,]']: try: json.loads(doc) except json.JSONDecodeError: e = sys.exc_info()[1] self.assertEqual(e.pos, 1) self.assertEqual(e.lineno, 1) self.assertEqual(e.colno, 2) except Exception: e = sys.exc_info()[1] self.fail("Unexpected exception raised %r %s" % (e, e)) else: self.fail("Unexpected success parsing '[,]'") def test_truncated_input(self): test_cases = [ ('', 'Expecting value', 0), ('[', "Expecting value or ']'", 1), ('[42', "Expecting ',' delimiter", 3), ('[42,', 'Expecting value', 4), ('["', 'Unterminated string starting at', 1), ('["spam', 'Unterminated string starting at', 1), ('["spam"', "Expecting ',' delimiter", 7), ('["spam",', 'Expecting value', 8), ('{', "Expecting property name enclosed in double quotes or '}'", 1), ('{"', 'Unterminated string starting at', 1), ('{"spam', 'Unterminated string starting at', 1), ('{"spam"', "Expecting ':' delimiter", 7), ('{"spam":', 'Expecting value', 8), ('{"spam":42', "Expecting ',' delimiter", 10), ('{"spam":42,', 'Expecting property name enclosed in double quotes', 11), ('"', 'Unterminated string starting at', 0), ('"spam', 'Unterminated string starting at', 0), ('[,', "Expecting value", 1), ('--', 'Expecting value', 0), ('"\x18d', "Invalid control character %r", 1), ] for data, msg, idx in test_cases: try: json.loads(data) except json.JSONDecodeError: e = sys.exc_info()[1] self.assertEqual( e.msg[:len(msg)], msg, "%r doesn't start with %r for %r" % (e.msg, msg, data)) self.assertEqual( e.pos, idx, "pos %r != %r for %r" % (e.pos, idx, data)) except Exception: e = sys.exc_info()[1] self.fail("Unexpected exception raised %r %s" % (e, e)) else: 
self.fail("Unexpected success parsing '%r'" % (data,))
TestFail
python
tensorflow__tensorflow
tensorflow/python/ops/init_ops.py
{ "start": 17402, "end": 20543 }
class ____(Initializer): """Initializer that generates tensors with a normal distribution. Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. @compatibility(TF2) Although it is a legacy `compat.v1` API, this symbol is compatible with eager execution and `tf.function`. To switch to TF2, switch to using either `tf.initializers.RandomNormal` or `tf.keras.initializers.RandomNormal` (neither from `compat.v1`) and pass the dtype when calling the initializer. Keep in mind that the default stddev and the behavior of fixed seeds have changed. #### Structural Mapping to TF2 Before: ```python initializer = tf.compat.v1.random_normal_initializer( mean=mean, stddev=stddev, seed=seed, dtype=dtype) weight_one = tf.Variable(initializer(shape_one)) weight_two = tf.Variable(initializer(shape_two)) ``` After: ```python initializer = tf.initializers.RandomNormal( mean=mean, seed=seed, stddev=stddev) weight_one = tf.Variable(initializer(shape_one, dtype=dtype)) weight_two = tf.Variable(initializer(shape_two, dtype=dtype)) ``` #### How to Map Arguments | TF1 Arg Name | TF2 Arg Name | Note | | :----------------- | :-------------- | :------------------------- | | `mean` | `mean` | No change to defaults | | `stddev` | `stddev` | Default changes from 1.0 to 0.05 | | `seed` | `seed` | | | `dtype` | `dtype` | The TF2 native api only takes it as a | : : : `__call__` arg, not a constructor arg. : | `partition_info` | - | (`__call__` arg in TF1) Not supported. 
| @end_compatibility """ @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32): self.mean = mean self.stddev = stddev self.seed = seed self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) def __call__(self, shape, dtype=None, partition_info=None): if dtype is None: dtype = self.dtype return random_ops.random_normal( shape, self.mean, self.stddev, dtype, seed=self.seed) def get_config(self): return { "mean": self.mean, "stddev": self.stddev, "seed": self.seed, "dtype": self.dtype.name } @tf_export(v1=["initializers.truncated_normal", "truncated_normal_initializer"]) @deprecation.deprecated_endpoints("initializers.truncated_normal", "truncated_normal_initializer")
RandomNormal
python
PrefectHQ__prefect
src/prefect/server/schemas/actions.py
{ "start": 29210, "end": 29581 }
class ____(ActionBaseModel): """Data used by the Prefect REST API to update a block document.""" block_schema_id: Optional[UUID] = Field( default=None, description="A block schema ID" ) data: Dict[str, Any] = Field( default_factory=dict, description="The block document's data" ) merge_existing_data: bool = True
BlockDocumentUpdate
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/ext/instrumentation.py
{ "start": 8073, "end": 10431 }
class ____: """User-defined class instrumentation extension. :class:`.InstrumentationManager` can be subclassed in order to change how class instrumentation proceeds. This class exists for the purposes of integration with other object management frameworks which would like to entirely modify the instrumentation methodology of the ORM, and is not intended for regular usage. For interception of class instrumentation events, see :class:`.InstrumentationEvents`. The API for this class should be considered as semi-stable, and may change slightly with new releases. """ # r4361 added a mandatory (cls) constructor to this interface. # given that, perhaps class_ should be dropped from all of these # signatures. def __init__(self, class_): pass def manage(self, class_, manager): setattr(class_, "_default_class_manager", manager) def unregister(self, class_, manager): delattr(class_, "_default_class_manager") def manager_getter(self, class_): def get(cls): return cls._default_class_manager return get def instrument_attribute(self, class_, key, inst): pass def post_configure_attribute(self, class_, key, inst): pass def install_descriptor(self, class_, key, inst): setattr(class_, key, inst) def uninstall_descriptor(self, class_, key): delattr(class_, key) def install_member(self, class_, key, implementation): setattr(class_, key, implementation) def uninstall_member(self, class_, key): delattr(class_, key) def instrument_collection_class(self, class_, key, collection_class): return collections._prepare_instrumentation(collection_class) def get_instance_dict(self, class_, instance): return instance.__dict__ def initialize_instance_dict(self, class_, instance): pass def install_state(self, class_, instance, state): setattr(instance, "_default_state", state) def remove_state(self, class_, instance): delattr(instance, "_default_state") def state_getter(self, class_): return lambda instance: getattr(instance, "_default_state") def dict_getter(self, class_): return lambda inst: 
self.get_instance_dict(class_, inst)
InstrumentationManager
python
django__django
tests/absolute_url_overrides/tests.py
{ "start": 153, "end": 2139 }
class ____(SimpleTestCase): def test_get_absolute_url(self): """ get_absolute_url() functions as a normal method. """ def get_absolute_url(o): return "/test-a/%s/" % o.pk TestA = self._create_model_class("TestA", get_absolute_url) self.assertTrue(hasattr(TestA, "get_absolute_url")) obj = TestA(pk=1, name="Foo") self.assertEqual("/test-a/%s/" % obj.pk, obj.get_absolute_url()) def test_override_get_absolute_url(self): """ ABSOLUTE_URL_OVERRIDES should override get_absolute_url(). """ def get_absolute_url(o): return "/test-b/%s/" % o.pk with self.settings( ABSOLUTE_URL_OVERRIDES={ "absolute_url_overrides.testb": lambda o: "/overridden-test-b/%s/" % o.pk, }, ): TestB = self._create_model_class("TestB", get_absolute_url) obj = TestB(pk=1, name="Foo") self.assertEqual("/overridden-test-b/%s/" % obj.pk, obj.get_absolute_url()) def test_insert_get_absolute_url(self): """ ABSOLUTE_URL_OVERRIDES should work even if the model doesn't have a get_absolute_url() method. """ with self.settings( ABSOLUTE_URL_OVERRIDES={ "absolute_url_overrides.testc": lambda o: "/test-c/%s/" % o.pk, }, ): TestC = self._create_model_class("TestC") obj = TestC(pk=1, name="Foo") self.assertEqual("/test-c/%s/" % obj.pk, obj.get_absolute_url()) def _create_model_class(self, class_name, get_absolute_url_method=None): attrs = { "name": models.CharField(max_length=50), "__module__": "absolute_url_overrides", } if get_absolute_url_method: attrs["get_absolute_url"] = get_absolute_url_method return type(class_name, (models.Model,), attrs)
AbsoluteUrlOverrideTests
python
joke2k__faker
faker/providers/automotive/ja_JP/__init__.py
{ "start": 70, "end": 2707 }
class ____(AutomotiveProvider): """Implement automotive provider for ``ja_JP`` locale. Sources (retrieved on 2025-09-15): - https://ja.wikipedia.org/wiki/%E6%97%A5%E6%9C%AC%E3%81%AE%E3%83%8A%E3%83%B3%E3%83%90%E3%83%BC%E3%83%97%E3%83%AC%E3%83%BC%E3%83%88%E4%B8%80%E8%A6%A7 - http://nplate.cloudfree.jp/misc/m50_bango.html """ license_plate_area_names = ( "品川", "足立", "練馬", "横浜", "川崎", "名古屋", "大阪", "神戸", "福岡", "札幌", "尾張小牧", "伊勢志摩", ) classification_numbers = ( "###", "##", ) license_plate_kana = ( "あ", "い", "う", "え", "か", "き", "く", "け", "こ", "さ", "す", "せ", "そ", "た", "ち", "つ", "て", "と", "な", "に", "ぬ", "ね", "の", "は", "ひ", "ふ", "ほ", "ま", "み", "む", "め", "も", "や", "ゆ", "よ", "ら", "り", "る", "れ", "ろ", "わ", "を", ) serial_number_formats = ("#", "##", "###", "####") MIDDLE_DOT = "・" DELIMITER = "-" license_plate_formats = ("{{area_name}} {{classification_number}} {{kana}} {{serial_number}}",) def license_plate(self) -> str: """Generate a Japanese license plate.""" pattern = self.random_element(self.license_plate_formats) return self.generator.parse(pattern) def area_name(self) -> str: return self.random_element(self.license_plate_area_names) def classification_number(self) -> str: return self.numerify(self.random_element(self.classification_numbers)) def kana(self) -> str: return self.random_element(self.license_plate_kana) def serial_number(self) -> str: """ Generate the vehicle’s serial number (the last four digits on a Japanese license plate). - For 4 digits: insert a hyphen between the second and third digits (e.g., 12-34). - For 1 to 3 digits: pad the left side with middle dots (・) so the total width is four characters (e.g., ・123, ・・12, ・・・1). Do not use a hyphen in these cases. """ raw_digits = self.numerify(self.random_element(self.serial_number_formats)) n = len(raw_digits) if n == 4: v = f"{raw_digits[:2]}{self.DELIMITER}{raw_digits[2:]}" return v else: return raw_digits.rjust(4, self.MIDDLE_DOT)
Provider
python
pandas-dev__pandas
pandas/tests/series/indexing/test_setitem.py
{ "start": 23468, "end": 31404 }
class ____: """ Check each of several methods that _should_ be equivalent to `obj[key] = val` We assume that - obj.index is the default Index(range(len(obj))) - the setitem does not expand the obj """ @pytest.fixture def is_inplace(self, obj, expected): """ Whether we expect the setting to be in-place or not. """ return expected.dtype == obj.dtype def check_indexer(self, obj, key, expected, val, indexer, is_inplace): orig = obj obj = obj.copy() arr = obj._values indexer(obj)[key] = val tm.assert_series_equal(obj, expected) self._check_inplace(is_inplace, orig, arr, obj) def _check_inplace(self, is_inplace, orig, arr, obj): if is_inplace is None: # We are not (yet) checking whether setting is inplace or not pass elif is_inplace: if arr.dtype.kind in ["m", "M"]: # We may not have the same DTA/TDA, but will have the same # underlying data assert arr._ndarray is obj._values._ndarray else: assert obj._values is arr else: # otherwise original array should be unchanged tm.assert_equal(arr, orig._values) def test_int_key(self, obj, key, expected, raises, val, indexer_sli, is_inplace): if not isinstance(key, int): pytest.skip("Not relevant for int key") if raises: ctx = pytest.raises(TypeError, match="Invalid value") else: ctx = contextlib.nullcontext() with ctx: self.check_indexer(obj, key, expected, val, indexer_sli, is_inplace) if indexer_sli is tm.loc: with ctx: self.check_indexer(obj, key, expected, val, tm.at, is_inplace) elif indexer_sli is tm.iloc: with ctx: self.check_indexer(obj, key, expected, val, tm.iat, is_inplace) rng = range(key, key + 1) with ctx: self.check_indexer(obj, rng, expected, val, indexer_sli, is_inplace) if indexer_sli is not tm.loc: # Note: no .loc because that handles slice edges differently slc = slice(key, key + 1) with ctx: self.check_indexer(obj, slc, expected, val, indexer_sli, is_inplace) ilkey = [key] with ctx: self.check_indexer(obj, ilkey, expected, val, indexer_sli, is_inplace) indkey = np.array(ilkey) with ctx: 
self.check_indexer(obj, indkey, expected, val, indexer_sli, is_inplace) genkey = (x for x in [key]) with ctx: self.check_indexer(obj, genkey, expected, val, indexer_sli, is_inplace) def test_slice_key(self, obj, key, expected, raises, val, indexer_sli, is_inplace): if not isinstance(key, slice): pytest.skip("Not relevant for slice key") if raises: ctx = pytest.raises(TypeError, match="Invalid value") else: ctx = contextlib.nullcontext() if indexer_sli is not tm.loc: # Note: no .loc because that handles slice edges differently with ctx: self.check_indexer(obj, key, expected, val, indexer_sli, is_inplace) ilkey = list(range(len(obj)))[key] with ctx: self.check_indexer(obj, ilkey, expected, val, indexer_sli, is_inplace) indkey = np.array(ilkey) with ctx: self.check_indexer(obj, indkey, expected, val, indexer_sli, is_inplace) genkey = (x for x in indkey) with ctx: self.check_indexer(obj, genkey, expected, val, indexer_sli, is_inplace) def test_mask_key(self, obj, key, expected, raises, val, indexer_sli): # setitem with boolean mask mask = np.zeros(obj.shape, dtype=bool) mask[key] = True obj = obj.copy() if is_list_like(val) and len(val) < mask.sum(): msg = "boolean index did not match indexed array along dimension" with pytest.raises(IndexError, match=msg): indexer_sli(obj)[mask] = val return if raises: with pytest.raises(TypeError, match="Invalid value"): indexer_sli(obj)[mask] = val else: indexer_sli(obj)[mask] = val def test_series_where(self, obj, key, expected, raises, val, is_inplace): mask = np.zeros(obj.shape, dtype=bool) mask[key] = True if is_list_like(val) and len(val) < len(obj): # Series.where is not valid here msg = "operands could not be broadcast together with shapes" with pytest.raises(ValueError, match=msg): obj.where(~mask, val) return orig = obj obj = obj.copy() arr = obj._values res = obj.where(~mask, val) if val is NA and res.dtype == object: expected = expected.fillna(NA) elif val is None and res.dtype == object: assert expected.dtype == object 
expected = expected.copy() expected[expected.isna()] = None tm.assert_series_equal(res, expected) self._check_inplace(is_inplace, orig, arr, obj) def test_index_where(self, obj, key, expected, raises, val): mask = np.zeros(obj.shape, dtype=bool) mask[key] = True res = Index(obj).where(~mask, val) expected_idx = Index(expected, dtype=expected.dtype) tm.assert_index_equal(res, expected_idx) def test_index_putmask(self, obj, key, expected, raises, val): mask = np.zeros(obj.shape, dtype=bool) mask[key] = True res = Index(obj).putmask(mask, val) tm.assert_index_equal(res, Index(expected, dtype=expected.dtype)) @pytest.mark.parametrize( "obj,expected,key,raises", [ pytest.param( # GH#45568 setting a valid NA value into IntervalDtype[int] should # cast to IntervalDtype[float] Series(interval_range(1, 5)), Series( [Interval(1, 2), np.nan, Interval(3, 4), Interval(4, 5)], dtype="interval[float64]", ), 1, True, id="interval_int_na_value", ), pytest.param( # these induce dtype changes Series([2, 3, 4, 5, 6, 7, 8, 9, 10]), Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan]), slice(None, None, 2), False, id="int_series_slice_key_step", ), pytest.param( Series([True, True, False, False]), Series([np.nan, True, np.nan, False], dtype=object), slice(None, None, 2), True, id="bool_series_slice_key_step", ), pytest.param( # these induce dtype changes Series(np.arange(10)), Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9]), slice(None, 5), False, id="int_series_slice_key", ), pytest.param( # changes dtype GH#4463 Series([1, 2, 3]), Series([np.nan, 2, 3]), 0, False, id="int_series_int_key", ), pytest.param( # changes dtype GH#4463 Series([False]), Series([np.nan], dtype=object), # TODO: maybe go to float64 since we are changing the _whole_ Series? 0, True, id="bool_series_int_key_change_all", ), pytest.param( # changes dtype GH#4463 Series([False, True]), Series([np.nan, True], dtype=object), 0, True, id="bool_series_int_key", ), ], )
SetitemCastingEquivalents
python
huggingface__transformers
src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py
{ "start": 12991, "end": 14057 }
class ____(Dinov2PreTrainedModel): @torch.no_grad() def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): init.trunc_normal_(module.weight, mean=0.0, std=self.config.initializer_range) if module.bias is not None: init.zeros_(module.bias) elif isinstance(module, nn.LayerNorm): init.zeros_(module.bias) init.ones_(module.weight) elif isinstance(module, Dinov2WithRegistersEmbeddings): init.trunc_normal_(module.position_embeddings, mean=0.0, std=self.config.initializer_range) init.trunc_normal_(module.cls_token, mean=0.0, std=self.config.initializer_range) init.zeros_(module.mask_token) init.zeros_(module.register_tokens) elif isinstance(module, Dinov2WithRegistersLayerScale): # noqa: F821 init.constant_(module.lambda1, self.config.layerscale_value)
Dinov2WithRegistersPreTrainedModel
python
openai__openai-python
src/openai/lib/streaming/responses/_responses.py
{ "start": 1020, "end": 3166 }
class ____(Generic[TextFormatT]): def __init__( self, *, raw_stream: Stream[RawResponseStreamEvent], text_format: type[TextFormatT] | Omit, input_tools: Iterable[ToolParam] | Omit, starting_after: int | None, ) -> None: self._raw_stream = raw_stream self._response = raw_stream.response self._iterator = self.__stream__() self._state = ResponseStreamState(text_format=text_format, input_tools=input_tools) self._starting_after = starting_after def __next__(self) -> ResponseStreamEvent[TextFormatT]: return self._iterator.__next__() def __iter__(self) -> Iterator[ResponseStreamEvent[TextFormatT]]: for item in self._iterator: yield item def __enter__(self) -> Self: return self def __stream__(self) -> Iterator[ResponseStreamEvent[TextFormatT]]: for sse_event in self._raw_stream: events_to_fire = self._state.handle_event(sse_event) for event in events_to_fire: if self._starting_after is None or event.sequence_number > self._starting_after: yield event def __exit__( self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None, ) -> None: self.close() def close(self) -> None: """ Close the response and release the connection. Automatically called if the response body is read to completion. """ self._response.close() def get_final_response(self) -> ParsedResponse[TextFormatT]: """Waits until the stream has been read to completion and returns the accumulated `ParsedResponse` object. """ self.until_done() response = self._state._completed_response if not response: raise RuntimeError("Didn't receive a `response.completed` event.") return response def until_done(self) -> Self: """Blocks until the stream has been consumed.""" consume_sync_iterator(self) return self
ResponseStream
python
cython__cython
Cython/Compiler/CmdLine.py
{ "start": 1616, "end": 1819 }
class ____(Action): def __call__(self, parser, namespace, values, option_string=None): namespace.error_on_unknown_names = False namespace.error_on_uninitialized = False
SetLenientAction
python
ansible__ansible
lib/ansible/modules/service_facts.py
{ "start": 3766, "end": 3857 }
class ____(object): def __init__(self, module): self.module = module
BaseService
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_object_position15.py
{ "start": 315, "end": 915 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("object_position15.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.set_column(1, 1, 5, None) worksheet.insert_image("A9", self.image_dir + "red.png", {"x_offset": 232}) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
pypa__warehouse
tests/unit/accounts/test_services.py
{ "start": 1422, "end": 53593 }
class ____: def test_verify_service(self): assert verifyClass(IUserService, services.DatabaseUserService) def test_service_creation(self, monkeypatch): crypt_context_obj = pretend.stub() crypt_context_cls = pretend.call_recorder(lambda **kwargs: crypt_context_obj) monkeypatch.setattr(services, "CryptContext", crypt_context_cls) session = pretend.stub() service = services.DatabaseUserService( session, metrics=NullMetrics(), remote_addr=REMOTE_ADDR ) assert service.db is session assert service.hasher is crypt_context_obj assert crypt_context_cls.calls == [ pretend.call( schemes=[ "argon2", "bcrypt_sha256", "bcrypt", "django_bcrypt", "unix_disabled", ], deprecated=["auto"], truncate_error=True, argon2__memory_cost=1024, argon2__parallelism=6, argon2__time_cost=6, ) ] def test_service_creation_ratelimiters(self, monkeypatch): crypt_context_obj = pretend.stub() crypt_context_cls = pretend.call_recorder(lambda **kwargs: crypt_context_obj) monkeypatch.setattr(services, "CryptContext", crypt_context_cls) ratelimiters = {"user.login": pretend.stub(), "global.login": pretend.stub()} session = pretend.stub() service = services.DatabaseUserService( session, metrics=NullMetrics(), remote_addr=REMOTE_ADDR, ratelimiters=ratelimiters, ) assert service.db is session assert service.ratelimiters == ratelimiters assert service.hasher is crypt_context_obj assert crypt_context_cls.calls == [ pretend.call( schemes=[ "argon2", "bcrypt_sha256", "bcrypt", "django_bcrypt", "unix_disabled", ], deprecated=["auto"], truncate_error=True, argon2__memory_cost=1024, argon2__parallelism=6, argon2__time_cost=6, ) ] def test_skips_ip_rate_limiter(self, user_service, metrics): user = UserFactory.create() resets = pretend.stub() limiter = pretend.stub( test=pretend.call_recorder(lambda uid: False), resets_in=pretend.call_recorder(lambda ipaddr: resets), hit=pretend.call_recorder(lambda uid: None), ) user_service.ratelimiters["ip.login"] = limiter user_service.remote_addr = None 
user_service.check_password(user.id, "password") assert limiter.test.calls == [] assert limiter.resets_in.calls == [] def test_username_is_not_prohibited(self, user_service): assert user_service.username_is_prohibited("my_username") is False def test_username_is_prohibited(self, user_service): user = UserFactory.create() user_service.db.add( ProhibitedUserName( name="my_username", comment="blah", prohibited_by=user, ) ) assert user_service.username_is_prohibited("my_username") is True def test_find_userid_nonexistent_user(self, user_service): assert user_service.find_userid("my_username") is None def test_find_userid_existing_user(self, user_service): user = UserFactory.create() assert user_service.find_userid(user.username) == user.id def test_check_password_global_rate_limited(self, user_service, metrics): resets = pretend.stub() limiter = pretend.stub(test=lambda: False, resets_in=lambda: resets) user_service.ratelimiters["global.login"] = limiter with pytest.raises(TooManyFailedLogins) as excinfo: user_service.check_password(uuid.uuid4(), None, tags=["foo"]) assert excinfo.value.resets_in is resets assert metrics.increment.calls == [ pretend.call( "warehouse.authentication.start", tags=["foo", "mechanism:check_password"], ), pretend.call( "warehouse.authentication.ratelimited", tags=["foo", "mechanism:check_password", "ratelimiter:global"], ), ] def test_check_password_nonexistent_user(self, user_service, metrics): assert not user_service.check_password(uuid.uuid4(), None, tags=["foo"]) assert metrics.increment.calls == [ pretend.call( "warehouse.authentication.start", tags=["foo", "mechanism:check_password"], ), pretend.call( "warehouse.authentication.failure", tags=["foo", "mechanism:check_password", "failure_reason:user"], ), ] def test_check_password_user_rate_limited(self, user_service, metrics): user = UserFactory.create() resets = pretend.stub() limiter = pretend.stub( test=pretend.call_recorder(lambda uid: False), resets_in=pretend.call_recorder(lambda 
uid: resets), ) user_service.ratelimiters["user.login"] = limiter with pytest.raises(TooManyFailedLogins) as excinfo: user_service.check_password(user.id, None) assert excinfo.value.resets_in is resets assert limiter.test.calls == [pretend.call(user.id)] assert limiter.resets_in.calls == [pretend.call(user.id)] assert metrics.increment.calls == [ pretend.call( "warehouse.authentication.start", tags=["mechanism:check_password"] ), pretend.call( "warehouse.authentication.ratelimited", tags=["mechanism:check_password", "ratelimiter:user"], ), ] def test_check_password_ip_rate_limited(self, user_service, metrics): user = UserFactory.create() resets = pretend.stub() limiter = pretend.stub( test=pretend.call_recorder(lambda uid: False), resets_in=pretend.call_recorder(lambda ipaddr: resets), ) user_service.ratelimiters["ip.login"] = limiter with pytest.raises(TooManyFailedLogins) as excinfo: user_service.check_password(user.id, None) assert excinfo.value.resets_in is resets assert limiter.test.calls == [pretend.call(REMOTE_ADDR)] assert limiter.resets_in.calls == [pretend.call(REMOTE_ADDR)] assert metrics.increment.calls == [ pretend.call( "warehouse.authentication.start", tags=["mechanism:check_password"] ), pretend.call( "warehouse.authentication.ratelimited", tags=["mechanism:check_password", "ratelimiter:ip"], ), ] def test_check_password_invalid(self, user_service, metrics): user = UserFactory.create() user_service.hasher = pretend.stub( verify_and_update=pretend.call_recorder(lambda L, r: (False, None)) ) assert not user_service.check_password(user.id, "user password") assert user_service.hasher.verify_and_update.calls == [ pretend.call("user password", user.password) ] assert metrics.increment.calls == [ pretend.call( "warehouse.authentication.start", tags=["mechanism:check_password"] ), pretend.call( "warehouse.authentication.failure", tags=["mechanism:check_password", "failure_reason:password"], ), ] def test_check_password_catches_bcrypt_exception(self, 
user_service, metrics): user = UserFactory.create() @pretend.call_recorder def raiser(*a, **kw): raise passlib.exc.PasswordValueError user_service.hasher = pretend.stub(verify_and_update=raiser) assert not user_service.check_password(user.id, "user password") assert user_service.hasher.verify_and_update.calls == [ pretend.call("user password", user.password) ] assert metrics.increment.calls == [ pretend.call( "warehouse.authentication.start", tags=["mechanism:check_password"] ), pretend.call( "warehouse.authentication.failure", tags=["mechanism:check_password", "failure_reason:password"], ), ] assert raiser.calls == [pretend.call("user password", "!")] def test_check_password_valid(self, user_service, metrics): user = UserFactory.create() user_service.hasher = pretend.stub( verify_and_update=pretend.call_recorder(lambda L, r: (True, None)) ) assert user_service.check_password(user.id, "user password", tags=["bar"]) assert user_service.hasher.verify_and_update.calls == [ pretend.call("user password", user.password) ] assert metrics.increment.calls == [ pretend.call( "warehouse.authentication.start", tags=["bar", "mechanism:check_password"], ), pretend.call( "warehouse.authentication.ok", tags=["bar", "mechanism:check_password"] ), ] @pytest.mark.parametrize( "password", [ ( "$argon2id$v=19$m=8,t=1,p=1$" "w/gfo5QSQihFyHlvDcE4pw$Hd4KENg+xDlq2bfeGUEYSieIXXL/c1NfTr0ZkYueO2Y" ), ( "$bcrypt-sha256$v=2,t=2b,r=12$" "DqC0lms6x9Dh6XesvIJvVe$hBbYe9JfdjyorOFcS3rv5BhmuSIyXD6" ), "$2b$12$2t/EVU3H9b3c5iR6GdELZOwCoyrT518DgCpNxHbX.S1IxV6eEEDhC", "bcrypt$$2b$12$EhhZDxGr/7HIKYRGMngC.O4sQx68vkaISSnSGZ6s8iOfaGy6l9cma", ], ) def test_check_password_updates(self, user_service, password): """ This test confirms passlib is actually working, see https://github.com/pypi/warehouse/issues/15454 """ user = UserFactory.create(password=password) assert user_service.check_password(user.id, "password") assert user.password.startswith("$argon2id$v=19$m=1024,t=6,p=6$") assert 
user_service.check_password(user.id, "password") def test_hash_is_upgraded(self, user_service): user = UserFactory.create() password = user.password user_service.hasher = pretend.stub( verify_and_update=pretend.call_recorder(lambda L, r: (True, "new password")) ) assert user_service.check_password(user.id, "user password") assert user_service.hasher.verify_and_update.calls == [ pretend.call("user password", password) ] assert user.password == "new password" def test_create_user(self, user_service): user = UserFactory.build() new_user = user_service.create_user( username=user.username, name=user.name, password=user.password ) user_service.db.flush() user_from_db = user_service.get_user(new_user.id) assert user_from_db.username == user.username assert user_from_db.name == user.name assert not user_from_db.is_active assert not user_from_db.is_superuser def test_add_email_not_primary(self, user_service): user = UserFactory.create() email = "foo@example.com" new_email = user_service.add_email(user.id, email, primary=False) assert new_email.email == email assert new_email.user == user assert not new_email.primary assert not new_email.verified def test_add_email_defaults_to_primary(self, user_service): user = UserFactory.create() email1 = "foo@example.com" email2 = "bar@example.com" new_email1 = user_service.add_email(user.id, email1) new_email2 = user_service.add_email(user.id, email2) assert new_email1.email == email1 assert new_email1.user == user assert new_email1.primary assert not new_email1.verified assert new_email2.email == email2 assert new_email2.user == user assert not new_email2.primary assert not new_email2.verified def test_add_email_rate_limited(self, user_service, metrics): resets = pretend.stub() limiter = pretend.stub( hit=pretend.call_recorder(lambda ip: None), test=pretend.call_recorder(lambda ip: False), resets_in=pretend.call_recorder(lambda ip: resets), ) user_service.ratelimiters["email.add"] = limiter user = UserFactory.build() with 
pytest.raises(TooManyEmailsAdded) as excinfo: user_service.add_email(user.id, user.email) assert excinfo.value.resets_in is resets assert limiter.test.calls == [pretend.call(REMOTE_ADDR)] assert limiter.resets_in.calls == [pretend.call(REMOTE_ADDR)] assert metrics.increment.calls == [ pretend.call( "warehouse.email.add.ratelimited", tags=["ratelimiter:email.add"] ) ] def test_add_email_bypass_ratelimit(self, user_service, metrics): resets = pretend.stub() limiter = pretend.stub( hit=pretend.call_recorder(lambda ip: None), test=pretend.call_recorder(lambda ip: False), resets_in=pretend.call_recorder(lambda ip: resets), ) user_service.ratelimiters["email.add"] = limiter user = UserFactory.create() new_email = user_service.add_email(user.id, "foo@example.com", ratelimit=False) assert new_email.email == "foo@example.com" assert not new_email.verified assert limiter.test.calls == [] assert limiter.resets_in.calls == [] assert metrics.increment.calls == [] def test_update_user(self, user_service): user = UserFactory.create() new_name, password = "new username", "TestPa@@w0rd" user_service.update_user(user.id, username=new_name, password=password) user_from_db = user_service.get_user(user.id) assert user_from_db.username == user.username assert password != user_from_db.password assert user_service.hasher.verify(password, user_from_db.password) def test_update_user_without_pw(self, user_service): user = UserFactory.create() new_name = "new username" user_service.update_user(user.id, username=new_name) user_from_db = user_service.get_user(user.id) assert user_from_db.username == user.username def test_find_by_email(self, user_service): user = UserFactory.create() EmailFactory.create(user=user, primary=True, verified=False) found_userid = user_service.find_userid_by_email(user.emails[0].email) user_service.db.flush() assert user.id == found_userid def test_find_by_email_not_found(self, user_service): assert user_service.find_userid_by_email("something") is None def 
test_create_login_success(self, user_service): user = user_service.create_user("test_user", "test_name", "test_password") assert user.id is not None # now make sure that we can log in as that user assert user_service.check_password(user.id, "test_password") def test_create_login_error(self, user_service): user = user_service.create_user("test_user", "test_name", "test_password") assert user.id is not None assert not user_service.check_password(user.id, "bad_password") def test_get_user_by_username(self, user_service): user = UserFactory.create() found_user = user_service.get_user_by_username(user.username) user_service.db.flush() assert user.username == found_user.username def test_get_user_by_username_failure(self, user_service): UserFactory.create() found_user = user_service.get_user_by_username("UNKNOWNTOTHEWORLD") user_service.db.flush() assert found_user is None def test_get_user_by_email(self, user_service): user = UserFactory.create() EmailFactory.create(user=user, primary=True, verified=False) found_user = user_service.get_user_by_email(user.emails[0].email) user_service.db.flush() assert user.id == found_user.id def test_get_users_by_prefix(self, user_service): user = UserFactory.create() found_users = user_service.get_users_by_prefix(user.username[:3]) assert len(found_users) == 1 assert user.id == found_users[0].id def test_get_user_by_email_failure(self, user_service): found_user = user_service.get_user_by_email("example@email.com") user_service.db.flush() assert found_user is None def test_get_admin_user(self, user_service): admin = UserFactory.create(is_superuser=True, username="admin") assert user_service.get_admin_user() == admin @pytest.mark.parametrize( ("reason", "expected"), [ (None, None), ( DisableReason.CompromisedPassword, DisableReason.CompromisedPassword.value, ), ], ) def test_disable_password(self, user_service, reason, expected): request = pretend.stub( remote_addr="127.0.0.1", ip_address=IpAddressFactory.create(), ) user = 
UserFactory.create() user.record_event = pretend.call_recorder(lambda *a, **kw: None) # Need to give the user a good password first. user_service.update_user(user.id, password="foo") assert user.password != "!" # Now we'll actually test our disable function. user_service.disable_password(user.id, reason=reason, request=request) assert user.password == "!" assert user.record_event.calls == [ pretend.call( tag=EventTag.Account.PasswordDisabled, request=request, additional={"reason": expected}, ) ] @pytest.mark.parametrize( ("disabled", "reason"), [(True, None), (True, DisableReason.CompromisedPassword), (False, None)], ) def test_is_disabled(self, user_service, disabled, reason): request = pretend.stub( remote_addr="127.0.0.1", ip_address=IpAddressFactory.create(), headers=dict(), db=pretend.stub(add=lambda *a: None), ) user = UserFactory.create() user_service.update_user(user.id, password="foo") if disabled: user_service.disable_password(user.id, reason=reason, request=request) assert user_service.is_disabled(user.id) == (disabled, reason) def test_is_disabled_user_frozen(self, user_service): user = UserFactory.create(is_frozen=True) assert user_service.is_disabled(user.id) == (True, DisableReason.AccountFrozen) def test_updating_password_undisables(self, user_service): request = pretend.stub( remote_addr="127.0.0.1", ip_address=IpAddressFactory.create(), headers=dict(), db=pretend.stub(add=lambda *a: None), ) user = UserFactory.create() user_service.disable_password( user.id, reason=DisableReason.CompromisedPassword, request=request ) assert user_service.is_disabled(user.id) == ( True, DisableReason.CompromisedPassword, ) user_service.update_user(user.id, password="foo") assert user_service.is_disabled(user.id) == (False, None) def test_has_two_factor(self, user_service): user = UserFactory.create(totp_secret=None) assert not user_service.has_two_factor(user.id) user_service.update_user(user.id, totp_secret=b"foobar") assert user_service.has_two_factor(user.id) def 
test_has_totp(self, user_service): user = UserFactory.create(totp_secret=None) assert not user_service.has_totp(user.id) user_service.update_user(user.id, totp_secret=b"foobar") assert user_service.has_totp(user.id) def test_has_webauthn(self, user_service): user = UserFactory.create() assert not user_service.has_webauthn(user.id) user_service.add_webauthn( user.id, label="test_label", credential_id="foo", public_key="bar", sign_count=1, ) assert user_service.has_webauthn(user.id) def test_get_last_totp_value(self, user_service): user = UserFactory.create() assert user_service.get_last_totp_value(user.id) is None user_service.update_user(user.id, last_totp_value="123456") assert user_service.get_last_totp_value(user.id) == "123456" @pytest.mark.parametrize( ("last_totp_value", "valid"), [(None, True), ("000000", True), ("000000", False)], ) def test_check_totp_value(self, user_service, monkeypatch, last_totp_value, valid): verify_totp = pretend.call_recorder(lambda *a: valid) monkeypatch.setattr(otp, "verify_totp", verify_totp) user = UserFactory.create() user_service.update_user( user.id, last_totp_value=last_totp_value, totp_secret=b"foobar" ) user_service.add_email(user.id, "foo@bar.com", primary=True, verified=True) assert user_service.check_totp_value(user.id, b"123456") == valid def test_check_totp_value_reused(self, user_service): user = UserFactory.create() user_service.update_user( user.id, last_totp_value="123456", totp_secret=b"foobar" ) assert not user_service.check_totp_value(user.id, b"123456") def test_check_totp_out_of_sync(self, mocker, metrics, user_service): user = UserFactory.create() mocker.patch.object(otp, "verify_totp", side_effect=otp.OutOfSyncTOTPError) with pytest.raises(otp.OutOfSyncTOTPError): user_service.check_totp_value(user.id, b"123456") assert metrics.increment.calls == [ pretend.call( "warehouse.authentication.two_factor.start", tags=["mechanism:check_totp_value"], ), pretend.call( "warehouse.authentication.two_factor.failure", 
tags=["mechanism:check_totp_value", "failure_reason:out_of_sync"], ), ] def test_check_totp_value_no_secret(self, user_service): user = UserFactory.create() with pytest.raises(otp.InvalidTOTPError): user_service.check_totp_value(user.id, b"123456") def test_check_totp_ip_rate_limited(self, user_service, metrics): resets = pretend.stub() limiter = pretend.stub( test=pretend.call_recorder(lambda uid: False), resets_in=pretend.call_recorder(lambda uid: resets), ) user_service.ratelimiters["2fa.ip"] = limiter with pytest.raises(TooManyFailedLogins) as excinfo: user_service.check_totp_value(uuid.uuid4(), b"123456", tags=["foo"]) assert excinfo.value.resets_in is resets assert metrics.increment.calls == [ pretend.call( "warehouse.authentication.two_factor.start", tags=["foo", "mechanism:check_totp_value"], ), pretend.call( "warehouse.authentication.ratelimited", tags=["foo", "mechanism:check_totp_value", "ratelimiter:ip"], ), ] def test_check_totp_value_user_rate_limited(self, user_service, metrics): user = UserFactory.create() resets = pretend.stub() limiter = pretend.stub( test=pretend.call_recorder(lambda uid: False), resets_in=pretend.call_recorder(lambda uid: resets), ) user_service.ratelimiters["2fa.user"] = limiter with pytest.raises(TooManyFailedLogins) as excinfo: user_service.check_totp_value(user.id, b"123456") assert excinfo.value.resets_in is resets assert limiter.test.calls == [pretend.call(user.id)] assert limiter.resets_in.calls == [pretend.call(user.id)] assert metrics.increment.calls == [ pretend.call( "warehouse.authentication.two_factor.start", tags=["mechanism:check_totp_value"], ), pretend.call( "warehouse.authentication.ratelimited", tags=["mechanism:check_totp_value", "ratelimiter:user"], ), ] def test_check_totp_value_invalid_secret(self, user_service): user = UserFactory.create(totp_secret=None) limiter = pretend.stub( hit=pretend.call_recorder(lambda *a, **kw: None), test=lambda *a, **kw: True ) user_service.ratelimiters["2fa.user"] = limiter 
user_service.ratelimiters["2fa.ip"] = limiter valid = user_service.check_totp_value(user.id, b"123456") assert not valid assert limiter.hit.calls == [pretend.call(user.id), pretend.call(REMOTE_ADDR)] def test_check_totp_value_invalid_totp(self, user_service, monkeypatch): user = UserFactory.create() limiter = pretend.stub( hit=pretend.call_recorder(lambda ip: None), test=pretend.call_recorder(lambda uid: True), ) user_service.get_totp_secret = lambda uid: "secret" monkeypatch.setattr(otp, "verify_totp", lambda secret, value: False) user_service.ratelimiters["2fa.user"] = limiter user_service.ratelimiters["2fa.ip"] = limiter valid = user_service.check_totp_value(user.id, b"123456") assert not valid assert limiter.test.calls == [pretend.call(REMOTE_ADDR), pretend.call(user.id)] assert limiter.hit.calls == [pretend.call(user.id), pretend.call(REMOTE_ADDR)] def test_check_totp_value_with_2fa_rate_limiters( self, db_session, metrics, monkeypatch ): """Test that check_totp_value uses new 2FA rate limiters when available.""" user = UserFactory.create() # Create mocked rate limiters ratelimiters = { "user.login": pretend.stub(test=lambda *a: True, hit=lambda *a: None), "ip.login": pretend.stub(test=lambda *a: True, hit=lambda *a: None), "global.login": pretend.stub(test=lambda: True, hit=lambda: None), "2fa.user": pretend.stub( test=pretend.call_recorder(lambda uid: True), hit=pretend.call_recorder(lambda uid: None), ), "2fa.ip": pretend.stub( test=pretend.call_recorder(lambda addr: True), hit=pretend.call_recorder(lambda addr: None), ), } user_service = services.DatabaseUserService( db_session, metrics=metrics, remote_addr=REMOTE_ADDR, ratelimiters=ratelimiters, ) # Mock TOTP verification to fail monkeypatch.setattr(otp, "verify_totp", lambda secret, value: False) user_service.update_user(user.id, totp_secret=b"secret") result = user_service.check_totp_value(user.id, b"123456") assert not result # Should use 2FA rate limiters, not login rate limiters assert 
ratelimiters["2fa.user"].test.calls == [pretend.call(user.id)] assert ratelimiters["2fa.ip"].test.calls == [pretend.call(REMOTE_ADDR)] def test_check_2fa_ratelimits_ip_limited(self, db_session, metrics): """Test IP-based 2FA rate limiting.""" user = UserFactory.create() resets = pretend.stub() ratelimiters = { "2fa.ip": pretend.stub( test=pretend.call_recorder(lambda addr: False), resets_in=pretend.call_recorder(lambda addr: resets), ), "2fa.user": pretend.stub(test=lambda *a: True), } user_service = services.DatabaseUserService( db_session, metrics=metrics, remote_addr=REMOTE_ADDR, ratelimiters=ratelimiters, ) with pytest.raises(TooManyFailedLogins) as excinfo: user_service._check_2fa_ratelimits(userid=user.id, tags=["test_tag"]) assert excinfo.value.resets_in is resets assert ratelimiters["2fa.ip"].test.calls == [pretend.call(REMOTE_ADDR)] assert metrics.increment.calls == [ pretend.call( "warehouse.authentication.ratelimited", tags=["test_tag", "ratelimiter:ip"], ), ] def test_check_2fa_ratelimits_user_limited(self, db_session, metrics): """Test user-based 2FA rate limiting.""" user = UserFactory.create() resets = pretend.stub() ratelimiters = { "2fa.ip": pretend.stub(test=lambda *a: True), "2fa.user": pretend.stub( test=pretend.call_recorder(lambda uid: False), resets_in=pretend.call_recorder(lambda uid: resets), ), } user_service = services.DatabaseUserService( db_session, metrics=metrics, remote_addr=REMOTE_ADDR, ratelimiters=ratelimiters, ) with pytest.raises(TooManyFailedLogins) as excinfo: user_service._check_2fa_ratelimits(userid=user.id, tags=["test_tag"]) assert excinfo.value.resets_in is resets assert ratelimiters["2fa.user"].test.calls == [pretend.call(user.id)] assert metrics.increment.calls == [ pretend.call( "warehouse.authentication.ratelimited", tags=["test_tag", "ratelimiter:user"], ), ] def test_check_2fa_ratelimits_no_remote_addr(self, db_session, metrics): """Test 2FA rate limiting when remote_addr is None.""" user = UserFactory.create() 
ratelimiters = { "2fa.ip": pretend.stub( test=pretend.call_recorder(lambda addr: True), ), "2fa.user": pretend.stub(test=lambda *a: True), } user_service = services.DatabaseUserService( db_session, metrics=metrics, remote_addr=None, ratelimiters=ratelimiters, ) # Should not raise, IP check should be skipped user_service._check_2fa_ratelimits(userid=user.id) # IP limiter should not be called assert ratelimiters["2fa.ip"].test.calls == [] def test_hit_2fa_ratelimits(self, db_session, metrics): """Test hitting 2FA rate limits records properly.""" user = UserFactory.create() ratelimiters = { "2fa.user": pretend.stub(hit=pretend.call_recorder(lambda uid: None)), "2fa.ip": pretend.stub(hit=pretend.call_recorder(lambda addr: None)), } user_service = services.DatabaseUserService( db_session, metrics=metrics, remote_addr=REMOTE_ADDR, ratelimiters=ratelimiters, ) user_service._hit_2fa_ratelimits(userid=user.id) assert ratelimiters["2fa.user"].hit.calls == [pretend.call(user.id)] assert ratelimiters["2fa.ip"].hit.calls == [pretend.call(REMOTE_ADDR)] def test_hit_2fa_ratelimits_no_remote_addr(self, db_session, metrics): """Test hitting 2FA rate limits when remote_addr is None.""" user = UserFactory.create() ratelimiters = { "2fa.user": pretend.stub(hit=pretend.call_recorder(lambda uid: None)), "2fa.ip": pretend.stub(hit=pretend.call_recorder(lambda addr: None)), } user_service = services.DatabaseUserService( db_session, metrics=metrics, remote_addr=None, ratelimiters=ratelimiters, ) user_service._hit_2fa_ratelimits(userid=user.id) # Only user limiter should be hit assert ratelimiters["2fa.user"].hit.calls == [pretend.call(user.id)] assert ratelimiters["2fa.ip"].hit.calls == [] def test_verify_webauthn_assertion_rate_limited( self, db_session, metrics, monkeypatch ): """Test that verify_webauthn_assertion uses 2FA rate limiters.""" user = UserFactory.create() resets = pretend.stub() ratelimiters = { "2fa.user": pretend.stub( test=pretend.call_recorder(lambda uid: False), 
resets_in=pretend.call_recorder(lambda uid: resets), ), "2fa.ip": pretend.stub(test=lambda *a: True), } user_service = services.DatabaseUserService( db_session, metrics=metrics, remote_addr=REMOTE_ADDR, ratelimiters=ratelimiters, ) with pytest.raises(TooManyFailedLogins) as excinfo: user_service.verify_webauthn_assertion( user.id, b"assertion", challenge=b"challenge", origin="https://example.com", rp_id="example.com", ) assert excinfo.value.resets_in is resets assert ratelimiters["2fa.user"].test.calls == [pretend.call(user.id)] assert metrics.increment.calls == [ pretend.call( "warehouse.authentication.ratelimited", tags=["mechanism:webauthn", "ratelimiter:user"], ), ] def test_verify_webauthn_assertion_failure_hits_ratelimits( self, db_session, metrics, monkeypatch ): """Test that failed WebAuthn assertions hit 2FA rate limiters.""" user = UserFactory.create() ratelimiters = { "2fa.user": pretend.stub( test=lambda *a: True, hit=pretend.call_recorder(lambda uid: None) ), "2fa.ip": pretend.stub( test=lambda *a: True, hit=pretend.call_recorder(lambda addr: None) ), } user_service = services.DatabaseUserService( db_session, metrics=metrics, remote_addr=REMOTE_ADDR, ratelimiters=ratelimiters, ) # Mock webauthn to raise AuthenticationRejectedError monkeypatch.setattr( webauthn, "verify_assertion_response", pretend.raiser(webauthn.AuthenticationRejectedError("test error")), ) with pytest.raises(webauthn.AuthenticationRejectedError): user_service.verify_webauthn_assertion( user.id, b"assertion", challenge=b"challenge", origin="https://example.com", rp_id="example.com", ) assert ratelimiters["2fa.user"].hit.calls == [pretend.call(user.id)] assert ratelimiters["2fa.ip"].hit.calls == [pretend.call(REMOTE_ADDR)] def test_check_recovery_code_uses_2fa_ratelimits(self, db_session, metrics): """Test that check_recovery_code uses 2FA rate limiters.""" user = UserFactory.create() resets = pretend.stub() ratelimiters = { "2fa.ip": pretend.stub( test=pretend.call_recorder(lambda 
addr: False), resets_in=pretend.call_recorder(lambda addr: resets), ), "2fa.user": pretend.stub(test=lambda *a: True), } user_service = services.DatabaseUserService( db_session, metrics=metrics, remote_addr=REMOTE_ADDR, ratelimiters=ratelimiters, ) with pytest.raises(TooManyFailedLogins) as excinfo: user_service.check_recovery_code(user.id, "code") assert excinfo.value.resets_in is resets assert ratelimiters["2fa.ip"].test.calls == [pretend.call(REMOTE_ADDR)] assert metrics.increment.calls == [ pretend.call("warehouse.authentication.recovery_code.start"), pretend.call( "warehouse.authentication.ratelimited", tags=["mechanism:check_recovery_code", "ratelimiter:ip"], ), ] def test_get_webauthn_credential_options(self, user_service): user = UserFactory.create() options = user_service.get_webauthn_credential_options( user.id, challenge=b"fake_challenge", rp_name="fake_rp_name", rp_id="fake_rp_id", ) assert options["user"]["id"] == bytes_to_base64url(str(user.id).encode()) assert options["user"]["name"] == user.username assert options["user"]["displayName"] == user.name assert options["challenge"] == bytes_to_base64url(b"fake_challenge") assert options["rp"]["name"] == "fake_rp_name" assert options["rp"]["id"] == "fake_rp_id" assert "icon" not in options["user"] def test_get_webauthn_credential_options_for_blank_name(self, user_service): user = UserFactory.create(name="") options = user_service.get_webauthn_credential_options( user.id, challenge=b"fake_challenge", rp_name="fake_rp_name", rp_id="fake_rp_id", ) assert options["user"]["name"] == user.username assert options["user"]["displayName"] == user.username def test_get_webauthn_assertion_options(self, user_service): user = UserFactory.create() user_service.add_webauthn( user.id, label="test_label", credential_id="foo", public_key="bar", sign_count=1, ) options = user_service.get_webauthn_assertion_options( user.id, challenge=b"fake_challenge", rp_id="fake_rp_id" ) assert options["challenge"] == 
bytes_to_base64url(b"fake_challenge") assert options["rpId"] == "fake_rp_id" assert options["allowCredentials"][0]["id"] == user.webauthn[0].credential_id def test_verify_webauthn_credential(self, user_service, monkeypatch): user = UserFactory.create() user_service.add_webauthn( user.id, label="test_label", credential_id="foo", public_key="bar", sign_count=1, ) fake_validated_credential = pretend.stub(credential_id=b"bar") verify_registration_response = pretend.call_recorder( lambda *a, **kw: fake_validated_credential ) monkeypatch.setattr( webauthn, "verify_registration_response", verify_registration_response ) validated_credential = user_service.verify_webauthn_credential( pretend.stub(), challenge=pretend.stub(), rp_id=pretend.stub(), origin=pretend.stub(), ) assert validated_credential is fake_validated_credential def test_verify_webauthn_credential_already_in_use(self, user_service, monkeypatch): user = UserFactory.create() user_service.add_webauthn( user.id, label="test_label", credential_id=bytes_to_base64url(b"foo"), public_key=b"bar", sign_count=1, ) fake_validated_credential = VerifiedRegistration( credential_id=b"foo", credential_public_key=b"bar", sign_count=0, aaguid="wutang", fmt=AttestationFormat.NONE, credential_type=PublicKeyCredentialType.PUBLIC_KEY, user_verified=False, attestation_object=b"foobar", credential_device_type="single_device", credential_backed_up=False, ) verify_registration_response = pretend.call_recorder( lambda *a, **kw: fake_validated_credential ) monkeypatch.setattr( webauthn, "verify_registration_response", verify_registration_response ) with pytest.raises(webauthn.RegistrationRejectedError): user_service.verify_webauthn_credential( pretend.stub(), challenge=pretend.stub(), rp_id=pretend.stub(), origin=pretend.stub(), ) def test_verify_webauthn_assertion(self, user_service, monkeypatch): user = UserFactory.create() user_service.add_webauthn( user.id, label="test_label", credential_id="foo", public_key="bar", sign_count=1, ) 
verify_assertion_response = pretend.call_recorder(lambda *a, **kw: 2) monkeypatch.setattr( webauthn, "verify_assertion_response", verify_assertion_response ) updated_sign_count = user_service.verify_webauthn_assertion( user.id, pretend.stub(), challenge=pretend.stub(), origin=pretend.stub(), rp_id=pretend.stub(), ) assert updated_sign_count == 2 def test_get_webauthn_by_label(self, user_service): user = UserFactory.create() user_service.add_webauthn( user.id, label="test_label", credential_id="foo", public_key="bar", sign_count=1, ) webauthn = user_service.get_webauthn_by_label(user.id, "test_label") assert webauthn is not None assert webauthn.label == "test_label" webauthn = user_service.get_webauthn_by_label(user.id, "not_a_real_label") assert webauthn is None other_user = UserFactory.create() webauthn = user_service.get_webauthn_by_label(other_user.id, "test_label") assert webauthn is None def test_get_webauthn_by_credential_id(self, user_service): user = UserFactory.create() user_service.add_webauthn( user.id, label="foo", credential_id="test_credential_id", public_key="bar", sign_count=1, ) webauthn = user_service.get_webauthn_by_credential_id( user.id, "test_credential_id" ) assert webauthn is not None assert webauthn.credential_id == "test_credential_id" webauthn = user_service.get_webauthn_by_credential_id( user.id, "not_a_real_label" ) assert webauthn is None other_user = UserFactory.create() webauthn = user_service.get_webauthn_by_credential_id( other_user.id, "test_credential_id" ) assert webauthn is None def test_has_recovery_codes(self, user_service): user = UserFactory.create() assert not user_service.has_recovery_codes(user.id) user_service.generate_recovery_codes(user.id) assert user_service.has_recovery_codes(user.id) def test_get_recovery_codes(self, user_service): user = UserFactory.create() with pytest.raises(NoRecoveryCodes): user_service.get_recovery_codes(user.id) user_service.generate_recovery_codes(user.id) assert 
len(user_service.get_recovery_codes(user.id)) == 8 def test_get_recovery_code(self, user_service): user = UserFactory.create() with pytest.raises(NoRecoveryCodes): user_service.get_recovery_code(user.id, "invalid") codes = user_service.generate_recovery_codes(user.id) with pytest.raises(InvalidRecoveryCode): user_service.get_recovery_code(user.id, "invalid") code = user_service.get_recovery_code(user.id, codes[0]) assert user_service.hasher.verify(codes[0], code.code) def test_generate_recovery_codes(self, user_service): user = UserFactory.create() assert not user_service.has_recovery_codes(user.id) with pytest.raises(NoRecoveryCodes): user_service.get_recovery_codes(user.id) codes = user_service.generate_recovery_codes(user.id) assert len(codes) == 8 assert len(user_service.get_recovery_codes(user.id)) == 8 def test_check_recovery_code(self, user_service, metrics): user = UserFactory.create() with pytest.raises(NoRecoveryCodes): user_service.check_recovery_code(user.id, "no codes yet") codes = user_service.generate_recovery_codes(user.id) assert len(codes) == 8 assert len(user_service.get_recovery_codes(user.id)) == 8 assert not user_service.get_recovery_code(user.id, codes[0]).burned assert user_service.check_recovery_code(user.id, codes[0]) # Once used, the code should not be accepted again. 
assert len(user_service.get_recovery_codes(user.id)) == 8 with pytest.raises(BurnedRecoveryCode): user_service.check_recovery_code(user.id, codes[0]) assert user_service.get_recovery_code(user.id, codes[0]).burned assert metrics.increment.calls == [ pretend.call("warehouse.authentication.recovery_code.start"), pretend.call( "warehouse.authentication.recovery_code.failure", tags=["failure_reason:no_recovery_codes"], ), pretend.call("warehouse.authentication.recovery_code.start"), pretend.call("warehouse.authentication.recovery_code.ok"), pretend.call("warehouse.authentication.recovery_code.start"), pretend.call( "warehouse.authentication.recovery_code.failure", tags=["failure_reason:burned_recovery_code"], ), ] def test_check_recovery_code_ip_rate_limited(self, user_service, metrics): resets = pretend.stub() limiter = pretend.stub( test=pretend.call_recorder(lambda uid: False), resets_in=pretend.call_recorder(lambda uid: resets), ) user_service.ratelimiters["2fa.ip"] = limiter with pytest.raises(TooManyFailedLogins) as excinfo: user_service.check_recovery_code(uuid.uuid4(), "recovery_code") assert excinfo.value.resets_in is resets assert metrics.increment.calls == [ pretend.call("warehouse.authentication.recovery_code.start"), pretend.call( "warehouse.authentication.ratelimited", tags=["mechanism:check_recovery_code", "ratelimiter:ip"], ), ] def test_check_recovery_code_user_rate_limited(self, user_service, metrics): user = UserFactory.create() resets = pretend.stub() limiter = pretend.stub( test=pretend.call_recorder(lambda uid: False), resets_in=pretend.call_recorder(lambda uid: resets), ) user_service.ratelimiters["2fa.ip"] = limiter with pytest.raises(TooManyFailedLogins) as excinfo: user_service.check_recovery_code(user.id, "recovery_code") assert excinfo.value.resets_in is resets assert limiter.test.calls == [pretend.call(REMOTE_ADDR)] assert limiter.resets_in.calls == [pretend.call(REMOTE_ADDR)] assert metrics.increment.calls == [ 
pretend.call("warehouse.authentication.recovery_code.start"), pretend.call( "warehouse.authentication.ratelimited", tags=["mechanism:check_recovery_code", "ratelimiter:ip"], ), ] def test_regenerate_recovery_codes(self, user_service): user = UserFactory.create() with pytest.raises(NoRecoveryCodes): user_service.get_recovery_codes(user.id) user_service.generate_recovery_codes(user.id) initial_codes = user_service.get_recovery_codes(user.id) assert len(initial_codes) == 8 user_service.generate_recovery_codes(user.id) new_codes = user_service.get_recovery_codes(user.id) assert len(new_codes) == 8 assert [c.id for c in initial_codes] != [c.id for c in new_codes] def test_get_password_timestamp(self, user_service): create_time = datetime.datetime.now(datetime.UTC) with freezegun.freeze_time(create_time): user = UserFactory.create() user.password_date = create_time assert user_service.get_password_timestamp(user.id) == create_time.timestamp() def test_get_password_timestamp_no_value(self, user_service): user = UserFactory.create() user.password_date = None assert user_service.get_password_timestamp(user.id) == 0 def test_needs_tos_flash_no_engagements(self, user_service): user = UserFactory.create() assert user_service.needs_tos_flash(user.id, "initial") is True def test_needs_tos_flash_with_passive_engagements(self, user_service): user = UserFactory.create() assert user_service.needs_tos_flash(user.id, "initial") is True user_service.record_tos_engagement( user.id, "initial", TermsOfServiceEngagement.Notified ) assert user_service.needs_tos_flash(user.id, "initial") is True user_service.record_tos_engagement( user.id, "initial", TermsOfServiceEngagement.Flashed ) assert user_service.needs_tos_flash(user.id, "initial") is True def test_needs_tos_flash_with_viewed_engagement(self, user_service): user = UserFactory.create() assert user_service.needs_tos_flash(user.id, "initial") is True user_service.record_tos_engagement( user.id, "initial", TermsOfServiceEngagement.Viewed 
) assert user_service.needs_tos_flash(user.id, "initial") is False def test_needs_tos_flash_with_agreed_engagement(self, user_service): user = UserFactory.create() assert user_service.needs_tos_flash(user.id, "initial") is True user_service.record_tos_engagement( user.id, "initial", TermsOfServiceEngagement.Agreed ) assert user_service.needs_tos_flash(user.id, "initial") is False def test_needs_tos_flash_if_engaged_more_than_30_days_ago(self, user_service): user = UserFactory.create() UserTermsOfServiceEngagementFactory.create( user=user, created=(datetime.datetime.now(datetime.UTC) - datetime.timedelta(days=31)), engagement=TermsOfServiceEngagement.Notified, ) assert user_service.needs_tos_flash(user.id, "initial") is False def test_record_tos_engagement_invalid_engagement(self, user_service): user = UserFactory.create() assert user.terms_of_service_engagements == [] with pytest.raises(ValueError): # noqa: PT011 user_service.record_tos_engagement( user.id, "initial", None, ) @pytest.mark.parametrize( "engagement", [ TermsOfServiceEngagement.Flashed, TermsOfServiceEngagement.Notified, TermsOfServiceEngagement.Viewed, TermsOfServiceEngagement.Agreed, ], ) def test_record_tos_engagement(self, user_service, db_request, engagement): user = UserFactory.create() assert user.terms_of_service_engagements == [] user_service.record_tos_engagement( user.id, "initial", engagement, ) assert ( db_request.db.query(UserTermsOfServiceEngagement) .filter( UserTermsOfServiceEngagement.user_id == user.id, UserTermsOfServiceEngagement.revision == "initial", UserTermsOfServiceEngagement.engagement == engagement, ) .count() ) == 1
TestDatabaseUserService
python
huggingface__transformers
src/transformers/models/nemotron/modeling_nemotron.py
{ "start": 39562, "end": 43417 }
class ____(NemotronPreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"} def __init__(self, config): super().__init__(config) self.model = NemotronModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs, ) -> CausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, NemotronForCausalLM >>> model = NemotronForCausalLM.from_pretrained("nvidia/nemotron-3-8b-base-4k-hf") >>> tokenizer = AutoTokenizer.from_pretrained("nvidia/nemotron-3-8b-base-4k-hf") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs: BaseModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits, labels, self.vocab_size, **kwargs) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
NemotronForCausalLM
python
psf__requests
src/requests/exceptions.py
{ "start": 225, "end": 788 }
class ____(IOError): """There was an ambiguous exception that occurred while handling your request. """ def __init__(self, *args, **kwargs): """Initialize RequestException with `request` and `response` objects.""" response = kwargs.pop("response", None) self.response = response self.request = kwargs.pop("request", None) if response is not None and not self.request and hasattr(response, "request"): self.request = self.response.request super().__init__(*args, **kwargs)
RequestException
python
jmcnamara__XlsxWriter
xlsxwriter/test/sharedstrings/test_sharedstrings01.py
{ "start": 372, "end": 2228 }
class ____(unittest.TestCase): """ Test assembling a complete SharedStrings file. """ def test_assemble_xml_file(self): """Test the _write_sheet_data() method""" string_table = SharedStringTable() # Add some strings and check the returned indices. index = string_table._get_shared_string_index("neptune") self.assertEqual(index, 0) index = string_table._get_shared_string_index("neptune") self.assertEqual(index, 0) index = string_table._get_shared_string_index("neptune") self.assertEqual(index, 0) index = string_table._get_shared_string_index("mars") self.assertEqual(index, 1) index = string_table._get_shared_string_index("venus") self.assertEqual(index, 2) index = string_table._get_shared_string_index("mars") self.assertEqual(index, 1) index = string_table._get_shared_string_index("venus") self.assertEqual(index, 2) string_table._sort_string_data() fh = StringIO() sharedstrings = SharedStrings() sharedstrings._set_filehandle(fh) sharedstrings.string_table = string_table sharedstrings._assemble_xml_file() exp = _xml_to_list( """ <?xml version="1.0" encoding="UTF-8" standalone="yes"?> <sst xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" count="7" uniqueCount="3"> <si> <t>neptune</t> </si> <si> <t>mars</t> </si> <si> <t>venus</t> </si> </sst> """ ) got = _xml_to_list(fh.getvalue()) self.assertEqual(exp, got)
TestAssembleSharedStrings
python
scipy__scipy
scipy/integrate/tests/test_quadpack.py
{ "start": 11089, "end": 15839 }
class ____: def test_double_integral(self): # 8) Double Integral test def simpfunc(y, x): # Note order of arguments. return x+y a, b = 1.0, 2.0 assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x), 5/6.0 * (b**3.0-a**3.0)) def test_double_integral2(self): def func(x0, x1, t0, t1): return x0 + x1 + t0 + t1 def g(x): return x def h(x): return 2 * x args = 1, 2 assert_quad(dblquad(func, 1, 2, g, h, args=args),35./6 + 9*.5) def test_double_integral3(self): def func(x0, x1): return x0 + x1 + 1 + 2 assert_quad(dblquad(func, 1, 2, 1, 2),6.) @pytest.mark.parametrize( "x_lower, x_upper, y_lower, y_upper, expected", [ # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain D = [-inf, 0] for all n. (-np.inf, 0, -np.inf, 0, np.pi / 4), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain D = [-inf, -1] for each n (one at a time). (-np.inf, -1, -np.inf, 0, np.pi / 4 * erfc(1)), (-np.inf, 0, -np.inf, -1, np.pi / 4 * erfc(1)), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain D = [-inf, -1] for all n. (-np.inf, -1, -np.inf, -1, np.pi / 4 * (erfc(1) ** 2)), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain D = [-inf, 1] for each n (one at a time). (-np.inf, 1, -np.inf, 0, np.pi / 4 * (erf(1) + 1)), (-np.inf, 0, -np.inf, 1, np.pi / 4 * (erf(1) + 1)), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain D = [-inf, 1] for all n. (-np.inf, 1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) ** 2)), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain Dx = [-inf, -1] and Dy = [-inf, 1]. (-np.inf, -1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) * erfc(1))), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain Dx = [-inf, 1] and Dy = [-inf, -1]. 
(-np.inf, 1, -np.inf, -1, np.pi / 4 * ((erf(1) + 1) * erfc(1))), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain D = [0, inf] for all n. (0, np.inf, 0, np.inf, np.pi / 4), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain D = [1, inf] for each n (one at a time). (1, np.inf, 0, np.inf, np.pi / 4 * erfc(1)), (0, np.inf, 1, np.inf, np.pi / 4 * erfc(1)), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain D = [1, inf] for all n. (1, np.inf, 1, np.inf, np.pi / 4 * (erfc(1) ** 2)), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain D = [-1, inf] for each n (one at a time). (-1, np.inf, 0, np.inf, np.pi / 4 * (erf(1) + 1)), (0, np.inf, -1, np.inf, np.pi / 4 * (erf(1) + 1)), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain D = [-1, inf] for all n. (-1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) ** 2)), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain Dx = [-1, inf] and Dy = [1, inf]. (-1, np.inf, 1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain Dx = [1, inf] and Dy = [-1, inf]. (1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain D = [-inf, inf] for all n. (-np.inf, np.inf, -np.inf, np.inf, np.pi), # Multiple integration of a function in n = 2 variables: f(x, y, z) # over domain D = [0, 0] for each n (one at a time). (0, 0, 0, np.inf, 0.), (0, np.inf, 0, 0, 0.), ] ) def test_double_integral_improper( self, x_lower, x_upper, y_lower, y_upper, expected ): # The Gaussian Integral. def f(x, y): return np.exp(-x ** 2 - y ** 2) assert_quad( dblquad(f, x_lower, x_upper, y_lower, y_upper), expected, error_tolerance=3e-8 ) @make_xp_test_case(tplquad)
TestDblquad
python
fastai__fastai
fastai/vision/augment.py
{ "start": 2792, "end": 3913 }
class ____(RandTransform): "Randomly flip with probability `p`" def __init__(self, p:float=0.5): super().__init__(p=p) def encodes(self, x:(Image.Image,*TensorTypes)): return x.flip_lr() # %% ../../nbs/09_vision.augment.ipynb 21 @patch def dihedral(x:PILImage, k:int, # Dihedral transformation to apply ): return x if k==0 else x.transpose(k-1) @patch def dihedral(x:TensorImage, k:int, # Dihedral transformation to apply ): if k in [1,3,4,7]: x = x.flip(-1) if k in [2,4,5,7]: x = x.flip(-2) if k in [3,5,6,7]: x = x.transpose(-1,-2) return x @patch def dihedral(x:TensorPoint, k:int, # Dihedral transformation to apply ): if k in [1,3,4,7]: x = _neg_axis(x, 0) if k in [2,4,5,7]: x = _neg_axis(x, 1) if k in [3,5,6,7]: x = x.flip(1) return x @patch def dihedral(x:TensorBBox, k:int, #Dihedral transformation to apply ): pnts = TensorPoint(x.view(-1,2)).dihedral(k).view(-1,2,2) tl,br = pnts.min(dim=1)[0],pnts.max(dim=1)[0] return TensorBBox(torch.cat([tl, br], dim=1), img_size=x.img_size) # %% ../../nbs/09_vision.augment.ipynb 22
FlipItem
python
pytest-dev__pytest
src/_pytest/_py/path.py
{ "start": 3608, "end": 4736 }
class ____: def __init__(self, fil, rec, ignore, bf, sort): if isinstance(fil, str): fil = FNMatcher(fil) if isinstance(rec, str): self.rec: Callable[[LocalPath], bool] = FNMatcher(rec) elif not hasattr(rec, "__call__") and rec: self.rec = lambda path: True else: self.rec = rec self.fil = fil self.ignore = ignore self.breadthfirst = bf self.optsort = cast(Callable[[Any], Any], sorted) if sort else (lambda x: x) def gen(self, path): try: entries = path.listdir() except self.ignore: return rec = self.rec dirs = self.optsort( [p for p in entries if p.check(dir=1) and (rec is None or rec(p))] ) if not self.breadthfirst: for subdir in dirs: yield from self.gen(subdir) for p in self.optsort(entries): if self.fil is None or self.fil(p): yield p if self.breadthfirst: for subdir in dirs: yield from self.gen(subdir)
Visitor
python
tensorflow__tensorflow
tensorflow/python/autograph/converters/control_flow.py
{ "start": 14345, "end": 14837 }
class ____(reaching_definitions.Definition): def __init__(self): super(AnnotatedDef, self).__init__() self.directives = {} def transform(node, ctx): graphs = cfg.build(node) node = qual_names.resolve(node) node = activity.resolve(node, ctx, None) node = reaching_definitions.resolve(node, ctx, graphs) node = reaching_fndefs.resolve(node, ctx, graphs) node = liveness.resolve(node, ctx, graphs) node = ControlFlowTransformer(ctx).visit(node) return node
AnnotatedDef
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/operators/bedrock.py
{ "start": 4909, "end": 12398 }
class ____(AwsBaseOperator[BedrockHook]): """ Create a fine-tuning job to customize a base model. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:BedrockCustomizeModelOperator` :param job_name: A unique name for the fine-tuning job. :param custom_model_name: A name for the custom model being created. :param role_arn: The Amazon Resource Name (ARN) of an IAM role that Amazon Bedrock can assume to perform tasks on your behalf. :param base_model_id: Name of the base model. :param training_data_uri: The S3 URI where the training data is stored. :param output_data_uri: The S3 URI where the output data is stored. :param hyperparameters: Parameters related to tuning the model. :param ensure_unique_job_name: If set to true, operator will check whether a model customization job already exists for the name in the config and append the current timestamp if there is a name conflict. (Default: True) :param customization_job_kwargs: Any optional parameters to pass to the API. :param wait_for_completion: Whether to wait for cluster to stop. (default: True) :param waiter_delay: Time in seconds to wait between status checks. (default: 120) :param waiter_max_attempts: Maximum number of attempts to check for job completion. (default: 75) :param deferrable: If True, the operator will wait asynchronously for the cluster to stop. This implies waiting for completion. This mode requires aiobotocore module to be installed. (default: False) :param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node). :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. :param verify: Whether or not to verify SSL certificates. 
See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html :param botocore_config: Configuration dictionary (key-values) for botocore client. See: https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html """ aws_hook_class = BedrockHook template_fields: Sequence[str] = aws_template_fields( "job_name", "custom_model_name", "role_arn", "base_model_id", "hyperparameters", "ensure_unique_job_name", "customization_job_kwargs", ) def __init__( self, job_name: str, custom_model_name: str, role_arn: str, base_model_id: str, training_data_uri: str, output_data_uri: str, hyperparameters: dict[str, str], ensure_unique_job_name: bool = True, customization_job_kwargs: dict[str, Any] | None = None, wait_for_completion: bool = True, waiter_delay: int = 120, waiter_max_attempts: int = 75, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), **kwargs, ): super().__init__(**kwargs) self.wait_for_completion = wait_for_completion self.waiter_delay = waiter_delay self.waiter_max_attempts = waiter_max_attempts self.deferrable = deferrable self.job_name = job_name self.custom_model_name = custom_model_name self.role_arn = role_arn self.base_model_id = base_model_id self.training_data_config = {"s3Uri": training_data_uri} self.output_data_config = {"s3Uri": output_data_uri} self.hyperparameters = hyperparameters self.ensure_unique_job_name = ensure_unique_job_name self.customization_job_kwargs = customization_job_kwargs or {} self.valid_action_if_job_exists: set[str] = {"timestamp", "fail"} def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str: validated_event = validate_execute_complete_event(event) if validated_event["status"] != "success": raise AirflowException(f"Error while running job: {validated_event}") self.log.info("Bedrock model customization job `%s` complete.", self.job_name) return 
self.hook.conn.get_model_customization_job(jobIdentifier=validated_event["job_name"])["jobArn"] def execute(self, context: Context) -> dict: response = {} retry = True while retry: # If there is a name conflict and ensure_unique_job_name is True, append the current timestamp # to the name and retry until there is no name conflict. # - Break the loop when the API call returns success. # - If the API returns an exception other than a name conflict, raise that exception. # - If the API returns a name conflict and ensure_unique_job_name is false, raise that exception. try: # Ensure the loop is executed at least once, and not repeat unless explicitly set to do so. retry = False self.log.info("Creating Bedrock model customization job '%s'.", self.job_name) response = self.hook.conn.create_model_customization_job( jobName=self.job_name, customModelName=self.custom_model_name, roleArn=self.role_arn, baseModelIdentifier=self.base_model_id, trainingDataConfig=self.training_data_config, outputDataConfig=self.output_data_config, hyperParameters=self.hyperparameters, **self.customization_job_kwargs, ) except ClientError as error: if error.response["Error"]["Message"] != "The provided job name is currently in use.": raise error if not self.ensure_unique_job_name: raise error retry = True self.job_name = f"{self.job_name}-{int(utcnow().timestamp())}" self.log.info("Changed job name to '%s' to avoid collision.", self.job_name) if response["ResponseMetadata"]["HTTPStatusCode"] != 201: raise AirflowException(f"Bedrock model customization job creation failed: {response}") task_description = f"Bedrock model customization job {self.job_name} to complete." 
if self.deferrable: self.log.info("Deferring for %s", task_description) self.defer( trigger=BedrockCustomizeModelCompletedTrigger( job_name=self.job_name, waiter_delay=self.waiter_delay, waiter_max_attempts=self.waiter_max_attempts, aws_conn_id=self.aws_conn_id, ), method_name="execute_complete", ) elif self.wait_for_completion: self.log.info("Waiting for %s", task_description) self.hook.get_waiter("model_customization_job_complete").wait( jobIdentifier=self.job_name, WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts}, ) return response["jobArn"]
BedrockCustomizeModelOperator
python
fastapi__sqlmodel
docs_src/tutorial/fastapi/teams/tutorial001.py
{ "start": 1064, "end": 4877 }
class ____(SQLModel): name: Optional[str] = None secret_name: Optional[str] = None age: Optional[int] = None team_id: Optional[int] = None sqlite_file_name = "database.db" sqlite_url = f"sqlite:///{sqlite_file_name}" connect_args = {"check_same_thread": False} engine = create_engine(sqlite_url, echo=True, connect_args=connect_args) def create_db_and_tables(): SQLModel.metadata.create_all(engine) def get_session(): with Session(engine) as session: yield session app = FastAPI() @app.on_event("startup") def on_startup(): create_db_and_tables() @app.post("/heroes/", response_model=HeroPublic) def create_hero(*, session: Session = Depends(get_session), hero: HeroCreate): db_hero = Hero.model_validate(hero) session.add(db_hero) session.commit() session.refresh(db_hero) return db_hero @app.get("/heroes/", response_model=List[HeroPublic]) def read_heroes( *, session: Session = Depends(get_session), offset: int = 0, limit: int = Query(default=100, le=100), ): heroes = session.exec(select(Hero).offset(offset).limit(limit)).all() return heroes @app.get("/heroes/{hero_id}", response_model=HeroPublic) def read_hero(*, session: Session = Depends(get_session), hero_id: int): hero = session.get(Hero, hero_id) if not hero: raise HTTPException(status_code=404, detail="Hero not found") return hero @app.patch("/heroes/{hero_id}", response_model=HeroPublic) def update_hero( *, session: Session = Depends(get_session), hero_id: int, hero: HeroUpdate ): db_hero = session.get(Hero, hero_id) if not db_hero: raise HTTPException(status_code=404, detail="Hero not found") hero_data = hero.model_dump(exclude_unset=True) db_hero.sqlmodel_update(hero_data) session.add(db_hero) session.commit() session.refresh(db_hero) return db_hero @app.delete("/heroes/{hero_id}") def delete_hero(*, session: Session = Depends(get_session), hero_id: int): hero = session.get(Hero, hero_id) if not hero: raise HTTPException(status_code=404, detail="Hero not found") session.delete(hero) session.commit() return {"ok": 
True} @app.post("/teams/", response_model=TeamPublic) def create_team(*, session: Session = Depends(get_session), team: TeamCreate): db_team = Team.model_validate(team) session.add(db_team) session.commit() session.refresh(db_team) return db_team @app.get("/teams/", response_model=List[TeamPublic]) def read_teams( *, session: Session = Depends(get_session), offset: int = 0, limit: int = Query(default=100, le=100), ): teams = session.exec(select(Team).offset(offset).limit(limit)).all() return teams @app.get("/teams/{team_id}", response_model=TeamPublic) def read_team(*, team_id: int, session: Session = Depends(get_session)): team = session.get(Team, team_id) if not team: raise HTTPException(status_code=404, detail="Team not found") return team @app.patch("/teams/{team_id}", response_model=TeamPublic) def update_team( *, session: Session = Depends(get_session), team_id: int, team: TeamUpdate, ): db_team = session.get(Team, team_id) if not db_team: raise HTTPException(status_code=404, detail="Team not found") team_data = team.model_dump(exclude_unset=True) db_team.sqlmodel_update(team_data) session.add(db_team) session.commit() session.refresh(db_team) return db_team @app.delete("/teams/{team_id}") def delete_team(*, session: Session = Depends(get_session), team_id: int): team = session.get(Team, team_id) if not team: raise HTTPException(status_code=404, detail="Team not found") session.delete(team) session.commit() return {"ok": True}
HeroUpdate
python
RaRe-Technologies__gensim
gensim/test/test_utils.py
{ "start": 4660, "end": 5007 }
class ____(unittest.TestCase): def test_merge_dicts(self): d1 = {"word1": 5, "word2": 1, "word3": 2} d2 = {"word1": 2, "word3": 3, "word4": 10} res_dict = utils.merge_counts(d1, d2) expected_dict = {"word1": 7, "word2": 1, "word3": 5, "word4": 10} self.assertEqual(res_dict, expected_dict)
TestMergeDicts
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pylint/duplicate_bases.py
{ "start": 532, "end": 577 }
class ____(Foo, # 1 Foo # 2 ): pass
Bar
python
django__django
django/db/models/aggregates.py
{ "start": 7963, "end": 8393 }
class ____(Aggregate): function = "ANY_VALUE" name = "AnyValue" arity = 1 window_compatible = False def as_sql(self, compiler, connection, **extra_context): if not connection.features.supports_any_value: raise NotSupportedError( "ANY_VALUE is not supported on this database backend." ) return super().as_sql(compiler, connection, **extra_context)
AnyValue
python
Pylons__pyramid
src/pyramid/interfaces.py
{ "start": 51908, "end": 51993 }
class ____(Interface): """Interface representing a predicate list"""
IPredicateList
python
chroma-core__chroma
chromadb/api/models/AsyncCollection.py
{ "start": 635, "end": 18574 }
class ____(CollectionCommon["AsyncServerAPI"]): async def add( self, ids: OneOrMany[ID], embeddings: Optional[ Union[ OneOrMany[Embedding], OneOrMany[PyEmbedding], ] ] = None, metadatas: Optional[OneOrMany[Metadata]] = None, documents: Optional[OneOrMany[Document]] = None, images: Optional[OneOrMany[Image]] = None, uris: Optional[OneOrMany[URI]] = None, ) -> None: """Add embeddings to the data store. Args: ids: The ids of the embeddings you wish to add embeddings: The embeddings to add. If None, embeddings will be computed based on the documents or images using the embedding_function set for the Collection. Optional. metadatas: The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional. documents: The documents to associate with the embeddings. Optional. images: The images to associate with the embeddings. Optional. uris: The uris of the images to associate with the embeddings. Optional. Returns: None Raises: ValueError: If you don't provide either embeddings or documents ValueError: If the length of ids, embeddings, metadatas, or documents don't match ValueError: If you don't provide an embedding function and don't provide embeddings ValueError: If you provide both embeddings and documents ValueError: If you provide an id that already exists """ add_request = self._validate_and_prepare_add_request( ids=ids, embeddings=embeddings, metadatas=metadatas, documents=documents, images=images, uris=uris, ) await self._client._add( collection_id=self.id, ids=add_request["ids"], embeddings=add_request["embeddings"], metadatas=add_request["metadatas"], documents=add_request["documents"], uris=add_request["uris"], tenant=self.tenant, database=self.database, ) async def count(self) -> int: """The total number of embeddings added to the database Returns: int: The total number of embeddings added to the database """ return await self._client._count( collection_id=self.id, tenant=self.tenant, database=self.database, ) async def get( 
self, ids: Optional[OneOrMany[ID]] = None, where: Optional[Where] = None, limit: Optional[int] = None, offset: Optional[int] = None, where_document: Optional[WhereDocument] = None, include: Include = ["metadatas", "documents"], ) -> GetResult: """Get embeddings and their associate data from the data store. If no ids or where filter is provided returns all embeddings up to limit starting at offset. Args: ids: The ids of the embeddings to get. Optional. where: A Where type dict used to filter results by. E.g. `{"$and": [{"color" : "red"}, {"price": {"$gte": 4.20}}]}`. Optional. limit: The number of documents to return. Optional. offset: The offset to start returning results from. Useful for paging results with limit. Optional. where_document: A WhereDocument type dict used to filter by the documents. E.g. `{"$contains": "hello"}`. Optional. include: A list of what to include in the results. Can contain `"embeddings"`, `"metadatas"`, `"documents"`. Ids are always included. Defaults to `["metadatas", "documents"]`. Optional. Returns: GetResult: A GetResult object containing the results. """ get_request = self._validate_and_prepare_get_request( ids=ids, where=where, where_document=where_document, include=include, ) get_results = await self._client._get( collection_id=self.id, ids=get_request["ids"], where=get_request["where"], where_document=get_request["where_document"], include=get_request["include"], limit=limit, offset=offset, tenant=self.tenant, database=self.database, ) return self._transform_get_response( response=get_results, include=get_request["include"] ) async def peek(self, limit: int = 10) -> GetResult: """Get the first few results in the database up to limit Args: limit: The number of results to return. Returns: GetResult: A GetResult object containing the results. 
""" return self._transform_peek_response( await self._client._peek( collection_id=self.id, n=limit, tenant=self.tenant, database=self.database, ) ) async def query( self, query_embeddings: Optional[ Union[ OneOrMany[Embedding], OneOrMany[PyEmbedding], ] ] = None, query_texts: Optional[OneOrMany[Document]] = None, query_images: Optional[OneOrMany[Image]] = None, query_uris: Optional[OneOrMany[URI]] = None, ids: Optional[OneOrMany[ID]] = None, n_results: int = 10, where: Optional[Where] = None, where_document: Optional[WhereDocument] = None, include: Include = [ "metadatas", "documents", "distances", ], ) -> QueryResult: """Get the n_results nearest neighbor embeddings for provided query_embeddings or query_texts. Args: query_embeddings: The embeddings to get the closes neighbors of. Optional. query_texts: The document texts to get the closes neighbors of. Optional. query_images: The images to get the closes neighbors of. Optional. ids: A subset of ids to search within. Optional. n_results: The number of neighbors to return for each query_embedding or query_texts. Optional. where: A Where type dict used to filter results by. E.g. `{"$and": [{"color" : "red"}, {"price": {"$gte": 4.20}}]}`. Optional. where_document: A WhereDocument type dict used to filter by the documents. E.g. `{"$contains": "hello"}`. Optional. include: A list of what to include in the results. Can contain `"embeddings"`, `"metadatas"`, `"documents"`, `"distances"`. Ids are always included. Defaults to `["metadatas", "documents", "distances"]`. Optional. Returns: QueryResult: A QueryResult object containing the results. 
Raises: ValueError: If you don't provide either query_embeddings, query_texts, or query_images ValueError: If you provide both query_embeddings and query_texts ValueError: If you provide both query_embeddings and query_images ValueError: If you provide both query_texts and query_images """ query_request = self._validate_and_prepare_query_request( query_embeddings=query_embeddings, query_texts=query_texts, query_images=query_images, query_uris=query_uris, ids=ids, n_results=n_results, where=where, where_document=where_document, include=include, ) query_results = await self._client._query( collection_id=self.id, ids=query_request["ids"], query_embeddings=query_request["embeddings"], n_results=query_request["n_results"], where=query_request["where"], where_document=query_request["where_document"], include=query_request["include"], tenant=self.tenant, database=self.database, ) return self._transform_query_response( response=query_results, include=query_request["include"] ) async def modify( self, name: Optional[str] = None, metadata: Optional[CollectionMetadata] = None, configuration: Optional[UpdateCollectionConfiguration] = None, ) -> None: """Modify the collection name or metadata Args: name: The updated name for the collection. Optional. metadata: The updated metadata for the collection. Optional. Returns: None """ self._validate_modify_request(metadata) # Note there is a race condition here where the metadata can be updated # but another thread sees the cached local metadata. # TODO: fixme await self._client._modify( id=self.id, new_name=name, new_metadata=metadata, new_configuration=configuration, tenant=self.tenant, database=self.database, ) self._update_model_after_modify_success(name, metadata, configuration) async def fork( self, new_name: str, ) -> "AsyncCollection": """Fork the current collection under a new name. The returning collection should contain identical data to the current collection. 
This is an experimental API that only works for Hosted Chroma for now. Args: new_name: The name of the new collection. Returns: Collection: A new collection with the specified name and containing identical data to the current collection. """ model = await self._client._fork( collection_id=self.id, new_name=new_name, tenant=self.tenant, database=self.database, ) return AsyncCollection( client=self._client, model=model, embedding_function=self._embedding_function, data_loader=self._data_loader, ) async def search( self, searches: OneOrMany[Search], ) -> SearchResult: """Perform hybrid search on the collection. This is an experimental API that only works for Hosted Chroma for now. Args: searches: A single Search object or a list of Search objects, each containing: - where: Where expression for filtering - rank: Ranking expression for hybrid search (defaults to Val(0.0)) - limit: Limit configuration for pagination (defaults to no limit) - select: Select configuration for keys to return (defaults to empty) Returns: SearchResult: Column-major format response with: - ids: List of result IDs for each search payload - documents: Optional documents for each payload - embeddings: Optional embeddings for each payload - metadatas: Optional metadata for each payload - scores: Optional scores for each payload - select: List of selected keys for each payload Raises: NotImplementedError: For local/segment API implementations Examples: # Using builder pattern with Key constants from chromadb.execution.expression import ( Search, Key, K, Knn, Val ) # Note: K is an alias for Key, so K.DOCUMENT == Key.DOCUMENT search = (Search() .where((K("category") == "science") & (K("score") > 0.5)) .rank(Knn(query=[0.1, 0.2, 0.3]) * 0.8 + Val(0.5) * 0.2) .limit(10, offset=0) .select(K.DOCUMENT, K.SCORE, "title")) # Direct construction from chromadb.execution.expression import ( Search, Eq, And, Gt, Knn, Limit, Select, Key ) search = Search( where=And([Eq("category", "science"), Gt("score", 0.5)]), 
rank=Knn(query=[0.1, 0.2, 0.3]), limit=Limit(offset=0, limit=10), select=Select(keys={Key.DOCUMENT, Key.SCORE, "title"}) ) # Single search result = await collection.search(search) # Multiple searches at once searches = [ Search().where(K("type") == "article").rank(Knn(query=[0.1, 0.2])), Search().where(K("type") == "paper").rank(Knn(query=[0.3, 0.4])) ] results = await collection.search(searches) """ # Convert single search to list for consistent handling searches_list = maybe_cast_one_to_many(searches) if searches_list is None: searches_list = [] # Embed any string queries in Knn objects embedded_searches = [ self._embed_search_string_queries(search) for search in searches_list ] return await self._client._search( collection_id=self.id, searches=cast(List[Search], embedded_searches), tenant=self.tenant, database=self.database, ) async def update( self, ids: OneOrMany[ID], embeddings: Optional[ Union[ OneOrMany[Embedding], OneOrMany[PyEmbedding], ] ] = None, metadatas: Optional[OneOrMany[Metadata]] = None, documents: Optional[OneOrMany[Document]] = None, images: Optional[OneOrMany[Image]] = None, uris: Optional[OneOrMany[URI]] = None, ) -> None: """Update the embeddings, metadatas or documents for provided ids. Args: ids: The ids of the embeddings to update embeddings: The embeddings to update. If None, embeddings will be computed based on the documents or images using the embedding_function set for the Collection. Optional. metadatas: The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional. documents: The documents to associate with the embeddings. Optional. images: The images to associate with the embeddings. Optional. 
Returns: None """ update_request = self._validate_and_prepare_update_request( ids=ids, embeddings=embeddings, metadatas=metadatas, documents=documents, images=images, uris=uris, ) await self._client._update( collection_id=self.id, ids=update_request["ids"], embeddings=update_request["embeddings"], metadatas=update_request["metadatas"], documents=update_request["documents"], uris=update_request["uris"], tenant=self.tenant, database=self.database, ) async def upsert( self, ids: OneOrMany[ID], embeddings: Optional[ Union[ OneOrMany[Embedding], OneOrMany[PyEmbedding], ] ] = None, metadatas: Optional[OneOrMany[Metadata]] = None, documents: Optional[OneOrMany[Document]] = None, images: Optional[OneOrMany[Image]] = None, uris: Optional[OneOrMany[URI]] = None, ) -> None: """Update the embeddings, metadatas or documents for provided ids, or create them if they don't exist. Args: ids: The ids of the embeddings to update embeddings: The embeddings to add. If None, embeddings will be computed based on the documents using the embedding_function set for the Collection. Optional. metadatas: The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional. documents: The documents to associate with the embeddings. Optional. 
Returns: None """ upsert_request = self._validate_and_prepare_upsert_request( ids=ids, embeddings=embeddings, metadatas=metadatas, documents=documents, images=images, uris=uris, ) await self._client._upsert( collection_id=self.id, ids=upsert_request["ids"], embeddings=upsert_request["embeddings"], metadatas=upsert_request["metadatas"], documents=upsert_request["documents"], uris=upsert_request["uris"], tenant=self.tenant, database=self.database, ) async def delete( self, ids: Optional[IDs] = None, where: Optional[Where] = None, where_document: Optional[WhereDocument] = None, ) -> None: """Delete the embeddings based on ids and/or a where filter Args: ids: The ids of the embeddings to delete where: A Where type dict used to filter the delection by. E.g. `{"$and": [{"color" : "red"}, {"price": {"$gte": 4.20}}]}`. Optional. where_document: A WhereDocument type dict used to filter the deletion by the document content. E.g. `{"$contains": "hello"}`. Optional. Returns: None Raises: ValueError: If you don't provide either ids, where, or where_document """ delete_request = self._validate_and_prepare_delete_request( ids, where, where_document ) await self._client._delete( collection_id=self.id, ids=delete_request["ids"], where=delete_request["where"], where_document=delete_request["where_document"], tenant=self.tenant, database=self.database, )
AsyncCollection
python
pytorch__pytorch
torch/nn/modules/_functions.py
{ "start": 8432, "end": 11832 }
class ____(Function): @staticmethod # pyrefly: ignore [bad-override] def forward(ctx, input, size, alpha=1e-4, beta=0.75, k=1): ctx.size = size ctx.alpha = alpha ctx.beta = beta ctx.k = k ctx.scale = None if input.dim() != 4: raise ValueError( f"CrossMapLRN2d: Expected input to be 4D, got {input.dim()}D instead." ) ctx.scale = ctx.scale or input.new() output = input.new() channels = input.size(1) output.resize_as_(input) ctx.scale.resize_as_(input) # use output storage as temporary buffer input_square = output torch.pow(input, 2, out=input_square) pre_pad = int((ctx.size - 1) / 2 + 1) pre_pad_crop = min(pre_pad, channels) scale_first = ctx.scale.select(1, 0) scale_first.zero_() # compute first feature map normalization for c in range(pre_pad_crop): scale_first.add_(input_square.select(1, c)) # reuse computations for next feature maps normalization # by adding the next feature map and removing the previous for c in range(1, channels): scale_previous = ctx.scale.select(1, c - 1) scale_current = ctx.scale.select(1, c) scale_current.copy_(scale_previous) if c < channels - pre_pad + 1: square_next = input_square.select(1, c + pre_pad - 1) scale_current.add_(square_next, alpha=1) if c > pre_pad: square_previous = input_square.select(1, c - pre_pad) scale_current.add_(square_previous, alpha=-1) ctx.scale.mul_(ctx.alpha / ctx.size).add_(ctx.k) torch.pow(ctx.scale, -ctx.beta, out=output) output.mul_(input) ctx.save_for_backward(input, output) return output @staticmethod # pyrefly: ignore [bad-override] def backward(ctx, grad_output): input, output = ctx.saved_tensors grad_input = grad_output.new() batch_size = input.size(0) channels = input.size(1) input_height = input.size(2) input_width = input.size(3) paddded_ratio = input.new(channels + ctx.size - 1, input_height, input_width) accum_ratio = input.new(input_height, input_width) cache_ratio_value = 2 * ctx.alpha * ctx.beta / ctx.size inversePrePad = int(ctx.size - (ctx.size - 1) / 2) grad_input.resize_as_(input) 
torch.pow(ctx.scale, -ctx.beta, out=grad_input).mul_(grad_output) paddded_ratio.zero_() padded_ratio_center = paddded_ratio.narrow(0, inversePrePad, channels) for n in range(batch_size): torch.mul(grad_output[n], output[n], out=padded_ratio_center) padded_ratio_center.div_(ctx.scale[n]) torch.sum( paddded_ratio.narrow(0, 0, ctx.size - 1), 0, keepdim=False, out=accum_ratio, ) for c in range(channels): accum_ratio.add_(paddded_ratio[c + ctx.size - 1]) grad_input[n][c].addcmul_( input[n][c], accum_ratio, value=-cache_ratio_value ) accum_ratio.add_(paddded_ratio[c], alpha=-1) return grad_input, None, None, None, None
CrossMapLRN2d
python
sphinx-doc__sphinx
sphinx/ext/intersphinx/_shared.py
{ "start": 4008, "end": 5490 }
class ____: """Inventory adapter for environment""" def __init__(self, env: BuildEnvironment) -> None: self.env = env if not hasattr(env, 'intersphinx_cache'): # initial storage when fetching inventories before processing self.env.intersphinx_cache = {} # type: ignore[attr-defined] self.env.intersphinx_inventory = {} # type: ignore[attr-defined] self.env.intersphinx_named_inventory = {} # type: ignore[attr-defined] @property def cache(self) -> dict[InventoryURI, InventoryCacheEntry]: """Intersphinx cache. - Key is the URI of the remote inventory. - Element one is the key given in the Sphinx :confval:`intersphinx_mapping`. - Element two is a time value for cache invalidation, an integer. - Element three is the loaded remote inventory of type :class:`!Inventory`. """ return self.env.intersphinx_cache # type: ignore[attr-defined] @property def main_inventory(self) -> Inventory: return self.env.intersphinx_inventory # type: ignore[attr-defined] @property def named_inventory(self) -> dict[InventoryName, Inventory]: return self.env.intersphinx_named_inventory # type: ignore[attr-defined] def clear(self) -> None: self.env.intersphinx_inventory.clear() # type: ignore[attr-defined] self.env.intersphinx_named_inventory.clear() # type: ignore[attr-defined]
InventoryAdapter
python
astropy__astropy
astropy/io/fits/hdu/base.py
{ "start": 3592, "end": 27274 }
class ____: """Base class for all HDU (header data unit) classes.""" _hdu_registry = set() # This HDU type is part of the FITS standard _standard = True # Byte to use for padding out blocks _padding_byte = "\x00" _default_name = "" # _header uses a descriptor to delay the loading of the fits.Header object # until it is necessary. _header = _DelayedHeader() def __init__(self, data=None, header=None, *args, **kwargs): if header is None: header = Header() self._header = header self._header_str = None self._file = None self._buffer = None self._header_offset = None self._data_offset = None self._data_size = None # This internal variable is used to track whether the data attribute # still points to the same data array as when the HDU was originally # created (this does not track whether the data is actually the same # content-wise) self._data_replaced = False self._data_needs_rescale = False self._new = True self._output_checksum = False if "DATASUM" in self._header and "CHECKSUM" not in self._header: self._output_checksum = "datasum" elif "CHECKSUM" in self._header: self._output_checksum = True def __init_subclass__(cls, **kwargs): # Add the same data.deleter to all HDUs with a data property. # It's unfortunate, but there's otherwise no straightforward way # that a property can inherit setters/deleters of the property of the # same name on base classes. data_prop = cls.__dict__.get("data", None) if isinstance(data_prop, (lazyproperty, property)) and data_prop.fdel is None: # Don't do anything if the class has already explicitly # set the deleter for its data property def data(self): # The deleter if self._file is not None and self._data_loaded: # sys.getrefcount is CPython specific and not on PyPy. 
has_getrefcount = hasattr(sys, "getrefcount") if has_getrefcount: data_refcount = sys.getrefcount(self.data) # Manually delete *now* so that FITS_rec.__del__ # cleanup can happen if applicable del self.__dict__["data"] # Don't even do this unless the *only* reference to the # .data array was the one we're deleting by deleting # this attribute; if any other references to the array # are hanging around (perhaps the user ran ``data = # hdu.data``) don't even consider this: if has_getrefcount and data_refcount == 2: self._file._maybe_close_mmap() cls.data = data_prop.deleter(data) super().__init_subclass__(**kwargs) @property def header(self): return self._header @header.setter def header(self, value): self._header = value @property def name(self): # Convert the value to a string to be flexible in some pathological # cases (see ticket #96) return str(self._header.get("EXTNAME", self._default_name)) @name.setter def name(self, value): if not isinstance(value, str): raise TypeError("'name' attribute must be a string") if not conf.extension_name_case_sensitive: value = value.upper() if "EXTNAME" in self._header: self._header["EXTNAME"] = value else: self._header["EXTNAME"] = (value, "extension name") @property def ver(self): return self._header.get("EXTVER", 1) @ver.setter def ver(self, value): if not _is_int(value): raise TypeError("'ver' attribute must be an integer") if "EXTVER" in self._header: self._header["EXTVER"] = value else: self._header["EXTVER"] = (value, "extension value") @property def level(self): return self._header.get("EXTLEVEL", 1) @level.setter def level(self, value): if not _is_int(value): raise TypeError("'level' attribute must be an integer") if "EXTLEVEL" in self._header: self._header["EXTLEVEL"] = value else: self._header["EXTLEVEL"] = (value, "extension level") @property def is_image(self): return self.name == "PRIMARY" or ( "XTENSION" in self._header and ( self._header["XTENSION"] == "IMAGE" or ( self._header["XTENSION"] == "BINTABLE" and 
"ZIMAGE" in self._header and self._header["ZIMAGE"] is True ) ) ) @property def _data_loaded(self): return "data" in self.__dict__ and self.data is not DELAYED @property def _has_data(self): return self._data_loaded and self.data is not None @classmethod def register_hdu(cls, hducls): cls._hdu_registry.add(hducls) @classmethod def unregister_hdu(cls, hducls): if hducls in cls._hdu_registry: cls._hdu_registry.remove(hducls) @classmethod def match_header(cls, header): raise NotImplementedError @classmethod def fromstring(cls, data, checksum=False, ignore_missing_end=False, **kwargs): """ Creates a new HDU object of the appropriate type from a string containing the HDU's entire header and, optionally, its data. Note: When creating a new HDU from a string without a backing file object, the data of that HDU may be read-only. It depends on whether the underlying string was an immutable Python str/bytes object, or some kind of read-write memory buffer such as a `memoryview`. Parameters ---------- data : str, bytes, memoryview, ndarray A byte string containing the HDU's header and data. checksum : bool, optional Check the HDU's checksum and/or datasum. ignore_missing_end : bool, optional Ignore a missing end card in the header data. Note that without the end card the end of the header may be ambiguous and resulted in a corrupt HDU. In this case the assumption is that the first 2880 block that does not begin with valid FITS header data is the beginning of the data. **kwargs : optional May consist of additional keyword arguments specific to an HDU type--these correspond to keywords recognized by the constructors of different HDU classes such as `PrimaryHDU`, `ImageHDU`, or `BinTableHDU`. Any unrecognized keyword arguments are simply ignored. """ return cls._readfrom_internal( data, checksum=checksum, ignore_missing_end=ignore_missing_end, **kwargs ) @classmethod def readfrom(cls, fileobj, checksum=False, ignore_missing_end=False, **kwargs): """ Read the HDU from a file. 
Normally an HDU should be opened with :func:`open` which reads the entire HDU list in a FITS file. But this method is still provided for symmetry with :func:`writeto`. Parameters ---------- fileobj : file-like Input FITS file. The file's seek pointer is assumed to be at the beginning of the HDU. checksum : bool If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card values (when present in the HDU header) match the header and data of all HDU's in the file. ignore_missing_end : bool Do not issue an exception when opening a file that is missing an ``END`` card in the last header. """ # TODO: Figure out a way to make it possible for the _File # constructor to be a noop if the argument is already a _File if not isinstance(fileobj, _File): fileobj = _File(fileobj) hdu = cls._readfrom_internal( fileobj, checksum=checksum, ignore_missing_end=ignore_missing_end, **kwargs ) # If the checksum had to be checked the data may have already been read # from the file, in which case we don't want to seek relative fileobj.seek(hdu._data_offset + hdu._data_size, os.SEEK_SET) return hdu def writeto(self, name, output_verify="exception", overwrite=False, checksum=False): """ Write the HDU to a new file. This is a convenience method to provide a user easier output interface if only one HDU needs to be written to a file. Parameters ---------- name : path-like or file-like Output FITS file. If the file object is already opened, it must be opened in a writeable mode. output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"`` (e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info. overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. 
checksum : bool When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the header of the HDU when written to the file. Notes ----- gzip, zip, bzip2 and lzma compression algorithms are natively supported. Compression mode is determined from the filename extension ('.gz', '.zip', '.bz2' or '.xz' respectively). It is also possible to pass a compressed file object, e.g. `gzip.GzipFile`. """ from .hdulist import HDUList hdulist = HDUList([self]) hdulist.writeto(name, output_verify, overwrite=overwrite, checksum=checksum) @classmethod def _from_data(cls, data, header, **kwargs): """ Instantiate the HDU object after guessing the HDU class from the FITS Header. """ klass = _hdu_class_from_header(cls, header) return klass(data=data, header=header, **kwargs) @classmethod def _readfrom_internal( cls, data, header=None, checksum=False, ignore_missing_end=False, **kwargs ): """ Provides the bulk of the internal implementation for readfrom and fromstring. For some special cases, supports using a header that was already created, and just using the input data for the actual array data. """ hdu_buffer = None hdu_fileobj = None header_offset = 0 if isinstance(data, _File): if header is None: header_offset = data.tell() try: # First we try to read the header with the fast parser # from _BasicHeader, which will read only the standard # 8 character keywords to get the structural keywords # that are needed to build the HDU object. header_str, header = _BasicHeader.fromfile(data) except Exception: # If the fast header parsing failed, then fallback to # the classic Header parser, which has better support # and reporting for the various issues that can be found # in the wild. 
data.seek(header_offset) header = Header.fromfile(data, endcard=not ignore_missing_end) hdu_fileobj = data data_offset = data.tell() # *after* reading the header else: try: # Test that the given object supports the buffer interface by # ensuring an ndarray can be created from it np.ndarray((), dtype="ubyte", buffer=data) except TypeError: raise TypeError( f"The provided object {data!r} does not contain an underlying " "memory buffer. fromstring() requires an object that " "supports the buffer interface such as bytes, buffer, " "memoryview, ndarray, etc. This restriction is to ensure " "that efficient access to the array/table data is possible." ) if header is None: def block_iter(nbytes): idx = 0 while idx < len(data): yield data[idx : idx + nbytes] idx += nbytes header_str, header = Header._from_blocks( block_iter, True, "", not ignore_missing_end, True ) if len(data) > len(header_str): hdu_buffer = data elif data: hdu_buffer = data header_offset = 0 data_offset = len(header_str) # Determine the appropriate arguments to pass to the constructor from # self._kwargs. self._kwargs contains any number of optional arguments # that may or may not be valid depending on the HDU type cls = _hdu_class_from_header(cls, header) sig = signature(cls.__init__) new_kwargs = kwargs.copy() if Parameter.VAR_KEYWORD not in (x.kind for x in sig.parameters.values()): # If __init__ accepts arbitrary keyword arguments, then we can go # ahead and pass all keyword arguments; otherwise we need to delete # any that are invalid for key in kwargs: if key not in sig.parameters: del new_kwargs[key] try: hdu = cls(data=DELAYED, header=header, **new_kwargs) except TypeError: # This may happen because some HDU class (e.g. GroupsHDU) wants # to set a keyword on the header, which is not possible with the # _BasicHeader. While HDU classes should not need to modify the # header in general, sometimes this is needed to fix it. 
So in # this case we build a full Header and try again to create the # HDU object. if isinstance(header, _BasicHeader): header = Header.fromstring(header_str) hdu = cls(data=DELAYED, header=header, **new_kwargs) else: raise # One of these may be None, depending on whether the data came from a # file or a string buffer--later this will be further abstracted hdu._file = hdu_fileobj hdu._buffer = hdu_buffer hdu._header_offset = header_offset # beginning of the header area hdu._data_offset = data_offset # beginning of the data area # data area size, including padding size = hdu.size hdu._data_size = size + _pad_length(size) if isinstance(hdu._header, _BasicHeader): # Delete the temporary _BasicHeader. # We need to do this before an eventual checksum computation, # since it needs to modify temporarily the header # # The header string is stored in the HDU._header_str attribute, # so that it can be used directly when we need to create the # classic Header object, without having to parse again the file. del hdu._header hdu._header_str = header_str # Checksums are not checked on invalid HDU types if checksum and checksum != "remove" and isinstance(hdu, _ValidHDU): hdu._verify_checksum_datasum() return hdu def _get_raw_data(self, shape, code, offset): """ Return raw array from either the HDU's memory buffer or underlying file. 
""" if isinstance(shape, numbers.Integral): shape = (shape,) if self._buffer: return np.ndarray(shape, dtype=code, buffer=self._buffer, offset=offset) elif self._file: return self._file.readarray(offset=offset, dtype=code, shape=shape) else: return None def _postwriteto(self): pass def _writeheader(self, fileobj): offset = 0 with suppress(AttributeError, OSError): offset = fileobj.tell() self._header.tofile(fileobj) try: size = fileobj.tell() - offset except (AttributeError, OSError): size = len(str(self._header)) return offset, size def _writedata(self, fileobj): size = 0 fileobj.flush() try: offset = fileobj.tell() except (AttributeError, OSError): offset = 0 if self._data_loaded or self._data_needs_rescale: if self.data is not None: size += self._writedata_internal(fileobj) # pad the FITS data block # to avoid a bug in the lustre filesystem client, don't # write zero-byte objects if size > 0 and _pad_length(size) > 0: padding = _pad_length(size) * self._padding_byte # TODO: Not that this is ever likely, but if for some odd # reason _padding_byte is > 0x80 this will fail; but really if # somebody's custom fits format is doing that, they're doing it # wrong and should be reprimanded harshly. fileobj.write(padding.encode("ascii")) size += len(padding) else: # The data has not been modified or does not need need to be # rescaled, so it can be copied, unmodified, directly from an # existing file or buffer size += self._writedata_direct_copy(fileobj) # flush, to make sure the content is written fileobj.flush() # return both the location and the size of the data area return offset, size def _writedata_internal(self, fileobj): """ The beginning and end of most _writedata() implementations are the same, but the details of writing the data array itself can vary between HDU types, so that should be implemented in this method. Should return the size in bytes of the data written. 
""" fileobj.writearray(self.data) return self.data.size * self.data.itemsize def _writedata_direct_copy(self, fileobj): """Copies the data directly from one file/buffer to the new file. For now this is handled by loading the raw data from the existing data (including any padding) via a memory map or from an already in-memory buffer and using Numpy's existing file-writing facilities to write to the new file. If this proves too slow a more direct approach may be used. """ raw = self._get_raw_data(self._data_size, "ubyte", self._data_offset) if raw is not None: fileobj.writearray(raw) return raw.nbytes else: return 0 # TODO: This is the start of moving HDU writing out of the _File class; # Though right now this is an internal private method (though still used by # HDUList, eventually the plan is to have this be moved into writeto() # somehow... def _writeto(self, fileobj, inplace=False, copy=False): try: dirname = os.path.dirname(fileobj._file.name) except (AttributeError, TypeError): dirname = None with _free_space_check(self, dirname): self._writeto_internal(fileobj, inplace, copy) def _writeto_internal(self, fileobj, inplace, copy): # For now fileobj is assumed to be a _File object if not inplace or self._new: header_offset, _ = self._writeheader(fileobj) data_offset, data_size = self._writedata(fileobj) # Set the various data location attributes on newly-written HDUs if self._new: self._header_offset = header_offset self._data_offset = data_offset self._data_size = data_size return hdrloc = self._header_offset hdrsize = self._data_offset - self._header_offset datloc = self._data_offset datsize = self._data_size if self._header._modified: # Seek to the original header location in the file self._file.seek(hdrloc) # This should update hdrloc with he header location in the new file hdrloc, hdrsize = self._writeheader(fileobj) # If the data is to be written below with self._writedata, that # will also properly update the data location; but it should be # updated here 
too datloc = hdrloc + hdrsize elif copy: # Seek to the original header location in the file self._file.seek(hdrloc) # Before writing, update the hdrloc with the current file position, # which is the hdrloc for the new file hdrloc = fileobj.tell() fileobj.write(self._file.read(hdrsize)) # The header size is unchanged, but the data location may be # different from before depending on if previous HDUs were resized datloc = fileobj.tell() if self._data_loaded: if self.data is not None: # Seek through the array's bases for an memmap'd array; we # can't rely on the _File object to give us this info since # the user may have replaced the previous mmap'd array if copy or self._data_replaced: # Of course, if we're copying the data to a new file # we don't care about flushing the original mmap; # instead just read it into the new file array_mmap = None else: array_mmap = _get_array_mmap(self.data) if array_mmap is not None: array_mmap.flush() else: self._file.seek(self._data_offset) datloc, datsize = self._writedata(fileobj) elif copy: datsize = self._writedata_direct_copy(fileobj) self._header_offset = hdrloc self._data_offset = datloc self._data_size = datsize self._data_replaced = False def _close(self, closed=True): # If the data was mmap'd, close the underlying mmap (this will # prevent any future access to the .data attribute if there are # not other references to it; if there are other references then # it is up to the user to clean those up if closed and self._data_loaded and _get_array_mmap(self.data) is not None: del self.data # For backwards-compatibility, though nobody should have # been using this directly: _AllHDU = _BaseHDU # For convenience... # TODO: register_hdu could be made into a class decorator which would be pretty # cool, but only once 2.6 support is dropped. register_hdu = _BaseHDU.register_hdu unregister_hdu = _BaseHDU.unregister_hdu
_BaseHDU
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/partial.py
{ "start": 3269, "end": 3564 }
class ____: @PartialDecorator def __init__(self, x: str, y: str) -> None: self.x = x self.y = y def dunder_call_partial_constructor(x: str, y: str) -> C: # pyre-ignore: Type[PartialConstructor] is not a function. return PartialConstructor(x, y)
PartialConstructor
python
pypa__setuptools
setuptools/discovery.py
{ "start": 8603, "end": 21190 }
class ____: """Fill-in metadata and options that can be automatically derived (from other metadata/options, the file system or conventions) """ def __init__(self, distribution: Distribution) -> None: self.dist = distribution self._called = False self._disabled = False self._skip_ext_modules = False def _disable(self): """Internal API to disable automatic discovery""" self._disabled = True def _ignore_ext_modules(self): """Internal API to disregard ext_modules. Normally auto-discovery would not be triggered if ``ext_modules`` are set (this is done for backward compatibility with existing packages relying on ``setup.py`` or ``setup.cfg``). However, ``setuptools`` can call this function to ignore given ``ext_modules`` and proceed with the auto-discovery if ``packages`` and ``py_modules`` are not given (e.g. when using pyproject.toml metadata). """ self._skip_ext_modules = True @property def _root_dir(self) -> StrPath: # The best is to wait until `src_root` is set in dist, before using _root_dir. return self.dist.src_root or os.curdir @property def _package_dir(self) -> dict[str, str]: if self.dist.package_dir is None: return {} return self.dist.package_dir def __call__( self, force: bool = False, name: bool = True, ignore_ext_modules: bool = False ) -> None: """Automatically discover missing configuration fields and modifies the given ``distribution`` object in-place. Note that by default this will only have an effect the first time the ``ConfigDiscovery`` object is called. To repeatedly invoke automatic discovery (e.g. when the project directory changes), please use ``force=True`` (or create a new ``ConfigDiscovery`` instance). 
""" if force is False and (self._called or self._disabled): # Avoid overhead of multiple calls return self._analyse_package_layout(ignore_ext_modules) if name: self.analyse_name() # depends on ``packages`` and ``py_modules`` self._called = True def _explicitly_specified(self, ignore_ext_modules: bool) -> bool: """``True`` if the user has specified some form of package/module listing""" ignore_ext_modules = ignore_ext_modules or self._skip_ext_modules ext_modules = not (self.dist.ext_modules is None or ignore_ext_modules) return ( self.dist.packages is not None or self.dist.py_modules is not None or ext_modules or hasattr(self.dist, "configuration") and self.dist.configuration # ^ Some projects use numpy.distutils.misc_util.Configuration ) def _analyse_package_layout(self, ignore_ext_modules: bool) -> bool: if self._explicitly_specified(ignore_ext_modules): # For backward compatibility, just try to find modules/packages # when nothing is given return True log.debug( "No `packages` or `py_modules` configuration, performing " "automatic discovery." ) return ( self._analyse_explicit_layout() or self._analyse_src_layout() # flat-layout is the trickiest for discovery so it should be last or self._analyse_flat_layout() ) def _analyse_explicit_layout(self) -> bool: """The user can explicitly give a package layout via ``package_dir``""" package_dir = self._package_dir.copy() # don't modify directly package_dir.pop("", None) # This falls under the "src-layout" umbrella root_dir = self._root_dir if not package_dir: return False log.debug(f"`explicit-layout` detected -- analysing {package_dir}") pkgs = chain_iter( _find_packages_within(pkg, os.path.join(root_dir, parent_dir)) for pkg, parent_dir in package_dir.items() ) self.dist.packages = list(pkgs) log.debug(f"discovered packages -- {self.dist.packages}") return True def _analyse_src_layout(self) -> bool: """Try to find all packages or modules under the ``src`` directory (or anything pointed by ``package_dir[""]``). 
The "src-layout" is relatively safe for automatic discovery. We assume that everything within is meant to be included in the distribution. If ``package_dir[""]`` is not given, but the ``src`` directory exists, this function will set ``package_dir[""] = "src"``. """ package_dir = self._package_dir src_dir = os.path.join(self._root_dir, package_dir.get("", "src")) if not os.path.isdir(src_dir): return False log.debug(f"`src-layout` detected -- analysing {src_dir}") package_dir.setdefault("", os.path.basename(src_dir)) self.dist.package_dir = package_dir # persist eventual modifications self.dist.packages = PEP420PackageFinder.find(src_dir) self.dist.py_modules = ModuleFinder.find(src_dir) log.debug(f"discovered packages -- {self.dist.packages}") log.debug(f"discovered py_modules -- {self.dist.py_modules}") return True def _analyse_flat_layout(self) -> bool: """Try to find all packages and modules under the project root. Since the ``flat-layout`` is more dangerous in terms of accidentally including extra files/directories, this function is more conservative and will raise an error if multiple packages or modules are found. This assumes that multi-package dists are uncommon and refuse to support that use case in order to be able to prevent unintended errors. 
""" log.debug(f"`flat-layout` detected -- analysing {self._root_dir}") return self._analyse_flat_packages() or self._analyse_flat_modules() def _analyse_flat_packages(self) -> bool: self.dist.packages = FlatLayoutPackageFinder.find(self._root_dir) top_level = remove_nested_packages(remove_stubs(self.dist.packages)) log.debug(f"discovered packages -- {self.dist.packages}") self._ensure_no_accidental_inclusion(top_level, "packages") return bool(top_level) def _analyse_flat_modules(self) -> bool: self.dist.py_modules = FlatLayoutModuleFinder.find(self._root_dir) log.debug(f"discovered py_modules -- {self.dist.py_modules}") self._ensure_no_accidental_inclusion(self.dist.py_modules, "modules") return bool(self.dist.py_modules) def _ensure_no_accidental_inclusion(self, detected: list[str], kind: str): if len(detected) > 1: from inspect import cleandoc from setuptools.errors import PackageDiscoveryError msg = f"""Multiple top-level {kind} discovered in a flat-layout: {detected}. To avoid accidental inclusion of unwanted files or directories, setuptools will not proceed with this build. If you are trying to create a single distribution with multiple {kind} on purpose, you should not rely on automatic discovery. Instead, consider the following options: 1. set up custom discovery (`find` directive with `include` or `exclude`) 2. use a `src-layout` 3. explicitly set `py_modules` or `packages` with a list of names To find more information, look for "package discovery" on setuptools docs. """ raise PackageDiscoveryError(cleandoc(msg)) def analyse_name(self) -> None: """The packages/modules are the essential contribution of the author. Therefore the name of the distribution can be derived from them. 
""" if self.dist.metadata.name or self.dist.name: # get_name() is not reliable (can return "UNKNOWN") return log.debug("No `name` configuration, performing automatic discovery") name = ( self._find_name_single_package_or_module() or self._find_name_from_packages() ) if name: self.dist.metadata.name = name def _find_name_single_package_or_module(self) -> str | None: """Exactly one module or package""" for field in ('packages', 'py_modules'): items = getattr(self.dist, field, None) or [] if items and len(items) == 1: log.debug(f"Single module/package detected, name: {items[0]}") return items[0] return None def _find_name_from_packages(self) -> str | None: """Try to find the root package that is not a PEP 420 namespace""" if not self.dist.packages: return None packages = remove_stubs(sorted(self.dist.packages, key=len)) package_dir = self.dist.package_dir or {} parent_pkg = find_parent_package(packages, package_dir, self._root_dir) if parent_pkg: log.debug(f"Common parent package detected, name: {parent_pkg}") return parent_pkg log.warn("No parent package detected, impossible to derive `name`") return None def remove_nested_packages(packages: list[str]) -> list[str]: """Remove nested packages from a list of packages. >>> remove_nested_packages(["a", "a.b1", "a.b2", "a.b1.c1"]) ['a'] >>> remove_nested_packages(["a", "b", "c.d", "c.d.e.f", "g.h", "a.a1"]) ['a', 'b', 'c.d', 'g.h'] """ pkgs = sorted(packages, key=len) top_level = pkgs[:] size = len(pkgs) for i, name in enumerate(reversed(pkgs)): if any(name.startswith(f"{other}.") for other in top_level): top_level.pop(size - i - 1) return top_level def remove_stubs(packages: list[str]) -> list[str]: """Remove type stubs (:pep:`561`) from a list of packages. 
>>> remove_stubs(["a", "a.b", "a-stubs", "a-stubs.b.c", "b", "c-stubs"]) ['a', 'a.b', 'b'] """ return [pkg for pkg in packages if not pkg.split(".")[0].endswith("-stubs")] def find_parent_package( packages: list[str], package_dir: Mapping[str, str], root_dir: StrPath ) -> str | None: """Find the parent package that is not a namespace.""" packages = sorted(packages, key=len) common_ancestors = [] for i, name in enumerate(packages): if not all(n.startswith(f"{name}.") for n in packages[i + 1 :]): # Since packages are sorted by length, this condition is able # to find a list of all common ancestors. # When there is divergence (e.g. multiple root packages) # the list will be empty break common_ancestors.append(name) for name in common_ancestors: pkg_path = find_package_path(name, package_dir, root_dir) init = os.path.join(pkg_path, "__init__.py") if os.path.isfile(init): return name return None def find_package_path( name: str, package_dir: Mapping[str, str], root_dir: StrPath ) -> str: """Given a package name, return the path where it should be found on disk, considering the ``package_dir`` option. 
>>> path = find_package_path("my.pkg", {"": "root/is/nested"}, ".") >>> path.replace(os.sep, "/") './root/is/nested/my/pkg' >>> path = find_package_path("my.pkg", {"my": "root/is/nested"}, ".") >>> path.replace(os.sep, "/") './root/is/nested/pkg' >>> path = find_package_path("my.pkg", {"my.pkg": "root/is/nested"}, ".") >>> path.replace(os.sep, "/") './root/is/nested' >>> path = find_package_path("other.pkg", {"my.pkg": "root/is/nested"}, ".") >>> path.replace(os.sep, "/") './other/pkg' """ parts = name.split(".") for i in range(len(parts), 0, -1): # Look backwards, the most specific package_dir first partial_name = ".".join(parts[:i]) if partial_name in package_dir: parent = package_dir[partial_name] return os.path.join(root_dir, parent, *parts[i:]) parent = package_dir.get("") or "" return os.path.join(root_dir, *parent.split("/"), *parts) def construct_package_dir(packages: list[str], package_path: StrPath) -> dict[str, str]: parent_pkgs = remove_nested_packages(packages) prefix = Path(package_path).parts return {pkg: "/".join([*prefix, *pkg.split(".")]) for pkg in parent_pkgs}
ConfigDiscovery
python
falconry__falcon
tests/test_wsgi.py
{ "start": 443, "end": 5761 }
class ____: def test_get(self, requests_lite, server_base_url): resp = requests_lite.get(server_base_url) assert resp.status_code == 200 assert resp.text == '127.0.0.1' def test_get_file(self, requests_lite, server_base_url): # NOTE(vytas): There was a breaking change in the behaviour of # ntpath.isabs() in CPython 3.13, let us verify basic file serving. resp = requests_lite.get(server_base_url + 'tests/test_wsgi.py') assert resp.status_code == 200 assert 'class TestWSGIServer:' in resp.text def test_put(self, requests_lite, server_base_url): body = '{}' resp = requests_lite.put(server_base_url, data=body) assert resp.status_code == 200 assert resp.text == '{}' def test_head_405(self, requests_lite, server_base_url): body = '{}' resp = requests_lite.head(server_base_url, data=body) assert resp.status_code == 405 def test_post(self, requests_lite, server_base_url): body = testing.rand_string(_SIZE_1_KB // 2, _SIZE_1_KB) resp = requests_lite.post(server_base_url, data=body) assert resp.status_code == 200 assert resp.text == body def test_post_invalid_content_length(self, requests_lite, server_base_url): headers = {'Content-Length': 'invalid'} resp = requests_lite.post(server_base_url, headers=headers) assert resp.status_code == 400 def test_post_read_bounded_stream(self, requests_lite, server_base_url): body = testing.rand_string(_SIZE_1_KB // 2, _SIZE_1_KB) resp = requests_lite.post(server_base_url + 'bucket', data=body) assert resp.status_code == 200 assert resp.text == body def test_post_read_bounded_stream_no_body(self, requests_lite, server_base_url): resp = requests_lite.post(server_base_url + 'bucket') assert not resp.text def _run_server(stop_event, host, port): class Things: def on_get(self, req, resp): resp.text = req.remote_addr def on_post(self, req, resp): # NOTE(kgriffs): Elsewhere we just use req.bounded_stream, so # here we read the stream directly to test that use case. 
resp.text = req.stream.read(req.content_length or 0) def on_put(self, req, resp): # NOTE(kgriffs): Test that reading past the end does # not hang. req_body = ( req.bounded_stream.read(1) for i in range(req.content_length + 1) ) resp.text = b''.join(req_body) class Bucket: def on_post(self, req, resp): # NOTE(kgriffs): This would normally block when # Content-Length is 0 and the WSGI input object. # BoundedStream fixes that. This is just a sanity check to # make sure req.bounded_stream is what we think it is; # BoundedStream itself has its own unit tests in # test_request_body.py resp.text = req.bounded_stream.read() # NOTE(kgriffs): No need to also test the same read() for # req.stream, since we already asserted they are the same # objects. api = application = falcon.App() api.add_route('/', Things()) api.add_route('/bucket', Bucket()) api.add_static_route('/tests', _HERE) print(f'wsgiref server is starting on {host}:{port}...') server = wsgiref.simple_server.make_server(host, port, application) while not stop_event.is_set(): server.handle_request() print('wsgiref server is exiting (stop event set)...') def _start_server(port, base_url, requests_lite): stop_event = multiprocessing.Event() process = multiprocessing.Process( target=_run_server, # NOTE(kgriffs): Pass these explicitly since if multiprocessing is # using the 'spawn' start method, we can't depend on closures. args=(stop_event, _SERVER_HOST, port), daemon=True, ) process.start() # NOTE(vytas): Give the server some time to start. 
start_time = time.time() while time.time() - start_time < _STARTUP_TIMEOUT: try: requests_lite.get(base_url, timeout=1.0) except OSError: time.sleep(0.2) else: break else: if process.is_alive(): pytest.fail('server {base_url} is not responding to requests') else: return None return process, stop_event @pytest.fixture(scope='module') def server_base_url(requests_lite): for attempt in range(_START_ATTEMPTS): server_port = testing.get_unused_port() base_url = f'http://{_SERVER_HOST}:{server_port}/' if server_details := _start_server(server_port, base_url, requests_lite): break else: pytest.fail(f'could not start a wsgiref server in {_START_ATTEMPTS} attempts.') yield base_url process, stop_event = server_details stop_event.set() # NOTE(kgriffs): Pump the request handler loop in case execution # made it to the next server.handle_request() before we sent the # event. try: requests_lite.get(base_url, timeout=1.0) except OSError: pass # Process already exited process.join()
TestWSGIServer
python
PyCQA__pylint
pylint/checkers/typecheck.py
{ "start": 2495, "end": 29681 }
class ____: pass VERSION_COMPATIBLE_OVERLOAD_SENTINEL = VERSION_COMPATIBLE_OVERLOAD() def _is_owner_ignored( owner: SuccessfulInferenceResult, attrname: str | None, ignored_classes: Iterable[str], ignored_modules: Iterable[str], ) -> bool: """Check if the given owner should be ignored. This will verify if the owner's module is in *ignored_modules* or the owner's module fully qualified name is in *ignored_modules* or if the *ignored_modules* contains a pattern which catches the fully qualified name of the module. Also, similar checks are done for the owner itself, if its name matches any name from the *ignored_classes* or if its qualified name can be found in *ignored_classes*. """ if is_module_ignored(owner.root().qname(), ignored_modules): return True # Match against ignored classes. ignored_classes = set(ignored_classes) qname = owner.qname() if hasattr(owner, "qname") else "" return any(ignore in (attrname, qname) for ignore in ignored_classes) @singledispatch def _node_names(node: SuccessfulInferenceResult) -> Iterable[str]: if not hasattr(node, "locals"): return [] return node.locals.keys() # type: ignore[no-any-return] @_node_names.register(nodes.ClassDef) @_node_names.register(astroid.Instance) def _(node: nodes.ClassDef | bases.Instance) -> Iterable[str]: values = itertools.chain(node.instance_attrs.keys(), node.locals.keys()) try: mro = node.mro()[1:] except (NotImplementedError, TypeError, astroid.MroError): mro = node.ancestors() other_values = [value for cls in mro for value in _node_names(cls)] return itertools.chain(values, other_values) def _string_distance(seq1: str, seq2: str, seq1_length: int, seq2_length: int) -> int: if not seq1_length: return seq2_length if not seq2_length: return seq1_length row = [*list(range(1, seq2_length + 1)), 0] for seq1_index, seq1_char in enumerate(seq1): last_row = row row = [0] * seq2_length + [seq1_index + 1] for seq2_index, seq2_char in enumerate(seq2): row[seq2_index] = min( last_row[seq2_index] + 1, 
row[seq2_index - 1] + 1, last_row[seq2_index - 1] + (seq1_char != seq2_char), ) return row[seq2_length - 1] @lru_cache(maxsize=256) def _similar_names( owner: SuccessfulInferenceResult, attrname: str | None, distance_threshold: int, max_choices: int, ) -> list[str]: """Given an owner and a name, try to find similar names. The similar names are searched given a distance metric and only a given number of choices will be returned. """ possible_names: list[tuple[str, int]] = [] names = _node_names(owner) attr_str = attrname or "" attr_len = len(attr_str) for name in names: if name == attrname: continue name_len = len(name) min_distance = abs(attr_len - name_len) if min_distance > distance_threshold: continue distance = _string_distance(attr_str, name, attr_len, name_len) if distance <= distance_threshold: possible_names.append((name, distance)) # Now get back the values with a minimum, up to the given # limit or choices. picked = [ name for (name, _) in heapq.nsmallest( max_choices, possible_names, key=operator.itemgetter(1) ) ] return sorted(picked) MSGS: dict[str, MessageDefinitionTuple] = { "E1101": ( "%s %r has no %r member%s", "no-member", "Used when a variable is accessed for a nonexistent member.", {"old_names": [("E1103", "maybe-no-member")]}, ), "I1101": ( "%s %r has no %r member%s, but source is unavailable. Consider " "adding this module to extension-pkg-allow-list if you want " "to perform analysis based on run-time introspection of living objects.", "c-extension-no-member", "Used when a variable is accessed for non-existent member of C " "extension. 
Due to unavailability of source static analysis is impossible, " "but it may be performed by introspecting living objects in run-time.", ), "E1102": ( "%s is not callable", "not-callable", "Used when an object being called has been inferred to a non " "callable object.", ), "E1111": ( "Assigning result of a function call, where the function has no return", "assignment-from-no-return", "Used when an assignment is done on a function call but the " "inferred function doesn't return anything.", ), "E1120": ( "No value for argument %s in %s call", "no-value-for-parameter", "Used when a function call passes too few arguments.", ), "E1121": ( "Too many positional arguments for %s call", "too-many-function-args", "Used when a function call passes too many positional arguments.", ), "E1123": ( "Unexpected keyword argument %r in %s call", "unexpected-keyword-arg", "Used when a function call passes a keyword argument that " "doesn't correspond to one of the function's parameter names.", ), "E1124": ( "Argument %r passed by position and keyword in %s call", "redundant-keyword-arg", "Used when a function call would result in assigning multiple " "values to a function parameter, one value from a positional " "argument and one from a keyword argument.", ), "E1125": ( "Missing mandatory keyword argument %r in %s call", "missing-kwoa", ( "Used when a function call does not pass a mandatory" " keyword-only argument." ), ), "E1126": ( "Sequence index is not an int, slice, or instance with __index__", "invalid-sequence-index", "Used when a sequence type is indexed with an invalid type. 
" "Valid types are ints, slices, and objects with an __index__ " "method.", ), "E1127": ( "Slice index is not an int, None, or instance with __index__", "invalid-slice-index", "Used when a slice index is not an integer, None, or an object " "with an __index__ method.", ), "E1128": ( "Assigning result of a function call, where the function returns None", "assignment-from-none", "Used when an assignment is done on a function call but the " "inferred function returns nothing but None.", {"old_names": [("W1111", "old-assignment-from-none")]}, ), "E1129": ( "Context manager '%s' doesn't implement __enter__ and __exit__.", "not-context-manager", "Used when an instance in a with statement doesn't implement " "the context manager protocol(__enter__/__exit__).", ), "E1145": ( "Context manager '%s' is async and should be used with 'async with'.", "async-context-manager-with-regular-with", "Used when an async context manager is used with a regular 'with' statement " "instead of 'async with'.", ), "E1130": ( "%s", "invalid-unary-operand-type", "Emitted when a unary operand is used on an object which does not " "support this type of operation.", ), "E1131": ( "%s", "unsupported-binary-operation", "Emitted when a binary arithmetic operation between two " "operands is not supported.", ), "E1132": ( "Got multiple values for keyword argument %r in function call", "repeated-keyword", "Emitted when a function call got multiple values for a keyword.", ), "E1135": ( "Value '%s' doesn't support membership test", "unsupported-membership-test", "Emitted when an instance in membership test expression doesn't " "implement membership protocol (__contains__/__iter__/__getitem__).", ), "E1136": ( "Value '%s' is unsubscriptable", "unsubscriptable-object", "Emitted when a subscripted value doesn't support subscription " "(i.e. 
doesn't define __getitem__ method or __class_getitem__ for a class).", ), "E1137": ( "%r does not support item assignment", "unsupported-assignment-operation", "Emitted when an object does not support item assignment " "(i.e. doesn't define __setitem__ method).", ), "E1138": ( "%r does not support item deletion", "unsupported-delete-operation", "Emitted when an object does not support item deletion " "(i.e. doesn't define __delitem__ method).", ), "E1139": ( "Invalid metaclass %r used", "invalid-metaclass", "Emitted whenever we can detect that a class is using, " "as a metaclass, something which might be invalid for using as " "a metaclass.", ), "E1141": ( "Unpacking a dictionary in iteration without calling .items()", "dict-iter-missing-items", "Emitted when trying to iterate through a dict without calling .items()", ), "E1142": ( "'await' should be used within an async function", "await-outside-async", "Emitted when await is used outside an async function.", ), "E1143": ( "'%s' is unhashable and can't be used as a %s in a %s", "unhashable-member", "Emitted when a dict key or set member is not hashable " "(i.e. 
doesn't define __hash__ method).", {"old_names": [("E1140", "unhashable-dict-key")]}, ), "E1144": ( "Slice step cannot be 0", "invalid-slice-step", "Used when a slice step is 0 and the object doesn't implement " "a custom __getitem__ method.", ), "W1113": ( "Keyword argument before variable positional arguments list " "in the definition of %s function", "keyword-arg-before-vararg", "When defining a keyword argument before variable positional arguments, one can " "end up in having multiple values passed for the aforementioned parameter in " "case the method is called with keyword arguments.", ), "W1114": ( "Positional arguments appear to be out of order", "arguments-out-of-order", "Emitted when the caller's argument names fully match the parameter " "names in the function signature but do not have the same order.", ), "W1115": ( "Non-string value assigned to __name__", "non-str-assignment-to-dunder-name", "Emitted when a non-string value is assigned to __name__", ), "W1116": ( "Second argument of isinstance is not a type", "isinstance-second-argument-not-valid-type", "Emitted when the second argument of an isinstance call is not a type.", ), "W1117": ( "%r will be included in %r since a positional-only parameter with this name already exists", "kwarg-superseded-by-positional-arg", "Emitted when a function is called with a keyword argument that has the " "same name as a positional-only parameter and the function contains a " "keyword variadic parameter dict.", ), } # builtin sequence types in Python 2 and 3. SEQUENCE_TYPES = { "str", "unicode", "list", "tuple", "bytearray", "xrange", "range", "bytes", "memoryview", } def _emit_no_member( node: nodes.Attribute | nodes.AssignAttr | nodes.DelAttr, owner: InferenceResult, owner_name: str | None, mixin_class_rgx: Pattern[str], ignored_mixins: bool = True, ignored_none: bool = True, ) -> bool: """Try to see if no-member should be emitted for the given owner. 
The following cases are ignored: * the owner is a function and it has decorators. * the owner is an instance and it has __getattr__, __getattribute__ implemented * the module is explicitly ignored from no-member checks * the owner is a class and the name can be found in its metaclass. * The access node is protected by an except handler, which handles AttributeError, Exception or bare except. * The node is guarded behind and `IF` or `IFExp` node """ # pylint: disable = too-many-return-statements, too-many-branches if node_ignores_exception(node, AttributeError): return False if ignored_none and isinstance(owner, nodes.Const) and owner.value is None: return False if is_super(owner) or getattr(owner, "type", None) == "metaclass": return False if owner_name and ignored_mixins and mixin_class_rgx.match(owner_name): return False if isinstance(owner, nodes.FunctionDef) and ( owner.decorators or owner.is_abstract() ): return False if isinstance(owner, (astroid.Instance, nodes.ClassDef)): # Issue #2565: Don't ignore enums, as they have a `__getattr__` but it's not # invoked at this point. try: metaclass = owner.metaclass() except astroid.MroError: pass else: # Renamed in Python 3.10 to `EnumType` if metaclass and metaclass.qname() in {"enum.EnumMeta", "enum.EnumType"}: return not _enum_has_attribute(owner, node) if owner.has_dynamic_getattr(): return False if not has_known_bases(owner): return False # Exclude typed annotations, since these might actually exist # at some point during the runtime of the program. if utils.is_attribute_typed_annotation(owner, node.attrname): return False if isinstance(owner, objects.Super): # Verify if we are dealing with an invalid Super object. # If it is invalid, then there's no point in checking that # it has the required attribute. Also, don't fail if the # MRO is invalid. 
try: owner.super_mro() except (astroid.MroError, astroid.SuperError): return False if not all(has_known_bases(base) for base in owner.type.mro()): return False if isinstance(owner, nodes.Module): try: owner.getattr("__getattr__") return False except astroid.NotFoundError: pass if owner_name and node.attrname.startswith("_" + owner_name): # Test if an attribute has been mangled ('private' attribute) unmangled_name = node.attrname.split("_" + owner_name)[-1] try: if owner.getattr(unmangled_name, context=None) is not None: return False except astroid.NotFoundError: return True # Don't emit no-member if guarded behind `IF` or `IFExp` # * Walk up recursively until if statement is found. # * Check if condition can be inferred as `Const`, # would evaluate as `False`, # and whether the node is part of the `body`. # * Continue checking until scope of node is reached. scope: nodes.NodeNG = node.scope() node_origin: nodes.NodeNG = node parent: nodes.NodeNG = node.parent while parent != scope: if isinstance(parent, (nodes.If, nodes.IfExp)): inferred = safe_infer(parent.test) if ( # pylint: disable=too-many-boolean-expressions isinstance(inferred, nodes.Const) and inferred.bool_value() is False and ( (isinstance(parent, nodes.If) and node_origin in parent.body) or (isinstance(parent, nodes.IfExp) and node_origin == parent.body) ) ): return False node_origin, parent = parent, parent.parent return True def _get_all_attribute_assignments( node: nodes.FunctionDef, name: str | None = None ) -> set[str]: attributes: set[str] = set() for child in node.nodes_of_class((nodes.Assign, nodes.AnnAssign)): targets = [] match child: case nodes.Assign(): targets = child.targets case nodes.AnnAssign(): targets = [child.target] for assign_target in targets: match assign_target: case nodes.Tuple(): targets.extend(assign_target.elts) continue case nodes.AssignAttr(expr=nodes.Name(name=n)) if ( n is None or n == name ): attributes.add(assign_target.attrname) return attributes def 
_enum_has_attribute( owner: astroid.Instance | nodes.ClassDef, node: nodes.Attribute ) -> bool: if isinstance(owner, astroid.Instance): enum_def = next( (b.parent for b in owner.bases if isinstance(b.parent, nodes.ClassDef)), None, ) if enum_def is None: # We don't inherit from anything, so try to find the parent # class definition and roll with that enum_def = node while enum_def is not None and not isinstance(enum_def, nodes.ClassDef): enum_def = enum_def.parent # If this blows, something is clearly wrong assert enum_def is not None, "enum_def unexpectedly None" else: enum_def = owner # Find __new__ and __init__ dunder_new = next((m for m in enum_def.methods() if m.name == "__new__"), None) dunder_init = next((m for m in enum_def.methods() if m.name == "__init__"), None) enum_attributes: set[str] = set() # Find attributes defined in __new__ if dunder_new: # Get the object returned in __new__ returned_obj_name = next( (c.value for c in dunder_new.get_children() if isinstance(c, nodes.Return)), None, ) if isinstance(returned_obj_name, nodes.Name): # Find all attribute assignments to the returned object enum_attributes |= _get_all_attribute_assignments( dunder_new, returned_obj_name.name ) # Find attributes defined in __init__ if dunder_init and dunder_init.body and dunder_init.args: # Grab the name referring to `self` from the function def enum_attributes |= _get_all_attribute_assignments( dunder_init, dunder_init.args.arguments[0].name ) return node.attrname in enum_attributes def _determine_callable( callable_obj: nodes.NodeNG, ) -> tuple[CallableObjects, int, str]: # TODO: The typing of the second return variable is actually Literal[0,1] # We need typing on nodes.NodeNG.implicit_parameters for this # TODO: The typing of the third return variable can be narrowed to a Literal # We need typing on nodes.NodeNG.type for this # Ordering is important, since BoundMethod is a subclass of UnboundMethod, # and Function inherits Lambda. 
parameters = 0 if hasattr(callable_obj, "implicit_parameters"): parameters = callable_obj.implicit_parameters() match callable_obj: case bases.BoundMethod(): # Bound methods have an extra implicit 'self' argument. return callable_obj, parameters, callable_obj.type case bases.UnboundMethod(): return callable_obj, parameters, "unbound method" case nodes.FunctionDef(): return callable_obj, parameters, callable_obj.type case nodes.Lambda(): return callable_obj, parameters, "lambda" case nodes.ClassDef(): # Class instantiation, lookup __new__ instead. # If we only find object.__new__, we can safely check __init__ # instead. If __new__ belongs to builtins, then we look # again for __init__ in the locals, since we won't have # argument information for the builtin __new__ function. try: # Use the last definition of __new__. new = callable_obj.local_attr("__new__")[-1] except astroid.NotFoundError: new = None from_object = new and new.parent.scope().name == "object" from_builtins = new and new.root().name in sys.builtin_module_names if not new or from_object or from_builtins: try: # Use the last definition of __init__. callable_obj = callable_obj.local_attr("__init__")[-1] except astroid.NotFoundError as e: raise ValueError from e else: callable_obj = new if not isinstance(callable_obj, nodes.FunctionDef): raise ValueError # both have an extra implicit 'cls'/'self' argument. 
return callable_obj, parameters, "constructor" raise ValueError def _has_parent_of_type( node: nodes.Call, node_type: nodes.Keyword | nodes.Starred, statement: _base_nodes.Statement, ) -> bool: """Check if the given node has a parent of the given type.""" parent = node.parent while not isinstance(parent, node_type) and statement.parent_of(parent): parent = parent.parent return isinstance(parent, node_type) def _no_context_variadic_keywords(node: nodes.Call, scope: nodes.Lambda) -> bool: statement = node.statement() variadics = [] if ( isinstance(scope, nodes.Lambda) and not isinstance(scope, nodes.FunctionDef) ) or isinstance(statement, nodes.With): variadics = list(node.keywords or []) + node.kwargs elif isinstance(statement, (nodes.Return, nodes.Expr, nodes.Assign)) and isinstance( statement.value, nodes.Call ): call = statement.value variadics = list(call.keywords or []) + call.kwargs return _no_context_variadic(node, scope.args.kwarg, nodes.Keyword, variadics) def _no_context_variadic_positional(node: nodes.Call, scope: nodes.Lambda) -> bool: variadics = node.starargs + node.kwargs return _no_context_variadic(node, scope.args.vararg, nodes.Starred, variadics) def _no_context_variadic( node: nodes.Call, variadic_name: str | None, variadic_type: nodes.Keyword | nodes.Starred, variadics: list[nodes.Keyword | nodes.Starred], ) -> bool: """Verify if the given call node has variadic nodes without context. This is a workaround for handling cases of nested call functions which don't have the specific call context at hand. Variadic arguments (variable positional arguments and variable keyword arguments) are inferred, inherently wrong, by astroid as a Tuple, respectively a Dict with empty elements. This can lead pylint to believe that a function call receives too few arguments. 
""" scope = node.scope() is_in_lambda_scope = not isinstance(scope, nodes.FunctionDef) and isinstance( scope, nodes.Lambda ) statement = node.statement() for name in statement.nodes_of_class(nodes.Name): if name.name != variadic_name: continue inferred = safe_infer(name) if isinstance(inferred, (nodes.List, nodes.Tuple)): length = len(inferred.elts) elif isinstance(inferred, nodes.Dict): length = len(inferred.items) else: continue if is_in_lambda_scope and isinstance(inferred.parent, nodes.Arguments): # The statement of the variadic will be the assignment itself, # so we need to go the lambda instead inferred_statement = inferred.parent.parent else: inferred_statement = inferred.statement() if not length and isinstance( inferred_statement, (nodes.Lambda, nodes.FunctionDef) ): is_in_starred_context = _has_parent_of_type(node, variadic_type, statement) used_as_starred_argument = any( variadic.value == name or variadic.value.parent_of(name) for variadic in variadics ) if is_in_starred_context or used_as_starred_argument: return True return False def _is_invalid_metaclass(metaclass: nodes.ClassDef) -> bool: try: mro = metaclass.mro() except (astroid.DuplicateBasesError, astroid.InconsistentMroError): return True return not any(is_builtin_object(cls) and cls.name == "type" for cls in mro) def _infer_from_metaclass_constructor( cls: nodes.ClassDef, func: nodes.FunctionDef ) -> InferenceResult | None: """Try to infer what the given *func* constructor is building. :param nodes.FunctionDef func: A metaclass constructor. Metaclass definitions can be functions, which should accept three arguments, the name of the class, the bases of the class and the attributes. The function could return anything, but usually it should be a proper metaclass. :param nodes.ClassDef cls: The class for which the *func* parameter should generate a metaclass. :returns: The class generated by the function or None, if we couldn't infer it. 
:rtype: nodes.ClassDef """ context = astroid.context.InferenceContext() class_bases = nodes.List() class_bases.postinit(elts=cls.bases) attrs = nodes.Dict( lineno=0, col_offset=0, parent=None, end_lineno=0, end_col_offset=0 ) local_names = [(name, values[-1]) for name, values in cls.locals.items()] attrs.postinit(local_names) builder_args = nodes.Tuple() builder_args.postinit([cls.name, class_bases, attrs]) context.callcontext = astroid.context.CallContext(builder_args) try: inferred = next(func.infer_call_result(func, context), None) except astroid.InferenceError: return None return inferred or None def _is_c_extension(module_node: InferenceResult) -> bool: return ( isinstance(module_node, nodes.Module) and not astroid.modutils.is_stdlib_module(module_node.name) and not module_node.fully_defined() ) def _is_invalid_isinstance_type(arg: nodes.NodeNG) -> bool: # Return True if we are sure that arg is not a type if isinstance(arg, nodes.BinOp) and arg.op == "|": return any( _is_invalid_isinstance_type(elt) and not is_none(elt) for elt in (arg.left, arg.right) ) match inferred := utils.safe_infer(arg): case _ if not inferred: # Cannot infer it so skip it. return False case nodes.Tuple(): return any(_is_invalid_isinstance_type(elt) for elt in inferred.elts) case nodes.ClassDef(): return False case astroid.Instance() if inferred.qname() == BUILTIN_TUPLE: return False case bases.UnionType(): return any( _is_invalid_isinstance_type(elt) and not is_none(elt) for elt in (inferred.left, inferred.right) ) return True
VERSION_COMPATIBLE_OVERLOAD
python
more-itertools__more-itertools
tests/test_recipes.py
{ "start": 46070, "end": 46234 }
class ____(TestCase): def test_basic(self): self.assertTrue( all(list(mi.loops(n)) == [None] * n for n in range(-10, 10)) )
LoopsTests
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/constructorCallable2.py
{ "start": 2562, "end": 2847 }
class ____(Generic[T]): def __new__(cls, x: T, y: list[T]) -> Self: return super().__new__(cls) r8 = accepts_callable(Class8) reveal_type(r8, expected_text="(x: T@Class8, y: list[T@Class8]) -> Class8[T@Class8]") reveal_type(r8("", [""]), expected_text="Class8[str]")
Class8
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_B.py
{ "start": 12515, "end": 13716 }
class ____(Benchmark): r""" BoxBetts objective function. The BoxBetts global optimization problem is a multimodal minimization problem defined as follows .. math:: f_{\text{BoxBetts}}(x) = \sum_{i=1}^k g(x_i)^2 Where, in this exercise: .. math:: g(x) = e^{-0.1i x_1} - e^{-0.1i x_2} - x_3\left[e^{-0.1i} - e^{-i}\right] And :math:`k = 10`. Here, :math:`x_1 \in [0.9, 1.2], x_2 \in [9, 11.2], x_3 \in [0.9, 1.2]`. *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10, 1]` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ def __init__(self, dimensions=3): Benchmark.__init__(self, dimensions) self._bounds = ([0.9, 1.2], [9.0, 11.2], [0.9, 1.2]) self.global_optimum = [[1.0, 10.0, 1.0]] self.fglob = 0.0 def fun(self, x, *args): self.nfev += 1 i = arange(1, 11) g = (exp(-0.1 * i * x[0]) - exp(-0.1 * i * x[1]) - (exp(-0.1 * i) - exp(-i)) * x[2]) return sum(g**2)
BoxBetts
python
graphql-python__graphene
graphene/relay/tests/test_mutation.py
{ "start": 247, "end": 291 }
class ____: shared = String()
SharedFields
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_us_county_name.py
{ "start": 1789, "end": 4450 }
class ____(ColumnMapExpectation): """Expect values in this column to be valid us county names. See https://github.com/yaph/geonamescache for more information. """ # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "valid_county_names": [ "Baldwin County", "Bibb County", "Monroe County", "Orange County", "Converse County", ], "invalid_county_names": [ "", "1234", "anarchy", "failing test", "Silly Country", ], }, "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "valid_county_names"}, "out": {"success": True}, }, { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "invalid_county_names"}, "out": {"success": False}, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.valid_us_county_name" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly",) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "maturity": "experimental", # "experimental", "beta", or "production" "tags": [ "hackathon", "typed-entities", ], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@luismdiaz01", "@derekma73", # Don't forget to add your github handle here! ], "requirements": ["geonamescache"], } if __name__ == "__main__": ExpectColumnValuesToBeValidUSCountyName().print_diagnostic_checklist()
ExpectColumnValuesToBeValidUSCountyName
python
wandb__wandb
wandb/apis/public/automations.py
{ "start": 531, "end": 2095 }
class ____(RelayPaginator["ProjectTriggersFields", "Automation"]): """A lazy iterator of `Automation` objects. <!-- lazydoc-ignore-class: internal --> """ QUERY: Document # Must be set per-instance last_response: Connection[ProjectTriggersFields] | None def __init__( self, client: _Client, variables: Mapping[str, Any], per_page: int = 50, *, _query: Document, # internal use only, but required ): self.QUERY = _query super().__init__(client, variables=variables, per_page=per_page) @override def _update_response(self) -> None: """Fetch the raw response data for the current page.""" from wandb._pydantic import Connection from wandb.automations._generated import ProjectTriggersFields data = self.client.execute(self.QUERY, variable_values=self.variables) try: conn_data = data["scope"]["projects"] conn = Connection[ProjectTriggersFields].model_validate(conn_data) self.last_response = conn except (LookupError, AttributeError, ValidationError) as e: raise ValueError("Unexpected response data") from e @override def _convert(self, node: ProjectTriggersFields) -> Iterator[Automation]: from wandb.automations import Automation return (Automation.model_validate(obj) for obj in node.triggers) @override def convert_objects(self) -> Iterator[Automation]: return chain.from_iterable(super().convert_objects())
Automations
python
neetcode-gh__leetcode
python/0745-prefix-and-suffix-search.py
{ "start": 171, "end": 1522 }
class ____: def __init__(self, words: List[str]): # Initialize root of the Trie self.root = TrieNode() # For each word, we create combined prefix-suffix keys for index, word in enumerate(words): # Insert all combinations of the form prefix{suffix into the Trie for i in range(len(word) + 1): for j in range(len(word) + 1): # Create the key as suffix + '{' + prefix key = word[i:] + '{' + word[:j] cur = self.root for c in key: if c not in cur.children: cur.children[c] = TrieNode() cur = cur.children[c] cur.word = index # Store the index of the word at this node def f(self, pref: str, suff: str) -> int: # Combine suffix and prefix to search in Trie key = suff + '{' + pref cur = self.root for c in key: if c not in cur.children: return -1 # If combination doesn't exist, return -1 cur = cur.children[c] return cur.word # Return the largest index found for the valid combination # Your WordFilter object will be instantiated and called as such: # obj = WordFilter(words) # param_1 = obj.f(pref,suff)
WordFilter
python
tensorflow__tensorflow
tensorflow/python/eager/polymorphic_function/function_spec_test.py
{ "start": 1323, "end": 21697 }
class ____(test.TestCase, parameterized.TestCase): @parameterized.product( ({ 'input_signature': None, 'type_constraint': (None, None, None) }, { 'input_signature': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)) }, { 'input_signature': ([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': (trace_type.from_value([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], trace_type.InternalTracingContext(is_legacy_signature=True)), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)) }), decorator=(dummy_tf_decorator, transparent_decorator), ) def test_required_only(self, input_signature, type_constraint, decorator): @decorator def foo(x, y, z): # pylint: disable=unused-argument pass spec = function_type_utils.FunctionSpec.from_function_and_signature( foo, input_signature) self.assertEqual( tuple(spec.fullargspec), (['x', 'y', 'z'], None, None, None, [], None, {})) self.assertEqual(spec.input_signature, input_signature) self.assertEqual(spec.default_values, {}) self.assertEqual( spec.function_type, function_type_lib.FunctionType([ function_type_lib.Parameter( 'x', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, False, type_constraint[0]), function_type_lib.Parameter( 'y', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, False, type_constraint[1]), function_type_lib.Parameter( 'z', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, False, type_constraint[2]) ])) @parameterized.product( ({ 'input_signature': None, 'type_constraint': (None, None, None) }, { 'input_signature': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': 
(tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)) }, { 'input_signature': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None), trace_type.from_value(3)) }, { 'input_signature': (tensor_spec.TensorSpec(shape=None),), 'type_constraint': (tensor_spec.TensorSpec(shape=None), trace_type.from_value(2), trace_type.from_value(3)) }, { 'input_signature': ([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': (trace_type.from_value([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], trace_type.InternalTracingContext(is_legacy_signature=True)), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)) }), decorator=(dummy_tf_decorator, transparent_decorator), ) def test_optional_only(self, input_signature, type_constraint, decorator): @decorator def foo(x=1, y=2, z=3): # pylint: disable=unused-argument pass spec = function_type_utils.FunctionSpec.from_function_and_signature( foo, input_signature) self.assertEqual( tuple(spec.fullargspec), (['x', 'y', 'z'], None, None, (1, 2, 3), [], None, {})) self.assertEqual(spec.input_signature, input_signature) self.assertEqual(spec.default_values, {'x': 1, 'y': 2, 'z': 3}) self.assertEqual( spec.function_type, function_type_lib.FunctionType([ function_type_lib.Parameter( 'x', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, True, type_constraint[0]), function_type_lib.Parameter( 'y', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, True, type_constraint[1]), function_type_lib.Parameter( 'z', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, True, type_constraint[2]) ])) @parameterized.product( ({ 'input_signature': None, 'type_constraint': (None, None, None) }, { 'input_signature': 
(tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)) }, { 'input_signature': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None), trace_type.from_value(3)) }, { 'input_signature': ([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': (trace_type.from_value([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], trace_type.InternalTracingContext(is_legacy_signature=True)), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)) }), decorator=(dummy_tf_decorator, transparent_decorator), ) def test_required_and_optional(self, input_signature, type_constraint, decorator): @decorator def foo(x, y, z=3): # pylint: disable=unused-argument pass spec = function_type_utils.FunctionSpec.from_function_and_signature( foo, input_signature) self.assertEqual( tuple(spec.fullargspec), (['x', 'y', 'z'], None, None, (3,), [], None, {})) self.assertEqual(spec.input_signature, input_signature) self.assertEqual(spec.default_values, {'z': 3}) self.assertEqual( spec.function_type, function_type_lib.FunctionType([ function_type_lib.Parameter( 'x', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, False, type_constraint[0]), function_type_lib.Parameter( 'y', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, False, type_constraint[1]), function_type_lib.Parameter( 'z', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, True, type_constraint[2]) ])) @parameterized.product( ({ 'input_signature': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': 
(tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)) }, { 'input_signature': ([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': (trace_type.from_value([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], trace_type.InternalTracingContext(is_legacy_signature=True)), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)) }), decorator=(dummy_tf_decorator, transparent_decorator), ) def test_varargs(self, input_signature, type_constraint, decorator): @decorator def foo(*my_var_args): # pylint: disable=unused-argument pass spec = function_type_utils.FunctionSpec.from_function_and_signature( foo, input_signature) self.assertEqual( tuple(spec.fullargspec), (['my_var_args_0', 'my_var_args_1', 'my_var_args_2' ], None, None, None, [], None, {})) self.assertEqual(spec.input_signature, input_signature) self.assertEqual(spec.default_values, {}) self.assertEqual( spec.function_type, function_type_lib.FunctionType([ function_type_lib.Parameter( 'my_var_args_0', function_type_lib.Parameter.POSITIONAL_ONLY, False, type_constraint[0]), function_type_lib.Parameter( 'my_var_args_1', function_type_lib.Parameter.POSITIONAL_ONLY, False, type_constraint[1]), function_type_lib.Parameter( 'my_var_args_2', function_type_lib.Parameter.POSITIONAL_ONLY, False, type_constraint[2]) ])) @parameterized.product( ({ 'input_signature': None, 'type_constraint': (None, None, None) }, { 'input_signature': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)) }, { 'input_signature': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': (tensor_spec.TensorSpec(shape=None), 
tensor_spec.TensorSpec(shape=None), trace_type.from_value(3)) }, { 'input_signature': ([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': (trace_type.from_value([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], trace_type.InternalTracingContext(is_legacy_signature=True)), tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)) }), decorator=(dummy_tf_decorator, transparent_decorator), ) def test_kwonly(self, input_signature, type_constraint, decorator): @decorator def foo(x, y, *, z=3): # pylint: disable=unused-argument pass spec = function_type_utils.FunctionSpec.from_function_and_signature( foo, input_signature) self.assertEqual( tuple(spec.fullargspec), (['x', 'y'], None, None, None, ['z'], { 'z': 3 }, {})) self.assertEqual(spec.input_signature, input_signature) self.assertEqual(spec.default_values, {'z': 3}) self.assertEqual( spec.function_type, function_type_lib.FunctionType([ function_type_lib.Parameter( 'x', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, False, type_constraint[0]), function_type_lib.Parameter( 'y', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, False, type_constraint[1]), function_type_lib.Parameter( 'z', function_type_lib.Parameter.KEYWORD_ONLY, True, type_constraint[2]) ])) @parameterized.product( ({ 'input_signature': None, 'type_constraint': (None, None, None) }, { 'input_signature': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': (None, tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)) }, { 'input_signature': (tensor_spec.TensorSpec(shape=None),), 'type_constraint': (None, tensor_spec.TensorSpec(shape=None), trace_type.from_value(1)) }, { 'input_signature': ([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], tensor_spec.TensorSpec(shape=None)), 'type_constraint': (None, 
trace_type.from_value([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], trace_type.InternalTracingContext(is_legacy_signature=True)), tensor_spec.TensorSpec(shape=None)) }), decorator=(dummy_tf_decorator, transparent_decorator), ) def test_method_bound_internal( self, input_signature, type_constraint, decorator ): def testing_decorator(func): spec = function_type_utils.FunctionSpec.from_function_and_signature( func, input_signature ) self.assertEqual( tuple(spec.fullargspec), (['self', 'x', 'y'], None, None, (1,), [], None, {}), ) self.assertEqual(spec.default_values, {'y': 1}) self.assertEqual( spec.function_type, function_type_lib.FunctionType([ function_type_lib.Parameter( 'self', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, False, type_constraint[0], ), function_type_lib.Parameter( 'x', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, False, type_constraint[1], ), function_type_lib.Parameter( 'y', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, True, type_constraint[2], ), ]), ) return func class MyClass: @testing_decorator def foo(self, x, y=1): pass MyClass().foo(1) @parameterized.product( ({ 'input_signature': None, 'type_constraint': (None, None, None) }, { 'input_signature': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)) }, { 'input_signature': (tensor_spec.TensorSpec(shape=None),), 'type_constraint': (tensor_spec.TensorSpec(shape=None), trace_type.from_value(1)) }, { 'input_signature': ([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], tensor_spec.TensorSpec(shape=None)), 'type_constraint': (trace_type.from_value([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], trace_type.InternalTracingContext(is_legacy_signature=True)), tensor_spec.TensorSpec(shape=None)) }), decorator=(dummy_tf_decorator, transparent_decorator), ) def test_method_bound_external( 
self, input_signature, type_constraint, decorator ): class MyClass: @decorator def foo(self, x, y=1): pass spec = function_type_utils.FunctionSpec.from_function_and_signature( MyClass().foo, input_signature) self.assertEqual( tuple(spec.fullargspec), (['x', 'y'], None, None, (1,), [], None, {}), ) self.assertEqual(spec.default_values, {'y': 1}) self.assertEqual( spec.function_type, function_type_lib.FunctionType([ function_type_lib.Parameter( 'x', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, False, type_constraint[0]), function_type_lib.Parameter( 'y', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, True, type_constraint[1]) ])) @parameterized.product( ({ 'input_signature': None, 'type_constraint': (None, None, None) }, { 'input_signature': (tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)), 'type_constraint': (None, tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None)) }, { 'input_signature': (tensor_spec.TensorSpec(shape=None),), 'type_constraint': (None, tensor_spec.TensorSpec(shape=None), trace_type.from_value(1)) }, { 'input_signature': ([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], tensor_spec.TensorSpec(shape=None)), 'type_constraint': (None, trace_type.from_value([ tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None) ], trace_type.InternalTracingContext(is_legacy_signature=True)), tensor_spec.TensorSpec(shape=None)) }), decorator=(dummy_tf_decorator, transparent_decorator), ) def test_method_unbound(self, input_signature, type_constraint, decorator): class MyClass: @decorator def foo(self, x, y=1): pass spec = function_type_utils.FunctionSpec.from_function_and_signature( MyClass.foo, input_signature) self.assertEqual( tuple(spec.fullargspec), (['self', 'x', 'y'], None, None, (1,), [], None, {})) self.assertEqual(spec.input_signature, input_signature) self.assertEqual(spec.default_values, {'y': 1}) self.assertEqual( spec.function_type, 
function_type_lib.FunctionType([ function_type_lib.Parameter( 'self', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, False, type_constraint[0]), function_type_lib.Parameter( 'x', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, False, type_constraint[1]), function_type_lib.Parameter( 'y', function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, True, type_constraint[2]) ])) def test_spec_summary(self): input_signature = ( tensor_spec.TensorSpec(shape=None), tensor_spec.TensorSpec(shape=None), ) @dummy_tf_decorator def foo(x=2, y=3): # pylint: disable=unused-argument pass spec = function_type_utils.FunctionSpec.from_function_and_signature( foo, input_signature ) self.assertEqual( spec.signature_summary(True), 'Input Parameters:\n' + ' x (POSITIONAL_OR_KEYWORD):' ' TensorSpec(shape=<unknown>, dtype=tf.float32, name=None)\n' + ' y' ' (POSITIONAL_OR_KEYWORD): TensorSpec(shape=<unknown>,' ' dtype=tf.float32, name=None)\n' + 'Output Type:\n' + ' None\n' + 'Captures:\n' + ' None\n' + 'Defaults:\n' + ' x: 2\n' + ' y: 3', ) # TODO(fmuham): Remove when is_same_structure is removed.
FunctionSpecTest
python
allegroai__clearml
clearml/backend_api/services/v2_23/tasks.py
{ "start": 319154, "end": 322391 }
class ____(Response):
    """
    Response of tasks.enqueue endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    :param queued: Number of tasks queued (0 or 1)
    :type queued: int
    :param queue_watched: Returns True if there are workers or autoscalers working with the queue
    :type queue_watched: bool
    """

    _service = "tasks"
    _action = "enqueue"
    _version = "2.23"
    # NOTE(review): descriptions below mirror the server-side schema verbatim
    # (including the "Trueif"/"autscalers" typos); do not edit them locally.
    _schema = {
        "definitions": {},
        "properties": {
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "queue_watched": {
                "description": "Returns Trueif there are workers or autscalers working with the queue",
                "type": ["boolean", "null"],
            },
            "queued": {
                "description": "Number of tasks queued (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(
        self, updated=None, fields=None, queued=None, queue_watched=None, **kwargs
    ):
        super(EnqueueResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields
        self.queued = queued
        self.queue_watched = queue_watched

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is None:
            self._property_updated = None
            return
        # Coerce integral floats (e.g. 1.0 from JSON) to int before checking.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        if value is None:
            self._property_fields = None
            return
        self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value

    @schema_property("queued")
    def queued(self):
        return self._property_queued

    @queued.setter
    def queued(self, value):
        if value is None:
            self._property_queued = None
            return
        # Coerce integral floats (e.g. 1.0 from JSON) to int before checking.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "queued", six.integer_types)
        self._property_queued = value

    @schema_property("queue_watched")
    def queue_watched(self):
        return self._property_queue_watched

    @queue_watched.setter
    def queue_watched(self, value):
        if value is None:
            self._property_queue_watched = None
            return
        self.assert_isinstance(value, "queue_watched", (bool,))
        self._property_queue_watched = value
EnqueueResponse
python
getsentry__sentry
src/sentry/net/http.py
{ "start": 951, "end": 3915 }
class ____: """ HACK(mattrobenolt): Most of this is yanked out of core urllib3 to override `_new_conn` with the ability to create our own socket. """ is_ipaddress_permitted: IsIpAddressPermitted = None def __init__(self, *args, is_ipaddress_permitted: IsIpAddressPermitted = None, **kwargs): self.is_ipaddress_permitted = is_ipaddress_permitted super().__init__(*args, **kwargs) # urllib3.connection.HTTPConnection.host # These `host` properties need rebound otherwise `self._dns_host` doesn't # get set correctly. @property def host(self): """ Getter method to remove any trailing dots that indicate the hostname is an FQDN. In general, SSL certificates don't include the trailing dot indicating a fully-qualified domain name, and thus, they don't validate properly when checked against a domain name that includes the dot. In addition, some servers may not expect to receive the trailing dot when provided. However, the hostname with trailing dot is critical to DNS resolution; doing a lookup with the trailing dot will properly only resolve the appropriate FQDN, whereas a lookup without a trailing dot will search the system's search domain list. Thus, it's important to keep the original host around for use only in those cases where it's appropriate (i.e., when doing DNS lookup to establish the actual TCP connection across which we're going to send HTTP requests). """ return self._dns_host.rstrip(".") @host.setter def host(self, value): """ Setter for the `host` property. We assume that only urllib3 uses the _dns_host attribute; httplib itself only uses `host`, and it seems reasonable that other libraries follow suit. """ self._dns_host = value # urllib3.connection.HTTPConnection._new_conn def _new_conn(self): """Establish a socket connection and set nodelay settings on it. :return: New socket connection. 
""" extra_kw = {} if self.source_address: extra_kw["source_address"] = self.source_address if self.socket_options: extra_kw["socket_options"] = self.socket_options try: # Begin custom code. conn = safe_create_connection( (self._dns_host, self.port), self.timeout, is_ipaddress_permitted=self.is_ipaddress_permitted, **extra_kw, ) # End custom code. except SocketTimeout: raise ConnectTimeoutError( self, f"Connection to {self.host} timed out. (connect timeout={self.timeout})", ) except SocketError as e: raise NewConnectionError(self, f"Failed to establish a new connection: {e}") return conn
SafeConnectionMixin
python
ray-project__ray
rllib/examples/_old_api_stack/models/custom_loss_model.py
{ "start": 697, "end": 2341 }
class ____(TFModelV2):
    """Custom model that adds an imitation loss on top of the policy loss.

    The underlying network is a plain `FullyConnectedNetwork`; this wrapper
    only overrides `custom_loss` to mix in a behavioral-cloning term computed
    from offline JSON experience files.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config, name):
        super().__init__(obs_space, action_space, num_outputs, model_config, name)
        # Delegate all network computation to an FCNet with shared config.
        self.fcnet = FullyConnectedNetwork(
            self.obs_space, self.action_space, num_outputs, model_config, name="fcnet"
        )

    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        # Delegate to our FCNet.
        return self.fcnet(input_dict, state, seq_lens)

    @override(ModelV2)
    def value_function(self):
        # Delegate to our FCNet.
        return self.fcnet.value_function()

    @override(ModelV2)
    def custom_loss(self, policy_loss, loss_inputs):
        """Return policy_loss plus a weighted imitation-learning loss.

        The offline batch comes from the JSON files configured under
        `custom_model_config["input_files"]`.
        """
        # Create a new input reader per worker.
        reader = JsonReader(self.model_config["custom_model_config"]["input_files"])
        input_ops = reader.tf_input_ops()

        # Define a secondary loss by building a graph copy with weight sharing.
        obs = restore_original_dimensions(
            tf.cast(input_ops["obs"], tf.float32), self.obs_space
        )
        logits, _ = self.forward({"obs": obs}, [], None)

        # Compute the IL loss: mean negative log-likelihood of the offline
        # actions under the current policy.
        action_dist = Categorical(logits, self.model_config)
        self.policy_loss = policy_loss
        self.imitation_loss = tf.reduce_mean(-action_dist.logp(input_ops["actions"]))
        # 10 is the fixed imitation-loss weight chosen for this example.
        return policy_loss + 10 * self.imitation_loss

    def metrics(self):
        # Expose both loss components so they show up in training results.
        return {
            "policy_loss": self.policy_loss,
            "imitation_loss": self.imitation_loss,
        }
CustomLossModel
python
huggingface__transformers
src/transformers/models/bert_generation/modeling_bert_generation.py
{ "start": 16115, "end": 17547 }
class ____(nn.Module):
    """Stack of `BertGenerationLayer` modules applied sequentially.

    Thin wrapper: it only iterates over the layers and re-packages the
    result; attention and caching logic live inside the layers themselves.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Ignore copy
        self.layer = nn.ModuleList([BertGenerationLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        # Each layer consumes and produces `hidden_states`; every other input
        # is forwarded unchanged to every layer.
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(
                hidden_states,
                attention_mask,
                encoder_hidden_states,  # as a positional argument for gradient checkpointing
                encoder_attention_mask=encoder_attention_mask,
                past_key_values=past_key_values,
                cache_position=cache_position,
                **kwargs,
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            # Only report the cache back when caching was requested; the same
            # `past_key_values` object is handed to every layer (presumably
            # updated in place -- confirm against BertGenerationLayer).
            past_key_values=past_key_values if use_cache else None,
        )
BertEncoder
python
PrefectHQ__prefect
tests/server/models/test_csrf_token.py
{ "start": 3686, "end": 4619 }
class ____:
    """Behavior of `models.csrf_token.delete_expired_tokens`."""

    async def test_can_delete_expired_tokens(self, session: AsyncSession):
        """Expired tokens are removed while still-valid ones survive."""
        # Seed five tokens, one per distinct client.
        for index in range(5):
            await models.csrf_token.create_or_update_csrf_token(
                session=session, client=f"client{index}"
            )

        # Force two of them past their expiration date.
        expired_clients = ["client0", "client1"]
        await session.execute(
            sa.update(orm_models.CsrfToken)
            .where(orm_models.CsrfToken.client.in_(expired_clients))
            .values(expiration=datetime.now(timezone.utc) - timedelta(days=1))
        )

        await models.csrf_token.delete_expired_tokens(session=session)

        remaining = (
            (await session.execute(sa.select(orm_models.CsrfToken))).scalars().all()
        )
        remaining_clients = [token.client for token in remaining]

        assert len(remaining) == 3
        assert "client0" not in remaining_clients
        assert "client1" not in remaining_clients
TestDeleteExpiredTokens
python
dagster-io__dagster
python_modules/dagster/dagster/_core/errors.py
{ "start": 24710, "end": 24808 }
class ____(DagsterError):
    """Errors during an object store operation.

    Subclasses :class:`DagsterError` so that callers catching the
    framework-wide base exception also catch object store failures.
    """
DagsterObjectStoreError
python
TheAlgorithms__Python
knapsack/tests/test_knapsack.py
{ "start": 212, "end": 1294 }
class ____(unittest.TestCase):
    """Unit tests for the knapsack solver in module ``k``.

    Uses ``self.assertEqual`` rather than bare ``assert`` so the checks
    survive ``python -O`` (which strips assert statements) and produce
    informative failure messages.
    """

    def test_base_case(self):
        """Zero capacity always yields zero value."""
        cap = 0
        val = [0]
        w = [0]
        self.assertEqual(k.knapsack(cap, w, val, len(val)), 0)

        val = [60]
        w = [10]
        self.assertEqual(k.knapsack(cap, w, val, len(val)), 0)

    def test_easy_case(self):
        """Small instance: best is the weight-2 and weight-1 items (2 + 3)."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        self.assertEqual(k.knapsack(cap, w, val, len(val)), 5)

    def test_knapsack(self):
        """Classic 0/1 instance: optimum is 100 + 120 = 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        self.assertEqual(k.knapsack(cap, w, val, len(val)), 220)

    def test_knapsack_repetition(self):
        """With repetition allowed, five copies of the weight-10 item give 300."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        self.assertEqual(k.knapsack(cap, w, val, len(val), True), 300)


if __name__ == "__main__":
    unittest.main()
Test
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/styles/style_transformation.py
{ "start": 1177, "end": 1770 }
class ____(metaclass=ABCMeta):
    """
    Abstract base class for style transformations.

    Subclasses rewrite the attributes of styled text (e.g. to adjust
    colors) by implementing `transform_attrs`.
    """

    @abstractmethod
    def transform_attrs(self, attrs: Attrs) -> Attrs:
        """
        Given one `Attrs` instance, produce the transformed `Attrs`.

        Note that the color formats can be either "ansi..." or a 6 digit
        lowercase hexadecimal color (without '#' prefix).
        """

    def invalidation_hash(self) -> Hashable:
        """
        Key under which transformed styles are cached; when this value
        changes, the cache should be invalidated.
        """
        cls_name = type(self).__name__
        return "{}-{}".format(cls_name, id(self))
StyleTransformation
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/links/datasync.py
{ "start": 1180, "end": 1515 }
class ____(BaseAwsLink):
    """Helper class for constructing AWS DataSync TaskExecution console link."""

    # Display name of this extra link in the Airflow UI.
    name = "DataSync Task Execution"
    # Storage key for the link's parameters (presumably the XCom key used by
    # BaseAwsLink -- confirm against BaseAwsLink.persist).
    key = "datasync_task_execution"
    # Console URL template; {region_name}, {task_id} and {task_execution_id}
    # are substituted when the link is rendered.
    format_str = (
        BASE_AWS_CONSOLE_LINK
        + "/datasync/home?region={region_name}#/history/{task_id}/{task_execution_id}"
    )
DataSyncTaskExecutionLink
python
cython__cython
Cython/Tests/TestJediTyper.py
{ "start": 1018, "end": 1363 }
class ____(Visitor.VisitorTransform):
    """Tree transform that collects every CompilerDirectivesNode it visits."""

    # Collected CompilerDirectivesNode instances; the class-level None marks
    # "nothing found yet" -- an instance list is created lazily on first hit.
    directives = None

    # All other node types: recurse into children without modification.
    visit_Node = Visitor.VisitorTransform.recurse_to_children

    def visit_CompilerDirectivesNode(self, node):
        """Record the directives node, then keep traversing its subtree."""
        if not self.directives:
            self.directives = []
        self.directives.append(node)
        self.visitchildren(node)
        return node
DeclarationsFinder
python
tensorflow__tensorflow
tensorflow/python/ops/distributions/bernoulli.py
{ "start": 1390, "end": 7087 }
class ____(distribution.Distribution): """Bernoulli distribution. The Bernoulli distribution with `probs` parameter, i.e., the probability of a `1` outcome (vs a `0` outcome). """ @deprecation.deprecated( "2019-01-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.distributions`.", warn_once=True) def __init__(self, logits=None, probs=None, dtype=dtypes.int32, validate_args=False, allow_nan_stats=True, name="Bernoulli"): """Construct Bernoulli distributions. Args: logits: An N-D `Tensor` representing the log-odds of a `1` event. Each entry in the `Tensor` parametrizes an independent Bernoulli distribution where the probability of an event is sigmoid(logits). Only one of `logits` or `probs` should be passed in. probs: An N-D `Tensor` representing the probability of a `1` event. Each entry in the `Tensor` parameterizes an independent Bernoulli distribution. Only one of `logits` or `probs` should be passed in. dtype: The type of the event samples. Default: `int32`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: ValueError: If p and logits are passed, or if neither are passed. 
""" parameters = dict(locals()) with ops.name_scope(name) as name: self._logits, self._probs = distribution_util.get_logits_and_probs( logits=logits, probs=probs, validate_args=validate_args, name=name) super(Bernoulli, self).__init__( dtype=dtype, reparameterization_type=distribution.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._logits, self._probs], name=name) @staticmethod def _param_shapes(sample_shape): return {"logits": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)} @property def logits(self): """Log-odds of a `1` outcome (vs `0`).""" return self._logits @property def probs(self): """Probability of a `1` outcome (vs `0`).""" return self._probs def _batch_shape_tensor(self): return array_ops.shape(self._logits) def _batch_shape(self): return self._logits.get_shape() def _event_shape_tensor(self): return array_ops.constant([], dtype=dtypes.int32) def _event_shape(self): return tensor_shape.TensorShape([]) def _sample_n(self, n, seed=None): new_shape = array_ops.concat([[n], self.batch_shape_tensor()], 0) uniform = random_ops.random_uniform( new_shape, seed=seed, dtype=self.probs.dtype) sample = math_ops.less(uniform, self.probs) return math_ops.cast(sample, self.dtype) def _log_prob(self, event): if self.validate_args: event = distribution_util.embed_check_integer_casting_closed( event, target_dtype=dtypes.bool) # TODO(jaana): The current sigmoid_cross_entropy_with_logits has # inconsistent behavior for logits = inf/-inf. event = math_ops.cast(event, self.logits.dtype) logits = self.logits # sigmoid_cross_entropy_with_logits doesn't broadcast shape, # so we do this here. 
def _broadcast(logits, event): return (array_ops.ones_like(event) * logits, array_ops.ones_like(logits) * event) if not (event.get_shape().is_fully_defined() and logits.get_shape().is_fully_defined() and event.get_shape() == logits.get_shape()): logits, event = _broadcast(logits, event) return -nn.sigmoid_cross_entropy_with_logits(labels=event, logits=logits) def _entropy(self): return (-self.logits * (math_ops.sigmoid(self.logits) - 1) + # pylint: disable=invalid-unary-operand-type nn.softplus(-self.logits)) # pylint: disable=invalid-unary-operand-type def _mean(self): return array_ops.identity(self.probs) def _variance(self): return self._mean() * (1. - self.probs) def _mode(self): """Returns `1` if `prob > 0.5` and `0` otherwise.""" return math_ops.cast(self.probs > 0.5, self.dtype) @kullback_leibler.RegisterKL(Bernoulli, Bernoulli) def _kl_bernoulli_bernoulli(a, b, name=None): """Calculate the batched KL divergence KL(a || b) with a and b Bernoulli. Args: a: instance of a Bernoulli distribution object. b: instance of a Bernoulli distribution object. name: (optional) Name to use for created operations. default is "kl_bernoulli_bernoulli". Returns: Batchwise KL(a || b) """ with ops.name_scope(name, "kl_bernoulli_bernoulli", values=[a.logits, b.logits]): delta_probs0 = nn.softplus(-b.logits) - nn.softplus(-a.logits) delta_probs1 = nn.softplus(b.logits) - nn.softplus(a.logits) return (math_ops.sigmoid(a.logits) * delta_probs0 + math_ops.sigmoid(-a.logits) * delta_probs1)
Bernoulli
python
modin-project__modin
modin/core/execution/python/implementations/pandas_on_python/partitioning/virtual_partition.py
{ "start": 2270, "end": 2428 }
class ____(PandasOnPythonDataframeAxisPartition): axis = 0 @_inherit_docstrings(PandasOnPythonDataframeAxisPartition)
PandasOnPythonDataframeColumnPartition
python
ansible__ansible
test/lib/ansible_test/_internal/coverage_util.py
{ "start": 1489, "end": 9457 }
class ____(ApplicationError): """Exception caused while attempting to read a coverage file.""" def __init__(self, path: str, message: str) -> None: self.path = path self.message = message super().__init__(f'Error reading coverage file "{os.path.relpath(path)}": {message}') def get_coverage_version(version: str) -> CoverageVersion: """Return the coverage version to use with the specified Python version.""" python_version = str_to_version(version) supported_versions = [entry for entry in COVERAGE_VERSIONS if entry.min_python <= python_version <= entry.max_python] if not supported_versions: raise InternalError(f'Python {version} has no matching entry in COVERAGE_VERSIONS.') if len(supported_versions) > 1: raise InternalError(f'Python {version} has multiple matching entries in COVERAGE_VERSIONS.') coverage_version = supported_versions[0] return coverage_version def get_coverage_file_schema_version(path: str) -> int: """ Return the schema version from the specified coverage file. SQLite based files report schema version 1 or later. JSON based files are reported as schema version 0. An exception is raised if the file is not recognized or the schema version cannot be determined. 
""" with open_binary_file(path) as file_obj: header = file_obj.read(16) if header.startswith(b'!coverage.py: '): return 0 if header.startswith(b'SQLite'): return get_sqlite_schema_version(path) raise CoverageError(path, f'Unknown header: {header!r}') def get_sqlite_schema_version(path: str) -> int: """Return the schema version from a SQLite based coverage file.""" try: with sqlite3.connect(path) as connection: cursor = connection.cursor() cursor.execute('select version from coverage_schema') schema_version = cursor.fetchmany(1)[0][0] except Exception as ex: raise CoverageError(path, f'SQLite error: {ex}') from ex if not isinstance(schema_version, int): raise CoverageError(path, f'Schema version is {type(schema_version)} instead of {int}: {schema_version}') if schema_version < 1: raise CoverageError(path, f'Schema version is out-of-range: {schema_version}') return schema_version def cover_python( args: TestConfig, python: PythonConfig, cmd: list[str], target_name: str, env: dict[str, str], capture: bool, data: t.Optional[str] = None, cwd: t.Optional[str] = None, ) -> tuple[t.Optional[str], t.Optional[str]]: """Run a command while collecting Python code coverage.""" if args.coverage: env.update(get_coverage_environment(args, target_name, python.version)) return intercept_python(args, python, cmd, env, capture, data, cwd) def get_coverage_platform(config: HostConfig) -> str: """Return the platform label for the given host config.""" if isinstance(config, PosixRemoteConfig): platform = f'remote-{sanitize_host_name(config.name)}' elif isinstance(config, DockerConfig): platform = f'docker-{sanitize_host_name(config.name)}' elif isinstance(config, PosixSshConfig): platform = f'ssh-{sanitize_host_name(config.host)}' elif isinstance(config, OriginConfig): platform = 'origin' # previous versions of ansible-test used "local-{python_version}" else: raise NotImplementedError(f'Coverage platform label not defined for type: {type(config)}') return platform def 
get_coverage_environment( args: TestConfig, target_name: str, version: str, ) -> dict[str, str]: """Return environment variables needed to collect code coverage.""" # unit tests, sanity tests and other special cases (localhost only) # config is in a temporary directory # results are in the source tree config_file = get_coverage_config(args) coverage_name = '='.join((args.command, target_name, get_coverage_platform(args.controller), f'python-{version}', 'coverage')) coverage_dir = os.path.join(data_context().content.root, data_context().content.results_path, ResultType.COVERAGE.name) coverage_file = os.path.join(coverage_dir, coverage_name) make_dirs(coverage_dir) if args.coverage_check: # cause the 'coverage' module to be found, but not imported or enabled coverage_file = '' # Enable code coverage collection on local Python programs (this does not include Ansible modules). # Used by the injectors to support code coverage. # Used by the pytest unit test plugin to support code coverage. # The COVERAGE_FILE variable is also used directly by the 'coverage' module. 
env = dict( COVERAGE_CONF=config_file, COVERAGE_FILE=coverage_file, ) return env @mutex def get_coverage_config(args: TestConfig) -> str: """Return the path to the coverage config, creating the config if it does not already exist.""" try: return get_coverage_config.path # type: ignore[attr-defined] except AttributeError: pass coverage_config = generate_coverage_config() if args.explain: temp_dir = '/tmp/coverage-temp-dir' else: temp_dir = tempfile.mkdtemp() ExitHandler.register(lambda: remove_tree(temp_dir)) path = os.path.join(temp_dir, COVERAGE_CONFIG_NAME) if not args.explain: write_text_file(path, coverage_config) get_coverage_config.path = path # type: ignore[attr-defined] return path def generate_coverage_config() -> str: """Generate code coverage configuration for tests.""" if data_context().content.collection: coverage_config = generate_collection_coverage_config() else: coverage_config = generate_ansible_coverage_config() return coverage_config def generate_ansible_coverage_config() -> str: """Generate code coverage configuration for Ansible tests.""" coverage_config = """ [run] branch = True concurrency = multiprocessing thread parallel = True omit = */python*/dist-packages/* */python*/site-packages/* */python*/distutils/* */pyshared/* */pytest */AnsiballZ_*.py */test/results/.tmp/delegation/* """ coverage_config = coverage_config.lstrip() return coverage_config def generate_collection_coverage_config() -> str: """Generate code coverage configuration for Ansible Collection tests.""" include_patterns = [ # {base}/ansible_collections/{ns}/{col}/* os.path.join(data_context().content.root, '*'), # */ansible_collections/{ns}/{col}/* (required to pick up AnsiballZ coverage) os.path.join('*', data_context().content.collection.directory, '*'), ] omit_patterns = [ # {base}/ansible_collections/{ns}/{col}/tests/output/.tmp/delegation/* os.path.join(data_context().content.root, data_context().content.results_path, '.tmp/delegation/*'), ] include = 
textwrap.indent('\n'.join(include_patterns), ' ' * 4) omit = textwrap.indent('\n'.join(omit_patterns), ' ' * 4) coverage_config = f""" [run] branch = True concurrency = multiprocessing thread parallel = True disable_warnings = no-data-collected include = {include} omit = {omit} """ coverage_config = coverage_config.lstrip() return coverage_config def self_check() -> None: """Check for internal errors due to incorrect code changes.""" # Verify all supported Python versions have a coverage version. for version in SUPPORTED_PYTHON_VERSIONS: get_coverage_version(version) # Verify all controller Python versions are mapped to the latest coverage version. for version in CONTROLLER_PYTHON_VERSIONS: if get_coverage_version(version) != CONTROLLER_COVERAGE_VERSION: raise InternalError(f'Controller Python version {version} is not mapped to the latest coverage version.') self_check()
CoverageError
python
spack__spack
lib/spack/spack/url_buildcache.py
{ "start": 54834, "end": 54978 }
class ____(spack.error.SpackError): """Raised for problems finding or accessing binary cache entry on mirror""" pass
BuildcacheEntryError
python
readthedocs__readthedocs.org
readthedocs/analytics/migrations/0005_add_unique_constraint.py
{ "start": 149, "end": 665 }
class ____(migrations.Migration): safe = Safe.after_deploy() dependencies = [ ("analytics", "0004_merge_duplicate_records"), ] operations = [ migrations.AddConstraint( model_name="pageview", constraint=models.UniqueConstraint( condition=models.Q(("version", None)), fields=("project", "path", "date", "status"), name="analytics_pageview_constraint_unique_without_optional", ), ), ]
Migration
python
ansible__ansible
lib/ansible/utils/collection_loader/_collection_config.py
{ "start": 1178, "end": 3057 }
class ____(type): def __init__(cls, meta, name, bases): cls._collection_finder = None cls._default_collection = None cls._on_collection_load = _EventSource() @property def collection_finder(cls): return cls._collection_finder @collection_finder.setter def collection_finder(cls, value): if cls._collection_finder: raise ValueError('an AnsibleCollectionFinder has already been configured') cls._collection_finder = value @property def collection_paths(cls): cls._require_finder() return [_to_text(p) for p in cls._collection_finder._n_collection_paths] @property def _internal_collections(cls): cls._require_finder() return cls._collection_finder._internal_collections @property def default_collection(cls): return cls._default_collection @default_collection.setter def default_collection(cls, value): cls._default_collection = value @property def on_collection_load(cls): return cls._on_collection_load @on_collection_load.setter def on_collection_load(cls, value): if value is not cls._on_collection_load: raise ValueError('on_collection_load is not directly settable (use +=)') @property def playbook_paths(cls): cls._require_finder() return [_to_text(p) for p in cls._collection_finder._n_playbook_paths] @playbook_paths.setter def playbook_paths(cls, value): cls._require_finder() cls._collection_finder.set_playbook_paths(value) def _require_finder(cls): if not cls._collection_finder: raise NotImplementedError('an AnsibleCollectionFinder has not been installed in this process') # concrete class of our metaclass type that defines the class properties we want
_AnsibleCollectionConfig
python
pennersr__django-allauth
allauth/socialaccount/providers/agave/views.py
{ "start": 228, "end": 1326 }
class ____(OAuth2Adapter): provider_id = "agave" settings = app_settings.PROVIDERS.get(provider_id, {}) provider_base_url = settings.get("API_URL", "https://public.agaveapi.co") access_token_url = "{0}/token".format(provider_base_url) authorize_url = "{0}/authorize".format(provider_base_url) profile_url = "{0}/profiles/v2/me".format(provider_base_url) def complete_login(self, request, app, token, response): extra_data = ( get_adapter() .get_requests_session() .get( self.profile_url, params={"access_token": token.token}, headers={ "Authorization": "Bearer " + token.token, }, ) ) user_profile = ( extra_data.json()["result"] if "result" in extra_data.json() else {} ) return self.get_provider().sociallogin_from_response(request, user_profile) oauth2_login = OAuth2LoginView.adapter_view(AgaveAdapter) oauth2_callback = OAuth2CallbackView.adapter_view(AgaveAdapter)
AgaveAdapter
python
plotly__plotly.py
plotly/graph_objs/candlestick/_legendgrouptitle.py
{ "start": 233, "end": 2967 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "candlestick" _path_str = "candlestick.legendgrouptitle" _valid_props = {"font", "text"} @property def font(self): """ Sets this legend group's title font. The 'font' property is an instance of Font that may be specified as: - An instance of :class:`plotly.graph_objs.candlestick.legendgrouptitle.Font` - A dict of string/value properties that will be passed to the Font constructor Returns ------- plotly.graph_objs.candlestick.legendgrouptitle.Font """ return self["font"] @font.setter def font(self, val): self["font"] = val @property def text(self): """ Sets the title of the legend group. The 'text' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["text"] @text.setter def text(self, val): self["text"] = val @property def _prop_descriptions(self): return """\ font Sets this legend group's title font. text Sets the title of the legend group. """ def __init__(self, arg=None, font=None, text=None, **kwargs): """ Construct a new Legendgrouptitle object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.candlestick.Legendgrouptitle` font Sets this legend group's title font. text Sets the title of the legend group. 
Returns ------- Legendgrouptitle """ super().__init__("legendgrouptitle") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.candlestick.Legendgrouptitle constructor must be a dict or an instance of :class:`plotly.graph_objs.candlestick.Legendgrouptitle`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("font", arg, font) self._set_property("text", arg, text) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Legendgrouptitle
python
airbytehq__airbyte
airbyte-ci/connectors/live-tests/src/live_tests/report.py
{ "start": 1026, "end": 2469 }
class ____(ABC): TEMPLATE_NAME: str def __init__(self, path: Path, pytest_config: Config) -> None: self.path = path self.pytest_config = pytest_config self.created_at = datetime.datetime.utcnow() self.updated_at = self.created_at self.control_execution_results_per_command: dict[Command, List[ExecutionResult]] = {command: [] for command in Command} self.target_execution_results_per_command: dict[Command, List[ExecutionResult]] = {command: [] for command in Command} self.update(ReportState.INITIALIZING) @abstractmethod def render(self) -> None: pass @property def all_connection_objects(self) -> List[ConnectionObjects]: return self.pytest_config.stash[stash_keys.ALL_CONNECTION_OBJECTS] def add_control_execution_result(self, control_execution_result: ExecutionResult) -> None: self.control_execution_results_per_command[control_execution_result.command].append(control_execution_result) self.update() def add_target_execution_result(self, target_execution_result: ExecutionResult) -> None: self.target_execution_results_per_command[target_execution_result.command].append(target_execution_result) self.update() def update(self, state: ReportState = ReportState.RUNNING) -> None: self._state = state self.updated_at = datetime.datetime.utcnow() self.render()
BaseReport
python
kamyu104__LeetCode-Solutions
Python/print-words-vertically.py
{ "start": 48, "end": 273 }
class ____(object): def printVertically(self, s): """ :type s: str :rtype: List[str] """ return ["".join(c).rstrip() for c in itertools.izip_longest(*s.split(), fillvalue=' ')]
Solution
python
getsentry__sentry
tests/sentry/incidents/test_logic.py
{ "start": 144695, "end": 146752 }
class ____(TestCase): @patch("sentry.incidents.logic.schedule_invalidate_project_config") def test_create_alert_rule(self, mocked_schedule_invalidate_project_config: MagicMock) -> None: self.create_alert_rule() mocked_schedule_invalidate_project_config.assert_not_called() @patch("sentry.incidents.logic.schedule_invalidate_project_config") def test_create_custom_metric_alert_rule_extraction( self, mocked_schedule_invalidate_project_config ): with self.feature({"organizations:on-demand-metrics-extraction": True}): self.create_alert_rule( projects=[self.project], dataset=Dataset.PerformanceMetrics, query="transaction.duration:>=100", ) mocked_schedule_invalidate_project_config.assert_called_once_with( trigger="alerts:create-on-demand-metric", project_id=self.project.id ) @patch("sentry.incidents.logic.schedule_invalidate_project_config") def test_create_custom_metric_alert_rule_prefill( self, mocked_schedule_invalidate_project_config ): with self.feature({"organizations:on-demand-metrics-prefill": True}): self.create_alert_rule( projects=[self.project], dataset=Dataset.PerformanceMetrics, query="transaction.duration:>=50", ) mocked_schedule_invalidate_project_config.assert_called_once_with( trigger="alerts:create-on-demand-metric", project_id=self.project.id ) @patch("sentry.incidents.logic.schedule_invalidate_project_config") def test_create_custom_metric_turned_off( self, mocked_schedule_invalidate_project_config: MagicMock ) -> None: self.create_alert_rule( projects=[self.project], dataset=Dataset.PerformanceMetrics, query="transaction.duration:>=100", ) mocked_schedule_invalidate_project_config.assert_not_called()
TestCustomMetricAlertRule