Columns:
  language: stringclasses (1 value)
  repo: stringclasses (346 values)
  path: stringlengths (6 to 201)
  class_span: dict
  source: stringlengths (21 to 2.38M)
  target: stringlengths (1 to 96)
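Each record below pairs a Python class definition whose name has been masked as ____ (the source column, sliced from the original file at the offsets in class_span) with the masked name (target). As a minimal sketch of how such records might be consumed — assuming they are stored as JSON lines; the file name and the JSON-lines layout are illustrative assumptions, not part of this dump:

import json

def fill_mask(record: dict) -> str:
    # Substitute the masked class name (____) in `source` with `target`.
    return record["source"].replace("____", record["target"], 1)

# Hypothetical file name; the dataset's on-disk format is not specified here.
with open("records.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        span = record["class_span"]    # character offsets within the original file
        restored = fill_mask(record)   # class definition with its real name restored
        print(record["repo"], record["path"], span["start"], span["end"], len(restored))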
python
huggingface__transformers
tests/models/qwen3_moe/test_modeling_qwen3_moe.py
{ "start": 1387, "end": 4220 }
class ____(CausalLMModelTest, unittest.TestCase): test_all_params_have_gradient = False model_tester_class = Qwen3MoeModelTester # TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): return True # Ignore copy def test_load_balancing_loss(self): r""" Let's make sure we can actually compute the loss and do a backward on it. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.num_experts = 8 config.expert_interval = 2 config.output_router_logits = True input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = Qwen3MoeForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask) self.assertEqual(result.router_logits[0].shape, (91, config.num_experts)) torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2) # First, we make sure that adding padding tokens doesn't change the loss # loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding) pad_length = 1000 # Add padding tokens (assume that pad_token_id=1) to input_ids padding_block = torch.ones(input_ids.shape[0], pad_length, dtype=torch.int32).to(torch_device) padded_input_ids = torch.cat((padding_block, input_ids), dim=1) # this is to simulate padding to the left padded_attention_mask = padded_input_ids.ne(1).to(torch_device) padded_result = model(padded_input_ids, attention_mask=padded_attention_mask) torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4) # We make sure that the loss of including padding tokens != the loss without padding tokens # if attention_mask=None --> we don't exclude padding tokens include_padding_result = model(padded_input_ids, attention_mask=None) # This is to mimic torch.testing.assert_not_close self.assertNotAlmostEqual(include_padding_result.aux_loss.item(), result.aux_loss.item()) # Run on runners with larger accelerators (for example A10 instead of T4) with a lot of CPU RAM (e.g. g5-12xlarge) @require_torch_multi_accelerator @require_torch_large_accelerator @require_torch
Qwen3MoeModelTest
python
networkx__networkx
networkx/algorithms/tests/test_cluster.py
{ "start": 17600, "end": 18450 }
class ____:
    @classmethod
    def setup_class(cls):
        pytest.importorskip("numpy")

    def test_empty(self):
        G = nx.DiGraph()
        with pytest.raises(ZeroDivisionError):
            nx.average_clustering(G)

    def test_average_clustering(self):
        G = nx.cycle_graph(3, create_using=nx.DiGraph())
        G.add_edge(2, 3)
        assert nx.average_clustering(G) == (1 + 1 + 1 / 3) / 8
        assert nx.average_clustering(G, count_zeros=True) == (1 + 1 + 1 / 3) / 8
        assert nx.average_clustering(G, count_zeros=False) == (1 + 1 + 1 / 3) / 6
        assert nx.average_clustering(G, [1, 2, 3]) == (1 + 1 / 3) / 6
        assert nx.average_clustering(G, [1, 2, 3], count_zeros=True) == (1 + 1 / 3) / 6
        assert nx.average_clustering(G, [1, 2, 3], count_zeros=False) == (1 + 1 / 3) / 4
TestDirectedAverageClustering
python
optuna__optuna
optuna/storages/journal/_redis.py
{ "start": 465, "end": 3929 }
class ____(BaseJournalBackend, BaseJournalSnapshot): """Redis storage class for Journal log backend. Args: url: URL of the redis storage, password and db are optional. (ie: ``redis://localhost:6379``) use_cluster: Flag whether you use the Redis cluster. If this is :obj:`False`, it is assumed that you use the standalone Redis server and ensured that a write operation is atomic. This provides the consistency of the preserved logs. If this is :obj:`True`, it is assumed that you use the Redis cluster and not ensured that a write operation is atomic. This means the preserved logs can be inconsistent due to network errors, and may cause errors. prefix: Prefix of the preserved key of logs. This is useful when multiple users work on one Redis server. """ def __init__(self, url: str, use_cluster: bool = False, prefix: str = "") -> None: _imports.check() self._url = url self._redis = redis.Redis.from_url(url) self._use_cluster = use_cluster self._prefix = prefix def __getstate__(self) -> dict[Any, Any]: state = self.__dict__.copy() del state["_redis"] return state def __setstate__(self, state: dict[Any, Any]) -> None: self.__dict__.update(state) self._redis = redis.Redis.from_url(self._url) def read_logs(self, log_number_from: int) -> Generator[dict[str, Any], None, None]: max_log_number_bytes = self._redis.get(f"{self._prefix}:log_number") if max_log_number_bytes is None: return max_log_number = int(max_log_number_bytes) for log_number in range(log_number_from, max_log_number + 1): sleep_secs = 0.1 while True: log = self._redis.get(self._key_log_id(log_number)) if log is not None: break time.sleep(sleep_secs) sleep_secs = min(sleep_secs * 2, 10) try: yield json.loads(log) except json.JSONDecodeError as err: if log_number != max_log_number: raise err def append_logs(self, logs: list[dict[str, Any]]) -> None: self._redis.setnx(f"{self._prefix}:log_number", -1) for log in logs: if not self._use_cluster: self._redis.eval( # type: ignore "local i = redis.call('incr', string.format('%s:log_number', ARGV[1])) " "redis.call('set', string.format('%s:log:%d', ARGV[1], i), ARGV[2])", 0, self._prefix, json.dumps(log), ) else: log_number = self._redis.incr(f"{self._prefix}:log_number", 1) self._redis.set(self._key_log_id(log_number), json.dumps(log)) def save_snapshot(self, snapshot: bytes) -> None: self._redis.set(f"{self._prefix}:snapshot", snapshot) def load_snapshot(self) -> bytes | None: snapshot_bytes = self._redis.get(f"{self._prefix}:snapshot") return snapshot_bytes def _key_log_id(self, log_number: int) -> str: return f"{self._prefix}:log:{log_number}" @deprecated_class( "4.0.0", "6.0.0", text="Use :class:`~optuna.storages.journal.JournalRedisBackend` instead." )
JournalRedisBackend
python
astropy__astropy
astropy/cosmology/_src/traits/photoncomponent.py
{ "start": 380, "end": 1338 }
class ____: """The cosmology has attributes and methods for the photon density.""" Ogamma0: float | np.floating """Omega gamma; the density/critical density of photons at z=0.""" inv_efunc: Callable[[NDArray[Any]], NDArray[Any]] def Ogamma(self, z: Quantity | ArrayLike, /) -> FArray: """Return the density parameter for photons at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like Input redshift. .. versionchanged:: 7.0 Passing z as a keyword argument is deprecated. .. versionchanged:: 8.0 z must be a positional argument. Returns ------- Ogamma : array The energy density of photons relative to the critical density at each redshift. """ z = aszarr(z) return self.Ogamma0 * (z + 1.0) ** 4 * self.inv_efunc(z) ** 2
PhotonComponent
python
django__django
django/db/migrations/operations/models.py
{ "start": 21996, "end": 23383 }
class ____(ModelOptionOperation):
    def __init__(self, name, table_comment):
        self.table_comment = table_comment
        super().__init__(name)

    def deconstruct(self):
        kwargs = {
            "name": self.name,
            "table_comment": self.table_comment,
        }
        return (self.__class__.__qualname__, [], kwargs)

    def state_forwards(self, app_label, state):
        state.alter_model_options(
            app_label, self.name_lower, {"db_table_comment": self.table_comment}
        )

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.name)
            schema_editor.alter_db_table_comment(
                new_model,
                old_model._meta.db_table_comment,
                new_model._meta.db_table_comment,
            )

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        return self.database_forwards(app_label, schema_editor, from_state, to_state)

    def describe(self):
        return f"Alter {self.name} table comment"

    @property
    def migration_name_fragment(self):
        return f"alter_{self.name_lower}_table_comment"
AlterModelTableComment
python
pandas-dev__pandas
pandas/tests/indexes/timedeltas/test_timedelta_range.py
{ "start": 5562, "end": 6537 }
class ____:
    def test_timedelta_range_unit_inference_matching_unit(self, unit):
        start = Timedelta(0).as_unit(unit)
        end = Timedelta(days=1).as_unit(unit)
        tdi = timedelta_range(start, end, freq="D")
        assert tdi.unit == unit

    def test_timedelta_range_unit_inference_mismatched_unit(self, unit):
        start = Timedelta(0).as_unit(unit)
        end = Timedelta(days=1).as_unit("s")
        tdi = timedelta_range(start, end, freq="D")
        assert tdi.unit == unit

        tdi = timedelta_range(start, end.as_unit("ns"), freq="D")
        assert tdi.unit == "ns"

    def test_timedelta_range_unit_inference_tick(self):
        start = Timedelta(0).as_unit("ms")
        end = Timedelta(days=1).as_unit("s")
        tdi = timedelta_range(start, end, freq="2000000us")
        assert tdi.unit == "us"

        tdi = timedelta_range(start, end.as_unit("ns"), freq="2000000us")
        assert tdi.unit == "ns"
TestTimedeltaRangeUnitInference
python
ansible__ansible
lib/ansible/module_utils/facts/system/systemd.py
{ "start": 897, "end": 1633 }
class ____(BaseFactCollector):
    name = "systemd"
    _fact_ids = set()  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        systemctl_bin = module.get_bin_path("systemctl")
        systemd_facts = {}
        if systemctl_bin and ServiceMgrFactCollector.is_systemd_managed(module):
            rc, stdout, dummy = module.run_command(
                [systemctl_bin, "--version"],
                check_rc=False,
            )
            if rc != 0:
                return systemd_facts

            systemd_facts["systemd"] = {
                "features": str(stdout.split("\n")[1]),
                "version": int(stdout.split(" ")[1]),
            }

        return systemd_facts
SystemdFactCollector
python
huggingface__transformers
src/transformers/models/dbrx/modeling_dbrx.py
{ "start": 11872, "end": 12962 }
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.ffn_hidden_size = config.ffn_hidden_size
        self.moe_num_experts = config.moe_num_experts

        self.w1 = nn.Parameter(torch.empty(self.moe_num_experts * self.ffn_hidden_size, self.hidden_size))
        self.v1 = nn.Parameter(torch.empty(self.moe_num_experts * self.ffn_hidden_size, self.hidden_size))
        self.w2 = nn.Parameter(torch.empty(self.moe_num_experts * self.ffn_hidden_size, self.hidden_size))

        act_fn_name = config.ffn_act_fn.get("name", "silu")
        self.activation_fn = ACT2FN[act_fn_name]

    def forward(
        self, x: torch.Tensor, expert_w1: torch.Tensor, expert_v1: torch.Tensor, expert_w2: torch.Tensor
    ) -> torch.Tensor:
        gate_proj = x.matmul(expert_w1)
        up_proj = x.matmul(expert_v1)
        gate_proj = self.activation_fn(gate_proj)
        intermediate_states = gate_proj * up_proj
        down_proj = intermediate_states.matmul(expert_w2.t())
        return down_proj
DbrxExpertGLU
python
facebookresearch__faiss
tests/test_fast_scan_ivf.py
{ "start": 5040, "end": 6392 }
class ____(unittest.TestCase):
    """Verify implem 1 (search from original invlists) against IndexIVFPQ"""

    def do_test(self, by_residual, metric_type=faiss.METRIC_L2, use_precomputed_table=0):
        ds = datasets.SyntheticDataset(32, 2000, 5000, 1000)

        index = faiss.index_factory(32, "IVF32,PQ16x4np", metric_type)
        index.use_precomputed_table
        index.use_precomputed_table = use_precomputed_table
        index.train(ds.get_train())
        index.add(ds.get_database())
        index.nprobe = 4
        index.by_residual = by_residual
        Da, Ia = index.search(ds.get_queries(), 10)

        index2 = faiss.IndexIVFPQFastScan(index)
        index2.implem = 1
        Db, Ib = index2.search(ds.get_queries(), 10)
        # self.assertLess((Ia != Ib).sum(), Ia.size * 0.005)
        np.testing.assert_array_equal(Ia, Ib)
        np.testing.assert_almost_equal(Da, Db, decimal=5)

    def test_no_residual(self):
        self.do_test(False)

    def test_by_residual(self):
        self.do_test(True)

    def test_by_residual_no_precomputed(self):
        self.do_test(True, use_precomputed_table=-1)

    def test_no_residual_ip(self):
        self.do_test(False, faiss.METRIC_INNER_PRODUCT)

    def test_by_residual_ip(self):
        self.do_test(True, faiss.METRIC_INNER_PRODUCT)
TestIVFImplem1
python
getsentry__sentry
src/sentry/ratelimits/sliding_windows.py
{ "start": 5455, "end": 6969 }
class ____(SlidingWindowRateLimiter):
    def __init__(self, **options: Any) -> None:
        self.cluster_key = options.get("cluster", "default")
        self._client: RedisCluster[str] | StrictRedis[str] | None = None
        self._impl: RedisSlidingWindowRateLimiterImpl | None = None
        super().__init__(**options)

    @property
    def client(self) -> StrictRedis[str] | RedisCluster[str]:
        if self._client is None:
            self._client = redis.redis_clusters.get(self.cluster_key)
        assert isinstance(self._client, (StrictRedis, RedisCluster)), self._client
        return self._client

    @property
    def impl(self) -> RedisSlidingWindowRateLimiterImpl:
        if self._impl is None:
            self._impl = RedisSlidingWindowRateLimiterImpl(self.client)
        return self._impl

    def validate(self) -> None:
        try:
            self.client.ping()
            self.client.connection_pool.disconnect()
        except Exception as e:
            raise InvalidConfiguration(str(e))

    def check_within_quotas(
        self, requests: Sequence[RequestedQuota], timestamp: Timestamp | None = None
    ) -> tuple[Timestamp, Sequence[GrantedQuota]]:
        return self.impl.check_within_quotas(requests, timestamp)

    def use_quotas(
        self,
        requests: Sequence[RequestedQuota],
        grants: Sequence[GrantedQuota],
        timestamp: Timestamp,
    ) -> None:
        return self.impl.use_quotas(requests, grants, timestamp)
RedisSlidingWindowRateLimiter
python
modin-project__modin
modin/core/storage_formats/pandas/parsers.py
{ "start": 24468, "end": 29943 }
class ____(PandasParser): @staticmethod def _read_row_group_chunk( f, row_group_start, row_group_end, columns, filters, engine, to_pandas_kwargs ): # noqa: GL08 if engine == "pyarrow": if filters is not None: import pyarrow.dataset as ds from pyarrow.parquet import filters_to_expression parquet_format = ds.ParquetFileFormat() fragment = parquet_format.make_fragment( f, row_groups=range( row_group_start, row_group_end, ), ) dataset = ds.FileSystemDataset( [fragment], schema=fragment.physical_schema, format=parquet_format, filesystem=fragment.filesystem, ) # This lower-level API doesn't have the ability to automatically handle pandas metadata # The following code is based on # https://github.com/apache/arrow/blob/f44e28fa03a64ae5b3d9352d21aee2cc84f9af6c/python/pyarrow/parquet/core.py#L2619-L2628 # if use_pandas_metadata, we need to include index columns in the # column selection, to be able to restore those in the pandas DataFrame metadata = dataset.schema.metadata or {} if b"pandas" in metadata and columns is not None: index_columns = json.loads(metadata[b"pandas"].decode("utf8"))[ "index_columns" ] # In the pandas metadata, the index columns can either be string column names, # or a dictionary that describes a RangeIndex. # Here, we are finding the real data columns that need to be read to become part # of the pandas Index, so we can skip the RangeIndex. # Not only can a RangeIndex be trivially reconstructed later, but we actually # ignore partition-level range indices, because we want to have a single Modin # RangeIndex that spans all partitions. index_columns = [ col for col in index_columns if not isinstance(col, dict) ] columns = list(columns) + list(set(index_columns) - set(columns)) return dataset.to_table( columns=columns, filter=filters_to_expression(filters), ).to_pandas(**to_pandas_kwargs) else: from pyarrow.parquet import ParquetFile return ( ParquetFile(f) .read_row_groups( range( row_group_start, row_group_end, ), columns=columns, use_pandas_metadata=True, ) .to_pandas(**to_pandas_kwargs) ) elif engine == "fastparquet": from fastparquet import ParquetFile return ParquetFile(f)[row_group_start:row_group_end].to_pandas( columns=columns, filters=filters, # Setting row_filter=True would perform filtering at the row level, which is more correct # (in line with pyarrow) # However, it doesn't work: https://github.com/dask/fastparquet/issues/873 # Also, this would create incompatibility with pandas ) else: # We shouldn't ever come to this case, so something went wrong raise ValueError( f"engine must be one of 'pyarrow', 'fastparquet', got: {engine}" ) @staticmethod @doc( _doc_parse_func, parameters="""files_for_parser : list List of files to be read. engine : str Parquet library to use (either PyArrow or fastparquet). 
""", ) def parse(files_for_parser, engine, **kwargs): columns = kwargs.get("columns", None) filters = kwargs.get("filters", None) storage_options = kwargs.get("storage_options", {}) chunks = [] # `single_worker_read` just passes in a string path or path-like object if isinstance(files_for_parser, (str, os.PathLike)): return pandas.read_parquet(files_for_parser, engine=engine, **kwargs) to_pandas_kwargs = PandasParser.get_types_mapper(kwargs["dtype_backend"]) for file_for_parser in files_for_parser: if isinstance(file_for_parser.path, IOBase): context = contextlib.nullcontext(file_for_parser.path) else: context = fsspec.open(file_for_parser.path, **storage_options) with context as f: chunk = PandasParquetParser._read_row_group_chunk( f, file_for_parser.row_group_start, file_for_parser.row_group_end, columns, filters, engine, to_pandas_kwargs, ) chunks.append(chunk) df = pandas.concat(chunks) return df, df.index, len(df) @doc(_doc_pandas_parser_class, data_type="HDF data")
PandasParquetParser
python
google__pytype
pytype/utils.py
{ "start": 5844, "end": 6161 }
class ____: """A decorator for storing function attributes. Attributes: lookup: maps functions to their attributes. """ def __init__(self): self.lookup = {} def __call__(self, value): def decorate(f): self.lookup[f.__name__] = value return f return decorate
AnnotatingDecorator
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_errorbars05.py
{ "start": 315, "end": 1568 }
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename("chart_errorbars05.xlsx")

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with error bars."""
        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({"type": "bar"})

        chart.axis_ids = [49016832, 49019136]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column("A1", data[0])
        worksheet.write_column("B1", data[1])
        worksheet.write_column("C1", data[2])

        chart.add_series(
            {
                "categories": "=Sheet1!$A$1:$A$5",
                "values": "=Sheet1!$B$1:$B$5",
                "x_error_bars": {"type": "standard_error"},
            }
        )

        chart.add_series(
            {
                "categories": "=Sheet1!$A$1:$A$5",
                "values": "=Sheet1!$C$1:$C$5",
            }
        )

        worksheet.insert_chart("E9", chart)

        workbook.close()

        self.assertExcelEqual()
TestCompareXLSXFiles
python
crytic__slither
slither/printers/summary/contract.py
{ "start": 348, "end": 3461 }
class ____(AbstractPrinter):
    ARGUMENT = "contract-summary"
    HELP = "Print a summary of the contracts"

    WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#contract-summary"

    def output(self, _filename: str) -> Output:  # pylint: disable=too-many-locals
        """
        _filename is not used
        Args:
            _filename(string)
        """

        txt = ""
        all_contracts = []

        for c in self.contracts:
            is_upgradeable_proxy = c.is_upgradeable_proxy
            is_upgradeable = c.is_upgradeable

            additional_txt_info = ""

            if is_upgradeable_proxy:
                additional_txt_info += " (Upgradeable Proxy)"

            if is_upgradeable:
                additional_txt_info += " (Upgradeable)"

            if c in self.slither.contracts_derived:
                additional_txt_info += " (Most derived contract)"

            txt += blue(f"\n+ Contract {c.name}{additional_txt_info}\n")
            additional_fields = output.Output(
                "",
                additional_fields={
                    "is_upgradeable_proxy": is_upgradeable_proxy,
                    "is_upgradeable": is_upgradeable,
                    "is_most_derived": c in self.slither.contracts_derived,
                },
            )

            # Order the function with
            # contract_declarer -> list_functions
            public_function = [
                (f.contract_declarer.name, f)
                for f in c.functions
                if (not f.is_shadowed and not f.is_constructor_variables)
            ]

            collect: Dict[str, List[FunctionContract]] = collections.defaultdict(list)
            for a, b in public_function:
                collect[a].append(b)

            for contract, functions in collect.items():
                txt += blue(f" - From {contract}\n")

                functions = sorted(functions, key=lambda f: f.full_name)

                for function in functions:
                    if function.visibility in ["external", "public"]:
                        txt += green(f" - {function.full_name} ({function.visibility})\n")
                    if function.visibility in ["internal", "private"]:
                        txt += magenta(f" - {function.full_name} ({function.visibility})\n")
                    if function.visibility not in [
                        "external",
                        "public",
                        "internal",
                        "private",
                    ]:
                        txt += f" - {function.full_name} ({function.visibility})\n"

                    additional_fields.add(
                        function, additional_fields={"visibility": function.visibility}
                    )

            all_contracts.append((c, additional_fields.data))

        self.info(txt)

        res = self.generate_output(txt)
        for current_contract, current_additional_fields in all_contracts:
            res.add(current_contract, additional_fields=current_additional_fields)

        return res
ContractSummary
python
django__django
tests/model_inheritance_regress/tests.py
{ "start": 747, "end": 25525 }
class ____(TestCase): def test_model_inheritance(self): # Regression for #7350, #7202 # When you create a Parent object with a specific reference to an # existent child instance, saving the Parent doesn't duplicate the # child. This behavior is only activated during a raw save - it is # mostly relevant to deserialization, but any sort of CORBA style # 'narrow()' API would require a similar approach. # Create a child-parent-grandparent chain place1 = Place(name="Guido's House of Pasta", address="944 W. Fullerton") place1.save_base(raw=True) restaurant = Restaurant( place_ptr=place1, serves_hot_dogs=True, serves_pizza=False, ) restaurant.save_base(raw=True) italian_restaurant = ItalianRestaurant( restaurant_ptr=restaurant, serves_gnocchi=True ) italian_restaurant.save_base(raw=True) # Create a child-parent chain with an explicit parent link place2 = Place(name="Main St", address="111 Main St") place2.save_base(raw=True) park = ParkingLot(parent=place2, capacity=100) park.save_base(raw=True) # No extra parent objects have been created. places = list(Place.objects.all()) self.assertEqual(places, [place1, place2]) dicts = list(Restaurant.objects.values("name", "serves_hot_dogs")) self.assertEqual( dicts, [{"name": "Guido's House of Pasta", "serves_hot_dogs": True}] ) dicts = list( ItalianRestaurant.objects.values( "name", "serves_hot_dogs", "serves_gnocchi" ) ) self.assertEqual( dicts, [ { "name": "Guido's House of Pasta", "serves_gnocchi": True, "serves_hot_dogs": True, } ], ) dicts = list(ParkingLot.objects.values("name", "capacity")) self.assertEqual( dicts, [ { "capacity": 100, "name": "Main St", } ], ) # You can also update objects when using a raw save. place1.name = "Guido's All New House of Pasta" place1.save_base(raw=True) restaurant.serves_hot_dogs = False restaurant.save_base(raw=True) italian_restaurant.serves_gnocchi = False italian_restaurant.save_base(raw=True) place2.name = "Derelict lot" place2.save_base(raw=True) park.capacity = 50 park.save_base(raw=True) # No extra parent objects after an update, either. places = list(Place.objects.all()) self.assertEqual(places, [place2, place1]) self.assertEqual(places[0].name, "Derelict lot") self.assertEqual(places[1].name, "Guido's All New House of Pasta") dicts = list(Restaurant.objects.values("name", "serves_hot_dogs")) self.assertEqual( dicts, [ { "name": "Guido's All New House of Pasta", "serves_hot_dogs": False, } ], ) dicts = list( ItalianRestaurant.objects.values( "name", "serves_hot_dogs", "serves_gnocchi" ) ) self.assertEqual( dicts, [ { "name": "Guido's All New House of Pasta", "serves_gnocchi": False, "serves_hot_dogs": False, } ], ) dicts = list(ParkingLot.objects.values("name", "capacity")) self.assertEqual( dicts, [ { "capacity": 50, "name": "Derelict lot", } ], ) # If you try to raw_save a parent attribute onto a child object, # the attribute will be ignored. italian_restaurant.name = "Lorenzo's Pasta Hut" italian_restaurant.save_base(raw=True) # Note that the name has not changed # - name is an attribute of Place, not ItalianRestaurant dicts = list( ItalianRestaurant.objects.values( "name", "serves_hot_dogs", "serves_gnocchi" ) ) self.assertEqual( dicts, [ { "name": "Guido's All New House of Pasta", "serves_gnocchi": False, "serves_hot_dogs": False, } ], ) def test_issue_7105(self): # Regressions tests for #7105: dates() queries should be able to use # fields from the parent model as easily as the child. 
Child.objects.create( name="child", created=datetime.datetime(2008, 6, 26, 17, 0, 0) ) datetimes = list(Child.objects.datetimes("created", "month")) self.assertEqual(datetimes, [datetime.datetime(2008, 6, 1, 0, 0)]) def test_issue_7276(self): # Regression test for #7276: calling delete() on a model with # multi-table inheritance should delete the associated rows from any # ancestor tables, as well as any descendent objects. place1 = Place(name="Guido's House of Pasta", address="944 W. Fullerton") place1.save_base(raw=True) restaurant = Restaurant( place_ptr=place1, serves_hot_dogs=True, serves_pizza=False, ) restaurant.save_base(raw=True) italian_restaurant = ItalianRestaurant( restaurant_ptr=restaurant, serves_gnocchi=True ) italian_restaurant.save_base(raw=True) ident = ItalianRestaurant.objects.all()[0].id self.assertEqual(Place.objects.get(pk=ident), place1) Restaurant.objects.create( name="a", address="xx", serves_hot_dogs=True, serves_pizza=False, ) # This should delete both Restaurants, plus the related places, plus # the ItalianRestaurant. Restaurant.objects.all().delete() with self.assertRaises(Place.DoesNotExist): Place.objects.get(pk=ident) with self.assertRaises(ItalianRestaurant.DoesNotExist): ItalianRestaurant.objects.get(pk=ident) def test_issue_6755(self): """ Regression test for #6755 """ r = Restaurant(serves_pizza=False, serves_hot_dogs=False) r.save() self.assertEqual(r.id, r.place_ptr_id) orig_id = r.id r = Restaurant(place_ptr_id=orig_id, serves_pizza=True, serves_hot_dogs=False) r.save() self.assertEqual(r.id, orig_id) self.assertEqual(r.id, r.place_ptr_id) def test_issue_11764(self): """ Regression test for #11764 """ wholesalers = list(Wholesaler.objects.select_related()) self.assertEqual(wholesalers, []) def test_issue_7853(self): """ Regression test for #7853 If the parent class has a self-referential link, make sure that any updates to that link via the child update the right table. """ obj = SelfRefChild.objects.create(child_data=37, parent_data=42) obj.delete() def test_get_next_previous_by_date(self): """ Regression tests for #8076 get_(next/previous)_by_date should work """ c1 = ArticleWithAuthor( headline="ArticleWithAuthor 1", author="Person 1", pub_date=datetime.datetime(2005, 8, 1, 3, 0), ) c1.save() c2 = ArticleWithAuthor( headline="ArticleWithAuthor 2", author="Person 2", pub_date=datetime.datetime(2005, 8, 1, 10, 0), ) c2.save() c3 = ArticleWithAuthor( headline="ArticleWithAuthor 3", author="Person 3", pub_date=datetime.datetime(2005, 8, 2), ) c3.save() self.assertEqual(c1.get_next_by_pub_date(), c2) self.assertEqual(c2.get_next_by_pub_date(), c3) with self.assertRaises(ArticleWithAuthor.DoesNotExist): c3.get_next_by_pub_date() self.assertEqual(c3.get_previous_by_pub_date(), c2) self.assertEqual(c2.get_previous_by_pub_date(), c1) with self.assertRaises(ArticleWithAuthor.DoesNotExist): c1.get_previous_by_pub_date() def test_inherited_fields(self): """ Regression test for #8825 and #9390 Make sure all inherited fields (esp. m2m fields, in this case) appear on the child class. """ m2mchildren = list(M2MChild.objects.filter(articles__isnull=False)) self.assertEqual(m2mchildren, []) # Ordering should not include any database column more than once (this # is most likely to occur naturally with model inheritance, so we # check it here). Regression test for #9390. This necessarily pokes at # the SQL string for the query, since the duplicate problems are only # apparent at that late stage. 
qs = ArticleWithAuthor.objects.order_by("pub_date", "pk") sql = qs.query.get_compiler(qs.db).as_sql()[0] fragment = sql[sql.find("ORDER BY") :] pos = fragment.find("pub_date") self.assertEqual(fragment.find("pub_date", pos + 1), -1) def test_queryset_update_on_parent_model(self): """ Regression test for #10362 It is possible to call update() and only change a field in an ancestor model. """ article = ArticleWithAuthor.objects.create( author="fred", headline="Hey there!", pub_date=datetime.datetime(2009, 3, 1, 8, 0, 0), ) update = ArticleWithAuthor.objects.filter(author="fred").update( headline="Oh, no!" ) self.assertEqual(update, 1) update = ArticleWithAuthor.objects.filter(pk=article.pk).update( headline="Oh, no!" ) self.assertEqual(update, 1) derivedm1 = DerivedM.objects.create( customPK=44, base_name="b1", derived_name="d1", ) self.assertEqual(derivedm1.customPK, 44) self.assertEqual(derivedm1.base_name, "b1") self.assertEqual(derivedm1.derived_name, "d1") derivedms = list(DerivedM.objects.all()) self.assertEqual(derivedms, [derivedm1]) def test_use_explicit_o2o_to_parent_as_pk(self): """ The connector from child to parent need not be the pk on the child. """ self.assertEqual(ParkingLot3._meta.pk.name, "primary_key") # the child->parent link self.assertEqual(ParkingLot3._meta.get_ancestor_link(Place).name, "parent") def test_use_explicit_o2o_to_parent_from_abstract_model(self): self.assertEqual(ParkingLot4A._meta.pk.name, "parent") ParkingLot4A.objects.create( name="Parking4A", address="21 Jump Street", ) self.assertEqual(ParkingLot4B._meta.pk.name, "parent") ParkingLot4A.objects.create( name="Parking4B", address="21 Jump Street", ) def test_all_fields_from_abstract_base_class(self): """ Regression tests for #7588 """ # All fields from an ABC, including those inherited non-abstractly # should be available on child classes (#7588). Creating this instance # should work without error. QualityControl.objects.create( headline="Problems in Django", pub_date=datetime.datetime.now(), quality=10, assignee="adrian", ) def test_abstract_base_class_m2m_relation_inheritance(self): # many-to-many relations defined on an abstract base class are # correctly inherited (and created) on the child class. p1 = Person.objects.create(name="Alice") p2 = Person.objects.create(name="Bob") p3 = Person.objects.create(name="Carol") p4 = Person.objects.create(name="Dave") birthday = BirthdayParty.objects.create(name="Birthday party for Alice") birthday.attendees.set([p1, p3]) bachelor = BachelorParty.objects.create(name="Bachelor party for Bob") bachelor.attendees.set([p2, p4]) parties = list(p1.birthdayparty_set.all()) self.assertEqual(parties, [birthday]) parties = list(p1.bachelorparty_set.all()) self.assertEqual(parties, []) parties = list(p2.bachelorparty_set.all()) self.assertEqual(parties, [bachelor]) # A subclass of a subclass of an abstract model doesn't get its own # accessor. self.assertFalse(hasattr(p2, "messybachelorparty_set")) # ... but it does inherit the m2m from its parent messy = MessyBachelorParty.objects.create(name="Bachelor party for Dave") messy.attendees.set([p4]) messy_parent = messy.bachelorparty_ptr parties = list(p4.bachelorparty_set.all()) self.assertEqual(parties, [bachelor, messy_parent]) def test_abstract_verbose_name_plural_inheritance(self): """ verbose_name_plural correctly inherited from ABC if inheritance chain includes an abstract model. 
""" # Regression test for #11369: verbose_name_plural should be inherited # from an ABC even when there are one or more intermediate # abstract models in the inheritance chain, for consistency with # verbose_name. self.assertEqual(InternalCertificationAudit._meta.verbose_name_plural, "Audits") def test_inherited_nullable_exclude(self): obj = SelfRefChild.objects.create(child_data=37, parent_data=42) self.assertQuerySetEqual( SelfRefParent.objects.exclude(self_data=72), [obj.pk], attrgetter("pk") ) self.assertQuerySetEqual( SelfRefChild.objects.exclude(self_data=72), [obj.pk], attrgetter("pk") ) def test_concrete_abstract_concrete_pk(self): """ Primary key set correctly with concrete->abstract->concrete inheritance. """ # Regression test for #13987: Primary key is incorrectly determined # when more than one model has a concrete->abstract->concrete # inheritance hierarchy. self.assertEqual( len( [field for field in BusStation._meta.local_fields if field.primary_key] ), 1, ) self.assertEqual( len( [ field for field in TrainStation._meta.local_fields if field.primary_key ] ), 1, ) self.assertIs(BusStation._meta.pk.model, BusStation) self.assertIs(TrainStation._meta.pk.model, TrainStation) def test_inherited_unique_field_with_form(self): """ A model which has different primary key for the parent model passes unique field checking correctly (#17615). """ class ProfileForm(forms.ModelForm): class Meta: model = Profile fields = "__all__" User.objects.create(username="user_only") p = Profile.objects.create(username="user_with_profile") form = ProfileForm( {"username": "user_with_profile", "extra": "hello"}, instance=p ) self.assertTrue(form.is_valid()) def test_inheritance_joins(self): # Test for #17502 - check that filtering through two levels of # inheritance chain doesn't generate extra joins. qs = ItalianRestaurant.objects.all() self.assertEqual(str(qs.query).count("JOIN"), 2) qs = ItalianRestaurant.objects.filter(name="foo") self.assertEqual(str(qs.query).count("JOIN"), 2) @expectedFailure def test_inheritance_values_joins(self): # It would be nice (but not too important) to skip the middle join in # this case. Skipping is possible as nothing from the middle model is # used in the qs and top contains direct pointer to the bottom model. 
qs = ItalianRestaurant.objects.values_list("serves_gnocchi").filter(name="foo") self.assertEqual(str(qs.query).count("JOIN"), 1) def test_issue_21554(self): senator = Senator.objects.create(name="John Doe", title="X", state="Y") senator = Senator.objects.get(pk=senator.pk) self.assertEqual(senator.name, "John Doe") self.assertEqual(senator.title, "X") self.assertEqual(senator.state, "Y") def test_inheritance_resolve_columns(self): Restaurant.objects.create( name="Bobs Cafe", address="Somewhere", serves_pizza=True, serves_hot_dogs=True, ) p = Place.objects.select_related("restaurant")[0] self.assertIsInstance(p.restaurant.serves_pizza, bool) def test_inheritance_select_related(self): # Regression test for #7246 r1 = Restaurant.objects.create( name="Nobu", serves_hot_dogs=True, serves_pizza=False ) r2 = Restaurant.objects.create( name="Craft", serves_hot_dogs=False, serves_pizza=True ) Supplier.objects.create(name="John", restaurant=r1) Supplier.objects.create(name="Jane", restaurant=r2) self.assertQuerySetEqual( Supplier.objects.order_by("name").select_related(), [ "Jane", "John", ], attrgetter("name"), ) jane = Supplier.objects.order_by("name").select_related("restaurant")[0] self.assertEqual(jane.restaurant.name, "Craft") def test_filter_with_parent_fk(self): r = Restaurant.objects.create() s = Supplier.objects.create(restaurant=r) # The mismatch between Restaurant and Place is intentional (#28175). self.assertSequenceEqual( Supplier.objects.filter(restaurant__in=Place.objects.all()), [s] ) def test_ptr_accessor_assigns_state(self): r = Restaurant.objects.create() self.assertIs(r.place_ptr._state.adding, False) self.assertEqual(r.place_ptr._state.db, "default") def test_related_filtering_query_efficiency_ticket_15844(self): r = Restaurant.objects.create( name="Guido's House of Pasta", address="944 W. Fullerton", serves_hot_dogs=True, serves_pizza=False, ) s = Supplier.objects.create(restaurant=r) with self.assertNumQueries(1): self.assertSequenceEqual(Supplier.objects.filter(restaurant=r), [s]) with self.assertNumQueries(1): self.assertSequenceEqual(r.supplier_set.all(), [s]) def test_queries_on_parent_access(self): italian_restaurant = ItalianRestaurant.objects.create( name="Guido's House of Pasta", address="944 W. Fullerton", serves_hot_dogs=True, serves_pizza=False, serves_gnocchi=True, ) # No queries are made when accessing the parent objects. italian_restaurant = ItalianRestaurant.objects.get(pk=italian_restaurant.pk) with self.assertNumQueries(0): restaurant = italian_restaurant.restaurant_ptr self.assertEqual(restaurant.place_ptr.restaurant, restaurant) self.assertEqual(restaurant.italianrestaurant, italian_restaurant) # One query is made when accessing the parent objects when the instance # is deferred. italian_restaurant = ItalianRestaurant.objects.only("serves_gnocchi").get( pk=italian_restaurant.pk ) with self.assertNumQueries(1): restaurant = italian_restaurant.restaurant_ptr self.assertEqual(restaurant.place_ptr.restaurant, restaurant) self.assertEqual(restaurant.italianrestaurant, italian_restaurant) # No queries are made when accessing the parent objects when the # instance has deferred a field not present in the parent table. 
italian_restaurant = ItalianRestaurant.objects.defer("serves_gnocchi").get( pk=italian_restaurant.pk ) with self.assertNumQueries(0): restaurant = italian_restaurant.restaurant_ptr self.assertEqual(restaurant.place_ptr.restaurant, restaurant) self.assertEqual(restaurant.italianrestaurant, italian_restaurant) def test_parent_access_copies_fetch_mode(self): italian_restaurant = ItalianRestaurant.objects.create( name="Mom's Spaghetti", address="2131 Woodward Ave", serves_hot_dogs=False, serves_pizza=False, serves_gnocchi=True, ) # No queries are made when accessing the parent objects. italian_restaurant = ItalianRestaurant.objects.fetch_mode(FETCH_PEERS).get( pk=italian_restaurant.pk ) restaurant = italian_restaurant.restaurant_ptr self.assertEqual(restaurant._state.fetch_mode, FETCH_PEERS) def test_id_field_update_on_ancestor_change(self): place1 = Place.objects.create(name="House of Pasta", address="944 Fullerton") place2 = Place.objects.create(name="House of Pizza", address="954 Fullerton") place3 = Place.objects.create(name="Burger house", address="964 Fullerton") restaurant1 = Restaurant.objects.create( place_ptr=place1, serves_hot_dogs=True, serves_pizza=False, ) restaurant2 = Restaurant.objects.create( place_ptr=place2, serves_hot_dogs=True, serves_pizza=False, ) italian_restaurant = ItalianRestaurant.objects.create( restaurant_ptr=restaurant1, serves_gnocchi=True, ) # Changing the parent of a restaurant changes the restaurant's ID & PK. restaurant1.place_ptr = place3 self.assertEqual(restaurant1.pk, place3.pk) self.assertEqual(restaurant1.id, place3.id) self.assertEqual(restaurant1.pk, restaurant1.id) restaurant1.place_ptr = None self.assertIsNone(restaurant1.pk) self.assertIsNone(restaurant1.id) # Changing the parent of an italian restaurant changes the restaurant's # ID & PK. italian_restaurant.restaurant_ptr = restaurant2 self.assertEqual(italian_restaurant.pk, restaurant2.pk) self.assertEqual(italian_restaurant.id, restaurant2.id) self.assertEqual(italian_restaurant.pk, italian_restaurant.id) italian_restaurant.restaurant_ptr = None self.assertIsNone(italian_restaurant.pk) self.assertIsNone(italian_restaurant.id) def test_create_new_instance_with_pk_equals_none(self): p1 = Profile.objects.create(username="john") p2 = User.objects.get(pk=p1.user_ptr_id).profile # Create a new profile by setting pk = None. p2.pk = None p2.user_ptr_id = None p2.username = "bill" p2.save() self.assertEqual(Profile.objects.count(), 2) self.assertEqual(User.objects.get(pk=p1.user_ptr_id).username, "john") def test_create_new_instance_with_pk_equals_none_multi_inheritance(self): c1 = Congressman.objects.create(state="PA", name="John", title="senator 1") c2 = Person.objects.get(pk=c1.pk).congressman # Create a new congressman by setting pk = None. c2.pk = None c2.id = None c2.politician_ptr_id = None c2.name = "Bill" c2.title = "senator 2" c2.save() self.assertEqual(Congressman.objects.count(), 2) self.assertEqual(Person.objects.get(pk=c1.pk).name, "John") self.assertEqual( Politician.objects.get(pk=c1.politician_ptr_id).title, "senator 1", ) def test_mti_update_parent_through_child(self): Politician.objects.create() Congressman.objects.create() Congressman.objects.update(title="senator 1") self.assertEqual(Congressman.objects.get().title, "senator 1") def test_mti_update_grand_parent_through_child(self): Politician.objects.create() Senator.objects.create() Senator.objects.update(title="senator 1") self.assertEqual(Senator.objects.get().title, "senator 1")
ModelInheritanceTest
python
pyinstaller__pyinstaller
bootloader/waflib/Task.py
{ "start": 27668, "end": 28095 }
class ____(object):
    def __init__(self, num):
        self.num = num
        self.locking = set()
        self.waiting = set()

    def is_locked(self):
        return len(self.locking) >= self.num

    def acquire(self, tsk):
        if self.is_locked():
            raise IndexError('Cannot lock more %r' % self.locking)
        self.locking.add(tsk)

    def release(self, tsk):
        self.locking.remove(tsk)
TaskSemaphore
python
mozilla__bleach
bleach/_vendor/html5lib/html5parser.py
{ "start": 117091, "end": 117164 }
class ____(Exception):
    """Error in parsed document"""

    pass
ParseError
python
dask__distributed
distributed/lock.py
{ "start": 170, "end": 3529 }
class ____(Semaphore): """Distributed Centralized Lock .. warning:: This is using the ``distributed.Semaphore`` as a backend, which is susceptible to lease overbooking. For the Lock this means that if a lease is timing out, two or more instances could acquire the lock at the same time. To disable lease timeouts, set ``distributed.scheduler.locks.lease-timeout`` to `inf`, e.g. .. code-block:: python with dask.config.set({"distributed.scheduler.locks.lease-timeout": "inf"}): lock = Lock("x") ... Note, that without lease timeouts, the Lock may deadlock in case of cluster downscaling or worker failures. Parameters ---------- name: string (optional) Name of the lock to acquire. Choosing the same name allows two disconnected processes to coordinate a lock. If not given, a random name will be generated. client: Client (optional) Client to use for communication with the scheduler. If not given, the default global client will be used. Examples -------- >>> lock = Lock('x') # doctest: +SKIP >>> lock.acquire(timeout=1) # doctest: +SKIP >>> # do things with protected resource >>> lock.release() # doctest: +SKIP """ def __init__( self, name=None, client=_no_value, scheduler_rpc=None, loop=None, ): if client is not _no_value: import warnings warnings.warn( "The `client` parameter is deprecated. It is no longer necessary to pass a client to Lock.", DeprecationWarning, stacklevel=2, ) self.name = name or "lock-" + uuid.uuid4().hex super().__init__( max_leases=1, name=name, scheduler_rpc=scheduler_rpc, loop=loop, ) def acquire(self, blocking=True, timeout=None): """Acquire the lock Parameters ---------- blocking : bool, optional If false, don't wait on the lock in the scheduler at all. timeout : string or number or timedelta, optional Seconds to wait on the lock in the scheduler. This does not include local coroutine time, network transfer time, etc.. It is forbidden to specify a timeout when blocking is false. Instead of number of seconds, it is also possible to specify a timedelta in string format, e.g. "200ms". Examples -------- >>> lock = Lock('x') # doctest: +SKIP >>> lock.acquire(timeout="1s") # doctest: +SKIP Returns ------- True or False whether or not it successfully acquired the lock """ if not blocking: if timeout is not None: raise ValueError("can't specify a timeout for a non-blocking call") timeout = 0 return super().acquire(timeout=timeout) async def _locked(self): val = await self.scheduler.semaphore_value(name=self.name) return val == 1 def locked(self): return self.sync(self._locked) def __getstate__(self): return self.name def __setstate__(self, state): self.__init__(name=state)
Lock
python
getsentry__sentry
src/sentry/api/endpoints/broadcast_details.py
{ "start": 798, "end": 4382 }
class ____(Endpoint): owner = ApiOwner.UNOWNED publish_status = { "GET": ApiPublishStatus.PRIVATE, "PUT": ApiPublishStatus.PRIVATE, } permission_classes = (SentryIsAuthenticated,) def _get_broadcast(self, request: Request, broadcast_id): if request.access.has_permission("broadcasts.admin"): queryset = Broadcast.objects.all() else: queryset = Broadcast.objects.filter( Q(date_expires__isnull=True) | Q(date_expires__gt=timezone.now()), is_active=True ) try: return queryset.get(id=int(broadcast_id)) except (Broadcast.DoesNotExist, ValueError): raise ResourceDoesNotExist def _get_validator(self, request: Request): if request.access.has_permission("broadcasts.admin"): return AdminBroadcastValidator return BroadcastValidator def _get_serializer(self, request: Request): if request.access.has_permission("broadcasts.admin"): return AdminBroadcastSerializer return BroadcastSerializer def _serialize_response(self, request: Request, broadcast): serializer_cls = self._get_serializer(request) return self.respond(serialize(broadcast, request.user, serializer=serializer_cls())) def get(self, request: Request, broadcast_id) -> Response: broadcast = self._get_broadcast(request, broadcast_id) return self._serialize_response(request, broadcast) def put(self, request: Request, broadcast_id) -> Response: if not request.user.is_authenticated: return Response(status=400) broadcast = self._get_broadcast(request, broadcast_id) validator = self._get_validator(request)(data=request.data, partial=True) if not validator.is_valid(): return self.respond(validator.errors, status=400) result = validator.validated_data update_kwargs = {} if result.get("title"): update_kwargs["title"] = result["title"] if result.get("message"): update_kwargs["message"] = result["message"] if result.get("link"): update_kwargs["link"] = result["link"] if result.get("isActive") is not None: update_kwargs["is_active"] = result["isActive"] if result.get("dateExpires", -1) != -1: update_kwargs["date_expires"] = result["dateExpires"] if result.get("cta"): update_kwargs["cta"] = result["cta"] if result.get("mediaUrl"): update_kwargs["media_url"] = result["mediaUrl"] if result.get("category"): update_kwargs["category"] = result["category"] if update_kwargs: with transaction.atomic(using=router.db_for_write(Broadcast)): broadcast.update(**update_kwargs) logger.info( "broadcasts.update", extra={ "ip_address": request.META["REMOTE_ADDR"], "user_id": request.user.id, "broadcast_id": broadcast.id, "data": update_kwargs, }, ) if result.get("hasSeen"): try: with transaction.atomic(using=router.db_for_write(Broadcast)): BroadcastSeen.objects.create(broadcast=broadcast, user_id=request.user.id) except IntegrityError: pass return self._serialize_response(request, broadcast)
BroadcastDetailsEndpoint
python
getsentry__sentry
src/sentry/monitors/migrations/0007_monitors_json_field.py
{ "start": 285, "end": 1985 }
class ____(CheckedMigration):
    # This flag is used to mark that a migration shouldn't be automatically run in production.
    # This should only be used for operations where it's safe to run the migration after your
    # code has deployed. So this should not be used for most operations that alter the schema
    # of a table.
    # Here are some things that make sense to mark as post deployment:
    # - Large data migrations. Typically we want these to be run manually so that they can be
    #   monitored and not block the deploy for a long period of time while they run.
    # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
    #   run this outside deployments so that we don't block them. Note that while adding an index
    #   is a schema change, it's completely safe to run the operation after the code has deployed.
    # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment

    is_post_deployment = True

    dependencies = [
        ("monitors", "0006_add_is_upserting"),
    ]

    operations = [
        migrations.SeparateDatabaseAndState(
            database_operations=[mod.to_jsonb("sentry_monitor", "config")],
            state_operations=[
                migrations.AlterField(
                    model_name="monitor",
                    name="config",
                    field=models.JSONField(default=dict),
                ),
            ],
        ),
        migrations.AlterField(
            model_name="monitorcheckin",
            name="monitor_config",
            field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(null=True),
        ),
    ]
Migration
python
realpython__materials
python-guitar-synthesizer/source_code_final/src/tablature/models.py
{ "start": 700, "end": 1066 }
class ____(BaseModel):
    time_signature: constr(pattern=r"\d+/\d+")
    notes: Optional[tuple[Note, ...]] = tuple()

    @cached_property
    def beats_per_measure(self) -> int:
        return int(self.time_signature.split("/")[0])

    @cached_property
    def note_value(self) -> Fraction:
        return Fraction(1, int(self.time_signature.split("/")[1]))
Measure
python
python-attrs__attrs
tests/test_filters.py
{ "start": 184, "end": 231 }
class ____:
    a = attr.ib()
    b = attr.ib()
C
python
python-attrs__attrs
typing-examples/baseline.py
{ "start": 1801, "end": 2228 }
class ____:
    a: int
    b: str = attrs.field(on_setattr=attrs.setters.NO_OP)
    c: bool = attrs.field(on_setattr=attrs.setters.frozen)
    d: int = attrs.field(
        on_setattr=[attrs.setters.convert, attrs.setters.validate]
    )
    e: bool = attrs.field(
        on_setattr=attrs.setters.pipe(
            attrs.setters.convert, attrs.setters.validate
        )
    )


@attrs.define(eq=True, order=True)
ValidatedSetter2
python
doocs__leetcode
solution/0100-0199/0179.Largest Number/Solution.py
{ "start": 0, "end": 238 }
class ____:
    def largestNumber(self, nums: List[int]) -> str:
        nums = [str(v) for v in nums]
        nums.sort(key=cmp_to_key(lambda a, b: 1 if a + b < b + a else -1))
        return "0" if nums[0] == "0" else "".join(nums)
Solution
python
chroma-core__chroma
chromadb/auth/__init__.py
{ "start": 1611, "end": 5316 }
class ____(Component): """ ServerAuthenticationProvider is responsible for authenticating requests. If a ServerAuthenticationProvider is configured, it will be called by the server to authenticate requests. If no ServerAuthenticationProvider is configured, all requests will be authenticated. The ServerAuthenticationProvider should return a UserIdentity object if the request is authenticated for use by the ServerAuthorizationProvider. """ def __init__(self, system: System) -> None: super().__init__(system) self._ignore_auth_paths: Dict[ str, List[str] ] = system.settings.chroma_server_auth_ignore_paths self.overwrite_singleton_tenant_database_access_from_auth = ( system.settings.chroma_overwrite_singleton_tenant_database_access_from_auth ) @abstractmethod def authenticate_or_raise(self, headers: Dict[str, str]) -> UserIdentity: pass def ignore_operation(self, verb: str, path: str) -> bool: if ( path in self._ignore_auth_paths.keys() and verb.upper() in self._ignore_auth_paths[path] ): return True return False def read_creds_or_creds_file(self) -> List[str]: _creds_file = None _creds = None if self._system.settings.chroma_server_authn_credentials_file: _creds_file = str( self._system.settings["chroma_server_authn_credentials_file"] ) if self._system.settings.chroma_server_authn_credentials: _creds = str(self._system.settings["chroma_server_authn_credentials"]) if not _creds_file and not _creds: raise ValueError( "No credentials file or credentials found in " "[chroma_server_authn_credentials]." ) if _creds_file and _creds: raise ValueError( "Both credentials file and credentials found." "Please provide only one." ) if _creds: return [c for c in _creds.split("\n") if c] elif _creds_file: with open(_creds_file, "r") as f: return f.readlines() raise ValueError("Should never happen") def singleton_tenant_database_if_applicable( self, user: Optional[UserIdentity] ) -> Tuple[Optional[str], Optional[str]]: """ If settings.chroma_overwrite_singleton_tenant_database_access_from_auth is False, this function always returns (None, None). If settings.chroma_overwrite_singleton_tenant_database_access_from_auth is True, follows the following logic: - If the user only has access to a single tenant, this function will return that tenant as its first return value. - If the user only has access to a single database, this function will return that database as its second return value. If the user has access to multiple tenants and/or databases, including "*", this function will return None for the corresponding value(s). - If the user has access to multiple tenants and/or databases this function will return None for the corresponding value(s). """ if not self.overwrite_singleton_tenant_database_access_from_auth or not user: return None, None tenant = None database = None if user.tenant and user.tenant != "*": tenant = user.tenant if user.databases and len(user.databases) == 1 and user.databases[0] != "*": database = user.databases[0] return tenant, database
ServerAuthenticationProvider
python
readthedocs__readthedocs.org
readthedocs/settings/test.py
{ "start": 94, "end": 6936 }
class ____(CommunityBaseSettings): """Settings for testing environment (e.g. tox)""" SLUMBER_API_HOST = "http://localhost:8000" # A bunch of our tests check this value in a returned URL/Domain PRODUCTION_DOMAIN = "readthedocs.org" PUBLIC_DOMAIN = "readthedocs.io" DONT_HIT_DB = False # Disable password validators on tests AUTH_PASSWORD_VALIDATORS = [] DEBUG = False TEMPLATE_DEBUG = False ELASTICSEARCH_DSL_AUTOSYNC = False ELASTICSEARCH_DSL_AUTO_REFRESH = True CELERY_ALWAYS_EAGER = True # Skip automatic detection of Docker limits for testing BUILD_TIME_LIMIT = 600 BUILD_MEMORY_LIMIT = "200m" CACHES = { "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "PREFIX": "docs", } } # Random private RSA key for testing # $ openssl genpkey -algorithm RSA -out private-key.pem -pkeyopt rsa_keygen_bits:4096 GITHUB_APP_PRIVATE_KEY = textwrap.dedent(""" -----BEGIN PRIVATE KEY----- MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDChDHFcZRVl/DT M+gWhptJRqLC5Yuq93OxA7BrAQpknNa4kDSVObrtD3ZlyI90VvlCEb/BcgX+lZGb lw9K93t+XgD/ZspVyfYXKh2OOd93v+xYpgjiu1idyOeblOldTnyQLaFvooU44hGZ tuIvtJMKiE/Tnd6uq2CypzBS8CGkTJ/YQuIQmsXHL/DS7Oi00M3UWNe0sq8C4T5u HohCIVmXFOtduNYwNGCAdRW6qcn9VwtJumDnAy884qX5YYHQk52PeAq2R1nS/Dnv yhIt4opzY7PXvhUw7Jg16hDz96GlAhEin1HYhQuVDZpBwfQ74VzEFEZ3+dD/WWC0 bA2o/M4SB7ccyC6CPDY+DZgKDQlHJwRge3mQjyFO3J6q9DokgTVqGxF1YY+UVwTP JBuW4xowAddMJoUiljr6U/Rv4Hz+nk4YuKkDmqL+EmPr6iGRvQiT/C8cVJsD/E8K pbG9TYMkluN13NoSWN4odVH8dPkd1LeS4IjcVKmnnraSg/X93gBOQe8L7J1KMutE V3UFHDrYeSJeieW59NLgMwwpXBfFNRADbVBz40Ok4qERu2lh3bJLOgV9lpedJ25N LlwTgDY138BDdT6oYZabjw2MUW1uebCzl3yu0yzTjeYxNrgj+wxsqMhKg/bDE96i pzKL0Z6PR4lvgYcOp4Wh/+kmxdIyZQIDAQABAoICAFbYp9IkQlqu4nahw7se8UEX mP7UdvXn0o8TexZjWg0O221+8QM5ScSi9TU/hREn7dT6ULehXZzLkb26hbjuYwRK Gz7s2WTRLZ8tDhIcs7HnDjKMOwZkKA4Wj5Xut/yRWNsUjHHnyXxarwoG1dj/0fDP aHiukShCWwOY0uIM1bBiB7IKNp28RJaIyIib/tAQM/3dhr1mU+5Au9t1pVeFRVdH n0hyiKrwD6/61q9HNGh4jxEldjNeQB56gSklSEzkQ2I1ce7tT2T8eS+e9FvpO/CF 8Ntfwl1cHR9hOJ18j/64vAbNxECcMk4jyx4V5yI/HehrtwTFFHOVp7AWWEj9SlGJ A4BxcCvyGAjPKgjaIxMG9bHM8Je4ushlroUgYXroHdSVd9Su7JBoLmRFHf+3oAAj IieDEXBeQbUx8lIQmsO71l+eX2EvouNduQ0NY+oCF9ookd4dDXA/rkexZD4so7Ee lb79z3i1HTidYZHoejlEyTHQFXOeonzmKgVGDHxJrHm5clnAjk8HdmyWgFxcgSVO fnPWdntTTGUQNowqkaJLyMhYHIP5BGZZ7oGCfY2WuUfBSMgsssnOmUmavN5hbpbk Ohikn4sKIeZQTf2HJiJNBPdV5FJ3Y1TT8MW5lvNnDDS1M+ezEBSWd6NaN3PXlQpk pGEIuoVv3Yo/eIJPjqinAoIBAQD5U5FW9fbOUTpxfI6Cg1ktBmQCRALeiCbU+jZf PVIU+HZBkChkL50Qpq0D9KjJqVvZGyMFG3jkJbdKoAADkcuRptf678GWlOjAzERa Z43Hh4vsF8PnXNojslpgoMRRWMhckcEV6VrhdvtqupBNW2GnyLofCPomwLL5DSfO M+3pgxlqav+3WMZkTowHWu5NP8v1+X/6O2ASIWe8XNSvrcchNj6wvPVaZZB5OV28 IcsnKOhveVy9fmXmuNZfWzEGiccra5DmqfkXwz2ZZokOTSfr2R7IsVNj5Z686oWW FawmEMx7zFQE6BRpDgGTZF/e1ve3sjmarX/jTYks7rBnW5IzAoIBAQDHuQ7Jf0tf BX7fZtDPy0JzwRzg6pYXJktHpXsPvUwf93Y991dDYdXWo/JDnj73G5bgJ7g0jTXq AYdndK1KUnKeZcV7rFJsqEjIociRSeKFMKS9lWP+XKoJDzNtygAKyCaIZXlOKSXE xWIUzeigVWnom6fwOuDe3/8TGE1aJSINCnSZ0TZLwsH8+lewjALPOt2e8bZ6ePpe ypysvVWnASio3OUoLmhbC7YV/lAvLgp8b4vB/9EPzmlwIKjN9Uurq4LOjTwRP/MD SHSPkiFe47zDyT0S+DOODxNC9bKh26NzOZ1Nbuqy1flvjTlhk68ih0CMEUWPv6wd sOFXn8AVRQEHAoIBAQCVCyDB9E0yrpoaR1RFrtE7OivEsvVoI8na3SxtqJGN2a2P qeaLZW8mCg05ZSMVUjmGwlMf9XlCIU29vYHkoF4p1qwb5QE7zA6LWlCuHmNB2MSL QPWqM/ZvCmo+gzx4SHOV6sebGqFqUJ8hAR/MLollLHgen1YynlUezn9yI9bgFa+2 zvnIl7gZNF8+8lusMCv0Ac9APghDLlb94hx+XIrCTtQRARRGkppX7TQch7MS2MCC CvGmkY3G682yuSfIecpnKWk4inlOfDcxoXri4rqvoV5mqKJqAFTxJ9ztiE0dgENM 6it7t2SkHGxSuNkatDTnShJnZboinjIXeyRW1QXDAoIBACikJ7YpCRVU8PRU37jp C6Syb0X1doVPbZIuwlP5mTwIBy+k3UUA65q50dqgoP93xcPnUTygX5A2r28F9x1g 
maJR41W/QyaJOAZbpYyrFEU2GM/bTnW8NX2SckytBkUrZWvr+jtFdEIOSF8jZ2r4 9ow24H2p/Yhc3HLuRw9I7xzoO8HxKLNR9lecOavbUdcJi3+EgDV72LbhU/BytrM9 MSDrklYS23lrcKoZDggLvmaD7FSV0dz9i8cdXjxK5hMQ25VceBSqhrDsVYvBmLjO buMIWD079IG735eIl8kIAMK5vqC7KVcq448nlb2dZ84G58OY4CbYQhXooHJMN7Ic UJECggEAJuMo2+TjqSKP3NgPknDboWm4rpi6u9u4/5Lp6gFVr0wXbFVEcQWic9Gt pb7+hgm3x08s7RBWr8SsDkslT0rFs6v05nYIsALFUu0BXtYqh652BRY8hLD8cDew V1YR0bULHRFbN8AyjNVNS/68R89kb9kYgAjsJP/30AIdAopP2UMSCj9cq8BUrOHf 1JhQ9/uq2YJo3XEz2ypjitUMgCtpLxu9WKDU0sYyqZaOxbr8q3HLOAttNzp0Ai3a wFZ8cpFd+mMwHGsM9+WqUFnnZkHbw2ylLo/Kv3eHLA0MEYyyF8hLvPH4JV+ftnDS agcfZZcGZQjnJO+V/MWnsSY4obY8Ag== -----END PRIVATE KEY----- """).strip() GITHUB_APP_WEBHOOK_SECRET = "secret" @property def PASSWORD_HASHERS(self): # Speed up tests by using a fast password hasher as the default. # https://docs.djangoproject.com/en/5.0/topics/testing/overview/#speeding-up-the-tests. return ["django.contrib.auth.hashers.MD5PasswordHasher"] + super().PASSWORD_HASHERS @property def DATABASES(self): # noqa return { "default": { "ENGINE": "django.db.backends.sqlite3", "NAME": os.path.join(self.SITE_ROOT, "dev.db"), }, "telemetry": { "ENGINE": "django.db.backends.sqlite3", "NAME": os.path.join(self.SITE_ROOT, "telemetry.dev.db"), }, } @property def ES_INDEXES(self): # noqa - avoid pep8 N802 es_indexes = super(CommunityTestSettings, self).ES_INDEXES for index_conf in es_indexes.values(): index_conf["name"] = "test_{}".format(index_conf["name"]) return es_indexes @property def LOGGING(self): # noqa - avoid pep8 N802 logging = super().LOGGING logging["handlers"]["console"]["level"] = "DEBUG" logging["formatters"]["default"]["format"] = "[%(asctime)s] " + self.LOG_FORMAT # Allow Sphinx and other tools to create loggers logging["disable_existing_loggers"] = False return logging @property def STORAGES(self): # Attempt to fix tests using the default storage backends return { "default": { "BACKEND": "django.core.files.storage.FileSystemStorage", }, "staticfiles": { "BACKEND": "django.contrib.staticfiles.storage.StaticFilesStorage", }, "usercontent": { "BACKEND": "django.core.files.storage.FileSystemStorage", "OPTIONS": { "location": Path(self.MEDIA_ROOT) / "usercontent", "allow_overwrite": True, }, }, } CommunityTestSettings.load_settings(__name__)
CommunityTestSettings
python
langchain-ai__langchain
libs/partners/huggingface/langchain_huggingface/llms/huggingface_pipeline.py
{ "start": 895, "end": 14788 }
class ____(BaseLLM): """HuggingFace Pipeline API. To use, you should have the `transformers` python package installed. Only supports `text-generation`, `text2text-generation`, `image-text-to-text`, `summarization` and `translation` for now. Example using from_model_id: ```python from langchain_huggingface import HuggingFacePipeline hf = HuggingFacePipeline.from_model_id( model_id="gpt2", task="text-generation", pipeline_kwargs={"max_new_tokens": 10}, ) ``` Example passing pipeline in directly: ```python from langchain_huggingface import HuggingFacePipeline from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_id = "gpt2" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10, ) hf = HuggingFacePipeline(pipeline=pipe) ``` """ pipeline: Any = None model_id: str | None = None """The model name. If not set explicitly by the user, it will be inferred from the provided pipeline (if available). If neither is provided, the DEFAULT_MODEL_ID will be used.""" model_kwargs: dict | None = None """Keyword arguments passed to the model.""" pipeline_kwargs: dict | None = None """Keyword arguments passed to the pipeline.""" batch_size: int = DEFAULT_BATCH_SIZE """Batch size to use when passing multiple documents to generate.""" model_config = ConfigDict( extra="forbid", ) @model_validator(mode="before") @classmethod def pre_init_validator(cls, values: dict[str, Any]) -> dict[str, Any]: """Ensure model_id is set either by pipeline or user input.""" if "model_id" not in values: if values.get("pipeline"): values["model_id"] = values["pipeline"].model.name_or_path else: values["model_id"] = DEFAULT_MODEL_ID return values @classmethod def from_model_id( cls, model_id: str, task: str, backend: str = "default", device: int | None = None, device_map: str | None = None, model_kwargs: dict | None = None, pipeline_kwargs: dict | None = None, batch_size: int = DEFAULT_BATCH_SIZE, **kwargs: Any, ) -> HuggingFacePipeline: """Construct the pipeline object from model_id and task.""" try: from transformers import ( # type: ignore[import] AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, ) from transformers import pipeline as hf_pipeline # type: ignore[import] except ImportError as e: msg = ( "Could not import transformers python package. " "Please install it with `pip install transformers`." ) raise ValueError(msg) from e _model_kwargs = model_kwargs.copy() if model_kwargs else {} if device_map is not None: if device is not None: msg = ( "Both `device` and `device_map` are specified. " "`device` will override `device_map`. " "You will most likely encounter unexpected behavior." "Please remove `device` and keep " "`device_map`." ) raise ValueError(msg) if "device_map" in _model_kwargs: msg = "`device_map` is already specified in `model_kwargs`." 
raise ValueError(msg) _model_kwargs["device_map"] = device_map tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs) if backend in {"openvino", "ipex"}: if task not in VALID_TASKS: msg = ( f"Got invalid task {task}, " f"currently only {VALID_TASKS} are supported" ) raise ValueError(msg) err_msg = f"Backend: {backend} {IMPORT_ERROR.format(f'optimum[{backend}]')}" if not is_optimum_intel_available(): raise ImportError(err_msg) # TODO: upgrade _MIN_OPTIMUM_VERSION to 1.22 after release min_optimum_version = ( "1.22" if backend == "ipex" and task != "text-generation" else _MIN_OPTIMUM_VERSION ) if is_optimum_intel_version("<", min_optimum_version): msg = ( f"Backend: {backend} requires optimum-intel>=" f"{min_optimum_version}. You can install it with pip: " "`pip install --upgrade --upgrade-strategy eager " f"`optimum[{backend}]`." ) raise ImportError(msg) if backend == "openvino": if not is_openvino_available(): raise ImportError(err_msg) from optimum.intel import ( # type: ignore[import] OVModelForCausalLM, OVModelForSeq2SeqLM, ) model_cls = ( OVModelForCausalLM if task == "text-generation" else OVModelForSeq2SeqLM ) else: if not is_ipex_available(): raise ImportError(err_msg) if task == "text-generation": from optimum.intel import ( IPEXModelForCausalLM, # type: ignore[import] ) model_cls = IPEXModelForCausalLM else: from optimum.intel import ( IPEXModelForSeq2SeqLM, # type: ignore[import] ) model_cls = IPEXModelForSeq2SeqLM else: model_cls = ( AutoModelForCausalLM if task == "text-generation" else AutoModelForSeq2SeqLM ) model = model_cls.from_pretrained(model_id, **_model_kwargs) if tokenizer.pad_token is None: if model.config.pad_token_id is not None: tokenizer.pad_token_id = model.config.pad_token_id elif model.config.eos_token_id is not None and isinstance( model.config.eos_token_id, int ): tokenizer.pad_token_id = model.config.eos_token_id elif tokenizer.eos_token_id is not None: tokenizer.pad_token_id = tokenizer.eos_token_id else: tokenizer.add_special_tokens({"pad_token": "[PAD]"}) if ( ( getattr(model, "is_loaded_in_4bit", False) or getattr(model, "is_loaded_in_8bit", False) ) and device is not None and backend == "default" ): logger.warning( f"Setting the `device` argument to None from {device} to avoid " "the error caused by attempting to move the model that was already " "loaded on the GPU using the Accelerate module to the same or " "another device." ) device = None if ( device is not None and importlib.util.find_spec("torch") is not None and backend == "default" ): import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): msg = ( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" ) raise ValueError(msg) if device_map is not None and device < 0: device = None if device is not None and device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available" "GPUs for execution. 
deviceId is -1 (default) for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) if device is not None and device_map is not None and backend == "openvino": logger.warning("Please set device for OpenVINO through: `model_kwargs`") if "trust_remote_code" in _model_kwargs: _model_kwargs = { k: v for k, v in _model_kwargs.items() if k != "trust_remote_code" } _pipeline_kwargs = pipeline_kwargs or {} pipeline = hf_pipeline( # type: ignore[call-overload] task=task, model=model, tokenizer=tokenizer, device=device, batch_size=batch_size, model_kwargs=_model_kwargs, **_pipeline_kwargs, ) if pipeline.task not in VALID_TASKS: msg = ( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) raise ValueError(msg) return cls( pipeline=pipeline, model_id=model_id, model_kwargs=_model_kwargs, pipeline_kwargs=_pipeline_kwargs, batch_size=batch_size, **kwargs, ) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model_id": self.model_id, "model_kwargs": self.model_kwargs, "pipeline_kwargs": self.pipeline_kwargs, } @property def _llm_type(self) -> str: return "huggingface_pipeline" def _generate( self, prompts: list[str], stop: list[str] | None = None, run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> LLMResult: # List to hold all results text_generations: list[str] = [] pipeline_kwargs = kwargs.get("pipeline_kwargs", {}) skip_prompt = kwargs.get("skip_prompt", False) for i in range(0, len(prompts), self.batch_size): batch_prompts = prompts[i : i + self.batch_size] # Process batch of prompts responses = self.pipeline( batch_prompts, **pipeline_kwargs, ) # Process each response in the batch for j, response in enumerate(responses): if isinstance(response, list): # if model returns multiple generations, pick the top one response = response[0] if ( self.pipeline.task == "text-generation" or self.pipeline.task == "text2text-generation" or self.pipeline.task == "image-text-to-text" ): text = response["generated_text"] elif self.pipeline.task == "summarization": text = response["summary_text"] elif self.pipeline.task in "translation": text = response["translation_text"] else: msg = ( f"Got invalid task {self.pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) raise ValueError(msg) if skip_prompt: text = text[len(batch_prompts[j]) :] # Append the processed text to results text_generations.append(text) return LLMResult( generations=[[Generation(text=text)] for text in text_generations] ) def _stream( self, prompt: str, stop: list[str] | None = None, run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: from threading import Thread import torch from transformers import ( StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer, ) pipeline_kwargs = kwargs.get("pipeline_kwargs", {}) skip_prompt = kwargs.get("skip_prompt", True) if stop is not None: stop = self.pipeline.tokenizer.convert_tokens_to_ids(stop) stopping_ids_list = stop or [] class StopOnTokens(StoppingCriteria): def __call__( self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs: Any, ) -> bool: return any(input_ids[0][-1] == stop_id for stop_id in stopping_ids_list) stopping_criteria = StoppingCriteriaList([StopOnTokens()]) streamer = TextIteratorStreamer( self.pipeline.tokenizer, timeout=60.0, skip_prompt=skip_prompt, skip_special_tokens=True, ) generation_kwargs = dict( text_inputs=prompt, streamer=streamer, 
stopping_criteria=stopping_criteria, **pipeline_kwargs, ) t1 = Thread(target=self.pipeline, kwargs=generation_kwargs) t1.start() for char in streamer: chunk = GenerationChunk(text=char) if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk) yield chunk
HuggingFacePipeline
python
mlflow__mlflow
dev/clint/src/clint/rules/use_walrus_operator.py
{ "start": 48, "end": 3450 }
class ____(Rule): def _message(self) -> str: return ( "Use the walrus operator `:=` when a variable is assigned and only used " "within an `if` block that tests its truthiness. " "For example, replace `a = ...; if a: use_a(a)` with `if a := ...: use_a(a)`." ) @staticmethod def check( if_node: ast.If, prev_stmt: ast.stmt, following_stmts: list[ast.stmt], ) -> bool: """ Flags:: a = func() if a: use(a) Ignores: comparisons, tuple unpacking, multi-line, used in elif/else, used after if, line > 100 chars """ # Check if previous statement is a simple assignment (not augmented, not annotated) if not isinstance(prev_stmt, ast.Assign): return False # Skip if the assignment statement spans multiple lines if ( prev_stmt.end_lineno is not None and prev_stmt.lineno is not None and prev_stmt.end_lineno > prev_stmt.lineno ): return False # Must be a single target assignment to a Name if len(prev_stmt.targets) != 1: return False target = prev_stmt.targets[0] if not isinstance(target, ast.Name): return False var_name = target.id # The if condition must be just the variable name (truthiness test) if not isinstance(if_node.test, ast.Name): return False if if_node.test.id != var_name: return False # Check that the variable is used in the if body if not _name_used_in_stmts(var_name, if_node.body): return False # Check that the variable is NOT used in elif/else branches if if_node.orelse and _name_used_in_stmts(var_name, if_node.orelse): return False # Check that the variable is NOT used after the if statement if following_stmts and _name_used_in_stmts(var_name, following_stmts): return False # Skip if the fixed code would exceed 100 characters # Original: "if var:" -> Fixed: "if var := value:" value = prev_stmt.value if ( value.end_col_offset is None or value.col_offset is None or if_node.test.end_col_offset is None ): return False value_width = value.end_col_offset - value.col_offset fixed_line_length = ( if_node.test.end_col_offset + 4 # len(" := ") + value_width + 1 # len(":") ) if fixed_line_length > 100: return False return True def _name_used_in_stmts(name: str, stmts: list[ast.stmt]) -> bool: """Check if a name is used (loaded) in a list of statements. Skips nested function/class definitions to avoid false positives from inner scopes that shadow or independently use the same name. """ return any(_name_used_in_node(name, stmt) for stmt in stmts) def _name_used_in_node(name: str, node: ast.AST) -> bool: """Recursively check if a name is used.""" match node: case ast.Name(id=id, ctx=ast.Load()) if id == name: return True case _: return any(_name_used_in_node(name, child) for child in ast.iter_child_nodes(node))
UseWalrusOperator
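The rule record above boils down to detecting an "assign, then test truthiness, then use only inside the `if`" shape. Below is a standalone sketch of that detection idea using only the standard `ast` module; it is not the clint implementation, and the names `lookup`, `process`, `key`, and `result` in the parsed snippet are invented for illustration.

```python
import ast
import textwrap

# Snippet with the shape the rule flags: a single-name assignment whose value
# is then only tested for truthiness and used inside the if body.
snippet = textwrap.dedent(
    """\
    result = lookup(key)
    if result:
        process(result)
    """
)
assign, if_stmt = ast.parse(snippet).body
is_candidate = (
    isinstance(assign, ast.Assign)
    and len(assign.targets) == 1
    and isinstance(assign.targets[0], ast.Name)
    and isinstance(if_stmt, ast.If)
    and isinstance(if_stmt.test, ast.Name)
    and if_stmt.test.id == assign.targets[0].id
)
print(is_candidate)  # True -> rewrite as: if result := lookup(key): process(result)
```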
python
ray-project__ray
python/ray/tune/tests/test_logger.py
{ "start": 1743, "end": 10217 }
class ____(unittest.TestCase): """Test built-in loggers.""" def setUp(self): self.test_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.test_dir, ignore_errors=True) def testLegacyCSV(self): config = {"a": 2, "b": 5, "c": {"c": {"D": 123}, "e": None}} t = Trial(evaluated_params=config, trial_id="csv", logdir=self.test_dir) logger = CSVLogger(config=config, logdir=self.test_dir, trial=t) logger.on_result(result(2, 4)) logger.on_result(result(2, 5)) logger.on_result(result(2, 6, score=[1, 2, 3], hello={"world": 1})) logger.close() self._validate_csv_result() def testCSV(self): config = {"a": 2, "b": 5, "c": {"c": {"D": 123}, "e": None}} t = Trial(evaluated_params=config, trial_id="csv", logdir=self.test_dir) logger = CSVLoggerCallback() logger.on_trial_result(0, [], t, result(0, 4)) logger.on_trial_result(1, [], t, result(1, 5)) logger.on_trial_result( 2, [], t, result(2, 6, score=[1, 2, 3], hello={"world": 1}) ) logger.on_trial_complete(3, [], t) self._validate_csv_result() def testCSVEmptyHeader(self): """Test that starting a trial twice does not lead to empty CSV headers. In a previous bug, the CSV header was sometimes missing when a trial crashed before reporting results. See https://github.com/ray-project/ray/issues/15106 """ config = {"a": 2, "b": 5, "c": {"c": {"D": 123}, "e": None}} t = Trial(evaluated_params=config, trial_id="csv", logdir=self.test_dir) logger = CSVLoggerCallback() logger.on_trial_start(0, [], t) logger.on_trial_start(0, [], t) logger.on_trial_result(1, [], t, result(1, 5)) with open(os.path.join(self.test_dir, "progress.csv"), "rt") as f: csv_contents = f.read() csv_lines = csv_contents.split("\n") # Assert header has been written to progress.csv assert "training_iteration" in csv_lines[0] def _validate_csv_result(self): results = [] result_file = os.path.join(self.test_dir, EXPR_PROGRESS_FILE) with open(result_file, "rt") as fp: reader = csv.DictReader(fp) for row in reader: results.append(row) self.assertEqual(len(results), 3) self.assertSequenceEqual( [int(row["episode_reward_mean"]) for row in results], [4, 5, 6] ) def testJSONLegacyLogger(self): config = {"a": 2, "b": 5, "c": {"c": {"D": 123}, "e": None}} t = Trial(evaluated_params=config, trial_id="json", logdir=self.test_dir) logger = JsonLogger(config=config, logdir=self.test_dir, trial=t) logger.on_result(result(0, 4)) logger.on_result(result(1, 5)) logger.on_result(result(2, 6, score=[1, 2, 3], hello={"world": 1})) logger.close() self._validate_json_result(config) def testJSON(self): config = {"a": 2, "b": 5, "c": {"c": {"D": 123}, "e": None}} t = Trial(evaluated_params=config, trial_id="json", logdir=self.test_dir) logger = JsonLoggerCallback() logger.on_trial_result(0, [], t, result(0, 4)) logger.on_trial_result(1, [], t, result(1, 5)) logger.on_trial_result( 2, [], t, result(2, 6, score=[1, 2, 3], hello={"world": 1}) ) logger.on_trial_complete(3, [], t) self._validate_json_result(config) def _validate_json_result(self, config): # Check result logs results = [] result_file = os.path.join(self.test_dir, EXPR_RESULT_FILE) with open(result_file, "rt") as fp: for row in fp.readlines(): results.append(json.loads(row)) self.assertEqual(len(results), 3) self.assertSequenceEqual( [int(row["episode_reward_mean"]) for row in results], [4, 5, 6] ) # Check json saved config file config_file = os.path.join(self.test_dir, EXPR_PARAM_FILE) with open(config_file, "rt") as fp: loaded_config = json.load(fp) self.assertEqual(loaded_config, config) # Check pickled config file config_file = 
os.path.join(self.test_dir, EXPR_PARAM_PICKLE_FILE) with open(config_file, "rb") as fp: loaded_config = cloudpickle.load(fp) self.assertEqual(loaded_config, config) def testLegacyTBX(self): config = { "a": 2, "b": [1, 2], "c": {"c": {"D": 123}}, "d": np.int64(1), "e": np.bool_(True), "f": None, } t = Trial(evaluated_params=config, trial_id="tbx", logdir=self.test_dir) logger = TBXLogger(config=config, logdir=self.test_dir, trial=t) logger.on_result(result(0, 4)) logger.on_result(result(1, 5)) logger.on_result(result(2, 6, score=[1, 2, 3], hello={"world": 1})) logger.close() self._validate_tbx_result() def testTBX(self): config = { "a": 2, "b": [1, 2], "c": {"c": {"D": 123}}, "int32": np.int32(1), "int64": np.int64(2), "bool8": np.bool_(True), "float32": np.float32(3), "float64": np.float64(4), "bad": np.float128(4), } t = Trial(evaluated_params=config, trial_id="tbx", logdir=self.test_dir) logger = TBXLoggerCallback() logger.on_trial_result(0, [], t, result(0, 4)) logger.on_trial_result(1, [], t, result(1, 5)) logger.on_trial_result( 2, [], t, result(2, 6, score=[1, 2, 3], hello={"world": 1}) ) logger.on_trial_complete(3, [], t) self._validate_tbx_result( params=(b"float32", b"float64", b"int32", b"int64", b"bool8"), excluded_params=(b"bad",), ) def _validate_tbx_result(self, params=None, excluded_params=None): try: from tensorflow.python.summary.summary_iterator import summary_iterator except ImportError: print("Skipping rest of test as tensorflow is not installed.") return events_file = list(glob.glob(f"{self.test_dir}/events*"))[0] results = [] excluded_params = excluded_params or [] for event in summary_iterator(events_file): for v in event.summary.value: if v.tag == "ray/tune/episode_reward_mean": results.append(v.simple_value) elif v.tag == "_hparams_/experiment" and params: for key in params: self.assertIn(key, v.metadata.plugin_data.content) for key in excluded_params: self.assertNotIn(key, v.metadata.plugin_data.content) elif v.tag == "_hparams_/session_start_info" and params: for key in params: self.assertIn(key, v.metadata.plugin_data.content) for key in excluded_params: self.assertNotIn(key, v.metadata.plugin_data.content) self.assertEqual(len(results), 3) self.assertSequenceEqual([int(res) for res in results], [4, 5, 6]) def testLegacyBadTBX(self): config = {"b": (1, 2, 3)} t = Trial(evaluated_params=config, trial_id="tbx", logdir=self.test_dir) logger = TBXLogger(config=config, logdir=self.test_dir, trial=t) logger.on_result(result(0, 4)) logger.on_result(result(2, 4, score=[1, 2, 3], hello={"world": 1})) with self.assertLogs("ray.tune.logger", level="INFO") as cm: logger.close() assert "INFO" in cm.output[0] def testBadTBX(self): config = {"b": (1, 2, 3)} t = Trial(evaluated_params=config, trial_id="tbx", logdir=self.test_dir) logger = TBXLoggerCallback() logger.on_trial_result(0, [], t, result(0, 4)) logger.on_trial_result(1, [], t, result(1, 5)) logger.on_trial_result( 2, [], t, result(2, 6, score=[1, 2, 3], hello={"world": 1}) ) with self.assertLogs("ray.tune.logger", level="INFO") as cm: logger.on_trial_complete(3, [], t) assert "INFO" in cm.output[0] @pytest.mark.skipif(sys.version_info >= (3, 12), reason="Aim doesn't support py312")
LoggerSuite
python
getsentry__sentry
src/sentry/sentry_apps/services/hook/impl.py
{ "start": 591, "end": 8752 }
class ____(HookService): def update_webhook_and_events( self, *, organization_id: int, application_id: int | None, webhook_url: str | None, events: list[str], ) -> list[RpcServiceHook]: with transaction.atomic(router.db_for_write(ServiceHook)): hooks = ServiceHook.objects.filter(application_id=application_id) hook_count = hooks.count() if webhook_url: expanded_events = expand_events(events) updated_hook_count = hooks.update(url=webhook_url, events=expanded_events) if updated_hook_count != hook_count: sentry_sdk.set_context( "hook info", { "application_id": application_id, "updated_hook_count": updated_hook_count, "expected_hook_count": hook_count, "webhook_url": webhook_url, }, ) sentry_sdk.capture_message( "failed_to_update_all_hooks_for_app", level="warning" ) return [serialize_service_hook(h) for h in hooks] else: deletions.exec_sync_many(list(hooks)) return [] def update_webhook_and_events_for_app_by_region( self, *, application_id: int | None, webhook_url: str | None, events: list[str], region_name: str, ) -> list[RpcServiceHook]: with transaction.atomic(router.db_for_write(ServiceHook)): hooks = ServiceHook.objects.filter(application_id=application_id) hook_count = hooks.count() if webhook_url: expanded_events = expand_events(events) updated_hook_count = hooks.update(url=webhook_url, events=expanded_events) if hook_count != updated_hook_count: sentry_sdk.set_context( "hook info", { "application_id": application_id, "updated_hook_count": updated_hook_count, "expected_hook_count": hook_count, }, ) sentry_sdk.capture_message( "failed_to_update_all_hooks_for_app", level="warning" ) return [serialize_service_hook(h) for h in hooks] else: deletions.exec_sync_many(list(hooks)) return [] def create_or_update_webhook_and_events_for_installation( self, *, installation_id: int, organization_id: int, webhook_url: str | None, events: list[str], application_id: int, ) -> list[RpcServiceHook]: with transaction.atomic(router.db_for_write(ServiceHook)): if webhook_url: hook, created = ServiceHook.objects.update_or_create( installation_id=installation_id, application_id=application_id, defaults={ "application_id": application_id, "actor_id": installation_id, "installation_id": installation_id, "url": webhook_url, "events": expand_events(events), }, ) logger.info( "create_or_update_webhook_and_events_for_installation.created_or_updated_hook", extra={ "hook_id": hook.id, "created_hook": created, "organization_id": organization_id, "installation_id": installation_id, "application_id": application_id, "events": events, }, ) return [serialize_service_hook(hook)] else: # If no webhook_url, try to find and delete existing hook try: hook = ServiceHook.objects.get( installation_id=installation_id, application_id=application_id ) deletions.exec_sync(hook) except ServiceHook.DoesNotExist: pass return [] def create_service_hook( self, *, application_id: int | None = None, actor_id: int = -1, installation_id: int | None = None, organization_id: int = -1, project_ids: list[int] | None = None, events: list[str] | None = None, url: str = "", ) -> RpcServiceHook: # nullable for sentry apps with transaction.atomic(router.db_for_write(ServiceHook)): project_id: int | None = project_ids[0] if project_ids else None hook = ServiceHook.objects.create( application_id=application_id, actor_id=actor_id, project_id=project_id, organization_id=organization_id, events=expand_events(events or []), installation_id=installation_id, url=url, ) if project_ids: for project_id in project_ids: hook.add_project(project_id) return 
serialize_service_hook(hook) def bulk_create_service_hooks_for_app( self, *, region_name: str, application_id: int, events: list[str], installation_organization_ids: list[RpcInstallationOrganizationPair], url: str, ) -> list[RpcServiceHook]: with transaction.atomic(router.db_for_write(ServiceHook)): expanded_events = expand_events(events) installation_ids = [pair.installation_id for pair in installation_organization_ids] # There shouldn't be any existing hooks for this app but in case we don't want to create duplicates existing_hooks = ServiceHook.objects.filter( application_id=application_id, installation_id__in=installation_ids, ) existing_installation_ids = existing_hooks.values_list("installation_id", flat=True) if existing_hooks.count() > 0: # TODO(christinarlong): If this happens we should write a script or add some logic to update existing hooks sentry_sdk.set_context( "existing_hooks", { "application_id": application_id, "existing_installation_ids": list(existing_installation_ids), "existing_hooks": list(existing_hooks.values_list("id", flat=True)), }, ) sentry_sdk.capture_exception( SentryAppSentryError( message="bulk_create_service_hooks_for_app recieved existing hooks for this app" ) ) hooks_to_create = [] for pair in installation_organization_ids: installation_id = pair.installation_id organization_id = pair.organization_id if installation_id in existing_installation_ids: continue hook = ServiceHook( application_id=application_id, actor_id=installation_id, installation_id=installation_id, organization_id=organization_id, url=url, events=expanded_events, ) hooks_to_create.append(hook) if hooks_to_create: created_hooks = ServiceHook.objects.bulk_create(hooks_to_create) return [serialize_service_hook(hook) for hook in created_hooks] return []
DatabaseBackedHookService
python
openai__openai-python
src/openai/resources/beta/realtime/sessions.py
{ "start": 21650, "end": 21867 }
class ____: def __init__(self, sessions: Sessions) -> None: self._sessions = sessions self.create = to_streamed_response_wrapper( sessions.create, )
SessionsWithStreamingResponse
python
getsentry__sentry
src/sentry/workflow_engine/typings/notification_action.py
{ "start": 18985, "end": 19497 }
class ____(BaseActionTranslator): @property def action_type(self) -> ActionType: return ActionType.PLUGIN @property def required_fields(self) -> list[str]: # NotifyEventAction doesn't appear to have any required fields # beyond the standard id and uuid return [] @property def target_type(self) -> None: # This appears to be a generic plugin notification # so we'll use SPECIFIC as the target type return None
PluginActionTranslator
python
PrefectHQ__prefect
tests/cli/transfer/test_blocks.py
{ "start": 540, "end": 6673 }
class ____: async def test_construct_creates_new_instance( self, transfer_block_type_x: BlockType ): """Test that construct creates a new MigratableBlockType instance.""" migratable = await MigratableBlockType.construct(transfer_block_type_x) assert isinstance(migratable, MigratableBlockType) assert migratable.source_block_type == transfer_block_type_x assert migratable.source_id == transfer_block_type_x.id assert migratable.destination_block_type is None assert migratable.destination_id is None async def test_construct_returns_cached_instance( self, transfer_block_type_x: BlockType ): """Test that construct returns cached instance for same ID.""" # Clear any existing instances MigratableBlockType._instances.clear() # Create first instance migratable1 = await MigratableBlockType.construct(transfer_block_type_x) # Create second instance with same block type migratable2 = await MigratableBlockType.construct(transfer_block_type_x) # Should be the same instance assert migratable1 is migratable2 assert len(MigratableBlockType._instances) == 1 async def test_get_instance_returns_cached_instance( self, transfer_block_type_x: BlockType ): """Test that get_instance returns cached instance.""" # Clear any existing instances MigratableBlockType._instances.clear() # Create instance migratable = await MigratableBlockType.construct(transfer_block_type_x) # Retrieve instance retrieved = await MigratableBlockType.get_instance(transfer_block_type_x.id) assert retrieved is migratable async def test_get_instance_returns_none_for_unknown_id(self): """Test that get_instance returns None for unknown ID.""" # Clear any existing instances MigratableBlockType._instances.clear() unknown_id = uuid.uuid4() retrieved = await MigratableBlockType.get_instance(unknown_id) assert retrieved is None async def test_get_dependencies_returns_empty_list( self, transfer_block_type_x: BlockType ): """Test that get_dependencies returns empty list (block types have no dependencies).""" migratable = await MigratableBlockType.construct(transfer_block_type_x) dependencies = await migratable.get_dependencies() assert dependencies == [] @patch("prefect.cli.transfer._migratable_resources.blocks.get_client") async def test_migrate_success( self, mock_get_client: MagicMock, transfer_block_type_x: BlockType ): """Test successful block type migration.""" # Mock the client mock_client = AsyncMock() mock_get_client.return_value.__aenter__.return_value = mock_client # Mock successful creation destination_block_type = BlockType( id=uuid.uuid4(), name=transfer_block_type_x.name, slug=transfer_block_type_x.slug, logo_url=transfer_block_type_x.logo_url, documentation_url=transfer_block_type_x.documentation_url, description=transfer_block_type_x.description, code_example=transfer_block_type_x.code_example, is_protected=transfer_block_type_x.is_protected, created=transfer_block_type_x.created, updated=transfer_block_type_x.updated, ) mock_client.create_block_type.return_value = destination_block_type migratable = await MigratableBlockType.construct(transfer_block_type_x) await migratable.migrate() # Verify client was called correctly mock_client.create_block_type.assert_called_once_with( block_type=BlockTypeCreate( name=transfer_block_type_x.name, slug=transfer_block_type_x.slug, ) ) # Verify destination_block_type is set assert migratable.destination_block_type == destination_block_type assert migratable.destination_id == destination_block_type.id @patch("prefect.cli.transfer._migratable_resources.blocks.get_client") async def 
test_migrate_already_exists( self, mock_get_client: MagicMock, transfer_block_type_x: BlockType ): """Test migration when block type already exists.""" # Mock the client mock_client = AsyncMock() mock_get_client.return_value.__aenter__.return_value = mock_client # Mock ObjectAlreadyExists exception on create mock_http_exc = Exception("Conflict") mock_client.create_block_type.side_effect = ObjectAlreadyExists(mock_http_exc) # Mock successful read of existing block type existing_block_type = BlockType( id=uuid.uuid4(), name=transfer_block_type_x.name, slug=transfer_block_type_x.slug, logo_url="https://example.com/existing-logo.png", # Different to show it reads existing documentation_url=transfer_block_type_x.documentation_url, description="existing description", code_example=transfer_block_type_x.code_example, is_protected=transfer_block_type_x.is_protected, created=transfer_block_type_x.created, updated=transfer_block_type_x.updated, ) mock_client.read_block_type_by_slug.return_value = existing_block_type migratable = await MigratableBlockType.construct(transfer_block_type_x) # Should raise TransferSkipped with pytest.raises(TransferSkipped, match="Already exists"): await migratable.migrate() # Verify client calls mock_client.create_block_type.assert_called_once() mock_client.read_block_type_by_slug.assert_called_once_with( transfer_block_type_x.slug ) # Verify destination_block_type is set to existing assert migratable.destination_block_type == existing_block_type assert migratable.destination_id == existing_block_type.id
TestMigratableBlockType
python
Unity-Technologies__ml-agents
ml-agents-envs/mlagents_envs/base_env.py
{ "start": 19926, "end": 22873 }
class ____(ABC): @abstractmethod def step(self) -> None: """ Signals the environment that it must move the simulation forward by one step. """ @abstractmethod def reset(self) -> None: """ Signals the environment that it must reset the simulation. """ @abstractmethod def close(self) -> None: """ Signals the environment that it must close. """ @property @abstractmethod def behavior_specs(self) -> MappingType[str, BehaviorSpec]: """ Returns a Mapping from behavior names to behavior specs. Agents grouped under the same behavior name have the same action and observation specs, and are expected to behave similarly in the environment. Note that new keys can be added to this mapping as new policies are instantiated. """ @abstractmethod def set_actions(self, behavior_name: BehaviorName, action: ActionTuple) -> None: """ Sets the action for all of the agents in the simulation for the next step. The Actions must be in the same order as the order received in the DecisionSteps. :param behavior_name: The name of the behavior the agents are part of :param action: ActionTuple tuple of continuous and/or discrete action. Actions are np.arrays with dimensions (n_agents, continuous_size) and (n_agents, discrete_size), respectively. """ @abstractmethod def set_action_for_agent( self, behavior_name: BehaviorName, agent_id: AgentId, action: ActionTuple ) -> None: """ Sets the action for one of the agents in the simulation for the next step. :param behavior_name: The name of the behavior the agent is part of :param agent_id: The id of the agent the action is set for :param action: ActionTuple tuple of continuous and/or discrete action Actions are np.arrays with dimensions (1, continuous_size) and (1, discrete_size), respectively. Note, this initial dimensions of 1 is because this action is meant for a single agent. """ @abstractmethod def get_steps( self, behavior_name: BehaviorName ) -> Tuple[DecisionSteps, TerminalSteps]: """ Retrieves the steps of the agents that requested a step in the simulation. :param behavior_name: The name of the behavior the agents are part of :return: A tuple containing : - A DecisionSteps NamedTuple containing the observations, the rewards, the agent ids and the action masks for the Agents of the specified behavior. These Agents need an action this step. - A TerminalSteps NamedTuple containing the observations, rewards, agent ids and interrupted flags of the agents that had their episode terminated last step. """
BaseEnv
python
pydantic__pydantic
tests/test_forward_ref.py
{ "start": 20446, "end": 20788 }
class ____(BaseModel): bar: Bar[str] | None = None # The `int | str` here differs from the previous test and requires the backport. # At the same time, `PydanticRecursiveRef.__or__` means that the second `|` works normally, # which actually triggered a bug in the backport that needed fixing. bar2: int | str | Bar[float]
Foo
python
run-llama__llama_index
llama-index-core/llama_index/core/indices/property_graph/sub_retrievers/cypher_template.py
{ "start": 468, "end": 3029 }
class ____(BasePGRetriever): """ A Cypher retriever that fills in params for a cypher query using an LLM. Args: graph_store (PropertyGraphStore): The graph store to retrieve data from. output_cls (Type[BaseModel]): The output class to use for the LLM. Should contain the params needed for the cypher query. cypher_query (str): The cypher query to use, with templated params. llm (Optional[LLM], optional): The language model to use. Defaults to Settings.llm. """ def __init__( self, graph_store: PropertyGraphStore, output_cls: Type[BaseModel], cypher_query: str, llm: Optional[LLM] = None, **kwargs: Any, ) -> None: if not graph_store.supports_structured_queries: raise ValueError( "The provided graph store does not support cypher queries." ) self.llm = llm or Settings.llm # Explicit type hint to suppress: # `Expected type '_SpecialForm[BaseModel]', got 'Type[BaseModel]' instead` self.output_cls: Type[BaseModel] = output_cls self.cypher_query = cypher_query super().__init__( graph_store=graph_store, include_text=False, include_properties=False ) def retrieve_from_graph(self, query_bundle: QueryBundle) -> List[NodeWithScore]: question = query_bundle.query_str response = self.llm.structured_predict( self.output_cls, PromptTemplate(question) ) cypher_response = self._graph_store.structured_query( self.cypher_query, param_map=response.model_dump(), ) return [ NodeWithScore( node=TextNode( text=str(cypher_response), ), score=1.0, ) ] async def aretrieve_from_graph( self, query_bundle: QueryBundle ) -> List[NodeWithScore]: question = query_bundle.query_str response = await self.llm.astructured_predict( self.output_cls, PromptTemplate(question) ) cypher_response = await self._graph_store.astructured_query( self.cypher_query, param_map=response.model_dump(), ) return [ NodeWithScore( node=TextNode( text=str(cypher_response), ), score=1.0, ) ]
CypherTemplateRetriever
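The retriever record above expects a caller-supplied `output_cls` (a Pydantic model whose fields match the `$params` in the templated Cypher) and passes `model_dump()` of the LLM's structured output as the query's `param_map`. The sketch below shows only that shape; the `PersonParams` fields and the Cypher text are invented, and actually constructing the retriever would additionally require a property graph store with structured-query support plus an LLM.

```python
from pydantic import BaseModel, Field


# Hypothetical parameter model whose fields line up with the $names placeholder
# in the templated query below.
class PersonParams(BaseModel):
    names: list[str] = Field(description="Person names mentioned in the question")


cypher_query = (
    "MATCH (p:Person)-[:WORKS_AT]->(c:Company) "
    "WHERE p.name IN $names "
    "RETURN p.name, c.name"
)

# The retriever fills the model via llm.structured_predict(...) and then sends
# model_dump() as param_map -- the same dict shape produced by hand here.
params = PersonParams(names=["Ada Lovelace"])
print(params.model_dump())  # {'names': ['Ada Lovelace']}
```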
python
sympy__sympy
sympy/polys/agca/modules.py
{ "start": 30214, "end": 33190 }
class ____(SubModule): """ Submodule of a quotient module. Equivalently, quotient module of a submodule. Do not instantiate this, instead use the submodule or quotient_module constructing methods: >>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(2) >>> S = F.submodule([1, 0], [1, x]) >>> Q = F/[(1, 0)] >>> S/[(1, 0)] == Q.submodule([5, x]) True Attributes: - base - base module we are quotient of - killed_module - submodule used to form the quotient """ def __init__(self, gens, container, **opts): SubModule.__init__(self, gens, container) self.killed_module = self.container.killed_module # XXX it is important for some code below that the generators of base # are in this particular order! self.base = self.container.base.submodule( *[x.data for x in self.gens], **opts).union(self.killed_module) def _contains(self, elem): return self.base.contains(elem.data) def _syzygies(self): # let N = self.killed_module be generated by e_1, ..., e_r # let F = self.base be generated by f_1, ..., f_s and e_1, ..., e_r # Then self = F/N. # Let phi: R**s --> self be the evident surjection. # Similarly psi: R**(s + r) --> F. # We need to find generators for ker(phi). Let chi: R**s --> F be the # evident lift of phi. For X in R**s, phi(X) = 0 iff chi(X) is # contained in N, iff there exists Y in R**r such that # psi(X, Y) = 0. # Hence if alpha: R**(s + r) --> R**s is the projection map, then # ker(phi) = alpha ker(psi). return [X[:len(self.gens)] for X in self.base._syzygies()] def _in_terms_of_generators(self, e): return self.base._in_terms_of_generators(e.data)[:len(self.gens)] def is_full_module(self): """ Return True if ``self`` is the entire free module. Examples ======== >>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(2) >>> F.submodule([x, 1]).is_full_module() False >>> F.submodule([1, 1], [1, 2]).is_full_module() True """ return self.base.is_full_module() def quotient_hom(self): """ Return the quotient homomorphism to self. That is, return the natural map from ``self.base`` to ``self``. Examples ======== >>> from sympy.abc import x >>> from sympy import QQ >>> M = (QQ.old_poly_ring(x).free_module(2) / [(1, x)]).submodule([1, 0]) >>> M.quotient_hom() Matrix([ [1, 0], : <[1, 0], [1, x]> -> <[1, 0] + <[1, x]>, [1, x] + <[1, x]>> [0, 1]]) """ return self.base.identity_hom().quotient_codomain(self.killed_module) _subs0 = lambda x: x[0] _subs1 = lambda x: x[1:]
SubQuotientModule
python
pytorch__pytorch
test/distributed/tensor/test_random_ops.py
{ "start": 1392, "end": 12154 }
class ____(DTensorTestBase): def _run_init_op(self, init_op, *args, **kwargs): device_mesh = self.build_device_mesh() shard_spec = [Shard(0)] input_size = (8, 4) # NOTE: currently random initialization on gpu device has different # behavior from other devices. Unify the test once the behavior is unified. if not is_rng_supported_mesh(device_mesh): input_tensor = torch.randn(*input_size, device=self.device_type) dtensor = DTensor.from_local(input_tensor, device_mesh, shard_spec) local_tensor_clone = torch.clone(input_tensor) torch.manual_seed(self.rank) local_tensor_clone = init_op(local_tensor_clone, *args, **kwargs) torch.manual_seed(self.rank) dtensor = init_op(dtensor, *args, **kwargs) self.assertEqual(local_tensor_clone, dtensor.to_local()) else: # create DTensor from Tensor _tensor = torch.empty(*input_size, device=self.device_type) dtensor = distribute_tensor(_tensor, device_mesh, [Shard(1)]) # DTensor random init dtensor = init_op(dtensor, *args, **kwargs) local_tensor = dtensor.to_local() # compare with local tensors from other ranks for other_rank in range(self.world_size): if self.rank != other_rank: slice_idx = ( slice(input_size[0]), slice( other_rank * input_size[1], (other_rank + 1) * input_size[1] ), ) # other rank should have a different local tensor self.assertNotEqual(dtensor.full_tensor()[slice_idx], local_tensor) @with_comms def test_init_ops(self): self._run_init_op( torch.nn.init.kaiming_uniform_, a=0, mode="fan_in", nonlinearity="leaky_relu", ) self._run_init_op(torch.nn.init.normal_, mean=1.5, std=0.8) self._run_init_op(torch.nn.init.uniform_, a=0, b=1.2) for dtype in (torch.float32, torch.float16): self._run_init_op(torch.rand_like, dtype=dtype) self._run_init_op(torch.randn_like, dtype=dtype) self._run_init_op(torch.randint_like, low=0, high=100, dtype=dtype) @with_comms @skip_if_lt_x_gpu(4) def test_init_with_user_generator(self): device_mesh = self.build_device_mesh() torch.manual_seed(42) rng = torch.Generator(device=self.device_type).manual_seed(42) t1 = torch.distributed.tensor.empty( (8, 3), device_mesh=device_mesh, placements=[Shard(0)] ) t2 = torch.distributed.tensor.empty( (8, 3), device_mesh=device_mesh, placements=[Shard(0)] ) for i in range(2): # run a second time, to make sure that `rng`'s offset-state is advancing on the second usage torch.nn.init.uniform_(t1, 0.0, 1.0) torch.nn.init.uniform_(t2, 0.0, 1.0, rng) self.assertEqual(t1.full_tensor(), t2.full_tensor(), f"Failed at {i=}") # ensure that we do not cache the 'seed' from the first time we see it in DTensor # this is a behavior change, DTensor used to cache the generator state and not modify the original generator, # now it modifies the original generator instead. torch.manual_seed(55) rng.manual_seed(55) torch.nn.init.uniform_(t1, 0.0, 1.0) torch.nn.init.uniform_(t2, 0.0, 1.0, rng) self.assertEqual(t1.full_tensor(), t2.full_tensor()) @with_comms @skip_if_lt_x_gpu(4) def test_meta_tensor_init(self): # test suite sets each rank's seed to the same value. # The DTensor random ops will use the same generator as the default one on the device. # Note: this behavior changed, and now the guideline is to set the same RNG seed on all SPMD ranks. 
torch.get_device_module(self.device_type).manual_seed(0) device_mesh = DeviceMesh(self.device_type, torch.arange(self.world_size)) size = [1024, 2048] meta_dtensor = distribute_tensor( torch.empty(*size, device="meta"), device_mesh, [Replicate()] ) # Test 1: enable the distribute region for RNG (by default) self.assertTrue(meta_dtensor.is_meta) # Tensor meta init dtensor = torch.empty_like(meta_dtensor, device=self.device_type) dtensor.uniform_() # check `distribute_region_enabled` is set to True by default self.assertTrue(random._rng_tracker.distribute_region_enabled) # allgather the local tensors gathered_local_tensors = funcol.all_gather_tensor( dtensor.to_local(), gather_dim=0, group=(device_mesh, 0) ) @maybe_run_for_local_tensor def compute_rankwise_if_local_tensor(gathered_local_tensors, rank): # the tensor slice on the current rank self_slice = slice(1024 * rank, 1024 * rank + 1024) # compare with local tensors from other ranks for other_rank in range(self.world_size): # the RNG result on each rank are the same because they're replicated if rank != other_rank: # other rank should have an identical local tensor other_slice = slice(1024 * other_rank, 1024 * other_rank + 1024) self.assertEqual( gathered_local_tensors[self_slice, :], gathered_local_tensors[other_slice, :], ) compute_rankwise_if_local_tensor(gathered_local_tensors.wait(), self.rank) # Test 2: disable the distribute region for RNG self.assertTrue(meta_dtensor.is_meta) # Tensor meta init dtensor = torch.empty_like(meta_dtensor, device=self.device_type) random._rng_tracker.distribute_region_enabled = False dtensor.uniform_() # check `distribute_region_enabled` is set to False self.assertTrue(not random._rng_tracker.distribute_region_enabled) # allgather the local tensors local_tensor = funcol.all_gather_tensor( dtensor.to_local(), gather_dim=0, group=(device_mesh, 0) ) compute_rankwise_if_local_tensor(local_tensor.wait(), self.rank) @with_comms @skip_unless_torch_gpu def test_tp_model_meta_init(self): # initialize the 1-d device mesh for TP tp_mesh = init_device_mesh(self.device_type, mesh_shape=(self.world_size,)) # model meta init with torch.device("meta"): model = torch.nn.Linear(self.world_size, self.world_size, bias=False) self.assertEqual(model.weight.device, torch.device("meta")) parallelize_module(model, tp_mesh, ColwiseParallel()) if random._rng_tracker is not None: random._rng_tracker.distribute_region_enabled = True self.assertEqual(model.weight.device, torch.device("meta")) # actual initialization device = torch.device( self.device_type, torch.get_device_module(self.device_type).current_device() ) model.to_empty(device=device) model.reset_parameters() self.assertTrue( random._rng_tracker is not None and isinstance(random._rng_tracker, OffsetBasedRNGTracker) ) self.assertEqual(model.weight.device, device) assert isinstance(model.weight, DTensor) # gather all the shards to compare initialization results WORLD = torch.distributed.group.WORLD assert WORLD is not None weight_local = model.weight.to_local() weight_gather = funcol.all_gather_tensor( weight_local, gather_dim=0, group=WORLD, ) @maybe_run_for_local_tensor def compute_rankwise_if_local_tensor(weight_local, weight_gather, rank): # verify the weights are initialized differently on all ranks for other_rank in range(self.world_size): if rank != other_rank: self.assertNotEqual( weight_local, weight_gather[other_rank : other_rank + 1, :], ) compute_rankwise_if_local_tensor(weight_local, weight_gather.wait(), self.rank) @with_comms @skip_if_lt_x_gpu(4) def 
test_fsdp_tp_model_meta_init(self): # initialize the 2-d device mesh global_mesh = init_device_mesh( self.device_type, mesh_shape=(self.world_size // 2, 2), mesh_dim_names=("dp", "tp"), ) dp_mesh, tp_mesh = global_mesh["dp"], global_mesh["tp"] # model meta init with torch.device("meta"): model = torch.nn.Linear(self.world_size, self.world_size, bias=False) self.assertEqual(model.weight.device, torch.device("meta")) parallelize_module(model, tp_mesh, ColwiseParallel()) if random._rng_tracker is not None: random._rng_tracker.distribute_region_enabled = True fully_shard(model, mesh=dp_mesh) self.assertEqual(model.weight.device, torch.device("meta")) # actual initialization device = torch.device( self.device_type, torch.get_device_module(self.device_type).current_device() ) model.to_empty(device=device) model.reset_parameters() self.assertTrue( random._rng_tracker is not None and isinstance(random._rng_tracker, OffsetBasedRNGTracker) ) self.assertEqual(model.weight.device, device) assert isinstance(model.weight, DTensor) # gather all the shards to compare initialization results WORLD = torch.distributed.group.WORLD assert WORLD is not None weight_local = model.weight.to_local() weight_gather = funcol.all_gather_tensor( weight_local, gather_dim=0, group=WORLD, ) @maybe_run_for_local_tensor def compute_rankwise_if_local_tensor(weight_local, weight_gather, rank): # verify the weights are initialized differently on all ranks for other_rank in range(self.world_size): if rank != other_rank: self.assertNotEqual( weight_local, weight_gather[other_rank : other_rank + 1, :], ) compute_rankwise_if_local_tensor(weight_local, weight_gather.wait(), self.rank)
DistTensorRandomInitTest
python
matplotlib__matplotlib
lib/matplotlib/offsetbox.py
{ "start": 38188, "end": 40181 }
class ____(OffsetBox): def __init__(self, arr, *, zoom=1, cmap=None, norm=None, interpolation=None, origin=None, filternorm=True, filterrad=4.0, resample=False, dpi_cor=True, **kwargs ): super().__init__() self._dpi_cor = dpi_cor self.image = BboxImage(bbox=self.get_window_extent, cmap=cmap, norm=norm, interpolation=interpolation, origin=origin, filternorm=filternorm, filterrad=filterrad, resample=resample, **kwargs ) self._children = [self.image] self.set_zoom(zoom) self.set_data(arr) def set_data(self, arr): self._data = np.asarray(arr) self.image.set_data(self._data) self.stale = True def get_data(self): return self._data def set_zoom(self, zoom): self._zoom = zoom self.stale = True def get_zoom(self): return self._zoom def get_offset(self): """Return offset of the container.""" return self._offset def get_children(self): return [self.image] def get_bbox(self, renderer): dpi_cor = renderer.points_to_pixels(1.) if self._dpi_cor else 1. zoom = self.get_zoom() data = self.get_data() ny, nx = data.shape[:2] w, h = dpi_cor * nx * zoom, dpi_cor * ny * zoom return Bbox.from_bounds(0, 0, w, h) def draw(self, renderer): # docstring inherited self.image.draw(renderer) # bbox_artist(self, renderer, fill=False, props=dict(pad=0.)) self.stale = False
OffsetImage
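The `OffsetImage` record above sizes itself from the array shape times `zoom` (with an optional DPI correction); in practice it is usually placed on an axes through an `AnnotationBbox`. A minimal off-screen usage sketch, using a random array as the image data:

```python
import numpy as np
import matplotlib

matplotlib.use("Agg")  # render off-screen, no display needed
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnnotationBbox, OffsetImage

fig, ax = plt.subplots()
arr = np.random.rand(16, 16)            # any 2-D or RGB(A) array works
imagebox = OffsetImage(arr, zoom=3)     # zoom scales the on-screen pixel size
ab = AnnotationBbox(imagebox, (0.5, 0.5), frameon=False)  # anchor at data coords
ax.add_artist(ab)
fig.savefig("offset_image_demo.png")
```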
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/final5.py
{ "start": 351, "end": 383 }
class ____: x: Final[int]
ClassB
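The sample above declares a `Final` instance attribute with no class-level value, which is the pattern PEP 591 allows when the single assignment happens in `__init__`. A minimal sketch of how that is meant to be used, and what a checker such as pyright rejects; the `Config` class and its field are invented for illustration, and the restriction is static only (the runtime does not enforce it):

```python
from typing import Final


class Config:
    retries: Final[int]  # declared Final, assigned exactly once below

    def __init__(self, retries: int) -> None:
        self.retries = retries  # the single permitted assignment


cfg = Config(3)
# cfg.retries = 5  # pyright error: cannot assign to a Final attribute
print(cfg.retries)
```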
python
anthropics__anthropic-sdk-python
src/anthropic/pagination.py
{ "start": 2276, "end": 2997 }
class ____(BaseSyncPage[_T], BasePage[_T], Generic[_T]): data: List[_T] has_more: Optional[bool] = None next_page: Optional[str] = None @override def _get_page_items(self) -> List[_T]: data = self.data if not data: return [] return data @override def has_next_page(self) -> bool: has_more = self.has_more if has_more is not None and has_more is False: return False return super().has_next_page() @override def next_page_info(self) -> Optional[PageInfo]: next_page = self.next_page if not next_page: return None return PageInfo(params={"page_token": next_page})
SyncTokenPage
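The page class above encodes a token-cursor protocol: each response carries `data`, an optional `has_more` flag, and an optional `next_page` token that is fed back as the `page_token` request parameter. A dependency-free sketch of that loop, where `fetch_page` and the canned responses are stand-ins for an HTTP call rather than part of any SDK:

```python
from typing import Any, Optional

# Canned responses keyed by the token sent with the request (None = first page).
FAKE_PAGES = {
    None: {"data": [1, 2], "has_more": True, "next_page": "tok-2"},
    "tok-2": {"data": [3], "has_more": False, "next_page": None},
}


def fetch_page(page_token: Optional[str]) -> dict[str, Any]:
    return FAKE_PAGES[page_token]


items: list[int] = []
token: Optional[str] = None
while True:
    page = fetch_page(token)
    items.extend(page["data"])
    token = page["next_page"]
    # Stop when the server says there is nothing more, or gives no token back.
    if page["has_more"] is False or not token:
        break

print(items)  # [1, 2, 3]
```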
python
Pylons__pyramid
tests/test_config/test_init.py
{ "start": 46597, "end": 47977 }
class ____(unittest.TestCase): def _makeOne(self): from pyramid.config import Configurator return Configurator() def test_factory_as_object(self): config = self._makeOne() def _fakeAction( discriminator, callable=None, args=(), kw=None, order=0, introspectables=(), **extra, ): self.assertEqual(len(introspectables), 1) self.assertEqual(introspectables[0]['name'], 'testing') self.assertEqual(introspectables[0]['factory'], DummyPredicate) config.action = _fakeAction config._add_predicate('route', 'testing', DummyPredicate) def test_factory_as_dotted_name(self): config = self._makeOne() def _fakeAction( discriminator, callable=None, args=(), kw=None, order=0, introspectables=(), **extra, ): self.assertEqual(len(introspectables), 1) self.assertEqual(introspectables[0]['name'], 'testing') self.assertEqual(introspectables[0]['factory'], DummyPredicate) config.action = _fakeAction config._add_predicate( 'route', 'testing', 'tests.test_config.test_init.DummyPredicate' )
TestConfigurator__add_predicate
python
getsentry__sentry
tests/sentry/issues/endpoints/test_organization_group_search_views.py
{ "start": 3257, "end": 8934 }
class ____(GroupSearchViewAPITestCase): endpoint = "sentry-api-0-organization-group-search-views" method = "get" def setUp(self) -> None: self.user_1 = self.user self.user_2 = self.create_user() self.create_member(organization=self.organization, user=self.user_2) # Create views for current user self.my_view_1 = self.create_view( user=self.user, name="My View 1", starred=False, filters={"query": "assigned:me is:unresolved"}, ) self.my_view_2 = self.create_view(user=self.user, name="My View 2", starred=True) self.my_view_3 = self.create_view( user=self.user, name="My View 3", starred=True, filters={"query": "assigned:me"} ) # Create views for another user self.other_view_1 = self.create_view(user=self.user_2, name="Other View 1", starred=False) self.other_view_2 = self.create_view(user=self.user_2, name="Other View 2", starred=True) # User 1 stars User 2's view self.star_view(self.user, self.other_view_2) def test_get_views_created_by_me(self) -> None: self.login_as(user=self.user) response = self.get_success_response(self.organization.slug, createdBy="me") # Should return views created by current user, ordered by name assert len(response.data) == 3 assert response.data[0]["id"] == str(self.my_view_2.id) assert response.data[0]["name"] == "My View 2" assert response.data[0]["stars"] == 1 assert response.data[0]["createdBy"]["id"] == str(self.user.id) assert response.data[0]["starred"] assert response.data[1]["id"] == str(self.my_view_3.id) assert response.data[1]["name"] == "My View 3" assert response.data[1]["stars"] == 1 assert response.data[1]["createdBy"]["id"] == str(self.user.id) assert response.data[1]["starred"] # View 1 should appear last since it's the only non-starred view assert response.data[2]["id"] == str(self.my_view_1.id) assert response.data[2]["name"] == "My View 1" assert response.data[2]["stars"] == 0 assert response.data[2]["createdBy"]["id"] == str(self.user.id) assert not response.data[2]["starred"] def test_get_views_created_by_others(self) -> None: self.login_as(user=self.user) response = self.get_success_response(self.organization.slug, createdBy="others") # Should return only organization-visible views created by other users assert len(response.data) == 2 # View 2 should appear first since it's starred view assert response.data[0]["id"] == str(self.other_view_2.id) assert response.data[0]["name"] == "Other View 2" assert response.data[0]["stars"] == 2 assert response.data[0]["createdBy"]["id"] == str(self.user_2.id) assert response.data[0]["starred"] # View 1 should appear last since it's not starred assert response.data[1]["id"] == str(self.other_view_1.id) assert response.data[1]["name"] == "Other View 1" assert response.data[1]["stars"] == 0 assert response.data[1]["createdBy"]["id"] == str(self.user_2.id) assert not response.data[1]["starred"] def test_invalid_created_by_value(self) -> None: self.login_as(user=self.user) response = self.get_error_response(self.organization.slug, createdBy="asdf") # Should return a validation error assert response.status_code == 400 assert "createdBy" in response.data def test_invalid_sort_value(self) -> None: self.login_as(user=self.user) response = self.get_error_response(self.organization.slug, sort="asdf") # Should return a validation error assert response.status_code == 400 assert "sort" in response.data def test_query_filter_by_name(self) -> None: self.login_as(user=self.user) response = self.get_success_response(self.organization.slug, query="View 2", createdBy="me") assert len(response.data) == 1 assert 
response.data[0]["id"] == str(self.my_view_2.id) assert response.data[0]["name"] == "My View 2" response = self.get_success_response( self.organization.slug, query="View 2", createdBy="others" ) assert len(response.data) == 1 assert response.data[0]["id"] == str(self.other_view_2.id) assert response.data[0]["name"] == "Other View 2" def test_query_filter_by_query(self) -> None: self.login_as(user=self.user) response = self.get_success_response(self.organization.slug, query="assigned:me") assert len(response.data) == 2 # View 3 is starred while View 1 is not, and thus View 3 should appear first assert response.data[0]["id"] == str(self.my_view_3.id) assert response.data[1]["id"] == str(self.my_view_1.id) assert "assigned:me" in response.data[0]["query"] def test_query_filter_case_insensitive(self) -> None: self.login_as(user=self.user) response = self.get_success_response(self.organization.slug, query="my view") assert len(response.data) == 3 assert "My View" in response.data[0]["name"] assert "My View" in response.data[1]["name"] assert "My View" in response.data[2]["name"] def test_query_filter_no_matches(self) -> None: self.login_as(user=self.user) response = self.get_success_response(self.organization.slug, query="capybara") assert len(response.data) == 0
OrganizationGroupSearchViewsGetTest
python
keon__algorithms
tests/test_dp.py
{ "start": 1718, "end": 1916 }
class ____(unittest.TestCase): def test_edit_distance(self): self.assertEqual(edit_distance('food', 'money'), 4) self.assertEqual(edit_distance('horse', 'ros'), 3)
TestEditDistance
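The tests above pin `edit_distance('food', 'money') == 4` and `edit_distance('horse', 'ros') == 3`. A minimal Levenshtein dynamic-programming function that reproduces those values; it is not necessarily the repo's implementation, just the standard insert/delete/substitute recurrence:

```python
def edit_distance(a: str, b: str) -> int:
    m, n = len(a), len(b)
    # dp[i][j] = distance between a[:i] and b[:j]
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i  # delete every remaining character of a
    for j in range(n + 1):
        dp[0][j] = j  # insert every remaining character of b
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if a[i - 1] == b[j - 1] else 1
            dp[i][j] = min(
                dp[i - 1][j] + 1,         # delete a[i-1]
                dp[i][j - 1] + 1,         # insert b[j-1]
                dp[i - 1][j - 1] + cost,  # substitute (or match for free)
            )
    return dp[m][n]


print(edit_distance("food", "money"))  # 4
print(edit_distance("horse", "ros"))   # 3
```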
python
huggingface__transformers
src/transformers/models/cohere2_vision/modular_cohere2_vision.py
{ "start": 12833, "end": 13737 }
class ____(ImagesKwargs, total=False): """ crop_to_patches (`bool`, *optional*, defaults to `False`): Whether to crop the image to patches. Can be overridden by the `crop_to_patches` parameter in the `preprocess` method. min_patches (`int`, *optional*, defaults to 1): The minimum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is set to `True`. Can be overridden by the `min_patches` parameter in the `preprocess` method. max_patches (`int`, *optional*, defaults to 12): The maximum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is set to `True`. Can be overridden by the `max_patches` parameter in the `preprocess` method. """ crop_to_patches: bool min_patches: int max_patches: int @auto_docstring
Cohere2VisionFastImageProcessorKwargs
python
run-llama__llama_index
llama-index-integrations/llms/llama-index-llms-helicone/llama_index/llms/helicone/base.py
{ "start": 560, "end": 4097 }
class ____(OpenAILike): """ Helicone (OpenAI-compatible) LLM. Route OpenAI-compatible requests through Helicone for observability and control. Authentication: - Set your Helicone API key via the `api_key` parameter or `HELICONE_API_KEY`. No OpenAI/third-party provider keys are required when using the AI Gateway. Examples: `pip install llama-index-llms-helicone` ```python from llama_index.llms.helicone import Helicone from llama_index.llms.openai_like.base import ChatMessage llm = Helicone( api_key="<helicone-api-key>", model="gpt-4o-mini", # works across providers ) message: ChatMessage = ChatMessage(role="user", content="Hello world!") response = helicone.chat(messages=[message]) print(str(response)) ``` """ model: str = Field( description=( "OpenAI-compatible model name routed via the Helicone AI Gateway. " "Learn more about [provider routing](https://docs.helicone.ai/gateway/provider-routing). " "All models are visible [here](https://www.helicone.ai/models)." ) ) api_base: Optional[str] = Field( default=DEFAULT_API_BASE, description=( "Base URL for the Helicone AI Gateway. Can also be set via the " "HELICONE_API_BASE environment variable. See the " "[Gateway overview](https://docs.helicone.ai/gateway/overview)." ), ) api_key: Optional[str] = Field( description=( "Helicone API key used to authorize requests (Authorization: Bearer). " "Provide directly or set via HELICONE_API_KEY. Generate your API key " "in the [dashboard settings](https://us.helicone.ai/settings/api-keys). " ), ) default_headers: Optional[Dict[str, str]] = Field( default=None, description=( "Additional HTTP headers to include with requests. The Helicone " "Authorization header is added automatically from api_key. See " "[custom properties](https://docs.helicone.ai/features/advanced-usage/custom-properties)/[headers](https://docs.helicone.ai/helicone-headers/header-directory)." ), ) def __init__( self, model: str = DEFAULT_MODEL, temperature: float = DEFAULT_TEMPERATURE, max_tokens: int = DEFAULT_NUM_OUTPUTS, additional_kwargs: Optional[Dict[str, Any]] = None, max_retries: int = 5, api_base: Optional[str] = DEFAULT_API_BASE, api_key: Optional[str] = None, default_headers: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> None: additional_kwargs = additional_kwargs or {} api_base = get_from_param_or_env("api_base", api_base, "HELICONE_API_BASE") api_key = get_from_param_or_env("api_key", api_key, "HELICONE_API_KEY") if default_headers: default_headers.update({"Authorization": f"Bearer {api_key}"}) else: default_headers = {"Authorization": f"Bearer {api_key}"} super().__init__( model=model, temperature=temperature, max_tokens=max_tokens, api_base=api_base, default_headers=default_headers, additional_kwargs=additional_kwargs, max_retries=max_retries, **kwargs, ) @classmethod def class_name(cls) -> str: return "Helicone_LLM"
Helicone
python
pydantic__pydantic
pydantic/_internal/_generate_schema.py
{ "start": 12361, "end": 12438 }
class ____(Exception): """The core schema is invalid."""
InvalidSchemaError
python
getsentry__sentry
src/sentry/integrations/bitbucket_server/client.py
{ "start": 818, "end": 3321 }
class ____(ApiClient): """ Client for making requests to Bitbucket Server to follow OAuth1 flow. """ request_token_url = "{}/plugins/servlet/oauth/request-token" access_token_url = "{}/plugins/servlet/oauth/access-token" authorize_url = "{}/plugins/servlet/oauth/authorize?oauth_token={}" integration_name = "bitbucket_server_setup" def __init__(self, base_url, consumer_key, private_key, verify_ssl=True, *args, **kwargs): super().__init__(*args, **kwargs) self.base_url = base_url self.consumer_key = consumer_key self.private_key = private_key self.verify_ssl = verify_ssl def get_request_token(self): """ Step 1 of the oauth flow. Get a request token that we can have the user verify. """ url = self.request_token_url.format(self.base_url) resp = self.post(url, allow_text=True) return dict(parse_qsl(resp.text)) def get_authorize_url(self, request_token): """ Step 2 of the oauth flow. Get a URL that the user can verify our request token at. """ return self.authorize_url.format(self.base_url, request_token["oauth_token"]) def get_access_token(self, request_token, verifier): """ Step 3 of the oauth flow. Use the verifier and request token from step 1 to get an access token. """ if not verifier: raise ApiError("Missing OAuth token verifier") auth = OAuth1( client_key=self.consumer_key, resource_owner_key=request_token["oauth_token"], resource_owner_secret=request_token["oauth_token_secret"], verifier=verifier, rsa_key=self.private_key, signature_method=SIGNATURE_RSA, signature_type="auth_header", decoding=None, ) url = self.access_token_url.format(self.base_url) resp = self.post(url, auth=auth, allow_text=True) return dict(parse_qsl(resp.text)) def request(self, *args, **kwargs): """ Add OAuth1 RSA signatures. """ if "auth" not in kwargs: kwargs["auth"] = OAuth1( client_key=self.consumer_key, rsa_key=self.private_key, signature_method=SIGNATURE_RSA, signature_type="auth_header", decoding=None, ) return self._request(*args, **kwargs)
BitbucketServerSetupClient
python
getsentry__sentry
src/sentry/search/events/fields.py
{ "start": 30593, "end": 31029 }
class ____(FunctionArg): date_format = "%Y-%m-%dT%H:%M:%S" def normalize(self, value: str, params: ParamsType, combinator: Combinator | None) -> str: try: datetime.strptime(value, self.date_format) except ValueError: raise InvalidFunctionArgument( f"{value} is in the wrong format, expected a date like 2020-03-14T15:14:15" ) return f"'{value}'"
DateArg
python
getsentry__sentry
src/sentry/preprod/api/endpoints/pull_request/organization_pullrequest_size_analysis_download.py
{ "start": 938, "end": 3700 }
class ____(OrganizationEndpoint): owner = ApiOwner.EMERGE_TOOLS publish_status = { "GET": ApiPublishStatus.EXPERIMENTAL, } def get( self, request: Request, organization: Organization, artifact_id: str ) -> HttpResponseBase: """ Download size analysis results for a preprod artifact ```````````````````````````````````````````````````` Download the size analysis results for a preprod artifact. This is separate from the ProjectPreprodArtifactSizeAnalysisDownloadEndpoint as PR page is not tied to a project. :pparam string organization_id_or_slug: the id or slug of the organization the artifact belongs to. :pparam string artifact_id: the ID of the preprod artifact to download size analysis for. :auth: required """ analytics.record( PreprodApiPrPageSizeAnalysisDownloadEvent( organization_id=organization.id, user_id=request.user.id, artifact_id=artifact_id, ) ) if not features.has("organizations:pr-page", organization, actor=request.user): return Response({"error": "Feature not enabled"}, status=403) try: artifact = PreprodArtifact.objects.get( id=int(artifact_id), project__organization_id=organization.id, ) except (PreprodArtifact.DoesNotExist, ValueError): raise PreprodArtifactResourceDoesNotExist try: size_metrics_qs = artifact.get_size_metrics() size_metrics_count = size_metrics_qs.count() if size_metrics_count == 0: return Response( {"error": "Size analysis results not available for this artifact"}, status=404, ) elif size_metrics_count > 1: return Response( {"error": "Multiple size analysis results found for this artifact"}, status=409, ) size_metrics = size_metrics_qs.first() if size_metrics is None or size_metrics.analysis_file_id is None: logger.info( "preprod.size_analysis.download.no_size_metrics", extra={"artifact_id": artifact_id}, ) return Response( {"error": "Size analysis not found"}, status=404, ) return get_size_analysis_file_response(size_metrics) except SizeAnalysisError as e: return get_size_analysis_error_response(e)
OrganizationPullRequestSizeAnalysisDownloadEndpoint
python
Pylons__pyramid
src/pyramid/config/predicates.py
{ "start": 2704, "end": 2932 }
class ____: def __init__(self, package, registry, settings, maybe_dotted): self.package = package self.registry = registry self.settings = settings self.maybe_dotted = maybe_dotted
PredicateInfo
python
sqlalchemy__sqlalchemy
test/dialect/postgresql/test_types.py
{ "start": 203552, "end": 203668 }
class ____( _DateTimeTZMultiRangeTests, _MultiRangeTypeCompilation ): pass
DateTimeTZMultiRangeCompilationTest
python
Textualize__textual
docs/examples/styles/keyline.py
{ "start": 121, "end": 488 }
class ____(App): CSS_PATH = "keyline.tcss" def compose(self) -> ComposeResult: with Grid(): yield Placeholder(id="foo") yield Placeholder(id="bar") yield Placeholder() yield Placeholder(classes="hidden") yield Placeholder(id="baz") if __name__ == "__main__": KeylineApp().run()
KeylineApp
python
google__pytype
pytype/pytd/visitors_test.py
{ "start": 40211, "end": 40517 }
class ____(unittest.TestCase): def test_any_replacement(self): union = pytd.UnionType((pytd.NamedType("a"), pytd.NamedType("b"))) self.assertEqual( union.Visit(visitors.ReplaceUnionsWithAny()), pytd.AnythingType() ) if __name__ == "__main__": unittest.main()
ReplaceUnionsWithAnyTest
python
pypa__hatch
tests/index/test_core.py
{ "start": 1447, "end": 2659 }
class ____: def test_default(self, mocker): mock = mocker.patch("httpx._transports.default.create_ssl_context") index = PackageIndex("https://foo.internal/a/b/") _ = index.client mock.assert_called_once_with(verify=True, cert=None, trust_env=True) def test_ca_cert(self, mocker): mock = mocker.patch("httpx._transports.default.create_ssl_context") index = PackageIndex("https://foo.internal/a/b/", ca_cert="foo") _ = index.client mock.assert_called_once_with(verify="foo", cert=None, trust_env=True) def test_client_cert(self, mocker): mock = mocker.patch("httpx._transports.default.create_ssl_context") index = PackageIndex("https://foo.internal/a/b/", client_cert="foo") _ = index.client mock.assert_called_once_with(verify=True, cert="foo", trust_env=True) def test_client_cert_with_key(self, mocker): mock = mocker.patch("httpx._transports.default.create_ssl_context") index = PackageIndex("https://foo.internal/a/b/", client_cert="foo", client_key="bar") _ = index.client mock.assert_called_once_with(verify=True, cert=("foo", "bar"), trust_env=True)
TestTLS
python
langchain-ai__langchain
libs/langchain/langchain_classic/memory/entity.py
{ "start": 2753, "end": 6395 }
class ____(BaseEntityStore): """Upstash Redis backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back. """ def __init__( self, session_id: str = "default", url: str = "", token: str = "", key_prefix: str = "memory_store", ttl: int | None = 60 * 60 * 24, recall_ttl: int | None = 60 * 60 * 24 * 3, *args: Any, **kwargs: Any, ): """Initializes the RedisEntityStore. Args: session_id: Unique identifier for the session. url: URL of the Redis server. token: Authentication token for the Redis server. key_prefix: Prefix for keys in the Redis store. ttl: Time-to-live for keys in seconds (default 1 day). recall_ttl: Time-to-live extension for keys when recalled (default 3 days). *args: Additional positional arguments. **kwargs: Additional keyword arguments. """ try: from upstash_redis import Redis except ImportError as e: msg = ( "Could not import upstash_redis python package. " "Please install it with `pip install upstash_redis`." ) raise ImportError(msg) from e super().__init__(*args, **kwargs) try: self.redis_client = Redis(url=url, token=token) except Exception as exc: error_msg = "Upstash Redis instance could not be initiated" logger.exception(error_msg) raise RuntimeError(error_msg) from exc self.session_id = session_id self.key_prefix = key_prefix self.ttl = ttl self.recall_ttl = recall_ttl or ttl @property def full_key_prefix(self) -> str: """Returns the full key prefix with session ID.""" return f"{self.key_prefix}:{self.session_id}" @override def get(self, key: str, default: str | None = None) -> str | None: res = ( self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) or default or "" ) logger.debug( "Upstash Redis MEM get '%s:%s': '%s'", self.full_key_prefix, key, res ) return res @override def set(self, key: str, value: str | None) -> None: if not value: return self.delete(key) self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) logger.debug( "Redis MEM set '%s:%s': '%s' EX %s", self.full_key_prefix, key, value, self.ttl, ) return None @override def delete(self, key: str) -> None: self.redis_client.delete(f"{self.full_key_prefix}:{key}") @override def exists(self, key: str) -> bool: return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 @override def clear(self) -> None: def scan_and_delete(cursor: int) -> int: cursor, keys_to_delete = self.redis_client.scan( cursor, f"{self.full_key_prefix}:*", ) self.redis_client.delete(*keys_to_delete) return cursor cursor = scan_and_delete(0) while cursor != 0: scan_and_delete(cursor) @deprecated( since="0.3.1", removal="1.0.0", message=( "Please see the migration guide at: " "https://python.langchain.com/docs/versions/migrating_memory/" ), )
UpstashRedisEntityStore
python
kamyu104__LeetCode-Solutions
Python/remove-interval.py
{ "start": 29, "end": 400 }
class ____(object): def removeInterval(self, intervals, toBeRemoved): """ :type intervals: List[List[int]] :type toBeRemoved: List[int] :rtype: List[List[int]] """ A, B = toBeRemoved return [[x, y] for a, b in intervals for x, y in ((a, min(A, b)), (max(a, B), b)) if x < y]
Solution
python
encode__django-rest-framework
tests/test_fields.py
{ "start": 34144, "end": 34747 }
class ____(FieldValues): """ Valid and invalid values for `IPAddressField` """ valid_inputs = { '2001:0db8:85a3:0042:1000:8a2e:0370:7334': '2001:db8:85a3:42:1000:8a2e:370:7334', '2001:cdba:0:0:0:0:3257:9652': '2001:cdba::3257:9652', '2001:cdba::3257:9652': '2001:cdba::3257:9652' } invalid_inputs = { '2001:::9652': ['Enter a valid IPv4 or IPv6 address.'], '2001:0db8:85a3:0042:1000:8a2e:0370:73341': ['Enter a valid IPv4 or IPv6 address.'], } outputs = {} field = serializers.IPAddressField(protocol='IPv6')
TestIPv6AddressField
python
protocolbuffers__protobuf
python/google/protobuf/internal/well_known_types_test.py
{ "start": 1942, "end": 26342 }
class ____(TimeUtilTestBase): def testTimestampSerializeAndParse(self): message = timestamp_pb2.Timestamp() # Generated output should contain 3, 6, or 9 fractional digits. message.seconds = 0 message.nanos = 0 self.CheckTimestampConversion(message, '1970-01-01T00:00:00Z') message.nanos = 10000000 self.CheckTimestampConversion(message, '1970-01-01T00:00:00.010Z') message.nanos = 10000 self.CheckTimestampConversion(message, '1970-01-01T00:00:00.000010Z') message.nanos = 10 self.CheckTimestampConversion(message, '1970-01-01T00:00:00.000000010Z') # Test min timestamps. message.seconds = -62135596800 message.nanos = 0 self.CheckTimestampConversion(message, '0001-01-01T00:00:00Z') # Test max timestamps. message.seconds = 253402300799 message.nanos = 999999999 self.CheckTimestampConversion(message, '9999-12-31T23:59:59.999999999Z') # Test negative timestamps. message.seconds = -1 self.CheckTimestampConversion(message, '1969-12-31T23:59:59.999999999Z') # Parsing accepts an fractional digits as long as they fit into nano # precision. message.FromJsonString('1970-01-01T00:00:00.1Z') self.assertEqual(0, message.seconds) self.assertEqual(100000000, message.nanos) # Parsing accepts offsets. message.FromJsonString('1970-01-01T00:00:00-08:00') self.assertEqual(8 * 3600, message.seconds) self.assertEqual(0, message.nanos) # It is not easy to check with current time. For test coverage only. message.GetCurrentTime() self.assertNotEqual(8 * 3600, message.seconds) def testDurationSerializeAndParse(self): message = duration_pb2.Duration() # Generated output should contain 3, 6, or 9 fractional digits. message.seconds = 0 message.nanos = 0 self.CheckDurationConversion(message, '0s') message.nanos = 10000000 self.CheckDurationConversion(message, '0.010s') message.nanos = 10000 self.CheckDurationConversion(message, '0.000010s') message.nanos = 10 self.CheckDurationConversion(message, '0.000000010s') # Test min and max message.seconds = 315576000000 message.nanos = 999999999 self.CheckDurationConversion(message, '315576000000.999999999s') message.seconds = -315576000000 message.nanos = -999999999 self.CheckDurationConversion(message, '-315576000000.999999999s') # Parsing accepts an fractional digits as long as they fit into nano # precision. 
message.FromJsonString('0.1s') self.assertEqual(100000000, message.nanos) message.FromJsonString('0.0000001s') self.assertEqual(100, message.nanos) def testTimestampIntegerConversion(self): message = timestamp_pb2.Timestamp() message.FromNanoseconds(1) self.assertEqual('1970-01-01T00:00:00.000000001Z', message.ToJsonString()) self.assertEqual(1, message.ToNanoseconds()) message.FromNanoseconds(-1) self.assertEqual('1969-12-31T23:59:59.999999999Z', message.ToJsonString()) self.assertEqual(-1, message.ToNanoseconds()) message.FromMicroseconds(1) self.assertEqual('1970-01-01T00:00:00.000001Z', message.ToJsonString()) self.assertEqual(1, message.ToMicroseconds()) message.FromMicroseconds(-1) self.assertEqual('1969-12-31T23:59:59.999999Z', message.ToJsonString()) self.assertEqual(-1, message.ToMicroseconds()) message.FromMilliseconds(1) self.assertEqual('1970-01-01T00:00:00.001Z', message.ToJsonString()) self.assertEqual(1, message.ToMilliseconds()) message.FromMilliseconds(-1) self.assertEqual('1969-12-31T23:59:59.999Z', message.ToJsonString()) self.assertEqual(-1, message.ToMilliseconds()) message.FromSeconds(1) self.assertEqual('1970-01-01T00:00:01Z', message.ToJsonString()) self.assertEqual(1, message.ToSeconds()) message.FromSeconds(-1) self.assertEqual('1969-12-31T23:59:59Z', message.ToJsonString()) self.assertEqual(-1, message.ToSeconds()) message.FromNanoseconds(1999) self.assertEqual(1, message.ToMicroseconds()) # For negative values, Timestamp will be rounded down. # For example, "1969-12-31T23:59:59.5Z" (i.e., -0.5s) rounded to seconds # will be "1969-12-31T23:59:59Z" (i.e., -1s) rather than # "1970-01-01T00:00:00Z" (i.e., 0s). message.FromNanoseconds(-1999) self.assertEqual(-2, message.ToMicroseconds()) def testDurationIntegerConversion(self): message = duration_pb2.Duration() message.FromNanoseconds(1) self.assertEqual('0.000000001s', message.ToJsonString()) self.assertEqual(1, message.ToNanoseconds()) message.FromNanoseconds(-1) self.assertEqual('-0.000000001s', message.ToJsonString()) self.assertEqual(-1, message.ToNanoseconds()) message.FromMicroseconds(1) self.assertEqual('0.000001s', message.ToJsonString()) self.assertEqual(1, message.ToMicroseconds()) message.FromMicroseconds(-1) self.assertEqual('-0.000001s', message.ToJsonString()) self.assertEqual(-1, message.ToMicroseconds()) message.FromMilliseconds(1) self.assertEqual('0.001s', message.ToJsonString()) self.assertEqual(1, message.ToMilliseconds()) message.FromMilliseconds(-1) self.assertEqual('-0.001s', message.ToJsonString()) self.assertEqual(-1, message.ToMilliseconds()) message.FromSeconds(1) self.assertEqual('1s', message.ToJsonString()) self.assertEqual(1, message.ToSeconds()) message.FromSeconds(-1) self.assertEqual('-1s', message.ToJsonString()) self.assertEqual(-1, message.ToSeconds()) # Test truncation behavior. message.FromNanoseconds(1999) self.assertEqual(1, message.ToMicroseconds()) # For negative values, Duration will be rounded towards 0. 
message.FromNanoseconds(-1999) self.assertEqual(-1, message.ToMicroseconds()) def testTimezoneNaiveDatetimeConversionNearEpoch(self): message = timestamp_pb2.Timestamp() naive_utc_epoch = datetime.datetime(1970, 1, 1) message.FromDatetime(naive_utc_epoch) self.assertEqual(0, message.seconds) self.assertEqual(0, message.nanos) self.assertEqual(naive_utc_epoch, message.ToDatetime()) naive_epoch_morning = datetime.datetime(1970, 1, 1, 8, 0, 0, 1) message.FromDatetime(naive_epoch_morning) self.assertEqual(8 * 3600, message.seconds) self.assertEqual(1000, message.nanos) self.assertEqual(naive_epoch_morning, message.ToDatetime()) message.FromMilliseconds(1999) self.assertEqual(1, message.seconds) self.assertEqual(999_000_000, message.nanos) self.assertEqual( datetime.datetime(1970, 1, 1, 0, 0, 1, 999000), message.ToDatetime() ) def testTimezoneNaiveDatetimeConversionWhereTimestampLosesPrecision(self): ts = timestamp_pb2.Timestamp() naive_future = datetime.datetime(2555, 2, 22, 1, 2, 3, 456789) # The float timestamp for this datetime does not represent the integer # millisecond value with full precision. self.assertNotEqual( naive_future.astimezone(datetime.timezone.utc), datetime.datetime.fromtimestamp( naive_future.timestamp(), datetime.timezone.utc ), ) # It still round-trips correctly. ts.FromDatetime(naive_future) self.assertEqual(naive_future, ts.ToDatetime()) def testTimezoneNaiveMaxDatetimeConversion(self): ts = timestamp_pb2.Timestamp() naive_max_datetime = datetime.datetime(9999, 12, 31, 23, 59, 59, 999999) ts.FromDatetime(naive_max_datetime) self.assertEqual(naive_max_datetime, ts.ToDatetime()) def testTimezoneNaiveMinDatetimeConversion(self): ts = timestamp_pb2.Timestamp() naive_min_datetime = datetime.datetime(1, 1, 1) ts.FromDatetime(naive_min_datetime) self.assertEqual(naive_min_datetime, ts.ToDatetime()) # Two hours after the Unix Epoch, around the world. @parameterized.named_parameters( ('London', [1970, 1, 1, 2], datetime.timezone.utc), ('Tokyo', [1970, 1, 1, 11], _TZ_JAPAN), ('LA', [1969, 12, 31, 18], _TZ_PACIFIC), ) def testTimezoneAwareDatetimeConversion(self, date_parts, tzinfo): original_datetime = datetime.datetime(*date_parts, tzinfo=tzinfo) # pylint:disable=g-tzinfo-datetime message = timestamp_pb2.Timestamp() message.FromDatetime(original_datetime) self.assertEqual(7200, message.seconds) self.assertEqual(0, message.nanos) # ToDatetime() with no parameters produces a naive UTC datetime, i.e. it not # only loses the original timezone information (e.g. US/Pacific) as it's # "normalised" to UTC, but also drops the information that the datetime # represents a UTC one. naive_datetime = message.ToDatetime() self.assertEqual(datetime.datetime(1970, 1, 1, 2), naive_datetime) self.assertIsNone(naive_datetime.tzinfo) self.assertNotEqual(original_datetime, naive_datetime) # not even for UTC! # In contrast, ToDatetime(tzinfo=) produces an aware datetime in the given # timezone. aware_datetime = message.ToDatetime(tzinfo=tzinfo) self.assertEqual(original_datetime, aware_datetime) self.assertEqual( datetime.datetime(1970, 1, 1, 2, tzinfo=datetime.timezone.utc), aware_datetime, ) self.assertEqual(tzinfo, aware_datetime.tzinfo) @unittest.skipIf( not has_zoneinfo, 'Versions without zoneinfo use a fixed-offset timezone that does not' ' demonstrate this problem.', ) def testDatetimeConversionWithDifferentUtcOffsetThanEpoch(self): # This timezone has a different UTC offset at this date than at the epoch. 
# The datetime returned by FromDatetime needs to have the correct offset # for the moment represented. tz = _TZ_PACIFIC dt = datetime.datetime(2016, 6, 26, tzinfo=tz) epoch_dt = datetime.datetime.fromtimestamp( 0, tz=datetime.timezone.utc ).astimezone(tz) self.assertNotEqual(dt.utcoffset(), epoch_dt.utcoffset()) ts = timestamp_pb2.Timestamp() ts.FromDatetime(dt) self.assertEqual(dt, ts.ToDatetime(tzinfo=dt.tzinfo)) def testTimezoneAwareDatetimeConversionWhereTimestampLosesPrecision(self): tz = _TZ_PACIFIC ts = timestamp_pb2.Timestamp() tz_aware_future = datetime.datetime(2555, 2, 22, 1, 2, 3, 456789, tzinfo=tz) # The float timestamp for this datetime does not represent the integer # millisecond value with full precision. self.assertNotEqual( tz_aware_future, datetime.datetime.fromtimestamp(tz_aware_future.timestamp(), tz), ) # It still round-trips correctly. ts.FromDatetime(tz_aware_future) self.assertEqual(tz_aware_future, ts.ToDatetime(tz)) def testTimezoneAwareMaxDatetimeConversion(self): ts = timestamp_pb2.Timestamp() tz_aware_max_datetime = datetime.datetime( 9999, 12, 31, 23, 59, 59, 999999, tzinfo=datetime.timezone.utc ) ts.FromDatetime(tz_aware_max_datetime) self.assertEqual( tz_aware_max_datetime, ts.ToDatetime(datetime.timezone.utc) ) def testTimezoneAwareMinDatetimeConversion(self): ts = timestamp_pb2.Timestamp() tz_aware_min_datetime = datetime.datetime( 1, 1, 1, tzinfo=datetime.timezone.utc ) ts.FromDatetime(tz_aware_min_datetime) self.assertEqual( tz_aware_min_datetime, ts.ToDatetime(datetime.timezone.utc) ) # Two hours after the Unix Epoch, around the world. @parameterized.named_parameters( ('London', [1970, 1, 1, 2], datetime.timezone.utc), ('Tokyo', [1970, 1, 1, 11], _TZ_JAPAN), ('LA', [1969, 12, 31, 18], _TZ_PACIFIC), ) def testTimestampAssignment(self, date_parts, tzinfo): original_datetime = datetime.datetime(*date_parts, tzinfo=tzinfo) # pylint:disable=g-tzinfo-datetime msg = well_known_types_test_pb2.WKTMessage() msg.optional_timestamp = original_datetime self.assertEqual(7200, msg.optional_timestamp.seconds) self.assertEqual(0, msg.optional_timestamp.nanos) # Two hours after the Unix Epoch, around the world. 
@parameterized.named_parameters( ('London', [1970, 1, 1, 2], datetime.timezone.utc), ('Tokyo', [1970, 1, 1, 11], _TZ_JAPAN), ('LA', [1969, 12, 31, 18], _TZ_PACIFIC), ) def testTimestampCreation(self, date_parts, tzinfo): original_datetime = datetime.datetime(*date_parts, tzinfo=tzinfo) # pylint:disable=g-tzinfo-datetime msg = well_known_types_test_pb2.WKTMessage( optional_timestamp=original_datetime ) self.assertEqual(7200, msg.optional_timestamp.seconds) self.assertEqual(0, msg.optional_timestamp.nanos) msg2 = well_known_types_test_pb2.WKTMessage( optional_timestamp=msg.optional_timestamp ) self.assertEqual(7200, msg2.optional_timestamp.seconds) self.assertEqual(0, msg2.optional_timestamp.nanos) @parameterized.named_parameters( ( 'tz_aware_min_dt', datetime.datetime(1, 1, 1, tzinfo=datetime.timezone.utc), datetime.timedelta(hours=9), -62135564400, 0, ), ( 'no_change', datetime.datetime(1970, 1, 1, 11, tzinfo=_TZ_JAPAN), datetime.timedelta(hours=0), 7200, 0, ), ) def testTimestampAdd(self, old_time, time_delta, expected_sec, expected_nano): msg = well_known_types_test_pb2.WKTMessage() msg.optional_timestamp = old_time # Timestamp + timedelta new_msg1 = well_known_types_test_pb2.WKTMessage() new_msg1.optional_timestamp = msg.optional_timestamp + time_delta self.assertEqual(expected_sec, new_msg1.optional_timestamp.seconds) self.assertEqual(expected_nano, new_msg1.optional_timestamp.nanos) # timedelta + Timestamp new_msg2 = well_known_types_test_pb2.WKTMessage() new_msg2.optional_timestamp = time_delta + msg.optional_timestamp self.assertEqual(expected_sec, new_msg2.optional_timestamp.seconds) self.assertEqual(expected_nano, new_msg2.optional_timestamp.nanos) # Timestamp + Duration msg.optional_duration.FromTimedelta(time_delta) new_msg3 = well_known_types_test_pb2.WKTMessage() new_msg3.optional_timestamp = msg.optional_timestamp + msg.optional_duration self.assertEqual(expected_sec, new_msg3.optional_timestamp.seconds) self.assertEqual(expected_nano, new_msg3.optional_timestamp.nanos) @parameterized.named_parameters( ( 'test1', datetime.datetime(999, 1, 1, tzinfo=datetime.timezone.utc), datetime.timedelta(hours=9), -30641792400, 0, ), ( 'no_change', datetime.datetime(1970, 1, 1, 11, tzinfo=_TZ_JAPAN), datetime.timedelta(hours=0), 7200, 0, ), ) def testTimestampSub(self, old_time, time_delta, expected_sec, expected_nano): msg = well_known_types_test_pb2.WKTMessage() msg.optional_timestamp = old_time # Timestamp - timedelta new_msg1 = well_known_types_test_pb2.WKTMessage() new_msg1.optional_timestamp = msg.optional_timestamp - time_delta self.assertEqual(expected_sec, new_msg1.optional_timestamp.seconds) self.assertEqual(expected_nano, new_msg1.optional_timestamp.nanos) # Timestamp - Duration msg.optional_duration = time_delta new_msg2 = well_known_types_test_pb2.WKTMessage() new_msg2.optional_timestamp = msg.optional_timestamp - msg.optional_duration self.assertEqual(expected_sec, new_msg2.optional_timestamp.seconds) self.assertEqual(expected_nano, new_msg2.optional_timestamp.nanos) result_msg = well_known_types_test_pb2.WKTMessage() result_msg.optional_timestamp = old_time - time_delta # Timestamp - Timestamp td = msg.optional_timestamp - result_msg.optional_timestamp self.assertEqual(time_delta, td) # Timestamp - datetime td1 = msg.optional_timestamp - result_msg.optional_timestamp.ToDatetime() self.assertEqual(time_delta, td1) # datetime - Timestamp td2 = msg.optional_timestamp.ToDatetime() - result_msg.optional_timestamp self.assertEqual(time_delta, td2) def testNanosOneSecond(self): tz 
= _TZ_PACIFIC ts = timestamp_pb2.Timestamp(nanos=1_000_000_000) self.assertRaisesRegex(ValueError, 'Timestamp is not valid', ts.ToDatetime) def testNanosNegativeOneSecond(self): ts = timestamp_pb2.Timestamp(nanos=-1_000_000_000) self.assertRaisesRegex(ValueError, 'Timestamp is not valid', ts.ToDatetime) def testTimedeltaConversion(self): message = duration_pb2.Duration() message.FromNanoseconds(1999999999) td = message.ToTimedelta() self.assertEqual(1, td.seconds) self.assertEqual(999999, td.microseconds) message.FromNanoseconds(-1999999999) td = message.ToTimedelta() self.assertEqual(-1, td.days) self.assertEqual(86398, td.seconds) self.assertEqual(1, td.microseconds) message.FromMicroseconds(-1) td = message.ToTimedelta() self.assertEqual(-1, td.days) self.assertEqual(86399, td.seconds) self.assertEqual(999999, td.microseconds) converted_message = duration_pb2.Duration() converted_message.FromTimedelta(td) self.assertEqual(message, converted_message) def testInvalidTimestamp(self): message = timestamp_pb2.Timestamp() self.assertRaisesRegex( ValueError, 'Failed to parse timestamp: missing valid timezone offset.', message.FromJsonString, '', ) self.assertRaisesRegex( ValueError, 'Failed to parse timestamp: invalid trailing data ' '1970-01-01T00:00:01Ztrail.', message.FromJsonString, '1970-01-01T00:00:01Ztrail', ) self.assertRaisesRegex( ValueError, "time data '10000-01-01T00:00:00' does not match" " format '%Y-%m-%dT%H:%M:%S'", message.FromJsonString, '10000-01-01T00:00:00.00Z', ) self.assertRaisesRegex( ValueError, 'nanos 0123456789012 more than 9 fractional digits.', message.FromJsonString, '1970-01-01T00:00:00.0123456789012Z', ) self.assertRaisesRegex( ValueError, r'Invalid timezone offset value: \+08.', message.FromJsonString, '1972-01-01T01:00:00.01+08', ) self.assertRaisesRegex( ValueError, 'year ', message.FromJsonString, '0000-01-01T00:00:00Z', ) message.seconds = 253402300800 self.assertRaisesRegex(ValueError, 'Timestamp is not valid', message.ToJsonString) self.assertRaisesRegex(ValueError, 'Timestamp is not valid', message.FromSeconds, -62135596801) msg = well_known_types_test_pb2.WKTMessage() with self.assertRaises((TypeError)): msg.optional_timestamp = 1 with self.assertRaises((TypeError)): msg2 = well_known_types_test_pb2.WKTMessage(optional_timestamp=1) with self.assertRaises(TypeError): msg.optional_timestamp + '' with self.assertRaises(TypeError): msg.optional_timestamp - 123 def testInvalidDuration(self): message = duration_pb2.Duration() self.assertRaisesRegex( ValueError, 'Duration must end with letter "s": 1.', message.FromJsonString, '1', ) self.assertRaisesRegex( ValueError, "Couldn't parse duration: 1...2s.", message.FromJsonString, '1...2s', ) text = '-315576000001.000000000s' self.assertRaisesRegex( ValueError, r'Duration is not valid\: Seconds -315576000001 must be in range' r' \[-315576000000\, 315576000000\].', message.FromJsonString, text, ) text = '315576000001.000000000s' self.assertRaisesRegex( ValueError, r'Duration is not valid\: Seconds 315576000001 must be in range' r' \[-315576000000\, 315576000000\].', message.FromJsonString, text, ) message.seconds = -315576000001 message.nanos = 0 self.assertRaisesRegex( ValueError, r'Duration is not valid\: Seconds -315576000001 must be in range' r' \[-315576000000\, 315576000000\].', message.ToJsonString, ) message.seconds = 0 message.nanos = 999999999 + 1 self.assertRaisesRegex( ValueError, r'Duration is not valid\: Nanos 1000000000 must be in range' r' \[-999999999\, 999999999\].', message.ToJsonString, ) 
message.seconds = -1 message.nanos = 1 self.assertRaisesRegex( ValueError, r'Duration is not valid\: Sign mismatch.', message.ToJsonString, ) msg = well_known_types_test_pb2.WKTMessage() with self.assertRaises((TypeError, AttributeError)): msg.optional_duration = 1 with self.assertRaises((TypeError, AttributeError)): msg2 = well_known_types_test_pb2.WKTMessage(optional_duration=1) with self.assertRaises(TypeError): msg.optional_duration + '' with self.assertRaises(TypeError): 123 - msg.optional_duration @parameterized.named_parameters( ('test1', -1999999, -1, -999999000), ('test2', 1999999, 1, 999999000) ) def testDurationAssignment(self, microseconds, expected_sec, expected_nano): message = well_known_types_test_pb2.WKTMessage() expected_td = datetime.timedelta(microseconds=microseconds) message.optional_duration = expected_td self.assertEqual(expected_td, message.optional_duration.ToTimedelta()) self.assertEqual(expected_sec, message.optional_duration.seconds) self.assertEqual(expected_nano, message.optional_duration.nanos) @parameterized.named_parameters( ('test1', -1999999, -1, -999999000), ('test2', 1999999, 1, 999999000) ) def testDurationCreation(self, microseconds, expected_sec, expected_nano): message = well_known_types_test_pb2.WKTMessage( optional_duration=datetime.timedelta(microseconds=microseconds) ) expected_td = datetime.timedelta(microseconds=microseconds) self.assertEqual(expected_td, message.optional_duration.ToTimedelta()) self.assertEqual(expected_sec, message.optional_duration.seconds) self.assertEqual(expected_nano, message.optional_duration.nanos) @parameterized.named_parameters( ( 'tz_aware_min_dt', datetime.datetime(1, 1, 1, tzinfo=datetime.timezone.utc), datetime.timedelta(hours=9), -62135564400, 0, ), ( 'no_change', datetime.datetime(1970, 1, 1, 11, tzinfo=_TZ_JAPAN), datetime.timedelta(hours=0), 7200, 0, ), ) def testDurationAdd(self, old_time, time_delta, expected_sec, expected_nano): msg = well_known_types_test_pb2.WKTMessage() msg.optional_duration = time_delta msg.optional_timestamp = old_time # Duration + datetime msg1 = well_known_types_test_pb2.WKTMessage() msg1.optional_timestamp = msg.optional_duration + old_time self.assertEqual(expected_sec, msg1.optional_timestamp.seconds) self.assertEqual(expected_nano, msg1.optional_timestamp.nanos) # datetime + Duration msg2 = well_known_types_test_pb2.WKTMessage() msg2.optional_timestamp = old_time + msg.optional_duration self.assertEqual(expected_sec, msg2.optional_timestamp.seconds) self.assertEqual(expected_nano, msg2.optional_timestamp.nanos) # Duration + Timestamp msg3 = well_known_types_test_pb2.WKTMessage() msg3.optional_timestamp = msg.optional_duration + msg.optional_timestamp self.assertEqual(expected_sec, msg3.optional_timestamp.seconds) self.assertEqual(expected_nano, msg3.optional_timestamp.nanos) @parameterized.named_parameters( ( 'test1', datetime.datetime(999, 1, 1, tzinfo=datetime.timezone.utc), datetime.timedelta(hours=9), -30641792400, 0, ), ( 'no_change', datetime.datetime(1970, 1, 1, 11, tzinfo=_TZ_JAPAN), datetime.timedelta(hours=0), 7200, 0, ), ) def testDurationSub(self, old_time, time_delta, expected_sec, expected_nano): msg = well_known_types_test_pb2.WKTMessage() msg.optional_duration = time_delta # datetime - Duration msg.optional_timestamp = old_time - msg.optional_duration self.assertEqual(expected_sec, msg.optional_timestamp.seconds) self.assertEqual(expected_nano, msg.optional_timestamp.nanos) @testing_refleaks.TestCase
TimeUtilTest
python
apache__airflow
providers/microsoft/azure/src/airflow/providers/microsoft/azure/secrets/key_vault.py
{ "start": 1347, "end": 9051 }
class ____(BaseSecretsBackend, LoggingMixin): """ Retrieves Airflow Connections or Variables from Azure Key Vault secrets. The Azure Key Vault can be configured as a secrets backend in the ``airflow.cfg``: .. code-block:: ini [secrets] backend = airflow.providers.microsoft.azure.secrets.key_vault.AzureKeyVaultBackend backend_kwargs = {"connections_prefix": "airflow-connections", "vault_url": "<azure_key_vault_uri>"} For example, if the secrets prefix is ``airflow-connections-smtp-default``, this would be accessible if you provide ``{"connections_prefix": "airflow-connections"}`` and request conn_id ``smtp-default``. And if variables prefix is ``airflow-variables-hello``, this would be accessible if you provide ``{"variables_prefix": "airflow-variables"}`` and request variable key ``hello``. For client authentication, the ``DefaultAzureCredential`` from the Azure Python SDK is used as credential provider, which supports service principal, managed identity and user credentials For example, to specify a service principal with secret you can set the environment variables ``AZURE_TENANT_ID``, ``AZURE_CLIENT_ID`` and ``AZURE_CLIENT_SECRET``. .. seealso:: For more details on client authentication refer to the ``DefaultAzureCredential`` Class reference: https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity.defaultazurecredential?view=azure-python :param connections_prefix: Specifies the prefix of the secret to read to get Connections If set to None (null), requests for connections will not be sent to Azure Key Vault :param variables_prefix: Specifies the prefix of the secret to read to get Variables If set to None (null), requests for variables will not be sent to Azure Key Vault :param config_prefix: Specifies the prefix of the secret to read to get Variables. If set to None (null), requests for configurations will not be sent to Azure Key Vault :param vault_url: The URL of an Azure Key Vault to use :param sep: separator used to concatenate secret_prefix and secret_id. Default: "-" :param tenant_id: The tenant id of an Azure Key Vault to use. If not given, it falls back to ``DefaultAzureCredential`` :param client_id: The client id of an Azure Key Vault to use. If not given, it falls back to ``DefaultAzureCredential`` :param managed_identity_client_id: The client ID of a user-assigned managed identity. If provided with `workload_identity_tenant_id`, they'll pass to ``DefaultAzureCredential``. :param workload_identity_tenant_id: ID of the application's Microsoft Entra tenant. Also called its "directory" ID. If provided with `managed_identity_client_id`, they'll pass to ``DefaultAzureCredential``. 
""" def __init__( self, connections_prefix: str = "airflow-connections", variables_prefix: str = "airflow-variables", config_prefix: str = "airflow-config", vault_url: str = "", sep: str = "-", *, tenant_id: str = "", client_id: str = "", client_secret: str = "", managed_identity_client_id: str = "", workload_identity_tenant_id: str = "", **kwargs, ) -> None: super().__init__() self.vault_url = vault_url self.connections_prefix: str | None if connections_prefix: self.connections_prefix = connections_prefix.rstrip(sep) else: self.connections_prefix = connections_prefix self.variables_prefix: str | None if variables_prefix: self.variables_prefix = variables_prefix.rstrip(sep) else: self.variables_prefix = variables_prefix self.config_prefix: str | None if config_prefix: self.config_prefix = config_prefix.rstrip(sep) else: self.config_prefix = config_prefix logger = logging.getLogger("azure.core.pipeline.policies.http_logging_policy") try: logger.setLevel(os.environ.get("AZURE_HTTP_LOGGING_LEVEL", logging.WARNING)) except ValueError: logger.setLevel(logging.WARNING) self.sep = sep self.tenant_id = tenant_id self.client_id = client_id self.client_secret = client_secret self.managed_identity_client_id = managed_identity_client_id self.workload_identity_tenant_id = workload_identity_tenant_id self.kwargs = kwargs @cached_property def client(self) -> SecretClient: """Create a Azure Key Vault client.""" credential: ClientSecretCredential | DefaultAzureCredential if all([self.tenant_id, self.client_id, self.client_secret]): credential = ClientSecretCredential(self.tenant_id, self.client_id, self.client_secret) else: credential = get_sync_default_azure_credential( managed_identity_client_id=self.managed_identity_client_id, workload_identity_tenant_id=self.workload_identity_tenant_id, ) client = SecretClient(vault_url=self.vault_url, credential=credential, **self.kwargs) return client def get_conn_value(self, conn_id: str) -> str | None: """ Get a serialized representation of Airflow Connection from an Azure Key Vault secret. :param conn_id: The Airflow connection id to retrieve """ if self.connections_prefix is None: return None return self._get_secret(self.connections_prefix, conn_id) def get_variable(self, key: str) -> str | None: """ Get an Airflow Variable from an Azure Key Vault secret. :param key: Variable Key :return: Variable Value """ if self.variables_prefix is None: return None return self._get_secret(self.variables_prefix, key) def get_config(self, key: str) -> str | None: """ Get Airflow Configuration. :param key: Configuration Option Key :return: Configuration Option Value """ if self.config_prefix is None: return None return self._get_secret(self.config_prefix, key) @staticmethod def build_path(path_prefix: str, secret_id: str, sep: str = "-") -> str: """ Given a path_prefix and secret_id, build a valid secret name for the Azure Key Vault Backend. Also replaces underscore in the path with dashes to support easy switching between environment variables, so ``connection_default`` becomes ``connection-default``. :param path_prefix: The path prefix of the secret to retrieve :param secret_id: Name of the secret :param sep: Separator used to concatenate path_prefix and secret_id """ # When an empty prefix is given, do not add a separator to the secret name if path_prefix == "": path = f"{secret_id}" else: path = f"{path_prefix}{sep}{secret_id}" return path.replace("_", sep) def _get_secret(self, path_prefix: str, secret_id: str) -> str | None: """ Get an Azure Key Vault secret value. 
:param path_prefix: Prefix for the Path to get Secret :param secret_id: Secret Key """ name = self.build_path(path_prefix, secret_id, self.sep) try: secret = self.client.get_secret(name=name) return secret.value except ResourceNotFoundError as ex: self.log.debug("Secret %s not found: %s", name, ex) return None
AzureKeyVaultBackend
python
pydantic__pydantic
tests/benchmarks/test_attribute_access.py
{ "start": 116, "end": 260 }
class ____(BaseModel): model_config = ConfigDict(validate_assignment=True) inner_field1: str inner_field2: int
InnerValidateAssignment
python
dagster-io__dagster
examples/docs_projects/project_ml/src/project_ml/defs/assets/model_assets.py
{ "start": 13810, "end": 18139 }
class ____(dg.Config): """Configuration for model evaluation.""" batch_size: int = DEFAULT_BATCH_SIZE accuracy_threshold: float = ACCURACY_THRESHOLD # start_model_evaluation @dg.asset( description="Evaluate model performance on test set", group_name="model_pipeline", required_resource_keys={"model_storage"}, deps=["digit_classifier"], ) def model_evaluation( context, raw_mnist_data: dict[str, Any], config: ModelEvaluationConfig, ) -> dict[str, Any]: """Evaluate the trained model on the test set.""" model_store = context.resources.model_storage # Get the latest trained model try: saved_models = model_store.list_models() if not saved_models: context.log.error("No saved models found for evaluation") return { "test_accuracy": 0.0, "predictions": [], "labels": [], "classification_report": {}, } # Get the latest model name (first one is newest due to sorting) latest_model_name = saved_models[0] # Already just the model name context.log.info(f"Loading model for evaluation: {latest_model_name}") model_data = model_store.load_model(latest_model_name) if isinstance(model_data, dict) and "model" in model_data: model_to_evaluate = model_data["model"] else: model_to_evaluate = model_data # Direct model object # Log model metadata context.log.info("Model loaded successfully") context.log.info(f"Model architecture:\n{model_to_evaluate!s}") except Exception as e: context.log.error(f"Failed to load model for evaluation: {e!s}") context.log.error(f"Exception details: {e.__class__.__name__!s}") import traceback context.log.error(f"Traceback: {traceback.format_exc()}") return { "test_accuracy": 0.0, "predictions": [], "labels": [], "classification_report": {}, } test_data = raw_mnist_data["test_data"] test_labels = raw_mnist_data["test_labels"] device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model_to_evaluate.to(device) model_to_evaluate.eval() # Make predictions all_predictions = [] all_labels = [] test_dataset = TensorDataset(test_data, test_labels) test_loader = DataLoader(test_dataset, batch_size=config.batch_size, shuffle=False) with torch.no_grad(): for data, target in test_loader: _data, _target = data.to(device), target.to(device) outputs = model_to_evaluate(_data) _, predicted = torch.max(outputs.data, 1) all_predictions.extend(predicted.cpu().numpy()) all_labels.extend(_target.cpu().numpy()) all_predictions = np.array(all_predictions) all_labels = np.array(all_labels) # Calculate metrics test_accuracy = float(np.mean(all_predictions == all_labels)) # Create confusion matrix plot cm = confusion_matrix(all_labels, all_predictions) fig, ax = plt.subplots(figsize=(10, 8)) sns.heatmap(cm, annot=True, fmt="d", cmap="Blues", ax=ax) ax.set_title("Confusion Matrix - Test Set") ax.set_xlabel("Predicted") ax.set_ylabel("Actual") # Generate classification report class_report = classification_report(all_labels, all_predictions, output_dict=True) context.add_output_metadata( { "test_accuracy": test_accuracy, "test_samples": len(all_labels), "precision_macro": float(class_report["macro avg"]["precision"]), "recall_macro": float(class_report["macro avg"]["recall"]), "f1_macro": float(class_report["macro avg"]["f1-score"]), "evaluated_model_path": latest_model_name, }, output_name="result", ) context.log.info(f"Model evaluation completed. 
Test accuracy: {test_accuracy:.4f}") plt.close(fig) return { "test_accuracy": test_accuracy, "predictions": all_predictions.tolist(), "labels": all_labels.tolist(), "classification_report": class_report, "model_info": {"path": latest_model_name}, } # end_model_evaluation # start_deployment_config
ModelEvaluationConfig
python
matplotlib__matplotlib
lib/mpl_toolkits/axes_grid1/axes_divider.py
{ "start": 295, "end": 9416 }
class ____: """ An Axes positioning class. The divider is initialized with lists of horizontal and vertical sizes (:mod:`mpl_toolkits.axes_grid1.axes_size`) based on which a given rectangular area will be divided. The `new_locator` method then creates a callable object that can be used as the *axes_locator* of the axes. """ def __init__(self, fig, pos, horizontal, vertical, aspect=None, anchor="C"): """ Parameters ---------- fig : Figure pos : tuple of 4 floats Position of the rectangle that will be divided. horizontal : list of :mod:`~mpl_toolkits.axes_grid1.axes_size` Sizes for horizontal division. vertical : list of :mod:`~mpl_toolkits.axes_grid1.axes_size` Sizes for vertical division. aspect : bool, optional Whether overall rectangular area is reduced so that the relative part of the horizontal and vertical scales have the same scale. anchor : (float, float) or {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', \ 'NW', 'W'}, default: 'C' Placement of the reduced rectangle, when *aspect* is True. """ self._fig = fig self._pos = pos self._horizontal = horizontal self._vertical = vertical self._anchor = anchor self.set_anchor(anchor) self._aspect = aspect self._xrefindex = 0 self._yrefindex = 0 self._locator = None def get_horizontal_sizes(self, renderer): return np.array([s.get_size(renderer) for s in self.get_horizontal()]) def get_vertical_sizes(self, renderer): return np.array([s.get_size(renderer) for s in self.get_vertical()]) def set_position(self, pos): """ Set the position of the rectangle. Parameters ---------- pos : tuple of 4 floats position of the rectangle that will be divided """ self._pos = pos def get_position(self): """Return the position of the rectangle.""" return self._pos def set_anchor(self, anchor): """ Parameters ---------- anchor : (float, float) or {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', \ 'NW', 'W'} Either an (*x*, *y*) pair of relative coordinates (0 is left or bottom, 1 is right or top), 'C' (center), or a cardinal direction ('SW', southwest, is bottom left, etc.). See Also -------- .Axes.set_anchor """ if isinstance(anchor, str): _api.check_in_list(mtransforms.Bbox.coefs, anchor=anchor) elif not isinstance(anchor, (tuple, list)) or len(anchor) != 2: raise TypeError("anchor must be str or 2-tuple") self._anchor = anchor def get_anchor(self): """Return the anchor.""" return self._anchor def get_subplotspec(self): return None def set_horizontal(self, h): """ Parameters ---------- h : list of :mod:`~mpl_toolkits.axes_grid1.axes_size` sizes for horizontal division """ self._horizontal = h def get_horizontal(self): """Return horizontal sizes.""" return self._horizontal def set_vertical(self, v): """ Parameters ---------- v : list of :mod:`~mpl_toolkits.axes_grid1.axes_size` sizes for vertical division """ self._vertical = v def get_vertical(self): """Return vertical sizes.""" return self._vertical def set_aspect(self, aspect=False): """ Parameters ---------- aspect : bool """ self._aspect = aspect def get_aspect(self): """Return aspect.""" return self._aspect def set_locator(self, _locator): self._locator = _locator def get_locator(self): return self._locator def get_position_runtime(self, ax, renderer): if self._locator is None: return self.get_position() else: return self._locator(ax, renderer).bounds @staticmethod def _calc_k(sizes, total): # sizes is a (n, 2) array of (rel_size, abs_size); this method finds # the k factor such that sum(rel_size * k + abs_size) == total. 
rel_sum, abs_sum = sizes.sum(0) return (total - abs_sum) / rel_sum if rel_sum else 0 @staticmethod def _calc_offsets(sizes, k): # Apply k factors to (n, 2) sizes array of (rel_size, abs_size); return # the resulting cumulative offset positions. return np.cumsum([0, *(sizes @ [k, 1])]) def new_locator(self, nx, ny, nx1=None, ny1=None): """ Return an axes locator callable for the specified cell. Parameters ---------- nx, nx1 : int Integers specifying the column-position of the cell. When *nx1* is None, a single *nx*-th column is specified. Otherwise, location of columns spanning between *nx* to *nx1* (but excluding *nx1*-th column) is specified. ny, ny1 : int Same as *nx* and *nx1*, but for row positions. """ if nx1 is None: nx1 = nx + 1 if ny1 is None: ny1 = ny + 1 # append_size("left") adds a new size at the beginning of the # horizontal size lists; this shift transforms e.g. # new_locator(nx=2, ...) into effectively new_locator(nx=3, ...). To # take that into account, instead of recording nx, we record # nx-self._xrefindex, where _xrefindex is shifted by 1 by each # append_size("left"), and re-add self._xrefindex back to nx in # _locate, when the actual axes position is computed. Ditto for y. xref = self._xrefindex yref = self._yrefindex locator = functools.partial( self._locate, nx - xref, ny - yref, nx1 - xref, ny1 - yref) locator.get_subplotspec = self.get_subplotspec return locator def _locate(self, nx, ny, nx1, ny1, axes, renderer): """ Implementation of ``divider.new_locator().__call__``. The axes locator callable returned by ``new_locator()`` is created as a `functools.partial` of this method with *nx*, *ny*, *nx1*, and *ny1* specifying the requested cell. """ nx += self._xrefindex nx1 += self._xrefindex ny += self._yrefindex ny1 += self._yrefindex fig_w, fig_h = self._fig.bbox.size / self._fig.dpi x, y, w, h = self.get_position_runtime(axes, renderer) hsizes = self.get_horizontal_sizes(renderer) vsizes = self.get_vertical_sizes(renderer) k_h = self._calc_k(hsizes, fig_w * w) k_v = self._calc_k(vsizes, fig_h * h) if self.get_aspect(): k = min(k_h, k_v) ox = self._calc_offsets(hsizes, k) oy = self._calc_offsets(vsizes, k) ww = (ox[-1] - ox[0]) / fig_w hh = (oy[-1] - oy[0]) / fig_h pb = mtransforms.Bbox.from_bounds(x, y, w, h) pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh) x0, y0 = pb1.anchored(self.get_anchor(), pb).p0 else: ox = self._calc_offsets(hsizes, k_h) oy = self._calc_offsets(vsizes, k_v) x0, y0 = x, y if nx1 is None: nx1 = -1 if ny1 is None: ny1 = -1 x1, w1 = x0 + ox[nx] / fig_w, (ox[nx1] - ox[nx]) / fig_w y1, h1 = y0 + oy[ny] / fig_h, (oy[ny1] - oy[ny]) / fig_h return mtransforms.Bbox.from_bounds(x1, y1, w1, h1) def append_size(self, position, size): _api.check_in_list(["left", "right", "bottom", "top"], position=position) if position == "left": self._horizontal.insert(0, size) self._xrefindex += 1 elif position == "right": self._horizontal.append(size) elif position == "bottom": self._vertical.insert(0, size) self._yrefindex += 1 else: # 'top' self._vertical.append(size) def add_auto_adjustable_area(self, use_axes, pad=0.1, adjust_dirs=None): """ Add auto-adjustable padding around *use_axes* to take their decorations (title, labels, ticks, ticklabels) into account during layout. Parameters ---------- use_axes : `~matplotlib.axes.Axes` or list of `~matplotlib.axes.Axes` The Axes whose decorations are taken into account. pad : float, default: 0.1 Additional padding in inches. 
adjust_dirs : list of {"left", "right", "bottom", "top"}, optional The sides where padding is added; defaults to all four sides. """ if adjust_dirs is None: adjust_dirs = ["left", "right", "bottom", "top"] for d in adjust_dirs: self.append_size(d, Size._AxesDecorationsSize(use_axes, d) + pad)
Divider
python
kamyu104__LeetCode-Solutions
Python/append-characters-to-string-to-make-subsequence.py
{ "start": 52, "end": 414 }
class ____(object): def appendCharacters(self, s, t): """ :type s: str :type t: str :rtype: int """ i = -1 for j, c in enumerate(t): for i in xrange(i+1, len(s)): if s[i] == c: break else: return len(t)-j return 0
Solution
python
pandas-dev__pandas
asv_bench/benchmarks/series_methods.py
{ "start": 9204, "end": 9694 }
class ____: def setup(self): N = 1_000_000 self.ser = Series( np.random.randn( N, ) ) def time_to_numpy(self): self.ser.to_numpy() def time_to_numpy_double_copy(self): self.ser.to_numpy(dtype="float64", copy=True) def time_to_numpy_copy(self): self.ser.to_numpy(copy=True) def time_to_numpy_float_with_nan(self): self.ser.to_numpy(dtype="float64", na_value=np.nan)
ToNumpy
python
pytorch__pytorch
torch/_dynamo/exc.py
{ "start": 4649, "end": 5542 }
class ____(TorchDynamoException): def __init__( self, msg: str, *, case_name: Optional[str] = None, real_stack: None | StackSummary = None, ) -> None: super().__init__(msg) if not real_stack: real_stack = torch._guards.TracingContext.extract_stack() self.real_stack = real_stack self.msg = msg self.category: Optional[str] = None self.add_to_stats() self.case_name: Optional[str] = case_name def remove_from_stats(self) -> None: assert self.category is not None counters[self.category][self.msg] -= 1 if counters[self.category][self.msg] <= 0: del counters[self.category][self.msg] def add_to_stats(self, category: str = "unimplemented") -> None: self.category = category counters[category][self.msg] += 1
Unsupported
python
pallets__werkzeug
src/werkzeug/exceptions.py
{ "start": 7774, "end": 7955 }
class ____(BadRequest): """Raised if something triggers a security error. This is otherwise exactly like a bad request error. .. versionadded:: 0.9 """
SecurityError
python
airbytehq__airbyte
airbyte-integrations/connectors/source-google-ads/source_google_ads/utils.py
{ "start": 9118, "end": 18567 }
class ____: """ The `RunAsThread` decorator is designed to run a generator function in a separate thread with a specified timeout. This is particularly useful when dealing with functions that involve potentially time-consuming operations, and you want to enforce a time limit for their execution. """ def __init__(self, timeout_minutes): """ :param timeout_minutes: The maximum allowed time (in minutes) for the generator function to idle. If the timeout is reached, a TimeoutError is raised. """ self._timeout_seconds = timeout_minutes * 60 def __call__(self, generator_func): @functools.wraps(generator_func) def wrapper(*args, **kwargs): """ The wrapper function sets up threading components, starts a separate thread to run the generator function. It uses events and a queue for communication and synchronization between the main thread and the thread running the generator function. """ # Event and Queue initialization write_event = threading.Event() exit_event = threading.Event() the_queue = queue.Queue() # Thread initialization and start thread = threading.Thread( target=self.target, args=(the_queue, write_event, exit_event, generator_func, args, kwargs), daemon=True ) thread.start() # Records the starting time for the timeout calculation. start_time = time.time() while thread.is_alive() or not the_queue.empty(): # The main thread waits for the `write_event` to be set or until the specified timeout. if the_queue.empty(): write_event.wait(self._timeout_seconds) try: # The main thread yields the result obtained from reading the queue. yield self.read(the_queue) # The timer is reset since a new result has been received, preventing the timeout from occurring. start_time = time.time() except queue.Empty: # If exit_event is set it means that the generator function in the thread has completed its execution. if exit_event.is_set(): break # Check if the timeout has been reached without new results. if time.time() - start_time > self._timeout_seconds: # The thread may continue to run for some time after reaching a timeout and even come to life and continue working. # That is why the exit event is set to signal the generator function to stop producing data. exit_event.set() raise TimeoutError(f"Method '{generator_func.__name__}' timed out after {self._timeout_seconds / 60.0} minutes") # The write event is cleared to reset it for the next iteration. write_event.clear() return wrapper def target(self, the_queue, write_event, exit_event, func, args, kwargs): """ This is a target function for the thread. It runs the actual generator function, writing its results to a queue. Exceptions raised during execution are also written to the queue. :param the_queue: A queue used for communication between the main thread and the thread running the generator function. :param write_event: An event signaling the availability of new data in the queue. :param exit_event: An event indicating whether the generator function should stop producing data due to a timeout. :param func: The generator function to be executed. :param args: Positional arguments for the generator function. :param kwargs: Keyword arguments for the generator function. :return: None """ try: for value in func(*args, **kwargs): # If the timeout has been reached we must stop producing any data if exit_event.is_set(): break self.write(the_queue, value, write_event) else: # Notify the main thread that the generator function has completed its execution. 
exit_event.set() # Notify the main thread (even if the generator didn't produce any data) to prevent waiting for no reason. if not write_event.is_set(): write_event.set() except Exception as e: self.write(the_queue, e, write_event) @staticmethod def write(the_queue, value, write_event): """ Puts a value into the queue and sets a write event to notify the main thread that new data is available. :param the_queue: A queue used for communication between the main thread and the thread running the generator function. :param value: The value to be put into the communication queue. This can be any type of data produced by the generator function, including results or exceptions. :param write_event: An event signaling the availability of new data in the queue. :return: None """ the_queue.put(value) write_event.set() @staticmethod def read(the_queue, timeout=0.001): """ Retrieves a value from the queue, handling the case where the value is an exception, and raising it. :param the_queue: A queue used for communication between the main thread and the thread running the generator function. :param timeout: A time in seconds to wait for a value to be available in the queue. If the timeout is reached and no new data is available, a `queue.Empty` exception is raised. :return: a value retrieved from the queue """ value = the_queue.get(block=True, timeout=timeout) if isinstance(value, Exception): raise value return value detached = RunAsThread def parse_dates(stream_slice): start_date = pendulum.parse(stream_slice["start_date"]) end_date = pendulum.parse(stream_slice["end_date"]) return start_date, end_date def chunk_date_range( start_date: str, end_date: str = None, conversion_window: int = 0, days_of_data_storage: int = None, time_zone=None, time_format="YYYY-MM-DD", slice_duration: pendulum.Duration = pendulum.duration(days=14), slice_step: pendulum.Duration = pendulum.duration(days=1), ) -> Iterable[Optional[MutableMapping[str, any]]]: """ Splits a date range into smaller chunks based on the provided parameters. Args: start_date (str): The beginning date of the range. end_date (str, optional): The ending date of the range. Defaults to today's date. conversion_window (int): Number of days to subtract from the start date. Defaults to 0. days_of_data_storage (int, optional): Maximum age of data that can be retrieved. Used to adjust the start date. time_zone: Time zone to be used for date parsing and today's date calculation. If not provided, the default time zone is used. time_format (str): Format to be used when returning dates. Defaults to 'YYYY-MM-DD'. slice_duration (pendulum.Duration): Duration of each chunk. Defaults to 14 days. slice_step (pendulum.Duration): Step size to move to the next chunk. Defaults to 1 day. Returns: Iterable[Optional[MutableMapping[str, any]]]: An iterable of dictionaries containing start and end dates for each chunk. If the adjusted start date is greater than the end date, returns a list with a None value. Notes: - If the difference between `end_date` and `start_date` is large (e.g., >= 1 month), processing all records might take a long time. - Tokens for fetching subsequent pages of data might expire after 2 hours, leading to potential errors. - The function adjusts the start date based on `days_of_data_storage` and `conversion_window` to adhere to certain data retrieval policies, such as Google Ads' policy of only retrieving data not older than a certain number of days. 
- The method returns `start_date` and `end_date` with a difference typically spanning 15 days to avoid token expiration issues. """ start_date = pendulum.parse(start_date, tz=time_zone) today = pendulum.today(tz=time_zone) end_date = pendulum.parse(end_date, tz=time_zone) if end_date else today # For some metrics we can only get data not older than N days, it is Google Ads policy if days_of_data_storage: start_date = max(start_date, pendulum.now(tz=time_zone).subtract(days=days_of_data_storage - conversion_window)) # As in to return some state when state in abnormal if start_date > end_date: return [None] # applying conversion window start_date = start_date.subtract(days=conversion_window) slice_start = start_date while slice_start <= end_date: slice_end = min(end_date, slice_start + slice_duration) yield { "start_date": slice_start.format(time_format), "end_date": slice_end.format(time_format), } slice_start = slice_end + slice_step @dataclass(repr=False, eq=False, frozen=True)
RunAsThread
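The docstrings above describe a producer thread that writes results to a queue while the consumer enforces an idle timeout via events. A minimal, self-contained sketch of that pattern, assuming nothing from the source repo (all names below are invented, and unlike RunAsThread it does not forward exceptions through the queue):

import queue
import threading
import time

def produce(q, stop):
    # Producer: push results onto the queue unless the consumer asked us to stop.
    for i in range(3):
        if stop.is_set():
            return
        time.sleep(0.1)  # simulate slow work
        q.put(i)
    q.put(StopIteration)  # sentinel: producer finished normally

def consume(timeout_seconds=1.0):
    # Consumer: re-yield results; raise TimeoutError if the producer idles too long.
    q = queue.Queue()
    stop = threading.Event()
    threading.Thread(target=produce, args=(q, stop), daemon=True).start()
    while True:
        try:
            item = q.get(timeout=timeout_seconds)
        except queue.Empty:
            stop.set()  # mirrors exit_event: tell the producer to stop producing
            raise TimeoutError("producer idled for too long")
        if item is StopIteration:
            return
        yield item

print(list(consume()))  # [0, 1, 2]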
python
pypa__setuptools
setuptools/build_meta.py
{ "start": 9899, "end": 17425 }
class ____(_ConfigSettingsTranslator): def _get_build_requires( self, config_settings: _ConfigSettings, requirements: list[str] ): sys.argv = [ *sys.argv[:1], *self._global_args(config_settings), "egg_info", ] try: with Distribution.patch(): self.run_setup() except SetupRequirementsError as e: requirements += e.specifiers return requirements def run_setup(self, setup_script: str = 'setup.py') -> None: # Note that we can reuse our build directory between calls # Correctness comes first, then optimization later __file__ = os.path.abspath(setup_script) __name__ = '__main__' with _open_setup_script(__file__) as f: code = f.read().replace(r'\r\n', r'\n') try: exec(code, locals()) except SystemExit as e: if e.code: raise # We ignore exit code indicating success SetuptoolsDeprecationWarning.emit( "Running `setup.py` directly as CLI tool is deprecated.", "Please avoid using `sys.exit(0)` or similar statements " "that don't fit in the paradigm of a configuration file.", see_url="https://blog.ganssle.io/articles/2021/10/" "setup-py-deprecated.html", ) def get_requires_for_build_wheel( self, config_settings: _ConfigSettings = None ) -> list[str]: return self._get_build_requires(config_settings, requirements=[]) def get_requires_for_build_sdist( self, config_settings: _ConfigSettings = None ) -> list[str]: return self._get_build_requires(config_settings, requirements=[]) def _bubble_up_info_directory( self, metadata_directory: StrPath, suffix: str ) -> str: """ PEP 517 requires that the .dist-info directory be placed in the metadata_directory. To comply, we MUST copy the directory to the root. Returns the basename of the info directory, e.g. `proj-0.0.0.dist-info`. """ info_dir = self._find_info_directory(metadata_directory, suffix) if not same_path(info_dir.parent, metadata_directory): shutil.move(str(info_dir), metadata_directory) # PEP 517 allow other files and dirs to exist in metadata_directory return info_dir.name def _find_info_directory(self, metadata_directory: StrPath, suffix: str) -> Path: for parent, dirs, _ in os.walk(metadata_directory): candidates = [f for f in dirs if f.endswith(suffix)] if len(candidates) != 0 or len(dirs) != 1: assert len(candidates) == 1, f"Multiple {suffix} directories found" return Path(parent, candidates[0]) msg = f"No {suffix} directory found in {metadata_directory}" raise errors.InternalError(msg) def prepare_metadata_for_build_wheel( self, metadata_directory: StrPath, config_settings: _ConfigSettings = None ) -> str: sys.argv = [ *sys.argv[:1], *self._global_args(config_settings), "dist_info", "--output-dir", str(metadata_directory), "--keep-egg-info", ] with no_install_setup_requires(): self.run_setup() self._bubble_up_info_directory(metadata_directory, ".egg-info") return self._bubble_up_info_directory(metadata_directory, ".dist-info") def _build_with_temp_dir( self, setup_command: Iterable[str], result_extension: str | tuple[str, ...], result_directory: StrPath, config_settings: _ConfigSettings, arbitrary_args: Iterable[str] = (), ): result_directory = os.path.abspath(result_directory) # Build in a temporary directory, then copy to the target. 
os.makedirs(result_directory, exist_ok=True) with tempfile.TemporaryDirectory( prefix=".tmp-", dir=result_directory ) as tmp_dist_dir: sys.argv = [ *sys.argv[:1], *self._global_args(config_settings), *setup_command, "--dist-dir", tmp_dist_dir, *arbitrary_args, ] with no_install_setup_requires(): self.run_setup() result_basename = _file_with_extension(tmp_dist_dir, result_extension) result_path = os.path.join(result_directory, result_basename) if os.path.exists(result_path): # os.rename will fail overwriting on non-Unix. os.remove(result_path) os.rename(os.path.join(tmp_dist_dir, result_basename), result_path) return result_basename def build_wheel( self, wheel_directory: StrPath, config_settings: _ConfigSettings = None, metadata_directory: StrPath | None = None, ) -> str: def _build(cmd: list[str]): with suppress_known_deprecation(): return self._build_with_temp_dir( cmd, '.whl', wheel_directory, config_settings, self._arbitrary_args(config_settings), ) if metadata_directory is None: return _build(['bdist_wheel']) try: return _build(['bdist_wheel', '--dist-info-dir', str(metadata_directory)]) except SystemExit as ex: # pragma: nocover # pypa/setuptools#4683 if "--dist-info-dir not recognized" not in str(ex): raise _IncompatibleBdistWheel.emit() return _build(['bdist_wheel']) def build_sdist( self, sdist_directory: StrPath, config_settings: _ConfigSettings = None ) -> str: return self._build_with_temp_dir( ['sdist', '--formats', 'gztar'], '.tar.gz', sdist_directory, config_settings ) def _get_dist_info_dir(self, metadata_directory: StrPath | None) -> str | None: if not metadata_directory: return None dist_info_candidates = list(Path(metadata_directory).glob("*.dist-info")) assert len(dist_info_candidates) <= 1 return str(dist_info_candidates[0]) if dist_info_candidates else None def build_editable( self, wheel_directory: StrPath, config_settings: _ConfigSettings = None, metadata_directory: StrPath | None = None, ) -> str: # XXX can or should we hide our editable_wheel command normally? info_dir = self._get_dist_info_dir(metadata_directory) opts = ["--dist-info-dir", info_dir] if info_dir else [] cmd = ["editable_wheel", *opts, *self._editable_args(config_settings)] with suppress_known_deprecation(): return self._build_with_temp_dir( cmd, ".whl", wheel_directory, config_settings ) def get_requires_for_build_editable( self, config_settings: _ConfigSettings = None ) -> list[str]: return self.get_requires_for_build_wheel(config_settings) def prepare_metadata_for_build_editable( self, metadata_directory: StrPath, config_settings: _ConfigSettings = None ) -> str: return self.prepare_metadata_for_build_wheel( metadata_directory, config_settings )
_BuildMetaBackend
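As a usage note: setuptools exposes the hooks implemented by this backend as module-level functions (the standard PEP 517 entry points). A hedged sketch of driving them directly, assuming it is run from a directory containing a setuptools project (output names and paths will vary):

import os
import tempfile

from setuptools import build_meta

build_deps = build_meta.get_requires_for_build_wheel()  # dynamically declared build deps, often []
with tempfile.TemporaryDirectory() as out_dir:
    wheel_name = build_meta.build_wheel(out_dir)  # returns the basename of the built wheel
    print(wheel_name, os.path.exists(os.path.join(out_dir, wheel_name)))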
python
kamyu104__LeetCode-Solutions
Python/count-good-nodes-in-binary-tree.py
{ "start": 221, "end": 749 }
class ____(object): def goodNodes(self, root): """ :type root: TreeNode :rtype: int """ result = 0 stk = [(root, root.val)] while stk: node, curr_max = stk.pop() if not node: continue curr_max = max(curr_max, node.val) result += int(curr_max <= node.val) stk.append((node.right, curr_max)) stk.append((node.left, curr_max)) return result # Time: O(n) # Space: O(h)
Solution
python
kamyu104__LeetCode-Solutions
Python/single-threaded-cpu.py
{ "start": 48, "end": 741 }
class ____(object): def getOrder(self, tasks): """ :type tasks: List[List[int]] :rtype: List[int] """ idx = range(len(tasks)) idx.sort(key=lambda x: tasks[x][0]) result, min_heap = [], [] i, time = 0, tasks[idx[0]][0] while i < len(idx) or min_heap: while i < len(idx) and tasks[idx[i]][0] <= time: heapq.heappush(min_heap, (tasks[idx[i]][1], idx[i])) i += 1 if not min_heap: time = tasks[idx[i]][0] continue t, j = heapq.heappop(min_heap) time += t result.append(j) return result
Solution
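The snippet above is Python 2 style: there range() returns a list, so idx.sort(...) works, whereas a Python 3 range object has no sort method. A hypothetical Python 3 adaptation of the same min-heap simulation, written as a standalone function (the name is illustrative, not from the source repo):

import heapq

def get_order(tasks):
    # Process tasks by enqueue time, always running the shortest available task next.
    idx = sorted(range(len(tasks)), key=lambda x: tasks[x][0])
    result, min_heap = [], []
    i, time = 0, tasks[idx[0]][0]
    while i < len(idx) or min_heap:
        while i < len(idx) and tasks[idx[i]][0] <= time:
            heapq.heappush(min_heap, (tasks[idx[i]][1], idx[i]))
            i += 1
        if not min_heap:
            time = tasks[idx[i]][0]
            continue
        t, j = heapq.heappop(min_heap)
        time += t
        result.append(j)
    return result

print(get_order([[1, 2], [2, 4], [3, 2], [4, 1]]))  # [0, 2, 3, 1]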
python
pytorch__pytorch
torch/testing/_internal/common_quantization.py
{ "start": 63979, "end": 64543 }
class ____(torch.nn.Module): def __init__(self, qengine): super().__init__() self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float) self.quant = QuantStub() self.dequant = DeQuantStub() def forward(self, x): x = self.quant(x) x = self.conv(x) x = self.dequant(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),)
AnnotatedConvTransposeModel
python
tornadoweb__tornado
tornado/test/websocket_test.py
{ "start": 24816, "end": 24938 }
class ____(UncompressedTestMixin): def get_server_compression_options(self): return {}
ServerOnlyCompressionTest
python
walkccc__LeetCode
solutions/410. Split Array Largest Sum/410.py
{ "start": 0, "end": 483 }
class ____: def splitArray(self, nums: list[int], k: int) -> int: prefix = list(itertools.accumulate(nums, initial=0)) @functools.lru_cache(None) def dp(i: int, k: int) -> int: """ Returns the minimum of the maximum sum to split the first i numbers into k groups. """ if k == 1: return prefix[i] return min(max(dp(j, k - 1), prefix[i] - prefix[j]) for j in range(k - 1, i)) return dp(len(nums), k)
Solution
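The dp() docstring above states the recurrence: the answer for the first i numbers split into k groups is the best choice over the last cut point j. The snippet also relies on itertools and functools being imported elsewhere in the source file; a self-contained check of the same recurrence, with illustrative names:

import functools
import itertools

def split_array(nums, k):
    prefix = list(itertools.accumulate(nums, initial=0))

    @functools.lru_cache(None)
    def dp(i, groups):
        if groups == 1:
            return prefix[i]
        return min(max(dp(j, groups - 1), prefix[i] - prefix[j])
                   for j in range(groups - 1, i))

    return dp(len(nums), k)

assert split_array([7, 2, 5, 10, 8], 2) == 18  # split as [7, 2, 5] and [10, 8]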
python
oauthlib__oauthlib
tests/openid/connect/core/endpoints/test_userinfo_endpoint.py
{ "start": 311, "end": 2522 }
class ____(TestCase): def setUp(self): self.claims = { "sub": "john", "fruit": "banana" } # Can't use MagicMock/wraps below. # Triggers error when endpoint copies to self.bearer.request_validator self.validator = RequestValidator() self.validator.validate_bearer_token = mock.Mock() self.validator.validate_bearer_token.side_effect = set_scopes_valid self.validator.get_userinfo_claims = mock.Mock() self.validator.get_userinfo_claims.return_value = self.claims self.endpoint = UserInfoEndpoint(self.validator) self.uri = 'should_not_matter' self.headers = { 'Authorization': 'Bearer eyJxx' } def test_userinfo_no_auth(self): self.endpoint.create_userinfo_response(self.uri) def test_userinfo_wrong_auth(self): self.headers['Authorization'] = 'Basic foifoifoi' self.endpoint.create_userinfo_response(self.uri, headers=self.headers) def test_userinfo_token_expired(self): self.validator.validate_bearer_token.return_value = False self.endpoint.create_userinfo_response(self.uri, headers=self.headers) def test_userinfo_token_no_openid_scope(self): def set_scopes_invalid(token, scopes, request): request.scopes = ["foo", "bar"] return True self.validator.validate_bearer_token.side_effect = set_scopes_invalid with self.assertRaises(errors.InsufficientScopeError) as context: self.endpoint.create_userinfo_response(self.uri) def test_userinfo_json_response(self): h, b, s = self.endpoint.create_userinfo_response(self.uri) self.assertEqual(s, 200) body_json = json.loads(b) self.assertEqual(self.claims, body_json) self.assertEqual("application/json", h['Content-Type']) def test_userinfo_jwt_response(self): self.validator.get_userinfo_claims.return_value = "eyJzzzzz" h, b, s = self.endpoint.create_userinfo_response(self.uri) self.assertEqual(s, 200) self.assertEqual(b, "eyJzzzzz") self.assertEqual("application/jwt", h['Content-Type'])
UserInfoEndpointTest
python
dagster-io__dagster
python_modules/dagster-pipes/dagster_pipes/__init__.py
{ "start": 3552, "end": 4041 }
class ____(TypedDict): type: "PipesMetadataType" raw_value: PipesMetadataRawValue # Infer the type from the raw value on the orchestration end PIPES_METADATA_TYPE_INFER = "__infer__" PipesMetadataType = Literal[ "__infer__", "text", "url", "path", "notebook", "json", "md", "float", "int", "bool", "dagster_run", "asset", "null", "table", "table_schema", "table_column_lineage", "timestamp", ]
PipesMetadataValue
python
sympy__sympy
sympy/stats/drv.py
{ "start": 9675, "end": 11994 }
class ____(DiscretePSpace, SinglePSpace): """ Discrete probability space over a single univariate variable """ is_real = True @property def set(self): return self.distribution.set @property def domain(self): return SingleDiscreteDomain(self.symbol, self.set) def sample(self, size=(), library='scipy', seed=None): """ Internal sample method. Returns dictionary mapping RandomSymbol to realization value. """ return {self.value: self.distribution.sample(size, library=library, seed=seed)} def compute_expectation(self, expr, rvs=None, evaluate=True, **kwargs): rvs = rvs or (self.value,) if self.value not in rvs: return expr expr = _sympify(expr) expr = expr.xreplace({rv: rv.symbol for rv in rvs}) x = self.value.symbol try: return self.distribution.expectation(expr, x, evaluate=evaluate, **kwargs) except NotImplementedError: return Sum(expr * self.pdf, (x, self.set.inf, self.set.sup), **kwargs) def compute_cdf(self, expr, **kwargs): if expr == self.value: x = Dummy("x", real=True) return Lambda(x, self.distribution.cdf(x, **kwargs)) else: raise NotImplementedError() def compute_density(self, expr, **kwargs): if expr == self.value: return self.distribution raise NotImplementedError() def compute_characteristic_function(self, expr, **kwargs): if expr == self.value: t = Dummy("t", real=True) return Lambda(t, self.distribution.characteristic_function(t, **kwargs)) else: raise NotImplementedError() def compute_moment_generating_function(self, expr, **kwargs): if expr == self.value: t = Dummy("t", real=True) return Lambda(t, self.distribution.moment_generating_function(t, **kwargs)) else: raise NotImplementedError() def compute_quantile(self, expr, **kwargs): if expr == self.value: p = Dummy("p", real=True) return Lambda(p, self.distribution.quantile(p, **kwargs)) else: raise NotImplementedError()
SingleDiscretePSpace
python
hynek__structlog
src/structlog/typing.py
{ "start": 2033, "end": 2730 }
class ____(Protocol): """ **Protocol:** A callable that transforms an `ExcInfo` into another datastructure. The result should be something that your renderer can work with, e.g., a ``str`` or a JSON-serializable ``dict``. Used by `structlog.processors.format_exc_info()` and `structlog.processors.ExceptionPrettyPrinter`. Args: exc_info: Is the exception tuple to format Returns: Anything that can be rendered by the last processor in your chain, for example, a string or a JSON-serializable structure. .. versionadded:: 22.1.0 """ def __call__(self, exc_info: ExcInfo) -> Any: ... @runtime_checkable
ExceptionTransformer
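The protocol above only requires a callable that maps a sys.exc_info()-style tuple to something a renderer can emit. A hypothetical transformer satisfying it (the function name is invented; it is not one of structlog's built-ins):

import sys
import traceback

def dict_exc_transformer(exc_info):
    # Turn (exc_type, exc_value, traceback) into a JSON-serializable dict.
    exc_type, exc_value, tb = exc_info
    return {
        "type": exc_type.__name__,
        "message": str(exc_value),
        "stack": traceback.format_exception(exc_type, exc_value, tb),
    }

try:
    1 / 0
except ZeroDivisionError:
    print(dict_exc_transformer(sys.exc_info()))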
python
tensorflow__tensorflow
tensorflow/python/module/module.py
{ "start": 1156, "end": 17024 }
class ____(autotrackable.AutoTrackable): """Base neural network module class. A module is a named container for `tf.Variable`s, other `tf.Module`s and functions which apply to user input. For example a dense layer in a neural network might be implemented as a `tf.Module`: >>> class Dense(tf.Module): ... def __init__(self, input_dim, output_size, name=None): ... super().__init__(name=name) ... self.w = tf.Variable( ... tf.random.normal([input_dim, output_size]), name='w') ... self.b = tf.Variable(tf.zeros([output_size]), name='b') ... def __call__(self, x): ... y = tf.matmul(x, self.w) + self.b ... return tf.nn.relu(y) You can use the Dense layer as you would expect: >>> d = Dense(input_dim=3, output_size=2) >>> d(tf.ones([1, 3])) <tf.Tensor: shape=(1, 2), dtype=float32, numpy=..., dtype=float32)> By subclassing `tf.Module` instead of `object` any `tf.Variable` or `tf.Module` instances assigned to object properties can be collected using the `variables`, `trainable_variables` or `submodules` property: >>> d.variables (<tf.Variable 'b:0' shape=(2,) dtype=float32, numpy=..., dtype=float32)>, <tf.Variable 'w:0' shape=(3, 2) dtype=float32, numpy=..., dtype=float32)>) Subclasses of `tf.Module` can also take advantage of the `_flatten` method which can be used to implement tracking of any other types. All `tf.Module` classes have an associated `tf.name_scope` which can be used to group operations in TensorBoard and create hierarchies for variable names which can help with debugging. We suggest using the name scope when creating nested submodules/parameters or for forward methods whose graph you might want to inspect in TensorBoard. You can enter the name scope explicitly using `with self.name_scope:` or you can annotate methods (apart from `__init__`) with `@tf.Module.with_name_scope`. >>> class MLP(tf.Module): ... def __init__(self, input_size, sizes, name=None): ... super().__init__(name=name) ... self.layers = [] ... with self.name_scope: ... for size in sizes: ... self.layers.append(Dense(input_dim=input_size, output_size=size)) ... input_size = size ... @tf.Module.with_name_scope ... def __call__(self, x): ... for layer in self.layers: ... x = layer(x) ... return x >>> module = MLP(input_size=5, sizes=[5, 5]) >>> module.variables (<tf.Variable 'mlp/b:0' shape=(5,) dtype=float32, numpy=..., dtype=float32)>, <tf.Variable 'mlp/w:0' shape=(5, 5) dtype=float32, numpy=..., dtype=float32)>, <tf.Variable 'mlp/b:0' shape=(5,) dtype=float32, numpy=..., dtype=float32)>, <tf.Variable 'mlp/w:0' shape=(5, 5) dtype=float32, numpy=..., dtype=float32)>) """ # AutoTrackable adds object attributes that users will not expect us to # include when flattening (these reference dependencies reachable via other # object attributes). _TF_MODULE_IGNORED_PROPERTIES = frozenset(( "_self_unconditional_checkpoint_dependencies", "_self_unconditional_dependency_names" )) def __init__(self, name=None): if name is None: name = camel_to_snake(type(self).__name__) else: if not valid_identifier(name): raise ValueError( "%r is not a valid module name. Module names must be valid Python " "identifiers (e.g. a valid class name)." % name) self._name = name if tf2.enabled(): with ops.name_scope_v2(name) as scope_name: self._name_scope = ops.name_scope_v2(scope_name) else: with ops.name_scope(name, skip_on_eager=False) as scope_name: self._scope_name = scope_name @property def name(self): """Returns the name of this module as passed or determined in the ctor. 
NOTE: This is not the same as the `self.name_scope.name` which includes parent module names. """ return self._name @property def name_scope(self): """Returns a `tf.name_scope` instance for this class.""" if tf2.enabled(): return self._name_scope else: # In TF1 name_scope is not re-entrant in eager so we cannot memoize it. return ops.name_scope(self._scope_name, skip_on_eager=False) @property def variables(self): """Sequence of variables owned by this module and its submodules. Note: this method uses reflection to find variables on the current instance and submodules. For performance reasons you may wish to cache the result of calling this method if you don't expect the return value to change. Returns: A sequence of variables for the current module (sorted by attribute name) followed by variables from all submodules recursively (breadth first). """ return tuple(self._flatten(predicate=_is_variable, expand_composites=True)) @property def trainable_variables(self): """Sequence of trainable variables owned by this module and its submodules. Note: this method uses reflection to find variables on the current instance and submodules. For performance reasons you may wish to cache the result of calling this method if you don't expect the return value to change. Returns: A sequence of variables for the current module (sorted by attribute name) followed by variables from all submodules recursively (breadth first). """ return tuple( self._flatten(predicate=_is_trainable_variable, expand_composites=True)) @property def non_trainable_variables(self): """Sequence of non-trainable variables owned by this module and its submodules. Note: this method uses reflection to find variables on the current instance and submodules. For performance reasons you may wish to cache the result of calling this method if you don't expect the return value to change. Returns: A sequence of variables for the current module (sorted by attribute name) followed by variables from all submodules recursively (breadth first). """ return tuple(self._flatten( predicate=_is_non_trainable_variable, expand_composites=True)) @property def submodules(self): """Sequence of all sub-modules. Submodules are modules which are properties of this module, or found as properties of modules which are properties of this module (and so on). >>> a = tf.Module() >>> b = tf.Module() >>> c = tf.Module() >>> a.b = b >>> b.c = c >>> list(a.submodules) == [b, c] True >>> list(b.submodules) == [c] True >>> list(c.submodules) == [] True Returns: A sequence of all submodules. """ return tuple(self._flatten(predicate=_is_module)) def _flatten(self, recursive=True, predicate=None, attribute_traversal_key=None, with_path=False, expand_composites=False): """Flattened attribute values in sorted order by attribute name. Modules are flattened by first walking their attributes in name order. Each attribute value is then flattened to find leaf values. If flatten is applied `recursive`ly and if the leaf is a `Module` it will also be flattened to find leaves. Finally every leaf value is optionally tested against the given `predicate` and finally yielded. 
``` class Foo(tf.Module): def __init__(self): super().__init__() self.x = [tf.constant('a'), tf.constant('b')] self.y = {'i': tf.constant('c'), 'j': tf.constant('d')} self.z = tf.constant('e') @property def tensors(self): return tuple(self._flatten(predicate=is_tensor, with_path=True)) foo = Foo() foo.tensors # ==> ((('x', 0), <tf.Tensor: ...'a'>), # (('x', 1), <tf.Tensor: ...'b'>), # (('y', 'i'), <tf.Tensor: ...'c'>), # (('y', 'j'), <tf.Tensor: ...'d'>), # (('z',), <tf.Tensor: ...'e'>)) ``` `attribute_traversal_key` controls the order object properties are visited. If not set objects are visited in ascending order by name. Args: recursive: Whether to recurse into child modules or not. predicate: (Optional) If set then only values matching predicate are yielded. A value of `None` (the default) means no items will be filtered. attribute_traversal_key: (Optional) Method to rekey object attributes before they are sorted. Contract is the same as `key` argument to builtin `sorted` and only applies to object properties. with_path: (Optional) Whether to include the path to the object as well as the object itself. If `with_path` is `True` then leaves will not be de-duplicated (e.g. if the same leaf instance is reachable via multiple modules then it will be yielded multiple times with different paths). expand_composites: If true, then composite tensors are expanded into their component tensors. Returns: Flat generator for leaves of the current module and optionally all submodules. """ if predicate is None: predicate = lambda _: True return _flatten_module( self, recursive=recursive, predicate=predicate, attributes_to_ignore=self._TF_MODULE_IGNORED_PROPERTIES, attribute_traversal_key=attribute_traversal_key, with_path=with_path, expand_composites=expand_composites) @classmethod def with_name_scope(cls, method): """Decorator to automatically enter the module name scope. >>> class MyModule(tf.Module): ... @tf.Module.with_name_scope ... def __call__(self, x): ... if not hasattr(self, 'w'): ... self.w = tf.Variable(tf.random.normal([x.shape[1], 3])) ... return tf.matmul(x, self.w) Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose names included the module name: >>> mod = MyModule() >>> mod(tf.ones([1, 2])) <tf.Tensor: shape=(1, 3), dtype=float32, numpy=..., dtype=float32)> >>> mod.w <tf.Variable 'my_module/Variable:0' shape=(2, 3) dtype=float32, numpy=..., dtype=float32)> Args: method: The method to wrap. Returns: The original method wrapped such that it enters the module's name scope. 
""" def method_with_name_scope(self, *args, **kwargs): with self.name_scope: return method(self, *args, **kwargs) return tf_decorator.make_decorator(method, method_with_name_scope) def _is_variable(obj): return isinstance(obj, variables.Variable) def _is_trainable_variable(obj): return _is_variable(obj) and getattr(obj, "trainable", False) def _is_non_trainable_variable(obj): return _is_variable(obj) and not getattr(obj, "trainable", False) def _is_module(obj): return isinstance(obj, Module) _CAMEL_TO_SNAKE_R = re.compile(r"((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))") _VALID_IDENTIFIER = re.compile(r"^[a-zA-Z_]([a-zA-Z0-9_])*$") def valid_identifier(name): return bool(_VALID_IDENTIFIER.match(name)) def camel_to_snake(value): return _CAMEL_TO_SNAKE_R.sub(r"_\1", value).lower() def _flatten_non_variable_composites_with_tuple_path(structure, path_prefix=()): """Flattens composite tensors with tuple path expect variables.""" for path, child in nest.flatten_with_tuple_paths(structure): if (isinstance(child, composite_tensor.CompositeTensor) and not _is_variable(child)): # pylint: disable=protected-access spec = child._type_spec yield from _flatten_non_variable_composites_with_tuple_path( spec._to_components(child), path_prefix + path + (spec.value_type.__name__,)) # pylint: enable=protected-access else: yield path_prefix + path, child def _flatten_module(module, recursive, predicate, attribute_traversal_key, attributes_to_ignore, with_path, expand_composites, module_path=(), seen=None, recursion_stack=None): """Implementation of `flatten`. Args: module: Current module to process. recursive: Whether to recurse into child modules or not. predicate: (Optional) If set then only values matching predicate are yielded. A value of `None` (the default) means no items will be filtered. attribute_traversal_key: (Optional) Method to rekey object attributes before they are sorted. Contract is the same as `key` argument to builtin `sorted` and only applies to object properties. attributes_to_ignore: object attributes to ignored. with_path: (Optional) Whether to include the path to the object as well as the object itself. If `with_path` is `True` then leaves will not be de-duplicated (e.g. if the same leaf instance is reachable via multiple modules then it will be yielded multiple times with different paths). expand_composites: If true, then composite tensors are expanded into their component tensors. module_path: The path to the current module as a tuple. seen: A set containing all leaf IDs seen so far. recursion_stack: A list containing all module IDs associated with the current call stack. Yields: Matched leaves with the optional corresponding paths of the current module and optionally all its submodules. """ module_id = id(module) if seen is None: seen = set([module_id]) module_dict = vars(module) submodules = [] if recursion_stack is None: recursion_stack = [] # When calling `_flatten_module` with `with_path=False`, the global lookup # table `seen` guarantees the uniqueness of the matched objects. # In the case of `with_path=True`, there might be multiple paths associated # with the same predicate, so we don't stop traversing according to `seen` # to make sure all these paths are returned. # When there are cycles connecting submodules, we break cycles by avoiding # following back edges (links pointing to a node in `recursion_stack`). 
if module_id in recursion_stack: recursive = False for key in sorted(module_dict, key=attribute_traversal_key): if key in attributes_to_ignore: continue prop = module_dict[key] try: if expand_composites: leaves = list(_flatten_non_variable_composites_with_tuple_path(prop)) else: leaves = nest.flatten_with_tuple_paths(prop) except Exception as cause: # pylint: disable=broad-except raise ValueError("Error processing property {!r} of {!r}".format( key, prop)) from cause for leaf_path, leaf in leaves: leaf_path = (key,) + leaf_path if not with_path: leaf_id = id(leaf) if leaf_id in seen: continue seen.add(leaf_id) if predicate(leaf): if with_path: yield module_path + leaf_path, leaf else: yield leaf if recursive and _is_module(leaf): # Walk direct properties first then recurse. submodules.append((module_path + leaf_path, leaf)) recursion_stack.append(module_id) for submodule_path, submodule in submodules: subvalues = _flatten_module( submodule, recursive=recursive, predicate=predicate, attribute_traversal_key=attribute_traversal_key, attributes_to_ignore=submodule._TF_MODULE_IGNORED_PROPERTIES, # pylint: disable=protected-access with_path=with_path, expand_composites=expand_composites, module_path=submodule_path, seen=seen, recursion_stack=recursion_stack) for subvalue in subvalues: # Predicate is already tested for these values. yield subvalue recursion_stack.pop()
Module
python
altair-viz__altair
altair/vegalite/v6/schema/_config.py
{ "start": 178949, "end": 179795 }
class ____(TypedDict, total=False): """ :class:`altair.MultiPolygon` ``TypedDict`` wrapper. Parameters ---------- coordinates type Specifies the type of GeoJSON object. bbox Bounding box of the coordinate range of the object's Geometries, Features, or Feature Collections. The value of the bbox member is an array of length 2*n where n is the number of dimensions represented in the contained geometries, with all axes of the most southwesterly point followed by all axes of the more northeasterly point. The axes order of a bbox follows the axes order of geometries. https://tools.ietf.org/html/rfc7946#section-5 """ coordinates: Sequence[Sequence[Sequence[Sequence[float]]]] type: Literal["MultiPolygon"] bbox: Sequence[float]
MultiPolygonKwds
python
django__django
tests/sites_framework/models.py
{ "start": 413, "end": 507 }
class ____(AbstractArticle): site = models.ForeignKey(Site, models.CASCADE)
ExclusiveArticle
python
Netflix__metaflow
metaflow/plugins/metadata_providers/service.py
{ "start": 552, "end": 609 }
class ____(object): RUN = 1 TASK = 2
HeartbeatTypes
python
pandas-dev__pandas
asv_bench/benchmarks/inference.py
{ "start": 2280, "end": 2677 }
class ____: # maybe_convert_objects depends _almost_ exclusively on _libs, but # does have some run-time imports from outside of _libs def setup(self): N = 10**5 data = list(range(N)) data[0] = NaT data = np.array(data) self.data = data def time_maybe_convert_objects(self): lib.maybe_convert_objects(self.data)
MaybeConvertObjects
python
tornadoweb__tornado
tornado/web.py
{ "start": 79199, "end": 80712 }
class ____(ReversibleRuleRouter): """Routing implementation used internally by `Application`. Provides a binding between `Application` and `RequestHandler`. This implementation extends `~.routing.ReversibleRuleRouter` in a couple of ways: * it allows to use `RequestHandler` subclasses as `~.routing.Rule` target and * it allows to use a list/tuple of rules as `~.routing.Rule` target. ``process_rule`` implementation will substitute this list with an appropriate `_ApplicationRouter` instance. """ def __init__( self, application: "Application", rules: Optional[_RuleList] = None ) -> None: assert isinstance(application, Application) self.application = application super().__init__(rules) def process_rule(self, rule: Rule) -> Rule: rule = super().process_rule(rule) if isinstance(rule.target, (list, tuple)): rule.target = _ApplicationRouter( self.application, rule.target # type: ignore ) return rule def get_target_delegate( self, target: Any, request: httputil.HTTPServerRequest, **target_params: Any ) -> Optional[httputil.HTTPMessageDelegate]: if isclass(target) and issubclass(target, RequestHandler): return self.application.get_handler_delegate( request, target, **target_params ) return super().get_target_delegate(target, request, **target_params)
_ApplicationRouter
python
falconry__falcon
tests/test_httpstatus.py
{ "start": 7037, "end": 9171 }
class ____: def test_data_is_set(self, body_client): res = body_client.simulate_get('/status') assert res.status == falcon.HTTP_745 assert res.status_code == 745 assert res.content == b'' def test_media_is_set(self, body_client): res = body_client.simulate_post('/status') assert res.status == falcon.HTTP_725 assert res.status_code == 725 assert res.content == b'' def test_body_is_set(self, body_client): res = body_client.simulate_put('/status') assert res.status == falcon.HTTP_719 assert res.status_code == 719 assert res.content == b'' @pytest.fixture() def custom_status_client(asgi, util): def client(status): class Resource: def on_get(self, req, resp): resp.content_type = falcon.MEDIA_TEXT resp.data = b'Hello, World!' resp.status = status app = util.create_app(asgi) app.add_route('/status', Resource()) return testing.TestClient(app) return client @pytest.mark.parametrize( 'status,expected_code', [ (http.HTTPStatus(200), 200), (http.HTTPStatus(202), 202), (http.HTTPStatus(403), 403), (http.HTTPStatus(500), 500), (http.HTTPStatus.OK, 200), (http.HTTPStatus.USE_PROXY, 305), (http.HTTPStatus.NOT_FOUND, 404), (http.HTTPStatus.NOT_IMPLEMENTED, 501), (200, 200), (307, 307), (500, 500), (702, 702), (b'200 OK', 200), (b'702 Emacs', 702), ], ) def test_non_string_status(custom_status_client, status, expected_code): client = custom_status_client(status) resp = client.simulate_get('/status') assert resp.text == 'Hello, World!' assert resp.status_code == expected_code def test_deprecated_body(): with pytest.raises(TypeError) as type_error: sts = HTTPStatus(falcon.HTTP_701, body='foo') assert 'unexpected keyword argument' in str(type_error.value) sts = HTTPStatus(falcon.HTTP_701, text='foo') assert sts.text == 'foo'
TestNoBodyWithStatus
python
Netflix__metaflow
metaflow/plugins/datatools/s3/s3.py
{ "start": 3288, "end": 3389 }
class ____(MetaflowException): headline = "Insufficient disk space"
MetaflowS3InsufficientDiskSpace
python
doocs__leetcode
solution/1700-1799/1713.Minimum Operations to Make a Subsequence/Solution.py
{ "start": 0, "end": 418 }
class ____: __slots__ = "n", "c" def __init__(self, n: int): self.n = n self.c = [0] * (n + 1) def update(self, x: int, v: int): while x <= self.n: self.c[x] = max(self.c[x], v) x += x & -x def query(self, x: int) -> int: res = 0 while x: res = max(res, self.c[x]) x -= x & -x return res
BinaryIndexedTree
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/roles.py
{ "start": 1395, "end": 1496 }
class ____(SQLRole): __slots__ = () _role_name = "Cacheable Core or ORM object"
HasCacheKeyRole
python
scikit-image__scikit-image
src/skimage/feature/util.py
{ "start": 127, "end": 421 }
class ____: def __init__(self): self.keypoints_ = np.array([]) def detect(self, image): """Detect keypoints in image. Parameters ---------- image : 2D array Input image. """ raise NotImplementedError()
FeatureDetector
python
jazzband__django-oauth-toolkit
oauth2_provider/migrations/0005_auto_20211222_2352.py
{ "start": 109, "end": 1665 }
class ____(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('oauth2_provider', '0004_auto_20200902_2022'), ] operations = [ migrations.AlterField( model_name='accesstoken', name='user', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='%(app_label)s_%(class)s', to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='application', name='user', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='%(app_label)s_%(class)s', to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='grant', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='%(app_label)s_%(class)s', to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='idtoken', name='user', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='%(app_label)s_%(class)s', to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='refreshtoken', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='%(app_label)s_%(class)s', to=settings.AUTH_USER_MODEL), ), ]
Migration
python
pydantic__pydantic
pydantic/fields.py
{ "start": 3108, "end": 3287 }
class ____(_FromFieldInfoInputs, total=False): """This class exists solely to add type checking for the `**kwargs` in `FieldInfo.__init__`.""" default: Any
_FieldInfoInputs
python
matplotlib__matplotlib
lib/matplotlib/table.py
{ "start": 1017, "end": 7800 }
class ____(Rectangle): """ A cell is a `.Rectangle` with some associated `.Text`. As a user, you'll most likely not creates cells yourself. Instead, you should use either the `~matplotlib.table.table` factory function or `.Table.add_cell`. """ PAD = 0.1 """Padding between text and rectangle.""" _edges = 'BRTL' _edge_aliases = {'open': '', 'closed': _edges, # default 'horizontal': 'BT', 'vertical': 'RL' } def __init__(self, xy, width, height, *, edgecolor='k', facecolor='w', fill=True, text='', loc='right', fontproperties=None, visible_edges='closed', ): """ Parameters ---------- xy : 2-tuple The position of the bottom left corner of the cell. width : float The cell width. height : float The cell height. edgecolor : :mpltype:`color`, default: 'k' The color of the cell border. facecolor : :mpltype:`color`, default: 'w' The cell facecolor. fill : bool, default: True Whether the cell background is filled. text : str, optional The cell text. loc : {'right', 'center', 'left'} The alignment of the text within the cell. fontproperties : dict, optional A dict defining the font properties of the text. Supported keys and values are the keyword arguments accepted by `.FontProperties`. visible_edges : {'closed', 'open', 'horizontal', 'vertical'} or \ substring of 'BRTL' The cell edges to be drawn with a line: a substring of 'BRTL' (bottom, right, top, left), or one of 'open' (no edges drawn), 'closed' (all edges drawn), 'horizontal' (bottom and top), 'vertical' (right and left). """ # Call base super().__init__(xy, width=width, height=height, fill=fill, edgecolor=edgecolor, facecolor=facecolor) self.set_clip_on(False) self.visible_edges = visible_edges # Create text object self._loc = loc self._text = Text(x=xy[0], y=xy[1], clip_on=False, text=text, fontproperties=fontproperties, horizontalalignment=loc, verticalalignment='center') def set_transform(self, t): super().set_transform(t) # the text does not get the transform! self.stale = True def set_figure(self, fig): super().set_figure(fig) self._text.set_figure(fig) def get_text(self): """Return the cell `.Text` instance.""" return self._text def set_fontsize(self, size): """Set the text fontsize.""" self._text.set_fontsize(size) self.stale = True def get_fontsize(self): """Return the cell fontsize.""" return self._text.get_fontsize() def auto_set_font_size(self, renderer): """Shrink font size until the text fits into the cell width.""" fontsize = self.get_fontsize() required = self.get_required_width(renderer) while fontsize > 1 and required > self.get_width(): fontsize -= 1 self.set_fontsize(fontsize) required = self.get_required_width(renderer) return fontsize @allow_rasterization def draw(self, renderer): if not self.get_visible(): return # draw the rectangle super().draw(renderer) # position the text self._set_text_position(renderer) self._text.draw(renderer) self.stale = False def _set_text_position(self, renderer): """Set text up so it is drawn in the right place.""" bbox = self.get_window_extent(renderer) # center vertically y = bbox.y0 + bbox.height / 2 # position horizontally loc = self._text.get_horizontalalignment() if loc == 'center': x = bbox.x0 + bbox.width / 2 elif loc == 'left': x = bbox.x0 + bbox.width * self.PAD else: # right. x = bbox.x0 + bbox.width * (1 - self.PAD) self._text.set_position((x, y)) def get_text_bounds(self, renderer): """ Return the text bounds as *(x, y, width, height)* in table coordinates. 
""" return (self._text.get_window_extent(renderer) .transformed(self.get_data_transform().inverted()) .bounds) def get_required_width(self, renderer): """Return the minimal required width for the cell.""" l, b, w, h = self.get_text_bounds(renderer) return w * (1.0 + (2.0 * self.PAD)) @_docstring.interpd def set_text_props(self, **kwargs): """ Update the text properties. Valid keyword arguments are: %(Text:kwdoc)s """ self._text._internal_update(kwargs) self.stale = True @property def visible_edges(self): """ The cell edges to be drawn with a line. Reading this property returns a substring of 'BRTL' (bottom, right, top, left'). When setting this property, you can use a substring of 'BRTL' or one of {'open', 'closed', 'horizontal', 'vertical'}. """ return self._visible_edges @visible_edges.setter def visible_edges(self, value): if value is None: self._visible_edges = self._edges elif value in self._edge_aliases: self._visible_edges = self._edge_aliases[value] else: if any(edge not in self._edges for edge in value): raise ValueError('Invalid edge param {}, must only be one of ' '{} or string of {}'.format( value, ", ".join(self._edge_aliases), ", ".join(self._edges))) self._visible_edges = value self.stale = True def get_path(self): """Return a `.Path` for the `.visible_edges`.""" codes = [Path.MOVETO] codes.extend( Path.LINETO if edge in self._visible_edges else Path.MOVETO for edge in self._edges) if Path.MOVETO not in codes[1:]: # All sides are visible codes[-1] = Path.CLOSEPOLY return Path( [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]], codes, readonly=True ) CustomCell = Cell # Backcompat. alias.
Cell
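Cells are normally created through the table factory rather than instantiated directly; a small sketch of the visible_edges behaviour documented above, using the public table API (requires matplotlib; the output file name is illustrative):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.axis("off")
tbl = ax.table(cellText=[["a", "b"], ["c", "d"]], loc="center")
tbl.auto_set_font_size(False)
for cell in tbl.get_celld().values():
    cell.visible_edges = "horizontal"  # draw only the bottom and top edges
    cell.set_fontsize(12)
fig.savefig("table_demo.png")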
python
pyenv__pyenv
plugins/python-build/scripts/add_miniconda.py
{ "start": 2580, "end": 2660 }
class ____(StrEnum): ANACONDA = "Anaconda" MINICONDA = "Miniconda"
TFlavor
python
ray-project__ray
python/ray/tune/execution/tune_controller.py
{ "start": 79008, "end": 80319 }
class ____: """Wraps around TrialExecutor class, intercepts API calls and warns users of restricted API access. This is meant to facilitate restricting the current API exposure of TrialExecutor by TrialScheduler. """ def __init__( self, trial_executor: "_FakeRayTrialExecutor", whitelist_attr: Optional[set] = None, ): self._trial_executor = trial_executor self._whitelist_attr = whitelist_attr or set() for attr in self._whitelist_attr: assert hasattr(self._trial_executor, attr) def __getattr__(self, attr): if attr not in self._whitelist_attr: if log_once("restrict_accessing_trial_executor"): logger.warning( f"You are trying to access {attr} interface of " f"TrialExecutor in TrialScheduler, which is being " f"restricted. If you believe it is reasonable for " f"your scheduler to access this TrialExecutor API, " f"please reach out to Ray team on GitHub. A more " f"strict API access pattern would be enforced " f"starting 1.12.0" ) return getattr(self._trial_executor, attr) @DeveloperAPI
_TrialExecutorWrapper
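The docstring above describes intercepting attribute access and warning when a non-whitelisted attribute is used. A generic, hypothetical illustration of that proxy pattern (names invented, not Ray code):

class WhitelistedProxy:
    def __init__(self, wrapped, allowed):
        self._wrapped = wrapped
        self._allowed = set(allowed)

    def __getattr__(self, name):
        # Only called for attributes not found on the proxy itself.
        if name not in self._allowed:
            print(f"warning: access to restricted attribute {name!r}")
        return getattr(self._wrapped, name)

class Executor:
    def start(self):
        return "started"

    def stop(self):
        return "stopped"

proxy = WhitelistedProxy(Executor(), allowed={"start"})
print(proxy.start())  # whitelisted, no warning
print(proxy.stop())   # still works, but a warning is printed first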
python
sympy__sympy
sympy/codegen/fnodes.py
{ "start": 3104, "end": 3863 }
class ____(Token): """ Represents a module in Fortran. Examples ======== >>> from sympy.codegen.fnodes import Module >>> from sympy import fcode >>> print(fcode(Module('signallib', ['implicit none'], []), source_format='free')) module signallib implicit none <BLANKLINE> contains <BLANKLINE> <BLANKLINE> end module """ __slots__ = _fields = ('name', 'declarations', 'definitions') defaults = {'declarations': Tuple()} _construct_name = String @classmethod def _construct_declarations(cls, args): args = [Str(arg) if isinstance(arg, str) else arg for arg in args] return CodeBlock(*args) _construct_definitions = staticmethod(lambda arg: CodeBlock(*arg))
Module
python
jackfrued__Python-100-Days
Day31-35/code/example17.py
{ "start": 331, "end": 563 }
class ____(): """Custom mixin class""" __slots__ = () def __setitem__(self, key, value): if key in self: raise KeyError(str(key) + ' already set') return super().__setitem__(key, value)
SetOnceMappingMixin
python
ansible__ansible
test/units/config/test_manager.py
{ "start": 854, "end": 1154 }
class ____(c.Mapping): def __init__(self, values: c.Mapping) -> None: self._values = values def __getitem__(self, key, /): return self._values[key] def __len__(self): return len(self._values) def __iter__(self): return iter(self._values)
CustomMapping
python
PyCQA__pylint
tests/functional/r/regression/regression_no_value_for_parameter.py
{ "start": 90, "end": 1320 }
class ____(Unknown): RENAMED_SECTIONS = { 'permissions': 'content' } def test(self): self.RENAMED_SECTIONS.items() #@ def items(self, sectname, raw=True): pass def func(*, key=None): return key def varargs_good(*parts): """All good""" return os.path.join(*parts) def varargs_no_expr(*parts): """False positives below this line""" ret = os.path.join(*parts) if ret: return ret print(os.path.join(*parts)) if os.path.join(*parts): print() elif os.path.join(*parts): print() while os.path.join(*parts): print() with os.path.join(*parts): # pylint:disable=not-context-manager print() return os.path.join(*parts) + os.path.join(*parts) - os.path.join(*parts) def kwargs_good(**kwargs): return func(**kwargs) def kwargs_no_expr(**kwargs): ret = func(**kwargs) if ret: return ret print(func(**kwargs)) if func(**kwargs): print() elif func(**kwargs): print() while func(**kwargs): print() with func(**kwargs): # pylint:disable=not-context-manager print() return func(**kwargs) + func(**kwargs) - func(**kwargs)
ConfigManager
python
celery__celery
t/unit/backends/test_database.py
{ "start": 8461, "end": 13394 }
class ____(): def setup_method(self): self.uri = 'sqlite:///' + DB_PATH self.app.conf.result_serializer = 'pickle' self.app.conf.result_extended = True @pytest.mark.parametrize( 'result_serializer, args, kwargs', [ ('pickle', (SomeClass(1), SomeClass(2)), {'foo': SomeClass(123)}), ('json', ['a', 'b'], {'foo': 'bar'}), ], ids=['using pickle', 'using json'] ) def test_store_result(self, result_serializer, args, kwargs): self.app.conf.result_serializer = result_serializer tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() request = Context(args=args, kwargs=kwargs, task='mytask', retries=2, hostname='celery@worker_1', delivery_info={'routing_key': 'celery'}) tb.store_result(tid, {'fizz': 'buzz'}, states.SUCCESS, request=request) meta = tb.get_task_meta(tid) assert meta['result'] == {'fizz': 'buzz'} assert meta['args'] == args assert meta['kwargs'] == kwargs assert meta['queue'] == 'celery' assert meta['name'] == 'mytask' assert meta['retries'] == 2 assert meta['worker'] == "celery@worker_1" @pytest.mark.parametrize( 'result_serializer, args, kwargs', [ ('pickle', (SomeClass(1), SomeClass(2)), {'foo': SomeClass(123)}), ('json', ['a', 'b'], {'foo': 'bar'}), ], ids=['using pickle', 'using json'] ) def test_store_none_result(self, result_serializer, args, kwargs): self.app.conf.result_serializer = result_serializer tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() request = Context(args=args, kwargs=kwargs, task='mytask', retries=2, hostname='celery@worker_1', delivery_info={'routing_key': 'celery'}) tb.store_result(tid, None, states.SUCCESS, request=request) meta = tb.get_task_meta(tid) assert meta['result'] is None assert meta['args'] == args assert meta['kwargs'] == kwargs assert meta['queue'] == 'celery' assert meta['name'] == 'mytask' assert meta['retries'] == 2 assert meta['worker'] == "celery@worker_1" @pytest.mark.parametrize( 'result_serializer, args, kwargs', [ ('pickle', (SomeClass(1), SomeClass(2)), {'foo': SomeClass(123)}), ('json', ['a', 'b'], {'foo': 'bar'}), ], ids=['using pickle', 'using json'] ) def test_get_result_meta(self, result_serializer, args, kwargs): self.app.conf.result_serializer = result_serializer tb = DatabaseBackend(self.uri, app=self.app) request = Context(args=args, kwargs=kwargs, task='mytask', retries=2, hostname='celery@worker_1', delivery_info={'routing_key': 'celery'}) meta = tb._get_result_meta(result={'fizz': 'buzz'}, state=states.SUCCESS, traceback=None, request=request, format_date=False, encode=True) assert meta['result'] == {'fizz': 'buzz'} assert tb.decode(meta['args']) == args assert tb.decode(meta['kwargs']) == kwargs assert meta['queue'] == 'celery' assert meta['name'] == 'mytask' assert meta['retries'] == 2 assert meta['worker'] == "celery@worker_1" @pytest.mark.parametrize( 'result_serializer, args, kwargs', [ ('pickle', (SomeClass(1), SomeClass(2)), {'foo': SomeClass(123)}), ('json', ['a', 'b'], {'foo': 'bar'}), ], ids=['using pickle', 'using json'] ) def test_get_result_meta_with_none(self, result_serializer, args, kwargs): self.app.conf.result_serializer = result_serializer tb = DatabaseBackend(self.uri, app=self.app) request = Context(args=args, kwargs=kwargs, task='mytask', retries=2, hostname='celery@worker_1', delivery_info={'routing_key': 'celery'}) meta = tb._get_result_meta(result=None, state=states.SUCCESS, traceback=None, request=request, format_date=False, encode=True) assert meta['result'] is None assert tb.decode(meta['args']) == args assert tb.decode(meta['kwargs']) == kwargs assert meta['queue'] == 'celery' assert 
meta['name'] == 'mytask' assert meta['retries'] == 2 assert meta['worker'] == "celery@worker_1"
test_DatabaseBackend_result_extended
python
PrefectHQ__prefect
src/prefect/client/schemas/objects.py
{ "start": 25164, "end": 25313 }
class ____(RunInput): """Represents a parameter input to a task run.""" input_type: Literal["parameter"] = "parameter" name: str
Parameter