Columns:
  language    stringclasses   (1 value)
  repo        stringclasses   (346 values)
  path        stringlengths   (6 to 201)
  class_span  dict
  source      stringlengths   (21 to 2.38M)
  target      stringlengths   (1 to 96)
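Each record below pairs a Python class body whose name has been masked with `____` (the `source` column) with the name to recover (the `target` column); `class_span` gives the character offsets of the class within the file at `path`. As a minimal sketch of how a record maps back to the original code, assuming each row is simply a dict with the columns listed above (the helper name `fill_mask` is illustrative, not part of the dataset):

```python
# Minimal sketch: restore the masked class name in one record.
# The record values are copied from the first row below.
record = {
    "language": "python",
    "repo": "streamlit__streamlit",
    "path": "lib/streamlit/type_util.py",
    "class_span": {"start": 1569, "end": 1634},
    "source": "class ____(Protocol): def __str__(self) -> str: ...",
    "target": "SupportsStr",
}

def fill_mask(record: dict) -> str:
    """Replace the ____ placeholder with the target class name."""
    return record["source"].replace("____", record["target"], 1)

print(fill_mask(record))
# class SupportsStr(Protocol): def __str__(self) -> str: ...
```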
python
streamlit__streamlit
lib/streamlit/type_util.py
{ "start": 1569, "end": 1634 }
class ____(Protocol): def __str__(self) -> str: ...
SupportsStr
python
run-llama__llama_index
llama-index-packs/llama-index-packs-multidoc-autoretrieval/llama_index/packs/multidoc_autoretrieval/base.py
{ "start": 810, "end": 1697 }
class ____(BaseRetriever): """ Index auto-retriever. Simple wrapper around VectorIndexAutoRetriever to convert text nodes to index nodes. """ def __init__(self, retriever: VectorIndexAutoRetriever): """Init params.""" self.retriever = retriever def _retrieve(self, query_bundle: QueryBundle): """Convert nodes to index node.""" retrieved_nodes = self.retriever.retrieve(query_bundle) new_retrieved_nodes = [] for retrieved_node in retrieved_nodes: index_id = retrieved_node.metadata["index_id"] index_node = IndexNode.from_text_node( retrieved_node.node, index_id=index_id ) new_retrieved_nodes.append( NodeWithScore(node=index_node, score=retrieved_node.score) ) return new_retrieved_nodes
IndexAutoRetriever
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_slots/SLOT002.py
{ "start": 455, "end": 519 }
class ____(namedtuple("foo", ["str", "int"]), Enum): pass
Good
python
great-expectations__great_expectations
great_expectations/core/batch_spec.py
{ "start": 5134, "end": 5179 }
class ____(PathBatchSpec): pass
S3BatchSpec
python
doocs__leetcode
solution/2400-2499/2428.Maximum Sum of an Hourglass/Solution.py
{ "start": 0, "end": 434 }
class ____: def maxSum(self, grid: List[List[int]]) -> int: m, n = len(grid), len(grid[0]) ans = 0 for i in range(1, m - 1): for j in range(1, n - 1): s = -grid[i][j - 1] - grid[i][j + 1] s += sum( grid[x][y] for x in range(i - 1, i + 2) for y in range(j - 1, j + 2) ) ans = max(ans, s) return ans
Solution
python
great-expectations__great_expectations
great_expectations/exceptions/exceptions.py
{ "start": 14742, "end": 14960 }
class ____(ValidationActionRegistryError): def __init__(self, action_type: str) -> None: super().__init__(message=f"Action of type {action_type} is already registered.")
ValidationActionAlreadyRegisteredError
python
dagster-io__dagster
python_modules/dagster/dagster/_config/traversal_context.py
{ "start": 550, "end": 2911 }
class ____(ContextData): def for_field_snap(self, field_snap: ConfigFieldSnap) -> "ValidationContext": check.inst_param(field_snap, "field_snap", ConfigFieldSnap) field_snap_name = check.not_none(field_snap.name) return ValidationContext( config_schema_snapshot=self.config_schema_snapshot, config_type_snap=self.config_schema_snapshot.get_config_snap(field_snap.type_key), stack=self.stack.for_field(field_snap_name), ) def for_array(self, index: int) -> "ValidationContext": check.int_param(index, "index") return ValidationContext( config_schema_snapshot=self.config_schema_snapshot, config_type_snap=self.config_schema_snapshot.get_config_snap( self.config_type_snap.inner_type_key ), stack=self.stack.for_array_index(index), ) def for_map_key(self, key: object) -> "ValidationContext": return ValidationContext( config_schema_snapshot=self.config_schema_snapshot, config_type_snap=self.config_schema_snapshot.get_config_snap( self.config_type_snap.key_type_key ), stack=self.stack.for_map_key(key), ) def for_map_value(self, key: object) -> "ValidationContext": return ValidationContext( config_schema_snapshot=self.config_schema_snapshot, config_type_snap=self.config_schema_snapshot.get_config_snap( self.config_type_snap.inner_type_key ), stack=self.stack.for_map_value(key), ) def for_new_config_type_key(self, config_type_key: str) -> "ValidationContext": check.str_param(config_type_key, "config_type_key") return ValidationContext( config_schema_snapshot=self.config_schema_snapshot, config_type_snap=self.config_schema_snapshot.get_config_snap(config_type_key), stack=self.stack, ) def for_nullable_inner_type(self) -> "ValidationContext": return ValidationContext( config_schema_snapshot=self.config_schema_snapshot, config_type_snap=self.config_schema_snapshot.get_config_snap( self.config_type_snap.inner_type_key ), stack=self.stack, ) @record
ValidationContext
python
huggingface__transformers
src/transformers/models/deepseek_v2/modeling_deepseek_v2.py
{ "start": 13466, "end": 18649 }
class ____(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: DeepseekV2Config, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = config.head_dim self.max_position_embeddings = config.max_position_embeddings self.q_lora_rank = config.q_lora_rank self.qk_rope_head_dim = config.qk_rope_head_dim self.kv_lora_rank = config.kv_lora_rank self.v_head_dim = config.v_head_dim self.qk_nope_head_dim = config.qk_nope_head_dim self.qk_head_dim = config.qk_nope_head_dim + config.qk_rope_head_dim self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.is_causal = True if self.q_lora_rank is None: self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.qk_head_dim, bias=False) else: self.q_a_proj = nn.Linear(self.hidden_size, config.q_lora_rank, bias=config.attention_bias) self.q_a_layernorm = DeepseekV2RMSNorm(config.q_lora_rank) self.q_b_proj = nn.Linear(config.q_lora_rank, self.num_heads * self.qk_head_dim, bias=False) self.kv_a_proj_with_mqa = nn.Linear( self.hidden_size, config.kv_lora_rank + config.qk_rope_head_dim, bias=config.attention_bias, ) self.kv_a_layernorm = DeepseekV2RMSNorm(config.kv_lora_rank) self.kv_b_proj = nn.Linear( config.kv_lora_rank, self.num_heads * (self.qk_head_dim - self.qk_rope_head_dim + self.v_head_dim), bias=False, ) self.o_proj = nn.Linear( self.num_heads * self.v_head_dim, self.hidden_size, bias=config.attention_bias, ) self.scaling = self.qk_head_dim ** (-0.5) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, position_ids: Optional[torch.Tensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: batch_size, seq_length = hidden_states.shape[:-1] query_shape = (batch_size, seq_length, -1, self.qk_head_dim) key_shape = (batch_size, seq_length, -1, self.qk_nope_head_dim + self.v_head_dim) if self.q_lora_rank is None: q = self.q_proj(hidden_states) else: q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states))) q = q.view(query_shape).transpose(1, 2) q_nope, q_pe = torch.split(q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1) compressed_kv = self.kv_a_proj_with_mqa(hidden_states) k_nope, k_pe = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1) k_nope = self.kv_b_proj(self.kv_a_layernorm(k_nope)).view(key_shape).transpose(1, 2) k_nope, value_states = torch.split(k_nope, [self.qk_nope_head_dim, self.v_head_dim], dim=-1) k_pe = k_pe.view(batch_size, 1, seq_length, self.qk_rope_head_dim) q_pe, k_pe = apply_rotary_emb(q_pe, k_pe, position_embeddings.to(q_pe.device)) k_pe = k_pe.expand(*k_nope.shape[:-1], -1) query_states = torch.cat((q_nope, q_pe), dim=-1) key_states = torch.cat((k_nope, k_pe), dim=-1) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) if self.config._attn_implementation == "flash_attention_2" and self.qk_head_dim != self.v_head_dim: value_states = 
F.pad(value_states, [0, self.qk_head_dim - self.v_head_dim]) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) if self.config._attn_implementation == "flash_attention_2" and self.qk_head_dim != self.v_head_dim: attn_output = attn_output[:, :, :, : self.v_head_dim] attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights
DeepseekV2Attention
python
getsentry__sentry
src/sentry/db/router.py
{ "start": 500, "end": 8827 }
class ____: """ Django database router for multi-region deployments. We support two configurations: - Monolith - all tables reside in the same database. - Siloed - tables for control and region are separated. Within Siloed there are two flavours: - simulated - If the application is configured with `control` and `default` connections, then we are in 'simulated' silo environment (like our testsuite). We'll also use simulated mode for the time period after the database is split but before the application instances are separated. - isolated - If there are no control/region connections we map the `default` connection to be the region/control database and assume the 'other' silo is inaccessible. """ __simulated_map = { SiloMode.MONOLITH: "default", SiloMode.REGION: "default", SiloMode.CONTROL: "control", } __is_simulated = False """Whether or not we're operating in a simulated silo environment""" contrib_models = { "django_admin_log", "django_content_type", "django_site", "django_session", "auth_user", "auth_group", "auth_permission", "auth_group_permissions", "auth_user_groups", "auth_user_user_permissions", } """ We use a bunch of django contrib models that don't have silo annotations. For now they are put in control silo. """ historical_silo_assignments = { "authidentity_duplicate": SiloMode.CONTROL, "authprovider_duplicate": SiloMode.CONTROL, "feedback_feedback": SiloMode.REGION, "releases_commit": SiloMode.REGION, "releases_commitfilechange": SiloMode.REGION, "sentry_actor": SiloMode.REGION, "sentry_alertruleactivations": SiloMode.REGION, "sentry_dashboardwidgetsnapshot": SiloMode.REGION, "sentry_datasecrecywaiver": SiloMode.REGION, "sentry_incidentseen": SiloMode.REGION, "sentry_incidentsubscription": SiloMode.REGION, "sentry_monitorlocation": SiloMode.REGION, "sentry_notificationsetting": SiloMode.CONTROL, "sentry_pagerdutyservice": SiloMode.REGION, "sentry_projectavatar": SiloMode.REGION, "sentry_scheduledjob": SiloMode.CONTROL, "sentry_teamavatar": SiloMode.REGION, "uptime_projectuptimesubscription": SiloMode.REGION, "workflow_engine_actiongroupstatus": SiloMode.REGION, "workflow_engine_workflowaction": SiloMode.REGION, } """ When we remove models, we are no longer able to resolve silo assignments because the model classes are removed. Losing silo assignments means historical migrations for a model can no longer run. By preserving the historical silo assignments we can provide compatibility for existing migrations. """ def __init__(self): # Memoized results of table : silo pairings self.__table_to_silo: dict[str, str | None] = {} try: # By accessing the connections Django will raise # Use `assert` to appease linters assert connections["control"] self.__is_simulated = True logging.debug("Using simulated silos") except (AssertionError, ConnectionDoesNotExist) as err: logging.debug("Cannot use simulated silos", extra={"error": str(err)}) self.__is_simulated = False def use_simulated(self, value: bool): if not in_test_environment(): raise ValueError("Cannot mutate simulation mode outside of tests") self.__is_simulated = value def _resolve_silo_connection(self, silo_modes: Iterable[SiloMode], table: str) -> str | None: # XXX This method has an override in getsentry for region silo primary splits. active_mode = SiloMode.get_current_mode() # In monolith mode we only use a single database. 
if active_mode == SiloMode.MONOLITH and not self.__is_simulated: return "default" for silo_mode in silo_modes: if self.__is_simulated: return self.__simulated_map[silo_mode] if active_mode == silo_mode: return "default" # If we're in tests raise an error, otherwise return 'no decision' # so that django skips migration operations that won't work. if in_test_environment(): raise SiloConnectionUnavailableError( f"Cannot resolve table {table} in {silo_modes}. " f"Application silo mode is {active_mode} and simulated silos are not enabled." ) else: return None def _find_model(self, table: str) -> type[Model] | None: # Use django's model inventory to find our table and what silo it is on. for model in apps.get_models(include_auto_created=True): if model._meta.db_table == table: return model return None def _silo_limit(self, model: type[Model]) -> SiloLimit | None: silo_limit = getattr(model._meta, "silo_limit", None) if silo_limit: return silo_limit db_table = model._meta.db_table if not silo_limit and db_table in self.contrib_models: return ModelSiloLimit(SiloMode.CONTROL) # If we didn't find a silo_limit we could be working with __fake__ model # from django, so we need to locate the real class by table. real_model = self._find_model(db_table) if real_model: return getattr(real_model._meta, "silo_limit", None) return None def _db_for_model(self, model: type[Model]) -> str | None: silo_limit = self._silo_limit(model) if not silo_limit: return "default" return self._resolve_silo_connection(silo_limit.modes, table=model._meta.db_table) def _db_for_table(self, table: str, app_label: str) -> str | None: if table in self.__table_to_silo: return self.__table_to_silo[table] model = self._find_model(table) if model: # Incrementally build up our result cache so we don't # have to scan through models more than once. self.__table_to_silo[table] = self._db_for_model(model) elif table in self.historical_silo_assignments: silo_mode = self.historical_silo_assignments[table] connection = self._resolve_silo_connection([silo_mode], table=table) self.__table_to_silo[table] = connection else: # We no longer have the model and can't determine silo assignment. # Default to None for sentry/getsentry app_label as models # in those apps must have silo assignments, and 'default' # for other app_labels that can't have silo assignments. fallback: str | None = "default" if app_label in {"sentry", "getsentry"}: fallback = None self.__table_to_silo[table] = fallback return self.__table_to_silo[table] def db_for_read(self, model, **hints): return self._db_for_model(model) def db_for_write(self, model, **hints): return self._db_for_model(model) def allow_relation(self, obj1, obj2, **hints): return self._db_for_model(obj1) == self._db_for_model(obj2) def allow_syncdb(self, db, model): return self._db_for_model(model) == db def allow_migrate(self, db, app_label, model=None, **hints): if model: return self._db_for_table(model._meta.db_table, app_label) == db # We use this hint in our SafeRunSql/RunPython migrations to help resolve databases. if "tables" in hints: dbs = {self._db_for_table(table, app_label) for table in hints["tables"]} if len(dbs) > 1: raise RuntimeError( "Migration tables resolve to multiple databases. " f"Got {dbs} when only one database should be used." "Please also ensure your table names in the hint are correct." ) return dbs.pop() == db # Assume migrations with no model routing or hints need to run on # the default database. return db == "default"
SiloRouter
python
django__django
tests/view_tests/tests/test_debug.py
{ "start": 79094, "end": 80047 }
class ____(SimpleTestCase): def setUp(self): get_default_exception_reporter_filter.cache_clear() self.addCleanup(get_default_exception_reporter_filter.cache_clear) def test_setting_allows_custom_subclass(self): self.assertIsInstance( get_default_exception_reporter_filter(), CustomExceptionReporterFilter, ) def test_cleansed_substitute_override(self): reporter_filter = get_default_exception_reporter_filter() self.assertEqual( reporter_filter.cleanse_setting("password", "super_secret"), reporter_filter.cleansed_substitute, ) def test_hidden_settings_override(self): reporter_filter = get_default_exception_reporter_filter() self.assertEqual( reporter_filter.cleanse_setting("database_url", "super_secret"), reporter_filter.cleansed_substitute, )
CustomExceptionReporterFilterTests
python
python-openxml__python-docx
tests/oxml/unitdata/styles.py
{ "start": 94, "end": 250 }
class ____(BaseBuilder): __tag__ = "w:style" __nspfxs__ = ("w",) __attrs__ = ("w:type", "w:styleId", "w:default", "w:customStyle")
CT_StyleBuilder
python
automl__auto-sklearn
autosklearn/metalearning/metafeatures/metafeatures.py
{ "start": 10930, "end": 11125 }
class ____(MetaFeature): def _calculate(self, X, y, logger, feat_type): return np.log(metafeatures.get_value("DatasetRatio")) @metafeatures.define("InverseDatasetRatio")
LogDatasetRatio
python
openai__openai-python
tests/api_resources/evals/runs/test_output_items.py
{ "start": 485, "end": 5452 }
class ____: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_retrieve(self, client: OpenAI) -> None: output_item = client.evals.runs.output_items.retrieve( output_item_id="output_item_id", eval_id="eval_id", run_id="run_id", ) assert_matches_type(OutputItemRetrieveResponse, output_item, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.evals.runs.output_items.with_raw_response.retrieve( output_item_id="output_item_id", eval_id="eval_id", run_id="run_id", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" output_item = response.parse() assert_matches_type(OutputItemRetrieveResponse, output_item, path=["response"]) @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.evals.runs.output_items.with_streaming_response.retrieve( output_item_id="output_item_id", eval_id="eval_id", run_id="run_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" output_item = response.parse() assert_matches_type(OutputItemRetrieveResponse, output_item, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `eval_id` but received ''"): client.evals.runs.output_items.with_raw_response.retrieve( output_item_id="output_item_id", eval_id="", run_id="run_id", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.evals.runs.output_items.with_raw_response.retrieve( output_item_id="output_item_id", eval_id="eval_id", run_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `output_item_id` but received ''"): client.evals.runs.output_items.with_raw_response.retrieve( output_item_id="", eval_id="eval_id", run_id="run_id", ) @parametrize def test_method_list(self, client: OpenAI) -> None: output_item = client.evals.runs.output_items.list( run_id="run_id", eval_id="eval_id", ) assert_matches_type(SyncCursorPage[OutputItemListResponse], output_item, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: output_item = client.evals.runs.output_items.list( run_id="run_id", eval_id="eval_id", after="after", limit=0, order="asc", status="fail", ) assert_matches_type(SyncCursorPage[OutputItemListResponse], output_item, path=["response"]) @parametrize def test_raw_response_list(self, client: OpenAI) -> None: response = client.evals.runs.output_items.with_raw_response.list( run_id="run_id", eval_id="eval_id", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" output_item = response.parse() assert_matches_type(SyncCursorPage[OutputItemListResponse], output_item, path=["response"]) @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.evals.runs.output_items.with_streaming_response.list( run_id="run_id", eval_id="eval_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" output_item = response.parse() assert_matches_type(SyncCursorPage[OutputItemListResponse], output_item, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_list(self, client: OpenAI) -> 
None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `eval_id` but received ''"): client.evals.runs.output_items.with_raw_response.list( run_id="run_id", eval_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.evals.runs.output_items.with_raw_response.list( run_id="", eval_id="eval_id", )
TestOutputItems
python
facebookresearch__faiss
tests/test_index_binary.py
{ "start": 11115, "end": 13191 }
class ____(unittest.TestCase): @unittest.skipIf(os.name == "posix" and os.uname().sysname == "Darwin", "There is a bug in the OpenMP implementation on OSX.") def test_replicas(self): d = 32 nq = 100 nb = 200 (_, xb, xq) = make_binary_dataset(d, 0, nb, nq) index_ref = faiss.IndexBinaryFlat(d) index_ref.add(xb) Dref, Iref = index_ref.search(xq, 10) nrep = 5 index = faiss.IndexBinaryReplicas() for _i in range(nrep): sub_idx = faiss.IndexBinaryFlat(d) sub_idx.add(xb) index.addIndex(sub_idx) self.assertEqual(index_ref.code_size, index.code_size) D, I = index.search(xq, 10) self.assertTrue((Dref == D).all()) self.assertTrue((Iref == I).all()) index2 = faiss.IndexBinaryReplicas() for _i in range(nrep): sub_idx = faiss.IndexBinaryFlat(d) index2.addIndex(sub_idx) index2.add(xb) D2, I2 = index2.search(xq, 10) self.assertTrue((Dref == D2).all()) self.assertTrue((Iref == I2).all()) def test_shards(self): d = 32 nq = 100 nb = 200 (_, xb, xq) = make_binary_dataset(d, 0, nb, nq) index_ref = faiss.IndexBinaryFlat(d) index_ref.add(xb) Dref, Iref = index_ref.search(xq, 10) nrep = 5 index = faiss.IndexBinaryShards(d) for i in range(nrep): sub_idx = faiss.IndexBinaryFlat(d) sub_idx.add(xb[i * nb // nrep : (i + 1) * nb // nrep]) index.add_shard(sub_idx) D, I = index.search(xq, 10) compare_binary_result_lists(Dref, Iref, D, I) index2 = faiss.IndexBinaryShards(d) for _i in range(nrep): sub_idx = faiss.IndexBinaryFlat(d) index2.add_shard(sub_idx) index2.add(xb) D2, I2 = index2.search(xq, 10) compare_binary_result_lists(Dref, Iref, D2, I2) if __name__ == '__main__': unittest.main()
TestReplicasAndShards
python
palantir__python-language-server
pyls/_version.py
{ "start": 1117, "end": 1595 }
class ____: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "" cfg.parentdir_prefix = "" cfg.versionfile_source = "pyls/_version.py" cfg.verbose = False return cfg
VersioneerConfig
python
gevent__gevent
src/greentest/3.9/test_ssl.py
{ "start": 110311, "end": 119770 }
class ____(threading.Thread): # this one's based on asyncore.dispatcher class EchoServer (asyncore.dispatcher): class ConnectionHandler(asyncore.dispatcher_with_send): def __init__(self, conn, certfile): self.socket = test_wrap_socket(conn, server_side=True, certfile=certfile, do_handshake_on_connect=False) asyncore.dispatcher_with_send.__init__(self, self.socket) self._ssl_accepting = True self._do_ssl_handshake() def readable(self): if isinstance(self.socket, ssl.SSLSocket): while self.socket.pending() > 0: self.handle_read_event() return True def _do_ssl_handshake(self): try: self.socket.do_handshake() except (ssl.SSLWantReadError, ssl.SSLWantWriteError): return except ssl.SSLEOFError: return self.handle_close() except ssl.SSLError: raise except OSError as err: if err.args[0] == errno.ECONNABORTED: return self.handle_close() else: self._ssl_accepting = False def handle_read(self): if self._ssl_accepting: self._do_ssl_handshake() else: data = self.recv(1024) if support.verbose: sys.stdout.write(" server: read %s from client\n" % repr(data)) if not data: self.close() else: self.send(data.lower()) def handle_close(self): self.close() if support.verbose: sys.stdout.write(" server: closed connection %s\n" % self.socket) def handle_error(self): raise def __init__(self, certfile): self.certfile = certfile sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.port = socket_helper.bind_port(sock, '') asyncore.dispatcher.__init__(self, sock) self.listen(5) def handle_accepted(self, sock_obj, addr): if support.verbose: sys.stdout.write(" server: new connection from %s:%s\n" %addr) self.ConnectionHandler(sock_obj, self.certfile) def handle_error(self): raise def __init__(self, certfile): self.flag = None self.active = False self.server = self.EchoServer(certfile) self.port = self.server.port threading.Thread.__init__(self) self.daemon = True def __str__(self): return "<%s %s>" % (self.__class__.__name__, self.server) def __enter__(self): self.start(threading.Event()) self.flag.wait() return self def __exit__(self, *args): if support.verbose: sys.stdout.write(" cleanup: stopping server.\n") self.stop() if support.verbose: sys.stdout.write(" cleanup: joining server thread.\n") self.join() if support.verbose: sys.stdout.write(" cleanup: successfully joined.\n") # make sure that ConnectionHandler is removed from socket_map asyncore.close_all(ignore_all=True) def start (self, flag=None): self.flag = flag threading.Thread.start(self) def run(self): self.active = True if self.flag: self.flag.set() while self.active: try: asyncore.loop(1) except: pass def stop(self): self.active = False self.server.close() def server_params_test(client_context, server_context, indata=b"FOO\n", chatty=True, connectionchatty=False, sni_name=None, session=None): """ Launch a server, connect a client to it and try various reads and writes. 
""" stats = {} server = ThreadedEchoServer(context=server_context, chatty=chatty, connectionchatty=False) with server: with client_context.wrap_socket(socket.socket(), server_hostname=sni_name, session=session) as s: s.connect((HOST, server.port)) for arg in [indata, bytearray(indata), memoryview(indata)]: if connectionchatty: if support.verbose: sys.stdout.write( " client: sending %r...\n" % indata) s.write(arg) outdata = s.read() if connectionchatty: if support.verbose: sys.stdout.write(" client: read %r\n" % outdata) if outdata != indata.lower(): raise AssertionError( "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n" % (outdata[:20], len(outdata), indata[:20].lower(), len(indata))) s.write(b"over\n") if connectionchatty: if support.verbose: sys.stdout.write(" client: closing connection.\n") stats.update({ 'compression': s.compression(), 'cipher': s.cipher(), 'peercert': s.getpeercert(), 'client_alpn_protocol': s.selected_alpn_protocol(), 'client_npn_protocol': s.selected_npn_protocol(), 'version': s.version(), 'session_reused': s.session_reused, 'session': s.session, }) s.close() stats['server_alpn_protocols'] = server.selected_alpn_protocols stats['server_npn_protocols'] = server.selected_npn_protocols stats['server_shared_ciphers'] = server.shared_ciphers return stats def try_protocol_combo(server_protocol, client_protocol, expect_success, certsreqs=None, server_options=0, client_options=0): """ Try to SSL-connect using *client_protocol* to *server_protocol*. If *expect_success* is true, assert that the connection succeeds, if it's false, assert that the connection fails. Also, if *expect_success* is a string, assert that it is the protocol version actually used by the connection. """ if certsreqs is None: certsreqs = ssl.CERT_NONE certtype = { ssl.CERT_NONE: "CERT_NONE", ssl.CERT_OPTIONAL: "CERT_OPTIONAL", ssl.CERT_REQUIRED: "CERT_REQUIRED", }[certsreqs] if support.verbose: formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n" sys.stdout.write(formatstr % (ssl.get_protocol_name(client_protocol), ssl.get_protocol_name(server_protocol), certtype)) client_context = ssl.SSLContext(client_protocol) client_context.options |= client_options server_context = ssl.SSLContext(server_protocol) server_context.options |= server_options min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None) if (min_version is not None # SSLContext.minimum_version is only available on recent OpenSSL # (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1) and hasattr(server_context, 'minimum_version') and server_protocol == ssl.PROTOCOL_TLS and server_context.minimum_version > min_version): # If OpenSSL configuration is strict and requires more recent TLS # version, we have to change the minimum to test old TLS versions. server_context.minimum_version = min_version # NOTE: we must enable "ALL" ciphers on the client, otherwise an # SSLv23 client will send an SSLv3 hello (rather than SSLv2) # starting from OpenSSL 1.0.0 (see issue #8322). if client_context.protocol == ssl.PROTOCOL_TLS: client_context.set_ciphers("ALL") seclevel_workaround(server_context, client_context) for ctx in (client_context, server_context): ctx.verify_mode = certsreqs ctx.load_cert_chain(SIGNED_CERTFILE) ctx.load_verify_locations(SIGNING_CA) try: stats = server_params_test(client_context, server_context, chatty=False, connectionchatty=False) # Protocol mismatch can result in either an SSLError, or a # "Connection reset by peer" error. 
except ssl.SSLError: if expect_success: raise except OSError as e: if expect_success or e.errno != errno.ECONNRESET: raise else: if not expect_success: raise AssertionError( "Client protocol %s succeeded with server protocol %s!" % (ssl.get_protocol_name(client_protocol), ssl.get_protocol_name(server_protocol))) elif (expect_success is not True and expect_success != stats['version']): raise AssertionError("version mismatch: expected %r, got %r" % (expect_success, stats['version']))
AsyncoreEchoServer
python
pytorch__pytorch
torch/_inductor/pattern_matcher.py
{ "start": 3464, "end": 3573 }
class ____(Protocol): __name__: str def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
SearchFn
python
kamyu104__LeetCode-Solutions
Python/search-suggestions-system.py
{ "start": 213, "end": 711 }
class ____(object): def __init__(self): self.__TOP_COUNT = 3 self.leaves = collections.defaultdict(TrieNode) self.infos = [] def insert(self, words, i): curr = self for c in words[i]: curr = curr.leaves[c] curr.add_info(words, i) def add_info(self, words, i): self.infos.append(i) self.infos.sort(key=lambda x: words[x]) if len(self.infos) > self.__TOP_COUNT: self.infos.pop()
TrieNode
python
huggingface__transformers
src/transformers/models/edgetam/modeling_edgetam.py
{ "start": 15276, "end": 17488 }
class ____(nn.Module): def __init__(self, config: EdgeTamVisionConfig): super().__init__() self.config = config self.position_encoding = EdgeTamSinePositionEmbedding( num_pos_feats=config.fpn_hidden_size // 2, normalize=True ) self.convs = nn.ModuleList() for in_channels in config.backbone_channel_list: self.convs.append( nn.Conv2d( in_channels=in_channels, out_channels=config.fpn_hidden_size, kernel_size=config.fpn_kernel_size, stride=config.fpn_stride, padding=config.fpn_padding, ), ) self.fpn_top_down_levels = config.fpn_top_down_levels def forward(self, hidden_states: torch.Tensor) -> tuple[tuple[torch.Tensor, ...], tuple[torch.Tensor, ...]]: fpn_hidden_states = () fpn_position_encoding = () # forward in top-down order (from low to high resolution) n = len(self.convs) - 1 for i in range(n, -1, -1): lateral_features = hidden_states[i].permute(0, 3, 1, 2) lateral_features = self.convs[n - i](lateral_features) if i not in self.fpn_top_down_levels or i == n: prev_features = lateral_features else: top_down_features = F.interpolate( prev_features.to(dtype=torch.float32), scale_factor=2.0, mode="nearest", align_corners=None, antialias=False, ).to(lateral_features.dtype) prev_features = lateral_features + top_down_features prev_position_encoding = self.position_encoding( prev_features.shape, prev_features.device, prev_features.dtype ).to(prev_features.dtype) fpn_hidden_states += (prev_features,) fpn_position_encoding += (prev_position_encoding,) return fpn_hidden_states, fpn_position_encoding @auto_docstring( custom_intro=""" The vision model from EdgeTAM without any head or projection on top. """ )
EdgeTamVisionNeck
python
yangshun__tech-interview-handbook
apps/website/experimental/utilities/python/linked_list.py
{ "start": 151, "end": 3243 }
class ____: def __init__(self, value): self.value = value self.next = None def linked_list_append(linked_list, value): '''Appends a value to the end of the linked list''' node = linked_list insert_node = LinkedListNode(value) if not node: return insert_node while node.next: node = node.next node.next = insert_node return linked_list def linked_list_insert_index(linked_list, value, index): '''Inserts a value at a particular index''' node = linked_list insert_node = LinkedListNode(value) # Check if inserting at head if index == 0: insert_node.next = node return insert_node # Skip ahead for _ in range(index - 1): node = node.next if not node: raise ValueError insert_node.next = node.next node.next = insert_node return linked_list def linked_list_delete(linked_list, value): '''Deletes the first occurrence of a value in the linked list''' node = linked_list # Check if deleting at head if node.value == value: return node.next # Skip ahead while node.next: if node.next.value == value: node.next = node.next.next return linked_list node = node.next raise ValueError def linked_list_delete_index(linked_list, index): '''Deletes the element at a particular index in the linked list''' node = linked_list # Check if deleting at head if index == 0: return node.next # Skip ahead for _ in range(index - 1): node = node.next if not node: raise ValueError if not node.next: raise ValueError node.next = node.next.next return linked_list def linked_list_iter(linked_list): '''Lazy iterator over each node in the linked list''' node = linked_list while node is not None: yield node node = node.next # Append to back linked_list = None # Start with an empty linked list linked_list = linked_list_append(linked_list, 1) linked_list = linked_list_append(linked_list, 2) linked_list = linked_list_append(linked_list, 4) print([node.value for node in linked_list_iter(linked_list)]) # Insert by index linked_list = linked_list_insert_index(linked_list, 0, 0) # Front print([node.value for node in linked_list_iter(linked_list)]) linked_list = linked_list_insert_index(linked_list, 3, 3) # Back print([node.value for node in linked_list_iter(linked_list)]) # Delete "3" linked_list = linked_list_delete(linked_list, 3) print([node.value for node in linked_list_iter(linked_list)]) # Delete by index linked_list = linked_list_delete_index(linked_list, 0) print([node.value for node in linked_list_iter(linked_list)]) linked_list = linked_list_delete_index(linked_list, 1) print([node.value for node in linked_list_iter(linked_list)]) # Delete until empty linked_list = linked_list_delete_index(linked_list, 0) linked_list = linked_list_delete_index(linked_list, 0) print([node.value for node in linked_list_iter(linked_list)])
LinkedListNode
python
conda__conda
tests/plugins/test_env_specs.py
{ "start": 1296, "end": 1510 }
class ____: @plugins.hookimpl def conda_environment_specifiers(self): yield CondaEnvironmentSpecifier( name="rand-spec", environment_spec=RandomSpec, )
RandomSpecPlugin
python
qiwsir__algorithm
binary_tree2.py
{ "start": 366, "end": 3589 }
class ____: def __init__(self): # initializes the root member self.root = None def addNode(self, data): # creates a new node and returns it return CNode(data) def insert(self, root, data): # inserts a new data if root == None: # it there isn't any data # adds it and returns return self.addNode(data) else: # enters into the tree if data <= root.data: # if the data is less than the stored one # goes into the left-sub-tree root.left = self.insert(root.left, data) else: # processes the right-sub-tree root.right = self.insert(root.right, data) return root def lookup(self, root, target): # looks for a value into the tree if root == None: return 0 else: # if it has found it... if target == root.data: return 1 else: if target < root.data: # left side return self.lookup(root.left, target) else: # right side return self.lookup(root.right, target) def minValue(self, root): # goes down into the left # arm and returns the last value while(root.left != None): root = root.left return root.data def maxDepth(self, root): if root == None: return 0 else: # computes the two depths ldepth = self.maxDepth(root.left) rdepth = self.maxDepth(root.right) # returns the appropriate depth return max(ldepth, rdepth) + 1 def size(self, root): if root == None: return 0 else: return self.size(root.left) + 1 + self.size(root.right) def printTree(self, root): # prints the tree path if root == None: pass else: self.printTree(root.left) print root.data, self.printTree(root.right) def printRevTree(self, root): # prints the tree path in reverse # order if root == None: pass else: self.printRevTree(root.right) print root.data, self.printRevTree(root.left) if __name__ == "__main__": # create the binary tree BTree = CBOrdTree() # add the root node root = BTree.addNode(0) # ask the user to insert values for i in range(0, 5): data = int(raw_input("insert the node value nr %d: " % i)) # insert values BTree.insert(root, data) print BTree.printTree(root) print BTree.printRevTree(root) print data = int(raw_input("insert a value to find: ")) if BTree.lookup(root, data): print "found" else: print "not found" print BTree.minValue(root) print BTree.maxDepth(root) print BTree.size(root)
CBOrdTree
python
ray-project__ray
python/ray/serve/schema.py
{ "start": 8785, "end": 17424 }
class ____(BaseModel, allow_population_by_field_name=True): """ Specifies options for one deployment within a Serve application. For each deployment this can optionally be included in `ServeApplicationSchema` to override deployment options specified in code. """ name: str = Field( ..., description=("Globally-unique name identifying this deployment.") ) num_replicas: Optional[Union[PositiveInt, str]] = Field( default=DEFAULT.VALUE, description=( "The number of processes that handle requests to this " "deployment. Uses a default if null. Can also be set to " "`auto` for a default autoscaling configuration " "(experimental)." ), ) max_ongoing_requests: int = Field( default=DEFAULT.VALUE, description=( "Maximum number of requests that are sent in parallel " "to each replica of this deployment. The limit is enforced across all " "callers (HTTP requests or DeploymentHandles). Defaults to " f"{DEFAULT_MAX_ONGOING_REQUESTS}." ), gt=0, ) max_queued_requests: StrictInt = Field( default=DEFAULT.VALUE, description=( "[DEPRECATED] The max number of requests that will be executed at once in " f"each replica. Defaults to {DEFAULT_MAX_ONGOING_REQUESTS}." ), ) user_config: Optional[Dict] = Field( default=DEFAULT.VALUE, description=( "Config to pass into this deployment's " "reconfigure method. This can be updated dynamically " "without restarting replicas" ), ) autoscaling_config: Optional[Dict] = Field( default=DEFAULT.VALUE, description=( "Config specifying autoscaling " "parameters for the deployment's number of replicas. " "If null, the deployment won't autoscale its number of " "replicas; the number of replicas will be fixed at " "num_replicas." ), ) graceful_shutdown_wait_loop_s: float = Field( default=DEFAULT.VALUE, description=( "Duration that deployment replicas will wait until there " "is no more work to be done before shutting down. Uses a " "default if null." ), ge=0, ) graceful_shutdown_timeout_s: float = Field( default=DEFAULT.VALUE, description=( "Serve controller waits for this duration before " "forcefully killing the replica for shutdown. Uses a " "default if null." ), ge=0, ) health_check_period_s: float = Field( default=DEFAULT.VALUE, description=( "Frequency at which the controller will health check " "replicas. Uses a default if null." ), gt=0, ) health_check_timeout_s: float = Field( default=DEFAULT.VALUE, description=( "Timeout that the controller will wait for a response " "from the replica's health check before marking it " "unhealthy. Uses a default if null." ), gt=0, ) ray_actor_options: RayActorOptionsSchema = Field( default=DEFAULT.VALUE, description="Options set for each replica actor." ) placement_group_bundles: List[Dict[str, float]] = Field( default=DEFAULT.VALUE, description=( "Define a set of placement group bundles to be " "scheduled *for each replica* of this deployment. The replica actor will " "be scheduled in the first bundle provided, so the resources specified in " "`ray_actor_options` must be a subset of the first bundle's resources. All " "actors and tasks created by the replica actor will be scheduled in the " "placement group by default (`placement_group_capture_child_tasks` is set " "to True)." ), ) placement_group_strategy: str = Field( default=DEFAULT.VALUE, description=( "Strategy to use for the replica placement group " "specified via `placement_group_bundles`. Defaults to `PACK`." 
), ) max_replicas_per_node: int = Field( default=DEFAULT.VALUE, description=( "The max number of replicas of this deployment that can run on a single " "Valid values are None (default, no limit) or an integer in the range of " "[1, 100]. " ), ) logging_config: LoggingConfig = Field( default=DEFAULT.VALUE, description="Logging config for configuring serve deployment logs.", ) request_router_config: Union[Dict, RequestRouterConfig] = Field( default=DEFAULT.VALUE, description="Config for the request router used for this deployment.", ) @root_validator def validate_num_replicas_and_autoscaling_config(cls, values): num_replicas = values.get("num_replicas", None) autoscaling_config = values.get("autoscaling_config", None) # Cannot have `num_replicas` be an int and a non-null # autoscaling config if isinstance(num_replicas, int): if autoscaling_config not in [None, DEFAULT.VALUE]: raise ValueError( "Manually setting num_replicas is not allowed " "when autoscaling_config is provided." ) # A null `num_replicas` or `num_replicas="auto"` can be paired # with a non-null autoscaling_config elif num_replicas not in ["auto", None, DEFAULT.VALUE]: raise ValueError( f'`num_replicas` must be an int or "auto", but got: {num_replicas}' ) return values @root_validator def validate_max_replicas_per_node_and_placement_group_bundles(cls, values): max_replicas_per_node = values.get("max_replicas_per_node", None) placement_group_bundles = values.get("placement_group_bundles", None) if max_replicas_per_node not in [ DEFAULT.VALUE, None, ] and placement_group_bundles not in [DEFAULT.VALUE, None]: raise ValueError( "Setting max_replicas_per_node is not allowed when " "placement_group_bundles is provided." ) return values @root_validator def validate_max_queued_requests(cls, values): max_queued_requests = values.get("max_queued_requests", None) if max_queued_requests is None or max_queued_requests == DEFAULT.VALUE: return values if max_queued_requests < 1 and max_queued_requests != -1: raise ValueError( "max_queued_requests must be -1 (no limit) or a positive integer." ) return values def _get_user_configured_option_names(self) -> Set[str]: """Get set of names for all user-configured options. Any field not set to DEFAULT.VALUE is considered a user-configured option. """ return { field for field, value in self.dict().items() if value is not DEFAULT.VALUE } def is_autoscaling_configured(self) -> bool: return self.num_replicas == "auto" or self.autoscaling_config not in [ None, DEFAULT.VALUE, ] def _deployment_info_to_schema(name: str, info: DeploymentInfo) -> DeploymentSchema: """Converts a DeploymentInfo object to DeploymentSchema.""" schema = DeploymentSchema( name=name, max_ongoing_requests=info.deployment_config.max_ongoing_requests, max_queued_requests=info.deployment_config.max_queued_requests, user_config=info.deployment_config.user_config, graceful_shutdown_wait_loop_s=( info.deployment_config.graceful_shutdown_wait_loop_s ), graceful_shutdown_timeout_s=info.deployment_config.graceful_shutdown_timeout_s, health_check_period_s=info.deployment_config.health_check_period_s, health_check_timeout_s=info.deployment_config.health_check_timeout_s, ray_actor_options=info.replica_config.ray_actor_options, request_router_config=info.deployment_config.request_router_config, ) if info.deployment_config.autoscaling_config is not None: schema.autoscaling_config = info.deployment_config.autoscaling_config.dict() else: schema.num_replicas = info.deployment_config.num_replicas return schema @PublicAPI(stability="stable")
DeploymentSchema
python
coleifer__peewee
examples/hexastore.py
{ "start": 2981, "end": 3104 }
class ____(object): def __getattr__(self, name): return Variable(name) __call__ = __getattr__
_VariableFactory
python
OmkarPathak__pygorithm
tests/test_math.py
{ "start": 557, "end": 687 }
class ____(unittest.TestCase): def test_factorial(self): self.assertEqual(factorial.factorial(10), 3628800)
TestFactorial
python
sqlalchemy__sqlalchemy
test/ext/test_associationproxy.py
{ "start": 79632, "end": 81731 }
class ____(fixtures.DeclarativeMappedTest): run_create_tables = None @classmethod def setup_classes(cls): class Point(cls.Basic): def __init__(self, x, y): self.x = x self.y = y def __composite_values__(self): return [self.x, self.y] __hash__ = None def __eq__(self, other): return ( isinstance(other, Point) and other.x == self.x and other.y == self.y ) def __ne__(self, other): return not isinstance(other, Point) or not self.__eq__(other) class Graph(cls.DeclarativeBasic): __tablename__ = "graph" id = Column( Integer, primary_key=True, test_needs_autoincrement=True ) name = Column(String(30)) point_data = relationship("PointData") points = association_proxy( "point_data", "point", creator=lambda point: PointData(point=point), ) class PointData(ComparableEntity, cls.DeclarativeBasic): __tablename__ = "point" id = Column( Integer, primary_key=True, test_needs_autoincrement=True ) graph_id = Column(ForeignKey("graph.id")) x1 = Column(Integer) y1 = Column(Integer) point = composite(Point, x1, y1) return Point, Graph, PointData def test_append(self): Point, Graph, PointData = self.classes("Point", "Graph", "PointData") g1 = Graph() g1.points.append(Point(3, 5)) eq_(g1.point_data, [PointData(point=Point(3, 5))]) def test_access(self): Point, Graph, PointData = self.classes("Point", "Graph", "PointData") g1 = Graph() g1.point_data.append(PointData(point=Point(3, 5))) g1.point_data.append(PointData(point=Point(10, 7))) eq_(g1.points, [Point(3, 5), Point(10, 7)])
CompositeAccessTest
python
gabrielfalcao__HTTPretty
httpretty/http.py
{ "start": 3526, "end": 4736 }
class ____(BaseClass): GET = 'GET' PUT = 'PUT' POST = 'POST' DELETE = 'DELETE' HEAD = 'HEAD' PATCH = 'PATCH' OPTIONS = 'OPTIONS' CONNECT = 'CONNECT' METHODS = (GET, PUT, POST, DELETE, HEAD, PATCH, OPTIONS, CONNECT) def parse_requestline(s): """ http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5 >>> parse_requestline('GET / HTTP/1.0') ('GET', '/', '1.0') >>> parse_requestline('post /testurl htTP/1.1') ('POST', '/testurl', '1.1') >>> parse_requestline('Im not a RequestLine') Traceback (most recent call last): ... ValueError: Not a Request-Line """ methods = '|'.join(HttpBaseClass.METHODS) m = re.match(r'(' + methods + r')\s+(.*)\s+HTTP/(1.[0|1])', s, re.I) if m: return m.group(1).upper(), m.group(2), m.group(3) else: raise ValueError('Not a Request-Line') def last_requestline(sent_data): """ Find the last line in sent_data that can be parsed with parse_requestline """ for line in reversed(sent_data): try: parse_requestline(decode_utf8(line)) except ValueError: pass else: return line
HttpBaseClass
python
getsentry__sentry
src/sentry/utils/prompts.py
{ "start": 172, "end": 2252 }
class ____(TypedDict): required_fields: list[str] DEFAULT_PROMPTS: dict[str, _PromptConfig] = { "alert_stream": {"required_fields": ["organization_id"]}, "chonk_ui_dot_indicator": {"required_fields": ["organization_id"]}, "chonk_ui_banner": {"required_fields": ["organization_id"]}, "code_owners": {"required_fields": ["organization_id", "project_id"]}, "data_consent_banner": {"required_fields": ["organization_id"]}, "data_consent_priority": {"required_fields": ["organization_id"]}, "distributed_tracing": {"required_fields": ["organization_id", "project_id"]}, "github_missing_members": {"required_fields": ["organization_id"]}, "issue_feedback_hidden": {"required_fields": ["organization_id", "project_id"]}, "issue_priority": {"required_fields": ["organization_id"]}, "issue_replay_inline_onboarding": {"required_fields": ["organization_id", "project_id"]}, "issue_views_add_view_banner": {"required_fields": ["organization_id"]}, "issue_views_all_views_banner": {"required_fields": ["organization_id"]}, "metric_alert_ignore_archived_issues": {"required_fields": ["organization_id", "project_id"]}, "profiling_onboarding": {"required_fields": ["organization_id"]}, "quick_trace_missing": {"required_fields": ["organization_id", "project_id"]}, "releases": {"required_fields": ["organization_id", "project_id"]}, "sdk_updates": {"required_fields": ["organization_id"]}, "seer_autofix_setup_acknowledged": {"required_fields": ["organization_id"]}, "stacked_navigation_banner": {"required_fields": ["organization_id"]}, "stacked_navigation_help_menu": {"required_fields": ["organization_id"]}, "stacktrace_link": {"required_fields": ["organization_id", "project_id"]}, "suggest_mobile_project": {"required_fields": ["organization_id"]}, "suspect_commits": {"required_fields": ["organization_id", "project_id"]}, "user_snooze_deprecation": {"required_fields": ["organization_id", "project_id"]}, "vitals_alert": {"required_fields": ["organization_id"]}, }
_PromptConfig
python
scikit-learn__scikit-learn
sklearn/utils/_metadata_requests.py
{ "start": 28366, "end": 44882 }
class ____: """Coordinates metadata routing for a :term:`router` object. This class is used by :term:`meta-estimators` or functions that can route metadata, to handle their metadata routing. Routing information is stored in a dictionary-like structure of the form ``{"object_name": RouterMappingPair(mapping, router)}``, where ``mapping`` is an instance of :class:`~sklearn.utils.metadata_routing.MethodMapping` and ``router`` is either a :class:`~sklearn.utils.metadata_routing.MetadataRequest` or another :class:`~sklearn.utils.metadata_routing.MetadataRouter` instance. .. versionadded:: 1.3 Parameters ---------- owner : object The object to which these requests belong. """ # this is here for us to use this attribute's value instead of doing # `isinstance`` in our checks, so that we avoid issues when people vendor # this file instead of using it directly from scikit-learn. _type = "metadata_router" def __init__(self, owner): self._route_mappings = dict() # `_self_request` is used if the router is also a consumer. # _self_request, (added using `add_self_request()`) is treated # differently from the other consumer objects which are stored in # _route_mappings. self._self_request = None self.owner = owner def add_self_request(self, obj): """Add `self` (as a :term:`consumer`) to the `MetadataRouter`. This method is used if the :term:`router` is also a :term:`consumer`, and hence the router itself needs to be included in the routing. The passed object can be an estimator or a :class:`~sklearn.utils.metadata_routing.MetadataRequest`. A router should add itself using this method instead of `add` since it should be treated differently than the other consumer objects to which metadata is routed by the router. Parameters ---------- obj : object This is typically the router instance, i.e. `self` in a ``get_metadata_routing()`` implementation. It can also be a ``MetadataRequest`` instance. Returns ------- self : MetadataRouter Returns `self`. """ if getattr(obj, "_type", None) == "metadata_request": self._self_request = deepcopy(obj) elif hasattr(obj, "_get_metadata_request"): self._self_request = deepcopy(obj._get_metadata_request()) else: raise ValueError( "Given `obj` is neither a `MetadataRequest` nor does it implement the" " required API. Inheriting from `BaseEstimator` implements the required" " API." ) return self def add(self, *, method_mapping, **objs): """Add :term:`consumers <consumer>` to the `MetadataRouter`. The estimators that consume metadata are passed as named objects along with a method mapping, that defines how their methods relate to those of the :term:`router`. Parameters ---------- method_mapping : MethodMapping The mapping between the child (:term:`consumer`) and the parent's (:term:`router`'s) methods. **objs : dict A dictionary of objects, whose requests are extracted by calling :func:`~sklearn.utils.metadata_routing.get_routing_for_object` on them. Returns ------- self : MetadataRouter Returns `self`. """ method_mapping = deepcopy(method_mapping) for name, obj in objs.items(): self._route_mappings[name] = RouterMappingPair( mapping=method_mapping, router=get_routing_for_object(obj) ) return self def consumes(self, method, params): """Return params consumed as metadata in a :term:`router` or its sub-estimators. This method returns the subset of `params` that are consumed by the `method`. A `param` is considered consumed if it is used in the specified method of the :term:`router` itself or any of its sub-estimators (or their sub-estimators). .. 
versionadded:: 1.4 Parameters ---------- method : str The name of the method for which to determine consumed parameters. params : iterable of str An iterable of parameter names to test for consumption. Returns ------- consumed_params : set of str A subset of parameters from `params` which are consumed by this method. """ consumed_params = set() if self._self_request: consumed_params.update( self._self_request.consumes(method=method, params=params) ) for _, route_mapping in self._route_mappings.items(): for caller, callee in route_mapping.mapping: if caller == method: consumed_params.update( route_mapping.router.consumes(method=callee, params=params) ) return consumed_params def _get_param_names(self, *, method, return_alias, ignore_self_request): """Get names of all metadata that can be consumed or routed by specified \ method. This method returns the names of all metadata, even the ``False`` ones. Parameters ---------- method : str The name of the method for which metadata names are requested. return_alias : bool Controls whether original or aliased names should be returned, which only applies to the stored `self`. If no `self` routing object is stored, this parameter has no effect. ignore_self_request : bool If `self._self_request` should be ignored. This is used in `_route_params`. If ``True``, ``return_alias`` has no effect. Returns ------- names : set of str A set of strings with the names of all metadata. """ res = set() if self._self_request and not ignore_self_request: res = res.union( self._self_request._get_param_names( method=method, return_alias=return_alias ) ) for name, route_mapping in self._route_mappings.items(): for caller, callee in route_mapping.mapping: if caller == method: res = res.union( route_mapping.router._get_param_names( method=callee, return_alias=True, ignore_self_request=False ) ) return res def _route_params(self, *, params, method, parent, caller): """Prepare the given metadata to be passed to the method. This is used when a router is used as a child object of another router. The parent router then passes all parameters understood by the child object to it and delegates their validation to the child. The output of this method can be used directly as the input to the corresponding method as **kwargs. Parameters ---------- params : dict A dictionary of provided metadata. method : str The name of the method for which the metadata is requested and routed. parent : object Parent class object, that routes the metadata. caller : str Method from the parent class object, where the metadata is routed from. Returns ------- params : Bunch A :class:`~sklearn.utils.Bunch` of {metadata: value} which can be given to the corresponding method. """ res = Bunch() if self._self_request: res.update( self._self_request._route_params( params=params, method=method, parent=parent, caller=caller, ) ) param_names = self._get_param_names( method=method, return_alias=True, ignore_self_request=True ) child_params = { key: value for key, value in params.items() if key in param_names } for key in set(res.keys()).intersection(child_params.keys()): # conflicts are okay if the passed objects are the same, but it's # an issue if they're different objects. if child_params[key] is not res[key]: raise ValueError( f"In {_routing_repr(self.owner)}, there is a conflict on {key}" " between what is requested for this estimator and what is" " requested by its children. You can resolve this conflict by" " using an alias for the child estimators' requested metadata." 
) res.update(child_params) return res def route_params(self, *, caller, params): """Get the values of metadata requested by :term:`consumers <consumer>`. Returns a :class:`~sklearn.utils.Bunch` containing the metadata that this :term:`router`'s `caller` method needs to route, organized by each :term:`consumer` and their corresponding methods. This can be used to pass the required metadata to corresponding methods in consumers. Parameters ---------- caller : str The name of the :term:`router`'s method through which the metadata is routed. For example, if called inside the :term:`fit` method of a router, this would be `"fit"`. params : dict A dictionary of provided metadata. Returns ------- params : Bunch A :class:`~sklearn.utils.Bunch` of the form ``{"object_name": {"method_name": {metadata: value}}}``. """ if self._self_request: self._self_request._check_warnings(params=params, method=caller) res = Bunch() for name, route_mapping in self._route_mappings.items(): router, mapping = route_mapping.router, route_mapping.mapping res[name] = Bunch() for _caller, _callee in mapping: if _caller == caller: res[name][_callee] = router._route_params( params=params, method=_callee, parent=self.owner, caller=caller, ) return res def validate_metadata(self, *, method, params): """Validate given metadata for a method. This raises a ``TypeError`` if some of the passed metadata are not understood by child objects. Parameters ---------- method : str The name of the :term:`router`'s method through which the metadata is routed. For example, if called inside the :term:`fit` method of a router, this would be `"fit"`. params : dict A dictionary of provided metadata. """ param_names = self._get_param_names( method=method, return_alias=False, ignore_self_request=False ) if self._self_request: self_params = self._self_request._get_param_names( method=method, return_alias=False ) else: self_params = set() extra_keys = set(params.keys()) - param_names - self_params if extra_keys: raise TypeError( f"{_routing_repr(self.owner)}.{method} got unexpected argument(s)" f" {extra_keys}, which are not routed to any object." ) def _serialize(self): """Serialize the object. Returns ------- obj : dict A serialized version of the instance in the form of a dictionary. """ res = dict() if self._self_request: res["$self_request"] = self._self_request._serialize() for name, route_mapping in self._route_mappings.items(): res[name] = dict() res[name]["mapping"] = route_mapping.mapping._serialize() res[name]["router"] = route_mapping.router._serialize() return res def __iter__(self): if self._self_request: method_mapping = MethodMapping() for method in METHODS: method_mapping.add(caller=method, callee=method) yield ( "$self_request", RouterMappingPair(mapping=method_mapping, router=self._self_request), ) for name, route_mapping in self._route_mappings.items(): yield (name, route_mapping) def __repr__(self): return str(self._serialize()) def __str__(self): return str(repr(self)) def get_routing_for_object(obj=None): """Get a ``Metadata{Router, Request}`` instance from the given object. This function returns a :class:`~sklearn.utils.metadata_routing.MetadataRouter` or a :class:`~sklearn.utils.metadata_routing.MetadataRequest` from the given input. This function always returns a copy or an instance constructed from the input, such that changing the output of this function will not change the original object. .. 
versionadded:: 1.3 Parameters ---------- obj : object - If the object provides a `get_metadata_routing` method, return a copy of the output of that method. - If the object is already a :class:`~sklearn.utils.metadata_routing.MetadataRequest` or a :class:`~sklearn.utils.metadata_routing.MetadataRouter`, return a copy of that. - Returns an empty :class:`~sklearn.utils.metadata_routing.MetadataRequest` otherwise. Returns ------- obj : MetadataRequest or MetadataRouter A ``MetadataRequest`` or a ``MetadataRouter`` taken or created from the given object. """ # doing this instead of a try/except since an AttributeError could be raised # for other reasons. if hasattr(obj, "get_metadata_routing"): return deepcopy(obj.get_metadata_routing()) elif getattr(obj, "_type", None) in ["metadata_request", "metadata_router"]: return deepcopy(obj) return MetadataRequest(owner=None) # Request method # ============== # This section includes what's needed for the `RequestMethod` descriptor and # the dynamic generation of `set_{method}_request` methods in the `_MetadataRequester` # mixin class. # These strings are used to dynamically generate the docstrings for the methods. REQUESTER_DOC = """ Configure whether metadata should be requested to be \ passed to the ``{method}`` method. Note that this method is only relevant when this estimator is used as a sub-estimator within a :term:`meta-estimator` and metadata routing is enabled with ``enable_metadata_routing=True`` (see :func:`sklearn.set_config`). Please check the :ref:`User Guide <metadata_routing>` on how the routing mechanism works. The options for each parameter are: - ``True``: metadata is requested, and \ passed to ``{method}`` if provided. The request is ignored if \ metadata is not provided. - ``False``: metadata is not requested and the meta-estimator \ will not pass it to ``{method}``. - ``None``: metadata is not requested, and the meta-estimator \ will raise an error if the user provides it. - ``str``: metadata should be passed to the meta-estimator with \ this given alias instead of the original name. The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the existing request. This allows you to change the request for some parameters and not others. .. versionadded:: 1.3 Parameters ---------- """ REQUESTER_DOC_PARAM = """ {metadata} : str, True, False, or None, \ default=sklearn.utils.metadata_routing.UNCHANGED Metadata routing for ``{metadata}`` parameter in ``{method}``. """ REQUESTER_DOC_RETURN = """ Returns ------- self : object The updated object. """
MetadataRouter
python
bokeh__bokeh
src/bokeh/application/handlers/code_runner.py
{ "start": 1771, "end": 7886 }
class ____: ''' Compile and run Python source code. ''' _code: CodeType | None _doc: str | None _permanent_error: str | None _permanent_error_detail: str | None _path: PathLike _source: str _argv: list[str] _package: ModuleType | None ran: bool _failed: bool _error: str | None _error_detail: str | None def __init__(self, source: str, path: PathLike, argv: list[str], package: ModuleType | None = None) -> None: ''' Args: source (str) : A string containing Python source code to execute path (str) : A filename to use in any debugging or error output argv (list[str]) : A list of string arguments to make available as ``sys.argv`` when the code executes package (bool) : An optional package module to configure Raises: ValueError, if package is specified for an __init__.py ''' if package and basename(path) == "__init__.py": raise ValueError("__init__.py cannot have package specified") self._permanent_error = None self._permanent_error_detail = None self.reset_run_errors() import ast self._code = None try: nodes = ast.parse(source, os.fspath(path)) self._code = compile(nodes, filename=path, mode='exec', dont_inherit=True) # use a zip to associate code names with values, to then find the contents of the docstring d = dict(zip(self._code.co_names, self._code.co_consts)) self._doc = d.get('__doc__', None) except SyntaxError as e: self._code = None filename = os.path.basename(e.filename) if e.filename is not None else "???" self._permanent_error = f"Invalid syntax in {filename!r} on line {e.lineno or '???'}:\n{e.text or '???'}" self._permanent_error_detail = traceback.format_exc() self._path = path self._source = source self._argv = argv self._package = package self.ran = False # Properties -------------------------------------------------------------- @property def doc(self) -> str | None: ''' Contents of docstring, if code contains one. ''' return self._doc @property def error(self) -> str | None: ''' If code execution fails, may contain a related error message. ''' return self._error if self._permanent_error is None else self._permanent_error @property def error_detail(self) -> str | None: ''' If code execution fails, may contain a traceback or other details. ''' return self._error_detail if self._permanent_error_detail is None else self._permanent_error_detail @property def failed(self) -> bool: ''' ``True`` if code execution failed ''' return self._failed or self._code is None @property def path(self) -> PathLike: ''' The path that new modules will be configured with. ''' return self._path @property def source(self) -> str: ''' The configured source code that will be executed when ``run`` is called. ''' return self._source # Public methods ---------------------------------------------------------- def new_module(self) -> ModuleType | None: ''' Make a fresh module to run in. Returns: Module ''' from types import ModuleType self.reset_run_errors() if self._code is None: return None module_name = 'bokeh_app_' + make_globally_unique_id().replace('-', '') module = ModuleType(module_name) module.__dict__['__file__'] = os.path.abspath(self._path) if self._package: module.__package__ = self._package.__name__ module.__path__ = [os.path.dirname(self._path)] if basename(self.path) == "__init__.py": module.__package__ = module_name module.__path__ = [os.path.dirname(self._path)] return module def reset_run_errors(self) -> None: ''' Clears any transient error conditions from a previous run. 
Returns None ''' self._failed = False self._error = None self._error_detail = None def run(self, module: ModuleType, post_check: Callable[[], None] | None = None) -> None: ''' Execute the configured source code in a module and run any post checks. Args: module (Module) : A module to execute the configured code in. post_check (callable, optional) : A function that raises an exception if expected post-conditions are not met after code execution. ''' # Simulate the sys.path behaviour described here: # # https://docs.python.org/2/library/sys.html#sys.path _cwd = os.getcwd() _sys_path = list(sys.path) _sys_argv = list(sys.argv) sys.path.insert(0, os.path.dirname(self._path)) sys.argv = [os.path.basename(self._path), *self._argv] # XXX: self._code shouldn't be None at this point but types don't reflect this assert self._code is not None try: exec(self._code, module.__dict__) if post_check: post_check() except Exception as e: handle_exception(self, e) finally: # undo sys.path, CWD fixups os.chdir(_cwd) sys.path = _sys_path sys.argv = _sys_argv self.ran = True #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
CodeRunner
python
great-expectations__great_expectations
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_new_mexico_zip.py
{ "start": 757, "end": 1766 }
class ____(ColumnMapMetricProvider): # This is the id string that will be used to reference your metric. condition_metric_name = "column_values.valid_new_mexico_zip" # This method implements the core logic for the PandasExecutionEngine @column_condition_partial(engine=PandasExecutionEngine) def _pandas(cls, column, **kwargs): return column.apply(lambda x: is_valid_new_mexico_zip(x)) # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine # @column_condition_partial(engine=SqlAlchemyExecutionEngine) # def _sqlalchemy(cls, column, _dialect, **kwargs): # raise NotImplementedError # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine # @column_condition_partial(engine=SparkDFExecutionEngine) # def _spark(cls, column, **kwargs): # raise NotImplementedError # This class defines the Expectation itself
ColumnValuesToBeValidNewMexicoZip
python
jazzband__prettytable
tests/test_sorting.py
{ "start": 141, "end": 4594 }
class ____: def test_sort_by_different_per_columns(self, city_data: PrettyTable) -> None: city_data.sortby = city_data.field_names[0] old = city_data.get_string() for field in city_data.field_names[1:]: city_data.sortby = field new = city_data.get_string() assert new != old def test_reverse_sort(self, city_data: PrettyTable) -> None: for field in city_data.field_names: city_data.sortby = field city_data.reversesort = False forward = city_data.get_string() city_data.reversesort = True backward = city_data.get_string() forward_lines = forward.split("\n")[2:] # Discard header lines backward_lines = backward.split("\n")[2:] backward_lines.reverse() assert forward_lines == backward_lines def test_sort_key(self, city_data: PrettyTable) -> None: # Test sorting by length of city name def key(vals: RowType) -> list[int]: vals[0] = len(vals[0]) return vals city_data.sortby = "City name" city_data.sort_key = key assert ( city_data.get_string().strip() == """ +-----------+------+------------+-----------------+ | City name | Area | Population | Annual Rainfall | +-----------+------+------------+-----------------+ | Perth | 5386 | 1554769 | 869.4 | | Darwin | 112 | 120900 | 1714.7 | | Hobart | 1357 | 205556 | 619.5 | | Sydney | 2058 | 4336374 | 1214.8 | | Adelaide | 1295 | 1158259 | 600.5 | | Brisbane | 5905 | 1857594 | 1146.4 | | Melbourne | 1566 | 3806092 | 646.9 | +-----------+------+------------+-----------------+ """.strip() ) def test_sort_key_at_class_declaration(self) -> None: # Test sorting by length of city name def key(vals: RowType) -> list[int]: vals[0] = len(vals[0]) return vals table = PrettyTable( field_names=CITY_DATA_HEADER, sortby="City name", sort_key=key, ) assert table.sort_key == key for row in CITY_DATA: table.add_row(row) assert ( """+-----------+------+------------+-----------------+ | City name | Area | Population | Annual Rainfall | +-----------+------+------------+-----------------+ | Perth | 5386 | 1554769 | 869.4 | | Darwin | 112 | 120900 | 1714.7 | | Hobart | 1357 | 205556 | 619.5 | | Sydney | 2058 | 4336374 | 1214.8 | | Adelaide | 1295 | 1158259 | 600.5 | | Brisbane | 5905 | 1857594 | 1146.4 | | Melbourne | 1566 | 3806092 | 646.9 | +-----------+------+------------+-----------------+""" == table.get_string().strip() ) def test_sort_slice(self) -> None: """Make sure sorting and slicing interact in the expected way""" table = PrettyTable(["Foo"]) for i in range(20, 0, -1): table.add_row([i]) new_style = table.get_string(sortby="Foo", end=10) assert "10" in new_style assert "20" not in new_style oldstyle = table.get_string(sortby="Foo", end=10, oldsortslice=True) assert "10" not in oldstyle assert "20" in oldstyle def test_sortby_at_class_declaration(self) -> None: """ Fix #354 where initialization of a table with sortby fails """ table = PrettyTable( field_names=CITY_DATA_HEADER, sortby="Area", ) assert table.sortby == "Area" for row in CITY_DATA: table.add_row(row) assert ( """+-----------+------+------------+-----------------+ | City name | Area | Population | Annual Rainfall | +-----------+------+------------+-----------------+ | Darwin | 112 | 120900 | 1714.7 | | Adelaide | 1295 | 1158259 | 600.5 | | Hobart | 1357 | 205556 | 619.5 | | Melbourne | 1566 | 3806092 | 646.9 | | Sydney | 2058 | 4336374 | 1214.8 | | Perth | 5386 | 1554769 | 869.4 | | Brisbane | 5905 | 1857594 | 1146.4 | +-----------+------+------------+-----------------+""" == table.get_string().strip() )
TestSorting
python
pennersr__django-allauth
allauth/idp/oidc/internal/oauthlib/server.py
{ "start": 994, "end": 1736 }
class ____(DeviceApplicationServer): def __init__(self): verification_uri = context.request.build_absolute_uri( reverse("idp:oidc:device_authorization") ) super().__init__( request_validator=OAuthLibRequestValidator(), verification_uri=verification_uri, verification_uri_complete=verification_uri + "?code={user_code}", interval=app_settings.DEVICE_CODE_INTERVAL, user_code_generator=lambda: get_adapter().generate_user_code(), ) self._expires_in = app_settings.DEVICE_CODE_EXPIRES_IN def get_server(**kwargs): return OAuthLibServer(**kwargs) def get_device_server(): return DeviceOAuthLibServer()
DeviceOAuthLibServer
python
pytorch__pytorch
test/jit/test_backends.py
{ "start": 27286, "end": 28738 }
class ____(JitBackendTestCase):
    """
    Tests for adding attributes to a model after lowering.
    """

    def setUp(self):
        super().setUp()
        # Create Python, JIT and backend versions of BasicModule.
        self.module = BasicModule()
        self.scripted_module = torch.jit.script(BasicModule())
        self.lowered_module = to_test_backend_multi(
            self.scripted_module,
            {"accum": {"": ""}, "sub_accum": {"": ""}, "forward": {"": ""}},
        )

    def test_attribute(self):
        input = [(torch.ones(5),)]
        pre_bundled = self.lowered_module(*input[0])
        # Attach bundled inputs which adds several attributes and functions to the model
        self.lowered_module = (
            torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
                self.lowered_module,
                input,
            )
        )
        post_bundled = self.lowered_module(
            *self.lowered_module.get_all_bundled_inputs()[0]
        )
        # Save and load the lowered module.
        self.save_load()
        # Use bundled after save and load to prove it's preserved
        post_load = self.lowered_module(
            *self.lowered_module.get_all_bundled_inputs()[0]
        )
        self.assertEqual(pre_bundled, post_bundled)
        self.assertEqual(post_bundled, post_load)


if __name__ == "__main__":
    raise_on_run_directly("test/test_jit.py")
AddedAttributesTest
python
tornadoweb__tornado
tornado/test/simple_httpclient_test.py
{ "start": 3161, "end": 3270 }
class ____(RequestHandler): def get(self): self.write(self.request.headers["Host"])
HostEchoHandler
python
tensorflow__tensorflow
tensorflow/python/ops/init_ops_v2.py
{ "start": 28209, "end": 29793 }
class ____(VarianceScaling): """The Glorot uniform initializer, also called Xavier uniform initializer. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(6 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.GlorotUniform()) >>> v1 <tf.Variable ... shape=(3, 3) ... >>> v2 <tf.Variable ... shape=(3, 3, 3) ... >>> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ... shape=(4, 4) dtype=float32... <tf.Variable ... shape=(4, 4, 4) dtype=float32... Args: seed: A Python integer. Used to create random seeds. See `tf.random.set_seed` for behavior. References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)) """ def __init__(self, seed=None): super(GlorotUniform, self).__init__( scale=1.0, mode="fan_avg", distribution="uniform", seed=seed) def get_config(self): return {"seed": self.seed}
GlorotUniform
python
pandas-dev__pandas
pandas/tests/indexes/period/test_indexing.py
{ "start": 18115, "end": 20691 }
class ____: def test_where(self, listlike_box): i = period_range("20130101", periods=5, freq="D") cond = [True] * len(i) expected = i result = i.where(listlike_box(cond)) tm.assert_index_equal(result, expected) cond = [False] + [True] * (len(i) - 1) expected = PeriodIndex([NaT] + i[1:].tolist(), freq="D") result = i.where(listlike_box(cond)) tm.assert_index_equal(result, expected) def test_where_other(self): i = period_range("20130101", periods=5, freq="D") for arr in [np.nan, NaT]: result = i.where(notna(i), other=arr) expected = i tm.assert_index_equal(result, expected) i2 = i.copy() i2 = PeriodIndex([NaT, NaT] + i[2:].tolist(), freq="D") result = i.where(notna(i2), i2) tm.assert_index_equal(result, i2) i2 = i.copy() i2 = PeriodIndex([NaT, NaT] + i[2:].tolist(), freq="D") result = i.where(notna(i2), i2.values) tm.assert_index_equal(result, i2) def test_where_invalid_dtypes(self): pi = period_range("20130101", periods=5, freq="D") tail = pi[2:].tolist() i2 = PeriodIndex([NaT, NaT] + tail, freq="D") mask = notna(i2) result = pi.where(mask, i2.asi8) expected = pd.Index([NaT._value, NaT._value] + tail, dtype=object) assert isinstance(expected[0], int) tm.assert_index_equal(result, expected) tdi = i2.asi8.view("timedelta64[ns]") expected = pd.Index([tdi[0], tdi[1]] + tail, dtype=object) assert isinstance(expected[0], np.timedelta64) result = pi.where(mask, tdi) tm.assert_index_equal(result, expected) dti = i2.to_timestamp("s") expected = pd.Index([dti[0], dti[1]] + tail, dtype=object) assert expected[0] is NaT result = pi.where(mask, dti) tm.assert_index_equal(result, expected) td = Timedelta(days=4) expected = pd.Index([td, td] + tail, dtype=object) assert expected[0] == td result = pi.where(mask, td) tm.assert_index_equal(result, expected) def test_where_mismatched_nat(self): pi = period_range("20130101", periods=5, freq="D") cond = np.array([True, False, True, True, False]) tdnat = np.timedelta64("NaT", "ns") expected = pd.Index([pi[0], tdnat, pi[2], pi[3], tdnat], dtype=object) assert expected[1] is tdnat result = pi.where(cond, tdnat) tm.assert_index_equal(result, expected)
TestWhere
python
doocs__leetcode
solution/3600-3699/3616.Number of Student Replacements/Solution.py
{ "start": 0, "end": 221 }
class ____: def totalReplacements(self, ranks: List[int]) -> int: ans, cur = 0, ranks[0] for x in ranks: if x < cur: cur = x ans += 1 return ans
Solution
python
kamyu104__LeetCode-Solutions
Python/number-of-zigzag-arrays-i.py
{ "start": 60, "end": 530 }
class ____(object): def zigZagArrays(self, n, l, r): """ :type n: int :type l: int :type r: int :rtype: int """ MOD = 10**9+7 r -= l dp = [1]*(r+1) for _ in xrange(n-1): prefix = 0 for i in xrange(len(dp)): dp[i], prefix = prefix, (prefix+dp[i])%MOD dp.reverse() return (reduce(lambda accu, x: (accu+x)%MOD, dp, 0)*2)%MOD
Solution
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/triggers/test_bedrock.py
{ "start": 3047, "end": 4527 }
class ____(TestBaseBedrockTrigger): EXPECTED_WAITER_NAME = "provisioned_model_throughput_complete" PROVISIONED_MODEL_ID = "provisioned_model_id" def test_serialization(self): """Assert that arguments and classpath are correctly serialized.""" trigger = BedrockProvisionModelThroughputCompletedTrigger( provisioned_model_id=self.PROVISIONED_MODEL_ID ) classpath, kwargs = trigger.serialize() assert classpath == BASE_TRIGGER_CLASSPATH + "BedrockProvisionModelThroughputCompletedTrigger" assert kwargs.get("provisioned_model_id") == self.PROVISIONED_MODEL_ID @pytest.mark.asyncio @mock.patch.object(BedrockHook, "get_waiter") @mock.patch.object(BedrockHook, "get_async_conn") async def test_run_success(self, mock_async_conn, mock_get_waiter): mock_async_conn.__aenter__.return_value = mock.MagicMock() mock_get_waiter().wait = AsyncMock() trigger = BedrockProvisionModelThroughputCompletedTrigger( provisioned_model_id=self.PROVISIONED_MODEL_ID ) generator = trigger.run() response = await generator.asend(None) assert response == TriggerEvent( {"status": "success", "provisioned_model_id": self.PROVISIONED_MODEL_ID} ) assert_expected_waiter_type(mock_get_waiter, self.EXPECTED_WAITER_NAME) mock_get_waiter().wait.assert_called_once()
TestBedrockProvisionModelThroughputCompletedTrigger
python
google__python-fire
fire/test_components.py
{ "start": 1630, "end": 1857 }
class ____: """Test class for testing when class has a help= arg.""" def __init__(self, help=True): # pylint: disable=redefined-builtin self.has_help = help self.dictionary = {'__help': 'help in a dict'}
WithHelpArg
python
streamlit__streamlit
lib/tests/streamlit/runtime/state/test_presentation.py
{ "start": 1059, "end": 1163 }
class ____: def __init__(self) -> None: self.widget_metadata: dict[str, Any] = {}
_FakeWStates
python
openai__openai-python
src/openai/types/beta/file_search_tool.py
{ "start": 261, "end": 626 }
class ____(BaseModel): score_threshold: float """The score threshold for the file search. All values must be a floating point number between 0 and 1. """ ranker: Optional[Literal["auto", "default_2024_08_21"]] = None """The ranker to use for the file search. If not specified will use the `auto` ranker. """
FileSearchRankingOptions
python
walkccc__LeetCode
solutions/3091. Apply Operations to Make Sum of Array Greater Than or Equal to k/3091.py
{ "start": 0, "end": 574 }
class ____: def minOperations(self, k: int) -> int: # The required operations are # 1. Increase `1` to `x` # 2. Duplicate `x`, `y` times, to `sum` s.t. x * (1 + y) >= k. # The number of operations used would be (x - 1) + y. Equivalently, the # problem can be rephrased as finding min(x - 1 + y) s.t. x * (1 + y) >= k. # Optimally, `x` should equal to `1 + y`, implying that x^2 >= k, and # hence, x >= sqrt(k) and y = ceil(k / x) - 1. x = math.ceil(math.sqrt(k)) y = (k - 1) // x + 1 - 1 # ceil(k / x) - 1 return x - 1 + y
Solution
python
celery__celery
celery/worker/control.py
{ "start": 963, "end": 19921 }
class ____(UserDict): """Global registry of remote control commands.""" data = {} # global dict. meta = {} # -"- @classmethod def register(cls, *args, **kwargs): if args: return cls._register(**kwargs)(*args) return cls._register(**kwargs) @classmethod def _register(cls, name=None, alias=None, type='control', visible=True, default_timeout=1.0, help=None, signature=None, args=None, variadic=None): def _inner(fun): control_name = name or fun.__name__ _help = help or (fun.__doc__ or '').strip().split('\n')[0] cls.data[control_name] = fun cls.meta[control_name] = controller_info_t( alias, type, visible, default_timeout, _help, signature, args, variadic) if alias: cls.data[alias] = fun return fun return _inner def control_command(**kwargs): return Panel.register(type='control', **kwargs) def inspect_command(**kwargs): return Panel.register(type='inspect', **kwargs) # -- App @inspect_command() def report(state): """Information about Celery installation for bug reports.""" return ok(state.app.bugreport()) @inspect_command( alias='dump_conf', # XXX < backwards compatible signature='[include_defaults=False]', args=[('with_defaults', strtobool)], ) def conf(state, with_defaults=False, **kwargs): """List configuration.""" return jsonify(state.app.conf.table(with_defaults=with_defaults), keyfilter=_wanted_config_key, unknown_type_filter=safe_repr) def _wanted_config_key(key): return isinstance(key, str) and not key.startswith('__') # -- Task @inspect_command( variadic='ids', signature='[id1 [id2 [... [idN]]]]', ) def query_task(state, ids, **kwargs): """Query for task information by id.""" return { req.id: (_state_of_task(req), req.info()) for req in _find_requests_by_id(maybe_list(ids)) } def _find_requests_by_id(ids, get_request=worker_state.requests.__getitem__): for task_id in ids: try: yield get_request(task_id) except KeyError: pass def _state_of_task(request, is_active=worker_state.active_requests.__contains__, is_reserved=worker_state.reserved_requests.__contains__): if is_active(request): return 'active' elif is_reserved(request): return 'reserved' return 'ready' @control_command( variadic='task_id', signature='[id1 [id2 [... [idN]]]]', ) def revoke(state, task_id, terminate=False, signal=None, **kwargs): """Revoke task by task id (or list of ids). Keyword Arguments: terminate (bool): Also terminate the process if the task is active. signal (str): Name of signal to use for terminate (e.g., ``KILL``). """ # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. # supports list argument since 3.1 task_ids, task_id = set(maybe_list(task_id) or []), None task_ids = _revoke(state, task_ids, terminate, signal, **kwargs) if isinstance(task_ids, dict) and 'ok' in task_ids: return task_ids return ok(f'tasks {task_ids} flagged as revoked') @control_command( variadic='headers', signature='[key1=value1 [key2=value2 [... [keyN=valueN]]]]', ) def revoke_by_stamped_headers(state, headers, terminate=False, signal=None, **kwargs): """Revoke task by header (or list of headers). Keyword Arguments: headers(dictionary): Dictionary that contains stamping scheme name as keys and stamps as values. If headers is a list, it will be converted to a dictionary. terminate (bool): Also terminate the process if the task is active. signal (str): Name of signal to use for terminate (e.g., ``KILL``). 
Sample headers input: {'mtask_id': [id1, id2, id3]} """ # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. # supports list argument since 3.1 signum = _signals.signum(signal or TERM_SIGNAME) if isinstance(headers, list): headers = {h.split('=')[0]: h.split('=')[1] for h in headers} for header, stamps in headers.items(): updated_stamps = maybe_list(worker_state.revoked_stamps.get(header) or []) + list(maybe_list(stamps)) worker_state.revoked_stamps[header] = updated_stamps if not terminate: return ok(f'headers {headers} flagged as revoked, but not terminated') active_requests = list(worker_state.active_requests) terminated_scheme_to_stamps_mapping = defaultdict(set) # Terminate all running tasks of matching headers # Go through all active requests, and check if one of the # requests has a stamped header that matches the given headers to revoke for req in active_requests: # Check stamps exist if hasattr(req, "stamps") and req.stamps: # if so, check if any stamps match a revoked stamp for expected_header_key, expected_header_value in headers.items(): if expected_header_key in req.stamps: expected_header_value = maybe_list(expected_header_value) actual_header = maybe_list(req.stamps[expected_header_key]) matching_stamps_for_request = set(actual_header) & set(expected_header_value) # Check any possible match regardless if the stamps are a sequence or not if matching_stamps_for_request: terminated_scheme_to_stamps_mapping[expected_header_key].update(matching_stamps_for_request) req.terminate(state.consumer.pool, signal=signum) if not terminated_scheme_to_stamps_mapping: return ok(f'headers {headers} were not terminated') return ok(f'headers {terminated_scheme_to_stamps_mapping} revoked') def _revoke(state, task_ids, terminate=False, signal=None, **kwargs): size = len(task_ids) terminated = set() worker_state.revoked.update(task_ids) if terminate: signum = _signals.signum(signal or TERM_SIGNAME) for request in _find_requests_by_id(task_ids): if request.id not in terminated: terminated.add(request.id) logger.info('Terminating %s (%s)', request.id, signum) request.terminate(state.consumer.pool, signal=signum) if len(terminated) >= size: break if not terminated: return ok('terminate: tasks unknown') return ok('terminate: {}'.format(', '.join(terminated))) idstr = ', '.join(task_ids) logger.info('Tasks flagged as revoked: %s', idstr) return task_ids @control_command( variadic='task_id', args=[('signal', str)], signature='<signal> [id1 [id2 [... [idN]]]]' ) def terminate(state, signal, task_id, **kwargs): """Terminate task by task id (or list of ids).""" return revoke(state, task_id, terminate=True, signal=signal) @control_command( args=[('task_name', str), ('rate_limit', str)], signature='<task_name> <rate_limit (e.g., 5/s | 5/m | 5/h)>', ) def rate_limit(state, task_name, rate_limit, **kwargs): """Tell worker(s) to modify the rate limit for a task by type. See Also: :attr:`celery.app.task.Task.rate_limit`. Arguments: task_name (str): Type of task to set rate limit for. rate_limit (int, str): New rate limit. """ # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. 
try: rate(rate_limit) except ValueError as exc: return nok(f'Invalid rate limit string: {exc!r}') try: state.app.tasks[task_name].rate_limit = rate_limit except KeyError: logger.error('Rate limit attempt for unknown task %s', task_name, exc_info=True) return nok('unknown task') state.consumer.reset_rate_limits() if not rate_limit: logger.info('Rate limits disabled for tasks of type %s', task_name) return ok('rate limit disabled successfully') logger.info('New rate limit for tasks of type %s: %s.', task_name, rate_limit) return ok('new rate limit set successfully') @control_command( args=[('task_name', str), ('soft', float), ('hard', float)], signature='<task_name> <soft_secs> [hard_secs]', ) def time_limit(state, task_name=None, hard=None, soft=None, **kwargs): """Tell worker(s) to modify the time limit for task by type. Arguments: task_name (str): Name of task to change. hard (float): Hard time limit. soft (float): Soft time limit. """ try: task = state.app.tasks[task_name] except KeyError: logger.error('Change time limit attempt for unknown task %s', task_name, exc_info=True) return nok('unknown task') task.soft_time_limit = soft task.time_limit = hard logger.info('New time limits for tasks of type %s: soft=%s hard=%s', task_name, soft, hard) return ok('time limits set successfully') # -- Events @inspect_command() def clock(state, **kwargs): """Get current logical clock value.""" return {'clock': state.app.clock.value} @control_command() def election(state, id, topic, action=None, **kwargs): """Hold election. Arguments: id (str): Unique election id. topic (str): Election topic. action (str): Action to take for elected actor. """ if state.consumer.gossip: state.consumer.gossip.election(id, topic, action) @control_command() def enable_events(state): """Tell worker(s) to send task-related events.""" dispatcher = state.consumer.event_dispatcher if dispatcher.groups and 'task' not in dispatcher.groups: dispatcher.groups.add('task') logger.info('Events of group {task} enabled by remote.') return ok('task events enabled') return ok('task events already enabled') @control_command() def disable_events(state): """Tell worker(s) to stop sending task-related events.""" dispatcher = state.consumer.event_dispatcher if 'task' in dispatcher.groups: dispatcher.groups.discard('task') logger.info('Events of group {task} disabled by remote.') return ok('task events disabled') return ok('task events already disabled') @control_command() def heartbeat(state): """Tell worker(s) to send event heartbeat immediately.""" logger.debug('Heartbeat requested by remote.') dispatcher = state.consumer.event_dispatcher dispatcher.send('worker-heartbeat', freq=5, **worker_state.SOFTWARE_INFO) # -- Worker @inspect_command(visible=False) def hello(state, from_node, revoked=None, **kwargs): """Request mingle sync-data.""" # pylint: disable=redefined-outer-name # XXX Note that this redefines `revoked`: # Outside of this scope that is a function. if from_node != state.hostname: logger.info('sync with %s', from_node) if revoked: worker_state.revoked.update(revoked) # Do not send expired items to the other worker. 
worker_state.revoked.purge() return { 'revoked': worker_state.revoked._data, 'clock': state.app.clock.forward(), } @inspect_command(default_timeout=0.2) def ping(state, **kwargs): """Ping worker(s).""" return ok('pong') @inspect_command() def stats(state, **kwargs): """Request worker statistics/information.""" return state.consumer.controller.stats() @inspect_command(alias='dump_schedule') def scheduled(state, **kwargs): """List of currently scheduled ETA/countdown tasks.""" return list(_iter_schedule_requests(state.consumer.timer)) def _iter_schedule_requests(timer): for waiting in timer.schedule.queue: try: arg0 = waiting.entry.args[0] except (IndexError, TypeError): continue else: if isinstance(arg0, Request): yield { 'eta': arg0.eta.isoformat() if arg0.eta else None, 'priority': waiting.priority, 'request': arg0.info(), } @inspect_command(alias='dump_reserved') def reserved(state, **kwargs): """List of currently reserved tasks, not including scheduled/active.""" reserved_tasks = ( state.tset(worker_state.reserved_requests) - state.tset(worker_state.active_requests) ) if not reserved_tasks: return [] return [request.info() for request in reserved_tasks] @inspect_command(alias='dump_active') def active(state, safe=False, **kwargs): """List of tasks currently being executed.""" return [request.info(safe=safe) for request in state.tset(worker_state.active_requests)] @inspect_command(alias='dump_revoked') def revoked(state, **kwargs): """List of revoked task-ids.""" return list(worker_state.revoked) @inspect_command( alias='dump_tasks', variadic='taskinfoitems', signature='[attr1 [attr2 [... [attrN]]]]', ) def registered(state, taskinfoitems=None, builtins=False, **kwargs): """List of registered tasks. Arguments: taskinfoitems (Sequence[str]): List of task attributes to include. Defaults to ``exchange,routing_key,rate_limit``. builtins (bool): Also include built-in tasks. """ reg = state.app.tasks taskinfoitems = taskinfoitems or DEFAULT_TASK_INFO_ITEMS tasks = reg if builtins else ( task for task in reg if not task.startswith('celery.')) def _extract_info(task): fields = { field: str(getattr(task, field, None)) for field in taskinfoitems if getattr(task, field, None) is not None } if fields: info = ['='.join(f) for f in fields.items()] return '{} [{}]'.format(task.name, ' '.join(info)) return task.name return [_extract_info(reg[task]) for task in sorted(tasks)] # -- Debugging @inspect_command( default_timeout=60.0, args=[('type', str), ('num', int), ('max_depth', int)], signature='[object_type=Request] [num=200 [max_depth=10]]', ) def objgraph(state, num=200, max_depth=10, type='Request'): # pragma: no cover """Create graph of uncollected objects (memory-leak debugging). Arguments: num (int): Max number of objects to graph. max_depth (int): Traverse at most n levels deep. type (str): Name of object to graph. Default is ``"Request"``. 
""" try: import objgraph as _objgraph except ImportError: raise ImportError('Requires the objgraph library') logger.info('Dumping graph for type %r', type) with tempfile.NamedTemporaryFile(prefix='cobjg', suffix='.png', delete=False) as fh: objects = _objgraph.by_type(type)[:num] _objgraph.show_backrefs( objects, max_depth=max_depth, highlight=lambda v: v in objects, filename=fh.name, ) return {'filename': fh.name} @inspect_command() def memsample(state, **kwargs): """Sample current RSS memory usage.""" from celery.utils.debug import sample_mem return sample_mem() @inspect_command( args=[('samples', int)], signature='[n_samples=10]', ) def memdump(state, samples=10, **kwargs): # pragma: no cover """Dump statistics of previous memsample requests.""" from celery.utils import debug out = io.StringIO() debug.memdump(file=out) return out.getvalue() # -- Pool @control_command( args=[('n', int)], signature='[N=1]', ) def pool_grow(state, n=1, **kwargs): """Grow pool by n processes/threads.""" if state.consumer.controller.autoscaler: return nok("pool_grow is not supported with autoscale. Adjust autoscale range instead.") else: state.consumer.pool.grow(n) state.consumer._update_prefetch_count(n) return ok('pool will grow') @control_command( args=[('n', int)], signature='[N=1]', ) def pool_shrink(state, n=1, **kwargs): """Shrink pool by n processes/threads.""" if state.consumer.controller.autoscaler: return nok("pool_shrink is not supported with autoscale. Adjust autoscale range instead.") else: state.consumer.pool.shrink(n) state.consumer._update_prefetch_count(-n) return ok('pool will shrink') @control_command() def pool_restart(state, modules=None, reload=False, reloader=None, **kwargs): """Restart execution pool.""" if state.app.conf.worker_pool_restarts: state.consumer.controller.reload(modules, reload, reloader=reloader) return ok('reload started') else: raise ValueError('Pool restarts not enabled') @control_command( args=[('max', int), ('min', int)], signature='[max [min]]', ) def autoscale(state, max=None, min=None): """Modify autoscale settings.""" autoscaler = state.consumer.controller.autoscaler if autoscaler: max_, min_ = autoscaler.update(max, min) return ok(f'autoscale now max={max_} min={min_}') raise ValueError('Autoscale not enabled') @control_command() def shutdown(state, msg='Got shutdown from remote', **kwargs): """Shutdown worker(s).""" logger.warning(msg) raise WorkerShutdown(EX_OK) # -- Queues @control_command( args=[ ('queue', str), ('exchange', str), ('exchange_type', str), ('routing_key', str), ], signature='<queue> [exchange [type [routing_key]]]', ) def add_consumer(state, queue, exchange=None, exchange_type=None, routing_key=None, **options): """Tell worker(s) to consume from task queue by name.""" state.consumer.call_soon( state.consumer.add_task_queue, queue, exchange, exchange_type or 'direct', routing_key, **options) return ok(f'add consumer {queue}') @control_command( args=[('queue', str)], signature='<queue>', ) def cancel_consumer(state, queue, **_): """Tell worker(s) to stop consuming from task queue by name.""" state.consumer.call_soon( state.consumer.cancel_task_queue, queue, ) return ok(f'no longer consuming from {queue}') @inspect_command() def active_queues(state): """List the task queues a worker is currently consuming from.""" if state.consumer.task_consumer: return [dict(queue.as_dict(recurse=True)) for queue in state.consumer.task_consumer.queues] return []
Panel
python
airbytehq__airbyte
airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/tests/test_core.py
{ "start": 78555, "end": 88771 }
class ____(BaseTest): MANDATORY_FOR_TEST_STRICTNESS_LEVELS = [] # Used so that this is not part of the mandatory high strictness test suite yet PREREQUISITES = "Prerequisites" HEADING = "heading" CREDENTIALS_KEYWORDS = ["account", "auth", "credentials", "access"] CONNECTOR_SPECIFIC_HEADINGS = "<Connector-specific features>" @pytest.fixture(name="operational_certification_test") async def operational_certification_test_fixture(self, is_connector_certified: bool) -> bool: """ Fixture that is used to skip a test that is reserved only for connectors that are supposed to be tested against operational certification criteria """ if not is_connector_certified: pytest.skip("Skipping testing source connector documentation due to low ql.") return True def _get_template_headings(self, connector_name: str) -> tuple[tuple[str], tuple[str]]: """ https://hackmd.io/Bz75cgATSbm7DjrAqgl4rw - standard template Headings in order to docs structure. """ all_headings = ( connector_name, "Prerequisites", "Setup guide", f"Set up {connector_name}", "For Airbyte Cloud:", "For Airbyte Open Source:", f"Set up the {connector_name} connector in Airbyte", "For Airbyte Cloud:", "For Airbyte Open Source:", "Supported sync modes", "Supported Streams", self.CONNECTOR_SPECIFIC_HEADINGS, "Performance considerations", "Data type map", "Troubleshooting", "Tutorials", "Changelog", ) not_required_heading = ( f"Set up the {connector_name} connector in Airbyte", "For Airbyte Cloud:", "For Airbyte Open Source:", self.CONNECTOR_SPECIFIC_HEADINGS, "Performance considerations", "Data type map", "Troubleshooting", "Tutorials", ) return all_headings, not_required_heading def _headings_description(self, connector_name: str) -> dict[str:Path]: """ Headings with path to file with template description """ descriptions_paths = { connector_name: Path(__file__).parent / "doc_templates/source.txt", "For Airbyte Cloud:": Path(__file__).parent / "doc_templates/for_airbyte_cloud.txt", "For Airbyte Open Source:": Path(__file__).parent / "doc_templates/for_airbyte_open_source.txt", "Supported sync modes": Path(__file__).parent / "doc_templates/supported_sync_modes.txt", "Tutorials": Path(__file__).parent / "doc_templates/tutorials.txt", } return descriptions_paths def test_prerequisites_content( self, operational_certification_test, actual_connector_spec: ConnectorSpecification, connector_documentation: str, docs_path: str ): node = docs_utils.documentation_node(connector_documentation) header_line_map = {docs_utils.header_name(n): n.map[1] for n in node if n.type == self.HEADING} headings = tuple(header_line_map.keys()) if not header_line_map.get(self.PREREQUISITES): pytest.fail(f"Documentation does not have {self.PREREQUISITES} section.") prereq_start_line = header_line_map[self.PREREQUISITES] prereq_end_line = docs_utils.description_end_line_index(self.PREREQUISITES, headings, header_line_map) with open(docs_path, "r") as docs_file: prereq_content_lines = docs_file.readlines()[prereq_start_line:prereq_end_line] # adding real character to avoid accidentally joining lines into a wanted title. prereq_content = "|".join(prereq_content_lines).lower() required_titles, has_credentials = docs_utils.required_titles_from_spec(actual_connector_spec.connectionSpecification) for title in required_titles: assert title in prereq_content, ( f"Required '{title}' field is not in {self.PREREQUISITES} section " f"or title in spec doesn't match name in the docs." 
) if has_credentials: # credentials has specific check for keywords as we have a lot of way how to describe this step credentials_validation = [k in prereq_content for k in self.CREDENTIALS_KEYWORDS] assert True in credentials_validation, f"Required 'credentials' field is not in {self.PREREQUISITES} section." def test_docs_structure(self, operational_certification_test, connector_documentation: str, connector_metadata: dict): """ test_docs_structure gets all top-level headers from source documentation file and check that the order is correct. The order of the headers should follow our standard template https://hackmd.io/Bz75cgATSbm7DjrAqgl4rw. _get_template_headings returns tuple of headers as in standard template and non-required headers that might nor be in the source docs. CONNECTOR_SPECIFIC_HEADINGS value in list of required headers that shows a place where should be a connector specific headers, which can be skipped as out of standard template and depend of connector. """ heading_names = docs_utils.prepare_headers(connector_documentation) template_headings, non_required_heading = self._get_template_headings(connector_metadata["data"]["name"]) heading_names_len, template_headings_len = len(heading_names), len(template_headings) heading_names_index, template_headings_index = 0, 0 while heading_names_index < heading_names_len and template_headings_index < template_headings_len: heading_names_value = heading_names[heading_names_index] template_headings_value = template_headings[template_headings_index] # check that template header is specific for connector and actual header should not be validated if template_headings_value == self.CONNECTOR_SPECIFIC_HEADINGS: # check that actual header is not in required headers, as required headers should be on a right place and order if heading_names_value not in template_headings: heading_names_index += 1 # go to the next actual header as CONNECTOR_SPECIFIC_HEADINGS can be more than one continue else: # if actual header is required go to the next template header to validate actual header order template_headings_index += 1 continue # strict check that actual header equals template header if heading_names_value == template_headings_value: # found expected header, go to the next header in template and actual headers heading_names_index += 1 template_headings_index += 1 continue # actual header != template header means that template value is not required and can be skipped if template_headings_value in non_required_heading: # found non-required header, go to the next template header to validate actual header template_headings_index += 1 continue # any check is True, indexes didn't move to the next step pytest.fail(docs_utils.reason_titles_not_match(heading_names_value, template_headings_value, template_headings)) # indexes didn't move to the last required one, so some headers are missed if template_headings_index != template_headings_len: pytest.fail(docs_utils.reason_missing_titles(template_headings_index, template_headings)) def test_docs_descriptions( self, operational_certification_test, docs_path: str, connector_documentation: str, connector_metadata: dict ): connector_name = connector_metadata["data"]["name"] template_descriptions = self._headings_description(connector_name) node = docs_utils.documentation_node(connector_documentation) header_line_map = {docs_utils.header_name(n): n.map[1] for n in node if n.type == self.HEADING} actual_headings = tuple(header_line_map.keys()) for heading, description in template_descriptions.items(): if heading in 
actual_headings: description_start_line = header_line_map[heading] description_end_line = docs_utils.description_end_line_index(heading, actual_headings, header_line_map) with open(docs_path, "r") as docs_file, open(description, "r") as template_file: docs_description_content = docs_file.readlines()[description_start_line:description_end_line] template_description_content = template_file.readlines() for d, t in zip(docs_description_content, template_description_content): d, t = docs_utils.prepare_lines_to_compare(connector_name, d, t) assert d == t, f"Description for '{heading}' does not follow structure.\nExpected: {t} Actual: {d}" def test_validate_links(self, operational_certification_test, connector_documentation: str): valid_status_codes = [200, 403, 401, 405] # we skip 4xx due to needed access links = re.findall("(https?://[^\s)]+)", connector_documentation) invalid_links = [] threads = [] def validate_docs_links(docs_link): response = requests.get(docs_link) if response.status_code not in valid_status_codes: invalid_links.append(docs_link) for link in links: process = Thread(target=validate_docs_links, args=[link]) process.start() threads.append(process) for process in threads: process.join(timeout=30) # 30s timeout for process else link will be skipped process.is_alive() assert not invalid_links, f"{len(invalid_links)} invalid links were found in the connector documentation: {invalid_links}."
TestConnectorDocumentation
python
euske__pdfminer
pdfminer/layout.py
{ "start": 4340, "end": 5107 }
class ____(LTComponent): def __init__(self, name, stream, bbox): LTComponent.__init__(self, bbox) self.name = name self.stream = stream self.srcsize = (stream.get_any(('W', 'Width')), stream.get_any(('H', 'Height'))) self.imagemask = stream.get_any(('IM', 'ImageMask')) self.bits = stream.get_any(('BPC', 'BitsPerComponent'), 1) self.colorspace = stream.get_any(('CS', 'ColorSpace')) if not isinstance(self.colorspace, list): self.colorspace = [self.colorspace] return def __repr__(self): return ('<%s(%s) %s %r>' % (self.__class__.__name__, self.name, bbox2str(self.bbox), self.srcsize)) ## LTAnno ##
LTImage
python
spack__spack
lib/spack/spack/vendor/jinja2/ext.py
{ "start": 1450, "end": 8361 }
class ____: """Extensions can be used to add extra functionality to the Jinja template system at the parser level. Custom extensions are bound to an environment but may not store environment specific data on `self`. The reason for this is that an extension can be bound to another environment (for overlays) by creating a copy and reassigning the `environment` attribute. As extensions are created by the environment they cannot accept any arguments for configuration. One may want to work around that by using a factory function, but that is not possible as extensions are identified by their import name. The correct way to configure the extension is storing the configuration values on the environment. Because this way the environment ends up acting as central configuration storage the attributes may clash which is why extensions have to ensure that the names they choose for configuration are not too generic. ``prefix`` for example is a terrible name, ``fragment_cache_prefix`` on the other hand is a good name as includes the name of the extension (fragment cache). """ identifier: t.ClassVar[str] def __init_subclass__(cls) -> None: cls.identifier = f"{cls.__module__}.{cls.__name__}" #: if this extension parses this is the list of tags it's listening to. tags: t.Set[str] = set() #: the priority of that extension. This is especially useful for #: extensions that preprocess values. A lower value means higher #: priority. #: #: .. versionadded:: 2.4 priority = 100 def __init__(self, environment: Environment) -> None: self.environment = environment def bind(self, environment: Environment) -> "Extension": """Create a copy of this extension bound to another environment.""" rv = t.cast(Extension, object.__new__(self.__class__)) rv.__dict__.update(self.__dict__) rv.environment = environment return rv def preprocess( self, source: str, name: t.Optional[str], filename: t.Optional[str] = None ) -> str: """This method is called before the actual lexing and can be used to preprocess the source. The `filename` is optional. The return value must be the preprocessed source. """ return source def filter_stream( self, stream: "TokenStream" ) -> t.Union["TokenStream", t.Iterable["Token"]]: """It's passed a :class:`~spack.vendor.jinja2.lexer.TokenStream` that can be used to filter tokens returned. This method has to return an iterable of :class:`~spack.vendor.jinja2.lexer.Token`\\s, but it doesn't have to return a :class:`~spack.vendor.jinja2.lexer.TokenStream`. """ return stream def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]: """If any of the :attr:`tags` matched this method is called with the parser as first argument. The token the parser stream is pointing at is the name token that matched. This method has to return one or a list of multiple nodes. """ raise NotImplementedError() def attr( self, name: str, lineno: t.Optional[int] = None ) -> nodes.ExtensionAttribute: """Return an attribute node for the current extension. This is useful to pass constants on extensions to generated template code. :: self.attr('_my_attribute', lineno=lineno) """ return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno) def call_method( self, name: str, args: t.Optional[t.List[nodes.Expr]] = None, kwargs: t.Optional[t.List[nodes.Keyword]] = None, dyn_args: t.Optional[nodes.Expr] = None, dyn_kwargs: t.Optional[nodes.Expr] = None, lineno: t.Optional[int] = None, ) -> nodes.Call: """Call a method of the extension. This is a shortcut for :meth:`attr` + :class:`spack.vendor.jinja2.nodes.Call`. 
""" if args is None: args = [] if kwargs is None: kwargs = [] return nodes.Call( self.attr(name, lineno=lineno), args, kwargs, dyn_args, dyn_kwargs, lineno=lineno, ) @pass_context def _gettext_alias( __context: Context, *args: t.Any, **kwargs: t.Any ) -> t.Union[t.Any, Undefined]: return __context.call(__context.resolve("gettext"), *args, **kwargs) def _make_new_gettext(func: t.Callable[[str], str]) -> t.Callable[..., str]: @pass_context def gettext(__context: Context, __string: str, **variables: t.Any) -> str: rv = __context.call(func, __string) if __context.eval_ctx.autoescape: rv = Markup(rv) # Always treat as a format string, even if there are no # variables. This makes translation strings more consistent # and predictable. This requires escaping return rv % variables # type: ignore return gettext def _make_new_ngettext(func: t.Callable[[str, str, int], str]) -> t.Callable[..., str]: @pass_context def ngettext( __context: Context, __singular: str, __plural: str, __num: int, **variables: t.Any, ) -> str: variables.setdefault("num", __num) rv = __context.call(func, __singular, __plural, __num) if __context.eval_ctx.autoescape: rv = Markup(rv) # Always treat as a format string, see gettext comment above. return rv % variables # type: ignore return ngettext def _make_new_pgettext(func: t.Callable[[str, str], str]) -> t.Callable[..., str]: @pass_context def pgettext( __context: Context, __string_ctx: str, __string: str, **variables: t.Any ) -> str: variables.setdefault("context", __string_ctx) rv = __context.call(func, __string_ctx, __string) if __context.eval_ctx.autoescape: rv = Markup(rv) # Always treat as a format string, see gettext comment above. return rv % variables # type: ignore return pgettext def _make_new_npgettext( func: t.Callable[[str, str, str, int], str] ) -> t.Callable[..., str]: @pass_context def npgettext( __context: Context, __string_ctx: str, __singular: str, __plural: str, __num: int, **variables: t.Any, ) -> str: variables.setdefault("context", __string_ctx) variables.setdefault("num", __num) rv = __context.call(func, __string_ctx, __singular, __plural, __num) if __context.eval_ctx.autoescape: rv = Markup(rv) # Always treat as a format string, see gettext comment above. return rv % variables # type: ignore return npgettext
Extension
python
Pylons__pyramid
tests/test_authorization.py
{ "start": 55, "end": 9555 }
class ____(unittest.TestCase):
    def setUp(self):
        cleanUp()

    def tearDown(self):
        cleanUp()

    def _getTargetClass(self):
        from pyramid.authorization import ACLAuthorizationPolicy

        return ACLAuthorizationPolicy

    def _makeOne(self):
        return self._getTargetClass()()

    def test_class_implements_IAuthorizationPolicy(self):
        from zope.interface.verify import verifyClass

        from pyramid.interfaces import IAuthorizationPolicy

        verifyClass(IAuthorizationPolicy, self._getTargetClass())

    def test_instance_implements_IAuthorizationPolicy(self):
        from zope.interface.verify import verifyObject

        from pyramid.interfaces import IAuthorizationPolicy

        verifyObject(IAuthorizationPolicy, self._makeOne())

    def test_permits_no_acl(self):
        context = DummyContext()
        policy = self._makeOne()
        self.assertEqual(policy.permits(context, [], 'view'), False)

    def test_permits(self):
        from pyramid.authorization import (
            ALL_PERMISSIONS,
            DENY_ALL,
            Allow,
            Authenticated,
            Deny,
            Everyone,
        )

        root = DummyContext()
        community = DummyContext(__name__='community', __parent__=root)
        blog = DummyContext(__name__='blog', __parent__=community)
        root.__acl__ = [(Allow, Authenticated, VIEW)]
        community.__acl__ = [
            (Allow, 'fred', ALL_PERMISSIONS),
            (Allow, 'wilma', VIEW),
            DENY_ALL,
        ]
        blog.__acl__ = [
            (Allow, 'barney', MEMBER_PERMS),
            (Allow, 'wilma', VIEW),
        ]
        policy = self._makeOne()

        result = policy.permits(
            blog, [Everyone, Authenticated, 'wilma'], 'view'
        )
        self.assertEqual(result, True)
        self.assertEqual(result.context, blog)
        self.assertEqual(result.ace, (Allow, 'wilma', VIEW))
        self.assertEqual(result.acl, blog.__acl__)

        result = policy.permits(
            blog, [Everyone, Authenticated, 'wilma'], 'delete'
        )
        self.assertEqual(result, False)
        self.assertEqual(result.context, community)
        self.assertEqual(result.ace, (Deny, Everyone, ALL_PERMISSIONS))
        self.assertEqual(result.acl, community.__acl__)

        result = policy.permits(
            blog, [Everyone, Authenticated, 'fred'], 'view'
        )
        self.assertEqual(result, True)
        self.assertEqual(result.context, community)
        self.assertEqual(result.ace, (Allow, 'fred', ALL_PERMISSIONS))
        result = policy.permits(
            blog, [Everyone, Authenticated, 'fred'], 'doesntevenexistyet'
        )
        self.assertEqual(result, True)
        self.assertEqual(result.context, community)
        self.assertEqual(result.ace, (Allow, 'fred', ALL_PERMISSIONS))
        self.assertEqual(result.acl, community.__acl__)

        result = policy.permits(
            blog, [Everyone, Authenticated, 'barney'], 'view'
        )
        self.assertEqual(result, True)
        self.assertEqual(result.context, blog)
        self.assertEqual(result.ace, (Allow, 'barney', MEMBER_PERMS))
        result = policy.permits(
            blog, [Everyone, Authenticated, 'barney'], 'administer'
        )
        self.assertEqual(result, False)
        self.assertEqual(result.context, community)
        self.assertEqual(result.ace, (Deny, Everyone, ALL_PERMISSIONS))
        self.assertEqual(result.acl, community.__acl__)

        result = policy.permits(
            root, [Everyone, Authenticated, 'someguy'], 'view'
        )
        self.assertEqual(result, True)
        self.assertEqual(result.context, root)
        self.assertEqual(result.ace, (Allow, Authenticated, VIEW))
        result = policy.permits(
            blog, [Everyone, Authenticated, 'someguy'], 'view'
        )
        self.assertEqual(result, False)
        self.assertEqual(result.context, community)
        self.assertEqual(result.ace, (Deny, Everyone, ALL_PERMISSIONS))
        self.assertEqual(result.acl, community.__acl__)

        result = policy.permits(root, [Everyone], 'view')
        self.assertEqual(result, False)
        self.assertEqual(result.context, root)
        self.assertEqual(result.ace, '<default deny>')
        self.assertEqual(result.acl, root.__acl__)

        context = DummyContext()
        result = policy.permits(context, [Everyone], 'view')
        self.assertEqual(result, False)
        self.assertEqual(result.ace, '<default deny>')
        self.assertEqual(
            result.acl, '<No ACL found on any object in resource lineage>'
        )

    def test_permits_string_permissions_in_acl(self):
        from pyramid.authorization import Allow

        root = DummyContext()
        root.__acl__ = [(Allow, 'wilma', 'view_stuff')]

        policy = self._makeOne()
        result = policy.permits(root, ['wilma'], 'view')

        # would be True if matching against 'view_stuff' instead of against
        # ['view_stuff']
        self.assertEqual(result, False)

    def test_principals_allowed_by_permission_direct(self):
        from pyramid.authorization import DENY_ALL, Allow

        context = DummyContext()
        acl = [
            (Allow, 'chrism', ('read', 'write')),
            DENY_ALL,
            (Allow, 'other', 'read'),
        ]
        context.__acl__ = acl
        policy = self._makeOne()
        result = sorted(
            policy.principals_allowed_by_permission(context, 'read')
        )
        self.assertEqual(result, ['chrism'])

    def test_principals_allowed_by_permission_callable_acl(self):
        from pyramid.authorization import DENY_ALL, Allow

        context = DummyContext()
        acl = lambda: [
            (Allow, 'chrism', ('read', 'write')),
            DENY_ALL,
            (Allow, 'other', 'read'),
        ]
        context.__acl__ = acl
        policy = self._makeOne()
        result = sorted(
            policy.principals_allowed_by_permission(context, 'read')
        )
        self.assertEqual(result, ['chrism'])

    def test_principals_allowed_by_permission_string_permission(self):
        from pyramid.authorization import Allow

        context = DummyContext()
        acl = [(Allow, 'chrism', 'read_it')]
        context.__acl__ = acl
        policy = self._makeOne()
        result = policy.principals_allowed_by_permission(context, 'read')

        # would be ['chrism'] if 'read' were compared against 'read_it' instead
        # of against ['read_it']
        self.assertEqual(list(result), [])

    def test_principals_allowed_by_permission(self):
        from pyramid.authorization import (
            ALL_PERMISSIONS,
            DENY_ALL,
            Allow,
            Deny,
        )

        root = DummyContext(__name__='', __parent__=None)
        community = DummyContext(__name__='community', __parent__=root)
        blog = DummyContext(__name__='blog', __parent__=community)
        root.__acl__ = [
            (Allow, 'chrism', ('read', 'write')),
            (Allow, 'other', ('read',)),
            (Allow, 'jim', ALL_PERMISSIONS),
        ]
        community.__acl__ = [
            (Deny, 'flooz', 'read'),
            (Allow, 'flooz', 'read'),
            (Allow, 'mork', 'read'),
            (Deny, 'jim', 'read'),
            (Allow, 'someguy', 'manage'),
        ]
        blog.__acl__ = [(Allow, 'fred', 'read'), DENY_ALL]
        policy = self._makeOne()

        result = sorted(policy.principals_allowed_by_permission(blog, 'read'))
        self.assertEqual(result, ['fred'])
        result = sorted(
            policy.principals_allowed_by_permission(community, 'read')
        )
        self.assertEqual(result, ['chrism', 'mork', 'other'])
        result = sorted(
            policy.principals_allowed_by_permission(community, 'read')
        )
        result = sorted(policy.principals_allowed_by_permission(root, 'read'))
        self.assertEqual(result, ['chrism', 'jim', 'other'])

    def test_principals_allowed_by_permission_no_acls(self):
        context = DummyContext()
        policy = self._makeOne()
        result = sorted(
            policy.principals_allowed_by_permission(context, 'read')
        )
        self.assertEqual(result, [])

    def test_principals_allowed_by_permission_deny_not_permission_in_acl(self):
        from pyramid.authorization import Deny, Everyone

        context = DummyContext()
        acl = [(Deny, Everyone, 'write')]
        context.__acl__ = acl
        policy = self._makeOne()
        result = sorted(
            policy.principals_allowed_by_permission(context, 'read')
        )
        self.assertEqual(result, [])

    def test_principals_allowed_by_permission_deny_permission_in_acl(self):
        from pyramid.authorization import Deny, Everyone

        context = DummyContext()
        acl = [(Deny, Everyone, 'read')]
        context.__acl__ = acl
        policy = self._makeOne()
        result = sorted(
            policy.principals_allowed_by_permission(context, 'read')
        )
        self.assertEqual(result, [])

    def test_callable_acl(self):
        from pyramid.authorization import Allow

        context = DummyContext()
        fn = lambda self: [(Allow, 'bob', 'read')]
        context.__acl__ = fn.__get__(context, context.__class__)
        policy = self._makeOne()
        result = policy.permits(context, ['bob'], 'read')
        self.assertTrue(result)
TestACLAuthorizationPolicy
python
geekcomputers__Python
venv/Lib/site-packages/pip/_vendor/rich/prompt.py
{ "start": 9501, "end": 11304 }
class ____(PromptBase[bool]):
    """A yes / no confirmation prompt.

    Example:
        >>> if Confirm.ask("Continue"): run_job()

    """

    response_type = bool
    validate_error_message = "[prompt.invalid]Please enter Y or N"
    choices: List[str] = ["y", "n"]

    def render_default(self, default: DefaultType) -> Text:
        """Render the default as (y) or (n) rather than True/False."""
        yes, no = self.choices
        return Text(f"({yes})" if default else f"({no})", style="prompt.default")

    def process_response(self, value: str) -> bool:
        """Convert choices to a bool."""
        value = value.strip().lower()
        if value not in self.choices:
            raise InvalidResponse(self.validate_error_message)
        return value == self.choices[0]


if __name__ == "__main__":  # pragma: no cover

    from pip._vendor.rich import print

    if Confirm.ask("Run [i]prompt[/i] tests?", default=True):
        while True:
            result = IntPrompt.ask(
                ":rocket: Enter a number between [b]1[/b] and [b]10[/b]", default=5
            )
            if result >= 1 and result <= 10:
                break
            print(":pile_of_poo: [prompt.invalid]Number must be between 1 and 10")
        print(f"number={result}")

        while True:
            password = Prompt.ask(
                "Please enter a password [cyan](must be at least 5 characters)",
                password=True,
            )
            if len(password) >= 5:
                break
            print("[prompt.invalid]password too short")
        print(f"password={password!r}")

        fruit = Prompt.ask("Enter a fruit", choices=["apple", "orange", "pear"])
        print(f"fruit={fruit!r}")

    else:
        print("[b]OK :loudly_crying_face:")
Confirm
python
huggingface__transformers
src/transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py
{ "start": 11389, "end": 11564 }
class ____(MoshiModel):
    def __init__(self, config):
        super().__init__(config)
        self.embed_tokens = KyutaiSpeechToTextEmbeddings(config)
KyutaiSpeechToTextModel
python
kamyu104__LeetCode-Solutions
Python/cracking-the-safe.py
{ "start": 3121, "end": 3853 }
class ____(object):
    def crackSafe(self, n, k):
        """
        :type n: int
        :type k: int
        :rtype: str
        """
        def dfs(k, node, lookup, result):
            for i in xrange(k):
                # preorder like traversal relative to initial result to avoid getting stuck,
                # i.e. don't use k-1 until there is no other choice
                neighbor = node + str(i)
                if neighbor not in lookup:
                    lookup.add(neighbor)
                    result.append(str(i))
                    dfs(k, neighbor[1:], lookup, result)
                    break

        result = [str(k-1)]*(n-1)
        lookup = set()
        dfs(k, "".join(result), lookup, result)
        return "".join(result)
Solution5
python
mlflow__mlflow
tests/h2o/test_h2o_model_export.py
{ "start": 1031, "end": 14537 }
class ____(NamedTuple): model: Any inference_data: Any @pytest.fixture def h2o_iris_model(): h2o.init() iris = datasets.load_iris() data = h2o.H2OFrame( { "feature1": list(iris.data[:, 0]), "feature2": list(iris.data[:, 1]), "target": ([f"Flower {i}" for i in iris.target]), } ) train, test = data.split_frame(ratios=[0.7]) h2o_gbm = H2OGradientBoostingEstimator(ntrees=10, max_depth=6) h2o_gbm.train(["feature1", "feature2"], "target", training_frame=train) return ModelWithData(model=h2o_gbm, inference_data=test) @pytest.fixture(scope="module") def h2o_iris_model_signature(): return ModelSignature( inputs=Schema( [ ColSpec(name="feature1", type=DataType.double), ColSpec(name="feature2", type=DataType.double), ColSpec(name="target", type=DataType.string), ] ), outputs=Schema( [ ColSpec(name="predict", type=DataType.string), ColSpec(name="Flower 0", type=DataType.double), ColSpec(name="Flower 1", type=DataType.double), ColSpec(name="Flower 2", type=DataType.double), ] ), ) @pytest.fixture def model_path(tmp_path): return os.path.join(tmp_path, "model") @pytest.fixture def h2o_custom_env(tmp_path): conda_env = os.path.join(tmp_path, "conda_env.yml") _mlflow_conda_env(conda_env, additional_pip_deps=["h2o", "pytest"]) return conda_env def test_model_save_load(h2o_iris_model, model_path): h2o_model = h2o_iris_model.model mlflow.h2o.save_model(h2o_model=h2o_model, path=model_path) # Loading h2o model h2o_model_loaded = mlflow.h2o.load_model(model_path) assert all( h2o_model_loaded.predict(h2o_iris_model.inference_data).as_data_frame() == h2o_model.predict(h2o_iris_model.inference_data).as_data_frame() ) # Loading pyfunc model pyfunc_loaded = mlflow.pyfunc.load_model(model_path) assert all( pyfunc_loaded.predict(h2o_iris_model.inference_data.as_data_frame()) == h2o_model.predict(h2o_iris_model.inference_data).as_data_frame() ) def test_signature_and_examples_are_saved_correctly(h2o_iris_model, h2o_iris_model_signature): model = h2o_iris_model.model example_ = h2o_iris_model.inference_data.as_data_frame().head(3) for signature in (None, h2o_iris_model_signature): for example in (None, example_): with TempDir() as tmp: path = tmp.path("model") mlflow.h2o.save_model(model, path=path, signature=signature, input_example=example) mlflow_model = Model.load(path) if signature is None and example is None: assert mlflow_model.signature is None else: assert mlflow_model.signature == h2o_iris_model_signature if example is None: assert mlflow_model.saved_input_example_info is None else: assert all((_read_example(mlflow_model, path) == example).all()) def test_model_log(h2o_iris_model): h2o_model = h2o_iris_model.model try: artifact_path = "gbm_model" model_info = mlflow.h2o.log_model(h2o_model, name=artifact_path) # Load model h2o_model_loaded = mlflow.h2o.load_model(model_uri=model_info.model_uri) assert all( h2o_model_loaded.predict(h2o_iris_model.inference_data).as_data_frame() == h2o_model.predict(h2o_iris_model.inference_data).as_data_frame() ) finally: mlflow.end_run() def test_model_load_succeeds_with_missing_data_key_when_data_exists_at_default_path( h2o_iris_model, model_path ): """ This is a backwards compatibility test to ensure that models saved in MLflow version <= 0.7.0 can be loaded successfully. These models are missing the `data` flavor configuration key. 
""" h2o_model = h2o_iris_model.model mlflow.h2o.save_model(h2o_model=h2o_model, path=model_path) model_conf_path = os.path.join(model_path, "MLmodel") model_conf = Model.load(model_conf_path) flavor_conf = model_conf.flavors.get(mlflow.h2o.FLAVOR_NAME, None) assert flavor_conf is not None del flavor_conf["data"] model_conf.save(model_conf_path) h2o_model_loaded = mlflow.h2o.load_model(model_path) assert all( h2o_model_loaded.predict(h2o_iris_model.inference_data).as_data_frame() == h2o_model.predict(h2o_iris_model.inference_data).as_data_frame() ) def test_model_save_persists_specified_conda_env_in_mlflow_model_directory( h2o_iris_model, model_path, h2o_custom_env ): mlflow.h2o.save_model(h2o_model=h2o_iris_model.model, path=model_path, conda_env=h2o_custom_env) pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"]) assert os.path.exists(saved_conda_env_path) assert saved_conda_env_path != h2o_custom_env with open(h2o_custom_env) as f: h2o_custom_env_text = f.read() with open(saved_conda_env_path) as f: saved_conda_env_text = f.read() assert saved_conda_env_text == h2o_custom_env_text def test_model_save_persists_requirements_in_mlflow_model_directory( h2o_iris_model, model_path, h2o_custom_env ): mlflow.h2o.save_model(h2o_model=h2o_iris_model.model, path=model_path, conda_env=h2o_custom_env) saved_pip_req_path = os.path.join(model_path, "requirements.txt") _compare_conda_env_requirements(h2o_custom_env, saved_pip_req_path) def test_log_model_with_pip_requirements(h2o_iris_model, tmp_path): expected_mlflow_version = _mlflow_major_version_string() # Path to a requirements file req_file = tmp_path.joinpath("requirements.txt") req_file.write_text("a") with mlflow.start_run(): model_info = mlflow.h2o.log_model( h2o_iris_model.model, name="model", pip_requirements=str(req_file) ) _assert_pip_requirements(model_info.model_uri, [expected_mlflow_version, "a"], strict=True) # List of requirements with mlflow.start_run(): model_info = mlflow.h2o.log_model( h2o_iris_model.model, name="model", pip_requirements=[f"-r {req_file}", "b"], ) _assert_pip_requirements( model_info.model_uri, [expected_mlflow_version, "a", "b"], strict=True ) # Constraints file with mlflow.start_run(): model_info = mlflow.h2o.log_model( h2o_iris_model.model, name="model", pip_requirements=[f"-c {req_file}", "b"] ) _assert_pip_requirements( model_info.model_uri, [expected_mlflow_version, "b", "-c constraints.txt"], ["a"], strict=True, ) def test_log_model_with_extra_pip_requirements(h2o_iris_model, tmp_path): expected_mlflow_version = _mlflow_major_version_string() default_reqs = mlflow.h2o.get_default_pip_requirements() # Path to a requirements file req_file = tmp_path.joinpath("requirements.txt") req_file.write_text("a") with mlflow.start_run(): model_info = mlflow.h2o.log_model( h2o_iris_model.model, name="model", extra_pip_requirements=str(req_file) ) _assert_pip_requirements( model_info.model_uri, [expected_mlflow_version, *default_reqs, "a"] ) # List of requirements with mlflow.start_run(): model_info = mlflow.h2o.log_model( h2o_iris_model.model, name="model", extra_pip_requirements=[f"-r {req_file}", "b"] ) _assert_pip_requirements( model_info.model_uri, [expected_mlflow_version, *default_reqs, "a", "b"] ) # Constraints file with mlflow.start_run(): model_info = mlflow.h2o.log_model( h2o_iris_model.model, name="model", extra_pip_requirements=[f"-c {req_file}", "b"] ) _assert_pip_requirements( 
model_info.model_uri, [expected_mlflow_version, *default_reqs, "b", "-c constraints.txt"], ["a"], ) def test_model_save_accepts_conda_env_as_dict(h2o_iris_model, model_path): conda_env = dict(mlflow.h2o.get_default_conda_env()) conda_env["dependencies"].append("pytest") mlflow.h2o.save_model(h2o_model=h2o_iris_model.model, path=model_path, conda_env=conda_env) pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"]) assert os.path.exists(saved_conda_env_path) with open(saved_conda_env_path) as f: saved_conda_env_parsed = yaml.safe_load(f) assert saved_conda_env_parsed == conda_env def test_model_log_persists_specified_conda_env_in_mlflow_model_directory( h2o_iris_model, h2o_custom_env ): artifact_path = "model" with mlflow.start_run(): model_info = mlflow.h2o.log_model( h2o_iris_model.model, name=artifact_path, conda_env=h2o_custom_env ) model_path = _download_artifact_from_uri(model_info.model_uri) pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"]) assert os.path.exists(saved_conda_env_path) assert saved_conda_env_path != h2o_custom_env with open(h2o_custom_env) as f: h2o_custom_env_text = f.read() with open(saved_conda_env_path) as f: saved_conda_env_text = f.read() assert saved_conda_env_text == h2o_custom_env_text def test_model_log_persists_requirements_in_mlflow_model_directory(h2o_iris_model, h2o_custom_env): artifact_path = "model" with mlflow.start_run(): model_info = mlflow.h2o.log_model( h2o_iris_model.model, name=artifact_path, conda_env=h2o_custom_env ) model_path = _download_artifact_from_uri(model_info.model_uri) saved_pip_req_path = os.path.join(model_path, "requirements.txt") _compare_conda_env_requirements(h2o_custom_env, saved_pip_req_path) def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies( h2o_iris_model, model_path ): mlflow.h2o.save_model(h2o_model=h2o_iris_model.model, path=model_path) _assert_pip_requirements(model_path, mlflow.h2o.get_default_pip_requirements()) def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies( h2o_iris_model, ): artifact_path = "model" with mlflow.start_run(): model_info = mlflow.h2o.log_model(h2o_iris_model.model, name=artifact_path) _assert_pip_requirements(model_info.model_uri, mlflow.h2o.get_default_pip_requirements()) def test_pyfunc_serve_and_score(h2o_iris_model): model, inference_dataframe = h2o_iris_model artifact_path = "model" with mlflow.start_run(): model_info = mlflow.h2o.log_model( model, name=artifact_path, input_example=inference_dataframe.as_data_frame() ) inference_payload = load_serving_example(model_info.model_uri) resp = pyfunc_serve_and_score_model( model_info.model_uri, data=inference_payload, content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON, ) decoded_json = json.loads(resp.content.decode("utf-8")) scores = pd.DataFrame(data=decoded_json["predictions"]).drop("predict", axis=1) preds = model.predict(inference_dataframe).as_data_frame().drop("predict", axis=1) np.testing.assert_array_almost_equal(scores, preds) def test_log_model_with_code_paths(h2o_iris_model): with ( mlflow.start_run(), mock.patch("mlflow.h2o._add_code_from_conf_to_system_path") as add_mock, ): model_info = mlflow.h2o.log_model( h2o_iris_model.model, name="model_uri", code_paths=[__file__] ) _compare_logged_code_paths(__file__, 
model_info.model_uri, mlflow.h2o.FLAVOR_NAME) mlflow.h2o.load_model(model_info.model_uri) add_mock.assert_called() def test_model_save_load_with_metadata(h2o_iris_model, model_path): mlflow.h2o.save_model( h2o_iris_model.model, path=model_path, metadata={"metadata_key": "metadata_value"} ) reloaded_model = mlflow.pyfunc.load_model(model_uri=model_path) assert reloaded_model.metadata.metadata["metadata_key"] == "metadata_value" def test_model_log_with_metadata(h2o_iris_model): with mlflow.start_run(): model_info = mlflow.h2o.log_model( h2o_iris_model.model, name="model", metadata={"metadata_key": "metadata_value"}, ) reloaded_model = mlflow.pyfunc.load_model(model_uri=model_info.model_uri) assert reloaded_model.metadata.metadata["metadata_key"] == "metadata_value" def test_model_log_with_signature_inference(h2o_iris_model, h2o_iris_model_signature): artifact_path = "model" example = h2o_iris_model.inference_data.as_data_frame().head(3) with mlflow.start_run(): model_info = mlflow.h2o.log_model( h2o_iris_model.model, name=artifact_path, input_example=example ) mlflow_model = Model.load(model_info.model_uri) assert mlflow_model.signature == h2o_iris_model_signature
ModelWithData
python
realpython__materials
python-with-statement/exc_handling.py
{ "start": 0, "end": 611 }
class ____:
    def __enter__(self):
        print("Entering the context...")
        return "Hello, World!"

    def __exit__(self, exc_type, exc_value, exc_tb):
        print("Leaving the context...")
        if isinstance(exc_value, IndexError):
            # Handle IndexError here...
            print(f"An exception occurred in your with block: {exc_type}")
            print(f"Exception message: {exc_value}")
            return True


if __name__ == "__main__":
    with HelloContextManager() as hello:
        print(hello)
        hello[100]

    print("Continue normally from here...")
HelloContextManager
python
FactoryBoy__factory_boy
tests/test_fuzzy.py
{ "start": 10236, "end": 14643 }
class ____(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Setup useful constants
        cls.jan1 = datetime.datetime(2013, 1, 1)
        cls.jan3 = datetime.datetime(2013, 1, 3)
        cls.jan31 = datetime.datetime(2013, 1, 31)

    def test_accurate_definition(self):
        """Tests explicit definition of a FuzzyNaiveDateTime."""
        fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31)

        for _i in range(20):
            res = utils.evaluate_declaration(fuzz)
            self.assertLessEqual(self.jan1, res)
            self.assertLessEqual(res, self.jan31)

    def test_partial_definition(self):
        """Test defining a FuzzyNaiveDateTime without passing an end date."""
        with utils.mocked_datetime_now(self.jan3, fuzzy):
            fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1)

        for _i in range(20):
            res = utils.evaluate_declaration(fuzz)
            self.assertLessEqual(self.jan1, res)
            self.assertLessEqual(res, self.jan3)

    def test_aware_start(self):
        """Tests that a timezone-aware start datetime is rejected."""
        with self.assertRaises(ValueError):
            fuzzy.FuzzyNaiveDateTime(self.jan1.replace(tzinfo=datetime.timezone.utc), self.jan31)

    def test_aware_end(self):
        """Tests that a timezone-aware end datetime is rejected."""
        with self.assertRaises(ValueError):
            fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31.replace(tzinfo=datetime.timezone.utc))

    def test_force_year(self):
        fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31, force_year=4)

        for _i in range(20):
            res = utils.evaluate_declaration(fuzz)
            self.assertEqual(4, res.year)

    def test_force_month(self):
        fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31, force_month=4)

        for _i in range(20):
            res = utils.evaluate_declaration(fuzz)
            self.assertEqual(4, res.month)

    def test_force_day(self):
        fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31, force_day=4)

        for _i in range(20):
            res = utils.evaluate_declaration(fuzz)
            self.assertEqual(4, res.day)

    def test_force_hour(self):
        fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31, force_hour=4)

        for _i in range(20):
            res = utils.evaluate_declaration(fuzz)
            self.assertEqual(4, res.hour)

    def test_force_minute(self):
        fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31, force_minute=4)

        for _i in range(20):
            res = utils.evaluate_declaration(fuzz)
            self.assertEqual(4, res.minute)

    def test_force_second(self):
        fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31, force_second=4)

        for _i in range(20):
            res = utils.evaluate_declaration(fuzz)
            self.assertEqual(4, res.second)

    def test_force_microsecond(self):
        fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31, force_microsecond=4)

        for _i in range(20):
            res = utils.evaluate_declaration(fuzz)
            self.assertEqual(4, res.microsecond)

    def test_invalid_definition(self):
        with self.assertRaises(ValueError):
            fuzzy.FuzzyNaiveDateTime(self.jan31, self.jan1)

    def test_invalid_partial_definition(self):
        with utils.mocked_datetime_now(self.jan1, fuzzy):
            with self.assertRaises(ValueError):
                fuzzy.FuzzyNaiveDateTime(self.jan31)

    def test_biased(self):
        """Tests a FuzzyDate with a biased random.randint."""
        fake_randint = lambda low, high: (low + high) // 2
        fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31)

        with mock.patch('factory.random.randgen.randint', fake_randint):
            res = utils.evaluate_declaration(fuzz)

        self.assertEqual(datetime.datetime(2013, 1, 16), res)

    def test_biased_partial(self):
        """Tests a FuzzyDate with a biased random and implicit upper bound."""
        with utils.mocked_datetime_now(self.jan3, fuzzy):
            fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1)

        fake_randint = lambda low, high: (low + high) // 2
        with mock.patch('factory.random.randgen.randint', fake_randint):
            res = utils.evaluate_declaration(fuzz)

        self.assertEqual(datetime.datetime(2013, 1, 2), res)
FuzzyNaiveDateTimeTestCase
python
airbytehq__airbyte
airbyte-ci/connectors/pipelines/tests/test_tests/test_python_connectors.py
{ "start": 5491, "end": 8345 }
class ____:
    @pytest.fixture
    def compatible_connector(self):
        return Connector("source-faker")

    @pytest.fixture
    def incompatible_connector(self):
        return Connector("source-postgres")

    @pytest.fixture
    def context_for_valid_connector(self, compatible_connector, dagger_client, current_platform):
        context = ConnectorContext(
            pipeline_name="CLI smoke test with PyAirbyte",
            connector=compatible_connector,
            git_branch="test",
            git_revision="test",
            diffed_branch="test",
            git_repo_url="test",
            report_output_prefix="test",
            is_local=True,
            targeted_platforms=[current_platform],
        )
        context.dagger_client = dagger_client
        return context

    @pytest.fixture
    def context_for_invalid_connector(self, incompatible_connector, dagger_client, current_platform):
        context = ConnectorContext(
            pipeline_name="CLI smoke test with PyAirbyte",
            connector=incompatible_connector,
            git_branch="test",
            git_revision="test",
            diffed_branch="test",
            git_repo_url="test",
            report_output_prefix="test",
            is_local=True,
            targeted_platforms=[current_platform],
        )
        context.dagger_client = dagger_client
        return context

    async def test__run_validation_success(self, mocker, context_for_valid_connector: ConnectorContext):
        result = await PyAirbyteValidation(context_for_valid_connector)._run(mocker.MagicMock())
        assert isinstance(result, StepResult)
        assert result.status == StepStatus.SUCCESS
        assert "Getting `spec` output from connector..." in result.stdout

    async def test__run_validation_skip_unpublished_connector(
        self,
        mocker,
        context_for_invalid_connector: ConnectorContext,
    ):
        result = await PyAirbyteValidation(context_for_invalid_connector)._run(mocker.MagicMock())
        assert isinstance(result, StepResult)
        assert result.status == StepStatus.SKIPPED

    async def test__run_validation_fail(
        self,
        mocker,
        context_for_invalid_connector: ConnectorContext,
    ):
        metadata = context_for_invalid_connector.connector.metadata
        metadata["remoteRegistries"] = {"pypi": {"enabled": True, "packageName": "airbyte-source-postgres"}}
        metadata_mock = mocker.PropertyMock(return_value=metadata)
        with patch.object(Connector, "metadata", metadata_mock):
            result = await PyAirbyteValidation(context_for_invalid_connector)._run(mocker.MagicMock())
            assert isinstance(result, StepResult)
            assert result.status == StepStatus.FAILURE
            assert "is not installable" in result.stderr
TestPyAirbyteValidationTests
python
plotly__plotly.py
plotly/graph_objs/layout/legend/_title.py
{ "start": 235, "end": 4642 }
class ____(_BaseLayoutHierarchyType):

    _parent_path_str = "layout.legend"
    _path_str = "layout.legend.title"
    _valid_props = {"font", "side", "text"}

    @property
    def font(self):
        """
        Sets this legend's title font. Defaults to `legend.font` with
        its size increased about 20%.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.layout.legend.title.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor

        Returns
        -------
        plotly.graph_objs.layout.legend.title.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    @property
    def side(self):
        """
        Determines the location of legend's title with respect to the
        legend items. Defaulted to "top" with `orientation` is "h".
        Defaulted to "left" with `orientation` is "v". The *top left*
        options could be used to expand top center and top right are
        for horizontal alignment legend area in both x and y sides.

        The 'side' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['top', 'left', 'top left', 'top center', 'top right']

        Returns
        -------
        Any
        """
        return self["side"]

    @side.setter
    def side(self, val):
        self["side"] = val

    @property
    def text(self):
        """
        Sets the title of the legend.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    @property
    def _prop_descriptions(self):
        return """\
        font
            Sets this legend's title font. Defaults to
            `legend.font` with its size increased about 20%.
        side
            Determines the location of legend's title with respect
            to the legend items. Defaulted to "top" with
            `orientation` is "h". Defaulted to "left" with
            `orientation` is "v". The *top left* options could be
            used to expand top center and top right are for
            horizontal alignment legend area in both x and y sides.
        text
            Sets the title of the legend.
        """

    def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
        """
        Construct a new Title object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.legend.Title`
        font
            Sets this legend's title font. Defaults to
            `legend.font` with its size increased about 20%.
        side
            Determines the location of legend's title with respect
            to the legend items. Defaulted to "top" with
            `orientation` is "h". Defaulted to "left" with
            `orientation` is "v". The *top left* options could be
            used to expand top center and top right are for
            horizontal alignment legend area in both x and y sides.
        text
            Sets the title of the legend.

        Returns
        -------
        Title
        """
        super().__init__("title")

        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.layout.legend.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.legend.Title`""")

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        self._set_property("font", arg, font)
        self._set_property("side", arg, side)
        self._set_property("text", arg, text)
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
Title
python
pypa__pipenv
pipenv/patched/pip/_internal/req/req_set.py
{ "start": 272, "end": 2888 }
class ____:
    def __init__(self, check_supported_wheels: bool = True) -> None:
        """Create a RequirementSet."""

        self.requirements: Dict[str, InstallRequirement] = OrderedDict()
        self.check_supported_wheels = check_supported_wheels

        self.unnamed_requirements: List[InstallRequirement] = []

    def __str__(self) -> str:
        requirements = sorted(
            (req for req in self.requirements.values() if not req.comes_from),
            key=lambda req: canonicalize_name(req.name or ""),
        )
        return " ".join(str(req.req) for req in requirements)

    def __repr__(self) -> str:
        requirements = sorted(
            self.requirements.values(),
            key=lambda req: canonicalize_name(req.name or ""),
        )

        format_string = "<{classname} object; {count} requirement(s): {reqs}>"
        return format_string.format(
            classname=self.__class__.__name__,
            count=len(requirements),
            reqs=", ".join(str(req.req) for req in requirements),
        )

    def add_unnamed_requirement(self, install_req: InstallRequirement) -> None:
        assert not install_req.name
        self.unnamed_requirements.append(install_req)

    def add_named_requirement(self, install_req: InstallRequirement) -> None:
        assert install_req.name

        project_name = canonicalize_name(install_req.name)
        self.requirements[project_name] = install_req

    def has_requirement(self, name: str) -> bool:
        project_name = canonicalize_name(name)

        return (
            project_name in self.requirements
            and not self.requirements[project_name].constraint
        )

    def get_requirement(self, name: str) -> InstallRequirement:
        project_name = canonicalize_name(name)

        if project_name in self.requirements:
            return self.requirements[project_name]

        raise KeyError(f"No project with the name {name!r}")

    @property
    def all_requirements(self) -> List[InstallRequirement]:
        return self.unnamed_requirements + list(self.requirements.values())

    @property
    def requirements_to_install(self) -> List[InstallRequirement]:
        """Return the list of requirements that need to be installed.

        TODO remove this property together with the legacy resolver, since the
        new resolver only returns requirements that need to be installed.
        """
        return [
            install_req
            for install_req in self.all_requirements
            if not install_req.constraint and not install_req.satisfied_by
        ]
RequirementSet
python
pytorch__pytorch
test/jit/test_fuser_common.py
{ "start": 173, "end": 788 }
class ____(JitTestCase):
    def test_autodiff_fallback(self):
        for rq in [True, False]:

            @torch.jit.script
            def fn(x):
                return torch.max(x**2.0, x**3.0)

            x = torch.randn(5, requires_grad=not rq)
            # cause optimization to be created
            for _ in range(5):
                fn(x)
            # test fallback when optimization is not applicable
            y = fn(torch.randn(5, requires_grad=rq))
            self.assertEqual(y.requires_grad, rq)


if __name__ == "__main__":
    raise_on_run_directly("test/test_jit_fuser_te.py")
TestFuserCommon
python
walkccc__LeetCode
solutions/1657. Determine if Two Strings Are Close/1657.py
{ "start": 0, "end": 328 }
class ____:
    def closeStrings(self, word1: str, word2: str) -> bool:
        if len(word1) != len(word2):
            return False

        count1 = collections.Counter(word1)
        count2 = collections.Counter(word2)
        if count1.keys() != count2.keys():
            return False

        return sorted(count1.values()) == sorted(count2.values())
Solution
python
doocs__leetcode
solution/3100-3199/3115.Maximum Prime Difference/Solution.py
{ "start": 0, "end": 436 }
class ____:
    def maximumPrimeDifference(self, nums: List[int]) -> int:
        def is_prime(x: int) -> bool:
            if x < 2:
                return False
            return all(x % i for i in range(2, int(sqrt(x)) + 1))

        for i, x in enumerate(nums):
            if is_prime(x):
                for j in range(len(nums) - 1, i - 1, -1):
                    if is_prime(nums[j]):
                        return j - i
Solution
python
pytorch__pytorch
test/dynamo/test_modules.py
{ "start": 7737, "end": 8593 }
class ____(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.layers = torch.nn.ModuleList(
            [
                torch.nn.Linear(10, 10),
                torch.nn.ReLU(),
                torch.nn.Linear(10, 10),
                torch.nn.ReLU(),
            ]
        )

    def forward(self, x):
        for i in range(len(self.layers)):
            x = self.layers[i](x)

        for layer in self.layers:
            x = layer(x)

        for layer, val in zip(self.layers, (x, x, x, x)):
            x = layer(x) + val

        for layer, val in zip(self.layers, (1, 2, 3, 4)):
            x = layer(x) + val

        for idx, layer in enumerate(self.layers):
            x = layer(x) * idx

        for idx, layer in enumerate(self.layers[::-1]):
            x = layer(x) * idx

        return x
ModuleList
python
ray-project__ray
python/ray/air/_internal/json.py
{ "start": 49, "end": 908 }
class ____(json.JSONEncoder):
    def __init__(self, nan_str="null", **kwargs):
        super(SafeFallbackEncoder, self).__init__(**kwargs)
        self.nan_str = nan_str

    def default(self, value):
        try:
            if type(value).__module__ == np.__name__ and isinstance(value, np.ndarray):
                return value.tolist()

            if isinstance(value, np.bool_):
                return bool(value)

            if np.isnan(value):
                return self.nan_str

            if issubclass(type(value), numbers.Integral):
                return int(value)
            if issubclass(type(value), numbers.Number):
                return float(value)

            return super(SafeFallbackEncoder, self).default(value)
        except Exception:
            return str(value)  # give up, just stringify it (ok for logs)
SafeFallbackEncoder
python
tensorflow__tensorflow
tensorflow/python/ops/linalg/linear_operator_block_diag.py
{ "start": 1569, "end": 32814 }
class ____(linear_operator.LinearOperator): """Combines one or more `LinearOperators` in to a Block Diagonal matrix. This operator combines one or more linear operators `[op1,...,opJ]`, building a new `LinearOperator`, whose underlying matrix representation has each operator `opi` on the main diagonal, and zero's elsewhere. #### Shape compatibility If `opj` acts like a [batch] matrix `Aj`, then `op_combined` acts like the [batch] matrix formed by having each matrix `Aj` on the main diagonal. Each `opj` is required to represent a matrix, and hence will have shape `batch_shape_j + [M_j, N_j]`. If `opj` has shape `batch_shape_j + [M_j, N_j]`, then the combined operator has shape `broadcast_batch_shape + [sum M_j, sum N_j]`, where `broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`, `j = 1,...,J`, assuming the intermediate batch shapes broadcast. Arguments to `matmul`, `matvec`, `solve`, and `solvevec` may either be single `Tensor`s or lists of `Tensor`s that are interpreted as blocks. The `j`th element of a blockwise list of `Tensor`s must have dimensions that match `opj` for the given method. If a list of blocks is input, then a list of blocks is returned as well. When the `opj` are not guaranteed to be square, this operator's methods might fail due to the combined operator not being square and/or lack of efficient methods. ```python # Create a 4 x 4 linear operator combined of two 2 x 2 operators. operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]]) operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]]) operator = LinearOperatorBlockDiag([operator_1, operator_2]) operator.to_dense() ==> [[1., 2., 0., 0.], [3., 4., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]] operator.shape ==> [4, 4] operator.log_abs_determinant() ==> scalar Tensor x1 = ... # Shape [2, 2] Tensor x2 = ... # Shape [2, 2] Tensor x = tf.concat([x1, x2], 0) # Shape [2, 4] Tensor operator.matmul(x) ==> tf.concat([operator_1.matmul(x1), operator_2.matmul(x2)]) # Create a 5 x 4 linear operator combining three blocks. operator_1 = LinearOperatorFullMatrix([[1.], [3.]]) operator_2 = LinearOperatorFullMatrix([[1., 6.]]) operator_3 = LinearOperatorFullMatrix([[2.], [7.]]) operator = LinearOperatorBlockDiag([operator_1, operator_2, operator_3]) operator.to_dense() ==> [[1., 0., 0., 0.], [3., 0., 0., 0.], [0., 1., 6., 0.], [0., 0., 0., 2.]] [0., 0., 0., 7.]] operator.shape ==> [5, 4] # Create a [2, 3] batch of 4 x 4 linear operators. matrix_44 = tf.random.normal(shape=[2, 3, 4, 4]) operator_44 = LinearOperatorFullMatrix(matrix) # Create a [1, 3] batch of 5 x 5 linear operators. matrix_55 = tf.random.normal(shape=[1, 3, 5, 5]) operator_55 = LinearOperatorFullMatrix(matrix_55) # Combine to create a [2, 3] batch of 9 x 9 operators. operator_99 = LinearOperatorBlockDiag([operator_44, operator_55]) # Create a shape [2, 3, 9] vector. x = tf.random.normal(shape=[2, 3, 9]) operator_99.matmul(x) ==> Shape [2, 3, 9] Tensor # Create a blockwise list of vectors. x = [tf.random.normal(shape=[2, 3, 4]), tf.random.normal(shape=[2, 3, 5])] operator_99.matmul(x) ==> [Shape [2, 3, 4] Tensor, Shape [2, 3, 5] Tensor] ``` #### Performance The performance of `LinearOperatorBlockDiag` on any operation is equal to the sum of the individual operators' operations. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. 
These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, operators, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=True, name=None): r"""Initialize a `LinearOperatorBlockDiag`. `LinearOperatorBlockDiag` is initialized with a list of operators `[op_1,...,op_J]`. Args: operators: Iterable of `LinearOperator` objects, each with the same `dtype` and composable shape. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. This is true by default, and will raise a `ValueError` otherwise. name: A name for this `LinearOperator`. Default is the individual operators names joined with `_o_`. Raises: TypeError: If all operators do not have the same `dtype`. ValueError: If `operators` is empty or are non-square. """ parameters = dict( operators=operators, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name ) # Validate operators. check_ops.assert_proper_iterable(operators) operators = list(operators) if not operators: raise ValueError( "Expected a non-empty list of operators. Found: %s" % operators) self._operators = operators # Define diagonal operators, for functions that are shared across blockwise # `LinearOperator` types. self._diagonal_operators = operators # Validate dtype. dtype = operators[0].dtype for operator in operators: if operator.dtype != dtype: name_type = (str((o.name, o.dtype)) for o in operators) raise TypeError( "Expected all operators to have the same dtype. Found %s" % " ".join(name_type)) # Auto-set and check hints. if all(operator.is_non_singular for operator in operators): if is_non_singular is False: raise ValueError( "The direct sum of non-singular operators is always non-singular.") is_non_singular = True if all(operator.is_self_adjoint for operator in operators): if is_self_adjoint is False: raise ValueError( "The direct sum of self-adjoint operators is always self-adjoint.") is_self_adjoint = True if all(operator.is_positive_definite for operator in operators): if is_positive_definite is False: raise ValueError( "The direct sum of positive definite operators is always " "positive definite.") is_positive_definite = True if name is None: # Using ds to mean direct sum. 
name = "_ds_".join(operator.name for operator in operators) with ops.name_scope(name): super(LinearOperatorBlockDiag, self).__init__( dtype=dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, parameters=parameters, name=name) @property def operators(self): return self._operators def _block_range_dimensions(self): return [op.range_dimension for op in self._diagonal_operators] def _block_domain_dimensions(self): return [op.domain_dimension for op in self._diagonal_operators] def _block_range_dimension_tensors(self): return [op.range_dimension_tensor() for op in self._diagonal_operators] def _block_domain_dimension_tensors(self): return [op.domain_dimension_tensor() for op in self._diagonal_operators] def _shape(self): # Get final matrix shape. domain_dimension = sum(self._block_domain_dimensions()) range_dimension = sum(self._block_range_dimensions()) matrix_shape = tensor_shape.TensorShape([range_dimension, domain_dimension]) # Get broadcast batch shape. # broadcast_shape checks for compatibility. batch_shape = self.operators[0].batch_shape for operator in self.operators[1:]: batch_shape = common_shapes.broadcast_shape( batch_shape, operator.batch_shape) return batch_shape.concatenate(matrix_shape) def _shape_tensor(self): # Avoid messy broadcasting if possible. if self.shape.is_fully_defined(): return tensor_conversion.convert_to_tensor_v2_with_dispatch( self.shape.as_list(), dtype=dtypes.int32, name="shape" ) domain_dimension = sum(self._block_domain_dimension_tensors()) range_dimension = sum(self._block_range_dimension_tensors()) matrix_shape = array_ops_stack.stack([range_dimension, domain_dimension]) # Dummy Tensor of zeros. Will never be materialized. zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor()) for operator in self.operators[1:]: zeros += array_ops.zeros(shape=operator.batch_shape_tensor()) batch_shape = array_ops.shape(zeros) return array_ops.concat((batch_shape, matrix_shape), 0) def _linop_adjoint(self) -> "LinearOperatorBlockDiag": # We take the adjoint of each block on the diagonal. return LinearOperatorBlockDiag( operators=[operator.adjoint() for operator in self.operators], is_non_singular=self.is_non_singular, is_self_adjoint=self.is_self_adjoint, is_positive_definite=self.is_positive_definite, is_square=True) def _linop_cholesky(self) -> "LinearOperatorBlockDiag": # We take the cholesky of each block on the diagonal. return LinearOperatorBlockDiag( operators=[operator.cholesky() for operator in self.operators], is_non_singular=True, is_self_adjoint=None, # Let the operators passed in decide. is_square=True) def _linop_inverse(self) -> "LinearOperatorBlockDiag": # We take the inverse of each block on the diagonal. 
return LinearOperatorBlockDiag( operators=[ operator.inverse() for operator in self.operators], is_non_singular=self.is_non_singular, is_self_adjoint=self.is_self_adjoint, is_positive_definite=self.is_positive_definite, is_square=True) def _linop_matmul( self, left_operator: "LinearOperatorBlockDiag", right_operator: linear_operator.LinearOperator, ) -> linear_operator.LinearOperator: if isinstance(right_operator, LinearOperatorBlockDiag): return LinearOperatorBlockDiag( operators=[ o1.matmul(o2) for o1, o2 in zip( left_operator.operators, right_operator.operators)], is_non_singular=property_hint_util.combined_non_singular_hint( left_operator, right_operator), # In general, a product of self-adjoint positive-definite # block diagonal matrices is not self-adjoint. is_self_adjoint=None, # In general, a product of positive-definite block diagonal # matrices is not positive-definite. is_positive_definite=None, is_square=True) return super()._linop_matmul(left_operator, right_operator) def _linop_solve( self, left_operator: "LinearOperatorBlockDiag", right_operator: linear_operator.LinearOperator, ) -> linear_operator.LinearOperator: if isinstance(right_operator, LinearOperatorBlockDiag): return LinearOperatorBlockDiag( operators=[ o1.solve(o2) for o1, o2 in zip( left_operator.operators, right_operator.operators)], is_non_singular=property_hint_util.combined_non_singular_hint( left_operator, right_operator), # In general, a solve of self-adjoint positive-definite block diagonal # matrices is not self-=adjoint. is_self_adjoint=None, # In general, a solve of positive-definite block diagonal matrices is # not positive-definite. is_positive_definite=None, is_square=True) return super()._linop_solve(left_operator, right_operator) # TODO(b/188080761): Add a more efficient implementation of `cond` that # constructs the condition number from the blockwise singular values. def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"): """Transform [batch] matrix `x` with left multiplication: `x --> Ax`. ```python # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] operator = LinearOperator(...) operator.shape = [..., M, N] X = ... # shape [..., N, R], batch matrix, R > 0. Y = operator.matmul(X) Y.shape ==> [..., M, R] Y[..., :, r] = sum_j A[..., :, j] X[j, r] ``` Args: x: `LinearOperator`, `Tensor` with compatible shape and same `dtype` as `self`, or a blockwise iterable of `LinearOperator`s or `Tensor`s. See class docstring for definition of shape compatibility. adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`. adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is the hermitian transpose (transposition and complex conjugation). name: A name for this `Op`. Returns: A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype` as `self`, or if `x` is blockwise, a list of `Tensor`s with shapes that concatenate to `[..., M, R]`. """ def _check_operators_agree(r, l, message): if (r.range_dimension is not None and l.domain_dimension is not None and r.range_dimension != l.domain_dimension): raise ValueError(message) if isinstance(x, linear_operator.LinearOperator): left_operator = self.adjoint() if adjoint else self right_operator = x.adjoint() if adjoint_arg else x _check_operators_agree( right_operator, left_operator, "Operators are incompatible. 
Expected `x` to have dimension" " {} but got {}.".format( left_operator.domain_dimension, right_operator.range_dimension)) # We can efficiently multiply BlockDiag LinearOperators if the number of # blocks agree. if isinstance(x, LinearOperatorBlockDiag): if len(left_operator.operators) != len(right_operator.operators): raise ValueError( "Can not efficiently multiply two `LinearOperatorBlockDiag`s " "together when number of blocks differ.") for o1, o2 in zip(left_operator.operators, right_operator.operators): _check_operators_agree( o2, o1, "Blocks are incompatible. Expected `x` to have dimension" " {} but got {}.".format( o1.domain_dimension, o2.range_dimension)) with self._name_scope(name): # pylint: disable=not-callable return self._linop_matmul(left_operator, right_operator) with self._name_scope(name): # pylint: disable=not-callable arg_dim = -1 if adjoint_arg else -2 block_dimensions = (self._block_range_dimensions() if adjoint else self._block_domain_dimensions()) if linear_operator_util.arg_is_blockwise(block_dimensions, x, arg_dim): for i, block in enumerate(x): if not isinstance(block, linear_operator.LinearOperator): block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block) self._check_input_dtype(block) block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim]) x[i] = block else: x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x") self._check_input_dtype(x) op_dimension = (self.range_dimension if adjoint else self.domain_dimension) op_dimension.assert_is_compatible_with(x.shape[arg_dim]) return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg) def _matmul(self, x, adjoint=False, adjoint_arg=False): arg_dim = -1 if adjoint_arg else -2 block_dimensions = (self._block_range_dimensions() if adjoint else self._block_domain_dimensions()) block_dimensions_fn = ( self._block_range_dimension_tensors if adjoint else self._block_domain_dimension_tensors) blockwise_arg = linear_operator_util.arg_is_blockwise( block_dimensions, x, arg_dim) if blockwise_arg: split_x = x else: split_dim = -1 if adjoint_arg else -2 # Split input by rows normally, and otherwise columns. split_x = linear_operator_util.split_arg_into_blocks( block_dimensions, block_dimensions_fn, x, axis=split_dim) result_list = [] for index, operator in enumerate(self.operators): result_list += [operator.matmul( split_x[index], adjoint=adjoint, adjoint_arg=adjoint_arg)] if blockwise_arg: return result_list result_list = linear_operator_util.broadcast_matrix_batch_dims( result_list) return array_ops.concat(result_list, axis=-2) def matvec(self, x, adjoint=False, name="matvec"): """Transform [batch] vector `x` with left multiplication: `x --> Ax`. ```python # Make an operator acting like batch matric A. Assume A.shape = [..., M, N] operator = LinearOperator(...) X = ... # shape [..., N], batch vector Y = operator.matvec(X) Y.shape ==> [..., M] Y[..., :] = sum_j A[..., :, j] X[..., j] ``` Args: x: `Tensor` with compatible shape and same `dtype` as `self`, or an iterable of `Tensor`s (for blockwise operators). `Tensor`s are treated a [batch] vectors, meaning for every set of leading dimensions, the last dimension defines a vector. See class docstring for definition of compatibility. adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`. name: A name for this `Op`. Returns: A `Tensor` with shape `[..., M]` and same `dtype` as `self`. 
""" with self._name_scope(name): # pylint: disable=not-callable block_dimensions = (self._block_range_dimensions() if adjoint else self._block_domain_dimensions()) if linear_operator_util.arg_is_blockwise(block_dimensions, x, -1): for i, block in enumerate(x): if not isinstance(block, linear_operator.LinearOperator): block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block) self._check_input_dtype(block) block_dimensions[i].assert_is_compatible_with(block.shape[-1]) x[i] = block x_mat = [block[..., array_ops.newaxis] for block in x] y_mat = self.matmul(x_mat, adjoint=adjoint) return [array_ops.squeeze(y, axis=-1) for y in y_mat] x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x") self._check_input_dtype(x) op_dimension = (self.range_dimension if adjoint else self.domain_dimension) op_dimension.assert_is_compatible_with(x.shape[-1]) x_mat = x[..., array_ops.newaxis] y_mat = self.matmul(x_mat, adjoint=adjoint) return array_ops.squeeze(y_mat, axis=-1) def _determinant(self): result = self.operators[0].determinant() for operator in self.operators[1:]: result *= operator.determinant() return result def _log_abs_determinant(self): result = self.operators[0].log_abs_determinant() for operator in self.operators[1:]: result += operator.log_abs_determinant() return result def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"): """Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`. The returned `Tensor` will be close to an exact solution if `A` is well conditioned. Otherwise closeness will vary. See class docstring for details. Examples: ```python # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] operator = LinearOperator(...) operator.shape = [..., M, N] # Solve R > 0 linear systems for every member of the batch. RHS = ... # shape [..., M, R] X = operator.solve(RHS) # X[..., :, r] is the solution to the r'th linear system # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r] operator.matmul(X) ==> RHS ``` Args: rhs: `Tensor` with same `dtype` as this operator and compatible shape, or a list of `Tensor`s (for blockwise operators). `Tensor`s are treated like a [batch] matrices meaning for every set of leading dimensions, the last two dimensions defines a matrix. See class docstring for definition of compatibility. adjoint: Python `bool`. If `True`, solve the system involving the adjoint of this `LinearOperator`: `A^H X = rhs`. adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H` is the hermitian transpose (transposition and complex conjugation). name: A name scope to use for ops added by this method. Returns: `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`. Raises: NotImplementedError: If `self.is_non_singular` or `is_square` is False. """ if self.is_non_singular is False: raise NotImplementedError( "Exact solve not implemented for an operator that is expected to " "be singular.") if self.is_square is False: raise NotImplementedError( "Exact solve not implemented for an operator that is expected to " "not be square.") def _check_operators_agree(r, l, message): if (r.range_dimension is not None and l.domain_dimension is not None and r.range_dimension != l.domain_dimension): raise ValueError(message) if isinstance(rhs, linear_operator.LinearOperator): left_operator = self.adjoint() if adjoint else self right_operator = rhs.adjoint() if adjoint_arg else rhs _check_operators_agree( right_operator, left_operator, "Operators are incompatible. 
Expected `x` to have dimension" " {} but got {}.".format( left_operator.domain_dimension, right_operator.range_dimension)) # We can efficiently solve BlockDiag LinearOperators if the number of # blocks agree. if isinstance(right_operator, LinearOperatorBlockDiag): if len(left_operator.operators) != len(right_operator.operators): raise ValueError( "Can not efficiently solve `LinearOperatorBlockDiag` when " "number of blocks differ.") for o1, o2 in zip(left_operator.operators, right_operator.operators): _check_operators_agree( o2, o1, "Blocks are incompatible. Expected `x` to have dimension" " {} but got {}.".format( o1.domain_dimension, o2.range_dimension)) with self._name_scope(name): # pylint: disable=not-callable return self._linop_solve(left_operator, right_operator) with self._name_scope(name): # pylint: disable=not-callable block_dimensions = (self._block_domain_dimensions() if adjoint else self._block_range_dimensions()) arg_dim = -1 if adjoint_arg else -2 blockwise_arg = linear_operator_util.arg_is_blockwise( block_dimensions, rhs, arg_dim) if blockwise_arg: split_rhs = rhs for i, block in enumerate(split_rhs): if not isinstance(block, linear_operator.LinearOperator): block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block) self._check_input_dtype(block) block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim]) split_rhs[i] = block else: rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch( rhs, name="rhs" ) self._check_input_dtype(rhs) op_dimension = (self.domain_dimension if adjoint else self.range_dimension) op_dimension.assert_is_compatible_with(rhs.shape[arg_dim]) split_dim = -1 if adjoint_arg else -2 # Split input by rows normally, and otherwise columns. split_rhs = linear_operator_util.split_arg_into_blocks( self._block_domain_dimensions(), self._block_domain_dimension_tensors, rhs, axis=split_dim) solution_list = [] for index, operator in enumerate(self.operators): solution_list += [operator.solve( split_rhs[index], adjoint=adjoint, adjoint_arg=adjoint_arg)] if blockwise_arg: return solution_list solution_list = linear_operator_util.broadcast_matrix_batch_dims( solution_list) return array_ops.concat(solution_list, axis=-2) def solvevec(self, rhs, adjoint=False, name="solve"): """Solve single equation with best effort: `A X = rhs`. The returned `Tensor` will be close to an exact solution if `A` is well conditioned. Otherwise closeness will vary. See class docstring for details. Examples: ```python # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] operator = LinearOperator(...) operator.shape = [..., M, N] # Solve one linear system for every member of the batch. RHS = ... # shape [..., M] X = operator.solvevec(RHS) # X is the solution to the linear system # sum_j A[..., :, j] X[..., j] = RHS[..., :] operator.matvec(X) ==> RHS ``` Args: rhs: `Tensor` with same `dtype` as this operator, or list of `Tensor`s (for blockwise operators). `Tensor`s are treated as [batch] vectors, meaning for every set of leading dimensions, the last dimension defines a vector. See class docstring for definition of compatibility regarding batch dimensions. adjoint: Python `bool`. If `True`, solve the system involving the adjoint of this `LinearOperator`: `A^H X = rhs`. name: A name scope to use for ops added by this method. Returns: `Tensor` with shape `[...,N]` and same `dtype` as `rhs`. Raises: NotImplementedError: If `self.is_non_singular` or `is_square` is False. 
""" with self._name_scope(name): # pylint: disable=not-callable block_dimensions = (self._block_domain_dimensions() if adjoint else self._block_range_dimensions()) if linear_operator_util.arg_is_blockwise(block_dimensions, rhs, -1): for i, block in enumerate(rhs): if not isinstance(block, linear_operator.LinearOperator): block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block) self._check_input_dtype(block) block_dimensions[i].assert_is_compatible_with(block.shape[-1]) rhs[i] = block rhs_mat = [array_ops.expand_dims(block, axis=-1) for block in rhs] solution_mat = self.solve(rhs_mat, adjoint=adjoint) return [array_ops.squeeze(x, axis=-1) for x in solution_mat] rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch( rhs, name="rhs" ) self._check_input_dtype(rhs) op_dimension = (self.domain_dimension if adjoint else self.range_dimension) op_dimension.assert_is_compatible_with(rhs.shape[-1]) rhs_mat = array_ops.expand_dims(rhs, axis=-1) solution_mat = self.solve(rhs_mat, adjoint=adjoint) return array_ops.squeeze(solution_mat, axis=-1) def _diag_part(self): if not all(operator.is_square for operator in self.operators): raise NotImplementedError( "`diag_part` not implemented for an operator whose blocks are not " "square.") diag_list = [] for operator in self.operators: # Extend the axis for broadcasting. diag_list += [operator.diag_part()[..., array_ops.newaxis]] diag_list = linear_operator_util.broadcast_matrix_batch_dims(diag_list) diagonal = array_ops.concat(diag_list, axis=-2) return array_ops.squeeze(diagonal, axis=-1) def _trace(self): if not all(operator.is_square for operator in self.operators): raise NotImplementedError( "`trace` not implemented for an operator whose blocks are not " "square.") result = self.operators[0].trace() for operator in self.operators[1:]: result += operator.trace() return result def _to_dense(self): num_cols = 0 rows = [] broadcasted_blocks = [operator.to_dense() for operator in self.operators] broadcasted_blocks = linear_operator_util.broadcast_matrix_batch_dims( broadcasted_blocks) for block in broadcasted_blocks: batch_row_shape = array_ops.shape(block)[:-1] zeros_to_pad_before_shape = array_ops.concat( [batch_row_shape, [num_cols]], axis=-1) zeros_to_pad_before = array_ops.zeros( shape=zeros_to_pad_before_shape, dtype=block.dtype) num_cols += array_ops.shape(block)[-1] zeros_to_pad_after_shape = array_ops.concat( [batch_row_shape, [self.domain_dimension_tensor() - num_cols]], axis=-1) zeros_to_pad_after = array_ops.zeros( shape=zeros_to_pad_after_shape, dtype=block.dtype) rows.append(array_ops.concat( [zeros_to_pad_before, block, zeros_to_pad_after], axis=-1)) mat = array_ops.concat(rows, axis=-2) mat.set_shape(self.shape) return mat def _assert_non_singular(self): return control_flow_ops.group([ operator.assert_non_singular() for operator in self.operators]) def _assert_self_adjoint(self): return control_flow_ops.group([ operator.assert_self_adjoint() for operator in self.operators]) def _assert_positive_definite(self): return control_flow_ops.group([ operator.assert_positive_definite() for operator in self.operators]) def _eigvals(self): if not all(operator.is_square for operator in self.operators): raise NotImplementedError( "`eigvals` not implemented for an operator whose blocks are not " "square.") eig_list = [] for operator in self.operators: # Extend the axis for broadcasting. 
eig_list += [operator.eigvals()[..., array_ops.newaxis]] eig_list = linear_operator_util.broadcast_matrix_batch_dims(eig_list) eigs = array_ops.concat(eig_list, axis=-2) return array_ops.squeeze(eigs, axis=-1) @property def _composite_tensor_fields(self): return ("operators",) @property def _experimental_parameter_ndims_to_matrix_ndims(self): return {"operators": [0] * len(self.operators)}
LinearOperatorBlockDiag
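A minimal usage sketch for the solve/solvevec paths shown above, assuming the public `tf.linalg.LinearOperatorBlockDiag` and `LinearOperatorDiag` wrappers; the diagonal blocks and right-hand sides are made up for illustration.

```python
import tensorflow as tf

# Two diagonal blocks form a 5x5 block-diagonal operator.
block1 = tf.linalg.LinearOperatorDiag([2.0, 2.0])
block2 = tf.linalg.LinearOperatorDiag([1.0, 3.0, 5.0])
operator = tf.linalg.LinearOperatorBlockDiag([block1, block2])

# Dense right-hand side: solvevec solves A x = rhs block by block.
rhs = tf.ones([5])
x = operator.solvevec(rhs)
tf.debugging.assert_near(operator.matvec(x), rhs)

# Blockwise right-hand side: a list of per-block vectors comes back as a list,
# matching the `arg_is_blockwise` branch in the code above.
x_blocks = operator.solvevec([tf.ones([2]), tf.ones([3])])
print([b.shape for b in x_blocks])  # [TensorShape([2]), TensorShape([3])]
```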
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/sqltypes.py
{ "start": 77026, "end": 77893 }
class ____(HasExpressionLookup, TypeEngine[dt.timedelta]): operator_classes = OperatorClass.DATETIME @util.memoized_property def _expression_adaptations(self): # Based on # https://www.postgresql.org/docs/current/static/functions-datetime.html. return { operators.add: { Date: DateTime, Interval: self.__class__, DateTime: DateTime, Time: Time, }, operators.sub: {Interval: self.__class__}, operators.mul: {Numeric: self.__class__, Float: self.__class__}, operators.truediv: { Numeric: self.__class__, Float: self.__class__, }, } @util.ro_non_memoized_property def _type_affinity(self) -> Type[Interval]: return Interval
_AbstractInterval
python
bottlepy__bottle
bottle.py
{ "start": 17214, "end": 22417 }
class ____: """ This class wraps a route callback along with route specific metadata and configuration and applies Plugins on demand. It is also responsible for turning an URL path rule into a regular expression usable by the Router. """ def __init__(self, app, rule, method, callback, name=None, plugins=None, skiplist=None, **config): #: The application this route is installed to. self.app = app #: The path-rule string (e.g. ``/wiki/<page>``). self.rule = rule #: The HTTP method as a string (e.g. ``GET``). self.method = method #: The original callback with no plugins applied. Useful for introspection. self.callback = callback #: The name of the route (if specified) or ``None``. self.name = name or None #: A list of route-specific plugins (see :meth:`Bottle.route`). self.plugins = plugins or [] #: A list of plugins to not apply to this route (see :meth:`Bottle.route`). self.skiplist = skiplist or [] #: Additional keyword arguments passed to the :meth:`Bottle.route` #: decorator are stored in this dictionary. Used for route-specific #: plugin configuration and meta-data. self.config = app.config._make_overlay() self.config.load_dict(config) @cached_property def call(self): """ The route callback with all plugins applied. This property is created on demand and then cached to speed up subsequent requests.""" return self._make_callback() def reset(self): """ Forget any cached values. The next time :attr:`call` is accessed, all plugins are re-applied. """ self.__dict__.pop('call', None) def prepare(self): """ Do all on-demand work immediately (useful for debugging).""" self.call def all_plugins(self): """ Yield all Plugins affecting this route. """ unique = set() for p in reversed(self.app.plugins + self.plugins): if True in self.skiplist: break name = getattr(p, 'name', False) if name and (name in self.skiplist or name in unique): continue if p in self.skiplist or type(p) in self.skiplist: continue if name: unique.add(name) yield p def _make_callback(self): callback = self.callback for plugin in self.all_plugins(): if hasattr(plugin, 'apply'): callback = plugin.apply(callback, self) else: callback = plugin(callback) if callback is not self.callback: update_wrapper(callback, self.callback) return callback def get_undecorated_callback(self): """ Return the callback. If the callback is a decorated function, try to recover the original function. """ func = self.callback while True: if getattr(func, '__wrapped__', False): func = func.__wrapped__ elif getattr(func, '__func__', False): func = func.__func__ elif getattr(func, '__closure__', False): depr(0, 14, "Decorated callback without __wrapped__", "When applying decorators to route callbacks, make sure" " the decorator uses @functools.wraps or update_wrapper." " This warning may also trigger if you reference callables" " from a nonlocal scope.") cells_values = (cell.cell_contents for cell in func.__closure__) isfunc = lambda x: isinstance(x, FunctionType) or hasattr(x, '__call__') func = next(filter(isfunc, cells_values), func) else: return func def get_callback_args(self): """ Return a list of argument names the callback (most likely) accepts as keyword arguments. If the callback is a decorated function, try to recover the original function before inspection. 
""" sig = inspect.signature(self.get_undecorated_callback()) return [p.name for p in sig.parameters.values() if p.kind in ( p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY )] def get_config(self, key, default=None): """ Lookup a config field and return its value, first checking the route.config, then route.app.config.""" depr(0, 13, "Route.get_config() is deprecated.", "The Route.config property already includes values from the" " application config for missing keys. Access it directly.") return self.config.get(key, default) def __repr__(self): cb = self.get_undecorated_callback() return '<%s %s -> %s:%s>' % ( self.method, self.rule, cb.__module__, getattr(cb, '__name__', '?') ) ############################################################################### # Application Object ########################################################### ###############################################################################
Route
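A hedged sketch of how the Route machinery above is typically exercised; the `noop_plugin` callable and the `/wiki/<page>` rule are illustrative and not part of the original source.

```python
from bottle import Bottle

app = Bottle()

def noop_plugin(callback):
    # A plugin without an `apply` method is called directly with the callback.
    def wrapper(*args, **kwargs):
        return callback(*args, **kwargs)
    return wrapper

app.install(noop_plugin)

@app.route('/wiki/<page>')
def show_page(page):
    return page

route = app.routes[0]
print(route.get_callback_args())   # ['page'] -- recovered from the undecorated callback
wrapped = route.call               # plugins are applied lazily here and then cached
```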
python
huggingface__transformers
src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py
{ "start": 2886, "end": 3847 }
class ____(DPRState): def load_dpr_model(self): model = DPRQuestionEncoder(DPRConfig(**BertConfig.get_config_dict("google-bert/bert-base-uncased")[0])) print(f"Loading DPR biencoder from {self.src_file}") saved_state = load_states_from_checkpoint(self.src_file) encoder, prefix = model.question_encoder, "question_model." # Fix changes from https://github.com/huggingface/transformers/commit/614fef1691edb806de976756d4948ecbcd0c0ca3 state_dict = {"bert_model.embeddings.position_ids": model.question_encoder.bert_model.embeddings.position_ids} for key, value in saved_state.model_dict.items(): if key.startswith(prefix): key = key[len(prefix) :] if not key.startswith("encode_proj."): key = "bert_model." + key state_dict[key] = value encoder.load_state_dict(state_dict) return model
DPRQuestionEncoderState
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1303758, "end": 1308379 }
class ____(sgqlc.types.Type, Node, Closable, Updatable): """Projects manage issues, pull requests and notes within a project owner. """ __schema__ = github_schema __field_names__ = ( "body", "body_html", "columns", "created_at", "creator", "database_id", "name", "number", "owner", "pending_cards", "progress", "resource_path", "state", "updated_at", "url", ) body = sgqlc.types.Field(String, graphql_name="body") """The project's description body.""" body_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="bodyHTML") """The projects description body rendered to HTML.""" columns = sgqlc.types.Field( sgqlc.types.non_null(ProjectColumnConnection), graphql_name="columns", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) """List of columns in the project Arguments: * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. """ created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt") """Identifies the date and time when the object was created.""" creator = sgqlc.types.Field(Actor, graphql_name="creator") """The actor who originally created the project.""" database_id = sgqlc.types.Field(Int, graphql_name="databaseId") """Identifies the primary key from the database.""" name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name") """The project's name.""" number = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="number") """The project's number.""" owner = sgqlc.types.Field(sgqlc.types.non_null(ProjectOwner), graphql_name="owner") """The project's owner. Currently limited to repositories, organizations, and users. """ pending_cards = sgqlc.types.Field( sgqlc.types.non_null(ProjectCardConnection), graphql_name="pendingCards", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ( "archived_states", sgqlc.types.Arg( sgqlc.types.list_of(ProjectCardArchivedState), graphql_name="archivedStates", default=("ARCHIVED", "NOT_ARCHIVED") ), ), ) ), ) """List of pending cards in this project Arguments: * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. 
* `archived_states` (`[ProjectCardArchivedState]`): A list of archived states to filter the cards by (default: `[ARCHIVED, NOT_ARCHIVED]`) """ progress = sgqlc.types.Field(sgqlc.types.non_null(ProjectProgress), graphql_name="progress") """Project progress details.""" resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="resourcePath") """The HTTP path for this project""" state = sgqlc.types.Field(sgqlc.types.non_null(ProjectState), graphql_name="state") """Whether the project is open or closed.""" updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt") """Identifies the date and time when the object was last updated.""" url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url") """The HTTP URL for this project"""
Project
python
PrefectHQ__prefect
src/prefect/client/schemas/objects.py
{ "start": 24907, "end": 25069 }
class ____(RunInput): """Represents a task run result input to another task run.""" input_type: Literal["task_run"] = "task_run" id: UUID
TaskRunResult
python
fastai__fastai
fastai/callback/training.py
{ "start": 2254, "end": 2450 }
class ____(Callback): run_after=TrainEvalCallback "Freeze moving average statistics in all non-trainable batchnorm layers." def before_train(self): set_bn_eval(self.model)
BnFreeze
python
Textualize__textual
src/textual/canvas.py
{ "start": 921, "end": 1176 }
class ____: """Base class for a canvas primitive.""" def render(self, canvas: Canvas) -> None: """Render to the canvas. Args: canvas: Canvas instance. """ raise NotImplementedError() @dataclass
Primitive
python
getsentry__sentry
src/sentry/api/serializers/models/rule.py
{ "start": 2173, "end": 2557 }
class ____(RuleSerializerResponseOptional): """ This represents a Sentry Rule. """ id: str | None conditions: list[dict] filters: list[dict] actions: list[dict] actionMatch: str filterMatch: str frequency: int name: str dateCreated: datetime projects: list[str] status: str snooze: bool @register(Rule)
RuleSerializerResponse
python
tensorflow__tensorflow
tensorflow/python/training/session_run_hook.py
{ "start": 9485, "end": 10450 }
class ____( collections.namedtuple("SessionRunValues", ["results", "options", "run_metadata"])): """Contains the results of `Session.run()`. In the future we may use this object to add more information about result of run without changing the Hook API. Args: results: The return values from `Session.run()` corresponding to the fetches attribute returned in the RunArgs. Note that this has the same shape as the RunArgs fetches. For example: fetches = global_step_tensor => results = nparray(int) fetches = [train_op, summary_op, global_step_tensor] => results = [None, nparray(string), nparray(int)] fetches = {'step': global_step_tensor, 'summ': summary_op} => results = {'step': nparray(int), 'summ': nparray(string)} options: `RunOptions` from the `Session.run()` call. run_metadata: `RunMetadata` from the `Session.run()` call. """
SessionRunValues
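A short hedged sketch of where SessionRunValues shows up in practice: a hook requests an extra fetch in before_run and reads it back from run_values.results in after_run. The LossLoggingHook name and its constructor argument are illustrative; only the hook/args classes come from the module above.

```python
from tensorflow.python.training import session_run_hook


class LossLoggingHook(session_run_hook.SessionRunHook):
  """Logs one extra fetch per step via SessionRunValues.results."""

  def __init__(self, loss_tensor):
    self._loss_tensor = loss_tensor

  def before_run(self, run_context):
    # Ask Session.run() to also evaluate the loss tensor.
    return session_run_hook.SessionRunArgs(fetches=self._loss_tensor)

  def after_run(self, run_context, run_values):
    # run_values is the SessionRunValues namedtuple defined above.
    print("loss:", run_values.results)
```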
python
scipy__scipy
scipy/_build_utils/tempita/_tempita.py
{ "start": 13666, "end": 14449 }
class ____(dict): def __init__(self, **kw): for name, value in kw.items(): setattr(self, name, value) def __setattr__(self, name, value): self[name] = value def __getattr__(self, name): try: return self[name] except KeyError: raise AttributeError(name) def __getitem__(self, key): if 'default' in self: try: return dict.__getitem__(self, key) except KeyError: return dict.__getitem__(self, 'default') else: return dict.__getitem__(self, key) def __repr__(self): return '<%s %s>' % ( self.__class__.__name__, ' '.join(['%s=%r' % (k, v) for k, v in sorted(self.items())]))
bunch
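A quick illustration of the attribute access and the 'default' fallback implemented above; the keys and values are made up.

```python
b = bunch(default=0, x=1)
print(b.x)       # 1  -- attribute reads fall through to the dict
print(b['y'])    # 0  -- missing keys fall back to the 'default' entry
b.z = 5          # attribute writes store plain dict items
print(b)         # <bunch default=0 x=1 z=5>
```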
python
crytic__slither
slither/solc_parsing/declarations/event_top_level.py
{ "start": 546, "end": 2628 }
class ____(CallerContextExpression): """ EventTopLevel class """ def __init__( self, event: EventTopLevel, event_data: Dict, slither_parser: "SlitherCompilationUnitSolc" ) -> None: self._event = event self._slither_parser = slither_parser if self.is_compact_ast: self._event.name = event_data["name"] elems = event_data["parameters"] assert elems["nodeType"] == "ParameterList" self._elemsNotParsed = elems["parameters"] else: self._event.name = event_data["attributes"]["name"] for elem in event_data["children"]: # From Solidity 0.6.3 to 0.6.10 (included) # Comment above a event might be added in the children # of an event for the legacy ast if elem["name"] == "ParameterList": if "children" in elem: self._elemsNotParsed = elem["children"] else: self._elemsNotParsed = [] def analyze(self) -> None: for elem_to_parse in self._elemsNotParsed: elem = EventVariable() # Todo: check if the source offset is always here if "src" in elem_to_parse: elem.set_offset(elem_to_parse["src"], self._slither_parser.compilation_unit) elem_parser = EventVariableSolc(elem, elem_to_parse) elem_parser.analyze(self) self._event.elems.append(elem) self._elemsNotParsed = [] @property def is_compact_ast(self) -> bool: return self._slither_parser.is_compact_ast @property def compilation_unit(self) -> SlitherCompilationUnit: return self._slither_parser.compilation_unit def get_key(self) -> str: return self._slither_parser.get_key() @property def slither_parser(self) -> "SlitherCompilationUnitSolc": return self._slither_parser @property def underlying_event(self) -> EventTopLevel: return self._event
EventTopLevelSolc
python
python__mypy
mypy/plugin.py
{ "start": 20285, "end": 20536 }
class ____(NamedTuple): call: CallExpr # The r.h.s. of dynamic class definition name: str # The name this class is being assigned to api: SemanticAnalyzerPluginInterface @mypyc_attr(allow_interpreted_subclasses=True)
DynamicClassDefContext
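A hedged sketch of a mypy plugin that receives this context; "mylib.make_model" is a hypothetical factory name and the hook body is left as a stub.

```python
from typing import Callable, Optional

from mypy.plugin import DynamicClassDefContext, Plugin


class ExamplePlugin(Plugin):
    def get_dynamic_class_hook(
        self, fullname: str
    ) -> Optional[Callable[[DynamicClassDefContext], None]]:
        if fullname == "mylib.make_model":  # hypothetical dynamic-class factory
            return dynamic_class_hook
        return None


def dynamic_class_hook(ctx: DynamicClassDefContext) -> None:
    # ctx.call is the r.h.s. CallExpr, ctx.name the assigned class name,
    # and ctx.api the semantic analyzer interface used to register the symbol.
    ...


def plugin(version: str) -> type:
    return ExamplePlugin
```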
python
run-llama__llama_index
llama-index-core/llama_index/core/query_engine/sql_join_query_engine.py
{ "start": 6471, "end": 14295 }
class ____(BaseQueryEngine): """ SQL Join Query Engine. This query engine can "Join" a SQL database results with another query engine. It can decide it needs to query the SQL database or the other query engine. If it decides to query the SQL database, it will first query the SQL database, whether to augment information with retrieved results from the other query engine. Args: sql_query_tool (QueryEngineTool): Query engine tool for SQL database. other_query_tool (QueryEngineTool): Other query engine tool. selector (Optional[Union[LLMSingleSelector, PydanticSingleSelector]]): Selector to use. sql_join_synthesis_prompt (Optional[BasePromptTemplate]): PromptTemplate to use for SQL join synthesis. sql_augment_query_transform (Optional[SQLAugmentQueryTransform]): Query transform to use for SQL augmentation. use_sql_join_synthesis (bool): Whether to use SQL join synthesis. callback_manager (Optional[CallbackManager]): Callback manager to use. verbose (bool): Whether to print intermediate results. """ def __init__( self, sql_query_tool: QueryEngineTool, other_query_tool: QueryEngineTool, selector: Optional[Union[LLMSingleSelector, PydanticSingleSelector]] = None, llm: Optional[LLM] = None, sql_join_synthesis_prompt: Optional[BasePromptTemplate] = None, sql_augment_query_transform: Optional[SQLAugmentQueryTransform] = None, use_sql_join_synthesis: bool = True, callback_manager: Optional[CallbackManager] = None, verbose: bool = True, streaming: bool = False, ) -> None: """Initialize params.""" super().__init__(callback_manager=callback_manager) # validate that the query engines are of the right type if not isinstance( sql_query_tool.query_engine, (BaseSQLTableQueryEngine, NLSQLTableQueryEngine), ): raise ValueError( "sql_query_tool.query_engine must be an instance of " "BaseSQLTableQueryEngine or NLSQLTableQueryEngine" ) self._sql_query_tool = sql_query_tool self._other_query_tool = other_query_tool self._llm = llm or Settings.llm self._selector = selector or get_selector_from_llm(self._llm, is_multi=False) # type: ignore assert isinstance(self._selector, (LLMSingleSelector, PydanticSingleSelector)) self._sql_join_synthesis_prompt = ( sql_join_synthesis_prompt or DEFAULT_SQL_JOIN_SYNTHESIS_PROMPT ) self._sql_augment_query_transform = ( sql_augment_query_transform or SQLAugmentQueryTransform(llm=self._llm) ) self._use_sql_join_synthesis = use_sql_join_synthesis self._verbose = verbose self._streaming = streaming def _get_prompt_modules(self) -> PromptMixinType: """Get prompt sub-modules.""" return { "selector": self._selector, "sql_augment_query_transform": self._sql_augment_query_transform, } def _get_prompts(self) -> PromptDictType: """Get prompts.""" return {"sql_join_synthesis_prompt": self._sql_join_synthesis_prompt} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" if "sql_join_synthesis_prompt" in prompts: self._sql_join_synthesis_prompt = prompts["sql_join_synthesis_prompt"] def _query_sql_other(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: """Query SQL database + other query engine in sequence.""" # first query SQL database sql_response = self._sql_query_tool.query_engine.query(query_bundle) if not self._use_sql_join_synthesis: return sql_response sql_query = ( sql_response.metadata["sql_query"] if sql_response.metadata else None ) if self._verbose: print_text(f"SQL query: {sql_query}\n", color="yellow") print_text(f"SQL response: {sql_response}\n", color="yellow") # given SQL db, transform query into new query new_query = 
self._sql_augment_query_transform( query_bundle.query_str, metadata={ "sql_query": _format_sql_query(sql_query), "sql_query_response": str(sql_response), }, ) if self._verbose: print_text( f"Transformed query given SQL response: {new_query.query_str}\n", color="blue", ) logger.info(f"> Transformed query given SQL response: {new_query.query_str}") if self._sql_augment_query_transform.check_stop(new_query): return sql_response other_response = self._other_query_tool.query_engine.query(new_query) if self._verbose: print_text(f"query engine response: {other_response}\n", color="pink") logger.info(f"> query engine response: {other_response}") if self._streaming: response_gen = self._llm.stream( self._sql_join_synthesis_prompt, query_str=query_bundle.query_str, sql_query_str=sql_query, sql_response_str=str(sql_response), query_engine_query_str=new_query.query_str, query_engine_response_str=str(other_response), ) response_metadata = { **(sql_response.metadata or {}), **(other_response.metadata or {}), } source_nodes = other_response.source_nodes return StreamingResponse( response_gen, metadata=response_metadata, source_nodes=source_nodes, ) else: response_str = self._llm.predict( self._sql_join_synthesis_prompt, query_str=query_bundle.query_str, sql_query_str=sql_query, sql_response_str=str(sql_response), query_engine_query_str=new_query.query_str, query_engine_response_str=str(other_response), ) response_metadata = { **(sql_response.metadata or {}), **(other_response.metadata or {}), } source_nodes = other_response.source_nodes return Response( response_str, metadata=response_metadata, source_nodes=source_nodes, ) def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: """Query and get response.""" # TODO: see if this can be consolidated with logic in RouterQueryEngine metadatas = [self._sql_query_tool.metadata, self._other_query_tool.metadata] result = self._selector.select(metadatas, query_bundle) # pick sql query if result.ind == 0: if self._verbose: print_text(f"Querying SQL database: {result.reason}\n", color="blue") logger.info(f"> Querying SQL database: {result.reason}") return self._query_sql_other(query_bundle) elif result.ind == 1: if self._verbose: print_text( f"Querying other query engine: {result.reason}\n", color="blue" ) logger.info(f"> Querying other query engine: {result.reason}") return self._other_query_tool.query_engine.query(query_bundle) else: raise ValueError(f"Invalid result.ind: {result.ind}") async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: # TODO: make async return self._query(query_bundle)
SQLJoinQueryEngine
python
dagster-io__dagster
python_modules/dagster/dagster/_core/executor/child_process_executor.py
{ "start": 763, "end": 883 }
class ____( NamedTuple("ChildProcessStartEvent", [("pid", int)]), ChildProcessEvent ): pass
ChildProcessStartEvent
python
justquick__django-activity-stream
actstream/tests/test_feeds.py
{ "start": 121, "end": 3523 }
class ____(base.DataTestCase): urls = 'actstream.urls' @property def rss_base(self): return ['<?xml version="1.0" encoding="utf-8"?>\n', '<rss ', 'xmlns:atom="http://www.w3.org/2005/Atom"', 'version="2.0"', '<language>%s' % settings.LANGUAGE_CODE] @property def atom_base(self): return ['<?xml version="1.0" encoding="utf-8"?>\n', 'xmlns="http://www.w3.org/2005/Atom"', 'xml:lang="%s"' % settings.LANGUAGE_CODE, '<uri>http://example.com/detail/', '<id>tag:example.com,2000-01-01:/detail/'] def test_feed(self): self.client.login(username='admin', password='admin') expected = [ 'Activity feed for your followed actors', 'Public activities of actors you follow', 'Two started following CoolGroup %s ago' % self.timesince, 'Two joined CoolGroup %s ago' % self.timesince, ] expected_json = expected[2:] expected_times = [ 'admin started following Two %s ago' % self.timesince, 'admin joined CoolGroup %s ago' % self.timesince, 'admin commented on CoolGroup %s ago' % self.timesince ] expected_json_with_user_activity = expected_json + expected_times rss = self.capture('actstream_feed') self.assertAllIn(self.rss_base + expected, rss) atom = self.capture('actstream_feed_atom') self.assertAllIn(self.atom_base + expected, atom) json = self.capture('actstream_feed_json') self.assertAllIn(expected_json, json) json_with_user_activity = self.capture('actstream_feed_json', query_string='with_user_activity=true') self.assertAllIn(expected_json_with_user_activity, json_with_user_activity) def test_model_feed(self): expected = [ 'Activity feed from %s' % self.User.__name__, 'Public activities of %s' % self.User.__name__, 'admin commented on CoolGroup %s ago' % self.timesince, 'Two started following CoolGroup %s ago' % self.timesince, 'Two joined CoolGroup %s ago' % self.timesince, 'admin started following Two %s ago' % self.timesince, 'admin joined CoolGroup %s ago' % self.timesince, ] rss = self.capture('actstream_model_feed', self.user_ct.pk) self.assertAllIn(self.rss_base + expected, rss) atom = self.capture('actstream_model_feed_atom', self.user_ct.pk) self.assertAllIn(self.atom_base + expected, atom) json = self.capture('actstream_model_feed_json', self.user_ct.pk) self.assertEqual(len(json['items']), 10) def test_object_feed(self): expected = [ 'Activity for Two', 'admin started following Two %s ago' % self.timesince, ] rss = self.capture('actstream_object_feed', self.user_ct.pk, self.user2.pk) self.assertAllIn(self.rss_base + expected, rss) atom = self.capture('actstream_object_feed_atom', self.user_ct.pk, self.user2.pk) self.assertAllIn(self.atom_base + expected, atom) json = self.capture( 'actstream_object_feed_json', self.user_ct.pk, self.user2.pk ) self.assertEqual(len(json['items']), 3)
FeedsTestCase
python
scipy__scipy
scipy/special/tests/test_basic.py
{ "start": 39496, "end": 39564 }
class ____: def test_besselpoly(self): pass
TestBesselpoly
python
huggingface__transformers
src/transformers/models/sam3_tracker/modeling_sam3_tracker.py
{ "start": 13693, "end": 16119 }
class ____(nn.Module): """ SAM3_TRACKER's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and values. """ def __init__(self, config, downsample_rate=None): super().__init__() downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate self.config = config self.hidden_size = config.hidden_size self.internal_dim = config.hidden_size // downsample_rate self.num_attention_heads = config.num_attention_heads self.head_dim = self.internal_dim // config.num_attention_heads self.scaling = self.head_dim**-0.5 self.is_causal = False self.q_proj = nn.Linear(self.hidden_size, self.internal_dim) self.k_proj = nn.Linear(self.hidden_size, self.internal_dim) self.v_proj = nn.Linear(self.hidden_size, self.internal_dim) self.o_proj = nn.Linear(self.internal_dim, self.hidden_size) def forward( self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_similarity: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor]: # Input projections batch_size, point_batch_size = query.shape[:2] new_shape = (batch_size * point_batch_size, -1, self.num_attention_heads, self.head_dim) query = self.q_proj(query).view(*new_shape).transpose(1, 2) key = self.k_proj(key).view(*new_shape).transpose(1, 2) value = self.v_proj(value).view(*new_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query, key, value, attention_mask=attention_similarity, dropout=0.0, scaling=self.scaling, is_causal=self.is_causal, **kwargs, ) attn_output = attn_output.reshape( batch_size, point_batch_size, -1, self.num_attention_heads * self.head_dim ).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights
Sam3TrackerAttention
python
pyinstaller__pyinstaller
PyInstaller/utils/win32/versioninfo.py
{ "start": 11050, "end": 12660 }
class ____: """ WORD wLength; // length of the version resource WORD wValueLength; // length of the Value member in the current // VS_VERSION_INFO structure WORD wType; // 1 means text, 0 means binary WCHAR szKey[]; // Contains the Unicode string 'StringFileInfo'. WORD Padding[]; StringTable Children[]; // list of zero or more String structures """ def __init__(self, kids=None): self.name = 'StringFileInfo' self.kids = kids or [] def fromRaw(self, sublen, vallen, name, data, i, limit): self.name = name while i < limit: st = StringTable() j = st.fromRaw(data, i, limit) self.kids.append(st) i = j return i def toRaw(self): raw_name = getRaw(self.name) vallen = 0 typ = 1 sublen = 6 + len(raw_name) + 2 pad = b'' if sublen % 4: pad = b'\000\000' tmp = b''.join([kid.toRaw() for kid in self.kids]) sublen = sublen + len(pad) + len(tmp) return struct.pack('HHH', sublen, vallen, typ) + raw_name + b'\000\000' + pad + tmp def __eq__(self, other): return self.toRaw() == other def __str__(self, indent=''): new_indent = indent + ' ' tmp = ', \n'.join(kid.__str__(new_indent) for kid in self.kids) return f'{indent}StringFileInfo(\n{new_indent}[\n{tmp}\n{new_indent}])' def __repr__(self): return 'versioninfo.StringFileInfo(%r)' % self.kids
StringFileInfo
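A hedged sketch of how this block usually appears in a PyInstaller version resource, assuming the sibling StringTable and StringStruct classes from the same module; the language/codepage id and the string values are placeholders.

```python
from PyInstaller.utils.win32.versioninfo import StringFileInfo, StringStruct, StringTable

info = StringFileInfo([
    StringTable('040904B0', [                      # lang/codepage id, as in version files
        StringStruct('CompanyName', 'Example Corp'),
        StringStruct('FileDescription', 'Example application'),
    ]),
])
raw = info.toRaw()    # serialized bytes for the VS_VERSION_INFO resource
```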
python
walkccc__LeetCode
solutions/2392. Build a Matrix With Conditions/2392.py
{ "start": 0, "end": 1177 }
class ____: def buildMatrix(self, k: int, rowConditions: list[list[int]], colConditions: list[list[int]]) -> list[list[int]]: rowOrder = self._topologicalSort(rowConditions, k) if not rowOrder: return [] colOrder = self._topologicalSort(colConditions, k) if not colOrder: return [] ans = [[0] * k for _ in range(k)] nodeToRowIndex = [0] * (k + 1) for i, node in enumerate(rowOrder): nodeToRowIndex[node] = i for j, node in enumerate(colOrder): i = nodeToRowIndex[node] ans[i][j] = node return ans def _topologicalSort(self, conditions: list[list[int]], n: int) -> list[int]: order = [] graph = [[] for _ in range(n + 1)] inDegrees = [0] * (n + 1) # Build the graph. for u, v in conditions: graph[u].append(v) inDegrees[v] += 1 # Perform topological sorting. q = collections.deque([i for i in range(1, n + 1) if inDegrees[i] == 0]) while q: u = q.popleft() order.append(u) for v in graph[u]: inDegrees[v] -= 1 if inDegrees[v] == 0: q.append(v) return order if len(order) == n else []
Solution
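A worked run of the builder above (LeetCode 2392, example 1). The class body relies on `collections` being importable in the solution environment, so the import is added here explicitly.

```python
import collections  # required by _topologicalSort's deque

sol = Solution()
print(sol.buildMatrix(3, [[1, 2], [3, 2]], [[2, 1], [3, 2]]))
# [[0, 0, 1], [3, 0, 0], [0, 2, 0]]
# One valid answer: rows follow order 1, 3, 2 and columns follow order 3, 2, 1.
```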
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_django/DJ008.py
{ "start": 1680, "end": 1933 }
class ____(models.Model): new_field = models.CharField(max_length=10) class Meta: abstract = True @property def my_brand_new_property(self): return 1 def my_beautiful_method(self): return 2
AbstractTestModel1
python
tensorflow__tensorflow
tensorflow/python/debug/cli/analyzer_cli.py
{ "start": 4509, "end": 58529 }
class ____(object): """Analyzer for debug data from dump directories.""" _TIMESTAMP_COLUMN_HEAD = "t (ms)" _DUMP_SIZE_COLUMN_HEAD = "Size (B)" _OP_TYPE_COLUMN_HEAD = "Op type" _TENSOR_NAME_COLUMN_HEAD = "Tensor name" # Op types to be omitted when generating descriptions of graph structure. _GRAPH_STRUCT_OP_TYPE_DENYLIST = ("_Send", "_Recv", "_HostSend", "_HostRecv", "_Retval") def __init__(self, debug_dump, config): """DebugAnalyzer constructor. Args: debug_dump: A DebugDumpDir object. config: A `cli_config.CLIConfig` object that carries user-facing configurations. """ self._debug_dump = debug_dump self._evaluator = evaluator.ExpressionEvaluator(self._debug_dump) # Initialize tensor filters state. self._tensor_filters = {} self._build_argument_parsers(config) config.set_callback("graph_recursion_depth", self._build_argument_parsers) # TODO(cais): Implement list_nodes. def _build_argument_parsers(self, config): """Build argument parsers for DebugAnalayzer. Args: config: A `cli_config.CLIConfig` object. Returns: A dict mapping command handler name to `ArgumentParser` instance. """ # Argument parsers for command handlers. self._arg_parsers = {} # Parser for list_tensors. ap = argparse.ArgumentParser( description="List dumped intermediate tensors.", usage=argparse.SUPPRESS) ap.add_argument( "-f", "--tensor_filter", dest="tensor_filter", type=str, default="", help="List only Tensors passing the filter of the specified name") ap.add_argument( "-fenn", "--filter_exclude_node_names", dest="filter_exclude_node_names", type=str, default="", help="When applying the tensor filter, exclude node with names " "matching the regular expression. Applicable only if --tensor_filter " "or -f is used.") ap.add_argument( "-n", "--node_name_filter", dest="node_name_filter", type=str, default="", help="filter node name by regex.") ap.add_argument( "-t", "--op_type_filter", dest="op_type_filter", type=str, default="", help="filter op type by regex.") ap.add_argument( "-s", "--sort_by", dest="sort_by", type=str, default=SORT_TENSORS_BY_TIMESTAMP, help=("the field to sort the data by: (%s | %s | %s | %s)" % (SORT_TENSORS_BY_TIMESTAMP, SORT_TENSORS_BY_DUMP_SIZE, SORT_TENSORS_BY_OP_TYPE, SORT_TENSORS_BY_TENSOR_NAME))) ap.add_argument( "-r", "--reverse", dest="reverse", action="store_true", help="sort the data in reverse (descending) order") self._arg_parsers["list_tensors"] = ap # Parser for node_info. ap = argparse.ArgumentParser( description="Show information about a node.", usage=argparse.SUPPRESS) ap.add_argument( "node_name", type=str, help="Name of the node or an associated tensor, e.g., " "hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0") ap.add_argument( "-a", "--attributes", dest="attributes", action="store_true", help="Also list attributes of the node.") ap.add_argument( "-d", "--dumps", dest="dumps", action="store_true", help="Also list dumps available from the node.") ap.add_argument( "-t", "--traceback", dest="traceback", action="store_true", help="Also include the traceback of the node's creation " "(if available in Python).") self._arg_parsers["node_info"] = ap # Parser for list_inputs. 
ap = argparse.ArgumentParser( description="Show inputs to a node.", usage=argparse.SUPPRESS) ap.add_argument( "node_name", type=str, help="Name of the node or an output tensor from the node, e.g., " "hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0") ap.add_argument( "-c", "--control", action="store_true", help="Include control inputs.") ap.add_argument( "-d", "--depth", dest="depth", type=int, default=config.get("graph_recursion_depth"), help="Maximum depth of recursion used when showing the input tree.") ap.add_argument( "-r", "--recursive", dest="recursive", action="store_true", help="Show inputs to the node recursively, i.e., the input tree.") ap.add_argument( "-t", "--op_type", action="store_true", help="Show op types of input nodes.") self._arg_parsers["list_inputs"] = ap # Parser for list_outputs. ap = argparse.ArgumentParser( description="Show the nodes that receive the outputs of given node.", usage=argparse.SUPPRESS) ap.add_argument( "node_name", type=str, help="Name of the node or an output tensor from the node, e.g., " "hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0") ap.add_argument( "-c", "--control", action="store_true", help="Include control inputs.") ap.add_argument( "-d", "--depth", dest="depth", type=int, default=config.get("graph_recursion_depth"), help="Maximum depth of recursion used when showing the output tree.") ap.add_argument( "-r", "--recursive", dest="recursive", action="store_true", help="Show recipients of the node recursively, i.e., the output " "tree.") ap.add_argument( "-t", "--op_type", action="store_true", help="Show op types of recipient nodes.") self._arg_parsers["list_outputs"] = ap # Parser for print_tensor. self._arg_parsers["print_tensor"] = ( command_parser.get_print_tensor_argparser( "Print the value of a dumped tensor.")) # Parser for print_source. ap = argparse.ArgumentParser( description="Print a Python source file with overlaid debug " "information, including the nodes (ops) or Tensors created at the " "source lines.", usage=argparse.SUPPRESS) ap.add_argument( "source_file_path", type=str, help="Path to the source file.") ap.add_argument( "-t", "--tensors", dest="tensors", action="store_true", help="Label lines with dumped Tensors, instead of ops.") ap.add_argument( "-m", "--max_elements_per_line", type=int, default=10, help="Maximum number of elements (ops or Tensors) to show per source " "line.") ap.add_argument( "-b", "--line_begin", type=int, default=1, help="Print source beginning at line number (1-based.)") self._arg_parsers["print_source"] = ap # Parser for list_source. ap = argparse.ArgumentParser( description="List source files responsible for constructing nodes and " "tensors present in the run().", usage=argparse.SUPPRESS) ap.add_argument( "-p", "--path_filter", type=str, default="", help="Regular expression filter for file path.") ap.add_argument( "-n", "--node_name_filter", type=str, default="", help="Regular expression filter for node name.") self._arg_parsers["list_source"] = ap # Parser for eval. ap = argparse.ArgumentParser( description="""Evaluate an arbitrary expression. Can use tensor values from the current debug dump. The debug tensor names should be enclosed in pairs of backticks. Expressions with spaces should be enclosed in a pair of double quotes or a pair of single quotes. By default, numpy is imported as np and can be used in the expressions. E.g., 1) eval np.argmax(`Softmax:0`), 2) eval 'np.sum(`Softmax:0`, axis=1)', 3) eval "np.matmul((`output/Identity:0`/`Softmax:0`).T, `Softmax:0`)". 
""", usage=argparse.SUPPRESS) ap.add_argument( "expression", type=str, help="""Expression to be evaluated. 1) in the simplest case, use <node_name>:<output_slot>, e.g., hidden_0/MatMul:0. 2) if the default debug op "DebugIdentity" is to be overridden, use <node_name>:<output_slot>:<debug_op>, e.g., hidden_0/MatMul:0:DebugNumericSummary. 3) if the tensor of the same name exists on more than one device, use <device_name>:<node_name>:<output_slot>[:<debug_op>], e.g., /job:worker/replica:0/task:0/gpu:0:hidden_0/MatMul:0 /job:worker/replica:0/task:2/cpu:0:hidden_0/MatMul:0:DebugNanCount. 4) if the tensor is executed multiple times in a given `Session.run` call, specify the execution index with a 0-based integer enclose in a pair of brackets at the end, e.g., RNN/tanh:0[0] /job:worker/replica:0/task:0/gpu:0:RNN/tanh:0[0].""") ap.add_argument( "-a", "--all", dest="print_all", action="store_true", help="Print the tensor in its entirety, i.e., do not use ellipses " "(may be slow for large results).") ap.add_argument( "-w", "--write_path", default="", help="Path of the numpy file to write the evaluation result to, " "using numpy.save()") self._arg_parsers["eval"] = ap def add_tensor_filter(self, filter_name, filter_callable): """Add a tensor filter. A tensor filter is a named callable of the signature: filter_callable(dump_datum, tensor), wherein dump_datum is an instance of debug_data.DebugTensorDatum carrying metadata about the dumped tensor, including tensor name, timestamps, etc. tensor is the value of the dumped tensor as an numpy.ndarray object. The return value of the function is a bool. This is the same signature as the input argument to debug_data.DebugDumpDir.find(). Args: filter_name: (str) name of the filter. Cannot be empty. filter_callable: (callable) a filter function of the signature described as above. Raises: ValueError: If filter_name is an empty str. TypeError: If filter_name is not a str. Or if filter_callable is not callable. """ if not isinstance(filter_name, str): raise TypeError("Input argument filter_name is expected to be str, " "but is not.") # Check that filter_name is not an empty str. if not filter_name: raise ValueError("Input argument filter_name cannot be empty.") # Check that filter_callable is callable. if not callable(filter_callable): raise TypeError( "Input argument filter_callable is expected to be callable, " "but is not.") self._tensor_filters[filter_name] = filter_callable def get_tensor_filter(self, filter_name): """Retrieve filter function by name. Args: filter_name: Name of the filter set during add_tensor_filter() call. Returns: The callable associated with the filter name. Raises: ValueError: If there is no tensor filter of the specified filter name. """ if filter_name not in self._tensor_filters: raise ValueError("There is no tensor filter named \"%s\"" % filter_name) return self._tensor_filters[filter_name] def get_help(self, handler_name): return self._arg_parsers[handler_name].format_help() def list_tensors(self, args, screen_info=None): """Command handler for list_tensors. List tensors dumped during debugged Session.run() call. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object. Raises: ValueError: If `--filter_exclude_node_names` is used without `-f` or `--tensor_filter` being used. 
""" # TODO(cais): Add annotations of substrings for dumped tensor names, to # facilitate on-screen highlighting/selection of node names. _ = screen_info parsed = self._arg_parsers["list_tensors"].parse_args(args) output = [] filter_strs = [] if parsed.op_type_filter: op_type_regex = re.compile(parsed.op_type_filter) filter_strs.append("Op type regex filter: \"%s\"" % parsed.op_type_filter) else: op_type_regex = None if parsed.node_name_filter: node_name_regex = re.compile(parsed.node_name_filter) filter_strs.append("Node name regex filter: \"%s\"" % parsed.node_name_filter) else: node_name_regex = None output = debugger_cli_common.RichTextLines(filter_strs) output.append("") if parsed.tensor_filter: try: filter_callable = self.get_tensor_filter(parsed.tensor_filter) except ValueError: output = cli_shared.error("There is no tensor filter named \"%s\"." % parsed.tensor_filter) _add_main_menu(output, node_name=None, enable_list_tensors=False) return output data_to_show = self._debug_dump.find( filter_callable, exclude_node_names=parsed.filter_exclude_node_names) else: if parsed.filter_exclude_node_names: raise ValueError( "The flag --filter_exclude_node_names is valid only when " "the flag -f or --tensor_filter is used.") data_to_show = self._debug_dump.dumped_tensor_data # TODO(cais): Implement filter by lambda on tensor value. max_timestamp_width, max_dump_size_width, max_op_type_width = ( self._measure_tensor_list_column_widths(data_to_show)) # Sort the data. data_to_show = self._sort_dump_data_by( data_to_show, parsed.sort_by, parsed.reverse) output.extend( self._tensor_list_column_heads(parsed, max_timestamp_width, max_dump_size_width, max_op_type_width)) dump_count = 0 for dump in data_to_show: if node_name_regex and not node_name_regex.match(dump.node_name): continue if op_type_regex: op_type = self._debug_dump.node_op_type(dump.node_name) if not op_type_regex.match(op_type): continue rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0 dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes) dumped_tensor_name = "%s:%d" % (dump.node_name, dump.output_slot) op_type = self._debug_dump.node_op_type(dump.node_name) line = "[%.3f]" % rel_time line += " " * (max_timestamp_width - len(line)) line += dump_size_str line += " " * (max_timestamp_width + max_dump_size_width - len(line)) line += op_type line += " " * (max_timestamp_width + max_dump_size_width + max_op_type_width - len(line)) line += dumped_tensor_name output.append( line, font_attr_segs=[( len(line) - len(dumped_tensor_name), len(line), debugger_cli_common.MenuItem("", "pt %s" % dumped_tensor_name))]) dump_count += 1 if parsed.tensor_filter: output.prepend([ "%d dumped tensor(s) passing filter \"%s\":" % (dump_count, parsed.tensor_filter) ]) else: output.prepend(["%d dumped tensor(s):" % dump_count]) _add_main_menu(output, node_name=None, enable_list_tensors=False) return output def _measure_tensor_list_column_widths(self, data): """Determine the maximum widths of the timestamp and op-type column. This method assumes that data is sorted in the default order, i.e., by ascending timestamps. Args: data: (list of DebugTensorDaum) the data based on which the maximum column widths will be determined. Returns: (int) maximum width of the timestamp column. 0 if data is empty. (int) maximum width of the dump size column. 0 if data is empty. (int) maximum width of the op type column. 0 if data is empty. 
""" max_timestamp_width = 0 if data: max_rel_time_ms = (data[-1].timestamp - self._debug_dump.t0) / 1000.0 max_timestamp_width = len("[%.3f] " % max_rel_time_ms) + 1 max_timestamp_width = max(max_timestamp_width, len(self._TIMESTAMP_COLUMN_HEAD) + 1) max_dump_size_width = 0 for dump in data: dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes) if len(dump_size_str) + 1 > max_dump_size_width: max_dump_size_width = len(dump_size_str) + 1 max_dump_size_width = max(max_dump_size_width, len(self._DUMP_SIZE_COLUMN_HEAD) + 1) max_op_type_width = 0 for dump in data: op_type = self._debug_dump.node_op_type(dump.node_name) if len(op_type) + 1 > max_op_type_width: max_op_type_width = len(op_type) + 1 max_op_type_width = max(max_op_type_width, len(self._OP_TYPE_COLUMN_HEAD) + 1) return max_timestamp_width, max_dump_size_width, max_op_type_width def _sort_dump_data_by(self, data, sort_by, reverse): """Sort a list of DebugTensorDatum in specified order. Args: data: (list of DebugTensorDatum) the data to be sorted. sort_by: The field to sort data by. reverse: (bool) Whether to use reversed (descending) order. Returns: (list of DebugTensorDatum) in sorted order. Raises: ValueError: given an invalid value of sort_by. """ if sort_by == SORT_TENSORS_BY_TIMESTAMP: return sorted( data, reverse=reverse, key=lambda x: x.timestamp) elif sort_by == SORT_TENSORS_BY_DUMP_SIZE: return sorted(data, reverse=reverse, key=lambda x: x.dump_size_bytes) elif sort_by == SORT_TENSORS_BY_OP_TYPE: return sorted( data, reverse=reverse, key=lambda x: self._debug_dump.node_op_type(x.node_name)) elif sort_by == SORT_TENSORS_BY_TENSOR_NAME: return sorted( data, reverse=reverse, key=lambda x: "%s:%d" % (x.node_name, x.output_slot)) else: raise ValueError("Unsupported key to sort tensors by: %s" % sort_by) def _tensor_list_column_heads(self, parsed, max_timestamp_width, max_dump_size_width, max_op_type_width): """Generate a line containing the column heads of the tensor list. Args: parsed: Parsed arguments (by argparse) of the list_tensors command. max_timestamp_width: (int) maximum width of the timestamp column. max_dump_size_width: (int) maximum width of the dump size column. max_op_type_width: (int) maximum width of the op type column. Returns: A RichTextLines object. 
""" base_command = "list_tensors" if parsed.tensor_filter: base_command += " -f %s" % parsed.tensor_filter if parsed.op_type_filter: base_command += " -t %s" % parsed.op_type_filter if parsed.node_name_filter: base_command += " -n %s" % parsed.node_name_filter attr_segs = {0: []} row = self._TIMESTAMP_COLUMN_HEAD command = "%s -s %s" % (base_command, SORT_TENSORS_BY_TIMESTAMP) if parsed.sort_by == SORT_TENSORS_BY_TIMESTAMP and not parsed.reverse: command += " -r" attr_segs[0].append( (0, len(row), [debugger_cli_common.MenuItem(None, command), "bold"])) row += " " * (max_timestamp_width - len(row)) prev_len = len(row) row += self._DUMP_SIZE_COLUMN_HEAD command = "%s -s %s" % (base_command, SORT_TENSORS_BY_DUMP_SIZE) if parsed.sort_by == SORT_TENSORS_BY_DUMP_SIZE and not parsed.reverse: command += " -r" attr_segs[0].append((prev_len, len(row), [debugger_cli_common.MenuItem(None, command), "bold"])) row += " " * (max_dump_size_width + max_timestamp_width - len(row)) prev_len = len(row) row += self._OP_TYPE_COLUMN_HEAD command = "%s -s %s" % (base_command, SORT_TENSORS_BY_OP_TYPE) if parsed.sort_by == SORT_TENSORS_BY_OP_TYPE and not parsed.reverse: command += " -r" attr_segs[0].append((prev_len, len(row), [debugger_cli_common.MenuItem(None, command), "bold"])) row += " " * ( max_op_type_width + max_dump_size_width + max_timestamp_width - len(row) ) prev_len = len(row) row += self._TENSOR_NAME_COLUMN_HEAD command = "%s -s %s" % (base_command, SORT_TENSORS_BY_TENSOR_NAME) if parsed.sort_by == SORT_TENSORS_BY_TENSOR_NAME and not parsed.reverse: command += " -r" attr_segs[0].append((prev_len, len(row), [debugger_cli_common.MenuItem("", command), "bold"])) row += " " * ( max_op_type_width + max_dump_size_width + max_timestamp_width - len(row) ) return debugger_cli_common.RichTextLines([row], font_attr_segs=attr_segs) def node_info(self, args, screen_info=None): """Command handler for node_info. Query information about a given node. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object. """ # TODO(cais): Add annotation of substrings for node names, to facilitate # on-screen highlighting/selection of node names. _ = screen_info parsed = self._arg_parsers["node_info"].parse_args(args) # Get a node name, regardless of whether the input is a node name (without # output slot attached) or a tensor name (with output slot attached). node_name, unused_slot = debug_graphs.parse_node_or_tensor_name( parsed.node_name) if not self._debug_dump.node_exists(node_name): output = cli_shared.error( "There is no node named \"%s\" in the partition graphs" % node_name) _add_main_menu( output, node_name=None, enable_list_tensors=True, enable_node_info=False, enable_list_inputs=False, enable_list_outputs=False) return output # TODO(cais): Provide UI glossary feature to explain to users what the # term "partition graph" means and how it is related to TF graph objects # in Python. The information can be along the line of: # "A tensorflow graph defined in Python is stripped of unused ops # according to the feeds and fetches and divided into a number of # partition graphs that may be distributed among multiple devices and # hosts. The partition graphs are what's actually executed by the C++ # runtime during a run() call." 
lines = ["Node %s" % node_name] font_attr_segs = { 0: [(len(lines[-1]) - len(node_name), len(lines[-1]), "bold")] } lines.append("") lines.append(" Op: %s" % self._debug_dump.node_op_type(node_name)) lines.append(" Device: %s" % self._debug_dump.node_device(node_name)) output = debugger_cli_common.RichTextLines( lines, font_attr_segs=font_attr_segs) # List node inputs (non-control and control). inputs = self._exclude_denylisted_ops( self._debug_dump.node_inputs(node_name)) ctrl_inputs = self._exclude_denylisted_ops( self._debug_dump.node_inputs(node_name, is_control=True)) output.extend(self._format_neighbors("input", inputs, ctrl_inputs)) # List node output recipients (non-control and control). recs = self._exclude_denylisted_ops( self._debug_dump.node_recipients(node_name)) ctrl_recs = self._exclude_denylisted_ops( self._debug_dump.node_recipients(node_name, is_control=True)) output.extend(self._format_neighbors("recipient", recs, ctrl_recs)) # Optional: List attributes of the node. if parsed.attributes: output.extend(self._list_node_attributes(node_name)) # Optional: List dumps available from the node. if parsed.dumps: output.extend(self._list_node_dumps(node_name)) if parsed.traceback: output.extend(self._render_node_traceback(node_name)) _add_main_menu(output, node_name=node_name, enable_node_info=False) return output def _exclude_denylisted_ops(self, node_names): """Exclude all nodes whose op types are in _GRAPH_STRUCT_OP_TYPE_DENYLIST. Args: node_names: An iterable of node or graph element names. Returns: A list of node names that are not denylisted. """ return [ node_name for node_name in node_names if self._debug_dump.node_op_type(debug_graphs.get_node_name(node_name)) not in self._GRAPH_STRUCT_OP_TYPE_DENYLIST ] def _render_node_traceback(self, node_name): """Render traceback of a node's creation in Python, if available. Args: node_name: (str) name of the node. Returns: A RichTextLines object containing the stack trace of the node's construction. """ lines = [RL(""), RL(""), RL("Traceback of node construction:", "bold")] try: node_stack = self._debug_dump.node_traceback(node_name) for depth, (file_path, line, function_name, text) in enumerate( node_stack): lines.append("%d: %s" % (depth, file_path)) attribute = debugger_cli_common.MenuItem( "", "ps %s -b %d" % (file_path, line)) if text else None line_number_line = RL(" ") line_number_line += RL("Line: %d" % line, attribute) lines.append(line_number_line) lines.append(" Function: %s" % function_name) lines.append(" Text: " + (("\"%s\"" % text) if text else "None")) lines.append("") except KeyError: lines.append("(Node unavailable in the loaded Python graph)") except LookupError: lines.append("(Unavailable because no Python graph has been loaded)") return debugger_cli_common.rich_text_lines_from_rich_line_list(lines) def list_inputs(self, args, screen_info=None): """Command handler for inputs. Show inputs to a given node. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object. """ # Screen info not currently used by this handler. Include this line to # mute pylint. _ = screen_info # TODO(cais): Use screen info to format the output lines more prettily, # e.g., hanging indent of long node names. 
parsed = self._arg_parsers["list_inputs"].parse_args(args) output = self._list_inputs_or_outputs( parsed.recursive, parsed.node_name, parsed.depth, parsed.control, parsed.op_type, do_outputs=False) node_name = debug_graphs.get_node_name(parsed.node_name) _add_main_menu(output, node_name=node_name, enable_list_inputs=False) return output def print_tensor(self, args, screen_info=None): """Command handler for print_tensor. Print value of a given dumped tensor. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object. """ parsed = self._arg_parsers["print_tensor"].parse_args(args) np_printoptions = cli_shared.numpy_printoptions_from_screen_info( screen_info) # Determine if any range-highlighting is required. highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges) tensor_name, tensor_slicing = ( command_parser.parse_tensor_name_with_slicing(parsed.tensor_name)) node_name, output_slot = debug_graphs.parse_node_or_tensor_name(tensor_name) if (self._debug_dump.loaded_partition_graphs() and not self._debug_dump.node_exists(node_name)): output = cli_shared.error( "Node \"%s\" does not exist in partition graphs" % node_name) _add_main_menu( output, node_name=None, enable_list_tensors=True, enable_print_tensor=False) return output watch_keys = self._debug_dump.debug_watch_keys(node_name) if output_slot is None: output_slots = set() for watch_key in watch_keys: output_slots.add(int(watch_key.split(":")[1])) if len(output_slots) == 1: # There is only one dumped tensor from this node, so there is no # ambiguity. Proceed to show the only dumped tensor. output_slot = list(output_slots)[0] else: # There are more than one dumped tensors from this node. Indicate as # such. # TODO(cais): Provide an output screen with command links for # convenience. lines = [ "Node \"%s\" generated debug dumps from %s output slots:" % (node_name, len(output_slots)), "Please specify the output slot: %s:x." % node_name ] output = debugger_cli_common.RichTextLines(lines) _add_main_menu( output, node_name=node_name, enable_list_tensors=True, enable_print_tensor=False) return output # Find debug dump data that match the tensor name (node name + output # slot). matching_data = [] for watch_key in watch_keys: debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key) for datum in debug_tensor_data: if datum.output_slot == output_slot: matching_data.append(datum) if not matching_data: # No dump for this tensor. output = cli_shared.error("Tensor \"%s\" did not generate any dumps." % parsed.tensor_name) elif len(matching_data) == 1: # There is only one dump for this tensor. if parsed.number <= 0: output = cli_shared.format_tensor( matching_data[0].get_tensor(), matching_data[0].watch_key, np_printoptions, print_all=parsed.print_all, tensor_slicing=tensor_slicing, highlight_options=highlight_options, include_numeric_summary=parsed.numeric_summary, write_path=parsed.write_path) else: output = cli_shared.error( "Invalid number (%d) for tensor %s, which generated one dump." % (parsed.number, parsed.tensor_name)) _add_main_menu(output, node_name=node_name, enable_print_tensor=False) else: # There are more than one dumps for this tensor. 
if parsed.number < 0: lines = [ "Tensor \"%s\" generated %d dumps:" % (parsed.tensor_name, len(matching_data)) ] font_attr_segs = {} for i, datum in enumerate(matching_data): rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0 lines.append("#%d [%.3f ms] %s" % (i, rel_time, datum.watch_key)) command = "print_tensor %s -n %d" % (parsed.tensor_name, i) font_attr_segs[len(lines) - 1] = [( len(lines[-1]) - len(datum.watch_key), len(lines[-1]), debugger_cli_common.MenuItem(None, command))] lines.append("") lines.append( "You can use the -n (--number) flag to specify which dump to " "print.") lines.append("For example:") lines.append(" print_tensor %s -n 0" % parsed.tensor_name) output = debugger_cli_common.RichTextLines( lines, font_attr_segs=font_attr_segs) elif parsed.number >= len(matching_data): output = cli_shared.error( "Specified number (%d) exceeds the number of available dumps " "(%d) for tensor %s" % (parsed.number, len(matching_data), parsed.tensor_name)) else: output = cli_shared.format_tensor( matching_data[parsed.number].get_tensor(), matching_data[parsed.number].watch_key + " (dump #%d)" % parsed.number, np_printoptions, print_all=parsed.print_all, tensor_slicing=tensor_slicing, highlight_options=highlight_options, write_path=parsed.write_path) _add_main_menu(output, node_name=node_name, enable_print_tensor=False) return output def list_outputs(self, args, screen_info=None): """Command handler for inputs. Show inputs to a given node. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object. """ # Screen info not currently used by this handler. Include this line to # mute pylint. _ = screen_info # TODO(cais): Use screen info to format the output lines more prettily, # e.g., hanging indent of long node names. parsed = self._arg_parsers["list_outputs"].parse_args(args) output = self._list_inputs_or_outputs( parsed.recursive, parsed.node_name, parsed.depth, parsed.control, parsed.op_type, do_outputs=True) node_name = debug_graphs.get_node_name(parsed.node_name) _add_main_menu(output, node_name=node_name, enable_list_outputs=False) return output def evaluate_expression(self, args, screen_info=None): parsed = self._arg_parsers["eval"].parse_args(args) eval_res = self._evaluator.evaluate(parsed.expression) np_printoptions = cli_shared.numpy_printoptions_from_screen_info( screen_info) return cli_shared.format_tensor( eval_res, "from eval of expression '%s'" % parsed.expression, np_printoptions, print_all=parsed.print_all, include_numeric_summary=True, write_path=parsed.write_path) def _reconstruct_print_source_command(self, parsed, line_begin, max_elements_per_line_increase=0): return "ps %s %s -b %d -m %d" % ( parsed.source_file_path, "-t" if parsed.tensors else "", line_begin, parsed.max_elements_per_line + max_elements_per_line_increase) def print_source(self, args, screen_info=None): """Print the content of a source file.""" del screen_info # Unused. 
parsed = self._arg_parsers["print_source"].parse_args(args) source_annotation = source_utils.annotate_source( self._debug_dump, parsed.source_file_path, do_dumped_tensors=parsed.tensors) source_lines, line_num_width = source_utils.load_source( parsed.source_file_path) labeled_source_lines = [] actual_initial_scroll_target = 0 for i, line in enumerate(source_lines): annotated_line = RL("L%d" % (i + 1), cli_shared.COLOR_YELLOW) annotated_line += " " * (line_num_width - len(annotated_line)) annotated_line += line labeled_source_lines.append(annotated_line) if i + 1 == parsed.line_begin: actual_initial_scroll_target = len(labeled_source_lines) - 1 if i + 1 in source_annotation: sorted_elements = sorted(source_annotation[i + 1]) for k, element in enumerate(sorted_elements): if k >= parsed.max_elements_per_line: omitted_info_line = RL(" (... Omitted %d of %d %s ...) " % ( len(sorted_elements) - parsed.max_elements_per_line, len(sorted_elements), "tensor(s)" if parsed.tensors else "op(s)")) omitted_info_line += RL( "+5", debugger_cli_common.MenuItem( None, self._reconstruct_print_source_command( parsed, i + 1, max_elements_per_line_increase=5))) labeled_source_lines.append(omitted_info_line) break label = RL(" " * 4) if self._debug_dump.debug_watch_keys( debug_graphs.get_node_name(element)): attribute = debugger_cli_common.MenuItem("", "pt %s" % element) else: attribute = cli_shared.COLOR_BLUE label += RL(element, attribute) labeled_source_lines.append(label) output = debugger_cli_common.rich_text_lines_from_rich_line_list( labeled_source_lines, annotations={debugger_cli_common.INIT_SCROLL_POS_KEY: actual_initial_scroll_target}) _add_main_menu(output, node_name=None) return output def _make_source_table(self, source_list, is_tf_py_library): """Make a table summarizing the source files that create nodes and tensors. Args: source_list: List of source files and related information as a list of tuples (file_path, is_tf_library, num_nodes, num_tensors, num_dumps, first_line). is_tf_py_library: (`bool`) whether this table is for files that belong to the TensorFlow Python library. Returns: The table as a `debugger_cli_common.RichTextLines` object. """ path_head = "Source file path" num_nodes_head = "#(nodes)" num_tensors_head = "#(tensors)" num_dumps_head = "#(tensor dumps)" if is_tf_py_library: # Use color to mark files that are guessed to belong to TensorFlow Python # library. 
color = cli_shared.COLOR_GRAY lines = [RL("TensorFlow Python library file(s):", color)] else: color = cli_shared.COLOR_WHITE lines = [RL("File(s) outside TensorFlow Python library:", color)] if not source_list: lines.append(RL("[No files.]")) lines.append(RL()) return debugger_cli_common.rich_text_lines_from_rich_line_list(lines) path_column_width = max( max(len(item[0]) for item in source_list), len(path_head)) + 1 num_nodes_column_width = max( max(len(str(item[2])) for item in source_list), len(num_nodes_head)) + 1 num_tensors_column_width = max( max(len(str(item[3])) for item in source_list), len(num_tensors_head)) + 1 head = RL(path_head + " " * (path_column_width - len(path_head)), color) head += RL(num_nodes_head + " " * ( num_nodes_column_width - len(num_nodes_head)), color) head += RL(num_tensors_head + " " * ( num_tensors_column_width - len(num_tensors_head)), color) head += RL(num_dumps_head, color) lines.append(head) for (file_path, _, num_nodes, num_tensors, num_dumps, first_line_num) in source_list: path_attributes = [color] if source_utils.is_extension_uncompiled_python_source(file_path): path_attributes.append( debugger_cli_common.MenuItem(None, "ps %s -b %d" % (file_path, first_line_num))) line = RL(file_path, path_attributes) line += " " * (path_column_width - len(line)) line += RL( str(num_nodes) + " " * (num_nodes_column_width - len(str(num_nodes))), color) line += RL( str(num_tensors) + " " * (num_tensors_column_width - len(str(num_tensors))), color) line += RL(str(num_dumps), color) lines.append(line) lines.append(RL()) return debugger_cli_common.rich_text_lines_from_rich_line_list(lines) def list_source(self, args, screen_info=None): """List Python source files that constructed nodes and tensors.""" del screen_info # Unused. parsed = self._arg_parsers["list_source"].parse_args(args) source_list = source_utils.list_source_files_against_dump( self._debug_dump, path_regex_allowlist=parsed.path_filter, node_name_regex_allowlist=parsed.node_name_filter) top_lines = [ RL("List of source files that created nodes in this run", "bold")] if parsed.path_filter: top_lines.append( RL("File path regex filter: \"%s\"" % parsed.path_filter)) if parsed.node_name_filter: top_lines.append( RL("Node name regex filter: \"%s\"" % parsed.node_name_filter)) top_lines.append(RL()) output = debugger_cli_common.rich_text_lines_from_rich_line_list(top_lines) if not source_list: output.append("[No source file information.]") return output output.extend(self._make_source_table( [item for item in source_list if not item[1]], False)) output.extend(self._make_source_table( [item for item in source_list if item[1]], True)) _add_main_menu(output, node_name=None) return output def _list_inputs_or_outputs(self, recursive, node_name, depth, control, op_type, do_outputs=False): """Helper function used by list_inputs and list_outputs. Format a list of lines to display the inputs or output recipients of a given node. Args: recursive: Whether the listing is to be done recursively, as a boolean. node_name: The name of the node in question, as a str. depth: Maximum recursion depth, applies only if recursive == True, as an int. control: Whether control inputs or control recipients are included, as a boolean. op_type: Whether the op types of the nodes are to be included, as a boolean. do_outputs: Whether recipients, instead of input nodes are to be listed, as a boolean. Returns: Input or recipient tree formatted as a RichTextLines object. 
""" if do_outputs: tracker = self._debug_dump.node_recipients type_str = "Recipients of" short_type_str = "recipients" else: tracker = self._debug_dump.node_inputs type_str = "Inputs to" short_type_str = "inputs" lines = [] font_attr_segs = {} # Check if this is a tensor name, instead of a node name. node_name, _ = debug_graphs.parse_node_or_tensor_name(node_name) # Check if node exists. if not self._debug_dump.node_exists(node_name): return cli_shared.error( "There is no node named \"%s\" in the partition graphs" % node_name) if recursive: max_depth = depth else: max_depth = 1 if control: include_ctrls_str = ", control %s included" % short_type_str else: include_ctrls_str = "" line = "%s node \"%s\"" % (type_str, node_name) font_attr_segs[0] = [(len(line) - 1 - len(node_name), len(line) - 1, "bold") ] lines.append(line + " (Depth limit = %d%s):" % (max_depth, include_ctrls_str )) command_template = "lo -c -r %s" if do_outputs else "li -c -r %s" self._dfs_from_node( lines, font_attr_segs, node_name, tracker, max_depth, 1, [], control, op_type, command_template=command_template) # Include legend. lines.append("") lines.append("Legend:") lines.append(" (d): recursion depth = d.") if control: lines.append(" (Ctrl): Control input.") if op_type: lines.append(" [Op]: Input node has op type Op.") # TODO(cais): Consider appending ":0" at the end of 1st outputs of nodes. return debugger_cli_common.RichTextLines( lines, font_attr_segs=font_attr_segs) def _dfs_from_node(self, lines, attr_segs, node_name, tracker, max_depth, depth, unfinished, include_control=False, show_op_type=False, command_template=None): """Perform depth-first search (DFS) traversal of a node's input tree. It recursively tracks the inputs (or output recipients) of the node called node_name, and append these inputs (or output recipients) to a list of text lines (lines) with proper indentation that reflects the recursion depth, together with some formatting attributes (to attr_segs). The formatting attributes can include command shortcuts, for example. Args: lines: Text lines to append to, as a list of str. attr_segs: (dict) Attribute segments dictionary to append to. node_name: Name of the node, as a str. This arg is updated during the recursion. tracker: A callable that takes one str as the node name input and returns a list of str as the inputs/outputs. This makes it this function general enough to be used with both node-input and node-output tracking. max_depth: Maximum recursion depth, as an int. depth: Current recursion depth. This arg is updated during the recursion. unfinished: A stack of unfinished recursion depths, as a list of int. include_control: Whether control dependencies are to be included as inputs (and marked as such). show_op_type: Whether op type of the input nodes are to be displayed alongside the nodes' names. command_template: (str) Template for command shortcut of the node names. """ # Make a shallow copy of the list because it may be extended later. all_inputs = self._exclude_denylisted_ops( copy.copy(tracker(node_name, is_control=False))) is_ctrl = [False] * len(all_inputs) if include_control: # Sort control inputs or recipients in alphabetical order of the node # names. ctrl_inputs = self._exclude_denylisted_ops( sorted(tracker(node_name, is_control=True))) all_inputs.extend(ctrl_inputs) is_ctrl.extend([True] * len(ctrl_inputs)) if not all_inputs: if depth == 1: lines.append(" [None]") return unfinished.append(depth) # Create depth-dependent hanging indent for the line. 
hang = "" for k in range(depth): if k < depth - 1: if k + 1 in unfinished: hang += HANG_UNFINISHED else: hang += HANG_FINISHED else: hang += HANG_SUFFIX if all_inputs and depth > max_depth: lines.append(hang + ELLIPSIS) unfinished.pop() return hang += DEPTH_TEMPLATE % depth for i, inp in enumerate(all_inputs): op_type = self._debug_dump.node_op_type(debug_graphs.get_node_name(inp)) if op_type in self._GRAPH_STRUCT_OP_TYPE_DENYLIST: continue if is_ctrl[i]: ctrl_str = CTRL_LABEL else: ctrl_str = "" op_type_str = "" if show_op_type: op_type_str = OP_TYPE_TEMPLATE % op_type if i == len(all_inputs) - 1: unfinished.pop() line = hang + ctrl_str + op_type_str + inp lines.append(line) if command_template: attr_segs[len(lines) - 1] = [( len(line) - len(inp), len(line), debugger_cli_common.MenuItem(None, command_template % inp))] # Recursive call. # The input's/output's name can be a tensor name, in the case of node # with >1 output slots. inp_node_name, _ = debug_graphs.parse_node_or_tensor_name(inp) self._dfs_from_node( lines, attr_segs, inp_node_name, tracker, max_depth, depth + 1, unfinished, include_control=include_control, show_op_type=show_op_type, command_template=command_template) def _format_neighbors(self, neighbor_type, non_ctrls, ctrls): """List neighbors (inputs or recipients) of a node. Args: neighbor_type: ("input" | "recipient") non_ctrls: Non-control neighbor node names, as a list of str. ctrls: Control neighbor node names, as a list of str. Returns: A RichTextLines object. """ # TODO(cais): Return RichTextLines instead, to allow annotation of node # names. lines = [] font_attr_segs = {} lines.append("") lines.append(" %d %s(s) + %d control %s(s):" % (len(non_ctrls), neighbor_type, len(ctrls), neighbor_type)) lines.append(" %d %s(s):" % (len(non_ctrls), neighbor_type)) for non_ctrl in non_ctrls: line = " [%s] %s" % (self._debug_dump.node_op_type(non_ctrl), non_ctrl) lines.append(line) font_attr_segs[len(lines) - 1] = [( len(line) - len(non_ctrl), len(line), debugger_cli_common.MenuItem(None, "ni -a -d -t %s" % non_ctrl))] if ctrls: lines.append("") lines.append(" %d control %s(s):" % (len(ctrls), neighbor_type)) for ctrl in ctrls: line = " [%s] %s" % (self._debug_dump.node_op_type(ctrl), ctrl) lines.append(line) font_attr_segs[len(lines) - 1] = [( len(line) - len(ctrl), len(line), debugger_cli_common.MenuItem(None, "ni -a -d -t %s" % ctrl))] return debugger_cli_common.RichTextLines( lines, font_attr_segs=font_attr_segs) def _list_node_attributes(self, node_name): """List neighbors (inputs or recipients) of a node. Args: node_name: Name of the node of which the attributes are to be listed. Returns: A RichTextLines object. """ lines = [] lines.append("") lines.append("Node attributes:") attrs = self._debug_dump.node_attributes(node_name) for attr_key in attrs: lines.append(" %s:" % attr_key) attr_val_str = repr(attrs[attr_key]).strip().replace("\n", " ") lines.append(" %s" % attr_val_str) lines.append("") return debugger_cli_common.RichTextLines(lines) def _list_node_dumps(self, node_name): """List dumped tensor data from a node. Args: node_name: Name of the node of which the attributes are to be listed. Returns: A RichTextLines object. 
""" lines = [] font_attr_segs = {} watch_keys = self._debug_dump.debug_watch_keys(node_name) dump_count = 0 for watch_key in watch_keys: debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key) for datum in debug_tensor_data: line = " Slot %d @ %s @ %.3f ms" % ( datum.output_slot, datum.debug_op, (datum.timestamp - self._debug_dump.t0) / 1000.0) lines.append(line) command = "pt %s:%d -n %d" % (node_name, datum.output_slot, dump_count) font_attr_segs[len(lines) - 1] = [( 2, len(line), debugger_cli_common.MenuItem(None, command))] dump_count += 1 output = debugger_cli_common.RichTextLines( lines, font_attr_segs=font_attr_segs) output_with_header = debugger_cli_common.RichTextLines( ["%d dumped tensor(s):" % dump_count, ""]) output_with_header.extend(output) return output_with_header def create_analyzer_ui(debug_dump, tensor_filters=None, ui_type="readline", on_ui_exit=None, config=None): """Create an instance of ReadlineUI based on a DebugDumpDir object. Args: debug_dump: (debug_data.DebugDumpDir) The debug dump to use. tensor_filters: (dict) A dict mapping tensor filter name (str) to tensor filter (Callable). ui_type: (str) requested UI type, only "readline" is supported. on_ui_exit: (`Callable`) the callback to be called when the UI exits. config: A `cli_config.CLIConfig` object. Returns: (base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer commands and tab-completions registered. """ if config is None: config = cli_config.CLIConfig() analyzer = DebugAnalyzer(debug_dump, config=config) if tensor_filters: for tensor_filter_name in tensor_filters: analyzer.add_tensor_filter( tensor_filter_name, tensor_filters[tensor_filter_name]) cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit, config=config) cli.register_command_handler( "list_tensors", analyzer.list_tensors, analyzer.get_help("list_tensors"), prefix_aliases=["lt"]) cli.register_command_handler( "node_info", analyzer.node_info, analyzer.get_help("node_info"), prefix_aliases=["ni"]) cli.register_command_handler( "list_inputs", analyzer.list_inputs, analyzer.get_help("list_inputs"), prefix_aliases=["li"]) cli.register_command_handler( "list_outputs", analyzer.list_outputs, analyzer.get_help("list_outputs"), prefix_aliases=["lo"]) cli.register_command_handler( "print_tensor", analyzer.print_tensor, analyzer.get_help("print_tensor"), prefix_aliases=["pt"]) cli.register_command_handler( "print_source", analyzer.print_source, analyzer.get_help("print_source"), prefix_aliases=["ps"]) cli.register_command_handler( "list_source", analyzer.list_source, analyzer.get_help("list_source"), prefix_aliases=["ls"]) cli.register_command_handler( "eval", analyzer.evaluate_expression, analyzer.get_help("eval"), prefix_aliases=["ev"]) dumped_tensor_names = [] for datum in debug_dump.dumped_tensor_data: dumped_tensor_names.append("%s:%d" % (datum.node_name, datum.output_slot)) # Tab completions for command "print_tensors". cli.register_tab_comp_context(["print_tensor", "pt"], dumped_tensor_names) return cli
DebugAnalyzer
python
kamyu104__LeetCode-Solutions
Python/replace-the-substring-for-balanced-string.py
{ "start": 50, "end": 563 }
class ____(object):
    def balancedString(self, s):
        """
        :type s: str
        :rtype: int
        """
        count = collections.Counter(s)
        result = len(s)
        left = 0
        # Sliding window: extend to the right, then shrink from the left while
        # every remaining character count is within the balanced quota len(s) // 4.
        for right in range(len(s)):
            count[s[right]] -= 1
            while left < len(s) and \
                    all(v <= len(s)//4 for v in count.values()):
                result = min(result, right-left+1)
                count[s[left]] += 1
                left += 1
        return result
Solution
python
openai__openai-python
src/openai/types/fine_tuning/dpo_hyperparameters_param.py
{ "start": 238, "end": 1048 }
class ____(TypedDict, total=False): batch_size: Union[Literal["auto"], int] """Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance. """ beta: Union[Literal["auto"], float] """The beta value for the DPO method. A higher beta value will increase the weight of the penalty between the policy and reference model. """ learning_rate_multiplier: Union[Literal["auto"], float] """Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting. """ n_epochs: Union[Literal["auto"], int] """The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. """
DpoHyperparametersParam
python
pandas-dev__pandas
pandas/core/window/ewm.py
{ "start": 3788, "end": 29117 }
class ____(BaseWindow): r""" Provide exponentially weighted (EW) calculations. Exactly one of ``com``, ``span``, ``halflife``, or ``alpha`` must be provided if ``times`` is not provided. If ``times`` is provided and ``adjust=True``, ``halflife`` and one of ``com``, ``span`` or ``alpha`` may be provided. If ``times`` is provided and ``adjust=False``, ``halflife`` must be the only provided decay-specification parameter. Parameters ---------- com : float, optional Specify decay in terms of center of mass :math:`\alpha = 1 / (1 + com)`, for :math:`com \geq 0`. span : float, optional Specify decay in terms of span :math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`. halflife : float, str, timedelta, optional Specify decay in terms of half-life :math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for :math:`halflife > 0`. If ``times`` is specified, a timedelta convertible unit over which an observation decays to half its value. Only applicable to ``mean()``, and halflife value will not apply to the other functions. alpha : float, optional Specify smoothing factor :math:`\alpha` directly :math:`0 < \alpha \leq 1`. min_periods : int, default 0 Minimum number of observations in window required to have a value; otherwise, result is ``np.nan``. adjust : bool, default True Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings (viewing EWMA as a moving average). - When ``adjust=True`` (default), the EW function is calculated using weights :math:`w_i = (1 - \alpha)^i`. For example, the EW moving average of the series [:math:`x_0, x_1, ..., x_t`] would be: .. math:: y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ... + (1 - \alpha)^t x_0}{1 + (1 - \alpha) + (1 - \alpha)^2 + ... + (1 - \alpha)^t} - When ``adjust=False``, the exponentially weighted function is calculated recursively: .. math:: \begin{split} y_0 &= x_0\\ y_t &= (1 - \alpha) y_{t-1} + \alpha x_t, \end{split} ignore_na : bool, default False Ignore missing values when calculating weights. - When ``ignore_na=False`` (default), weights are based on absolute positions. For example, the weights of :math:`x_0` and :math:`x_2` used in calculating the final weighted average of [:math:`x_0`, None, :math:`x_2`] are :math:`(1-\alpha)^2` and :math:`1` if ``adjust=True``, and :math:`(1-\alpha)^2` and :math:`\alpha` if ``adjust=False``. - When ``ignore_na=True``, weights are based on relative positions. For example, the weights of :math:`x_0` and :math:`x_2` used in calculating the final weighted average of [:math:`x_0`, None, :math:`x_2`] are :math:`1-\alpha` and :math:`1` if ``adjust=True``, and :math:`1-\alpha` and :math:`\alpha` if ``adjust=False``. times : np.ndarray, Series, default None Only applicable to ``mean()``. Times corresponding to the observations. Must be monotonically increasing and ``datetime64[ns]`` dtype. If 1-D array like, a sequence with the same shape as the observations. method : str {'single', 'table'}, default 'single' Execute the rolling operation per single column or row (``'single'``) or over the entire object (``'table'``). This argument is only implemented when specifying ``engine='numba'`` in the method call. Only applicable to ``mean()`` Returns ------- pandas.api.typing.ExponentialMovingWindow An instance of ExponentialMovingWindow for further exponentially weighted (EW) calculations, e.g. using the ``mean`` method. See Also -------- rolling : Provides rolling window calculations. expanding : Provides expanding transformations. 
Notes ----- See :ref:`Windowing Operations <window.exponentially_weighted>` for further usage details and examples. Examples -------- >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) >>> df B 0 0.0 1 1.0 2 2.0 3 NaN 4 4.0 >>> df.ewm(com=0.5).mean() B 0 0.000000 1 0.750000 2 1.615385 3 1.615385 4 3.670213 >>> df.ewm(alpha=2 / 3).mean() B 0 0.000000 1 0.750000 2 1.615385 3 1.615385 4 3.670213 **adjust** >>> df.ewm(com=0.5, adjust=True).mean() B 0 0.000000 1 0.750000 2 1.615385 3 1.615385 4 3.670213 >>> df.ewm(com=0.5, adjust=False).mean() B 0 0.000000 1 0.666667 2 1.555556 3 1.555556 4 3.650794 **ignore_na** >>> df.ewm(com=0.5, ignore_na=True).mean() B 0 0.000000 1 0.750000 2 1.615385 3 1.615385 4 3.225000 >>> df.ewm(com=0.5, ignore_na=False).mean() B 0 0.000000 1 0.750000 2 1.615385 3 1.615385 4 3.670213 **times** Exponentially weighted mean with weights calculated with a timedelta ``halflife`` relative to ``times``. >>> times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17'] >>> df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean() B 0 0.000000 1 0.585786 2 1.523889 3 1.523889 4 3.233686 """ _attributes = [ "com", "span", "halflife", "alpha", "min_periods", "adjust", "ignore_na", "times", "method", ] def __init__( self, obj: NDFrame, com: float | None = None, span: float | None = None, halflife: float | TimedeltaConvertibleTypes | None = None, alpha: float | None = None, min_periods: int | None = 0, adjust: bool = True, ignore_na: bool = False, times: np.ndarray | NDFrame | None = None, method: str = "single", *, selection=None, ) -> None: super().__init__( obj=obj, min_periods=1 if min_periods is None else max(int(min_periods), 1), on=None, center=False, closed=None, method=method, selection=selection, ) self.com = com self.span = span self.halflife = halflife self.alpha = alpha self.adjust = adjust self.ignore_na = ignore_na self.times = times if self.times is not None: times_dtype = getattr(self.times, "dtype", None) if not ( is_datetime64_dtype(times_dtype) or isinstance(times_dtype, DatetimeTZDtype) ): raise ValueError("times must be datetime64 dtype.") if len(self.times) != len(obj): raise ValueError("times must be the same length as the object.") if not isinstance(self.halflife, (str, datetime.timedelta, np.timedelta64)): raise ValueError("halflife must be a timedelta convertible object") if isna(self.times).any(): raise ValueError("Cannot convert NaT values to integer") self._deltas = _calculate_deltas(self.times, self.halflife) # Halflife is no longer applicable when calculating COM # But allow COM to still be calculated if the user passes other decay args if common.count_not_none(self.com, self.span, self.alpha) > 0: if not self.adjust: raise NotImplementedError( "None of com, span, or alpha can be specified if " "times is provided and adjust=False" ) self._com = get_center_of_mass(self.com, self.span, None, self.alpha) else: self._com = 1.0 else: if self.halflife is not None and isinstance( self.halflife, (str, datetime.timedelta, np.timedelta64) ): raise ValueError( "halflife can only be a timedelta convertible argument if " "times is not None." 
) # Without times, points are equally spaced self._deltas = np.ones(max(self.obj.shape[0] - 1, 0), dtype=np.float64) self._com = get_center_of_mass( # error: Argument 3 to "get_center_of_mass" has incompatible type # "Union[float, Any, None, timedelta64, signedinteger[_64Bit]]"; # expected "Optional[float]" self.com, self.span, self.halflife, # type: ignore[arg-type] self.alpha, ) def _check_window_bounds( self, start: np.ndarray, end: np.ndarray, num_vals: int ) -> None: # emw algorithms are iterative with each point # ExponentialMovingWindowIndexer "bounds" are the entire window pass def _get_window_indexer(self) -> BaseIndexer: """ Return an indexer class that will compute the window start and end bounds """ return ExponentialMovingWindowIndexer() def online( self, engine: str = "numba", engine_kwargs=None ) -> OnlineExponentialMovingWindow: """ Return an ``OnlineExponentialMovingWindow`` object to calculate exponentially moving window aggregations in an online method. Parameters ---------- engine: str, default ``'numba'`` Execution engine to calculate online aggregations. Applies to all supported aggregation methods. engine_kwargs : dict, default None Applies to all supported aggregation methods. * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` and ``parallel`` dictionary keys. The values must either be ``True`` or ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be applied to the function Returns ------- OnlineExponentialMovingWindow """ return OnlineExponentialMovingWindow( obj=self.obj, com=self.com, span=self.span, halflife=self.halflife, alpha=self.alpha, min_periods=self.min_periods, adjust=self.adjust, ignore_na=self.ignore_na, times=self.times, engine=engine, engine_kwargs=engine_kwargs, selection=self._selection, ) @doc( _shared_docs["aggregate"], see_also=dedent( """ See Also -------- pandas.DataFrame.rolling.aggregate """ ), examples=dedent( """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) >>> df A B C 0 1 4 7 1 2 5 8 2 3 6 9 >>> df.ewm(alpha=0.5).mean() A B C 0 1.000000 4.000000 7.000000 1 1.666667 4.666667 7.666667 2 2.428571 5.428571 8.428571 """ ), klass="Series/Dataframe", axis="", ) def aggregate(self, func=None, *args, **kwargs): return super().aggregate(func, *args, **kwargs) agg = aggregate @doc( template_header, create_section_header("Parameters"), kwargs_numeric_only, window_agg_numba_parameters(), create_section_header("Returns"), template_returns, create_section_header("See Also"), template_see_also, create_section_header("Notes"), numba_notes, create_section_header("Examples"), dedent( """\ >>> ser = pd.Series([1, 2, 3, 4]) >>> ser.ewm(alpha=.2).mean() 0 1.000000 1 1.555556 2 2.147541 3 2.775068 dtype: float64 """ ), window_method="ewm", aggregation_description="(exponential weighted moment) mean", agg_method="mean", ) def mean( self, numeric_only: bool = False, engine=None, engine_kwargs=None, ): if maybe_use_numba(engine): if self.method == "single": func = generate_numba_ewm_func else: func = generate_numba_ewm_table_func ewm_func = func( **get_jit_arguments(engine_kwargs), com=self._com, adjust=self.adjust, ignore_na=self.ignore_na, deltas=tuple(self._deltas), normalize=True, ) return self._apply(ewm_func, name="mean") elif engine in ("cython", None): if engine_kwargs is not None: raise ValueError("cython engine does not accept engine_kwargs") deltas = None if self.times is None else self._deltas 
window_func = partial( window_aggregations.ewm, com=self._com, adjust=self.adjust, ignore_na=self.ignore_na, deltas=deltas, normalize=True, ) return self._apply(window_func, name="mean", numeric_only=numeric_only) else: raise ValueError("engine must be either 'numba' or 'cython'") @doc( template_header, create_section_header("Parameters"), kwargs_numeric_only, window_agg_numba_parameters(), create_section_header("Returns"), template_returns, create_section_header("See Also"), template_see_also, create_section_header("Notes"), numba_notes, create_section_header("Examples"), dedent( """\ >>> ser = pd.Series([1, 2, 3, 4]) >>> ser.ewm(alpha=.2).sum() 0 1.000 1 2.800 2 5.240 3 8.192 dtype: float64 """ ), window_method="ewm", aggregation_description="(exponential weighted moment) sum", agg_method="sum", ) def sum( self, numeric_only: bool = False, engine=None, engine_kwargs=None, ): if not self.adjust: raise NotImplementedError("sum is not implemented with adjust=False") if self.times is not None: raise NotImplementedError("sum is not implemented with times") if maybe_use_numba(engine): if self.method == "single": func = generate_numba_ewm_func else: func = generate_numba_ewm_table_func ewm_func = func( **get_jit_arguments(engine_kwargs), com=self._com, adjust=self.adjust, ignore_na=self.ignore_na, deltas=tuple(self._deltas), normalize=False, ) return self._apply(ewm_func, name="sum") elif engine in ("cython", None): if engine_kwargs is not None: raise ValueError("cython engine does not accept engine_kwargs") deltas = None if self.times is None else self._deltas window_func = partial( window_aggregations.ewm, com=self._com, adjust=self.adjust, ignore_na=self.ignore_na, deltas=deltas, normalize=False, ) return self._apply(window_func, name="sum", numeric_only=numeric_only) else: raise ValueError("engine must be either 'numba' or 'cython'") @doc( template_header, create_section_header("Parameters"), dedent( """\ bias : bool, default False Use a standard estimation bias correction. """ ), kwargs_numeric_only, create_section_header("Returns"), template_returns, create_section_header("See Also"), template_see_also, create_section_header("Examples"), dedent( """\ >>> ser = pd.Series([1, 2, 3, 4]) >>> ser.ewm(alpha=.2).std() 0 NaN 1 0.707107 2 0.995893 3 1.277320 dtype: float64 """ ), window_method="ewm", aggregation_description="(exponential weighted moment) standard deviation", agg_method="std", ) def std(self, bias: bool = False, numeric_only: bool = False): if ( numeric_only and self._selected_obj.ndim == 1 and not is_numeric_dtype(self._selected_obj.dtype) ): # Raise directly so error message says std instead of var raise NotImplementedError( f"{type(self).__name__}.std does not implement numeric_only" ) if self.times is not None: raise NotImplementedError("std is not implemented with times") return zsqrt(self.var(bias=bias, numeric_only=numeric_only)) @doc( template_header, create_section_header("Parameters"), dedent( """\ bias : bool, default False Use a standard estimation bias correction. 
""" ), kwargs_numeric_only, create_section_header("Returns"), template_returns, create_section_header("See Also"), template_see_also, create_section_header("Examples"), dedent( """\ >>> ser = pd.Series([1, 2, 3, 4]) >>> ser.ewm(alpha=.2).var() 0 NaN 1 0.500000 2 0.991803 3 1.631547 dtype: float64 """ ), window_method="ewm", aggregation_description="(exponential weighted moment) variance", agg_method="var", ) def var(self, bias: bool = False, numeric_only: bool = False): if self.times is not None: raise NotImplementedError("var is not implemented with times") window_func = window_aggregations.ewmcov wfunc = partial( window_func, com=self._com, adjust=self.adjust, ignore_na=self.ignore_na, bias=bias, ) def var_func(values, begin, end, min_periods): return wfunc(values, begin, end, min_periods, values) return self._apply(var_func, name="var", numeric_only=numeric_only) @doc( template_header, create_section_header("Parameters"), dedent( """\ other : Series or DataFrame , optional If not supplied then will default to self and produce pairwise output. pairwise : bool, default None If False then only matching columns between self and other will be used and the output will be a DataFrame. If True then all pairwise combinations will be calculated and the output will be a MultiIndex DataFrame in the case of DataFrame inputs. In the case of missing elements, only complete pairwise observations will be used. bias : bool, default False Use a standard estimation bias correction. """ ), kwargs_numeric_only, create_section_header("Returns"), template_returns, create_section_header("See Also"), template_see_also, create_section_header("Examples"), dedent( """\ >>> ser1 = pd.Series([1, 2, 3, 4]) >>> ser2 = pd.Series([10, 11, 13, 16]) >>> ser1.ewm(alpha=.2).cov(ser2) 0 NaN 1 0.500000 2 1.524590 3 3.408836 dtype: float64 """ ), window_method="ewm", aggregation_description="(exponential weighted moment) sample covariance", agg_method="cov", ) def cov( self, other: DataFrame | Series | None = None, pairwise: bool | None = None, bias: bool = False, numeric_only: bool = False, ): if self.times is not None: raise NotImplementedError("cov is not implemented with times") from pandas import Series self._validate_numeric_only("cov", numeric_only) def cov_func(x, y): x_array = self._prep_values(x) y_array = self._prep_values(y) window_indexer = self._get_window_indexer() min_periods = ( self.min_periods if self.min_periods is not None else window_indexer.window_size ) start, end = window_indexer.get_window_bounds( num_values=len(x_array), min_periods=min_periods, center=self.center, closed=self.closed, step=self.step, ) result = window_aggregations.ewmcov( x_array, start, end, # error: Argument 4 to "ewmcov" has incompatible type # "Optional[int]"; expected "int" self.min_periods, # type: ignore[arg-type] y_array, self._com, self.adjust, self.ignore_na, bias, ) return Series(result, index=x.index, name=x.name, copy=False) return self._apply_pairwise( self._selected_obj, other, pairwise, cov_func, numeric_only ) @doc( template_header, create_section_header("Parameters"), dedent( """\ other : Series or DataFrame, optional If not supplied then will default to self and produce pairwise output. pairwise : bool, default None If False then only matching columns between self and other will be used and the output will be a DataFrame. If True then all pairwise combinations will be calculated and the output will be a MultiIndex DataFrame in the case of DataFrame inputs. 
In the case of missing elements, only complete pairwise observations will be used. """ ), kwargs_numeric_only, create_section_header("Returns"), template_returns, create_section_header("See Also"), template_see_also, create_section_header("Examples"), dedent( """\ >>> ser1 = pd.Series([1, 2, 3, 4]) >>> ser2 = pd.Series([10, 11, 13, 16]) >>> ser1.ewm(alpha=.2).corr(ser2) 0 NaN 1 1.000000 2 0.982821 3 0.977802 dtype: float64 """ ), window_method="ewm", aggregation_description="(exponential weighted moment) sample correlation", agg_method="corr", ) def corr( self, other: DataFrame | Series | None = None, pairwise: bool | None = None, numeric_only: bool = False, ): if self.times is not None: raise NotImplementedError("corr is not implemented with times") from pandas import Series self._validate_numeric_only("corr", numeric_only) def cov_func(x, y): x_array = self._prep_values(x) y_array = self._prep_values(y) window_indexer = self._get_window_indexer() min_periods = ( self.min_periods if self.min_periods is not None else window_indexer.window_size ) start, end = window_indexer.get_window_bounds( num_values=len(x_array), min_periods=min_periods, center=self.center, closed=self.closed, step=self.step, ) def _cov(X, Y): return window_aggregations.ewmcov( X, start, end, min_periods, Y, self._com, self.adjust, self.ignore_na, True, ) with np.errstate(all="ignore"): cov = _cov(x_array, y_array) x_var = _cov(x_array, x_array) y_var = _cov(y_array, y_array) result = cov / zsqrt(x_var * y_var) return Series(result, index=x.index, name=x.name, copy=False) return self._apply_pairwise( self._selected_obj, other, pairwise, cov_func, numeric_only ) @set_module("pandas.api.typing")
ExponentialMovingWindow
python
django__django
tests/auth_tests/test_models.py
{ "start": 2027, "end": 2324 }
class ____(TestCase): fixtures = ["natural.json"] def test_user_is_created_and_added_to_group(self): user = User.objects.get(username="my_username") group = Group.objects.get(name="my_group") self.assertEqual(group, user.groups.get())
LoadDataWithNaturalKeysTestCase
python
doocs__leetcode
solution/2100-2199/2109.Adding Spaces to a String/Solution.py
{ "start": 0, "end": 300 }
class ____:
    def addSpaces(self, s: str, spaces: List[int]) -> str:
        # Single pass over s: emit a space whenever the current index matches
        # the next entry in the (sorted) spaces list, then emit the character.
        ans = []
        j = 0
        for i, c in enumerate(s):
            if j < len(spaces) and i == spaces[j]:
                ans.append(' ')
                j += 1
            ans.append(c)
        return ''.join(ans)
Solution
python
eventlet__eventlet
eventlet/db_pool.py
{ "start": 380, "end": 9557 }
class ____(Pool): def __init__(self, db_module, min_size=0, max_size=4, max_idle=10, max_age=30, connect_timeout=5, cleanup=cleanup_rollback, *args, **kwargs): """ Constructs a pool with at least *min_size* connections and at most *max_size* connections. Uses *db_module* to construct new connections. The *max_idle* parameter determines how long pooled connections can remain idle, in seconds. After *max_idle* seconds have elapsed without the connection being used, the pool closes the connection. *max_age* is how long any particular connection is allowed to live. Connections that have been open for longer than *max_age* seconds are closed, regardless of idle time. If *max_age* is 0, all connections are closed on return to the pool, reducing it to a concurrency limiter. *connect_timeout* is the duration in seconds that the pool will wait before timing out on connect() to the database. If triggered, the timeout will raise a ConnectTimeout from get(). The remainder of the arguments are used as parameters to the *db_module*'s connection constructor. """ assert(db_module) self._db_module = db_module self._args = args self._kwargs = kwargs self.max_idle = max_idle self.max_age = max_age self.connect_timeout = connect_timeout self._expiration_timer = None self.cleanup = cleanup super().__init__(min_size=min_size, max_size=max_size, order_as_stack=True) def _schedule_expiration(self): """Sets up a timer that will call _expire_old_connections when the oldest connection currently in the free pool is ready to expire. This is the earliest possible time that a connection could expire, thus, the timer will be running as infrequently as possible without missing a possible expiration. If this function is called when a timer is already scheduled, it does nothing. If max_age or max_idle is 0, _schedule_expiration likewise does nothing. """ if self.max_age == 0 or self.max_idle == 0: # expiration is unnecessary because all connections will be expired # on put return if (self._expiration_timer is not None and not getattr(self._expiration_timer, 'called', False)): # the next timer is already scheduled return try: now = time.time() self._expire_old_connections(now) # the last item in the list, because of the stack ordering, # is going to be the most-idle idle_delay = (self.free_items[-1][0] - now) + self.max_idle oldest = min([t[1] for t in self.free_items]) age_delay = (oldest - now) + self.max_age next_delay = min(idle_delay, age_delay) except (IndexError, ValueError): # no free items, unschedule ourselves self._expiration_timer = None return if next_delay > 0: # set up a continuous self-calling loop self._expiration_timer = Timer(next_delay, GreenThread(hubs.get_hub().greenlet).switch, self._schedule_expiration, [], {}) self._expiration_timer.schedule() def _expire_old_connections(self, now): """Iterates through the open connections contained in the pool, closing ones that have remained idle for longer than max_idle seconds, or have been in existence for longer than max_age seconds. *now* is the current time, as returned by time.time(). 
""" original_count = len(self.free_items) expired = [ conn for last_used, created_at, conn in self.free_items if self._is_expired(now, last_used, created_at)] new_free = [ (last_used, created_at, conn) for last_used, created_at, conn in self.free_items if not self._is_expired(now, last_used, created_at)] self.free_items.clear() self.free_items.extend(new_free) # adjust the current size counter to account for expired # connections self.current_size -= original_count - len(self.free_items) for conn in expired: self._safe_close(conn, quiet=True) def _is_expired(self, now, last_used, created_at): """Returns true and closes the connection if it's expired. """ if (self.max_idle <= 0 or self.max_age <= 0 or now - last_used > self.max_idle or now - created_at > self.max_age): return True return False def _unwrap_connection(self, conn): """If the connection was wrapped by a subclass of BaseConnectionWrapper and is still functional (as determined by the __nonzero__, or __bool__ in python3, method), returns the unwrapped connection. If anything goes wrong with this process, returns None. """ base = None try: if conn: base = conn._base conn._destroy() else: base = None except AttributeError: pass return base def _safe_close(self, conn, quiet=False): """Closes the (already unwrapped) connection, squelching any exceptions. """ try: conn.close() except AttributeError: pass # conn is None, or junk except Exception: if not quiet: print("Connection.close raised: %s" % (sys.exc_info()[1])) def get(self): conn = super().get() # None is a flag value that means that put got called with # something it couldn't use if conn is None: try: conn = self.create() except Exception: # unconditionally increase the free pool because # even if there are waiters, doing a full put # would incur a greenlib switch and thus lose the # exception stack self.current_size -= 1 raise # if the call to get() draws from the free pool, it will come # back as a tuple if isinstance(conn, tuple): _last_used, created_at, conn = conn else: created_at = time.time() # wrap the connection so the consumer can call close() safely wrapped = PooledConnectionWrapper(conn, self) # annotating the wrapper so that when it gets put in the pool # again, we'll know how old it is wrapped._db_pool_created_at = created_at return wrapped def put(self, conn, cleanup=_MISSING): created_at = getattr(conn, '_db_pool_created_at', 0) now = time.time() conn = self._unwrap_connection(conn) if self._is_expired(now, now, created_at): self._safe_close(conn, quiet=False) conn = None elif cleanup is not None: if cleanup is _MISSING: cleanup = self.cleanup # by default, call rollback in case the connection is in the middle # of a transaction. 
However, rollback has performance implications # so optionally do nothing or call something else like ping try: if conn: cleanup(conn) except Exception as e: # we don't care what the exception was, we just know the # connection is dead print("WARNING: cleanup %s raised: %s" % (cleanup, e)) conn = None except: conn = None raise if conn is not None: super().put((now, created_at, conn)) else: # wake up any waiters with a flag value that indicates # they need to manufacture a connection if self.waiting() > 0: super().put(None) else: # no waiters -- just change the size self.current_size -= 1 self._schedule_expiration() @contextmanager def item(self, cleanup=_MISSING): conn = self.get() try: yield conn finally: self.put(conn, cleanup=cleanup) def clear(self): """Close all connections that this pool still holds a reference to, and removes all references to them. """ if self._expiration_timer: self._expiration_timer.cancel() free_items, self.free_items = self.free_items, deque() for item in free_items: # Free items created using min_size>0 are not tuples. conn = item[2] if isinstance(item, tuple) else item self._safe_close(conn, quiet=True) self.current_size -= 1 def __del__(self): self.clear()
BaseConnectionPool
python
apache__airflow
providers/standard/src/airflow/providers/standard/operators/trigger_dagrun.py
{ "start": 2320, "end": 2612 }
class ____(AirflowException): """Raise when a dag is paused and something tries to run it.""" def __init__(self, dag_id: str) -> None: super().__init__(dag_id) self.dag_id = dag_id def __str__(self) -> str: return f"Dag {self.dag_id} is paused"
DagIsPaused
python
sphinx-doc__sphinx
sphinx/domains/c/__init__.py
{ "start": 24818, "end": 26304 }
class ____(SphinxRole): def __init__(self, asCode: bool) -> None: super().__init__() if asCode: # render the expression as inline code self.class_type = 'c-expr' else: # render the expression as inline text self.class_type = 'c-texpr' def run(self) -> tuple[list[Node], list[system_message]]: text = self.text.replace('\n', ' ') parser = DefinitionParser( text, location=self.get_location(), config=self.config ) # attempt to mimic XRefRole classes, except that... try: ast = parser.parse_expression() except DefinitionError as ex: logger.warning( 'Unparseable C expression: %r\n%s', text, ex, location=self.get_location(), ) # see below node = addnodes.desc_inline('c', text, text, classes=[self.class_type]) return [node], [] parent_symbol = self.env.current_document.c_parent_symbol if parent_symbol is None: parent_symbol = self.env.domaindata['c']['root_symbol'] # ...most if not all of these classes should really apply to the individual references, # not the container node signode = addnodes.desc_inline('c', classes=[self.class_type]) ast.describe_signature(signode, 'markType', self.env, parent_symbol) return [signode], []
CExprRole
python
jazzband__django-model-utils
tests/models.py
{ "start": 7880, "end": 8100 }
class ____(models.Model): name = models.CharField(max_length=20) number = models.IntegerField() name_tracker = FieldTracker(fields=['name']) number_tracker = FieldTracker(fields=['number'])
TrackedMultiple
python
astropy__astropy
astropy/modeling/tests/test_fitters.py
{ "start": 27914, "end": 31748 }
class ____: def setup_class(self): self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j] self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8) def Gaussian_2D(p, pos): return p[0] * np.exp( -0.5 * (pos[0] - p[2]) ** 2 / p[4] ** 2 - 0.5 * (pos[1] - p[1]) ** 2 / p[3] ** 2 ) self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x])) def initial_guess(self, data, pos): y = pos[0] x = pos[1] """computes the centroid of the data as the initial guess for the center position""" wx = x * data wy = y * data total_intensity = np.sum(data) x_mean = np.sum(wx) / total_intensity y_mean = np.sum(wy) / total_intensity x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0]) y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0]) x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.0).astype(int) y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.0).astype(int) amplitude = data[y_pos][x_pos] return amplitude, x_mean, y_mean @pytest.mark.filterwarnings("ignore:The fit may be unsuccessful") @pytest.mark.filterwarnings( r"ignore:Values in x were outside bounds during a minimize step, " r"clipping to bounds" ) @pytest.mark.parametrize("fitter", non_linear_fitters_bounds + fitters) def test_with_fitters_and_sigma_clip(self, fitter): import scipy.stats as stats fitter = fitter() np.random.seed(0) c = stats.bernoulli.rvs(0.25, size=self.z.shape) z = self.z + ( np.random.normal(0.0, 0.2, self.z.shape) + c * np.random.normal(self.z, 2.0, self.z.shape) ) guess = self.initial_guess(self.z, np.array([self.y, self.x])) g2_init = models.Gaussian2D( amplitude=guess[0], x_mean=guess[1], y_mean=guess[2], x_stddev=0.75, y_stddev=1.25, ) fit = FittingWithOutlierRemoval(fitter, sigma_clip, niter=3, sigma=3.0) fitted_model, _ = fit(g2_init, self.x, self.y, z) assert_allclose(fitted_model.parameters[0:5], self.model_params, atol=1e-1) def test_1d_set_fitting_with_outlier_removal(): """Test model set fitting with outlier removal (issue #6819)""" poly_set = models.Polynomial1D(2, n_models=2) fitter = FittingWithOutlierRemoval( LinearLSQFitter(), sigma_clip, sigma=2.5, niter=3, cenfunc=np.ma.mean, stdfunc=np.ma.std, ) x = np.arange(10) y = np.array([2.5 * x - 4, 2 * x * x + x + 10]) y[1, 5] = -1000 # outlier poly_set, filt_y = fitter(poly_set, x, y) assert_allclose(poly_set.c0, [-4.0, 10.0], atol=1e-14) assert_allclose(poly_set.c1, [2.5, 1.0], atol=1e-14) assert_allclose(poly_set.c2, [0.0, 2.0], atol=1e-14) def test_2d_set_axis_2_fitting_with_outlier_removal(): """Test fitting 2D model set (axis 2) with outlier removal (issue #6819)""" poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2) fitter = FittingWithOutlierRemoval( LinearLSQFitter(), sigma_clip, sigma=2.5, niter=3, cenfunc=np.ma.mean, stdfunc=np.ma.std, ) y, x = np.mgrid[0:5, 0:5] z = np.rollaxis(np.array([x + y, 1 - 0.1 * x + 0.2 * y]), 0, 3) z[3, 3:5, 0] = 100.0 # outliers poly_set, filt_z = fitter(poly_set, x, y, z) assert_allclose(poly_set.c0_0, [[[0.0, 1.0]]], atol=1e-14) assert_allclose(poly_set.c1_0, [[[1.0, -0.1]]], atol=1e-14) assert_allclose(poly_set.c0_1, [[[1.0, 0.2]]], atol=1e-14) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
Test2DFittingWithOutlierRemoval
python
getsentry__sentry
src/sentry/users/api/endpoints/user_identity_details.py
{ "start": 370, "end": 776 }
class ____(UserEndpoint): publish_status = { "DELETE": ApiPublishStatus.PRIVATE, } def delete(self, request: Request, user: User, identity_id: int) -> Response: try: ai = AuthIdentity.objects.get(user=user, id=identity_id) ai.delete() except AuthIdentity.DoesNotExist: pass return Response(status=204)
UserIdentityDetailsEndpoint
python
tensorflow__tensorflow
tensorflow/python/keras/utils/tf_utils.py
{ "start": 6341, "end": 17278 }
class ____(object): """A wrapper for lists to be treated as elements for `nest`.""" def __init__(self, list_to_wrap): self._list = list_to_wrap def as_list(self): return self._list def convert_inner_node_data(nested, wrap=False): """Either wraps or unwraps innermost node data lists in `ListWrapper` objects. Args: nested: A nested data structure. wrap: If `True`, wrap innermost lists in `ListWrapper` objects. If `False`, unwraps `ListWrapper` objects into lists. Returns: Structure of same type as nested, with lists wrapped/unwrapped. """ def _is_serialized_node_data(nested): # Node data can be of form `[layer_name, node_id, tensor_id]` or # `[layer_name, node_id, tensor_id, kwargs]`. if (isinstance(nested, list) and (len(nested) in [3, 4]) and isinstance(nested[0], str)): return True return False def _is_atomic_nested(nested): """Returns `True` if `nested` is a list representing node data.""" if isinstance(nested, ListWrapper): return True if _is_serialized_node_data(nested): return True return not nest.is_nested(nested) def _convert_object_or_list(nested): """Convert b/t `ListWrapper` object and list representations.""" if wrap: if isinstance(nested, ListWrapper): return nested if _is_serialized_node_data(nested): return ListWrapper(nested) return nested else: if isinstance(nested, ListWrapper): return nested.as_list() return nested return map_structure_with_atomic(_is_atomic_nested, _convert_object_or_list, nested) def shape_type_conversion(fn): """Decorator that handles tuple/TensorShape conversion. Used in `compute_output_shape` and `build`. Args: fn: function to wrap. Returns: Wrapped function. """ def wrapper(instance, input_shape): # Pass shapes as tuples to `fn` # This preserves compatibility with external Keras. if input_shape is not None: input_shape = convert_shapes(input_shape, to_tuples=True) output_shape = fn(instance, input_shape) # Return shapes from `fn` as TensorShapes. if output_shape is not None: output_shape = convert_shapes(output_shape, to_tuples=False) return output_shape return wrapper def are_all_symbolic_tensors(tensors): return all(map(is_symbolic_tensor, tensors)) _user_convertible_tensor_types = set() def is_extension_type(tensor): """Returns whether a tensor is of an ExtensionType. github.com/tensorflow/community/pull/269 Currently it works by checking if `tensor` is a `CompositeTensor` instance, but this will be changed to use an appropriate extensiontype protocol check once ExtensionType is made public. Args: tensor: An object to test Returns: True if the tensor is an extension type object, false if not. """ return isinstance(tensor, composite_tensor.CompositeTensor) def is_symbolic_tensor(tensor): """Returns whether a tensor is symbolic (from a TF graph) or an eager tensor. A Variable can be seen as either: it is considered symbolic when we are in a graph scope, and eager when we are in an eager scope. Args: tensor: A tensor instance to test. Returns: True for symbolic tensors, False for eager tensors. """ if isinstance(tensor, tensor_lib.Tensor): return hasattr(tensor, 'graph') elif is_extension_type(tensor): component_tensors = nest.flatten(tensor, expand_composites=True) return any(hasattr(t, 'graph') for t in component_tensors) elif isinstance(tensor, variables.Variable): # Variables that are output of a Keras Layer in Functional API mode # should be considered symbolic. # TODO(omalleyt): We need a better way to check this in order to # enable `run_eagerly=True` for Models containing Layers that # return Variables as outputs. 
return (getattr(tensor, '_keras_history', False) or not context.executing_eagerly()) elif isinstance(tensor, tuple(_user_convertible_tensor_types)): tensor = ops.convert_to_tensor_or_composite(tensor) return is_symbolic_tensor(tensor) else: return False def register_symbolic_tensor_type(cls): """Allows users to specify types regarded as symbolic `Tensor`s. Used in conjunction with `tf.register_tensor_conversion_function`, calling `tf.keras.__internal__.utils.register_symbolic_tensor_type(cls)` allows non-`Tensor` objects to be plumbed through Keras layers. Example: ```python # One-time setup. class Foo(object): def __init__(self, input_): self._input = input_ def value(self): return tf.constant(42.) tf.register_tensor_conversion_function( Foo, lambda x, *args, **kwargs: x.value()) tf.keras.__internal__.utils.register_symbolic_tensor_type(Foo) # User-land. layer = tf.keras.layers.Lambda(lambda input_: Foo(input_)) ``` Args: cls: A `class` type which shall be regarded as a symbolic `Tensor`. """ global _user_convertible_tensor_types if cls not in _user_convertible_tensor_types: keras_tensor.register_keras_tensor_specialization( cls, keras_tensor.UserRegisteredTypeKerasTensor) _user_convertible_tensor_types.add(cls) def type_spec_from_value(value): """Grab type_spec without converting array-likes to tensors.""" if is_extension_type(value): return value._type_spec # pylint: disable=protected-access # Get a TensorSpec for array-like data without # converting the data to a Tensor if hasattr(value, 'shape') and hasattr(value, 'dtype'): return tensor_lib.TensorSpec(value.shape, value.dtype) else: return type_spec.type_spec_from_value(value) def is_ragged(tensor): """Returns true if `tensor` is a ragged tensor or ragged tensor value.""" return isinstance( tensor, (ragged_tensor.RaggedTensor, ragged_tensor_value.RaggedTensorValue)) def is_sparse(tensor): """Returns true if `tensor` is a sparse tensor or sparse tensor value.""" return isinstance( tensor, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)) def is_tensor_or_variable(x): return tensor_util.is_tf_type(x) or isinstance(x, variables.Variable) def assert_no_legacy_layers(layers): """Prevent tf.layers.Layers from being used with Keras. Certain legacy layers inherit from their keras analogs; however they are not supported with keras and can lead to subtle and hard to diagnose bugs. Args: layers: A list of layers to check Raises: TypeError: If any elements of layers are tf.layers.Layers """ # isinstance check for tf.layers.Layer introduces a circular dependency. legacy_layers = [l for l in layers if getattr(l, '_is_legacy_layer', None)] if legacy_layers: layer_str = '\n'.join(' ' + str(l) for l in legacy_layers) raise TypeError( 'The following are legacy tf.layers.Layers:\n{}\nTo use keras as a ' 'framework (for instance using the Network, Model, or Sequential ' 'classes), please use the tf.keras.layers implementation instead. ' '(Or, if writing custom layers, subclass from tf.keras.layers rather ' 'than tf.layers)'.format(layer_str)) @tf_contextlib.contextmanager def maybe_init_scope(layer): """Open an `init_scope` if in V2 mode and using the keras graph. Args: layer: The Layer/Model that is currently active. Yields: None """ # Don't open an init_scope in V1 mode or when using legacy tf.layers. 
if (ops.executing_eagerly_outside_functions() and getattr(layer, '_keras_style', True)): with ops.init_scope(): yield else: yield @tf_contextlib.contextmanager def graph_context_for_symbolic_tensors(*args, **kwargs): """Returns graph context manager if any of the inputs is a symbolic tensor.""" if any(is_symbolic_tensor(v) for v in list(args) + list(kwargs.values())): with K.get_graph().as_default(): yield else: yield def dataset_is_infinite(dataset): """True if the passed dataset is infinite.""" if ops.executing_eagerly_outside_functions(): return math_ops.equal( cardinality.cardinality(dataset), cardinality.INFINITE) else: dataset_size = K.get_session().run(cardinality.cardinality(dataset)) return dataset_size == cardinality.INFINITE def get_tensor_spec(t, dynamic_batch=False, name=None): """Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`.""" # pylint: disable=protected-access if isinstance(t, type_spec.TypeSpec): spec = t elif is_extension_type(t): # TODO(b/148821952): Should these specs have a name attr? spec = t._type_spec elif (hasattr(t, '_keras_history') and hasattr(t._keras_history[0], '_type_spec')): return t._keras_history[0]._type_spec elif hasattr(t, 'shape') and hasattr(t, 'dtype'): spec = tensor_lib.TensorSpec(shape=t.shape, dtype=t.dtype, name=name) else: return None # Allow non-Tensors to pass through. if not dynamic_batch: return spec dynamic_batch_spec = copy.deepcopy(spec) # RaggedTensorSpec only has a private _shape. shape = dynamic_batch_spec._shape if shape.rank is not None and shape.rank > 0: shape_list = shape.as_list() shape_list[0] = None dynamic_batch_spec._shape = tensor_shape.TensorShape(shape_list) return dynamic_batch_spec # pylint: enable=protected-access def sync_to_numpy_or_python_type(tensors): """Syncs and converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types. For each tensor, it calls `tensor.numpy()`. If the result is a scalar value, it converts it to a Python type, such as a float or int, by calling `result.item()`. Numpy scalars are converted, as Python types are often more convenient to deal with. This is especially useful for bfloat16 Numpy scalars, which don't support as many operations as other Numpy values. Async strategies (such as `TPUStrategy` and `ParameterServerStrategy`) are forced to sync during this process. Args: tensors: A structure of tensors. Returns: `tensors`, but scalar tensors are converted to Python types and non-scalar tensors are converted to Numpy arrays. """ if isinstance(tensors, coordinator_lib.RemoteValue): return tensors.fetch() def _to_single_numpy_or_python_type(t): if isinstance(t, tensor_lib.Tensor): x = t.numpy() return x.item() if np.ndim(x) == 0 else x return t # Don't turn ragged or sparse tensors to NumPy. return nest.map_structure(_to_single_numpy_or_python_type, tensors) def _astuple(attrs): """Converts the given attrs to tuple non-recursively.""" cls = type(attrs) fields = getattr(cls, '__attrs_attrs__', None) if fields is None: raise ValueError('%r is not an attrs-decorated class.' % cls) values = [] for field in fields: values.append(getattr(attrs, field.name)) return tuple(values)
ListWrapper
python
airbytehq__airbyte
airbyte-integrations/connectors/source-instagram/unit_tests/integration/test_media_insights.py
{ "start": 4230, "end": 14436 }
class ____(TestCase):
    @staticmethod
    def _read(config_: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput:
        return read_output(
            config_builder=config_,
            stream_name=_STREAM_NAME,
            sync_mode=SyncMode.full_refresh,
            expecting_exception=expecting_exception,
        )

    @HttpMocker()
    def test_instagram_insights_for_reels(self, http_mocker: HttpMocker) -> None:
        test = REELS
        http_mocker.get(
            get_account_request().build(),
            get_account_response(),
        )
        http_mocker.get(
            _get_parent_request().build(),
            _get_response(stream_name=_PARENT_STREAM_NAME, test=test)
            .with_record(_record(stream_name=_PARENT_STREAM_NAME, test=test))
            .build(),
        )
        http_mocker.get(
            _get_child_request(media_id=MEDIA_ID_REELS, metric=_METRICS[MEDIA_ID_REELS]).build(),
            HttpResponse(json.dumps(find_template(f"{_STREAM_NAME}_for_{test}", __file__)), 200),
        )

        output = self._read(config_=config())
        assert len(output.records) == 1
        assert output.records[0].record.data["page_id"]
        assert output.records[0].record.data["business_account_id"]
        assert output.records[0].record.data["id"]
        for metric in _METRICS[MEDIA_ID_REELS]:
            assert metric in output.records[0].record.data

    @HttpMocker()
    def test_instagram_insights_for_video_feed(self, http_mocker: HttpMocker) -> None:
        test = VIDEO_FEED
        http_mocker.get(
            get_account_request().build(),
            get_account_response(),
        )
        http_mocker.get(
            _get_parent_request().build(),
            _get_response(stream_name=_PARENT_STREAM_NAME, test=test)
            .with_record(_record(stream_name=_PARENT_STREAM_NAME, test=test))
            .build(),
        )
        http_mocker.get(
            _get_child_request(media_id=MEDIA_ID_VIDEO_FEED, metric=_METRICS[MEDIA_ID_VIDEO_FEED]).build(),
            HttpResponse(json.dumps(find_template(f"{_STREAM_NAME}_for_{test}", __file__)), 200),
        )

        output = self._read(config_=config())
        assert len(output.records) == 1
        assert output.records[0].record.data["page_id"]
        assert output.records[0].record.data["business_account_id"]
        assert output.records[0].record.data["id"]
        for metric in _METRICS[MEDIA_ID_VIDEO_FEED]:
            assert metric in output.records[0].record.data

    @HttpMocker()
    def test_instagram_insights_for_video(self, http_mocker: HttpMocker) -> None:
        test = VIDEO
        http_mocker.get(
            get_account_request().build(),
            get_account_response(),
        )
        http_mocker.get(
            _get_parent_request().build(),
            _get_response(stream_name=_PARENT_STREAM_NAME, test=test)
            .with_record(_record(stream_name=_PARENT_STREAM_NAME, test=test))
            .build(),
        )
        http_mocker.get(
            _get_child_request(media_id=MEDIA_ID_VIDEO, metric=_METRICS[MEDIA_ID_VIDEO]).build(),
            HttpResponse(json.dumps(find_template(f"{_STREAM_NAME}_for_{test}", __file__)), 200),
        )

        output = self._read(config_=config())
        assert len(output.records) == 1
        assert output.records[0].record.data["page_id"]
        assert output.records[0].record.data["business_account_id"]
        assert output.records[0].record.data["id"]
        for metric in _METRICS[MEDIA_ID_VIDEO]:
            assert metric in output.records[0].record.data

    @HttpMocker()
    def test_instagram_insights_carousel_album(self, http_mocker: HttpMocker) -> None:
        test = CAROUSEL_ALBUM
        http_mocker.get(
            get_account_request().build(),
            get_account_response(),
        )
        http_mocker.get(
            _get_parent_request().build(),
            _get_response(stream_name=_PARENT_STREAM_NAME, test=test)
            .with_record(_record(stream_name=_PARENT_STREAM_NAME, test=test))
            .build(),
        )
        http_mocker.get(
            _get_child_request(media_id=MEDIA_ID_CAROUSEL_ALBUM, metric=_METRICS[MEDIA_ID_CAROUSEL_ALBUM]).build(),
            HttpResponse(json.dumps(find_template(f"{_STREAM_NAME}_for_{test}", __file__)), 200),
        )

        output = self._read(config_=config())
        assert len(output.records) == 1
        assert output.records[0].record.data["page_id"]
        assert output.records[0].record.data["business_account_id"]
        assert output.records[0].record.data["id"]
        for metric in _METRICS[MEDIA_ID_CAROUSEL_ALBUM]:
            assert metric in output.records[0].record.data

    @HttpMocker()
    def test_instagram_insights_general_media(self, http_mocker: HttpMocker) -> None:
        test = GENERAL_MEDIA
        http_mocker.get(
            get_account_request().build(),
            get_account_response(),
        )
        http_mocker.get(
            _get_parent_request().build(),
            _get_response(stream_name=_PARENT_STREAM_NAME, test=test)
            .with_record(_record(stream_name=_PARENT_STREAM_NAME, test=test))
            .build(),
        )
        http_mocker.get(
            _get_child_request(media_id=MEDIA_ID_GENERAL_MEDIA, metric=_METRICS[MEDIA_ID_GENERAL_MEDIA]).build(),
            HttpResponse(json.dumps(find_template(f"{_STREAM_NAME}_for_{test}", __file__)), 200),
        )

        output = self._read(config_=config())
        assert len(output.records) == 1
        assert output.records[0].record.data["page_id"]
        assert output.records[0].record.data["business_account_id"]
        assert output.records[0].record.data["id"]
        for metric in _METRICS[MEDIA_ID_GENERAL_MEDIA]:
            assert metric in output.records[0].record.data

    @HttpMocker()
    def test_instagram_insights_error_posted_before_business(self, http_mocker: HttpMocker) -> None:
        test = ERROR_POSTED_BEFORE_BUSINESS
        http_mocker.get(
            get_account_request().build(),
            get_account_response(),
        )
        http_mocker.get(
            _get_parent_request().build(),
            HttpResponse(json.dumps(find_template(f"{_PARENT_STREAM_NAME}_for_{test}", __file__)), 200)
        )
        http_mocker.get(
            _get_child_request(media_id=MEDIA_ID_GENERAL_MEDIA, metric=_METRICS[MEDIA_ID_GENERAL_MEDIA]).build(),
            HttpResponse(json.dumps(find_template(f"{_STREAM_NAME}_for_{GENERAL_MEDIA}", __file__)), 200),
        )
        http_mocker.get(
            _get_child_request(
                media_id=MEDIA_ID_ERROR_POSTED_BEFORE_BUSINESS, metric=_METRICS[MEDIA_ID_ERROR_POSTED_BEFORE_BUSINESS]
            ).build(),
            HttpResponse(json.dumps(find_template(f"{_STREAM_NAME}_for_{test}", __file__)), 400),
        )

        output = self._read(config_=config())
        assert len(output.records) == 1
        assert output.records[0].record.data["page_id"]
        assert output.records[0].record.data["business_account_id"]
        assert output.records[0].record.data["id"]
        for metric in _METRICS[MEDIA_ID_GENERAL_MEDIA]:
            assert metric in output.records[0].record.data

    @HttpMocker()
    def test_instagram_insights_error_with_wrong_permissions(self, http_mocker: HttpMocker) -> None:
        test = ERROR_WITH_WRONG_PERMISSIONS
        http_mocker.get(
            get_account_request().build(),
            get_account_response(),
        )
        http_mocker.get(
            _get_parent_request().build(),
            HttpResponse(json.dumps(find_template(f"{_PARENT_STREAM_NAME}_for_{test}", __file__)), 200)
        )
        http_mocker.get(
            _get_child_request(media_id=MEDIA_ID_GENERAL_MEDIA, metric=_METRICS[MEDIA_ID_GENERAL_MEDIA]).build(),
            HttpResponse(json.dumps(find_template(f"{_STREAM_NAME}_for_{GENERAL_MEDIA}", __file__)), 200),
        )
        http_mocker.get(
            _get_child_request(
                media_id=MEDIA_ID_ERROR_WITH_WRONG_PERMISSIONS, metric=_METRICS[MEDIA_ID_ERROR_WITH_WRONG_PERMISSIONS]
            ).build(),
            HttpResponse(json.dumps(find_template(f"{_STREAM_NAME}_for_{test}", __file__)), 400),
        )

        output = self._read(config_=config())
        # error was ignored and correct record was processed
        assert len(output.records) == 1
        assert output.records[0].record.data["page_id"]
        assert output.records[0].record.data["business_account_id"]
        assert output.records[0].record.data["id"]
        for metric in _METRICS[MEDIA_ID_GENERAL_MEDIA]:
            assert metric in output.records[0].record.data

    @HttpMocker()
    def test_instagram_insights_error_with_wrong_permissions_code_10(self, http_mocker: HttpMocker) -> None:
        test = ERROR_WITH_WRONG_PERMISSIONS_CODE_10
        http_mocker.get(
            get_account_request().build(),
            get_account_response(),
        )
        http_mocker.get(
            _get_parent_request().build(),
            HttpResponse(json.dumps(find_template(f"{_PARENT_STREAM_NAME}_for_{test}", __file__)), 200)
        )
        http_mocker.get(
            _get_child_request(media_id=MEDIA_ID_GENERAL_MEDIA, metric=_METRICS[MEDIA_ID_GENERAL_MEDIA]).build(),
            HttpResponse(json.dumps(find_template(f"{_STREAM_NAME}_for_{GENERAL_MEDIA}", __file__)), 200),
        )
        http_mocker.get(
            _get_child_request(
                media_id=MEDIA_ID_ERROR_WITH_WRONG_PERMISSIONS_CODE_10, metric=_METRICS[MEDIA_ID_ERROR_WITH_WRONG_PERMISSIONS_CODE_10]
            ).build(),
            HttpResponse(json.dumps(find_template(f"{_STREAM_NAME}_for_{test}", __file__)), 400),
        )

        output = self._read(config_=config())
        # error was ignored and correct record was processed
        assert len(output.records) == 1
        assert output.records[0].record.data["page_id"]
        assert output.records[0].record.data["business_account_id"]
        assert output.records[0].record.data["id"]
        for metric in _METRICS[MEDIA_ID_GENERAL_MEDIA]:
            assert metric in output.records[0].record.data
TestFullRefresh
python
getsentry__sentry
tests/sentry/workflow_engine/endpoints/validators/actions/test_email_validator.py
{ "start": 176, "end": 2334 }
class ____(TestCase):
    def setUp(self) -> None:
        super().setUp()
        self.team = self.create_team(organization=self.organization)
        self.valid_data = {
            "type": Action.Type.EMAIL,
            "config": {"targetType": "user", "targetIdentifier": str(self.user.id)},
            "data": {},
        }

    def test_validate__user(self) -> None:
        validator = BaseActionValidator(
            data=self.valid_data,
            context={"organization": self.organization},
        )
        result = validator.is_valid()
        assert result is True

    def test_validate_user__missing_identifier(self) -> None:
        validator = BaseActionValidator(
            data={
                **self.valid_data,
                "config": {"target_type": "user"},
            },
            context={"organization": self.organization},
        )
        result = validator.is_valid()
        assert result is False

    def test_validate__team(self) -> None:
        validator = BaseActionValidator(
            data={
                **self.valid_data,
                "config": {
                    "target_type": "team",
                    "target_identifier": str(self.team.id),
                },
            },
            context={"organization": self.organization},
        )
        result = validator.is_valid(raise_exception=True)
        assert result is True

    def test_validate__issue_owners(self) -> None:
        validator = BaseActionValidator(
            data={
                **self.valid_data,
                "config": {"target_type": "issue_owners"},
            },
            context={"organization": self.organization},
        )
        result = validator.is_valid(raise_exception=True)
        assert result is True

    def test_validate__invalid_target_type(self) -> None:
        validator = BaseActionValidator(
            data={
                **self.valid_data,
                "config": {"targetType": "specific"},
            },
            context={"organization": self.organization},
        )
        result = validator.is_valid()
        assert result is False
TestEmailActionValidator
python
google__jax
tests/mosaic/gpu_torch_test_distributed.py
{ "start": 1447, "end": 5084 }
class ____(parameterized.TestCase):

  def setUpClass():
    torch.cuda.set_device("cuda:0")
    torch.set_default_device("cuda")
    if torch is None:
      raise unittest.SkipTest("Test requires torch")
    if not torch.cuda.is_available():
      raise unittest.SkipTest("Test requires torch with CUDA support")
    if (not jtu.test_device_matches(["cuda"]) or
        not jtu.is_cuda_compute_capability_at_least("9.0")):
      raise unittest.SkipTest("Only works on GPU with capability >= sm90")
    device_count = torch.cuda.device_count()
    for d1 in range(device_count - 1):
      for d2 in range(d1 + 1, device_count):
        if not torch.cuda.can_device_access_peer(d1, d2):
          raise unittest.SkipTest("Test requires p2p access")
    if jax.process_count() == 1:
      raise unittest.SkipTest("Test requires multiple processes.")
    if jax.device_count() != jax.process_count():
      raise unittest.SkipTest("Need 1 device per process")
    os.environ["RANK"] = str(jax.process_index())
    os.environ["WORLD_SIZE"] = str(jax.process_count())
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "5728"
    dist.init_process_group("nccl")
    symm_mem.enable_symm_mem_for_group(dist.group.WORLD.group_name)
    assert dist.is_initialized()
    assert symm_mem.is_nvshmem_available()
    symm_mem.set_backend("NVSHMEM")
    symm_mem.empty(1)  # Just to initialize NVSHMEM

  def setUp(self):
    self.prng = np.random.default_rng(1234)
    self.context = mlir.make_ir_context()
    if mgpu_dialect is not None:
      mgpu_dialect.register_dialect(self.context)
    self.enter_context(config.traceback_filtering("off"))
    self.enter_context(self.context)
    self.enter_context(ir.Location.unknown())

  def test_get_device_id(self):
    index = ir.IndexType.get()

    def kernel_body(ctx, dst, _):
      device_id = ctx.device_id()
      memref.store(device_id, dst, [arith.constant(index, 0)])

    out_shape = jax.ShapeDtypeStruct((1,), jnp.int32)
    kernel = mgpu.as_torch_gpu_kernel(
        kernel_body, (1, 1, 1), (128, 1, 1), (), out_shape, ()
    )
    gathered = torch.empty((2,), dtype=torch.int32)
    dist.all_gather_into_tensor(gathered, kernel())
    self.assertEqual(gathered.tolist(), list(range(jax.process_count())))

  def test_remote_semaphore(self):
    if dist.get_world_size() != 2:
      self.skipTest("Test assumes 2 devices")
    i32 = ir.IntegerType.get_signless(32)

    def kernel(ctx, sem, _):
      my_device = ctx.device_id()
      other_device = arith.subi(arith.constant(i32, 1), my_device)
      my_sem = mgpu.SemaphoreRef(mgpu.utils.memref_ptr(sem))
      other_dst = ctx.to_remote(sem, other_device)
      other_sem = mgpu.SemaphoreRef(mgpu.utils.memref_ptr(other_dst))
      # We signal and wait a different amount on each device to make sure we're
      # really communicating here.
      other_sem.signal(arith.addi(arith.constant(i32, 1), other_device))

      @mgpu.fori(arith.addi(arith.constant(i32, 1), my_device), None)
      def wait_loop(i, _):
        my_sem.wait(1)

    sem_shape = jax.ShapeDtypeStruct((1,), jnp.int32)
    kernel = mgpu.as_torch_gpu_kernel(
        kernel, (1, 1, 1), (128, 1, 1), (), (), (), inout_shape=sem_shape
    )
    gathered = torch.empty((2,), dtype=torch.int32)
    sem = symm_mem.empty((1,), dtype=torch.int32)
    sem_symm = symm_mem.rendezvous(sem, dist.group.WORLD)
    (sem_again,) = kernel(sem)
    self.assertEqual(sem_again.data_ptr(), sem.data_ptr())
    dist.all_gather_into_tensor(gathered, sem)
    self.assertEqual(gathered.tolist(), [0, 0])


if __name__ == "__main__":
  jt_multiprocess.main()
TorchTest