language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/0600-0699/0642.Design Search Autocomplete System/Solution.py | {
"start": 0,
"end": 659
} | class ____:
def __init__(self):
self.children = [None] * 27
self.v = 0
self.w = ''
def insert(self, w, t):
node = self
for c in w:
idx = 26 if c == ' ' else ord(c) - ord('a')
if node.children[idx] is None:
node.children[idx] = Trie()
node = node.children[idx]
node.v += t
node.w = w
def search(self, pref):
node = self
for c in pref:
idx = 26 if c == ' ' else ord(c) - ord('a')
if node.children[idx] is None:
return None
node = node.children[idx]
return node
| Trie |
python | ray-project__ray | rllib/models/torch/misc.py | {
"start": 9323,
"end": 10924
} | class ____(nn.Module):
"""Simple PyTorch version of `linear` function"""
def __init__(
self,
in_size: int,
out_size: int,
initializer: Any = None,
activation_fn: Any = None,
use_bias: bool = True,
bias_init: float = 0.0,
):
"""Creates a standard FC layer, similar to torch.nn.Linear
Args:
in_size: Input size for FC Layer
out_size: Output size for FC Layer
initializer: Initializer function for FC layer weights
activation_fn: Activation function at the end of layer
use_bias: Whether to add bias weights or not
bias_init: Initalize bias weights to bias_init const
"""
super(SlimFC, self).__init__()
layers = []
# Actual nn.Linear layer (including correct initialization logic).
linear = nn.Linear(in_size, out_size, bias=use_bias)
if initializer is None:
initializer = nn.init.xavier_uniform_
initializer(linear.weight)
if use_bias is True:
nn.init.constant_(linear.bias, bias_init)
layers.append(linear)
# Activation function (if any; default=None (linear)).
if isinstance(activation_fn, str):
activation_fn = get_activation_fn(activation_fn, "torch")
if activation_fn is not None:
layers.append(activation_fn())
# Put everything in sequence.
self._model = nn.Sequential(*layers)
def forward(self, x: TensorType) -> TensorType:
return self._model(x)
@DeveloperAPI
| SlimFC |
python | sqlalchemy__sqlalchemy | test/sql/test_constraints.py | {
"start": 1143,
"end": 24353
} | class ____(fixtures.TestBase, AssertsExecutionResults):
__dialect__ = "default"
__sparse_driver_backend__ = True
@testing.provide_metadata
def test_pk_fk_constraint_create(self):
metadata = self.metadata
Table(
"employees",
metadata,
Column("id", Integer),
Column("soc", String(40)),
Column("name", String(30)),
PrimaryKeyConstraint("id", "soc"),
)
Table(
"elements",
metadata,
Column("id", Integer),
Column("stuff", String(30)),
Column("emp_id", Integer),
Column("emp_soc", String(40)),
PrimaryKeyConstraint("id", name="elements_primkey"),
ForeignKeyConstraint(
["emp_id", "emp_soc"], ["employees.id", "employees.soc"]
),
)
self.assert_sql_execution(
testing.db,
lambda: metadata.create_all(testing.db, checkfirst=False),
CompiledSQL(
"CREATE TABLE employees ("
"id INTEGER NOT NULL, "
"soc VARCHAR(40) NOT NULL, "
"name VARCHAR(30), "
"PRIMARY KEY (id, soc)"
")"
),
CompiledSQL(
"CREATE TABLE elements ("
"id INTEGER NOT NULL, "
"stuff VARCHAR(30), "
"emp_id INTEGER, "
"emp_soc VARCHAR(40), "
"CONSTRAINT elements_primkey PRIMARY KEY (id), "
"FOREIGN KEY(emp_id, emp_soc) "
"REFERENCES employees (id, soc)"
")"
),
)
@testing.force_drop_names("a", "b")
def test_fk_cant_drop_cycled_unnamed(self):
metadata = MetaData()
Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("bid", Integer),
ForeignKeyConstraint(["bid"], ["b.id"]),
)
Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("aid", Integer),
ForeignKeyConstraint(["aid"], ["a.id"]),
)
metadata.create_all(testing.db)
if testing.db.dialect.supports_alter:
assert_raises_message(
exc.CircularDependencyError,
"Can't sort tables for DROP; an unresolvable foreign key "
"dependency exists between tables: a, b. Please ensure "
"that the ForeignKey and ForeignKeyConstraint objects "
"involved in the cycle have names so that they can be "
"dropped using DROP CONSTRAINT.",
metadata.drop_all,
testing.db,
)
else:
with expect_warnings(
"Can't sort tables for DROP; an unresolvable "
"foreign key dependency "
):
with self.sql_execution_asserter() as asserter:
metadata.drop_all(testing.db, checkfirst=False)
asserter.assert_(
AllOf(CompiledSQL("DROP TABLE a"), CompiledSQL("DROP TABLE b"))
)
@testing.provide_metadata
def test_fk_table_auto_alter_constraint_create(self):
metadata = self.metadata
Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("bid", Integer),
ForeignKeyConstraint(["bid"], ["b.id"]),
)
Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("aid", Integer),
ForeignKeyConstraint(["aid"], ["a.id"], name="bfk"),
)
self._assert_cyclic_constraint(
metadata, auto=True, sqlite_warning=True
)
@testing.provide_metadata
def test_fk_column_auto_alter_inline_constraint_create(self):
metadata = self.metadata
Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("bid", Integer, ForeignKey("b.id")),
)
Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("aid", Integer, ForeignKey("a.id", name="bfk")),
)
self._assert_cyclic_constraint(
metadata, auto=True, sqlite_warning=True
)
@testing.provide_metadata
def test_fk_column_use_alter_inline_constraint_create(self):
metadata = self.metadata
Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("bid", Integer, ForeignKey("b.id")),
)
Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column(
"aid", Integer, ForeignKey("a.id", name="bfk", use_alter=True)
),
)
self._assert_cyclic_constraint(metadata, auto=False)
@testing.provide_metadata
def test_fk_table_use_alter_constraint_create(self):
metadata = self.metadata
Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("bid", Integer),
ForeignKeyConstraint(["bid"], ["b.id"]),
)
Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("aid", Integer),
ForeignKeyConstraint(
["aid"], ["a.id"], use_alter=True, name="bfk"
),
)
self._assert_cyclic_constraint(metadata)
@testing.provide_metadata
def test_fk_column_use_alter_constraint_create(self):
metadata = self.metadata
Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("bid", Integer, ForeignKey("b.id")),
)
Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column(
"aid", Integer, ForeignKey("a.id", use_alter=True, name="bfk")
),
)
self._assert_cyclic_constraint(metadata, auto=False)
def _assert_cyclic_constraint(
self, metadata, auto=False, sqlite_warning=False
):
if testing.db.dialect.supports_alter:
self._assert_cyclic_constraint_supports_alter(metadata, auto=auto)
else:
self._assert_cyclic_constraint_no_alter(
metadata, auto=auto, sqlite_warning=sqlite_warning
)
def _assert_cyclic_constraint_supports_alter(self, metadata, auto=False):
table_assertions = []
if auto:
table_assertions = [
CompiledSQL(
"CREATE TABLE b ("
"id INTEGER NOT NULL, "
"aid INTEGER, "
"PRIMARY KEY (id)"
")"
),
CompiledSQL(
"CREATE TABLE a ("
"id INTEGER NOT NULL, "
"bid INTEGER, "
"PRIMARY KEY (id)"
")"
),
]
else:
table_assertions = [
CompiledSQL(
"CREATE TABLE b ("
"id INTEGER NOT NULL, "
"aid INTEGER, "
"PRIMARY KEY (id)"
")"
),
CompiledSQL(
"CREATE TABLE a ("
"id INTEGER NOT NULL, "
"bid INTEGER, "
"PRIMARY KEY (id), "
"FOREIGN KEY(bid) REFERENCES b (id)"
")"
),
]
assertions = [AllOf(*table_assertions)]
fk_assertions = []
fk_assertions.append(
CompiledSQL(
"ALTER TABLE b ADD CONSTRAINT bfk "
"FOREIGN KEY(aid) REFERENCES a (id)"
)
)
if auto:
fk_assertions.append(
CompiledSQL(
"ALTER TABLE a ADD FOREIGN KEY(bid) REFERENCES b (id)"
)
)
assertions.append(AllOf(*fk_assertions))
with self.sql_execution_asserter() as asserter:
metadata.create_all(testing.db, checkfirst=False)
asserter.assert_(*assertions)
assertions = [
CompiledSQL("ALTER TABLE b DROP CONSTRAINT bfk"),
CompiledSQL("DROP TABLE a"),
CompiledSQL("DROP TABLE b"),
]
with self.sql_execution_asserter() as asserter:
metadata.drop_all(testing.db, checkfirst=False),
asserter.assert_(*assertions)
def _assert_cyclic_constraint_no_alter(
self, metadata, auto=False, sqlite_warning=False
):
table_assertions = []
if auto:
table_assertions.append(
DialectSQL(
"CREATE TABLE b ("
"id INTEGER NOT NULL, "
"aid INTEGER, "
"PRIMARY KEY (id), "
"CONSTRAINT bfk FOREIGN KEY(aid) REFERENCES a (id)"
")"
)
)
table_assertions.append(
DialectSQL(
"CREATE TABLE a ("
"id INTEGER NOT NULL, "
"bid INTEGER, "
"PRIMARY KEY (id), "
"FOREIGN KEY(bid) REFERENCES b (id)"
")"
)
)
else:
table_assertions.append(
DialectSQL(
"CREATE TABLE b ("
"id INTEGER NOT NULL, "
"aid INTEGER, "
"PRIMARY KEY (id), "
"CONSTRAINT bfk FOREIGN KEY(aid) REFERENCES a (id)"
")"
)
)
table_assertions.append(
DialectSQL(
"CREATE TABLE a ("
"id INTEGER NOT NULL, "
"bid INTEGER, "
"PRIMARY KEY (id), "
"FOREIGN KEY(bid) REFERENCES b (id)"
")"
)
)
assertions = [AllOf(*table_assertions)]
with self.sql_execution_asserter() as asserter:
metadata.create_all(testing.db, checkfirst=False)
asserter.assert_(*assertions)
assertions = [
AllOf(CompiledSQL("DROP TABLE a"), CompiledSQL("DROP TABLE b"))
]
if sqlite_warning:
with expect_warnings("Can't sort tables for DROP; "):
with self.sql_execution_asserter() as asserter:
metadata.drop_all(testing.db, checkfirst=False),
else:
with self.sql_execution_asserter() as asserter:
metadata.drop_all(testing.db, checkfirst=False),
asserter.assert_(*assertions)
@testing.force_drop_names("a", "b")
def test_cycle_unnamed_fks(self):
metadata = MetaData()
Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("bid", Integer, ForeignKey("b.id")),
)
Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("aid", Integer, ForeignKey("a.id")),
)
assertions = [
AllOf(
CompiledSQL(
"CREATE TABLE b ("
"id INTEGER NOT NULL, "
"aid INTEGER, "
"PRIMARY KEY (id)"
")"
),
CompiledSQL(
"CREATE TABLE a ("
"id INTEGER NOT NULL, "
"bid INTEGER, "
"PRIMARY KEY (id)"
")"
),
),
AllOf(
CompiledSQL(
"ALTER TABLE b ADD FOREIGN KEY(aid) REFERENCES a (id)"
),
CompiledSQL(
"ALTER TABLE a ADD FOREIGN KEY(bid) REFERENCES b (id)"
),
),
]
with self.sql_execution_asserter() as asserter:
metadata.create_all(testing.db, checkfirst=False)
if testing.db.dialect.supports_alter:
asserter.assert_(*assertions)
assert_raises_message(
exc.CircularDependencyError,
"Can't sort tables for DROP; an unresolvable foreign key "
"dependency exists between tables: a, b. "
"Please ensure that the "
"ForeignKey and ForeignKeyConstraint objects involved in the "
"cycle have names so that they can be dropped using "
"DROP CONSTRAINT.",
metadata.drop_all,
testing.db,
checkfirst=False,
)
else:
with expect_warnings(
"Can't sort tables for DROP; an unresolvable "
"foreign key dependency exists between tables"
):
with self.sql_execution_asserter() as asserter:
metadata.drop_all(testing.db, checkfirst=False)
asserter.assert_(
AllOf(CompiledSQL("DROP TABLE b"), CompiledSQL("DROP TABLE a"))
)
@testing.force_drop_names("a", "b")
def test_cycle_named_fks(self):
metadata = MetaData()
Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("bid", Integer, ForeignKey("b.id")),
)
Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column(
"aid",
Integer,
ForeignKey("a.id", use_alter=True, name="aidfk"),
),
)
assertions = [
AllOf(
CompiledSQL(
"CREATE TABLE b ("
"id INTEGER NOT NULL, "
"aid INTEGER, "
"PRIMARY KEY (id)"
")"
),
CompiledSQL(
"CREATE TABLE a ("
"id INTEGER NOT NULL, "
"bid INTEGER, "
"PRIMARY KEY (id), "
"FOREIGN KEY(bid) REFERENCES b (id)"
")"
),
),
CompiledSQL(
"ALTER TABLE b ADD CONSTRAINT aidfk "
"FOREIGN KEY(aid) REFERENCES a (id)"
),
]
with self.sql_execution_asserter() as asserter:
metadata.create_all(testing.db, checkfirst=False)
if testing.db.dialect.supports_alter:
asserter.assert_(*assertions)
with self.sql_execution_asserter() as asserter:
metadata.drop_all(testing.db, checkfirst=False)
asserter.assert_(
CompiledSQL("ALTER TABLE b DROP CONSTRAINT aidfk"),
AllOf(
CompiledSQL("DROP TABLE b"), CompiledSQL("DROP TABLE a")
),
)
else:
with self.sql_execution_asserter() as asserter:
metadata.drop_all(testing.db, checkfirst=False)
asserter.assert_(
AllOf(CompiledSQL("DROP TABLE b"), CompiledSQL("DROP TABLE a"))
)
@testing.requires.check_constraints
@testing.provide_metadata
def test_check_constraint_create(self):
metadata = self.metadata
Table(
"foo",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
CheckConstraint("x>y"),
)
Table(
"bar",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer, CheckConstraint("x>7")),
Column("z", Integer),
)
self.assert_sql_execution(
testing.db,
lambda: metadata.create_all(testing.db, checkfirst=False),
AllOf(
CompiledSQL(
"CREATE TABLE foo ("
"id INTEGER NOT NULL, "
"x INTEGER, "
"y INTEGER, "
"PRIMARY KEY (id), "
"CHECK (x>y)"
")"
),
CompiledSQL(
"CREATE TABLE bar ("
"id INTEGER NOT NULL, "
"x INTEGER CHECK (x>7), "
"z INTEGER, "
"PRIMARY KEY (id)"
")"
),
),
)
@testing.provide_metadata
def test_unique_constraint_create(self):
metadata = self.metadata
Table(
"foo",
metadata,
Column("id", Integer, primary_key=True),
Column("value", String(30), unique=True),
)
Table(
"bar",
metadata,
Column("id", Integer, primary_key=True),
Column("value", String(30)),
Column("value2", String(30)),
UniqueConstraint("value", "value2", name="uix1"),
)
self.assert_sql_execution(
testing.db,
lambda: metadata.create_all(testing.db, checkfirst=False),
AllOf(
CompiledSQL(
"CREATE TABLE foo ("
"id INTEGER NOT NULL, "
"value VARCHAR(30), "
"PRIMARY KEY (id), "
"UNIQUE (value)"
")"
),
CompiledSQL(
"CREATE TABLE bar ("
"id INTEGER NOT NULL, "
"value VARCHAR(30), "
"value2 VARCHAR(30), "
"PRIMARY KEY (id), "
"CONSTRAINT uix1 UNIQUE (value, value2)"
")"
),
),
)
@testing.provide_metadata
def test_index_create(self):
metadata = self.metadata
employees = Table(
"employees",
metadata,
Column("id", Integer, primary_key=True),
Column("first_name", String(30)),
Column("last_name", String(30)),
Column("email_address", String(30)),
)
i = Index(
"employee_name_index",
employees.c.last_name,
employees.c.first_name,
)
assert i in employees.indexes
i2 = Index(
"employee_email_index", employees.c.email_address, unique=True
)
assert i2 in employees.indexes
self.assert_sql_execution(
testing.db,
lambda: metadata.create_all(testing.db, checkfirst=False),
RegexSQL("^CREATE TABLE"),
AllOf(
CompiledSQL(
"CREATE INDEX employee_name_index ON "
"employees (last_name, first_name)",
[],
),
CompiledSQL(
"CREATE UNIQUE INDEX employee_email_index ON "
"employees (email_address)",
[],
),
),
)
@testing.provide_metadata
def test_index_create_camelcase(self):
"""test that mixed-case index identifiers are legal"""
metadata = self.metadata
employees = Table(
"companyEmployees",
metadata,
Column("id", Integer, primary_key=True),
Column("firstName", String(30)),
Column("lastName", String(30)),
Column("emailAddress", String(30)),
)
Index("employeeNameIndex", employees.c.lastName, employees.c.firstName)
Index("employeeEmailIndex", employees.c.emailAddress, unique=True)
self.assert_sql_execution(
testing.db,
lambda: metadata.create_all(testing.db, checkfirst=False),
RegexSQL("^CREATE TABLE"),
AllOf(
CompiledSQL(
'CREATE INDEX "employeeNameIndex" ON '
'"companyEmployees" ("lastName", "firstName")',
[],
),
CompiledSQL(
'CREATE UNIQUE INDEX "employeeEmailIndex" ON '
'"companyEmployees" ("emailAddress")',
[],
),
),
)
@testing.provide_metadata
def test_index_create_inline(self):
# test an index create using index=True, unique=True
metadata = self.metadata
events = Table(
"events",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(30), index=True, unique=True),
Column("location", String(30), index=True),
Column("sport", String(30)),
Column("announcer", String(30)),
Column("winner", String(30)),
)
Index(
"sport_announcer", events.c.sport, events.c.announcer, unique=True
)
Index("idx_winners", events.c.winner)
eq_(
{ix.name for ix in events.indexes},
{
"ix_events_name",
"ix_events_location",
"sport_announcer",
"idx_winners",
},
)
self.assert_sql_execution(
testing.db,
lambda: events.create(testing.db),
RegexSQL("^CREATE TABLE events"),
AllOf(
CompiledSQL(
"CREATE UNIQUE INDEX ix_events_name ON events (name)"
),
CompiledSQL(
"CREATE INDEX ix_events_location ON events (location)"
),
CompiledSQL(
"CREATE UNIQUE INDEX sport_announcer ON events "
"(sport, announcer)"
),
CompiledSQL("CREATE INDEX idx_winners ON events (winner)"),
),
)
@testing.provide_metadata
def test_index_functional_create(self):
metadata = self.metadata
t = Table(
"sometable",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
Index("myindex", t.c.data.desc())
self.assert_sql_execution(
testing.db,
lambda: t.create(testing.db),
CompiledSQL(
"CREATE TABLE sometable (id INTEGER NOT NULL, "
"data VARCHAR(50), PRIMARY KEY (id))"
),
CompiledSQL("CREATE INDEX myindex ON sometable (data DESC)"),
)
| ConstraintGenTest |
python | bokeh__bokeh | src/bokeh/models/filters.py | {
"start": 3255,
"end": 3618
} | class ____(Filter):
""" Base class for composite filters. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
operands = Required(NonEmpty(Seq(Instance(Filter))), help="""
A collection of filters to perform an operation on.
""")
| CompositeFilter |
python | weaviate__weaviate-python-client | weaviate/collections/classes/cluster.py | {
"start": 739,
"end": 990
} | class ____(Generic[Sh, St]):
"""The properties of a single node in the cluster."""
git_hash: str
name: str
shards: Sh
stats: St
status: str
version: str
NodeVerbose = Node[Shards, Stats]
NodeMinimal = Node[None, None]
| Node |
python | PyCQA__pylint | pylint/testutils/functional/test_file.py | {
"start": 737,
"end": 1315
} | class ____(TypedDict):
min_pyver: tuple[int, ...]
max_pyver: tuple[int, ...]
min_pyver_end_position: tuple[int, ...]
requires: list[str]
except_implementations: list[str]
exclude_platforms: list[str]
exclude_from_minimal_messages_config: bool
# mypy need something literal, we can't create this dynamically from TestFileOptions
POSSIBLE_TEST_OPTIONS = {
"min_pyver",
"max_pyver",
"min_pyver_end_position",
"requires",
"except_implementations",
"exclude_platforms",
"exclude_from_minimal_messages_config",
}
| TestFileOptions |
python | spulec__freezegun | tests/test_datetimes.py | {
"start": 21051,
"end": 21239
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
pass
@classmethod
def tearDownClass(cls) -> None:
pass
| BaseInheritanceFreezableTests |
python | huggingface__transformers | src/transformers/models/vaultgemma/modeling_vaultgemma.py | {
"start": 2824,
"end": 7224
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_activation]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
dropout: float = 0.0,
scaling: Optional[float] = None,
softcap: Optional[float] = None,
**kwargs,
) -> tuple[torch.Tensor, torch.Tensor]:
if scaling is None:
scaling = module.head_dim**-0.5
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if softcap is not None:
attn_weights = attn_weights / softcap
attn_weights = torch.tanh(attn_weights)
attn_weights = attn_weights * softcap
if attention_mask is not None: # no matter the length, we just slice it
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| VaultGemmaMLP |
python | getsentry__sentry | src/sentry/discover/compare_tables.py | {
"start": 1476,
"end": 12355
} | class ____(TypedDict):
passed: bool
reason: CompareTableResult
fields: list[str] | None
widget_query: DashboardWidgetQuery
mismatches: list[str] | None
query: str | None
def compare_table_results(
metrics_query_result: EventsResponse, eap_result: EAPResponse
) -> tuple[bool, list[str], CompareTableResult]:
eap_data_row = eap_result["data"][0] if len(eap_result["data"]) > 0 else {}
metrics_data_row = (
metrics_query_result["data"][0] if len(metrics_query_result["data"]) > 0 else {}
)
metrics_fields = metrics_query_result["meta"]["fields"]
mismatches: list[str] = []
no_metrics_data = len(metrics_data_row) == 0
no_eap_data = len(eap_data_row) == 0
# if there's no metrics data we know there are mismatches,
# we will check the EAP data for the names of the mismatched fields
if no_metrics_data:
return (False, [], CompareTableResult.NO_DATA)
if no_eap_data:
return (False, [], CompareTableResult.QUERY_FAILED)
try:
for field, data in metrics_data_row.items():
if is_equation(field):
continue
[translated_field, *rest], dropped_columns = translate_columns([field])
# if we're dropping the field in eap then we can skip checking for mismatches
if len(dropped_columns) > 0:
continue
arg: str | None = None
if match := is_function(field):
function = match.group("function")
args = parse_arguments(function, match.group("columns"))
if args:
arg = args[0]
if data is None or (
arg and (arg == "transaction.duration" or is_measurement(arg)) and data == 0
):
continue
if eap_data_row[translated_field] is None:
logger.info("Field %s not found in EAP response", field)
mismatches.append(field)
except KeyError:
# if there is an error trying to access fields in the EAP data,
# return all queried fields as mismatches
all_fields_mismatch: list[str] = []
for field, data in metrics_fields.items():
if is_equation(field):
continue
all_fields_mismatch.append(field)
return (
len(all_fields_mismatch) == 0,
all_fields_mismatch,
CompareTableResult.FIELD_NOT_FOUND,
)
return (
len(mismatches) == 0,
mismatches,
CompareTableResult.FIELD_NOT_FOUND if len(mismatches) > 0 else CompareTableResult.PASSED,
)
@sentry_sdk.tracing.trace
def compare_tables_for_dashboard_widget_queries(
widget_query: DashboardWidgetQuery,
) -> CompareTableResultDict:
widget: DashboardWidget = widget_query.widget
dashboard: Dashboard = widget.dashboard
organization: Organization = dashboard.organization
# if the dashboard has no projects, we will use all projects in the organization
projects = dashboard.projects.all() or Project.objects.filter(
organization_id=dashboard.organization.id, status=ObjectStatus.ACTIVE
)
widget_viewer_url = (
generate_organization_url(organization.slug)
+ f"/dashboard/{dashboard.id}/widget/{widget.id}/"
)
if len(list(projects)) == 0:
with sentry_sdk.isolation_scope() as scope:
scope.set_tag("passed", False)
scope.set_tag("failed_reason", CompareTableResult.NO_PROJECT.value)
scope.set_tag(
"widget_viewer_url",
widget_viewer_url,
)
sentry_sdk.capture_message(
"dashboard_widget_comparison_done", level="info", scope=scope
)
return {
"passed": False,
"reason": CompareTableResult.NO_PROJECT,
"fields": None,
"widget_query": widget_query,
"mismatches": None,
"query": None,
}
aggregates = widget_query.aggregates
columns = widget_query.columns
query_fields = widget_query.fields
fields_set = set()
if aggregates:
fields_set.update(aggregates)
if columns:
fields_set.update(columns)
if query_fields:
fields_set.update(query_fields)
fields = list(fields_set)
if len(fields) == 0:
with sentry_sdk.isolation_scope() as scope:
scope.set_tag("passed", False)
scope.set_tag("failed_reason", CompareTableResult.NO_FIELDS.value)
scope.set_tag("widget_fields", fields)
scope.set_tag(
"widget_viewer_url",
widget_viewer_url,
)
sentry_sdk.capture_message(
"dashboard_widget_comparison_done", level="info", scope=scope
)
return {
"passed": False,
"reason": CompareTableResult.NO_FIELDS,
"fields": None,
"widget_query": widget_query,
"mismatches": None,
"query": None,
}
selected_columns = _get_field_list(fields)
equations = [equation for equation in _get_equation_list(widget_query.fields or []) if equation]
query = widget_query.conditions
environment_names: str | list[str] = (
dashboard.filters.get("environment", []) if dashboard.filters else []
)
if environment_names:
environments = list(
Environment.objects.filter(
name__in=to_list(environment_names), organization_id=organization.id
)
)
else:
environments = []
snuba_params = SnubaParams(
environments=environments,
projects=list(projects),
organization=organization,
stats_period="7d",
)
has_metrics_error = False
has_eap_error = False
try:
metrics_query_result = metrics_enhanced_performance.query(
selected_columns,
query,
snuba_params,
equations,
orderby=None,
offset=None,
limit=1,
referrer="dashboards.transactions_spans_comparison",
transform_alias_to_input_format=True,
fallback_to_transactions=True,
)
except Exception as e:
logger.info("Metrics query failed: %s", e)
has_metrics_error = True
eap_query_parts, dropped_fields = translate_mep_to_eap(
QueryParts(
query=query,
selected_columns=selected_columns,
equations=equations,
orderby=None,
)
)
try:
eap_result = Spans.run_table_query(
params=snuba_params,
query_string=eap_query_parts["query"],
selected_columns=eap_query_parts["selected_columns"],
orderby=None,
offset=0,
limit=1,
referrer="dashboards.transactions_spans_comparison",
config=SearchResolverConfig(),
sampling_mode="NORMAL",
)
except Exception as e:
logger.info("EAP query failed: %s", e)
has_eap_error = True
if has_metrics_error and has_eap_error:
with sentry_sdk.isolation_scope() as scope:
scope.set_tag("passed", False)
scope.set_tag("failed_reason", CompareTableResult.BOTH_FAILED.value)
scope.set_tag("widget_filter_query", query)
scope.set_tag("widget_fields", str(fields))
scope.set_tag(
"widget_viewer_url",
widget_viewer_url,
)
sentry_sdk.capture_message(
"dashboard_widget_comparison_done", level="info", scope=scope
)
return {
"passed": False,
"reason": CompareTableResult.BOTH_FAILED,
"fields": fields,
"widget_query": widget_query,
"mismatches": None,
"query": query,
}
elif has_metrics_error:
with sentry_sdk.isolation_scope() as scope:
scope.set_tag("passed", False)
scope.set_tag("failed_reason", CompareTableResult.METRICS_FAILED.value)
scope.set_tag("widget_filter_query", query)
scope.set_tag("widget_fields", str(fields))
scope.set_tag(
"widget_viewer_url",
widget_viewer_url,
)
sentry_sdk.capture_message(
"dashboard_widget_comparison_done", level="info", scope=scope
)
return {
"passed": False,
"reason": CompareTableResult.METRICS_FAILED,
"fields": fields,
"widget_query": widget_query,
"mismatches": None,
"query": query,
}
elif has_eap_error:
with sentry_sdk.isolation_scope() as scope:
scope.set_tag("passed", False)
scope.set_tag("failed_reason", CompareTableResult.EAP_FAILED.value)
scope.set_tag("widget_filter_query", query)
scope.set_tag("widget_fields", str(fields))
scope.set_tag(
"widget_viewer_url",
widget_viewer_url,
)
sentry_sdk.capture_message(
"dashboard_widget_comparison_done", level="info", scope=scope
)
return {
"passed": False,
"reason": CompareTableResult.EAP_FAILED,
"fields": fields,
"widget_query": widget_query,
"mismatches": fields,
"query": query,
}
else:
passed, mismatches, reason = compare_table_results(metrics_query_result, eap_result)
if passed:
return {
"passed": True,
"reason": CompareTableResult.PASSED,
"fields": fields,
"widget_query": widget_query,
"mismatches": mismatches,
"query": query,
}
else:
with sentry_sdk.isolation_scope() as scope:
scope.set_tag("passed", False)
scope.set_tag("failed_reason", reason.value)
scope.set_tag("mismatches", str(mismatches))
scope.set_tag("widget_filter_query", query)
scope.set_tag("widget_fields", str(fields))
scope.set_tag(
"widget_viewer_url",
widget_viewer_url,
)
sentry_sdk.capture_message(
"dashboard_widget_comparison_done", level="info", scope=scope
)
return {
"passed": False,
"reason": reason,
"fields": fields,
"widget_query": widget_query,
"mismatches": mismatches,
"query": query,
}
| CompareTableResultDict |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/many_conditional_deps/package.py | {
"start": 149,
"end": 713
} | class ____(Package):
"""Simple package with one optional dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version("1.0")
variant("cuda", description="enable foo dependencies", default=True)
variant("rocm", description="enable bar dependencies", default=True)
for i in range(30):
depends_on(f"gpu-dep +cuda cuda_arch={i}", when=f"+cuda cuda_arch={i}")
for i in range(30):
depends_on(f"gpu-dep +rocm amdgpu_target={i}", when=f"+rocm amdgpu_target={i}")
| ManyConditionalDeps |
python | langchain-ai__langchain | libs/core/langchain_core/prompts/chat.py | {
"start": 11186,
"end": 11259
} | class ____(TypedDict, total=False):
text: str | dict
| _TextTemplateParam |
python | huggingface__transformers | src/transformers/models/emu3/modeling_emu3.py | {
"start": 21207,
"end": 23757
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Emu3VQVAEConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
# for compatibility with the attention interface
self.num_key_value_groups = 1
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
queries,
keys,
values,
attention_mask,
is_causal=self.is_causal,
scaling=self.scale,
dropout=0.0 if not self.training else self.dropout,
)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
| Emu3VQVAEAttentionBlock |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/layout.py | {
"start": 29175,
"end": 32154
} | class ____(LayoutOperatorBase):
"""Operator for torch.chunk() operation."""
def __init__(self):
"""Initialize ChunkOperator."""
super().__init__("chunk")
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.chunk"
def can_produce(self, output_spec: Spec) -> bool:
"""Chunk can produce any tensor output."""
if not isinstance(output_spec, TensorSpec):
return False
# Chunk can produce any tensor with at least one dimension
return len(output_spec.size) > 0
def _get_chunk_params(self, output_spec: TensorSpec) -> tuple[int, int]:
"""Get consistent chunk parameters based on output spec.
This method uses the output_spec to deterministically choose chunk parameters,
ensuring that fuzz_inputs_specs and codegen make the same choices.
"""
# Use output_spec properties to seed random choices
# This ensures both methods make the same choices
seed_value = hash((output_spec.size, output_spec.dtype))
rng = random.Random(seed_value)
chunk_dim = rng.randint(0, len(output_spec.size) - 1)
num_chunks = rng.randint(2, 4)
return chunk_dim, num_chunks
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input spec for chunk operation."""
if not isinstance(output_spec, TensorSpec):
raise ValueError("ChunkOperator can only produce TensorSpec outputs")
# torch.chunk() splits a tensor into chunks along a dimension
# The output will be one of the chunks from the split
if len(output_spec.size) == 0:
raise ValueError("Cannot chunk a scalar tensor")
chunk_dim, num_chunks = self._get_chunk_params(output_spec)
# Calculate input size: input will have chunk_dim with size = output_size * num_chunks
# torch.chunk() tries to split evenly, but the last chunk may be smaller
input_size = list(output_spec.size)
input_size[chunk_dim] = output_spec.size[chunk_dim] * num_chunks
# Create input tensor spec
from torchfuzz.tensor_fuzzer import fuzz_valid_stride
input_stride = fuzz_valid_stride(tuple(input_size))
return [
TensorSpec(
size=tuple(input_size), stride=input_stride, dtype=output_spec.dtype
)
]
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for chunk operation."""
if not isinstance(output_spec, TensorSpec):
raise ValueError("ChunkOperator can only produce TensorSpec outputs")
chunk_dim, num_chunks = self._get_chunk_params(output_spec)
# Generate the chunk operation and select the first chunk
return f"{output_name} = torch.chunk({input_names[0]}, {num_chunks}, dim={chunk_dim})[0]"
| ChunkOperator |
python | viewflow__viewflow | viewflow/forms/renderers.py | {
"start": 14252,
"end": 14873
} | class ____(InputRenderer):
tag = "vf-field-autocomplete"
def create_root(self, context):
root = super().create_root(context)
field = self.bound_field.field
value_label = ""
try:
obj = field.to_python(self.bound_field.value())
if obj is not None:
value_label = field.label_from_instance(obj)
if value_label is None:
value_label = ""
except ValidationError:
pass
root.attrib["type"] = "hidden"
root.attrib["value-label"] = value_label
return root
| AjaxModelSelectRenderer |
python | django__django | tests/test_client_regress/tests.py | {
"start": 50200,
"end": 51558
} | class ____(SimpleTestCase):
"""Regression tests for #10571."""
def test_simple_payload(self):
"""A simple ASCII-only text can be POSTed."""
text = "English: mountain pass"
response = self.client.post(
"/parse_encoded_text/", text, content_type="text/plain"
)
self.assertEqual(response.content, text.encode())
def test_utf8_payload(self):
"""Non-ASCII data encoded as UTF-8 can be POSTed."""
text = "dog: собака"
response = self.client.post(
"/parse_encoded_text/", text, content_type="text/plain; charset=utf-8"
)
self.assertEqual(response.content, text.encode())
def test_utf16_payload(self):
"""Non-ASCII data encoded as UTF-16 can be POSTed."""
text = "dog: собака"
response = self.client.post(
"/parse_encoded_text/", text, content_type="text/plain; charset=utf-16"
)
self.assertEqual(response.content, text.encode("utf-16"))
def test_non_utf_payload(self):
"""Non-ASCII data as a non-UTF based encoding can be POSTed."""
text = "dog: собака"
response = self.client.post(
"/parse_encoded_text/", text, content_type="text/plain; charset=koi8-r"
)
self.assertEqual(response.content, text.encode("koi8-r"))
| PayloadEncodingTests |
python | pydantic__pydantic | pydantic-core/tests/test_json.py | {
"start": 11538,
"end": 12034
} | class ____:
def __str__(self):
return 'Foobar.__str__'
def __hash__(self):
return 1
def test_json_key_fallback():
x = {FoobarHash(): 1}
assert to_jsonable_python(x, serialize_unknown=True) == {'Foobar.__str__': 1}
assert to_jsonable_python(x, fallback=fallback_func) == {'fallback:FoobarHash': 1}
assert to_json(x, serialize_unknown=True) == b'{"Foobar.__str__":1}'
assert to_json(x, fallback=fallback_func) == b'{"fallback:FoobarHash":1}'
| FoobarHash |
python | ipython__ipython | IPython/core/prefilter.py | {
"start": 17602,
"end": 18206
} | class ____(PrefilterChecker):
priority = Integer(900).tag(config=True)
def check(self, line_info):
"""If the 'rest' of the line begins with a function call or pretty much
any python operator, we should simply execute the line (regardless of
whether or not there's a possible autocall expansion). This avoids
spurious (and very confusing) geattr() accesses."""
if line_info.the_rest and line_info.the_rest[0] in "!=()<>,+*/%^&|":
return self.prefilter_manager.get_handler_by_name("normal")
else:
return None
| PythonOpsChecker |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/boolean_test.py | {
"start": 1247,
"end": 1805
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, M, N, device):
self.inputs = {
"input_one": torch.randint(0, 2, (M, N), device=device, dtype=torch.bool)
}
self.set_module_name("any")
def forward(self, input_one):
return torch.any(input_one)
any_configs = op_bench.cross_product_configs(
M=[8, 256],
N=[256, 16],
device=["cpu", "cuda"],
tags=["any"],
)
op_bench.generate_pt_test(any_configs, AnyBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| AnyBenchmark |
python | sqlalchemy__sqlalchemy | test/orm/test_eager_relations.py | {
"start": 223558,
"end": 230169
} | class ____(fixtures.MappedTest):
"""test that the contains_eager() option doesn't bleed
into a secondary load."""
run_inserts = "once"
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table(
"base",
metadata,
Column("id", Integer, primary_key=True),
Column("type", String(50), nullable=False),
)
Table(
"child1",
metadata,
Column("id", Integer, ForeignKey("base.id"), primary_key=True),
Column(
"child2id", Integer, ForeignKey("child2.id"), nullable=False
),
)
Table(
"child2",
metadata,
Column("id", Integer, ForeignKey("base.id"), primary_key=True),
)
Table(
"related",
metadata,
Column("id", Integer, ForeignKey("base.id"), primary_key=True),
)
@classmethod
def setup_mappers(cls):
child1, child2, base, related = (
cls.tables.child1,
cls.tables.child2,
cls.tables.base,
cls.tables.related,
)
class Base(cls.Comparable):
pass
class Child1(Base):
pass
class Child2(Base):
pass
class Related(cls.Comparable):
pass
cls.mapper_registry.map_imperatively(
Base,
base,
polymorphic_on=base.c.type,
properties={"related": relationship(Related, uselist=False)},
)
cls.mapper_registry.map_imperatively(
Child1,
child1,
inherits=Base,
polymorphic_identity="child1",
properties={
"child2": relationship(
Child2,
primaryjoin=child1.c.child2id == base.c.id,
foreign_keys=child1.c.child2id,
)
},
)
cls.mapper_registry.map_imperatively(
Child2, child2, inherits=Base, polymorphic_identity="child2"
)
cls.mapper_registry.map_imperatively(Related, related)
@classmethod
def insert_data(cls, connection):
child1, child2, base, related = (
cls.tables.child1,
cls.tables.child2,
cls.tables.base,
cls.tables.related,
)
connection.execute(
base.insert(),
[
{"id": 1, "type": "child1"},
{"id": 2, "type": "child1"},
{"id": 3, "type": "child1"},
{"id": 4, "type": "child2"},
{"id": 5, "type": "child2"},
{"id": 6, "type": "child2"},
],
)
connection.execute(child2.insert(), [{"id": 4}, {"id": 5}, {"id": 6}])
connection.execute(
child1.insert(),
[
{"id": 1, "child2id": 4},
{"id": 2, "child2id": 5},
{"id": 3, "child2id": 6},
],
)
connection.execute(
related.insert(),
[{"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}, {"id": 6}],
)
def test_contains_eager(self):
Child1, Related = self.classes.Child1, self.classes.Related
sess = fixture_session()
child1s = (
sess.query(Child1)
.join(Child1.related)
.options(sa.orm.contains_eager(Child1.related))
.order_by(Child1.id)
)
def go():
eq_(
child1s.all(),
[
Child1(id=1, related=Related(id=1)),
Child1(id=2, related=Related(id=2)),
Child1(id=3, related=Related(id=3)),
],
)
self.assert_sql_count(testing.db, go, 1)
c1 = child1s[0]
self.assert_sql_execution(
testing.db,
lambda: c1.child2,
CompiledSQL(
"SELECT child2.id, base.id, "
"base.type "
"FROM base JOIN child2 ON base.id = child2.id "
"WHERE base.id = :pk_1",
{"pk_1": 4},
),
)
def test_joinedload_on_other(self):
Child1, Related = self.classes.Child1, self.classes.Related
sess = fixture_session()
child1s = (
sess.query(Child1)
.join(Child1.related)
.options(sa.orm.joinedload(Child1.related))
.order_by(Child1.id)
)
def go():
eq_(
child1s.all(),
[
Child1(id=1, related=Related(id=1)),
Child1(id=2, related=Related(id=2)),
Child1(id=3, related=Related(id=3)),
],
)
self.assert_sql_count(testing.db, go, 1)
c1 = child1s[0]
self.assert_sql_execution(
testing.db,
lambda: c1.child2,
CompiledSQL(
"SELECT child2.id, base.id, "
"base.type "
"FROM base JOIN child2 ON base.id = child2.id "
"WHERE base.id = :pk_1",
{"pk_1": 4},
),
)
def test_joinedload_on_same(self):
Child1, Child2, Related = (
self.classes.Child1,
self.classes.Child2,
self.classes.Related,
)
sess = fixture_session()
child1s = (
sess.query(Child1)
.join(Child1.related)
.options(sa.orm.joinedload(Child1.child2, Child2.related))
.order_by(Child1.id)
)
def go():
eq_(
child1s.all(),
[
Child1(id=1, related=Related(id=1)),
Child1(id=2, related=Related(id=2)),
Child1(id=3, related=Related(id=3)),
],
)
self.assert_sql_count(testing.db, go, 4)
c1 = child1s[0]
# this *does* joinedload
self.assert_sql_execution(
testing.db,
lambda: c1.child2,
CompiledSQL(
"SELECT child2.id, base.id, "
"base.type, "
"related_1.id FROM base JOIN child2 "
"ON base.id = child2.id "
"LEFT OUTER JOIN related AS related_1 "
"ON base.id = related_1.id WHERE base.id = :pk_1",
{"pk_1": 4},
),
)
| SecondaryOptionsTest |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 31593,
"end": 36243
} | class ____(TestCase):
def setUp(self):
self.foreign_key_target = ForeignKeyTargetModel.objects.create(
name='foreign_key'
)
self.one_to_one_target = OneToOneTargetModel.objects.create(
name='one_to_one'
)
self.many_to_many_targets = [
ManyToManyTargetModel.objects.create(
name='many_to_many (%d)' % idx
) for idx in range(3)
]
self.instance = RelationalModel.objects.create(
foreign_key=self.foreign_key_target,
one_to_one=self.one_to_one_target,
)
self.instance.many_to_many.set(self.many_to_many_targets)
def test_pk_retrieval(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RelationalModel
fields = '__all__'
serializer = TestSerializer(self.instance)
expected = {
'id': self.instance.pk,
'foreign_key': self.foreign_key_target.pk,
'one_to_one': self.one_to_one_target.pk,
'many_to_many': [item.pk for item in self.many_to_many_targets],
'through': []
}
self.assertEqual(serializer.data, expected)
def test_pk_create(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RelationalModel
fields = '__all__'
new_foreign_key = ForeignKeyTargetModel.objects.create(
name='foreign_key'
)
new_one_to_one = OneToOneTargetModel.objects.create(
name='one_to_one'
)
new_many_to_many = [
ManyToManyTargetModel.objects.create(
name='new many_to_many (%d)' % idx
) for idx in range(3)
]
data = {
'foreign_key': new_foreign_key.pk,
'one_to_one': new_one_to_one.pk,
'many_to_many': [item.pk for item in new_many_to_many],
}
# Serializer should validate okay.
serializer = TestSerializer(data=data)
assert serializer.is_valid()
# Creating the instance, relationship attributes should be set.
instance = serializer.save()
assert instance.foreign_key.pk == new_foreign_key.pk
assert instance.one_to_one.pk == new_one_to_one.pk
assert [
item.pk for item in instance.many_to_many.all()
] == [
item.pk for item in new_many_to_many
]
assert list(instance.through.all()) == []
# Representation should be correct.
expected = {
'id': instance.pk,
'foreign_key': new_foreign_key.pk,
'one_to_one': new_one_to_one.pk,
'many_to_many': [item.pk for item in new_many_to_many],
'through': []
}
self.assertEqual(serializer.data, expected)
def test_pk_update(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RelationalModel
fields = '__all__'
new_foreign_key = ForeignKeyTargetModel.objects.create(
name='foreign_key'
)
new_one_to_one = OneToOneTargetModel.objects.create(
name='one_to_one'
)
new_many_to_many = [
ManyToManyTargetModel.objects.create(
name='new many_to_many (%d)' % idx
) for idx in range(3)
]
data = {
'foreign_key': new_foreign_key.pk,
'one_to_one': new_one_to_one.pk,
'many_to_many': [item.pk for item in new_many_to_many],
}
# Serializer should validate okay.
serializer = TestSerializer(self.instance, data=data)
assert serializer.is_valid()
# Creating the instance, relationship attributes should be set.
instance = serializer.save()
assert instance.foreign_key.pk == new_foreign_key.pk
assert instance.one_to_one.pk == new_one_to_one.pk
assert [
item.pk for item in instance.many_to_many.all()
] == [
item.pk for item in new_many_to_many
]
assert list(instance.through.all()) == []
# Representation should be correct.
expected = {
'id': self.instance.pk,
'foreign_key': new_foreign_key.pk,
'one_to_one': new_one_to_one.pk,
'many_to_many': [item.pk for item in new_many_to_many],
'through': []
}
self.assertEqual(serializer.data, expected)
# Tests for bulk create using `ListSerializer`.
| TestIntegration |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 88755,
"end": 88937
} | class ____(CIntType):
to_py_function = "PyLong_FromSsize_t"
from_py_function = "__Pyx_PyIndex_AsSsize_t"
def sign_and_name(self):
return "Py_ssize_t"
| CPySSizeTType |
python | facebook__pyre-check | tools/typeshed_patcher/tests/patch_specs_test.py | {
"start": 535,
"end": 1082
} | class ____(testslide.TestCase):
def test_qualified_name(self) -> None:
def assert_name_preserved(name: str) -> None:
self.assertEqual(QualifiedName.from_string(name).to_string(), name)
assert_name_preserved("")
assert_name_preserved("foo")
assert_name_preserved("foo.bar")
assert_name_preserved("foo.bar.baz")
self.assertTrue(QualifiedName.from_string("").is_empty())
self.assertFalse(QualifiedName.from_string("foo").is_empty())
T = TypeVar("T")
U = TypeVar("U")
| PatchTest |
python | getsentry__sentry | tests/sentry/conf/test_kafka_definition.py | {
"start": 900,
"end": 2224
} | class ____(TestCase):
def test_exception_on_invalid_consumer_definition(self) -> None:
invalid_definitions: list[ConsumerDefinition] = [
{
"topic": Topic.INGEST_METRICS,
"strategy_factory": "sentry.sentry_metrics.consumers.indexer.parallel.MetricsConsumerStrategyFactory",
"static_args": {
"ingest_profile": "release-health",
},
"dlq_max_invalid_ratio": 0.01,
"dlq_max_consecutive_count": 1000,
}
]
for invalid_definition in invalid_definitions:
with pytest.raises(ValueError):
validate_consumer_definition(invalid_definition)
def test_kafka_consumer_definition_validity(self) -> None:
for definition in KAFKA_CONSUMERS.values():
validate_consumer_definition(definition)
def test_get_topic_codec() -> None:
"""Test that get_topic_codec works with Topic enum values."""
# Test with a known topic
codec = get_topic_codec(Topic.BUFFERED_SEGMENTS)
assert codec is not None
# Should be equivalent to calling sentry_kafka_schemas.get_codec directly
expected_codec = sentry_kafka_schemas.get_codec(Topic.BUFFERED_SEGMENTS.value)
assert codec == expected_codec
| ConsumersDefinitionTest |
python | pypa__warehouse | warehouse/email/ses/models.py | {
"start": 6212,
"end": 6924
} | class ____(db.Model):
__tablename__ = "ses_emails"
created: Mapped[datetime_now]
status: Mapped[Enum] = mapped_column(
Enum(EmailStatuses, values_callable=lambda x: [e.value for e in x]),
server_default=EmailStatuses.Accepted.value,
)
message_id: Mapped[str] = mapped_column(unique=True, index=True)
from_: Mapped[str] = mapped_column("from")
to: Mapped[str] = mapped_column(index=True)
subject: Mapped[str]
missing: Mapped[bool_false]
# Relationships!
events: Mapped[list["Event"]] = orm.relationship(
back_populates="email",
cascade="all, delete-orphan",
lazy=False,
order_by=lambda: Event.created,
)
| EmailMessage |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess1.py | {
"start": 988,
"end": 1080
} | class ____:
def __get__(self, obj: Any, cls: type[_T]) -> _T:
return cls()
| Factory |
python | MongoEngine__mongoengine | mongoengine/fields.py | {
"start": 78256,
"end": 78629
} | class ____(GeoJsonBaseField):
"""A GeoJSON field storing a longitude and latitude coordinate.
The data is represented as:
.. code-block:: js
{'type' : 'Point' ,
'coordinates' : [x, y]}
You can either pass a dict with the full information or a list
to set the value.
Requires mongodb >= 2.4
"""
_type = "Point"
| PointField |
python | automl__auto-sklearn | test/test_pipeline/components/feature_preprocessing/test_extra_trees_regression.py | {
"start": 343,
"end": 3044
} | class ____(PreprocessingTestCase):
def test_default_configuration(self):
transformation, original = _test_preprocessing(ExtraTreesPreprocessorRegression)
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertFalse((transformation == 0).all())
def test_default_configuration_regression(self):
for i in range(2):
X_train, Y_train, X_test, Y_test = get_dataset(
dataset="boston", make_sparse=False
)
configuration_space = (
ExtraTreesPreprocessorRegression.get_hyperparameter_search_space()
)
default = configuration_space.get_default_configuration()
preprocessor = ExtraTreesPreprocessorRegression(
random_state=1, **{hp_name: default[hp_name] for hp_name in default}
)
preprocessor.fit(X_train, Y_train)
X_train_trans = preprocessor.transform(X_train)
X_test_trans = preprocessor.transform(X_test)
# fit a regressor on top
regressor = ExtraTreesRegressor(random_state=1)
predictor = regressor.fit(X_train_trans, Y_train)
predictions = predictor.predict(X_test_trans)
error = sklearn.metrics.mean_squared_error(predictions, Y_test)
self.assertAlmostEqual(error, 18.074952764044944, places=2)
def test_default_configuration_classify_sparse(self):
for i in range(2):
X_train, Y_train, X_test, Y_test = get_dataset(
dataset="boston", make_sparse=True
)
configuration_space = (
ExtraTreesPreprocessorRegression.get_hyperparameter_search_space()
)
default = configuration_space.get_default_configuration()
preprocessor = ExtraTreesPreprocessorRegression(
random_state=1, **{hp_name: default[hp_name] for hp_name in default}
)
preprocessor.fit(X_train, Y_train)
X_train_trans = preprocessor.transform(X_train)
X_test_trans = preprocessor.transform(X_test)
# fit a regressor on top
regressor = ExtraTreesRegressor(random_state=1)
predictor = regressor.fit(X_train_trans, Y_train)
predictions = predictor.predict(X_test_trans)
error = sklearn.metrics.mean_squared_error(predictions, Y_test)
self.assertAlmostEqual(error, 55.69613978965742, places=2)
def test_preprocessing_dtype(self):
super(ExtraTreesRegressionComponentTest, self)._test_preprocessing_dtype(
ExtraTreesPreprocessorRegression
)
| ExtraTreesRegressionComponentTest |
python | pytorch__pytorch | torch/jit/_trace.py | {
"start": 23717,
"end": 30829
} | class ____(Warning):
@staticmethod
def ignore_lib_warnings():
# We ignore warnings from all submodules excluding the JIT, because we need them e.g. for _check_trace
warnings.filterwarnings(
"ignore", category=TracerWarning, module="torch.(?!jit)"
)
warnings.filterwarnings("ignore", "torch::jit::fuser::cuda")
# We ignore the tracer warnings coming form inside the library, because all our shape
# checks in nn will trigger them.
TracerWarning.ignore_lib_warnings()
torch._C._tracer_warn_use_python()
def make_tuple(example_inputs):
if isinstance(example_inputs, (torch.Tensor, dict)):
return (example_inputs,)
# done primarily so that weird iterables fail here and not pybind11 code
if not isinstance(example_inputs, tuple):
return tuple(example_inputs)
return example_inputs
def make_module(mod, _module_class, _compilation_unit):
if isinstance(mod, ScriptModule):
return mod
elif torch._jit_internal.module_has_exports(mod):
infer_methods_stubs_fn = torch.jit._recursive.make_stubs_from_exported_methods
return torch.jit._recursive.create_script_module(
mod, infer_methods_stubs_fn, share_types=False, is_tracing=True
)
else:
if _module_class is None:
_module_class = TopLevelTracedModule
return _module_class(mod, _compilation_unit=_compilation_unit)
def wrap_check_inputs(check_inputs):
if check_inputs is None:
return None
return [{"forward": c} for c in check_inputs]
def analyze_ts_result_with_export_result(export, trace):
import torch.utils._pytree as pytree
flat_export = pytree.tree_leaves(export)
flat_trace = pytree.tree_leaves(trace)
for orig, loaded in zip(flat_export, flat_trace):
if orig.layout != loaded.layout:
return False
# mkldnn is not supported for torch.allclose
if orig.layout == torch._mkldnn: # type: ignore[attr-defined]
return True
if type(orig) is not type(loaded):
return False
if isinstance(orig, torch._subclasses.FakeTensor):
# Skip for FakeTensor.
return True
elif isinstance(orig, torch.Tensor):
if orig.dtype != loaded.dtype:
return False
if not torch.allclose(orig, loaded):
return False
else:
if orig != loaded:
return False
return True
def _trace_impl(
func,
example_inputs=None,
optimize=None,
check_trace=True,
check_inputs=None,
check_tolerance=1e-5,
strict=True,
_force_outplace=False,
_module_class=None,
_compilation_unit=_python_cu,
example_kwarg_inputs=None,
_store_inputs=True,
):
if isinstance(func, torch.jit.ScriptModule):
# it is hard to trace it because the forward method on ScriptModule is already defined, so it
# would result in an error.
warnings.warn(
"The input to trace is already a ScriptModule, tracing it is a no-op. Returning the object as is.",
stacklevel=2,
)
return func
if isinstance(func, torch.nn.Module):
if example_inputs is None:
if isinstance(example_kwarg_inputs, dict):
example_inputs = example_kwarg_inputs
else:
raise RuntimeError("example_kwarg_inputs should be a dict")
return trace_module(
func,
{"forward": example_inputs},
None,
check_trace,
wrap_check_inputs(check_inputs),
check_tolerance,
strict,
_force_outplace,
_module_class,
example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict),
_store_inputs=_store_inputs,
)
if (
hasattr(func, "__self__")
and isinstance(func.__self__, torch.nn.Module)
and func.__name__ == "forward"
):
if example_inputs is None:
if isinstance(example_kwarg_inputs, dict):
example_inputs = example_kwarg_inputs
else:
raise RuntimeError("example_kwarg_inputs should be a dict")
return trace_module(
func.__self__,
{"forward": example_inputs},
None,
check_trace,
wrap_check_inputs(check_inputs),
check_tolerance,
strict,
_force_outplace,
_module_class,
example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict),
_store_inputs=_store_inputs,
)
# Special case for common case of passing a single Tensor
if (
isinstance(example_inputs, (torch.Tensor, dict))
and example_kwarg_inputs is None
):
example_inputs = (example_inputs,)
# done primarily so that weird iterables fail here and not pybind11 code
elif example_kwarg_inputs is None and not isinstance(example_inputs, tuple):
# pyrefly: ignore [bad-argument-type]
example_inputs = tuple(example_inputs)
var_lookup_fn = _create_interpreter_name_lookup_fn(0)
if hasattr(func, "__self__") and isinstance(func.__self__, torch.nn.Module):
raise AttributeError(
"trace doesn't support compiling individual module's functions.\n"
"Please use trace_module"
)
name = _qualified_name(func)
if isinstance(example_kwarg_inputs, dict):
example_inputs = example_kwarg_inputs
traced = torch._C._create_function_from_trace_with_dict(
name,
func,
example_kwarg_inputs,
var_lookup_fn,
strict,
_force_outplace,
get_callable_argument_names(func),
)
else:
traced = torch._C._create_function_from_trace(
name,
func,
# pyrefly: ignore [bad-argument-type]
example_inputs,
var_lookup_fn,
strict,
_force_outplace,
get_callable_argument_names(func),
)
# Check the trace against new traces created from user-specified inputs
if check_trace:
if check_inputs is not None:
_check_trace(
check_inputs,
func,
traced,
check_tolerance,
strict,
_force_outplace,
False,
_module_class,
example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict),
)
else:
_check_trace(
[example_inputs],
func,
traced,
check_tolerance,
strict,
_force_outplace,
False,
_module_class,
example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict),
)
# Allow torch.compile() to inline
traced._torchdynamo_inline = func # type: ignore[attr-defined]
return traced
| TracerWarning |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py | {
"start": 84321,
"end": 91551
} | class ____:
@mock.patch(VERTEX_AI_LINKS_PATH.format("VertexAIBatchPredictionJobLink.persist"))
@mock.patch(VERTEX_AI_PATH.format("batch_prediction_job.BatchPredictionJobHook"))
def test_execute(self, mock_hook, mock_link_persist):
mock_job = mock_hook.return_value.submit_batch_prediction_job.return_value
mock_job.name = TEST_BATCH_PREDICTION_JOB_ID
op = CreateBatchPredictionJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
job_display_name=TEST_JOB_DISPLAY_NAME,
model_name=TEST_MODEL_NAME,
instances_format="jsonl",
predictions_format="jsonl",
create_request_timeout=TEST_CREATE_REQUEST_TIMEOUT,
batch_size=TEST_BATCH_SIZE,
)
context = {"ti": mock.MagicMock(), "task": mock.MagicMock()}
op.execute(context=context)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.submit_batch_prediction_job.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
job_display_name=TEST_JOB_DISPLAY_NAME,
model_name=TEST_MODEL_NAME,
instances_format="jsonl",
predictions_format="jsonl",
gcs_source=None,
bigquery_source=None,
gcs_destination_prefix=None,
bigquery_destination_prefix=None,
model_parameters=None,
machine_type=None,
accelerator_type=None,
accelerator_count=None,
starting_replica_count=None,
max_replica_count=None,
generate_explanation=False,
explanation_metadata=None,
explanation_parameters=None,
labels=None,
encryption_spec_key_name=None,
create_request_timeout=TEST_CREATE_REQUEST_TIMEOUT,
batch_size=TEST_BATCH_SIZE,
)
mock_job.wait_for_completion.assert_called_once()
mock_job.to_dict.assert_called_once()
mock_link_persist.assert_called_once_with(
context=context,
batch_prediction_job_id=TEST_BATCH_PREDICTION_JOB_ID,
)
@mock.patch(VERTEX_AI_LINKS_PATH.format("VertexAIBatchPredictionJobLink.persist"))
@mock.patch(VERTEX_AI_PATH.format("batch_prediction_job.BatchPredictionJobHook"))
def test_execute_deferrable(self, mock_hook, mock_link_persist):
mock_job = mock_hook.return_value.submit_batch_prediction_job.return_value
mock_job.name = TEST_BATCH_PREDICTION_JOB_ID
op = CreateBatchPredictionJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
job_display_name=TEST_JOB_DISPLAY_NAME,
model_name=TEST_MODEL_NAME,
instances_format="jsonl",
predictions_format="jsonl",
create_request_timeout=TEST_CREATE_REQUEST_TIMEOUT,
batch_size=TEST_BATCH_SIZE,
deferrable=True,
)
context = {"ti": mock.MagicMock(), "task": mock.MagicMock()}
with (
pytest.raises(TaskDeferred) as exception_info,
pytest.warns(
AirflowProviderDeprecationWarning,
match=SYNC_DEPRECATION_WARNING.format("28.08.2024"),
),
):
op.execute(context=context)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.submit_batch_prediction_job.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
job_display_name=TEST_JOB_DISPLAY_NAME,
model_name=TEST_MODEL_NAME,
instances_format="jsonl",
predictions_format="jsonl",
gcs_source=None,
bigquery_source=None,
gcs_destination_prefix=None,
bigquery_destination_prefix=None,
model_parameters=None,
machine_type=None,
accelerator_type=None,
accelerator_count=None,
starting_replica_count=None,
max_replica_count=None,
generate_explanation=False,
explanation_metadata=None,
explanation_parameters=None,
labels=None,
encryption_spec_key_name=None,
create_request_timeout=TEST_CREATE_REQUEST_TIMEOUT,
batch_size=TEST_BATCH_SIZE,
)
mock_job.wait_for_completion.assert_not_called()
mock_job.to_dict.assert_not_called()
mock_link_persist.assert_called_once_with(
batch_prediction_job_id=TEST_BATCH_PREDICTION_JOB_ID,
context=context,
)
assert hasattr(exception_info.value, "trigger")
assert exception_info.value.trigger.conn_id == GCP_CONN_ID
assert exception_info.value.trigger.project_id == GCP_PROJECT
assert exception_info.value.trigger.location == GCP_LOCATION
assert exception_info.value.trigger.job_id == TEST_BATCH_PREDICTION_JOB_ID
assert exception_info.value.trigger.poll_interval == 10
assert exception_info.value.trigger.impersonation_chain == IMPERSONATION_CHAIN
@mock.patch(VERTEX_AI_PATH.format("batch_prediction_job.BatchPredictionJobHook"))
def test_execute_complete(self, mock_hook):
mock_ti = mock.MagicMock()
mock_context = {"ti": mock_ti}
mock_job = {"name": TEST_JOB_DISPLAY_NAME}
event = {
"status": "success",
"job": mock_job,
}
mock_hook.return_value.extract_batch_prediction_job_id.return_value = TEST_BATCH_PREDICTION_JOB_ID
op = CreateBatchPredictionJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
job_display_name=TEST_JOB_DISPLAY_NAME,
model_name=TEST_MODEL_NAME,
instances_format="jsonl",
predictions_format="jsonl",
create_request_timeout=TEST_CREATE_REQUEST_TIMEOUT,
batch_size=TEST_BATCH_SIZE,
)
execute_complete_result = op.execute_complete(context=mock_context, event=event)
mock_hook.return_value.extract_batch_prediction_job_id.assert_called_once_with(mock_job)
mock_ti.xcom_push.assert_has_calls(
[
call(key="batch_prediction_job_id", value=TEST_BATCH_PREDICTION_JOB_ID),
call(
key="training_conf",
value={
"training_conf_id": TEST_BATCH_PREDICTION_JOB_ID,
"region": GCP_LOCATION,
"project_id": GCP_PROJECT,
},
),
]
)
assert execute_complete_result == mock_job
| TestVertexAICreateBatchPredictionJobOperator |
python | numpy__numpy | numpy/f2py/tests/util.py | {
"start": 2211,
"end": 8122
} | class ____:
def __init__(self):
self.compilers_checked = False
self.has_c = False
self.has_f77 = False
self.has_f90 = False
def check_compilers(self):
if (not self.compilers_checked) and (not sys.platform == "cygwin"):
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [
executor.submit(check_language, "c"),
executor.submit(check_language, "fortran", fortran77_code),
executor.submit(check_language, "fortran", fortran90_code)
]
self.has_c = futures[0].result()
self.has_f77 = futures[1].result()
self.has_f90 = futures[2].result()
self.compilers_checked = True
if not IS_WASM:
checker = CompilerChecker()
checker.check_compilers()
def has_c_compiler():
return checker.has_c
def has_f77_compiler():
return checker.has_f77
def has_f90_compiler():
return checker.has_f90
def has_fortran_compiler():
return (checker.has_f90 and checker.has_f77)
#
# Maintaining a temporary module directory
#
_module_dir = None
_module_num = 5403
if sys.platform == "cygwin":
NUMPY_INSTALL_ROOT = Path(__file__).parent.parent.parent
_module_list = list(NUMPY_INSTALL_ROOT.glob("**/*.dll"))
def _cleanup():
global _module_dir
if _module_dir is not None:
try:
sys.path.remove(_module_dir)
except ValueError:
pass
try:
shutil.rmtree(_module_dir)
except OSError:
pass
_module_dir = None
def get_module_dir():
global _module_dir
if _module_dir is None:
_module_dir = tempfile.mkdtemp()
atexit.register(_cleanup)
if _module_dir not in sys.path:
sys.path.insert(0, _module_dir)
return _module_dir
def get_temp_module_name():
# Assume single-threaded, and the module dir usable only by this thread
global _module_num
get_module_dir()
name = "_test_ext_module_%d" % _module_num
_module_num += 1
if name in sys.modules:
# this should not be possible, but check anyway
raise RuntimeError("Temporary module name already in use.")
return name
def _memoize(func):
memo = {}
def wrapper(*a, **kw):
key = repr((a, kw))
if key not in memo:
try:
memo[key] = func(*a, **kw)
except Exception as e:
memo[key] = e
raise
ret = memo[key]
if isinstance(ret, Exception):
raise ret
return ret
wrapper.__name__ = func.__name__
return wrapper
#
# Building modules
#
@_memoize
def build_module(source_files, options=[], skip=[], only=[], module_name=None):
"""
Compile and import a f2py module, built from the given files.
"""
code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()"
d = get_module_dir()
# gh-27045 : Skip if no compilers are found
if not has_fortran_compiler():
pytest.skip("No Fortran compiler available")
# Copy files
dst_sources = []
f2py_sources = []
for fn in source_files:
if not os.path.isfile(fn):
raise RuntimeError(f"{fn} is not a file")
dst = os.path.join(d, os.path.basename(fn))
shutil.copyfile(fn, dst)
dst_sources.append(dst)
base, ext = os.path.splitext(dst)
if ext in (".f90", ".f95", ".f", ".c", ".pyf"):
f2py_sources.append(dst)
assert f2py_sources
# Prepare options
if module_name is None:
module_name = get_temp_module_name()
gil_options = []
if '--freethreading-compatible' not in options and '--no-freethreading-compatible' not in options:
# default to disabling the GIL if unset in options
gil_options = ['--freethreading-compatible']
f2py_opts = ["-c", "-m", module_name] + options + gil_options + f2py_sources
f2py_opts += ["--backend", "meson"]
if skip:
f2py_opts += ["skip:"] + skip
if only:
f2py_opts += ["only:"] + only
# Build
cwd = os.getcwd()
try:
os.chdir(d)
cmd = [sys.executable, "-c", code] + f2py_opts
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
raise RuntimeError(f"Running f2py failed: {cmd[4:]}\n{asunicode(out)}")
finally:
os.chdir(cwd)
# Partial cleanup
for fn in dst_sources:
os.unlink(fn)
# Rebase (Cygwin-only)
if sys.platform == "cygwin":
# If someone starts deleting modules after import, this will
# need to change to record how big each module is, rather than
# relying on rebase being able to find that from the files.
_module_list.extend(
glob.glob(os.path.join(d, f"{module_name:s}*"))
)
subprocess.check_call(
["/usr/bin/rebase", "--database", "--oblivious", "--verbose"]
+ _module_list
)
# Import
return import_module(module_name)
@_memoize
def build_code(source_code,
options=[],
skip=[],
only=[],
suffix=None,
module_name=None):
"""
Compile and import Fortran code using f2py.
"""
if suffix is None:
suffix = ".f"
with temppath(suffix=suffix) as path:
with open(path, "w") as f:
f.write(source_code)
return build_module([path],
options=options,
skip=skip,
only=only,
module_name=module_name)
#
# Building with meson
#
| CompilerChecker |
python | great-expectations__great_expectations | great_expectations/core/config_provider.py | {
"start": 1685,
"end": 3886
} | class ____(_AbstractConfigurationProvider):
"""
Wrapper class around the other environment-specific configuraiton provider classes.
Based on relevance, specific providers are registered to this object and are invoked
using the API defined by the AbstractConfigurationProvider.
In short, this class' purpose is to aggregate all configuration variables that may
be present for a given user environment (config variables, env vars, runtime environment, etc.)
"""
def __init__(self) -> None:
self._providers: OrderedDict[
Type[_AbstractConfigurationProvider], _AbstractConfigurationProvider
] = OrderedDict()
super().__init__()
def register_provider(self, provider: _AbstractConfigurationProvider) -> None:
"""
Saves a configuration provider to the object's state for downstream usage.
See `get_values()` for more information.
Args:
provider: An instance of a provider to register.
"""
type_ = type(provider)
if type_ in self._providers:
raise ValueError(f"Provider of type {type_} has already been registered!") # noqa: TRY003 # FIXME CoP
self._providers[type_] = provider
def get_provider(
self, type_: Type[_AbstractConfigurationProvider]
) -> Optional[_AbstractConfigurationProvider]:
"""
Retrieves a registered configuration provider (if available).
Args:
type_: The class of the configuration provider to retrieve.
Returns:
A registered provider if available.
If not, None is returned.
"""
return self._providers.get(type_)
@override
def get_values(self) -> Dict[str, str]:
"""
Iterates through all registered providers to aggregate a list of configuration values.
Values are generated based on the order of registration; if there is a conflict,
subsequent providers will overwrite existing values.
"""
values: Dict[str, str] = {}
for provider in self._providers.values():
values.update(provider.get_values())
return values
| _ConfigurationProvider |
python | pytest-dev__pytest-django | pytest_django_test/db_router.py | {
"start": 0,
"end": 587
} | class ____:
def db_for_read(self, model, **hints): # noqa: ARG002
if model._meta.app_label == "app" and model._meta.model_name == "seconditem":
return "second"
return None
def db_for_write(self, model, **hints): # noqa: ARG002
if model._meta.app_label == "app" and model._meta.model_name == "seconditem":
return "second"
return None
def allow_migrate(self, db, app_label, model_name=None, **hints): # noqa: ARG002
if app_label == "app" and model_name == "seconditem":
return db == "second"
| DbRouter |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-snowflake-cortex/destination_snowflake_cortex/config.py | {
"start": 636,
"end": 2290
} | class ____(BaseModel):
host: str = Field(
...,
title="Host",
order=1,
description="Enter the account name you want to use to access the database. This is usually the identifier before .snowflakecomputing.com",
examples=["AIRBYTE_ACCOUNT"],
)
role: str = Field(
...,
title="Role",
order=2,
description="Enter the role that you want to use to access Snowflake",
examples=["AIRBYTE_ROLE", "ACCOUNTADMIN"],
)
warehouse: str = Field(
...,
title="Warehouse",
order=3,
description="Enter the name of the warehouse that you want to use as a compute cluster",
examples=["AIRBYTE_WAREHOUSE"],
)
database: str = Field(
...,
title="Database",
order=4,
description="Enter the name of the database that you want to sync data into",
examples=["AIRBYTE_DATABASE"],
)
default_schema: str = Field(
...,
title="Default Schema",
order=5,
description="Enter the name of the default schema",
examples=["AIRBYTE_SCHEMA"],
)
username: str = Field(
...,
title="Username",
order=6,
description="Enter the name of the user you want to use to access the database",
examples=["AIRBYTE_USER"],
)
credentials: PasswordBasedAuthorizationModel
class Config:
title = "Snowflake Connection"
schema_extra = {
"description": "Snowflake can be used to store vector data and retrieve embeddings.",
"group": "indexing",
}
| SnowflakeCortexIndexingModel |
python | plotly__plotly.py | plotly/graph_objs/densitymapbox/colorbar/_tickformatstop.py | {
"start": 233,
"end": 8544
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "densitymapbox.colorbar"
_path_str = "densitymapbox.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.densitymapbox.
colorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.densitymapbox.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.densitymapbox.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickformatstop |
python | huggingface__transformers | src/transformers/models/pegasus/modeling_pegasus.py | {
"start": 39543,
"end": 46790
} | class ____(PegasusPreTrainedModel):
_tied_weights_keys = {
"decoder.embed_tokens.weight": "shared.weight",
"encoder.embed_tokens.weight": "shared.weight",
}
def __init__(self, config: PegasusConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = PegasusEncoder(config)
self.decoder = PegasusDecoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.encoder.resize_position_embeddings(new_num_position_embeddings)
self.decoder.resize_position_embeddings(new_num_position_embeddings)
def get_position_embeddings(self) -> tuple[nn.Embedding]:
"""
Returns the position embeddings matrix
"""
return (self.encoder.get_position_embeddings(), self.decoder.get_position_embeddings())
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.Tensor] = None,
decoder_attention_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[tuple[torch.FloatTensor]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.Tensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, Seq2SeqModelOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Pegasus uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
Example:
```python
>>> from transformers import AutoTokenizer, PegasusModel
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
>>> model = PegasusModel.from_pretrained("google/pegasus-large")
>>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
>>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")
>>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 4, 1024]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The PEGASUS Model with a language modeling head. Can be used for summarization.
"""
)
| PegasusModel |
python | getsentry__sentry | tests/sentry/incidents/test_logic.py | {
"start": 8629,
"end": 9820
} | class ____:
def validate_result(self, incident, result, expected_results, start, end, windowed_stats):
# Duration of 300s, but no alert rule
time_window = incident.alert_rule.snuba_query.time_window if incident.alert_rule else 60
assert result.rollup == time_window
expected_start = start if start else incident.date_started - timedelta(seconds=time_window)
expected_end = end if end else incident.current_end_date + timedelta(seconds=time_window)
if windowed_stats:
now = timezone.now()
expected_end = expected_start + timedelta(
seconds=time_window * (WINDOWED_STATS_DATA_POINTS / 2)
)
expected_start = expected_start - timedelta(
seconds=time_window * (WINDOWED_STATS_DATA_POINTS / 2)
)
if expected_end > now:
expected_end = now
expected_start = now - timedelta(seconds=time_window * WINDOWED_STATS_DATA_POINTS)
assert result.start == expected_start
assert result.end == expected_end
assert [r["count"] for r in result.data["data"]] == expected_results
| BaseIncidentsValidation |
python | bokeh__bokeh | src/bokeh/models/graphs.py | {
"start": 2367,
"end": 3046
} | class ____(LayoutProvider):
'''
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
graph_layout = Dict(Either(Int, String), Len(Seq(Float), 2), default={}, help="""
The coordinates of the graph nodes in cartesian space. The keys of
the dictionary correspond to node indices or labels and the values
are two element sequences containing the x and y coordinates of
the nodes.
.. code-block:: python
{
0 : [0.5, 0.5],
1 : [1.0, 0.86],
2 : [0.86, 1],
}
""")
@abstract
| StaticLayoutProvider |
python | numpy__numpy | numpy/_core/tests/test_dtype.py | {
"start": 872,
"end": 9374
} | class ____:
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object])
def test_run(self, t):
"""Only test hash runs at all."""
dt = np.dtype(t)
hash(dt)
@pytest.mark.parametrize('t', [int, float])
def test_dtype(self, t):
# Make sure equivalent byte order char hash the same (e.g. < and = on
# little endian)
dt = np.dtype(t)
dt2 = dt.newbyteorder("<")
dt3 = dt.newbyteorder(">")
if dt == dt2:
assert_(dt.byteorder != dt2.byteorder, "bogus test")
assert_dtype_equal(dt, dt2)
else:
assert_(dt.byteorder != dt3.byteorder, "bogus test")
assert_dtype_equal(dt, dt3)
def test_equivalent_dtype_hashing(self):
# Make sure equivalent dtypes with different type num hash equal
uintp = np.dtype(np.uintp)
if uintp.itemsize == 4:
left = uintp
right = np.dtype(np.uint32)
else:
left = uintp
right = np.dtype(np.ulonglong)
assert_(left == right)
assert_(hash(left) == hash(right))
def test_invalid_types(self):
# Make sure invalid type strings raise an error
assert_raises(TypeError, np.dtype, 'O3')
assert_raises(TypeError, np.dtype, 'O5')
assert_raises(TypeError, np.dtype, 'O7')
assert_raises(TypeError, np.dtype, 'b3')
assert_raises(TypeError, np.dtype, 'h4')
assert_raises(TypeError, np.dtype, 'I5')
assert_raises(TypeError, np.dtype, 'e3')
assert_raises(TypeError, np.dtype, 'f5')
if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
assert_raises(TypeError, np.dtype, 'g12')
elif np.dtype('g').itemsize == 12:
assert_raises(TypeError, np.dtype, 'g16')
if np.dtype('l').itemsize == 8:
assert_raises(TypeError, np.dtype, 'l4')
assert_raises(TypeError, np.dtype, 'L4')
else:
assert_raises(TypeError, np.dtype, 'l8')
assert_raises(TypeError, np.dtype, 'L8')
if np.dtype('q').itemsize == 8:
assert_raises(TypeError, np.dtype, 'q4')
assert_raises(TypeError, np.dtype, 'Q4')
else:
assert_raises(TypeError, np.dtype, 'q8')
assert_raises(TypeError, np.dtype, 'Q8')
# Make sure negative-sized dtype raises an error
assert_raises(TypeError, np.dtype, 'S-1')
assert_raises(TypeError, np.dtype, 'U-1')
assert_raises(TypeError, np.dtype, 'V-1')
def test_richcompare_invalid_dtype_equality(self):
# Make sure objects that cannot be converted to valid
# dtypes results in False/True when compared to valid dtypes.
# Here 7 cannot be converted to dtype. No exceptions should be raised
assert not np.dtype(np.int32) == 7, "dtype richcompare failed for =="
assert np.dtype(np.int32) != 7, "dtype richcompare failed for !="
@pytest.mark.parametrize(
'operation',
[operator.le, operator.lt, operator.ge, operator.gt])
def test_richcompare_invalid_dtype_comparison(self, operation):
# Make sure TypeError is raised for comparison operators
# for invalid dtypes. Here 7 is an invalid dtype.
with pytest.raises(TypeError):
operation(np.dtype(np.int32), 7)
@pytest.mark.parametrize("dtype",
['Bool', 'Bytes0', 'Complex32', 'Complex64',
'Datetime64', 'Float16', 'Float32', 'Float64',
'Int8', 'Int16', 'Int32', 'Int64',
'Object0', 'Str0', 'Timedelta64',
'UInt8', 'UInt16', 'Uint32', 'UInt32',
'Uint64', 'UInt64', 'Void0',
"Float128", "Complex128"])
def test_numeric_style_types_are_invalid(self, dtype):
with assert_raises(TypeError):
np.dtype(dtype)
def test_expired_dtypes_with_bad_bytesize(self):
match: str = r".*removed in NumPy 2.0.*"
with pytest.raises(TypeError, match=match):
np.dtype("int0")
with pytest.raises(TypeError, match=match):
np.dtype("uint0")
with pytest.raises(TypeError, match=match):
np.dtype("bool8")
with pytest.raises(TypeError, match=match):
np.dtype("bytes0")
with pytest.raises(TypeError, match=match):
np.dtype("str0")
with pytest.raises(TypeError, match=match):
np.dtype("object0")
with pytest.raises(TypeError, match=match):
np.dtype("void0")
@pytest.mark.parametrize(
'value',
['m8', 'M8', 'datetime64', 'timedelta64',
'i4, (2,3)f8, f4', 'S3, 3u8, (3,4)S10',
'>f', '<f', '=f', '|f',
])
def test_dtype_bytes_str_equivalence(self, value):
bytes_value = value.encode('ascii')
from_bytes = np.dtype(bytes_value)
from_str = np.dtype(value)
assert_dtype_equal(from_bytes, from_str)
def test_dtype_from_bytes(self):
# Empty bytes object
assert_raises(TypeError, np.dtype, b'')
# Byte order indicator, but no type
assert_raises(TypeError, np.dtype, b'|')
# Single character with ordinal < NPY_NTYPES_LEGACY returns
# type by index into _builtin_descrs
assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
# Single character where value is a valid type code
assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
# Bytes with non-ascii values raise errors
assert_raises(TypeError, np.dtype, b'\xff')
assert_raises(TypeError, np.dtype, b's\xff')
def test_bad_param(self):
# Can't give a size that's too small
assert_raises(ValueError, np.dtype,
{'names': ['f0', 'f1'],
'formats': ['i4', 'i1'],
'offsets': [0, 4],
'itemsize': 4})
# If alignment is enabled, the alignment (4) must divide the itemsize
assert_raises(ValueError, np.dtype,
{'names': ['f0', 'f1'],
'formats': ['i4', 'i1'],
'offsets': [0, 4],
'itemsize': 9}, align=True)
# If alignment is enabled, the individual fields must be aligned
assert_raises(ValueError, np.dtype,
{'names': ['f0', 'f1'],
'formats': ['i1', 'f4'],
'offsets': [0, 2]}, align=True)
def test_field_order_equality(self):
x = np.dtype({'names': ['A', 'B'],
'formats': ['i4', 'f4'],
'offsets': [0, 4]})
y = np.dtype({'names': ['B', 'A'],
'formats': ['i4', 'f4'],
'offsets': [4, 0]})
assert_equal(x == y, False)
# This is a safe cast (not equiv) due to the different names:
assert np.can_cast(x, y, casting="safe")
@pytest.mark.parametrize(
["type_char", "char_size", "scalar_type"],
[["U", 4, np.str_],
["S", 1, np.bytes_]])
def test_create_string_dtypes_directly(
self, type_char, char_size, scalar_type):
dtype_class = type(np.dtype(type_char))
dtype = dtype_class(8)
assert dtype.type is scalar_type
assert dtype.itemsize == 8 * char_size
def test_create_invalid_string_errors(self):
one_too_big = np.iinfo(np.intc).max + 1
with pytest.raises(TypeError):
type(np.dtype("U"))(one_too_big // 4)
with pytest.raises(TypeError):
# Code coverage for very large numbers:
type(np.dtype("U"))(np.iinfo(np.intp).max // 4 + 1)
if one_too_big < sys.maxsize:
with pytest.raises(TypeError):
type(np.dtype("S"))(one_too_big)
with pytest.raises(ValueError):
type(np.dtype("U"))(-1)
# OverflowError on 32 bit
with pytest.raises((TypeError, OverflowError)):
# see gh-26556
type(np.dtype("S"))(2**61)
with pytest.raises(TypeError):
np.dtype("S1234hello")
def test_leading_zero_parsing(self):
dt1 = np.dtype('S010')
dt2 = np.dtype('S10')
assert dt1 == dt2
assert repr(dt1) == "dtype('S10')"
assert dt1.itemsize == 10
| TestBuiltin |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_str.py | {
"start": 457,
"end": 515
} | class ____:
def __str__(self):
return "ruff"
| Str |
python | tensorflow__tensorflow | tensorflow/python/keras/utils/version_utils.py | {
"start": 2085,
"end": 2447
} | class ____(object):
"""Chooses between Keras v1 and v2 Layer class."""
def __new__(cls, *args, **kwargs): # pylint: disable=unused-argument
use_v2 = should_use_v2()
cls = swap_class(cls, base_layer.Layer, base_layer_v1.Layer, use_v2) # pylint: disable=self-cls-assignment
return super(LayerVersionSelector, cls).__new__(cls)
| LayerVersionSelector |
python | great-expectations__great_expectations | great_expectations/expectations/window.py | {
"start": 116,
"end": 319
} | class ____(pydantic.BaseModel):
"""
A threshold in which a metric will be considered passable
"""
positive: float
negative: float
class Config:
extra = Extra.forbid
| Offset |
python | django__django | django/test/client.py | {
"start": 13120,
"end": 22068
} | class ____:
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(
self,
*,
json_encoder=DjangoJSONEncoder,
headers=None,
query_params=None,
**defaults,
):
self.json_encoder = json_encoder
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
if headers:
self.defaults.update(HttpHeaders.to_wsgi_names(headers))
if query_params:
self.defaults["QUERY_STRING"] = urlencode(query_params, doseq=True)
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See https://www.python.org/dev/peps/pep-3333/#environ-variables
return {
"HTTP_COOKIE": "; ".join(
sorted(
"%s=%s" % (morsel.key, morsel.coded_value)
for morsel in self.cookies.values()
)
),
"PATH_INFO": "/",
"REMOTE_ADDR": "127.0.0.1",
"REQUEST_METHOD": "GET",
"SCRIPT_NAME": "",
"SERVER_NAME": "testserver",
"SERVER_PORT": "80",
"SERVER_PROTOCOL": "HTTP/1.1",
"wsgi.version": (1, 0),
"wsgi.url_scheme": "http",
"wsgi.input": FakePayload(b""),
"wsgi.errors": self.errors,
"wsgi.multiprocess": True,
"wsgi.multithread": False,
"wsgi.run_once": False,
**self.defaults,
**request,
}
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match[1]
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _encode_json(self, data, content_type):
"""
Return encoded JSON if data is a dict, list, or tuple and content_type
is application/json.
"""
should_encode = JSON_CONTENT_TYPE_RE.match(content_type) and isinstance(
data, (dict, list, tuple)
)
return json.dumps(data, cls=self.json_encoder) if should_encode else data
def _get_path(self, parsed):
path = unquote_to_bytes(parsed.path)
# Replace the behavior where non-ASCII values in the WSGI environ are
# arbitrarily decoded with ISO-8859-1.
# Refs comment in `get_bytes_from_wsgi()`.
return path.decode("iso-8859-1")
def get(
self, path, data=None, secure=False, *, headers=None, query_params=None, **extra
):
"""Construct a GET request."""
if query_params and data:
raise ValueError("query_params and data arguments are mutually exclusive.")
query_params = data or query_params
query_params = {} if query_params is None else query_params
return self.generic(
"GET",
path,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
def post(
self,
path,
data=None,
content_type=MULTIPART_CONTENT,
secure=False,
*,
headers=None,
query_params=None,
**extra,
):
"""Construct a POST request."""
data = self._encode_json({} if data is None else data, content_type)
post_data = self._encode_data(data, content_type)
return self.generic(
"POST",
path,
post_data,
content_type,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
def head(
self, path, data=None, secure=False, *, headers=None, query_params=None, **extra
):
"""Construct a HEAD request."""
if query_params and data:
raise ValueError("query_params and data arguments are mutually exclusive.")
query_params = data or query_params
query_params = {} if query_params is None else query_params
return self.generic(
"HEAD",
path,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
def trace(self, path, secure=False, *, headers=None, query_params=None, **extra):
"""Construct a TRACE request."""
return self.generic(
"TRACE",
path,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
def options(
self,
path,
data="",
content_type="application/octet-stream",
secure=False,
*,
headers=None,
query_params=None,
**extra,
):
"Construct an OPTIONS request."
return self.generic(
"OPTIONS",
path,
data,
content_type,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
def put(
self,
path,
data="",
content_type="application/octet-stream",
secure=False,
*,
headers=None,
query_params=None,
**extra,
):
"""Construct a PUT request."""
data = self._encode_json(data, content_type)
return self.generic(
"PUT",
path,
data,
content_type,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
def patch(
self,
path,
data="",
content_type="application/octet-stream",
secure=False,
*,
headers=None,
query_params=None,
**extra,
):
"""Construct a PATCH request."""
data = self._encode_json(data, content_type)
return self.generic(
"PATCH",
path,
data,
content_type,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
def delete(
self,
path,
data="",
content_type="application/octet-stream",
secure=False,
*,
headers=None,
query_params=None,
**extra,
):
"""Construct a DELETE request."""
data = self._encode_json(data, content_type)
return self.generic(
"DELETE",
path,
data,
content_type,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
def generic(
self,
method,
path,
data="",
content_type="application/octet-stream",
secure=False,
*,
headers=None,
query_params=None,
**extra,
):
"""Construct an arbitrary HTTP request."""
parsed = urlsplit(str(path)) # path can be lazy
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
"PATH_INFO": self._get_path(parsed),
"REQUEST_METHOD": method,
"SERVER_PORT": "443" if secure else "80",
"wsgi.url_scheme": "https" if secure else "http",
}
if data:
r.update(
{
"CONTENT_LENGTH": str(len(data)),
"CONTENT_TYPE": content_type,
"wsgi.input": FakePayload(data),
}
)
if headers:
extra.update(HttpHeaders.to_wsgi_names(headers))
if query_params:
extra["QUERY_STRING"] = urlencode(query_params, doseq=True)
r.update(extra)
# If QUERY_STRING is absent or empty, extract it from the URL.
if not r.get("QUERY_STRING"):
# WSGI requires latin-1 encoded strings. See get_path_info().
r["QUERY_STRING"] = parsed.query.encode().decode("iso-8859-1")
return self.request(**r)
| RequestFactory |
python | kamyu104__LeetCode-Solutions | Python/boats-to-save-people.py | {
"start": 33,
"end": 462
} | class ____(object):
def numRescueBoats(self, people, limit):
"""
:type people: List[int]
:type limit: int
:rtype: int
"""
people.sort()
result = 0
left, right = 0, len(people)-1
while left <= right:
result += 1
if people[left] + people[right] <= limit:
left += 1
right -= 1
return result
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 840191,
"end": 840935
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for ProjectColumn."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("ProjectColumnEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("ProjectColumn"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| ProjectColumnConnection |
python | sqlalchemy__sqlalchemy | test/orm/test_eager_relations.py | {
"start": 232040,
"end": 235038
} | class ____(
fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
"""test for issue #11965, regression from #11449"""
__dialect__ = "default"
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Source(Base):
__tablename__ = "source"
id: Mapped[int] = mapped_column(primary_key=True)
class Day(Base):
__tablename__ = "day"
id: Mapped[int] = mapped_column(primary_key=True)
class Run(Base):
__tablename__ = "run"
id: Mapped[int] = mapped_column(primary_key=True)
source_id: Mapped[int] = mapped_column(
ForeignKey(Source.id), nullable=False
)
source = relationship(Source, lazy="joined", innerjoin=True)
day = relationship(
Day,
lazy="joined",
innerjoin=True,
)
day_id: Mapped[int] = mapped_column(
ForeignKey(Day.id), nullable=False
)
class Event(Base):
__tablename__ = "event"
id: Mapped[int] = mapped_column(primary_key=True)
run_id: Mapped[int] = mapped_column(
ForeignKey(Run.id), nullable=False
)
run = relationship(Run, lazy="joined", innerjoin=True)
class Room(Base):
__tablename__ = "room"
id: Mapped[int] = mapped_column(primary_key=True)
event_id: Mapped[int] = mapped_column(
ForeignKey(Event.id), nullable=False
)
event = relationship(Event, foreign_keys=event_id, lazy="joined")
@classmethod
def insert_data(cls, connection):
Room, Run, Source, Event, Day = cls.classes(
"Room", "Run", "Source", "Event", "Day"
)
run = Run(source=Source(), day=Day())
event = Event(run=run)
room = Room(event=event)
with Session(connection) as session:
session.add(room)
session.commit()
def test_compile(self):
Room = self.classes.Room
self.assert_compile(
select(Room),
"SELECT room.id, room.event_id, source_1.id AS id_1, "
"day_1.id AS id_2, run_1.id AS id_3, run_1.source_id, "
"run_1.day_id, event_1.id AS id_4, event_1.run_id "
"FROM room LEFT OUTER JOIN "
"(event AS event_1 "
"JOIN run AS run_1 ON run_1.id = event_1.run_id "
"JOIN day AS day_1 ON day_1.id = run_1.day_id "
"JOIN source AS source_1 ON source_1.id = run_1.source_id) "
"ON event_1.id = room.event_id",
)
def test_roundtrip(self):
Room = self.classes.Room
session = fixture_session()
rooms = session.scalars(select(Room)).unique().all()
session.close()
# verify eager-loaded correctly
assert rooms[0].event.run.day
| NestedInnerjoinTestIssue11965 |
python | mkdocs__mkdocs | mkdocs/tests/utils/utils_tests.py | {
"start": 21630,
"end": 21656
} | class ____:
url: str
| _Page |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_G.py | {
"start": 2531,
"end": 3960
} | class ____(Benchmark):
r"""
Goldstein-Price objective function.
This class defines the Goldstein-Price [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{GoldsteinPrice}}(x) = \left[ 1 + (x_1 + x_2 + 1)^2
(19 - 14 x_1 + 3 x_1^2 - 14 x_2 + 6 x_1 x_2 + 3 x_2^2) \right]
\left[ 30 + ( 2x_1 - 3 x_2)^2 (18 - 32 x_1 + 12 x_1^2
+ 48 x_2 - 36 x_1 x_2 + 27 x_2^2) \right]
with :math:`x_i \in [-2, 2]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 3` for :math:`x = [0, -1]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-2.0] * self.N, [2.0] * self.N))
self.global_optimum = [[0., -1.]]
self.fglob = 3.0
def fun(self, x, *args):
self.nfev += 1
a = (1 + (x[0] + x[1] + 1) ** 2
* (19 - 14 * x[0] + 3 * x[0] ** 2
- 14 * x[1] + 6 * x[0] * x[1] + 3 * x[1] ** 2))
b = (30 + (2 * x[0] - 3 * x[1]) ** 2
* (18 - 32 * x[0] + 12 * x[0] ** 2
+ 48 * x[1] - 36 * x[0] * x[1] + 27 * x[1] ** 2))
return a * b
| GoldsteinPrice |
python | django-guardian__django-guardian | guardian/testapp/migrations/0003_auto_20190611_0440.py | {
"start": 130,
"end": 1005
} | class ____(migrations.Migration):
dependencies = [
("testapp", "0002_logentrywithgroup"),
]
operations = [
migrations.AlterField(
model_name="customuser",
name="last_name",
field=models.CharField(blank=True, max_length=150, verbose_name="last name"),
),
migrations.AlterField(
model_name="customuser",
name="username",
field=models.CharField(
error_messages={"unique": "A user with that username already exists."},
help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
max_length=150,
unique=True,
validators=[django.contrib.auth.validators.UnicodeUsernameValidator()],
verbose_name="username",
),
),
]
| Migration |
python | numpy__numpy | numpy/polynomial/tests/test_hermite.py | {
"start": 17029,
"end": 18666
} | class ____:
def test_hermfromroots(self):
res = herm.hermfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2])
pol = herm.hermfromroots(roots)
res = herm.hermval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(herm.herm2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_hermroots(self):
assert_almost_equal(herm.hermroots([1]), [])
assert_almost_equal(herm.hermroots([1, 1]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = herm.hermroots(herm.hermfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_hermtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, herm.hermtrim, coef, -1)
# Test results
assert_equal(herm.hermtrim(coef), coef[:-1])
assert_equal(herm.hermtrim(coef, 1), coef[:-3])
assert_equal(herm.hermtrim(coef, 2), [0])
def test_hermline(self):
assert_equal(herm.hermline(3, 4), [3, 2])
def test_herm2poly(self):
for i in range(10):
assert_almost_equal(herm.herm2poly([0] * i + [1]), Hlist[i])
def test_poly2herm(self):
for i in range(10):
assert_almost_equal(herm.poly2herm(Hlist[i]), [0] * i + [1])
def test_weight(self):
x = np.linspace(-5, 5, 11)
tgt = np.exp(-x**2)
res = herm.hermweight(x)
assert_almost_equal(res, tgt)
| TestMisc |
python | pytorch__pytorch | torch/_inductor/codegen/cuda/gemm_template.py | {
"start": 67106,
"end": 77188
} | class ____(CUTLASSGemmTemplate):
def __init__(
self,
input_nodes: list[Buffer],
layout: Layout,
alpha: float,
beta: float,
input_reorder: Optional[list[int]] = None,
):
super().__init__(input_nodes, layout, alpha, beta, input_reorder)
@staticmethod
def add_cutlass_gemm_choices(
choices: list[ChoiceCaller],
layout: ir.Layout,
input_nodes: list[Buffer],
alpha: Union[float, int] = 1,
beta: Union[float, int] = 0,
input_reorder: Optional[list[int]] = None,
use_fast_accum: Optional[bool] = False,
**extra_kwargs,
) -> None:
template = CUTLASS2xGemmTemplate(
input_nodes, layout, alpha, beta, input_reorder
)
template._add_cutlass_gemm_choices(
choices, layout, input_nodes, alpha, beta, input_reorder, **extra_kwargs
)
@staticmethod
def _get_supported_ops() -> "list[cutlass_library.gemm_operation.GemmOperation]": # type: ignore[name-defined] # noqa: F821
import cutlass_library.library as cutlass_lib
return [cutlass_lib.GemmKind.Universal, cutlass_lib.GemmKind.Sparse]
@staticmethod
def _has_tma_epilogue(self) -> bool:
return False
def _get_template(self) -> str:
return GEMM_TEMPLATE_CUTLASS_2X
def _get_template_args(
self,
op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined] # noqa: F821
) -> tuple[str, Optional[str]]:
import cutlass_library.library as cutlass_lib
if op.gemm_kind == cutlass_lib.GemmKind.Sparse:
return (GEMM_ARGS_SPARSE_CUTLASS_2X, None)
return (GEMM_ARGS_CUTLASS_2X, None)
def _are_inputs_layout_compatible(self, layouts: list[Layout]) -> bool:
"""
Evaluates whether input layouts are compatible for set of operations supported by this class.
Args:
layouts (List[Layout]): List containing Layout objects representing
the input matrices.
Returns:
bool: True if layouts are GEMM compatible, otherwise False.
"""
assert len(layouts) == 2 or len(layouts) == 3
# Check if A and B are compatible
A_layout, B_layout = layouts[:2]
if len(A_layout.size) != 2:
return False
if len(B_layout.size) != 2:
return False
A_size = [int(i) for i in A_layout.size]
B_size = [int(i) for i in B_layout.size]
K = max(A_size[1], B_size[0])
return (K == A_size[1] or K == 2 * A_size[1]) and K == B_size[0]
def _shape_match(
self,
op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined] # noqa: F821
) -> bool:
import cutlass_library.library as cutlass_lib
X, W = self.input_nodes[0], self.input_nodes[1]
if op.gemm_kind == cutlass_lib.GemmKind.Sparse:
return X.get_size()[1] * 2 == W.get_size()[0]
return X.get_size()[1] == W.get_size()[0]
def _alignment_match(
self,
op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined] # noqa: F821
) -> bool:
import cutlass_library.library as cutlass_lib
if op.gemm_kind != cutlass_lib.GemmKind.Sparse:
return True
# SparseGemm in CUTLASS has specific alignment check that for
# small k could make some of the choices throw kMisalignedOperand
# CUTLASS error when run, see:
# https://github.com/NVIDIA/cutlass/blob/e01b9b5029b7caca5a43c29f7d2714d7cf1dcae8/include/cutlass/gemm/kernel/sparse_gemm.h#L198-L200 # noqa: B950
# So, let's skip these choices if that would be the case.
X = self.input_nodes[0]
return (X.get_size()[1] * 2) % op.tile_description.tile_shape[2] == 0
def _set_bias_layout_and_alignment(
self,
op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined] # noqa: F821
) -> bool:
import cutlass_library.library as cutlass_lib
if op.gemm_kind == cutlass_lib.GemmKind.Sparse:
op.C.layout = op.D.layout
return True
if len(self.input_nodes) >= 3 and self.input_nodes[2] is not None:
Bias = self.input_nodes[2]
bias_layout = CUTLASSGemmTemplate.cutlass_layout(Bias.get_layout())
if bias_layout != op.D.layout:
# For cutlass2, bias and output layout must match
return False
if not self.set_alignment(Bias.get_layout(), op.C):
return False
else:
op.C.layout = op.D.layout
return True
def _define_gemm_instance(
self,
op: GemmOperation,
evt_name: Optional[str] = None,
) -> tuple[str, str]:
"""Defines and renders the Cutlass / CUDA C++ code for a given GEMM operation instance.
This function uses the Cutlass library to generate key parts of the codegen process. General Matrix Multiply
forms a core part of a number of scientific applications, so this efficient and adaptable implementation is
crucial.
Args:
op (cutlass_library.gemm_op.GemmOperation): This is the core GEMM operation that we are defining and rendering.
Returns:
tuple[str, str]: A tuple where the first part is a string that constitutes the defined GEMM operation in C++
code (render) and the second part is the string that specifies the operation type.
"""
assert cutlass_utils.try_import_cutlass()
import cutlass_library.gemm_operation as cutlass_gemm_op
import cutlass_library.library as cutlass_lib
if op.gemm_kind == cutlass_lib.GemmKind.Sparse:
emitter = cutlass_gemm_op.EmitSparseGemmInstance()
else:
emitter = cutlass_gemm_op.EmitGemmInstance()
op_def = emitter.emit(op)
op_def = op_def.replace(
"cutlass::gemm::device::Gemm", "cutlass::gemm::device::GemmUniversal"
)
if op.gemm_kind != cutlass_lib.GemmKind.Sparse:
op_def = op_def.replace("false,", "")
pattern = re.compile(r"\s*using\s(.*?)\s=")
decl = op_def.split("\n")[2]
match = pattern.match(decl)
if match is None:
raise RuntimeError("Invalid Gemm config: \n" + op_def)
op_type = match.groups()[0]
return op_def, op_type
def _get_extra_inputs_and_names(
self,
op: "cutlass_gemm_op.GemmOperation" = None, # type: ignore[name-defined] # noqa: F821
) -> tuple[Optional[Buffer], list[Optional[Buffer]], list[str]]:
import cutlass_library.library as cutlass_lib
if op.gemm_kind == cutlass_lib.GemmKind.Sparse:
Bias = None
Meta = self.input_nodes[2]
else:
Bias = None if len(self.input_nodes) == 2 else self.input_nodes[2]
Meta = None
inputs = [Meta]
names = ["Meta"]
return (Bias, inputs, names)
def _update_arg_names_for_test_call_statement(
self,
arg_names: list[str],
input_nodes: list[Buffer],
) -> list[str]:
if input_nodes[3] is None:
del arg_names[3]
if input_nodes[2] is None:
del arg_names[2]
return arg_names
def render_gemm_arguments(
self,
instance_type: str,
argument_template: str,
epilogue_template: str,
should_swap_xw: bool,
X: IRNode,
W: IRNode,
Bias: IRNode,
Meta: IRNode,
Y: IRNode,
alpha: float,
beta: float,
kernel: CUDATemplateKernel,
epilogue_args,
) -> str:
"""
Render the Cutlass CUDA C++ code required for passing arguments to the GEMM operation.
Args:
instance_type (str): GEMM instance type.
argument_template (str): Template for the GEMM operation arguments.
epilogue_template (str): Template for the epilogue arguments.
should_swap_xw (bool): Determines whether X, W operands should be swapped. If True, applies an explicit
transpose operation to X and W.
X (IRNode): The X input tensor.
W (IRNode): The W input tensor.
Bias (IRNode): The bias tensor.
Meta (IRNode): The meta tensor.
Y (IRNode): The output tensor.
alpha (float): Scaling factor for the product of the inputs.
beta (float): Scaling factor for the output tensor.
kernel (CUDATemplateKernel): CUDA Template kernel for the operation.
epilogue_args (any): Additional arguments for the epilogue state.
Returns:
str: A block of CUDA C++ code as a string, ready to be used as arguments for the GEMM operation.
Note: If `should_swap_xw` is True, a transpose operation will be applied to the X, W, Bias, and Y
tensors. This operation also implies the M and N dimensions of Bias and GEMM output to be swapped
before the function call.
"""
options = {
"instance_type": instance_type,
"alpha": alpha,
"beta": beta,
"X": X,
"W": W,
"Y": Y,
"Bias": Bias,
"Meta": Meta,
"template": self,
"kernel": kernel,
"M": "M",
"N": "N",
"epilogue_args": epilogue_args,
}
if epilogue_template is None:
arguments = self._template_from_string(argument_template).render(
split_k=1, **options
)
return arguments
epilogue_arguments = self._template_from_string(epilogue_template).render(
**options
)
arguments = self._template_from_string(argument_template).render(
epilogue_arguments=epilogue_arguments, **options
)
return arguments
| CUTLASS2xGemmTemplate |
python | ansible__ansible | lib/ansible/_internal/_wrapt.py | {
"start": 21699,
"end": 25170
} | class ____(_FunctionWrapperBase):
def __call__(*args, **kwargs):
def _unpack_self(self, *args):
return self, args
self, args = _unpack_self(*args)
# If enabled has been specified, then evaluate it at this point and if
# the wrapper is not to be executed, then simply return the bound
# function rather than a bound wrapper for the bound function. When
# evaluating enabled, if it is callable we call it, otherwise we
# evaluate it as a boolean.
if self._self_enabled is not None:
if callable(self._self_enabled):
if not self._self_enabled():
return self.__wrapped__(*args, **kwargs)
elif not self._self_enabled:
return self.__wrapped__(*args, **kwargs)
# We need to do things different depending on whether we are likely
# wrapping an instance method vs a static method or class method.
if self._self_binding == 'function':
if self._self_instance is None and args:
instance, newargs = args[0], args[1:]
if isinstance(instance, self._self_owner):
wrapped = PartialCallableObjectProxy(self.__wrapped__, instance)
return self._self_wrapper(wrapped, instance, newargs, kwargs)
return self._self_wrapper(self.__wrapped__, self._self_instance,
args, kwargs)
elif self._self_binding == 'callable':
if self._self_instance is None:
# This situation can occur where someone is calling the
# instancemethod via the class type and passing the instance as
# the first argument. We need to shift the args before making
# the call to the wrapper and effectively bind the instance to
# the wrapped function using a partial so the wrapper doesn't
# see anything as being different.
if not args:
raise TypeError('missing 1 required positional argument')
instance, args = args[0], args[1:]
wrapped = PartialCallableObjectProxy(self.__wrapped__, instance)
return self._self_wrapper(wrapped, instance, args, kwargs)
return self._self_wrapper(self.__wrapped__, self._self_instance,
args, kwargs)
else:
# As in this case we would be dealing with a classmethod or
# staticmethod, then _self_instance will only tell us whether
# when calling the classmethod or staticmethod they did it via an
# instance of the class it is bound to and not the case where
# done by the class type itself. We thus ignore _self_instance
# and use the __self__ attribute of the bound function instead.
# For a classmethod, this means instance will be the class type
# and for a staticmethod it will be None. This is probably the
# more useful thing we can pass through even though we loose
# knowledge of whether they were called on the instance vs the
# class type, as it reflects what they have available in the
# decoratored function.
instance = getattr(self.__wrapped__, '__self__', None)
return self._self_wrapper(self.__wrapped__, instance, args,
kwargs)
| BoundFunctionWrapper |
python | keras-team__keras | keras/src/layers/reshaping/cropping3d.py | {
"start": 258,
"end": 11265
} | class ____(Layer):
"""Cropping layer for 3D data (e.g. spatial or spatio-temporal).
Example:
>>> input_shape = (2, 28, 28, 10, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> y = keras.layers.Cropping3D(cropping=(2, 4, 2))(x)
>>> y.shape
(2, 24, 20, 6, 3)
Args:
cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric cropping is applied to depth, height,
and width.
- If tuple of 3 ints: interpreted as three different symmetric
cropping values for depth, height, and width:
`(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.
- If tuple of 3 tuples of 2 ints: interpreted as
`((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
right_dim2_crop), (left_dim3_crop, right_dim3_crop))`.
data_format: A string, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
When unspecified, uses `image_data_format` value found in your Keras
config file at `~/.keras/keras.json` (if exists). Defaults to
`"channels_last"`.
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_axis_to_crop, second_axis_to_crop,
third_axis_to_crop, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, first_axis_to_crop, second_axis_to_crop,
third_axis_to_crop)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_cropped_axis, second_cropped_axis,
third_cropped_axis, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, first_cropped_axis, second_cropped_axis,
third_cropped_axis)`
"""
def __init__(
self, cropping=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs
):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
if isinstance(cropping, int):
if cropping < 0:
raise ValueError(
"`cropping` cannot be negative. "
f"Received: cropping={cropping}."
)
self.cropping = (
(cropping, cropping),
(cropping, cropping),
(cropping, cropping),
)
elif hasattr(cropping, "__len__"):
if len(cropping) != 3:
raise ValueError(
f"`cropping` should have 3 elements. Received: {cropping}."
)
dim1_cropping = argument_validation.standardize_tuple(
cropping[0], 2, "1st entry of cropping", allow_zero=True
)
dim2_cropping = argument_validation.standardize_tuple(
cropping[1], 2, "2nd entry of cropping", allow_zero=True
)
dim3_cropping = argument_validation.standardize_tuple(
cropping[2], 2, "3rd entry of cropping", allow_zero=True
)
self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
else:
raise ValueError(
"`cropping` should be either an int, a tuple of 3 ints "
"(symmetric_dim1_crop, symmetric_dim2_crop, "
"symmetric_dim3_crop), "
"or a tuple of 3 tuples of 2 ints "
"((left_dim1_crop, right_dim1_crop),"
" (left_dim2_crop, right_dim2_crop),"
" (left_dim3_crop, right_dim2_crop)). "
f"Received: {cropping}."
)
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
if self.data_format == "channels_first":
spatial_dims = list(input_shape[2:5])
else:
spatial_dims = list(input_shape[1:4])
for index in range(0, 3):
if spatial_dims[index] is None:
continue
spatial_dims[index] -= sum(self.cropping[index])
if spatial_dims[index] <= 0:
raise ValueError(
"Values in `cropping` argument should be smaller than the "
"corresponding spatial dimension of the input. Received: "
f"input_shape={input_shape}, cropping={self.cropping}"
)
if self.data_format == "channels_first":
return (input_shape[0], input_shape[1], *spatial_dims)
else:
return (input_shape[0], *spatial_dims, input_shape[4])
def call(self, inputs):
if self.data_format == "channels_first":
spatial_dims = list(inputs.shape[2:5])
else:
spatial_dims = list(inputs.shape[1:4])
for index in range(0, 3):
if spatial_dims[index] is None:
continue
spatial_dims[index] -= sum(self.cropping[index])
if spatial_dims[index] <= 0:
raise ValueError(
"Values in `cropping` argument should be smaller than the "
"corresponding spatial dimension of the input. Received: "
f"inputs.shape={inputs.shape}, cropping={self.cropping}"
)
if self.data_format == "channels_first":
if (
self.cropping[0][1]
== self.cropping[1][1]
== self.cropping[2][1]
== 0
):
return inputs[
:,
:,
self.cropping[0][0] :,
self.cropping[1][0] :,
self.cropping[2][0] :,
]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] :,
self.cropping[1][0] :,
self.cropping[2][0] : -self.cropping[2][1],
]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
self.cropping[2][0] :,
]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] :,
]
elif self.cropping[0][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] : -self.cropping[2][1],
]
elif self.cropping[1][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
self.cropping[2][0] : -self.cropping[2][1],
]
elif self.cropping[2][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] :,
]
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] : -self.cropping[2][1],
]
else:
if (
self.cropping[0][1]
== self.cropping[1][1]
== self.cropping[2][1]
== 0
):
return inputs[
:,
self.cropping[0][0] :,
self.cropping[1][0] :,
self.cropping[2][0] :,
:,
]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[
:,
self.cropping[0][0] :,
self.cropping[1][0] :,
self.cropping[2][0] : -self.cropping[2][1],
:,
]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
self.cropping[2][0] :,
:,
]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] :,
:,
]
elif self.cropping[0][1] == 0:
return inputs[
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] : -self.cropping[2][1],
:,
]
elif self.cropping[1][1] == 0:
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
self.cropping[2][0] : -self.cropping[2][1],
:,
]
elif self.cropping[2][1] == 0:
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] :,
:,
]
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] : -self.cropping[2][1],
:,
]
def get_config(self):
config = {"cropping": self.cropping, "data_format": self.data_format}
base_config = super().get_config()
return {**base_config, **config}
| Cropping3D |
python | kamyu104__LeetCode-Solutions | Python/replace-question-marks-in-string-to-minimize-its-value.py | {
"start": 1576,
"end": 2533
} | class ____(object):
def minimizeStringValue(self, s):
"""
:type s: str
:rtype: str
"""
def counting_sort(cnt):
for i in xrange(len(cnt)):
for _ in xrange(cnt[i]):
yield i
cnt = [0]*26
for x in s:
if x == '?':
continue
cnt[ord(x)-ord('a')] += 1
min_heap = [(x, i) for i, x in enumerate(cnt)]
heapq.heapify(min_heap)
cnt2 = [0]*26
for _ in xrange(s.count('?')):
c, i = heapq.heappop(min_heap)
heapq.heappush(min_heap, (c+1, i))
cnt2[i] += 1
it = counting_sort(cnt2)
result = list(s)
for i in xrange(len(result)):
if result[i] != '?':
continue
result[i] = chr(ord('a')+next(it))
return "".join(result)
# Time: O(n + n * 26)
# Space: O(26)
# greedy, counting sort
| Solution2 |
python | kamyu104__LeetCode-Solutions | Python/count-ways-to-distribute-candies.py | {
"start": 33,
"end": 383
} | class ____(object):
def waysToDistribute(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
MOD = 10**9+7
dp = [1]*k
for i in xrange(1, n):
for j in reversed(xrange(1, min(i, k))):
dp[j] = ((j+1)*dp[j] + dp[j-1]) % MOD
return dp[k-1]
| Solution |
python | doocs__leetcode | solution/0100-0199/0174.Dungeon Game/Solution.py | {
"start": 0,
"end": 410
} | class ____:
def calculateMinimumHP(self, dungeon: List[List[int]]) -> int:
m, n = len(dungeon), len(dungeon[0])
dp = [[inf] * (n + 1) for _ in range(m + 1)]
dp[m][n - 1] = dp[m - 1][n] = 1
for i in range(m - 1, -1, -1):
for j in range(n - 1, -1, -1):
dp[i][j] = max(1, min(dp[i + 1][j], dp[i][j + 1]) - dungeon[i][j])
return dp[0][0]
| Solution |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 12001,
"end": 12249
} | class ____(HTTPException):
"""
base class for exceptions with status codes in the 400s and 500s
This is an exception which indicates that an error has occurred,
and that any work in progress should not be committed.
"""
| HTTPError |
python | getsentry__sentry | tests/sentry/seer/explorer/test_tools.py | {
"start": 28113,
"end": 28267
} | class ____(BaseModel):
"""Output of ActorSerializer."""
type: Literal["user", "team"]
id: str
name: str
email: str | None = None
| _Actor |
python | ray-project__ray | python/ray/llm/_internal/common/utils/upload_utils.py | {
"start": 448,
"end": 4513
} | class ____(CloudModelAccessor):
"""Unified uploader to upload models to cloud storage (S3 or GCS).
Args:
model_id: The model id to upload.
mirror_config: The mirror config for the model.
"""
def upload_model(self) -> str:
"""Upload the model to cloud storage (s3 or gcs).
Returns:
The remote path of the uploaded model.
"""
bucket_uri = self.mirror_config.bucket_uri
lock_path = self._get_lock_path()
path = self._get_model_path()
storage_type = self.mirror_config.storage_type
try:
# Timeout 0 means there will be only one attempt to acquire
# the file lock. If it cannot be acquired, a TimeoutError
# will be thrown.
# This ensures that subsequent processes don't duplicate work.
with FileLock(lock_path, timeout=0):
try:
CloudFileSystem.upload_model(
local_path=path,
bucket_uri=bucket_uri,
)
logger.info(
"Finished uploading %s to %s storage",
self.model_id,
storage_type.upper() if storage_type else "cloud",
)
except RuntimeError:
logger.exception(
"Failed to upload model %s to %s storage",
self.model_id,
storage_type.upper() if storage_type else "cloud",
)
except TimeoutError:
# If the directory is already locked, then wait but do not do anything.
with FileLock(lock_path, timeout=-1):
pass
return bucket_uri
def upload_model_files(model_id: str, bucket_uri: str) -> str:
"""Upload the model files to cloud storage (s3 or gcs).
If `model_id` is a local path, the files will be uploaded to the cloud storage.
If `model_id` is a huggingface model id, the model will be downloaded from huggingface
and then uploaded to the cloud storage.
Args:
model_id: The huggingface model id, or local model path to upload.
bucket_uri: The bucket uri to upload the model to, must start with `s3://` or `gs://`.
Returns:
The remote path of the uploaded model.
"""
assert not is_remote_path(
model_id
), f"model_id must NOT be a remote path: {model_id}"
assert is_remote_path(bucket_uri), f"bucket_uri must be a remote path: {bucket_uri}"
if not Path(model_id).exists():
maybe_downloaded_model_path = get_model_entrypoint(model_id)
if not Path(maybe_downloaded_model_path).exists():
logger.info(
"Assuming %s is huggingface model id, and downloading it.", model_id
)
import huggingface_hub
huggingface_hub.snapshot_download(repo_id=model_id)
# Try to get the model path again after downloading.
maybe_downloaded_model_path = get_model_entrypoint(model_id)
assert Path(
maybe_downloaded_model_path
).exists(), f"Failed to download the model {model_id} to {maybe_downloaded_model_path}"
return upload_model_files(maybe_downloaded_model_path, bucket_uri)
else:
return upload_model_files(maybe_downloaded_model_path, bucket_uri)
uploader = CloudModelUploader(model_id, CloudMirrorConfig(bucket_uri=bucket_uri))
return uploader.upload_model()
def upload_model_cli(
model_source: Annotated[
str,
typer.Option(
help="HuggingFace model ID to download, or local model path to upload",
),
],
bucket_uri: Annotated[
str,
typer.Option(
help="The bucket uri to upload the model to, must start with `s3://` or `gs://`",
),
],
):
"""Upload the model files to cloud storage (s3 or gcs)."""
upload_model_files(model_source, bucket_uri)
| CloudModelUploader |
python | apache__airflow | providers/elasticsearch/src/airflow/providers/elasticsearch/hooks/elasticsearch.py | {
"start": 1572,
"end": 3440
} | class ____:
"""A PEP 249-like Cursor class for Elasticsearch SQL API."""
def __init__(self, es: Elasticsearch, **kwargs):
self.es = es
self.body = {
"fetch_size": kwargs.get("fetch_size", 1000),
"field_multi_value_leniency": kwargs.get("field_multi_value_leniency", False),
}
self._response: ObjectApiResponse | None = None
@property
def response(self) -> ObjectApiResponse:
return self._response or {} # type: ignore
@response.setter
def response(self, value):
self._response = value
@property
def cursor(self):
return self.response.get("cursor")
@property
def rows(self):
return self.response.get("rows", [])
@property
def rowcount(self) -> int:
return len(self.rows)
@property
def description(self) -> list[tuple]:
return [(column["name"], column["type"]) for column in self.response.get("columns", [])]
def execute(
self, statement: str, params: Iterable | Mapping[str, Any] | None = None
) -> ObjectApiResponse:
self.body["query"] = statement
if params:
self.body["params"] = params
self.response = self.es.sql.query(body=self.body)
if self.cursor:
self.body["cursor"] = self.cursor
else:
self.body.pop("cursor", None)
return self.response
def fetchone(self):
if self.rows:
return self.rows[0]
return None
def fetchmany(self, size: int | None = None):
raise NotImplementedError()
def fetchall(self):
results = self.rows
while self.cursor:
self.execute(statement=self.body["query"])
results.extend(self.rows)
return results
def close(self):
self._response = None
| ElasticsearchSQLCursor |
python | google__pytype | pytype/tools/merge_pyi/test_data/heuristics.pep484.py | {
"start": 142,
"end": 227
} | class ____:
def __init__(self):
pass
def f(self, x: e1):
pass
| B |
python | getsentry__sentry | tests/sentry/integrations/utils/test_atlassian_connect.py | {
"start": 355,
"end": 6291
} | class ____(TestCase):
def setUp(self) -> None:
self.factory = RequestFactory()
self.provider = "jira"
self.integration = self.create_integration(
organization=self.organization,
external_id="testserver.jira:123",
metadata={"shared_secret": "shared-super-secret"},
provider=self.provider,
)
self.path = f"/extensions/{self.provider}/configure/"
self.method = "GET"
self.query_params = {"a": "1", "b": "2", "c": "3", "test": "pass"}
self.query_string = "a=1&b=2&c=3&test=pass"
self.query_hash = "36f43b88d6a8cdf89bb8f744e2378bb0ceb6378e80ab0b513082a8b72396bccc"
self.valid_jwt = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ0ZXN0c2VydmVyLmppcmE6MTIzIiwiaWF0IjoxMjM0NTY3ODkwLCJleHAiOjk5OTk5OTk5OTksInFzaCI6IjM2ZjQzYjg4ZDZhOGNkZjg5YmI4Zjc0NGUyMzc4YmIwY2ViNjM3OGU4MGFiMGI1MTMwODJhOGI3MjM5NmJjY2MiLCJzdWIiOiJjb25uZWN0OjEyMyJ9.DjaYGvzLDO0RWTbNRHk3jyXsUvo9Jb7fAP8hguqpMvE"
self.unknown_issuer_jwt = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ0ZXN0c2VydmVyLmppcmE6dW5rbm93biIsImlhdCI6MTIzNDU2Nzg5MCwiZXhwIjo5OTk5OTk5OTk5LCJxc2giOiIzNmY0M2I4OGQ2YThjZGY4OWJiOGY3NDRlMjM3OGJiMGNlYjYzNzhlODBhYjBiNTEzMDgyYThiNzIzOTZiY2NjIiwic3ViIjoiY29ubmVjdDoxMjMifQ.dhIYA45uNkp4jONnpniNeW-k7E3dywJhPzMI55KVlus"
self.invalid_secret_jwt = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ0ZXN0c2VydmVyLmppcmE6MTIzIiwiaWF0IjoxMjM0NTY3ODkwLCJleHAiOjk5OTk5OTk5OTksInFzaCI6IjM2ZjQzYjg4ZDZhOGNkZjg5YmI4Zjc0NGUyMzc4YmIwY2ViNjM3OGU4MGFiMGI1MTMwODJhOGI3MjM5NmJjY2MiLCJzdWIiOiJjb25uZWN0OjEyMyJ9.7nGQQWUeXewnfL8_yvwzLGyf_rgkGdaQxKbDoi7tu_g"
self.expired_jwt = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ0ZXN0c2VydmVyLmppcmE6MTIzIiwiaWF0IjoxMjM0NTY3ODkwLCJleHAiOjEyMzQ1Njc4OTAsInFzaCI6IjM2ZjQzYjg4ZDZhOGNkZjg5YmI4Zjc0NGUyMzc4YmIwY2ViNjM3OGU4MGFiMGI1MTMwODJhOGI3MjM5NmJjY2MiLCJzdWIiOiJjb25uZWN0OjEyMyJ9.1ZIrXDbaS6nUMgtmdCE1BFbsT7yvNKTkzVnSjX-Q7TA"
def test_get_token_success(self) -> None:
request = self.factory.post(path=self.path, HTTP_AUTHORIZATION=f"JWT {self.valid_jwt}")
assert get_token(request) == self.valid_jwt
request = self.factory.post(path=self.path, HTTP_AUTHORIZATION=f"Bearer {self.valid_jwt}")
assert get_token(request) == self.valid_jwt
def test_get_token_error(self) -> None:
request = self.factory.post(path=self.path, AUTHORIZATION=f"JWT {self.valid_jwt}")
with pytest.raises(AtlassianConnectValidationError):
get_token(request)
request = self.factory.post(path=self.path, HTTP_AUTHORIZATION=f"JWT{self.valid_jwt}")
with pytest.raises(AtlassianConnectValidationError):
get_token(request)
def test_get_query_hash(self) -> None:
result = get_query_hash(uri=self.path, method=self.method, query_params=self.query_params)
assert result == self.query_hash
def test_get_integration_from_jwt_success(self) -> None:
integration = get_integration_from_jwt(
token=self.valid_jwt,
path=self.path,
provider=self.provider,
query_params=self.query_params,
method=self.method,
)
assert integration.id == self.integration.id
def test_get_integration_from_jwt_failure(self) -> None:
try:
get_integration_from_jwt(
token=None, path=self.path, provider=self.provider, query_params=None
)
except AtlassianConnectValidationError as e:
assert str(e) == "No token parameter"
try:
get_integration_from_jwt(
token=self.unknown_issuer_jwt,
path=self.path,
provider=self.provider,
query_params=self.query_params,
method=self.method,
)
except AtlassianConnectValidationError as e:
assert str(e) == "No integration found"
try:
get_integration_from_jwt(
token=self.invalid_secret_jwt,
path=self.path,
provider=self.provider,
query_params=self.query_params,
method=self.method,
)
except AtlassianConnectValidationError as e:
assert str(e) == "Signature is invalid"
try:
get_integration_from_jwt(
token=self.valid_jwt,
path=self.path,
provider=self.provider,
query_params={"wrong": "query_params"},
method=self.method,
)
except AtlassianConnectValidationError as e:
assert str(e) == "Query hash mismatch"
try:
get_integration_from_jwt(
token=self.expired_jwt,
path=self.path,
provider=self.provider,
query_params=self.query_params,
method=self.method,
)
except AtlassianConnectValidationError as e:
assert str(e) == "Signature is expired"
@override_settings(SILO_MODE=SiloMode.CONTROL)
def test_parse_integration_from_request(self) -> None:
"""This is the only function unique to the Control Silo"""
# From request header...
request = self.factory.get(
path=self.path,
HTTP_AUTHORIZATION=f"JWT {self.valid_jwt}",
QUERY_STRING=self.query_string,
)
integration = parse_integration_from_request(request=request, provider=self.provider)
assert integration == self.integration
# From query string...
request = self.factory.get(
path=self.path,
QUERY_STRING=self.query_string + f"&jwt={self.valid_jwt}",
)
integration = parse_integration_from_request(request=request, provider=self.provider)
assert integration == self.integration
| AtlassianConnectTest |
python | kamyu104__LeetCode-Solutions | Python/split-the-array.py | {
"start": 63,
"end": 276
} | class ____(object):
def isPossibleToSplit(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
return all(v <= 2 for v in collections.Counter(nums).itervalues())
| Solution |
python | pytorch__pytorch | test/package/test_analyze.py | {
"start": 310,
"end": 720
} | class ____(PackageTestCase):
"""Dependency analysis API tests."""
def test_trace_dependencies(self):
import test_trace_dep
obj = test_trace_dep.SumMod()
used_modules = analyze.trace_dependencies(obj, [(torch.randn(4),)])
self.assertNotIn("yaml", used_modules)
self.assertIn("test_trace_dep", used_modules)
if __name__ == "__main__":
run_tests()
| TestAnalyze |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 24791,
"end": 25047
} | class ____(WebHookMixin, CreateView):
success_message = _("Webhook created")
def get_success_url(self):
return reverse(
"projects_webhooks_edit",
args=[self.get_project().slug, self.object.pk],
)
| WebHookCreate |
python | huggingface__transformers | tests/models/omdet_turbo/test_processing_omdet_turbo.py | {
"start": 1104,
"end": 3518
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = OmDetTurboProcessor
text_input_name = "classes_input_ids"
input_keys = [
"tasks_input_ids",
"tasks_attention_mask",
"classes_input_ids",
"classes_attention_mask",
"classes_structure",
"pixel_values",
"pixel_mask",
]
batch_size = 5
num_queries = 5
embed_dim = 3
@classmethod
def _setup_tokenizer(cls):
tokenizer_class = cls._get_component_class_from_processor("tokenizer")
return tokenizer_class.from_pretrained("openai/clip-vit-base-patch32")
def get_fake_omdet_turbo_output(self):
classes = self.get_fake_omdet_turbo_classes()
classes_structure = torch.tensor([len(sublist) for sublist in classes])
torch.manual_seed(42)
return OmDetTurboObjectDetectionOutput(
decoder_coord_logits=torch.rand(self.batch_size, self.num_queries, 4),
decoder_class_logits=torch.rand(self.batch_size, self.num_queries, self.embed_dim),
classes_structure=classes_structure,
)
def get_fake_omdet_turbo_classes(self):
return [[f"class{i}_{j}" for i in range(self.num_queries)] for j in range(self.batch_size)]
def test_post_process_grounded_object_detection(self):
processor = self.get_processor()
omdet_turbo_output = self.get_fake_omdet_turbo_output()
omdet_turbo_classes = self.get_fake_omdet_turbo_classes()
post_processed = processor.post_process_grounded_object_detection(
omdet_turbo_output, omdet_turbo_classes, target_sizes=[(400, 30) for _ in range(self.batch_size)]
)
self.assertEqual(len(post_processed), self.batch_size)
self.assertEqual(list(post_processed[0].keys()), ["boxes", "scores", "labels", "text_labels"])
self.assertEqual(post_processed[0]["boxes"].shape, (self.num_queries, 4))
self.assertEqual(post_processed[0]["scores"].shape, (self.num_queries,))
expected_scores = torch.tensor([0.7310, 0.6579, 0.6513, 0.6444, 0.6252])
torch.testing.assert_close(post_processed[0]["scores"], expected_scores, rtol=1e-4, atol=1e-4)
expected_box_slice = torch.tensor([14.9657, 141.2052, 30.0000, 312.9670])
torch.testing.assert_close(post_processed[0]["boxes"][0], expected_box_slice, rtol=1e-4, atol=1e-4)
| OmDetTurboProcessorTest |
python | wandb__wandb | wandb/vendor/pygments/styles/tango.py | {
"start": 1664,
"end": 7096
} | class ____(Style):
"""
The Crunchy default Style inspired from the color palette from
the Tango Icon Theme Guidelines.
"""
# work in progress...
background_color = "#f8f8f8"
default_style = ""
styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Multiline: "italic #8f5902", # class: 'cm'
Comment.Preproc: "italic #8f5902", # class: 'cp'
Comment.Single: "italic #8f5902", # class: 'c1'
Comment.Special: "italic #8f5902", # class: 'cs'
Keyword: "bold #204a87", # class: 'k'
Keyword.Constant: "bold #204a87", # class: 'kc'
Keyword.Declaration: "bold #204a87", # class: 'kd'
Keyword.Namespace: "bold #204a87", # class: 'kn'
Keyword.Pseudo: "bold #204a87", # class: 'kp'
Keyword.Reserved: "bold #204a87", # class: 'kr'
Keyword.Type: "bold #204a87", # class: 'kt'
Operator: "bold #ce5c00", # class: 'o'
Operator.Word: "bold #204a87", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#204a87", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "bold #5c35cc", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #204a87", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
# since the tango light blue does not show up well in text, we choose
# a pure blue instead.
Number: "bold #0000cf", # class: 'm'
Number.Float: "bold #0000cf", # class: 'mf'
Number.Hex: "bold #0000cf", # class: 'mh'
Number.Integer: "bold #0000cf", # class: 'mi'
Number.Integer.Long: "bold #0000cf", # class: 'il'
Number.Oct: "bold #0000cf", # class: 'mo'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "italic #000000", # class: 'go'
Generic.Prompt: "#8f5902", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
}
| TangoStyle |
python | getsentry__sentry | tests/sentry/workflow_engine/migration_helpers/test_migrate_alert_rule.py | {
"start": 58801,
"end": 61787
} | class ____(BaseMetricAlertMigrationTest):
"""
Tests for get_detector_trigger() and get_action_filter(), which are used to fetch the ACI
objects corresponding to an AlertRuleTrigger.
"""
def setUp(self) -> None:
self.metric_alert = self.create_alert_rule()
self.alert_rule_trigger = self.create_alert_rule_trigger(
alert_rule=self.metric_alert, label="critical"
)
self.create_migrated_metric_alert_objects(self.metric_alert)
self.detector_trigger, self.action_filter, self.resolve_action_filter = (
self.create_migrated_metric_alert_rule_trigger_objects(
self.alert_rule_trigger, DetectorPriorityLevel.HIGH, Condition.GREATER
)
)
def test_get_detector_trigger(self) -> None:
detector_trigger = get_detector_trigger(self.alert_rule_trigger, DetectorPriorityLevel.HIGH)
assert detector_trigger == self.detector_trigger
def test_get_action_filter(self) -> None:
action_filter = get_action_filter(self.alert_rule_trigger, DetectorPriorityLevel.HIGH)
assert action_filter == self.action_filter
def test_get_detector_trigger_no_detector_condition_group(self) -> None:
"""
Test that we raise an exception if the corresponding detector for an
alert rule trigger is missing its workflow condition group.
"""
detector = AlertRuleDetector.objects.get(alert_rule_id=self.metric_alert.id).detector
detector.update(workflow_condition_group=None)
with pytest.raises(MissingDataConditionGroup):
get_detector_trigger(self.alert_rule_trigger, DetectorPriorityLevel.HIGH)
def test_get_detector_trigger_no_detector_trigger(self) -> None:
"""
Test that we raise an exception if the corresponding detector trigger
for an alert rule trigger is missing.
"""
self.detector_trigger.delete()
with pytest.raises(DataCondition.DoesNotExist):
get_detector_trigger(self.alert_rule_trigger, DetectorPriorityLevel.HIGH)
def test_get_action_filter_no_workflow(self) -> None:
"""
Test that we raise an exception if the corresponding workflow for an
alert rule trigger action does not exist.
"""
workflow = AlertRuleWorkflow.objects.get(alert_rule_id=self.metric_alert.id).workflow
workflow.delete()
with pytest.raises(AlertRuleWorkflow.DoesNotExist):
get_action_filter(self.alert_rule_trigger, DetectorPriorityLevel.HIGH)
def test_get_action_filter_no_action_filter(self) -> None:
"""
Test that we raise an exception if the corresponding action filter for an
alert rule trigger action does not exist.
"""
self.action_filter.delete()
with pytest.raises(DataCondition.DoesNotExist):
get_action_filter(self.alert_rule_trigger, DetectorPriorityLevel.HIGH)
| DataConditionLookupHelpersTest |
python | Pylons__pyramid | tests/test_integration.py | {
"start": 10133,
"end": 11065
} | class ____(unittest.TestCase):
staticapp = static_view(os.path.join(here, 'fixtures'), use_subpath=False)
def _makeRequest(self, extra):
from io import BytesIO
from pyramid.request import Request
kw = {
'PATH_INFO': '',
'SCRIPT_NAME': '',
'SERVER_NAME': 'localhost',
'SERVER_PORT': '80',
'REQUEST_METHOD': 'GET',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': BytesIO(),
}
kw.update(extra)
request = Request(kw)
return request
def test_basic(self):
request = self._makeRequest({'PATH_INFO': '/minimal.txt'})
context = DummyContext()
result = self.staticapp(context, request)
self.assertEqual(result.status, '200 OK')
_assertBody(result.body, os.path.join(here, 'fixtures/minimal.txt'))
| TestStaticAppNoSubpath |
python | getsentry__sentry | src/sentry/api/serializers/models/organization_member/response.py | {
"start": 2140,
"end": 2265
} | class ____(OrganizationMemberResponse):
teams: list[str]
teamRoles: list[_TeamRole]
| OrganizationMemberWithTeamsResponse |
python | anthropics__anthropic-sdk-python | src/anthropic/_response.py | {
"start": 13963,
"end": 17419
} | class ____(BaseAPIResponse[R]):
@property
def request_id(self) -> str | None:
return self.http_response.headers.get("request-id") # type: ignore[no-any-return]
@overload
async def parse(self, *, to: type[_T]) -> _T: ...
@overload
async def parse(self) -> R: ...
async def parse(self, *, to: type[_T] | None = None) -> R | _T:
"""Returns the rich python representation of this response's data.
For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.
You can customise the type that the response is parsed into through
the `to` argument, e.g.
```py
from anthropic import BaseModel
class MyModel(BaseModel):
foo: str
obj = response.parse(to=MyModel)
print(obj.foo)
```
We support parsing:
- `BaseModel`
- `dict`
- `list`
- `Union`
- `str`
- `httpx.Response`
"""
cache_key = to if to is not None else self._cast_to
cached = self._parsed_by_type.get(cache_key)
if cached is not None:
return cached # type: ignore[no-any-return]
if not self._is_sse_stream:
await self.read()
parsed = self._parse(to=to)
if is_given(self._options.post_parser):
parsed = self._options.post_parser(parsed)
if isinstance(parsed, BaseModel):
add_request_id(parsed, self.request_id)
self._parsed_by_type[cache_key] = parsed
return cast(R, parsed)
async def read(self) -> bytes:
"""Read and return the binary response content."""
try:
return await self.http_response.aread()
except httpx.StreamConsumed as exc:
# the default error raised by httpx isn't very
# helpful in our case so we re-raise it with
# a different error message
raise StreamAlreadyConsumed() from exc
async def text(self) -> str:
"""Read and decode the response content into a string."""
await self.read()
return self.http_response.text
async def json(self) -> object:
"""Read and decode the JSON response content."""
await self.read()
return self.http_response.json()
async def close(self) -> None:
"""Close the response and release the connection.
Automatically called if the response body is read to completion.
"""
await self.http_response.aclose()
async def iter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]:
"""
A byte-iterator over the decoded response content.
This automatically handles gzip, deflate and brotli encoded responses.
"""
async for chunk in self.http_response.aiter_bytes(chunk_size):
yield chunk
async def iter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]:
"""A str-iterator over the decoded response content
that handles both gzip, deflate, etc but also detects the content's
string encoding.
"""
async for chunk in self.http_response.aiter_text(chunk_size):
yield chunk
async def iter_lines(self) -> AsyncIterator[str]:
"""Like `iter_text()` but will only yield chunks for each line"""
async for chunk in self.http_response.aiter_lines():
yield chunk
| AsyncAPIResponse |
python | kamyu104__LeetCode-Solutions | Python/count-the-number-of-complete-components.py | {
"start": 35,
"end": 990
} | class ____(object):
def countCompleteComponents(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: int
"""
def bfs(u):
if lookup[u]:
return False
v_cnt = e_cnt = 0
lookup[u] = True
q = [u]
while q:
new_q = []
v_cnt += len(q)
for u in q:
e_cnt += len(adj[u])
for v in adj[u]:
if lookup[v]:
continue
lookup[v] = True
new_q.append(v)
q = new_q
return v_cnt*(v_cnt-1) == e_cnt
adj = [[] for _ in xrange(n)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
lookup = [False]*n
return sum(bfs(u) for u in xrange(n) if not lookup[u])
| Solution |
python | great-expectations__great_expectations | contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_profile_numeric_columns_diff_greater_than_or_equal_to_threshold.py | {
"start": 939,
"end": 5626
} | class ____(
DataProfilerProfileMetricProvider
):
metric_name = "data_profiler.profile_numeric_columns_diff_greater_than_or_equal_to_threshold"
value_keys = (
"profile_path",
"limit_check_report_keys",
"numerical_diff_statistics",
)
@metric_value(engine=PandasExecutionEngine)
def _pandas( # noqa: C901 - too complex
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[str, Any],
runtime_configuration: Dict,
):
profile_diff = metrics.get("data_profiler.profile_diff")
numeric_columns = metrics.get("data_profiler.profile_numeric_columns")
limit_check_report_keys = metric_value_kwargs["limit_check_report_keys"]
numerical_diff_statistics = metric_value_kwargs["numerical_diff_statistics"]
columns = list(profile_diff["global_stats"]["profile_schema"][1].keys())
data_stats = profile_diff["data_stats"]
requested_columns = {}
# Adds columns if generic column key is provided
# Note: Copy is required for all metric arguments to ensure metric_value_id is identified correctly
limit_check_report_keys_copy = copy.deepcopy(limit_check_report_keys)
limit_check_report_keys_copy = replace_generic_operator_in_report_keys(
limit_check_report_keys_copy, numeric_columns
)
for col, stats in limit_check_report_keys_copy.items():
if col not in numeric_columns: # Makes sure column requested is numeric
requested_columns[col] = "Column is Non-Numeric"
continue
# adds stats if generic stat key is provided
numerical_diff_statistics_copy = copy.deepcopy(numerical_diff_statistics)
stats = replace_generic_operator_in_report_keys(stats, numerical_diff_statistics_copy)
if col not in columns: # Makes sure column exists within profile schema
requested_columns[col] = "Column requested was not found."
continue
col_data_stats = {}
for data_stat in data_stats:
if data_stat["column_name"] == col:
col_data_stats = data_stat["statistics"]
break
requested_columns[col] = {}
for stat, threshold in stats.items():
if stat not in col_data_stats:
requested_columns[col][stat] = "Statistic requested was not found."
continue
diff_val = col_data_stats[stat]
if diff_val == "unchanged": # In the case there is no delta
diff_val = 0
is_greater = is_value_greater_than_or_equal_to_threshold(diff_val, threshold)
if not is_greater:
requested_columns[col][stat] = {
"threshold": threshold,
"value_found": diff_val,
}
else:
requested_columns[col][stat] = True
return requested_columns
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""
Returns a dictionary of given metric names and their corresponding configuration, specifying
the metric types and their respective domains"""
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if (
metric.metric_name
== "data_profiler.profile_numeric_columns_diff_greater_than_or_equal_to_threshold"
):
dependencies["data_profiler.profile_diff"] = MetricConfiguration(
metric_name="data_profiler.profile_diff",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
dependencies["data_profiler.profile_numeric_columns"] = MetricConfiguration(
metric_name="data_profiler.profile_numeric_columns",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
return dependencies
| DataProfilerProfileNumericColumnsDiffGreaterThanOrEqualToThreshold |
python | celery__celery | examples/pydantic/tasks.py | {
"start": 101,
"end": 145
} | class ____(BaseModel):
value: int
| ArgModel |
python | sphinx-doc__sphinx | sphinx/ext/autodoc/importer.py | {
"start": 4654,
"end": 11929
} | class ____(NamedTuple):
name: str
directly_defined: bool
value: Any
# Retained: legacy class-based
def get_object_members(
subject: Any,
objpath: list[str],
attrgetter: _AttrGetter,
analyzer: ModuleAnalyzer | None = None,
) -> dict[str, Attribute]:
"""Get members and attributes of target object."""
from sphinx.ext.autodoc._legacy_class_based._sentinels import INSTANCEATTR
# the members directly defined in the class
obj_dict = attrgetter(subject, '__dict__', {})
members: dict[str, Attribute] = {}
# enum members
if isenumclass(subject):
for name, defining_class, value in _filter_enum_dict(
subject, attrgetter, obj_dict
):
# the order of occurrence of *name* matches the subject's MRO,
# allowing inherited attributes to be shadowed correctly
if unmangled := unmangle(defining_class, name):
members[unmangled] = Attribute(
name=unmangled,
directly_defined=defining_class is subject,
value=value,
)
# members in __slots__
try:
subject___slots__ = getslots(subject)
if subject___slots__:
from sphinx.ext.autodoc._legacy_class_based._sentinels import SLOTSATTR
for name in subject___slots__:
members[name] = Attribute(
name=name, directly_defined=True, value=SLOTSATTR
)
except (TypeError, ValueError):
pass
# other members
for name in dir(subject):
try:
value = attrgetter(subject, name)
directly_defined = name in obj_dict
unmangled = unmangle(subject, name)
if unmangled and unmangled not in members:
members[unmangled] = Attribute(
name=unmangled, directly_defined=directly_defined, value=value
)
except AttributeError:
continue
# annotation only member (ex. attr: int)
for cls in getmro(subject):
for name in getannotations(cls):
unmangled = unmangle(cls, name)
if unmangled and unmangled not in members:
members[unmangled] = Attribute(
name=unmangled, directly_defined=cls is subject, value=INSTANCEATTR
)
if analyzer:
# append instance attributes (cf. self.attr1) if analyzer knows
namespace = '.'.join(objpath)
for ns, name in analyzer.find_attr_docs():
if namespace == ns and name not in members:
members[name] = Attribute(
name=name, directly_defined=True, value=INSTANCEATTR
)
return members
# Retained: legacy class-based
def get_class_members(
subject: Any, objpath: Any, attrgetter: _AttrGetter, inherit_docstrings: bool = True
) -> dict[str, ObjectMember]:
"""Get members and attributes of target class."""
from sphinx.ext.autodoc._legacy_class_based._documenters import ObjectMember
from sphinx.ext.autodoc._legacy_class_based._sentinels import INSTANCEATTR
# the members directly defined in the class
obj_dict = attrgetter(subject, '__dict__', {})
members: dict[str, ObjectMember] = {}
# enum members
if isenumclass(subject):
for name, defining_class, value in _filter_enum_dict(
subject, attrgetter, obj_dict
):
# the order of occurrence of *name* matches the subject's MRO,
# allowing inherited attributes to be shadowed correctly
if unmangled := unmangle(defining_class, name):
members[unmangled] = ObjectMember(
unmangled, value, class_=defining_class
)
# members in __slots__
try:
subject___slots__ = getslots(subject)
if subject___slots__:
from sphinx.ext.autodoc._legacy_class_based._sentinels import SLOTSATTR
for name, docstring in subject___slots__.items():
members[name] = ObjectMember(
name, SLOTSATTR, class_=subject, docstring=docstring
)
except (TypeError, ValueError):
pass
# other members
for name in dir(subject):
try:
value = attrgetter(subject, name)
if ismock(value):
value = undecorate(value)
unmangled = unmangle(subject, name)
if unmangled and unmangled not in members:
if name in obj_dict:
members[unmangled] = ObjectMember(unmangled, value, class_=subject)
else:
members[unmangled] = ObjectMember(unmangled, value)
except AttributeError:
continue
try:
for cls in getmro(subject):
try:
modname = safe_getattr(cls, '__module__')
qualname = safe_getattr(cls, '__qualname__')
analyzer = ModuleAnalyzer.for_module(modname)
analyzer.analyze()
except AttributeError:
qualname = None
analyzer = None
except PycodeError:
analyzer = None
# annotation only member (ex. attr: int)
for name in getannotations(cls):
unmangled = unmangle(cls, name)
if unmangled and unmangled not in members:
if analyzer and (qualname, unmangled) in analyzer.attr_docs:
docstring = '\n'.join(analyzer.attr_docs[qualname, unmangled])
else:
docstring = None
members[unmangled] = ObjectMember(
unmangled, INSTANCEATTR, class_=cls, docstring=docstring
)
# append or complete instance attributes (cf. self.attr1) if analyzer knows
if analyzer:
for (ns, name), docstring in analyzer.attr_docs.items():
if ns == qualname and name not in members:
# otherwise unknown instance attribute
members[name] = ObjectMember(
name,
INSTANCEATTR,
class_=cls,
docstring='\n'.join(docstring),
)
elif (
ns == qualname
and docstring
and isinstance(members[name], ObjectMember)
and not members[name].docstring
):
if cls != subject and not inherit_docstrings:
# If we are in the MRO of the class and not the class itself,
# and we do not want to inherit docstrings, then skip setting
# the docstring below
continue
# attribute is already known, because dir(subject) enumerates it.
# But it has no docstring yet
members[name].docstring = '\n'.join(docstring)
except AttributeError:
pass
return members
| Attribute |
python | pytorch__pytorch | torch/_inductor/runtime/caching/locks.py | {
"start": 1049,
"end": 7511
} | class ____(Protocol): # noqa: PYI046
def __call__(self, timeout: float | None = None) -> _LockContextManager: ...
# Infinite timeout - blocks indefinitely until lock is acquired.
_BLOCKING: float = -1
# No timeout - returns immediately if lock cannot be acquired.
_NON_BLOCKING: float = 0
# Finite timeout - blocks for a specified duration before raising a timeout error.
_BLOCKING_WITH_TIMEOUT: float = 60.0
# Default timeout for lock acquisition.
_DEFAULT_TIMEOUT: float = _BLOCKING_WITH_TIMEOUT
@contextmanager
def _acquire_lock_with_timeout(
lock: Lock,
timeout: float | None = None,
) -> Generator[None, None, None]:
"""Context manager that safely acquires a threading.Lock with timeout and automatically releases it.
This function provides a safe way to acquire a lock with timeout support, ensuring
the lock is always released even if an exception occurs during execution.
Args:
lock: The threading.Lock object to acquire
timeout: Timeout in seconds. If None, uses _DEFAULT_TIMEOUT.
- Use _BLOCKING (-1.0) for infinite wait
- Use _NON_BLOCKING (0.0) for immediate return
- Use positive value for finite timeout
Yields:
None: Yields control to the caller while holding the lock
Raises:
LockTimeoutError: If the lock cannot be acquired within the timeout period
Example:
with _acquire_lock_with_timeout(my_lock, timeout=30.0):
# Critical section - lock is held
perform_critical_operation()
# Lock is automatically released here
"""
_unsafe_acquire_lock_with_timeout(lock, timeout=timeout)
try:
yield
finally:
lock.release()
def _unsafe_acquire_lock_with_timeout(lock: Lock, timeout: float | None = None) -> None:
"""Acquire a threading.Lock with timeout without automatic release (unsafe).
This function acquires a lock with timeout support but does NOT automatically
release it. The caller is responsible for releasing the lock explicitly.
Use this only when you need manual control over lock lifetime.
Args:
lock: The threading.Lock object to acquire
timeout: Timeout in seconds. If None, uses _DEFAULT_TIMEOUT.
- Use _BLOCKING (-1.0) for infinite wait
- Use _NON_BLOCKING (0.0) for immediate return
- Use positive value for finite timeout
Raises:
LockTimeoutError: If the lock cannot be acquired within the timeout period
Warning:
This is an "unsafe" function because it does not automatically release
the lock. Always call lock.release() when done, preferably in a try/finally
block or use the safe _acquire_lock_with_timeout context manager instead.
Example:
lock = Lock()
try:
_unsafe_acquire_lock_with_timeout(lock, timeout=30.0)
# Critical section - lock is held
perform_critical_operation()
finally:
lock.release() # Must manually release!
"""
_timeout: float = timeout if timeout is not None else _DEFAULT_TIMEOUT
if not lock.acquire(timeout=_timeout):
raise exceptions.LockTimeoutError(lock, _timeout)
@contextmanager
def _acquire_flock_with_timeout(
flock: FileLock,
timeout: float | None = None,
) -> Generator[None, None, None]:
"""Context manager that safely acquires a FileLock with timeout and automatically releases it.
This function provides a safe way to acquire a file lock with timeout support, ensuring
the lock is always released even if an exception occurs during execution.
Args:
flock: The FileLock object to acquire
timeout: Timeout in seconds. If None, uses _DEFAULT_TIMEOUT.
- Use _BLOCKING (-1.0) for infinite wait
- Use _NON_BLOCKING (0.0) for immediate return
- Use positive value for finite timeout
Yields:
None: Yields control to the caller while holding the file lock
Raises:
FileLockTimeoutError: If the file lock cannot be acquired within the timeout period
Example:
flock = FileLock("/tmp/my_process.lock")
with _acquire_flock_with_timeout(flock, timeout=30.0):
# Critical section - file lock is held
perform_exclusive_file_operation()
# File lock is automatically released here
"""
_unsafe_acquire_flock_with_timeout(flock, timeout=timeout)
try:
yield
finally:
flock.release()
def _unsafe_acquire_flock_with_timeout(flock: FileLock, timeout: float | None) -> None:
"""Acquire a FileLock with timeout without automatic release (unsafe).
This function acquires a file lock with timeout support but does NOT automatically
release it. The caller is responsible for releasing the lock explicitly.
Use this only when you need manual control over lock lifetime.
Args:
flock: The FileLock object to acquire
timeout: Timeout in seconds. If None, uses _DEFAULT_TIMEOUT.
- Use _BLOCKING (-1.0) for infinite wait
- Use _NON_BLOCKING (0.0) for immediate return
- Use positive value for finite timeout
Raises:
FileLockTimeoutError: If the file lock cannot be acquired within the timeout period
Warning:
This is an "unsafe" function because it does not automatically release
the lock. Always call flock.release() when done, preferably in a try/finally
block or use the safe _acquire_flock_with_timeout context manager instead.
Example:
flock = FileLock("/tmp/my_process.lock")
try:
_unsafe_acquire_flock_with_timeout(flock, timeout=30.0)
# Critical section - file lock is held
perform_exclusive_file_operation()
finally:
flock.release() # Must manually release!
"""
_timeout: float = timeout if timeout is not None else _DEFAULT_TIMEOUT
try:
_ = flock.acquire(timeout=_timeout)
except Timeout as err:
raise exceptions.FileLockTimeoutError(flock, _timeout) from err
@contextmanager
def _acquire_many_impl_locks_with_timeout(
*impls: impls._CacheImpl,
timeout: float | None = None,
) -> Generator[None, None, None]:
with ExitStack() as stack:
for impl in impls:
stack.enter_context(impl.lock(timeout))
yield
| _LockProtocol |
python | ray-project__ray | rllib/examples/envs/classes/multi_agent/footsies/game/constants.py | {
"start": 235,
"end": 363
} | class ____:
NONE = 0
LEFT = 1
RIGHT = 2
ATTACK = 3
LEFT_ATTACK = 4
RIGHT_ATTACK = 5
@dataclass
| GameActions |
python | pytorch__pytorch | test/distributed/tensor/test_optimizers.py | {
"start": 1451,
"end": 24775
} | class ____(DTensorTestBase):
def _assert_optimizer(
self,
mesh,
model,
optim,
dist_model,
dist_optim,
inputs,
*,
rtol: float = 1.3e-6,
atol: float = 1e-5,
):
for iter_idx in range(2):
# run forward/backward/optim for original model
optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
out = model(inputs)
loss = out.sum()
loss.backward()
optim.step()
# run forward/backward/optim for distributed model
dist_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
dist_out = dist_model(inputs)
dist_loss = dist_out.sum()
dist_loss.backward()
dist_optim.step()
# check that the optimizer update parameters with same numerics
for p1, p2 in zip(model.parameters(), dist_model.parameters()):
p2 = p2.full_tensor()
# Default 'rtol' and 'atol' for attr:`~torch.float32` are ``1.3e-6`` and ``1e-5``
self.assertEqual(p1, p2, atol=atol, rtol=rtol)
def test_optimizer_foreach_supported_types_include_DTensor(self):
from torch.optim.optimizer import _foreach_supported_types
self.assertTrue(DTensor in _foreach_supported_types)
@with_comms
def test_adam_1d_sharding(self):
mesh = self.build_device_mesh()
# lr as a Tensor is not supported for capturable=False and foreach=True
adam_float_lr_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05},
{"lr": 0.1, "weight_decay": 0.05, "amsgrad": True},
{
"lr": 0.1,
"weight_decay": 0.05,
"maximize": True,
"amsgrad": True,
},
]
fused_adam_float_lr_configs = [
{"lr": 0.1, "fused": True},
{"lr": 0.1, "weight_decay": 0.05, "amsgrad": True, "fused": True},
{
"lr": 0.1,
"weight_decay": 0.05,
"maximize": True,
"amsgrad": True,
"fused": True,
},
]
# lr could be a Tensor or a float when fused=True for adam optimizer
fused_adam_tensor_lr_configs = [
{**config, "lr": torch.tensor(0.1)}
for config in fused_adam_float_lr_configs
]
fused_adam_tensor_lr_configs.extend(
[
{**config, "lr": torch.tensor([0.1])}
for config in fused_adam_float_lr_configs
]
)
adam_configs = [
*adam_float_lr_configs,
*fused_adam_float_lr_configs,
*fused_adam_tensor_lr_configs,
]
for config in adam_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.Adam(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.Adam(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_adamw_1d_sharding(self):
mesh = self.build_device_mesh()
# lr as a Tensor is not supported for capturable=False and foreach=True
adamw_float_lr_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"amsgrad": True,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"maximize": True,
"amsgrad": True,
},
]
fused_adamw_float_lr_configs = [
{"lr": 0.1, "weight_decay": 0.05, "fused": True},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"amsgrad": True,
"fused": True,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"maximize": True,
"amsgrad": True,
"fused": True,
},
]
# lr could be a Tensor or a float when fused=True for adamW optimizer
fused_adamw_tensor_lr_configs = [
{**config, "lr": torch.tensor(0.1)}
for config in fused_adamw_float_lr_configs
]
fused_adamw_tensor_lr_configs.extend(
[
{**config, "lr": torch.tensor([0.1])}
for config in fused_adamw_float_lr_configs
]
)
adamw_configs = [
*adamw_float_lr_configs,
*fused_adamw_float_lr_configs,
*fused_adamw_tensor_lr_configs,
]
for config in adamw_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.AdamW(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.AdamW(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_sgd_1d_sharding(self):
mesh = self.build_device_mesh()
sgd_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "momentum": 0.05, "foreach": False},
{"lr": 0.1, "momentum": 0.05},
{"lr": 0.1, "momentum": 0.06, "dampening": 0.07},
{
"lr": 0.1,
"momentum": 0.08,
"weight_decay": 0.05,
"nesterov": True,
"maximize": True,
"foreach": False,
},
{
"lr": 0.1,
"momentum": 0.08,
"weight_decay": 0.05,
"nesterov": True,
"maximize": True,
},
]
for config in sgd_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.SGD(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.SGD(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_adagrad_1d_sharding(self):
mesh = self.build_device_mesh()
adagrad_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "lr_decay": 0.05, "foreach": False},
{"lr": 0.1, "lr_decay": 0.02, "weight_decay": 0.05, "foreach": False},
{
"lr": 0.1,
"lr_decay": 0.02,
"weight_decay": 0.05,
"initial_accumulator_value": 0.03,
"foreach": False,
},
{
"lr": 0.1,
"lr_decay": 0.02,
"weight_decay": 0.05,
"initial_accumulator_value": 0.03,
"eps": 1e-6,
"foreach": False,
},
{
"lr": 0.1,
"lr_decay": 0.02,
"weight_decay": 0.05,
"initial_accumulator_value": 0.03,
"eps": 1e-6,
"maximize": True,
"foreach": False,
},
{
"lr": 0.1,
"lr_decay": 0.02,
"weight_decay": 0.05,
"initial_accumulator_value": 0.03,
"eps": 1e-6,
"maximize": True,
},
]
for config in adagrad_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.Adagrad(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.Adagrad(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_RMSprop_1d_sharding(self):
mesh = self.build_device_mesh()
RMSprop_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "alpha": 0.85, "foreach": False},
{"lr": 0.1, "alpha": 0.88, "eps": 1e-6, "foreach": False},
{
"lr": 0.1,
"alpha": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
"foreach": False,
},
{
"lr": 0.1,
"alpha": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
"momentum": 0.9,
"foreach": False,
},
{
"lr": 0.1,
"alpha": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
"momentum": 0.9,
"centered": True,
"foreach": False,
},
{
"lr": 0.1,
"alpha": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
"momentum": 0.9,
"centered": True,
"maximize": True,
"foreach": False,
},
{
"lr": 0.1,
"alpha": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
"momentum": 0.9,
"centered": True,
"maximize": True,
},
]
for config in RMSprop_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.RMSprop(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.RMSprop(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_adadelta_1d_sharding(self):
mesh = self.build_device_mesh()
adadelta_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "rho": 0.85, "foreach": False},
{"lr": 0.1, "rho": 0.88, "eps": 1e-5, "foreach": False},
{
"lr": 0.1,
"rho": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
"foreach": False,
},
{
"lr": 0.1,
"rho": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
},
{
"lr": 0.1,
"rho": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
"maximize": True,
},
]
for config in adadelta_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.Adadelta(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.Adadelta(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_nadam_1d_sharding(self):
mesh = self.build_device_mesh()
nadam_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"decoupled_weight_decay": True,
},
]
for config in nadam_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.NAdam(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.NAdam(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_radam_1d_sharding(self):
mesh = self.build_device_mesh()
radam_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05, "foreach": False},
{
"lr": 0.1,
"weight_decay": 0.05,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"decoupled_weight_decay": True,
},
]
for config in radam_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.RAdam(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.RAdam(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_adamax_1d_sharding(self):
mesh = self.build_device_mesh()
adamax_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "betas": (0.6, 0.66), "foreach": False},
{"lr": 0.1, "betas": (0.6, 0.66), "eps": 1e-6, "foreach": False},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"foreach": False,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"maximize": True,
},
]
for config in adamax_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.Adamax(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.Adamax(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_asgd_1d_sharding(self):
mesh = self.build_device_mesh()
asgd_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "lambd": 0.001, "foreach": False},
{"lr": 0.1, "lambd": 0.001, "alpha": 0.85, "foreach": False},
{"lr": 0.1, "lambd": 0.001, "alpha": 0.85, "t0": 1e5, "foreach": False},
{
"lr": 0.1,
"lambd": 0.001,
"alpha": 0.85,
"t0": 1e5,
"weight_decay": 0.05,
"foreach": False,
},
{
"lr": 0.1,
"lambd": 0.001,
"alpha": 0.85,
"t0": 1e5,
"weight_decay": 0.05,
"foreach": True,
},
{
"lr": 0.1,
"lambd": 0.001,
"alpha": 0.85,
"t0": 1e5,
"weight_decay": 0.05,
"foreach": True,
"maximize": True,
},
]
for config in asgd_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.ASGD(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.ASGD(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
# TODO: We want to keep a unit test for ASGD optimizer for the time being, but we need to look into why
# when using ASGD we need higher atol and rtol when comparing model parameters.
# Default 'rtol' and 'atol' for attr:`~torch.float32` are ``1.3e-6`` and ``1e-5``
# Pointer here: https://github.com/pytorch/pytorch/blob/main/torch/testing/_comparison.py#L65
self._assert_optimizer(
mesh, mod, opt, dist_mod, dist_opt, inp, atol=1.3e-5, rtol=1e-4
)
@with_comms
def test_admaw_fused_across_meshes(self):
mesh_shape = (2, self.world_size // 2)
mesh_2d = init_device_mesh(
self.device_type, mesh_shape, mesh_dim_names=("x", "y")
)
mesh_flatten = mesh_2d[("x", "y")]._flatten(mesh_dim_name="mesh_flatten")
# lr as a Tensor is not supported for capturable=False and foreach=True
adamw_float_lr_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"amsgrad": True,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"maximize": True,
"amsgrad": True,
},
]
fused_adamw_float_lr_configs = [
{"lr": 0.1, "weight_decay": 0.05, "fused": True},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"amsgrad": True,
"fused": True,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"maximize": True,
"amsgrad": True,
"fused": True,
},
]
# lr could be a Tensor or a float when fused=True for adamW optimizer
fused_adamw_tensor_lr_configs = [
{**config, "lr": torch.tensor(0.1)}
for config in fused_adamw_float_lr_configs
]
fused_adamw_tensor_lr_configs.extend(
[
{**config, "lr": torch.tensor([0.1])}
for config in fused_adamw_float_lr_configs
]
)
adamw_configs = [
*adamw_float_lr_configs,
*fused_adamw_float_lr_configs,
*fused_adamw_tensor_lr_configs,
]
# shard function to do full sharding on all parameters of a module
def _shard_fn_2d(name, module, device_mesh):
if isinstance(module, nn.Linear):
for name, param in module.named_parameters():
dist_param = torch.nn.Parameter(
distribute_tensor(param, device_mesh, [Replicate(), Shard(0)])
)
# make sure partial sum get cleared after backward()
dist_param.register_hook(
lambda grad: grad.redistribute(
placements=[Replicate(), Shard(0)]
)
)
module.register_parameter(name, dist_param)
# prepare input
def _input_fn_2d(mod, inputs, device_mesh):
# split the input tensor to be sharded input on a 2d mesh
dist_inp = DTensor.from_local(
inputs[0], device_mesh, [Replicate(), Shard(0)], run_check=False
)
return dist_inp
for config in adamw_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.AdamW(mod.parameters(), **config)
mod_copy = deepcopy(mod)
# MLPModule.net1 is sharded on the flatten mesh
distribute_module(
mod_copy.net1, mesh_flatten, shard_fn, input_fn, output_fn
)
# MLPModule.net2 is sharded on the 2d mesh
distribute_module(
mod_copy.net2, mesh_2d, _shard_fn_2d, _input_fn_2d, output_fn
)
dist_opt = torch.optim.AdamW(mod_copy.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(None, mod, opt, mod_copy, dist_opt, inp)
TestDTensorOptimizerWithLocalTensor = create_local_tensor_test_class(
TestDTensorOptimizer,
)
if __name__ == "__main__":
run_tests()
| TestDTensorOptimizer |
python | pytorch__pytorch | tools/testing/test_run.py | {
"start": 8222,
"end": 10286
} | class ____:
test: TestRun
shard: int
num_shards: int
time: float | None # In seconds
def __init__(
self,
test: TestRun | str,
shard: int,
num_shards: int,
time: float | None = None,
) -> None:
if isinstance(test, str):
test = TestRun(test)
self.test = test
self.shard = shard
self.num_shards = num_shards
self.time = time
@property
def name(self) -> str:
return self.test.test_file
def __eq__(self, other: object) -> bool:
if not isinstance(other, ShardedTest):
return False
return (
self.test == other.test
and self.shard == other.shard
and self.num_shards == other.num_shards
and self.time == other.time
)
def __repr__(self) -> str:
ret = f"{self.test} {self.shard}/{self.num_shards}"
if self.time:
ret += f" ({self.time}s)"
return ret
def __lt__(self, other: object) -> bool:
if not isinstance(other, ShardedTest):
raise NotImplementedError
# This is how the list was implicitly sorted when it was a NamedTuple
if self.name != other.name:
return self.name < other.name
if self.shard != other.shard:
return self.shard < other.shard
if self.num_shards != other.num_shards:
return self.num_shards < other.num_shards
# None is the smallest value
if self.time is None:
return True
if other.time is None:
return False
return self.time < other.time
def __str__(self) -> str:
return f"{self.test} {self.shard}/{self.num_shards}"
def get_time(self, default: float = 0) -> float:
return self.time if self.time is not None else default
def get_pytest_args(self) -> list[str]:
filter = self.test.get_pytest_filter()
if filter:
return ["-k", self.test.get_pytest_filter()]
return []
| ShardedTest |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 16693,
"end": 17825
} | class ____(PrefectFilterBaseModel):
"""Filter by `FlowRun.end_time`."""
before_: Optional[DateTime] = Field(
default=None,
description="Only include flow runs ending at or before this time",
)
after_: Optional[DateTime] = Field(
default=None,
description="Only include flow runs ending at or after this time",
)
is_null_: Optional[bool] = Field(
default=None, description="If true, only return flow runs without an end time"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.before_ is not None:
filters.append(db.FlowRun.end_time <= self.before_)
if self.after_ is not None:
filters.append(db.FlowRun.end_time >= self.after_)
if self.is_null_ is not None:
filters.append(
db.FlowRun.end_time.is_(None)
if self.is_null_
else db.FlowRun.end_time.is_not(None)
)
return filters
| FlowRunFilterEndTime |
python | MongoEngine__mongoengine | mongoengine/queryset/base.py | {
"start": 1194,
"end": 77363
} | class ____:
"""A set of results returned from a query. Wraps a MongoDB cursor,
providing :class:`~mongoengine.Document` objects as the results.
"""
def __init__(self, document, collection):
self._document = document
self._collection_obj = collection
self._mongo_query = None
self._query_obj = Q()
self._cls_query = {}
self._where_clause = None
self._loaded_fields = QueryFieldList()
self._ordering = None
self._snapshot = False
self._timeout = True
self._allow_disk_use = False
self._read_preference = None
self._read_concern = None
self._iter = False
self._scalar = []
self._none = False
self._as_pymongo = False
self._search_text = None
self._search_text_score = None
self.__dereference = False
self.__auto_dereference = True
# If inheritance is allowed, only return instances and instances of
# subclasses of the class being used
if document._meta.get("allow_inheritance") is True:
if len(self._document._subclasses) == 1:
self._cls_query = {"_cls": self._document._subclasses[0]}
else:
self._cls_query = {"_cls": {"$in": self._document._subclasses}}
self._loaded_fields = QueryFieldList(always_include=["_cls"])
self._cursor_obj = None
self._limit = None
self._skip = None
self._hint = -1 # Using -1 as None is a valid value for hint
self._collation = None
self._batch_size = None
self._max_time_ms = None
self._comment = None
# Hack - As people expect cursor[5:5] to return
# an empty result set. It's hard to do that right, though, because the
# server uses limit(0) to mean 'no limit'. So we set _empty
# in that case and check for it when iterating. We also unset
# it anytime we change _limit. Inspired by how it is done in pymongo.Cursor
self._empty = False
def __call__(self, q_obj=None, **query):
"""Filter the selected documents by calling the
:class:`~mongoengine.queryset.QuerySet` with a query.
:param q_obj: a :class:`~mongoengine.queryset.Q` object to be used in
the query; the :class:`~mongoengine.queryset.QuerySet` is filtered
multiple times with different :class:`~mongoengine.queryset.Q`
objects, only the last one will be used.
:param query: Django-style query keyword arguments.
"""
query = Q(**query)
if q_obj:
# Make sure proper query object is passed.
if not isinstance(q_obj, QNode):
msg = (
"Not a query object: %s. "
"Did you intend to use key=value?" % q_obj
)
raise InvalidQueryError(msg)
query &= q_obj
queryset = self.clone()
queryset._query_obj &= query
queryset._mongo_query = None
queryset._cursor_obj = None
return queryset
def __getstate__(self):
"""
Need for pickling queryset
See https://github.com/MongoEngine/mongoengine/issues/442
"""
obj_dict = self.__dict__.copy()
# don't picke collection, instead pickle collection params
obj_dict.pop("_collection_obj")
# don't pickle cursor
obj_dict["_cursor_obj"] = None
return obj_dict
def __setstate__(self, obj_dict):
"""
Need for pickling queryset
See https://github.com/MongoEngine/mongoengine/issues/442
"""
obj_dict["_collection_obj"] = obj_dict["_document"]._get_collection()
# update attributes
self.__dict__.update(obj_dict)
# forse load cursor
# self._cursor
def __getitem__(self, key):
"""Return a document instance corresponding to a given index if
the key is an integer. If the key is a slice, translate its
bounds into a skip and a limit, and return a cloned queryset
with that skip/limit applied. For example:
>>> User.objects[0]
<User: User object>
>>> User.objects[1:3]
[<User: User object>, <User: User object>]
"""
queryset = self.clone()
queryset._empty = False
# Handle a slice
if isinstance(key, slice):
queryset._cursor_obj = queryset._cursor[key]
queryset._skip, queryset._limit = key.start, key.stop
if key.start and key.stop:
queryset._limit = key.stop - key.start
if queryset._limit == 0:
queryset._empty = True
# Allow further QuerySet modifications to be performed
return queryset
# Handle an index
elif isinstance(key, int):
if queryset._scalar:
return queryset._get_scalar(
queryset._document._from_son(
queryset._cursor[key],
_auto_dereference=self._auto_dereference,
)
)
if queryset._as_pymongo:
return queryset._cursor[key]
return queryset._document._from_son(
queryset._cursor[key],
_auto_dereference=self._auto_dereference,
)
raise TypeError("Provide a slice or an integer index")
def __iter__(self):
raise NotImplementedError
def _has_data(self):
"""Return True if cursor has any data."""
queryset = self.order_by()
return False if queryset.first() is None else True
def __bool__(self):
    """Avoid to open all records in an if stmt in Py3."""
    # Truthiness is answered with a single-document existence check
    # rather than materialising the whole result set.
    return self._has_data()
# Core functions
def all(self):
    """Return a copy of the current QuerySet with no extra filtering."""
    return self()
def filter(self, *q_objs, **query):
    """An alias of :meth:`~mongoengine.queryset.QuerySet.__call__`"""
    return self(*q_objs, **query)
def search_text(self, text, language=None, text_score=True):
    """
    Start a text search, using text indexes.
    Require: MongoDB server version 2.6+.

    :param text: the string to search for (MongoDB ``$search`` value).
    :param language: The language that determines the list of stop words
        for the search and the rules for the stemmer and tokenizer.
        If not specified, the search uses the default language of the index.
        For supported languages, see
        `Text Search Languages <https://docs.mongodb.org/manual/reference/text-search-languages/#text-search-languages>`.
    :param text_score: True to have it return the text_score (available through get_text_score()), False to disable that
        Note that unless you order the results, leaving text_score=True may provide randomness in the returned documents
    :raises OperationError: if a text search was already applied to this
        queryset (MongoDB allows a single ``$text`` clause per query).
    """
    queryset = self.clone()
    if queryset._search_text:
        raise OperationError("It is not possible to use search_text two times.")
    query_kwargs = SON({"$search": text})
    if language:
        query_kwargs["$language"] = language
    # Merge the $text clause into the existing query and invalidate any
    # cached query/cursor so it gets rebuilt with the new clause.
    queryset._query_obj &= Q(__raw__={"$text": query_kwargs})
    queryset._mongo_query = None
    queryset._cursor_obj = None
    queryset._search_text = text
    queryset._search_text_score = text_score
    return queryset
def get(self, *q_objs, **query):
    """Retrieve the matching object raising
    :class:`~mongoengine.queryset.MultipleObjectsReturned` or
    `DocumentName.MultipleObjectsReturned` exception if multiple results
    and :class:`~mongoengine.queryset.DoesNotExist` or
    `DocumentName.DoesNotExist` if no results are found.
    """
    queryset = self.clone()
    # limit(2) is all we need to detect duplicates without fetching the
    # full result set; ordering is irrelevant for a uniqueness check.
    queryset = queryset.order_by().limit(2)
    queryset = queryset.filter(*q_objs, **query)
    try:
        result = next(queryset)
    except StopIteration:
        msg = "%s matching query does not exist." % queryset._document._class_name
        raise queryset._document.DoesNotExist(msg)
    try:
        # Check if there is another match
        next(queryset)
    except StopIteration:
        # Exactly one match -- the happy path.
        return result
    # If we were able to retrieve a 2nd doc, raise the MultipleObjectsReturned exception.
    raise queryset._document.MultipleObjectsReturned(
        "2 or more items returned, instead of 1"
    )
def create(self, **kwargs):
    """Build a new document from *kwargs*, save it, and return it."""
    document = self._document(**kwargs)
    # force_insert guarantees an insert (never an update of an existing doc).
    return document.save(force_insert=True)
def first(self):
    """Retrieve the first object matching the query, or None."""
    cloned = self.clone()
    if self._none or self._empty:
        return None
    try:
        return cloned[0]
    except IndexError:
        # No matching document at all.
        return None
def insert(
    self, doc_or_docs, load_bulk=True, write_concern=None, signal_kwargs=None
):
    """bulk insert documents

    :param doc_or_docs: a document or list of documents to be inserted
    :param load_bulk (optional): If True returns the list of document
        instances
    :param write_concern: Extra keyword arguments are passed down to
        :meth:`~pymongo.collection.Collection.insert`
        which will be used as options for the resultant
        ``getLastError`` command.  For example,
        ``insert(..., {w: 2, fsync: True})`` will wait until at least
        two servers have recorded the write and will force an fsync on
        each server being written to.
    :param signal_kwargs: (optional) kwargs dictionary to be passed to
        the signal calls.
    :raises OperationError: if a document is of the wrong class or has a
        pk already (use ``doc.update()`` instead), or on server failure.
    :raises NotUniqueError: on duplicate-key errors.

    By default returns document instances, set ``load_bulk`` to False to
    return just ``ObjectIds``
    """
    Document = _import_class("Document")
    if write_concern is None:
        write_concern = {}
    docs = doc_or_docs
    return_one = False
    if isinstance(docs, Document) or issubclass(docs.__class__, Document):
        # Single document passed in -- normalise to a list, but remember
        # to unwrap the result on the way out.
        return_one = True
        docs = [docs]
    # Validate the batch up front, before any signals fire or data is sent.
    for doc in docs:
        if not isinstance(doc, self._document):
            msg = "Some documents inserted aren't instances of %s" % str(
                self._document
            )
            raise OperationError(msg)
        if doc.pk and not doc._created:
            msg = "Some documents have ObjectIds, use doc.update() instead"
            raise OperationError(msg)
    signal_kwargs = signal_kwargs or {}
    signals.pre_bulk_insert.send(self._document, documents=docs, **signal_kwargs)
    raw = [doc.to_mongo() for doc in docs]
    with set_write_concern(self._collection, write_concern) as collection:
        insert_func = collection.insert_many
        if return_one:
            raw = raw[0]
            insert_func = collection.insert_one
    try:
        inserted_result = insert_func(raw, session=_get_session())
        ids = (
            [inserted_result.inserted_id]
            if return_one
            else inserted_result.inserted_ids
        )
    except pymongo.errors.DuplicateKeyError as err:
        message = "Could not save document (%s)"
        raise NotUniqueError(message % err)
    except pymongo.errors.BulkWriteError as err:
        # inserting documents that already have an _id field will
        # give huge performance debt or raise
        message = "Bulk write error: (%s)"
        raise BulkWriteError(message % err.details)
    except pymongo.errors.OperationFailure as err:
        message = "Could not save document (%s)"
        if re.match("^E1100[01] duplicate key", str(err)):
            # E11000 - duplicate key error index
            # E11001 - duplicate key on update
            message = "Tried to save duplicate unique keys (%s)"
            raise NotUniqueError(message % err)
        raise OperationError(message % err)
    # Apply inserted_ids to documents
    for doc, doc_id in zip(docs, ids):
        doc.pk = doc_id
    if not load_bulk:
        signals.post_bulk_insert.send(
            self._document, documents=docs, loaded=False, **signal_kwargs
        )
        return ids[0] if return_one else ids
    # Re-fetch the inserted documents so callers get fully-loaded instances.
    documents = self.in_bulk(ids)
    results = [documents.get(obj_id) for obj_id in ids]
    signals.post_bulk_insert.send(
        self._document, documents=results, loaded=True, **signal_kwargs
    )
    return results[0] if return_one else results
def count(self, with_limit_and_skip=False):
    """Count the selected elements in the query.

    :param with_limit_and_skip (optional): take any :meth:`limit` or
        :meth:`skip` that has been applied to this cursor into account when
        getting the count
    """
    # mimic the fact that setting .limit(0) in pymongo sets no limit
    # https://www.mongodb.com/docs/manual/reference/method/cursor.limit/#zero-value
    # Note precedence: (limit==0 and not with_limit_and_skip) or none or empty.
    if (
        self._limit == 0
        and with_limit_and_skip is False
        or self._none
        or self._empty
    ):
        return 0
    kwargs = (
        {"limit": self._limit, "skip": self._skip} if with_limit_and_skip else {}
    )
    if self._limit == 0:
        # mimic the fact that historically .limit(0) sets no limit
        kwargs.pop("limit", None)
    if self._hint not in (-1, None):
        kwargs["hint"] = self._hint
    if self._collation:
        kwargs["collation"] = self._collation
    count = count_documents(
        collection=self._cursor.collection,
        filter=self._query,
        **kwargs,
    )
    # Drop the cursor used for counting so later iteration starts fresh.
    self._cursor_obj = None
    return count
def delete(self, write_concern=None, _from_doc_delete=False, cascade_refs=None):
    """Delete the documents matched by the query.

    :param write_concern: Extra keyword arguments are passed down which
        will be used as options for the resultant
        ``getLastError`` command.  For example,
        ``save(..., write_concern={w: 2, fsync: True}, ...)`` will
        wait until at least two servers have recorded the write and
        will force an fsync on the primary server.
    :param _from_doc_delete: True when called from document delete therefore
        signals will have been triggered so don't loop.
    :param cascade_refs: set of ids already visited; used to break cycles
        when CASCADE delete rules recurse into the same collection.
    :raises OperationError: if a DENY delete rule blocks the deletion.
    :returns number of deleted documents
    """
    queryset = self.clone()
    doc = queryset._document
    if write_concern is None:
        write_concern = {}
    # Handle deletes where skips or limits have been applied or
    # there is an untriggered delete signal
    has_delete_signal = signals.signals_available and (
        signals.pre_delete.has_receivers_for(doc)
        or signals.post_delete.has_receivers_for(doc)
    )
    call_document_delete = (
        queryset._skip or queryset._limit or has_delete_signal
    ) and not _from_doc_delete
    if call_document_delete:
        # Fall back to one-by-one document deletion so per-document
        # signals fire and skip/limit are honoured.
        cnt = 0
        for doc in queryset:
            doc.delete(**write_concern)
            cnt += 1
        return cnt
    delete_rules = doc._meta.get("delete_rules") or {}
    delete_rules = list(delete_rules.items())
    # Check for DENY rules before actually deleting/nullifying any other
    # references
    for rule_entry, rule in delete_rules:
        document_cls, field_name = rule_entry
        if document_cls._meta.get("abstract"):
            continue
        if rule == DENY:
            refs = document_cls.objects(**{field_name + "__in": self})
            if refs.limit(1).count() > 0:
                raise OperationError(
                    "Could not delete document (%s.%s refers to it)"
                    % (document_cls.__name__, field_name)
                )
    # Check all the other rules
    for rule_entry, rule in delete_rules:
        document_cls, field_name = rule_entry
        if document_cls._meta.get("abstract"):
            continue
        if rule == CASCADE:
            cascade_refs = set() if cascade_refs is None else cascade_refs
            # Handle recursive reference
            if doc._collection == document_cls._collection:
                for ref in queryset:
                    cascade_refs.add(ref.id)
            refs = document_cls.objects(
                **{field_name + "__in": self, "pk__nin": cascade_refs}
            )
            if refs.count() > 0:
                refs.delete(write_concern=write_concern, cascade_refs=cascade_refs)
        elif rule == NULLIFY:
            # Unset the reference field on referring documents.
            document_cls.objects(**{field_name + "__in": self}).update(
                write_concern=write_concern, **{"unset__%s" % field_name: 1}
            )
        elif rule == PULL:
            # Pull the reference out of list fields on referring documents.
            document_cls.objects(**{field_name + "__in": self}).update(
                write_concern=write_concern, **{"pull_all__%s" % field_name: self}
            )
    kwargs = {}
    if self._hint not in (-1, None):
        kwargs["hint"] = self._hint
    if self._collation:
        kwargs["collation"] = self._collation
    if self._comment:
        kwargs["comment"] = self._comment
    with set_write_concern(queryset._collection, write_concern) as collection:
        result = collection.delete_many(
            queryset._query,
            session=_get_session(),
            **kwargs,
        )
        # If we're using an unack'd write concern, we don't really know how
        # many items have been deleted at this point, hence we only return
        # the count for ack'd ops.
        if result.acknowledged:
            return result.deleted_count
def update(
    self,
    upsert=False,
    multi=True,
    write_concern=None,
    read_concern=None,
    full_result=False,
    array_filters=None,
    **update,
):
    """Perform an atomic update on the fields matched by the query.

    :param upsert: insert if document doesn't exist (default ``False``)
    :param multi: Update multiple documents.
    :param write_concern: Extra keyword arguments are passed down which
        will be used as options for the resultant
        ``getLastError`` command.  For example,
        ``save(..., write_concern={w: 2, fsync: True}, ...)`` will
        wait until at least two servers have recorded the write and
        will force an fsync on the primary server.
    :param read_concern: Override the read concern for the operation
    :param full_result: Return the associated ``pymongo.UpdateResult`` rather than just the number
        updated items
    :param array_filters: A list of filters specifying which array elements an update should apply.
    :param update: Django-style update keyword arguments
    :raises OperationError: if no update parameters are supplied (and not
        upserting), or on a server-side failure.
    :raises NotUniqueError: on duplicate-key errors.
    :returns the number of updated documents (unless ``full_result`` is True)
    """
    if not update and not upsert:
        raise OperationError("No update parameters, would remove data")
    if write_concern is None:
        write_concern = {}
    if self._none or self._empty:
        return 0
    queryset = self.clone()
    query = queryset._query
    if "__raw__" in update and isinstance(
        update["__raw__"], list
    ):  # Case of Update with Aggregation Pipeline
        update = [
            transform.update(queryset._document, **{"__raw__": u})
            for u in update["__raw__"]
        ]
    else:
        update = transform.update(queryset._document, **update)
    # If doing an atomic upsert on an inheritable class
    # then ensure we add _cls to the update operation
    if upsert and "_cls" in query:
        if "$set" in update:
            update["$set"]["_cls"] = queryset._document._class_name
        else:
            update["$set"] = {"_cls": queryset._document._class_name}
    kwargs = {}
    if self._hint not in (-1, None):
        kwargs["hint"] = self._hint
    if self._collation:
        kwargs["collation"] = self._collation
    if self._comment:
        kwargs["comment"] = self._comment
    try:
        with set_read_write_concern(
            queryset._collection, write_concern, read_concern
        ) as collection:
            update_func = collection.update_one
            if multi:
                update_func = collection.update_many
            result = update_func(
                query,
                update,
                upsert=upsert,
                array_filters=array_filters,
                session=_get_session(),
                **kwargs,
            )
        if full_result:
            return result
        elif result.raw_result:
            return result.raw_result["n"]
    except pymongo.errors.DuplicateKeyError as err:
        raise NotUniqueError("Update failed (%s)" % err)
    except pymongo.errors.OperationFailure as err:
        if str(err) == "multi not coded yet":
            message = "update() method requires MongoDB 1.1.3+"
            raise OperationError(message)
        raise OperationError("Update failed (%s)" % err)
def upsert_one(self, write_concern=None, read_concern=None, **update):
    """Overwrite or add the first document matched by the query.

    :param write_concern: Extra keyword arguments are passed down which
        will be used as options for the resultant
        ``getLastError`` command (e.g. ``write_concern={w: 2, fsync: True}``).
    :param read_concern: Override the read concern for the operation
    :param update: Django-style update keyword arguments
    :returns the new or overwritten document
    """
    result = self.update(
        multi=False,
        upsert=True,
        write_concern=write_concern,
        read_concern=read_concern,
        full_result=True,
        **update,
    )
    if result.raw_result["updatedExisting"]:
        # An existing document was modified -- fetch it back.
        return self.get()
    # A brand new document was inserted; load it by its upserted id.
    return self._document.objects.with_id(result.upserted_id)
def update_one(
    self,
    upsert=False,
    write_concern=None,
    full_result=False,
    array_filters=None,
    **update,
):
    """Perform an atomic update on the fields of the first document
    matched by the query. Delegates to :meth:`update` with ``multi=False``.

    :param upsert: insert if document doesn't exist (default ``False``)
    :param write_concern: options forwarded to the resultant
        ``getLastError`` command (e.g. ``write_concern={w: 2, fsync: True}``).
    :param full_result: Return the associated ``pymongo.UpdateResult``
        rather than just the number of updated items.
    :param array_filters: A list of filters specifying which array elements
        an update should apply.
    :param update: Django-style update keyword arguments
    :returns the number of updated documents (unless ``full_result`` is True)
    """
    return self.update(
        upsert=upsert,
        multi=False,
        write_concern=write_concern,
        full_result=full_result,
        array_filters=array_filters,
        **update,
    )
def modify(
    self,
    upsert=False,
    remove=False,
    new=False,
    array_filters=None,
    **update,
):
    """Update and return the updated document.

    Returns either the document before or after modification based on `new`
    parameter. If no documents match the query and `upsert` is false,
    returns ``None``. If upserting and `new` is false, returns ``None``.

    :param upsert: insert if document doesn't exist (default ``False``)
    :param remove: remove rather than updating (default ``False``)
    :param new: return updated rather than original document
        (default ``False``)
    :param array_filters: A list of filters specifying which array elements an update should apply.
    :param update: Django-style update keyword arguments
    :raises OperationError: on conflicting/missing parameters or server
        failure; :class:`NotUniqueError` on duplicate keys.
    """
    if remove and new:
        raise OperationError("Conflicting parameters: remove and new")
    if not update and not upsert and not remove:
        raise OperationError("No update parameters, must either update or remove")
    if self._none or self._empty:
        return None
    queryset = self.clone()
    query = queryset._query
    if self._where_clause:
        # Inline any server-side JS field references before adding $where.
        where_clause = self._sub_js_fields(self._where_clause)
        query["$where"] = where_clause
    if not remove:
        update = transform.update(queryset._document, **update)
    sort = queryset._ordering
    try:
        if remove:
            result = queryset._collection.find_one_and_delete(
                query, sort=sort, session=_get_session(), **self._cursor_args
            )
        else:
            if new:
                return_doc = ReturnDocument.AFTER
            else:
                return_doc = ReturnDocument.BEFORE
            result = queryset._collection.find_one_and_update(
                query,
                update,
                upsert=upsert,
                sort=sort,
                return_document=return_doc,
                session=_get_session(),
                array_filters=array_filters,
                **self._cursor_args,
            )
    except pymongo.errors.DuplicateKeyError as err:
        raise NotUniqueError("Update failed (%s)" % err)
    except pymongo.errors.OperationFailure as err:
        raise OperationError("Update failed (%s)" % err)
    if result is not None:
        # Convert the raw SON returned by pymongo into a Document instance.
        result = self._document._from_son(result)
    return result
def with_id(self, object_id):
    """Retrieve the object matching the id provided. Uses `object_id` only
    and raises InvalidQueryError if a filter has been applied. Returns
    `None` if no document exists with that id.

    :param object_id: the value for the id of the document to look up
    """
    queryset = self.clone()
    if queryset._query_obj:
        # with_id is an exact-id lookup; combining it with filters would
        # silently change its meaning, so refuse.
        raise InvalidQueryError("Cannot use a filter whilst using `with_id`")
    return queryset.filter(pk=object_id).first()
def in_bulk(self, object_ids):
    """Retrieve a set of documents by their ids.

    :param object_ids: a list or tuple of ObjectId's
    :rtype: dict of ObjectId's as keys and collection-specific
        Document subclasses as values.
    """
    doc_map = {}
    # Single round-trip: fetch every matching document via $in.
    docs = self._collection.find(
        {"_id": {"$in": object_ids}}, session=_get_session(), **self._cursor_args
    )
    if self._scalar:
        # .scalar() mode -- map ids to the projected scalar value(s).
        for doc in docs:
            doc_map[doc["_id"]] = self._get_scalar(self._document._from_son(doc))
    elif self._as_pymongo:
        # .as_pymongo() mode -- keep the raw pymongo dicts.
        for doc in docs:
            doc_map[doc["_id"]] = doc
    else:
        for doc in docs:
            doc_map[doc["_id"]] = self._document._from_son(
                doc,
                _auto_dereference=self._auto_dereference,
            )
    return doc_map
def none(self):
    """Return a queryset that never yields any objects; no query is
    executed when its results are accessed.

    Inspired by Django's none():
    https://docs.djangoproject.com/en/dev/ref/models/querysets/#none
    """
    empty_qs = self.clone()
    empty_qs._none = True
    return empty_qs
def no_sub_classes(self):
    """Filter for only the instances of this specific document.

    Do NOT return any inherited documents.

    .. note:: unlike most queryset methods this mutates and returns
        ``self`` rather than a clone.
    """
    if self._document._meta.get("allow_inheritance") is True:
        # Pin the implicit class filter to the exact class name
        # (subclasses normally match too).
        self._cls_query = {"_cls": self._document._class_name}
    return self
def using(self, alias):
    """This method is for controlling which database the QuerySet will be
    evaluated against if you are using more than one database.

    :param alias: The database alias
    """
    with switch_db(self._document, alias) as cls:
        collection = cls._get_collection()
    # Build a fresh queryset bound to the other database's collection,
    # copying all query state across from this one.
    return self._clone_into(self.__class__(self._document, collection))
def clone(self):
    """Return an independent copy of the current queryset."""
    fresh = self.__class__(self._document, self._collection_obj)
    return self._clone_into(fresh)
def _clone_into(self, new_qs):
    """Copy all the relevant properties of this queryset to
    a new queryset (which has to be an instance of
    :class:`~mongoengine.queryset.base.BaseQuerySet`).

    :param new_qs: the target queryset receiving this queryset's state.
    :raises OperationError: if ``new_qs`` is not a BaseQuerySet instance.
    """
    if not isinstance(new_qs, BaseQuerySet):
        raise OperationError(
            "%s is not a subclass of BaseQuerySet" % new_qs.__name__
        )
    # Shallow-copy every piece of query state onto the new queryset.
    copy_props = (
        "_mongo_query",
        "_cls_query",
        "_none",
        "_query_obj",
        "_where_clause",
        "_loaded_fields",
        "_ordering",
        "_snapshot",
        "_timeout",
        "_allow_disk_use",
        "_read_preference",
        "_read_concern",
        "_iter",
        "_scalar",
        "_as_pymongo",
        "_limit",
        "_skip",
        "_empty",
        "_hint",
        "_collation",
        "_search_text",
        "_search_text_score",
        "_max_time_ms",
        "_comment",
        "_batch_size",
    )
    for prop in copy_props:
        val = getattr(self, prop)
        setattr(new_qs, prop, copy.copy(val))
    # ``__auto_dereference`` is name-mangled inside the class body, hence
    # the explicit _BaseQuerySet__auto_dereference read on the right side.
    new_qs.__auto_dereference = self._BaseQuerySet__auto_dereference
    if self._cursor_obj:
        # Clone any live cursor so both querysets iterate independently.
        new_qs._cursor_obj = self._cursor_obj.clone()
    return new_qs
def select_related(self, max_depth=1):
    """Handles dereferencing of :class:`~bson.dbref.DBRef` objects or
    :class:`~bson.object_id.ObjectId` a maximum depth in order to cut down
    the number queries to mongodb.
    """
    queryset = self.clone()
    # +1 keeps queryset dereferencing depth consistent with documents.
    return queryset._dereference(queryset, max_depth=max_depth + 1)
def limit(self, n):
    """Limit the number of returned documents to `n`. This may also be
    achieved using array-slicing syntax (e.g. ``User.objects[:5]``).

    :param n: the maximum number of objects to return if n is greater than 0.
        When 0 is passed, returns all the documents in the cursor
    """
    queryset = self.clone()
    queryset._limit = n
    queryset._empty = False  # cancels the effect of empty
    cursor = queryset._cursor_obj
    if cursor:
        # An already-materialised cursor must be updated in place.
        cursor.limit(n)
    return queryset
def skip(self, n):
    """Skip `n` documents before returning the results. This may also be
    achieved using array-slicing syntax (e.g. ``User.objects[5:]``).

    :param n: the number of objects to skip before returning results
    """
    queryset = self.clone()
    queryset._skip = n
    cursor = queryset._cursor_obj
    if cursor:
        # An already-materialised cursor must be updated in place.
        cursor.skip(n)
    return queryset
def hint(self, index=None):
    """Added 'hint' support, telling Mongo the proper index to use for the
    query.

    Judicious use of hints can greatly improve query performance. When
    doing a query on multiple fields (at least one of which is indexed)
    pass the indexed field as a hint to the query.

    Hinting will not do anything if the corresponding index does not exist.
    The last hint applied to this cursor takes precedence over all others.
    """
    queryset = self.clone()
    queryset._hint = index
    cursor = queryset._cursor_obj
    if cursor:
        # An already-materialised cursor must receive the hint directly.
        cursor.hint(index)
    return queryset
def collation(self, collation=None):
    """
    Collation allows users to specify language-specific rules for string
    comparison, such as rules for lettercase and accent marks.

    :param collation: `~pymongo.collation.Collation` or dict with
        following fields:
        {
            locale: str,
            caseLevel: bool,
            caseFirst: str,
            strength: int,
            numericOrdering: bool,
            alternate: str,
            maxVariable: str,
            backwards: str
        }

    Collation should be added to indexes like in test example
    """
    queryset = self.clone()
    queryset._collation = collation
    cursor = queryset._cursor_obj
    if cursor:
        # An already-materialised cursor must receive the collation too.
        cursor.collation(collation)
    return queryset
def batch_size(self, size):
    """Limit the number of documents returned in a single batch (each
    batch requires a round trip to the server).

    See https://pymongo.readthedocs.io/en/stable/api/pymongo/cursor.html#pymongo.cursor.Cursor
    for details.

    :param size: desired size of each batch.
    """
    queryset = self.clone()
    queryset._batch_size = size
    cursor = queryset._cursor_obj
    if cursor:
        # An already-materialised cursor must be updated in place.
        cursor.batch_size(size)
    return queryset
def distinct(self, field):
    """Return a list of distinct values for a given field.

    :param field: the field to select distinct values from

    .. note:: This is a command and won't take ordering or limit into
        account.
    """
    queryset = self.clone()
    try:
        field = self._fields_to_dbfields([field]).pop()
    except LookUpError:
        # Not a declared field (e.g. a raw db field name) -- use it as-is.
        pass
    raw_values = queryset._cursor.distinct(field)
    if not self._auto_dereference:
        return raw_values
    distinct = self._dereference(raw_values, 1, name=field, instance=self._document)
    doc_field = self._document._fields.get(field.split(".", 1)[0])
    instance = None
    # We may need to cast to the correct type eg. ListField(EmbeddedDocumentField)
    EmbeddedDocumentField = _import_class("EmbeddedDocumentField")
    ListField = _import_class("ListField")
    GenericEmbeddedDocumentField = _import_class("GenericEmbeddedDocumentField")
    if isinstance(doc_field, ListField):
        doc_field = getattr(doc_field, "field", doc_field)
    if isinstance(doc_field, (EmbeddedDocumentField, GenericEmbeddedDocumentField)):
        instance = getattr(doc_field, "document_type", None)
    # handle distinct on subdocuments
    if "." in field:
        # Walk the dotted path to find the terminal field's type.
        for field_part in field.split(".")[1:]:
            # if looping on embedded document, get the document type instance
            if instance and isinstance(
                doc_field, (EmbeddedDocumentField, GenericEmbeddedDocumentField)
            ):
                doc_field = instance
            # now get the subdocument
            doc_field = getattr(doc_field, field_part, doc_field)
            # We may need to cast to the correct type eg. ListField(EmbeddedDocumentField)
            if isinstance(doc_field, ListField):
                doc_field = getattr(doc_field, "field", doc_field)
            if isinstance(
                doc_field, (EmbeddedDocumentField, GenericEmbeddedDocumentField)
            ):
                instance = getattr(doc_field, "document_type", None)
    if instance and isinstance(
        doc_field, (EmbeddedDocumentField, GenericEmbeddedDocumentField)
    ):
        # Re-hydrate raw dicts into embedded document instances.
        distinct = [instance(**doc) for doc in distinct]
    return distinct
def only(self, *fields):
    """Load only a subset of this document's fields. ::

        post = BlogPost.objects(...).only('title', 'author.name')

    .. note :: `only()` is chainable and will perform a union ::
        So with the following it will fetch both: `title` and `author.name`::

            post = BlogPost.objects.only('title').only('author.name')

    :func:`~mongoengine.queryset.QuerySet.all_fields` will reset any
    field filters.

    :param fields: fields to include
    """
    projection = dict.fromkeys(fields, QueryFieldList.ONLY)
    return self.fields(True, **projection)
def exclude(self, *fields):
    """Opposite to .only(), exclude some document's fields. ::

        post = BlogPost.objects(...).exclude('comments')

    .. note :: `exclude()` is chainable and will perform a union ::
        So with the following it will exclude both: `title` and `author.name`::

            post = BlogPost.objects.exclude('title').exclude('author.name')

    :func:`~mongoengine.queryset.QuerySet.all_fields` will reset any
    field filters.

    :param fields: fields to exclude
    """
    projection = dict.fromkeys(fields, QueryFieldList.EXCLUDE)
    return self.fields(**projection)
def fields(self, _only_called=False, **kwargs):
    """Manipulate how you load this document's fields. Used by `.only()`
    and `.exclude()` to manipulate which fields to retrieve. If called
    directly, use a set of kwargs similar to the MongoDB projection
    document. For example:

    Include only a subset of fields:

        posts = BlogPost.objects(...).fields(author=1, title=1)

    Exclude a specific field:

        posts = BlogPost.objects(...).fields(comments=0)

    To retrieve a subrange or sublist of array elements,
    support exist for both the `slice` and `elemMatch` projection operator:

        posts = BlogPost.objects(...).fields(slice__comments=5)
        posts = BlogPost.objects(...).fields(elemMatch__comments="test")

    :param _only_called: internal flag set by :meth:`only` so inclusion
        semantics are tracked correctly.
    :param kwargs: A set of keyword arguments identifying what to
        include, exclude, or slice.
    """
    # Check for an operator and transform to mongo-style if there is
    operators = ["slice", "elemMatch"]
    cleaned_fields = []
    for key, value in kwargs.items():
        parts = key.split("__")
        if parts[0] in operators:
            # e.g. slice__comments=5 -> ("comments", {"$slice": 5})
            op = parts.pop(0)
            value = {"$" + op: value}
        key = ".".join(parts)
        cleaned_fields.append((key, value))
    # Sort fields by their values, explicitly excluded fields first, then
    # explicitly included, and then more complicated operators such as
    # $slice.
    def _sort_key(field_tuple):
        _, value = field_tuple
        if isinstance(value, int):
            return value  # 0 for exclusion, 1 for inclusion
        return 2  # so that complex values appear last
    fields = sorted(cleaned_fields, key=_sort_key)
    # Clone the queryset, group all fields by their value, convert
    # each of them to db_fields, and set the queryset's _loaded_fields
    queryset = self.clone()
    for value, group in itertools.groupby(fields, lambda x: x[1]):
        fields = [field for field, value in group]
        fields = queryset._fields_to_dbfields(fields)
        queryset._loaded_fields += QueryFieldList(
            fields, value=value, _only_called=_only_called
        )
    return queryset
def all_fields(self):
    """Include all fields, resetting any previous .only()/.exclude(). ::

        post = BlogPost.objects.exclude('comments').all_fields()
    """
    queryset = self.clone()
    # Keep only the always-include set; drop every other projection rule.
    preserved = queryset._loaded_fields.always_include
    queryset._loaded_fields = QueryFieldList(always_include=preserved)
    return queryset
def order_by(self, *keys, __raw__=None):
    """Order the :class:`~mongoengine.queryset.QuerySet` by the given keys.

    The order may be specified by prepending each of the keys by a "+" or
    a "-". Ascending order is assumed if there's no prefix.
    If no keys are passed, existing ordering is cleared instead.

    :param keys: fields to order the query results by; keys may be
        prefixed with "+" or a "-" to determine the ordering direction.
    :param __raw__: a raw pymongo "sort" argument (provided as a list of (key, direction))
        see 'key_or_list' in `pymongo.cursor.Cursor.sort doc <https://pymongo.readthedocs.io/en/stable/api/pymongo/cursor.html#pymongo.cursor.Cursor.sort>`.
        If both keys and __raw__ are provided, an exception is raised
    :raises OperationError: if both ``keys`` and ``__raw__`` are given.
    """
    if __raw__ and keys:
        raise OperationError("Can not use both keys and __raw__ with order_by() ")
    queryset = self.clone()
    old_ordering = queryset._ordering
    if __raw__:
        new_ordering = __raw__
    else:
        new_ordering = queryset._get_order_by(keys)
    if queryset._cursor_obj:
        # If a cursor object has already been created, apply the sort to it
        if new_ordering:
            queryset._cursor_obj.sort(new_ordering)
        # If we're trying to clear a previous explicit ordering, we need
        # to clear the cursor entirely (because PyMongo doesn't allow
        # clearing an existing sort on a cursor).
        elif old_ordering:
            queryset._cursor_obj = None
    queryset._ordering = new_ordering
    return queryset
def clear_cls_query(self):
    """Clear the default "_cls" query.

    By default, all queries generated for documents that allow inheritance
    include an extra "_cls" clause. In most cases this is desirable, but
    sometimes you might achieve better performance if you clear that
    default query.

    Scan the code for `_cls_query` to get more details.
    """
    cleared = self.clone()
    cleared._cls_query = {}
    return cleared
def comment(self, text):
    """Attach a comment to the query (visible in profiler output/logs).

    See https://www.mongodb.com/docs/manual/reference/method/cursor.comment/
    for details.
    """
    return self._chainable_method("comment", text)
def explain(self):
    """Return the server's explain plan for this queryset's cursor."""
    return self._cursor.explain()
# DEPRECATED. Has no more impact on PyMongo 3+
def snapshot(self, enabled):
    """Enable or disable snapshot mode when querying.

    :param enabled: whether or not snapshot mode is enabled

    .. deprecated:: has no effect with PyMongo 3+.
    """
    msg = "snapshot is deprecated as it has no impact when using PyMongo 3+."
    warnings.warn(msg, DeprecationWarning, stacklevel=2)
    snapshot_qs = self.clone()
    snapshot_qs._snapshot = enabled
    return snapshot_qs
def allow_disk_use(self, enabled):
    """Enable or disable the use of temporary files on disk while processing a blocking sort operation.
    (To store data exceeding the 100 megabyte system memory limit)

    :param enabled: whether or not temporary files on disk are used
    """
    disk_qs = self.clone()
    disk_qs._allow_disk_use = enabled
    return disk_qs
def timeout(self, enabled):
    """Enable or disable the default mongod timeout when querying (the
    ``no_cursor_timeout`` option).

    :param enabled: whether or not the timeout is used
    """
    timeout_qs = self.clone()
    timeout_qs._timeout = enabled
    return timeout_qs
def read_preference(self, read_preference):
    """Change the read_preference when querying.

    :param read_preference: override ReplicaSetConnection-level
        preference.
    """
    validate_read_preference("read_preference", read_preference)
    pref_qs = self.clone()
    pref_qs._read_preference = read_preference
    # Any existing cursor was built with the old preference -- force a
    # lazy re-creation so the new one takes effect.
    pref_qs._cursor_obj = None
    return pref_qs
def read_concern(self, read_concern):
    """Change the read_concern when querying.

    :param read_concern: override ReplicaSetConnection-level
        preference.
    """
    if read_concern is not None and not isinstance(read_concern, Mapping):
        raise TypeError(f"{read_concern!r} is not a valid read concern.")
    concern_qs = self.clone()
    if read_concern is None:
        concern_qs._read_concern = None
    else:
        concern_qs._read_concern = ReadConcern(**read_concern)
    # Force cursor re-creation so the new concern is actually applied.
    concern_qs._cursor_obj = None
    return concern_qs
def scalar(self, *fields):
    """Instead of returning Document instances, return either a specific
    value or a tuple of values in order.

    Can be used along with
    :func:`~mongoengine.queryset.QuerySet.no_dereference` to turn off
    dereferencing.

    .. note:: This effects all results and can be unset by calling
        ``scalar`` without arguments. Calls ``only`` automatically.

    :param fields: One or more fields to return instead of a Document.
    """
    scalar_qs = self.clone()
    scalar_qs._scalar = list(fields)
    # With fields, restrict the projection; without, reset to all fields.
    scalar_qs = scalar_qs.only(*fields) if fields else scalar_qs.all_fields()
    return scalar_qs
def values_list(self, *fields):
    """Alias of :meth:`scalar`: return raw values instead of Documents."""
    return self.scalar(*fields)
def as_pymongo(self):
    """Return raw pymongo documents (dicts) instead of Document instances.

    This method is particularly useful if you don't need dereferencing
    and care primarily about the speed of data retrieval.
    """
    qs = self.clone()
    qs._as_pymongo = True
    return qs
def max_time_ms(self, ms):
    """Wait `ms` milliseconds before killing the query on the server.

    :param ms: the number of milliseconds before killing the query on the server
    :return: a cloned queryset with ``max_time_ms`` applied to its cursor
    """
    return self._chainable_method("max_time_ms", ms)
# JSON Helpers
def to_json(self, *args, **kwargs):
    """Convert this queryset to JSON via :func:`bson.json_util.dumps`.

    Positional/keyword arguments are forwarded to ``json_util.dumps``.
    When ``json_options`` is not supplied a deprecation warning is issued
    and ``LEGACY_JSON_OPTIONS`` is used.
    """
    if "json_options" not in kwargs:
        warnings.warn(
            "No 'json_options' are specified! Falling back to "
            "LEGACY_JSON_OPTIONS with uuid_representation=PYTHON_LEGACY. "
            "For use with other MongoDB drivers specify the UUID "
            "representation to use. This will be changed to "
            "uuid_representation=UNSPECIFIED in a future release.",
            DeprecationWarning,
            stacklevel=2,
        )
        kwargs["json_options"] = LEGACY_JSON_OPTIONS
    return json_util.dumps(self.as_pymongo(), *args, **kwargs)
def from_json(self, json_data):
    """Convert JSON data into a list of unsaved Document objects."""
    return [
        self._document._from_son(son) for son in json_util.loads(json_data)
    ]
def aggregate(self, pipeline, **kwargs):
    """Perform an aggregate function based on your queryset params

    If the queryset contains a query or skip/limit/sort or if the target Document class
    uses inheritance, this method will add steps prior to the provided pipeline in an arbitrary order.
    This may affect the performance or outcome of the aggregation, so use it consciously.

    For complex/critical pipelines, we recommended to use the aggregation framework of Pymongo directly,
    it is available through the collection object (YourDocument._collection.aggregate) and will guarantee
    that you have full control on the pipeline.

    :param pipeline: list of aggregation commands,
        see: https://www.mongodb.com/docs/manual/core/aggregation-pipeline/
    :param kwargs: (optional) kwargs dictionary to be passed to pymongo's aggregate call
        See https://pymongo.readthedocs.io/en/stable/api/pymongo/collection.html#pymongo.collection.Collection.aggregate
    """
    if not isinstance(pipeline, (tuple, list)):
        raise TypeError(
            f"Starting from 1.0 release pipeline must be a list/tuple, received: {type(pipeline)}"
        )

    initial_pipeline = []
    if self._none or self._empty:
        # Guarantee an empty result set without scanning collection data.
        initial_pipeline.append({"$limit": 1})
        initial_pipeline.append({"$match": {"$expr": False}})

    if self._query:
        initial_pipeline.append({"$match": self._query})

    if self._ordering:
        initial_pipeline.append({"$sort": dict(self._ordering)})

    if self._limit is not None:
        # As per MongoDB Documentation (https://www.mongodb.com/docs/manual/reference/operator/aggregation/limit/),
        # keeping limit stage right after sort stage is more efficient. But this leads to wrong set of documents
        # for a skip stage that might succeed these. So we need to maintain more documents in memory in such a
        # case (https://stackoverflow.com/a/24161461).
        initial_pipeline.append({"$limit": self._limit + (self._skip or 0)})

    if self._skip is not None:
        initial_pipeline.append({"$skip": self._skip})

    # geoNear and collStats must be the first stages in the pipeline if present
    first_step = []
    new_user_pipeline = []
    for step_step in pipeline:
        if "$geoNear" in step_step:
            first_step.append(step_step)
        elif "$collStats" in step_step:
            first_step.append(step_step)
        else:
            new_user_pipeline.append(step_step)

    final_pipeline = first_step + initial_pipeline + new_user_pipeline

    collection = self._collection
    if self._read_preference is not None or self._read_concern is not None:
        # Read preference/concern can only be set collection-wide in
        # PyMongo 3+, so aggregate on a re-configured collection clone.
        collection = self._collection.with_options(
            read_preference=self._read_preference, read_concern=self._read_concern
        )

    # setdefault so explicit kwargs from the caller win over queryset state.
    if self._hint not in (-1, None):
        kwargs.setdefault("hint", self._hint)
    if self._collation:
        kwargs.setdefault("collation", self._collation)
    if self._comment:
        kwargs.setdefault("comment", self._comment)

    return collection.aggregate(
        final_pipeline,
        cursor={},
        session=_get_session(),
        **kwargs,
    )
# JS functionality
def map_reduce(
    self, map_f, reduce_f, output, finalize_f=None, limit=None, scope=None
):
    """Perform a map/reduce query using the current query spec
    and ordering. While ``map_reduce`` respects ``QuerySet`` chaining,
    it must be the last call made, as it does not return a maleable
    ``QuerySet``.

    See the :meth:`~mongoengine.tests.QuerySetTest.test_map_reduce`
    and :meth:`~mongoengine.tests.QuerySetTest.test_map_advanced`
    tests in ``tests.queryset.QuerySetTest`` for usage examples.

    :param map_f: map function, as :class:`~bson.code.Code` or string
    :param reduce_f: reduce function, as
        :class:`~bson.code.Code` or string
    :param output: output collection name, if set to 'inline' will return
        the results inline. This can also be a dictionary containing output options
        see: https://www.mongodb.com/docs/manual/reference/command/mapReduce/#mongodb-dbcommand-dbcmd.mapReduce
    :param finalize_f: finalize function, an optional function that
        performs any post-reduction processing.
    :param scope: values to insert into map/reduce global scope. Optional.
    :param limit: number of objects from current query to provide
        to map/reduce method

    Returns an iterator yielding
    :class:`~mongoengine.document.MapReduceDocument`.
    """
    queryset = self.clone()

    MapReduceDocument = _import_class("MapReduceDocument")

    # Normalize map_f to a Code object, preserving any scope it carried
    # and substituting MongoEngine field names for db field names.
    map_f_scope = {}
    if isinstance(map_f, Code):
        map_f_scope = map_f.scope
        map_f = str(map_f)
    map_f = Code(queryset._sub_js_fields(map_f), map_f_scope or None)

    # Same treatment for the reduce function.
    reduce_f_scope = {}
    if isinstance(reduce_f, Code):
        reduce_f_scope = reduce_f.scope
        reduce_f = str(reduce_f)
    reduce_f_code = queryset._sub_js_fields(reduce_f)
    reduce_f = Code(reduce_f_code, reduce_f_scope or None)

    mr_args = {"query": queryset._query}

    if finalize_f:
        finalize_f_scope = {}
        if isinstance(finalize_f, Code):
            finalize_f_scope = finalize_f.scope
            finalize_f = str(finalize_f)
        finalize_f_code = queryset._sub_js_fields(finalize_f)
        finalize_f = Code(finalize_f_code, finalize_f_scope or None)
        mr_args["finalize"] = finalize_f

    if scope:
        mr_args["scope"] = scope

    if limit:
        mr_args["limit"] = limit

    # Inline output is only possible when no client-side sort is needed.
    if output == "inline" and not queryset._ordering:
        inline = True
        mr_args["out"] = {"inline": 1}
    else:
        inline = False
        if isinstance(output, str):
            mr_args["out"] = output
        elif isinstance(output, dict):
            # Build the "out" sub-document in the order mongod expects:
            # action first, then db, then the remaining options.
            ordered_output = []
            for part in ("replace", "merge", "reduce"):
                value = output.get(part)
                if value:
                    ordered_output.append((part, value))
                    break
            else:
                raise OperationError("actionData not specified for output")

            db_alias = output.get("db_alias")
            remaing_args = ["db", "sharded", "nonAtomic"]

            if db_alias:
                ordered_output.append(("db", get_db(db_alias).name))
                del remaing_args[0]

            for part in remaing_args:
                value = output.get(part)
                if value:
                    ordered_output.append((part, value))

            mr_args["out"] = SON(ordered_output)

    db = queryset._document._get_db()
    result = db.command(
        {
            "mapReduce": queryset._document._get_collection_name(),
            "map": map_f,
            "reduce": reduce_f,
            **mr_args,
        },
        session=_get_session(),
    )

    if inline:
        docs = result["results"]
    else:
        if isinstance(result["result"], str):
            docs = db[result["result"]].find()
        else:
            # Output went to a collection in another database.
            info = result["result"]
            docs = db.client[info["db"]][info["collection"]].find()

    if queryset._ordering:
        docs = docs.sort(queryset._ordering)

    for doc in docs:
        yield MapReduceDocument(
            queryset._document, queryset._collection, doc["_id"], doc["value"]
        )
def exec_js(self, code, *fields, **options):
    """Execute a Javascript function on the server. A list of fields may be
    provided, which will be translated to their correct names and supplied
    as the arguments to the function. A few extra variables are added to
    the function's scope: ``collection``, which is the name of the
    collection in use; ``query``, which is an object representing the
    current query; and ``options``, which is an object containing any
    options specified as keyword arguments.

    As fields in MongoEngine may use different names in the database (set
    using the :attr:`db_field` keyword argument to a :class:`Field`
    constructor), a mechanism exists for replacing MongoEngine field names
    with the database field names in Javascript code. When accessing a
    field, use square-bracket notation, and prefix the MongoEngine field
    name with a tilde (~).

    .. note:: NOTE(review): this relies on the server-side ``eval``
        command, which was removed in MongoDB 4.2 — only usable against
        older servers.

    :param code: a string of Javascript code to execute
    :param fields: fields that you will be using in your function, which
        will be passed in to your function as arguments
    :param options: options that you want available to the function
        (accessed in Javascript through the ``options`` object)
    """
    queryset = self.clone()

    code = queryset._sub_js_fields(code)

    # Translate MongoEngine field names to db field names before passing
    # them as arguments to the JS function.
    fields = [queryset._document._translate_field_name(f) for f in fields]
    collection = queryset._document._get_collection_name()

    scope = {"collection": collection, "options": options or {}}

    query = queryset._query
    if queryset._where_clause:
        query["$where"] = queryset._where_clause

    scope["query"] = query
    code = Code(code, scope=scope)

    db = queryset._document._get_db()
    return db.command("eval", code, args=fields).get("retval")
def where(self, where_clause):
    """Filter ``QuerySet`` results with a ``$where`` clause (a Javascript
    expression). Performs automatic field name substitution like
    :meth:`mongoengine.queryset.Queryset.exec_js`.

    .. note:: When using this mode of query, the database will call your
              function, or evaluate your predicate clause, for each object
              in the collection.
    """
    qs = self.clone()
    # Substitute [~field] markers with db field names before storing.
    qs._where_clause = qs._sub_js_fields(where_clause)
    return qs
def sum(self, field):
    """Sum over the values of the specified field.

    :param field: the field to sum over; use dot notation to refer to
        embedded document fields
    :return: the aggregated total, or 0 when the queryset matched nothing
    """
    db_field = self._fields_to_dbfields([field]).pop()
    pipeline = [
        {"$match": self._query},
        {"$group": {"_id": "sum", "total": {"$sum": "$" + db_field}}},
    ]

    # if we're performing a sum over a list field, we sum up all the
    # elements in the list, hence we need to $unwind the arrays first
    ListField = _import_class("ListField")
    field_parts = field.split(".")
    field_instances = self._document._lookup_field(field_parts)
    if isinstance(field_instances[-1], ListField):
        # NOTE(review): $unwind uses the Python-level *field* path while
        # $group uses *db_field*; if they differ the unwind may target the
        # wrong path — confirm intended.
        pipeline.insert(1, {"$unwind": "$" + field})

    result = tuple(
        self._document._get_collection().aggregate(pipeline, session=_get_session())
    )

    if result:
        return result[0]["total"]
    return 0
def average(self, field):
    """Average over the values of the specified field.

    :param field: the field to average over; use dot notation to refer to
        embedded document fields
    :return: the aggregated average, or 0 when the queryset matched nothing
    """
    db_field = self._fields_to_dbfields([field]).pop()
    pipeline = [
        {"$match": self._query},
        {"$group": {"_id": "avg", "total": {"$avg": "$" + db_field}}},
    ]

    # if we're performing an average over a list field, we average out
    # all the elements in the list, hence we need to $unwind the arrays
    # first
    ListField = _import_class("ListField")
    field_parts = field.split(".")
    field_instances = self._document._lookup_field(field_parts)
    if isinstance(field_instances[-1], ListField):
        # NOTE(review): $unwind uses the Python-level *field* path while
        # $group uses *db_field*; if they differ the unwind may target the
        # wrong path — confirm intended.
        pipeline.insert(1, {"$unwind": "$" + field})

    result = tuple(
        self._document._get_collection().aggregate(pipeline, session=_get_session())
    )

    if result:
        return result[0]["total"]
    return 0
def item_frequencies(self, field, normalize=False, map_reduce=True):
    """Return a ``{item: frequency}`` mapping for *field* across the whole
    queried set of documents. This is useful for generating tag clouds, or
    searching documents.

    .. note::

        Can only do direct simple mappings and cannot map across
        :class:`~mongoengine.fields.ReferenceField` or
        :class:`~mongoengine.fields.GenericReferenceField` for more complex
        counting a manual map reduce call is required.

    If the field is a :class:`~mongoengine.fields.ListField`, the items within
    each list will be counted individually.

    :param field: the field to use
    :param normalize: normalize the results so they add to 1.0
    :param map_reduce: Use map_reduce over exec_js
    """
    impl = (
        self._item_frequencies_map_reduce
        if map_reduce
        else self._item_frequencies_exec_js
    )
    return impl(field, normalize=normalize)
# Iterator helpers
def __next__(self):
    """Wrap the result in a :class:`~mongoengine.Document` object."""
    if self._none or self._empty:
        # none()/empty querysets never yield results.
        raise StopIteration

    raw_doc = next(self._cursor)

    if self._as_pymongo:
        # Raw pymongo mode: hand back the plain dict untouched.
        return raw_doc

    doc = self._document._from_son(
        raw_doc,
        _auto_dereference=self._auto_dereference,
    )

    if self._scalar:
        # scalar()/values_list() mode: return bare values, not Documents.
        return self._get_scalar(doc)

    return doc
def rewind(self):
    """Rewind the cursor to its unevaluated state."""
    # Reset iteration bookkeeping and rewind the underlying PyMongo cursor.
    self._iter = False
    self._cursor.rewind()
# Properties
@property
def _collection(self):
    """Property that returns the collection object. This allows us to
    perform operations only if the collection is accessed.
    """
    # The collection object is created elsewhere and merely exposed here.
    return self._collection_obj
@property
def _cursor_args(self):
    """Build the keyword arguments used to create the PyMongo cursor."""
    fields_name = "projection"
    # snapshot is not handled at all by PyMongo 3+
    # TODO: evaluate similar possibilities using modifiers
    if self._snapshot:
        msg = "The snapshot option is not anymore available with PyMongo 3+"
        warnings.warn(msg, DeprecationWarning, stacklevel=3)

    cursor_args = {}
    if not self._timeout:
        cursor_args["no_cursor_timeout"] = True

    if self._allow_disk_use:
        cursor_args["allow_disk_use"] = True

    if self._loaded_fields:
        cursor_args[fields_name] = self._loaded_fields.as_dict()

    if self._search_text:
        if fields_name not in cursor_args:
            cursor_args[fields_name] = {}

        if self._search_text_score:
            # Project the text-search relevance score into each result.
            cursor_args[fields_name]["_text_score"] = {"$meta": "textScore"}

    return cursor_args
@property
def _cursor(self):
    """Return a PyMongo cursor object corresponding to this queryset.

    The cursor is built lazily on first access and cached; query,
    ordering, limit/skip, hint, collation, batch size and comment are
    all applied here.
    """
    # If _cursor_obj already exists, return it immediately.
    if self._cursor_obj is not None:
        return self._cursor_obj

    # Create a new PyMongo cursor.
    # XXX In PyMongo 3+, we define the read preference on a collection
    # level, not a cursor level. Thus, we need to get a cloned collection
    # object using `with_options` first.
    if self._read_preference is not None or self._read_concern is not None:
        self._cursor_obj = self._collection.with_options(
            read_preference=self._read_preference, read_concern=self._read_concern
        ).find(self._query, session=_get_session(), **self._cursor_args)
    else:
        self._cursor_obj = self._collection.find(
            self._query, session=_get_session(), **self._cursor_args
        )

    # Apply "where" clauses to cursor
    if self._where_clause:
        where_clause = self._sub_js_fields(self._where_clause)
        self._cursor_obj.where(where_clause)

    # Apply ordering to the cursor.
    # XXX self._ordering can be equal to:
    # * None if we didn't explicitly call order_by on this queryset.
    # * A list of PyMongo-style sorting tuples.
    # * An empty list if we explicitly called order_by() without any
    #   arguments. This indicates that we want to clear the default
    #   ordering.
    if self._ordering:
        # explicit ordering
        self._cursor_obj.sort(self._ordering)
    elif self._ordering is None and self._document._meta["ordering"]:
        # default ordering
        order = self._get_order_by(self._document._meta["ordering"])
        self._cursor_obj.sort(order)

    if self._limit is not None:
        self._cursor_obj.limit(self._limit)

    if self._skip is not None:
        self._cursor_obj.skip(self._skip)

    if self._hint != -1:
        self._cursor_obj.hint(self._hint)

    if self._collation is not None:
        self._cursor_obj.collation(self._collation)

    if self._batch_size is not None:
        self._cursor_obj.batch_size(self._batch_size)

    if self._comment is not None:
        self._cursor_obj.comment(self._comment)

    return self._cursor_obj
def __deepcopy__(self, memo):
    """Essential for chained queries with ReferenceFields involved"""
    # Delegate to clone() rather than copying attribute-by-attribute.
    return self.clone()
@property
def _query(self):
    """The raw PyMongo query dict for this queryset (built lazily)."""
    if self._mongo_query is None:
        self._mongo_query = self._query_obj.to_query(self._document)
        if self._cls_query:
            if "_cls" in self._mongo_query:
                # The user query already constrains _cls; AND the two
                # conditions rather than letting one overwrite the other.
                self._mongo_query = {"$and": [self._cls_query, self._mongo_query]}
            else:
                self._mongo_query.update(self._cls_query)
    return self._mongo_query
@property
def _dereference(self):
    """Lazily-created DeReference helper used to resolve references."""
    # Name-mangled attribute; instantiated once on first access.
    if not self.__dereference:
        self.__dereference = _import_class("DeReference")()
    return self.__dereference
@property
def _auto_dereference(self):
    """True when results should be automatically dereferenced.

    Dereferencing happens only if it is not globally disabled for the
    document class and has not been turned off on this queryset.
    """
    should_deref = not no_dereferencing_active_for_class(self._document)
    return should_deref and self.__auto_dereference
def no_dereference(self):
    """Turn off any dereferencing for the results of this queryset."""
    queryset = self.clone()
    # Sets the name-mangled flag read back by the _auto_dereference property.
    queryset.__auto_dereference = False
    return queryset
# Helper Functions
def _item_frequencies_map_reduce(self, field, normalize=False):
    """Compute ``{item: count}`` for *field* via a server-side map/reduce.

    The map function emits each value (or each element of a list value,
    or null when the path is missing) with a count of 1; the reduce
    function sums the counts.
    """
    # Doubled braces survive .format(); {{~field}} is later replaced with
    # the db field path by _sub_js_fields inside map_reduce().
    map_func = """
        function() {{
            var path = '{{{{~{field}}}}}'.split('.');
            var field = this;

            for (p in path) {{
                if (typeof field != 'undefined')
                   field = field[path[p]];
                else
                   break;
            }}
            if (field && field.constructor == Array) {{
                field.forEach(function(item) {{
                    emit(item, 1);
                }});
            }} else if (typeof field != 'undefined') {{
                emit(field, 1);
            }} else {{
                emit(null, 1);
            }}
        }}
    """.format(
        field=field
    )
    reduce_func = """
        function(key, values) {
            var total = 0;
            var valuesSize = values.length;
            for (var i=0; i < valuesSize; i++) {
                total += parseInt(values[i], 10);
            }
            return total;
        }
    """
    values = self.map_reduce(map_func, reduce_func, "inline")
    frequencies = {}
    for f in values:
        key = f.key
        if isinstance(key, float):
            # Collapse float keys that are whole numbers (JS numbers come
            # back as floats) to ints.
            if int(key) == key:
                key = int(key)
        frequencies[key] = int(f.value)

    if normalize:
        count = sum(frequencies.values())
        frequencies = {k: float(v) / count for k, v in frequencies.items()}

    return frequencies
def _item_frequencies_exec_js(self, field, normalize=False):
    """Uses exec_js to execute"""
    # The JS function walks the (possibly nested) field path per document,
    # first counting the total number of items, then tallying frequencies.
    freq_func = """
        function(path) {
            var path = path.split('.');

            var total = 0.0;
            db[collection].find(query).forEach(function(doc) {
                var field = doc;
                for (p in path) {
                    if (field)
                        field = field[path[p]];
                    else
                        break;
                }
                if (field && field.constructor == Array) {
                    total += field.length;
                } else {
                    total++;
                }
            });

            var frequencies = {};
            var types = {};
            var inc = 1.0;

            db[collection].find(query).forEach(function(doc) {
                field = doc;
                for (p in path) {
                    if (field)
                        field = field[path[p]];
                    else
                        break;
                }
                if (field && field.constructor == Array) {
                    field.forEach(function(item) {
                        frequencies[item] = inc + (isNaN(frequencies[item]) ? 0: frequencies[item]);
                    });
                } else {
                    var item = field;
                    types[item] = item;
                    frequencies[item] = inc + (isNaN(frequencies[item]) ? 0: frequencies[item]);
                }
            });
            return [total, frequencies, types];
        }
    """
    total, data, types = self.exec_js(freq_func, field)
    # JS object keys are strings; use `types` to recover original key types.
    values = {types.get(k): int(v) for k, v in data.items()}

    if normalize:
        values = {k: float(v) / total for k, v in values.items()}

    frequencies = {}
    for k, v in values.items():
        if isinstance(k, float):
            # Collapse whole-number float keys to ints.
            if int(k) == k:
                k = int(k)

        frequencies[k] = v

    return frequencies
def _fields_to_dbfields(self, fields):
    """Translate fields' paths to their db equivalents.

    :param fields: iterable of dotted MongoEngine field paths
    :return: list of dotted db field paths
    :raises LookUpError: when a path resolves on neither the document
        class nor (when inheritance is allowed) any of its subclasses
    """
    subclasses = []
    if self._document._meta["allow_inheritance"]:
        # Skip index 0: the first entry is the document class itself.
        subclasses = [_DocumentRegistry.get(x) for x in self._document._subclasses][
            1:
        ]

    db_field_paths = []
    for field in fields:
        field_parts = field.split(".")
        try:
            field = ".".join(
                f if isinstance(f, str) else f.db_field
                for f in self._document._lookup_field(field_parts)
            )
            db_field_paths.append(field)
        except LookUpError as err:
            found = False

            # If a field path wasn't found on the main document, go
            # through its subclasses and see if it exists on any of them.
            for subdoc in subclasses:
                try:
                    subfield = ".".join(
                        f if isinstance(f, str) else f.db_field
                        for f in subdoc._lookup_field(field_parts)
                    )
                    db_field_paths.append(subfield)
                    found = True
                    break
                except LookUpError:
                    pass

            if not found:
                raise err

    return db_field_paths
def _get_order_by(self, keys):
    """Translate MongoEngine-style sort keys into PyMongo sorting tuples.

    >>> qs._get_order_by(['-last_name', 'first_name'])
    [('last_name', -1), ('first_name', 1)]
    """
    order = []
    for raw_key in keys:
        if not raw_key:
            continue
        if raw_key == "$text_score":
            # Special marker: sort on the text-search relevance score.
            order.append(("_text_score", {"$meta": "textScore"}))
            continue

        direction = (
            pymongo.DESCENDING if raw_key[0] == "-" else pymongo.ASCENDING
        )
        key = raw_key[1:] if raw_key[0] in ("-", "+") else raw_key
        key = key.replace("__", ".")
        try:
            key = self._document._translate_field_name(key)
        except Exception:
            # TODO this exception should be more specific
            pass
        order.append((key, direction))
    return order
def _get_scalar(self, doc):
def lookup(obj, name):
chunks = name.split("__")
for chunk in chunks:
obj = getattr(obj, chunk)
return obj
data = [lookup(doc, n) for n in self._scalar]
if len(data) == 1:
return data[0]
return tuple(data)
def _sub_js_fields(self, code):
    """When fields are specified with [~fieldname] syntax, where
    *fieldname* is the Python name of a field, *fieldname* will be
    substituted for the MongoDB name of the field (specified using the
    :attr:`name` keyword argument in a field's constructor).
    """

    def field_sub(match):
        # Extract just the field name, and look up the field objects
        field_name = match.group(1).split(".")
        fields = self._document._lookup_field(field_name)
        # Substitute the correct name for the field into the javascript
        return '["%s"]' % fields[-1].db_field

    def field_path_sub(match):
        # Extract just the field name, and look up the field objects
        field_name = match.group(1).split(".")
        fields = self._document._lookup_field(field_name)
        # Substitute the correct name for the field into the javascript
        return ".".join([f.db_field for f in fields])

    # [~name] -> ["db_name"]  (bracket access, last path component only)
    code = re.sub(r"\[\s*~([A-z_][A-z_0-9.]+?)\s*\]", field_sub, code)
    # {{~a.b}} -> db_a.db_b  (full dotted path)
    code = re.sub(r"\{\{\s*~([A-z_][A-z_0-9.]+?)\s*\}\}", field_path_sub, code)
    return code
def _chainable_method(self, method_name, val):
    """Clone the queryset, call ``cursor.<method_name>(val)`` on the
    clone's cursor, remember *val* on the clone as ``_<method_name>``,
    and return the clone so calls can be chained.
    """
    qs = self.clone()
    # Materialize (or reuse) the clone's PyMongo cursor and invoke the
    # requested chainable cursor method with the provided value.
    getattr(qs._cursor, method_name)(val)
    # Cache the value as _<method_name> so later clones carry it forward.
    setattr(qs, "_" + method_name, val)
    return qs
| BaseQuerySet |
python | HIPS__autograd | autograd/core.py | {
"start": 6403,
"end": 8318
} | class ____:
__slots__ = []
# Registry mapping Python value types to VSpace factories (see register()).
mappings = {}
# Presumably overridden to True by complex-valued subclasses — confirm.
iscomplex = False

def __init__(self, value):
    # The base class keeps no per-value state; subclasses may inspect *value*.
    pass
# The following are abstract: concrete vector spaces must provide the zero
# element, the ones-like element, a standard basis, and random sampling.
def zeros(self):
    assert False, repr(self)

def ones(self):
    assert False, repr(self)

def standard_basis(self):
    assert False, repr(self)

def randn(self):
    assert False, repr(self)
# The @primitive-wrapped operations delegate to the plain `_`-prefixed
# implementations below, which subclasses override.
@primitive
def mut_add(self, x_prev, x_new):
    # In-place accumulate; a missing (None) accumulator starts from zeros().
    x_prev = x_prev if x_prev is not None else self.zeros()
    return self._mut_add(x_prev, x_new)

@primitive
def add(self, x_prev, x_new):
    return self._add(x_prev, x_new)

@primitive
def scalar_mul(self, x, a):
    return self._scalar_mul(x, a)

@primitive
def inner_prod(self, x, y):
    return self._inner_prod(x, y)

@primitive
def covector(self, x):
    return self._covector(x)
# Default implementations, overridable per vector space.
def _add(self, x, y):
    return x + y

def _mut_add(self, x, y):
    # In-place += ; callers must not rely on x being preserved.
    x += y
    return x

def _scalar_mul(self, x, a):
    return x * a

def _inner_prod(self, x, y):
    # No sensible default inner product exists for the base class.
    assert False

def _covector(self, x):
    # Identity by default; complex spaces may conjugate.
    return x
def __eq__(self, other):
    # Spaces are equal only with identical concrete type and attributes.
    return type(self) == type(other) and self.__dict__ == other.__dict__

def __repr__(self):
    return f"{type(self).__name__}_{self.__dict__}"
@classmethod
def register(cls, value_type, vspace_maker=None):
    """Register *value_type* with a factory (default: the subclass itself)."""
    if vspace_maker:
        VSpace.mappings[value_type] = vspace_maker
    else:
        VSpace.mappings[value_type] = cls
def vspace(value):
    """Return the vector-space instance registered for *value*'s type."""
    try:
        return VSpace.mappings[type(value)](value)
    except KeyError:
        if isbox(value):
            # Boxed (traced) values: recurse on the wrapped raw value.
            return vspace(getval(value))
        else:
            raise TypeError(
                "Can't find vector space for value {} of type {}. Valid types are {}".format(
                    value, type(value), VSpace.mappings.keys()
                )
            )
| VSpace |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mssql/pyodbc.py | {
"start": 19219,
"end": 19457
} | class ____(sqltypes.String):
def get_dbapi_type(self, dbapi):
    # Large or unbounded VARCHARs are described as a (type, size, scale)
    # triple — presumably consumed by pyodbc's setinputsizes; confirm.
    if self.length in (None, "max") or self.length >= 2000:
        return (dbapi.SQL_VARCHAR, 0, 0)
    else:
        return dbapi.SQL_VARCHAR
| _String_pyodbc |
python | huggingface__transformers | src/transformers/testing_utils.py | {
"start": 77380,
"end": 82706
} | class ____:
def __init__(self, returncode, stdout, stderr):
    # Simple value object holding a finished subprocess's exit code and
    # its captured stdout/stderr lines.
    self.returncode = returncode
    self.stdout = stdout
    self.stderr = stderr
async def _read_stream(stream, callback):
    """Invoke *callback* for every line of the async *stream* until EOF."""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            # readline() returns b"" at EOF.
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Spawn *cmd* and concurrently capture (and optionally echo) its
    stdout/stderr line-by-line while it runs.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # Decode, strip the trailing newline, store, and optionally echo.
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Run *cmd* to completion via the asyncio helper and validate the result.

    :raises RuntimeError: on a non-zero exit code, or when no output at all
        was produced (guards tests that assert on the remote side).
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    """
    Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
    if `-n 1` or `pytest-xdist` isn't being used.
    """
    raw = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    # Worker ids look like "gw0", "gw1", ...; strip the "gw" prefix.
    numeric_part = re.sub(r"^gw", "", raw, count=0, flags=re.MULTILINE)
    return int(numeric_part)
def get_torch_dist_unique_port():
    """
    Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.

    Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
    port at once.
    """
    base_port = 29500
    # Offset by the xdist worker id so parallel workers get distinct ports.
    return base_port + pytest_xdist_worker_id()
def nested_simplify(obj, decimals=3):
    """
    Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
    within tests.

    :param obj: nested structure of lists/tuples/dicts/arrays/tensors/scalars
    :param decimals: number of decimal places kept when rounding floats
    """
    import numpy as np

    if isinstance(obj, list):
        return [nested_simplify(item, decimals) for item in obj]
    if isinstance(obj, tuple):
        return tuple(nested_simplify(item, decimals) for item in obj)
    elif isinstance(obj, np.ndarray):
        # Bug fix: propagate `decimals` — previously arrays were simplified
        # with the default precision regardless of the caller's setting.
        return nested_simplify(obj.tolist(), decimals)
    elif isinstance(obj, Mapping):
        return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
    elif isinstance(obj, (str, int, np.int64)) or obj is None:
        # Scalars that already compare exactly pass through unchanged.
        return obj
    elif is_torch_available() and isinstance(obj, torch.Tensor):
        return nested_simplify(obj.tolist(), decimals)
    elif isinstance(obj, float):
        return round(obj, decimals)
    elif isinstance(obj, (np.int32, np.float32, np.float16)):
        return nested_simplify(obj.item(), decimals)
    else:
        raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
    """Assert that *file_path* holds pretty-printed JSON with 2-space indent.

    A single-line file must be exactly "{}" (empty dict); otherwise the file
    must open with "{", close with "}", and every interior line must be
    indented by at least 2 spaces (nested values may be indented deeper).
    """
    with open(file_path) as f:
        lines = f.readlines()
    if len(lines) == 1:
        # length can only be 1 if dict is empty
        assert lines[0] == "{}"
    else:
        # otherwise make sure json has correct format (at least 3 lines)
        assert len(lines) >= 3
        # each key one line, ident should be 2, min length is 3
        assert lines[0].strip() == "{"
        for line in lines[1:-1]:
            # Bug fix: the loop previously measured the indent of lines[1]
            # on every iteration (the loop variable was unused), so only the
            # first entry was ever checked. Check each line; require at
            # least 2 spaces to accommodate nested values.
            left_indent = len(line) - len(line.lstrip())
            assert left_indent >= 2
        assert lines[-1].strip() == "}"
def to_2tuple(x):
    """Duplicate *x* into a pair unless it is already an iterable."""
    return x if isinstance(x, collections.abc.Iterable) else (x, x)
# These utils relate to ensuring the right error message is received when running scripts
| _RunOutput |
python | scrapy__scrapy | scrapy/pqueues.py | {
"start": 7510,
"end": 8190
} | class ____:
def __init__(self, crawler: Crawler):
    # The engine (and thus its downloader) must already be created.
    assert crawler.engine
    self.downloader: Downloader = crawler.engine.downloader

def stats(self, possible_slots: Iterable[str]) -> list[tuple[int, str]]:
    """Return (active-download-count, slot) pairs for the given slots."""
    return [(self._active_downloads(slot), slot) for slot in possible_slots]

def get_slot_key(self, request: Request) -> str:
    """Delegate slot-key computation to the downloader."""
    return self.downloader.get_slot_key(request)

def _active_downloads(self, slot: str) -> int:
    """Return a number of requests in a Downloader for a given slot"""
    if slot not in self.downloader.slots:
        return 0
    return len(self.downloader.slots[slot].active)
| DownloaderInterface |
python | pallets__werkzeug | examples/simplewiki/utils.py | {
"start": 2177,
"end": 2371
} | class ____(BaseRequest):
"""
Simple request subclass that allows to bind the object to the
current context.
"""
def bind_to_context(self):
    # Publish this request on the context-local so other helpers can
    # reach the "current" request without passing it around.
    local.request = self
| Request |
python | doocs__leetcode | solution/3400-3499/3442.Maximum Difference Between Even and Odd Frequency I/Solution.py | {
"start": 0,
"end": 259
} | class ____:
def maxDifference(self, s: str) -> int:
    """Return (max odd character frequency) - (min even character frequency)
    over the characters of *s*.
    """
    freqs = Counter(s).values()
    best_odd = max((c for c in freqs if c % 2 == 1), default=0)
    best_even = min((c for c in freqs if c % 2 == 0), default=inf)
    return best_odd - best_even
| Solution |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/rpc_test.py | {
"start": 18353,
"end": 31885
} | class ____:
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
    # Dispatch helper: run fn on `to` via sync RPC, async RPC, or remote RRef.
    if mode == RPCExecMode.SYNC:
        return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
    elif mode == RPCExecMode.ASYNC:
        return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
    elif mode == RPCExecMode.REMOTE:
        return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()

def _self_py_udf_remote(self, worker_info, x, y, z):
    # Create a remote call targeting ourselves and check the fetched result.
    rref = rpc.remote(worker_info, my_function, args=(x, y, z))
    self.assertEqual(rref.to_here(), x + y + z)

def _self_remote_rref_as_rpc_arg(self, dst, x, y, z):
    # Pass an RRef owned by this worker as an argument to sync/async RPCs.
    self_worker_info = rpc.get_worker_info()
    rref = rpc.remote(self_worker_info, my_function, args=(x, y, z))
    fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, x))
    ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, x + y))
    self.assertEqual(ret, x + y + z + x + y)
    self.assertEqual(fut.wait(), x + y + z + x)

def _self_remote_rref_as_remote_arg(self, dst, x, y, z):
    # Pass a self-owned RRef as an argument to a remote call.
    self_worker_info = rpc.get_worker_info()
    rref = rpc.remote(self_worker_info, my_function, args=(x, y, z))
    ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, x))
    self.assertEqual(ret_rref.to_here(), x + y + z + x)
def _world_size_one(self, a, b):
    # Only rank 0 participates: spin up a single-process RPC world named
    # "me", exercise all three call styles against itself, then shut down.
    if self.rank == 0:
        rpc.init_rpc(
            name="me",
            backend=self.rpc_backend,
            rank=0,
            world_size=1,
            rpc_backend_options=self.rpc_backend_options,
        )

        def _rpc_sync(x, y):
            expect = x * 2
            result = rpc.rpc_sync("me", my_tensor_function, args=(x, y))
            self.assertEqual(expect, result)

        def _rpc_async(x, y):
            expect = x * 2
            result = rpc.rpc_async("me", my_tensor_function, args=(x, y)).wait()
            self.assertEqual(expect, result)

        def _remote(x, y):
            expect = x * 2
            result = rpc.remote("me", my_tensor_function, args=(x, y)).to_here()
            self.assertEqual(expect, result)

        _rpc_sync(a, b)
        _rpc_async(a, b)
        _remote(a, b)

        rpc.shutdown()
def _multi_rpc(self, sparse):
    # Fire 20 sequential rpc_sync torch.add calls to the next rank and
    # verify each result (sparse or dense operands depending on the flag).
    dst_rank = (self.rank + 1) % self.world_size
    for i in range(20):
        n = i + self.rank + 1
        if sparse:
            x = build_sparse_tensor() * n
            y = build_sparse_tensor() * n
        else:
            x = torch.ones(2, 2)
            y = torch.ones(2, 2)
        ret = rpc.rpc_sync(
            worker_name(dst_rank),
            torch.add,
            args=(x, y),
        )
        self.assertEqual(ret, x * 2)
def _run_uneven_workload(self, f, x, num_repeat=30):
# worker0 drives and waits for worker1 and worker2
# throughout the test.
if self.rank == 0:
self.assertTrue(self.world_size >= 3)
# Phase 1: Only worker1 has workload.
dst = "worker1"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, f, args=(x,))
futs.append(fut)
for fut in torch.futures.collect_all(futs).wait():
self.assertEqual(fut.wait(), 0)
# Phase 2: Only worker2 has workload.
# If join is not correctly implemented,
# worker2 should be closed by now.
dst = "worker2"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, f, args=(x,))
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
def _wait_all_workers(self, f, x):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=f"worker{self.rank:d}",
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload(f, x)
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 calls this immediately and has some works after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
def _wait_all_workers_twice(self, f, x):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=f"worker{self.rank:d}",
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload(f, x)
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 calls this immediately and has some works after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
def _nested_rpc(self, f, expected):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
f,
args=(worker_name(self.rank),),
)
self.assertEqual(ret, expected)
def _stress_test_rpc(self, f, repeat=1000, args=()):
n = self.rank + 1
dst_rank = n % self.world_size
futs = []
tik = time.time()
for _ in range(repeat):
fut = rpc.rpc_async(worker_name(dst_rank), f, args=args)
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
tok = time.time()
print(
f"Rank {self.rank} finished testing {repeat} times in {tok - tik} seconds."
)
def _builtin_remote_ret(self, x, y, expected):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(x, y),
)
self.assertEqual(rref.to_here(), expected)
def _builtin_remote_self(self, x, y, expected):
rref = rpc.remote(
worker_name(self.rank),
torch.add,
args=(x, y),
)
self.assertEqual(rref.local_value(), expected)
def _test_multi_remote_call(
self, fn, sparse, args_fn=lambda x, y: (), kwargs_fn=lambda x, y: {}
):
m = 10
n = self.rank + 1
dst_rank = n % self.world_size
rrefs = []
expected = []
for i in range(m):
n = n + i
rrefs.append(
rpc.remote(
worker_name(dst_rank),
fn,
args=args_fn(n, sparse),
kwargs=kwargs_fn(n, sparse),
)
)
expected.append(fn(*args_fn(n, sparse), **kwargs_fn(n, sparse)))
for i in range(m):
self.assertEqual(rrefs[i].to_here(), expected[i])
def _py_rref_args(self, a, b, x, y, expected):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(worker_name(dst_rank), torch.add, args=(a, b))
rref_b = rpc.remote(worker_name(dst_rank), torch.add, args=(x, y))
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), expected)
def _py_rref_args_user_share(self, a, b, c, x, y, z, expected):
n = self.rank + 1
owner_rank = n % self.world_size
user_rank = (n + 1) % self.world_size
rref_a = rpc.remote(worker_name(owner_rank), my_function, args=(a, b, c))
rref_b = rpc.remote(worker_name(owner_rank), my_function, args=(x, y, z))
rref_c = rpc.remote(
worker_name(user_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), expected)
def _py_rpc_rref_args(self, a, b, c, x, y, z, expected):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(worker_name(dst_rank), my_function, args=(a, b, c))
rref_b = rpc.remote(worker_name(dst_rank), my_function, args=(x, y, z))
c = rpc.rpc_sync(worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b))
self.assertEqual(c, expected)
def _nested_remote(self, f, expected):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), expected)
def _nested_rref(self, f, expected1, expected2):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref_of_rrefs = rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
)
# Say C has 2 OwnerRRefs.
# B has 2 UserRRefs to those 2 OwnerRRefs, respectively.
# This call is effectively A asking B to share its 2 UserRRefs.
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), expected1)
self.assertEqual(rrefs[1].to_here(), expected2)
def _nested_rref_stress(self, f, expected1, expected2):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
all_rrefs = [
rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
)
for _ in range(20)
]
for i in range(20):
rref_of_rrefs = all_rrefs[i]
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), expected1)
self.assertEqual(rrefs[1].to_here(), expected2)
def _trainer_func(self, rref, sparse):
m = MyEmbeddingBagModel(sparse=sparse)
loss_fn = nn.MSELoss()
for i in range(10):
outputs = m(torch.rand(10, 10).long())
loss_fn(outputs, torch.rand(10, 10)).backward()
gradient = next(iter(m.parameters())).grad
fut = rref.rpc_async().average(rref, i, gradient)
gradient = fut.wait()
if gradient.is_sparse:
gradient = gradient.to_dense().double()
ps_gradient = rref.rpc_sync().get_gradient(rref)
if ps_gradient.is_sparse:
ps_gradient = ps_gradient.to_dense().double()
self.assertTrue(torch.equal(gradient, ps_gradient))
def _my_parameter_server(self, sparse):
ps_rref = RRef(MyParameterServer(self.world_size - 1))
futures = [
rpc.rpc_async(
worker_name((self.rank + index) % self.world_size),
self._trainer_func,
args=(ps_rref, sparse),
)
for index in range(1, self.world_size)
]
torch.futures.wait_all(futures)
def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor):
# We check proper CUDA stream synchronization by adding to the tensor
# in one stream to get the expected value, and reading it from another stream.
future = Future(devices=["cuda:0"])
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
another_stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
if sparse_tensor:
tensor = build_sparse_tensor().to("cuda:0")
add_tensor = build_sparse_tensor().to("cuda:0")
expected_tensor = (tensor + add_tensor).coalesce()
else:
tensor = torch.zeros((100,), device="cuda:0")
add_tensor = torch.ones((100,), device="cuda:0")
expected_tensor = tensor + add_tensor
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor += add_tensor
if sparse_tensor:
tensor = tensor.coalesce()
future.set_result(wrapper(tensor))
with torch.cuda.stream(another_stream):
tensor = unwrapper(future.wait())
if sparse_tensor:
self.assertTrue(
torch.eq(tensor.indices(), expected_tensor.indices())
.all()
.item()
)
self.assertTrue(
torch.eq(tensor.values(), expected_tensor.values()).all().item()
)
self.assertEqual(tensor.size(), expected_tensor.size())
else:
self.assertTrue(torch.eq(tensor, expected_tensor).all().item())
| RpcTestCommon |
python | doocs__leetcode | solution/2800-2899/2850.Minimum Moves to Spread Stones Over Grid/Solution.py | {
"start": 0,
"end": 1105
} | class ____:
def minimumMoves(self, grid: List[List[int]]) -> int:
q = deque([tuple(tuple(row) for row in grid)])
vis = set(q)
ans = 0
dirs = (-1, 0, 1, 0, -1)
while 1:
for _ in range(len(q)):
cur = q.popleft()
if all(x for row in cur for x in row):
return ans
for i in range(3):
for j in range(3):
if cur[i][j] > 1:
for a, b in pairwise(dirs):
x, y = i + a, j + b
if 0 <= x < 3 and 0 <= y < 3 and cur[x][y] < 2:
nxt = [list(row) for row in cur]
nxt[i][j] -= 1
nxt[x][y] += 1
nxt = tuple(tuple(row) for row in nxt)
if nxt not in vis:
vis.add(nxt)
q.append(nxt)
ans += 1
| Solution |
python | pytorch__pytorch | torch/_inductor/tiling_utils.py | {
"start": 21836,
"end": 27625
} | class ____:
# Var -> Memory Score - not strictly the amount of memory
# because we multiply writes x2
# TODO: separate into dataclass that olds mem, dtype, is_write
coalesced_by_var: dict[sympy.Expr, int]
uncoalesced_addrs: dict[sympy.Expr, int]
norm_read_writes: FusedNormalizedReadsWrites
suggested_split: Optional[VarTiling] = None
def analyze_memory_coalescing(
fused_node: Union["FusedSchedulerNode", "SchedulerNode"],
) -> Optional[CoalesceVarAnalysis]:
"""
Find variables that coalesce the reads and writes and score the total size.
If uncoalesced memory expressions are found, look for additionally tiling of variables
which will coalesce memory accesses.
For instance - for the following expression:
(32*p0) // 2048
Tiling p0 by 64 will make this expression coalesced.
"""
norm_read_writes = extract_normalized_read_writes(fused_node)
if norm_read_writes is None:
return None
reads = norm_read_writes.reads
writes = norm_read_writes.writes
var_ranges = norm_read_writes.var_ranges
coalesced_by_var: dict[sympy.Symbol, int] = Counter()
uncoalesced_addrs: dict[sympy.Expr, int] = Counter()
for is_read, (memory_expr, buf_names) in itertools.chain(
((True, item) for item in reads.items()),
((False, item) for item in writes.items()),
):
# skip memory deps with indirect vars - todo: better handling
indirect_expr = bool(
memory_expr.free_symbols - norm_read_writes.var_ranges.keys()
)
if indirect_expr:
continue
size = get_score(memory_expr, var_ranges, buf_names)
if size == 0:
continue
maybe_coalesced_var = find_coalesced_var(memory_expr, var_ranges)
# while broadcasting vars are not technically coalesced,
# accesses at least stay in cache, so they provide most of the benefit.
# treat the same for now.
if maybe_coalesced_var is None:
maybe_coalesced_var = find_broadcast_var(memory_expr, var_ranges)
total_score = 0
for buf_name in buf_names:
if (buf := V.graph.try_get_buffer(buf_name)) and (
buf_size := try_get_buf_size(buf_name)
):
# constrain by buf size since we'll read at most that many elements
# score could be more through either masking or by broadcasting (e.g. x // 16)
total_score += min(buf_size, size) * buf.dtype.itemsize
# coalesced writes more important
total_score *= 1 if is_read else 2
if maybe_coalesced_var:
coalesced_by_var[maybe_coalesced_var] += total_score
else:
uncoalesced_addrs[memory_expr] += total_score
if not uncoalesced_addrs:
return CoalesceVarAnalysis(
coalesced_by_var=coalesced_by_var,
uncoalesced_addrs=uncoalesced_addrs,
norm_read_writes=norm_read_writes,
)
# map from var -> tiling -> total_score
tiling_scores: dict[sympy.Expr, dict[int, int]] = defaultdict(Counter)
for uncoalesced_expr, addr_score in uncoalesced_addrs.items():
expr_subs = dict.fromkeys(uncoalesced_expr.free_symbols, 0)
for v in uncoalesced_expr.free_symbols:
# skip non iter/reduce var variables
if v not in var_ranges:
continue
# skip small addrs
if addr_score == 0:
continue
del expr_subs[v]
single_var_expr = sympy_subs(uncoalesced_expr, expr_subs)
expr_subs[v] = 0
tiling_factor = solve_for_tiling(single_var_expr)
if (
tiling_factor is None
or not tiling_factor.is_constant()
or not tiling_factor.is_integer
):
continue
tiling_factor = int(tiling_factor)
if not V.graph.sizevars.statically_known_lt(tiling_factor, var_ranges[v]):
continue
# TODO - if a var is in the middle, such as [n0, n1, n2]
# n1 can can be split beyond range
MIN_TILING_BLOCK = 8
if not all(
V.graph.sizevars.statically_known_lt(MIN_TILING_BLOCK, block)
for block in (tiling_factor, var_ranges[v] // tiling_factor)
):
continue
tiling_scores[v][tiling_factor] += addr_score
if len(tiling_scores) == 0:
return CoalesceVarAnalysis(
coalesced_by_var=coalesced_by_var,
uncoalesced_addrs=uncoalesced_addrs,
norm_read_writes=norm_read_writes,
)
best_tiling: Optional[tuple[sympy.Expr, int]] = None
best_tiling_score = 0
for var, tiling_counter in tiling_scores.items():
for tile, tile_score in tiling_counter.items():
if tile_score > best_tiling_score:
best_tiling = (var, tile)
best_tiling_score = tile_score
if best_tiling is None:
return CoalesceVarAnalysis(
coalesced_by_var=coalesced_by_var,
uncoalesced_addrs=uncoalesced_addrs,
norm_read_writes=norm_read_writes,
)
# TODO - for strictly pointwise fusions,
# we can consider just swizzling the var if the var we are going to tile
# does not coalesce a significant portion of global reads
# TODO - could also prefer index var splits to reduction, better tested
return CoalesceVarAnalysis(
coalesced_by_var=coalesced_by_var,
uncoalesced_addrs=uncoalesced_addrs,
norm_read_writes=norm_read_writes,
suggested_split=VarTiling(best_tiling[0], best_tiling[1], best_tiling_score),
)
| CoalesceVarAnalysis |
python | Lightning-AI__lightning | src/lightning/fabric/accelerators/xla.py | {
"start": 954,
"end": 6467
} | class ____(Accelerator):
"""Accelerator for XLA devices, normally TPUs.
.. warning:: Use of this accelerator beyond import and instantiation is experimental.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
if not _XLA_AVAILABLE:
raise ModuleNotFoundError(str(_XLA_AVAILABLE))
if not _using_pjrt():
raise RuntimeError("The XLA XRT runtime is not supported anymore.")
super().__init__(*args, **kwargs)
@override
def setup_device(self, device: torch.device) -> None:
pass
@override
def teardown(self) -> None:
pass
@staticmethod
@override
def parse_devices(devices: Union[int, str, list[int]]) -> Union[int, list[int]]:
"""Accelerator device parsing logic."""
return _parse_tpu_devices(devices)
@staticmethod
@override
def get_parallel_devices(devices: Union[int, list[int]]) -> list[torch.device]:
"""Gets parallel devices for the Accelerator."""
devices = _parse_tpu_devices(devices)
if isinstance(devices, int):
return [torch.device("xla", i) for i in range(devices)]
# list of devices is not supported, just a specific index, fine to access [0]
return [torch.device("xla", devices[0])]
# we cannot create `xla_device` here because processes have not been spawned yet (this is called in the
# accelerator connector init). However, there doesn't seem to be a problem with instantiating `torch.device`.
# it will be replaced with `xla_device` (also a torch.device`, but with extra logic) in the strategy
@staticmethod
@override
# XLA's multiprocessing will pop the TPU_NUM_DEVICES key, so we need to cache it
# https://github.com/pytorch/xla/blob/v2.0.0/torch_xla/distributed/xla_multiprocessing.py#L280
@functools.lru_cache(maxsize=1)
def auto_device_count() -> int:
"""Get the devices when set to auto."""
if not _XLA_AVAILABLE:
return 0
if _XLA_GREATER_EQUAL_2_1:
from torch_xla._internal import tpu
return tpu.num_available_devices()
from torch_xla.experimental import tpu
device_count_on_version = {2: 8, 3: 8, 4: 4}
return device_count_on_version.get(tpu.version(), 8)
@staticmethod
@override
@functools.lru_cache(maxsize=1)
def is_available() -> bool:
try:
return XLAAccelerator.auto_device_count() > 0
except (ValueError, AssertionError, OSError):
# XLA may raise these exceptions if it's not properly configured. This needs to be avoided for the cases
# when `torch_xla` is imported but not used
return False
@staticmethod
@override
def name() -> str:
return "tpu"
@classmethod
@override
def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None:
accelerator_registry.register(
cls.name(),
cls,
description=cls.__name__,
)
# PJRT support requires this minimum version
_XLA_AVAILABLE = RequirementCache("torch_xla>=1.13", "torch_xla")
_XLA_GREATER_EQUAL_2_1 = RequirementCache("torch_xla>=2.1")
_XLA_GREATER_EQUAL_2_5 = RequirementCache("torch_xla>=2.5")
def _using_pjrt() -> bool:
# `using_pjrt` is removed in torch_xla 2.5
if _XLA_GREATER_EQUAL_2_5:
from torch_xla import runtime as xr
return xr.device_type() is not None
# delete me when torch_xla 2.2 is the min supported version, where XRT support has been dropped.
if _XLA_GREATER_EQUAL_2_1:
from torch_xla import runtime as xr
return xr.using_pjrt()
from torch_xla.experimental import pjrt
return pjrt.using_pjrt()
def _parse_tpu_devices(devices: Union[int, str, list[int]]) -> Union[int, list[int]]:
"""Parses the TPU devices given in the format as accepted by the
:class:`~lightning.pytorch.trainer.trainer.Trainer` and :class:`~lightning.fabric.Fabric`.
Args:
devices: An int of 1 or string '1' indicates that 1 core with multi-processing should be used
An int 8 or string '8' indicates that all 8 cores with multi-processing should be used
A single element list of int or string can be used to indicate the specific TPU core to use.
Returns:
A list of tpu cores to be used.
"""
_check_data_type(devices)
if isinstance(devices, str):
devices = _parse_tpu_devices_str(devices)
_check_tpu_devices_valid(devices)
return devices
def _check_tpu_devices_valid(devices: object) -> None:
device_count = XLAAccelerator.auto_device_count()
if (
# support number of devices
isinstance(devices, int)
and devices in {1, device_count}
# support picking a specific device
or isinstance(devices, (list, tuple))
and len(devices) == 1
and 0 <= devices[0] <= device_count - 1
):
return
raise ValueError(
f"`devices` can only be 'auto', 1, {device_count} or [<0-{device_count - 1}>] for TPUs. Got {devices!r}"
)
def _parse_tpu_devices_str(devices: str) -> Union[int, list[int]]:
devices = devices.strip()
try:
return int(devices)
except ValueError:
try:
return [int(x.strip()) for x in devices.split(",") if len(x) > 0]
except ValueError:
raise ValueError(f"Could not parse the selected TPU devices: {devices!r}")
| XLAAccelerator |
python | django__django | tests/modeladmin/test_checks.py | {
"start": 15025,
"end": 16752
} | class ____(CheckTestCase):
def test_not_dictionary(self):
class TestModelAdmin(ModelAdmin):
radio_fields = ()
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'radio_fields' must be a dictionary.",
"admin.E021",
)
def test_missing_field(self):
class TestModelAdmin(ModelAdmin):
radio_fields = {"non_existent_field": VERTICAL}
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'radio_fields' refers to 'non_existent_field', "
"which is not a field of 'modeladmin.ValidationTestModel'.",
"admin.E022",
)
def test_invalid_field_type(self):
class TestModelAdmin(ModelAdmin):
radio_fields = {"name": VERTICAL}
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'radio_fields' refers to 'name', which is not an instance "
"of ForeignKey, and does not have a 'choices' definition.",
"admin.E023",
)
def test_invalid_value(self):
class TestModelAdmin(ModelAdmin):
radio_fields = {"state": None}
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'radio_fields[\"state\"]' must be either admin.HORIZONTAL or "
"admin.VERTICAL.",
"admin.E024",
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
radio_fields = {"state": VERTICAL}
self.assertIsValid(TestModelAdmin, ValidationTestModel)
| RadioFieldsCheckTests |
python | dagster-io__dagster | python_modules/dagster/dagster/_vendored/dateutil/tz/tz.py | {
"start": 33860,
"end": 38491
} | class ____(tzrange):
"""
``tzstr`` objects are time zone objects specified by a time-zone string as
it would be passed to a ``TZ`` variable on POSIX-style systems (see
the `GNU C Library: TZ Variable`_ for more details).
There is one notable exception, which is that POSIX-style time zones use an
inverted offset format, so normally ``GMT+3`` would be parsed as an offset
3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an
offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX
behavior, pass a ``True`` value to ``posix_offset``.
The :class:`tzrange` object provides the same functionality, but is
specified using :class:`relativedelta.relativedelta` objects. rather than
strings.
:param s:
A time zone string in ``TZ`` variable format. This can be a
:class:`bytes` (2.x: :class:`str`), :class:`str` (2.x:
:class:`unicode`) or a stream emitting unicode characters
(e.g. :class:`StringIO`).
:param posix_offset:
Optional. If set to ``True``, interpret strings such as ``GMT+3`` or
``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the
POSIX standard.
.. caution::
Prior to version 2.7.0, this function also supported time zones
in the format:
* ``EST5EDT,4,0,6,7200,10,0,26,7200,3600``
* ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600``
This format is non-standard and has been deprecated; this function
will raise a :class:`DeprecatedTZFormatWarning` until
support is removed in a future version.
.. _`GNU C Library: TZ Variable`:
https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
"""
def __init__(self, s, posix_offset=False):
global parser
# CHANGED IN VENDORED VERSION
from ..parser import _parser as parser
self._s = s
res = parser._parsetz(s)
if res is None or res.any_unused_tokens:
raise ValueError("unknown string format")
# Here we break the compatibility with the TZ variable handling.
# GMT-3 actually *means* the timezone -3.
if res.stdabbr in ("GMT", "UTC") and not posix_offset:
res.stdoffset *= -1
# We must initialize it first, since _delta() needs
# _std_offset and _dst_offset set. Use False in start/end
# to avoid building it two times.
tzrange.__init__(self, res.stdabbr, res.stdoffset,
res.dstabbr, res.dstoffset,
start=False, end=False)
if not res.dstabbr:
self._start_delta = None
self._end_delta = None
else:
self._start_delta = self._delta(res.start)
if self._start_delta:
self._end_delta = self._delta(res.end, isend=1)
self.hasdst = bool(self._start_delta)
def _delta(self, x, isend=0):
# CHANGED IN VENDORED VERSION
from .. import relativedelta
kwargs = {}
if x.month is not None:
kwargs["month"] = x.month
if x.weekday is not None:
kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
if x.week > 0:
kwargs["day"] = 1
else:
kwargs["day"] = 31
elif x.day:
kwargs["day"] = x.day
elif x.yday is not None:
kwargs["yearday"] = x.yday
elif x.jyday is not None:
kwargs["nlyearday"] = x.jyday
if not kwargs:
# Default is to start on first sunday of april, and end
# on last sunday of october.
if not isend:
kwargs["month"] = 4
kwargs["day"] = 1
kwargs["weekday"] = relativedelta.SU(+1)
else:
kwargs["month"] = 10
kwargs["day"] = 31
kwargs["weekday"] = relativedelta.SU(-1)
if x.time is not None:
kwargs["seconds"] = x.time
else:
# Default is 2AM.
kwargs["seconds"] = 7200
if isend:
# Convert to standard time, to follow the documented way
# of working with the extra hour. See the documentation
# of the tzinfo class.
delta = self._dst_offset - self._std_offset
kwargs["seconds"] -= delta.seconds + delta.days * 86400
return relativedelta.relativedelta(**kwargs)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
| tzstr |
python | python__mypy | mypy/visitor.py | {
"start": 5136,
"end": 8009
} | class ____(Generic[T]):
# Definitions
@abstractmethod
def visit_assignment_stmt(self, o: mypy.nodes.AssignmentStmt, /) -> T:
pass
@abstractmethod
def visit_for_stmt(self, o: mypy.nodes.ForStmt, /) -> T:
pass
@abstractmethod
def visit_with_stmt(self, o: mypy.nodes.WithStmt, /) -> T:
pass
@abstractmethod
def visit_del_stmt(self, o: mypy.nodes.DelStmt, /) -> T:
pass
@abstractmethod
def visit_func_def(self, o: mypy.nodes.FuncDef, /) -> T:
pass
@abstractmethod
def visit_overloaded_func_def(self, o: mypy.nodes.OverloadedFuncDef, /) -> T:
pass
@abstractmethod
def visit_class_def(self, o: mypy.nodes.ClassDef, /) -> T:
pass
@abstractmethod
def visit_global_decl(self, o: mypy.nodes.GlobalDecl, /) -> T:
pass
@abstractmethod
def visit_nonlocal_decl(self, o: mypy.nodes.NonlocalDecl, /) -> T:
pass
@abstractmethod
def visit_decorator(self, o: mypy.nodes.Decorator, /) -> T:
pass
# Module structure
@abstractmethod
def visit_import(self, o: mypy.nodes.Import, /) -> T:
pass
@abstractmethod
def visit_import_from(self, o: mypy.nodes.ImportFrom, /) -> T:
pass
@abstractmethod
def visit_import_all(self, o: mypy.nodes.ImportAll, /) -> T:
pass
# Statements
@abstractmethod
def visit_block(self, o: mypy.nodes.Block, /) -> T:
pass
@abstractmethod
def visit_expression_stmt(self, o: mypy.nodes.ExpressionStmt, /) -> T:
pass
@abstractmethod
def visit_operator_assignment_stmt(self, o: mypy.nodes.OperatorAssignmentStmt, /) -> T:
pass
@abstractmethod
def visit_while_stmt(self, o: mypy.nodes.WhileStmt, /) -> T:
pass
@abstractmethod
def visit_return_stmt(self, o: mypy.nodes.ReturnStmt, /) -> T:
pass
@abstractmethod
def visit_assert_stmt(self, o: mypy.nodes.AssertStmt, /) -> T:
pass
@abstractmethod
def visit_if_stmt(self, o: mypy.nodes.IfStmt, /) -> T:
pass
@abstractmethod
def visit_break_stmt(self, o: mypy.nodes.BreakStmt, /) -> T:
pass
@abstractmethod
def visit_continue_stmt(self, o: mypy.nodes.ContinueStmt, /) -> T:
pass
@abstractmethod
def visit_pass_stmt(self, o: mypy.nodes.PassStmt, /) -> T:
pass
@abstractmethod
def visit_raise_stmt(self, o: mypy.nodes.RaiseStmt, /) -> T:
pass
@abstractmethod
def visit_try_stmt(self, o: mypy.nodes.TryStmt, /) -> T:
pass
@abstractmethod
def visit_match_stmt(self, o: mypy.nodes.MatchStmt, /) -> T:
pass
@abstractmethod
def visit_type_alias_stmt(self, o: mypy.nodes.TypeAliasStmt, /) -> T:
pass
@trait
@mypyc_attr(allow_interpreted_subclasses=True)
| StatementVisitor |
python | sqlalchemy__sqlalchemy | test/dialect/oracle/test_types.py | {
"start": 47272,
"end": 49995
} | class ____(fixtures.TestBase):
"""
test the numeric output_type_handler when using non-US locale for NLS_LANG.
"""
__only_on__ = ("oracle+cx_oracle", "oracle+oracledb")
__backend__ = True
def setup_test(self):
connect = testing.db.pool._creator
def _creator():
conn = connect()
cursor = conn.cursor()
cursor.execute("ALTER SESSION SET NLS_TERRITORY='GERMANY'")
cursor.close()
return conn
self.engine = testing_engine(options={"creator": _creator})
def teardown_test(self):
self.engine.dispose()
def test_detection(self):
# revised as of #8744
with self.engine.connect() as conn:
connection = conn.connection
with connection.cursor() as cursor:
cx_Oracle = self.engine.dialect.dbapi
def output_type_handler(
cursor, name, defaultType, size, precision, scale
):
return cursor.var(
cx_Oracle.STRING, 255, arraysize=cursor.arraysize
)
cursor.outputtypehandler = output_type_handler
cursor.execute("SELECT 1.1 FROM DUAL")
row = cursor.fetchone()
decimal_char = row[0][1]
if testing.against("+cx_oracle"):
eq_(decimal_char, ",")
else:
assert decimal_char in ",."
eq_(conn.dialect._decimal_char, decimal_char)
@testing.combinations(
("SELECT 0.1 FROM DUAL", decimal.Decimal("0.1"), {}),
("SELECT CAST(15 AS INTEGER) FROM DUAL", 15, {}),
(
"SELECT CAST(15 AS NUMERIC(3, 1)) FROM DUAL",
decimal.Decimal("15"),
{},
),
(
"SELECT CAST(0.1 AS NUMERIC(5, 2)) FROM DUAL",
decimal.Decimal("0.1"),
{},
),
(
"SELECT :num FROM DUAL",
decimal.Decimal("2.5"),
{"num": decimal.Decimal("2.5")},
),
(
text(
"SELECT CAST(28.532 AS NUMERIC(5, 3)) " "AS val FROM DUAL"
).columns(val=Numeric(5, 3, asdecimal=True)),
decimal.Decimal("28.532"),
{},
),
)
def test_output_type_handler(self, stmt, expected, kw):
with self.engine.connect() as conn:
if isinstance(stmt, str):
test_exp = conn.exec_driver_sql(stmt, kw).scalar()
else:
test_exp = conn.scalar(stmt, **kw)
eq_(test_exp, expected)
assert type(test_exp) is type(expected)
| EuroNumericTest |
python | fluentpython__example-code-2e | 17-it-generator/aritprog_v1.py | {
"start": 726,
"end": 1363
} | class ____:
def __init__(self, begin, step, end=None): # <1>
self.begin = begin
self.step = step
self.end = end # None -> "infinite" series
def __iter__(self):
result_type = type(self.begin + self.step) # <2>
result = result_type(self.begin) # <3>
forever = self.end is None # <4>
index = 0
while forever or result < self.end: # <5>
yield result # <6>
index += 1
result = self.begin + self.step * index # <7>
# end::ARITPROG_CLASS[]
| ArithmeticProgression |
python | coleifer__peewee | tests/fields.py | {
"start": 46018,
"end": 49570
} | class ____(ModelTestCase):
requires = [NQ, NQItem]
def setUp(self):
super(TestForeignKeyLazyLoad, self).setUp()
with self.database.atomic():
a1, a2, a3, a4 = [NQ.create(name='a%s' % i) for i in range(1, 5)]
ai = NQItem.create(nq=a1, nq_null=a2, nq_lazy=a3, nq_lazy_null=a4)
b = NQ.create(name='b')
bi = NQItem.create(nq=b, nq_lazy=b)
def test_doesnotexist_lazy_load(self):
n = NQ.create(name='n1')
i = NQItem.create(nq=n, nq_null=n, nq_lazy=n, nq_lazy_null=n)
i_db = NQItem.select(NQItem.id).where(NQItem.nq == n).get()
with self.assertQueryCount(0):
# Only raise DoesNotExist for non-nullable *and* lazy-load=True.
# Otherwise we just return None.
self.assertRaises(NQ.DoesNotExist, lambda: i_db.nq)
self.assertTrue(i_db.nq_null is None)
self.assertTrue(i_db.nq_lazy is None)
self.assertTrue(i_db.nq_lazy_null is None)
def test_foreign_key_lazy_load(self):
a1, a2, a3, a4 = (NQ.select()
.where(NQ.name.startswith('a'))
.order_by(NQ.name))
b = NQ.get(NQ.name == 'b')
ai = NQItem.get(NQItem.nq_id == a1.id)
bi = NQItem.get(NQItem.nq_id == b.id)
# Accessing the lazy foreign-key fields will not result in any queries
# being executed.
with self.assertQueryCount(0):
self.assertEqual(ai.nq_lazy, a3.id)
self.assertEqual(ai.nq_lazy_null, a4.id)
self.assertEqual(bi.nq_lazy, b.id)
self.assertTrue(bi.nq_lazy_null is None)
self.assertTrue(bi.nq_null is None)
# Accessing the regular foreign-key fields uses a query to get the
# related model instance.
with self.assertQueryCount(2):
self.assertEqual(ai.nq.id, a1.id)
self.assertEqual(ai.nq_null.id, a2.id)
with self.assertQueryCount(1):
self.assertEqual(bi.nq.id, b.id)
def test_fk_lazy_load_related_instance(self):
nq = NQ(name='b1')
nqi = NQItem(nq=nq, nq_null=nq, nq_lazy=nq, nq_lazy_null=nq)
nq.save()
nqi.save()
with self.assertQueryCount(1):
nqi_db = NQItem.get(NQItem.id == nqi.id)
self.assertEqual(nqi_db.nq_lazy, nq.id)
self.assertEqual(nqi_db.nq_lazy_null, nq.id)
def test_fk_lazy_select_related(self):
NA, NB, NC, ND = [NQ.alias(a) for a in ('na', 'nb', 'nc', 'nd')]
LO = JOIN.LEFT_OUTER
query = (NQItem.select(NQItem, NA, NB, NC, ND)
.join_from(NQItem, NA, LO, on=NQItem.nq)
.join_from(NQItem, NB, LO, on=NQItem.nq_null)
.join_from(NQItem, NC, LO, on=NQItem.nq_lazy)
.join_from(NQItem, ND, LO, on=NQItem.nq_lazy_null)
.order_by(NQItem.id))
# If we explicitly / eagerly select lazy foreign-key models, they
# behave just like regular foreign keys.
with self.assertQueryCount(1):
ai, bi = [ni for ni in query]
self.assertEqual(ai.nq.name, 'a1')
self.assertEqual(ai.nq_null.name, 'a2')
self.assertEqual(ai.nq_lazy.name, 'a3')
self.assertEqual(ai.nq_lazy_null.name, 'a4')
self.assertEqual(bi.nq.name, 'b')
self.assertEqual(bi.nq_lazy.name, 'b')
self.assertTrue(bi.nq_null is None)
self.assertTrue(bi.nq_lazy_null is None)
| TestForeignKeyLazyLoad |
python | doocs__leetcode | solution/0700-0799/0797.All Paths From Source to Target/Solution.py | {
"start": 0,
"end": 405
} | class ____:
def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
n = len(graph)
q = deque([[0]])
ans = []
while q:
path = q.popleft()
u = path[-1]
if u == n - 1:
ans.append(path)
continue
for v in graph[u]:
q.append(path + [v])
return ans
| Solution |
python | realpython__materials | queue/src/thread_safe_queues.py | {
"start": 2478,
"end": 5076
} | class ____:
def __init__(self, buffer, producers, consumers):
self.buffer = buffer
self.producers = producers
self.consumers = consumers
def animate(self):
with Live(self.render(), screen=True, refresh_per_second=10) as live:
while True:
live.update(self.render())
def render(self):
match self.buffer:
case PriorityQueue():
title = "Priority Queue"
products = map(str, reversed(list(self.buffer.queue)))
case LifoQueue():
title = "Stack"
products = list(self.buffer.queue)
case Queue():
title = "Queue"
products = reversed(list(self.buffer.queue))
case _:
title = products = ""
rows = [Panel(f"[bold]{title}:[/] {', '.join(products)}", width=82)]
pairs = zip_longest(self.producers, self.consumers)
for i, (producer, consumer) in enumerate(pairs, 1):
left_panel = self.panel(producer, f"Producer {i}")
right_panel = self.panel(consumer, f"Consumer {i}")
rows.append(Columns([left_panel, right_panel], width=40))
return Group(*rows)
def panel(self, worker, title):
if worker is None:
return ""
padding = " " * int(29 / 100 * worker.progress)
align = Align(padding + worker.state, align="left", vertical="middle")
return Panel(align, height=5, title=title)
def main(args):
buffer = QUEUE_TYPES[args.queue]()
products = PRIORITIZED_PRODUCTS if args.queue == "heap" else PRODUCTS
producers = [
Producer(args.producer_speed, buffer, products)
for _ in range(args.producers)
]
consumers = [
Consumer(args.consumer_speed, buffer) for _ in range(args.consumers)
]
for producer in producers:
producer.start()
for consumer in consumers:
consumer.start()
view = View(buffer, producers, consumers)
view.animate()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-q", "--queue", choices=QUEUE_TYPES, default="fifo")
parser.add_argument("-p", "--producers", type=int, default=3)
parser.add_argument("-c", "--consumers", type=int, default=2)
parser.add_argument("-ps", "--producer-speed", type=int, default=1)
parser.add_argument("-cs", "--consumer-speed", type=int, default=1)
return parser.parse_args()
if __name__ == "__main__":
try:
main(parse_args())
except KeyboardInterrupt:
pass
| View |
python | pytorch__pytorch | torch/_dynamo/precompile_context.py | {
"start": 1757,
"end": 7420
} | class ____:
"""
PrecompileContext is a special CacheArtifactManager for handling precompilation
It uses the same interface as CacheArtifactManager, but handles deserialization differently: instead
of placing each artifact into respective caches, it will stitch all the cache artifacts for a single key
together and place it into a global Precompile Cache.
PrecompileContext has two main portions: dynamo_cache_entries and backend_cache_artifacts.
When saving, PrecompileContext.serialize() will serialize all dynamo cache entries along with any PrecompileCacheArtifacts that
are needed to save those dynamo cache entries.
The following artifact types are supported by PrecompileContext:
- BundledAOTAutogradCacheArtifact
"""
# Protected by the compile_lock
# _backend_artifacts_by_key organizes results by the key of each artifact.
# Each object here must be serializable
_backend_artifacts_by_key: dict[_BackendId, BackendCacheArtifact[Any]] = {}
# On call to `serialize()`, all cache artifacts in _dynamo_cache_entries are converted
# into DynamoCacheArtifacts and added to _new_cache_artifacts for serialization
_dynamo_cache_entries: dict[str, _DynamoCacheEntry] = {}
@classmethod
def clear(cls) -> None:
cls._backend_artifacts_by_key.clear()
cls._dynamo_cache_entries.clear()
@classmethod
def record_artifact(
cls,
artifact: BackendCacheArtifact[Any],
) -> None:
"""
Records a backend artifact to be used with dynamo cache entries
"""
cls._backend_artifacts_by_key[_BackendId(artifact.key)] = copy.deepcopy(
artifact
)
@classmethod
def record_dynamo_cache_entry(
cls, cache_entry: _DynamoCacheEntry, key: str
) -> None:
cls._dynamo_cache_entries[key] = cache_entry
@classmethod
def edit_artifact(cls, key: str, edit_fn: Callable[..., Any]) -> None:
"""
Edit the content of an existing artifact
"""
assert key in cls._backend_artifacts_by_key, f"Key {key} not found in artifacts"
artifact = cls._backend_artifacts_by_key[_BackendId(key)]
artifact.edit_contents(edit_fn)
@classmethod
def serialize_artifact_by_key(cls, key: str) -> Optional[BackendCacheArtifact[Any]]:
"""
Return the backend cache artifact with the associated key
"""
return cls._backend_artifacts_by_key.get(_BackendId(key), None)
@staticmethod
def dump_debug_info(
dynamo_entries: dict[str, _DynamoCacheEntry],
backend_artifacts: dict[_BackendId, BackendCacheArtifact[Any]],
) -> dict[str, Any]:
"""
Return a JSON serializable debug dump of all entries in the precompile context
Called in serialize before serialization, and in populate_caches after deserialization
"""
# Print debug information
debug_info: defaultdict[str, list[Any]] = defaultdict(list)
for key, cache_entry in dynamo_entries.items():
info = cache_entry.debug_info()
info["key"] = key
debug_info["dynamo"].append(info)
for artifact in backend_artifacts.values():
debug_info["backends"].append(artifact.key)
return debug_info
@classmethod
def save_to_dynamo_cache(cls) -> dict[str, Any]:
precompile_cache_entries, debug_info = cls.create_cache_entries()
for key, entry in precompile_cache_entries.items():
DynamoCache.write(entry, key)
return debug_info
@classmethod
def create_cache_entries(
cls,
) -> tuple[dict[str, PrecompileCacheEntry], dict[str, Any]]:
"""
Grabs all the cache entries in the precompile context and
stitches them together into full PrecompileCacheEntries.
"""
dynamo_entries = cls._dynamo_cache_entries
backend_artifacts = cls._backend_artifacts_by_key
num_artifacts = len(dynamo_entries)
debug_info = PrecompileContext.dump_debug_info(
dynamo_entries, backend_artifacts
)
debug_str = json.dumps(
{
"num_entries": num_artifacts,
"artifacts": debug_info,
},
)
torch._logging.trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "dynamo_cache_entries",
"encoding": "json",
},
payload_fn=lambda: debug_str,
expect_trace_id=False,
)
precompile_cache_entries = {}
for key, cache_entry in dynamo_entries.items():
try:
result = PrecompileCacheEntry.from_cache_entry(
cache_entry, backend_artifacts
)
if result is not None:
precompile_cache_entries[key] = result
except Exception as e:
logger.warning("Failed to create cache entry %s", key, exc_info=True)
error = e
data = json.dumps(
{
"key": key,
"error": str(error),
}
)
torch._logging.trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "dynamo_cache_exception",
"encoding": "json",
},
payload_fn=lambda: data,
)
continue
return precompile_cache_entries, debug_info
| PrecompileContext |
python | falconry__falcon | tests/_inspect_fixture.py | {
"start": 44,
"end": 87
} | class ____(CompiledRouter):
pass
| MyRouter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.