language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | torch/_export/db/examples/dynamic_shape_if_guard.py | {
"start": 41,
"end": 560
} | class ____(torch.nn.Module):
"""
`if` statement with backed dynamic shape predicate will be specialized into
one particular branch and generate a guard. However, export will fail if the
the dimension is marked as dynamic shape from higher level API.
"""
def forward(self, x):
if x.shape[0] == 3:
return x.cos()
return x.sin()
example_args = (torch.randn(3, 2, 2),)
tags = {"torch.dynamic-shape", "python.control-flow"}
model = DynamicShapeIfGuard()
| DynamicShapeIfGuard |
python | walkccc__LeetCode | solutions/2102. Sequentially Ordinal Rank Tracker/2102.py | {
"start": 253,
"end": 813
} | class ____:
def __init__(self):
self.l = []
self.r = []
self.k = 0 the number of times get() called
def add(self, name: str, score: int) -> None:
heapq.heappush(self.l, Location(name, score))
if len(self.l) > self.k + 1:
location = heapq.heappop(self.l)
heapq.heappush(self.r, (-location.score, location.name))
def get(self) -> str:
name = self.l[0].name
if self.r:
topScore, topName = heapq.heappop(self.r)
heapq.heappush(self.l, Location(topName, -topScore))
self.k += 1
return name
| SORTracker |
python | mkdocstrings__mkdocstrings | tests/fixtures/string_annotation.py | {
"start": 29,
"end": 119
} | class ____:
@property
def foo() -> Literal["hi"]:
"hi"
return "hi"
| Foo |
python | getsentry__sentry | src/sentry/api/serializers/release_details_types.py | {
"start": 753,
"end": 948
} | class ____(AuthorOptional):
id: int
name: str
username: str
email: str
avatarUrl: str
isActive: bool
hasPasswordAuth: bool
isManaged: bool
dateJoined: str
| Author |
python | Textualize__textual | tests/css/test_initial.py | {
"start": 204,
"end": 313
} | class ____(Base):
DEFAULT_CSS = """
CustomWidget1 {
background: red
}
"""
| CustomWidget1 |
python | gevent__gevent | src/greentest/3.14/test_urllib2.py | {
"start": 12208,
"end": 12406
} | class ____:
def add_cookie_header(self, request):
self.ach_req = request
def extract_cookies(self, response, request):
self.ec_req, self.ec_r = request, response
| MockCookieJar |
python | huggingface__transformers | src/transformers/models/apertus/modeling_apertus.py | {
"start": 21934,
"end": 22145
} | class ____(GenericForTokenClassification, ApertusPreTrainedModel):
pass
__all__ = ["ApertusModel", "ApertusForCausalLM", "ApertusForTokenClassification", "ApertusPreTrainedModel"]
| ApertusForTokenClassification |
python | great-expectations__great_expectations | tests/datasource/fluent/data_asset/test_data_asset.py | {
"start": 13006,
"end": 15956
} | class ____(FluentBaseModel):
"""Partitioner that adhere's to the expected protocol."""
sort_ascending: bool = True
@property
def param_names(self) -> List[str]:
return ["a", "b"]
@pytest.fixture
def metadata_1_1(mocker):
return mocker.MagicMock(spec=Batch, metadata={"a": 1, "b": 1})
@pytest.fixture
def metadata_1_2(mocker):
return mocker.MagicMock(spec=Batch, metadata={"a": 1, "b": 2})
@pytest.fixture
def metadata_2_1(mocker):
return mocker.MagicMock(spec=Batch, metadata={"a": 2, "b": 1})
@pytest.fixture
def metadata_2_2(mocker):
return mocker.MagicMock(spec=Batch, metadata={"a": 2, "b": 2})
@pytest.fixture
def metadata_2_none(mocker):
return mocker.MagicMock(spec=Batch, metadata={"a": 2, "b": None})
@pytest.fixture
def metadata_none_2(mocker):
return mocker.MagicMock(spec=Batch, metadata={"a": None, "b": 2})
@pytest.fixture
def metadata_none_none(mocker):
return mocker.MagicMock(spec=Batch, metadata={"a": None, "b": None})
@pytest.mark.unit
def test_sort_batches__ascending(
empty_data_asset,
metadata_1_1,
metadata_1_2,
metadata_2_1,
metadata_2_2,
metadata_none_2,
metadata_2_none,
metadata_none_none,
):
partitioner = _MyPartitioner(sort_ascending=True)
batches = [
metadata_1_1,
metadata_1_2,
metadata_2_1,
metadata_2_2,
metadata_2_none,
metadata_none_2,
metadata_none_none,
]
batches = empty_data_asset.sort_batches(batches, partitioner)
assert batches == [
metadata_none_none,
metadata_none_2,
metadata_1_1,
metadata_1_2,
metadata_2_none,
metadata_2_1,
metadata_2_2,
]
@pytest.mark.unit
def test_sort_batches__descending(
empty_data_asset,
metadata_1_1,
metadata_1_2,
metadata_2_1,
metadata_2_2,
metadata_none_2,
metadata_2_none,
metadata_none_none,
):
partitioner = _MyPartitioner(sort_ascending=False)
batches = [
metadata_1_1,
metadata_1_2,
metadata_2_1,
metadata_2_2,
metadata_2_none,
metadata_none_2,
metadata_none_none,
]
batches = empty_data_asset.sort_batches(batches, partitioner)
assert batches == [
metadata_2_2,
metadata_2_1,
metadata_2_none,
metadata_1_2,
metadata_1_1,
metadata_none_2,
metadata_none_none,
]
@pytest.mark.unit
def test_sort_batches__requires_keys(empty_data_asset, mocker):
partitioner = _MyPartitioner()
wheres_my_b = mocker.MagicMock(spec=Batch, metadata={"a": 1})
i_have_a_b = mocker.MagicMock(spec=Batch, metadata={"a": 1, "b": 2})
expected_error = "Trying to sort my data asset for batch configs's batches on key b"
with pytest.raises(KeyError, match=expected_error):
empty_data_asset.sort_batches([wheres_my_b, i_have_a_b], partitioner)
| _MyPartitioner |
python | huggingface__transformers | src/transformers/models/olmo2/modeling_olmo2.py | {
"start": 3072,
"end": 9396
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Olmo2Config, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[Olmo2Config] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos, sin
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
q_type, k_type = q.dtype, k.dtype
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed.to(q_type), k_embed.to(k_type)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
| Olmo2RotaryEmbedding |
python | sqlalchemy__sqlalchemy | test/orm/test_core_compilation.py | {
"start": 76019,
"end": 83873
} | class ____(InheritedTest, AssertsCompiledSQL):
__dialect__ = "default"
straight_company_to_person_expected = (
"SELECT companies.company_id, companies.name FROM companies "
"JOIN people ON companies.company_id = people.company_id"
)
default_pjoin = (
"(people LEFT OUTER "
"JOIN engineers ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers "
"ON people.person_id = managers.person_id "
"LEFT OUTER JOIN boss ON managers.person_id = boss.boss_id) "
"ON companies.company_id = people.company_id"
)
flat_aliased_pjoin = (
"(people AS people_1 LEFT OUTER JOIN engineers AS "
"engineers_1 ON people_1.person_id = engineers_1.person_id "
"LEFT OUTER JOIN managers AS managers_1 "
"ON people_1.person_id = managers_1.person_id "
"LEFT OUTER JOIN boss AS boss_1 ON "
"managers_1.person_id = boss_1.boss_id) "
"ON companies.company_id = people_1.company_id"
)
aliased_pjoin = (
"(SELECT people.person_id AS people_person_id, people.company_id "
"AS people_company_id, people.name AS people_name, people.type "
"AS people_type, engineers.person_id AS engineers_person_id, "
"engineers.status AS engineers_status, engineers.engineer_name "
"AS engineers_engineer_name, engineers.primary_language "
"AS engineers_primary_language, managers.person_id "
"AS managers_person_id, managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name, "
"boss.boss_id AS boss_boss_id, boss.golf_swing AS boss_golf_swing "
"FROM people LEFT OUTER JOIN engineers ON people.person_id = "
"engineers.person_id LEFT OUTER JOIN managers ON "
"people.person_id = managers.person_id LEFT OUTER JOIN boss "
"ON managers.person_id = boss.boss_id) AS anon_1 "
"ON companies.company_id = anon_1.people_company_id"
)
person_paperwork_expected = (
"SELECT companies.company_id, companies.name FROM companies "
"JOIN people ON companies.company_id = people.company_id "
"JOIN paperwork ON people.person_id = paperwork.person_id"
)
c_to_p_whereclause = (
"SELECT companies.company_id, companies.name FROM companies "
"JOIN people ON companies.company_id = people.company_id "
"WHERE people.name = :name_1"
)
poly_columns = "SELECT people.person_id FROM people"
def test_straight(self):
Company, Person, Manager, Engineer = self.classes(
"Company", "Person", "Manager", "Engineer"
)
stmt1 = select(Company).select_from(
orm_join(Company, Person, Company.employees)
)
stmt2 = select(Company).join(Company.employees)
stmt3 = (
fixture_session()
.query(Company)
.join(Company.employees)
._final_statement(legacy_query_style=False)
)
self.assert_compile(stmt1, self.straight_company_to_person_expected)
self.assert_compile(stmt2, self.straight_company_to_person_expected)
self.assert_compile(stmt3, self.straight_company_to_person_expected)
def test_columns(self):
Company, Person, Manager, Engineer = self.classes(
"Company", "Person", "Manager", "Engineer"
)
stmt = select(Person.person_id)
self.assert_compile(stmt, self.poly_columns)
def test_straight_whereclause(self):
Company, Person, Manager, Engineer = self.classes(
"Company", "Person", "Manager", "Engineer"
)
stmt1 = (
select(Company)
.select_from(orm_join(Company, Person, Company.employees))
.where(Person.name == "ed")
)
stmt2 = (
select(Company).join(Company.employees).where(Person.name == "ed")
)
stmt3 = (
fixture_session()
.query(Company)
.join(Company.employees)
.filter(Person.name == "ed")
._final_statement(legacy_query_style=False)
)
self.assert_compile(stmt1, self.c_to_p_whereclause)
self.assert_compile(stmt2, self.c_to_p_whereclause)
self.assert_compile(stmt3, self.c_to_p_whereclause)
def test_two_level(self):
Company, Person, Paperwork = self.classes(
"Company", "Person", "Paperwork"
)
stmt1 = select(Company).select_from(
orm_join(Company, Person, Company.employees).join(
Paperwork, Person.paperwork
)
)
stmt2 = select(Company).join(Company.employees).join(Person.paperwork)
stmt3 = (
fixture_session()
.query(Company)
.join(Company.employees)
.join(Person.paperwork)
._final_statement(legacy_query_style=False)
)
self.assert_compile(stmt1, self.person_paperwork_expected)
self.assert_compile(stmt2, self.person_paperwork_expected)
self.assert_compile(stmt3, self.person_paperwork_expected)
def test_wpoly_of_type(self):
Company, Person, Manager, Engineer = self.classes(
"Company", "Person", "Manager", "Engineer"
)
p1 = with_polymorphic(Person, "*")
stmt1 = select(Company).select_from(
orm_join(Company, p1, Company.employees.of_type(p1))
)
stmt2 = select(Company).join(Company.employees.of_type(p1))
stmt3 = (
fixture_session()
.query(Company)
.join(Company.employees.of_type(p1))
._final_statement(legacy_query_style=False)
)
expected = (
"SELECT companies.company_id, companies.name "
"FROM companies JOIN %s" % self.default_pjoin
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
self.assert_compile(stmt3, expected)
def test_wpoly_aliased_of_type(self):
Company, Person, Manager, Engineer = self.classes(
"Company", "Person", "Manager", "Engineer"
)
s = fixture_session()
p1 = with_polymorphic(Person, "*", aliased=True)
stmt1 = select(Company).select_from(
orm_join(Company, p1, Company.employees.of_type(p1))
)
stmt2 = select(Company).join(p1, Company.employees.of_type(p1))
stmt3 = (
s.query(Company)
.join(Company.employees.of_type(p1))
._final_statement(legacy_query_style=False)
)
expected = (
"SELECT companies.company_id, companies.name FROM companies "
"JOIN %s" % self.aliased_pjoin
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
self.assert_compile(stmt3, expected)
def test_wpoly_aliased_flat_of_type(self):
Company, Person, Manager, Engineer = self.classes(
"Company", "Person", "Manager", "Engineer"
)
p1 = with_polymorphic(Person, "*", aliased=True, flat=True)
stmt1 = select(Company).select_from(
orm_join(Company, p1, Company.employees.of_type(p1))
)
stmt2 = select(Company).join(p1, Company.employees.of_type(p1))
stmt3 = (
fixture_session()
.query(Company)
.join(Company.employees.of_type(p1))
._final_statement(legacy_query_style=False)
)
expected = (
"SELECT companies.company_id, companies.name FROM companies "
"JOIN %s" % self.flat_aliased_pjoin
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
self.assert_compile(stmt3, expected)
| RelationshipNaturalInheritedTest |
python | pytorch__pytorch | test/test_type_info.py | {
"start": 456,
"end": 5288
} | class ____(TestCase):
def test_invalid_input(self):
for dtype in [
torch.float16,
torch.float32,
torch.float64,
torch.bfloat16,
torch.complex64,
torch.complex128,
torch.bool,
]:
with self.assertRaises(TypeError):
_ = torch.iinfo(dtype)
for dtype in [
torch.int64,
torch.int32,
torch.int16,
torch.int8,
torch.uint8,
torch.bool,
]:
with self.assertRaises(TypeError):
_ = torch.finfo(dtype)
with self.assertRaises(RuntimeError):
dtype.to_complex()
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_iinfo(self):
for dtype in [torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8]:
x = torch.zeros((2, 2), dtype=dtype)
xinfo = torch.iinfo(x.dtype)
xn = x.cpu().numpy()
xninfo = np.iinfo(xn.dtype)
self.assertEqual(xinfo.bits, xninfo.bits)
self.assertEqual(xinfo.max, xninfo.max)
self.assertEqual(xinfo.min, xninfo.min)
self.assertEqual(xinfo.dtype, xninfo.dtype)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_finfo(self):
for dtype in [
torch.float16,
torch.float32,
torch.float64,
torch.complex64,
torch.complex128,
]:
x = torch.zeros((2, 2), dtype=dtype)
xinfo = torch.finfo(x.dtype)
xn = x.cpu().numpy()
xninfo = np.finfo(xn.dtype)
self.assertEqual(xinfo.bits, xninfo.bits)
self.assertEqual(xinfo.max, xninfo.max)
self.assertEqual(xinfo.min, xninfo.min)
self.assertEqual(xinfo.eps, xninfo.eps)
self.assertEqual(xinfo.tiny, xninfo.tiny)
self.assertEqual(xinfo.resolution, xninfo.resolution)
self.assertEqual(xinfo.dtype, xninfo.dtype)
if not dtype.is_complex:
with set_default_dtype(dtype):
self.assertEqual(torch.finfo(dtype), torch.finfo())
# Special test case for BFloat16 type
x = torch.zeros((2, 2), dtype=torch.bfloat16)
xinfo = torch.finfo(x.dtype)
self.assertEqual(xinfo.bits, 16)
self.assertEqual(xinfo.max, 3.38953e38)
self.assertEqual(xinfo.min, -3.38953e38)
self.assertEqual(xinfo.eps, 0.0078125)
self.assertEqual(xinfo.tiny, 1.17549e-38)
self.assertEqual(xinfo.tiny, xinfo.smallest_normal)
self.assertEqual(xinfo.resolution, 0.01)
self.assertEqual(xinfo.dtype, "bfloat16")
with set_default_dtype(x.dtype):
self.assertEqual(torch.finfo(x.dtype), torch.finfo())
# Special test case for Float8_E5M2
xinfo = torch.finfo(torch.float8_e5m2)
self.assertEqual(xinfo.bits, 8)
self.assertEqual(xinfo.max, 57344.0)
self.assertEqual(xinfo.min, -57344.0)
self.assertEqual(xinfo.eps, 0.25)
self.assertEqual(xinfo.tiny, 6.10352e-05)
self.assertEqual(xinfo.resolution, 1.0)
self.assertEqual(xinfo.dtype, "float8_e5m2")
# Special test case for Float8_E4M3FN
xinfo = torch.finfo(torch.float8_e4m3fn)
self.assertEqual(xinfo.bits, 8)
self.assertEqual(xinfo.max, 448.0)
self.assertEqual(xinfo.min, -448.0)
self.assertEqual(xinfo.eps, 0.125)
self.assertEqual(xinfo.tiny, 0.015625)
self.assertEqual(xinfo.resolution, 1.0)
self.assertEqual(xinfo.dtype, "float8_e4m3fn")
def test_to_complex(self):
# Regression test for https://github.com/pytorch/pytorch/issues/124868
# If reference count is leaked this would be a set of 10 elements
ref_cnt = {sys.getrefcount(torch.float32.to_complex()) for _ in range(10)}
self.assertLess(len(ref_cnt), 3)
self.assertEqual(torch.float64.to_complex(), torch.complex128)
self.assertEqual(torch.float32.to_complex(), torch.complex64)
self.assertEqual(torch.float16.to_complex(), torch.complex32)
def test_to_real(self):
# Regression test for https://github.com/pytorch/pytorch/issues/124868
# If reference count is leaked this would be a set of 10 elements
ref_cnt = {sys.getrefcount(torch.cfloat.to_real()) for _ in range(10)}
self.assertLess(len(ref_cnt), 3)
self.assertEqual(torch.complex128.to_real(), torch.double)
self.assertEqual(torch.complex64.to_real(), torch.float32)
self.assertEqual(torch.complex32.to_real(), torch.float16)
if __name__ == "__main__":
TestCase._default_dtype_check_enabled = True
run_tests()
| TestDTypeInfo |
python | wandb__wandb | wandb/vendor/pygments/lexers/dalvik.py | {
"start": 444,
"end": 4420
} | class ____(RegexLexer):
"""
For `Smali <http://code.google.com/p/smali/>`_ (Android/Dalvik) assembly
code.
.. versionadded:: 1.6
"""
name = 'Smali'
aliases = ['smali']
filenames = ['*.smali']
mimetypes = ['text/smali']
tokens = {
'root': [
include('comment'),
include('label'),
include('field'),
include('method'),
include('class'),
include('directive'),
include('access-modifier'),
include('instruction'),
include('literal'),
include('punctuation'),
include('type'),
include('whitespace')
],
'directive': [
(r'^[ \t]*\.(class|super|implements|field|subannotation|annotation|'
r'enum|method|registers|locals|array-data|packed-switch|'
r'sparse-switch|catchall|catch|line|parameter|local|prologue|'
r'epilogue|source)', Keyword),
(r'^[ \t]*\.end (field|subannotation|annotation|method|array-data|'
'packed-switch|sparse-switch|parameter|local)', Keyword),
(r'^[ \t]*\.restart local', Keyword),
],
'access-modifier': [
(r'(public|private|protected|static|final|synchronized|bridge|'
r'varargs|native|abstract|strictfp|synthetic|constructor|'
r'declared-synchronized|interface|enum|annotation|volatile|'
r'transient)', Keyword),
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
],
'instruction': [
(r'\b[vp]\d+\b', Name.Builtin), # registers
(r'\b[a-z][A-Za-z0-9/-]+\s+', Text), # instructions
],
'literal': [
(r'".*"', String),
(r'0x[0-9A-Fa-f]+t?', Number.Hex),
(r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+L?', Number.Integer),
],
'field': [
(r'(\$?\b)([\w$]*)(:)',
bygroups(Punctuation, Name.Variable, Punctuation)),
],
'method': [
(r'<(?:cl)?init>', Name.Function), # constructor
(r'(\$?\b)([\w$]*)(\()',
bygroups(Punctuation, Name.Function, Punctuation)),
],
'label': [
(r':\w+', Name.Label),
],
'class': [
# class names in the form Lcom/namespace/ClassName;
# I only want to color the ClassName part, so the namespace part is
# treated as 'Text'
(r'(L)((?:[\w$]+/)*)([\w$]+)(;)',
bygroups(Keyword.Type, Text, Name.Class, Text)),
],
'punctuation': [
(r'->', Punctuation),
(r'[{},():=.-]', Punctuation),
],
'type': [
(r'[ZBSCIJFDV\[]+', Keyword.Type),
],
'comment': [
(r'#.*?\n', Comment),
],
}
def analyse_text(text):
score = 0
if re.search(r'^\s*\.class\s', text, re.MULTILINE):
score += 0.5
if re.search(r'\b((check-cast|instance-of|throw-verification-error'
r')\b|(-to|add|[ais]get|[ais]put|and|cmpl|const|div|'
r'if|invoke|move|mul|neg|not|or|rem|return|rsub|shl|'
r'shr|sub|ushr)[-/])|{|}', text, re.MULTILINE):
score += 0.3
if re.search(r'(\.(catchall|epilogue|restart local|prologue)|'
r'\b(array-data|class-change-error|declared-synchronized|'
r'(field|inline|vtable)@0x[0-9a-fA-F]|generic-error|'
r'illegal-class-access|illegal-field-access|'
r'illegal-method-access|instantiation-error|no-error|'
r'no-such-class|no-such-field|no-such-method|'
r'packed-switch|sparse-switch))\b', text, re.MULTILINE):
score += 0.6
return score
| SmaliLexer |
python | PyCQA__pylint | tests/functional/p/postponed/postponed_evaluation_activated.py | {
"start": 347,
"end": 379
} | class ____:
obj: Other
| Example |
python | Netflix__metaflow | metaflow/_vendor/click/types.py | {
"start": 393,
"end": 2667
} | class ____(object):
"""Helper for converting values through types. The following is
necessary for a valid type:
* it needs a name
* it needs to pass through None unchanged
* it needs to convert from a string
* it needs to convert its result type through unchanged
(eg: needs to be idempotent)
* it needs to be able to deal with param and context being `None`.
This can be the case when the object is used with prompt
inputs.
"""
is_composite = False
#: the descriptive name of this type
name = None
#: if a list of this type is expected and the value is pulled from a
#: string environment variable, this is what splits it up. `None`
#: means any whitespace. For all parameters the general rule is that
#: whitespace splits them up. The exception are paths and files which
#: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
#: Windows).
envvar_list_splitter = None
def __call__(self, value, param=None, ctx=None):
if value is not None:
return self.convert(value, param, ctx)
def get_metavar(self, param):
"""Returns the metavar default for this param if it provides one."""
def get_missing_message(self, param):
"""Optionally might return extra information about a missing
parameter.
.. versionadded:: 2.0
"""
def convert(self, value, param, ctx):
"""Converts the value. This is not invoked for values that are
`None` (the missing value).
"""
return value
def split_envvar_value(self, rv):
"""Given a value from an environment variable this splits it up
into small chunks depending on the defined envvar list splitter.
If the splitter is set to `None`, which means that whitespace splits,
then leading and trailing whitespace is ignored. Otherwise, leading
and trailing splitters usually lead to empty items being included.
"""
return (rv or "").split(self.envvar_list_splitter)
def fail(self, message, param=None, ctx=None):
"""Helper method to fail with an invalid value message."""
raise BadParameter(message, ctx=ctx, param=param)
| ParamType |
python | getsentry__sentry | src/sentry/relay/types/generic_filters.py | {
"start": 282,
"end": 431
} | class ____(TypedDict):
"""Top-level configuration for generic filters."""
version: int
filters: Sequence[GenericFilter]
| GenericFiltersConfig |
python | matplotlib__matplotlib | lib/matplotlib/animation.py | {
"start": 1694,
"end": 3122
} | class ____:
"""Registry of available writer classes by human readable name."""
def __init__(self):
self._registered = dict()
def register(self, name):
"""
Decorator for registering a class under a name.
Example use::
@registry.register(name)
class Foo:
pass
"""
def wrapper(writer_cls):
self._registered[name] = writer_cls
return writer_cls
return wrapper
def is_available(self, name):
"""
Check if given writer is available by name.
Parameters
----------
name : str
Returns
-------
bool
"""
try:
cls = self._registered[name]
except KeyError:
return False
return cls.isAvailable()
def __iter__(self):
"""Iterate over names of available writer class."""
for name in self._registered:
if self.is_available(name):
yield name
def list(self):
"""Get a list of available MovieWriters."""
return [*self]
def __getitem__(self, name):
"""Get an available writer class from its name."""
if self.is_available(name):
return self._registered[name]
raise RuntimeError(f"Requested MovieWriter ({name}) not available")
writers = MovieWriterRegistry()
| MovieWriterRegistry |
python | getsentry__sentry | src/sentry/integrations/api/endpoints/organization_integration_request.py | {
"start": 1863,
"end": 3634
} | class ____(OrganizationRequestChangeEndpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
def post(self, request: Request, organization) -> Response:
"""
Email the organization owners asking them to install an integration.
````````````````````````````````````````````````````````````````````
When a non-owner user views integrations in the integrations directory,
they lack the ability to install them themselves. POSTing to this API
alerts users with permission that there is demand for this integration.
:param string providerSlug: Unique string that identifies the integration.
:param string providerType: One of: first_party, plugin, sentry_app.
:param string message: Optional message from the requester to the owners.
"""
provider_type = str(request.data.get("providerType"))
provider_slug = str(request.data.get("providerSlug"))
message_option = request.data.get("message", "").strip()
requester = request.user
if requester.id in [user.id for user in organization.get_owners()]:
return Response({"detail": "User can install integration"}, status=200)
provider_name = get_provider_name(provider_type, provider_slug)
if not provider_name:
return Response({"detail": f"Provider {provider_slug} not found"}, status=400)
async_send_notification(
IntegrationRequestNotification,
organization,
requester,
provider_type,
provider_slug,
provider_name,
message_option,
)
return Response(status=201)
| OrganizationIntegrationRequestEndpoint |
python | numpy__numpy | numpy/ma/core.py | {
"start": 26330,
"end": 27045
} | class ____:
"""
Define a domain for safe division.
"""
def __init__(self, tolerance=None):
self.tolerance = tolerance
def __call__(self, a, b):
# Delay the selection of the tolerance to here in order to reduce numpy
# import times. The calculation of these parameters is a substantial
# component of numpy's import time.
if self.tolerance is None:
self.tolerance = np.finfo(float).tiny
# don't call ma ufuncs from __array_wrap__ which would fail for scalars
a, b = np.asarray(a), np.asarray(b)
with np.errstate(all='ignore'):
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
| _DomainSafeDivide |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/xcom_arg.py | {
"start": 19133,
"end": 22746
} | class ____(XComArg):
"""
Concatenating multiple XCom references into one.
This is done by calling ``concat()`` on an XComArg to combine it with
others. The effect is similar to Python's :func:`itertools.chain`, but the
return value also supports index access.
"""
args: Sequence[XComArg] = attrs.field(validator=attrs.validators.min_len(1))
def __repr__(self) -> str:
args_iter = iter(self.args)
first = repr(next(args_iter))
rest = ", ".join(repr(arg) for arg in args_iter)
return f"{first}.concat({rest})"
def _serialize(self) -> dict[str, Any]:
return {"args": [serialize_xcom_arg(arg) for arg in self.args]}
def iter_references(self) -> Iterator[tuple[Operator, str]]:
for arg in self.args:
yield from arg.iter_references()
def concat(self, *others: XComArg) -> ConcatXComArg:
# Flatten foo.concat(x).concat(y) into one call.
return ConcatXComArg([*self.args, *others])
def resolve(self, context: Mapping[str, Any]) -> Any:
values = [arg.resolve(context) for arg in self.args]
for value in values:
if not isinstance(value, (Sequence, dict)):
raise ValueError(f"XCom concat expects sequence or dict, not {type(value).__name__}")
return _ConcatResult(values)
_XCOM_ARG_TYPES: Mapping[str, type[XComArg]] = {
"": PlainXComArg,
"concat": ConcatXComArg,
"map": MapXComArg,
"zip": ZipXComArg,
}
def serialize_xcom_arg(value: XComArg) -> dict[str, Any]:
"""Dag serialization interface."""
key = next(k for k, v in _XCOM_ARG_TYPES.items() if isinstance(value, v))
if key:
return {"type": key, **value._serialize()}
return value._serialize()
@singledispatch
def get_task_map_length(
xcom_arg: XComArg, resolved_val: Sized, upstream_map_indexes: dict[str, int]
) -> int | None:
# The base implementation -- specific XComArg subclasses have specialised implementations
raise NotImplementedError()
@get_task_map_length.register
def _(xcom_arg: PlainXComArg, resolved_val: Sized, upstream_map_indexes: dict[str, int]):
task_id = xcom_arg.operator.task_id
if xcom_arg.operator.is_mapped:
# TODO: How to tell if all the upstream TIs finished?
pass
return (upstream_map_indexes.get(task_id) or 1) * len(resolved_val)
@get_task_map_length.register
def _(xcom_arg: MapXComArg, resolved_val: Sized, upstream_map_indexes: dict[str, int]):
return get_task_map_length(xcom_arg.arg, resolved_val, upstream_map_indexes)
@get_task_map_length.register
def _(xcom_arg: ZipXComArg, resolved_val: Sized, upstream_map_indexes: dict[str, int]):
all_lengths = (get_task_map_length(arg, resolved_val, upstream_map_indexes) for arg in xcom_arg.args)
ready_lengths = [length for length in all_lengths if length is not None]
if len(ready_lengths) != len(xcom_arg.args):
return None # If any of the referenced XComs is not ready, we are not ready either.
if is_arg_set(xcom_arg.fillvalue):
return max(ready_lengths)
return min(ready_lengths)
@get_task_map_length.register
def _(xcom_arg: ConcatXComArg, resolved_val: Sized, upstream_map_indexes: dict[str, int]):
all_lengths = (get_task_map_length(arg, resolved_val, upstream_map_indexes) for arg in xcom_arg.args)
ready_lengths = [length for length in all_lengths if length is not None]
if len(ready_lengths) != len(xcom_arg.args):
return None # If any of the referenced XComs is not ready, we are not ready either.
return sum(ready_lengths)
| ConcatXComArg |
python | huggingface__transformers | src/transformers/models/jetmoe/modeling_jetmoe.py | {
"start": 2243,
"end": 2968
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
JetMoeRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| JetMoeRMSNorm |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 12249,
"end": 12604
} | class ____(HTTPException):
"""
base class for exceptions with status codes in the 300s (redirections)
This is an abstract base class for 3xx redirection. It indicates
that further action needs to be taken by the user agent in order
to fulfill the request. It does not necessarily signal an error
condition.
"""
| HTTPRedirection |
python | google__pytype | pytype/errors/error_types.py | {
"start": 4689,
"end": 4856
} | class ____(Exception):
def __init__(self, bad_type: BadType, *args, **kwargs):
self.bad_type = bad_type
super().__init__(bad_type, *args, **kwargs)
| MatchError |
python | facebook__pyre-check | tools/generate_taint_models/tests/inspect_parser_test.py | {
"start": 5549,
"end": 6998
} | class ____(unittest.TestCase):
def test_dataclass_parameters_annotation(self) -> None:
annotations = extract_parameters_with_types(
test_dataclass_parameter, strip_optional=True, strip_annotated=True
)
self.assertEqual(
annotations,
{"data": TestRequestDataclass},
)
def test_with_mixed_args_annotations(self) -> None:
annotations = extract_parameters_with_types(
test_mixed_args,
strip_optional=True,
strip_annotated=True,
)
self.assertEqual(
annotations,
{
"data1": TestRequestDataclass,
"data2": TestRequestDataclass,
"x": str,
"y": None,
"**kwargs": None,
"*args": None,
},
)
def test_with_args_kwargs_with_any_annotation(self) -> None:
annotations = extract_parameters_with_types(
test_args_kwargs_with_any_annotation,
)
self.assertEqual(
annotations,
{
"*args": Any,
"**kwargs": Any,
},
)
def test_with_no_parameters_annotations(self) -> None:
annotations = extract_parameters_with_types(
test_no_parameters,
)
self.assertEqual(
annotations,
{},
)
| ExtractParametersWithTypesTester |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py | {
"start": 87060,
"end": 88662
} | class ____(GeneratedAirbyteDestination):
@public
def __init__(
self,
name: str,
host: str,
port: int,
database: str,
username: str,
password: Optional[str] = None,
ssl: Optional[bool] = None,
jdbc_url_params: Optional[str] = None,
):
"""Airbyte Destination for Tidb.
Documentation can be found at https://docs.airbyte.com/integrations/destinations/tidb
Args:
name (str): The name of the destination.
host (str): Hostname of the database.
port (int): Port of the database.
database (str): Name of the database.
username (str): Username to use to access the database.
password (Optional[str]): Password associated with the username.
ssl (Optional[bool]): Encrypt data using SSL.
jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
"""
self.host = check.str_param(host, "host")
self.port = check.int_param(port, "port")
self.database = check.str_param(database, "database")
self.username = check.str_param(username, "username")
self.password = check.opt_str_param(password, "password")
self.ssl = check.opt_bool_param(ssl, "ssl")
self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")
super().__init__("Tidb", name)
| TidbDestination |
python | pytorch__pytorch | torch/fx/experimental/schema_type_annotation.py | {
"start": 305,
"end": 5379
} | class ____(Transformer):
"""
Use Python function signatures to annotate types for `Nodes` within an FX graph.
This pulls out Python function signatures for:
1. Standard `torch.nn` Module calls
2. `torch.nn.functional` calls
3. Attribute fetches via `get_attr`
Example usage:
m = torchvision.models.resnet18()
traced = torch.fx.symbolic_trace(m)
traced = AnnotateTypesWithSchema(traced).transform()
"""
def __init__(
self,
module: torch.nn.Module,
annotate_functionals: bool = True,
annotate_modules: bool = True,
annotate_get_attrs: bool = True,
):
super().__init__(module)
self.annotate_functionals = annotate_functionals
self.annotate_modules = annotate_modules
self.annotate_get_attrs = annotate_get_attrs
def call_function(
self, target: Target, args: tuple[Argument, ...], kwargs: dict[str, Any]
):
python_ret_type = None
if self.annotate_functionals and target.__module__ == "torch.nn.functional":
target_for_analysis = target
if target in boolean_dispatched:
# HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have
# a 2-way dispatch based on a boolean value. Here we check that the `true` and `false`
# branches of the dispatch have exactly the same signature. If they do, use the `true`
# branch signature for analysis. Otherwise, leave this un-normalized
assert not isinstance(target, str)
dispatched = boolean_dispatched[target]
if_true, if_false = dispatched["if_true"], dispatched["if_false"]
# TODO: can we emit the union of these? What are the implications on TorchScript
# compilation?
if (
inspect.signature(if_true).return_annotation
!= inspect.signature(if_false).return_annotation
):
return super().call_function(target, args, kwargs)
target_for_analysis = if_true
python_ret_type = self._extract_python_return_type(target_for_analysis)
return_proxy = super().call_function(target, args, kwargs)
return_proxy.node.type = (
return_proxy.node.type if return_proxy.node.type else python_ret_type
)
return return_proxy
def call_module(
self, target: Target, args: tuple[Argument, ...], kwargs: dict[str, Any]
):
python_ret_type = None
assert isinstance(target, str)
submod = self.fetch_attr(target)
if self.annotate_modules and hasattr(submod.__class__, "__name__"):
classname = submod.__class__.__name__
if getattr(torch.nn, classname, None) == submod.__class__:
python_ret_type = self._extract_python_return_type(submod.forward)
return_proxy = super().call_module(target, args, kwargs)
return_proxy.node.type = (
return_proxy.node.type if return_proxy.node.type else python_ret_type
)
return return_proxy
def get_attr(
self,
target: torch.fx.node.Target,
args: tuple[Argument, ...],
kwargs: dict[str, Any],
):
attr_proxy = super().get_attr(target, args, kwargs)
if self.annotate_get_attrs:
module_itr = self.module
assert isinstance(target, str)
atoms = target.split(".")
for i, atom in enumerate(atoms):
if not hasattr(module_itr, atom):
raise RuntimeError(
f"Node referenced nonextent target {'.'.join(atoms[:i])}!"
)
module_itr = getattr(module_itr, atom)
maybe_inferred_ts_type = torch._C._jit_try_infer_type(module_itr)
if maybe_inferred_ts_type.success():
python_type = _torchscript_type_to_python_type(
maybe_inferred_ts_type.type()
)
attr_proxy.node.type = (
python_type if not attr_proxy.node.type else attr_proxy.node.type
)
return attr_proxy
def _extract_python_return_type(self, target: Target) -> Optional[Any]:
"""
Given a Python call target, try to extract the Python return annotation
if it is available, otherwise return None
Args:
target (Callable): Python callable to get return annotation for
Returns:
Optional[Any]: Return annotation from the `target`, or None if it was
not available.
"""
assert callable(target)
try:
sig = inspect.signature(target)
except (ValueError, TypeError):
return None
return (
sig.return_annotation
if sig.return_annotation is not inspect.Signature.empty
else None
)
| AnnotateTypesWithSchema |
python | Lightning-AI__lightning | tests/tests_pytorch/plugins/precision/test_double.py | {
"start": 3069,
"end": 4118
} | class ____(BoringModel):
def training_step(self, batch, batch_idx):
assert batch.dtype == torch.float64
output = self.layer(batch)
assert output.dtype == torch.float64
loss = self.loss(output)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
assert batch.dtype == torch.float64
output = self.layer(batch)
assert output.dtype == torch.float64
loss = self.loss(output)
return {"x": loss}
def test_step(self, batch, batch_idx):
assert batch.dtype == torch.float64
output = self.layer(batch)
assert output.dtype == torch.float64
loss = self.loss(output)
return {"y": loss}
def predict_step(self, batch, batch_idx, dataloader_idx=0):
assert batch.dtype == torch.float64
output = self.layer(batch)
assert output.dtype == torch.float64
return output
def predict_dataloader(self):
return DataLoader(RandomDataset(32, 64))
| DoublePrecisionBoringModelNoForward |
python | has2k1__plotnine | plotnine/geoms/geom_violin.py | {
"start": 566,
"end": 7214
} | class ____(geom):
"""
Violin Plot
{usage}
Parameters
----------
{common_parameters}
draw_quantiles : float | list[float], default=None
draw horizontal lines at the given quantiles (0..1)
of the density estimate.
style : str, default="full"
The type of violin plot to draw. The options are:
```python
'full' # Regular (2 sided violins)
'left' # Left-sided half violins
'right' # Right-sided half violins
'left-right' # Alternate (left first) half violins by the group
'right-left' # Alternate (right first) half violins by the group
```
See Also
--------
plotnine.stat_ydensity : The default `stat` for this `geom`.
"""
DEFAULT_AES = {
"alpha": 1,
"color": "#333333",
"fill": "white",
"linetype": "solid",
"size": 0.5,
"weight": 1,
}
REQUIRED_AES = {"x", "y"}
DEFAULT_PARAMS = {
"stat": "ydensity",
"position": "dodge",
"draw_quantiles": None,
"style": "full",
"scale": "area",
"trim": True,
"width": None,
"na_rm": False,
}
draw_legend = staticmethod(geom_polygon.draw_legend)
def __init__(
self,
mapping: aes | None = None,
data: DataLike | None = None,
**kwargs: Any,
):
if "draw_quantiles" in kwargs:
kwargs["draw_quantiles"] = np.repeat(kwargs["draw_quantiles"], 1)
if not all(0 < q < 1 for q in kwargs["draw_quantiles"]):
raise ValueError(
"draw_quantiles must be a float or "
"an iterable of floats (>0.0; < 1.0)"
)
if "style" in kwargs:
allowed = ("full", "left", "right", "left-right", "right-left")
if kwargs["style"] not in allowed:
raise ValueError(f"style must be either {allowed}")
super().__init__(mapping, data, **kwargs)
def setup_data(self, data: pd.DataFrame) -> pd.DataFrame:
if "width" not in data:
if self.params["width"]:
data["width"] = self.params["width"]
else:
data["width"] = resolution(data["x"], False) * 0.9
def func(df: pd.DataFrame) -> pd.DataFrame:
df["ymin"] = df["y"].min()
df["ymax"] = df["y"].max()
df["xmin"] = df["x"] - df["width"] / 2
df["xmax"] = df["x"] + df["width"] / 2
return df
# This is a plyr::ddply
data = groupby_apply(data, ["group", "PANEL"], func)
return data
def draw_panel(
self,
data: pd.DataFrame,
panel_params: panel_view,
coord: coord,
ax: Axes,
):
params = self.params.copy()
quantiles = params.pop("draw_quantiles")
style = params.pop("style")
zorder = params.pop("zorder")
for i, (group, df) in enumerate(data.groupby("group")):
# Place the violins with the smalleer group number on top
# of those with larger numbers. The group_zorder values should be
# in the range [zorder, zorder + 1) to stay within the layer.
group = cast("int", group)
group_zorder = zorder + 0.9 / group
params["zorder"] = group_zorder
# Find the points for the line to go all the way around
df["xminv"] = df["x"] - df["violinwidth"] * (df["x"] - df["xmin"])
df["xmaxv"] = df["x"] + df["violinwidth"] * (df["xmax"] - df["x"])
even = i % 2 == 0
if (
style == "left"
or (style == "left-right" and even)
or (style == "right-left" and not even)
):
df["xmaxv"] = df["x"]
elif (
style == "right"
or (style == "right-left" and even)
or (style == "left-right" and not even)
):
df["xminv"] = df["x"]
# Make sure it's sorted properly to draw the outline
# i.e violin = kde + mirror kde,
# bottom to top then top to bottom
n = len(df)
polygon_df = pd.concat(
[df.sort_values("y"), df.sort_values("y", ascending=False)],
axis=0,
ignore_index=True,
)
_df = polygon_df.iloc
_loc = polygon_df.columns.get_loc
_df[:n, _loc("x")] = _df[:n, _loc("xminv")] # type: ignore
_df[n:, _loc("x")] = _df[n:, _loc("xmaxv")] # type: ignore
# Close the polygon: set first and last point the same
polygon_df.loc[-1, :] = polygon_df.loc[0, :]
# plot violin polygon
geom_polygon.draw_group(
polygon_df,
panel_params,
coord,
ax,
params,
)
if quantiles is not None:
# Get dataframe with quantile segments and that
# with aesthetics then put them together
# Each quantile segment is defined by 2 points and
# they all get similar aesthetics
aes_df = df.drop(["x", "y", "group"], axis=1)
aes_df.reset_index(inplace=True)
idx = [0] * 2 * len(quantiles)
aes_df = aes_df.iloc[idx, :].reset_index(drop=True)
segment_df = pd.concat(
[make_quantile_df(df, quantiles), aes_df], axis=1
)
# plot quantile segments
geom_path.draw_group(
segment_df,
panel_params,
coord,
ax,
params,
)
def make_quantile_df(
data: pd.DataFrame, draw_quantiles: FloatArray
) -> pd.DataFrame:
"""
Return a dataframe with info needed to draw quantile segments
"""
from scipy.interpolate import interp1d
dens = data["density"].cumsum() / data["density"].sum()
ecdf = interp1d(dens, data["y"], assume_sorted=True)
ys = ecdf(draw_quantiles)
# Get the violin bounds for the requested quantiles
violin_xminvs = interp1d(data["y"], data["xminv"])(ys)
violin_xmaxvs = interp1d(data["y"], data["xmaxv"])(ys)
data = pd.DataFrame(
{
"x": interleave(violin_xminvs, violin_xmaxvs),
"y": np.repeat(ys, 2),
"group": np.repeat(np.arange(1, len(ys) + 1), 2),
}
)
return data
| geom_violin |
python | joblib__joblib | joblib/externals/loky/process_executor.py | {
"start": 9633,
"end": 10190
} | class ____:
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
# Store the current loky_pickler so it is correctly set in the worker
self.loky_pickler = get_loky_pickler_name()
def __call__(self):
set_loky_pickler(self.loky_pickler)
return self.fn(*self.args, **self.kwargs)
def __repr__(self):
return (
f"CallItem({self.work_id}, {self.fn}, {self.args}, {self.kwargs})"
)
| _CallItem |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/collective_ops_test.py | {
"start": 22959,
"end": 25982
} | class ____(test.TestCase, parameterized.TestCase):
def testGroupAssignmentBeforeAllReduce(self, jit_compile):
device0 = '/device:GPU:0'
device1 = '/device:GPU:1'
instance_key = 100
results = []
group_assignment = [[0], [1]]
def all_reduce(device, device_index):
with ops.device(device):
token = create_ordering_token()
@def_function.function(jit_compile=jit_compile)
def f(device_index):
group_size, group_key = _collective_ops.assign_group_v2(
group_assignment=group_assignment,
device_index=device_index,
base_key=1)
return _collective_ops.all_reduce_v2([1.],
group_size,
group_key,
instance_key,
ordering_token=token)
with ops.device(device):
results.append(f(device_index))
t0 = threading.Thread(target=all_reduce, args=(device0, 0))
t1 = threading.Thread(target=all_reduce, args=(device1, 1))
t0.start()
t1.start()
t0.join()
t1.join()
self.assertAllEqual(results, [[1.], [1.]])
def testTwoGroupAssignmentBeforeAllReduce(self, jit_compile):
device0 = '/device:GPU:0'
device1 = '/device:GPU:1'
instance_key = 100
results = []
group_assignment1 = [[0], [1]]
group_assignment2 = [[0, 1]]
def all_reduce(device, device_index):
with ops.device(device):
token = create_ordering_token()
@def_function.function(jit_compile=jit_compile)
def f(device_index):
group_size, group_key = _collective_ops.assign_group_v2(
group_assignment=group_assignment1,
device_index=device_index,
base_key=1)
r1 = _collective_ops.all_reduce_v2([1.],
group_size,
group_key,
instance_key,
ordering_token=token)
group_size, group_key = _collective_ops.assign_group_v2(
group_assignment=group_assignment2,
device_index=device_index,
base_key=10000)
r2 = _collective_ops.all_reduce_v2([1.],
group_size,
group_key,
instance_key,
ordering_token=token)
return r1, r2
with ops.device(device):
results.append(f(device_index))
t0 = threading.Thread(target=all_reduce, args=(device0, 0))
t1 = threading.Thread(target=all_reduce, args=(device1, 1))
t0.start()
t1.start()
t0.join()
t1.join()
self.assertAllEqual(results, [[[1.], [2.]], [[1.], [2.]]])
@combinations.generate(
combinations.times(collective_op_combinations, device_combination))
| GroupAssignmentTest |
python | huggingface__transformers | src/transformers/models/dac/modeling_dac.py | {
"start": 2157,
"end": 3273
} | class ____(ModelOutput):
r"""
loss (`torch.Tensor`):
Loss from the encoder model, comprising the weighted combination of the commitment and codebook losses.
quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`, *optional*):
Quantized continuous representation of input.
audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`, *optional*):
Codebook indices for each codebook (quantized discrete representation of input).
projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`, *optional*):
Projected latents (continuous representation of input before quantization).
"""
loss: Optional[torch.FloatTensor] = None
quantized_representation: Optional[torch.FloatTensor] = None
audio_codes: Optional[torch.FloatTensor] = None
projected_latents: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring
# Copied from transformers.models.encodec.modeling_encodec.EncodecDecoderOutput with Encodec->Dac, segment_length->input_length
| DacEncoderOutput |
python | huggingface__transformers | src/transformers/models/zamba2/modeling_zamba2.py | {
"start": 70704,
"end": 78452
} | class ____(Zamba2PreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
def __init__(self, config: Zamba2Config):
super().__init__(config)
self.model = Zamba2Model(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Zamba2HybridDynamicCache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, CausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Zamba2ForCausalLM
>>> model = Zamba2ForCausalLM.from_pretrained("Zyphra/Zamba2-7B-v1")
>>> tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-7B-v1")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
return_dict=return_dict,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
cache_position=None,
position_ids=None,
use_cache=True,
**kwargs,
):
# Overwritten -- has a unique cache type, `Zamba2HybridDynamicCache`
empty_past_kv = past_key_values is None
# Omit tokens covered by past_key_values
if not empty_past_kv:
# If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
# Exception 1: when passing input_embeds, input_ids may be missing entries
# Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
# Exception 3: with synced GPUs cache_position may go out of bounds, but we only want dummy token in that case.
# (we can't check exception 3 while compiling)
if (
inputs_embeds is not None # Exception 1
or cache_position[-1] >= input_ids.shape[1] # Exception 3
):
input_ids = input_ids[:, -cache_position.shape[0] :]
elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
input_ids = input_ids[:, cache_position]
else:
past_key_values = Zamba2HybridDynamicCache(
self.config, input_ids.shape[0], dtype=self.dtype, device=self.device
)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if not empty_past_kv:
position_ids = position_ids[:, -input_ids.shape[1] :]
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and empty_past_kv:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids.contiguous()} # `contiguous()` needed for compilation use cases
model_inputs.update(
{
"position_ids": position_ids,
"past_key_values": past_key_values,
"use_cache": use_cache,
"attention_mask": attention_mask,
"logits_to_keep": self.config.num_logits_to_keep,
"cache_position": cache_position,
}
)
# Forward ALL kwargs that are uninitialized (e.g. `use_cache`).
for key, value in kwargs.items():
if key not in model_inputs:
model_inputs[key] = value
return model_inputs
@auto_docstring(
custom_intro="""
The Zamba2 Model with a sequence classification head on top (linear layer).
[`Zamba2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
(e.g. GPT-2) do.
Since it does classification on the last token, it requires to know the position of the last token. If a
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
each row of the batch).
"""
)
| Zamba2ForCausalLM |
python | apache__airflow | airflow-core/src/airflow/timetables/assets.py | {
"start": 1297,
"end": 3829
} | class ____(AssetTriggeredTimetable):
"""Combine time-based scheduling with event-based scheduling."""
def __init__(
self,
*,
timetable: Timetable,
assets: Collection[Asset] | BaseAsset,
) -> None:
self.timetable = timetable
if isinstance(assets, BaseAsset):
self.asset_condition = assets
else:
self.asset_condition = AssetAll(*assets)
self.description = f"Triggered by assets or {timetable.description}"
self.periodic = timetable.periodic
self.can_be_scheduled = timetable.can_be_scheduled
self.active_runs_limit = timetable.active_runs_limit
@classmethod
def deserialize(cls, data: dict[str, typing.Any]) -> Timetable:
from airflow.serialization.serialized_objects import decode_asset_condition, decode_timetable
return cls(
assets=decode_asset_condition(data["asset_condition"]),
timetable=decode_timetable(data["timetable"]),
)
def serialize(self) -> dict[str, typing.Any]:
from airflow.serialization.serialized_objects import encode_asset_condition, encode_timetable
return {
"asset_condition": encode_asset_condition(self.asset_condition),
"timetable": encode_timetable(self.timetable),
}
def validate(self) -> None:
if isinstance(self.timetable, AssetTriggeredTimetable):
raise AirflowTimetableInvalid("cannot nest asset timetables")
if not isinstance(self.asset_condition, BaseAsset):
raise AirflowTimetableInvalid("all elements in 'assets' must be assets")
@property
def summary(self) -> str:
return f"Asset or {self.timetable.summary}"
def infer_manual_data_interval(self, *, run_after: pendulum.DateTime) -> DataInterval:
return self.timetable.infer_manual_data_interval(run_after=run_after)
def next_dagrun_info(
self, *, last_automated_data_interval: DataInterval | None, restriction: TimeRestriction
) -> DagRunInfo | None:
return self.timetable.next_dagrun_info(
last_automated_data_interval=last_automated_data_interval,
restriction=restriction,
)
def generate_run_id(self, *, run_type: DagRunType, **kwargs: typing.Any) -> str:
if run_type != DagRunType.ASSET_TRIGGERED:
return self.timetable.generate_run_id(run_type=run_type, **kwargs)
return super().generate_run_id(run_type=run_type, **kwargs)
| AssetOrTimeSchedule |
python | crytic__slither | slither/detectors/functions/dead_code.py | {
"start": 305,
"end": 3020
} | class ____(AbstractDetector):
"""
Unprotected function detector
"""
ARGUMENT = "dead-code"
HELP = "Functions that are not used"
IMPACT = DetectorClassification.INFORMATIONAL
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#dead-code"
WIKI_TITLE = "Dead-code"
WIKI_DESCRIPTION = "Functions that are not used."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract Contract{
function dead_code() internal() {}
}
```
`dead_code` is not used in the contract, and make the code's review more difficult."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Remove unused functions."
def _detect(self) -> List[Output]:
results = []
functions_used = set()
for contract in self.compilation_unit.contracts_derived:
all_functionss_called = [
f.all_internal_calls() for f in contract.functions_entry_points
]
all_functions_called = [
item.function for sublist in all_functionss_called for item in sublist
]
functions_used |= {
f.canonical_name for f in all_functions_called if isinstance(f, Function)
}
all_libss_called = [f.all_library_calls() for f in contract.functions_entry_points]
all_libs_called: List[Tuple[Contract, Function]] = [
item.function for sublist in all_libss_called for item in sublist
]
functions_used |= {
lib[1].canonical_name for lib in all_libs_called if isinstance(lib, tuple)
}
for function in sorted(self.compilation_unit.functions, key=lambda x: x.canonical_name):
if (
function.visibility in ["public", "external"]
or function.is_constructor
or function.is_fallback
or function.is_constructor_variables
):
continue
if function.canonical_name in functions_used:
continue
if isinstance(function, FunctionContract) and (
function.contract_declarer.is_from_dependency()
or function.contract_declarer.is_library
):
continue
# Continue if the function is not implemented because it means the contract is abstract
if not function.is_implemented:
continue
info: DETECTOR_INFO = [function, " is never used and should be removed\n"]
res = self.generate_result(info)
results.append(res)
return results
| DeadCode |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 47064,
"end": 49568
} | class ____(Request):
"""
Get an attachment containing the task's log
:param task: Task ID
:type task: str
:param line_type: Line format type
:type line_type: str
:param line_format: Line string format. Used if the line type is 'text'
:type line_format: str
"""
_service = "events"
_action = "download_task_log"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"line_format": {
"default": "{asctime} {worker} {level} {msg}",
"description": "Line string format. Used if the line type is 'text'",
"type": "string",
},
"line_type": {
"description": "Line format type",
"enum": ["json", "text"],
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
line_type: Optional[str] = None,
line_format: Optional[str] = "{asctime} {worker} {level} {msg}",
**kwargs: Any
) -> None:
super(DownloadTaskLogRequest, self).__init__(**kwargs)
self.task = task
self.line_type = line_type
self.line_format = line_format
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("line_type")
def line_type(self) -> Optional[str]:
return self._property_line_type
@line_type.setter
def line_type(self, value: Optional[str]) -> None:
if value is None:
self._property_line_type = None
return
self.assert_isinstance(value, "line_type", six.string_types)
self._property_line_type = value
@schema_property("line_format")
def line_format(self) -> Optional[str]:
return self._property_line_format
@line_format.setter
def line_format(self, value: Optional[str]) -> None:
if value is None:
self._property_line_format = None
return
self.assert_isinstance(value, "line_format", six.string_types)
self._property_line_format = value
| DownloadTaskLogRequest |
python | kamyu104__LeetCode-Solutions | Python/next-permutation.py | {
"start": 673,
"end": 1223
} | class ____(object):
def nextPermutation(self, nums):
"""
:type nums: List[int]
:rtype: None Do not return anything, modify nums in-place instead.
"""
k, l = -1, 0
for i in xrange(len(nums)-1):
if nums[i] < nums[i+1]:
k = i
if k == -1:
nums.reverse()
return
for i in xrange(k+1, len(nums)):
if nums[i] > nums[k]:
l = i
nums[k], nums[l] = nums[l], nums[k]
nums[k+1:] = nums[:k:-1]
| Solution2 |
python | kamyu104__LeetCode-Solutions | Python/count-distinct-subarrays-divisible-by-k-in-sorted-array.py | {
"start": 806,
"end": 1473
} | class ____(object):
def numGoodSubarrays(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
result = prefix = 0
cnt = collections.defaultdict(int)
cnt[0] = 1
for x in nums:
prefix = (prefix+x)%k
result += cnt[prefix]
cnt[prefix] += 1
l = 0
for i in xrange(len(nums)):
l += 1
if i+1 == len(nums) or nums[i+1] != nums[i]:
for j in xrange(1, l+1):
if nums[i]*j%k == 0:
result -= (l-j+1)-1
l = 0
return result
| Solution2 |
python | simonw__datasette | datasette/tracer.py | {
"start": 1879,
"end": 5031
} | class ____:
# If the body is larger than this we don't attempt to append the trace
max_body_bytes = 1024 * 256 # 256 KB
def __init__(self, app):
self.app = app
async def __call__(self, scope, receive, send):
if b"_trace=1" not in scope.get("query_string", b"").split(b"&"):
await self.app(scope, receive, send)
return
trace_start = time.perf_counter()
traces = []
accumulated_body = b""
size_limit_exceeded = False
response_headers = []
async def wrapped_send(message):
nonlocal accumulated_body, size_limit_exceeded, response_headers
if message["type"] == "http.response.start":
response_headers = message["headers"]
await send(message)
return
if message["type"] != "http.response.body" or size_limit_exceeded:
await send(message)
return
# Accumulate body until the end or until size is exceeded
accumulated_body += message["body"]
if len(accumulated_body) > self.max_body_bytes:
# Send what we have accumulated so far
await send(
{
"type": "http.response.body",
"body": accumulated_body,
"more_body": bool(message.get("more_body")),
}
)
size_limit_exceeded = True
return
if not message.get("more_body"):
# We have all the body - modify it and send the result
# TODO: What to do about Content-Type or other cases?
trace_info = {
"request_duration_ms": 1000 * (time.perf_counter() - trace_start),
"sum_trace_duration_ms": sum(t["duration_ms"] for t in traces),
"num_traces": len(traces),
"traces": traces,
}
try:
content_type = [
v.decode("utf8")
for k, v in response_headers
if k.lower() == b"content-type"
][0]
except IndexError:
content_type = ""
if "text/html" in content_type and b"</body>" in accumulated_body:
extra = escape(json.dumps(trace_info, indent=2))
extra_html = f"<pre>{extra}</pre></body>".encode("utf8")
accumulated_body = accumulated_body.replace(b"</body>", extra_html)
elif "json" in content_type and accumulated_body.startswith(b"{"):
data = json.loads(accumulated_body.decode("utf8"))
if "_trace" not in data:
data["_trace"] = trace_info
accumulated_body = json.dumps(data).encode("utf8")
await send({"type": "http.response.body", "body": accumulated_body})
with capture_traces(traces):
await self.app(scope, receive, wrapped_send)
| AsgiTracer |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 48597,
"end": 50004
} | class ____(ASTBase):
def __init__(self, exprs: list[ASTExpression], trailingComma: bool) -> None:
self.exprs = exprs
self.trailingComma = trailingComma
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTBracedInitList):
return NotImplemented
return self.exprs == other.exprs and self.trailingComma == other.trailingComma
def __hash__(self) -> int:
return hash((self.exprs, self.trailingComma))
def _stringify(self, transform: StringifyTransform) -> str:
exprs = ', '.join(transform(e) for e in self.exprs)
trailing_comma = ',' if self.trailingComma else ''
return f'{{{exprs}{trailing_comma}}}'
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
signode += addnodes.desc_sig_punctuation('{', '{')
first = True
for e in self.exprs:
if not first:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_space()
else:
first = False
e.describe_signature(signode, mode, env, symbol)
if self.trailingComma:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_punctuation('}', '}')
| ASTBracedInitList |
python | pytorch__pytorch | torch/export/dynamic_shapes.py | {
"start": 2682,
"end": 8477
} | class ____:
"""
The ``Dim`` class allows users to specify dynamism in their exported
programs. By marking a dimension with a ``Dim``, the compiler associates the
dimension with a symbolic integer containing a dynamic range.
The API can be used in 2 ways: Dim hints (i.e. automatic dynamic shapes:
``Dim.AUTO``, ``Dim.DYNAMIC``, ``Dim.STATIC``), or named Dims (i.e.
``Dim("name", min=1, max=2)``).
Dim hints provide the lowest barrier to exportability, with the user only
needing to specify if a dimension if dynamic, static, or left for the
compiler to decide (``Dim.AUTO``). The export process will automatically
infer the remaining constraints on min/max ranges and relationships between
dimensions.
Example::
class Foo(nn.Module):
def forward(self, x, y):
assert x.shape[0] == 4
assert y.shape[0] >= 16
return x @ y
x = torch.randn(4, 8)
y = torch.randn(8, 16)
dynamic_shapes = {
"x": {0: Dim.AUTO, 1: Dim.AUTO},
"y": {0: Dim.AUTO, 1: Dim.AUTO},
}
ep = torch.export(Foo(), (x, y), dynamic_shapes=dynamic_shapes)
Here, export would raise an exception if we replaced all uses of ``Dim.AUTO`` with ``Dim.DYNAMIC``,
as ``x.shape[0]`` is constrained to be static by the model.
More complex relations between dimensions may also be codegened as runtime assertion nodes by the compiler,
e.g. ``(x.shape[0] + y.shape[1]) % 4 == 0``, to be raised if runtime inputs do not satisfy such constraints.
You may also specify min-max bounds for Dim hints, e.g. ``Dim.AUTO(min=16, max=32)``, ``Dim.DYNAMIC(max=64)``,
with the compiler inferring the remaining constraints within the ranges. An exception will be raised if
the valid range is entirely outside the user-specified range.
Named Dims provide a stricter way of specifying dynamism, where exceptions are raised if the compiler
infers constraints that do not match the user specification. For example, exporting the previous
model, the user would need the following ``dynamic_shapes`` argument::
s0 = Dim("s0")
s1 = Dim("s1", min=16)
dynamic_shapes = {
"x": {0: 4, 1: s0},
"y": {0: s0, 1: s1},
}
ep = torch.export(Foo(), (x, y), dynamic_shapes=dynamic_shapes)
Named Dims also allow specification of relationships between dimensions, up
to univariate linear relations. For example, the following indicates one
dimension is a multiple of another plus 4::
s0 = Dim("s0")
s1 = 3 * s0 + 4
"""
AUTO = _DimHint.AUTO()
DYNAMIC = _DimHint.DYNAMIC()
STATIC = _DimHint.STATIC()
def __init__(
self, name: str, *, min: Optional[int] = None, max: Optional[int] = None
):
from torch.utils._sympy.numbers import int_oo
_min = 0 if min is None else min
_max = int_oo if max is None else max
assert _max > _min, f"Cannot create Dim with inconsistent min={min}, max={max}"
assert name.isidentifier(), f"Dim name must be a valid identifier, got {name}"
self.__name__ = name
self.min = _min
self.max = _max
def __add__(self, other) -> "Dim":
# e.g., dim + 1
if type(other) is not int:
raise NotImplementedError(
f"Attempted to add {other} to {self.__name__}, where an integer was expected. "
"(Only increasing linear operations with integer coefficients are supported.)"
)
return self._derive(lambda x: x + other)
def __radd__(self, other) -> "Dim":
return self + other
def __sub__(self, other) -> "Dim":
# e.g., dim - 1
if type(other) is not int:
raise NotImplementedError(
f"Attempted to subtract {other} from {self.__name__}, where an integer was expected. "
"(Only increasing linear operations with integer coefficients are supported.)"
)
return self._derive(lambda x: x - other)
def __rsub__(self, other) -> "Dim":
raise NotImplementedError(
f"Attempted to negate {self.__name__}. "
"(Only increasing linear operations with integer coefficients are supported.)"
)
def __mul__(self, other) -> "Dim":
# e.g., dim * 2
if type(other) is not int or other <= 0:
raise NotImplementedError(
f"Attempted to multiply {other} with {self.__name__}, where a positive integer was expected. "
"(Only increasing linear operations with integer coefficients are supported.)"
)
return self._derive(lambda x: x * other)
def __rmul__(self, other) -> "Dim":
return self * other
def _derived_name(self, fn) -> str:
from sympy import sympify
return str(fn(sympify(self.__name__)))
def _derive(self, fn) -> "Dim":
return _DerivedDim(self._derived_name(fn), self, fn)
@staticmethod
def _readable(name: str, min_: int, max_: int) -> str:
from torch.utils._sympy.numbers import int_oo
if min_ == 2:
min_ = None # type: ignore[assignment]
if max_ == int_oo:
max_ = None # type: ignore[assignment]
if min_ is None and max_ is None:
return f"Dim('{name}')"
if min_ is None:
return f"Dim('{name}', max={max_})"
if max_ is None:
return f"Dim('{name}', min={min_})"
return f"Dim('{name}', min={min_}, max={max_})"
def __repr__(self):
return Dim._readable(self.__name__, self.min, self.max)
_Dim = Dim # TODO(pianpwk): remove after it's no longer internally breaking
| Dim |
python | prabhupant__python-ds | data_structures/bst/deletion.py | {
"start": 0,
"end": 1172
} | class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def inorder(root):
if not root:
return None
stack = []
while True:
if root:
stack.append(root)
root = root.left
else:
if not stack:
break
root = stack.pop()
print(root.val, end=" ")
root = root.right()
def min_value_node(root):
curr = root
while curr.left:
curr = curr.left
return curr
def delete(root, val):
if not root:
return root
if val < root.val:
root.left = delete(root.left, val)
elif val > root.val:
root.right = delete(root.right, val)
else:
# Root with one child or no child
if root.left is None:
temp = root.right
root = None
return temp
elif root.right is None:
temp = root.left
root = None
return temp
temp = min_value_node(root.right)
root.val = temp.val
root.right = delete(root.right, temp.val)
return root
| Node |
python | apache__airflow | providers/standard/tests/unit/standard/utils/test_skipmixin.py | {
"start": 1996,
"end": 18957
} | class ____:
@staticmethod
def clean_db():
clear_db_dags()
clear_db_runs()
def setup_method(self):
self.clean_db()
def teardown_method(self):
self.clean_db()
def test_skip(self, dag_maker, session, time_machine):
now = datetime.datetime.now(tz=datetime.timezone.utc)
time_machine.move_to(now, tick=False)
with dag_maker("dag"):
tasks = [EmptyOperator(task_id="task")]
if AIRFLOW_V_3_0_PLUS:
dag_run = dag_maker.create_dagrun(
run_type=DagRunType.MANUAL,
logical_date=now,
state=State.FAILED,
)
with pytest.raises(DownstreamTasksSkipped) as exc_info:
SkipMixin().skip(ti=dag_run.get_task_instance("task"), tasks=tasks)
assert exc_info.value.tasks == ["task"]
else:
from airflow import settings
dag_run = dag_maker.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=now,
state=State.FAILED,
)
session = settings.Session()
SkipMixin().skip(dag_run=dag_run, execution_date=now, tasks=tasks)
session.query(TI).filter(
TI.dag_id == "dag",
TI.task_id == "task",
TI.state == State.SKIPPED,
TI.start_date == now,
TI.end_date == now,
).one()
def test_skip_none_tasks(self):
if AIRFLOW_V_3_0_PLUS:
assert SkipMixin().skip(ti=Mock(), tasks=[]) is None
else:
session = Mock()
assert SkipMixin().skip(dag_run=None, execution_date=None, tasks=[]) is None
assert not session.query.called
assert not session.commit.called
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Airflow 2 had a different implementation")
def test_skip__only_mapped_operators_passed(self):
ti = Mock(map_index=2)
assert (
SkipMixin().skip(
ti=ti,
tasks=[MagicMock(spec=MappedOperator)],
)
is None
)
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Airflow 2 had a different implementation")
def test_skip__only_none_mapped_operators_passed(self):
ti = Mock(map_index=-1)
with pytest.raises(DownstreamTasksSkipped) as exc_info:
SkipMixin().skip(
ti=ti,
tasks=[MagicMock(spec=MappedOperator, task_id="task")],
)
assert exc_info.value.tasks == ["task"]
@pytest.mark.parametrize(
("branch_task_ids", "expected_states"),
[
(None, {"task2": State.SKIPPED, "task3": State.SKIPPED}),
([], {"task2": State.SKIPPED, "task3": State.SKIPPED}),
],
ids=["None", "empty-list"],
)
def test_skip_all_except__branch_task_ids_none(
self, dag_maker, branch_task_ids, expected_states, session
):
with dag_maker(
"dag_test_skip_all_except",
serialized=True,
) as dag:
task1 = EmptyOperator(task_id="task1")
task2 = EmptyOperator(task_id="task2")
task3 = EmptyOperator(task_id="task3")
task1 >> [task2, task3]
dag_maker.create_dagrun(run_id=DEFAULT_DAG_RUN_ID)
if AIRFLOW_V_3_0_PLUS:
dag_version = DagVersion.get_latest_version(dag.dag_id)
ti1 = TI(task1, run_id=DEFAULT_DAG_RUN_ID, dag_version_id=dag_version.id)
else:
ti1 = TI(task1, run_id=DEFAULT_DAG_RUN_ID)
if AIRFLOW_V_3_0_PLUS:
with pytest.raises(DownstreamTasksSkipped) as exc_info:
SkipMixin().skip_all_except(ti=ti1, branch_task_ids=branch_task_ids)
assert set(exc_info.value.tasks) == {("task2", -1), ("task3", -1)}
else:
ti2 = TI(task2, run_id=DEFAULT_DAG_RUN_ID)
ti3 = TI(task3, run_id=DEFAULT_DAG_RUN_ID)
SkipMixin().skip_all_except(ti=ti1, branch_task_ids=branch_task_ids)
session.expire_all()
def get_state(ti):
ti.refresh_from_db()
return ti.state
executed_states = {"task2": get_state(ti2), "task3": get_state(ti3)}
assert executed_states == expected_states
@pytest.mark.parametrize(
("branch_task_ids", "expected_states"),
[
(["task2"], {"task2": State.NONE, "task3": State.SKIPPED}),
(("task2",), {"task2": State.NONE, "task3": State.SKIPPED}),
("task2", {"task2": State.NONE, "task3": State.SKIPPED}),
],
ids=["list-of-task-ids", "tuple-of-task-ids", "str-task-id"],
)
def test_skip_all_except__skip_task3(self, dag_maker, branch_task_ids, expected_states, session):
with dag_maker(
"dag_test_skip_all_except",
serialized=True,
):
task1 = EmptyOperator(task_id="task1")
task2 = EmptyOperator(task_id="task2")
task3 = EmptyOperator(task_id="task3")
task1 >> [task2, task3]
dag_maker.create_dagrun(run_id=DEFAULT_DAG_RUN_ID)
if AIRFLOW_V_3_0_PLUS:
dag_version = DagVersion.get_latest_version(task1.dag_id)
ti1 = TI(task1, run_id=DEFAULT_DAG_RUN_ID, dag_version_id=dag_version.id)
else:
ti1 = TI(task1, run_id=DEFAULT_DAG_RUN_ID)
if AIRFLOW_V_3_0_PLUS:
with pytest.raises(DownstreamTasksSkipped) as exc_info:
SkipMixin().skip_all_except(ti=ti1, branch_task_ids=branch_task_ids)
assert set(exc_info.value.tasks) == {("task3", -1)}
else:
ti2 = TI(task2, run_id=DEFAULT_DAG_RUN_ID)
ti3 = TI(task3, run_id=DEFAULT_DAG_RUN_ID)
SkipMixin().skip_all_except(ti=ti1, branch_task_ids=branch_task_ids)
session.expire_all()
def get_state(ti):
ti.refresh_from_db()
return ti.state
executed_states = {"task2": get_state(ti2), "task3": get_state(ti3)}
assert executed_states == expected_states
@pytest.mark.skipif(
AIRFLOW_V_3_0_PLUS, reason="In Airflow 3, `NotPreviouslySkippedDep` is used for this case"
)
@pytest.mark.need_serialized_dag
def test_mapped_tasks_skip_all_except(self, dag_maker):
with dag_maker("dag_test_skip_all_except") as dag:
@task
def branch_op(k): ...
@task_group
def task_group_op(k):
branch_a = EmptyOperator(task_id="branch_a")
branch_b = EmptyOperator(task_id="branch_b")
branch_op(k) >> [branch_a, branch_b]
task_group_op.expand(k=[0, 1])
dag_maker.create_dagrun(run_id=DEFAULT_DAG_RUN_ID)
if AIRFLOW_V_3_0_PLUS:
dag_version = DagVersion.get_latest_version(dag.dag_id)
branch_op_ti_0 = TI(
dag.get_task("task_group_op.branch_op"),
run_id=DEFAULT_DAG_RUN_ID,
map_index=0,
dag_version_id=dag_version.id,
)
branch_op_ti_1 = TI(
dag.get_task("task_group_op.branch_op"),
run_id=DEFAULT_DAG_RUN_ID,
map_index=1,
dag_version_id=dag_version.id,
)
branch_a_ti_0 = TI(
dag.get_task("task_group_op.branch_a"),
run_id=DEFAULT_DAG_RUN_ID,
map_index=0,
dag_version_id=dag_version.id,
)
branch_a_ti_1 = TI(
dag.get_task("task_group_op.branch_a"),
run_id=DEFAULT_DAG_RUN_ID,
map_index=1,
dag_version_id=dag_version.id,
)
branch_b_ti_0 = TI(
dag.get_task("task_group_op.branch_b"),
run_id=DEFAULT_DAG_RUN_ID,
map_index=0,
dag_version_id=dag_version.id,
)
branch_b_ti_1 = TI(
dag.get_task("task_group_op.branch_b"),
run_id=DEFAULT_DAG_RUN_ID,
map_index=1,
dag_version_id=dag_version.id,
)
else:
branch_op_ti_0 = TI(
dag.get_task("task_group_op.branch_op"), run_id=DEFAULT_DAG_RUN_ID, map_index=0
)
branch_op_ti_1 = TI(
dag.get_task("task_group_op.branch_op"), run_id=DEFAULT_DAG_RUN_ID, map_index=1
)
branch_a_ti_0 = TI(dag.get_task("task_group_op.branch_a"), run_id=DEFAULT_DAG_RUN_ID, map_index=0)
branch_a_ti_1 = TI(dag.get_task("task_group_op.branch_a"), run_id=DEFAULT_DAG_RUN_ID, map_index=1)
branch_b_ti_0 = TI(dag.get_task("task_group_op.branch_b"), run_id=DEFAULT_DAG_RUN_ID, map_index=0)
branch_b_ti_1 = TI(dag.get_task("task_group_op.branch_b"), run_id=DEFAULT_DAG_RUN_ID, map_index=1)
SkipMixin().skip_all_except(ti=branch_op_ti_0, branch_task_ids="task_group_op.branch_a")
SkipMixin().skip_all_except(ti=branch_op_ti_1, branch_task_ids="task_group_op.branch_b")
def get_state(ti):
ti.refresh_from_db()
return ti.state
assert get_state(branch_a_ti_0) == State.NONE
assert get_state(branch_b_ti_0) == State.SKIPPED
assert get_state(branch_a_ti_1) == State.SKIPPED
assert get_state(branch_b_ti_1) == State.NONE
def test_raise_exception_on_not_accepted_branch_task_ids_type(self, dag_maker):
with dag_maker("dag_test_skip_all_except_wrong_type"):
task = EmptyOperator(task_id="task")
dag_maker.create_dagrun(run_id=DEFAULT_DAG_RUN_ID)
if AIRFLOW_V_3_0_PLUS:
dag_version = DagVersion.get_latest_version(task.dag_id)
ti1 = TI(task, run_id=DEFAULT_DAG_RUN_ID, dag_version_id=dag_version.id)
else:
ti1 = TI(task, run_id=DEFAULT_DAG_RUN_ID)
error_message = (
r"'branch_task_ids' must be either None, a task ID, or an Iterable of IDs, but got 'int'\."
)
with pytest.raises(AirflowException, match=error_message):
SkipMixin().skip_all_except(ti=ti1, branch_task_ids=42)
def test_raise_exception_on_not_accepted_iterable_branch_task_ids_type(self, dag_maker):
with dag_maker("dag_test_skip_all_except_wrong_type"):
task = EmptyOperator(task_id="task")
dag_maker.create_dagrun(run_id=DEFAULT_DAG_RUN_ID)
if AIRFLOW_V_3_0_PLUS:
dag_version = DagVersion.get_latest_version(task.dag_id)
ti1 = TI(task, run_id=DEFAULT_DAG_RUN_ID, dag_version_id=dag_version.id)
else:
ti1 = TI(task, run_id=DEFAULT_DAG_RUN_ID)
if AIRFLOW_V_3_0_PLUS:
# Improved error message for Airflow 3.0+
error_message = (
r"Unable to branch to the specified tasks\. "
r"The branching function returned invalid 'branch_task_ids': \{\(42, 'int'\)\}\. "
r"Please check that your function returns an Iterable of valid task IDs that exist in your DAG\."
)
else:
# Old error message for Airflow 2.x
error_message = (
r"'branch_task_ids' expected all task IDs are strings\. "
r"Invalid tasks found: \{\(42, 'int'\)\}\."
)
with pytest.raises(AirflowException, match=error_message):
SkipMixin().skip_all_except(ti=ti1, branch_task_ids=["task", 42])
@pytest.mark.parametrize(
"branch_task_ids",
[
pytest.param("task4", id="invalid-single-task"),
pytest.param(["task2", "task4"], id="invalid-any-task-in-list"),
pytest.param(["task5", "task4"], id="invalid-all-task-in-list"),
],
)
def test_raise_exception_on_not_valid_branch_task_ids(self, dag_maker, branch_task_ids):
with dag_maker("dag_test_skip_all_except_wrong_type", serialized=True):
task1 = EmptyOperator(task_id="task1")
task2 = EmptyOperator(task_id="task2")
task3 = EmptyOperator(task_id="task3")
task1 >> [task2, task3]
dag_maker.create_dagrun(run_id=DEFAULT_DAG_RUN_ID)
if AIRFLOW_V_3_0_PLUS:
dag_version = DagVersion.get_latest_version(task1.dag_id)
ti1 = TI(task1, run_id=DEFAULT_DAG_RUN_ID, dag_version_id=dag_version.id)
else:
ti1 = TI(task1, run_id=DEFAULT_DAG_RUN_ID)
error_message = r"'branch_task_ids' must contain only valid task_ids. Invalid tasks found: .*"
with pytest.raises(AirflowException, match=error_message):
SkipMixin().skip_all_except(ti=ti1, branch_task_ids=branch_task_ids)
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Issue only exists in Airflow 3.x")
def test_ensure_tasks_includes_sensors_airflow_3x(self, dag_maker):
"""Test that sensors (inheriting from airflow.sdk.BaseOperator) are properly handled by _ensure_tasks."""
from airflow.providers.standard.utils.skipmixin import _ensure_tasks
from airflow.sdk import BaseOperator as SDKBaseOperator
from airflow.sdk.bases.sensor import BaseSensorOperator
class DummySensor(BaseSensorOperator):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.timeout = 0
self.poke_interval = 0
def poke(self, context):
return True
with dag_maker("dag_test_sensor_skipping") as dag:
regular_task = EmptyOperator(task_id="regular_task")
sensor_task = DummySensor(task_id="sensor_task")
downstream_task = EmptyOperator(task_id="downstream_task")
regular_task >> [sensor_task, downstream_task]
dag_maker.create_dagrun(run_id=DEFAULT_DAG_RUN_ID)
downstream_nodes = dag.get_task("regular_task").downstream_list
task_list = _ensure_tasks(downstream_nodes)
# Verify both the regular operator and sensor are included
task_ids = [t.task_id for t in task_list]
assert "sensor_task" in task_ids, "Sensor should be included in task list"
assert "downstream_task" in task_ids, "Regular task should be included in task list"
assert len(task_list) == 2, "Both tasks should be included"
# Also verify that the sensor is actually an instance of the correct BaseOperator
sensor_in_list = next((t for t in task_list if t.task_id == "sensor_task"), None)
assert sensor_in_list is not None, "Sensor task should be found in list"
assert isinstance(sensor_in_list, SDKBaseOperator), "Sensor should be instance of SDK BaseOperator"
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Integration test for Airflow 3.x sensor skipping")
def test_skip_sensor_in_branching_scenario(self, dag_maker):
"""Integration test: verify sensors are properly skipped by branching operators in Airflow 3.x."""
from airflow.sdk.bases.sensor import BaseSensorOperator
# Create a dummy sensor for testing
class DummySensor(BaseSensorOperator):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.timeout = 0
self.poke_interval = 0
def poke(self, context):
return True
with dag_maker("dag_test_branch_sensor_skipping"):
branch_task = EmptyOperator(task_id="branch_task")
regular_task = EmptyOperator(task_id="regular_task")
sensor_task = DummySensor(task_id="sensor_task")
branch_task >> [regular_task, sensor_task]
dag_maker.create_dagrun(run_id=DEFAULT_DAG_RUN_ID)
dag_version = DagVersion.get_latest_version(branch_task.dag_id)
ti_branch = TI(branch_task, run_id=DEFAULT_DAG_RUN_ID, dag_version_id=dag_version.id)
# Test skipping the sensor (follow regular_task branch)
with pytest.raises(DownstreamTasksSkipped) as exc_info:
SkipMixin().skip_all_except(ti=ti_branch, branch_task_ids="regular_task")
# Verify that the sensor task is properly marked for skipping
skipped_tasks = set(exc_info.value.tasks)
assert ("sensor_task", -1) in skipped_tasks, "Sensor task should be marked for skipping"
# Test skipping the regular task (follow sensor_task branch)
with pytest.raises(DownstreamTasksSkipped) as exc_info:
SkipMixin().skip_all_except(ti=ti_branch, branch_task_ids="sensor_task")
# Verify that the regular task is properly marked for skipping
skipped_tasks = set(exc_info.value.tasks)
assert ("regular_task", -1) in skipped_tasks, "Regular task should be marked for skipping"
| TestSkipMixin |
python | facebook__pyre-check | tools/incremental_test/specification.py | {
"start": 11863,
"end": 14627
} | class ____:
old_state: RepositoryState
new_state: RepositoryUpdate
pyre_check_pyre_options: str = ""
pyre_check_options: str = ""
pyre_start_pyre_options: str = ""
pyre_start_options: str = ""
pyre_stop_pyre_options: str = ""
pyre_stop_options: str = ""
pyre_incremental_pyre_options: str = ""
pyre_incremental_options: str = ""
def to_json(self) -> Dict[str, Any]:
result: Dict[str, Any] = {
"old_state": self.old_state.to_json(),
"new_state": self.new_state.to_json(),
}
if len(self.pyre_check_pyre_options) > 0:
result["pyre_check_pyre_options"] = self.pyre_check_pyre_options
if len(self.pyre_check_options) > 0:
result["pyre_check_options"] = self.pyre_check_options
if len(self.pyre_start_pyre_options) > 0:
result["pyre_start_pyre_options"] = self.pyre_start_pyre_options
if len(self.pyre_start_options) > 0:
result["pyre_start_options"] = self.pyre_start_options
if len(self.pyre_stop_pyre_options) > 0:
result["pyre_stop_pyre_options"] = self.pyre_stop_pyre_options
if len(self.pyre_stop_options) > 0:
result["pyre_stop_options"] = self.pyre_stop_options
if len(self.pyre_incremental_pyre_options) > 0:
result["pyre_incremental_pyre_options"] = self.pyre_incremental_pyre_options
if len(self.pyre_incremental_options) > 0:
result["pyre_incremental_options"] = self.pyre_incremental_options
return result
@staticmethod
def from_json(input_json: Dict[str, Any]) -> "Specification":
try:
return Specification(
old_state=RepositoryState.from_json(input_json["old_state"]),
new_state=RepositoryUpdate.from_json(input_json["new_state"]),
pyre_check_pyre_options=input_json.get("pyre_check_pyre_options", ""),
pyre_check_options=input_json.get("pyre_check_options", ""),
pyre_start_pyre_options=input_json.get("pyre_start_pyre_options", ""),
pyre_start_options=input_json.get("pyre_start_options", ""),
pyre_stop_pyre_options=input_json.get("pyre_stop_pyre_options", ""),
pyre_stop_options=input_json.get("pyre_stop_options", ""),
pyre_incremental_pyre_options=input_json.get(
"pyre_incremental_pyre_options", ""
),
pyre_incremental_options=input_json.get("pyre_incremental_options", ""),
)
except KeyError as key:
raise InvalidSpecificationException(
f"Cannot create Specification due to missing field '{key}'"
)
| Specification |
python | weaviate__weaviate-python-client | weaviate/collections/classes/aggregate.py | {
"start": 658,
"end": 940
} | class ____:
"""The aggregation result for a number property."""
count: Optional[int]
maximum: Optional[float]
mean: Optional[float]
median: Optional[float]
minimum: Optional[float]
mode: Optional[float]
sum_: Optional[float]
@dataclass
| AggregateNumber |
python | django__django | tests/backends/test_ddl_references.py | {
"start": 7528,
"end": 9846
} | class ____(SimpleTestCase):
def test_references_table(self):
statement = Statement(
"", reference=MockReference("", {"table"}, {}, {}), non_reference=""
)
self.assertIs(statement.references_table("table"), True)
self.assertIs(statement.references_table("other"), False)
def test_references_column(self):
statement = Statement(
"",
reference=MockReference("", {}, {("table", "column")}, {}),
non_reference="",
)
self.assertIs(statement.references_column("table", "column"), True)
self.assertIs(statement.references_column("other", "column"), False)
def test_references_index(self):
statement = Statement(
"",
reference=MockReference("", {}, {}, {("table", "index")}),
non_reference="",
)
self.assertIs(statement.references_index("table", "index"), True)
self.assertIs(statement.references_index("other", "index"), False)
def test_rename_table_references(self):
reference = MockReference("", {"table"}, {}, {})
statement = Statement("", reference=reference, non_reference="")
statement.rename_table_references("table", "other")
self.assertEqual(reference.referenced_tables, {"other"})
def test_rename_column_references(self):
reference = MockReference("", {}, {("table", "column")}, {})
statement = Statement("", reference=reference, non_reference="")
statement.rename_column_references("table", "column", "other")
self.assertEqual(reference.referenced_columns, {("table", "other")})
def test_repr(self):
reference = MockReference("reference", {}, {}, {})
statement = Statement(
"%(reference)s - %(non_reference)s",
reference=reference,
non_reference="non_reference",
)
self.assertEqual(repr(statement), "<Statement 'reference - non_reference'>")
def test_str(self):
reference = MockReference("reference", {}, {}, {})
statement = Statement(
"%(reference)s - %(non_reference)s",
reference=reference,
non_reference="non_reference",
)
self.assertEqual(str(statement), "reference - non_reference")
| StatementTests |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/runtime_wrappers.py | {
"start": 5270,
"end": 19003
} | class ____:
def __init__(self, info, runtime_metadata, trace_joint):
self._unwrap_aliased_base_tensor = _identity
if info.output_type in (
OutputType.alias_of_intermediate,
OutputType.alias_of_intermediate_save_as_output,
):
num_user_outputs = len(runtime_metadata.output_info)
self.base_idx = info.base_idx + num_user_outputs
else:
self.base_idx = info.base_idx
if self.base_idx in runtime_metadata.aliased_out_indices:
self._unwrap_aliased_base_tensor = _unwrap_tensoralias
self.unwrap_out = _unwrap_tensoralias if trace_joint else _identity
self.requires_grad = info.requires_grad
self.view_meta_sequence = info.view_meta_sequence
self.replay_views = config.view_replay_for_aliased_outputs
def __call__(self, orig_inputs, fw_outs, out):
aliased_base_tensor = fw_outs[self.base_idx]
return gen_alias_from_base(
self._unwrap_aliased_base_tensor(aliased_base_tensor),
self.unwrap_out(out),
self.requires_grad,
self.view_meta_sequence,
replay_views=self.replay_views,
)
_HANDLER_MAP = {
OutputType.non_alias: NoopAliasHandler,
OutputType.unsafe_view_alias: NoopAliasHandler,
OutputType.custom_function_view: NoopAliasHandler,
OutputType.alias_of_input: AliasOfInputHandler,
OutputType.is_input: IsInputHandler,
OutputType.alias_of_intermediate: AliasOfIntermediateHandler,
OutputType.alias_of_intermediate_save_as_output: AliasOfIntermediateHandler,
OutputType.alias_of_intermediate_base_is_user_output: AliasOfIntermediateHandler,
}
def make_output_handler(info, runtime_metadata, trace_joint):
handler_type = _HANDLER_MAP[info.output_type]
return handler_type(info, runtime_metadata, trace_joint)
# not sure why AOTDispatcher needs to manually set this
def maybe_mark_dynamic_helper(t: torch.Tensor, dims: set[int]):
if hasattr(t, "_dynamo_weak_dynamic_indices"):
# pyrefly: ignore [missing-attribute]
t._dynamo_weak_dynamic_indices |= dims
else:
t._dynamo_weak_dynamic_indices = dims.copy() # type: ignore[attr-defined]
def _should_disable_saved_tensors_hooks():
# Compiled autograd is not supported yet, to be added in future.
if torch._dynamo.compiled_autograd.in_compiled_autograd_region:
return False
get_hooks = torch._functorch._aot_autograd.utils.top_saved_tensors_hooks
are_inline_hooks = (
torch._functorch._aot_autograd.utils.saved_tensors_hooks_are_inlineable
)
hooks = get_hooks()
if are_inline_hooks(hooks):
return True
return False
def _create_runtime_wrapper(
compiled_fn,
*,
runtime_metadata: ViewAndMutationMeta,
indices_of_inps_to_detach: list[int],
trace_joint: bool,
keep_input_mutations: bool,
disable_amp: bool,
):
if not getattr(compiled_fn, "_boxed_call", False):
compiled_fn = make_boxed_func(compiled_fn)
# Note [Inputs needed in runtime epilogue after list clearing]
# In Python functions, you can't free the input arguments of a function within the scope of that function. A workaround is to
# wrap the input arguments in a list, and clear the list from within the function.
# Here, this is implemented as `call_func_at_runtime_with_args(..., steal_args=True)`.
#
# This is needed for Compiled Autograd since some of the inputs (activations) should be freed early.
# However, we cannot blindly clear the entire list, because AOTAutograd may need access to some of the graph inputs
# **after** the compiled function has finished running. There are two main cases:
# (1) Input mutations: If there are an input mutations that we must run outside of the graph, we need access to the input.
# (2) Output aliasing: Outputs that aliases graph inputs generally must be regenerated outside of the `autograd.Function`,
# and doing so requires us accessing the corresponding input after the compiled artifact has run.
epilogue_args_idx = []
epilogue_args_idx.extend(runtime_metadata.mutated_inp_runtime_indices)
for info in runtime_metadata.output_info:
if (
info.output_type == OutputType.alias_of_input
or info.output_type == OutputType.is_input
):
assert isinstance(info.base_idx, int)
epilogue_args_idx.append(info.base_idx)
if config.unlift_effect_tokens:
assert len(runtime_metadata.tokens) == 0
if runtime_metadata.num_outputs_aliased > 0:
output_handlers = tuple(
make_output_handler(info, runtime_metadata, trace_joint)
for info in runtime_metadata.output_info
)
def record_runtime_wrapper_prologue_enter() -> Optional[
AbstractContextManager[None]
]:
if (
torch.autograd.profiler._is_profiler_enabled
and dynamo_config.record_runtime_overhead
):
cm = torch._C._profiler._RecordFunctionFast(
"AOTDispatcher Runtime Wrapper Prologue"
)
cm.__enter__()
return cm
return None
def record_runtime_wrapper_prologue_exit(
cm: Optional[AbstractContextManager[None]],
) -> None:
if cm is not None:
cm.__exit__(None, None, None)
@simple_wraps(compiled_fn)
def runtime_wrapper(args: list[Any]):
# Create context manager for profiler
cm = record_runtime_wrapper_prologue_enter()
# stash a ref to each input tensor we plan to use after the compiled function
orig_inputs = {i: args[i] for i in epilogue_args_idx}
if keep_input_mutations:
mutated_args = (
args[i]
for i in runtime_metadata.mutated_graph_handled_indices_seen_by_autograd
)
torch.autograd.graph.increment_version(mutated_args)
if trace_joint:
args_ = list(args)
# See Note [Detaching inputs that never need gradients]
for idx in indices_of_inps_to_detach:
if isinstance(args_[idx], torch.Tensor):
args_[idx] = args_[idx].detach()
# It's possible to have trace_joint inside user specified with no_grad() region,
# if there is a nested with enable_grad(), that forces some outputs to require gradients.
# Therefore, we unconditionally turn on enable_grad() for compiled_fn execution.
with (
torch.autograd._force_original_view_tracking(True),
torch.enable_grad(),
):
record_runtime_wrapper_prologue_exit(cm)
all_outs = call_func_at_runtime_with_args(
compiled_fn, args_, disable_amp=disable_amp, steal_args=True
)
else:
# When we have an inference graph, we run with grad disabled.
# It's possible to get an inference graph with inputs that require grad,
# in which case we want to make sure autograd is disabled
# (since e.g., inductor will generate aten.addmm.out calls which autograd will complain on)
# NOTE: We use _set_grad_enabled directly to reduce runtime overhead
grad_enabled = torch.is_grad_enabled()
try:
if grad_enabled:
torch._C._set_grad_enabled(False)
record_runtime_wrapper_prologue_exit(cm)
all_outs = call_func_at_runtime_with_args(
compiled_fn, args, disable_amp=disable_amp, steal_args=True
)
finally:
if grad_enabled:
torch._C._set_grad_enabled(True)
del args
num_mutated_runtime_inps = runtime_metadata.num_mutated_inp_runtime_indices
num_intermediate_bases = runtime_metadata.num_intermediate_bases
assert (
len(all_outs)
== num_mutated_runtime_inps
+ runtime_metadata.num_outputs
+ num_intermediate_bases
)
# Step 3: After running the compiled fw, apply updates to mutated inputs
num_mutations_to_apply = runtime_metadata.num_mutated_inp_runtime_indices
if num_mutations_to_apply > 0:
updated_inputs = all_outs[:num_mutations_to_apply]
fw_outs = all_outs[num_mutations_to_apply:]
for i, inpt_idx in enumerate(runtime_metadata.mutated_inp_runtime_indices):
meta = runtime_metadata.input_info[inpt_idx]
if not meta.mutates_data and not meta.mutates_metadata:
continue
original_inpt = orig_inputs[inpt_idx]
updated_inpt = updated_inputs[i]
if meta.mutates_storage_metadata:
# See Note [set_() Input Mutations in AOTAutograd]
# mutates_storage_metadata means our input saw a x.set_(y) call.
# What if x **also** saw a data and/or a metadata mutation?
# (1) If the [meta]data mutation occurred after the set_(),
# then there is no need to copy_() the data.
# When we perform x.set_(x_updated), we are guaranteed that
# x_updated already has the final version of the data/metadata
# (2) If a data mutation occurred before the set_().
# This case seems very difficult to support.
# TODO: discuss on the PR and decide if we want to tr to
# either support it, or detect and ban it.
if trace_joint:
assert isinstance(updated_inpt, TensorAlias)
updated_inpt = updated_inpt.alias
with torch.no_grad():
original_inpt.set_(updated_inpt)
continue
if meta.mutates_metadata and not meta.mutates_data:
if trace_joint:
assert isinstance(updated_inpt, TensorAlias)
updated_inpt = updated_inpt.alias
# We need to grab the size/stride/storage_offset from the compiled forward,
# and use that to mutate the metadata of the input
original_inpt.as_strided_(
updated_inpt.size(),
updated_inpt.stride(),
updated_inpt.storage_offset(),
)
else:
if meta.mutates_data and meta.mutates_metadata:
original_inpt.as_strided_(
updated_inpt.size(),
updated_inpt.stride(),
updated_inpt.storage_offset(),
)
else:
assert meta.mutates_data
if meta.is_leaf and original_inpt.requires_grad:
# We can hit this situation in this case:
# def f(x):
# x.detach().mul_(2)
# return x + 1
# AOTAutograd will see a mutation in the above case, and try to
# apply a copy_() here, in the epilogue.
# But if x required gradients, and is a leaf, then autograd
# will yell at us for trying to mutate it.
# However, it's only possible to end up in this scenario (like the above)
# if all of the mutations to the leaf input were non-autograd-tracking mutations
# (aka mutations under no_grad(), or on detached views).
# In that case, we fully want to hide the mutation from autograd, so detaching is ok.
original_inpt.detach().copy_(updated_inpt)
else:
original_inpt.copy_(updated_inpt)
else:
fw_outs = all_outs
# Step 4: Manually regenerate any outputs that are aliased to inputs, instead of
# compiling them.
if runtime_metadata.num_outputs_aliased > 0:
# The compiled forward also returned intermediate bases. We don't want to return them to the user.
expect_num_outputs = (
len(output_handlers) + runtime_metadata.num_intermediate_bases
)
assert len(fw_outs) == expect_num_outputs
ret_outs = [
handler(orig_inputs, fw_outs, out)
for out, handler in builtins.zip(fw_outs, output_handlers)
]
else:
ret_outs = fw_outs
if runtime_metadata.dynamic_outputs:
for t, o in zip(ret_outs, runtime_metadata.output_info):
if o.dynamic_dims is None:
continue
maybe_mark_dynamic_helper(t, o.dynamic_dims)
if runtime_metadata.grad_enabled_mutation is not None:
torch._C._set_grad_enabled(runtime_metadata.grad_enabled_mutation)
return ret_outs
if not (trace_joint and _should_disable_saved_tensors_hooks()):
return runtime_wrapper
# Disabling saved tensors hooks
@simple_wraps(runtime_wrapper)
def _runtime_wrapper(*args, **kwargs):
with _disable_saved_tensors_hooks():
return runtime_wrapper(*args, **kwargs)
return _runtime_wrapper
# WARNING: this does NOT operate on TraceFn
@dataclass
| AliasOfIntermediateHandler |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_django/DJ012.py | {
"start": 1409,
"end": 1717
} | class ____(models.Model):
"""Model with an assignment to a constant after `__str__`."""
first_name = models.CharField(max_length=32)
class Meta:
verbose_name = "test"
verbose_name_plural = "tests"
def __str__(self):
pass
MY_CONSTANT = id(1)
| ConstantsAreNotFields |
python | pandas-dev__pandas | pandas/errors/__init__.py | {
"start": 28745,
"end": 29849
} | class ____(Warning):
"""
Warning raised when index attributes conflict when using HDFStore.
Occurs when attempting to append an index with a different
name than the existing index on an HDFStore or attempting to append an index with a
different frequency than the existing index on an HDFStore.
See Also
--------
HDFStore : Dict-like IO interface for storing pandas objects in PyTables.
DataFrame.to_hdf : Write the contained data to an HDF5 file using HDFStore.
read_hdf : Read from an HDF5 file into a DataFrame.
Examples
--------
>>> idx1 = pd.Index(["a", "b"], name="name1")
>>> df1 = pd.DataFrame([[1, 2], [3, 4]], index=idx1)
>>> df1.to_hdf("file", "data", "w", append=True) # doctest: +SKIP
>>> idx2 = pd.Index(["c", "d"], name="name2")
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], index=idx2)
>>> df2.to_hdf("file", "data", "a", append=True) # doctest: +SKIP
AttributeConflictWarning: the [index_name] attribute of the existing index is
[name1] which conflicts with the new [name2]...
"""
| AttributeConflictWarning |
python | keras-team__keras | keras/src/layers/core/masking_test.py | {
"start": 201,
"end": 2468
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_masking_basics(self):
self.run_layer_test(
layers.Masking,
init_kwargs={"mask_value": 0.0},
input_shape=(2, 3, 2),
expected_output_shape=(2, 3, 2),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
@pytest.mark.requires_trainable_backend
def test_masking_correctness(self):
x = np.array(
[
[[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]],
[[2.0, 2.0], [0.0, 0.0], [2.0, 1.0]],
]
)
expected_mask = [[False, True, False], [True, False, True]]
layer = layers.Masking(mask_value=0.0)
self.assertAllClose(layer.compute_mask(x), expected_mask)
test_obj = self
class TestLayer(layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
def compute_output_shape(self, input_shape):
return input_shape
def call(self, inputs, mask=None):
assert mask is not None
test_obj.assertAllClose(mask, expected_mask)
return inputs
model = models.Sequential(
[
layers.Masking(mask_value=0.0),
TestLayer(),
]
)
model(x)
@pytest.mark.requires_trainable_backend
def test_masking_with_tensor(self):
model = models.Sequential(
[
layers.Masking(mask_value=ops.convert_to_tensor([0.0])),
layers.LSTM(1),
]
)
x = np.array(
[
[[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]],
[[2.0, 2.0], [0.0, 0.0], [2.0, 1.0]],
]
)
model(x)
temp_filepath = os.path.join(self.get_temp_dir(), "model.keras")
model.save(temp_filepath)
reload_model = load_model(temp_filepath)
reload_model(x)
| MaskingTest |
python | tensorflow__tensorflow | tensorflow/python/ops/math_ops_test.py | {
"start": 44060,
"end": 45559
} | class ____(test_util.TensorFlowTestCase):
def testXdivyNoZero(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant([[0.1, 0.2, 3.5], [-2., -5., 30.]], dtype=dtype)
y = constant_op.constant([[0.1, 0.2, 3.5], [3.1, 4., 2.]], dtype=dtype)
with test_util.use_gpu():
xdivy = self.evaluate(math_ops.xdivy(x, y))
x_over_y = self.evaluate(x / y)
self.assertAllClose(xdivy, x_over_y)
def testXdivyWithZero(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(np.zeros((2, 3)), dtype=dtype)
y = constant_op.constant([[0.1, 0.2, 3.5], [0., 1., 2.]], dtype=dtype)
with test_util.use_gpu():
xdivy_tf_np = self.evaluate(math_ops.xdivy(x, y))
zeros_np = self.evaluate(array_ops.zeros_like(y))
self.assertAllClose(xdivy_tf_np, zeros_np)
def testXdivyWithZeroBroadcast(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant([[0.], [1.]], dtype=dtype)
y = constant_op.constant([[0.1, 0.2, 3.5], [0., 1., 2.]], dtype=dtype)
with test_util.use_gpu():
xdivy_tf_np = self.evaluate(math_ops.xdivy(x, y))
zeros_np = self.evaluate(array_ops.zeros_like(y[0]))
x_over_y = self.evaluate(1 / y[1])
self.assertAllClose(zeros_np, xdivy_tf_np[0])
self.assertAllClose(x_over_y, xdivy_tf_np[1])
@test_util.run_all_in_graph_and_eager_modes
| XdivyTest |
python | astropy__astropy | astropy/table/column.py | {
"start": 18720,
"end": 41330
} | class ____(_ColumnGetitemShim, np.ndarray):
meta = MetaData(default_factory=dict)
def __new__(
cls,
data=None,
name=None,
dtype=None,
shape=(),
length=0,
description=None,
unit=None,
format=None,
meta=None,
copy=COPY_IF_NEEDED,
copy_indices=True,
):
if data is None:
self_data = np.zeros((length,) + shape, dtype=dtype)
elif isinstance(data, BaseColumn) and hasattr(data, "_name"):
# When unpickling a MaskedColumn, ``data`` will be a bare
# BaseColumn with none of the expected attributes. In this case
# do NOT execute this block which initializes from ``data``
# attributes.
self_data = np.array(data.data, dtype=dtype, copy=copy)
if description is None:
description = data.description
if unit is None:
unit = unit or data.unit
if format is None:
format = data.format
if meta is None:
meta = data.meta
if name is None:
name = data.name
elif isinstance(data, Quantity):
if unit is None:
self_data = np.array(data, dtype=dtype, copy=copy)
unit = data.unit
else:
self_data = Quantity(data, unit, dtype=dtype, copy=copy).value
# If 'info' has been defined, copy basic properties (if needed).
if "info" in data.__dict__:
if description is None:
description = data.info.description
if format is None:
format = data.info.format
if meta is None:
meta = data.info.meta
if name is None:
name = data.info.name
else:
if np.dtype(dtype).char == "S":
data = cls._encode_str(data)
self_data = np.array(data, dtype=dtype, copy=copy)
self = self_data.view(cls)
self._name = None if name is None else str(name)
self._parent_table = None
self.unit = unit
self._format = format
self.description = description
self.meta = meta
self.indices = deepcopy(getattr(data, "indices", [])) if copy_indices else []
for index in self.indices:
index.replace_col(data, self)
return self
@property
def data(self):
return self.view(np.ndarray)
@property
def value(self):
"""
An alias for the existing ``data`` attribute.
"""
return self.data
@property
def parent_table(self):
# Note: It seems there are some cases where _parent_table is not set,
# such after restoring from a pickled Column. Perhaps that should be
# fixed, but this is also okay for now.
if getattr(self, "_parent_table", None) is None:
return None
else:
return self._parent_table()
@parent_table.setter
def parent_table(self, table):
if table is None:
self._parent_table = None
else:
self._parent_table = weakref.ref(table)
info = ColumnInfo()
def copy(self, order="C", data=None, copy_data=True):
"""
Return a copy of the current instance.
If ``data`` is supplied then a view (reference) of ``data`` is used,
and ``copy_data`` is ignored.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible. (Note that this function and :func:numpy.copy are very
similar, but have different default values for their order=
arguments.) Default is 'C'.
data : array, optional
If supplied then use a view of ``data`` instead of the instance
data. This allows copying the instance attributes and meta.
copy_data : bool, optional
Make a copy of the internal numpy array instead of using a
reference. Default is True.
Returns
-------
col : Column or MaskedColumn
Copy of the current column (same type as original)
"""
if data is None:
data = self.data
if copy_data:
data = data.copy(order)
out = data.view(self.__class__)
out.__array_finalize__(self)
# If there is meta on the original column then deepcopy (since "copy" of column
# implies complete independence from original). __array_finalize__ will have already
# made a light copy. I'm not sure how to avoid that initial light copy.
if self.meta is not None:
out.meta = self.meta # MetaData descriptor does a deepcopy here
# for MaskedColumn, MaskedArray.__array_finalize__ also copies mask
# from self, which is not the idea here, so undo
if isinstance(self, MaskedColumn):
out._mask = data._mask
self._copy_groups(out)
return out
def __setstate__(self, state):
"""
Restore the internal state of the Column/MaskedColumn for pickling
purposes. This requires that the last element of ``state`` is a
5-tuple that has Column-specific state values.
"""
# Get the Column attributes
names = ("_name", "_unit", "_format", "description", "meta", "indices")
attrs = dict(zip(names, state[-1]))
state = state[:-1]
# Using super().__setstate__(state) gives
# "TypeError 'int' object is not iterable", raised in
# astropy.table._column_mixins._ColumnGetitemShim.__setstate_cython__()
# Previously, it seems to have given an infinite recursion.
# Hence, manually call the right super class to actually set up
# the array object.
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
super_class.__setstate__(self, state)
# Set the Column attributes
for name, val in attrs.items():
setattr(self, name, val)
self._parent_table = None
def __reduce__(self):
"""
Return a 3-tuple for pickling a Column. Use the super-class
functionality but then add in a 5-tuple of Column-specific values
that get used in __setstate__.
"""
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self)
# Define Column-specific attrs and meta that gets added to state.
column_state = (
self.name,
self.unit,
self.format,
self.description,
self.meta,
self.indices,
)
state = state + (column_state,)
return reconstruct_func, reconstruct_func_args, state
def __array_finalize__(self, obj):
# Obj will be none for direct call to Column() creator
if obj is None:
return
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
self.parent_table = None
if not hasattr(self, "indices"): # may have been copied in __new__
self.indices = []
self._copy_attrs(obj)
if "info" in getattr(obj, "__dict__", {}):
self.info = obj.info
def __array_wrap__(self, out_arr, context=None, return_scalar=False):
"""
__array_wrap__ is called at the end of every ufunc.
Normally, we want a Column object back and do not have to do anything
special. But there are two exceptions:
1) If the output shape is different (e.g. for reduction ufuncs
like sum() or mean()), a Column still linking to a parent_table
makes little sense, so we return the output viewed as the
column content (ndarray or MaskedArray).
For this case, if numpy tells us to ``return_scalar`` (for numpy
>= 2.0, otherwise assume to be true), we use "[()]" to ensure we
convert a zero rank array to a scalar. (For some reason np.sum()
returns a zero rank scalar array while np.mean() returns a scalar;
So the [()] is needed for this case.)
2) When the output is created by any function that returns a boolean
we also want to consistently return an array rather than a column
(see #1446 and #1685)
"""
if NUMPY_LT_2_0:
out_arr = super().__array_wrap__(out_arr, context)
return_scalar = True
else:
out_arr = super().__array_wrap__(out_arr, context, return_scalar)
if self.shape != out_arr.shape or (
isinstance(out_arr, BaseColumn)
and (context is not None and context[0] in _comparison_functions)
):
return out_arr.data[()] if return_scalar else out_arr.data
else:
return out_arr
@property
def name(self):
"""
The name of this column.
"""
return self._name
@name.setter
def name(self, val: str | None):
if isinstance(val, str):
val = str(val)
elif val is not None:
raise TypeError(
f"Expected a str value, got {val} with type {type(val).__name__}"
)
if self.parent_table is not None:
table = self.parent_table
table.columns._rename_column(self.name, val)
self._name = val
@property
def format(self):
"""
Format string for displaying values in this column.
"""
return self._format
@format.setter
def format(self, format_string):
prev_format = getattr(self, "_format", None)
self._format = format_string # set new format string
try:
# test whether it formats without error exemplarily
self.pformat(max_lines=1)
except Exception as err:
# revert to restore previous format if there was one
self._format = prev_format
raise ValueError(
f"Invalid format for column '{self.name}': could not display "
"values in this column using this format"
) from err
@property
def descr(self):
"""Array-interface compliant full description of the column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
return (self.name, self.dtype.str, self.shape[1:])
def iter_str_vals(self):
"""
Return an iterator that yields the string-formatted values of this
column.
Returns
-------
str_vals : iterator
Column values formatted as strings
"""
# Iterate over formatted values with no max number of lines, no column
# name, no unit, and ignoring the returned header info in outs.
_pformat_col_iter = self._formatter._pformat_col_iter
yield from _pformat_col_iter(
self, -1, show_name=False, show_unit=False, show_dtype=False, outs={}
)
def attrs_equal(self, col):
"""Compare the column attributes of ``col`` to this object.
The comparison attributes are: ``name``, ``unit``, ``dtype``,
``format``, ``description``, and ``meta``.
Parameters
----------
col : Column
Comparison column
Returns
-------
equal : bool
True if all attributes are equal
"""
if not isinstance(col, BaseColumn):
raise ValueError("Comparison `col` must be a Column or MaskedColumn object")
attrs = ("name", "unit", "dtype", "format", "description", "meta")
equal = all(getattr(self, x) == getattr(col, x) for x in attrs)
return equal
@property
def _formatter(self):
return FORMATTER if (self.parent_table is None) else self.parent_table.formatter
def pformat(
self,
max_lines=-1,
show_name=True,
show_unit=False,
show_dtype=False,
html=False,
):
"""Return a list of formatted string representation of column values.
If ``max_lines=None`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied (default).
Parameters
----------
max_lines : int or None
Maximum lines of output (header + data rows).
-1 (default) implies no limit, ``None`` implies using the
height of the current terminal.
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is False.
html : bool
Format the output as an HTML table. Default is False.
Returns
-------
lines : list
List of lines with header and formatted column values
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(
self,
max_lines,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
html=html,
)
return lines
def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False):
"""Print a formatted string representation of column values.
If ``max_lines=None`` (default) then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum number of values in output
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is True.
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(
self,
max_lines,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
)
n_header = outs["n_header"]
for i, line in enumerate(lines):
if i < n_header:
color_print(line, "red")
else:
print(line)
def more(self, max_lines=None, show_name=True, show_unit=False):
"""Interactively browse column with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
"""
_more_tabcol = self._formatter._more_tabcol
_more_tabcol(
self, max_lines=max_lines, show_name=show_name, show_unit=show_unit
)
@property
def unit(self):
"""
The unit associated with this column. May be a string or a
`astropy.units.UnitBase` instance.
Setting the ``unit`` property does not change the values of the
data. To perform a unit conversion, use ``convert_unit_to``.
"""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
else:
self._unit = Unit(unit, parse_strict="silent")
@unit.deleter
def unit(self):
self._unit = None
def searchsorted(self, v, side="left", sorter=None):
# For bytes type data, encode the `v` value as UTF-8 (if necessary) before
# calling searchsorted. This prevents a factor of 1000 slowdown in
# searchsorted in this case.
a = self.data
if a.dtype.kind == "S" and not isinstance(v, bytes):
v = np.asarray(v)
if v.dtype.kind == "U":
v = np.char.encode(v, "utf-8")
return np.searchsorted(a, v, side=side, sorter=sorter)
searchsorted.__doc__ = np.ndarray.searchsorted.__doc__
def convert_unit_to(self, new_unit, equivalencies=[]):
"""
Converts the values of the column in-place from the current
unit to the given unit.
To change the unit associated with this column without
actually changing the data values, simply set the ``unit``
property.
Parameters
----------
new_unit : str or `astropy.units.UnitBase` instance
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the unit are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
Raises
------
astropy.units.UnitsError
If units are inconsistent
"""
if self.unit is None:
raise ValueError("No unit set on column")
self.data[:] = self.unit.to(new_unit, self.data, equivalencies=equivalencies)
self.unit = new_unit
@property
def groups(self):
if not hasattr(self, "_groups"):
self._groups = groups.ColumnGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this column by the specified ``keys``.
This effectively splits the column into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`Column` or `MaskedColumn` which contains a copy of this column but
sorted by row according to ``keys``.
The ``keys`` input to ``group_by`` must be a numpy array with the
same length as this column.
Parameters
----------
keys : numpy array
Key grouping object
Returns
-------
out : Column
New column with groups attribute set accordingly
"""
return groups.column_group_by(self, keys)
def _copy_groups(self, out):
"""
Copy current groups into a copy of self ``out``.
"""
if self.parent_table:
if hasattr(self.parent_table, "_groups"):
out._groups = groups.ColumnGroups(
out, indices=self.parent_table._groups._indices
)
elif hasattr(self, "_groups"):
out._groups = groups.ColumnGroups(out, indices=self._groups._indices)
# Strip off the BaseColumn-ness for repr and str so that
# MaskedColumn.data __repr__ does not include masked_BaseColumn(data =
# [1 2], ...).
def __repr__(self):
return np.asarray(self).__repr__()
@property
def quantity(self):
"""
A view of this table column as a `~astropy.units.Quantity` object with
units given by the Column's `unit` parameter.
"""
# the Quantity initializer is used here because it correctly fails
# if the column's values are non-numeric (like strings), while .view
# will happily return a quantity with gibberish for numerical values
return Quantity(
self, self.unit, copy=False, dtype=self.dtype, order="A", subok=True
)
def to(self, unit, equivalencies=[], **kwargs):
"""
Converts this table column to a `~astropy.units.Quantity` object with
the requested units.
Parameters
----------
unit : unit-like
The unit to convert to (i.e., a valid argument to the
:meth:`astropy.units.Quantity.to` method).
equivalencies : list of tuple
Equivalencies to use for this conversion. See
:meth:`astropy.units.Quantity.to` for more details.
Returns
-------
quantity : `~astropy.units.Quantity`
A quantity object with the contents of this column in the units
``unit``.
"""
return self.quantity.to(unit, equivalencies)
def _copy_attrs(self, obj):
"""
Copy key column attributes from ``obj`` to self.
"""
for attr in ("name", "unit", "_format", "description"):
val = getattr(obj, attr, None)
setattr(self, attr, val)
# Light copy of meta if it is not empty
obj_meta = getattr(obj, "meta", None)
if obj_meta:
self.meta = obj_meta.copy()
@staticmethod
def _encode_str(value):
"""
Encode anything that is unicode-ish as utf-8. This method is only
called for Py3+.
"""
if isinstance(value, str):
value = value.encode("utf-8")
elif isinstance(value, bytes) or value is np.ma.masked:
pass
else:
arr = np.asarray(value)
if arr.dtype.char == "U":
arr = np.char.encode(arr, encoding="utf-8")
if isinstance(value, np.ma.MaskedArray):
arr = np.ma.array(arr, mask=value.mask, copy=False)
value = arr
return value
def tolist(self):
if self.dtype.kind == "S":
return np.char.chararray.decode(self, encoding="utf-8").tolist()
else:
return super().tolist()
| BaseColumn |
python | pytorch__pytorch | torch/testing/_internal/common_utils.py | {
"start": 119246,
"end": 119837
} | class ____(unittest.case._AssertRaisesContext):
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None and issubclass(exc_type, NotImplementedError):
self.test_case.skipTest(f"not_implemented: {exc_value}") # type: ignore[attr-defined]
return super().__exit__(exc_type, exc_value, tb)
@contextmanager
def set_warn_always_context(new_val: bool):
old_val = torch.is_warn_always_enabled()
torch.set_warn_always(new_val)
try:
yield
finally:
torch.set_warn_always(old_val)
| AssertRaisesContextIgnoreNotImplementedError |
python | doocs__leetcode | solution/2400-2499/2402.Meeting Rooms III/Solution.py | {
"start": 0,
"end": 699
} | class ____:
def mostBooked(self, n: int, meetings: List[List[int]]) -> int:
meetings.sort()
busy = []
idle = list(range(n))
heapify(idle)
cnt = [0] * n
for s, e in meetings:
while busy and busy[0][0] <= s:
heappush(idle, heappop(busy)[1])
if idle:
i = heappop(idle)
cnt[i] += 1
heappush(busy, (e, i))
else:
a, i = heappop(busy)
cnt[i] += 1
heappush(busy, (a + e - s, i))
ans = 0
for i, v in enumerate(cnt):
if cnt[ans] < v:
ans = i
return ans
| Solution |
python | getsentry__sentry | src/sentry/notifications/notifications/activity/release.py | {
"start": 1054,
"end": 7834
} | class ____(ActivityNotification):
metrics_key = "release_activity"
notification_setting_type_enum = NotificationSettingEnum.DEPLOY
template_path = "sentry/emails/activity/release"
def __init__(self, activity: Activity) -> None:
super().__init__(activity)
self.user_id_team_lookup: Mapping[int, list[int]] | None = None
self.deploy = get_deploy(activity)
self.release = get_release(activity, self.organization)
if not self.release:
self.email_list: set[str] = set()
self.repos: Iterable[Mapping[str, Any]] = set()
self.projects: set[Project] = set()
self.version = "unknown"
self.version_parsed = self.version
self.user_ids = set()
return
self.projects = set(self.release.projects.all())
self.commit_list = list(Commit.objects.get_for_release(self.release))
self.email_list = {c.author.email for c in self.commit_list if c.author}
users = user_service.get_many_by_email(
emails=list(self.email_list),
organization_id=self.organization.id,
is_verified=True,
)
self.user_ids = {u.id for u in users}
self.repos = get_repos(self.commit_list, {u.email: u for u in users}, self.organization)
self.environment = get_environment_for_deploy(self.deploy)
self.group_counts_by_project = get_group_counts_by_project(self.release, self.projects)
self.version = self.release.version
self.version_parsed = parse_release(self.version, json_loads=orjson.loads)["description"]
def get_participants_with_group_subscription_reason(self) -> ParticipantMap:
return get_participants_for_release(self.projects, self.organization, self.user_ids)
def get_users_by_teams(self) -> Mapping[int, list[int]]:
if not self.user_id_team_lookup:
lookup = OrganizationMember.objects.get_teams_by_user(self.organization)
self.user_id_team_lookup = lookup
return self.user_id_team_lookup
def get_context(self) -> MutableMapping[str, Any]:
return {
**self.get_base_context(),
"author_count": len(self.email_list),
"commit_count": len(self.commit_list),
"deploy": self.deploy,
"environment": self.environment,
"file_count": CommitFileChange.objects.get_count_for_commits(self.commit_list),
"release": self.release,
"repos": self.repos,
"setup_repo_link": self.organization.absolute_url(
f"/organizations/{self.organization.slug}/repos/",
query=urlencode(
{"referrer": self.metrics_key, "notification_uuid": self.notification_uuid}
),
),
"text_description": f"Version {self.version_parsed} was deployed to {self.environment}",
"version_parsed": self.version_parsed,
}
def get_projects(self, recipient: Actor) -> set[Project]:
if not self.release:
return set()
if recipient.is_user:
if self.organization.flags.allow_joinleave:
return self.projects
team_ids = self.get_users_by_teams()[recipient.id]
else:
team_ids = [recipient.id]
projects = Project.objects.get_for_team_ids(team_ids).filter(
id__in={p.id for p in self.projects}
)
return set(projects)
def get_recipient_context(
self, recipient: Actor, extra_context: Mapping[str, Any]
) -> MutableMapping[str, Any]:
projects = self.get_projects(recipient)
release_links = [
self.organization.absolute_url(
f"/organizations/{self.organization.slug}/releases/{self.version}/?project={p.id}",
query=urlencode(
{"referrer": self.metrics_key, "notification_uuid": self.notification_uuid}
),
)
for p in projects
]
resolved_issue_counts = [self.group_counts_by_project.get(p.id, 0) for p in projects]
return {
**super().get_recipient_context(recipient, extra_context),
"projects": list(zip(projects, release_links, resolved_issue_counts)),
"project_count": len(projects),
}
def get_subject(self, context: Mapping[str, Any] | None = None) -> str:
return f"Deployed version {self.version_parsed} to {self.environment}"
@property
def title(self) -> str:
return self.get_subject()
def get_notification_title(
self, provider: ExternalProviders, context: Mapping[str, Any] | None = None
) -> str:
projects_text = ""
if len(self.projects) == 1:
projects_text = " for this project"
elif len(self.projects) > 1:
projects_text = " for these projects"
return f"Release {self.version_parsed} was deployed to {self.environment}{projects_text}"
def get_message_actions(
self, recipient: Actor, provider: ExternalProviders
) -> Sequence[MessageAction]:
if self.release:
release = get_release(self.activity, self.project.organization)
if release:
return [
MessageAction(
name=project.slug,
label=project.slug,
url=self.organization.absolute_url(
f"/organizations/{project.organization.slug}/releases/{release.version}/",
query=f"project={project.id}&unselectedSeries=Healthy&referrer={self.metrics_key}¬ification_uuid={self.notification_uuid}",
),
)
for project in self.release.projects.all()
]
return []
def build_attachment_title(self, recipient: Actor) -> str:
return ""
def get_title_link(self, recipient: Actor, provider: ExternalProviders) -> str | None:
return None
def build_notification_footer(self, recipient: Actor, provider: ExternalProviders) -> str:
settings_url = self.get_settings_url(recipient, provider)
# no environment related to a deploy
footer = ""
if self.release:
footer += f"{self.release.projects.all()[0].slug} | "
footer += (
f"{self.format_url(text='Notification Settings', url=settings_url, provider=provider)}"
)
return footer
def send(self) -> None:
# Don't create a message when the Activity doesn't have a release and deploy.
if bool(self.release and self.deploy):
return super().send()
| ReleaseActivityNotification |
python | facelessuser__soupsieve | tests/test_level1/test_pseudo_element.py | {
"start": 63,
"end": 320
} | class ____(util.TestCase):
"""Test pseudo-elements."""
def test_pseudo_element(self):
"""Test that pseudo elements always fail because they are not supported."""
self.assert_raises('::first-line', NotImplementedError)
| TestPseudoElement |
python | django__django | django/contrib/staticfiles/testing.py | {
"start": 112,
"end": 463
} | class ____(LiveServerTestCase):
"""
Extend django.test.LiveServerTestCase to transparently overlay at test
execution-time the assets provided by the staticfiles app finders. This
means you don't need to run collectstatic before or as a part of your tests
setup.
"""
static_handler = StaticFilesHandler
| StaticLiveServerTestCase |
python | getsentry__sentry | src/sentry/api/endpoints/release_thresholds/release_threshold_index.py | {
"start": 667,
"end": 779
} | class ____(TypedDict, total=False):
environment: list[str]
project: list[int]
| ReleaseThresholdIndexGETData |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/static_analysis/reaching_definitions_py3_test.py | {
"start": 895,
"end": 2576
} | class ____(
reaching_definitions_test.ReachingDefinitionsAnalyzerTestBase):
"""Tests which can only run in Python 3."""
def test_nonlocal(self):
a = 3
b = 13
def test_fn():
nonlocal a
nonlocal b
if a:
b = []
return a, b
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[2].test, 1)
self.assertHasDefs(fn_body[2].body[0].targets[0], 1)
self.assertHasDefs(fn_body[3].value.elts[0], 1)
self.assertHasDefs(fn_body[3].value.elts[1], 2)
self.assertSameDef(fn_body[2].test, fn_body[3].value.elts[0])
self.assertHasDefinedIn(fn_body[2], ('a', 'b'))
def test_nonlocal_in_nested_function(self):
a = 3
b = 13
def test_fn():
a = 3
b = 13
def local_fn():
nonlocal a, b
if a:
b = []
return a, b
return local_fn()
node = self._parse_and_analyze(test_fn)
local_body = node.body[2].body
self.assertHasDefs(local_body[1].test, 1)
self.assertHasDefs(local_body[1].body[0].targets[0], 1)
self.assertHasDefs(local_body[2].value.elts[0], 1)
self.assertHasDefs(local_body[2].value.elts[1], 2)
self.assertSameDef(local_body[1].test, local_body[2].value.elts[0])
# Note: the function name is visible inside the function body. But it's
# a closure variable, not a local.
#
# Example:
#
# >>> def f():
# ... print(f)
# >>> g = f
# >>> f = 'something else'
# >>> g()
# something else
#
self.assertHasDefinedIn(local_body[1], ('a', 'b'))
if __name__ == '__main__':
test.main()
| ReachingDefinitionsAnalyzerTest |
python | pypa__warehouse | warehouse/db.py | {
"start": 2318,
"end": 2920
} | class ____(DeclarativeBase):
"""Base class for models using declarative syntax."""
metadata = metadata
type_annotation_map = {
# All of our enums prefer the `.value` for database persistence
# instead of `.name`, which is the default.
enum.Enum: sqlalchemy.Enum(
enum.Enum, values_callable=lambda x: [e.value for e in x]
),
}
def __repr__(self):
inst = inspect(self)
self.__repr__ = make_repr(
*[c_attr.key for c_attr in inst.mapper.column_attrs], _self=self
)
return self.__repr__()
| ModelBase |
python | huggingface__transformers | src/transformers/models/granitemoeshared/modular_granitemoeshared.py | {
"start": 5058,
"end": 5353
} | class ____(GraniteMoeModel):
def __init__(self, config: GraniteMoeSharedConfig):
super().__init__(config)
self.layers = nn.ModuleList(
[GraniteMoeSharedDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
| GraniteMoeSharedModel |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 36666,
"end": 36880
} | class ____(PrefectBaseModel, extra="allow"):
type: str = Field(default=..., description="The type of version info.")
version: str = Field(default=..., description="The version of the deployment.")
| VersionInfo |
python | pytest-dev__pytest | testing/test_argcomplete.py | {
"start": 2239,
"end": 3241
} | class ____:
@pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
def test_compare_with_compgen(
self, tmp_path: Path, monkeypatch: MonkeyPatch
) -> None:
from _pytest._argcomplete import FastFilesCompleter
ffc = FastFilesCompleter()
fc = FilesCompleter()
monkeypatch.chdir(tmp_path)
assert equal_with_bash("", ffc, fc, out=sys.stdout)
tmp_path.cwd().joinpath("data").touch()
for x in ["d", "data", "doesnotexist", ""]:
assert equal_with_bash(x, ffc, fc, out=sys.stdout)
@pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
def test_remove_dir_prefix(self):
"""This is not compatible with compgen but it is with bash itself: ls /usr/<TAB>."""
from _pytest._argcomplete import FastFilesCompleter
ffc = FastFilesCompleter()
fc = FilesCompleter()
for x in "/usr/".split():
assert not equal_with_bash(x, ffc, fc, out=sys.stdout)
| TestArgComplete |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/colliding_class_names.py | {
"start": 216,
"end": 272
} | class ____:
def foo():
return _test_source()
| C |
python | django__django | tests/generic_relations_regress/models.py | {
"start": 636,
"end": 816
} | class ____(models.Model):
name = models.CharField(max_length=100)
links = GenericRelation(Link, related_query_name="places")
link_proxy = GenericRelation(LinkProxy)
| Place |
python | pypa__installer | src/installer/scripts.py | {
"start": 1272,
"end": 5551
} | class ____:
"""Describes a script based on an entry point declaration."""
name: str
"""Name of the script."""
module: str
"""Module path, to load the entry point from."""
attr: str
"""Final attribute access, for the entry point."""
section: "ScriptSection" = field(repr=False)
"""
Denotes the "entry point section" where this was specified. Valid values
are ``"gui"`` and ``"console"``.
"""
def _get_launcher_data(self, kind: "LauncherKind") -> Optional[bytes]:
if kind == "posix":
return None
key = (self.section, kind)
try:
name = _ALLOWED_LAUNCHERS[key]
except KeyError:
error = f"{key!r} not in {sorted(_ALLOWED_LAUNCHERS)!r}"
raise InvalidScript(error) from None
return (files(_scripts) / name).read_bytes()
def _get_alternate_executable(self, executable: str, kind: "LauncherKind") -> str:
"""Get an alternate executable for the launcher.
On Windows, when the script section is gui-script, pythonw.exe should be used.
"""
if self.section == "gui" and kind != "posix":
dn, fn = os.path.split(executable)
fn = fn.replace("python", "pythonw")
executable = os.path.join(dn, fn) # noqa: PTH118
return executable
def generate(self, executable: str, kind: "LauncherKind") -> tuple[str, bytes]:
"""Generate a launcher for this script.
:param executable: Path to the executable to invoke.
:param kind: Which launcher template should be used.
Valid values are ``"posix"``, ``"win-ia32"``, ``"win-amd64"`` and
``"win-arm"``.
:type kind: str
:raises InvalidScript: if no appropriate template is available.
:return: The name and contents of the launcher file.
"""
launcher = self._get_launcher_data(kind)
executable = self._get_alternate_executable(executable, kind)
shebang = self._build_shebang(executable, forlauncher=bool(launcher))
code = _SCRIPT_TEMPLATE.format(
module=self.module,
import_name=self.attr.split(".")[0],
func_path=self.attr,
).encode("utf-8")
if launcher is None:
return self.name, shebang + b"\n" + code
stream = io.BytesIO()
with zipfile.ZipFile(stream, "w") as zf:
zf.writestr("__main__.py", code)
name = f"{self.name}.exe"
data = launcher + shebang + b"\n" + stream.getvalue()
return name, data
@staticmethod
def _is_executable_simple(executable: bytes) -> bool:
if b" " in executable:
return False
shebang_length = len(executable) + 3 # Prefix #! and newline after.
# According to distlib, Darwin can handle up to 512 characters. But I want
# to avoid platform sniffing to make this as platform-agnostic as possible.
# The "complex" script isn't that bad anyway.
return shebang_length <= 127
def _build_shebang(self, executable: str, forlauncher: bool) -> bytes:
"""Build a shebang line.
The non-launcher cases are taken directly from distlib's implementation,
which tries its best to account for command length, spaces in path, etc.
https://bitbucket.org/pypa/distlib/src/58cd5c6/distlib/scripts.py#lines-124
"""
executable_bytes = executable.encode("utf-8")
if forlauncher: # The launcher can just use the command as-is.
return b"#!" + executable_bytes
if self._is_executable_simple(executable_bytes):
return b"#!" + executable_bytes
# Shebang support for an executable with a space in it is under-specified
# and platform-dependent, so we use a clever hack to generate a script to
# run in ``/bin/sh`` that should work on all reasonably modern platforms.
# Read the following message to understand how the hack works:
# https://github.com/pypa/installer/pull/4#issuecomment-623668717
quoted = shlex.quote(executable).encode("utf-8")
# I don't understand a lick what this is trying to do.
return b"#!/bin/sh\n'''exec' " + quoted + b' "$0" "$@"\n' + b"' '''"
| Script |
python | huggingface__transformers | src/transformers/models/videomae/modeling_videomae.py | {
"start": 1691,
"end": 2198
} | class ____(ModelOutput):
r"""
logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`):
Pixel reconstruction logits.
"""
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Class for VideoMAEForPreTraining's outputs, with potential hidden states and attentions.
"""
)
| VideoMAEDecoderOutput |
python | tensorflow__tensorflow | tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py | {
"start": 191371,
"end": 217000
} | class ____(test.TestCase):
def _compareCpu(self, x, y, np_func, tf_func, also_compare_variables=False):
np_ans = np_func(x, y)
with test_util.force_cpu():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = self.evaluate(out)
# Test that the op takes precedence over numpy operators.
np_left = self.evaluate(tf_func(x, iny))
np_right = self.evaluate(tf_func(inx, y))
if also_compare_variables:
var_x = variables.Variable(x)
var_y = variables.Variable(y)
self.evaluate(variables.global_variables_initializer())
print(type(x), type(y), type(var_x), type(var_y))
print(type(tf_func(x, var_y)), type(tf_func(var_x, y)))
np_var_left = self.evaluate(tf_func(x, var_y))
np_var_right = self.evaluate(tf_func(var_x, y))
if np_ans.dtype != np.object:
self.assertAllClose(np_ans, tf_cpu)
self.assertAllClose(np_ans, np_left)
self.assertAllClose(np_ans, np_right)
if also_compare_variables:
self.assertAllClose(np_ans, np_var_left)
self.assertAllClose(np_ans, np_var_right)
self.assertShapeEqual(np_ans, out)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _sigmoid(self, x):
return 1.0 / (1.0 + np.exp(-x))
def _log_sigmoid(self, x):
return np.log(self._sigmoid(x))
def _replace_domain_error_with_inf(self, fn):
def func(x):
try:
return fn(x)
except ValueError as e:
if "domain error" in str(e):
return np.inf * np.ones_like(x)
else:
raise e
return func
def _compareTanhGrad(self, x, y):
default = gen_math_ops.tanh_grad(x, y)
with test_util.device(use_gpu=False):
cpu = gen_math_ops.tanh_grad(x, y)
self.assertAllClose(cpu, default)
def testTanhGrad(self):
x = np.random.uniform(-2.0, 2.0, size=[4, 4]).astype(np.float32)
y = np.random.uniform(-2.0, 2.0, size=[4, 4]).astype(np.float32)
self._compareTanhGrad(x, y)
_GRAD_TOL = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-3,
dtypes.complex64: 1e-2,
dtypes.float64: 1e-5,
dtypes.complex128: 1e-4,
}
def _compareGradientX(
self, x, y, np_func, tf_func, numeric_gradient_type=None
):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, out, zs, x_init_value=x
)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, xs, outf, zs, x_init_value=xf, delta=1e-3
)
jacob_n = jacob_n.astype(x.dtype)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(
self, x, y, np_func, tf_func, numeric_gradient_type=None
):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, ys, out, zs, x_init_value=y
)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inyf, ys, outf, zs, x_init_value=yf
)
jacob_n = jacob_n.astype(x.dtype)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def compareUnaryGradient_CPU_GPU(self, inx, func, test_name):
with test_util.force_cpu():
with backprop.GradientTape() as t:
t.watch(inx)
y = func(inx)
cpu_gradient = t.gradient(y, inx)
print(test_name, " (CPU) = ", cpu_gradient)
with test_util.force_gpu():
with backprop.GradientTape() as t:
t.watch(inx)
y = func(inx)
gpu_gradient = t.gradient(y, inx)
print(test_name, " (GPU) = ", gpu_gradient)
tol = self._GRAD_TOL[dtypes.as_dtype(inx.dtype)]
self.assertAllClose(cpu_gradient, gpu_gradient, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with test_util.use_gpu():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
def _compareBoth(self, x, y, np_func, tf_func, also_compare_variables=False):
self._compareCpu(x, y, np_func, tf_func, also_compare_variables)
self._compareGpu(x, y, np_func, tf_func)
def _compare(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with test_util.use_gpu():
out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
@test_util.run_deprecated_v1
def testGradGrad(self):
np.random.seed(7)
shape = (5,)
dtype_tols = [
(np.float32, 5e-4),
(np.float64, 1e-6),
(np.complex64, 5e-4),
(np.complex128, 1e-6),
]
op_range = [
(gen_math_ops.tanh_grad, [-2, 2]),
]
def rand(dtype, real_range):
x = np.random.uniform(real_range[0], real_range[1], size=shape[0]).astype(
dtype
)
return x
for op, real_range in op_range:
with self.cached_session():
for dtype, tol in dtype_tols:
x = constant_op.constant(rand(dtype, real_range))
y = constant_op.constant(rand(dtype, real_range))
z = op(x, y)
grads = gradient_checker.compute_gradient(
[x, y],
[shape, shape],
z,
shape,
x_init_value=[rand(dtype, real_range), rand(dtype, real_range)],
)
if isinstance(grads, tuple):
grads = [grads]
for analytical, numerical in grads:
self.assertAllClose(analytical, numerical, rtol=tol, atol=tol)
def testFloatCompareTensor(self):
x = np.linspace(-15, 15, 6).reshape((1, 3, 2))
y = np.linspace(20, -10, 6).reshape((1, 3, 2))
for t in [np.float32, np.float16]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(xt, yt, np.less, math_ops.less)
self._compare(xt, yt, np.less_equal, math_ops.less_equal)
self._compare(xt, yt, np.greater, math_ops.greater)
self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
def testFloatBasic(self):
x = np.linspace(-5, 20, 30).reshape((1, 2, 3, 5)).astype(np.float32)
y = np.linspace(20, -5, 30).reshape((1, 2, 3, 5)).astype(np.float32)
self._compareBoth(x, y, np.add, math_ops.add, True)
self._compareBoth(x, y, np.subtract, math_ops.subtract, True)
self._compareBoth(x, y, np.multiply, math_ops.multiply, True)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
def testHalfBasic(self):
x = np.linspace(-5, 20, 30).reshape((1, 2, 3, 5)).astype(np.float16)
y = np.linspace(20, -5, 30).reshape((1, 2, 3, 5)).astype(np.float16)
self._compareBoth(x, y, np.add, math_ops.add, True)
self._compareBoth(x, y, np.subtract, math_ops.subtract, True)
self._compareBoth(x, y, np.multiply, math_ops.multiply, True)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
def testIntBasic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.mod, math_ops.mod)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
# _compareBoth tests on GPU only for floating point types, so test
# _MOD for int32 on GPU by calling _compareGpu
self._compareGpu(x, y, np.mod, _MOD)
def testZeroElementBinaryOp(self):
x = array_ops.ones([0, 3])
y = 4.0
self._compareBoth(x, y, np.add, math_ops.add, True)
self._compareBoth(x, y, np.subtract, math_ops.subtract, True)
self._compareBoth(x, y, np.multiply, math_ops.multiply, True)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
def testAssignMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign(2.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign(3.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign(4.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testAssignIncompatibleShape(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
self.evaluate(v.initializer)
pattern = re.compile("shapes must be equal", re.IGNORECASE)
with self.assertRaisesRegex(Exception, pattern):
self.evaluate(v.assign_add(1))
def _compareUnaryCpu(
self, x, np_func, tf_func, grad_rtol=None, grad_atol=None
):
if grad_rtol is None:
grad_rtol = _default_tolerance(x.dtype)
if grad_atol is None:
grad_atol = _default_tolerance(x.dtype)
np_ans = np_func(x)
with self.cached_session(use_gpu=False):
inx = ops.convert_to_tensor(x)
if x.dtype in (
np.float32,
np.float64,
dtypes.bfloat16.as_numpy_dtype,
):
y = 1.1 * tf_func(inx)
np_ans *= 1.1
else:
y = tf_func(inx)
tf_cpu = self.evaluate(y)
self.assertShapeEqual(np_ans, y)
if x.dtype == np.float16:
self.assertAllClose(np_ans, tf_cpu, rtol=1e-3, atol=1e-3)
elif x.dtype == dtypes.bfloat16.as_numpy_dtype:
self.assertAllClose(np_ans, tf_cpu, rtol=1e-2, atol=1e-2)
else:
self.assertAllClose(np_ans, tf_cpu)
if x.dtype in (np.complex64, np.complex128) and tf_func == math_ops.sign:
return # Return early
if x.dtype == np.float16:
s = list(np.shape(x))
jacob_t, _ = gradient_checker.compute_gradient(
inx, s, y, s, x_init_value=x
)
xf = x.astype(np.float)
inxf = ops.convert_to_tensor(xf)
yf = tf_func(inxf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, s, yf, s, x_init_value=xf, delta=1e-2
)
jacob_n = jacob_n.astype(np.float16)
self.assertAllClose(jacob_t, jacob_n, rtol=grad_rtol, atol=grad_atol)
elif x.dtype in (np.float32, np.complex64):
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, y, s, x_init_value=x, delta=1e-3
)
self.assertAllClose(jacob_t, jacob_n, rtol=grad_rtol, atol=grad_atol)
elif x.dtype in (np.float64, np.complex128):
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, y, s, x_init_value=x, delta=1e-5
)
self.assertAllClose(jacob_t, jacob_n, rtol=grad_rtol, atol=grad_atol)
def _compareUnaryGpu(self, x, np_func, tf_func):
np_ans = np_func(x)
with test_util.use_gpu():
result = tf_func(ops.convert_to_tensor(x))
tf_gpu = self.evaluate(result)
if x.dtype == np.float16:
self.assertAllClose(np_ans, tf_gpu, rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(np_ans, tf_gpu)
def _compareUnaryBoth(self, x, np_func, tf_func):
self._compareUnaryGpu(x, np_func, tf_func)
def compareConv2d(
self, input, filter, padding, format="NHWC", dilations=None
):
stride = 2
strides = [stride, stride]
with test_util.force_gpu():
gpu = nn_ops.conv2d(
input=input,
filter=filter,
strides=strides,
padding=padding,
data_format=format,
dilations=dilations,
)
with test_util.force_cpu():
if format == "NCHW":
input = array_ops.transpose(input, [0, 2, 3, 1])
if not isinstance(padding, str):
padding = [padding[0], padding[2], padding[3], padding[1]]
cpu = nn_ops.conv2d(
input=input,
filter=filter,
strides=strides,
padding=padding,
data_format="NHWC",
dilations=dilations,
)
if format == "NCHW":
cpu = array_ops.transpose(cpu, [0, 3, 1, 2])
if math_ops.reduce_any(math_ops.not_equal(cpu, gpu)):
print(
"Error: padding: {0} format: {1} dilations: {2}".format(
padding, format, dilations
)
)
print("CPU: ", cpu)
print("GPU: ", gpu)
else:
print(
"Passed: padding: {0} format: {1} dilations: {2}".format(
padding, format, dilations
)
)
print("CPU: ", cpu)
print("GPU: ", gpu)
self.assertAllEqual(cpu, gpu)
def testConvolution(self):
input = constant_op.constant([[
[[1], [2.0], [3.0], [4.0]],
[[6], [7], [8], [9]],
[[10], [11], [12], [13]],
[[14], [15], [16], [17]],
]])
input2 = constant_op.constant([[
[[1], [2.0], [3.0], [4.0], [5.0]],
[[6], [7], [8], [9], [15.0]],
[[10], [11], [12], [13], [25.0]],
[[14], [15], [16], [17], [35.0]],
]])
input4 = constant_op.constant([[
[[1], [2.0], [3.0], [4.0], [5.0], [1], [2.0]],
[[6], [7], [8], [9], [15.0], [1], [2.0]],
[[10], [11], [12], [13], [25.0], [1], [2.0]],
[[14], [15], [16], [17], [35.0], [1], [2.0]],
[[6], [7], [8], [9], [15.0], [1], [2.0]],
[[10], [11], [12], [13], [25.0], [1], [2.0]],
]])
print("input: ", input)
## (2,2,1,1)
filter2x2 = constant_op.constant(
[
[[[1.0]], [[1]]],
[[[1.0]], [[1]]],
],
)
## (3,2,1,1)
filter3x2 = constant_op.constant(
[[[[1.0]], [[1]]], [[[1.0]], [[1]]], [[[1.0]], [[1]]]],
)
## (4,2,1,1)
filter4x2 = constant_op.constant(
[
[[[1.0]], [[1]]],
[[[1.0]], [[1]]],
[[[1.0]], [[1]]],
[[[1.0]], [[1]]],
],
)
## (5,2,1,1)
filter5x2 = constant_op.constant(
[
[[[1.0]], [[1]]],
[[[1.0]], [[1]]],
[[[1.0]], [[1]]],
[[[1.0]], [[1]]],
[[[1.0]], [[1]]],
],
)
print("filter2x2: ", filter2x2)
self.compareConv2d(input, filter2x2, "VALID")
self.compareConv2d(input, filter3x2, "VALID")
self.compareConv2d(input, filter4x2, "VALID")
self.compareConv2d(input, filter5x2, "VALID")
self.compareConv2d(input, filter2x2, "SAME")
self.compareConv2d(input, filter3x2, "SAME")
self.compareConv2d(input, filter4x2, "SAME")
self.compareConv2d(input, filter5x2, "SAME")
self.compareConv2d(input2, filter2x2, "VALID")
self.compareConv2d(input2, filter2x2, "SAME")
pad_top = 2
pad_bottom = 3
pad_left = 1
pad_right = 5
self.compareConv2d(
input2,
filter2x2,
[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]],
)
self.compareConv2d(input2, filter2x2, "VALID", dilations=[2, 2])
self.compareConv2d(input2, filter2x2, "SAME", dilations=[2, 2])
self.compareConv2d(input4, filter2x2, "VALID", dilations=[2, 3])
self.compareConv2d(input4, filter2x2, "SAME", dilations=[3, 2])
self.compareConv2d(input4, filter3x2, "VALID", dilations=[2, 3])
self.compareConv2d(input4, filter3x2, "SAME", dilations=[3, 2])
self.compareConv2d(input4, filter5x2, "VALID", dilations=[2, 3])
self.compareConv2d(input4, filter5x2, "SAME", dilations=[3, 2])
self.compareConv2d(
input2,
filter2x2,
[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]],
dilations=[2, 2],
)
input3 = constant_op.constant([[[
[1, 2.0, 3.0, 4.0, 5.0],
[6, 7, 8, 9, 15],
[10, 11, 12, 13, 25.0],
[14, 15, 16, 17, 35.0],
]]])
self.compareConv2d(input3, filter2x2, "VALID", "NCHW")
self.compareConv2d(input3, filter2x2, "SAME", "NCHW")
self.compareConv2d(
input3,
filter2x2,
[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]],
"NCHW",
)
def compareTranspose(self, input, perm):
with test_util.force_gpu():
gpu = array_ops.transpose(input, perm)
with test_util.force_cpu():
cpu = array_ops.transpose(input, perm)
if math_ops.reduce_any(math_ops.not_equal(cpu, gpu)):
print("Error")
print("CPU: ", cpu)
print("GPU: ", gpu)
else:
print("Passed")
self.assertAllEqual(cpu, gpu)
def testTranspose(self):
for dtype in [dtypes.float32, dtypes.bfloat16]:
input = tf.convert_to_tensor(np.arange(0.0, 5 * 2 * 13), dtype=dtype)
input = array_ops.reshape(input, [5, 2, 13])
self.compareTranspose(input, [1, 2, 0])
self.compareTranspose(input, [0, 2, 1])
self.compareTranspose(input, [2, 0, 1])
self.compareTranspose(input, [2, 1, 0])
input = tf.convert_to_tensor(np.arange(0.0, 2 * 4 * 3 * 5), dtype=dtype)
input = array_ops.reshape(input, [2, 4, 3, 5])
self.compareTranspose(input, [1, 0, 2, 3])
self.compareTranspose(input, [0, 3, 1, 2])
self.compareTranspose(input, [3, 2, 1, 0])
def testUnaryHalfBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float16)
_ = x - x.min() + 1.02 # all greater than 1
y = (x + 0.5).astype(np.float16) # no zero
z = (x + 15.5).astype(np.float16) # all positive
_ = np.arange(-0.90, 0.90, 0.25).astype(np.float16) # between -1 and 1
self._compareUnaryBoth(x, np.abs, math_ops.abs)
self._compareUnaryBoth(x, np.abs, _ABS)
self._compareUnaryBoth(x, np.negative, math_ops.negative)
self._compareUnaryBoth(x, np.negative, _NEG)
self._compareUnaryBoth(y, self._inv, math_ops.reciprocal)
self._compareUnaryBoth(z, np.log, math_ops.log)
self._compareUnaryBoth(x, self._sigmoid, math_ops.sigmoid)
self._compareUnaryBoth(z, np.sqrt, math_ops.sqrt)
self._compareUnaryBoth(z, self._rsqrt, math_ops.rsqrt)
self._compareUnaryBoth(x, np.exp, math_ops.exp)
self._compareUnaryBoth(x, self._sigmoid, math_ops.sigmoid)
self._compareUnaryBoth(x, np.square, math_ops.square)
self._compareUnaryBoth(y, np.sign, math_ops.sign)
self._compareUnaryBoth(x, np.tanh, math_ops.tanh)
def testUnaryFloatBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
_ = x - x.min() + 1.02 # all greater than 1
y = (x + 0.5).astype(np.float32) # no zero
z = (x + 15.5).astype(np.float32) # all positive
_ = np.arange(-0.90, 0.90, 0.25).astype(np.float32) # between -1 and 1
self._compareUnaryBoth(x, np.abs, math_ops.abs)
self._compareUnaryBoth(x, np.abs, _ABS)
self._compareUnaryBoth(x, np.negative, math_ops.negative)
self._compareUnaryBoth(x, np.negative, _NEG)
self._compareUnaryBoth(y, self._inv, math_ops.reciprocal)
self._compareUnaryBoth(z, np.log, math_ops.log)
self._compareUnaryBoth(x, np.square, math_ops.square)
self._compareUnaryBoth(x, self._sigmoid, math_ops.sigmoid)
self._compareUnaryBoth(z, np.sqrt, math_ops.sqrt)
self._compareUnaryBoth(z, self._rsqrt, math_ops.rsqrt)
self._compareUnaryBoth(x, np.exp, math_ops.exp)
self._compareUnaryBoth(x, self._sigmoid, math_ops.sigmoid)
self._compareUnaryBoth(z, np.log1p, math_ops.log1p)
self._compareUnaryBoth(x, np.square, math_ops.square)
self._compareUnaryBoth(y, np.sign, math_ops.sign)
self._compareUnaryBoth(x, np.tanh, math_ops.tanh)
x = np.array([0.5, 0.7], np.float32)
inx = ops.convert_to_tensor(x)
print("\nsigmoidGrad:\n")
self.compareUnaryGradient_CPU_GPU(inx, gen_math_ops.sigmoid, "sigmoidGrad")
gradient = gen_math_ops.sigmoid_grad(
gen_math_ops.sigmoid(inx), constant_op.constant(1.0)
)
print("gen_math_ops.sigmoid_grad(y) = ", gradient)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = (1 + np.linspace(0, 5, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 5, np.prod(ys))).astype(dtype).reshape(ys)
self._compareCpu(x, y, np_func, tf_func)
if x.dtype in (np.float16, np.float32, np.float64):
self._compareGpu(x, y, np_func, tf_func)
def _testBCastByFunc(self, funcs, xs, ys):
dtypes_ = [
np.float32,
]
for dtype in dtypes_:
for np_func, tf_func in funcs:
self._compareBCast(xs, ys, dtype, np_func, tf_func)
self._compareBCast(ys, xs, dtype, np_func, tf_func)
def _testBCastA(self, xs, ys):
funcs = [
(np.add, math_ops.add),
(np.add, _ADD),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastB(self, xs, ys):
funcs = [
(np.subtract, math_ops.subtract),
(np.subtract, _SUB),
(np.power, math_ops.pow),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastC(self, xs, ys):
funcs = [
(np.multiply, math_ops.multiply),
(np.multiply, _MUL),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastD(self, xs, ys):
funcs = [
(np.true_divide, math_ops.truediv),
(np.true_divide, _TRUEDIV),
]
self._testBCastByFunc(funcs, xs, ys)
def testBCast_0A(self):
self._testBCastA([1, 3, 2], [1])
def testBCast_0B(self):
self._testBCastB([1, 3, 2], [1])
def testBCast_0C(self):
self._testBCastC([1, 3, 2], [1])
def testBCast_0D(self):
self._testBCastD([1, 3, 2], [1])
def testBCast_1A(self):
self._testBCastA([2, 3, 2], [2])
def testBCast_1B(self):
self._testBCastB([1, 3, 2], [2])
def testBCast_1C(self):
self._testBCastC([1, 3, 2], [2])
def testBCast_1D(self):
self._testBCastD([1, 3, 2], [2])
def testBCast_2A(self):
self._testBCastA([2, 3, 2], [3, 2])
def testBCast_2B(self):
self._testBCastB([1, 3, 2], [3, 2])
def testBCast_2C(self):
self._testBCastC([1, 3, 2], [3, 2])
def testBCast_2D(self):
self._testBCastD([1, 3, 2], [3, 2])
def testBCast_3A(self):
self._testBCastA([1, 3, 2], [3, 1])
def testBCast_3B(self):
self._testBCastB([1, 3, 2], [3, 1])
def testBCast_3C(self):
self._testBCastC([1, 3, 2], [3, 1])
def testBCast_3D(self):
self._testBCastD([1, 3, 2], [3, 1])
def testBCast_4A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
def testBCast_4B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
def testBCast_4C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
def testBCast_4D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
def testBCast_5A(self):
self._testBCastA([1, 3, 2], [2, 3, 1])
def testBCast_5B(self):
self._testBCastB([1, 3, 2], [2, 3, 1])
def testBCast_5C(self):
self._testBCastC([1, 3, 2], [2, 3, 1])
def testBCast_5D(self):
self._testBCastD([1, 3, 2], [2, 3, 1])
def run_benchmark(func, num_iters, execution_mode=None):
ctx = context.context()
with context.execution_mode(execution_mode):
# call func to warm up
func()
if execution_mode == context.ASYNC:
ctx.executor.wait()
start = time.time()
for _ in xrange(num_iters):
func()
if execution_mode == context.ASYNC:
ctx.executor.wait()
end = time.time()
return end - start
if __name__ == "__main__":
test.main()
| MpsTest |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-jinaai/llama_index/embeddings/jinaai/base.py | {
"start": 685,
"end": 5345
} | class ____:
def __init__(
self,
model: str = "jina-embeddings-v3",
base_url: str = DEFAULT_JINA_AI_API_URL,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
self.api_url = f"{base_url}/embeddings"
self.api_key = get_from_param_or_env("api_key", api_key, "JINAAI_API_KEY", "")
self.model = model
self._session = requests.Session()
self._session.headers.update(
{"Authorization": f"Bearer {api_key}", "Accept-Encoding": "identity"}
)
def get_embeddings(
self,
input,
encoding_type: str = "float",
task: Optional[str] = None,
dimensions: Optional[int] = None,
late_chunking: Optional[bool] = None,
) -> List[List[float]]:
"""Get embeddings."""
# Call Jina AI Embedding API
input_json = {
"input": input,
"model": self.model,
"encoding_type": encoding_type,
}
if task is not None:
input_json["task"] = task
if dimensions is not None:
input_json["dimensions"] = dimensions
if late_chunking is not None:
input_json["late_chunking"] = late_chunking
resp = self._session.post( # type: ignore
self.api_url,
json=input_json,
).json()
if "data" not in resp:
raise RuntimeError(resp["detail"])
embeddings = resp["data"]
# Sort resulting embeddings by index
sorted_embeddings = sorted(embeddings, key=lambda e: e["index"]) # type: ignore
# Return just the embeddings
if encoding_type == "ubinary":
return [
np.unpackbits(np.array(result["embedding"], dtype="uint8")).tolist()
for result in sorted_embeddings
]
elif encoding_type == "binary":
return [
np.unpackbits(
(np.array(result["embedding"]) + 128).astype("uint8")
).tolist()
for result in sorted_embeddings
]
return [result["embedding"] for result in sorted_embeddings]
async def aget_embeddings(
self,
input,
encoding_type: str = "float",
task: Optional[str] = None,
dimensions: Optional[int] = None,
late_chunking: Optional[bool] = None,
) -> List[List[float]]:
"""Asynchronously get text embeddings."""
import aiohttp
async with aiohttp.ClientSession(trust_env=True) as session:
headers = {
"Authorization": f"Bearer {self.api_key}",
"Accept-Encoding": "identity",
}
input_json = {
"input": input,
"model": self.model,
"encoding_type": encoding_type,
}
if task is not None:
input_json["task"] = task
if dimensions is not None:
input_json["dimensions"] = dimensions
if late_chunking is not None:
input_json["late_chunking"] = late_chunking
async with session.post(
self.api_url,
json=input_json,
headers=headers,
) as response:
resp = await response.json()
response.raise_for_status()
embeddings = resp["data"]
# Sort resulting embeddings by index
sorted_embeddings = sorted(embeddings, key=lambda e: e["index"]) # type: ignore
# Return just the embeddings
if encoding_type == "ubinary":
return [
np.unpackbits(
np.array(result["embedding"], dtype="uint8")
).tolist()
for result in sorted_embeddings
]
elif encoding_type == "binary":
return [
np.unpackbits(
(np.array(result["embedding"]) + 128).astype("uint8")
).tolist()
for result in sorted_embeddings
]
return [result["embedding"] for result in sorted_embeddings]
def is_local(url):
url_parsed = urlparse(url)
if url_parsed.scheme in ("file", ""): # Possibly a local file
return exists(url_parsed.path)
return False
def get_bytes_str(file_path):
with open(file_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
| _JinaAPICaller |
python | scikit-image__scikit-image | doc/tools/apigen.py | {
"start": 787,
"end": 17175
} | class ____:
"""Automatic detection and parsing of API docs to Sphinx-parsable reST format."""
# only separating first two levels
rst_section_levels = ['*', '=', '-', '~', '^']
def __init__(
self,
package_name,
rst_extension='.rst',
package_skip_patterns=None,
module_skip_patterns=None,
):
r"""Initialize package for parsing
Parameters
----------
package_name : string
Name of the top-level package. *package_name* must be the
name of an importable package.
rst_extension : str, optional
Extension for reST files, default '.rst'.
package_skip_patterns : None or sequence of {strings, regexps}
Sequence of strings giving URIs of packages to be excluded
Operates on the package path, starting at (including) the
first dot in the package path, after *package_name* - so,
if *package_name* is ``sphinx``, then ``sphinx.util`` will
result in ``.util`` being passed for searching by these
regexps. If is None, gives default. Default is ``['\.tests$']``.
module_skip_patterns : None or sequence
Sequence of strings giving URIs of modules to be excluded
Operates on the module name including preceding URI path,
back to the first dot after *package_name*. For example
``sphinx.util.console`` results in the string to search of
``.util.console``.
If is None, gives default. Default is ``['\.setup$', '\._']``.
"""
if package_skip_patterns is None:
package_skip_patterns = ['\\.tests$']
if module_skip_patterns is None:
module_skip_patterns = ['\\.setup$', '\\._']
self.package_name = package_name
self.rst_extension = rst_extension
self.package_skip_patterns = package_skip_patterns
self.module_skip_patterns = module_skip_patterns
def get_package_name(self):
return self._package_name
def set_package_name(self, package_name):
"""Set package_name
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> docwriter.root_path == sphinx.__path__[0]
True
>>> docwriter.package_name = 'docutils'
>>> import docutils
>>> docwriter.root_path == docutils.__path__[0]
True
"""
# It's also possible to imagine caching the module parsing here
self._package_name = package_name
root_module = self._import(package_name)
self.root_path = root_module.__path__[-1]
if not os.path.isdir(self.root_path):
# __path__ might point to editable loader, try falling back to __file__
self.root_path = os.path.dirname(root_module.__file__)
if not os.path.isdir(self.root_path):
msg = (
f"could not determine a valid directory for {root_module!r}, "
f"'{self.root_path}' is not a directory"
)
raise NotADirectoryError(msg)
self.written_modules = None
package_name = property(
get_package_name, set_package_name, None, 'get/set package_name'
)
def _import(self, name):
"""Import namespace package."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def _get_object_name(self, line):
"""Get second token in line.
>>> docwriter = ApiDocWriter('sphinx')
>>> docwriter._get_object_name(" def func(): ")
'func'
>>> docwriter._get_object_name(" class Klass(object): ")
'Klass'
>>> docwriter._get_object_name(" class Klass: ")
'Klass'
"""
name = line.split()[1].split('(')[0].strip()
# in case we have classes which are not derived from object
# ie. old style classes
return name.rstrip(':')
def _uri2path(self, uri):
"""Convert uri to absolute filepath.
Parameters
----------
uri : string
URI of python module to return path for
Returns
-------
path : None or string
Returns None if there is no valid path for this URI
Otherwise returns absolute file system path for URI
Examples
--------
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> modpath = sphinx.__path__[0]
>>> res = docwriter._uri2path('sphinx.builder')
>>> res == os.path.join(modpath, 'builder.py')
True
>>> res = docwriter._uri2path('sphinx')
>>> res == os.path.join(modpath, '__init__.py')
True
>>> docwriter._uri2path('sphinx.does_not_exist')
"""
if uri == self.package_name:
return os.path.join(self.root_path, '__init__.py')
path = uri.replace(self.package_name + '.', '')
path = path.replace('.', os.path.sep)
path = os.path.join(self.root_path, path)
# XXX maybe check for extensions as well?
if os.path.exists(path + '.py'): # file
path += '.py'
elif os.path.exists(os.path.join(path, '__init__.py')):
path = os.path.join(path, '__init__.py')
else:
return None
return path
def _path2uri(self, dirpath):
"""Convert directory path to uri."""
package_dir = self.package_name.replace('.', os.path.sep)
relpath = dirpath.replace(self.root_path, package_dir)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, '.')
def _parse_module(self, uri):
"""Parse module defined in uri."""
filename = self._uri2path(uri)
if filename is None:
print(filename, 'erk')
# nothing that we could handle here.
return ([], [])
with open(filename) as f:
functions, classes = self._parse_lines(f)
return functions, classes
def _parse_module_with_import(self, uri):
"""Look for functions and classes in the importable module.
Parameters
----------
uri : str
The name of the module to be parsed. This module needs to be
importable.
Returns
-------
functions : list of str
A list of (public) function names in the module.
classes : list of str
A list of (public) class names in the module.
submodules : list of str
A list of (public) submodule names in the module.
"""
mod = __import__(uri, fromlist=[uri.split('.')[-1]])
# find all public objects in the module.
obj_strs = getattr(
mod, '__all__', [obj for obj in dir(mod) if not obj.startswith('_')]
)
functions = []
classes = []
submodules = []
for obj_str in obj_strs:
# find the actual object from its string representation
try:
obj = getattr(mod, obj_str)
except AttributeError:
continue
# figure out if obj is a function or class
if isinstance(obj, (FunctionType, BuiltinFunctionType)):
functions.append(obj_str)
elif isinstance(obj, ModuleType) and 'skimage' in mod.__name__:
submodules.append(obj_str)
else:
try:
issubclass(obj, object)
classes.append(obj_str)
except TypeError:
# not a function or class
pass
return functions, classes, submodules
def _parse_lines(self, linesource):
"""Parse lines of text for functions and classes."""
functions = []
classes = []
for line in linesource:
if line.startswith('def ') and line.count('('):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
functions.append(name)
elif line.startswith('class '):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
classes.append(name)
else:
pass
functions.sort()
classes.sort()
return functions, classes
def generate_api_doc(self, uri):
"""Make autodoc documentation template string for a module.
Parameters
----------
uri : string
Python location of module - e.g 'sphinx.builder'.
Returns
-------
S : string
Contents of API doc.
"""
# get the names of all classes and functions
functions, classes, submodules = self._parse_module_with_import(uri)
if not (len(functions) or len(classes) or len(submodules)) and DEBUG:
print('WARNING: Empty -', uri)
return ''
functions = sorted(functions)
classes = sorted(classes)
submodules = sorted(submodules)
ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
# Set the chapter title to read 'module' for all modules except for the
# main packages
title = ':mod:`' + uri + '`'
ad += title + '\n' + self.rst_section_levels[1] * len(title) + '\n\n'
ad += '.. automodule:: ' + uri + '\n\n'
ad += '.. currentmodule:: ' + uri + '\n\n'
ad += '.. autosummary::\n :nosignatures:\n\n'
for f in functions:
ad += ' ' + f + '\n'
ad += '\n'
for c in classes:
ad += ' ' + c + '\n'
ad += '\n'
for m in submodules:
ad += ' ' + m + '\n'
ad += '\n'
for f in functions:
ad += "------------\n\n"
# must NOT exclude from index to keep cross-refs working
ad += '\n.. autofunction:: ' + f + '\n\n'
ad += f' .. minigallery:: {uri}.{f}\n\n'
for c in classes:
ad += '\n.. autoclass:: ' + c + '\n'
# must NOT exclude from index to keep cross-refs working
ad += (
' :members:\n'
' :inherited-members:\n'
' :undoc-members:\n'
' :show-inheritance:\n'
'\n'
' .. automethod:: __init__\n\n'
)
ad += f' .. minigallery:: {uri}.{c}\n\n'
return ad
def _survives_exclude(self, matchstr, match_type):
"""Return True if matchstr does not match patterns.
Removes ``self.package_name`` from the beginning of the string if present.
Examples
--------
>>> dw = ApiDocWriter('sphinx')
>>> dw._survives_exclude('sphinx.okpkg', 'package')
True
>>> dw.package_skip_patterns.append('^\\.badpkg$')
>>> dw._survives_exclude('sphinx.badpkg', 'package')
False
>>> dw._survives_exclude('sphinx.badpkg', 'module')
True
>>> dw._survives_exclude('sphinx.badmod', 'module')
True
>>> dw.module_skip_patterns.append('^\\.badmod$')
>>> dw._survives_exclude('sphinx.badmod', 'module')
False
"""
if match_type == 'module':
patterns = self.module_skip_patterns
elif match_type == 'package':
patterns = self.package_skip_patterns
else:
raise ValueError(f'Cannot interpret match type "{match_type}"')
# Match to URI without package name
L = len(self.package_name)
if matchstr[:L] == self.package_name:
matchstr = matchstr[L:]
for pat in patterns:
try:
pat.search
except AttributeError:
pat = re.compile(pat)
if pat.search(matchstr):
return False
return True
def discover_modules(self):
r"""Return module sequence discovered from ``self.package_name``.
Returns
-------
mods : sequence
Sequence of module names within ``self.package_name``.
Examples
--------
>>> dw = ApiDocWriter('sphinx')
>>> mods = dw.discover_modules()
>>> 'sphinx.util' in mods
True
>>> dw.package_skip_patterns.append('\.util$')
>>> 'sphinx.util' in dw.discover_modules()
False
>>>
"""
modules = [self.package_name]
# raw directory parsing
for dirpath, dirnames, filenames in os.walk(self.root_path):
# Check directory names for packages
root_uri = self._path2uri(os.path.join(self.root_path, dirpath))
for dirname in dirnames[:]: # copy list - we modify inplace
package_uri = '.'.join((root_uri, dirname))
if self._uri2path(package_uri) and self._survives_exclude(
package_uri, 'package'
):
modules.append(package_uri)
else:
dirnames.remove(dirname)
return sorted(modules)
def write_modules_api(self, modules, outdir):
# write the list
written_modules = []
public_modules = [m for m in modules if not m.split('.')[-1].startswith('_')]
for m in public_modules:
api_str = self.generate_api_doc(m)
if not api_str:
continue
# write out to file
outfile = os.path.join(outdir, m + self.rst_extension)
with open(outfile, 'w') as fileobj:
fileobj.write(api_str)
written_modules.append(m)
self.written_modules = written_modules
def write_api_docs(self, outdir):
"""Generate API reST files.
Parameters
----------
outdir : string
Directory name in which to store the files. Filenames for each module
are automatically created.
Notes
-----
Sets self.written_modules to list of written modules.
"""
if not os.path.exists(outdir):
os.mkdir(outdir)
# compose list of modules
modules = self.discover_modules()
self.write_modules_api(modules, outdir)
def write_index(self, outdir, froot='gen', relative_to=None):
"""Make a reST API index file from the written files.
Parameters
----------
outdir : string
Directory to which to write generated index file.
froot : str, optional
Root (filename without extension) of filename to write to
Defaults to 'gen'. We add ``self.rst_extension``.
relative_to : string
Path to which written filenames are relative. This
component of the written file path will be removed from
outdir, in the generated index. Default is None, meaning,
leave path as it is.
"""
if self.written_modules is None:
raise ValueError('No modules written')
# Get full filename path
path = os.path.join(outdir, froot + self.rst_extension)
# Path written into index is relative to rootpath
if relative_to is not None:
relpath = (outdir + os.path.sep).replace(relative_to + os.path.sep, '')
else:
relpath = outdir
print("outdir: ", relpath)
with open(path, 'w') as idx:
w = idx.write
w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
# We look at the module name.
# If it is `skimage`, display,
# if `skimage.submodule`, only show `submodule`,
# if it is `skimage.submodule.subsubmodule`, ignore.
title = "API reference"
w(title + "\n")
w("=" * len(title) + "\n\n")
w('.. toctree::\n')
w(' :maxdepth: 1\n\n')
for f in self.written_modules:
w(f' {os.path.join(relpath, f)}\n\n')
w('----------------------\n\n')
w('.. toctree::\n')
w(' :maxdepth: 1\n\n')
w(' ../license\n')
| ApiDocWriter |
python | pytransitions__transitions | tests/test_pygraphviz.py | {
"start": 373,
"end": 528
} | class ____(TestDiagramsImport):
graph_engine = "pygraphviz"
pgv = pgv
@skipIf(pgv is None, 'Graph diagram requires pygraphviz')
| TestPygraphvizImport |
python | django__django | tests/async/models.py | {
"start": 174,
"end": 300
} | class ____(models.Model):
field = models.IntegerField()
created = models.DateTimeField(default=timezone.now)
| SimpleModel |
python | joke2k__faker | faker/providers/automotive/pt_PT/__init__.py | {
"start": 48,
"end": 391
} | class ____(AutomotiveProvider):
"""Implement automotive provider for ``pt_PT`` locale.
Sources:
- https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_Portugal
"""
license_formats = (
"##-##-??",
"##-??-##",
"??-##-##",
# New format since March 2020
"??-##-??",
)
| Provider |
python | kevin1024__vcrpy | tests/unit/test_stubs.py | {
"start": 275,
"end": 2858
} | class ____:
def test_setting_of_attributes_get_propagated_to_real_connection(self):
vcr_connection = VCRHTTPSConnection("www.examplehost.com")
vcr_connection.ssl_version = "example_ssl_version"
assert vcr_connection.real_connection.ssl_version == "example_ssl_version"
@mark.online
@mock.patch("vcr.cassette.Cassette.can_play_response_for", return_value=False)
def testing_connect(*args):
with contextlib.closing(VCRHTTPSConnection("www.google.com")) as vcr_connection:
vcr_connection.cassette = Cassette("test", record_mode=mode.ALL)
vcr_connection.real_connection.connect()
assert vcr_connection.real_connection.sock is not None
def test_body_consumed_once_stream(self, tmpdir, httpbin):
self._test_body_consumed_once(
tmpdir,
httpbin,
BytesIO(b"1234567890"),
BytesIO(b"9876543210"),
BytesIO(b"9876543210"),
)
def test_body_consumed_once_iterator(self, tmpdir, httpbin):
self._test_body_consumed_once(
tmpdir,
httpbin,
iter([b"1234567890"]),
iter([b"9876543210"]),
iter([b"9876543210"]),
)
# data2 and data3 should serve the same data, potentially as iterators
def _test_body_consumed_once(
self,
tmpdir,
httpbin,
data1,
data2,
data3,
):
with NamedTemporaryFile(dir=tmpdir, suffix=".yml") as f:
testpath = f.name
# NOTE: ``use_cassette`` is not okay with the file existing
# already. So we using ``.close()`` to not only
# close but also delete the empty file, before we start.
f.close()
host, port = httpbin.host, httpbin.port
match_on = ["method", "uri", "body"]
with use_cassette(testpath, match_on=match_on):
conn1 = httplib.HTTPConnection(host, port)
conn1.request("POST", "/anything", body=data1)
conn1.getresponse()
conn2 = httplib.HTTPConnection(host, port)
conn2.request("POST", "/anything", body=data2)
conn2.getresponse()
with use_cassette(testpath, match_on=match_on) as cass:
conn3 = httplib.HTTPConnection(host, port)
conn3.request("POST", "/anything", body=data3)
conn3.getresponse()
assert cass.play_counts[0] == 0
assert cass.play_counts[1] == 1
| TestVCRConnection |
python | tensorflow__tensorflow | tensorflow/python/module/module_test.py | {
"start": 11778,
"end": 12368
} | class ____(test_util.TensorFlowTestCase):
def testAbstract(self):
msg = "Can't instantiate.*abstract"
with self.assertRaisesRegex(TypeError, msg):
AbstractModule() # pylint: disable=abstract-class-instantiated
def testConcrete(self):
mod = ConcreteModule()
x, scope_name = mod(2.)
self.assertEqual(x, 4.)
self.assertEqual(scope_name, "concrete_module/")
self.assertEqual(get_name_scope(), "")
def get_name_scope():
with ops.name_scope("x", skip_on_eager=False) as ns:
ns = "/".join(ns.split("/")[:-2])
return ns + "/" if ns else ""
| AbcTest |
python | Textualize__textual | docs/examples/guide/widgets/hello05.py | {
"start": 680,
"end": 860
} | class ____(App):
CSS_PATH = "hello05.tcss"
def compose(self) -> ComposeResult:
yield Hello()
if __name__ == "__main__":
app = CustomApp()
app.run()
| CustomApp |
python | getsentry__sentry | src/sentry/types/region.py | {
"start": 3809,
"end": 3929
} | class ____(RegionResolutionError):
"""Indicate that a mapping to a region could not be found."""
| RegionMappingNotFound |
python | mlflow__mlflow | mlflow/genai/optimize/optimizers/base.py | {
"start": 522,
"end": 1781
} | class ____(ABC):
@abstractmethod
def optimize(
self,
eval_fn: _EvalFunc,
train_data: list[dict[str, Any]],
target_prompts: dict[str, str],
enable_tracking: bool = True,
) -> PromptOptimizerOutput:
"""
Optimize the target prompts using the given evaluation function,
dataset and target prompt templates.
Args:
eval_fn: The evaluation function that takes candidate prompts as a dict
(prompt template name -> prompt template) and a dataset as a list of dicts,
and returns a list of EvaluationResultRecord. Note that eval_fn is not thread-safe.
train_data: The dataset to use for optimization. Each record should
include the inputs and outputs fields with dict values.
target_prompts: The target prompt templates to use. The key is the prompt template
name and the value is the prompt template.
enable_tracking: If True (default), automatically log optimization progress.
Returns:
The outputs of the prompt optimizer that includes the optimized prompts
as a dict (prompt template name -> prompt template).
"""
| BasePromptOptimizer |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_organization_coding_agents.py | {
"start": 40694,
"end": 54592
} | class ____(BaseOrganizationCodingAgentsTest):
"""Test class for POST endpoint trigger source functionality."""
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_providers")
@patch("sentry.seer.autofix.coding_agent.get_autofix_state")
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_prompt")
@patch("sentry.seer.autofix.coding_agent.get_project_seer_preferences")
@patch(
"sentry.integrations.services.integration.integration_service.get_organization_integration"
)
@patch("sentry.integrations.services.integration.integration_service.get_integration")
def test_root_cause_trigger_source(
self,
mock_get_integration,
mock_get_org_integration,
mock_get_preferences,
mock_get_prompt,
mock_get_autofix_state,
mock_get_providers,
):
"""Test POST endpoint with root_cause trigger_source."""
mock_get_providers.return_value = ["github"]
mock_get_prompt.return_value = "Root cause prompt"
mock_get_preferences.return_value = PreferenceResponse(
preference=None, code_mapping_repos=[]
)
mock_rpc_integration = self._create_mock_rpc_integration()
mock_get_org_integration.return_value = self.rpc_org_integration
mock_get_integration.return_value = mock_rpc_integration
mock_get_autofix_state.return_value = self._create_mock_autofix_state()
data = {
"integration_id": str(self.integration.id),
"run_id": 123,
"trigger_source": "root_cause",
}
with (
self.feature("organizations:seer-coding-agent-integrations"),
patch(
"sentry.seer.autofix.coding_agent.store_coding_agent_states_to_seer",
),
):
response = self.get_success_response(self.organization.slug, method="post", **data)
assert response.data["success"] is True
assert response.data["launched_count"] >= 0
assert response.data["failed_count"] >= 0
# Verify prompt was called with root_cause trigger_source and no instruction
mock_get_prompt.assert_called_with(123, AutofixTriggerSource.ROOT_CAUSE, None)
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_providers")
@patch("sentry.seer.autofix.coding_agent.get_autofix_state")
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_prompt")
@patch("sentry.seer.autofix.coding_agent.get_project_seer_preferences")
@patch(
"sentry.integrations.services.integration.integration_service.get_organization_integration"
)
@patch("sentry.integrations.services.integration.integration_service.get_integration")
def test_root_cause_repos_extracted_and_deduped(
self,
mock_get_integration,
mock_get_org_integration,
mock_get_preferences,
mock_get_prompt,
mock_get_autofix_state,
mock_get_providers,
):
"""Root cause repos are extracted, de-duplicated, and used for launch."""
mock_get_providers.return_value = ["github"]
mock_get_prompt.return_value = "Root cause prompt"
mock_get_preferences.return_value = PreferenceResponse(
preference=None, code_mapping_repos=[]
)
mock_rpc_integration = self._create_mock_rpc_integration()
mock_get_org_integration.return_value = self.rpc_org_integration
mock_get_integration.return_value = mock_rpc_integration
# Create autofix state with request repos and root cause step including duplicate repos
mock_autofix_state = self._create_mock_autofix_state(
repos=[
SeerRepoDefinition(
organization_id=self.organization.id,
integration_id=str(self.integration.id),
owner="owner1",
name="repo1",
external_id="123",
provider="github",
),
SeerRepoDefinition(
organization_id=self.organization.id,
integration_id=str(self.integration.id),
owner="owner2",
name="repo2",
external_id="456",
provider="github",
),
]
)
mock_autofix_state.steps = [
{
"key": "root_cause_analysis",
"causes": [
{
"description": "Something happened",
"relevant_repos": ["owner1/repo1", "owner1/repo1"],
}
],
}
]
mock_get_autofix_state.return_value = mock_autofix_state
data = {
"integration_id": str(self.integration.id),
"run_id": 123,
"trigger_source": "root_cause",
}
with (
self.feature("organizations:seer-coding-agent-integrations"),
patch(
"sentry.seer.autofix.coding_agent.store_coding_agent_states_to_seer",
),
):
response = self.get_success_response(self.organization.slug, method="post", **data)
assert response.data["success"] is True
mock_get_prompt.assert_called_with(123, AutofixTriggerSource.ROOT_CAUSE, None)
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_providers")
@patch("sentry.seer.autofix.coding_agent.get_autofix_state")
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_prompt")
@patch("sentry.seer.autofix.coding_agent.get_project_seer_preferences")
@patch(
"sentry.integrations.services.integration.integration_service.get_organization_integration"
)
@patch("sentry.integrations.services.integration.integration_service.get_integration")
def test_root_cause_without_relevant_repos_falls_back_to_request_repos(
self,
mock_get_integration,
mock_get_org_integration,
mock_get_preferences,
mock_get_prompt,
mock_get_autofix_state,
mock_get_providers,
):
"""If root cause has no relevant_repos, fallback to request repos path executes."""
mock_get_providers.return_value = ["github"]
mock_get_prompt.return_value = "Root cause prompt"
mock_get_preferences.return_value = PreferenceResponse(
preference=None, code_mapping_repos=[]
)
mock_rpc_integration = self._create_mock_rpc_integration()
mock_get_org_integration.return_value = self.rpc_org_integration
mock_get_integration.return_value = mock_rpc_integration
# Create autofix state with request repos and root cause step lacking relevant_repos field
mock_autofix_state = self._create_mock_autofix_state(
repos=[
SeerRepoDefinition(
organization_id=self.organization.id,
integration_id=str(self.integration.id),
owner="owner1",
name="repo1",
external_id="123",
provider="github",
),
]
)
mock_autofix_state.steps = [
{
"key": "root_cause_analysis",
"causes": [
{
"description": "Something happened",
# intentionally no 'relevant_repos'
}
],
}
]
mock_get_autofix_state.return_value = mock_autofix_state
data = {
"integration_id": str(self.integration.id),
"run_id": 123,
"trigger_source": "root_cause",
}
with (
self.feature("organizations:seer-coding-agent-integrations"),
patch(
"sentry.seer.autofix.coding_agent.store_coding_agent_states_to_seer",
),
):
response = self.get_success_response(self.organization.slug, method="post", **data)
assert response.data["success"] is True
assert response.data["launched_count"] >= 0
assert response.data["failed_count"] >= 0
mock_get_prompt.assert_called_with(123, AutofixTriggerSource.ROOT_CAUSE, None)
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_providers")
@patch("sentry.seer.autofix.coding_agent.get_autofix_state")
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_prompt")
@patch("sentry.seer.autofix.coding_agent.get_project_seer_preferences")
@patch(
"sentry.integrations.services.integration.integration_service.get_organization_integration"
)
@patch("sentry.integrations.services.integration.integration_service.get_integration")
def test_solution_trigger_source(
self,
mock_get_integration,
mock_get_org_integration,
mock_get_preferences,
mock_get_prompt,
mock_get_autofix_state,
mock_get_providers,
):
"""Test POST endpoint with solution trigger_source."""
mock_get_providers.return_value = ["github"]
mock_get_prompt.return_value = "Solution prompt"
mock_get_preferences.return_value = PreferenceResponse(
preference=None, code_mapping_repos=[]
)
mock_rpc_integration = self._create_mock_rpc_integration()
mock_get_org_integration.return_value = self.rpc_org_integration
mock_get_integration.return_value = mock_rpc_integration
mock_get_autofix_state.return_value = self._create_mock_autofix_state()
data = {
"integration_id": str(self.integration.id),
"run_id": 123,
"trigger_source": "solution",
}
with (
self.feature("organizations:seer-coding-agent-integrations"),
patch(
"sentry.seer.autofix.coding_agent.store_coding_agent_states_to_seer",
),
):
response = self.get_success_response(self.organization.slug, method="post", **data)
assert response.data["success"] is True
assert response.data["launched_count"] >= 0
assert response.data["failed_count"] >= 0
# Verify prompt was called with solution trigger_source and no instruction
mock_get_prompt.assert_called_with(123, AutofixTriggerSource.SOLUTION, None)
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_providers")
@patch("sentry.seer.autofix.coding_agent.get_autofix_state")
@patch(
"sentry.integrations.services.integration.integration_service.get_organization_integration"
)
@patch("sentry.integrations.services.integration.integration_service.get_integration")
def test_invalid_trigger_source(
self,
mock_get_integration,
mock_get_org_integration,
mock_get_autofix_state,
mock_get_providers,
):
"""Test POST endpoint with invalid trigger_source."""
mock_get_providers.return_value = ["github"]
mock_rpc_integration = self._create_mock_rpc_integration()
mock_get_org_integration.return_value = self.rpc_org_integration
mock_get_integration.return_value = mock_rpc_integration
mock_get_autofix_state.return_value = self._create_mock_autofix_state()
data = {
"integration_id": str(self.integration.id),
"run_id": 123,
"trigger_source": "invalid_source",
}
with self.feature("organizations:seer-coding-agent-integrations"):
response = self.get_error_response(
self.organization.slug, method="post", status_code=400, **data
)
# Serializer field error shape
assert "trigger_source" in response.data
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_providers")
@patch("sentry.seer.autofix.coding_agent.get_autofix_state")
@patch("sentry.seer.autofix.coding_agent.get_coding_agent_prompt")
@patch("sentry.seer.autofix.coding_agent.get_project_seer_preferences")
@patch(
"sentry.integrations.services.integration.integration_service.get_organization_integration"
)
@patch("sentry.integrations.services.integration.integration_service.get_integration")
def test_prompt_not_available(
self,
mock_get_integration,
mock_get_org_integration,
mock_get_preferences,
mock_get_prompt,
mock_get_autofix_state,
mock_get_providers,
):
"""Test POST endpoint when prompt is not available."""
mock_get_providers.return_value = ["github"]
mock_get_prompt.return_value = None # Prompt not available
mock_get_preferences.return_value = PreferenceResponse(
preference=None, code_mapping_repos=[]
)
mock_rpc_integration = self._create_mock_rpc_integration()
mock_get_org_integration.return_value = self.rpc_org_integration
mock_get_integration.return_value = mock_rpc_integration
mock_get_autofix_state.return_value = self._create_mock_autofix_state()
data = {
"integration_id": str(self.integration.id),
"run_id": 123,
"trigger_source": "solution",
}
with (
self.feature("organizations:seer-coding-agent-integrations"),
patch(
"sentry.seer.autofix.coding_agent.get_coding_agent_prompt",
return_value=None,
),
):
response = self.get_error_response(
self.organization.slug, method="post", status_code=500, **data
)
assert response.data["detail"] == "Issue fetching prompt to send to coding agents."
| OrganizationCodingAgentsPostTriggerSourceTest |
python | doocs__leetcode | solution/1900-1999/1922.Count Good Numbers/Solution.py | {
"start": 0,
"end": 156
} | class ____:
def countGoodNumbers(self, n: int) -> int:
mod = 10**9 + 7
return pow(5, (n + 1) >> 1, mod) * pow(4, n >> 1, mod) % mod
| Solution |
python | huggingface__transformers | src/transformers/models/t5gemma/modeling_t5gemma.py | {
"start": 47717,
"end": 54356
} | class ____(T5GemmaPreTrainedModel):
def __init__(self, config: T5GemmaConfig, is_encoder_decoder: Optional[bool] = None):
r"""
is_encoder_decoder (`Optional`, *optional*):
Whether use encoder_decoder for sequence classification. When set to False, only encoder is used.
"""
if is_encoder_decoder is not None:
config.is_encoder_decoder = is_encoder_decoder
super().__init__(config)
self.num_labels = config.num_labels
if config.is_encoder_decoder:
self.model = T5GemmaModel(config)
else:
self.model = T5GemmaEncoderModel(config)
hidden_size = config.encoder.hidden_size
if config.is_encoder_decoder:
hidden_size = config.decoder.hidden_size
classifier_dropout = getattr(config, "classifier_dropout_rate", 0.1)
self.score = T5GemmaClassificationHead(hidden_size, self.num_labels, classifier_dropout)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.Tensor] = None,
decoder_position_ids: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[BaseModelOutput] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> SequenceClassifierOutput:
r"""
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0,
config.decoder.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
if self.config.is_encoder_decoder and (input_ids is None and inputs_embeds is not None):
raise NotImplementedError(
f"Passing input embeddings is currently not supported for {self.__class__.__name__} in encoder-decoder mode."
)
# Following T5, we automatically creates decoder_input_ids from input_ids if no decoder_input_ids are provided
if self.config.is_encoder_decoder and (decoder_input_ids is None and decoder_inputs_embeds is None):
if input_ids is None:
raise ValueError(
"If no `decoder_input_ids` or `decoder_inputs_embeds` are "
"passed, `input_ids` cannot be `None`. Please pass either "
"`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
)
decoder_input_ids = self._shift_right(input_ids)
if self.config.is_encoder_decoder:
outputs: Seq2SeqModelOutput = self.model(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_position_ids=decoder_position_ids,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=False,
**kwargs,
)
last_hidden_state = outputs.last_hidden_state
hidden_states = outputs.decoder_hidden_states
attentions = outputs.decoder_attentions
else:
outputs: BaseModelOutput = self.model(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
**kwargs,
)
last_hidden_state = outputs.last_hidden_state
hidden_states = outputs.hidden_states
attentions = outputs.attentions
logits = self.score(last_hidden_state)
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
# To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
if self.config.is_encoder_decoder:
last_non_pad_token += 1 # due to the right shift.
last_non_pad_token = torch.clamp(last_non_pad_token, max=decoder_input_ids.shape[-1] - 1)
else:
last_non_pad_token = -1
logger.warning_once(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
return SequenceClassifierOutput(
loss=loss,
logits=pooled_logits,
hidden_states=hidden_states,
attentions=attentions,
)
@auto_docstring
| T5GemmaForSequenceClassification |
python | uqfoundation__dill | dill/tests/test_diff.py | {
"start": 431,
"end": 2667
} | class ____:
pass
def test_diff():
a = A()
b = A()
c = A()
a.a = b
b.a = c
diff.memorise(a)
assert not diff.has_changed(a)
c.a = 1
assert diff.has_changed(a)
diff.memorise(c, force=True)
assert not diff.has_changed(a)
c.a = 2
assert diff.has_changed(a)
changed = diff.whats_changed(a)
assert list(changed[0].keys()) == ["a"]
assert not changed[1]
a2 = []
b2 = [a2]
c2 = [b2]
diff.memorise(c2)
assert not diff.has_changed(c2)
a2.append(1)
assert diff.has_changed(c2)
changed = diff.whats_changed(c2)
assert changed[0] == {}
assert changed[1]
a3 = {}
b3 = {1: a3}
c3 = {1: b3}
diff.memorise(c3)
assert not diff.has_changed(c3)
a3[1] = 1
assert diff.has_changed(c3)
changed = diff.whats_changed(c3)
assert changed[0] == {}
assert changed[1]
if not IS_PYPY:
import abc
# make sure the "_abc_invaldation_counter" doesn't make test fail
diff.memorise(abc.ABCMeta, force=True)
assert not diff.has_changed(abc)
abc.ABCMeta.zzz = 1
assert diff.has_changed(abc)
changed = diff.whats_changed(abc)
assert list(changed[0].keys()) == ["ABCMeta"]
assert not changed[1]
'''
import Queue
diff.memorise(Queue, force=True)
assert not diff.has_changed(Queue)
Queue.Queue.zzz = 1
assert diff.has_changed(Queue)
changed = diff.whats_changed(Queue)
assert list(changed[0].keys()) == ["Queue"]
assert not changed[1]
import math
diff.memorise(math, force=True)
assert not diff.has_changed(math)
math.zzz = 1
assert diff.has_changed(math)
changed = diff.whats_changed(math)
assert list(changed[0].keys()) == ["zzz"]
assert not changed[1]
'''
a = A()
b = A()
c = A()
a.a = b
b.a = c
diff.memorise(a)
assert not diff.has_changed(a)
c.a = 1
assert diff.has_changed(a)
diff.memorise(c, force=True)
assert not diff.has_changed(a)
del c.a
assert diff.has_changed(a)
changed = diff.whats_changed(a)
assert list(changed[0].keys()) == ["a"]
assert not changed[1]
if __name__ == '__main__':
test_diff()
| A |
python | PyCQA__flake8 | src/flake8/violation.py | {
"start": 440,
"end": 2036
} | class ____(NamedTuple):
"""Class representing a violation reported by Flake8."""
code: str
filename: str
line_number: int
column_number: int
text: str
physical_line: str | None
def is_inline_ignored(self, disable_noqa: bool) -> bool:
"""Determine if a comment has been added to ignore this line.
:param disable_noqa:
Whether or not users have provided ``--disable-noqa``.
:returns:
True if error is ignored in-line, False otherwise.
"""
physical_line = self.physical_line
# TODO(sigmavirus24): Determine how to handle stdin with linecache
if disable_noqa:
return False
if physical_line is None:
physical_line = linecache.getline(self.filename, self.line_number)
noqa_match = _find_noqa(physical_line)
if noqa_match is None:
LOG.debug("%r is not inline ignored", self)
return False
codes_str = noqa_match.groupdict()["codes"]
if codes_str is None:
LOG.debug("%r is ignored by a blanket ``# noqa``", self)
return True
codes = set(utils.parse_comma_separated_list(codes_str))
if self.code in codes or self.code.startswith(tuple(codes)):
LOG.debug(
"%r is ignored specifically inline with ``# noqa: %s``",
self,
codes_str,
)
return True
LOG.debug(
"%r is not ignored inline with ``# noqa: %s``", self, codes_str,
)
return False
| Violation |
python | walkccc__LeetCode | solutions/1553. Minimum Number of Days to Eat N Oranges/1553.py | {
"start": 0,
"end": 209
} | class ____:
@functools.lru_cache(None)
def minDays(self, n: int) -> int:
if n <= 1:
return n
return 1 + min(self.minDays(n // 3) + n % 3,
self.minDays(n // 2) + n % 2)
| Solution |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_function_base.py | {
"start": 78881,
"end": 82413
} | class ____(TestCase):
def test_simple(self):
[X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7])
assert_array_equal(X, np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]))
assert_array_equal(Y, np.array([[4, 4, 4], [5, 5, 5], [6, 6, 6], [7, 7, 7]]))
def test_single_input(self):
[X] = meshgrid([1, 2, 3, 4])
assert_array_equal(X, np.array([1, 2, 3, 4]))
def test_no_input(self):
args = []
assert_array_equal([], meshgrid(*args))
assert_array_equal([], meshgrid(*args, copy=False))
def test_indexing(self):
x = [1, 2, 3]
y = [4, 5, 6, 7]
[X, Y] = meshgrid(x, y, indexing="ij")
assert_array_equal(X, np.array([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]))
assert_array_equal(Y, np.array([[4, 5, 6, 7], [4, 5, 6, 7], [4, 5, 6, 7]]))
# Test expected shapes:
z = [8, 9]
assert_(meshgrid(x, y)[0].shape == (4, 3))
assert_(meshgrid(x, y, indexing="ij")[0].shape == (3, 4))
assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2))
assert_(meshgrid(x, y, z, indexing="ij")[0].shape == (3, 4, 2))
assert_raises(ValueError, meshgrid, x, y, indexing="notvalid")
def test_sparse(self):
[X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True)
assert_array_equal(X, np.array([[1, 2, 3]]))
assert_array_equal(Y, np.array([[4], [5], [6], [7]]))
def test_invalid_arguments(self):
# Test that meshgrid complains about invalid arguments
# Regression test for issue #4755:
# https://github.com/numpy/numpy/issues/4755
assert_raises(TypeError, meshgrid, [1, 2, 3], [4, 5, 6, 7], indices="ij")
def test_return_type(self):
# Test for appropriate dtype in returned arrays.
# Regression test for issue #5297
# https://github.com/numpy/numpy/issues/5297
x = np.arange(0, 10, dtype=np.float32)
y = np.arange(10, 20, dtype=np.float64)
X, Y = np.meshgrid(x, y)
assert_(X.dtype == x.dtype)
assert_(Y.dtype == y.dtype)
# copy
X, Y = np.meshgrid(x, y, copy=True)
assert_(X.dtype == x.dtype)
assert_(Y.dtype == y.dtype)
# sparse
X, Y = np.meshgrid(x, y, sparse=True)
assert_(X.dtype == x.dtype)
assert_(Y.dtype == y.dtype)
def test_writeback(self):
# Issue 8561
X = np.array([1.1, 2.2])
Y = np.array([3.3, 4.4])
x, y = np.meshgrid(X, Y, sparse=False, copy=True)
x[0, :] = 0
assert_equal(x[0, :], 0)
assert_equal(x[1, :], X)
def test_nd_shape(self):
a, b, c, d, e = np.meshgrid(*([0] * i for i in range(1, 6)))
expected_shape = (2, 1, 3, 4, 5)
assert_equal(a.shape, expected_shape)
assert_equal(b.shape, expected_shape)
assert_equal(c.shape, expected_shape)
assert_equal(d.shape, expected_shape)
assert_equal(e.shape, expected_shape)
def test_nd_values(self):
a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5])
assert_equal(a, [[[0, 0, 0]], [[0, 0, 0]]])
assert_equal(b, [[[1, 1, 1]], [[2, 2, 2]]])
assert_equal(c, [[[3, 4, 5]], [[3, 4, 5]]])
def test_nd_indexing(self):
a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5], indexing="ij")
assert_equal(a, [[[0, 0, 0], [0, 0, 0]]])
assert_equal(b, [[[1, 1, 1], [2, 2, 2]]])
assert_equal(c, [[[3, 4, 5], [3, 4, 5]]])
@xfail # (reason="TODO: implement")
| TestMeshgrid |
python | doocs__leetcode | solution/0200-0299/0200.Number of Islands/Solution2.py | {
"start": 0,
"end": 711
} | class ____:
def numIslands(self, grid: List[List[str]]) -> int:
def bfs(i, j):
grid[i][j] = '0'
q = deque([(i, j)])
while q:
i, j = q.popleft()
for a, b in pairwise(dirs):
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n and grid[x][y] == '1':
q.append((x, y))
grid[x][y] = 0
ans = 0
dirs = (-1, 0, 1, 0, -1)
m, n = len(grid), len(grid[0])
for i in range(m):
for j in range(n):
if grid[i][j] == '1':
bfs(i, j)
ans += 1
return ans
| Solution |
python | PyCQA__pylint | tests/functional/i/inherit_non_class.py | {
"start": 540,
"end": 630
} | class ____(lambda abc: 42): # [inherit-non-class]
""" Can't inherit from lambda. """
| Bad1 |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_data_bar02.py | {
"start": 345,
"end": 3877
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.index = 0
worksheet.conditional_format(
"A1",
{
"type": "data_bar",
"data_bar_2010": True,
},
)
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
<dimension ref="A1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
<sheetData/>
<conditionalFormatting sqref="A1">
<cfRule type="dataBar" priority="1">
<dataBar>
<cfvo type="min"/>
<cfvo type="max"/>
<color rgb="FF638EC6"/>
</dataBar>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
<x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000001}</x14:id>
</ext>
</extLst>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{78C0D931-6437-407d-A8EE-F0AAD7539E65}">
<x14:conditionalFormattings>
<x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000001}">
<x14:dataBar minLength="0" maxLength="100" border="1" negativeBarBorderColorSameAsPositive="0">
<x14:cfvo type="autoMin"/>
<x14:cfvo type="autoMax"/>
<x14:borderColor rgb="FF638EC6"/>
<x14:negativeFillColor rgb="FFFF0000"/>
<x14:negativeBorderColor rgb="FFFF0000"/>
<x14:axisColor rgb="FF000000"/>
</x14:dataBar>
</x14:cfRule>
<xm:sqref>A1</xm:sqref>
</x14:conditionalFormatting>
</x14:conditionalFormattings>
</ext>
</extLst>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | PrefectHQ__prefect | tests/runtime/test_deployment.py | {
"start": 4904,
"end": 5733
} | class ____:
async def test_version_is_attribute(self):
assert "version" in dir(deployment)
async def test_version_is_none_when_not_set(self, monkeypatch, prefect_client):
assert deployment.version is None
run = await prefect_client.create_flow_run(flow=flow(lambda: None, name="test"))
monkeypatch.setenv(name="PREFECT__FLOW_RUN_ID", value=str(run.id))
assert deployment.version is None
async def test_version_is_loaded_when_run_version_known(
self, deployment_id, monkeypatch, prefect_client
):
flow_run = await prefect_client.create_flow_run_from_deployment(deployment_id)
assert deployment.version is None
monkeypatch.setenv(name="PREFECT__FLOW_RUN_ID", value=str(flow_run.id))
assert deployment.version == "gold"
| TestVersion |
python | plotly__plotly.py | plotly/graph_objs/icicle/_domain.py | {
"start": 233,
"end": 4988
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "icicle"
_path_str = "icicle.domain"
_valid_props = {"column", "row", "x", "y"}
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this icicle trace .
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this icicle trace .
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
@property
def x(self):
"""
Sets the horizontal domain of this icicle trace (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
Sets the vertical domain of this icicle trace (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this icicle trace .
row
If there is a layout grid, use the domain for this row
in the grid for this icicle trace .
x
Sets the horizontal domain of this icicle trace (in
plot fraction).
y
Sets the vertical domain of this icicle trace (in plot
fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.icicle.Domain`
column
If there is a layout grid, use the domain for this
column in the grid for this icicle trace .
row
If there is a layout grid, use the domain for this row
in the grid for this icicle trace .
x
Sets the horizontal domain of this icicle trace (in
plot fraction).
y
Sets the vertical domain of this icicle trace (in plot
fraction).
Returns
-------
Domain
"""
super().__init__("domain")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.icicle.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.icicle.Domain`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("column", arg, column)
self._set_property("row", arg, row)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Domain |
python | django-import-export__django-import-export | tests/core/tests/admin_integration/test_views.py | {
"start": 456,
"end": 1336
} | class ____(AdminTestMixin, TestCase):
def setUp(self):
super().setUp()
self.factory = RequestFactory()
self.model_admin = MockModelAdmin(User, admin.site)
def test_changelist_view_context(self):
request = self.factory.get("/admin/")
request.user = self.user
# Call the changelist_view method
self.model_admin.ie_base_change_list_template = None
response = self.model_admin.changelist_view(request)
# Render will throw an exception if the default for {% extends %} is not set
response.render()
# Check if the base_change_list_template context variable is set to None
self.assertIsNone(response.context_data.get("base_change_list_template"))
self.assertContains(
response, '<a href="/admin/">Django administration</a>', html=True
)
| TestChangeListView |
python | getsentry__sentry | src/sentry/discover/translation/mep_to_eap.py | {
"start": 5599,
"end": 8725
} | class ____(NodeVisitor):
def __init__(self):
super().__init__()
def visit_raw_aggregate_param(self, node, children):
return column_switcheroo(node.text)[0]
def visit_aggregate_key(self, node, children):
term, did_update = function_switcheroo(node.text)
if did_update:
return term
return children or node.text
def visit_numeric_filter(self, node, children):
term, did_update = search_term_switcheroo(node.text)
if did_update:
return term
_, parsed_key, _, _, _ = children
flattened_parsed_key: list[str] = []
_flatten(parsed_key, flattened_parsed_key)
flattened_parsed_key_str = "".join(flattened_parsed_key)
if flattened_parsed_key_str:
if (
not flattened_parsed_key_str.startswith("tags[")
and flattened_parsed_key_str not in SPAN_ATTRIBUTE_DEFINITIONS
):
new_parsed_key = [f"tags[{flattened_parsed_key_str},number]"]
children[1] = new_parsed_key
return children or node.text
def visit_boolean_filter(self, node, children):
term, did_update = search_term_switcheroo(node.text)
if did_update:
return term
negation, parsed_key, sep, boolean_val = children
flattened_parsed_key: list[str] = []
_flatten(parsed_key, flattened_parsed_key)
flattened_parsed_key_str = "".join(flattened_parsed_key)
flattened_parsed_val: list[str] = []
_flatten(boolean_val, flattened_parsed_val)
flattened_parsed_val_str = "".join(flattened_parsed_val)
if (
flattened_parsed_key_str
and not flattened_parsed_key_str.startswith("tags[")
and flattened_parsed_key_str not in SPAN_ATTRIBUTE_DEFINITIONS
):
if flattened_parsed_val_str in ["0", "1"]:
new_parsed_key = [f"tags[{flattened_parsed_key_str},number]"]
children[1] = new_parsed_key
return children or node.text
flattened_parsed_val_num = None
if negation == "":
if flattened_parsed_val_str.lower() == "true":
flattened_parsed_val_num = "1"
elif flattened_parsed_val_str.lower() == "false":
flattened_parsed_val_num = "0"
return f"(tags[{flattened_parsed_key_str},number]:{flattened_parsed_val_num if flattened_parsed_val_num is not None else flattened_parsed_val_str} OR {flattened_parsed_key_str}:{flattened_parsed_val_str})"
return children or node.text
def visit_text_filter(self, node, children):
term, did_update = search_term_switcheroo(node.text)
if did_update:
return term
return children or node.text
def visit_key(self, node, children):
return column_switcheroo(node.text)[0]
def visit_value(self, node, children):
return column_switcheroo(node.text)[0]
def generic_visit(self, node, children):
return children or node.text
| TranslationVisitor |
python | cython__cython | tests/run/ext_auto_richcmp.py | {
"start": 9512,
"end": 9857
} | class ____(list):
"""
>>> l = [1, 2, 3, 4]
>>> notl = List(l)
>>> notl == l
False
>>> notl != l # implemented by base type
False
>>> notl == notl
True
>>> notl != notl # implemented by base type
False
"""
def __eq__(self, other):
return self is other or list(self) != list(other)
| List |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/job_definition.py | {
"start": 4094,
"end": 62173
} | class ____(IHasInternalInit):
"""Defines a Dagster job."""
_name: str
_graph_def: GraphDefinition
_description: Optional[str]
_tags: Mapping[str, str]
_run_tags: Optional[Mapping[str, str]]
_metadata: Mapping[str, MetadataValue]
_current_level_node_defs: Sequence[NodeDefinition]
_hook_defs: AbstractSet[HookDefinition]
_op_retry_policy: Optional[RetryPolicy]
_asset_layer: AssetLayer
_resource_requirements: Mapping[str, AbstractSet[str]]
_all_node_defs: Mapping[str, NodeDefinition]
_cached_run_config_schemas: dict[str, "RunConfigSchema"]
_subset_selection_data: Optional[Union[OpSelectionData, AssetSelectionData]]
input_values: Mapping[str, object]
_owners: Optional[Sequence[str]]
def __init__(
self,
*,
graph_def: GraphDefinition,
resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,
executor_def: Optional[ExecutorDefinition] = None,
logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,
name: Optional[str] = None,
config: Optional[
Union[ConfigMapping, Mapping[str, object], PartitionedConfig, "RunConfig"]
] = None,
description: Optional[str] = None,
partitions_def: Optional[PartitionsDefinition] = None,
tags: Optional[Mapping[str, Any]] = None,
run_tags: Optional[Mapping[str, Any]] = None,
metadata: Optional[Mapping[str, RawMetadataValue]] = None,
hook_defs: Optional[AbstractSet[HookDefinition]] = None,
op_retry_policy: Optional[RetryPolicy] = None,
_subset_selection_data: Optional[Union[OpSelectionData, AssetSelectionData]] = None,
asset_layer: Optional[AssetLayer] = None,
input_values: Optional[Mapping[str, object]] = None,
_was_explicitly_provided_resources: Optional[bool] = None,
owners: Optional[Sequence[str]] = None,
):
from dagster._core.definitions.run_config import RunConfig, convert_config_input
self._graph_def = graph_def
self._current_level_node_defs = self._graph_def.node_defs
# Recursively explore all nodes in this job
self._all_node_defs = _build_all_node_defs(self._current_level_node_defs)
self._asset_layer = check.opt_inst_param(
asset_layer, "asset_layer", AssetLayer
) or _infer_asset_layer_from_source_asset_deps(graph_def)
# validates
self._graph_def.get_inputs_must_be_resolved_top_level(self._asset_layer)
self._name = check_valid_name(check.str_param(name, "name")) if name else graph_def.name
self._executor_def = check.opt_inst_param(executor_def, "executor_def", ExecutorDefinition)
self._loggers = check.opt_nullable_mapping_param(
logger_defs,
"logger_defs",
key_type=str,
value_type=LoggerDefinition,
)
config = check.opt_inst_param(
config, "config", (Mapping, ConfigMapping, PartitionedConfig, RunConfig)
)
config = convert_config_input(config)
self._original_partitions_def_argument = check.opt_inst_param(
partitions_def, "partitions_def", PartitionsDefinition
)
# tags and description can exist on graph as well, but since
# same graph may be in multiple jobs, keep separate layer
self._description = check.opt_str_param(description, "description")
self._tags = normalize_tags(
tags, warning_stacklevel=5
) # reset once owners is out of beta_param
self._run_tags = run_tags # don't normalize to preserve None
self._metadata = normalize_metadata(
check.opt_mapping_param(metadata, "metadata", key_type=str)
)
self._hook_defs = check.opt_set_param(hook_defs, "hook_defs")
self._op_retry_policy = check.opt_inst_param(
op_retry_policy, "op_retry_policy", RetryPolicy
)
_subset_selection_data = check.opt_inst_param(
_subset_selection_data, "_subset_selection_data", (OpSelectionData, AssetSelectionData)
)
input_values = check.opt_mapping_param(input_values, "input_values", key_type=str)
resource_defs = check.opt_mapping_param(
resource_defs, "resource_defs", key_type=str, value_type=ResourceDefinition
)
for key in resource_defs.keys():
if not key.isidentifier():
check.failed(f"Resource key '{key}' must be a valid Python identifier.")
self._was_provided_resources = (
bool(resource_defs)
if _was_explicitly_provided_resources is None
else _was_explicitly_provided_resources
)
self._resource_defs = {
DEFAULT_IO_MANAGER_KEY: default_job_io_manager,
**resource_defs,
}
self._required_resource_keys = self._get_required_resource_keys(
self._was_provided_resources
)
self._run_config_schema = None
self._original_config_argument = config
self._subset_selection_data = _subset_selection_data
self.input_values = input_values
if owners:
for owner in owners:
validate_definition_owner(owner, "job", self._name)
self._owners = owners
for input_name in sorted(list(self.input_values.keys())):
if not graph_def.has_input(input_name):
raise DagsterInvalidDefinitionError(
f"Error when constructing JobDefinition '{self.name}': Input value provided for"
f" key '{input_name}', but job has no top-level input with that name."
)
def dagster_internal_init(
*,
graph_def: GraphDefinition,
resource_defs: Optional[Mapping[str, ResourceDefinition]],
executor_def: Optional[ExecutorDefinition],
logger_defs: Optional[Mapping[str, LoggerDefinition]],
name: Optional[str],
config: Optional[
Union[ConfigMapping, Mapping[str, object], PartitionedConfig, "RunConfig"]
],
description: Optional[str],
partitions_def: Optional[PartitionsDefinition],
tags: Optional[Mapping[str, Any]],
run_tags: Optional[Mapping[str, Any]],
metadata: Optional[Mapping[str, RawMetadataValue]],
hook_defs: Optional[AbstractSet[HookDefinition]],
op_retry_policy: Optional[RetryPolicy],
_subset_selection_data: Optional[Union[OpSelectionData, AssetSelectionData]],
asset_layer: Optional[AssetLayer],
input_values: Optional[Mapping[str, object]],
_was_explicitly_provided_resources: Optional[bool],
owners: Optional[Sequence[str]],
) -> "JobDefinition":
return JobDefinition(
graph_def=graph_def,
resource_defs=resource_defs,
executor_def=executor_def,
logger_defs=logger_defs,
name=name,
config=config,
description=description,
partitions_def=partitions_def,
tags=tags,
run_tags=run_tags,
metadata=metadata,
hook_defs=hook_defs,
op_retry_policy=op_retry_policy,
_subset_selection_data=_subset_selection_data,
asset_layer=asset_layer,
input_values=input_values,
_was_explicitly_provided_resources=_was_explicitly_provided_resources,
owners=owners,
)
    @staticmethod
    def for_external_job(
        asset_keys: Iterable[AssetKey],
        name: str,
        metadata: Optional[Mapping[str, Any]] = None,
        tags: Optional[Mapping[str, Any]] = None,
    ) -> "JobDefinition":
        """Build a placeholder job representing assets that live outside this code location.

        The returned job carries an asset layer built from ``asset_keys`` and a single
        no-op op, which exists only so the asset graph can be rendered in the UI.
        """
        from dagster._core.definitions import op

        # We need to create a dummy op in order for the asset graph to be rendered in the UI. It's worth investigating whether
        # we can avoid this.
        @op(name=f"{name}_op_inner")
        def _op():
            pass

        return JobDefinition(
            graph_def=GraphDefinition(name=name, node_defs=[_op]),
            resource_defs={},
            executor_def=None,
            asset_layer=AssetLayer.for_external_job(asset_keys),
            metadata=metadata,
            tags=tags,
        )
    @property
    def name(self) -> str:
        """The name of the job."""
        return self._name
    # If `run_tags` is set (not None), then `tags` and `run_tags` are separate specifications of
    # "definition" and "run" tags respectively. Otherwise, `tags` is used for both.
    # This is for backcompat with old behavior prior to the introduction of `run_tags`.
    #
    # We need to preserve the distinction between None and {} values for `run_tags` so that the
    # same logic can be applied in the host process receiving a snapshot of this job. Therefore
    # we store an extra flag `_has_separately_defined_run_tags` which we use to control snapshot
    # generation.
    @cached_property
    def tags(self) -> Mapping[str, str]:
        """Definition-level tags; merged with the graph's tags only when no separate run_tags were provided."""
        if self._run_tags is None:
            return {**self._graph_def.tags, **self._tags}
        else:
            return self._tags
    @cached_property
    def run_tags(self) -> Mapping[str, str]:
        """Tags attached to runs of this job; falls back to definition tags when run_tags was not given."""
        if self._run_tags is None:
            return self.tags
        else:
            return normalize_tags(
                {**self._graph_def.tags, **self._run_tags}, warning_stacklevel=5
            )  # reset once owners is out of beta_param
    # This property exists for backcompat purposes. If it is False, then we omit run_tags when
    # generating a job snapshot. This lets host processes distinguish between None and {} `run_tags`
    # values, which have different semantics:
    #
    # - run_tags=None (`tags` will be used for run tags)
    # - run_tags={} (empty dict will be used for run tags), which have different semantics.
    @property
    def has_separately_defined_run_tags(self) -> bool:
        """Whether `run_tags` was explicitly provided (even as {}), as opposed to inherited from `tags`."""
        return self._run_tags is not None
    @property
    def metadata(self) -> Mapping[str, MetadataValue]:
        """Normalized metadata attached to this job definition."""
        return self._metadata
    @property
    def description(self) -> Optional[str]:
        """Human-readable description of the job, if one was provided."""
        return self._description
    @property
    def owners(self) -> Optional[Sequence[str]]:
        """Owner identifiers attached to this job, if any were provided."""
        return self._owners
    @property
    def graph(self) -> GraphDefinition:
        """The underlying graph of ops/graphs this job executes."""
        return self._graph_def
    @property
    def dependency_structure(self) -> DependencyStructure:
        """The dependency structure of the underlying graph."""
        return self._graph_def.dependency_structure
    @property
    def dependencies(self) -> DependencyMapping[NodeInvocation]:
        """The dependency mapping of the underlying graph."""
        return self._graph_def.dependencies
    @public
    @property
    def executor_def(self) -> ExecutorDefinition:
        """Returns the default :py:class:`ExecutorDefinition` for the job.

        If the user has not specified an executor definition, then this will default to the
        :py:func:`multi_or_in_process_executor`. If a default is specified on the
        :py:class:`Definitions` object the job was provided to, then that will be used instead.
        """
        # DEFAULT_EXECUTOR_DEF is only used when no executor was explicitly supplied.
        return self._executor_def or DEFAULT_EXECUTOR_DEF
    @public
    @property
    def has_specified_executor(self) -> bool:
        """Returns True if this job has explicitly specified an executor, and False if the executor
        was inherited through defaults or the :py:class:`Definitions` object the job was provided to.
        """
        return self._executor_def is not None
    @public
    @property
    def resource_defs(self) -> Mapping[str, ResourceDefinition]:
        """Returns the set of ResourceDefinition objects specified on the job.

        This may not be the complete set of resources required by the job, since those can also be
        provided on the :py:class:`Definitions` object the job may be provided to.
        """
        # Always includes an entry for DEFAULT_IO_MANAGER_KEY (set up in __init__).
        return self._resource_defs
    @public
    @property
    def partitioned_config(self) -> Optional[PartitionedConfig]:
        """The partitioned config for the job, if it has one.

        A partitioned config defines a way to map partition keys to run config for the job.
        """
        return self._resolve_configs()[0]
    @public
    @property
    def config_mapping(self) -> Optional[ConfigMapping]:
        """The config mapping for the job, if it has one.

        A config mapping defines a way to map a top-level config schema to run config for the job.
        """
        return self._resolve_configs()[1]
    @public
    @property
    def loggers(self) -> Mapping[str, LoggerDefinition]:
        """Returns the set of LoggerDefinition objects specified on the job.

        If the user has not specified a mapping of :py:class:`LoggerDefinition` objects, then this
        will default to the :py:func:`colored_console_logger` under the key `console`. If a default
        is specified on the :py:class:`Definitions` object the job was provided to, then that will
        be used instead.
        """
        from dagster._loggers import default_loggers

        return self._loggers or default_loggers()
    @public
    @property
    def has_specified_loggers(self) -> bool:
        """Returns true if the job explicitly set loggers, and False if loggers were inherited
        through defaults or the :py:class:`Definitions` object the job was provided to.
        """
        return self._loggers is not None
    @property
    def required_resource_keys(self) -> AbstractSet[str]:
        """Resource keys required by this job, computed once during __init__."""
        return self._required_resource_keys
    @property
    def run_config(self) -> Optional[Mapping[str, Any]]:
        """The raw run-config dict supplied for this job, if config was given as a dict."""
        return self._resolve_configs()[2]
    @property
    def run_config_schema(self) -> "RunConfigSchema":
        """The run-config schema for this job, built lazily on first access and cached."""
        if self._run_config_schema is None:
            self._run_config_schema = _create_run_config_schema(self)
        return self._run_config_schema
    @public
    @property
    def partitions_def(self) -> Optional[PartitionsDefinition]:
        """Returns the :py:class:`PartitionsDefinition` for the job, if it has one.

        A partitions definition defines the set of partition keys the job operates on.
        """
        # Derived from the resolved partitioned config rather than stored directly.
        return None if not self.partitioned_config else self.partitioned_config.partitions_def
    @cached_property
    def backfill_policy(self) -> BackfillPolicy:
        """The backfill policy resolved across this job's partitioned executable assets."""
        executable_nodes = {self.asset_layer.get(k) for k in self.asset_layer.executable_asset_keys}
        backfill_policies = {n.backfill_policy for n in executable_nodes if n.is_partitioned}
        # normalize null backfill policy to explicit multi_run(1) policy
        return resolve_backfill_policy(backfill_policies)
    @property
    def hook_defs(self) -> AbstractSet[HookDefinition]:
        """Hooks attached at the job level (applied to every node at execution time)."""
        return self._hook_defs
    @property
    def asset_layer(self) -> AssetLayer:
        """The asset layer mapping this job's nodes to asset definitions."""
        return self._asset_layer
    @property
    def all_node_defs(self) -> Sequence[NodeDefinition]:
        """All node definitions in the job, including those nested inside graphs."""
        return list(self._all_node_defs.values())
    @property
    def top_level_node_defs(self) -> Sequence[NodeDefinition]:
        """Node definitions at the top level of the job's graph only."""
        return self._current_level_node_defs
    @property
    def op_retry_policy(self) -> Optional[RetryPolicy]:
        """The job-level default retry policy applied to ops without their own policy."""
        return self._op_retry_policy
    @cached_method
    def _resolve_configs(
        self,
    ) -> tuple[Optional[PartitionedConfig], Optional[ConfigMapping], Optional[Mapping[str, Any]]]:
        """Resolve the original `config` argument into (partitioned_config, config_mapping, run_config).

        Exactly which slots are populated depends on the type of the original
        `config` argument and whether a `partitions_def` was supplied; at most
        one of partitioned_config / config_mapping drives config resolution,
        with run_config additionally set when `config` was a raw dict.
        """
        config = self._original_config_argument
        partition_def = self._original_partitions_def_argument

        partitioned_config = None
        config_mapping = None
        run_config = None

        # An explicit partitions_def wins: wrap whatever config was given.
        if partition_def:
            partitioned_config = PartitionedConfig.from_flexible_config(config, partition_def)
        else:
            if isinstance(config, ConfigMapping):
                config_mapping = config
            elif isinstance(config, PartitionedConfig):
                partitioned_config = config
                if self.asset_layer:
                    # A PartitionedConfig's partitions_def must agree with every
                    # partitioned asset targeted by this job.
                    for asset_key in self._asset_layer.selected_asset_keys:
                        asset_partitions_def = self._asset_layer.get(asset_key).partitions_def
                        check.invariant(
                            asset_partitions_def is None
                            or asset_partitions_def == config.partitions_def,
                            "Can't supply a PartitionedConfig for 'config' with a different PartitionsDefinition"
                            f" than supplied for a target asset 'partitions_def'. Asset: {asset_key.to_user_string()}",
                        )
            elif isinstance(config, dict):
                run_config = config

                # Using config mapping here is a trick to make it so that the preset will be used even
                # when no config is supplied for the job.
                config_mapping = _config_mapping_with_default_value(
                    get_run_config_schema_for_job(
                        self._graph_def,
                        self.resource_defs,
                        self.executor_def,
                        self.loggers,
                        self._asset_layer,
                        was_explicitly_provided_resources=self._was_provided_resources,
                    ),
                    config,
                    self.name,
                )
            elif config is not None:
                check.failed(
                    "config param must be a ConfigMapping, a PartitionedConfig, or a dictionary,"
                    f" but is an object of type {type(config)}"
                )

        return partitioned_config, config_mapping, run_config
    def node_def_named(self, name: str) -> NodeDefinition:
        """Return the node definition with the given name, failing if it does not exist."""
        check.str_param(name, "name")

        check.invariant(name in self._all_node_defs, f"{name} not found")
        return self._all_node_defs[name]
    def has_node(self, name: str) -> bool:
        """Whether a node definition with the given name exists anywhere in the job."""
        check.str_param(name, "name")
        return name in self._all_node_defs
    def get_node(self, handle: NodeHandle) -> Node:
        """Return the node invocation addressed by the (possibly nested) handle."""
        return self._graph_def.get_node(handle)
    def get_op(self, handle: NodeHandle) -> OpNode:
        """Return the node at ``handle``, asserting it is an op (not a nested graph)."""
        node = self.get_node(handle)
        assert isinstance(node, OpNode), (
            f"Tried to retrieve node {handle} as op, but it represents a nested graph."
        )
        return node
    def has_node_named(self, name: str) -> bool:
        """Whether a top-level node invocation with the given name exists in the graph."""
        return self._graph_def.has_node_named(name)
    def get_node_named(self, name: str) -> Node:
        """Return the top-level node invocation with the given name."""
        return self._graph_def.node_named(name)
    @property
    def nodes(self) -> Sequence[Node]:
        """Top-level node invocations of the job's graph."""
        return self._graph_def.nodes
    @property
    def nodes_in_topological_order(self) -> Sequence[Node]:
        """Top-level node invocations ordered so dependencies precede dependents."""
        return self._graph_def.nodes_in_topological_order
    def all_dagster_types(self) -> Iterable[DagsterType]:
        """All Dagster types referenced anywhere in the job's graph."""
        return self._graph_def.all_dagster_types()
    def has_dagster_type(self, name: str) -> bool:
        """Whether the job references a Dagster type with the given name."""
        return self._graph_def.has_dagster_type(name)
    def dagster_type_named(self, name: str) -> DagsterType:
        """Return the Dagster type with the given name."""
        return self._graph_def.dagster_type_named(name)
    def describe_target(self) -> str:
        """Human-readable description of this job for error messages."""
        return f"job '{self.name}'"
def get_required_resource_defs(self) -> Mapping[str, ResourceDefinition]:
return {
resource_key: resource
for resource_key, resource in self.resource_defs.items()
if resource_key in self.required_resource_keys
}
def _get_required_resource_keys(self, validate_requirements: bool = False) -> AbstractSet[str]:
from dagster._core.execution.resources_init import get_transitive_required_resource_keys
requirements = self._get_resource_requirements()
if validate_requirements:
ensure_requirements_satisfied(self.resource_defs, requirements)
required_keys = {req.key for req in requirements if isinstance(req, ResourceKeyRequirement)}
if validate_requirements:
return required_keys.union(
get_transitive_required_resource_keys(required_keys, self.resource_defs)
)
else:
return required_keys
def _get_resource_requirements(self) -> Sequence[ResourceRequirement]:
return [
*self._graph_def.get_resource_requirements(self.asset_layer),
*[
req
for hook_def in self._hook_defs
for req in hook_def.get_resource_requirements(attached_to=f"job '{self._name}'")
],
*[
req
for assets_def in self.asset_layer.asset_graph.assets_defs
for hook_def in assets_def.hook_defs
for req in hook_def.get_resource_requirements(
attached_to=f"asset '{assets_def.node_def.name}'"
)
],
]
    def validate_resource_requirements_satisfied(self) -> None:
        """Raise if any resource requirement is not satisfied by the bound resource defs."""
        resource_requirements = self._get_resource_requirements()
        ensure_requirements_satisfied(self.resource_defs, resource_requirements)
def is_missing_required_resources(self) -> bool:
requirements = self._get_resource_requirements()
for requirement in requirements:
if not requirement.is_satisfied(self.resource_defs):
return True
return False
    def get_all_hooks_for_handle(self, handle: NodeHandle) -> AbstractSet[HookDefinition]:
        """Gather all the hooks for the given node from all places possibly attached with a hook.

        A hook can be attached to any of the following objects
        * Node (node invocation)
        * AssetsDefinition
        * JobDefinition

        Args:
            handle (NodeHandle): The node's handle

        Returns:
            FrozenSet[HookDefinition]
        """
        check.inst_param(handle, "handle", NodeHandle)
        # Hooks attached to the assets definition backing this node, if any.
        assets_def = self.asset_layer.get_assets_def_for_node(handle)
        hook_defs = set(assets_def.hook_defs) if assets_def else set()

        # Flatten the handle into a root-first list of names by walking parents.
        current = handle
        lineage = []
        while current:
            lineage.append(current.name)
            current = current.parent

        # hooks on top-level node
        name = lineage.pop()
        node = self._graph_def.node_named(name)
        hook_defs = hook_defs.union(node.hook_defs)

        # hooks on non-top-level nodes
        while lineage:
            name = lineage.pop()
            # While lineage is non-empty, definition is guaranteed to be a graph
            definition = cast("GraphDefinition", node.definition)
            node = definition.node_named(name)
            hook_defs = hook_defs.union(node.hook_defs)

        # hooks applied to a job definition will run on every node
        hook_defs = hook_defs.union(self.hook_defs)

        return frozenset(hook_defs)
def get_retry_policy_for_handle(self, handle: NodeHandle) -> Optional[RetryPolicy]:
node = self.get_node(handle)
definition = node.definition
if node.retry_policy:
return node.retry_policy
elif isinstance(definition, OpDefinition) and definition.retry_policy:
return definition.retry_policy
# could be expanded to look in graph containers
else:
return self._op_retry_policy
    # make Callable for decorator reference updates
    def __call__(self, *args, **kwargs):
        """Disallow direct invocation; jobs must be run through an execution API."""
        raise DagsterInvariantViolationError(
            f"Attempted to call job '{self.name}' directly. Jobs should be invoked by "
            "using an execution API function (e.g. `job.execute_in_process`)."
        )
    @public
    def execute_in_process(
        self,
        run_config: Optional[Union[Mapping[str, Any], "RunConfig"]] = None,
        instance: Optional["DagsterInstance"] = None,
        partition_key: Optional[str] = None,
        raise_on_error: bool = True,
        op_selection: Optional[Sequence[str]] = None,
        asset_selection: Optional[Sequence[AssetKey]] = None,
        run_id: Optional[str] = None,
        input_values: Optional[Mapping[str, object]] = None,
        tags: Optional[Mapping[str, str]] = None,
        resources: Optional[Mapping[str, object]] = None,
    ) -> "ExecuteInProcessResult":
        """Execute the Job in-process, gathering results in-memory.

        The `executor_def` on the Job will be ignored, and replaced with the in-process executor.
        If using the default `io_manager`, it will switch from filesystem to in-memory.

        Args:
            run_config (Optional[Mapping[str, Any]]):
                The configuration for the run
            instance (Optional[DagsterInstance]):
                The instance to execute against, an ephemeral one will be used if none provided.
            partition_key (Optional[str]):
                The string partition key that specifies the run config to execute. Can only be used
                to select run config for jobs with partitioned config.
            raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.
                Defaults to ``True``.
            op_selection (Optional[Sequence[str]]): A list of op selection queries (including single op
                names) to execute. For example:
                * ``['some_op']``: selects ``some_op`` itself.
                * ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).
                * ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants
                (downstream dependencies) within 3 levels down.
                * ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its
                ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.
            input_values (Optional[Mapping[str, Any]]):
                A dictionary that maps python objects to the top-level inputs of the job. Input
                values provided here will override input values that have been provided to the job
                directly.
            resources (Optional[Mapping[str, Any]]):
                The resources needed if any are required. Can provide resource instances directly,
                or resource definitions.

        Returns:
            :py:class:`~dagster.ExecuteInProcessResult`
        """
        from dagster._core.definitions.job_base import InMemoryJob
        from dagster._core.execution.execute_in_process import (
            core_execute_in_process,
            merge_run_tags,
            type_check_and_normalize_args,
        )

        # Normalize/validate all user-supplied arguments up front.
        run_config, op_selection, asset_selection, resource_defs, partition_key, input_values = (
            type_check_and_normalize_args(
                run_config=run_config,
                partition_key=partition_key,
                op_selection=op_selection,
                asset_selection=asset_selection,
                input_values=input_values,
                resources=resources,
            )
        )

        # Build an in-process variant of this job (in-process executor, possibly
        # in-memory io_manager), subset to the requested selection.
        ephemeral_job = self.as_ephemeral_job(
            resource_defs=resource_defs,
            input_values=input_values,
            op_selection=op_selection,
            asset_selection=asset_selection,
        )

        if partition_key and ephemeral_job.partitions_def:
            with partition_loading_context(dynamic_partitions_store=instance) as ctx:
                ephemeral_job.validate_partition_key(
                    partition_key=partition_key,
                    selected_asset_keys=set(asset_selection),
                    context=ctx,
                )

        wrapped_job = InMemoryJob(job_def=ephemeral_job)

        # Explicit run_config takes precedence over partition-derived config.
        if not run_config and ephemeral_job.partitioned_config and partition_key:
            run_config = ephemeral_job.partitioned_config.get_run_config_for_partition_key(
                partition_key
            )

        return core_execute_in_process(
            job=wrapped_job,
            run_config=run_config,
            instance=instance,
            output_capturing_enabled=True,
            raise_on_error=raise_on_error,
            run_tags=merge_run_tags(
                job_def=self,
                partition_key=partition_key,
                tags=tags,
                asset_selection=asset_selection,
                instance=instance,
                run_config=run_config,
            ),
            run_id=run_id,
            asset_selection=frozenset(asset_selection),
        )
    def as_ephemeral_job(
        self,
        resource_defs: Mapping[str, ResourceDefinition],
        input_values: Mapping[str, object],
        op_selection: Optional[Sequence[str]] = None,
        asset_selection: Optional[Sequence[AssetKey]] = None,
    ) -> "JobDefinition":
        """Build a copy of this job suitable for in-process execution.

        Swaps in the in-process executor, switches the default io_manager to the
        in-memory one where applicable, merges in extra resources and input
        values, and finally subsets the job by the given op/asset selection.
        """
        from dagster._core.definitions.executor_definition import execute_in_process_executor

        bound_resource_defs = dict(self.resource_defs)
        return JobDefinition.dagster_internal_init(
            name=self._name,
            graph_def=self._graph_def,
            resource_defs={**_swap_default_io_man(bound_resource_defs, self), **resource_defs},
            executor_def=execute_in_process_executor,
            logger_defs=self._loggers,
            hook_defs=self.hook_defs,
            config=self.config_mapping or self.partitioned_config or self.run_config,
            tags=self.tags,
            run_tags=self._run_tags,
            op_retry_policy=self._op_retry_policy,
            asset_layer=self.asset_layer,
            input_values=merge_dicts(self.input_values, input_values),
            description=self.description,
            partitions_def=self.partitions_def,
            metadata=self.metadata,
            _subset_selection_data=None,  # this is added below
            _was_explicitly_provided_resources=True,
            owners=self._owners,
        ).get_subset(
            op_selection=op_selection,
            asset_selection=frozenset(asset_selection) if asset_selection else None,
        )
    def _get_partitions_def(
        self, selected_asset_keys: Optional[Iterable[AssetKey]]
    ) -> PartitionsDefinition:
        """Resolve the single PartitionsDefinition governing this job (or the given asset subset).

        Prefers the job's own partitions_def; otherwise requires that all
        relevant assets share exactly one partitions definition. Fails if no
        unique definition can be determined.
        """
        if self.partitions_def:
            return self.partitions_def
        elif self.asset_layer:
            # Narrow to the explicit selection, then the job's asset selection,
            # then every asset the job targets.
            if selected_asset_keys:
                resolved_selected_asset_keys = selected_asset_keys
            elif self.asset_selection:
                resolved_selected_asset_keys = self.asset_selection
            else:
                resolved_selected_asset_keys = self.asset_layer.selected_asset_keys
            unique_partitions_defs: set[PartitionsDefinition] = set()
            for asset_key in resolved_selected_asset_keys:
                partitions_def = self.asset_layer.get(asset_key).partitions_def
                if partitions_def is not None:
                    unique_partitions_defs.add(partitions_def)

            if len(unique_partitions_defs) == 1:
                return check.not_none(next(iter(unique_partitions_defs)))

        if selected_asset_keys is not None:
            check.failed("There is no PartitionsDefinition shared by all the provided assets")
        else:
            check.failed("Job has no PartitionsDefinition")
    def get_partition_keys(
        self, selected_asset_keys: Optional[Iterable[AssetKey]]
    ) -> Sequence[str]:
        """Partition keys of the PartitionsDefinition resolved for this job/asset subset."""
        partitions_def = self._get_partitions_def(selected_asset_keys)
        return partitions_def.get_partition_keys()
    def validate_partition_key(
        self,
        partition_key: str,
        selected_asset_keys: Optional[Iterable[AssetKey]],
        context: PartitionLoadingContext,
    ) -> None:
        """Ensures that the given partition_key is a member of the PartitionsDefinition
        corresponding to every asset in the selection.
        """
        partitions_def = self._get_partitions_def(selected_asset_keys)
        partitions_def.validate_partition_key(partition_key, context=context)
    def get_tags_for_partition_key(
        self, partition_key: str, selected_asset_keys: Optional[Iterable[AssetKey]]
    ) -> Mapping[str, str]:
        """Gets tags for the given partition key."""
        # A partitioned config takes precedence over asset-derived partitions.
        if self.partitioned_config is not None:
            return self.partitioned_config.get_tags_for_partition_key(partition_key, self.name)
        partitions_def = self._get_partitions_def(selected_asset_keys)
        return partitions_def.get_tags_for_partition_key(partition_key)
def get_run_config_for_partition_key(self, partition_key: str) -> Mapping[str, Any]:
if self.partitioned_config:
return self.partitioned_config.get_run_config_for_partition_key(partition_key)
else:
return {}
@property
def op_selection_data(self) -> Optional[OpSelectionData]:
return (
self._subset_selection_data
if isinstance(self._subset_selection_data, OpSelectionData)
else None
)
@property
def asset_selection_data(self) -> Optional[AssetSelectionData]:
return (
self._subset_selection_data
if isinstance(self._subset_selection_data, AssetSelectionData)
else None
)
    @property
    def is_subset(self) -> bool:
        """Whether this job was produced by subsetting a parent job (by ops or assets)."""
        return bool(self._subset_selection_data)
    def get_subset(
        self,
        *,
        op_selection: Optional[Iterable[str]] = None,
        asset_selection: Optional[AbstractSet[AssetKey]] = None,
        asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,
    ) -> "JobDefinition":
        """Return a subset of this job selected by ops OR by assets/checks (mutually exclusive).

        Returns self unchanged when no selection is provided.
        """
        check.invariant(
            not (op_selection and (asset_selection or asset_check_selection)),
            "op_selection cannot be provided with asset_selection or asset_check_selection to"
            " execute_in_process",
        )
        if op_selection:
            return self._get_job_def_for_op_selection(op_selection)
        if asset_selection or asset_check_selection:
            return self._get_job_def_for_asset_selection(
                AssetSelectionData(
                    asset_selection=asset_selection or set(),
                    asset_check_selection=asset_check_selection,
                    parent_job_def=self,
                )
            )
        else:
            return self
    def _get_job_def_for_asset_selection(
        self, selection_data: AssetSelectionData
    ) -> "JobDefinition":
        """Rebuild this job restricted to the assets/checks in ``selection_data``."""
        from dagster._core.definitions.assets.job.asset_job import (
            build_asset_job,
            get_asset_graph_for_job,
        )

        # If a non-null check selection is provided, use that. Otherwise the selection will resolve
        # to all checks matching a selected asset by default.
        selection = AssetSelection.assets(*selection_data.asset_selection)
        if selection_data.asset_check_selection is not None:
            selection = selection.without_checks() | AssetSelection.checks(
                *selection_data.asset_check_selection
            )

        job_asset_graph = get_asset_graph_for_job(
            self.asset_layer.asset_graph.source_asset_graph,
            selection,
            allow_different_partitions_defs=True,
        )

        return build_asset_job(
            name=self.name,
            asset_graph=job_asset_graph,
            executor_def=self.executor_def,
            resource_defs=self.resource_defs,
            description=self.description,
            tags=self.tags,
            config=self.config_mapping or self.partitioned_config,
            _asset_selection_data=selection_data,
            allow_different_partitions_defs=True,
        )
    def _get_job_def_for_op_selection(self, op_selection: Iterable[str]) -> "JobDefinition":
        """Rebuild this job restricted to the ops matched by the selection queries.

        Raises:
            DagsterInvalidSubsetError: If the selection produces an invalid graph
                (e.g. an unsatisfied input that cannot be loaded from config).
        """
        try:
            sub_graph = get_graph_subset(self.graph, op_selection, selected_outputs_by_op_handle={})

            # if explicit config was passed the config_mapping that resolves the defaults implicitly is
            # very unlikely to work. The job will still present the default config in the Dagster UI.
            config = (
                None
                if self.run_config is not None
                else self.config_mapping or self.partitioned_config
            )

            return self._copy(
                config=config,
                graph_def=sub_graph,
                _subset_selection_data=OpSelectionData(
                    op_selection=list(op_selection),
                    resolved_op_selection=OpSelection(op_selection).resolve(self.graph),
                    parent_job_def=self,  # used by job snapshot lineage
                ),
                # TODO: subset this structure.
                # https://github.com/dagster-io/dagster/issues/7541
                asset_layer=self.asset_layer,
            )
        except DagsterInvalidDefinitionError as exc:
            # This handles the case when you construct a subset such that an unsatisfied
            # input cannot be loaded from config. Instead of throwing a DagsterInvalidDefinitionError,
            # we re-raise a DagsterInvalidSubsetError.
            node_paths = OpSelection(op_selection).resolve(self.graph)
            raise DagsterInvalidSubsetError(
                f"The attempted subset {str_format_set(node_paths)} for graph "
                f"{self.graph.name} results in an invalid graph."
            ) from exc
    @public
    @deprecated(
        breaking_version="2.0.0",
        additional_warn_text="Directly instantiate `RunRequest(partition_key=...)` instead.",
    )
    def run_request_for_partition(
        self,
        partition_key: str,
        run_key: Optional[str] = None,
        tags: Optional[Mapping[str, str]] = None,
        asset_selection: Optional[Sequence[AssetKey]] = None,
        run_config: Optional[Mapping[str, Any]] = None,
        current_time: Optional[datetime] = None,
        dynamic_partitions_store: Optional["DynamicPartitionsStore"] = None,
    ) -> "RunRequest":
        """Creates a RunRequest object for a run that processes the given partition.

        Args:
            partition_key: The key of the partition to request a run for.
            run_key (Optional[str]): A string key to identify this launched run. For sensors, ensures that
                only one run is created per run key across all sensor evaluations.  For schedules,
                ensures that one run is created per tick, across failure recoveries. Passing in a `None`
                value means that a run will always be launched per evaluation.
            tags (Optional[Dict[str, str]]): A dictionary of tags (string key-value pairs) to attach
                to the launched run.
            run_config (Optional[Mapping[str, Any]]: Configuration for the run. If the job has
                a :py:class:`PartitionedConfig`, this value will override replace the config
                provided by it.
            current_time (Optional[datetime]): Used to determine which time-partitions exist.
                Defaults to now.
            dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore
                object that is responsible for fetching dynamic partitions. Required when the
                partitions definition is a DynamicPartitionsDefinition with a name defined. Users
                can pass the DagsterInstance fetched via `context.instance` to this argument.

        Returns:
            RunRequest: an object that requests a run to process the given partition.
        """
        from dagster._core.definitions.run_request import RunRequest

        if not (self.partitions_def and self.partitioned_config):
            check.failed("Called run_request_for_partition on a non-partitioned job")

        if (
            isinstance(self.partitions_def, DynamicPartitionsDefinition)
            and self.partitions_def.name
        ):
            # Do not support using run_request_for_partition with dynamic partitions,
            # since this requires querying the instance once per run request for the
            # existent dynamic partitions
            check.failed(
                "run_request_for_partition is not supported for dynamic partitions. Instead, use"
                " RunRequest(partition_key=...)"
            )

        with partition_loading_context(current_time, dynamic_partitions_store) as ctx:
            self.partitions_def.validate_partition_key(partition_key, context=ctx)

        # Explicit run_config overrides config derived from the partitioned config.
        run_config = (
            run_config
            if run_config is not None
            else self.partitioned_config.get_run_config_for_partition_key(partition_key)
        )
        run_request_tags = {
            **(tags or {}),
            **self.partitioned_config.get_tags_for_partition_key(
                partition_key,
                job_name=self.name,
            ),
        }

        return RunRequest(
            run_key=run_key,
            run_config=run_config,
            tags=run_request_tags,
            job_name=self.name,
            asset_selection=asset_selection,
            partition_key=partition_key,
        )
    def get_config_schema_snapshot(self) -> "ConfigSchemaSnapshot":
        """The config-schema snapshot embedded in this job's snapshot."""
        return self.get_job_snapshot().config_schema_snapshot
    def get_job_snapshot(self) -> "JobSnap":
        """Serializable snapshot of this job, via the cached job index."""
        return self.get_job_index().job_snapshot
    @cached_method
    def get_job_index(self) -> "JobIndex":
        """Build (once) a JobIndex wrapping this job's snapshot and its parent snapshot."""
        from dagster._core.remote_representation.job_index import JobIndex
        from dagster._core.snap import JobSnap

        return JobIndex(JobSnap.from_job_def(self), self.get_parent_job_snapshot())
    def get_job_snapshot_id(self) -> str:
        """Stable identifier of this job's snapshot, via the cached job index."""
        return self.get_job_index().job_snapshot_id
def get_parent_job_snapshot(self) -> Optional["JobSnap"]:
if self.op_selection_data:
return self.op_selection_data.parent_job_def.get_job_snapshot()
elif self.asset_selection_data:
return self.asset_selection_data.parent_job_def.get_job_snapshot()
else:
return None
    def has_direct_input_value(self, input_name: str) -> bool:
        """Whether a value was bound directly for the named top-level input."""
        return input_name in self.input_values
def get_direct_input_value(self, input_name: str) -> object:
if input_name not in self.input_values:
raise DagsterInvalidInvocationError(
f"On job '{self.name}', attempted to retrieve input value for input named"
f" '{input_name}', but no value was provided. Provided input values:"
f" {sorted(list(self.input_values.keys()))}"
)
return self.input_values[input_name]
    def _copy(self, **kwargs: Any) -> "JobDefinition":
        """Return a copy of this job with the given constructor arguments overridden.

        Keyword arguments override the corresponding attributes of this job; all
        other attributes are carried over unchanged.
        """
        # dict() calls copy dict props
        base_kwargs = dict(
            graph_def=self.graph,
            resource_defs=dict(self.resource_defs),
            executor_def=self._executor_def,
            logger_defs=self._loggers,
            config=self._original_config_argument,
            name=self._name,
            description=self.description,
            tags=self.tags,
            run_tags=self._run_tags,
            metadata=self._metadata,
            hook_defs=self.hook_defs,
            op_retry_policy=self._op_retry_policy,
            _subset_selection_data=self._subset_selection_data,
            asset_layer=self.asset_layer,
            input_values=self.input_values,
            partitions_def=self._original_partitions_def_argument,
            _was_explicitly_provided_resources=(
                "resource_defs" in kwargs or self._was_provided_resources
            ),
            owners=self._owners,
        )
        resolved_kwargs = {**base_kwargs, **kwargs}  # base kwargs overwritten for conflicts
        job_def = JobDefinition.dagster_internal_init(**resolved_kwargs)
        # Preserve wrapper metadata (e.g. __doc__/__name__) from the original.
        update_wrapper(job_def, self, updated=())
        return job_def
    @public
    def with_top_level_resources(
        self, resource_defs: Mapping[str, ResourceDefinition]
    ) -> "JobDefinition":
        """Apply a set of resources to all op instances within the job."""
        resource_defs = check.mapping_param(resource_defs, "resource_defs", key_type=str)
        return self._copy(resource_defs=resource_defs)
    @public
    def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> "JobDefinition":
        """Apply a set of hooks to all op instances within the job."""
        hook_defs = check.set_param(hook_defs, "hook_defs", of_type=HookDefinition)
        # New hooks are merged with (not replacing) existing job-level hooks.
        return self._copy(hook_defs=(hook_defs | self.hook_defs))
    def with_executor_def(self, executor_def: ExecutorDefinition) -> "JobDefinition":
        """Return a copy of this job using the given executor definition."""
        return self._copy(executor_def=executor_def)
    def with_logger_defs(self, logger_defs: Mapping[str, LoggerDefinition]) -> "JobDefinition":
        """Return a copy of this job using the given logger definitions."""
        return self._copy(logger_defs=logger_defs)
    def with_metadata(self, metadata: Mapping[str, RawMetadataValue]) -> "JobDefinition":
        """Return a copy of this job with the given (normalized) metadata."""
        return self._copy(metadata=normalize_metadata(metadata))
@property
def op_selection(self) -> Optional[AbstractSet[str]]:
return set(self.op_selection_data.op_selection) if self.op_selection_data else None
    @property
    def asset_selection(self) -> Optional[AbstractSet[AssetKey]]:
        """The selected asset keys, if this job is an asset subset."""
        return self.asset_selection_data.asset_selection if self.asset_selection_data else None
    @property
    def asset_check_selection(self) -> Optional[AbstractSet[AssetCheckKey]]:
        """The selected asset check keys, if this job is an asset subset."""
        return (
            self.asset_selection_data.asset_check_selection if self.asset_selection_data else None
        )
    @property
    def resolved_op_selection(self) -> Optional[AbstractSet[str]]:
        """The resolved op names for this subset's selection, if this job is an op subset."""
        return self.op_selection_data.resolved_op_selection if self.op_selection_data else None
def _swap_default_io_man(resources: Mapping[str, ResourceDefinition], job: JobDefinition):
    """Used to create the user facing experience of the default io_manager
    switching to in-memory when using execute_in_process.
    """
    from dagster._core.storage.mem_io_manager import mem_io_manager

    # Only swap when the bound io_manager is the framework default; a user-supplied
    # io_manager is left untouched. (`in [default_job_io_manager]` compares by
    # equality with an identity fast-path.)
    if resources.get(DEFAULT_IO_MANAGER_KEY) in [default_job_io_manager]:
        updated_resources = dict(resources)
        updated_resources[DEFAULT_IO_MANAGER_KEY] = mem_io_manager
        return updated_resources

    return resources
@dagster_maintained_io_manager
@io_manager(
description="Built-in filesystem IO manager that stores and retrieves values using pickling."
)
def default_job_io_manager(init_context: "InitResourceContext"):
# support overriding the default io manager via environment variables
module_name = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_MODULE")
attribute_name = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE")
silence_failures = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_SILENCE_FAILURES")
if module_name and attribute_name:
from dagster._core.execution.build_resources import build_resources
try:
module = importlib.import_module(module_name)
attr = getattr(module, attribute_name)
check.invariant(
isinstance(attr, IOManagerDefinition),
"DAGSTER_DEFAULT_IO_MANAGER_MODULE and DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE"
" must specify an IOManagerDefinition",
)
with build_resources({"io_manager": attr}, instance=init_context.instance) as resources:
return resources.io_manager
except Exception as e:
if not silence_failures:
raise
else:
warnings.warn(
f"Failed to load io manager override with module: {module_name} attribute:"
f" {attribute_name}: {e}\nFalling back to default io manager."
)
# normally, default to the fs_io_manager
from dagster._core.storage.fs_io_manager import PickledObjectFilesystemIOManager
instance = check.not_none(init_context.instance)
return PickledObjectFilesystemIOManager(base_dir=instance.storage_directory())
@dagster_maintained_io_manager
@io_manager(
description="Built-in filesystem IO manager that stores and retrieves values using pickling.",
config_schema={"base_dir": Field(StringSource, is_required=False)},
)
def default_job_io_manager_with_fs_io_manager_schema(init_context: "InitResourceContext"):
# support overriding the default io manager via environment variables
module_name = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_MODULE")
attribute_name = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE")
silence_failures = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_SILENCE_FAILURES")
if module_name and attribute_name:
from dagster._core.execution.build_resources import build_resources
try:
module = importlib.import_module(module_name)
attr = getattr(module, attribute_name)
check.invariant(
isinstance(attr, IOManagerDefinition),
"DAGSTER_DEFAULT_IO_MANAGER_MODULE and DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE"
" must specify an IOManagerDefinition",
)
with build_resources({"io_manager": attr}, instance=init_context.instance) as resources:
return resources.io_manager
except Exception as e:
if not silence_failures:
raise
else:
warnings.warn(
f"Failed to load io manager override with module: {module_name} attribute:"
f" {attribute_name}: {e}\nFalling back to default io manager."
)
from dagster._core.storage.fs_io_manager import PickledObjectFilesystemIOManager
# normally, default to the fs_io_manager
base_dir = init_context.resource_config.get(
"base_dir", init_context.instance.storage_directory() if init_context.instance else None
)
return PickledObjectFilesystemIOManager(base_dir=base_dir)
def _config_mapping_with_default_value(
inner_schema: ConfigType,
default_config: Mapping[str, Any],
job_name: str,
) -> ConfigMapping:
if not isinstance(inner_schema, Shape):
check.failed("Only Shape (dictionary) config_schema allowed on Job ConfigMapping")
def config_fn(x):
return x
updated_fields = {}
field_aliases = inner_schema.field_aliases
for name, field in inner_schema.fields.items():
if name in default_config:
updated_fields[name] = Field(
config=field.config_type,
default_value=default_config[name],
description=field.description,
)
elif name in field_aliases and field_aliases[name] in default_config:
updated_fields[name] = Field(
config=field.config_type,
default_value=default_config[field_aliases[name]],
description=field.description,
)
else:
updated_fields[name] = field
config_schema = Shape(
fields=updated_fields,
description=(
"This run config schema was automatically populated with default values "
"from `default_config`."
),
field_aliases=inner_schema.field_aliases,
)
config_evr = validate_config(config_schema, default_config)
if not config_evr.success:
raise DagsterInvalidConfigError(
f"Error in config when building job '{job_name}' ",
config_evr.errors,
default_config,
)
return ConfigMapping(
config_fn=config_fn, config_schema=config_schema, receive_processed_config_values=False
)
def get_run_config_schema_for_job(
graph_def: GraphDefinition,
resource_defs: Mapping[str, ResourceDefinition],
executor_def: "ExecutorDefinition",
logger_defs: Mapping[str, LoggerDefinition],
asset_layer: Optional[AssetLayer],
was_explicitly_provided_resources: bool = False,
) -> ConfigType:
return JobDefinition(
name=graph_def.name,
graph_def=graph_def,
resource_defs=resource_defs,
executor_def=executor_def,
logger_defs=logger_defs,
asset_layer=asset_layer,
_was_explicitly_provided_resources=was_explicitly_provided_resources,
).run_config_schema.run_config_schema_type
def _infer_asset_layer_from_source_asset_deps(job_graph_def: GraphDefinition) -> AssetLayer:
"""For non-asset jobs that have some inputs that are fed from assets, constructs an
AssetLayer that includes these assets as loadables.
"""
from dagster._core.definitions.assets.graph.asset_graph import AssetGraph
keys_by_input_handle: dict[NodeInputHandle, AssetKey] = {}
assets_defs_by_key: dict[AssetKey, AssetsDefinition] = {}
# each entry is a graph definition and its handle relative to the job root
stack: list[tuple[GraphDefinition, Optional[NodeHandle]]] = [(job_graph_def, None)]
while stack:
graph_def, parent_node_handle = stack.pop()
# iterate through the input_assets mapping on the graph definition, which
# maps from node name to the set of assets definitions associated with each
# input of that node
for node_name, input_assets in graph_def.input_assets.items():
node_handle = NodeHandle(node_name, parent_node_handle)
for input_name, assets_def in input_assets.items():
key = assets_def.key
assets_defs_by_key[key] = assets_def
# we know what key is associated with the outer input handle, so we
# store that in the mapping and then calculate which inner input handles
# this outer input handle is connected to, storing those as well
outer_input_handle = NodeInputHandle(node_handle=node_handle, input_name=input_name)
keys_by_input_handle[outer_input_handle] = key
inner_node_def = graph_def.node_dict[node_name].definition
for inner_input_handle in inner_node_def.resolve_input_to_destinations(
outer_input_handle
):
keys_by_input_handle[inner_input_handle] = key
# add all subgraphs to the stack
for node_def in graph_def.node_defs:
if isinstance(node_def, GraphDefinition):
stack.append((node_def, NodeHandle(node_def.name, parent_node_handle)))
return AssetLayer(
asset_graph=AssetGraph.from_assets(list(assets_defs_by_key.values())),
# the AssetsDefinitions we have do not have any NodeDefinition explicitly
# associated with them (they are not part of the actual execution, they're
# just markers), so we don't pass them in through the data field and instead
# pass this mapping information directly
data=[],
mapped_source_asset_keys_by_input_handle=keys_by_input_handle,
)
def _build_all_node_defs(node_defs: Sequence[NodeDefinition]) -> Mapping[str, NodeDefinition]:
all_defs: dict[str, NodeDefinition] = {}
for current_level_node_def in node_defs:
for node_def in current_level_node_def.iterate_node_defs():
if node_def.name in all_defs:
if all_defs[node_def.name] != node_def:
raise DagsterInvalidDefinitionError(
f'Detected conflicting node definitions with the same name "{node_def.name}"'
)
else:
all_defs[node_def.name] = node_def
return all_defs
def _create_run_config_schema(
job_def: JobDefinition,
) -> "RunConfigSchema":
from dagster._core.definitions.run_config import (
RunConfigSchemaCreationData,
construct_config_type_dictionary,
define_run_config_schema_type,
)
from dagster._core.definitions.run_config_schema import RunConfigSchema
from dagster._core.remote_representation.code_location import is_implicit_asset_job_name
# When executing with a subset job that is not an implicit asset job, include the missing nodes
# from the original job as ignored to allow execution with
# run config that is valid for the original
ignored_nodes: Sequence[Node] = []
if job_def.is_subset and is_implicit_asset_job_name(job_def.name):
included_resource_defs = job_def.get_required_resource_defs()
else:
included_resource_defs = job_def.resource_defs
if job_def.is_subset and not is_implicit_asset_job_name(job_def.name):
if isinstance(job_def.graph, SubselectedGraphDefinition): # op selection provided
ignored_nodes = job_def.graph.get_top_level_omitted_nodes()
elif job_def.asset_selection_data:
parent_job = job_def
while parent_job.asset_selection_data:
parent_job = parent_job.asset_selection_data.parent_job_def
ignored_nodes = [
node for node in parent_job.graph.nodes if not job_def.has_node_named(node.name)
]
else:
ignored_nodes = []
run_config_schema_type = define_run_config_schema_type(
RunConfigSchemaCreationData(
job_name=job_def.name,
nodes=job_def.graph.nodes,
graph_def=job_def.graph,
dependency_structure=job_def.graph.dependency_structure,
executor_def=job_def.executor_def,
resource_defs=included_resource_defs,
logger_defs=job_def.loggers,
ignored_nodes=ignored_nodes,
required_resources=job_def.required_resource_keys,
direct_inputs=job_def.input_values,
asset_layer=job_def.asset_layer,
)
)
if job_def.config_mapping:
outer_config_type = job_def.config_mapping.config_schema.config_type
else:
outer_config_type = run_config_schema_type
if outer_config_type is None:
check.failed("Unexpected outer_config_type value of None")
config_type_dict_by_name, config_type_dict_by_key = construct_config_type_dictionary(
job_def.all_node_defs,
outer_config_type,
)
return RunConfigSchema(
run_config_schema_type=run_config_schema_type,
config_type_dict_by_name=config_type_dict_by_name,
config_type_dict_by_key=config_type_dict_by_key,
config_mapping=job_def.config_mapping,
)
| JobDefinition |
python | dagster-io__dagster | python_modules/libraries/dagster-prometheus/dagster_prometheus/resources.py | {
"start": 368,
"end": 472
} | class ____:
"""Integrates with Prometheus via the prometheus_client library."""
@beta
| PrometheusClient |
python | openai__openai-python | src/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py | {
"start": 620,
"end": 1025
} | class ____(BaseModel):
input: Optional[str] = None
"""The input to the Code Interpreter tool call."""
outputs: Optional[List[CodeInterpreterOutput]] = None
"""The outputs from the Code Interpreter tool call.
Code Interpreter can output one or more items, including text (`logs`) or images
(`image`). Each of these are represented by a different object type.
"""
| CodeInterpreter |
python | ray-project__ray | python/ray/data/tests/test_resource_manager.py | {
"start": 3117,
"end": 15850
} | class ____:
"""Unit tests for ResourceManager."""
def test_global_limits(self):
cluster_resources = {"CPU": 10, "GPU": 5, "object_store_memory": 1000}
default_object_store_memory_limit = math.ceil(
cluster_resources["object_store_memory"]
* ResourceManager.DEFAULT_OBJECT_STORE_MEMORY_LIMIT_FRACTION
)
def get_total_resources():
return ExecutionResources.from_resource_dict(cluster_resources)
# Test default resource limits.
# When no resource limits are set, the resource limits should default to
# the cluster resources for CPU/GPU, and
# DEFAULT_OBJECT_STORE_MEMORY_LIMIT_FRACTION of cluster object store memory.
options = ExecutionOptions()
resource_manager = ResourceManager(
MagicMock(), options, get_total_resources, DataContext.get_current()
)
expected = ExecutionResources(
cpu=cluster_resources["CPU"],
gpu=cluster_resources["GPU"],
object_store_memory=default_object_store_memory_limit,
)
assert resource_manager.get_global_limits() == expected
# Test setting resource_limits
options = ExecutionOptions()
options.resource_limits = ExecutionResources(
cpu=1, gpu=2, object_store_memory=100
)
resource_manager = ResourceManager(
MagicMock(), options, get_total_resources, DataContext.get_current()
)
expected = ExecutionResources(
cpu=1,
gpu=2,
object_store_memory=100,
)
assert resource_manager.get_global_limits() == expected
# Test setting exclude_resources
# The actual limit should be the default limit minus the excluded resources.
options = ExecutionOptions()
options.exclude_resources = ExecutionResources(
cpu=1, gpu=2, object_store_memory=100
)
resource_manager = ResourceManager(
MagicMock(), options, get_total_resources, DataContext.get_current()
)
expected = ExecutionResources(
cpu=cluster_resources["CPU"] - 1,
gpu=cluster_resources["GPU"] - 2,
object_store_memory=default_object_store_memory_limit - 100,
)
assert resource_manager.get_global_limits() == expected
# Test that we don't support setting both resource_limits
# and exclude_resources.
with pytest.raises(ValueError):
options = ExecutionOptions()
options.resource_limits = ExecutionResources(cpu=2)
options.exclude_resources = ExecutionResources(cpu=1)
options.validate()
def test_global_limits_cache(self):
get_total_resources = MagicMock(return_value=ExecutionResources(4, 1, 0))
cache_interval_s = 0.1
with patch.object(
ResourceManager,
"GLOBAL_LIMITS_UPDATE_INTERVAL_S",
cache_interval_s,
):
resource_manager = ResourceManager(
MagicMock(),
ExecutionOptions(),
get_total_resources,
DataContext.get_current(),
)
expected_resource = ExecutionResources(4, 1, 0)
# The first call should call ray.cluster_resources().
assert resource_manager.get_global_limits() == expected_resource
assert get_total_resources.call_count == 1
# The second call should return the cached value.
assert resource_manager.get_global_limits() == expected_resource
assert get_total_resources.call_count == 1
time.sleep(cache_interval_s)
# After the cache interval, the third call should call
# ray.cluster_resources() again.
assert resource_manager.get_global_limits() == expected_resource
assert get_total_resources.call_count == 2
def test_update_usage(self):
"""Test calculating op_usage."""
o1 = InputDataBuffer(DataContext.get_current(), [])
o2 = mock_map_op(o1)
o3 = mock_map_op(o2)
topo = build_streaming_topology(o3, ExecutionOptions())
# Mock different metrics that contribute to the resource usage.
mock_cpu = {
o1: 0,
o2: 5,
o3: 8,
}
mock_pending_task_outputs = {
o1: 0,
o2: 100,
o3: 200,
}
mock_internal_outqueue = {
o1: 0,
o2: 300,
o3: 400,
}
mock_external_outqueue_sizes = {
o1: 100,
o2: 500,
o3: 600,
}
mock_internal_inqueue = {
o1: 0,
o2: 700,
o3: 800,
}
mock_pending_task_inputs = {
o1: 0,
o2: 900,
o3: 1000,
}
for op in [o1, o2, o3]:
op.update_resource_usage = MagicMock()
op.current_processor_usage = MagicMock(
return_value=ExecutionResources(cpu=mock_cpu[op], gpu=0)
)
op.running_processor_usage = MagicMock(
return_value=ExecutionResources(cpu=mock_cpu[op], gpu=0)
)
op.pending_processor_usage = MagicMock(
return_value=ExecutionResources.zero()
)
op.extra_resource_usage = MagicMock(return_value=ExecutionResources.zero())
op._metrics = MagicMock(
obj_store_mem_pending_task_outputs=mock_pending_task_outputs[op],
obj_store_mem_internal_outqueue=mock_internal_outqueue[op],
obj_store_mem_internal_inqueue=mock_internal_inqueue[op],
obj_store_mem_pending_task_inputs=mock_pending_task_inputs[op],
)
ref_bundle = MagicMock(
size_bytes=MagicMock(return_value=mock_external_outqueue_sizes[op])
)
topo[op].add_output(ref_bundle)
resource_manager = ResourceManager(
topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
)
resource_manager._op_resource_allocator = None
resource_manager.update_usages()
global_cpu = 0
global_mem = 0
for op in [o1, o2, o3]:
if op == o1:
# Resource usage of InputDataBuffer doesn't count.
expected_mem = 0
else:
expected_mem = (
mock_pending_task_outputs[op]
+ mock_internal_outqueue[op]
+ mock_external_outqueue_sizes[op]
)
for next_op in op.output_dependencies:
expected_mem += (
+mock_internal_inqueue[next_op]
+ mock_pending_task_inputs[next_op]
)
op_usage = resource_manager.get_op_usage(op)
assert op_usage.cpu == mock_cpu[op]
assert op_usage.gpu == 0
assert op_usage.object_store_memory == expected_mem
if op != o1:
assert (
resource_manager._mem_op_internal[op]
== mock_pending_task_outputs[op] + mock_internal_outqueue[op]
)
assert (
resource_manager._mem_op_outputs[op]
== expected_mem - resource_manager._mem_op_internal[op]
)
global_cpu += mock_cpu[op]
global_mem += expected_mem
assert resource_manager.get_global_usage() == ExecutionResources(
global_cpu, 0, global_mem
)
def test_object_store_usage(self, restore_data_context):
input = make_ref_bundles([[x] for x in range(1)])[0]
input.size_bytes = MagicMock(return_value=1)
o1 = InputDataBuffer(DataContext.get_current(), [input])
o2 = mock_map_op(o1)
o3 = mock_map_op(o2)
topo = build_streaming_topology(o3, ExecutionOptions())
resource_manager = ResourceManager(
topo,
ExecutionOptions(),
MagicMock(return_value=ExecutionResources.zero()),
DataContext.get_current(),
)
ray.data.DataContext.get_current()._max_num_blocks_in_streaming_gen_buffer = 1
ray.data.DataContext.get_current().target_max_block_size = 2
resource_manager.update_usages()
assert resource_manager.get_op_usage(o1).object_store_memory == 0
assert resource_manager.get_op_usage(o2).object_store_memory == 0
assert resource_manager.get_op_usage(o3).object_store_memory == 0
# Objects in an operator's internal inqueue typically count toward the previous
# operator's object store memory usage. However, data from an
# `InputDataBuffer` aren't counted because they were created outside of this
# execution.
o2.metrics.on_input_queued(input)
resource_manager.update_usages()
assert resource_manager.get_op_usage(o1).object_store_memory == 0
assert resource_manager.get_op_usage(o2).object_store_memory == 0
assert resource_manager.get_op_usage(o3).object_store_memory == 0
# Operators estimate pending task outputs using the target max block size
# multiplied by MAX_SAFE_BLOCK_SIZE_FACTOR (1.5) during no-sample phase.
# In this case, the target max block size is 2, MAX_SAFE_BLOCK_SIZE_FACTOR
# is 1.5, and there is at most 1 block in the streaming generator buffer,
# so the estimated usage is 2 * 1.5 * 1 = 3.
o2.metrics.on_input_dequeued(input)
o2.metrics.on_task_submitted(0, input)
resource_manager.update_usages()
# target_max_block_size * factor * max_blocks
expected_usage = 2 * MAX_SAFE_BLOCK_SIZE_FACTOR * 1
assert resource_manager.get_op_usage(o1).object_store_memory == 0
op2_usage = resource_manager.get_op_usage(o2).object_store_memory
assert op2_usage == expected_usage
assert resource_manager.get_op_usage(o3).object_store_memory == 0
# When the task finishes, we move the data from the streaming generator to the
# operator's internal outqueue.
o2.metrics.on_output_queued(input)
o2.metrics.on_task_finished(0, None)
resource_manager.update_usages()
assert resource_manager.get_op_usage(o1).object_store_memory == 0
assert resource_manager.get_op_usage(o2).object_store_memory == 1
assert resource_manager.get_op_usage(o3).object_store_memory == 0
o2.metrics.on_output_dequeued(input)
topo[o2].output_queue.append(input)
resource_manager.update_usages()
assert resource_manager.get_op_usage(o1).object_store_memory == 0
assert resource_manager.get_op_usage(o2).object_store_memory == 1
assert resource_manager.get_op_usage(o3).object_store_memory == 0
# Objects in the current operator's internal inqueue count towards the previous
# operator's object store memory usage.
o3.metrics.on_input_queued(topo[o2].output_queue.pop())
resource_manager.update_usages()
assert resource_manager.get_op_usage(o1).object_store_memory == 0
assert resource_manager.get_op_usage(o2).object_store_memory == 1
assert resource_manager.get_op_usage(o3).object_store_memory == 0
# Task inputs count toward the previous operator's object store memory
# usage, and task outputs count toward the current operator's object
# store memory usage. During no-sample phase, pending outputs are
# estimated using target_max_block_size * MAX_SAFE_BLOCK_SIZE_FACTOR.
o3.metrics.on_input_dequeued(input)
o3.metrics.on_task_submitted(0, input)
resource_manager.update_usages()
assert resource_manager.get_op_usage(o1).object_store_memory == 0
assert resource_manager.get_op_usage(o2).object_store_memory == 1
# target_max_block_size (2) * factor (1.5) * max_blocks (1) = 3
expected_o3_usage = 2 * MAX_SAFE_BLOCK_SIZE_FACTOR * 1
op3_usage = resource_manager.get_op_usage(o3).object_store_memory
assert op3_usage == expected_o3_usage
# Task inputs no longer count once the task is finished.
o3.metrics.on_output_queued(input)
o3.metrics.on_task_finished(0, None)
resource_manager.update_usages()
assert resource_manager.get_op_usage(o1).object_store_memory == 0
assert resource_manager.get_op_usage(o2).object_store_memory == 0
assert resource_manager.get_op_usage(o3).object_store_memory == 1
| TestResourceManager |
python | pytorch__pytorch | torch/_dynamo/variables/user_defined.py | {
"start": 90475,
"end": 91882
} | class ____(UserDefinedObjectVariable):
def __init__(self, value, **kwargs):
super().__init__(value, **kwargs)
self.generic_dict_vt = variables.ConstDictVariable({})
def var_getattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker":
# A common pattern in the init code of MutableMapping objects is to
# update the __dict__ attribute. To prevent graph break, we directly
# return a ConstDictVariable for the __dict__attr.
#
# However, users can try to add a new attribute to the class using the
# __dict__ attribute. To catch this, we save the ConstDictVariable for
# the __dict__ and then lookup into this vt for each attr lookup.
if name == "get" and type(self.value).get in (
collections.abc.Mapping.get,
dict.get,
):
return variables.UserMethodVariable(polyfills.mapping_get, self)
elif name == "__dict__" and self.source:
self.generic_dict_vt = variables.LazyVariableTracker.create(
self.value.__dict__, AttrSource(self.source, "__dict__")
)
return self.generic_dict_vt
elif out := self.generic_dict_vt.maybe_getitem_const(
variables.ConstantVariable(name)
):
return out
else:
return super().var_getattr(tx, name)
| MutableMappingVariable |
python | walkccc__LeetCode | solutions/635. Design Log Storage System/635.py | {
"start": 0,
"end": 579
} | class ____:
def __init__(self):
self.granularityToIndices = {'Year': 4, 'Month': 7, 'Day': 10,
'Hour': 13, 'Minute': 16, 'Second': 19}
self.idAndTimestamps = []
def put(self, id: int, timestamp: str) -> None:
self.idAndTimestamps.append((id, timestamp))
def retrieve(self, start: str, end: str, granularity: str) -> list[int]:
index = self.granularityToIndices[granularity]
s = start[:index]
e = end[:index]
return [id for id, timestamp in self.idAndTimestamps
if s <= timestamp[:index] <= e]
| LogSystem |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 335425,
"end": 336076
} | class ____(_ForInStatNode):
# 'async for' statement
#
# iterator AIterAwaitExprNode(AsyncIteratorNode)
# item AwaitIterNextExprNode(AsyncIteratorNode)
is_async = True
def __init__(self, pos, **kw):
assert 'item' not in kw
from . import ExprNodes
# AwaitExprNodes must appear before running MarkClosureVisitor
kw['item'] = ExprNodes.AwaitIterNextExprNode(kw['iterator'].pos, arg=None)
_ForInStatNode.__init__(self, pos, **kw)
def _create_item_node(self):
from . import ExprNodes
self.item.arg = ExprNodes.AsyncNextNode(self.iterator)
| AsyncForStatNode |
python | getsentry__sentry | tests/sentry/mail/test_adapter.py | {
"start": 56673,
"end": 57094
} | class ____(BaseMailAdapterTest):
def test_get_digest_subject(self) -> None:
assert (
get_digest_subject(
mock.Mock(qualified_short_id="BAR-1"),
Counter({mock.sentinel.group: 3}),
datetime(2016, 9, 19, 1, 2, 3, tzinfo=UTC),
)
== "BAR-1 - 1 new alert since Sept. 19, 2016, 1:02 a.m. UTC"
)
| MailAdapterGetDigestSubjectTest |
python | Textualize__textual | src/textual/widgets/_data_table.py | {
"start": 1860,
"end": 2069
} | class ____(Exception):
"""The cell key/index was invalid.
Raised when the coordinates or cell key provided does not exist
in the DataTable (e.g. out of bounds index, invalid key)"""
| CellDoesNotExist |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.