language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | astropy__astropy | astropy/uncertainty/tests/test_functions.py | {
"start": 489,
"end": 901
} | class ____:
@classmethod
def setup_class(cls):
cls.a = (
np.array([[[0.0]], [[10.0]]])
+ np.array([[0.0], [1.0], [2.0]])
+ np.arange(4.0) / 10.0
)
cls.b = -(np.arange(3.0, 6.0)[:, np.newaxis] + np.arange(4.0) / 10.0)
cls.da = Distribution(cls.a)
cls.db = Distribution(cls.b)
cls.c = np.array([[200.0], [300.0]])
| ArraySetup |
python | django__django | tests/generic_relations_regress/models.py | {
"start": 4665,
"end": 4890
} | class ____(models.Model):
pass
def prevent_deletes(sender, instance, **kwargs):
raise models.ProtectedError("Not allowed to delete.", [instance])
models.signals.pre_delete.connect(prevent_deletes, sender=Node)
| Related |
python | sqlalchemy__sqlalchemy | test/sql/test_compiler.py | {
"start": 281493,
"end": 289608
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
"""test the _omit_from_statements parameter.
this somewhat awkward parameter was added to suit the case of
"insert_sentinel" columns that would try very hard not to be noticed
when not needed, by being omitted from any SQL statement that does not
refer to them explicitly. If they are referred to explicitly or
are in a context where their client side default has to be fired off,
then they are present.
If marked public, the feature could be used as a general "I don't want to
see this column unless I asked it to" use case.
"""
__dialect__ = "default_enhanced"
@testing.fixture
def t1(self):
m1 = MetaData()
t1 = Table(
"t1",
m1,
Column("id", Integer, primary_key=True),
Column("a", Integer),
Column(
"b", Integer, _omit_from_statements=True, insert_sentinel=True
),
Column("c", Integer),
Column("d", Integer, _omit_from_statements=True),
Column("e", Integer),
)
return t1
@testing.fixture
def t2(self):
m1 = MetaData()
t2 = Table(
"t2",
m1,
Column("id", Integer, primary_key=True),
Column("a", Integer),
Column(
"b",
Integer,
_omit_from_statements=True,
insert_sentinel=True,
default="10",
onupdate="20",
),
Column("c", Integer, default="14", onupdate="19"),
Column(
"d",
Integer,
_omit_from_statements=True,
default="5",
onupdate="15",
),
Column("e", Integer),
)
return t2
@testing.fixture
def t3(self):
m1 = MetaData()
t3 = Table(
"t3",
m1,
Column("id", Integer, primary_key=True),
Column("a", Integer),
insert_sentinel("b"),
Column("c", Integer, default="14", onupdate="19"),
)
return t3
def test_select_omitted(self, t1):
self.assert_compile(
select(t1), "SELECT t1.id, t1.a, t1.c, t1.e FROM t1"
)
def test_select_from_subquery_includes_hidden(self, t1):
s1 = select(t1.c.a, t1.c.b, t1.c.c, t1.c.d, t1.c.e).subquery()
eq_(s1.c.keys(), ["a", "b", "c", "d", "e"])
self.assert_compile(
select(s1),
"SELECT anon_1.a, anon_1.b, anon_1.c, anon_1.d, anon_1.e "
"FROM (SELECT t1.a AS a, t1.b AS b, t1.c AS c, t1.d AS d, "
"t1.e AS e FROM t1) AS anon_1",
)
def test_select_from_subquery_omitted(self, t1):
s1 = select(t1).subquery()
eq_(s1.c.keys(), ["id", "a", "c", "e"])
self.assert_compile(
select(s1),
"SELECT anon_1.id, anon_1.a, anon_1.c, anon_1.e FROM "
"(SELECT t1.id AS id, t1.a AS a, t1.c AS c, t1.e AS e FROM t1) "
"AS anon_1",
)
def test_insert_omitted(self, t1):
self.assert_compile(
insert(t1), "INSERT INTO t1 (id, a, c, e) VALUES (:id, :a, :c, :e)"
)
def test_insert_from_select_omitted(self, t1):
self.assert_compile(
insert(t1).from_select(["a", "c", "e"], select(t1)),
"INSERT INTO t1 (a, c, e) SELECT t1.id, t1.a, t1.c, t1.e FROM t1",
)
def test_insert_from_select_included(self, t1):
self.assert_compile(
insert(t1).from_select(["a", "b", "c", "d", "e"], select(t1)),
"INSERT INTO t1 (a, b, c, d, e) SELECT t1.id, t1.a, t1.c, t1.e "
"FROM t1",
)
def test_insert_from_select_defaults_included(self, t2):
self.assert_compile(
insert(t2).from_select(["a", "c", "e"], select(t2)),
"INSERT INTO t2 (a, c, e, b, d) SELECT t2.id, t2.a, t2.c, t2.e, "
":b AS anon_1, :d AS anon_2 FROM t2",
# TODO: do we have a test in test_defaults for this, that the
# default values get set up as expected?
)
def test_insert_from_select_sentinel_defaults_omitted(self, t3):
self.assert_compile(
# a pure SentinelDefault not included here, so there is no 'b'
insert(t3).from_select(["a", "c"], select(t3)),
"INSERT INTO t3 (a, c) SELECT t3.id, t3.a, t3.c FROM t3",
)
def test_insert_omitted_return_col_nonspecified(self, t1):
self.assert_compile(
insert(t1).returning(t1),
"INSERT INTO t1 (id, a, c, e) VALUES (:id, :a, :c, :e) "
"RETURNING t1.id, t1.a, t1.c, t1.e",
)
def test_insert_omitted_return_col_specified(self, t1):
self.assert_compile(
insert(t1).returning(t1.c.a, t1.c.b, t1.c.c, t1.c.d, t1.c.e),
"INSERT INTO t1 (id, a, c, e) VALUES (:id, :a, :c, :e) "
"RETURNING t1.a, t1.b, t1.c, t1.d, t1.e",
)
def test_insert_omitted_no_params(self, t1):
self.assert_compile(
insert(t1), "INSERT INTO t1 () VALUES ()", params={}
)
def test_insert_omitted_no_params_defaults(self, t2):
# omit columns that nonetheless have client-side defaults
# are included
self.assert_compile(
insert(t2),
"INSERT INTO t2 (b, c, d) VALUES (:b, :c, :d)",
params={},
)
def test_insert_omitted_no_params_defaults_no_sentinel(self, t3):
# omit columns that nonetheless have client-side defaults
# are included
self.assert_compile(
insert(t3),
"INSERT INTO t3 (c) VALUES (:c)",
params={},
)
def test_insert_omitted_defaults(self, t2):
self.assert_compile(
insert(t2), "INSERT INTO t2 (id, a, c, e) VALUES (:id, :a, :c, :e)"
)
def test_update_omitted(self, t1):
self.assert_compile(
update(t1), "UPDATE t1 SET id=:id, a=:a, c=:c, e=:e"
)
def test_update_omitted_defaults(self, t2):
self.assert_compile(
update(t2), "UPDATE t2 SET id=:id, a=:a, c=:c, e=:e"
)
def test_update_omitted_no_params_defaults(self, t2):
# omit columns that nonetheless have client-side defaults
# are included
self.assert_compile(
update(t2), "UPDATE t2 SET b=:b, c=:c, d=:d", params={}
)
def test_select_include_col(self, t1):
self.assert_compile(
select(t1, t1.c.b, t1.c.d),
"SELECT t1.id, t1.a, t1.c, t1.e, t1.b, t1.d FROM t1",
)
def test_update_include_col(self, t1):
self.assert_compile(
update(t1).values(a=5, b=10, c=15, d=20, e=25),
"UPDATE t1 SET a=:a, b=:b, c=:c, d=:d, e=:e",
checkparams={"a": 5, "b": 10, "c": 15, "d": 20, "e": 25},
)
def test_insert_include_col(self, t1):
self.assert_compile(
insert(t1).values(a=5, b=10, c=15, d=20, e=25),
"INSERT INTO t1 (a, b, c, d, e) VALUES (:a, :b, :c, :d, :e)",
checkparams={"a": 5, "b": 10, "c": 15, "d": 20, "e": 25},
)
def test_insert_include_col_via_keys(self, t1):
self.assert_compile(
insert(t1),
"INSERT INTO t1 (a, b, c, d, e) VALUES (:a, :b, :c, :d, :e)",
params={"a": 5, "b": 10, "c": 15, "d": 20, "e": 25},
checkparams={"a": 5, "b": 10, "c": 15, "d": 20, "e": 25},
)
def test_select_omitted_incl_whereclause(self, t1):
self.assert_compile(
select(t1).where(t1.c.d == 5),
"SELECT t1.id, t1.a, t1.c, t1.e FROM t1 WHERE t1.d = :d_1",
checkparams={"d_1": 5},
)
def test_select_omitted_incl_order_by(self, t1):
self.assert_compile(
select(t1).order_by(t1.c.d),
"SELECT t1.id, t1.a, t1.c, t1.e FROM t1 ORDER BY t1.d",
)
| OmitFromStatementsTest |
python | kamyu104__LeetCode-Solutions | Python/number-of-subsequences-with-odd-sum.py | {
"start": 66,
"end": 377
} | class ____(object):
def subsequenceCount(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MOD = 10**9+7
# 2^(odd-1)*2^even = 2^(len(nums)-1)
return pow(2, len(nums)-1, MOD) if any(x%2 for x in nums) else 0
# Time: O(n)
# Space: O(1)
# dp
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 54647,
"end": 55890
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
username: str,
jdbc_url: str,
password: Optional[str] = None,
jdbc_url_params: Optional[str] = None,
):
"""Airbyte Source for Jdbc.
Documentation can be found at https://docs.airbyte.com/integrations/sources/postgres
Args:
name (str): The name of the destination.
username (str): The username which is used to access the database.
password (Optional[str]): The password associated with this username.
jdbc_url (str): JDBC formatted URL. See the standard here.
jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
"""
self.username = check.str_param(username, "username")
self.password = check.opt_str_param(password, "password")
self.jdbc_url = check.str_param(jdbc_url, "jdbc_url")
self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")
super().__init__("Jdbc", name)
| JdbcSource |
python | pytest-dev__pytest | testing/test_cacheprovider.py | {
"start": 478,
"end": 9385
} | class ____:
def test_config_cache_mkdir(self, pytester: Pytester) -> None:
pytester.makeini("[pytest]")
config = pytester.parseconfigure()
assert config.cache is not None
with pytest.raises(ValueError):
config.cache.mkdir("key/name")
p = config.cache.mkdir("name")
assert p.is_dir()
def test_cache_dir_permissions(self, pytester: Pytester) -> None:
"""The .pytest_cache directory should have world-readable permissions
(depending on umask).
Regression test for #12308.
"""
pytester.makeini("[pytest]")
config = pytester.parseconfigure()
assert config.cache is not None
p = config.cache.mkdir("name")
assert p.is_dir()
# Instead of messing with umask, make sure .pytest_cache has the same
# permissions as the default that `mkdir` gives `p`.
assert (p.parent.stat().st_mode & 0o777) == (p.stat().st_mode & 0o777)
def test_config_cache_dataerror(self, pytester: Pytester) -> None:
pytester.makeini("[pytest]")
config = pytester.parseconfigure()
assert config.cache is not None
cache = config.cache
pytest.raises(TypeError, lambda: cache.set("key/name", cache))
config.cache.set("key/name", 0)
config.cache._getvaluepath("key/name").write_bytes(b"123invalid")
val = config.cache.get("key/name", -2)
assert val == -2
@pytest.mark.filterwarnings("ignore:could not create cache path")
def test_cache_writefail_cachefile_silent(self, pytester: Pytester) -> None:
pytester.makeini("[pytest]")
pytester.path.joinpath(".pytest_cache").write_text(
"gone wrong", encoding="utf-8"
)
config = pytester.parseconfigure()
cache = config.cache
assert cache is not None
cache.set("test/broken", [])
@pytest.fixture
def unwritable_cache_dir(self, pytester: Pytester) -> Generator[Path]:
cache_dir = pytester.path.joinpath(".pytest_cache")
cache_dir.mkdir()
mode = cache_dir.stat().st_mode
cache_dir.chmod(0)
if os.access(cache_dir, os.W_OK):
pytest.skip("Failed to make cache dir unwritable")
yield cache_dir
cache_dir.chmod(mode)
@pytest.mark.filterwarnings(
"ignore:could not create cache path:pytest.PytestWarning"
)
def test_cache_writefail_permissions(
self, unwritable_cache_dir: Path, pytester: Pytester
) -> None:
pytester.makeini("[pytest]")
config = pytester.parseconfigure()
cache = config.cache
assert cache is not None
cache.set("test/broken", [])
@pytest.mark.filterwarnings("default")
def test_cache_failure_warns(
self,
pytester: Pytester,
monkeypatch: MonkeyPatch,
unwritable_cache_dir: Path,
) -> None:
monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
pytester.makepyfile("def test_error(): raise Exception")
result = pytester.runpytest()
assert result.ret == 1
# warnings from nodeids and lastfailed
result.stdout.fnmatch_lines(
[
# Validate location/stacklevel of warning from cacheprovider.
"*= warnings summary =*",
"*/cacheprovider.py:*",
" */cacheprovider.py:*: PytestCacheWarning: could not create cache path "
f"{unwritable_cache_dir}/v/cache/nodeids: *",
' config.cache.set("cache/nodeids", sorted(self.cached_nodeids))',
"*1 failed, 2 warnings in*",
]
)
def test_config_cache(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
def pytest_configure(config):
# see that we get cache information early on
assert hasattr(config, "cache")
"""
)
pytester.makepyfile(
"""
def test_session(pytestconfig):
assert hasattr(pytestconfig, "cache")
"""
)
result = pytester.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_cachefuncarg(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
def test_cachefuncarg(cache):
val = cache.get("some/thing", None)
assert val is None
cache.set("some/thing", [1])
pytest.raises(TypeError, lambda: cache.get("some/thing"))
val = cache.get("some/thing", [])
assert val == [1]
"""
)
result = pytester.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_custom_rel_cache_dir(self, pytester: Pytester) -> None:
rel_cache_dir = os.path.join("custom_cache_dir", "subdir")
pytester.makeini(
f"""
[pytest]
cache_dir = {rel_cache_dir}
"""
)
pytester.makepyfile(test_errored="def test_error():\n assert False")
pytester.runpytest()
assert pytester.path.joinpath(rel_cache_dir).is_dir()
def test_custom_abs_cache_dir(
self, pytester: Pytester, tmp_path_factory: TempPathFactory
) -> None:
tmp = tmp_path_factory.mktemp("tmp")
abs_cache_dir = tmp / "custom_cache_dir"
pytester.makeini(
f"""
[pytest]
cache_dir = {abs_cache_dir}
"""
)
pytester.makepyfile(test_errored="def test_error():\n assert False")
pytester.runpytest()
assert abs_cache_dir.is_dir()
def test_custom_cache_dir_with_env_var(
self, pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
monkeypatch.setenv("env_var", "custom_cache_dir")
pytester.makeini(
"""
[pytest]
cache_dir = {cache_dir}
""".format(cache_dir="$env_var")
)
pytester.makepyfile(test_errored="def test_error():\n assert False")
pytester.runpytest()
assert pytester.path.joinpath("custom_cache_dir").is_dir()
@pytest.mark.parametrize("env", ((), ("TOX_ENV_DIR", "mydir/tox-env")))
def test_cache_reportheader(
env: Sequence[str], pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
pytester.makepyfile("""def test_foo(): pass""")
if env:
monkeypatch.setenv(*env)
expected = os.path.join(env[1], ".pytest_cache")
else:
monkeypatch.delenv("TOX_ENV_DIR", raising=False)
expected = ".pytest_cache"
result = pytester.runpytest("-v")
result.stdout.fnmatch_lines([f"cachedir: {expected}"])
def test_cache_reportheader_external_abspath(
pytester: Pytester, tmp_path_factory: TempPathFactory
) -> None:
external_cache = tmp_path_factory.mktemp(
"test_cache_reportheader_external_abspath_abs"
)
pytester.makepyfile("def test_hello(): pass")
pytester.makeini(
f"""
[pytest]
cache_dir = {external_cache}
"""
)
result = pytester.runpytest("-v")
result.stdout.fnmatch_lines([f"cachedir: {external_cache}"])
def test_cache_show(pytester: Pytester) -> None:
result = pytester.runpytest("--cache-show")
assert result.ret == 0
result.stdout.fnmatch_lines(["*cache is empty*"])
pytester.makeconftest(
"""
def pytest_configure(config):
config.cache.set("my/name", [1,2,3])
config.cache.set("my/hello", "world")
config.cache.set("other/some", {1:2})
dp = config.cache.mkdir("mydb")
dp.joinpath("hello").touch()
dp.joinpath("world").touch()
"""
)
result = pytester.runpytest()
assert result.ret == 5 # no tests executed
result = pytester.runpytest("--cache-show")
result.stdout.fnmatch_lines(
[
"*cachedir:*",
"*- cache values for '[*]' -*",
"cache/nodeids contains:",
"my/name contains:",
" [1, 2, 3]",
"other/some contains:",
" {*'1': 2}",
"*- cache directories for '[*]' -*",
"*mydb/hello*length 0*",
"*mydb/world*length 0*",
]
)
assert result.ret == 0
result = pytester.runpytest("--cache-show", "*/hello")
result.stdout.fnmatch_lines(
[
"*cachedir:*",
"*- cache values for '[*]/hello' -*",
"my/hello contains:",
" *'world'",
"*- cache directories for '[*]/hello' -*",
"d/mydb/hello*length 0*",
]
)
stdout = result.stdout.str()
assert "other/some" not in stdout
assert "d/mydb/world" not in stdout
assert result.ret == 0
| TestNewAPI |
python | huggingface__transformers | src/transformers/models/clvp/tokenization_clvp.py | {
"start": 2298,
"end": 12960
} | class ____(PreTrainedTokenizer):
"""
Construct a CLVP tokenizer. Based on byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import ClvpTokenizer
>>> tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev")
>>> tokenizer("Hello world")["input_ids"]
[62, 84, 28, 2, 179, 79]
>>> tokenizer(" Hello world")["input_ids"]
[2, 62, 84, 28, 2, 179, 79]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"[STOP]"`):
The end of sequence token.
pad_token (`str`, *optional*, defaults to `"[STOP]"`):
The pad token of the sequence.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (CLVP tokenizer detect beginning of words by the preceding space).
add_bos_token (`bool`, *optional*, defaults to `False`):
Whether to add `bos_token` in front of the sequence when add_special_tokens=True.
add_eos_token (`bool`, *optional*, defaults to `False`):
Whether to add `eos_token` in end of the sequence when add_special_tokens=True.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = [
"input_ids",
"attention_mask",
]
def __init__(
self,
vocab_file,
merges_file,
errors="replace",
unk_token="[UNK]",
bos_token="<|endoftext|>",
eos_token="[STOP]",
pad_token="[STOP]",
add_prefix_space=False,
add_bos_token=False,
add_eos_token=False,
**kwargs,
):
bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
self.add_bos_token = add_bos_token
self.add_eos_token = add_eos_token
self._normalizer = None
# Set special_tokens_pattern based on add_bos_token and add_eos_token flags
if add_bos_token and add_eos_token:
kwargs["special_tokens_pattern"] = "bos_eos"
elif add_bos_token:
kwargs["special_tokens_pattern"] = "bos"
elif add_eos_token:
kwargs["special_tokens_pattern"] = "eos"
else:
kwargs["special_tokens_pattern"] = "none"
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
bpe_merges = merges_handle.read().split("\n")[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.add_prefix_space = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
super().__init__(
errors=errors,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
add_prefix_space=add_prefix_space,
add_bos_token=add_bos_token,
add_eos_token=add_eos_token,
**kwargs,
)
@property
def vocab_size(self):
return len(self.encoder)
@property
def normalizer(self):
if self._normalizer is None:
self._normalizer = EnglishNormalizer()
return self._normalizer
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
bos_token_id = [self.bos_token_id] if self.add_bos_token else []
eos_token_id = [self.eos_token_id] if self.add_eos_token else []
output = bos_token_id + token_ids_0 + eos_token_id
if token_ids_1 is not None:
output = output + bos_token_id + token_ids_1 + eos_token_id
return output
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
text = self.normalizer(text)
for token in re.findall(self.pat, text):
token = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")
) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
# if the token is "Ġ" we replace it with "[SPACE]" (if "[SPACE]" is present in the vocab), otherwise we keep the "Ġ".
bpe_tokens.extend(
"[SPACE]" if bpe_token == "\u0120" and "[SPACE]" in self.encoder else bpe_token
for bpe_token in self.bpe(token).split(" ")
)
return bpe_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
text = "".join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
return text
def clean_up_tokenization(self, text):
text = "".join(text) if isinstance(text, list) else text
vocab_tokens = list(self.encoder.keys()) + list(self.added_tokens_encoder.keys())
text = text.replace("[SPACE]", " ") if "[SPACE]" in vocab_tokens else text
text = text.replace("[STOP]", " ") if "[STOP]" in vocab_tokens else text
text = text.replace(self.unk_token, "").replace(" ", " ").replace(" ", " ")
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
merge_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!"
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return vocab_file, merge_file
__all__ = ["ClvpTokenizer"]
| ClvpTokenizer |
python | huggingface__transformers | tests/models/bit/test_modeling_bit.py | {
"start": 5463,
"end": 9358
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
pipeline_model_mapping = (
{"image-feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
test_resize_embeddings = False
has_attentions = False
test_torch_exportable = True
def setUp(self):
self.model_tester = BitModelTester(self)
self.config_tester = ConfigTester(
self, config_class=BitConfig, has_text_modality=False, common_properties=["num_channels"]
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="Bit does not output attentions")
def test_attention_outputs(self):
pass
@unittest.skip(reason="Bit does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Bit does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_backbone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states), expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.image_size // 4, self.model_tester.image_size // 4],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
layers_type = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
config.layer_type = layer_type
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="Bit does not use feedforward chunking")
def test_feed_forward_chunking(self):
pass
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "google/bit-50"
model = BitModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
| BitModelTest |
python | ray-project__ray | python/ray/air/config.py | {
"start": 21131,
"end": 28893
} | class ____:
"""Runtime configuration for training and tuning runs.
Upon resuming from a training or tuning run checkpoint,
Ray Train/Tune will automatically apply the RunConfig from
the previously checkpointed run.
Args:
name: Name of the trial or experiment. If not provided, will be deduced
from the Trainable.
storage_path: [Beta] Path where all results and checkpoints are persisted.
Can be a local directory or a destination on cloud storage.
For multi-node training/tuning runs, this must be set to a
shared storage location (e.g., S3, NFS).
This defaults to the local ``~/ray_results`` directory.
storage_filesystem: [Beta] A custom filesystem to use for storage.
If this is provided, `storage_path` should be a path with its
prefix stripped (e.g., `s3://bucket/path` -> `bucket/path`).
failure_config: Failure mode configuration.
checkpoint_config: Checkpointing configuration.
sync_config: Configuration object for syncing. See train.SyncConfig.
verbose: 0, 1, or 2. Verbosity mode.
0 = silent, 1 = default, 2 = verbose. Defaults to 1.
If the ``RAY_AIR_NEW_OUTPUT=1`` environment variable is set,
uses the old verbosity settings:
0 = silent, 1 = only status updates, 2 = status and brief
results, 3 = status and detailed results.
stop: Stop conditions to consider. Refer to ray.tune.stopper.Stopper
for more info. Stoppers should be serializable.
callbacks: [DeveloperAPI] Callbacks to invoke.
Refer to ray.tune.callback.Callback for more info.
Callbacks should be serializable.
Currently only stateless callbacks are supported for resumed runs.
(any state of the callback will not be checkpointed by Tune
and thus will not take effect in resumed runs).
progress_reporter: [DeveloperAPI] Progress reporter for reporting
intermediate experiment progress. Defaults to CLIReporter if
running in command-line, or JupyterNotebookReporter if running in
a Jupyter notebook.
log_to_file: [DeveloperAPI] Log stdout and stderr to files in
trial directories. If this is `False` (default), no files
are written. If `true`, outputs are written to `trialdir/stdout`
and `trialdir/stderr`, respectively. If this is a single string,
this is interpreted as a file relative to the trialdir, to which
both streams are written. If this is a Sequence (e.g. a Tuple),
it has to have length 2 and the elements indicate the files to
which stdout and stderr are written, respectively.
"""
name: Optional[str] = None
storage_path: Optional[str] = None
storage_filesystem: Optional[pyarrow.fs.FileSystem] = None
failure_config: Optional[FailureConfig] = None
checkpoint_config: Optional[CheckpointConfig] = None
sync_config: Optional["ray.train.SyncConfig"] = None
verbose: Optional[Union[int, "AirVerbosity", "Verbosity"]] = None
stop: Optional[Union[Mapping, "Stopper", Callable[[str, Mapping], bool]]] = None
callbacks: Optional[List["Callback"]] = None
progress_reporter: Optional["ray.tune.progress_reporter.ProgressReporter"] = None
log_to_file: Union[bool, str, Tuple[str, str]] = False
# Deprecated
local_dir: Optional[str] = None
def __post_init__(self):
from ray.train import SyncConfig
from ray.train.constants import DEFAULT_STORAGE_PATH
from ray.tune.experimental.output import AirVerbosity, get_air_verbosity
if self.local_dir is not None:
raise DeprecationWarning(
"The `RunConfig(local_dir)` argument is deprecated. "
"You should set the `RunConfig(storage_path)` instead."
"See the docs: https://docs.ray.io/en/latest/train/user-guides/"
"persistent-storage.html#setting-the-local-staging-directory"
)
if self.storage_path is None:
self.storage_path = DEFAULT_STORAGE_PATH
# TODO(justinvyu): [Deprecated]
ray_storage_uri: Optional[str] = os.environ.get("RAY_STORAGE")
if ray_storage_uri is not None:
logger.info(
"Using configured Ray Storage URI as the `storage_path`: "
f"{ray_storage_uri}"
)
warnings.warn(
"The `RAY_STORAGE` environment variable is deprecated. "
"Please use `RunConfig(storage_path)` instead.",
RayDeprecationWarning,
stacklevel=2,
)
self.storage_path = ray_storage_uri
if not self.failure_config:
self.failure_config = FailureConfig()
if not self.sync_config:
self.sync_config = SyncConfig()
if not self.checkpoint_config:
self.checkpoint_config = CheckpointConfig()
# Save the original verbose value to check for deprecations
self._verbose = self.verbose
if self.verbose is None:
# Default `verbose` value. For new output engine,
# this is AirVerbosity.DEFAULT.
# For old output engine, this is Verbosity.V3_TRIAL_DETAILS
# Todo (krfricke): Currently uses number to pass test_configs::test_repr
self.verbose = get_air_verbosity(AirVerbosity.DEFAULT) or 3
if isinstance(self.storage_path, Path):
self.storage_path = self.storage_path.as_posix()
def __repr__(self):
"""Dataclass-style repr that omits fields still at their default values."""
# Deferred import mirrors __post_init__ (ray.train <-> ray.tune cycle).
from ray.train import SyncConfig
return _repr_dataclass(
self,
default_values={
"failure_config": FailureConfig(),
"sync_config": SyncConfig(),
"checkpoint_config": CheckpointConfig(),
},
)
def _repr_html_(self) -> str:
"""Render this config as HTML for notebook display (Jinja templates)."""
# Collect a mini-panel for each sub-config that is set.
reprs = []
if self.failure_config is not None:
reprs.append(
Template("title_data_mini.html.j2").render(
title="Failure Config", data=self.failure_config._repr_html_()
)
)
if self.sync_config is not None:
reprs.append(
Template("title_data_mini.html.j2").render(
title="Sync Config", data=self.sync_config._repr_html_()
)
)
if self.checkpoint_config is not None:
reprs.append(
Template("title_data_mini.html.j2").render(
title="Checkpoint Config", data=self.checkpoint_config._repr_html_()
)
)
# Create a divider between each displayed repr
# (slots 0, 2, 4, ... get the panels; odd slots keep dividers).
subconfigs = [Template("divider.html.j2").render()] * (2 * len(reprs) - 1)
subconfigs[::2] = reprs
# NOTE(review): this row reads the deprecated `local_dir`, which is
# always None after __post_init__ — should this show storage_path
# instead? Confirm before changing.
settings = Template("scrollableTable.html.j2").render(
table=tabulate(
{
"Name": self.name,
"Local results directory": self.local_dir,
"Verbosity": self.verbose,
"Log to file": self.log_to_file,
}.items(),
tablefmt="html",
headers=["Setting", "Value"],
showindex=False,
),
max_height="300px",
)
return Template("title_data.html.j2").render(
title="RunConfig",
data=Template("run_config.html.j2").render(
subconfigs=subconfigs,
settings=settings,
),
)
| RunConfig |
python | pdm-project__pdm | src/pdm/models/repositories/lock.py | {
"start": 917,
"end": 1093
} | class ____:
# Resolved (pinned) candidate this locked entry describes.
candidate: Candidate
# Serialized requirement strings; None when dependencies were not recorded
# in the lockfile — presumably distinct from "no dependencies"; confirm.
dependencies: list[str] | None = None
# Short project summary carried over from package metadata.
summary: str = ""
# Environment marker restricting where this package applies; AnyMarker
# (matches everything) by default.
marker: BaseMarker = dataclasses.field(default_factory=AnyMarker)
| Package |
python | joerick__pyinstrument | pyinstrument/low_level/stat_profile_python.py | {
"start": 366,
"end": 4761
} | class ____:
"""Pure-Python statistical profiler hook (reference for the C version).

`profile` is installed via sys.setprofile; it forwards events to `target`
at most once per `interval`, and additionally reports context-variable
changes immediately as synthetic "context_changed" events.
"""
# Frame-info strings for coroutine frames that just returned via await;
# passed along with context-change events so the consumer can attribute
# the switch. Cleared on any non-coroutine-return event.
await_stack: list[str]
# Subscription handle into the shared timing thread; only set for the
# "walltime_thread" timer type, released in __del__.
timing_thread_subscription: int | None = None
def __init__(
self,
target: Callable[[types.FrameType, str, Any], Any],
interval: float,
context_var: contextvars.ContextVar[object | None] | None,
timer_type: TimerType,
timer_func: Callable[[], float] | None,
):
"""Build the profiler state; raises TypeError/ValueError on bad args."""
self.target = target
self.interval = interval
if context_var:
# raise typeerror to match the C version
if not isinstance(context_var, contextvars.ContextVar):
raise TypeError("not a context var")
self.context_var = context_var
self.timer_type = timer_type
# Select the clock: wall time, the shared timing thread, or a
# user-supplied callable.
if timer_type == "walltime":
self.get_time = timeit.default_timer
elif timer_type == "walltime_thread":
self.get_time = pyi_timing_thread_get_time
self.timing_thread_subscription = pyi_timing_thread_subscribe(interval)
elif timer_type == "timer_func":
if timer_func is None:
raise TypeError("timer_func must be provided for timer_func timer_type")
self.get_time = timer_func
else:
raise ValueError(f"invalid timer_type '{timer_type}'")
self.last_invocation = self.get_time()
self.last_context_var_value = context_var.get() if context_var else None
self.await_stack = []
def __del__(self):
# Release the timing-thread subscription, if one was taken.
if self.timing_thread_subscription is not None:
pyi_timing_thread_unsubscribe(self.timing_thread_subscription)
def profile(self, frame: types.FrameType, event: str, arg: Any):
"""sys.setprofile callback: throttle events to `interval`, but emit
context-variable changes immediately."""
now = self.get_time()
if self.context_var:
context_var_value = self.context_var.get()
last_context_var_value = self.last_context_var_value
if context_var_value is not last_context_var_value:
# On "call" the change happened in the caller's frame.
context_change_frame = frame.f_back if event == "call" else frame
assert context_change_frame is not None
self.target(
context_change_frame,
"context_changed",
(context_var_value, last_context_var_value, self.await_stack),
)
self.last_context_var_value = context_var_value
# 0x80 == CO_COROUTINE (i.e. defined with 'async def')
if event == "return" and frame.f_code.co_flags & 0x80:
self.await_stack.append(get_frame_info(frame))
else:
self.await_stack.clear()
# Throttle: only forward to target once per sampling interval.
if now < self.last_invocation + self.interval:
return
self.last_invocation = now
return self.target(frame, event, arg)
"""
A reimplementation of setstatprofile in Python, for prototyping/reference
purposes. Not used in normal execution.
"""
def setstatprofile(
    target: Callable[[types.FrameType, str, Any], Any] | None,
    interval: float = 0.001,
    context_var: contextvars.ContextVar[object | None] | None = None,
    timer_type: TimerType = "walltime",
    timer_func: Callable[[], float] | None = None,
) -> None:
    """Install or clear a statistical profile hook via ``sys.setprofile``.

    A falsy *target* removes any installed hook.  Otherwise a
    ``PythonStatProfiler`` is constructed with the given sampling
    *interval*, optional *context_var* to watch, and clock selection
    (*timer_type* / *timer_func*), and its bound ``profile`` method is
    installed as the process-wide profile function.
    """
    if not target:
        sys.setprofile(None)
        return
    sampler = PythonStatProfiler(
        target=target,
        interval=interval,
        context_var=context_var,
        timer_type=timer_type,
        timer_func=timer_func,
    )
    sys.setprofile(sampler.profile)
def get_frame_info(frame: types.FrameType) -> str:
    """Serialise *frame* into pyinstrument's compact frame-identifier string.

    Layout: ``name\\x00filename\\x00firstlineno`` followed by optional
    ``\\x01``-prefixed attributes: ``c<class>`` (owning class of a
    method/classmethod), ``l<lineno>`` (current line) and ``h1`` (frame
    hidden via a local ``__tracebackhide__``).
    """
    code = frame.f_code
    parts = [f"{code.co_name}\x00{code.co_filename}\x00{code.co_firstlineno:d}"]

    # Derive the owning class: prefer a truthy `self` local (instance
    # methods), falling back to a truthy `cls` local (classmethods).
    owner = None
    instance = frame.f_locals.get("self", None)
    if (
        instance
        and hasattr(instance, "__class__")
        and hasattr(instance.__class__, "__qualname__")
    ):
        owner = instance.__class__.__qualname__
    else:
        klass = frame.f_locals.get("cls", None)
        if klass and hasattr(klass, "__qualname__"):
            owner = klass.__qualname__

    if owner:
        parts.append(f"\x01c{owner}")
    if frame.f_lineno is not None:
        parts.append(f"\x01l{frame.f_lineno:d}")
    if "__tracebackhide__" in frame.f_locals:
        parts.append("\x01h1")
    return "".join(parts)
| PythonStatProfiler |
python | miyuchina__mistletoe | mistletoe/block_token.py | {
"start": 28789,
"end": 29321
} | class ____(BlockToken):
"""
Table cell token.
This is a leaf block token. Its children are inline (span) tokens.
Should only be called by TableRow.__init__().
Attributes:
align (bool): align option for current cell (default to None).
"""
repr_attributes = BlockToken.repr_attributes + ("align",)
def __init__(self, content, align=None, line_number=None):
# `align` is stored as given by TableRow — the docstring above says
# bool, but None clearly means "default alignment"; confirm the
# value domain against TableRow's delimiter parsing.
self.align = align
self.line_number = line_number
# Tokenize the raw cell text into inline (span) children.
super().__init__(content, span_token.tokenize_inner)
| TableCell |
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_member_details.py | {
"start": 51260,
"end": 57189
} | class ____(APITestCase):
"""Who may view and reset (delete) another org member's 2FA authenticators
via the org-member-details and user-authenticator-details endpoints."""
def setUp(self) -> None:
self.owner = self.create_user()
self.org = self.create_organization(owner=self.owner)
self.member = self.create_user()
self.member_om = self.create_member(
organization=self.org, user=self.member, role="member", teams=[]
)
self.login_as(self.member)
# Enroll the target member in TOTP (control silo) so every test starts
# with exactly one authenticator on record.
with assume_test_silo_mode(SiloMode.CONTROL):
totp = TotpInterface()
totp.enroll(self.member)
assert totp.authenticator is not None
self.interface_id = totp.authenticator.id
assert Authenticator.objects.filter(user=self.member).exists()
# ---- shared assertion helpers ----
def assert_can_get_authenticators(self):
# Privileged viewers see the authenticator list plus 2FA flags.
path = reverse(
"sentry-api-0-organization-member-details", args=[self.org.slug, self.member_om.id]
)
resp = self.client.get(path)
assert resp.status_code == 200
data = resp.data
assert len(data["user"]["authenticators"]) == 1
assert data["user"]["has2fa"] is True
assert data["user"]["canReset2fa"] is True
def assert_cannot_get_authenticators(self):
# Unprivileged viewers still get a 200, but without 2FA details.
path = reverse(
"sentry-api-0-organization-member-details", args=[self.org.slug, self.member_om.id]
)
resp = self.client.get(path)
assert resp.status_code == 200
data = resp.data
assert "authenticators" not in data["user"]
assert "canReset2fa" not in data["user"]
@assume_test_silo_mode(SiloMode.CONTROL)
def assert_can_remove_authenticators(self):
path = reverse(
"sentry-api-0-user-authenticator-details", args=[self.member.id, self.interface_id]
)
resp = self.client.delete(path)
assert resp.status_code == 204
assert not Authenticator.objects.filter(user=self.member).exists()
@assume_test_silo_mode(SiloMode.CONTROL)
def assert_cannot_remove_authenticators(self):
path = reverse(
"sentry-api-0-user-authenticator-details", args=[self.member.id, self.interface_id]
)
resp = self.client.delete(path)
assert resp.status_code == 403
assert Authenticator.objects.filter(user=self.member).exists()
# ---- role-based access ----
@patch("sentry.security.utils.generate_security_email")
def test_org_owner_can_reset_member_2fa(self, mock_generate_security_email: MagicMock) -> None:
self.login_as(self.owner)
self.assert_can_get_authenticators()
self.assert_can_remove_authenticators()
# A security notification must be sent when 2FA is removed.
mock_generate_security_email.assert_called_once()
def test_owner_must_have_org_membership(self) -> None:
# An owner of a *different* org has no access to this org's members.
owner = self.create_user()
self.create_organization(owner=owner)
self.login_as(owner)
path = reverse(
"sentry-api-0-organization-member-details", args=[self.org.slug, self.member_om.id]
)
resp = self.client.get(path)
assert resp.status_code == 403
self.assert_cannot_remove_authenticators()
@patch("sentry.security.utils.generate_security_email")
def test_org_manager_can_reset_member_2fa(
self, mock_generate_security_email: MagicMock
) -> None:
manager = self.create_user()
self.create_member(organization=self.org, user=manager, role="manager", teams=[])
self.login_as(manager)
self.assert_can_get_authenticators()
self.assert_can_remove_authenticators()
mock_generate_security_email.assert_called_once()
def test_org_admin_cannot_reset_member_2fa(self) -> None:
admin = self.create_user()
self.create_member(organization=self.org, user=admin, role="admin", teams=[])
self.login_as(admin)
self.assert_cannot_get_authenticators()
self.assert_cannot_remove_authenticators()
def test_org_member_cannot_reset_member_2fa(self) -> None:
member = self.create_user()
self.create_member(organization=self.org, user=member, role="member", teams=[])
self.login_as(member)
self.assert_cannot_get_authenticators()
self.assert_cannot_remove_authenticators()
# ---- situational restrictions ----
def test_cannot_reset_member_2fa__has_multiple_org_membership(self) -> None:
# Members who belong to more than one org cannot have 2FA reset here.
self.create_organization(owner=self.member)
self.login_as(self.owner)
path = reverse(
"sentry-api-0-organization-member-details", args=[self.org.slug, self.member_om.id]
)
resp = self.client.get(path)
assert resp.status_code == 200
data = resp.data
assert len(data["user"]["authenticators"]) == 1
assert data["user"]["has2fa"] is True
assert data["user"]["canReset2fa"] is False
self.assert_cannot_remove_authenticators()
def test_cannot_reset_member_2fa__org_requires_2fa(self) -> None:
# When the org enforces 2FA, removing a member's authenticator is
# forbidden (it would lock them out of the org).
self.login_as(self.owner)
with assume_test_silo_mode(SiloMode.CONTROL):
TotpInterface().enroll(self.owner)
self.org.update(flags=F("flags").bitor(Organization.flags.require_2fa))
assert self.org.flags.require_2fa.is_set is True
self.assert_cannot_remove_authenticators()
@assume_test_silo_mode(SiloMode.CONTROL)
def test_owner_can_only_reset_member_2fa(self) -> None:
# Owners may delete authenticators but not read or regenerate them.
self.login_as(self.owner)
path = reverse(
"sentry-api-0-user-authenticator-details", args=[self.member.id, self.interface_id]
)
resp = self.client.get(path)
assert resp.status_code == 403
# cannot regenerate recovery codes
recovery = RecoveryCodeInterface()
recovery.enroll(self.user)
assert recovery.authenticator, "authenticator should exist"
path = reverse(
"sentry-api-0-user-authenticator-details",
args=[self.member.id, recovery.authenticator.id],
)
resp = self.client.put(path)
assert resp.status_code == 403
| ResetOrganizationMember2faTest |
python | martinblech__xmltodict | xmltodict.py | {
"start": 314,
"end": 19480
} | class ____:
"""Expat event handler that assembles parsed XML into nested dicts.

State: `path` is the stack of (name, attrs) pairs from the root down to
the current element; `stack` holds the saved (item, data) pair of each
open ancestor; `item`/`data` belong to the element currently being built.
With `item_depth` > 0 the handler streams: items completed at that depth
are handed to `item_callback` and then discarded instead of being
attached to their parent.
"""
def __init__(
self,
item_depth=0,
item_callback=lambda *args: True,
xml_attribs=True,
attr_prefix="@",
cdata_key="#text",
force_cdata=False,
cdata_separator="",
postprocessor=None,
dict_constructor=dict,
strip_whitespace=True,
namespace_separator=":",
namespaces=None,
force_list=None,
comment_key="#comment",
):
self.path = []
self.stack = []
self.data = []
self.item = None
self.item_depth = item_depth
self.xml_attribs = xml_attribs
self.item_callback = item_callback
self.attr_prefix = attr_prefix
self.cdata_key = cdata_key
self.force_cdata = force_cdata
self.cdata_separator = cdata_separator
self.postprocessor = postprocessor
self.dict_constructor = dict_constructor
self.strip_whitespace = strip_whitespace
self.namespace_separator = namespace_separator
self.namespaces = namespaces
# xmlns declarations seen since the last startElement; attached to the
# next element that opens.
self.namespace_declarations = dict_constructor()
self.force_list = force_list
self.comment_key = comment_key
def _build_name(self, full_name):
# Map an expat "uri<sep>local" name through the user's namespace table.
# Unknown namespaces are kept verbatim; a mapping to a falsy value
# strips the prefix entirely.
if self.namespaces is None:
return full_name
i = full_name.rfind(self.namespace_separator)
if i == -1:
return full_name
namespace, name = full_name[:i], full_name[i+1:]
try:
short_namespace = self.namespaces[namespace]
except KeyError:
short_namespace = namespace
if not short_namespace:
return name
else:
return self.namespace_separator.join((short_namespace, name))
def _attrs_to_dict(self, attrs):
# With `ordered_attributes`, expat delivers attributes as a flat
# [k1, v1, k2, v2, ...] list; pair them up.
if isinstance(attrs, dict):
return attrs
return self.dict_constructor(zip(attrs[0::2], attrs[1::2]))
def startNamespaceDecl(self, prefix, uri):
# Default namespace declarations arrive with prefix=None; store as ''.
self.namespace_declarations[prefix or ''] = uri
def startElement(self, full_name, attrs):
"""Open an element: push path/stack frames and capture attributes."""
name = self._build_name(full_name)
attrs = self._attrs_to_dict(attrs)
# Attach any pending xmlns declarations to this element's attrs.
if self.namespace_declarations:
if not attrs:
attrs = self.dict_constructor()
attrs['xmlns'] = self.namespace_declarations
self.namespace_declarations = self.dict_constructor()
self.path.append((name, attrs or None))
if len(self.path) >= self.item_depth:
self.stack.append((self.item, self.data))
if self.xml_attribs:
# Prefix attribute keys and run them through the postprocessor;
# a None result drops the attribute.
attr_entries = []
for key, value in attrs.items():
key = self.attr_prefix+self._build_name(key)
if self.postprocessor:
entry = self.postprocessor(self.path, key, value)
else:
entry = (key, value)
if entry:
attr_entries.append(entry)
attrs = self.dict_constructor(attr_entries)
else:
attrs = None
self.item = attrs or None
self.data = []
def endElement(self, full_name):
"""Close an element: emit (streaming) or attach it to its parent."""
name = self._build_name(full_name)
# If we just closed an item at the streaming depth, emit it and drop it
# without attaching it back to its parent. This avoids accumulating all
# streamed items in memory when using item_depth > 0.
if len(self.path) == self.item_depth:
item = self.item
if item is None:
item = (None if not self.data
else self.cdata_separator.join(self.data))
should_continue = self.item_callback(self.path, item)
if not should_continue:
raise ParsingInterrupted
# Reset state for the parent context without keeping a reference to
# the emitted item.
if self.stack:
self.item, self.data = self.stack.pop()
else:
self.item = None
self.data = []
self.path.pop()
return
if self.stack:
data = (None if not self.data
else self.cdata_separator.join(self.data))
item = self.item
self.item, self.data = self.stack.pop()
if self.strip_whitespace and data:
data = data.strip() or None
# force_cdata wraps bare text in a dict under cdata_key.
if data and self._should_force_cdata(name, data) and item is None:
item = self.dict_constructor()
if item is not None:
if data:
self.push_data(item, self.cdata_key, data)
self.item = self.push_data(self.item, name, item)
else:
self.item = self.push_data(self.item, name, data)
else:
self.item = None
self.data = []
self.path.pop()
def characters(self, data):
# Text may arrive in multiple chunks; collect and join at endElement.
if not self.data:
self.data = [data]
else:
self.data.append(data)
def comments(self, data):
if self.strip_whitespace:
data = data.strip()
self.item = self.push_data(self.item, self.comment_key, data)
def push_data(self, item, key, data):
"""Insert key->data into item, promoting repeats to lists."""
if self.postprocessor is not None:
result = self.postprocessor(self.path, key, data)
if result is None:
return item
key, data = result
if item is None:
item = self.dict_constructor()
try:
value = item[key]
if isinstance(value, list):
value.append(data)
else:
item[key] = [value, data]
except KeyError:
if self._should_force_list(key, data):
item[key] = [data]
else:
item[key] = data
return item
def _should_force_list(self, key, value):
# force_list may be a bool, a container of keys, or a callable.
if not self.force_list:
return False
if isinstance(self.force_list, bool):
return self.force_list
try:
return key in self.force_list
except TypeError:
return self.force_list(self.path[:-1], key, value)
def _should_force_cdata(self, key, value):
# Same three forms as force_list: bool, container of keys, or callable.
if not self.force_cdata:
return False
if isinstance(self.force_cdata, bool):
return self.force_cdata
try:
return key in self.force_cdata
except TypeError:
return self.force_cdata(self.path[:-1], key, value)
def parse(xml_input, encoding=None, expat=expat, process_namespaces=False,
namespace_separator=':', disable_entities=True, process_comments=False, **kwargs):
"""Parse the given XML input and convert it into a dictionary.
`xml_input` can either be a `string`, a file-like object, or a generator of strings.
If `xml_attribs` is `True`, element attributes are put in the dictionary
among regular child elements, using `@` as a prefix to avoid collisions. If
set to `False`, they are just ignored.
Simple example::
>>> import xmltodict
>>> doc = xmltodict.parse(\"\"\"
... <a prop="x">
... <b>1</b>
... <b>2</b>
... </a>
... \"\"\")
>>> doc['a']['@prop']
'x'
>>> doc['a']['b']
['1', '2']
If `item_depth` is `0`, the function returns a dictionary for the root
element (default behavior). Otherwise, it calls `item_callback` every time
an item at the specified depth is found and returns `None` in the end
(streaming mode).
The callback function receives two parameters: the `path` from the document
root to the item (name-attribs pairs), and the `item` (dict). If the
callback's return value is false-ish, parsing will be stopped with the
:class:`ParsingInterrupted` exception.
Streaming example::
>>> def handle(path, item):
...     print('path:%s item:%s' % (path, item))
...     return True
...
>>> xmltodict.parse(\"\"\"
... <a prop="x">
... <b>1</b>
... <b>2</b>
... </a>\"\"\", item_depth=2, item_callback=handle)
path:[('a', {'prop': 'x'}), ('b', None)] item:1
path:[('a', {'prop': 'x'}), ('b', None)] item:2
The optional argument `postprocessor` is a function that takes `path`,
`key` and `value` as positional arguments and returns a new `(key, value)`
pair where both `key` and `value` may have changed. Usage example::
>>> def postprocessor(path, key, value):
...     try:
...         return key + ':int', int(value)
...     except (ValueError, TypeError):
...         return key, value
>>> xmltodict.parse('<a><b>1</b><b>2</b><b>x</b></a>',
...                 postprocessor=postprocessor)
{'a': {'b:int': [1, 2], 'b': 'x'}}
You can pass an alternate version of `expat` (such as `defusedexpat`) by
using the `expat` parameter. E.g:
>>> import defusedexpat
>>> xmltodict.parse('<a>hello</a>', expat=defusedexpat.pyexpat)
{'a': 'hello'}
You can use the force_list argument to force lists to be created even
when there is only a single child of a given level of hierarchy. The
force_list argument is a tuple of keys. If the key for a given level
of hierarchy is in the force_list argument, that level of hierarchy
will have a list as a child (even if there is only one sub-element).
The index_keys operation takes precedence over this. This is applied
after any user-supplied postprocessor has already run.
For example, given this input:
<servers>
<server>
<name>host1</name>
<os>Linux</os>
<interfaces>
<interface>
<name>em0</name>
<ip_address>10.0.0.1</ip_address>
</interface>
</interfaces>
</server>
</servers>
If called with force_list=('interface',), it will produce
this dictionary:
{'servers':
{'server':
{'name': 'host1',
'os': 'Linux'},
'interfaces':
{'interface':
[ {'name': 'em0', 'ip_address': '10.0.0.1' } ] } } }
`force_list` can also be a callable that receives `path`, `key` and
`value`. This is helpful in cases where the logic that decides whether
a list should be forced is more complex.
If `process_comments` is `True`, comments will be added using `comment_key`
(default=`'#comment'`) to the tag that contains the comment.
For example, given this input:
<a>
<b>
<!-- b comment -->
<c>
<!-- c comment -->
1
</c>
<d>2</d>
</b>
</a>
If called with `process_comments=True`, it will produce
this dictionary:
'a': {
'b': {
'#comment': 'b comment',
'c': {
'#comment': 'c comment',
'#text': '1',
},
'd': '2',
},
}
Comment text is subject to the `strip_whitespace` flag: when it is left
at the default `True`, comments will have leading and trailing
whitespace removed. Disable `strip_whitespace` to keep comment
indentation or padding intact.
"""
handler = _DictSAXHandler(namespace_separator=namespace_separator,
**kwargs)
# Strings are encoded up front so expat sees bytes in a known encoding.
if isinstance(xml_input, str):
encoding = encoding or 'utf-8'
xml_input = xml_input.encode(encoding)
# Passing None disables expat's namespace processing entirely.
if not process_namespaces:
namespace_separator = None
parser = expat.ParserCreate(
encoding,
namespace_separator
)
# ordered_attributes makes expat deliver attrs as a flat list, which
# the handler pairs up itself; this preserves document order.
parser.ordered_attributes = True
parser.StartNamespaceDeclHandler = handler.startNamespaceDecl
parser.StartElementHandler = handler.startElement
parser.EndElementHandler = handler.endElement
parser.CharacterDataHandler = handler.characters
if process_comments:
parser.CommentHandler = handler.comments
parser.buffer_text = True
# Reject any entity declaration to defuse billion-laughs / external
# entity attacks on untrusted input.
if disable_entities:
def _forbid_entities(*_args, **_kwargs):
raise ValueError("entities are disabled")
parser.EntityDeclHandler = _forbid_entities
# Accept a file-like object, a generator of chunks, or a bytes blob.
if hasattr(xml_input, 'read'):
parser.ParseFile(xml_input)
elif isgenerator(xml_input):
for chunk in xml_input:
parser.Parse(chunk, False)
parser.Parse(b'', True)
else:
parser.Parse(xml_input, True)
return handler.item
def _convert_value_to_string(value):
"""Convert a value to its string representation for XML output.
Handles boolean values consistently by converting them to lowercase.
"""
if isinstance(value, (str, bytes)):
return value
if isinstance(value, bool):
return "true" if value else "false"
return str(value)
def _validate_name(value, kind):
"""Validate an element/attribute name for XML safety.
Raises ValueError with a specific reason when invalid.
kind: 'element' or 'attribute' (used in error messages)
"""
if not isinstance(value, str):
raise ValueError(f"{kind} name must be a string")
if value.startswith("?") or value.startswith("!"):
raise ValueError(f'Invalid {kind} name: cannot start with "?" or "!"')
if "<" in value or ">" in value:
raise ValueError(f'Invalid {kind} name: "<" or ">" not allowed')
if "/" in value:
raise ValueError(f'Invalid {kind} name: "/" not allowed')
if '"' in value or "'" in value:
raise ValueError(f"Invalid {kind} name: quotes not allowed")
if "=" in value:
raise ValueError(f'Invalid {kind} name: "=" not allowed')
if any(ch.isspace() for ch in value):
raise ValueError(f"Invalid {kind} name: whitespace not allowed")
def _validate_comment(value):
if isinstance(value, bytes):
try:
value = value.decode("utf-8")
except UnicodeDecodeError as exc:
raise ValueError("Comment text must be valid UTF-8") from exc
if not isinstance(value, str):
raise ValueError("Comment text must be a string")
if "--" in value:
raise ValueError("Comment text cannot contain '--'")
if value.endswith("-"):
raise ValueError("Comment text cannot end with '-'")
return value
def _process_namespace(name, namespaces, ns_sep=':', attr_prefix='@'):
if not isinstance(name, str):
return name
if not namespaces:
return name
try:
ns, name = name.rsplit(ns_sep, 1)
except ValueError:
pass
else:
ns_res = namespaces.get(ns.strip(attr_prefix))
name = '{}{}{}{}'.format(
attr_prefix if ns.startswith(attr_prefix) else '',
ns_res, ns_sep, name) if ns_res else name
return name
def _emit(key, value, content_handler,
attr_prefix='@',
cdata_key='#text',
depth=0,
preprocessor=None,
pretty=False,
newl='\n',
indent='\t',
namespace_separator=':',
namespaces=None,
full_document=True,
expand_iter=None,
comment_key='#comment'):
"""Recursively emit one dict entry (key -> value) as SAX events.

Lists fan out into sibling elements; dicts recurse; `attr_prefix` keys
become attributes; `cdata_key` becomes character data; `comment_key`
values are emitted as XML comments. Raises ValueError on unsafe names
or multiple roots (when `full_document`).
"""
# Comments are handled up front and never produce an element.
if isinstance(key, str) and key == comment_key:
comments_list = value if isinstance(value, list) else [value]
if isinstance(indent, int):
indent = " " * indent
for comment_text in comments_list:
if comment_text is None:
continue
comment_text = _convert_value_to_string(comment_text)
if not comment_text:
continue
if pretty:
content_handler.ignorableWhitespace(depth * indent)
content_handler.comment(comment_text)
if pretty:
content_handler.ignorableWhitespace(newl)
return
key = _process_namespace(key, namespaces, namespace_separator, attr_prefix)
# A preprocessor may rewrite (key, value) or drop the entry entirely.
if preprocessor is not None:
result = preprocessor(key, value)
if result is None:
return
key, value = result
# Minimal validation to avoid breaking out of tag context
_validate_name(key, "element")
# Normalize scalars/dicts to a one-element list so lists of values and
# single values share one code path below.
if not hasattr(value, '__iter__') or isinstance(value, (str, dict)):
value = [value]
for index, v in enumerate(value):
if full_document and depth == 0 and index > 0:
raise ValueError('document with multiple roots')
if v is None:
v = {}
elif not isinstance(v, (dict, str)):
if expand_iter and hasattr(v, '__iter__'):
v = {expand_iter: v}
else:
v = _convert_value_to_string(v)
if isinstance(v, str):
v = {cdata_key: v}
# Partition this element's mapping into cdata, attributes, children.
cdata = None
attrs = {}
children = []
for ik, iv in v.items():
if ik == cdata_key:
cdata = _convert_value_to_string(iv)
continue
if isinstance(ik, str) and ik.startswith(attr_prefix):
ik = _process_namespace(ik, namespaces, namespace_separator,
attr_prefix)
# '@xmlns': {prefix: uri} expands to xmlns/xmlns:prefix attrs.
if ik == '@xmlns' and isinstance(iv, dict):
# NOTE(review): this inner loop rebinds `v` (the element
# mapping being iterated) — harmless today because `v` is
# not read after this point, but fragile; confirm before
# relying on `v` later.
for k, v in iv.items():
_validate_name(k, "attribute")
attr = 'xmlns{}'.format(f':{k}' if k else '')
attrs[attr] = str(v)
continue
if not isinstance(iv, str):
iv = str(iv)
attr_name = ik[len(attr_prefix) :]
_validate_name(attr_name, "attribute")
attrs[attr_name] = iv
continue
if isinstance(iv, list) and not iv:
continue  # Skip empty lists to avoid creating empty child elements
children.append((ik, iv))
if isinstance(indent, int):
indent = ' ' * indent
if pretty:
content_handler.ignorableWhitespace(depth * indent)
content_handler.startElement(key, AttributesImpl(attrs))
if pretty and children:
content_handler.ignorableWhitespace(newl)
for child_key, child_value in children:
_emit(child_key, child_value, content_handler,
attr_prefix, cdata_key, depth+1, preprocessor,
pretty, newl, indent, namespaces=namespaces,
namespace_separator=namespace_separator,
expand_iter=expand_iter, comment_key=comment_key)
if cdata is not None:
content_handler.characters(cdata)
if pretty and children:
content_handler.ignorableWhitespace(depth * indent)
content_handler.endElement(key)
if pretty and depth:
content_handler.ignorableWhitespace(newl)
| _DictSAXHandler |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_heapq.py | {
"start": 17323,
"end": 17449
} | class ____(_TestErrorHandling, __TestCase):
module = py_heapq
@skipUnless(c_heapq, 'requires _heapq')
| TestErrorHandlingPython |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride3.py | {
"start": 2883,
"end": 2906
} | class ____(H2, H1): ...
| H |
python | pydata__xarray | xarray/tests/test_parallelcompat.py | {
"start": 1502,
"end": 4388
} | class ____(ChunkManagerEntrypoint):
"""Mock-up of ChunkManager class for DummyChunkedArray"""
def __init__(self):
# Arrays produced/recognised by this manager.
self.array_cls = DummyChunkedArray
def is_chunked_array(self, data: Any) -> bool:
return isinstance(data, DummyChunkedArray)
def chunks(self, data: DummyChunkedArray) -> T_NormalizedChunks:
return data.chunks
def normalize_chunks(
self,
chunks: T_Chunks | T_NormalizedChunks,
shape: tuple[int, ...] | None = None,
limit: int | None = None,
dtype: np.dtype | None = None,
previous_chunks: T_NormalizedChunks | None = None,
) -> T_NormalizedChunks:
# Delegate to dask's canonical chunk normalisation.
from dask.array.core import normalize_chunks
return normalize_chunks(chunks, shape, limit, dtype, previous_chunks)
def from_array(
self, data: T_DuckArray | np.typing.ArrayLike, chunks: _Chunks, **kwargs
) -> DummyChunkedArray:
from dask import array as da
return da.from_array(data, chunks, **kwargs)
def rechunk(self, data: DummyChunkedArray, chunks, **kwargs) -> DummyChunkedArray:
return data.rechunk(chunks, **kwargs)
def compute(self, *data: DummyChunkedArray, **kwargs) -> tuple[np.ndarray, ...]:  # type: ignore[override]
from dask.array import compute
return compute(*data, **kwargs)
def apply_gufunc(
self,
func,
signature,
*args,
axes=None,
axis=None,
keepdims=False,
output_dtypes=None,
output_sizes=None,
vectorize=None,
allow_rechunk=False,
meta=None,
**kwargs,
):
# Thin pass-through to dask's generalized-ufunc machinery.
from dask.array.gufunc import apply_gufunc
return apply_gufunc(
func,
signature,
*args,
axes=axes,
axis=axis,
keepdims=keepdims,
output_dtypes=output_dtypes,
output_sizes=output_sizes,
vectorize=vectorize,
allow_rechunk=allow_rechunk,
meta=meta,
**kwargs,
)
@pytest.fixture
def register_dummy_chunkmanager(monkeypatch):
"""
Mocks the registering of an additional ChunkManagerEntrypoint.
This preserves the presence of the existing DaskManager, so a test that relies on this and DaskManager both being
returned from list_chunkmanagers() at once would still work.
The monkeypatching changes the behavior of list_chunkmanagers when called inside xarray.namedarray.parallelcompat,
but not when called from this tests file.
"""
# Should include DaskManager iff dask is available to be imported
preregistered_chunkmanagers = list_chunkmanagers()
# Dict union: on an (unlikely) "dummy" key collision, the preregistered
# manager would win, since the right operand takes precedence.
monkeypatch.setattr(
"xarray.namedarray.parallelcompat.list_chunkmanagers",
lambda: {"dummy": DummyChunkManager()} | preregistered_chunkmanagers,
)
# Yield so monkeypatch undoes the patch at fixture teardown.
yield
| DummyChunkManager |
python | ray-project__ray | python/ray/llm/_internal/batch/stages/prepare_image_stage.py | {
"start": 638,
"end": 5197
} | class ____:
"""Adapted from vllm.connections.HTTPConnection.
Helper class to send HTTP requests.
"""
def __init__(self, *, reuse_client: bool = True) -> None:
super().__init__()
# When True, the sync/async clients are created once and reused;
# when False, a fresh client is created per accessor call.
self.reuse_client = reuse_client
self._sync_client: Optional[requests.Session] = None
self._async_client: Optional[aiohttp.ClientSession] = None
def get_sync_client(self) -> requests.Session:
if self._sync_client is None or not self.reuse_client:
self._sync_client = requests.Session()
return self._sync_client
# NOTE: We intentionally use an async function even though it is not
# required, so that the client is only accessible inside async event loop
async def get_async_client(self) -> aiohttp.ClientSession:
# NOTE(review): sessions created here are never closed explicitly;
# with reuse_client=False each call leaks a ClientSession — confirm
# intended lifecycle.
if self._async_client is None or not self.reuse_client:
self._async_client = aiohttp.ClientSession()
return self._async_client
def _validate_http_url(self, url: str):
# Reject non-HTTP(S) schemes (file://, ftp://, etc.) up front.
parsed_url = urlparse(url)
if parsed_url.scheme not in ("http", "https"):
raise ValueError(
"Invalid HTTP URL: A valid HTTP URL "
"must have scheme 'http' or 'https'."
)
def _headers(self, **extras: str) -> MutableMapping[str, str]:
# Base headers for every request; extras may override User-Agent.
return {"User-Agent": "RayLLM-Batch", **extras}
def get_response(
self,
url: str,
*,
stream: bool = False,
timeout: Optional[float] = None,
extra_headers: Optional[Mapping[str, str]] = None,
):
"""Issue a synchronous GET and return the raw requests.Response."""
self._validate_http_url(url)
client = self.get_sync_client()
extra_headers = extra_headers or {}
return client.get(
url, headers=self._headers(**extra_headers), stream=stream, timeout=timeout
)
async def get_async_response(
self,
url: str,
*,
timeout: Optional[float] = None,
extra_headers: Optional[Mapping[str, str]] = None,
):
"""Issue an asynchronous GET and return the aiohttp response context."""
self._validate_http_url(url)
client = await self.get_async_client()
extra_headers = extra_headers or {}
return client.get(url, headers=self._headers(**extra_headers), timeout=timeout)
def get_bytes(self, url: str, *, timeout: Optional[float] = None) -> bytes:
with self.get_response(url, timeout=timeout) as r:
r.raise_for_status()
return r.content
async def async_get_bytes(
self,
url: str,
*,
timeout: Optional[float] = None,
) -> bytes:
async with await self.get_async_response(url, timeout=timeout) as r:
r.raise_for_status()
return await r.read()
def get_text(self, url: str, *, timeout: Optional[float] = None) -> str:
with self.get_response(url, timeout=timeout) as r:
r.raise_for_status()
return r.text
async def async_get_text(
self,
url: str,
*,
timeout: Optional[float] = None,
) -> str:
async with await self.get_async_response(url, timeout=timeout) as r:
r.raise_for_status()
return await r.text()
# NOTE(review): return annotation says `str`, but r.json() returns the
# parsed JSON object (dict/list/...) — annotation looks wrong; fix
# upstream rather than here.
def get_json(self, url: str, *, timeout: Optional[float] = None) -> str:
with self.get_response(url, timeout=timeout) as r:
r.raise_for_status()
return r.json()
async def async_get_json(
self,
url: str,
*,
timeout: Optional[float] = None,
) -> str:
# NOTE(review): same `-> str` vs parsed-JSON mismatch as get_json.
async with await self.get_async_response(url, timeout=timeout) as r:
r.raise_for_status()
return await r.json()
def download_file(
self,
url: str,
save_path: Path,
*,
timeout: Optional[float] = None,
chunk_size: int = 128,
) -> Path:
"""Stream the response body into save_path; returns save_path."""
with self.get_response(url, timeout=timeout) as r:
r.raise_for_status()
with save_path.open("wb") as f:
for chunk in r.iter_content(chunk_size):
f.write(chunk)
return save_path
async def async_download_file(
self,
url: str,
save_path: Path,
*,
timeout: Optional[float] = None,
chunk_size: int = 128,
) -> Path:
"""Async variant of download_file using chunked reads."""
async with await self.get_async_response(url, timeout=timeout) as r:
r.raise_for_status()
with save_path.open("wb") as f:
async for chunk in r.content.iter_chunked(chunk_size):
f.write(chunk)
return save_path
| HTTPConnection |
python | tensorflow__tensorflow | tensorflow/python/ops/init_ops.py | {
"start": 23925,
"end": 26984
} | class ____(Initializer):
"""Initializer that generates tensors without scaling variance.
When initializing a deep network, it is in principle advantageous to keep
the scale of the input variance constant, so it does not explode or diminish
by reaching the final layer. If the input is `x` and the operation `x * W`,
and we want to initialize `W` uniformly at random, we need to pick `W` from
[-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)]
to keep the scale intact, where `dim = W.shape[0]` (the size of the input).
A similar calculation for convolutional networks gives an analogous result
with `dim` equal to the product of the first 3 dimensions. When
nonlinearities are present, we need to multiply this by a constant `factor`.
See (Sussillo et al., 2014) for deeper motivation, experiments
and the calculation of constants. In section 2.3 there, the constants were
numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15.
Args:
factor: Float. A multiplicative factor by which the values will be scaled.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed` for behavior.
dtype: Default data type, used if no `dtype` argument is provided when
calling the initializer. Only floating point types are supported.
References:
[Sussillo et al., 2014](https://arxiv.org/abs/1412.6558)
([pdf](http://arxiv.org/pdf/1412.6558.pdf))
"""
@deprecated_args(None,
"Call initializer instance with the dtype argument instead "
"of passing it to the constructor", "dtype")
@deprecated(None,
"Use tf.initializers.variance_scaling instead with distribution="
"uniform to get equivalent behavior.")
def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):
self.factor = factor
self.seed = seed
self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
scale_shape = shape
if partition_info is not None:
scale_shape = partition_info.full_shape
input_size = 1.0
# Estimating input size is not possible to do perfectly, but we try.
# The estimate, obtained by multiplying all dimensions but the last one,
# is the right thing for matrix multiply and convolutions (see above).
for dim in scale_shape[:-1]:
input_size *= float(dim)
# Avoid errors when initializing zero-size tensors.
input_size = max(input_size, 1.0)
max_val = math.sqrt(3 / input_size) * self.factor
return random_ops.random_uniform(
shape, -max_val, max_val, dtype, seed=self.seed)
def get_config(self):
return {"factor": self.factor, "seed": self.seed, "dtype": self.dtype.name}
@tf_export(v1=["initializers.variance_scaling", "variance_scaling_initializer"])
@deprecation.deprecated_endpoints("initializers.variance_scaling",
"variance_scaling_initializer")
| UniformUnitScaling |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 71393,
"end": 72195
} | class ____(BiffRecord):
"""
This record specifies the default height and default flags
for rows that do not have a corresponding ROW record.
Record DEFAULTROWHEIGHT, BIFF3-BIFF8:
Offset Size Contents
0 2 Option flags:
Bit Mask Contents
0 0001H 1 = Row height and default font height do not match
1 0002H 1 = Row is hidden
2 0004H 1 = Additional space above the row
3 0008H 1 = Additional space below the row
2 2 Default height for unused rows, in twips = 1/20 of a point
"""
_REC_ID = 0x0225
def __init__(self, options, def_height):
self._rec_data = pack('<2H', options, def_height)
| DefaultRowHeightRecord |
python | ray-project__ray | python/ray/_private/event/export_event_logger.py | {
"start": 3006,
"end": 9091
} | class ____:
def __init__(self, log_type: EventLogType, logger: logging.Logger):
"""Adapter for the Python logger that's used to emit export events."""
# log_type records which export source this adapter accepts; logger is the
# pre-configured rotating file logger that events are written to.
self.logger = logger
self.log_type = log_type
def send_event(self, event_data: ExportEventDataType):
    """Serialize *event_data* into an ExportEvent and append it to the log.

    Python loggers are thread-safe, so no extra locking is needed here.
    Unsupported payload types are logged and dropped rather than raised.
    """
    try:
        event = self._create_export_event(event_data)
    except TypeError:
        global_logger.exception(
            "Failed to create ExportEvent from event_data so no "
            "event will be written to file."
        )
        return
    event_as_str = self._export_event_to_string(event)
    self.logger.info(event_as_str)
    # Force flush so that we won't lose events. Flush every attached handler
    # instead of indexing handlers[0], which would raise IndexError if the
    # logger ever has no handlers and silently skipped any extra handlers.
    for handler in self.logger.handlers:
        handler.flush()
def _create_export_event(self, event_data: ExportEventDataType) -> ExportEvent:
    """Wrap *event_data* in an ExportEvent envelope (id, timestamp, payload).

    Raises:
        TypeError: if event_data is not one of the supported proto types.
    """
    event = ExportEvent()
    event.event_id = generate_event_id()
    event.timestamp = int(datetime.now().timestamp())
    # Dispatch on the concrete proto type to fill the matching oneof payload
    # field and its source_type tag.
    if isinstance(event_data, ExportSubmissionJobEventData):
        event.submission_job_event_data.CopyFrom(event_data)
        event.source_type = ExportEvent.SourceType.EXPORT_SUBMISSION_JOB
    elif isinstance(event_data, ExportTrainRunEventData):
        event.train_run_event_data.CopyFrom(event_data)
        event.source_type = ExportEvent.SourceType.EXPORT_TRAIN_RUN
    elif isinstance(event_data, ExportTrainRunAttemptEventData):
        event.train_run_attempt_event_data.CopyFrom(event_data)
        event.source_type = ExportEvent.SourceType.EXPORT_TRAIN_RUN_ATTEMPT
    elif isinstance(event_data, ExportDatasetMetadata):
        event.dataset_metadata.CopyFrom(event_data)
        event.source_type = ExportEvent.SourceType.EXPORT_DATASET_METADATA
    elif isinstance(event_data, ExportDatasetOperatorEventData):
        event.dataset_operator_event_data.CopyFrom(event_data)
        event.source_type = ExportEvent.SourceType.EXPORT_DATASET_OPERATOR_EVENT
    else:
        raise TypeError(f"Invalid event_data type: {type(event_data)}")
    if not self.log_type.supports_event_type(event_data):
        # A mismatch indicates a programming error; log loudly but still
        # return the event so it is not silently dropped. (Removed a dead
        # trailing `pass` that followed this log call.)
        global_logger.error(
            f"event_data has source type {event.source_type}, however "
            f"the event was sent to a logger with log type {self.log_type.log_type_name}. "
            f"The event will still be written to the file of {self.log_type.log_type_name} "
            "but this indicates a bug in the code."
        )
    return event
def _export_event_to_string(self, event: ExportEvent) -> str:
    """Render *event* as a single-line JSON string for the event log."""
    event_data_json = {}
    proto_to_dict_options = {
        "always_print_fields_with_no_presence": True,
        "preserving_proto_field_name": True,
        "use_integers_for_enums": False,
    }
    event_data_field_set = event.WhichOneof("event_data")
    if event_data_field_set:
        event_data_json = message_to_dict(
            getattr(event, event_data_field_set),
            **proto_to_dict_options,
        )
    else:
        # An empty payload indicates a bug upstream; emit the envelope anyway
        # so the problem is visible in the log. (Removed a dead trailing
        # `pass` that followed this log call.)
        global_logger.error(
            f"event_data missing from export event with id {event.event_id} "
            f"and type {event.source_type}. An empty event will be written, "
            "but this indicates a bug in the code. "
        )
    event_json = {
        "event_id": event.event_id,
        "timestamp": event.timestamp,
        "source_type": ExportEvent.SourceType.Name(event.source_type),
        "event_data": event_data_json,
    }
    return json.dumps(event_json)
def _build_export_event_file_logger(
    log_type_name: str, sink_dir: str
) -> logging.Logger:
    """Create a rotating-file logger that writes export events of one source.

    The log file lives at ``<sink_dir>/export_events/event_<name>.log``.
    """
    event_logger = logging.getLogger("_ray_export_event_logger_" + log_type_name)
    event_logger.setLevel(logging.INFO)
    event_dir = pathlib.Path(sink_dir) / "export_events"
    event_file = event_dir / f"event_{log_type_name}.log"
    event_dir.mkdir(exist_ok=True)
    event_file.touch(exist_ok=True)
    # Rotate at the configured maximum size (default 100 MB) and keep a
    # bounded number of backups so the sink directory cannot grow unbounded.
    rotating_handler = logging.handlers.RotatingFileHandler(
        event_file,
        maxBytes=(ray_constants.RAY_EXPORT_EVENT_MAX_FILE_SIZE_BYTES),
        backupCount=ray_constants.RAY_EXPORT_EVENT_MAX_BACKUP_COUNT,
    )
    event_logger.addHandler(rotating_handler)
    # Keep export events out of the root logger's output.
    event_logger.propagate = False
    return event_logger
# This lock must be used when accessing or updating global event logger dict.
_export_event_logger_lock = threading.Lock()
_export_event_logger = {}


def get_export_event_logger(
    log_type: EventLogType, sink_dir: str
) -> "ExportEventLoggerAdapter":
    """Get the export event logger of the current process.

    There's only one logger per export event source; adapters are created
    lazily and cached for the lifetime of the process.

    Fix: the return annotation previously said ``logging.Logger`` but the
    function actually returns an ``ExportEventLoggerAdapter``.

    Args:
        log_type: The type of the export event.
        sink_dir: The directory to sink event logs.
    """
    with _export_event_logger_lock:
        global _export_event_logger
        log_type_name = log_type.log_type_name
        if log_type_name not in _export_event_logger:
            # First request for this source type: build the rotating file
            # logger and wrap it in an adapter.
            logger = _build_export_event_file_logger(log_type.log_type_name, sink_dir)
            _export_event_logger[log_type_name] = ExportEventLoggerAdapter(
                log_type, logger
            )
        return _export_event_logger[log_type_name]
def check_export_api_enabled(
    source: ExportEvent.SourceType,
) -> bool:
    """
    Check RAY_ENABLE_EXPORT_API_WRITE and RAY_ENABLE_EXPORT_API_WRITE_CONFIG environment
    variables to verify if export events should be written for the given source type.

    Args:
        source: The source of the export event.
    """
    # The global opt-in switch takes precedence over the per-source list.
    if ray_constants.RAY_ENABLE_EXPORT_API_WRITE:
        return True
    enabled_sources = ray_constants.RAY_ENABLE_EXPORT_API_WRITE_CONFIG
    if not enabled_sources:
        return False
    return ExportEvent.SourceType.Name(source) in enabled_sources
| ExportEventLoggerAdapter |
python | kubernetes-client__python | kubernetes/client/models/v1_api_service_condition.py | {
"start": 383,
"end": 7405
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_transition_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_transition_time': 'lastTransitionTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1APIServiceCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._last_transition_time = None
self._message = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
if last_transition_time is not None:
self.last_transition_time = last_transition_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
self.status = status
self.type = type
# NOTE(review): auto-generated property accessors (OpenAPI Generator) —
# do not hand-edit. Required fields (status, type) reject None in their
# setters when client_side_validation is enabled.
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1APIServiceCondition.  # noqa: E501
Last time the condition transitioned from one status to another.  # noqa: E501
:return: The last_transition_time of this V1APIServiceCondition.  # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1APIServiceCondition.
Last time the condition transitioned from one status to another.  # noqa: E501
:param last_transition_time: The last_transition_time of this V1APIServiceCondition.  # noqa: E501
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def message(self):
"""Gets the message of this V1APIServiceCondition.  # noqa: E501
Human-readable message indicating details about last transition.  # noqa: E501
:return: The message of this V1APIServiceCondition.  # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1APIServiceCondition.
Human-readable message indicating details about last transition.  # noqa: E501
:param message: The message of this V1APIServiceCondition.  # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V1APIServiceCondition.  # noqa: E501
Unique, one-word, CamelCase reason for the condition's last transition.  # noqa: E501
:return: The reason of this V1APIServiceCondition.  # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1APIServiceCondition.
Unique, one-word, CamelCase reason for the condition's last transition.  # noqa: E501
:param reason: The reason of this V1APIServiceCondition.  # noqa: E501
:type: str
"""
self._reason = reason
@property
def status(self):
"""Gets the status of this V1APIServiceCondition.  # noqa: E501
Status is the status of the condition. Can be True, False, Unknown.  # noqa: E501
:return: The status of this V1APIServiceCondition.  # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1APIServiceCondition.
Status is the status of the condition. Can be True, False, Unknown.  # noqa: E501
:param status: The status of this V1APIServiceCondition.  # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and status is None:  # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501
self._status = status
@property
def type(self):
"""Gets the type of this V1APIServiceCondition.  # noqa: E501
Type is the type of the condition.  # noqa: E501
:return: The type of this V1APIServiceCondition.  # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1APIServiceCondition.
Type is the type of the condition.  # noqa: E501
:param type: The type of this V1APIServiceCondition.  # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
# Walk the declared fields, recursively serializing nested models,
# lists of models, and dicts of models via their own to_dict().
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
# Equality is structural: compare the serialized dict forms.
if not isinstance(other, V1APIServiceCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1APIServiceCondition):
return True
return self.to_dict() != other.to_dict()
python | pytorch__pytorch | torch/_dynamo/output_graph.py | {
"start": 121009,
"end": 121419
} | class ____:
def __init__(
self,
tracer: "SubgraphTracer",
fn: Callable[P, R],
*args: P.args,
**kwargs: P.kwargs,
) -> None:
# Capture the callable and its arguments so evaluation can be deferred
# until the proxy itself is invoked.
self.tracer = tracer
# pyrefly: ignore [invalid-type-var]
self.fn = fn
self.args = args
self.kwargs = kwargs
def __call__(self) -> Any:
    """Invoke the stored callable with the captured positional/keyword args."""
    deferred_fn = self.fn
    return deferred_fn(*self.args, **self.kwargs)
| LazyProxy |
python | apache__airflow | devel-common/src/sphinx_exts/removemarktransform.py | {
"start": 1225,
"end": 2707
} | class ____(SphinxTransform):
"""
Trim doc marker like ``# [START howto_concept]` from python code-blocks.
Based on:
https://github.com/sphinx-doc/sphinx/blob/master/sphinx/transforms/post_transforms/code.py
class TrimDoctestFlagsTransform
"""
# Run just after sphinx's own doctest-flag trimming pass.
default_priority = TrimDoctestFlagsTransform.default_priority + 1
def apply(self, **kwargs):
# Strip "# [START ...]" / "# [END ...]" doc markers (docmark_re) from every
# python literal block in the document.
for node in self.document.traverse(nodes.literal_block):
if self.is_pycode(node):
source = node.rawsource
source = docmark_re.sub("", source)
# Keep rawsource and the rendered Text child in sync.
node.rawsource = source
node[:] = [nodes.Text(source)]
@staticmethod
def is_pycode(node: nodes.literal_block) -> bool:
"""Checks if the node is literal block of python"""
# A parsed-literal node renders differently from its raw source; skip it.
if node.rawsource != node.astext():
return False  # skip parsed-literal node
language = node.get("language")
if language in ("py", "py3", "python", "python3", "default"):
return True
if language == "guess":
# Best-effort: let pygments sniff the source. Any failure simply
# means "not python" rather than an error.
try:
lexer = guess_lexer(node.rawsource)
return isinstance(lexer, (PythonLexer, Python3Lexer))
except Exception:
pass
return False
def setup(app):
"""Sets the transform up"""
app.add_post_transform(TrimDocMarkerFlagsTransform)
# parallel_*_safe lets sphinx run this extension during parallel builds.
return {"version": "builtin", "parallel_read_safe": True, "parallel_write_safe": True}
| TrimDocMarkerFlagsTransform |
python | getsentry__sentry | src/sentry/discover/endpoints/discover_saved_queries.py | {
"start": 1547,
"end": 7560
} | class ____(OrganizationEndpoint):
# Both endpoints on this resource are part of the public API surface.
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
"POST": ApiPublishStatus.PUBLIC,
}
owner = ApiOwner.DATA_BROWSING
permission_classes = (DiscoverSavedQueryPermission,)
def has_feature(self, organization, request):
# Access requires either of the two discover feature flags.
return features.has(
"organizations:discover", organization, actor=request.user
) or features.has("organizations:discover-query", organization, actor=request.user)
@extend_schema(
operation_id="List an Organization's Discover Saved Queries",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
VisibilityParams.PER_PAGE,
CursorQueryParam,
DiscoverSavedQueriesParams.QUERY,
DiscoverSavedQueriesParams.SORT,
],
request=None,
responses={
200: inline_sentry_response_serializer(
"DiscoverSavedQueryListResponse", list[DiscoverSavedQueryResponse]
),
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=DiscoverExamples.DISCOVER_SAVED_QUERIES_QUERY_RESPONSE,
)
def get(self, request: Request, organization: Organization) -> Response:
"""
Retrieve a list of saved queries that are associated with the given organization.
"""
if not self.has_feature(organization, request):
return self.respond(status=404)
# Homepage queries are per-user defaults, never listed here; lower_name is
# computed in SQL so name sorting is case-insensitive.
queryset = (
DiscoverSavedQuery.objects.filter(organization=organization)
.prefetch_related("projects")
.extra(select={"lower_name": "lower(name)"})
).exclude(is_homepage=True)
query = request.query_params.get("query")
if query:
tokens = tokenize_query(query)
for key, value in tokens.items():
if key == "name" or key == "query":
queryset = queryset.filter(name__icontains=" ".join(value))
elif key == "version":
queryset = queryset.filter(version=" ".join(value))
else:
# Unknown search key: return no results rather than guessing.
queryset = queryset.none()
# A leading "-" on sortBy flips the sort direction.
sort_by = request.query_params.get("sortBy")
if sort_by and sort_by.startswith("-"):
sort_by, desc = sort_by[1:], True
else:
desc = False
if sort_by == "name":
order_by: list[Case | str] = [
"-lower_name" if desc else "lower_name",
"-date_created",
]
elif sort_by == "dateCreated":
order_by = ["-date_created" if desc else "date_created"]
elif sort_by == "dateUpdated":
order_by = ["-date_updated" if desc else "date_updated"]
elif sort_by == "mostPopular":
# NOTE(review): popularity and recency sort descending by default, so
# the desc flag intentionally flips these to ascending.
order_by = [
"visits" if desc else "-visits",
"-date_updated",
]
elif sort_by == "recentlyViewed":
order_by = ["last_visited" if desc else "-last_visited"]
elif sort_by == "myqueries":
# Current user's queries first (Case yields -1), then newest first.
order_by = [
Case(
When(created_by_id=request.user.id, then=-1),
default="created_by_id",
output_field=IntegerField(),
),
"-date_created",
]
else:
order_by = ["lower_name"]
queryset = queryset.order_by(*order_by)
# Old discover expects all queries and uses this parameter.
if request.query_params.get("all") == "1":
saved_queries = list(queryset.all())
return Response(serialize(saved_queries), status=200)
def data_fn(offset, limit):
return list(queryset[offset : offset + limit])
return self.paginate(
request=request,
paginator=GenericOffsetPaginator(data_fn=data_fn),
on_results=lambda x: serialize(x, request.user),
default_per_page=25,
)
@extend_schema(
operation_id="Create a New Saved Query",
parameters=[GlobalParams.ORG_ID_OR_SLUG],
request=DiscoverSavedQuerySerializer,
responses={
201: DiscoverSavedQueryModelSerializer,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=DiscoverExamples.DISCOVER_SAVED_QUERY_POST_RESPONSE,
)
def post(self, request: Request, organization) -> Response:
"""
Create a new saved query for the given organization.
"""
if not self.has_feature(organization, request):
return self.respond(status=404)
try:
params = self.get_filter_params(
request, organization, project_ids=request.data.get("projects")
)
except NoProjects:
raise ParseError(detail="No Projects found, join a Team")
serializer = DiscoverSavedQuerySerializer(
data=request.data,
context={"params": params, "organization": organization, "user": request.user},
)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
data = serializer.validated_data
# Track whether the dataset was explicitly chosen by the user, or left at
# the legacy DISCOVER default (recorded as UNKNOWN provenance).
user_selected_dataset = data["query_dataset"] != DiscoverSavedQueryTypes.DISCOVER
model = DiscoverSavedQuery.objects.create(
organization=organization,
name=data["name"],
query=data["query"],
version=data["version"],
dataset=data["query_dataset"],
dataset_source=(
DatasetSourcesTypes.USER.value
if user_selected_dataset
else DatasetSourcesTypes.UNKNOWN.value
),
created_by_id=request.user.id if request.user.is_authenticated else None,
)
model.set_projects(data["project_ids"])
return Response(serialize(model), status=201)
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/assets/definition/assets_definition.py | {
"start": 3676,
"end": 92949
} | class ____(ResourceAddable, IHasInternalInit):
"""Defines a set of assets that are produced by the same op or graph.
AssetsDefinitions are typically not instantiated directly, but rather produced using the
:py:func:`@asset <asset>` or :py:func:`@multi_asset <multi_asset>` decorators.
"""
# Constructor arguments that are redundant with the specs argument
_dagster_internal_init_excluded_args = {
"group_names_by_key",
"metadata_by_key",
"tags_by_key",
"legacy_freshness_policies_by_key",
"auto_materialize_policies_by_key",
"partition_mappings",
"descriptions_by_key",
"asset_deps",
"owners_by_key",
"partitions_def",
}
# partition mappings are also tracked inside the AssetSpecs, but this enables faster access by
# upstream asset key
_partition_mappings: Mapping[AssetKey, PartitionMapping]
_resource_defs: Mapping[str, ResourceDefinition]
_specs_by_key: Mapping[AssetKey, AssetSpec]
_computation: Optional[AssetGraphComputation]
_hook_defs: AbstractSet[HookDefinition]
@beta_param(param="execution_type")
def __init__(
self,
*,
keys_by_input_name: Optional[Mapping[str, AssetKey]] = None,
keys_by_output_name: Optional[Mapping[str, AssetKey]] = None,
node_def: Optional[NodeDefinition] = None,
partitions_def: Optional[PartitionsDefinition] = None,
partition_mappings: Optional[Mapping[AssetKey, PartitionMapping]] = None,
asset_deps: Optional[Mapping[AssetKey, AbstractSet[AssetKey]]] = None,
selected_asset_keys: Optional[AbstractSet[AssetKey]] = None,
can_subset: bool = False,
resource_defs: Optional[Mapping[str, object]] = None,
group_names_by_key: Optional[Mapping[AssetKey, str]] = None,
metadata_by_key: Optional[Mapping[AssetKey, ArbitraryMetadataMapping]] = None,
tags_by_key: Optional[Mapping[AssetKey, Mapping[str, str]]] = None,
legacy_freshness_policies_by_key: Optional[Mapping[AssetKey, LegacyFreshnessPolicy]] = None,
backfill_policy: Optional[BackfillPolicy] = None,
# descriptions by key is more accurately understood as _overriding_ the descriptions
# by key that are in the OutputDefinitions associated with the asset key.
# This is a dangerous construction liable for bugs. Instead there should be a
# canonical source of asset descriptions in AssetsDefinintion and if we need
# to create a memoized cached dictionary of asset keys for perf or something we do
# that in the `__init__` or on demand.
#
# This is actually an override. We do not override descriptions
# in OutputDefinitions in @multi_asset
descriptions_by_key: Optional[Mapping[AssetKey, str]] = None,
check_specs_by_output_name: Optional[Mapping[str, AssetCheckSpec]] = None,
selected_asset_check_keys: Optional[AbstractSet[AssetCheckKey]] = None,
is_subset: bool = False,
owners_by_key: Optional[Mapping[AssetKey, Sequence[str]]] = None,
specs: Optional[Sequence[AssetSpec]] = None,
execution_type: Optional[AssetExecutionType] = None,
# TODO: FOU-243
auto_materialize_policies_by_key: Optional[Mapping[AssetKey, AutoMaterializePolicy]] = None,
hook_defs: Optional[AbstractSet[HookDefinition]] = None,
# if adding new fields, make sure to handle them in the with_attributes, from_graph,
# from_op, and get_attributes_dict methods
):
from dagster._core.definitions.graph_definition import GraphDefinition
from dagster._core.definitions.hook_definition import HookDefinition
from dagster._core.execution.build_resources import wrap_resources_for_execution
if isinstance(node_def, GraphDefinition):
_validate_graph_def(node_def)
self._check_specs_by_output_name = check.opt_mapping_param(
check_specs_by_output_name,
"check_specs_by_output_name",
key_type=str,
value_type=AssetCheckSpec,
)
self._hook_defs = check.opt_set_param(hook_defs, "hook_defs", HookDefinition)
automation_conditions_by_key = (
{k: v.to_automation_condition() for k, v in auto_materialize_policies_by_key.items()}
if auto_materialize_policies_by_key
else None
)
if node_def is None:
check.invariant(
not keys_by_input_name,
"node_def is None, so keys_by_input_name must be empty",
)
check.invariant(
not keys_by_output_name,
"node_def is None, so keys_by_output_name must be empty",
)
check.invariant(
backfill_policy is None,
"node_def is None, so backfill_policy must be None",
)
check.invariant(not can_subset, "node_def is None, so can_subset must be False")
self._computation = None
else:
selected_asset_keys, selected_asset_check_keys = _resolve_selections(
all_asset_keys={spec.key for spec in specs}
if specs
else set(check.not_none(keys_by_output_name).values()),
all_check_keys={spec.key for spec in (check_specs_by_output_name or {}).values()},
selected_asset_keys=selected_asset_keys,
selected_asset_check_keys=selected_asset_check_keys,
)
self._computation = AssetGraphComputation(
node_def=node_def,
keys_by_input_name=check.opt_mapping_param(
keys_by_input_name,
"keys_by_input_name",
key_type=str,
value_type=AssetKey,
),
keys_by_output_name=check.opt_mapping_param(
keys_by_output_name,
"keys_by_output_name",
key_type=str,
value_type=AssetKey,
),
check_keys_by_output_name={
output_name: spec.key
for output_name, spec in self._check_specs_by_output_name.items()
},
can_subset=can_subset,
backfill_policy=check.opt_inst_param(
backfill_policy, "backfill_policy", BackfillPolicy
),
is_subset=check.bool_param(is_subset, "is_subset"),
selected_asset_keys=selected_asset_keys,
selected_asset_check_keys=selected_asset_check_keys,
execution_type=execution_type or AssetExecutionType.MATERIALIZATION,
)
self._resource_defs = wrap_resources_for_execution(
check.opt_mapping_param(resource_defs, "resource_defs")
)
if specs is not None:
check.invariant(group_names_by_key is None)
check.invariant(metadata_by_key is None)
check.invariant(tags_by_key is None)
check.invariant(legacy_freshness_policies_by_key is None)
check.invariant(auto_materialize_policies_by_key is None)
check.invariant(automation_conditions_by_key is None)
check.invariant(descriptions_by_key is None)
check.invariant(owners_by_key is None)
check.invariant(partition_mappings is None)
check.invariant(asset_deps is None)
check.invariant(partitions_def is None)
resolved_specs = specs
else:
computation_not_none = check.not_none(
self._computation,
"If specs are not provided, a node_def must be provided",
)
all_asset_keys = set(computation_not_none.keys_by_output_name.values())
if asset_deps:
check.invariant(
set(asset_deps.keys()) == all_asset_keys,
"The set of asset keys with dependencies specified in the asset_deps argument must "
"equal the set of asset keys produced by this AssetsDefinition. \n"
f"asset_deps keys: {set(asset_deps.keys())} \n"
f"expected keys: {all_asset_keys}",
)
if partition_mappings:
_validate_partition_mappings(
partition_mappings=partition_mappings,
input_asset_keys=set(computation_not_none.keys_by_input_name.values()),
all_asset_keys=all_asset_keys,
)
check.invariant(node_def, "Must provide node_def if not providing specs")
resolved_specs = _asset_specs_from_attr_key_params(
all_asset_keys=all_asset_keys,
keys_by_input_name=computation_not_none.keys_by_input_name,
deps_by_asset_key=asset_deps,
partition_mappings=partition_mappings,
tags_by_key=tags_by_key,
owners_by_key=owners_by_key,
group_names_by_key=group_names_by_key,
legacy_freshness_policies_by_key=legacy_freshness_policies_by_key,
automation_conditions_by_key=automation_conditions_by_key,
metadata_by_key=metadata_by_key,
descriptions_by_key=descriptions_by_key,
code_versions_by_key=None,
partitions_def=partitions_def,
)
normalized_specs: list[AssetSpec] = []
for spec in resolved_specs:
if spec.owners:
for owner in spec.owners:
validate_asset_owner(owner, spec.key)
group_name = normalize_group_name(spec.group_name)
if self._computation is not None:
output_def, _ = self._computation.full_node_def.resolve_output_to_origin(
self._computation.output_names_by_key[spec.key], None
)
node_def_description = self._computation.node_def.description
output_def_metadata = output_def.metadata
output_def_description = output_def.description
output_def_code_version = output_def.code_version
skippable = not output_def.is_required
else:
node_def_description = None
output_def_metadata = {}
output_def_description = None
output_def_code_version = None
skippable = False
metadata = {**output_def_metadata, **(spec.metadata or {})}
# We construct description from three sources of truth here. This
# highly unfortunate. See commentary in @multi_asset's call to dagster_internal_init.
description = spec.description or output_def_description or node_def_description
code_version = spec.code_version or output_def_code_version
check.invariant(
not (
spec.legacy_freshness_policy
and spec.partitions_def is not None
and not isinstance(spec.partitions_def, TimeWindowPartitionsDefinition)
),
"FreshnessPolicies are currently unsupported for assets with partitions of type"
f" {spec.partitions_def}.",
)
normalized_specs.append(
replace(
spec,
group_name=group_name,
code_version=code_version,
metadata=metadata,
description=description,
skippable=skippable,
)
)
unique_partitions_defs = {
spec.partitions_def for spec in normalized_specs if spec.partitions_def is not None
}
if len(unique_partitions_defs) > 1 and not can_subset:
raise DagsterInvalidDefinitionError(
"If different AssetSpecs have different partitions_defs, can_subset must be True"
)
_validate_self_deps(normalized_specs)
self._specs_by_key = {}
for spec in normalized_specs:
if spec.key in self._specs_by_key and self._specs_by_key[spec.key] != spec:
raise DagsterInvalidDefinitionError(
"Received conflicting AssetSpecs with the same key:\n"
f"{self._specs_by_key[spec.key]}\n"
f"{spec}\n"
"This warning will become an exception in version 1.11"
)
self._specs_by_key[spec.key] = spec
self._partition_mappings = get_partition_mappings_from_deps(
{},
[dep for spec in normalized_specs for dep in spec.deps],
node_def.name if node_def else "external assets",
)
self._check_specs_by_key = {
spec.key: spec for spec in self._check_specs_by_output_name.values()
}
def dagster_internal_init(
*,
keys_by_input_name: Mapping[str, AssetKey],
keys_by_output_name: Mapping[str, AssetKey],
node_def: NodeDefinition,
selected_asset_keys: Optional[AbstractSet[AssetKey]],
can_subset: bool,
resource_defs: Optional[Mapping[str, object]],
backfill_policy: Optional[BackfillPolicy],
check_specs_by_output_name: Optional[Mapping[str, AssetCheckSpec]],
selected_asset_check_keys: Optional[AbstractSet[AssetCheckKey]],
is_subset: bool,
specs: Optional[Sequence[AssetSpec]],
execution_type: Optional[AssetExecutionType],
hook_defs: Optional[AbstractSet[HookDefinition]],
) -> "AssetsDefinition":
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=PreviewWarning)
warnings.simplefilter("ignore", category=BetaWarning)
return AssetsDefinition(
keys_by_input_name=keys_by_input_name,
keys_by_output_name=keys_by_output_name,
node_def=node_def,
selected_asset_keys=selected_asset_keys,
can_subset=can_subset,
resource_defs=resource_defs,
hook_defs=hook_defs,
backfill_policy=backfill_policy,
check_specs_by_output_name=check_specs_by_output_name,
selected_asset_check_keys=selected_asset_check_keys,
is_subset=is_subset,
specs=specs,
execution_type=execution_type,
)
def __call__(self, *args: object, **kwargs: object) -> object:
from dagster._core.definitions.composition import is_in_composition
from dagster._core.definitions.graph_definition import GraphDefinition
# defer to GraphDefinition.__call__ for graph backed assets, or if invoked in composition
if (
self._computation and isinstance(self._computation.node_def, GraphDefinition)
) or is_in_composition():
return self.node_def(*args, **kwargs)
# invoke against self to allow assets def information to be used
return direct_invocation_result(self, *args, **kwargs)
@public
@beta_param(param="resource_defs")
@deprecated_param(param="legacy_freshness_policies_by_output_name", breaking_version="1.12.0")
@staticmethod
def from_graph(
graph_def: "GraphDefinition",
*,
keys_by_input_name: Optional[Mapping[str, AssetKey]] = None,
keys_by_output_name: Optional[Mapping[str, AssetKey]] = None,
key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,
internal_asset_deps: Optional[Mapping[str, set[AssetKey]]] = None,
partitions_def: Optional[PartitionsDefinition] = None,
partition_mappings: Optional[Mapping[str, PartitionMapping]] = None,
resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,
group_name: Optional[str] = None,
group_names_by_output_name: Optional[Mapping[str, Optional[str]]] = None,
descriptions_by_output_name: Optional[Mapping[str, str]] = None,
metadata_by_output_name: Optional[Mapping[str, Optional[ArbitraryMetadataMapping]]] = None,
tags_by_output_name: Optional[Mapping[str, Optional[Mapping[str, str]]]] = None,
legacy_freshness_policies_by_output_name: Optional[
Mapping[str, Optional[LegacyFreshnessPolicy]]
] = None,
automation_conditions_by_output_name: Optional[
Mapping[str, Optional[AutomationCondition]]
] = None,
backfill_policy: Optional[BackfillPolicy] = None,
can_subset: bool = False,
check_specs: Optional[Sequence[AssetCheckSpec]] = None,
owners_by_output_name: Optional[Mapping[str, Sequence[str]]] = None,
code_versions_by_output_name: Optional[Mapping[str, Optional[str]]] = None,
# TODO: FOU-243
auto_materialize_policies_by_output_name: Optional[
Mapping[str, Optional[AutoMaterializePolicy]]
] = None,
hook_defs: Optional[AbstractSet[HookDefinition]] = None,
) -> "AssetsDefinition":
"""Constructs an AssetsDefinition from a GraphDefinition.
Args:
graph_def (GraphDefinition): The GraphDefinition that is an asset.
keys_by_input_name (Optional[Mapping[str, AssetKey]]): A mapping of the input
names of the decorated graph to their corresponding asset keys. If not provided,
the input asset keys will be created from the graph input names.
keys_by_output_name (Optional[Mapping[str, AssetKey]]): A mapping of the output
names of the decorated graph to their corresponding asset keys. If not provided,
the output asset keys will be created from the graph output names.
key_prefix (Optional[Union[str, Sequence[str]]]): If provided, key_prefix will be prepended
to each key in keys_by_output_name. Each item in key_prefix must be a valid name in
dagster (ie only contains letters, numbers, and _) and may not contain python
reserved keywords.
internal_asset_deps (Optional[Mapping[str, Set[AssetKey]]]): By default, it is assumed
that all assets produced by the graph depend on all assets that are consumed by that
graph. If this default is not correct, you pass in a map of output names to a
corrected set of AssetKeys that they depend on. Any AssetKeys in this list must be
either used as input to the asset or produced within the graph.
partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that
compose the assets.
partition_mappings (Optional[Mapping[str, PartitionMapping]]): Defines how to map partition
keys for this asset to partition keys of upstream assets. Each key in the dictionary
correponds to one of the input assets, and each value is a PartitionMapping.
If no entry is provided for a particular asset dependency, the partition mapping defaults
to the default partition mapping for the partitions definition, which is typically maps
partition keys to the same partition keys in upstream assets.
resource_defs (Optional[Mapping[str, ResourceDefinition]]):
(Beta) A mapping of resource keys to resource definitions. These resources
will be initialized during execution, and can be accessed from the
body of ops in the graph during execution.
group_name (Optional[str]): A group name for the constructed asset. Assets without a
group name are assigned to a group called "default".
group_names_by_output_name (Optional[Mapping[str, Optional[str]]]): Defines a group name to be
associated with some or all of the output assets for this node. Keys are names of the
outputs, and values are the group name. Cannot be used with the group_name argument.
descriptions_by_output_name (Optional[Mapping[str, Optional[str]]]): Defines a description to be
associated with each of the output asstes for this graph.
metadata_by_output_name (Optional[Mapping[str, Optional[RawMetadataMapping]]]): Defines metadata to
be associated with each of the output assets for this node. Keys are names of the
outputs, and values are dictionaries of metadata to be associated with the related
asset.
tags_by_output_name (Optional[Mapping[str, Optional[Mapping[str, str]]]]): Defines
tags to be associated with each of the output assets for this node. Keys are the names
of outputs, and values are dictionaries of tags to be associated with the related
asset.
legacy_freshness_policies_by_output_name (Optional[Mapping[str, Optional[LegacyFreshnessPolicy]]]): Defines a
LegacyFreshnessPolicy to be associated with some or all of the output assets for this node.
Keys are the names of the outputs, and values are the LegacyFreshnessPolicies to be attached
to the associated asset.
automation_conditions_by_output_name (Optional[Mapping[str, Optional[AutomationCondition]]]): Defines an
AutomationCondition to be associated with some or all of the output assets for this node.
Keys are the names of the outputs, and values are the AutoMaterializePolicies to be attached
to the associated asset.
backfill_policy (Optional[BackfillPolicy]): Defines this asset's BackfillPolicy
owners_by_key (Optional[Mapping[AssetKey, Sequence[str]]]): Defines
owners to be associated with each of the asset keys for this node.
"""
return AssetsDefinition._from_node(
node_def=graph_def,
keys_by_input_name=keys_by_input_name,
keys_by_output_name=keys_by_output_name,
key_prefix=key_prefix,
internal_asset_deps=internal_asset_deps,
partitions_def=partitions_def,
partition_mappings=partition_mappings,
resource_defs=resource_defs,
hook_defs=hook_defs,
group_name=group_name,
group_names_by_output_name=group_names_by_output_name,
descriptions_by_output_name=descriptions_by_output_name,
metadata_by_output_name=metadata_by_output_name,
tags_by_output_name=tags_by_output_name,
legacy_freshness_policies_by_output_name=legacy_freshness_policies_by_output_name,
automation_conditions_by_output_name=_resolve_automation_conditions_by_output_name(
automation_conditions_by_output_name,
auto_materialize_policies_by_output_name,
),
backfill_policy=backfill_policy,
can_subset=can_subset,
check_specs=check_specs,
owners_by_output_name=owners_by_output_name,
code_versions_by_output_name=code_versions_by_output_name,
)
@public
@staticmethod
@deprecated_param(param="legacy_freshness_policies_by_output_name", breaking_version="1.12.0")
def from_op(
op_def: OpDefinition,
*,
keys_by_input_name: Optional[Mapping[str, AssetKey]] = None,
keys_by_output_name: Optional[Mapping[str, AssetKey]] = None,
key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,
internal_asset_deps: Optional[Mapping[str, set[AssetKey]]] = None,
partitions_def: Optional[PartitionsDefinition] = None,
partition_mappings: Optional[Mapping[str, PartitionMapping]] = None,
group_name: Optional[str] = None,
group_names_by_output_name: Optional[Mapping[str, Optional[str]]] = None,
descriptions_by_output_name: Optional[Mapping[str, str]] = None,
metadata_by_output_name: Optional[Mapping[str, Optional[ArbitraryMetadataMapping]]] = None,
tags_by_output_name: Optional[Mapping[str, Optional[Mapping[str, str]]]] = None,
legacy_freshness_policies_by_output_name: Optional[
Mapping[str, Optional[LegacyFreshnessPolicy]]
] = None,
automation_conditions_by_output_name: Optional[
Mapping[str, Optional[AutomationCondition]]
] = None,
backfill_policy: Optional[BackfillPolicy] = None,
can_subset: bool = False,
# TODO: FOU-243
auto_materialize_policies_by_output_name: Optional[
Mapping[str, Optional[AutoMaterializePolicy]]
] = None,
hook_defs: Optional[AbstractSet[HookDefinition]] = None,
) -> "AssetsDefinition":
"""Constructs an AssetsDefinition from an OpDefinition.
Args:
op_def (OpDefinition): The OpDefinition that is an asset.
keys_by_input_name (Optional[Mapping[str, AssetKey]]): A mapping of the input
names of the decorated op to their corresponding asset keys. If not provided,
the input asset keys will be created from the op input names.
keys_by_output_name (Optional[Mapping[str, AssetKey]]): A mapping of the output
names of the decorated op to their corresponding asset keys. If not provided,
the output asset keys will be created from the op output names.
key_prefix (Optional[Union[str, Sequence[str]]]): If provided, key_prefix will be prepended
to each key in keys_by_output_name. Each item in key_prefix must be a valid name in
dagster (ie only contains letters, numbers, and _) and may not contain python
reserved keywords.
internal_asset_deps (Optional[Mapping[str, Set[AssetKey]]]): By default, it is assumed
that all assets produced by the op depend on all assets that are consumed by that
op. If this default is not correct, you pass in a map of output names to a
corrected set of AssetKeys that they depend on. Any AssetKeys in this list must be
either used as input to the asset or produced within the op.
partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that
compose the assets.
partition_mappings (Optional[Mapping[str, PartitionMapping]]): Defines how to map partition
keys for this asset to partition keys of upstream assets. Each key in the dictionary
correponds to one of the input assets, and each value is a PartitionMapping.
If no entry is provided for a particular asset dependency, the partition mapping defaults
to the default partition mapping for the partitions definition, which is typically maps
partition keys to the same partition keys in upstream assets.
group_name (Optional[str]): A group name for the constructed asset. Assets without a
group name are assigned to a group called "default".
group_names_by_output_name (Optional[Mapping[str, Optional[str]]]): Defines a group name to be
associated with some or all of the output assets for this node. Keys are names of the
outputs, and values are the group name. Cannot be used with the group_name argument.
descriptions_by_output_name (Optional[Mapping[str, Optional[str]]]): Defines a description to be
associated with each of the output asstes for this graph.
metadata_by_output_name (Optional[Mapping[str, Optional[RawMetadataMapping]]]): Defines metadata to
be associated with each of the output assets for this node. Keys are names of the
outputs, and values are dictionaries of metadata to be associated with the related
asset.
tags_by_output_name (Optional[Mapping[str, Optional[Mapping[str, str]]]]): Defines
tags to be associated with each othe output assets for this node. Keys are the names
of outputs, and values are dictionaries of tags to be associated with the related
asset.
legacy_freshness_policies_by_output_name (Optional[Mapping[str, Optional[LegacyFreshnessPolicy]]]): Defines a
LegacyFreshnessPolicy to be associated with some or all of the output assets for this node.
Keys are the names of the outputs, and values are the LegacyFreshnessPolicies to be attached
to the associated asset.
automation_conditions_by_output_name (Optional[Mapping[str, Optional[AutomationCondition]]]): Defines an
AutomationCondition to be associated with some or all of the output assets for this node.
Keys are the names of the outputs, and values are the AutoMaterializePolicies to be attached
to the associated asset.
backfill_policy (Optional[BackfillPolicy]): Defines this asset's BackfillPolicy
"""
return AssetsDefinition._from_node(
node_def=op_def,
keys_by_input_name=keys_by_input_name,
keys_by_output_name=keys_by_output_name,
key_prefix=key_prefix,
internal_asset_deps=internal_asset_deps,
partitions_def=partitions_def,
partition_mappings=partition_mappings,
group_name=group_name,
group_names_by_output_name=group_names_by_output_name,
descriptions_by_output_name=descriptions_by_output_name,
metadata_by_output_name=metadata_by_output_name,
tags_by_output_name=tags_by_output_name,
legacy_freshness_policies_by_output_name=legacy_freshness_policies_by_output_name,
automation_conditions_by_output_name=_resolve_automation_conditions_by_output_name(
automation_conditions_by_output_name,
auto_materialize_policies_by_output_name,
),
backfill_policy=backfill_policy,
can_subset=can_subset,
hook_defs=hook_defs,
)
    @staticmethod
    def _from_node(
        node_def: NodeDefinition,
        *,
        keys_by_input_name: Optional[Mapping[str, AssetKey]] = None,
        keys_by_output_name: Optional[Mapping[str, AssetKey]] = None,
        key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,
        internal_asset_deps: Optional[Mapping[str, set[AssetKey]]] = None,
        partitions_def: Optional[PartitionsDefinition] = None,
        partition_mappings: Optional[Mapping[str, PartitionMapping]] = None,
        resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,
        group_name: Optional[str] = None,
        group_names_by_output_name: Optional[Mapping[str, Optional[str]]] = None,
        descriptions_by_output_name: Optional[Mapping[str, str]] = None,
        metadata_by_output_name: Optional[Mapping[str, Optional[ArbitraryMetadataMapping]]] = None,
        tags_by_output_name: Optional[Mapping[str, Optional[Mapping[str, str]]]] = None,
        legacy_freshness_policies_by_output_name: Optional[
            Mapping[str, Optional[LegacyFreshnessPolicy]]
        ] = None,
        code_versions_by_output_name: Optional[Mapping[str, Optional[str]]] = None,
        automation_conditions_by_output_name: Optional[
            Mapping[str, Optional[AutomationCondition]]
        ] = None,
        backfill_policy: Optional[BackfillPolicy] = None,
        can_subset: bool = False,
        check_specs: Optional[Sequence[AssetCheckSpec]] = None,
        owners_by_output_name: Optional[Mapping[str, Sequence[str]]] = None,
        hook_defs: Optional[AbstractSet[HookDefinition]] = None,
    ) -> "AssetsDefinition":
        """Shared implementation behind ``from_graph`` and ``from_op``.

        Wraps a NodeDefinition in an AssetsDefinition by (1) inferring input/output asset-key
        mappings from the node when not supplied, (2) applying ``key_prefix``, and
        (3) translating the per-output-name attribute mappings into per-asset-key AssetSpecs.
        """
        from dagster._core.definitions.decorators.decorator_assets_definition_builder import (
            _validate_check_specs_target_relevant_asset_keys,
            create_check_specs_by_output_name,
        )
        from dagster._core.definitions.hook_definition import HookDefinition

        node_def = check.inst_param(node_def, "node_def", NodeDefinition)
        # Fill in any input/output key mappings the caller omitted, deriving them from the
        # node's own input and output names.
        keys_by_input_name = _infer_keys_by_input_names(
            node_def,
            check.opt_mapping_param(
                keys_by_input_name,
                "keys_by_input_name",
                key_type=str,
                value_type=AssetKey,
            ),
        )
        keys_by_output_name = check.opt_mapping_param(
            keys_by_output_name,
            "keys_by_output_name",
            key_type=str,
            value_type=AssetKey,
        )
        check_specs_by_output_name = create_check_specs_by_output_name(check_specs)
        keys_by_output_name = _infer_keys_by_output_names(
            node_def, keys_by_output_name or {}, check_specs_by_output_name
        )
        internal_asset_deps = check.opt_mapping_param(
            internal_asset_deps, "internal_asset_deps", key_type=str, value_type=set
        )
        resource_defs = check.opt_mapping_param(
            resource_defs, "resource_defs", key_type=str, value_type=ResourceDefinition
        )
        hook_defs = check.opt_set_param(hook_defs, "hook_defs", HookDefinition)
        # Re-key internal_asset_deps from output names to asset keys, validating that each
        # referenced output actually exists on the node.
        transformed_internal_asset_deps: dict[AssetKey, AbstractSet[AssetKey]] = {}
        if internal_asset_deps:
            for output_name, asset_keys in internal_asset_deps.items():
                if output_name not in keys_by_output_name:
                    check.failed(
                        f"output_name {output_name} specified in internal_asset_deps does not exist"
                        f" in the decorated function. Output names: {list(keys_by_output_name.keys())}.",
                    )
                transformed_internal_asset_deps[keys_by_output_name[output_name]] = asset_keys

        _validate_check_specs_target_relevant_asset_keys(
            check_specs, list(keys_by_output_name.values())
        )

        keys_by_output_name_with_prefix: dict[str, AssetKey] = {}
        key_prefix_list = [key_prefix] if isinstance(key_prefix, str) else key_prefix
        for output_name, key in keys_by_output_name.items():
            # add key_prefix to the beginning of each asset key
            key_with_key_prefix = AssetKey(
                list(filter(None, [*(key_prefix_list or []), *key.path]))
            )
            keys_by_output_name_with_prefix[output_name] = key_with_key_prefix

        T = TypeVar("T")

        def _output_dict_to_asset_dict(
            attr_by_output_name: Optional[Mapping[str, Optional[T]]],
        ) -> Optional[Mapping[AssetKey, T]]:
            # Translate an output-name-keyed mapping to a (prefixed) asset-key-keyed mapping,
            # dropping None values. Returns None when there is nothing to translate.
            if not attr_by_output_name:
                return None
            return {
                keys_by_output_name_with_prefix[output_name]: attr
                for output_name, attr in attr_by_output_name.items()
                if attr is not None
            }

        check.param_invariant(
            group_name is None or group_names_by_output_name is None,
            "group_name",
            "Cannot use both group_name and group_names_by_output_name",
        )
        # A single group_name applies to every asset; otherwise use the per-output mapping.
        if group_name is not None:
            group_names_by_key = {
                asset_key: group_name for asset_key in keys_by_output_name_with_prefix.values()
            }
        elif group_names_by_output_name:
            group_names_by_key = _output_dict_to_asset_dict(group_names_by_output_name)
        else:
            group_names_by_key = None

        # Fold all per-output attributes into AssetSpecs keyed by (prefixed) asset key.
        specs = _asset_specs_from_attr_key_params(
            all_asset_keys=set(keys_by_output_name_with_prefix.values()),
            keys_by_input_name=keys_by_input_name,
            deps_by_asset_key=transformed_internal_asset_deps or None,
            partition_mappings=(
                {
                    keys_by_input_name[input_name]: partition_mapping
                    for input_name, partition_mapping in partition_mappings.items()
                }
                if partition_mappings
                else None
            ),
            tags_by_key=_output_dict_to_asset_dict(tags_by_output_name),
            owners_by_key=_output_dict_to_asset_dict(owners_by_output_name),
            group_names_by_key=group_names_by_key,
            legacy_freshness_policies_by_key=_output_dict_to_asset_dict(
                legacy_freshness_policies_by_output_name
            ),
            automation_conditions_by_key=_output_dict_to_asset_dict(
                automation_conditions_by_output_name
            ),
            metadata_by_key=_output_dict_to_asset_dict(metadata_by_output_name),
            descriptions_by_key=_output_dict_to_asset_dict(descriptions_by_output_name),
            code_versions_by_key=_output_dict_to_asset_dict(code_versions_by_output_name),
            partitions_def=partitions_def,
        )

        return AssetsDefinition.dagster_internal_init(
            keys_by_input_name=keys_by_input_name,
            keys_by_output_name=keys_by_output_name_with_prefix,
            node_def=node_def,
            resource_defs=resource_defs,
            hook_defs=hook_defs,
            backfill_policy=check.opt_inst_param(
                backfill_policy, "backfill_policy", BackfillPolicy
            ),
            can_subset=can_subset,
            selected_asset_keys=None,  # node has no subselection info
            check_specs_by_output_name=check_specs_by_output_name,
            selected_asset_check_keys=None,
            is_subset=False,
            specs=specs,
            execution_type=AssetExecutionType.MATERIALIZATION,
        )
@public
@property
def can_subset(self) -> bool:
"""bool: If True, indicates that this AssetsDefinition may materialize any subset of its
asset keys in a given computation (as opposed to being required to materialize all asset
keys).
"""
return self._computation.can_subset if self._computation else False
    @property
    def computation(self) -> Optional[AssetGraphComputation]:
        """Optional[AssetGraphComputation]: The computation backing this definition, or None
        when no computation is attached (unexecutable assets).
        """
        return self._computation
    @property
    def specs(self) -> Iterable[AssetSpec]:
        """Iterable[AssetSpec]: The AssetSpec for every asset in this definition."""
        return self._specs_by_key.values()
    @property
    def specs_by_key(self) -> Mapping[AssetKey, AssetSpec]:
        """Mapping[AssetKey, AssetSpec]: The AssetSpecs in this definition, keyed by asset key."""
        return self._specs_by_key
@public
@property
def group_names_by_key(self) -> Mapping[AssetKey, str]:
"""Mapping[AssetKey, str]: Returns a mapping from the asset keys in this AssetsDefinition
to the group names assigned to them. If there is no assigned group name for a given AssetKey,
it will not be present in this dictionary.
"""
return {key: check.not_none(spec.group_name) for key, spec in self._specs_by_key.items()}
@public
@property
def descriptions_by_key(self) -> Mapping[AssetKey, str]:
"""Mapping[AssetKey, str]: Returns a mapping from the asset keys in this AssetsDefinition
to the descriptions assigned to them. If there is no assigned description for a given AssetKey,
it will not be present in this dictionary.
"""
return {
key: spec.description
for key, spec in self._specs_by_key.items()
if spec.description is not None
}
@public
@property
def op(self) -> OpDefinition:
"""OpDefinition: Returns the OpDefinition that is used to materialize the assets in this
AssetsDefinition.
"""
node_def = self.node_def
check.invariant(
isinstance(node_def, OpDefinition),
"The NodeDefinition for this AssetsDefinition is not of type OpDefinition.",
)
return cast("OpDefinition", node_def)
    @public
    @property
    def node_def(self) -> NodeDefinition:
        """NodeDefinition: Returns the OpDefinition or GraphDefinition that is used to materialize
        the assets in this AssetsDefinition.

        Fails a check when this definition has no computation attached.
        """
        return check.not_none(self._computation, "This AssetsDefinition has no node_def").node_def
@public
@cached_property
def asset_deps(self) -> Mapping[AssetKey, AbstractSet[AssetKey]]:
"""Maps assets that are produced by this definition to assets that they depend on. The
dependencies can be either "internal", meaning that they refer to other assets that are
produced by this definition, or "external", meaning that they refer to assets that aren't
produced by this definition.
"""
return {
key: {dep.asset_key for dep in spec.deps} for key, spec in self._specs_by_key.items()
}
    @property
    def input_names(self) -> Iterable[str]:
        """Iterable[str]: The set of input names of the underlying NodeDefinition for this
        AssetsDefinition (restricted to inputs that feed selected assets/checks).
        """
        return self.keys_by_input_name.keys()
@public
@property
def key(self) -> AssetKey:
"""AssetKey: The asset key associated with this AssetsDefinition. If this AssetsDefinition
has more than one asset key, this will produce an error.
"""
check.invariant(
len(self.keys) == 1,
"Tried to retrieve asset key from an assets definition with multiple asset keys: "
+ ", ".join([str(ak.to_string()) for ak in self.keys]),
)
return next(iter(self.keys))
    @public
    @property
    def resource_defs(self) -> Mapping[str, ResourceDefinition]:
        """Mapping[str, ResourceDefinition]: A mapping from resource name to ResourceDefinition for
        the resources bound to this AssetsDefinition. Returned as a fresh dict copy.
        """
        return dict(self._resource_defs)
    @property
    def hook_defs(self) -> AbstractSet[HookDefinition]:
        """AbstractSet[HookDefinition]: A set of hook definitions that are bound to this
        AssetsDefinition. These hooks will be executed when the assets in this AssetsDefinition
        are materialized.
        """
        return self._hook_defs
@public
@property
def keys(self) -> AbstractSet[AssetKey]:
"""AbstractSet[AssetKey]: The asset keys associated with this AssetsDefinition."""
if self._computation:
return self._computation.selected_asset_keys
else:
return self._specs_by_key.keys()
@property
def has_keys(self) -> bool:
return len(self.keys) > 0
@property
def has_check_keys(self) -> bool:
return len(self.check_keys) > 0
@public
@property
def dependency_keys(self) -> Iterable[AssetKey]:
"""Iterable[AssetKey]: The asset keys which are upstream of any asset included in this
AssetsDefinition.
"""
# the input asset keys that are directly upstream of a selected asset key
return {dep.asset_key for key in self.keys for dep in self._specs_by_key[key].deps}
    @property
    def node_keys_by_output_name(self) -> Mapping[str, AssetKey]:
        """AssetKey for each output on the underlying NodeDefinition (unfiltered by any
        subselection); empty when no computation is attached.
        """
        return self._computation.keys_by_output_name if self._computation else {}
    @property
    def node_keys_by_input_name(self) -> Mapping[str, AssetKey]:
        """AssetKey for each input on the underlying NodeDefinition (unfiltered by any
        subselection); empty when no computation is attached.
        """
        return self._computation.keys_by_input_name if self._computation else {}
@property
def input_names_by_node_key(self) -> Mapping[AssetKey, str]:
return {key: input_name for input_name, key in self.node_keys_by_input_name.items()}
    @property
    def node_check_specs_by_output_name(self) -> Mapping[str, AssetCheckSpec]:
        """AssetCheckSpec for each output on the underlying NodeDefinition (unfiltered by any
        subselection).
        """
        return self._check_specs_by_output_name
@property
def check_specs_by_output_name(self) -> Mapping[str, AssetCheckSpec]:
return {
name: spec
for name, spec in self._check_specs_by_output_name.items()
if self._computation is None or spec.key in self._computation.selected_asset_check_keys
}
    def get_spec_for_check_key(self, asset_check_key: AssetCheckKey) -> AssetCheckSpec:
        """Return the AssetCheckSpec for the given check key (KeyError if absent)."""
        return self._check_specs_by_key[asset_check_key]
@property
def keys_by_output_name(self) -> Mapping[str, AssetKey]:
return {
name: key for name, key in self.node_keys_by_output_name.items() if key in self.keys
}
@cached_property
def entity_keys_by_output_name(self) -> Mapping[str, EntityKey]:
return merge_dicts(
self.keys_by_output_name,
{
output_name: spec.key
for output_name, spec in self.check_specs_by_output_name.items()
},
)
    @cached_property
    def output_names_by_entity_key(self) -> Mapping[EntityKey, str]:
        """Inverse of ``entity_keys_by_output_name``: output name for each asset/check key."""
        return reverse_dict(self.entity_keys_by_output_name)
@property
def asset_and_check_keys(self) -> AbstractSet[EntityKey]:
return set(self.keys).union(self.check_keys)
@cached_property
def keys_by_input_name(self) -> Mapping[str, AssetKey]:
upstream_keys = {
*(dep.asset_key for key in self.keys for dep in self._specs_by_key[key].deps),
*(spec.asset_key for spec in self.check_specs if spec.asset_key not in self.keys),
*(
dep.asset_key
for spec in self.check_specs
for dep in spec.additional_deps
if dep.asset_key not in self.keys
),
}
return {
name: key for name, key in self.node_keys_by_input_name.items() if key in upstream_keys
}
@property
def legacy_freshness_policies_by_key(self) -> Mapping[AssetKey, LegacyFreshnessPolicy]:
return {
key: spec.legacy_freshness_policy
for key, spec in self._specs_by_key.items()
if spec.legacy_freshness_policy
}
@property
def auto_materialize_policies_by_key(
self,
) -> Mapping[AssetKey, AutoMaterializePolicy]:
return {
key: spec.auto_materialize_policy
for key, spec in self._specs_by_key.items()
if spec.auto_materialize_policy
}
@property
def automation_conditions_by_key(self) -> Mapping[AssetKey, AutomationCondition]:
return {
key: spec.automation_condition
for key, spec in self._specs_by_key.items()
if spec.automation_condition
}
@cached_method
def get_upstream_input_keys(self, keys: frozenset[AssetKey]) -> AbstractSet[AssetKey]:
"""Returns keys that are directly upstream of the provided keys and are inputs of this asset."""
direct_upstreams = {dep.asset_key for key in keys for dep in self._specs_by_key[key].deps}
return direct_upstreams - set(self.node_keys_by_output_name.values())
@cached_method
def get_checks_targeting_keys(self, keys: frozenset[AssetKey]) -> AbstractSet[AssetCheckKey]:
"""Returns checks defined on this AssetsDefinition for the provided keys."""
check_keys = {
check_spec.key for check_spec in self.node_check_specs_by_output_name.values()
}
return {key for key in check_keys if key.asset_key in keys}
# Applies only to external observable assets. Can be removed when we fold
# `auto_observe_interval_minutes` into auto-materialize policies.
@property
def auto_observe_interval_minutes(self) -> Optional[float]:
value = self._get_external_asset_metadata_value(
SYSTEM_METADATA_KEY_AUTO_OBSERVE_INTERVAL_MINUTES
)
if not (value is None or isinstance(value, (int, float))):
check.failed(
f"Expected auto_observe_interval_minutes to be a number or None, not {value}"
)
return value
# Applies to AssetsDefinition that were auto-created because some asset referenced a key as a
# dependency, but no definition was provided for that key.
@property
def is_auto_created_stub(self) -> bool:
return (
self._get_external_asset_metadata_value(SYSTEM_METADATA_KEY_AUTO_CREATED_STUB_ASSET)
is not None
)
def _get_external_asset_metadata_value(self, metadata_key: str) -> object:
first_key = next(iter(self.keys), None)
if not first_key:
return None
return (self._specs_by_key[first_key].metadata or {}).get(metadata_key)
    @property
    def backfill_policy(self) -> Optional[BackfillPolicy]:
        """Optional[BackfillPolicy]: The backfill policy of the attached computation, if any."""
        return self._computation.backfill_policy if self._computation else None
@public
@cached_property
def partitions_def(self) -> Optional[PartitionsDefinition]:
"""Optional[PartitionsDefinition]: The PartitionsDefinition for this AssetsDefinition (if any)."""
partitions_defs = {
spec.partitions_def for spec in self.specs if spec.partitions_def is not None
}
if len(partitions_defs) == 1:
return next(iter(partitions_defs))
elif len(partitions_defs) == 0:
return None
else:
check.failed(
"Different assets within this AssetsDefinition have different PartitionsDefinitions"
)
@property
def metadata_by_key(self) -> Mapping[AssetKey, ArbitraryMetadataMapping]:
return {
key: spec.metadata
for key, spec in self._specs_by_key.items()
if spec.metadata is not None
}
    @property
    def tags_by_key(self) -> Mapping[AssetKey, Mapping[str, str]]:
        """Mapping[AssetKey, Mapping[str, str]]: Tags per asset key (empty mapping when a spec
        has no tags).
        """
        return {key: spec.tags or {} for key, spec in self._specs_by_key.items()}
    @property
    def code_versions_by_key(self) -> Mapping[AssetKey, Optional[str]]:
        """Mapping[AssetKey, Optional[str]]: Code version per asset key (None when unset)."""
        return {key: spec.code_version for key, spec in self._specs_by_key.items()}
    @property
    def owners_by_key(self) -> Mapping[AssetKey, Sequence[str]]:
        """Mapping[AssetKey, Sequence[str]]: Owners per asset key (empty list when unset)."""
        return {key: spec.owners or [] for key, spec in self._specs_by_key.items()}
    @public
    def get_partition_mapping(self, in_asset_key: AssetKey) -> Optional[PartitionMapping]:
        """Returns the partition mapping between keys in this AssetsDefinition and a given input
        asset key, or None if no mapping was declared for that key.
        """
        return self._partition_mappings.get(in_asset_key)
    @public
    @property
    def check_specs(self) -> Iterable[AssetCheckSpec]:
        """Returns the asset check specs defined on this AssetsDefinition, i.e. the checks that can
        be executed while materializing the assets.

        Returns:
            Iterable[AssetCheckSpec]:
        """
        return self.check_specs_by_output_name.values()
@property
def check_keys(self) -> AbstractSet[AssetCheckKey]:
"""Returns the selected asset checks associated by this AssetsDefinition.
Returns:
AbstractSet[Tuple[AssetKey, str]]: The selected asset checks. An asset check is
identified by the asset key and the name of the check.
"""
if self._computation:
return self._computation.selected_asset_check_keys
else:
check.invariant(not self._check_specs_by_output_name)
return set()
@property
def check_key(self) -> AssetCheckKey:
check.invariant(
len(self.check_keys) == 1,
"Tried to retrieve asset check key from an assets definition with more or less than 1 asset check key: "
+ ", ".join([ak.to_user_string() for ak in self.check_keys]),
)
return next(iter(self.check_keys))
@property
def execution_type(self) -> AssetExecutionType:
if self._computation is None:
return AssetExecutionType.UNEXECUTABLE
else:
return self._computation.execution_type
    @property
    def is_external(self) -> bool:
        """bool: True when this definition does not materialize its assets itself."""
        return self.execution_type != AssetExecutionType.MATERIALIZATION
    @property
    def is_observable(self) -> bool:
        """bool: True when this definition's execution type is OBSERVATION."""
        return self.execution_type == AssetExecutionType.OBSERVATION
    @property
    def is_materializable(self) -> bool:
        """bool: True when this definition's execution type is MATERIALIZATION."""
        return self.execution_type == AssetExecutionType.MATERIALIZATION
    @property
    def is_executable(self) -> bool:
        """bool: True when this definition can be executed (observed or materialized)."""
        return self.execution_type != AssetExecutionType.UNEXECUTABLE
    def get_partition_mapping_for_dep(self, dep_key: AssetKey) -> Optional[PartitionMapping]:
        """Return the declared partition mapping for the given dependency key, if any."""
        return self._partition_mappings.get(dep_key)
    def infer_partition_mapping(
        self,
        asset_key: AssetKey,
        upstream_asset_key: AssetKey,
        upstream_partitions_def: Optional[PartitionsDefinition],
    ) -> PartitionMapping:
        """Resolve the partition mapping between ``asset_key`` and ``upstream_asset_key``.

        Uses the explicitly declared mapping when one exists, otherwise defers to
        ``infer_partition_mapping`` to pick a default based on the two partitions definitions.
        """
        # Inference may touch preview/beta APIs; silence the associated warnings.
        with disable_dagster_warnings():
            partition_mapping = self._partition_mappings.get(upstream_asset_key)
            return infer_partition_mapping(
                partition_mapping,
                self.specs_by_key[asset_key].partitions_def,
                upstream_partitions_def,
            )
    def has_output_for_asset_key(self, key: AssetKey) -> bool:
        """Return True when the underlying computation has an output for the given asset key."""
        return self._computation is not None and key in self._computation.output_names_by_key
def get_output_name_for_asset_key(self, key: AssetKey) -> str:
if (
self._computation is None
or key not in self._computation.output_names_by_key
or key not in self.keys
):
raise DagsterInvariantViolationError(
f"Asset key {key.to_user_string()} not found in AssetsDefinition"
)
else:
return self._computation.output_names_by_key[key]
def get_output_name_for_asset_check_key(self, key: AssetCheckKey) -> str:
for output_name, spec in self._check_specs_by_output_name.items():
if key == spec.key:
return output_name
raise DagsterInvariantViolationError(
f"Asset check key {key.to_user_string()} not found in AssetsDefinition"
)
def get_op_def_for_asset_key(self, key: AssetKey) -> Optional[OpDefinition]:
"""If this is an op-backed asset, returns the op def. If it's a graph-backed asset,
returns the op def within the graph that produces the given asset key.
"""
if self._computation is None:
return None
output_name = self.get_output_name_for_asset_key(key)
return self.node_def.resolve_output_to_origin_op_def(output_name)
    def coerce_to_checks_def(self) -> "AssetChecksDefinition":
        """Convert this definition into an AssetChecksDefinition.

        Only valid for definitions that consist solely of asset checks; raises
        DagsterInvalidDefinitionError when non-check assets are present or when
        there are no checks at all.
        """
        from dagster._core.definitions.asset_checks.asset_checks_definition import (
            AssetChecksDefinition,
            has_only_asset_checks,
        )

        if not has_only_asset_checks(self):
            raise DagsterInvalidDefinitionError(
                "Cannot coerce an AssetsDefinition to an AssetChecksDefinition if it contains "
                "non-check assets."
            )
        if len(self.check_keys) == 0:
            raise DagsterInvalidDefinitionError(
                "Cannot coerce an AssetsDefinition to an AssetChecksDefinition if it contains no "
                "checks."
            )
        # Rebuild as a checks definition sharing this definition's op and specs.
        return AssetChecksDefinition.create(
            keys_by_input_name=self.keys_by_input_name,
            node_def=self.op,
            check_specs_by_output_name=self.check_specs_by_output_name,
            resource_defs=self.resource_defs,
            can_subset=self.can_subset,
        )
    def with_attributes(
        self,
        *,
        asset_key_replacements: Mapping[AssetKey, AssetKey] = {},
        group_names_by_key: Mapping[AssetKey, str] = {},
        tags_by_key: Mapping[AssetKey, Mapping[str, str]] = {},
        legacy_freshness_policy: Optional[
            Union[LegacyFreshnessPolicy, Mapping[AssetKey, LegacyFreshnessPolicy]]
        ] = None,
        automation_condition: Optional[
            Union[AutomationCondition, Mapping[AssetKey, AutomationCondition]]
        ] = None,
        backfill_policy: Optional[BackfillPolicy] = None,
        hook_defs: Optional[AbstractSet[HookDefinition]] = None,
        metadata_by_key: Optional[
            Mapping[Union[AssetKey, AssetCheckKey], ArbitraryMetadataMapping]
        ] = None,
    ) -> "AssetsDefinition":
        """Return a copy of this definition with selected attributes replaced.

        Mapping-valued arguments apply per asset key; scalar values apply to
        every spec. Raises DagsterInvalidDefinitionError when a replacement
        collides with a value already set on a spec.

        NOTE: the mutable ``{}`` defaults are safe here because the arguments
        are only read, never mutated.
        """
        conflicts_by_attr_name: dict[str, set[AssetKey]] = defaultdict(set)
        replaced_specs = []
        for key, spec in self._specs_by_key.items():
            replace_dict = {}

            # Closure over the loop's ``key``/``spec``/``replace_dict``; records
            # a conflict when the spec already carries a non-default value that
            # the caller is trying to overwrite.
            def update_replace_dict_and_conflicts(
                new_value: Union[Mapping[AssetKey, object], object],
                attr_name: str,
                default_value: object = None,
            ) -> None:
                if isinstance(new_value, Mapping):
                    if key in new_value:
                        replace_dict[attr_name] = new_value[key]
                elif new_value:
                    replace_dict[attr_name] = new_value
                old_value = getattr(spec, attr_name)
                if old_value and old_value != default_value and attr_name in replace_dict:
                    conflicts_by_attr_name[attr_name].add(key)

            update_replace_dict_and_conflicts(
                new_value=automation_condition, attr_name="automation_condition"
            )
            update_replace_dict_and_conflicts(
                new_value=legacy_freshness_policy, attr_name="legacy_freshness_policy"
            )
            update_replace_dict_and_conflicts(new_value=tags_by_key, attr_name="tags")
            update_replace_dict_and_conflicts(
                new_value=group_names_by_key,
                attr_name="group_name",
                default_value=DEFAULT_GROUP_NAME,
            )
            if metadata_by_key and key in metadata_by_key:
                replace_dict["metadata"] = metadata_by_key[key]
            if key in asset_key_replacements:
                replace_dict["key"] = asset_key_replacements[key]
            if asset_key_replacements:
                new_deps = []
                for dep in spec.deps:
                    # ``get`` falls back to the original key, so
                    # ``replacement_key`` can never be None; the else branch
                    # below is effectively dead but preserved as-is.
                    replacement_key = asset_key_replacements.get(dep.asset_key, dep.asset_key)
                    if replacement_key is not None:
                        new_deps.append(dep._replace(asset_key=replacement_key))
                    else:
                        new_deps.append(dep)
                replace_dict["deps"] = new_deps
            replaced_specs.append(replace(spec, **replace_dict))
        # Raises on the first conflicting attribute encountered, so at most one
        # attribute's conflicts are reported per call.
        for attr_name, conflicting_asset_keys in conflicts_by_attr_name.items():
            raise DagsterInvalidDefinitionError(
                f"{attr_name} already exists on assets"
                f" {', '.join(asset_key.to_user_string() for asset_key in conflicting_asset_keys)}"
            )
        # Re-key check specs and apply metadata replacements to them as well.
        check_specs_by_output_name = {}
        for output_name, check_spec in self.node_check_specs_by_output_name.items():
            updated_check_spec = check_spec
            if check_spec.asset_key in asset_key_replacements:
                updated_check_spec = updated_check_spec.replace_key(
                    key=check_spec.key.replace_asset_key(
                        asset_key_replacements[check_spec.asset_key]
                    )
                )
            if metadata_by_key and check_spec.key in metadata_by_key:
                updated_check_spec = updated_check_spec.with_metadata(
                    metadata_by_key[check_spec.key]
                )
            check_specs_by_output_name[output_name] = updated_check_spec
        selected_asset_check_keys = {
            check_key.replace_asset_key(
                asset_key_replacements.get(check_key.asset_key, check_key.asset_key)
            )
            for check_key in self.check_keys
        }
        # Merge the replacements over the current attributes and rebuild.
        replaced_attributes = dict(
            keys_by_input_name={
                input_name: asset_key_replacements.get(key, key)
                for input_name, key in self.node_keys_by_input_name.items()
            },
            keys_by_output_name={
                output_name: asset_key_replacements.get(key, key)
                for output_name, key in self.node_keys_by_output_name.items()
            },
            selected_asset_keys={asset_key_replacements.get(key, key) for key in self.keys},
            backfill_policy=backfill_policy if backfill_policy else self.backfill_policy,
            is_subset=self.is_subset,
            check_specs_by_output_name=check_specs_by_output_name,
            selected_asset_check_keys=selected_asset_check_keys,
            specs=replaced_specs,
            hook_defs=hook_defs if hook_defs else self.hook_defs,
        )
        merged_attrs = merge_dicts(self.get_attributes_dict(), replaced_attributes)
        return self.__class__.dagster_internal_init(**merged_attrs)
def map_asset_specs(self, fn: Callable[[AssetSpec], AssetSpec]) -> "AssetsDefinition":
mapped_specs = []
for spec in self.specs:
mapped_spec = fn(spec)
if mapped_spec.key != spec.key:
raise DagsterInvalidDefinitionError(
f"Asset key {spec.key.to_user_string()} was changed to "
f"{mapped_spec.key.to_user_string()}. Mapping function must not change keys."
)
mapped_specs.append(mapped_spec)
return replace_specs_on_asset(self, mapped_specs)
    def subset_for(
        self,
        selected_asset_keys: AbstractSet[AssetKey],
        selected_asset_check_keys: Optional[AbstractSet[AssetCheckKey]],
    ) -> "AssetsDefinition":
        """Create a subset of this AssetsDefinition that will only materialize the assets and checks
        in the selected set.

        Args:
            selected_asset_keys (AbstractSet[AssetKey]): The total set of asset keys
            selected_asset_check_keys (AbstractSet[AssetCheckKey]): The selected asset checks

        Raises:
            Error (via ``check.not_none``) when this definition has no
            computation — unexecutable definitions cannot be subset.
        """
        # Delegate the subsetting to the computation; the result shares this
        # definition's attributes but carries the subset node_def/selection and
        # is flagged as a subset.
        subsetted_computation = check.not_none(self._computation).subset_for(
            selected_asset_keys, selected_asset_check_keys
        )
        return self.__class__.dagster_internal_init(
            **{
                **self.get_attributes_dict(),
                "node_def": subsetted_computation.node_def,
                "selected_asset_keys": subsetted_computation.selected_asset_keys,
                "selected_asset_check_keys": subsetted_computation.selected_asset_check_keys,
                "is_subset": True,
            }
        )
@property
def is_subset(self) -> bool:
return self._computation.is_subset if self._computation else False
@public
def to_source_assets(self) -> Sequence[SourceAsset]:
"""Returns a SourceAsset for each asset in this definition.
Each produced SourceAsset will have the same key, metadata, io_manager_key, etc. as the
corresponding asset
"""
return [
self._output_to_source_asset(output_name)
for output_name in self.keys_by_output_name.keys()
]
    @public
    def to_source_asset(self, key: Optional[CoercibleToAssetKey] = None) -> SourceAsset:
        """Returns a representation of this asset as a :py:class:`SourceAsset`.

        If this is a multi-asset, the "key" argument allows selecting which asset to return a
        SourceAsset representation of.

        Args:
            key (Optional[Union[str, Sequence[str], AssetKey]]]): If this is a multi-asset, select
                which asset to return a SourceAsset representation of. If not a multi-asset, this
                can be left as None.

        Returns:
            SourceAsset
        """
        # A multi-asset requires an explicit key to disambiguate.
        if len(self.keys) > 1:
            check.invariant(
                key is not None,
                "The 'key' argument is required when there are multiple assets to choose from",
            )

        if key is not None:
            resolved_key = AssetKey.from_coercible(key)
            check.invariant(
                resolved_key in self.keys,
                f"Key {resolved_key} not found in AssetsDefinition",
            )
        else:
            resolved_key = self.key

        # Exactly one output must map to the resolved key.
        output_names = [
            output_name
            for output_name, ak in self.keys_by_output_name.items()
            if ak == resolved_key
        ]
        check.invariant(len(output_names) == 1)
        return self._output_to_source_asset(output_names[0])
    def _output_to_source_asset(self, output_name: str) -> SourceAsset:
        """Build a SourceAsset mirroring the asset bound to *output_name*.

        Copies metadata/description/partitions/group/tags from the asset's
        spec, and the io_manager key from the origin output definition.
        """
        with disable_dagster_warnings():
            output_def = self.node_def.resolve_output_to_origin(
                output_name, NodeHandle(self.node_def.name, parent=None)
            )[0]
            key = self.node_keys_by_output_name[output_name]
            spec = self.specs_by_key[key]
            return SourceAsset.dagster_internal_init(
                key=key,
                metadata=spec.metadata,
                io_manager_key=output_def.io_manager_key,
                description=spec.description,
                resource_defs=self.resource_defs,
                partitions_def=spec.partitions_def,
                group_name=spec.group_name,
                tags=spec.tags,
                io_manager_def=None,
                observe_fn=None,
                op_tags=None,
                automation_condition=None,
                auto_observe_interval_minutes=None,
                legacy_freshness_policy=None,
                _required_resource_keys=None,
            )
@public
def get_asset_spec(self, key: Optional[AssetKey] = None) -> AssetSpec:
"""Returns a representation of this asset as an :py:class:`AssetSpec`.
If this is a multi-asset, the "key" argument allows selecting which asset to return the
spec for.
Args:
key (Optional[AssetKey]): If this is a multi-asset, select which asset to return its
AssetSpec. If not a multi-asset, this can be left as None.
Returns:
AssetSpec
"""
return self._specs_by_key[key or self.key]
def get_io_manager_key_for_asset_key(self, key: AssetKey) -> str:
if self._computation is None:
return self._specs_by_key[key].metadata.get(
SYSTEM_METADATA_KEY_IO_MANAGER_KEY, DEFAULT_IO_MANAGER_KEY
)
else:
if SYSTEM_METADATA_KEY_IO_MANAGER_KEY in self._specs_by_key[key].metadata:
return self._specs_by_key[key].metadata[SYSTEM_METADATA_KEY_IO_MANAGER_KEY]
check.invariant(
SYSTEM_METADATA_KEY_IO_MANAGER_KEY not in self._specs_by_key[key].metadata
)
output_name = self.get_output_name_for_asset_key(key)
return self.node_def.resolve_output_to_origin(
output_name, NodeHandle(self.node_def.name, parent=None)
)[0].io_manager_key
    def get_resource_requirements(self) -> Iterator[ResourceRequirement]:
        """Yield the resource requirements of this definition.

        Executable definitions delegate to the underlying op/graph plus any
        attached hooks; unexecutable ones only require io managers for their
        keys. Requirements from ``resource_defs`` are always appended last.
        """
        from itertools import chain

        from dagster._core.definitions.graph_definition import GraphDefinition

        if self.is_executable:
            if isinstance(self.node_def, GraphDefinition):
                yield from chain(
                    self.node_def.get_resource_requirements(
                        asset_layer=None,
                    ),
                    (
                        req
                        for hook_def in self._hook_defs
                        for req in hook_def.get_resource_requirements(
                            attached_to=f"asset '{self.node_def.name}'",
                        )
                    ),
                )
            elif isinstance(self.node_def, OpDefinition):
                # Same as the graph branch, but op requirements also take a
                # ``handle`` argument.
                yield from chain(
                    self.node_def.get_resource_requirements(
                        handle=None,
                        asset_layer=None,
                    ),
                    (
                        req
                        for hook_def in self._hook_defs
                        for req in hook_def.get_resource_requirements(
                            attached_to=f"asset '{self.node_def.name}'",
                        )
                    ),
                )
        else:
            for key in self.keys:
                # This matches how SourceAsset emits requirements except we emit
                # ExternalAssetIOManagerRequirement instead of SourceAssetIOManagerRequirement
                yield ExternalAssetIOManagerRequirement(
                    key=self.get_io_manager_key_for_asset_key(key),
                    asset_key=key.to_string(),
                )
        for source_key, resource_def in self.resource_defs.items():
            yield from resource_def.get_resource_requirements(source_key=source_key)
@public
@property
def required_resource_keys(self) -> set[str]:
"""Set[str]: The set of keys for resources that must be provided to this AssetsDefinition."""
return {
requirement.key
for requirement in self.get_resource_requirements()
if requirement
if isinstance(requirement, ResourceKeyRequirement)
}
def __str__(self):
if len(self.keys) == 1:
return f"AssetsDefinition with key {self.key.to_string()}"
else:
asset_keys = ", ".join(sorted([asset_key.to_string() for asset_key in self.keys]))
return f"AssetsDefinition with keys {asset_keys}"
@cached_property
def unique_id(self) -> str:
return unique_id_from_asset_and_check_keys(itertools.chain(self.keys, self.check_keys))
    def with_resources(self, resource_defs: Mapping[str, ResourceDefinition]) -> "AssetsDefinition":
        """Return a copy of this definition with *resource_defs* merged in.

        Merging is delegated to ``merge_resource_defs``; presumably it
        validates the merged mapping against this definition's requirements —
        confirm against that helper.
        """
        attributes_dict = self.get_attributes_dict()
        attributes_dict["resource_defs"] = merge_resource_defs(
            old_resource_defs=self.resource_defs,
            resource_defs_to_merge_in=resource_defs,
            requires_resources=self,
        )
        # Suppress deprecation warnings triggered while re-constructing.
        with disable_dagster_warnings():
            return self.__class__(**attributes_dict)
@public
def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> "AssetsDefinition":
"""Apply a set of hooks to all op instances within the asset."""
from dagster._core.definitions.hook_definition import HookDefinition
hook_defs = check.set_param(hook_defs, "hook_defs", of_type=HookDefinition)
return self.with_attributes(hook_defs=(hook_defs | self.hook_defs))
def get_attributes_dict(self) -> dict[str, Any]:
return dict(
keys_by_input_name=self.node_keys_by_input_name,
keys_by_output_name=self.node_keys_by_output_name,
node_def=self._computation.node_def if self._computation else None,
selected_asset_keys=self.keys,
can_subset=self.can_subset,
resource_defs=self._resource_defs,
hook_defs=self._hook_defs,
backfill_policy=self.backfill_policy,
check_specs_by_output_name=self._check_specs_by_output_name,
selected_asset_check_keys=self.check_keys,
specs=self.specs,
is_subset=self.is_subset,
execution_type=self._computation.execution_type if self._computation else None,
)
def _infer_keys_by_input_names(
    node_def: NodeDefinition, keys_by_input_name: Mapping[str, AssetKey]
) -> Mapping[str, AssetKey]:
    """Map every input of *node_def* to an asset key.

    Explicit entries in *keys_by_input_name* (which, when provided, must cover
    exactly the node's inputs) win; any unmapped input name becomes an
    AssetKey made from that name.
    """
    expected_names = [input_def.name for input_def in node_def.input_defs]
    if keys_by_input_name:
        provided_names = set(keys_by_input_name.keys())
        check.invariant(
            provided_names == set(expected_names),
            "The set of input names keys specified in the keys_by_input_name argument must "
            f"equal the set of asset keys inputted by '{node_def.name}'. \n"
            f"keys_by_input_name keys: {provided_names} \n"
            f"expected keys: {expected_names}",
        )
    # If asset key is not supplied in keys_by_input_name, create asset key
    # from input name
    return {
        input_name: keys_by_input_name.get(input_name, AssetKey([input_name]))
        for input_name in expected_names
    }
def _infer_keys_by_output_names(
    node_def: NodeDefinition,
    keys_by_output_name: Mapping[str, AssetKey],
    check_specs_by_output_name: Mapping[str, AssetCheckSpec],
) -> Mapping[str, AssetKey]:
    """Infer an asset key for every asset (non-check) output of *node_def*.

    Explicit entries in *keys_by_output_name* win; a sole default "result"
    output maps to the node's name; every other unmapped non-check output maps
    to a key made from the output name.
    """
    output_names = [output_def.name for output_def in node_def.output_defs]
    if keys_by_output_name:
        # Asset outputs and check outputs must be disjoint, and together they
        # must cover exactly the node's outputs.
        overlapping_asset_and_check_outputs = set(keys_by_output_name.keys()) & set(
            check_specs_by_output_name.keys()
        )
        check.invariant(
            not overlapping_asset_and_check_outputs,
            "The set of output names associated with asset keys and checks overlap:"
            f" {overlapping_asset_and_check_outputs}",
        )

        union_asset_and_check_outputs = set(keys_by_output_name.keys()) | set(
            check_specs_by_output_name.keys()
        )
        check.invariant(
            union_asset_and_check_outputs == set(output_names),
            "The union of the set of output names keys specified in the keys_by_output_name and"
            " check_specs_by_output_name arguments must equal the set of asset keys outputted by"
            f" {node_def.name}. union keys:"
            f" {union_asset_and_check_outputs} \nexpected keys: {set(output_names)}",
        )
    inferred_keys_by_output_names: dict[str, AssetKey] = {
        output_name: asset_key for output_name, asset_key in keys_by_output_name.items()
    }
    if (
        len(output_names) == 1
        and output_names[0] not in keys_by_output_name
        and output_names[0] not in check_specs_by_output_name
        and output_names[0] == "result"
    ):
        # If there is only one output and the name is the default "result", generate asset key
        # from the name of the node
        inferred_keys_by_output_names[output_names[0]] = AssetKey([node_def.name])

    # Fall back to an output-name-derived key for any remaining asset output.
    for output_name in output_names:
        if (
            output_name not in inferred_keys_by_output_names
            and output_name not in check_specs_by_output_name
        ):
            inferred_keys_by_output_names[output_name] = AssetKey([output_name])
    return inferred_keys_by_output_names
def _validate_graph_def(graph_def: "GraphDefinition", prefix: Optional[Sequence[str]] = None):
    """Ensure that all leaf nodes are mapped to graph outputs.

    Recurses into nested graphs; *prefix* accumulates the graph-name path used
    in error messages. Fails via ``check.invariant`` when a leaf node's output
    is not mapped to a graph output.
    """
    from dagster._core.definitions.graph_definition import GraphDefinition, create_adjacency_lists

    prefix = check.opt_sequence_param(prefix, "prefix")

    # recursively validate any sub-graphs
    for inner_node_def in graph_def.node_defs:
        if isinstance(inner_node_def, GraphDefinition):
            _validate_graph_def(inner_node_def, prefix=[*prefix, graph_def.name])

    # leaf nodes have no downstream nodes
    forward_edges, _ = create_adjacency_lists(graph_def.nodes, graph_def.dependency_structure)
    leaf_nodes = {
        node_name for node_name, downstream_nodes in forward_edges.items() if not downstream_nodes
    }

    # set of nodes that have outputs mapped to a graph output
    mapped_output_nodes = {
        output_mapping.maps_from.node_name for output_mapping in graph_def.output_mappings
    }

    # leaf nodes which do not have an associated mapped output
    unmapped_leaf_nodes = {".".join([*prefix, node]) for node in leaf_nodes - mapped_output_nodes}

    check.invariant(
        not unmapped_leaf_nodes,
        f"All leaf nodes within graph '{graph_def.name}' must generate outputs which are mapped"
        " to outputs of the graph, and produce assets. The following leaf node(s) are"
        f" non-asset producing ops: {unmapped_leaf_nodes}. This behavior is not currently"
        " supported because these ops are not required for the creation of the associated"
        " asset(s).",
    )
def _resolve_automation_conditions_by_output_name(
    automation_conditions_by_output_name: Optional[Mapping[str, Optional[AutomationCondition]]],
    auto_materialize_policies_by_output_name: Optional[
        Mapping[str, Optional[AutoMaterializePolicy]]
    ],
) -> Optional[Mapping[str, Optional[AutomationCondition]]]:
    """Normalize the two mutually-exclusive condition arguments into a single
    per-output-name mapping of AutomationConditions (legacy policies are
    converted).
    """
    if auto_materialize_policies_by_output_name is None:
        return automation_conditions_by_output_name
    check.param_invariant(
        automation_conditions_by_output_name is None,
        "automation_conditions_by_output_name",
        "Cannot supply both `automation_conditions_by_output_name` and `auto_materialize_policies_by_output_name`",
    )
    converted: dict = {}
    for output_name, policy in auto_materialize_policies_by_output_name.items():
        converted[output_name] = policy.to_automation_condition() if policy else None
    return converted
def _resolve_selections(
    all_asset_keys: AbstractSet[AssetKey],
    all_check_keys: AbstractSet[AssetCheckKey],
    selected_asset_keys: Optional[AbstractSet[AssetKey]],
    selected_asset_check_keys: Optional[AbstractSet[AssetCheckKey]],
) -> tuple[AbstractSet[AssetKey], AbstractSet[AssetCheckKey]]:
    """Resolve optional asset/check selections into concrete key sets."""
    # NOTE: this logic mirrors subsetting at the asset layer. This is ripe for consolidation.
    if selected_asset_keys is None and selected_asset_check_keys is None:
        # if no selections, include everything
        return all_asset_keys, all_check_keys
    resolved_assets = selected_asset_keys or set()
    if selected_asset_check_keys is not None:
        # otherwise, use the selected checks
        resolved_checks: AbstractSet[AssetCheckKey] = selected_asset_check_keys
    else:
        # if assets were selected but checks are None, then include all checks
        # for selected assets
        resolved_checks = {
            check_key for check_key in all_check_keys if check_key.asset_key in resolved_assets
        }
    return resolved_assets, resolved_checks
def _validate_partition_mappings(
    partition_mappings: Mapping[AssetKey, PartitionMapping],
    input_asset_keys: AbstractSet[AssetKey],
    all_asset_keys: AbstractSet[AssetKey],
) -> None:
    """Warn on non-builtin mappings and fail on mappings for unknown upstreams."""
    for mapped_key, mapping in partition_mappings.items():
        warn_if_partition_mapping_not_builtin(mapping)
        if mapped_key in input_asset_keys:
            continue
        check.failed(
            f"While constructing AssetsDefinition outputting {all_asset_keys}, received a"
            f" partition mapping for {mapped_key} that is not defined in the set of upstream"
            f" assets: {input_asset_keys}"
        )
def _asset_specs_from_attr_key_params(
    all_asset_keys: AbstractSet[AssetKey],
    keys_by_input_name: Mapping[str, AssetKey],
    deps_by_asset_key: Optional[Mapping[AssetKey, AbstractSet[AssetKey]]],
    partition_mappings: Optional[Mapping[AssetKey, PartitionMapping]],
    group_names_by_key: Optional[Mapping[AssetKey, str]],
    metadata_by_key: Optional[Mapping[AssetKey, ArbitraryMetadataMapping]],
    tags_by_key: Optional[Mapping[AssetKey, Mapping[str, str]]],
    legacy_freshness_policies_by_key: Optional[Mapping[AssetKey, LegacyFreshnessPolicy]],
    automation_conditions_by_key: Optional[Mapping[AssetKey, AutomationCondition]],
    code_versions_by_key: Optional[Mapping[AssetKey, str]],
    descriptions_by_key: Optional[Mapping[AssetKey, str]],
    owners_by_key: Optional[Mapping[AssetKey, Sequence[str]]],
    partitions_def: Optional[PartitionsDefinition],
) -> Sequence[AssetSpec]:
    """Build an AssetSpec per key in *all_asset_keys* from legacy per-attribute
    ``*_by_key`` mappings.

    All mappings are type-validated first. Dependencies come from
    *deps_by_asset_key* when provided; otherwise every asset depends on all
    keys in *keys_by_input_name*.
    """
    validated_group_names_by_key = check.opt_mapping_param(
        group_names_by_key, "group_names_by_key", key_type=AssetKey, value_type=str
    )

    validated_metadata_by_key = check.opt_mapping_param(
        metadata_by_key, "metadata_by_key", key_type=AssetKey, value_type=dict
    )

    # Tags are strictly validated but passed through in their original form.
    for tags in (tags_by_key or {}).values():
        normalize_tags(tags, strict=True)
    validated_tags_by_key = tags_by_key or {}

    validated_descriptions_by_key = check.opt_mapping_param(
        descriptions_by_key, "descriptions_by_key", key_type=AssetKey, value_type=str
    )

    validated_code_versions_by_key = check.opt_mapping_param(
        code_versions_by_key, "code_versions_by_key", key_type=AssetKey, value_type=str
    )

    validated_legacy_freshness_policies_by_key = check.opt_mapping_param(
        legacy_freshness_policies_by_key,
        "legacy_freshness_policies_by_key",
        key_type=AssetKey,
        value_type=LegacyFreshnessPolicy,
    )

    validated_automation_conditions_by_key = check.opt_mapping_param(
        automation_conditions_by_key,
        "automation_conditions_by_key",
        key_type=AssetKey,
        value_type=AutomationCondition,
    )

    validated_owners_by_key = check.opt_mapping_param(
        owners_by_key, "owners_by_key", key_type=AssetKey, value_type=list
    )

    # Default dependency set: every key consumed via inputs.
    dep_keys_from_keys_by_input_name = set(keys_by_input_name.values())
    dep_objs_from_keys_by_input_name = [
        AssetDep(asset=key, partition_mapping=(partition_mappings or {}).get(key))
        for key in dep_keys_from_keys_by_input_name
    ]

    result: list[AssetSpec] = []
    for key in all_asset_keys:
        if deps_by_asset_key:
            # NOTE: the comprehension variable shadows the outer ``key``; the
            # iterable expression ``deps_by_asset_key.get(key, [])`` is
            # evaluated in the enclosing scope, so it still sees the outer key.
            dep_objs = [
                AssetDep(asset=key, partition_mapping=(partition_mappings or {}).get(key))
                for key in deps_by_asset_key.get(key, [])
            ]
        else:
            dep_objs = dep_objs_from_keys_by_input_name

        with disable_dagster_warnings():
            result.append(
                AssetSpec.dagster_internal_init(
                    key=key,
                    description=validated_descriptions_by_key.get(key),
                    metadata=validated_metadata_by_key.get(key),
                    tags=validated_tags_by_key.get(key),
                    legacy_freshness_policy=validated_legacy_freshness_policies_by_key.get(key),
                    automation_condition=validated_automation_conditions_by_key.get(key),
                    owners=validated_owners_by_key.get(key),
                    group_name=validated_group_names_by_key.get(key),
                    code_version=validated_code_versions_by_key.get(key),
                    deps=dep_objs,
                    # Value here is irrelevant, because it will be replaced by value from
                    # NodeDefinition
                    skippable=False,
                    auto_materialize_policy=None,
                    kinds=None,
                    partitions_def=check.opt_inst_param(
                        partitions_def, "partitions_def", PartitionsDefinition
                    ),
                )
            )

    return result
def _validate_self_deps(specs: Iterable[AssetSpec]) -> None:
    """Reject self-dependencies that cannot be satisfied.

    A spec may depend on itself only through a time-window partition mapping
    whose start and end offsets both point strictly to earlier partitions;
    anything else raises DagsterInvalidDefinitionError.
    """
    for spec in specs:
        for dep in spec.deps:
            if dep.asset_key != spec.key:
                continue
            if dep.partition_mapping:
                time_window_partition_mapping = get_self_dep_time_window_partition_mapping(
                    dep.partition_mapping, spec.partitions_def
                )
                if (
                    time_window_partition_mapping is not None
                    and (time_window_partition_mapping.start_offset or 0) < 0
                    and (time_window_partition_mapping.end_offset or 0) < 0
                ):
                    # Both offsets strictly negative: the dep only reaches
                    # earlier time partitions, which is allowed.
                    continue
            raise DagsterInvalidDefinitionError(
                f'Asset "{spec.key.to_user_string()}" depends on itself. Assets can only depend'
                " on themselves if they are:\n(a) time-partitioned and each partition depends on"
                " earlier partitions\n(b) multipartitioned, with one time dimension that depends"
                " on earlier time partitions"
            )
def get_self_dep_time_window_partition_mapping(
    partition_mapping: Optional[PartitionMapping],
    partitions_def: Optional[PartitionsDefinition],
) -> Optional[TimeWindowPartitionMapping]:
    """Returns a time window partition mapping dimension of the provided partition mapping,
    if exists.
    """
    if isinstance(partition_mapping, TimeWindowPartitionMapping):
        return partition_mapping
    if not isinstance(partition_mapping, MultiPartitionMapping):
        return None
    if not isinstance(partitions_def, MultiPartitionsDefinition):
        return None
    time_dimension_name = partitions_def.time_window_dimension.name
    wrapped = partition_mapping.downstream_mappings_by_upstream_dimension.get(
        time_dimension_name
    )
    if wrapped is None:
        return None
    inner_mapping = wrapped.partition_mapping
    if isinstance(inner_mapping, TimeWindowPartitionMapping):
        return inner_mapping
    return None
def get_partition_mappings_from_deps(
    partition_mappings: dict[AssetKey, PartitionMapping],
    deps: Iterable[AssetDep],
    asset_name: str,
) -> Mapping[AssetKey, PartitionMapping]:
    """Fold PartitionMappings declared on AssetDeps into *partition_mappings*
    (mutated in place and returned); conflicting duplicates raise.
    """
    for dep in deps:
        dep_mapping = dep.partition_mapping
        if dep_mapping is None:
            continue
        existing_mapping = partition_mappings.get(dep.asset_key, None)
        if existing_mapping is None:
            partition_mappings[dep.asset_key] = dep_mapping
        elif existing_mapping != dep_mapping:
            raise DagsterInvalidDefinitionError(
                f"Two different PartitionMappings for {dep.asset_key} provided for"
                f" asset {asset_name}. Please use the same PartitionMapping for"
                f" {dep.asset_key}."
            )
    return partition_mappings
def unique_id_from_asset_and_check_keys(entity_keys: Iterable["EntityKey"]) -> str:
    """Generate a unique ID from the provided asset keys.

    This is useful for generating op names that don't have collisions.
    """
    serialized = json.dumps(sorted(str(key) for key in entity_keys))
    return non_secure_md5_hash_str(serialized.encode("utf-8"))[:8]
def replace_specs_on_asset(
    assets_def: AssetsDefinition, replaced_specs: Sequence[AssetSpec]
) -> "AssetsDefinition":
    """Return a copy of *assets_def* whose specs are *replaced_specs*.

    If the replacement changes the dependency structure, the underlying op's
    inputs are rebuilt to match — only supported for op-backed assets, and
    only for non-argument ("Nothing"-typed) dependencies.
    """
    from dagster._builtins import Nothing
    from dagster._core.definitions.input import In

    new_deps_by_key = {dep.asset_key: dep for spec in replaced_specs for dep in spec.deps}
    previous_deps_by_key = {dep.asset_key: dep for spec in assets_def.specs for dep in spec.deps}
    added_dep_keys = set(new_deps_by_key.keys()) - set(previous_deps_by_key.keys())
    removed_dep_keys = set(previous_deps_by_key.keys()) - set(new_deps_by_key.keys())
    remaining_original_deps_by_key = {
        key: previous_deps_by_key[key]
        for key in set(previous_deps_by_key.keys()) - removed_dep_keys
    }
    original_key_to_input_mapping = reverse_dict(assets_def.node_keys_by_input_name)

    # If there are no changes to the dependency structure, we don't need to make any changes to the underlying node.
    if not assets_def.is_executable or (not added_dep_keys and not removed_dep_keys):
        return assets_def.__class__.dagster_internal_init(
            **{**assets_def.get_attributes_dict(), "specs": replaced_specs}
        )

    # Otherwise, there are changes to the dependency structure. We need to update the node_def.
    # Graph-backed assets do not currently support non-argument-based deps. Every argument to a graph-backed asset
    # must map to an an input on an internal asset node in the graph structure.
    # IMPROVEME BUILD-529
    check.invariant(
        isinstance(assets_def.node_def, OpDefinition),
        "Can only add additional deps to an op-backed asset.",
    )
    # for each deleted dep, we need to make sure it is not an argument-based dep. Argument-based deps cannot be removed.
    for dep_key in removed_dep_keys:
        dep = previous_deps_by_key[dep_key]
        input_name = original_key_to_input_mapping[dep.asset_key]
        input_def = assets_def.node_def.input_def_named(input_name)
        check.invariant(
            input_def.dagster_type.is_nothing,
            f"Attempted to remove argument-backed dependency {dep.asset_key} (mapped to argument {input_name}) from the asset. Only non-argument dependencies can be changed or removed using map_asset_specs.",
        )

    # Keep the inputs for deps that survive, and add Nothing-typed inputs for
    # every dep in the replacement specs.
    remaining_ins = {
        input_name: the_in
        for input_name, the_in in assets_def.node_def.input_dict.items()
        if assets_def.node_keys_by_input_name[input_name] in remaining_original_deps_by_key
    }
    all_ins = merge_dicts(
        remaining_ins,
        {
            stringify_asset_key_to_input_name(dep.asset_key): In(dagster_type=Nothing)
            for dep in new_deps_by_key.values()
        },
    )

    return assets_def.__class__.dagster_internal_init(
        **{
            **assets_def.get_attributes_dict(),
            "node_def": assets_def.op.with_replaced_properties(
                name=assets_def.op.name, ins=all_ins
            ),
            "specs": replaced_specs,
        }
    )
| AssetsDefinition |
python | getsentry__sentry | src/sentry/integrations/perforce/integration.py | {
"start": 10756,
"end": 11242
} | class ____:
"""
Installation view for Perforce configuration.
This is a simple pass-through view. The actual configuration
happens in the Settings tab after installation via get_organization_config().
"""
def dispatch(self, request, pipeline):
"""
Handle installation request.
Args:
request: HTTP request object
pipeline: Installation pipeline
"""
return pipeline.next_step()
| PerforceInstallationView |
python | spyder-ide__spyder | spyder/plugins/completion/providers/languageserver/provider.py | {
"start": 1550,
"end": 35037
} | class ____(SpyderCompletionProvider):
"""Language Server Protocol manager."""
COMPLETION_PROVIDER_NAME = 'lsp'
DEFAULT_ORDER = 1
SLOW = True
CONF_DEFAULTS = [
('enable_hover_hints', True),
('show_lsp_down_warning', True),
('code_completion', True),
# ('code_snippets', True),
('jedi_definition', True),
('jedi_definition/follow_imports', True),
('jedi_signature_help', True),
('preload_modules', PRELOAD_MDOULES),
('pyflakes', True),
('mccabe', False),
('flake8', False),
('ruff', False),
('no_linting', False),
('formatting', 'autopep8'),
('format_on_save', False),
('flake8/filename', ''),
('flake8/exclude', ''),
('flake8/extendSelect', ''),
('flake8/extendIgnore', 'E,W,C90'),
('flake8/max_line_length', 79),
('ruff/exclude', ''),
('ruff/extendSelect', ''),
('ruff/extendIgnore', 'E'),
('pydocstyle', False),
('pydocstyle/convention', 'numpy'),
('pydocstyle/select', ''),
('pydocstyle/ignore', ''),
('pydocstyle/match', '(?!test_).*\\.py'),
('pydocstyle/match_dir', '[^\\.].*'),
('advanced/enabled', False),
('advanced/module', 'pylsp'),
('advanced/host', '127.0.0.1'),
('advanced/port', 2087),
('advanced/external', False),
('advanced/stdio', False)
]
# IMPORTANT NOTES:
# 1. If you want to *change* the default value of a current option, you
# need to do a MINOR update in config version, e.g. from 0.1.0 to 0.2.0
# 2. If you want to *remove* options that are no longer needed or if you
# want to *rename* options, then you need to do a MAJOR update in
# version, e.g. from 0.1.0 to 1.0.0
# 3. You don't need to touch this value if you're just adding a new option
CONF_VERSION = "1.0.0"
CONF_TABS = TABS
STOPPED = 'stopped'
RUNNING = 'running'
LOCALHOST = ['127.0.0.1', 'localhost']
MAX_RESTART_ATTEMPTS = 5
TIME_BETWEEN_RESTARTS = 10000 # ms
TIME_HEARTBEAT = 3000 # ms
# --- Signals
# ------------------------------------------------------------------------
sig_exception_occurred = Signal(dict)
"""
This Signal is emitted to report that an exception has occurred.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data = {
"text": str,
"is_traceback": bool,
"title": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a Python
error traceback.
`title` indicates how the error data should customize the report dialog.
"""
    def __init__(self, parent, config):
        """Initialize per-language client bookkeeping and the status bar hook.

        Args:
            parent: Parent object passed through to SpyderCompletionProvider
                (presumably the completion plugin — confirm against base class).
            config: Provider configuration.
        """
        SpyderCompletionProvider.__init__(self, parent, config)

        # To keep track of the current interpreter used for completions
        self._interpreter = sys.executable

        # Per-language state, all keyed by language name: client handles,
        # restart accounting, heartbeat timers and status bar entries.
        self.clients = {}
        self.clients_restart_count = {}
        self.clients_restart_timers = {}
        self.clients_restarting = {}
        self.clients_hearbeat = {}
        self.clients_statusbar = {}
        self.requests = set({})
        self.register_queue = {}
        self.update_lsp_configuration()
        self.show_no_external_server_warning = True
        self.current_project_path = None

        # Status bar widget factory registered with the plugin framework.
        self.STATUS_BAR_CLASSES = [
            self.create_statusbar
        ]
def __del__(self):
"""Stop all heartbeats"""
for language in self.clients_hearbeat:
try:
if self.clients_hearbeat[language] is not None:
self.clients_hearbeat[language].stop()
self.clients_hearbeat[language].setParent(None)
del self.clients_hearbeat[language]
except (TypeError, KeyError, RuntimeError):
pass
# --- Status bar widget handling
def restart_lsp(self, language: str, force=False):
    """
    Restart language server on failure.

    With ``force`` a single manual restart is performed; otherwise this
    runs as one attempt of the automatic restart loop driven by the
    timer installed in ``handle_lsp_down``.
    """
    client_config = {
        'status': self.STOPPED,
        'config': self.get_language_config(language),
        'instance': None,
    }

    if force:
        logger.info("Manual restart for {}...".format(language))
        self.update_status(language, ClientStatus.RESTARTING)
        self.restart_client(language, client_config)

    elif self.clients_restarting[language]:
        attempt = (self.MAX_RESTART_ATTEMPTS
                   - self.clients_restart_count[language] + 1)
        logger.info("Automatic restart attempt {} for {}...".format(
            attempt, language))
        self.update_status(language, ClientStatus.RESTARTING)

        self.clients_restart_count[language] -= 1
        self.restart_client(language, client_config)
        client = self.clients[language]

        # Restarted the maximum amount of times without success: give
        # up, tear everything down and report the server as down.
        if self.clients_restart_count[language] <= 0:
            logger.info("Restart failed!")
            self.clients_restarting[language] = False
            self.clients_restart_timers[language].stop()
            self.clients_restart_timers[language] = None
            try:
                self.clients_hearbeat[language].stop()
                self.clients_hearbeat[language].setParent(None)
                del self.clients_hearbeat[language]
                # PySide and PyQt expose different disconnect signatures.
                if PYSIDE2 or PYSIDE6:
                    client['instance'].disconnect(None, None, None)
                else:
                    client['instance'].disconnect()
                client['instance'].stop()
            except (TypeError, KeyError, RuntimeError):
                pass
            self.report_lsp_down(language)
def create_statusbar(self, parent):
    """Build the LSP status bar widget this provider contributes."""
    widget = LSPStatusWidget(parent, self)
    return widget
def check_restart(self, client, language):
    """
    Check if a server restart was successful in order to stop
    further attempts.
    """
    status = client['status']
    instance = client['instance']

    # This check is only necessary for stdio servers
    check = True
    if instance.stdio_pid:
        check = instance.is_stdio_alive()

    if status == self.RUNNING and check:
        logger.info("Restart successful!")
        # Leave restart mode and reset all restart bookkeeping.
        self.clients_restarting[language] = False
        self.clients_restart_timers[language].stop()
        self.clients_restart_timers[language] = None
        self.clients_restart_count[language] = 0
        self.update_status(language, ClientStatus.READY)
def check_heartbeat(self, language):
    """
    Check if client or server for a given language are down.

    Runs periodically from the heartbeat QTimer; emits the client's
    ``sig_went_down`` signal (wired to ``handle_lsp_down``) on failure.
    """
    # This avoids an odd error when running our tests.
    if running_under_pytest():
        if not getattr(self, 'clients', None):
            return

    client = self.clients[language]
    status = client['status']
    instance = client.get('instance', None)
    if instance is not None:
        if instance.is_down() or status != self.RUNNING:
            instance.sig_went_down.emit(language)
def update_status(self, language, status):
    """
    Update status for the current file.
    """
    # Cache the last known status per language and mirror it in the
    # LSP status bar widget.
    self.clients_statusbar[language] = status
    self.sig_call_statusbar.emit(
        LSPStatusWidget.ID, 'update_status', (language, status), {})
def on_initialize(self, options, language):
    """
    Update the status bar widget on client initialization.
    """
    # Set status after the server was started correctly.
    if not self.clients_restarting.get(language, False):
        self.update_status(language, ClientStatus.READY)

    # Set status after a restart.
    if self.clients_restarting.get(language):
        client = self.clients[language]
        self.check_restart(client, language)
def handle_lsp_down(self, language):
    """
    Handle automatic restart of client/server on failure.
    """
    InstallerPylspError(language)
    if (not self.clients_restarting.get(language, False)
            and not running_under_pytest()):
        try:
            # Stop heartbeats while the restart loop is in charge.
            self.clients_hearbeat[language].stop()
            self.clients_hearbeat[language].setParent(None)
            del self.clients_hearbeat[language]
        except KeyError:
            pass
        logger.info("Automatic restart for {}...".format(language))
        # Periodic timer that retries restart_lsp until it succeeds or
        # MAX_RESTART_ATTEMPTS is exhausted.
        timer = QTimer(self)
        timer.setSingleShot(False)
        timer.setInterval(self.TIME_BETWEEN_RESTARTS)
        timer.timeout.connect(lambda: self.restart_lsp(language))

        self.update_status(language, ClientStatus.RESTARTING)
        self.clients_restarting[language] = True
        self.clients_restart_count[language] = self.MAX_RESTART_ATTEMPTS
        self.clients_restart_timers[language] = timer
        timer.start()
# ------------------ SpyderCompletionProvider methods ---------------------
def get_name(self):
    """Return the human-readable name of this completion provider."""
    name = _('Language Server Protocol (LSP)')
    return name
def register_file(self, language, filename, codeeditor):
    """Associate *filename*/*codeeditor* with the client for *language*."""
    if language not in self.clients:
        # Unknown language: nothing to register against.
        return
    client = self.clients[language]['instance']
    if client is None:
        # Client not started yet: queue the file for later registration.
        self.register_queue[language].append((filename, codeeditor))
    else:
        client.register_file(filename, codeeditor)
def get_languages(self):
    """
    Get the list of languages we need to start servers and create
    clients for.

    Always includes 'python'; other languages are taken from the
    provider config when they belong to SUPPORTED_LANGUAGES.
    """
    languages = ['python']
    # Hoist the lowered names out of the loop and use a set: the
    # original rebuilt the lowercase list once per configured option
    # and did an O(n) membership test each time.
    supported = {lang.lower() for lang in SUPPORTED_LANGUAGES}
    for option in self.config:
        if option in supported:
            languages.append(option)
    return languages
def get_language_config(self, language):
    """Get language configuration options from our config system."""
    if language != 'python':
        return self.get_conf(language)
    # Python has a dedicated, richer configuration builder.
    return self.generate_python_config()
def get_root_path(self, language):
    """
    Get root path to pass to the LSP servers.

    This can be the current project path or the output of
    getcwd_or_home (except for Python, see below).
    """
    path = self.current_project_path

    if not path:
        # We can't use getcwd_or_home for LSP servers because if it
        # returns home and you have a lot of files on it
        # then computing completions takes a long time
        # and blocks the server.
        # Instead we use an empty directory inside our config one,
        # just like we did for Rope in Spyder 3.
        path = osp.join(get_conf_path(), 'lsp_paths', 'root_path')
        # exist_ok avoids the race between an exists() check and the
        # creation when two clients start concurrently.
        os.makedirs(path, exist_ok=True)
    return path
@Slot()
def project_path_update(self, project_path, update_kind, projects):
    """
    Send a didChangeWorkspaceFolders request to each LSP server
    when the project path changes so they can update their
    respective root paths.

    If the server doesn't support workspace updates, restart the
    client with the new root path.
    """
    if update_kind == WorkspaceUpdateKind.ADDITION:
        self.current_project_path = project_path

    for language in self.clients:
        language_client = self.clients[language]
        if language_client['status'] == self.RUNNING:
            instance = language_client['instance']
            if (instance.support_multiple_workspaces and
                    instance.support_workspace_update):
                instance.send_workspace_folders_change({
                    'folder': project_path,
                    'instance': projects,
                    'kind': update_kind
                })
            else:
                logger.debug(
                    "{0}: LSP does not support multiple workspaces, "
                    "restarting client!".format(instance.language)
                )
                # Point the client at the new root and restart it.
                folder = self.get_root_path(language)
                instance.folder = folder
                self.sig_stop_completions.emit(language)
                self.stop_completion_services_for_language(language)
                self.start_completion_services_for_language(language)
def report_server_error(self, error):
    """Report server errors in our error report dialog."""
    error_data = {
        "text": error,
        "is_traceback": True,
        "title": "Internal Python Language Server error",
    }
    self.sig_exception_occurred.emit(error_data)
def report_no_external_server(self, host, port, language):
    """
    Report that connection couldn't be established with
    an external server.
    """
    # Windows gets an extra hint about firewalls/antivirus.
    if os.name == 'nt':
        os_message = (
            "<br><br>"
            "To fix this, please verify that your firewall or antivirus "
            "allows Python processes to open ports in your system, or the "
            "settings you introduced in our Preferences to connect to "
            "external LSP servers."
        )
    else:
        os_message = (
            "<br><br>"
            "To fix this, please verify the settings you introduced in "
            "our Preferences to connect to external LSP servers."
        )

    warn_str = (
        _("It appears there is no {language} language server listening "
          "at address:"
          "<br><br>"
          "<tt>{host}:{port}</tt>"
          "<br><br>"
          "Therefore, completion and linting for {language} will not "
          "work during this session.").format(
            host=host, port=port, language=language.capitalize())
        + os_message
    )

    def wrap_message_box(parent):
        return QMessageBox.warning(parent, _("Warning"), warn_str)

    self.sig_show_widget.emit(wrap_message_box)
    # Only warn once per session.
    self.show_no_external_server_warning = False
@Slot(str)
def report_lsp_down(self, language):
    """
    Report that either the transport layer or the LSP server are
    down.
    """
    self.update_status(language, ClientStatus.DOWN)

    # Respect the user's choice to not be warned again.
    if not self.get_conf('show_lsp_down_warning'):
        return

    if os.name == 'nt':
        os_message = (
            "To try to fix this, please verify that your firewall or "
            "antivirus allows Python processes to open ports in your "
            "system, or restart Spyder.<br><br>"
        )
    else:
        os_message = (
            "This problem could be fixed by restarting Spyder. "
        )

    warn_str = (
        _("Completion and linting in the editor for {language} files "
          "will not work during the current session, or stopped working."
          "<br><br>").format(language=language.capitalize())
        + os_message +
        _("Do you want to restart Spyder now?")
    )

    wrapper = ServerDisabledMessageBox.instance(warn_str, self.set_conf)
    self.sig_show_widget.emit(wrapper)
def start_completion_services_for_language(self, language):
    """Start an LSP client for a given language."""
    # To keep track if the client was started.
    started = False
    if language in self.clients:
        language_client = self.clients[language]
        queue = self.register_queue[language]

        started = language_client['status'] == self.RUNNING

        if language not in self.clients_hearbeat:
            # Start the client heartbeat if it isn't running already.
            timer = QTimer(self)
            self.clients_hearbeat[language] = timer
            timer.setInterval(self.TIME_HEARTBEAT)
            timer.timeout.connect(functools.partial(
                self.check_heartbeat, language))
            timer.start()

        if language_client['status'] == self.STOPPED:
            config = language_client['config']

            # If we're trying to connect to an external server,
            # verify that it's listening before creating a
            # client for it.
            if config['external']:
                host = config['host']
                port = config['port']
                response = check_connection_port(host, port)
                if not response:
                    if self.show_no_external_server_warning:
                        self.report_no_external_server(
                            host, port, language)
                    self.update_status(language, ClientStatus.DOWN)
                    return False

            language_client['instance'] = LSPClient(
                parent=self,
                server_settings=config,
                folder=self.get_root_path(language),
                language=language
            )

            self.register_client_instance(language_client['instance'])

            # Register that a client was started.
            logger.info("Starting LSP client for {}...".format(language))
            language_client['instance'].start()
            language_client['status'] = self.RUNNING
            started = True
            # Flush files that were queued before the client existed.
            for entry in queue:
                language_client['instance'].register_file(*entry)
            self.register_queue[language] = []
    return started
def register_client_instance(self, instance):
    """Register signals emitted by a client instance."""
    # Keep the wiring in one place; connection order matters for the
    # two sig_initialize slots.
    connections = [
        (instance.sig_went_down, self.handle_lsp_down),
        (instance.sig_initialize, self.on_initialize),
        (instance.sig_server_error, self.report_server_error),
        (instance.sig_initialize, self.sig_language_completions_available),
    ]
    for signal, slot in connections:
        signal.connect(slot)
def start(self):
    # Announce the provider is ready; language clients are created
    # lazily per language afterwards.
    self.sig_provider_ready.emit(self.COMPLETION_PROVIDER_NAME)
def shutdown(self):
    """Stop every language client before the provider goes away."""
    logger.info("Shutting down LSP manager...")
    for language in self.clients:
        self.stop_completion_services_for_language(language)
@Slot(object, bool)
def python_path_update(self, new_path, prioritize):
    """
    Update server configuration after a change in Spyder's Python
    path.

    Parameters
    ----------
    new_path: list of str
        New state of the Python path handled by Spyder.
    prioritize: bool
        Whether to prioritize Python path in sys.path.
    """
    # Opening/closing a project will create a diff between old_path
    # and new_path, but we don't know if prioritize changed.
    # sig_pythonpath_changed is only emitted if there is a change so we
    # should always update the configuration when this method is called.
    logger.debug("Update server's sys.path")
    self.update_lsp_configuration(python_only=True)
@qdebounced(timeout=600)
def interpreter_changed(self, interpreter: str):
    """
    Handle Python interpreter changes from other plugins.

    Notes
    -----
    - This method is debounced to prevent sending too many requests to the
      server when switching IPython consoles for different envs in quick
      succession.
    - The timeout corresponds more or less to the time it takes to switch
      back and forth between two consoles.
    """
    if interpreter != self._interpreter:
        logger.debug(f"LSP interpreter changed to {interpreter}")
        self._interpreter = interpreter
        self.update_lsp_configuration(python_only=True)
def file_opened_closed_or_updated(self, filename: str, language: str):
    # Keep the status bar widget pointing at the language of the file
    # currently in focus.
    self.sig_call_statusbar.emit(
        LSPStatusWidget.ID, 'set_current_language', (language,), {})
@on_conf_change
def update_configuration(self, config):
    # Store the fresh provider config and propagate it to the servers.
    self.config = config
    self.update_lsp_configuration()
@on_conf_change(section='outline_explorer',
                option=['group_cells', 'show_comments'])
def on_pyls_spyder_configuration_change(self, option, value):
    # Outline Explorer options are forwarded to the pyls-spyder plugin.
    self.update_lsp_configuration()
@on_conf_change(section='completions', option='enable_code_snippets')
def on_code_snippets_enabled_disabled(self, value):
    # The snippets toggle feeds jedi_completion's include_params option.
    self.update_lsp_configuration()
@on_conf_change(
    section='pythonpath_manager',
    option=['spyder_pythonpath', 'prioritize']
)
def on_pythonpath_option_update(self, option, value):
    # This is only useful to run some self-contained tests
    if running_under_pytest():
        self.update_lsp_configuration(python_only=True)
def update_lsp_configuration(self, python_only=False):
    """
    Update server configuration after changes done by the user
    through Spyder's Preferences.

    python_only: bool
        Perform an update only for the Python language server.
    """
    for language in self.get_languages():
        if python_only and language != 'python':
            continue

        client_config = {'status': self.STOPPED,
                         'config': self.get_language_config(language),
                         'instance': None}
        if language not in self.clients:
            self.clients[language] = client_config
            self.register_queue[language] = []
        else:
            current_lang_config = self.clients[language]['config']
            new_lang_config = client_config['config']

            # A restart is only required when transport-level options
            # changed; plain option updates can be pushed to the
            # running server as a configuration change.
            restart_diff = ['cmd', 'args', 'host',
                            'port', 'external', 'stdio']
            restart = any([current_lang_config[x] != new_lang_config[x]
                           for x in restart_diff])
            if restart:
                logger.debug("Restart required for {} client!".format(
                    language))
                if self.clients[language]['status'] == self.STOPPED:
                    # If we move from an external non-working server to
                    # an internal one, we need to start a new client.
                    if (current_lang_config['external'] and
                            not new_lang_config['external']):
                        self.restart_client(language, client_config)
                    else:
                        self.clients[language] = client_config
                elif self.clients[language]['status'] == self.RUNNING:
                    self.restart_client(language, client_config)
            else:
                if self.clients[language]['status'] == self.RUNNING:
                    client = self.clients[language]['instance']
                    client.send_configurations(
                        new_lang_config['configurations'])
def restart_client(self, language, config):
    """Restart a client."""
    # Tell consumers completions are unavailable while the old client
    # is torn down and the new one (with *config*) is started.
    self.sig_stop_completions.emit(language)
    self.stop_completion_services_for_language(language)
    self.clients[language] = config
    self.start_completion_services_for_language(language)
def update_client_status(self, active_set):
    """Stop clients whose language is no longer in *active_set*."""
    inactive = (lang for lang in self.clients if lang not in active_set)
    for lang in inactive:
        self.stop_completion_services_for_language(lang)
def stop_completion_services_for_language(self, language):
    """Stop the LSP client for *language* if it is running."""
    if language in self.clients:
        language_client = self.clients[language]
        if language_client['status'] == self.RUNNING:
            logger.info("Stopping LSP client for {}...".format(language))

            # PySide and PyQt expose different disconnect signatures.
            try:
                if PYSIDE2 or PYSIDE6:
                    language_client['instance'].disconnect(None, None, None)
                else:
                    language_client['instance'].disconnect()
            except TypeError:
                pass

            # Stop this language's heartbeat timer, if any.
            try:
                if self.clients_hearbeat[language] is not None:
                    self.clients_hearbeat[language].stop()
                    self.clients_hearbeat[language].setParent(None)
                    del self.clients_hearbeat[language]
            except (TypeError, KeyError, RuntimeError):
                pass

            language_client['instance'].stop()
        language_client['status'] = self.STOPPED
        self.sig_stop_completions.emit(language)
def receive_response(self, response_type, response, language, req_id):
    """Forward a completed request's *response* if it is still pending."""
    if req_id not in self.requests:
        # Request was cancelled or already answered; drop the response.
        return
    self.requests.discard(req_id)
    self.sig_response_ready.emit(
        self.COMPLETION_PROVIDER_NAME, req_id, response)
def send_request(self, language, request, params, req_id):
    """Send *request* to the client for *language*, or answer empty."""
    language_client = self.clients.get(language)
    if (language_client is not None
            and language_client['status'] == self.RUNNING):
        self.requests.add(req_id)
        client = language_client['instance']
        # The client answers through receive_response, which matches
        # the reply to this req_id.
        params['response_callback'] = functools.partial(
            self.receive_response, language=language, req_id=req_id)
        client.perform_request(request, params)
        return
    # No running client: unblock the caller with an empty response.
    self.sig_response_ready.emit(self.COMPLETION_PROVIDER_NAME,
                                 req_id, {})
def send_notification(self, language, request, params):
    """Send a notification to the running client for *language*, if any."""
    language_client = self.clients.get(language)
    if language_client is None or language_client['status'] != self.RUNNING:
        # No client or not running: notifications are fire-and-forget.
        return
    language_client['instance'].perform_request(request, params)
def broadcast_notification(self, request, params):
    """Send notification/request to all available LSP servers."""
    # A 'language' key restricts the broadcast to one client; it is
    # popped so clients never see it in the payload.
    language = params.pop('language', None)
    targets = [language] if language else list(self.clients)
    for lang in targets:
        self.send_notification(lang, request, params)
def generate_python_config(self):
    """
    Update Python server configuration with the options saved in our
    config system.

    Returns the PYTHON_CONFIG template with server transport options
    and per-plugin pylsp settings filled in.
    """
    python_config = PYTHON_CONFIG.copy()

    # Server options
    cmd = self.get_conf('advanced/module', 'pylsp')
    host = self.get_conf('advanced/host', '127.0.0.1')
    port = self.get_conf('advanced/port', 2087)

    # Flake8
    cs_max_line_length = self.get_conf('flake8/max_line_length', 79)

    f8_exclude = self.get_conf('flake8/exclude', '').split(',')
    f8_filename = self.get_conf('flake8/filename', '').split(',')
    f8_select = self.get_conf('flake8/extendSelect', '').split(',')
    f8_ignore = self.get_conf('flake8/extendIgnore', '').split(',')
    f8_indent = self.get_conf(
        'indent_chars',
        '* *',
        section='editor'
    ).replace('*','')
    f8_tab_size = self.get_conf(
        'tab_stop_width_spaces',
        4,
        section='editor'
    )
    # Indent size in spaces: tabs count as the configured tab width.
    f8_indent_size = (
        f8_indent.count(" ") + f8_indent.count("\t") * f8_tab_size
    )

    flake8 = {
        "enabled": self.get_conf("flake8"),
        "filename": [
            filename.strip() for filename in f8_filename if filename
        ],
        "exclude": [exclude.strip() for exclude in f8_exclude if exclude],
        "extendSelect": [select.strip() for select in f8_select if select],
        "extendIgnore": [ignore.strip() for ignore in f8_ignore if ignore],
        "indentSize": f8_indent_size,
        "maxLineLength": cs_max_line_length,
    }

    # pycodestyle
    pycodestyle = {
        'maxLineLength': cs_max_line_length
    }

    # Linting - Pyflakes
    pyflakes = {
        'enabled': self.get_conf('pyflakes')
    }

    # Linting - ruff
    ruff_exclude = self.get_conf('ruff/exclude', '').split(',')
    ruff_select = self.get_conf('ruff/extendSelect', '').split(',')
    ruff_ignore = self.get_conf('ruff/extendIgnore', '').split(',')

    ruff = {
        "enabled": self.get_conf("ruff"),
        "exclude": [
            exclude.strip() for exclude in ruff_exclude if exclude
        ],
        "extendSelect": [
            select.strip() for select in ruff_select if select
        ],
        "extendIgnore": [
            ignore.strip() for ignore in ruff_ignore if ignore
        ],
        "lineLength": cs_max_line_length,
    }

    # Linting disabled
    no_linting = {"enabled": self.get_conf("no_linting")}

    # ruff - pydocstyle docstring linting
    pydocstyle_enabled = self.get_conf('pydocstyle')
    if pydocstyle_enabled:
        # Docstring checks are ruff's 'D' rules; make sure they are
        # selected and not simultaneously ignored.
        if 'D' not in ruff['extendSelect']:
            ruff['extendSelect'].append('D')
        if 'D' in ruff['extendIgnore']:
            ruff['extendIgnore'].remove('D')
        convention = self.get_conf('pydocstyle/convention')
        ruff.update(
            {
                'config': f"lint.pydocstyle.convention = '{convention}'",
            }
        )

    # Autoformatting configuration
    formatter = self.get_conf('formatting')

    # Enabling/disabling formatters
    formatters = ['autopep8', 'yapf', 'black']
    formatter_options = {
        fmt: {
            'enabled': fmt == formatter
        }
        for fmt in formatters
    }

    # Setting max line length for formatters.
    # Notes:
    # 1. The autopep8 plugin shares the same maxLineLength value with the
    # flake8 one. That's why it's not necessary to set it here.
    # 2. The yapf pylsp plugin doesn't support this yet.
    formatter_options['black']['line_length'] = cs_max_line_length

    # PyLS-Spyder configuration
    group_cells = self.get_conf(
        'group_cells',
        section='outline_explorer'
    )
    display_block_comments = self.get_conf(
        'show_comments',
        section='outline_explorer'
    )
    pyls_spyder_options = {
        'enable_block_comments': display_block_comments,
        'group_cells': group_cells
    }

    # Jedi configuration
    env_vars = os.environ.copy()  # Ensure env is independent of PyLSP's
    jedi = {
        'environment': self._interpreter,
        'extra_paths': self.get_conf('spyder_pythonpath',
                                     section='pythonpath_manager',
                                     default=[]),
        'prioritize_extra_paths': self.get_conf(
            'prioritize', section='pythonpath_manager', default=False
        ),
        'env_vars': env_vars,
    }
    jedi_completion = {
        'enabled': self.get_conf('code_completion'),
        'include_params': self.get_conf('enable_code_snippets',
                                        section='completions')
    }
    jedi_signature_help = {
        'enabled': self.get_conf('jedi_signature_help')
    }
    jedi_definition = {
        'enabled': self.get_conf('jedi_definition'),
        'follow_imports': self.get_conf('jedi_definition/follow_imports')
    }

    # Advanced
    external_server = self.get_conf('advanced/external')
    stdio = self.get_conf('advanced/stdio')

    # Setup options in json
    python_config['cmd'] = cmd
    # Local, non-stdio servers are launched over TCP with placeholder
    # args filled in later.
    if host in self.LOCALHOST and not stdio:
        python_config['args'] = ('--host {host} --port {port} --tcp '
                                 '--check-parent-process')
    else:
        python_config['args'] = '--check-parent-process'
    python_config['external'] = external_server
    python_config['stdio'] = stdio
    python_config['host'] = host
    python_config['port'] = port

    # Updating options
    plugins = python_config['configurations']['pylsp']['plugins']
    plugins['pyflakes'].update(pyflakes)
    plugins['pycodestyle'].update(pycodestyle)
    plugins['flake8'].update(flake8)
    plugins['ruff'].update(ruff)
    plugins['no_linting'].update(no_linting)
    plugins['pyls_spyder'].update(pyls_spyder_options)
    plugins['jedi'].update(jedi)
    plugins['jedi_completion'].update(jedi_completion)
    plugins['jedi_signature_help'].update(jedi_signature_help)
    plugins['jedi_definition'].update(jedi_definition)
    plugins['preload']['modules'] = self.get_conf('preload_modules')

    for fmt in formatters:
        plugins[fmt].update(formatter_options[fmt])

    return python_config
| LanguageServerProvider |
python | doocs__leetcode | solution/2500-2599/2583.Kth Largest Sum in a Binary Tree/Solution2.py | {
"start": 192,
"end": 620
} | class ____:
def kthLargestLevelSum(self, root: Optional[TreeNode], k: int) -> int:
def dfs(root, d):
if root is None:
return
if len(arr) <= d:
arr.append(0)
arr[d] += root.val
dfs(root.left, d + 1)
dfs(root.right, d + 1)
arr = []
dfs(root, 0)
return -1 if len(arr) < k else nlargest(k, arr)[-1]
| Solution |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_ecs.py | {
"start": 32834,
"end": 36141
} | class ____(EcsBaseTestCase):
@pytest.mark.parametrize(("waiter_delay", "waiter_max_attempts"), WAITERS_TEST_CASES)
def test_execute_with_waiter(self, patch_hook_waiters, waiter_delay, waiter_max_attempts):
mocked_waiters = mock.MagicMock(name="MockedHookWaitersMethod")
patch_hook_waiters.return_value = mocked_waiters
op = EcsCreateClusterOperator(
task_id="task",
cluster_name=CLUSTER_NAME,
wait_for_completion=True,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
)
with mock.patch.object(self.client, "create_cluster") as mock_client_method:
result = op.execute({})
mock_client_method.assert_called_once_with(clusterName=CLUSTER_NAME)
patch_hook_waiters.assert_called_once_with("cluster_active")
expected_waiter_config = {}
if waiter_delay:
expected_waiter_config["Delay"] = waiter_delay
if waiter_max_attempts:
expected_waiter_config["MaxAttempts"] = waiter_max_attempts
mocked_waiters.wait.assert_called_once_with(clusters=mock.ANY, WaiterConfig=expected_waiter_config)
assert result is not None
@mock.patch.object(EcsCreateClusterOperator, "client")
def test_execute_deferrable(self, mock_client: MagicMock):
op = EcsCreateClusterOperator(
task_id="task",
cluster_name=CLUSTER_NAME,
deferrable=True,
waiter_delay=12,
waiter_max_attempts=34,
)
mock_client.create_cluster.return_value = {
"cluster": {"status": EcsClusterStates.PROVISIONING, "clusterArn": "my arn"}
}
with pytest.raises(TaskDeferred) as defer:
op.execute(context={})
assert defer.value.trigger.waiter_delay == 12
assert defer.value.trigger.attempts == 34
def test_execute_immediate_create(self, patch_hook_waiters):
"""Test if cluster created during initial request."""
op = EcsCreateClusterOperator(task_id="task", cluster_name=CLUSTER_NAME, wait_for_completion=True)
with mock.patch.object(self.client, "create_cluster") as mock_client_method:
mock_client_method.return_value = {"cluster": {"status": "ACTIVE", "foo": "bar"}}
result = op.execute({})
mock_client_method.assert_called_once_with(clusterName=CLUSTER_NAME)
patch_hook_waiters.assert_not_called()
assert result == {"status": "ACTIVE", "foo": "bar"}
def test_execute_without_waiter(self, patch_hook_waiters):
op = EcsCreateClusterOperator(task_id="task", cluster_name=CLUSTER_NAME, wait_for_completion=False)
with mock.patch.object(self.client, "create_cluster") as mock_client_method:
result = op.execute({})
mock_client_method.assert_called_once_with(clusterName=CLUSTER_NAME)
patch_hook_waiters.assert_not_called()
assert result is not None
def test_template_fields(self):
op = EcsCreateClusterOperator(
task_id="task",
cluster_name=CLUSTER_NAME,
deferrable=True,
waiter_delay=12,
waiter_max_attempts=34,
)
validate_template_fields(op)
| TestEcsCreateClusterOperator |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 137597,
"end": 140046
} | class ____(nn.Module):
def __init__(self, config: Qwen2_5OmniDiTConfig):
super().__init__()
self.config = config
self.dim = config.hidden_size
self.heads = config.num_attention_heads
self.inner_dim = config.head_dim * config.num_attention_heads
self.dropout = config.dropout
self.is_causal = False
self.to_q = nn.Linear(config.hidden_size, self.inner_dim)
self.to_k = nn.Linear(config.hidden_size, self.inner_dim)
self.to_v = nn.Linear(config.hidden_size, self.inner_dim)
self.to_out = nn.ModuleList([nn.Linear(self.inner_dim, config.hidden_size), nn.Dropout(config.dropout)])
def forward(
self,
hidden_states, # noised input x
position_embeddings=None, # rotary position embedding for x
attention_mask=None,
) -> torch.Tensor:
batch_size = hidden_states.shape[0]
# `sample` projections.
query = self.to_q(hidden_states)
key = self.to_k(hidden_states)
value = self.to_v(hidden_states)
# attention
inner_dim = key.shape[-1]
head_dim = inner_dim // self.heads
query = query.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
# apply rotary position embedding
# Due to training process, only first head is applied with RoPE, will be fixed at next release
cos, sin = position_embeddings
query[:, :1], key[:, :1] = apply_rotary_pos_emb(query[:, :1], key[:, :1], cos, sin)
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attention_weights, _ = attention_interface(
self,
query,
key,
value,
attention_mask=attention_mask,
is_causal=False,
)
# mask. e.g. inference got a batch with different target durations, mask out the padding
attention_weights = attention_weights.reshape(batch_size, -1, self.heads * head_dim)
attention_weights = attention_weights.to(query.dtype)
# linear proj
attention_output = self.to_out[0](attention_weights)
attention_output = self.to_out[1](attention_output)
return attention_output
# time step conditioning embedding
| DiTAttention |
python | doocs__leetcode | solution/1800-1899/1879.Minimum XOR Sum of Two Arrays/Solution.py | {
"start": 0,
"end": 444
} | class ____:
def minimumXORSum(self, nums1: List[int], nums2: List[int]) -> int:
n = len(nums2)
f = [[inf] * (1 << n) for _ in range(n + 1)]
f[0][0] = 0
for i, x in enumerate(nums1, 1):
for j in range(1 << n):
for k in range(n):
if j >> k & 1:
f[i][j] = min(f[i][j], f[i - 1][j ^ (1 << k)] + (x ^ nums2[k]))
return f[-1][-1]
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_cond_format09.py | {
"start": 315,
"end": 1314
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("cond_format08.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with conditional formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format = workbook.add_format(
{
"font_color": "#9C6500",
"bg_color": "#FFEB9C",
"font_condense": 1,
"font_extend": 1,
}
)
worksheet.write("A1", 10)
worksheet.write("A2", 20)
worksheet.write("A3", 30)
worksheet.write("A4", 40)
worksheet.conditional_format(
"A1",
{"type": "cell", "format": format, "criteria": "greater than", "value": 5},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | huggingface__transformers | src/transformers/models/parakeet/modular_parakeet.py | {
"start": 4592,
"end": 4796
} | class ____(FastSpeech2ConformerConvolutionModule):
def __init__(self, config: ParakeetEncoderConfig, module_config=None):
super().__init__(config, module_config)
| ParakeetEncoderConvolutionModule |
python | automl__auto-sklearn | autosklearn/pipeline/components/regression/mlp.py | {
"start": 650,
"end": 11159
} | class ____(IterativeComponent, AutoSklearnRegressionAlgorithm):
def __init__(
self,
hidden_layer_depth,
num_nodes_per_layer,
activation,
alpha,
learning_rate_init,
early_stopping,
solver,
batch_size,
n_iter_no_change,
tol,
shuffle,
beta_1,
beta_2,
epsilon,
validation_fraction=None,
random_state=None,
verbose=0,
):
self.hidden_layer_depth = hidden_layer_depth
self.num_nodes_per_layer = num_nodes_per_layer
self.max_iter = self.get_max_iter()
self.activation = activation
self.alpha = alpha
self.learning_rate_init = learning_rate_init
self.early_stopping = early_stopping
self.n_iter_no_change = n_iter_no_change
self.validation_fraction = validation_fraction
self.tol = tol
self.solver = solver
self.batch_size = batch_size
self.shuffle = shuffle
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.beta_1 = beta_1
self.random_state = random_state
self.verbose = verbose
self.estimator = None
@staticmethod
def get_max_iter():
return 512
def get_current_iter(self):
return self.estimator.n_iter_
def iterative_fit(self, X, y, n_iter=2, refit=False):
"""Set n_iter=2 for the same reason as for SGD"""
import sklearn.preprocessing
from sklearn.neural_network import MLPRegressor
n_iter = max(n_iter, 2)
if refit:
self.estimator = None
self.scaler = None
if self.estimator is None:
self._fully_fit = False
self.max_iter = int(self.max_iter)
self.hidden_layer_depth = int(self.hidden_layer_depth)
self.num_nodes_per_layer = int(self.num_nodes_per_layer)
self.hidden_layer_sizes = tuple(
self.num_nodes_per_layer for i in range(self.hidden_layer_depth)
)
self.activation = str(self.activation)
self.alpha = float(self.alpha)
self.learning_rate_init = float(self.learning_rate_init)
self.early_stopping = str(self.early_stopping)
if self.early_stopping == "train":
self.validation_fraction = 0.0
self.tol = float(self.tol)
self.n_iter_no_change = int(self.n_iter_no_change)
self.early_stopping_val = False
elif self.early_stopping == "valid":
self.validation_fraction = float(self.validation_fraction)
self.tol = float(self.tol)
self.n_iter_no_change = int(self.n_iter_no_change)
self.early_stopping_val = True
else:
raise ValueError(
"Set early stopping to unknown value %s" % self.early_stopping
)
# elif self.early_stopping == "off":
# self.validation_fraction = 0
# self.tol = 10000
# self.n_iter_no_change = self.max_iter
# self.early_stopping_val = False
self.solver = self.solver
try:
self.batch_size = int(self.batch_size)
except ValueError:
self.batch_size = str(self.batch_size)
self.shuffle = check_for_bool(self.shuffle)
self.beta_1 = float(self.beta_1)
self.beta_2 = float(self.beta_2)
self.epsilon = float(self.epsilon)
self.beta_1 = float(self.beta_1)
self.verbose = int(self.verbose)
n_iter = int(np.ceil(n_iter))
# initial fit of only increment trees
self.estimator = MLPRegressor(
hidden_layer_sizes=self.hidden_layer_sizes,
activation=self.activation,
solver=self.solver,
alpha=self.alpha,
batch_size=self.batch_size,
learning_rate_init=self.learning_rate_init,
max_iter=n_iter,
shuffle=self.shuffle,
random_state=self.random_state,
verbose=self.verbose,
warm_start=True,
early_stopping=self.early_stopping_val,
validation_fraction=self.validation_fraction,
n_iter_no_change=self.n_iter_no_change,
tol=self.tol,
beta_1=self.beta_2,
beta_2=self.beta_1,
epsilon=self.epsilon,
# We do not use these, see comments below in search space
# momentum=self.momentum,
# nesterovs_momentum=self.nesterovs_momentum,
# power_t=self.power_t,
# learning_rate=self.learning_rate,
# max_fun=self.max_fun
)
self.scaler = sklearn.preprocessing.StandardScaler(copy=True)
# Convert y to be at least 2d for the StandardScaler
# [1,1,1] -> [[1], [1], [1]]
if y.ndim == 1:
y = y.reshape((-1, 1))
self.scaler.fit(y)
else:
new_max_iter = min(self.max_iter - self.estimator.n_iter_, n_iter)
self.estimator.max_iter = new_max_iter
# Convert y to be at least 2d for the scaler
# [1,1,1] -> [[1], [1], [1]]
if y.ndim == 1:
y = y.reshape((-1, 1))
y_scaled = self.scaler.transform(y)
# Flatten: [[0], [0], [0]] -> [0, 0, 0]
if y_scaled.ndim == 2 and y_scaled.shape[1] == 1:
y_scaled = y_scaled.flatten()
self.estimator.fit(X, y_scaled)
if (
self.estimator.n_iter_ >= self.max_iter
or self.estimator._no_improvement_count > self.n_iter_no_change
):
self._fully_fit = True
return self
def configuration_fully_fitted(self):
if self.estimator is None:
return False
elif not hasattr(self, "_fully_fit"):
return False
else:
return self._fully_fit
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
y_pred = self.estimator.predict(X)
inverse = self.scaler.inverse_transform(y_pred)
# Flatten: [[0], [0], [0]] -> [0, 0, 0]
if inverse.ndim == 2 and inverse.shape[1] == 1:
inverse = inverse.flatten()
return inverse
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "MLP",
"name": "Multilayer Percepton",
"handles_regression": True,
"handles_classification": False,
"handles_multiclass": False,
"handles_multilabel": False,
"handles_multioutput": False,
"is_deterministic": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
hidden_layer_depth = UniformIntegerHyperparameter(
name="hidden_layer_depth", lower=1, upper=3, default_value=1
)
num_nodes_per_layer = UniformIntegerHyperparameter(
name="num_nodes_per_layer", lower=16, upper=264, default_value=32, log=True
)
activation = CategoricalHyperparameter(
name="activation", choices=["tanh", "relu"], default_value="tanh"
)
alpha = UniformFloatHyperparameter(
name="alpha", lower=1e-7, upper=1e-1, default_value=1e-4, log=True
)
learning_rate_init = UniformFloatHyperparameter(
name="learning_rate_init",
lower=1e-4,
upper=0.5,
default_value=1e-3,
log=True,
)
# Not allowing to turn off early stopping
early_stopping = CategoricalHyperparameter(
name="early_stopping",
choices=["valid", "train"], # , "off"],
default_value="valid",
)
# Constants
n_iter_no_change = Constant(
name="n_iter_no_change", value=32
) # default=10 is too low
validation_fraction = Constant(name="validation_fraction", value=0.1)
tol = UnParametrizedHyperparameter(name="tol", value=1e-4)
solver = Constant(name="solver", value="adam")
# Relying on sklearn defaults for now
batch_size = UnParametrizedHyperparameter(name="batch_size", value="auto")
shuffle = UnParametrizedHyperparameter(name="shuffle", value="True")
beta_1 = UnParametrizedHyperparameter(name="beta_1", value=0.9)
beta_2 = UnParametrizedHyperparameter(name="beta_2", value=0.999)
epsilon = UnParametrizedHyperparameter(name="epsilon", value=1e-8)
# Not used
# solver=["sgd", "lbfgs"] --> not used to keep searchspace simpler
# learning_rate --> only used when using solver=sgd
# power_t --> only used when using solver=sgd & learning_rate=invscaling
# momentum --> only used when solver=sgd
# nesterovs_momentum --> only used when solver=sgd
# max_fun --> only used when solver=lbfgs
# activation=["identity", "logistic"] --> not useful for classification
cs.add_hyperparameters(
[
hidden_layer_depth,
num_nodes_per_layer,
activation,
alpha,
learning_rate_init,
early_stopping,
n_iter_no_change,
validation_fraction,
tol,
solver,
batch_size,
shuffle,
beta_1,
beta_2,
epsilon,
]
)
validation_fraction_cond = InCondition(
validation_fraction, early_stopping, ["valid"]
)
cs.add_conditions([validation_fraction_cond])
# We always use early stopping
# n_iter_no_change_cond = \
# InCondition(n_iter_no_change, early_stopping, ["valid", "train"])
# tol_cond = InCondition(n_iter_no_change, early_stopping, ["valid", "train"])
# cs.add_conditions([n_iter_no_change_cond, tol_cond])
return cs
| MLPRegressor |
python | airbytehq__airbyte | airbyte-integrations/bases/base-normalization/normalization/transform_catalog/dbt_macro.py | {
"start": 101,
"end": 438
} | class ____(ABC):
"https://docs.getdbt.com/docs/building-a-dbt-project/jinja-macros"
@abstractmethod
def __str__(self):
pass
def __repr__(self):
return str(self)
def __add__(self, other):
return str(self) + str(other)
def __radd__(self, other):
return str(other) + str(self)
| Macro |
python | mlflow__mlflow | mlflow/johnsnowlabs/__init__.py | {
"start": 33789,
"end": 34821
} | class ____:
"""
Wrapper around NLUPipeline providing interface for scoring pandas DataFrame.
"""
def __init__(
self,
spark_model,
spark=None,
):
# we have this `or`, so we support _PyFuncModelWrapper(nlu_ref)
self.spark = spark or _get_or_create_sparksession()
self.spark_model = spark_model
def get_raw_model(self):
"""
Returns the underlying model.
"""
return self.spark_model
def predict(self, text, params: dict[str, Any] | None = None):
"""Generate predictions given input data in a pandas DataFrame.
Args:
text: pandas DataFrame containing input data.
params: Additional parameters to pass to the model for inference.
Returns:
List with model predictions.
"""
output_level = params.get("output_level", "") if params else ""
return self.spark_model.predict(text, output_level=output_level).reset_index().to_json()
| _PyFuncModelWrapper |
python | doocs__leetcode | solution/1000-1099/1016.Binary String With Substrings Representing 1 To N/Solution.py | {
"start": 0,
"end": 179
} | class ____:
def queryString(self, s: str, n: int) -> bool:
if n > 1000:
return False
return all(bin(i)[2:] in s for i in range(n, n // 2, -1))
| Solution |
python | huggingface__transformers | src/transformers/models/mobilevit/modeling_mobilevit.py | {
"start": 6235,
"end": 8797
} | class ____(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
super().__init__()
if hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size {hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
batch_size, seq_length, _ = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
| MobileViTSelfAttention |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-appsflyer/source_appsflyer/source.py | {
"start": 11636,
"end": 11981
} | class ____(RawDataMixin, IncrementalAppsflyerStream):
cursor_field = "install_time"
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return f"raw-data/export/app/{self.app_id}/organic_installs_report/v5"
| OrganicInstalls |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/tests/np_test.py | {
"start": 139528,
"end": 141166
} | class ____(jtu.TestCase):
@named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.name, shapes, itertools.repeat(dtype)),
"op": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes, "dtype": dtype,
"order": rec.order, "tol": rec.tol}
for shapes in CombosWithReplacement(nonempty_shapes, rec.nargs)
for dtype in rec.dtypes)
for rec in GRAD_TEST_RECORDS))
@jtu.disable
def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):
rng = rng_factory()
tol = {onp.float32: 1e-1, onp.complex64: 1e-1}
args = tuple(rng(shape, dtype) for shape in shapes)
check_grads(op, args, order, ["fwd", "rev"], tol, tol)
@named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(rec.op.__name__, special_value),
"op": rec.op, "special_value": special_value, "order": rec.order}
for special_value in rec.values)
for rec in GRAD_SPECIAL_VALUE_TEST_RECORDS))
@jtu.disable
def testOpGradSpecialValue(self, op, special_value, order):
check_grads(op, (special_value,), order, ["fwd", "rev"],
atol={onp.float32: 3e-3})
@jtu.disable
def testTakeAlongAxisIssue1521(self):
# https://github.com/google/jax/issues/1521
idx = tnp.repeat(tnp.arange(3), 10).reshape((30, 1))
def f(x):
y = x * tnp.arange(3.).reshape((1, 3))
return tnp.take_along_axis(y, idx, -1).sum()
check_grads(f, (1.,), order=1)
if __name__ == "__main__":
absltest.main()
| NumpyGradTests |
python | ray-project__ray | python/ray/tests/test_runtime_env_strong_type.py | {
"start": 261,
"end": 1715
} | class ____:
field1: List[ValueType]
field2: str
def test_convert_from_and_to_dataclass():
runtime_env = RuntimeEnv()
test_plugin = TestPlugin(
field1=[
ValueType(nfield1=["a", "b", "c"], nfield2=False),
ValueType(nfield1=["d", "e"], nfield2=True),
],
field2="abc",
)
runtime_env.set("test_plugin", test_plugin)
serialized_runtime_env = runtime_env.serialize()
assert "test_plugin" in serialized_runtime_env
runtime_env_2 = RuntimeEnv.deserialize(serialized_runtime_env)
test_plugin_2 = runtime_env_2.get("test_plugin", data_class=TestPlugin)
assert len(test_plugin_2.field1) == 2
assert test_plugin_2.field1[0].nfield1 == ["a", "b", "c"]
assert test_plugin_2.field1[0].nfield2 is False
assert test_plugin_2.field1[1].nfield1 == ["d", "e"]
assert test_plugin_2.field1[1].nfield2 is True
assert test_plugin_2.field2 == "abc"
def test_pip(start_cluster):
cluster, address = start_cluster
ray.init(address)
runtime_env = RuntimeEnv()
pip = Pip(packages=["pip-install-test==0.5"])
runtime_env.set("pip", pip)
@ray.remote
class Actor:
def foo(self):
import pip_install_test # noqa
return "hello"
a = Actor.options(runtime_env=runtime_env).remote()
assert ray.get(a.foo.remote()) == "hello"
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| TestPlugin |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/misplaced_bare_raise.py | {
"start": 340,
"end": 1090
} | class ____:
def __enter__(self):
return self
def __exit__(self, *args):
raise
try:
raise # [misplaced-bare-raise]
except Exception:
pass
def f():
try:
raise # [misplaced-bare-raise]
except Exception:
pass
def g():
raise # [misplaced-bare-raise]
def h():
try:
if True:
def i():
raise # [misplaced-bare-raise]
except Exception:
pass
raise # [misplaced-bare-raise]
raise # [misplaced-bare-raise]
try:
pass
except:
def i():
raise # [misplaced-bare-raise]
try:
pass
except:
class C:
raise # [misplaced-bare-raise]
try:
pass
except:
pass
finally:
raise # [misplaced-bare-raise]
| ContextManager |
python | django__django | tests/backends/base/test_base.py | {
"start": 10775,
"end": 17875
} | class ____(SimpleTestCase):
databases = {"default"}
def setUp(self):
# All test cases here need newly configured and created connections.
# Use the default db connection for convenience.
connection.close()
self.addCleanup(connection.close)
def patch_settings_dict(self, conn_health_checks):
self.settings_dict_patcher = patch.dict(
connection.settings_dict,
{
**connection.settings_dict,
"CONN_MAX_AGE": None,
"CONN_HEALTH_CHECKS": conn_health_checks,
},
)
self.settings_dict_patcher.start()
self.addCleanup(self.settings_dict_patcher.stop)
def run_query(self):
with connection.cursor() as cursor:
cursor.execute("SELECT 42" + connection.features.bare_select_suffix)
@skipUnlessDBFeature("test_db_allows_multiple_connections")
def test_health_checks_enabled(self):
self.patch_settings_dict(conn_health_checks=True)
self.assertIsNone(connection.connection)
# Newly created connections are considered healthy without performing
# the health check.
with patch.object(connection, "is_usable", side_effect=AssertionError):
self.run_query()
old_connection = connection.connection
# Simulate request_finished.
connection.close_if_unusable_or_obsolete()
self.assertIs(old_connection, connection.connection)
# Simulate connection health check failing.
with patch.object(
connection, "is_usable", return_value=False
) as mocked_is_usable:
self.run_query()
new_connection = connection.connection
# A new connection is established.
self.assertIsNot(new_connection, old_connection)
# Only one health check per "request" is performed, so the next
# query will carry on even if the health check fails. Next query
# succeeds because the real connection is healthy and only the
# health check failure is mocked.
self.run_query()
self.assertIs(new_connection, connection.connection)
self.assertEqual(mocked_is_usable.call_count, 1)
# Simulate request_finished.
connection.close_if_unusable_or_obsolete()
# The underlying connection is being reused further with health checks
# succeeding.
self.run_query()
self.run_query()
self.assertIs(new_connection, connection.connection)
@skipUnlessDBFeature("test_db_allows_multiple_connections")
def test_health_checks_enabled_errors_occurred(self):
self.patch_settings_dict(conn_health_checks=True)
self.assertIsNone(connection.connection)
# Newly created connections are considered healthy without performing
# the health check.
with patch.object(connection, "is_usable", side_effect=AssertionError):
self.run_query()
old_connection = connection.connection
# Simulate errors_occurred.
connection.errors_occurred = True
# Simulate request_started (the connection is healthy).
connection.close_if_unusable_or_obsolete()
# Persistent connections are enabled.
self.assertIs(old_connection, connection.connection)
# No additional health checks after the one in
# close_if_unusable_or_obsolete() are executed during this "request"
# when running queries.
with patch.object(connection, "is_usable", side_effect=AssertionError):
self.run_query()
@skipUnlessDBFeature("test_db_allows_multiple_connections")
def test_health_checks_disabled(self):
self.patch_settings_dict(conn_health_checks=False)
self.assertIsNone(connection.connection)
# Newly created connections are considered healthy without performing
# the health check.
with patch.object(connection, "is_usable", side_effect=AssertionError):
self.run_query()
old_connection = connection.connection
# Simulate request_finished.
connection.close_if_unusable_or_obsolete()
# Persistent connections are enabled (connection is not).
self.assertIs(old_connection, connection.connection)
# Health checks are not performed.
with patch.object(connection, "is_usable", side_effect=AssertionError):
self.run_query()
# Health check wasn't performed and the connection is unchanged.
self.assertIs(old_connection, connection.connection)
self.run_query()
# The connection is unchanged after the next query either during
# the current "request".
self.assertIs(old_connection, connection.connection)
@skipUnlessDBFeature("test_db_allows_multiple_connections")
def test_set_autocommit_health_checks_enabled(self):
self.patch_settings_dict(conn_health_checks=True)
self.assertIsNone(connection.connection)
# Newly created connections are considered healthy without performing
# the health check.
with patch.object(connection, "is_usable", side_effect=AssertionError):
# Simulate outermost atomic block: changing autocommit for
# a connection.
connection.set_autocommit(False)
self.run_query()
connection.commit()
connection.set_autocommit(True)
old_connection = connection.connection
# Simulate request_finished.
connection.close_if_unusable_or_obsolete()
# Persistent connections are enabled.
self.assertIs(old_connection, connection.connection)
# Simulate connection health check failing.
with patch.object(
connection, "is_usable", return_value=False
) as mocked_is_usable:
# Simulate outermost atomic block: changing autocommit for
# a connection.
connection.set_autocommit(False)
new_connection = connection.connection
self.assertIsNot(new_connection, old_connection)
# Only one health check per "request" is performed, so a query will
# carry on even if the health check fails. This query succeeds
# because the real connection is healthy and only the health check
# failure is mocked.
self.run_query()
connection.commit()
connection.set_autocommit(True)
# The connection is unchanged.
self.assertIs(new_connection, connection.connection)
self.assertEqual(mocked_is_usable.call_count, 1)
# Simulate request_finished.
connection.close_if_unusable_or_obsolete()
# The underlying connection is being reused further with health checks
# succeeding.
connection.set_autocommit(False)
self.run_query()
connection.commit()
connection.set_autocommit(True)
self.assertIs(new_connection, connection.connection)
| ConnectionHealthChecksTests |
python | mlflow__mlflow | mlflow/genai/scorers/builtin_scorers.py | {
"start": 61064,
"end": 63604
} | class ____(BuiltInSessionLevelScorer):
"""
ConversationCompleteness evaluates whether an AI assistant fully addresses all user requests
by the end of the conversation.
For evaluating the completeness of a single user prompt, use the Completeness scorer instead.
This scorer analyzes a complete conversation (represented as a list of traces) to determine
if the assistant successfully addressed all the user's requests in a conversation. It returns
"yes" or "no".
You can invoke the scorer directly with a session for testing, or pass it to
`mlflow.genai.evaluate` for running full evaluation on a dataset.
Args:
name: The name of the scorer. Defaults to "conversation_completeness".
model: {{ model }}
Example (direct usage):
.. code-block:: python
import mlflow
from mlflow.genai.scorers import ConversationCompleteness
# Retrieve a list of traces with the same session ID
session = mlflow.search_traces(
experiment_ids=[experiment_id],
filter_string=f"metadata.`mlflow.trace.session` = '{session_id}'",
return_type="list",
)
assessment = ConversationCompleteness(name="my_completion_check")(session=session)
print(assessment) # Feedback with value "yes" or "no"
Example (with evaluate):
.. code-block:: python
import mlflow
from mlflow.genai.scorers import ConversationCompleteness
session = mlflow.search_traces(
experiment_ids=[experiment_id],
filter_string=f"metadata.`mlflow.trace.session` = '{session_id}'",
return_type="list",
)
result = mlflow.genai.evaluate(data=session, scorers=[ConversationCompleteness()])
"""
name: str = CONVERSATION_COMPLETENESS_ASSESSMENT_NAME
model: str | None = None
description: str = (
"Evaluate whether the assistant fully addresses all user requests by the end of "
"the conversation."
)
def _create_judge(self) -> InstructionsJudge:
return InstructionsJudge(
name=self.name,
instructions=self.instructions,
model=self.model,
description=self.description,
feedback_value_type=Literal["yes", "no"],
generate_rationale_first=True,
)
@property
def instructions(self) -> str:
return CONVERSATION_COMPLETENESS_PROMPT
@experimental(version="3.7.0")
@format_docstring(_MODEL_API_DOC)
| ConversationCompleteness |
python | fastapi__sqlmodel | docs_src/tutorial/indexes/tutorial001_py310.py | {
"start": 71,
"end": 1184
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Deadpond")
results = session.exec(statement)
for hero in results:
print(hero)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | pennersr__django-allauth | allauth/socialaccount/providers/jupyterhub/provider.py | {
"start": 280,
"end": 661
} | class ____(OAuth2Provider):
id = "jupyterhub"
name = "JupyterHub"
account_class = JupyterHubAccount
oauth2_adapter_class = JupyterHubOAuth2Adapter
def extract_uid(self, data):
return str(data.get("name"))
def extract_common_fields(self, data):
return dict(name=data.get("name", ""))
provider_classes = [JupyterHubProvider]
| JupyterHubProvider |
python | google__pytype | pytype/abstract/abstract_utils.py | {
"start": 3114,
"end": 3327
} | class ____(Exception):
"""The error for user-defined generic types."""
def __init__(self, annot, error) -> None:
super().__init__(annot, error)
self.annot = annot
self.error = error
| GenericTypeError |
python | google__jax | tests/mosaic/gpu_test.py | {
"start": 154403,
"end": 165389
} | class ____(TestCase):
@parameterized.named_parameters(
("f32", jnp.float32, 256),
("f16", jnp.float16, 256),
("f16_small", jnp.float16, 128),
)
def test_store_untiled_splat(self, jax_dtype, size):
mlir_dtype = utils.dtype_to_ir_type(jax_dtype)
def kernel(ctx, out, _):
del ctx
arr = mgpu.FragmentedArray.splat(
c(1.0, mlir_dtype), (size,), is_signed=utils.is_signed(jax_dtype)
)
arr.store_untiled(out)
expected = np.ones((size,), jax_dtype)
mosaic_ones = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), (), expected, ()
)()
np.testing.assert_array_equal(mosaic_ones, expected)
@parameterized.product(
shape=((128, 128), (64, 8), (64, 256)),
dtype=(jnp.int32, jnp.int16, jnp.int8),
)
def test_wgmma_tiled_layout(self, shape, dtype):
def kernel(ctx, dst, _):
iota = iota_tensor(*shape, dtype)
tiled = iota.to_layout(fa._tiled_wgmma_layout(shape))
# Note that WGMMA layouts are always (shape[0] // 64, shape[1] // 8, 2, 1)
self.assertEqual(
tiled.registers.shape,
(shape[0] // 64, shape[1] // 8, 1, 1, 2, 1, 1, 1, 1),
)
self.assertEqual(tiled.shape, shape)
self.assertEqual(tiled.mlir_dtype, iota.mlir_dtype)
tiled.store_untiled(dst, optimized=False)
ty = jax.ShapeDtypeStruct(shape, dtype)
f = mgpu.as_gpu_kernel(kernel, (1, 1, 1), (128, 1, 1), (), ty, ())
expected = np.arange(math.prod(shape), dtype=dtype).reshape(shape)
np.testing.assert_array_equal(f(), expected)
@parameterized.product(
dtype=[jnp.int8, jnp.int16, jnp.int32],
swizzle=[16, 32, 64, 128],
num_col_tiles=[1, 2, 3],
row_tiling=[8, 64],
)
@jtu.thread_unsafe_test() # Modifies ``os.environ``.
def test_copy_tiled(self, dtype, swizzle, num_col_tiles, row_tiling):
mlir_dtype = utils.dtype_to_ir_type(dtype)
bw = bytewidth(mlir_dtype)
col_tiling = swizzle // bw
if col_tiling % 8:
self.skipTest("WGMMA layout requires col_tiling % 8 == 0")
m, n = 128, col_tiling * num_col_tiles
tiling = (row_tiling, col_tiling)
def kernel(ctx, in_, out, smems):
smem_in, smem_out, barrier = smems
ctx.async_copy(src_ref=in_, dst_ref=smem_in, swizzle=swizzle, barrier=barrier)
barrier.wait()
t = mgpu.FragmentedArray.load_tiled(
smem_in, swizzle=swizzle, is_signed=True, layout=mgpu.WGMMA_LAYOUT
)
t.store_tiled(smem_out, swizzle=swizzle)
mgpu.commit_shared()
ctx.async_copy(src_ref=smem_out, dst_ref=out, swizzle=swizzle)
ctx.await_async_copy(0)
expected = (
np.arange(m * n, dtype=dtype)
.reshape(m // tiling[0], tiling[0], n // tiling[1], tiling[1])
.transpose(0, 2, 1, 3)
)
with jtu.set_env(MOSAIC_GPU_DUMP_SASS="1"), self.capture_stdout() as sass:
iota = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), expected, expected,
[expected, expected, mgpu.TMABarrier()],
)(expected)
np.testing.assert_array_equal(iota, expected)
# Verify that we don't use too many registers for the transfers.
# We verify LDS and STS separately, because they might use two different
# methods of computing offsets and we don't rely on CSE between them.
expected_regs = swizzle // bytewidth(mlir_dtype) // 8
# When the bytewidth is smaller than 2 the swizzle pattern changes every 2
# column tiles, so we only need half the registers.
if bytewidth(mlir_dtype) < 2:
expected_regs //= 2
for instr in ("STS", "LDS"):
with self.subTest(instr + " count"):
addrs = re.findall(instr + r".* \[(.*)\]", sass())
def get_reg(addr):
if (pos := addr.find("+")) != -1:
return addr[:pos]
return addr
used_regs = {get_reg(addr) for addr in addrs}
try:
self.assertLessEqual(len(used_regs), expected_regs)
except:
problematic_device_patterns = ("RTX PRO 6000 Blackwell", "GB10$")
if match := jtu.device_kind_match(problematic_device_patterns):
self.skipTest(f"{match} uses more registers for an unknown reason")
raise
def test_copy_for_upcast(self):
dtype = jnp.int8
swizzle = 128
col_tiling = swizzle // bytewidth(utils.dtype_to_ir_type(dtype))
m, n = 128, col_tiling * 2
tiling = (64, col_tiling)
layout = fa.WGMMA_LAYOUT_UPCAST_2X
def kernel(ctx, in_, out, smems):
smem_in, smem_out, barrier = smems
ctx.async_copy(src_ref=in_, dst_ref=smem_in, swizzle=swizzle, barrier=barrier)
barrier.wait()
t = mgpu.FragmentedArray.load_tiled(
smem_in, swizzle=swizzle, is_signed=True, layout=layout
)
t.store_tiled(smem_out, swizzle=swizzle)
mgpu.commit_shared()
ctx.async_copy(src_ref=smem_out, dst_ref=out, swizzle=swizzle)
ctx.await_async_copy(0)
x = jax.random.randint(
jax.random.key(42), tile_shape((m, n), tiling), -128, 127, dtype=dtype
)
f = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x, x, [x, x, mgpu.TMABarrier()],
)
np.testing.assert_array_equal(f(x), x)
@parameterized.product(
dtype=[jnp.int16, jnp.int32], # TODO(apaszke): More dtypes
swizzle=[16, 32, 64, 128],
layouts=[
(fa.WGMMA_LAYOUT, fa.WGMMA_TRANSPOSED_LAYOUT),
(fa.TCGEN05_LAYOUT, fa.TCGEN05_TRANSPOSED_LAYOUT),
],
)
@jtu.skip_if_mosaic_gpu_exceeds_shared_memory(
device_patterns=("RTX PRO 6000 Blackwell", "GB10$"))
def test_transpose_tiled(self, dtype, swizzle, layouts):
mlir_dtype = utils.dtype_to_ir_type(dtype)
bw = bytewidth(mlir_dtype)
col_tiling = swizzle // bw
if bw == 2:
m, n = 256, 192
elif bw == 4:
m, n = 256, 96
else:
raise ValueError(f"Unsupported bitwidth: {bw}")
tiling = (8, col_tiling)
if col_tiling < 8:
self.skipTest("Swizzle too small")
layout, transpose_layout = layouts
def kernel(ctx, in_, out, smems):
smem_in, smem_out, barrier = smems
ctx.async_copy(src_ref=in_, dst_ref=smem_in, swizzle=swizzle, barrier=barrier)
barrier.wait()
t = mgpu.FragmentedArray.load_tiled(
smem_in, swizzle=swizzle, is_signed=True, layout=layout
)
smem_out_t = memref_transpose(smem_out, (1, 0, 3, 2))
t.to_layout(transpose_layout).store_tiled(smem_out_t, swizzle=swizzle)
mgpu.commit_shared()
ctx.async_copy(src_ref=smem_out, dst_ref=out, swizzle=swizzle)
ctx.await_async_copy(0)
x = (
np.arange(m * n, dtype=dtype)
.reshape(m // tiling[0], tiling[0], n // tiling[1], tiling[1])
.transpose(0, 2, 1, 3)
)
y_ref = (
np.arange(m * n, dtype=dtype)
.reshape(m, n)
.T.reshape(n // tiling[0], tiling[0], m // tiling[1], tiling[1])
.transpose(0, 2, 1, 3)
)
y = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x, y_ref, [x, y_ref, mgpu.TMABarrier()],
)(x)
np.testing.assert_array_equal(y, y_ref)
@parameterized.parameters(
(fa.WGMMA_LAYOUT_UPCAST_2X, fa.WGMMA_LAYOUT, jnp.int8, jnp.int8, 1),
(fa.WGMMA_LAYOUT_UPCAST_2X, fa.WGMMA_LAYOUT, jnp.int8, jnp.int16, 1),
(fa.WGMMA_LAYOUT_UPCAST_4X, fa.WGMMA_LAYOUT_UPCAST_2X, jnp.int4, jnp.int4, 1),
(fa.WGMMA_LAYOUT_UPCAST_2X, fa.WGMMA_LAYOUT, jnp.int4, jnp.int4, 0.5),
(fa.WGMMA_LAYOUT_UPCAST_4X, fa.WGMMA_LAYOUT, jnp.int4, jnp.int4, 2),
)
@jtu.thread_unsafe_test() # Modifies ``os.environ``.
@jtu.skip_if_mosaic_gpu_exceeds_shared_memory(device_patterns="RTX PRO 6000 Blackwell")
def test_upcast_to_wgmma(
self, start_layout, end_layout, in_dtype, cast_dtype, shfl_per_reg
):
in_dtype = jnp.dtype(in_dtype)
out_dtype = jnp.dtype(jnp.int16)
out_dtype_mlir = utils.dtype_to_ir_type(out_dtype)
swizzle = 128
in_col_tiling = 8 * swizzle // jnp.iinfo(in_dtype).bits
in_tiling = (8, in_col_tiling)
out_col_tiling = swizzle // out_dtype.itemsize
out_tiling = (8, out_col_tiling)
m, n = 64, in_col_tiling * 2
regs_per_thread = None
def kernel(ctx, in_, out, smems):
nonlocal regs_per_thread
smem_in, smem_out, barrier = smems
ctx.async_copy(src_ref=in_, dst_ref=smem_in, swizzle=swizzle, barrier=barrier)
barrier.wait()
t = mgpu.FragmentedArray.load_tiled(
smem_in, swizzle=swizzle, is_signed=True, layout=start_layout
)
regs_per_thread = t.registers.size
t = t.astype(utils.dtype_to_ir_type(cast_dtype), is_signed=True)
t = t.to_layout(end_layout)
t = t.astype(out_dtype_mlir, is_signed=True)
t.store_tiled(smem_out, swizzle=swizzle)
mgpu.commit_shared()
ctx.async_copy(src_ref=smem_out, dst_ref=out, swizzle=swizzle)
ctx.await_async_copy(0)
def tile(x, tiling):
return x.reshape(
x.shape[0] // tiling[0], tiling[0], x.shape[1] // tiling[1], tiling[1]
).transpose(0, 2, 1, 3)
in_iinfo = jnp.iinfo(in_dtype)
x = jax.random.randint(
jax.random.key(42), (m, n), in_iinfo.min, in_iinfo.max, dtype=jnp.int32
).astype(in_dtype)
xt = tile(x, in_tiling)
y = x.astype(out_dtype)
yt = tile(y, out_tiling)
f = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), xt, yt, [xt, yt, mgpu.TMABarrier()],
)
with jtu.set_env(MOSAIC_GPU_DUMP_SASS="1"), self.capture_stdout() as sass:
yt_kernel = f(xt)
jax.block_until_ready(yt_kernel)
np.testing.assert_array_equal(yt_kernel, yt)
try:
self.assertEqual(sass().count("SHFL.BFLY"), regs_per_thread * shfl_per_reg)
except:
problematic_device_patterns = ("RTX PRO 6000 Blackwell", "GB10$")
if match := jtu.device_kind_match(problematic_device_patterns):
self.skipTest(f"{match} requires more SHFL.BFLY for an unknown reason")
raise
@parameterized.product(
in_length=[1, 2, 4, 8],
out_length=[1, 2, 4, 8],
)
def test_convert_tmem_native_vector_length(self, in_length, out_length):
dtype = jnp.dtype(jnp.int16)
def kernel(ctx, in_, out, smems):
smem_in, smem_out, barrier = smems
ctx.async_copy(src_ref=in_, dst_ref=smem_in, barrier=barrier)
barrier.wait()
t = mgpu.FragmentedArray.load_untiled(
smem_in, layout=mgpu.tmem_native_layout(in_length),
is_signed=True, optimized=False
)
t = t.to_layout(mgpu.tmem_native_layout(out_length))
t.store_untiled(smem_out, optimized=False)
mgpu.commit_shared()
ctx.async_copy(src_ref=smem_out, dst_ref=out)
ctx.await_async_copy(0)
iinfo = jnp.iinfo(dtype)
x = jax.random.randint(
jax.random.key(42), (128, 128), iinfo.min, iinfo.max, dtype=jnp.int16
)
f = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x, x, [x, x, mgpu.TMABarrier()],
)
y = f(x)
np.testing.assert_array_equal(y, x)
@dataclasses.dataclass(frozen=True)
| LayoutTest |
python | langchain-ai__langchain | libs/core/langchain_core/messages/content.py | {
"start": 13237,
"end": 13907
} | class ____(TypedDict):
"""Result of a server-side tool call."""
type: Literal["server_tool_result"]
"""Used for discrimination."""
id: NotRequired[str]
"""An identifier associated with the server tool result."""
tool_call_id: str
"""ID of the corresponding server tool call."""
status: Literal["success", "error"]
"""Execution status of the server-side tool."""
output: NotRequired[Any]
"""Output of the executed tool."""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata."""
| ServerToolResult |
python | has2k1__plotnine | plotnine/stats/stat_function.py | {
"start": 402,
"end": 3211
} | class ____(stat):
"""
Superimpose a function onto a plot
{usage}
Parameters
----------
{common_parameters}
fun : callable
Function to evaluate.
n : int, default=101
Number of points at which to evaluate the function.
xlim : tuple, default=None
`x` limits for the range. The default depends on
the `x` aesthetic. There is not an `x` aesthetic
then the `xlim` must be provided.
args : Optional[tuple[Any] | dict[str, Any]], default=None
Arguments to pass to `fun`.
See Also
--------
plotnine.geom_path : The default `geom` for this `stat`.
"""
_aesthetics_doc = """
{aesthetics_table}
**Options for computed aesthetics**
```python
"x" # x points at which the function is evaluated
"fx" # points evaluated at each x
```
"""
DEFAULT_PARAMS = {
"geom": "path",
"position": "identity",
"na_rm": False,
"fun": None,
"n": 101,
"args": None,
"xlim": None,
}
DEFAULT_AES = {"y": after_stat("fx")}
CREATES = {"fx"}
def __init__(self, mapping=None, data=None, **kwargs):
if data is None:
def _data_func(data: pd.DataFrame) -> pd.DataFrame:
if data.empty:
data = pd.DataFrame({"group": [1]})
return data
data = _data_func
super().__init__(mapping, data, **kwargs)
def setup_params(self, data):
if not callable(self.params["fun"]):
raise PlotnineError(
"stat_function requires parameter 'fun' to be "
"a function or any other callable object"
)
def compute_group(self, data, scales):
old_fun: Callable[..., FloatArrayLike] = self.params["fun"]
n = self.params["n"]
args = self.params["args"]
xlim = self.params["xlim"]
range_x = xlim or scales.x.dimension((0, 0))
if isinstance(args, (list, tuple)):
def fun(x):
return old_fun(x, *args)
elif isinstance(args, dict):
def fun(x):
return old_fun(x, **args)
elif args is not None:
def fun(x):
return old_fun(x, args)
else:
def fun(x):
return old_fun(x)
x = np.linspace(range_x[0], range_x[1], n)
# continuous scale
if isinstance(scales.x, scale_continuous):
x = scales.x.inverse(x)
# We know these can handle array_likes
if isinstance(old_fun, (np.ufunc, np.vectorize)):
fx = fun(x)
else:
fx = [fun(val) for val in x]
new_data = pd.DataFrame({"x": x, "fx": fx})
return new_data
| stat_function |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/globus/tests.py | {
"start": 288,
"end": 1034
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = GlobusProvider.id
@override_settings(SOCIALACCOUNT_QUERY_EMAIL=True)
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"identity_provider_display_name": "University of Gozorpazorp",
"sub": "a6fc81e-4a6c1-97ac-b4c6-84ff6a8ce662",
"preferred_username": "morty@ugz.edu",
"identity_provider": "9a4c8312f-9432-9a7c-1654-6a987c6531fa",
"organization": "University of Gozorpazorp",
"email": "morty@ugz.edu",
"name": "Morty Smith"
}
""",
)
def get_expected_to_str(self):
return "morty@ugz.edu"
| GlobusTests |
python | huggingface__transformers | src/transformers/models/lfm2_moe/modular_lfm2_moe.py | {
"start": 5863,
"end": 5913
} | class ____(Lfm2ShortConv):
pass
| Lfm2MoeShortConv |
python | django__django | tests/syndication_tests/feeds.py | {
"start": 5310,
"end": 5877
} | class ____(TestRss2Feed):
def get_object(self, request, entry_id):
return Entry.objects.get(pk=entry_id)
def items(self, obj):
return Article.objects.filter(entry=obj)
def item_link(self, item):
return "%sarticle/%s/" % (item.entry.get_absolute_url(), item.pk)
def item_comments(self, item):
return "%scomments" % self.item_link(item)
def item_description(self, item):
return "Article description: %s" % item.title
def item_title(self, item):
return "Title: %s" % item.title
| TestGetObjectFeed |
python | pytorch__pytorch | torch/distributed/fsdp/_fully_shard/_fsdp_param_group.py | {
"start": 4584,
"end": 4720
} | class ____(NamedTuple):
reduce_scatter_input: torch.Tensor
event: Optional[torch.Event] # reduce-scatter event
| ReduceScatterState |
python | kamyu104__LeetCode-Solutions | Python/number-of-bit-changes-to-make-two-integers-equal.py | {
"start": 51,
"end": 357
} | class ____(object):
def minChanges(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
def popcount(x):
return bin(x).count('1')
return popcount(n^k) if n&k == k else -1
# Time: O(logn)
# Space: O(1)
# bit manipulation
| Solution |
python | tensorflow__tensorflow | tensorflow/python/data/ops/load_op.py | {
"start": 7087,
"end": 7619
} | class ____(dataset_ops.DatasetSource):
"""A dataset for one chunk file from a tf.data distributed snapshot."""
def __init__(self, chunk_file: str, element_spec: Any, compression: str):
self._chunk_file = chunk_file
self._element_spec = element_spec
variant_tensor = ged_ops.snapshot_chunk_dataset(
chunk_file,
compression=compression,
**self._flat_structure)
super().__init__(variant_tensor)
@property
def element_spec(self) -> Any:
return self._element_spec
| _SnapshotChunkDataset |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/call15.py | {
"start": 1211,
"end": 1441
} | class ____[T]:
def __init__(self, value: T) -> None:
self._value: T = value
def update(self, value: T = 0, /) -> "A[T]":
return A(value)
a = A("")
a.update("")
# This should generate an error.
a.update()
| A |
python | matplotlib__matplotlib | lib/matplotlib/hatch.py | {
"start": 2827,
"end": 3507
} | class ____(HatchPatternBase):
def __init__(self, hatch, density):
self.num_lines = int(
(hatch.count('\\') + hatch.count('x') + hatch.count('X'))
* density)
if self.num_lines:
self.num_vertices = (self.num_lines + 1) * 2
else:
self.num_vertices = 0
def set_vertices_and_codes(self, vertices, codes):
steps = np.linspace(-0.5, 0.5, self.num_lines + 1)
vertices[0::2, 0] = 0.0 + steps
vertices[0::2, 1] = 1.0 + steps
vertices[1::2, 0] = 1.0 + steps
vertices[1::2, 1] = 0.0 + steps
codes[0::2] = Path.MOVETO
codes[1::2] = Path.LINETO
| SouthEastHatch |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 172264,
"end": 172985
} | class ____(Operation):
def call(self, x):
return backend.numpy.ravel(x)
def compute_output_spec(self, x):
if None in x.shape:
output_shape = [
None,
]
else:
output_shape = [int(np.prod(x.shape))]
return KerasTensor(output_shape, dtype=x.dtype)
@keras_export(["keras.ops.ravel", "keras.ops.numpy.ravel"])
def ravel(x):
"""Return a contiguous flattened tensor.
A 1-D tensor, containing the elements of the input, is returned.
Args:
x: Input tensor.
Returns:
Output tensor.
"""
if any_symbolic_tensors((x,)):
return Ravel().symbolic_call(x)
return backend.numpy.ravel(x)
| Ravel |
python | getsentry__sentry | src/sentry/sentry_apps/api/endpoints/sentry_app_publish_request.py | {
"start": 1393,
"end": 5643
} | class ____(SentryAppBaseEndpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
def has_ui_component(self, sentry_app):
"""Determine if the sentry app supports issue linking or stack trace linking."""
elements = (sentry_app.schema or {}).get("elements", [])
return any(element.get("type") in COMPONENT_TYPES for element in elements)
def post(self, request: Request, sentry_app) -> Response:
# check status of app to make sure it is unpublished
if sentry_app.is_published:
return Response({"detail": "Cannot publish already published integration."}, status=400)
if sentry_app.is_internal:
return Response({"detail": "Cannot publish internal integration."}, status=400)
if sentry_app.is_publish_request_inprogress:
return Response({"detail": "Publish request in progress."}, status=400)
if not SentryAppAvatar.objects.filter(
sentry_app=sentry_app, color=True, avatar_type=SentryAppAvatarTypes.UPLOAD.value
).exists():
return Response({"detail": "Must upload a logo for the integration."}, status=400)
if (
self.has_ui_component(sentry_app)
and not SentryAppAvatar.objects.filter(
sentry_app=sentry_app,
color=False,
avatar_type=SentryAppAvatarTypes.UPLOAD.value,
).exists()
):
return Response(
{"detail": "Must upload an icon for issue and stack trace linking integrations."},
status=400,
)
assert isinstance(
request.user, (User, RpcUser)
), "User must be authenticated to update a Sentry App"
SentryAppUpdater(
sentry_app=sentry_app,
status=SentryAppStatus.PUBLISH_REQUEST_INPROGRESS_STR,
).run(user=request.user)
org_mapping = OrganizationMapping.objects.filter(
organization_id=sentry_app.owner_id
).first()
if not org_mapping:
return Response(
{"detail": "Cannot publish a custom integration without an organization"},
status=400,
)
organization = organization_service.get_organization_by_id(id=org_mapping.organization_id)
questionnaire_serializer = SentryAppPublishRequestSerializer(data=request.data)
if not questionnaire_serializer.is_valid():
return Response(questionnaire_serializer.errors, status=400)
questionnaire: Iterable[dict[str, str]] = request.data.get("questionnaire", [])
assert organization is not None, "RpcOrganizationContext must exist to get the organization"
new_subject = f"We've received your integration submission for {sentry_app.slug}"
new_context = {
"questionnaire": questionnaire,
"actor": request.user,
"sentry_app": sentry_app,
"organization": org_mapping,
}
template = "sentry/emails/sentry-app-publish-confirmation.txt"
html_template = "sentry/emails/sentry-app-publish-confirmation.html"
new_message = MessageBuilder(
subject=new_subject,
context=new_context,
template=template,
html_template=html_template,
type="sentry-app-publish-request",
)
# Must send to user & partners so that the reply-to email will be each other
recipients = ["integrations-platform@sentry.io", request.user.email]
sent_messages = new_message.send(
to=recipients,
)
# We sent an email to each person in the recip. list so anything less means we had a failure
if sent_messages < len(recipients):
extras = {"organization": org_mapping.slug, **new_context}
sentry_sdk.capture_message("publish-email-failed", "info")
logger.info("publish-email-failed", extra=extras)
return Response(
{"detail": "Something went wrong trying to send publish confirmation email"},
status=500,
)
return Response(status=201)
| SentryAppPublishRequestEndpoint |
python | huggingface__transformers | src/transformers/models/electra/modeling_electra.py | {
"start": 16475,
"end": 19322
} | class ____(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = ElectraAttention(config, is_causal=config.is_decoder, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = ElectraAttention(
config,
is_causal=False,
layer_idx=layer_idx,
is_cross_attention=True,
)
self.intermediate = ElectraIntermediate(config)
self.output = ElectraOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
self_attention_output, _ = self.attention(
hidden_states,
attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
attention_output = self_attention_output
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
" by setting `config.add_cross_attention=True`"
)
cross_attention_output, _ = self.crossattention(
self_attention_output,
None, # attention_mask
encoder_hidden_states,
encoder_attention_mask,
past_key_values=past_key_values,
**kwargs,
)
attention_output = cross_attention_output
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
return layer_output
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Electra
| ElectraLayer |
python | keras-team__keras | keras/src/dtype_policies/dtype_policy.py | {
"start": 10487,
"end": 16005
} | class ____(QuantizedDTypePolicy):
"""Quantized dtype policy for GPTQ quantization.
This policy helps propagate quantization settings for GPTQ
when loading a GPTQ quantized model in Keras format.
Args:
mode: The quantization mode. This should be a string in the format
`"gptq/<weight_bits>/<group_size>"`.
- `"gptq"`: The identifier for the quantization algorithm.
- `<weight_bits>`: Number of bits to quantize weights to.
Supported values are 2, 3, 4, and 8.
- `<group_size>`: The group size for quantization. Supported
values are -1 (for whole-tensor quantization) or any
positive integer. Typically a smaller group size leads
to better accuracy but slower speed.
Example: `"gptq/4/128"`.
source_name: The source dtype policy name, e.g. "float32".
"""
def __init__(
self,
mode,
source_name=None,
):
parts = mode.split("/")
expected_format = "'gptq/<weight_bits>/<group_size>'"
# Validate format
if len(parts) != 3 or parts[0] != "gptq":
raise ValueError(
"Invalid mode for GPTQDTypePolicy. Expected format "
f"{expected_format}, but got '{mode}'."
)
# Validate and cast weight_bits and group_size
try:
weight_bits = int(parts[1])
group_size = int(parts[2])
except ValueError:
raise ValueError(
"Invalid mode for GPTQDTypePolicy. <weight_bits> and "
"<group_size> must be integers. Expected format "
f"{expected_format}, but got '{mode}'."
)
# Validate supported values
if weight_bits not in [2, 3, 4, 8]:
raise ValueError(
"Invalid weight_bits in mode. Supported values are "
f"2, 3, 4, and 8, but got {weight_bits} from '{mode}'."
)
if group_size < -1 or group_size == 0:
raise ValueError(
"Invalid group_size in mode. Supported values are "
"-1 (whole-tensor) or a positive integer, "
f"but got {group_size} from '{mode}'."
)
base_mode = parts[0]
super().__init__(
mode=base_mode,
source_name=source_name,
)
self._name = f"{mode}_from_{source_name}"
self.mode = base_mode
self.weight_bits = weight_bits
self.group_size = group_size
def __eq__(self, other):
if super().__eq__(other) is False:
return False
return (
self.weight_bits == other.weight_bits
and self.group_size == other.group_size
)
def get_config(self):
config = super().get_config()
# Reconstruct the full mode string for serialization
mode = f"{self.mode}/{self.weight_bits}/{self.group_size}"
config.update({"mode": mode})
return config
@keras_export(
[
"keras.config.set_dtype_policy",
"keras.mixed_precision.set_dtype_policy", # Legacy
"keras.mixed_precision.set_global_policy", # Legacy
]
)
def set_dtype_policy(policy):
"""Sets the default dtype policy globally.
Example:
>>> keras.config.set_dtype_policy("mixed_float16")
"""
if not isinstance(policy, DTypePolicy):
if isinstance(policy, str):
if policy.startswith(QUANTIZATION_MODES):
policy = _get_quantized_dtype_policy_by_str(policy)
else:
policy = DTypePolicy(policy)
else:
raise ValueError(
"Invalid `policy` argument. "
"Expected the string name of a policy "
"(such as 'mixed_float16') or a `DTypePolicy` "
f"instance. Received: policy={policy} "
f"(of type {type(policy)})"
)
global_state.set_global_attribute("dtype_policy", policy)
@keras_export(
[
"keras.config.dtype_policy",
"keras.mixed_precision.dtype_policy", # Legacy
"keras.mixed_precision.global_policy", # Legacy
]
)
def dtype_policy():
"""Returns the current default dtype policy object."""
policy = global_state.get_global_attribute("dtype_policy", None)
if policy is None:
policy = DTypePolicy(backend.floatx())
set_dtype_policy(policy)
return policy
def _get_quantized_dtype_policy_by_str(policy):
if not isinstance(policy, str):
raise TypeError(f"`policy` must be a string. Received: policy={policy}")
if not policy.startswith(QUANTIZATION_MODES):
raise ValueError(
"`policy` is incompatible with the current supported quantization."
)
split_name = policy.split("_from_")
if len(split_name) != 2:
raise ValueError(
"Cannot convert `policy` into a valid pair (`mode`, `source_name`) "
"to instantiate `QuantizedDTypePolicy`. "
f"Received: policy={policy}"
)
mode, source_name = split_name
if policy.startswith("int8") or policy.startswith("int4"):
return QuantizedDTypePolicy(mode, source_name)
elif policy.startswith("gptq"):
return GPTQDTypePolicy(mode, source_name)
elif policy.startswith("float8"):
return QuantizedFloat8DTypePolicy(mode, source_name)
else:
raise NotImplementedError
| GPTQDTypePolicy |
python | celery__celery | t/unit/app/test_beat.py | {
"start": 892,
"end": 1124
} | class ____:
def test_beat_lazy_func(self):
def add(a, b):
return a + b
result = BeatLazyFunc(add, 1, 2)
assert add(1, 2) == result()
assert add(1, 2) == result.delay()
| test_BeatLazyFunc |
python | pytorch__pytorch | test/dynamo/test_recompile_ux.py | {
"start": 446,
"end": 11336
} | class ____(torch._dynamo.test_case.TestCase):
# TODO(whc) dynamo actually recompiles one more time than the cache limit
cache_limit = 1
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._exit_stack.enter_context(
torch._dynamo.config.patch("recompile_limit", cls.cache_limit)
)
def test_drop_cache_on_skip(self):
def model(x, i):
return x + i
attached = False
triggered = False
def trigger():
nonlocal triggered
triggered = True
def compiler(gm, input):
nonlocal attached
f = gm.forward
assert not attached
# NB: making this a weakref.ref causes the cycle to no
# longer be promptly GC'ed
weakref.finalize(f, trigger)
attached = True
return f
x = torch.randn(2)
for i in range(2):
opt_model = torch.compile(model, backend=compiler)
opt_model(x, i)
self.assertTrue(triggered)
def test_loop_torture(self):
def loop_torture(input, iters):
out = input
# randint itself causes one graph break
for _ in range(iters):
out += input
return out
compile_counter = torch._dynamo.testing.CompileCounter()
for _ in range(10):
x = torch.randn(3)
iters = torch.randint(low=0, high=1000, size=())
opt_loop_torture = torch.compile(loop_torture, backend=compile_counter)
opt_loop_torture(x, iters)
# Currently, we recompile each time,
# We'd probably like to bail out quickly and warn
# TODO(whc) these checks fail on py37. Why?
# self.assertEqual(counters["frames"]["total"], 2 + self.cache_limit)
# self.assertEqual(counters["frames"]["ok"], 1 + self.cache_limit)
# compile_counter only sees frames that were fed to the backend compiler,
# which is a subset of counters["frames"]["ok"] -- probably because
# counters["frames"]["ok"] includes frames not containing torch ops?
self.assertEqual(compile_counter.frame_count, self.cache_limit)
@torch._dynamo.config.patch("automatic_dynamic_shapes", False)
def test_dynamic_input(self):
def model(input):
return input + input
expected_recompiles = 2
compile_counter = torch._dynamo.testing.CompileCounter()
with torch._dynamo.config.patch("recompile_limit", expected_recompiles):
with self.assertLogs(logger="torch._dynamo", level="WARNING") as logs:
for _ in range(10):
bsz = torch.randint(low=0, high=1000, size=())
x = torch.randn((bsz, 3, 4))
opt_model = torch.compile(model, backend=compile_counter)
opt_model(x)
self.assertEqual(compile_counter.frame_count, expected_recompiles)
self.assertEqual(len(logs.records), 1)
print(logs.records[0])
self.assertTrue(
logs.records[0]
.getMessage()
.startswith("torch._dynamo hit config.recompile_limit")
)
@unittest.skipIf(
not torch.cuda.is_available() and not torch.xpu.is_available(),
"requires cuda or xpu",
)
def test_nvfuser_guards(self):
# we may want to model dynamo's guards sufficiently after nvfuser's ProfilingExecutor guards
# such that we ensure dynamo is in charge of all the recompilations at the top level,
# and we could thus simplify the underlying torchscript executor
def func(a, b, c):
return a + b * c
a = torch.rand(3, 4, 5, device=device_type)
b = torch.rand(3, 4, 5, device=device_type)
b_v = torch.rand(3, 5, 4, device=device_type).view(3, 4, 5)
b_p = torch.rand(3, 5, 4, device=device_type).permute(0, 2, 1)
c = torch.rand(3, 4, 5, device=device_type)
compile_counter = torch._dynamo.testing.CompileCounter()
with torch._dynamo.config.patch("recompile_limit", 2):
opt_func = torch.compile(func, backend=compile_counter)
opt_func(a, b, c) # warmup
self.assertEqual(compile_counter.frame_count, 1)
opt_func(a, b, c) # no guard fail or recompile
self.assertEqual(compile_counter.frame_count, 1)
opt_func(a, b_v, c) # a view should not cause nvfuser recompile
self.assertEqual(compile_counter.frame_count, 1)
opt_func(a, b_p, c) # a permutation should cause recompile
self.assertEqual(compile_counter.frame_count, 2)
def assert_single_log_contains(self, logs, contains_str):
self.assertEqual(len(logs.records), 1)
self.assertTrue(
logs.records[0].getMessage().find(contains_str) > 0,
msg=f'Expected to find "{contains_str}" in log "{logs.records[0].getMessage()}"',
)
def test_verbose_tensor_check(self):
def func(a):
# Warning: choose a function here whose meta implementation lives
# entirely in C++. If you do a Python one, Dynamo will dive into
# torch._refs which is OK but it will muddy up the warnings
return torch.add(a, 4)
def cache_fail_test(cached_input, missed_input, expected_failure):
# TODO(whc) maybe its hacky to have a 'test within a test' but this seemed convenient
torch._dynamo.reset()
torch._dynamo.utils.counters.clear()
opt_func = torch.compile(func, backend="eager")
# warmup
opt_func(cached_input)
with self.assertLogs(logger="torch._dynamo", level="WARNING") as logs:
opt_func = torch.compile(func, backend="eager")
opt_func(missed_input)
self.assert_single_log_contains(logs, expected_failure)
a = torch.rand(3, 4, 5)
cache_fail_test(
a,
a[0:2, :, :],
"tensor 'a' size mismatch at index 0. expected 3, actual 2",
)
cache_fail_test(
a,
a.clone().as_strided((3, 4, 5), stride=(1, 3, 12)),
"tensor 'a' stride mismatch at index 0. expected 20, actual 1",
)
cache_fail_test(a, a[0, :, :], "tensor 'a' rank mismatch. expected 3, actual 2")
cache_fail_test(a, a.to("meta"), "tensor 'a' dispatch key set mismatch.")
cache_fail_test(
a,
a.to(torch.float16),
"tensor 'a' dtype mismatch. expected Float, actual Half",
)
a_grad = a.clone()
a_grad.requires_grad = True
cache_fail_test(
a,
a_grad,
"tensor 'a' requires_grad mismatch. expected requires_grad=0",
)
def test_mismatched_type(self):
a = torch.rand(3, 4, 5)
b = torch.rand(3, 4, 5)
def func(a, b):
return a + b
opt_func = torch.compile(func, backend="eager")
# warmup
opt_func(a, b)
with self.assertLogs(logger="torch._dynamo", level="WARNING") as logs:
opt_func = torch.compile(func, backend="eager")
opt_func(a, 1)
self.assert_single_log_contains(
logs,
"expected type of 'b' to be a tensor type, ' but found <class 'int'>",
)
@torch._dynamo.config.patch(recompile_limit=1, fail_on_recompile_limit_hit=True)
def test_fail_on_recompile_limit_hit(self):
@torch.compile(backend="eager")
def func(b, a):
if a:
return b * 2
else:
return b + 1
func(torch.randn(5), True)
with self.assertRaises(FailOnRecompileLimitHit):
func(torch.randn(5), False)
@torch._dynamo.config.patch("recompile_limit", 32)
def test_multiple_guard_fails(self):
failure_reasons = []
def guard_fail_fn(failure):
failure_reasons.append(failure[0])
def f(x):
return torch.relu(x)
opt_f = torch._dynamo.optimize(
backend="eager", guard_fail_fn=guard_fail_fn, dynamic=False
)(f)
for i in range(5):
failure_reasons.clear()
opt_f(torch.randn(8 + i))
failure_str = "\n".join(failure_reasons)
for line in [
"tensor 'x' size mismatch at index 0. expected 11, actual 12",
"tensor 'x' size mismatch at index 0. expected 10, actual 12",
"tensor 'x' size mismatch at index 0. expected 9, actual 12",
"tensor 'x' size mismatch at index 0. expected 8, actual 12",
]:
self.assertIn(
line,
failure_str,
)
@torch._dynamo.config.patch("recompile_limit", 32)
def test_multiple_guard_fails_report_all(self):
with log_settings(kwargs_to_settings(recompiles_verbose=True)):
failure_reasons = []
def guard_fail_fn(failure):
failure_reasons.append(failure[0])
def f(x):
return torch.ones(len(x), x[-1])
opt_f = torch._dynamo.optimize(
backend="eager", guard_fail_fn=guard_fail_fn, dynamic=False
)(f)
opt_f([4, 5, 6])
def filter_reasons():
return "\n".join(
[
line
for line in "\n".join(failure_reasons).splitlines()
if not line.startswith("___check_type_id")
]
)
failure_reasons.clear()
opt_f([7, 8])
for line in ["len(x) == 3"]:
self.assertIn(line, filter_reasons())
failure_reasons.clear()
opt_f([9])
for line in ["len(x) == 2", "len(x) == 3"]:
self.assertIn(line, filter_reasons())
@torch._dynamo.config.patch(recompile_limit=1)
def test_recompile_child_run_only(self):
def f(x, n):
if torch.compiler.is_compiling():
x = x + 1
x = g(x)
return h(x) + n
def g(x):
if torch.compiler.is_compiling():
return x + 2
return x
def h(x):
if torch.compiler.is_compiling():
return x + 4
return x
torch.compile(g, backend="eager")(torch.randn(3))
inp = torch.randn(3)
opt_f = torch.compile(f, backend="eager")
opt_f(inp, 0)
# expect f to run eager, g compiled (from previous invocatino), h eager
res = opt_f(inp, 1)
self.assertEqual(res, inp + 3)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| RecompileUxTests |
python | apache__airflow | providers/google/tests/unit/google/cloud/links/test_dataplex.py | {
"start": 11989,
"end": 13103
} | class ____:
@pytest.mark.db_test
def test_get_link(self, create_task_instance_of_operator, session, mock_supervisor_comms):
expected_url = EXPECTED_DATAPLEX_CATALOG_ASPECT_TYPE_LINK
link = DataplexCatalogAspectTypeLink()
ti = create_task_instance_of_operator(
DataplexCatalogGetAspectTypeOperator,
dag_id="test_link_dag",
task_id="test_link_task",
location=TEST_LOCATION,
aspect_type_id=TEST_ASPECT_TYPE_ID,
project_id=TEST_PROJECT_ID,
)
session.add(ti)
session.commit()
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key="key",
value={
"aspect_type_id": ti.task.aspect_type_id,
"location": ti.task.location,
"project_id": ti.task.project_id,
},
)
actual_url = link.get_link(operator=ti.task, ti_key=ti.key)
assert actual_url == expected_url
| TestDataplexCatalogAspectTypeLink |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_device_class_configuration.py | {
"start": 383,
"end": 3547
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'opaque': 'V1beta1OpaqueDeviceConfiguration'
}
attribute_map = {
'opaque': 'opaque'
}
def __init__(self, opaque=None, local_vars_configuration=None): # noqa: E501
"""V1beta1DeviceClassConfiguration - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._opaque = None
self.discriminator = None
if opaque is not None:
self.opaque = opaque
@property
def opaque(self):
"""Gets the opaque of this V1beta1DeviceClassConfiguration. # noqa: E501
:return: The opaque of this V1beta1DeviceClassConfiguration. # noqa: E501
:rtype: V1beta1OpaqueDeviceConfiguration
"""
return self._opaque
@opaque.setter
def opaque(self, opaque):
"""Sets the opaque of this V1beta1DeviceClassConfiguration.
:param opaque: The opaque of this V1beta1DeviceClassConfiguration. # noqa: E501
:type: V1beta1OpaqueDeviceConfiguration
"""
self._opaque = opaque
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1DeviceClassConfiguration):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1DeviceClassConfiguration):
return True
return self.to_dict() != other.to_dict()
| V1beta1DeviceClassConfiguration |
python | huggingface__transformers | tests/models/perception_lm/test_video_processing_perception_lm.py | {
"start": 3274,
"end": 4957
} | class ____(VideoProcessingTestMixin, unittest.TestCase):
fast_video_processing_class = PerceptionLMVideoProcessor if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.video_processor_tester = PerceptionLMVideoProcessingTester(self)
@property
def video_processor_dict(self):
return self.video_processor_tester.prepare_video_processor_dict()
def test_video_processor_properties(self):
video_processing = self.fast_video_processing_class(**self.video_processor_dict)
self.assertTrue(hasattr(video_processing, "do_resize"))
self.assertTrue(hasattr(video_processing, "size"))
self.assertTrue(hasattr(video_processing, "do_center_crop"))
self.assertTrue(hasattr(video_processing, "center_crop"))
self.assertTrue(hasattr(video_processing, "do_normalize"))
self.assertTrue(hasattr(video_processing, "image_mean"))
self.assertTrue(hasattr(video_processing, "image_std"))
self.assertTrue(hasattr(video_processing, "do_convert_rgb"))
def test_video_processor_from_dict_with_kwargs(self):
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
self.assertEqual(video_processor.size, {"height": 20, "width": 20})
self.assertEqual(video_processor.crop_size, {"height": 18, "width": 18})
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict, size=42, crop_size=84)
self.assertEqual(video_processor.size, {"height": 42, "width": 42})
self.assertEqual(video_processor.crop_size, {"height": 84, "width": 84})
| PerceptionLMVideoProcessingTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType5.py | {
"start": 677,
"end": 1039
# Type-checker sample: a generic class whose classmethod introduces fresh
# type variables, exercising inference against a union-typed parameter.
class ____(Generic[_KT, _VT]):
    @classmethod
    def method1(cls, i: Iterable[_T], v: _S) -> "A[_T, _S]": ...
# The checker must pick the union member that matches the inferred argument
# type and solve _X accordingly (expected results are in the reveal_type
# calls below; this file is consumed by the type checker, not executed).
def func1(__x: A[int, _X] | A[str, _X] | A[str | int, _X]) -> A[int, _X]: ...
v3 = func1(A.method1("a", "b"))
reveal_type(v3, expected_text="A[int, str]")
# str.maketrans with a dict maps code points (int) to replacement strings.
v4 = str.maketrans(dict.fromkeys("a", "b"))
reveal_type(v4, expected_text="dict[int, str]")
| A |
python | joke2k__faker | faker/providers/automotive/en_CA/__init__.py | {
"start": 48,
"end": 919
class ____(AutomotiveProvider):
    """Implement automotive provider for ``en_CA`` locale.
    Sources:
    - https://www.revolvy.com/main/index.php?s=Canadian%20licence%20plate%20designs%20and%20serial%20formats
    """
    # Plate templates, one group per province/territory.  The placeholders
    # follow faker's bothify convention ("?" -> random letter, "#" -> random
    # digit) — resolved by the base AutomotiveProvider's generator.
    license_formats = (
        # Alberta
        "???-####",
        # BC
        "??# ##?",
        "?? ####",
        # Manitoba
        "??? ###",
        # New Brunswick
        "??? ###",
        # Newfoundland and Labrador
        "??? ###",
        # NWT
        "######",
        # Nova Scotia
        "??? ###",
        # Nunavut
        "### ###",
        # Ontario
        "### ???",
        "???? ###",
        "??# ###",
        "### #??",
        "?? ####",
        "GV??-###",
        # PEI
        "## ##??",
        # Quebec
        "?## ???",
        # Saskatchewan
        "### ???",
        # Yukon
        "???##",
    )
| Provider |
python | openai__openai-python | src/openai/resources/fine_tuning/jobs/jobs.py | {
"start": 36093,
"end": 37033
class ____:
    """Streaming-response facade over ``AsyncJobs``: each job method is
    re-exposed wrapped so its HTTP response can be consumed as a stream."""
    # Names of the AsyncJobs methods that get a streamed-response wrapper.
    _WRAPPED_METHODS = (
        "create",
        "retrieve",
        "list",
        "cancel",
        "list_events",
        "pause",
        "resume",
    )
    def __init__(self, jobs: AsyncJobs) -> None:
        self._jobs = jobs
        # Bind a wrapped version of every underlying method onto the instance.
        for method_name in self._WRAPPED_METHODS:
            setattr(
                self,
                method_name,
                async_to_streamed_response_wrapper(getattr(jobs, method_name)),
            )
    @cached_property
    def checkpoints(self) -> AsyncCheckpointsWithStreamingResponse:
        return AsyncCheckpointsWithStreamingResponse(self._jobs.checkpoints)
| AsyncJobsWithStreamingResponse |
python | cython__cython | Cython/Plex/Regexps.py | {
"start": 6789,
"end": 7411
class ____(RE):
    """
    SpecialSymbol(sym) is an RE which matches the special input
    symbol |sym|, which is one of BOL, EOL or EOF.
    """
    # A special symbol consumes exactly one (virtual) input symbol, so this
    # RE can never match the empty string.
    nullable = 0
    # It never stands for a literal newline character.
    match_nl = 0
    # One of BOL, EOL, EOF; set in __init__.
    sym = None
    def __init__(self, sym):
        self.sym = sym
    def build_machine(self, m, initial_state, final_state, match_bol, nocase):
        # Sequences 'bol bol' and 'bol eof' are impossible, so only need
        # to allow for bol if sym is eol
        if match_bol and self.sym == EOL:
            initial_state = self.build_opt(m, initial_state, BOL)
        initial_state.add_transition(self.sym, final_state)
| SpecialSymbol |
python | walkccc__LeetCode | solutions/2241. Design an ATM Machine/2241.py | {
"start": 0,
"end": 555
class ____:
    """Cash machine holding five denominations (LC 2241, Design an ATM).

    Withdrawals are served greedily from the largest banknote downward;
    if the exact amount cannot be formed, nothing is dispensed.
    """
    def __init__(self):
        # Fixed denominations and how many of each the machine holds.
        self.banknotes = [20, 50, 100, 200, 500]
        self.bank = [0] * 5
    def deposit(self, banknotesCount: list[int]) -> None:
        """Add the given per-denomination counts to the machine."""
        for slot in range(5):
            self.bank[slot] += banknotesCount[slot]
    def withdraw(self, amount: int) -> list[int]:
        """Dispense ``amount`` greedily; return per-denomination counts or [-1]."""
        dispensed = [0] * 5
        for slot in range(4, -1, -1):
            use = min(self.bank[slot], amount // self.banknotes[slot])
            dispensed[slot] = use
            amount -= use * self.banknotes[slot]
        if amount:
            # Exact amount unreachable: leave the bank untouched.
            return [-1]
        for slot, use in enumerate(dispensed):
            self.bank[slot] -= use
        return dispensed
| ATM |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_block_types.py | {
"start": 1049,
"end": 5031
class ____:
    """API tests for POST /block_types/: creation round-trip, slug
    collisions, character validation, and reserved-name protection."""
    async def test_create_block_type(self, client):
        # Create a fully populated block type, then read it back by id and
        # verify every field survived the round trip.
        response = await client.post(
            "/block_types/",
            json=BlockTypeCreate(
                name="x",
                slug="x",
                logo_url="http://example.com/logo.png",
                documentation_url="http://example.com/docs.html",
                description="A block, verily",
                code_example=CODE_EXAMPLE,
            ).model_dump(),
        )
        assert response.status_code == status.HTTP_201_CREATED
        result = BlockType.model_validate(response.json())
        assert result.name == "x"
        assert result.slug == "x"
        assert result.logo_url == "http://example.com/logo.png"
        assert result.documentation_url == "http://example.com/docs.html"
        assert result.description == "A block, verily"
        assert result.code_example == CODE_EXAMPLE
        response = await client.get(f"/block_types/{result.id}")
        api_block_type = BlockType.model_validate(response.json())
        assert api_block_type.name == "x"
        assert api_block_type.slug == "x"
        assert api_block_type.logo_url == "http://example.com/logo.png"
        assert api_block_type.documentation_url == "http://example.com/docs.html"
        assert api_block_type.description == "A block, verily"
        assert api_block_type.code_example == CODE_EXAMPLE
    async def test_create_block_type_with_existing_slug(self, client):
        # The slug is unique: a second create with the same slug conflicts.
        response = await client.post(
            "/block_types/",
            json=BlockTypeCreate(
                name="x",
                slug="x",
                logo_url="http://example.com/logo.png",
                documentation_url="http://example.com/docs.html",
            ).model_dump(),
        )
        assert response.status_code == status.HTTP_201_CREATED
        response = await client.post(
            "/block_types/",
            json=BlockTypeCreate(
                name="x",
                slug="x",
                logo_url="http://example.com/logo.png",
                documentation_url="http://example.com/docs.html",
            ).model_dump(),
        )
        assert response.status_code == status.HTTP_409_CONFLICT
    @pytest.mark.parametrize(
        "name",
        [
            "my block type",
            "my:block type",
            r"my\block type",
            "my👍block type",
            "my|block type",
        ],
    )
    async def test_create_block_type_with_nonstandard_characters(self, client, name):
        # Unusual-but-allowed characters (spaces, colons, emoji, ...) pass.
        response = await client.post(
            "/block_types/",
            json=dict(name=name, slug=slugify(name)),
        )
        assert response.status_code == status.HTTP_201_CREATED
    @pytest.mark.parametrize(
        "name",
        [
            "my%block_type",
            "my/block type",
        ],
    )
    async def test_create_block_type_with_invalid_characters(self, client, name):
        # '%' and '/' would break URL routing, so validation rejects them.
        response = await client.post(
            "/block_types/",
            json=dict(name=name, slug=slugify(name)),
        )
        assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
    @pytest.mark.parametrize(
        "name", ["PrefectBlockType", "Prefect", "prefect_block_type", "pReFeCt!"]
    )
    async def test_create_block_type_with_reserved_name_fails(self, client, name):
        # The "Prefect" prefix is reserved (case-insensitively) for built-ins.
        response = await client.post(
            "/block_types/",
            json=dict(name=name, slug=slugify(name)),
        )
        assert response.status_code == status.HTTP_403_FORBIDDEN
        assert (
            response.json()["detail"]
            == "Block type names beginning with 'Prefect' are reserved."
        )
    async def test_create_block_type_with_invalid_slug_fails(self, client):
        response = await client.post(
            "/block_types/",
            json=dict(name="bad slug", slug="bad slug"),
        )
        assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
| TestCreateBlockType |
python | scipy__scipy | scipy/stats/tests/test_odds_ratio.py | {
"start": 250,
"end": 6727
class ____:
    """Tests for scipy.stats.odds_ratio: agreement with R, internal
    self-consistency of the conditional odds ratio and its CI, and input
    validation."""
    @pytest.mark.parametrize('parameters, rresult', data)
    def test_results_from_r(self, parameters, rresult):
        # R spells alternatives with dots (e.g. "two.sided"); scipy uses dashes.
        alternative = parameters.alternative.replace('.', '-')
        result = odds_ratio(parameters.table)
        # The results computed by R are not very accurate.
        if result.statistic < 400:
            or_rtol = 5e-4
            ci_rtol = 2e-2
        else:
            or_rtol = 5e-2
            ci_rtol = 1e-1
        assert_allclose(result.statistic,
                        rresult.conditional_odds_ratio, rtol=or_rtol)
        ci = result.confidence_interval(parameters.confidence_level,
                                        alternative)
        assert_allclose((ci.low, ci.high), rresult.conditional_odds_ratio_ci,
                        rtol=ci_rtol)
        # Also do a self-check for the conditional odds ratio.
        # With the computed conditional odds ratio as the noncentrality
        # parameter of the noncentral hypergeometric distribution with
        # parameters table.sum(), table[0].sum(), and table[:,0].sum() as
        # total, ngood and nsample, respectively, the mean of the distribution
        # should equal table[0, 0].
        cor = result.statistic
        table = np.array(parameters.table)
        total = table.sum()
        ngood = table[0].sum()
        nsample = table[:, 0].sum()
        # nchypergeom_fisher does not allow the edge cases where the
        # noncentrality parameter is 0 or inf, so handle those values
        # separately here.
        if cor == 0:
            nchg_mean = hypergeom.support(total, ngood, nsample)[0]
        elif cor == np.inf:
            nchg_mean = hypergeom.support(total, ngood, nsample)[1]
        else:
            nchg_mean = nchypergeom_fisher.mean(total, ngood, nsample, cor)
        assert_allclose(nchg_mean, table[0, 0], rtol=1e-13)
        # Check that the confidence interval is correct.
        alpha = 1 - parameters.confidence_level
        if alternative == 'two-sided':
            if ci.low > 0:
                sf = nchypergeom_fisher.sf(table[0, 0] - 1,
                                           total, ngood, nsample, ci.low)
                assert_allclose(sf, alpha/2, rtol=1e-11)
            if np.isfinite(ci.high):
                cdf = nchypergeom_fisher.cdf(table[0, 0],
                                             total, ngood, nsample, ci.high)
                assert_allclose(cdf, alpha/2, rtol=1e-11)
        elif alternative == 'less':
            if np.isfinite(ci.high):
                cdf = nchypergeom_fisher.cdf(table[0, 0],
                                             total, ngood, nsample, ci.high)
                assert_allclose(cdf, alpha, rtol=1e-11)
        else:
            # alternative == 'greater'
            if ci.low > 0:
                sf = nchypergeom_fisher.sf(table[0, 0] - 1,
                                           total, ngood, nsample, ci.low)
                assert_allclose(sf, alpha, rtol=1e-11)
    @pytest.mark.parametrize('table', [
        [[0, 0], [5, 10]],
        [[5, 10], [0, 0]],
        [[0, 5], [0, 10]],
        [[5, 0], [10, 0]],
    ])
    def test_row_or_col_zero(self, table):
        # A zero row/column makes the odds ratio undefined: nan statistic
        # and the maximal (0, inf) confidence interval.
        result = odds_ratio(table)
        assert_equal(result.statistic, np.nan)
        ci = result.confidence_interval()
        assert_equal((ci.low, ci.high), (0, np.inf))
    @pytest.mark.parametrize("case",
                             [[0.95, 'two-sided', 0.4879913, 2.635883],
                              [0.90, 'two-sided', 0.5588516, 2.301663]])
    def test_sample_odds_ratio_ci(self, case):
        # Compare the sample odds ratio confidence interval to the R function
        # oddsratio.wald from the epitools package, e.g.
        # > library(epitools)
        # > table = matrix(c(10, 20, 41, 93), nrow=2, ncol=2, byrow=TRUE)
        # > result = oddsratio.wald(table)
        # > result$measure
        #          odds ratio with 95% C.I.
        # Predictor  estimate     lower    upper
        #   Exposed1 1.000000        NA       NA
        #   Exposed2 1.134146 0.4879913 2.635883
        confidence_level, alternative, ref_low, ref_high = case
        table = [[10, 20], [41, 93]]
        result = odds_ratio(table, kind='sample')
        assert_allclose(result.statistic, 1.134146, rtol=1e-6)
        ci = result.confidence_interval(confidence_level, alternative)
        assert_allclose([ci.low, ci.high], [ref_low, ref_high], rtol=1e-6)
    @pytest.mark.slow
    @pytest.mark.parametrize('alternative', ['less', 'greater', 'two-sided'])
    def test_sample_odds_ratio_one_sided_ci(self, alternative):
        # can't find a good reference for one-sided CI, so bump up the sample
        # size and compare against the conditional odds ratio CI
        table = [[1000, 2000], [4100, 9300]]
        res = odds_ratio(table, kind='sample')
        ref = odds_ratio(table, kind='conditional')
        assert_allclose(res.statistic, ref.statistic, atol=1e-5)
        assert_allclose(res.confidence_interval(alternative=alternative),
                        ref.confidence_interval(alternative=alternative),
                        atol=2e-3)
    @pytest.mark.parametrize('kind', ['sample', 'conditional'])
    @pytest.mark.parametrize('bad_table', [123, "foo", [10, 11, 12]])
    def test_invalid_table_shape(self, kind, bad_table):
        with pytest.raises(ValueError, match="Invalid shape"):
            odds_ratio(bad_table, kind=kind)
    def test_invalid_table_type(self):
        with pytest.raises(ValueError, match='must be an array of integers'):
            odds_ratio([[1.0, 3.4], [5.0, 9.9]])
    def test_negative_table_values(self):
        with pytest.raises(ValueError, match='must be nonnegative'):
            odds_ratio([[1, 2], [3, -4]])
    def test_invalid_kind(self):
        with pytest.raises(ValueError, match='`kind` must be'):
            odds_ratio([[10, 20], [30, 14]], kind='magnetoreluctance')
    def test_invalid_alternative(self):
        result = odds_ratio([[5, 10], [2, 32]])
        with pytest.raises(ValueError, match='`alternative` must be'):
            result.confidence_interval(alternative='depleneration')
    @pytest.mark.parametrize('level', [-0.5, 1.5])
    def test_invalid_confidence_level(self, level):
        result = odds_ratio([[5, 10], [2, 32]])
        with pytest.raises(ValueError, match='must be between 0 and 1'):
            result.confidence_interval(confidence_level=level)
| TestOddsRatio |
python | apache__airflow | providers/ydb/tests/unit/ydb/hooks/test_ydb.py | {
"start": 1268,
"end": 1332
class ____:
    # Minimal stand-in for a YDB driver in tests: `wait` (normally a blocking
    # readiness call) is a no-op.  Defined without `self` on purpose — the
    # catch-all *args swallows the instance, so any call signature succeeds.
    def wait(*args, **kwargs):
        pass
| FakeDriver |
python | numba__llvmlite | llvmlite/binding/ffi.py | {
"start": 4029,
"end": 6664
class ____(object):
    """Wrap libllvmlite with a lock such that only one thread may access it at
    a time.
    This class duck-types a CDLL.
    """
    __slots__ = ['_lib_handle', '_fntab', '_lock']
    def __init__(self):
        # The CDLL handle is created lazily on first use (see _lib).
        self._lib_handle = None
        # Cache of lock-wrapped function objects, keyed by symbol name.
        self._fntab = {}
        self._lock = _LLVMLock()
    def _load_lib(self):
        # Load the shared library from package resources and sanity-check it
        # by calling a known symbol; failures are re-raised as OSError with
        # an explanatory message.
        test_sym = "LLVMPY_GetVersionInfo"
        mod_name = __name__.rpartition(".")[0]
        lib_name = get_library_name()
        with _suppress_cleanup_errors(_importlib_resources_path(
                mod_name, lib_name)) as lib_path:
            try:
                self._lib_handle = ctypes.CDLL(str(lib_path))
                # Check that we can look up expected symbols.
                getattr(self._lib_handle, test_sym)()
            except OSError:
                # OSError may be raised if the file cannot be opened, or is not
                # a shared library.
                msg = (f"Could not find/load shared object file '{lib_name}' "
                       f"from resource location: '{mod_name}'. This could mean "
                       "that the library literally cannot be found, but may "
                       "also mean that the permissions are incorrect or that a "
                       "dependency of/a symbol in the library could not be "
                       "resolved.")
                raise OSError(msg)
            except AttributeError:
                # AttributeError is raised if the test_sym symbol does not
                # exist.
                msg = ("During testing of symbol lookup, the symbol "
                       f"'{test_sym}' could not be found in the library "
                       f"'{lib_path}'")
                raise OSError(msg)
    @property
    def _lib(self):
        # Not threadsafe.
        if not self._lib_handle:
            self._load_lib()
        return self._lib_handle
    def __getattr__(self, name):
        # Fast path: return the previously wrapped function.
        try:
            return self._fntab[name]
        except KeyError:
            pass
        # Lazily wraps new functions as they are requested
        cfn = getattr(self._lib, name)
        wrapped = _lib_fn_wrapper(self._lock, cfn)
        self._fntab[name] = wrapped
        return wrapped
    @property
    def _name(self):
        """The name of the library passed in the CDLL constructor.
        For duck-typing a ctypes.CDLL
        """
        return self._lib._name
    @property
    def _handle(self):
        """The system handle used to access the library.
        For duck-typing a ctypes.CDLL
        """
        return self._lib._handle
| _lib_wrapper |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/partialmethod.py | {
"start": 38,
"end": 421
class ____:
    """An example for partialmethod.
    refs: https://docs.python.org/3/library/functools.html#functools.partialmethod
    """
    # NOTE: this class is an autodoc test fixture; the "#:" marker below is a
    # Sphinx attribute docstring and must keep its exact form, and set_dead is
    # deliberately left undocumented.
    def set_state(self, state):
        """Update state of cell to *state*."""
    #: Make a cell alive.
    set_alive = partialmethod(set_state, True)
    # a partialmethod with no docstring
    set_dead = partialmethod(set_state, False)
| Cell |
python | tensorflow__tensorflow | tensorflow/compiler/tests/searchsorted_op_test.py | {
"start": 901,
"end": 2836
class ____(xla_test.XLATestCase):
  """Tests XLA's searchsorted against NumPy and documented 2D examples."""
  def test1D(self):
    # Test against NumPy implementation (which is 1D only).
    np.random.seed(1)
    for side in ['left', 'right']:
      for dtype in [np.float32, np.int32]:
        values = np.random.uniform(
            low=-1000, high=1000, size=(10,)).astype(dtype)
        unsorted = np.random.uniform(
            low=-1000, high=1000, size=(20,)).astype(dtype)
        sorted_sequence = np.sort(unsorted)
        np_ans = np.searchsorted(sorted_sequence, values, side=side)
        with self.session() as session:
          with self.test_scope():
            tf_ans = array_ops.searchsorted(sorted_sequence, values, side=side)
          tf_out = session.run(tf_ans)
        self.assertAllEqual(np_ans, tf_out)
  def _test2DExample(self, dtype, side, sorted_sequence, values, correct_ans):
    # Shared driver: runs searchsorted under the XLA test scope and compares
    # the result to the hand-computed expectation.
    with self.session() as session:
      with self.test_scope():
        tf_ans = array_ops.searchsorted(sorted_sequence, values, side=side)
      tf_out = session.run(tf_ans)
    self.assertAllEqual(correct_ans, tf_out)
  def testLowerBound2DExample(self):
    # 2D TensorFlow documentation example.
    for dtype in self.float_types | self.int_types:
      sorted_sequence = np.array([[0, 3, 9, 9, 10], [1, 2, 3, 4, 5]], dtype)
      values = np.array([[2, 4, 9], [0, 2, 6]], dtype)
      correct_ans = np.array([[1, 2, 2], [0, 1, 5]], dtype)
      self._test2DExample(dtype, 'left', sorted_sequence, values, correct_ans)
  def testUpperBound2DExample(self):
    # 2D TensorFlow documentation example.
    for dtype in self.float_types | self.int_types:
      sorted_sequence = np.array([[0, 3, 9, 9, 10], [1, 2, 3, 4, 5]], dtype)
      values = np.array([[2, 4, 9], [0, 2, 6]], dtype)
      correct_ans = np.array([[1, 2, 4], [0, 2, 5]], dtype)
      self._test2DExample(dtype, 'right', sorted_sequence, values, correct_ans)
if __name__ == '__main__':
  test.main()
| SearchSorteddOpTest |
python | kamyu104__LeetCode-Solutions | Python/create-target-array-in-the-given-order.py | {
"start": 536,
"end": 842
class ____(object):
    def createTargetArray(self, nums, index):
        """
        :type nums: List[int]
        :type index: List[int]
        :rtype: List[int]

        Build the target array by inserting each value at its requested
        position; later inserts shift earlier elements right, which is
        exactly the problem's semantics (LC 1389).  O(n^2) time, O(1) extra.
        """
        # Use the built-in zip instead of itertools.izip so the code runs on
        # both Python 2 (where zip also exists) and Python 3 (where izip was
        # removed from itertools).
        result = []
        for i, x in zip(index, nums):
            result.insert(i, x)
        return result
| Solution2 |
python | TheAlgorithms__Python | graphs/minimum_spanning_tree_kruskal2.py | {
"start": 1395,
"end": 4059
class ____[T]:
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}
    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        # (stored in both directions since the graph is undirected)
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's Algorithm to generate a Minimum Spanning Tree (MST) of a graph
        """
        Details: https://en.wikipedia.org/wiki/Kruskal%27s_algorithm
        Example:
        >>> g1 = GraphUndirectedWeighted[int]()
        >>> g1.add_edge(1, 2, 1)
        >>> g1.add_edge(2, 3, 2)
        >>> g1.add_edge(3, 4, 1)
        >>> g1.add_edge(3, 5, 100) # Removed in MST
        >>> g1.add_edge(4, 5, 5)
        >>> assert 5 in g1.connections[3]
        >>> mst = g1.kruskal()
        >>> assert 5 not in mst.connections[3]
        >>> g2 = GraphUndirectedWeighted[str]()
        >>> g2.add_edge('A', 'B', 1)
        >>> g2.add_edge('B', 'C', 2)
        >>> g2.add_edge('C', 'D', 1)
        >>> g2.add_edge('C', 'E', 100) # Removed in MST
        >>> g2.add_edge('D', 'E', 5)
        >>> assert 'E' in g2.connections["C"]
        >>> mst = g2.kruskal()
        >>> assert 'E' not in mst.connections['C']
        """
        # getting the edges in ascending order of weights
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    # Record the reversed pair so the mirror entry of this
                    # undirected edge is skipped when we reach it.
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                # Different components: edge is safe to add (no cycle).
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
| GraphUndirectedWeighted |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/never2.py | {
"start": 366,
"end": 522
# Type-checker conformance sample: Never used as a type argument.
class ____(Generic[T]):
    pass
def func2(x: U) -> ClassB[U]:
    # This should generate an error because T is invariant.
    # (ClassB[Never] is not assignable to ClassB[U] under invariance.)
    return ClassB[Never]()
| ClassB |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_measurements_meta.py | {
"start": 249,
"end": 5190
class ____(MetricsEnhancedPerformanceTestCase):
    """Tests the measurements-meta endpoint: custom measurement discovery
    (including names with digits/periods) and the query date range filter."""
    endpoint = "sentry-api-0-organization-measurements-meta"
    METRIC_STRINGS = [
        "d:transactions/measurements.something_custom@millisecond",
    ]
    features = {"organizations:discover-basic": True}
    def setUp(self) -> None:
        super().setUp()
        self.login_as(user=self.user)
        # Fixed reference time so stored metrics land deterministically
        # inside (or outside) the queried stats period.
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            self.endpoint, kwargs={"organization_id_or_slug": self.project.organization.slug}
        )
        self.features = {"organizations:performance-use-metrics": True}
    def test_simple(self) -> None:
        self.store_transaction_metric(
            1,
            metric="measurements.something_custom",
            internal_metric="d:transactions/measurements.something_custom@millisecond",
            entity="metrics_distributions",
            timestamp=self.day_ago + timedelta(hours=1, minutes=0),
        )
        response = self.do_request(
            {
                "project": self.project.id,
                "statsPeriod": "14d",
            }
        )
        assert response.status_code == 200, response.content
        assert response.data == {
            "measurements.something_custom": {
                "functions": [
                    "apdex",
                    "avg",
                    "p50",
                    "p75",
                    "p90",
                    "p95",
                    "p99",
                    "p100",
                    "max",
                    "min",
                    "sum",
                    "percentile",
                    "http_error_count",
                    "http_error_rate",
                ],
                "unit": "millisecond",
            }
        }
    def test_measurements_with_numbers_in_name(self) -> None:
        # Measurement names containing digits must be parsed/returned intact.
        self.store_transaction_metric(
            1,
            metric="measurements.something_custom",
            internal_metric="d:transactions/measurements.1234567890.abcdef@millisecond",
            entity="metrics_distributions",
            timestamp=self.day_ago + timedelta(hours=1, minutes=0),
        )
        response = self.do_request(
            {
                "project": self.project.id,
                "statsPeriod": "14d",
            }
        )
        assert response.status_code == 200, response.content
        assert response.data == {
            "measurements.1234567890.abcdef": {
                "functions": [
                    "apdex",
                    "avg",
                    "p50",
                    "p75",
                    "p90",
                    "p95",
                    "p99",
                    "p100",
                    "max",
                    "min",
                    "sum",
                    "percentile",
                    "http_error_count",
                    "http_error_rate",
                ],
                "unit": "millisecond",
            }
        }
    def test_measurements_with_lots_of_periods(self) -> None:
        # Many dot-separated segments should not confuse MRI parsing.
        self.store_transaction_metric(
            1,
            metric="measurements.something_custom",
            internal_metric="d:transactions/measurements.a.b.c.d.e.f.g@millisecond",
            entity="metrics_distributions",
            timestamp=self.day_ago + timedelta(hours=1, minutes=0),
        )
        response = self.do_request(
            {
                "project": self.project.id,
                "statsPeriod": "14d",
            }
        )
        assert response.status_code == 200, response.content
        assert response.data == {
            "measurements.a.b.c.d.e.f.g": {
                "functions": [
                    "apdex",
                    "avg",
                    "p50",
                    "p75",
                    "p90",
                    "p95",
                    "p99",
                    "p100",
                    "max",
                    "min",
                    "sum",
                    "percentile",
                    "http_error_count",
                    "http_error_rate",
                ],
                "unit": "millisecond",
            }
        }
    def test_metric_outside_query_daterange(self) -> None:
        # A metric stored before the 14d window must not be reported.
        self.store_transaction_metric(
            1,
            metric="measurements.something_custom",
            internal_metric="d:transactions/measurements.something_custom@millisecond",
            entity="metrics_distributions",
            timestamp=self.day_ago - timedelta(days=15, minutes=0),
        )
        response = self.do_request(
            {
                "project": self.project.id,
                "statsPeriod": "14d",
            }
        )
        assert response.status_code == 200, response.content
        assert response.data == {}
| OrganizationMeasurementsMetaEndpoint |
python | readthedocs__readthedocs.org | readthedocs/search/api/v2/views.py | {
"start": 6942,
"end": 7008
class ____(PageSearchAPIView):
    # Behaviorally identical to PageSearchAPIView; exists as a distinct base
    # class so the proxied search view can be extended/identified separately.
    # NOTE(review): purpose inferred from the name — confirm against the
    # proxied-API URL wiring in this module.
    pass
| BaseProxiedPageSearchAPIView |
python | pytorch__pytorch | torch/_inductor/standalone_compile.py | {
"start": 9864,
"end": 16664
class ____(CompiledArtifact):
    """
    Similar to CompiledArtifact, but the object is a single, bundled precompiled function.
    This object is always a serializable callable function.
    This object is essentially a wrapper for BundledAOTAutogradSerializableCallable, which
    is used by torch._dynamo.aot_compile for AOT Precompilation.
    """
    # Magic header written before the payload so load() can reject files that
    # are not AOT artifacts.
    AOT_HEADER = bytes("AOTCompiledArtifact", "utf-8")
    def __init__(
        self,
        compiled_fn: Callable[..., Any],
    ):
        self.inner_fn = BundledAOTAutogradSerializableCallable(compiled_fn)
        self._artifacts = (
            None  # We don't need artifacts, the inner object handles everything
        )
    @staticmethod
    def from_bundled_callable(
        bundled_fn: BundledAOTAutogradSerializableCallable,
    ) -> AOTCompiledArtifact:
        # Alternate constructor: re-wrap an already-bundled callable.
        return AOTCompiledArtifact(bundled_fn.compiled_fn)
    def __call__(self, *args: Any) -> Any:
        return self.inner_fn(*args)
    def save(
        self, *, path: str, format: Literal["binary", "unpacked"] = "binary"
    ) -> None:
        # Serialize as: AOT_HEADER | torch_key() | payload.  torch_key pins
        # the artifact to the producing torch build; load() re-checks it.
        if format == "unpacked":
            raise RuntimeError(
                "AOTCompiledArtifact does not support unpacked format yet"
            )
        result_bytes = self.serialize()
        from torch.utils._appending_byte_serializer import BytesWriter
        from .codecache import torch_key
        writer = BytesWriter()
        writer.write_bytes(AOTCompiledArtifact.AOT_HEADER)
        writer.write_bytes(torch_key())
        writer.write_bytes(result_bytes)
        from torch._inductor.codecache import write_atomic
        # Save a sentinel file to indicate that this is AOT
        write_atomic(path, writer.to_bytes())
    def serialize(self) -> bytes:
        return BundledAOTAutogradSerializableCallable.serialize_compile_artifacts(
            self.inner_fn
        )
    @staticmethod
    def deserialize(result_bytes: bytes) -> AOTCompiledArtifact:
        deserialized = (
            BundledAOTAutogradSerializableCallable.deserialize_compile_artifacts(
                result_bytes
            )
        )
        assert isinstance(deserialized, BundledAOTAutogradSerializableCallable)
        return AOTCompiledArtifact.from_bundled_callable(deserialized)
    @staticmethod
    def load(
        *, path: str, format: Literal["binary", "unpacked"] = "binary"
    ) -> CompiledArtifact:
        # Mirror image of save(): validate the header and torch_key before
        # deserializing the payload.
        if format == "unpacked":
            raise RuntimeError(
                "AOTCompiledArtifact does not support unpacked format yet"
            )
        with open(path, "rb") as file:
            from torch.utils._appending_byte_serializer import BytesReader
            from .codecache import torch_key
            result_bytes = file.read()
            reader = BytesReader(result_bytes)
            header = reader.read_bytes()
            assert header == AOTCompiledArtifact.AOT_HEADER
            assert reader.read_bytes() == torch_key()
            artifact = reader.read_bytes()
            assert reader.is_finished()
            return AOTCompiledArtifact.deserialize(artifact)
def standalone_compile(
    gm: GraphModule,
    example_inputs: Sequence[InputType],
    *,
    dynamic_shapes: Any,
    options: Any,
    aot: bool = False,  # AOT mode, which uses BundledAOTAutogradCache
) -> CompiledArtifact:
    """
    Implementation of torch.inductor.standalone_compile

    ``dynamic_shapes`` selects where the FakeTensorMode (and hence the shape
    env) comes from: "from_example_inputs" (fresh, shapes taken literally),
    "from_tracing_context" (ambient torch.compile backend), or "from_graph"
    (recovered from FakeTensors recorded on the graph's output node).
    """
    from torch.compiler._cache import CacheArtifactManager
    from .compile_fx import compile_fx
    ignore_shape_env = False
    if dynamic_shapes == "from_example_inputs":
        fake_mode = FakeTensorMode(shape_env=ShapeEnv())
        # tells compile_fx to ignore the shape_envs on the ambient context
        # and the graph_module.
        ignore_shape_env = True
    elif dynamic_shapes == "from_tracing_context":
        # Reuse fake_mode from the TracingContext.
        # NB: The TracingContext only exists if we're currently in a torch.compile backend.
        context = torch._guards.TracingContext.get()
        assert context.fake_mode is not None
        fake_mode = context.fake_mode
    elif dynamic_shapes == "from_graph":
        fake_mode = FakeTensorMode(shape_env=ShapeEnv())
        # Strategy: find a FakeTensor in the graph output, grab its FakeTensorMode.
        # The graph passed to standalone_compile must be an Inductor-approved graph,
        # which means that there is at least one Tensor output and the output node
        # contains a flat list of Tensors.
        last_node = next(iter(reversed(gm.graph.nodes)))
        assert last_node.op == "output"
        assert len(last_node.args) == 1
        def handle_node(node: torch.fx.Node) -> None:
            # Overwrites fake_mode with the one attached to a FakeTensor
            # output, if any is found.
            nonlocal fake_mode
            if "example_value" in node.meta:
                maybe_tensor = node.meta["example_value"]
                if isinstance(maybe_tensor, torch._subclasses.fake_tensor.FakeTensor):
                    fake_mode = maybe_tensor.fake_mode
        # If gm came from Dynamo, then last_node.args[0] is always a list,
        # even in single-Tensor returns.
        #
        # It's possible to get into a situation where last_node.args[0]
        # is a Node (and not a list!). This happens if you call split_module
        # on the graph. We allow for this case since it is common.
        if isinstance(last_node.args[0], torch.fx.Node):
            handle_node(last_node.args[0])
        else:
            for node in last_node.args[0]:
                handle_node(node)
    else:
        raise ValueError(
            f"standalone_compile got unsupported `dynamic_shapes` value: dynamic_shapes={dynamic_shapes}."
        )
    context = torch._guards.TracingContext(fake_mode)
    with (
        torch._guards.tracing(context),
        CacheArtifactManager.with_fresh_cache(),
        config.patch("triton.autotune_at_compile_time", True),
        torch._functorch.config.patch("bundled_autograd_cache", aot),
    ):
        # compile_fx can mutate gm
        gm = copy.deepcopy(gm)
        compiled_fn = compile_fx(
            gm, example_inputs, ignore_shape_env=ignore_shape_env, **options
        )
        assert callable(compiled_fn)
        if aot:
            if not hasattr(compiled_fn, "serialize"):
                raise RuntimeError(
                    "Compiled function should have serialize method when aot=True"
                )
            return AOTCompiledArtifact(compiled_fn)
        artifacts = torch.compiler.save_cache_artifacts()
        if artifacts is None:
            log.warning(
                "standalone_compile artifact generation failed, cannot save. "
                "Run with TORCH_LOGS=+torch._inductor.codecache to identify the problem"
            )
        return CacheCompiledArtifact(compiled_fn, artifacts)
| AOTCompiledArtifact |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 59109,
"end": 59313
class ____(themeable):
    """
    Layout items in the legend
    Parameters
    ----------
    theme_element : Literal["vertical", "horizontal"]
        Vertically or horizontally
    """
    # Pure declaration: reading the value and applying it at draw time is
    # presumably handled by the themeable base machinery — nothing to
    # override here.
| legend_direction |
python | kamyu104__LeetCode-Solutions | Python/squares-of-a-sorted-array.py | {
"start": 45,
"end": 576
class ____(object):
    def sortedSquares(self, A):
        """
        :type A: List[int]
        :rtype: List[int]

        Two-pointer merge from the sign boundary outward: squares grow as we
        move away from zero in either direction, so merging the two runs
        yields the answer in O(n) without sorting.
        """
        n = len(A)
        # First index with a non-negative value; everything left of it is < 0.
        pos = bisect.bisect_left(A, 0)
        neg = pos - 1
        squares = []
        # Merge while both runs still have elements; ties go to the right
        # side (strict < matches the original comparison order).
        while neg >= 0 and pos < n:
            left_sq = A[neg] ** 2
            right_sq = A[pos] ** 2
            if left_sq < right_sq:
                squares.append(left_sq)
                neg -= 1
            else:
                squares.append(right_sq)
                pos += 1
        # Drain whichever run remains.
        while neg >= 0:
            squares.append(A[neg] ** 2)
            neg -= 1
        while pos < n:
            squares.append(A[pos] ** 2)
            pos += 1
        return squares
| Solution |
python | scikit-learn__scikit-learn | sklearn/utils/_testing.py | {
"start": 39220,
"end": 40953
class ____(contextlib.AbstractContextManager):
    # see raises() for parameters
    def __init__(self, expected_exc_type, match, may_pass, err_msg):
        # Normalize to lists so __exit__ can always iterate.
        self.expected_exc_types = (
            expected_exc_type
            if isinstance(expected_exc_type, Iterable)
            else [expected_exc_type]
        )
        self.matches = [match] if isinstance(match, str) else match
        self.may_pass = may_pass
        self.err_msg = err_msg
        # Set to True only when an expected exception was raised AND its
        # message matched one of the patterns.
        self.raised_and_matched = False
    def __exit__(self, exc_type, exc_value, _):
        # Returning True swallows the exception; returning False re-raises it.
        # see
        # https://docs.python.org/2.5/whatsnew/pep-343.html#SECTION000910000000000000000
        if exc_type is None:  # No exception was raised in the block
            if self.may_pass:
                return True  # CM is happy
            else:
                err_msg = self.err_msg or f"Did not raise: {self.expected_exc_types}"
                raise AssertionError(err_msg)
        if not any(
            issubclass(exc_type, expected_type)
            for expected_type in self.expected_exc_types
        ):
            # Unexpected exception type: either surface the custom message
            # (chained to the original) or let the original propagate.
            if self.err_msg is not None:
                raise AssertionError(self.err_msg) from exc_value
            else:
                return False  # will re-raise the original exception
        if self.matches is not None:
            err_msg = self.err_msg or (
                "The error message should contain one of the following "
                "patterns:\n{}\nGot {}".format("\n".join(self.matches), str(exc_value))
            )
            if not any(re.search(match, str(exc_value)) for match in self.matches):
                raise AssertionError(err_msg) from exc_value
            self.raised_and_matched = True
        return True
| _Raises |
python | getsentry__sentry | src/sentry/plugins/base/binding_manager.py | {
"start": 805,
"end": 1241
class ____:
    """Registry of named binding managers.

    Each entry in ``BINDINGS`` maps a binding name to the manager class that
    collects implementations registered under that name.
    """
    BINDINGS = {
        "repository.provider": RepositoryProviderManager,
        "integration-repository.provider": IntegrationRepositoryProviderManager,
    }
    def __init__(self):
        # Instantiate one manager per declared binding name.
        self._bindings = {}
        for binding_name, manager_cls in self.BINDINGS.items():
            self._bindings[binding_name] = manager_cls()
    def add(self, name, binding, **kwargs):
        """Register ``binding`` with the manager for ``name``."""
        self._bindings[name].add(binding, **kwargs)
    def get(self, name):
        """Return the manager for ``name`` (KeyError if unknown)."""
        return self._bindings[name]
| BindingManager |
python | scipy__scipy | benchmarks/benchmarks/test_functions.py | {
"start": 5494,
"end": 6299
} | class ____:
# note: this function is not smooth at the origin. the gradient will never
# converge in the minimizer
target_E = 0.
solution = [0., 0.]
xmin = np.array([-5, -5])
xmax = np.array([5, 5])
def fun(self, x):
E = (-20. * exp(-0.2 * sqrt(0.5 * (x[0]**2 + x[1]**2))) + 20. + np.e -
exp(0.5 * (cos(2. * pi * x[0]) + cos(2. * pi * x[1]))))
return E
def der(self, x):
R = sqrt(x[0]**2 + x[1]**2)
term1 = -20. * exp(-0.2 * R)
term2 = -exp(0.5 * (cos(2. * pi * x[0]) + cos(2. * pi * x[1])))
deriv1 = term1 * (-0.2 * 0.5 / R)
dfdx = 2. * deriv1 * x[0] - term2 * pi * sin(2. * pi * x[0])
dfdy = 2. * deriv1 * x[1] - term2 * pi * sin(2. * pi * x[1])
return np.array([dfdx, dfdy])
| Ackley |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_transforms.py | {
"start": 563,
"end": 17873
} | class ____:
single_point = [1.0, 1.0]
multiple_points = [[0.0, 2.0], [3.0, 3.0], [4.0, 0.0]]
pivot = single_point
def test_init(self):
Affine2D([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
Affine2D(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], int))
Affine2D(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], float))
def test_values(self):
np.random.seed(19680801)
values = np.random.random(6)
assert_array_equal(Affine2D.from_values(*values).to_values(), values)
def test_modify_inplace(self):
# Some polar transforms require modifying the matrix in place.
trans = Affine2D()
mtx = trans.get_matrix()
mtx[0, 0] = 42
assert_array_equal(trans.get_matrix(), [[42, 0, 0], [0, 1, 0], [0, 0, 1]])
def test_clear(self):
a = Affine2D(np.random.rand(3, 3) + 5) # Anything non-identity.
a.clear()
assert_array_equal(a.get_matrix(), [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
def test_rotate(self):
r_pi_2 = Affine2D().rotate(np.pi / 2)
r90 = Affine2D().rotate_deg(90)
assert_array_equal(r_pi_2.get_matrix(), r90.get_matrix())
assert_array_almost_equal(r90.transform(self.single_point), [-1, 1])
assert_array_almost_equal(r90.transform(self.multiple_points),
[[-2, 0], [-3, 3], [0, 4]])
r_pi = Affine2D().rotate(np.pi)
r180 = Affine2D().rotate_deg(180)
assert_array_equal(r_pi.get_matrix(), r180.get_matrix())
assert_array_almost_equal(r180.transform(self.single_point), [-1, -1])
assert_array_almost_equal(r180.transform(self.multiple_points),
[[0, -2], [-3, -3], [-4, 0]])
r_pi_3_2 = Affine2D().rotate(3 * np.pi / 2)
r270 = Affine2D().rotate_deg(270)
assert_array_equal(r_pi_3_2.get_matrix(), r270.get_matrix())
assert_array_almost_equal(r270.transform(self.single_point), [1, -1])
assert_array_almost_equal(r270.transform(self.multiple_points),
[[2, 0], [3, -3], [0, -4]])
assert_array_equal((r90 + r90).get_matrix(), r180.get_matrix())
assert_array_equal((r90 + r180).get_matrix(), r270.get_matrix())
def test_rotate_around(self):
r_pi_2 = Affine2D().rotate_around(*self.pivot, np.pi / 2)
r90 = Affine2D().rotate_deg_around(*self.pivot, 90)
assert_array_equal(r_pi_2.get_matrix(), r90.get_matrix())
assert_array_almost_equal(r90.transform(self.single_point), [1, 1])
assert_array_almost_equal(r90.transform(self.multiple_points),
[[0, 0], [-1, 3], [2, 4]])
r_pi = Affine2D().rotate_around(*self.pivot, np.pi)
r180 = Affine2D().rotate_deg_around(*self.pivot, 180)
assert_array_equal(r_pi.get_matrix(), r180.get_matrix())
assert_array_almost_equal(r180.transform(self.single_point), [1, 1])
assert_array_almost_equal(r180.transform(self.multiple_points),
[[2, 0], [-1, -1], [-2, 2]])
r_pi_3_2 = Affine2D().rotate_around(*self.pivot, 3 * np.pi / 2)
r270 = Affine2D().rotate_deg_around(*self.pivot, 270)
assert_array_equal(r_pi_3_2.get_matrix(), r270.get_matrix())
assert_array_almost_equal(r270.transform(self.single_point), [1, 1])
assert_array_almost_equal(r270.transform(self.multiple_points),
[[2, 2], [3, -1], [0, -2]])
assert_array_almost_equal((r90 + r90).get_matrix(), r180.get_matrix())
assert_array_almost_equal((r90 + r180).get_matrix(), r270.get_matrix())
def test_scale(self):
sx = Affine2D().scale(3, 1)
sy = Affine2D().scale(1, -2)
trans = Affine2D().scale(3, -2)
assert_array_equal((sx + sy).get_matrix(), trans.get_matrix())
assert_array_equal(trans.transform(self.single_point), [3, -2])
assert_array_equal(trans.transform(self.multiple_points),
[[0, -4], [9, -6], [12, 0]])
def test_skew(self):
trans_rad = Affine2D().skew(np.pi / 8, np.pi / 12)
trans_deg = Affine2D().skew_deg(22.5, 15)
assert_array_equal(trans_rad.get_matrix(), trans_deg.get_matrix())
# Using ~atan(0.5), ~atan(0.25) produces roundish numbers on output.
trans = Affine2D().skew_deg(26.5650512, 14.0362435)
assert_array_almost_equal(trans.transform(self.single_point), [1.5, 1.25])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[1, 2], [4.5, 3.75], [4, 1]])
def test_translate(self):
tx = Affine2D().translate(23, 0)
ty = Affine2D().translate(0, 42)
trans = Affine2D().translate(23, 42)
assert_array_equal((tx + ty).get_matrix(), trans.get_matrix())
assert_array_equal(trans.transform(self.single_point), [24, 43])
assert_array_equal(trans.transform(self.multiple_points),
[[23, 44], [26, 45], [27, 42]])
def test_rotate_plus_other(self):
trans = Affine2D().rotate_deg(90).rotate_deg_around(*self.pivot, 180)
trans_added = (Affine2D().rotate_deg(90) +
Affine2D().rotate_deg_around(*self.pivot, 180))
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [3, 1])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[4, 2], [5, -1], [2, -2]])
trans = Affine2D().rotate_deg(90).scale(3, -2)
trans_added = Affine2D().rotate_deg(90) + Affine2D().scale(3, -2)
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [-3, -2])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[-6, -0], [-9, -6], [0, -8]])
trans = (Affine2D().rotate_deg(90)
.skew_deg(26.5650512, 14.0362435)) # ~atan(0.5), ~atan(0.25)
trans_added = (Affine2D().rotate_deg(90) +
Affine2D().skew_deg(26.5650512, 14.0362435))
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [-0.5, 0.75])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[-2, -0.5], [-1.5, 2.25], [2, 4]])
trans = Affine2D().rotate_deg(90).translate(23, 42)
trans_added = Affine2D().rotate_deg(90) + Affine2D().translate(23, 42)
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [22, 43])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[21, 42], [20, 45], [23, 46]])
def test_rotate_around_plus_other(self):
trans = Affine2D().rotate_deg_around(*self.pivot, 90).rotate_deg(180)
trans_added = (Affine2D().rotate_deg_around(*self.pivot, 90) +
Affine2D().rotate_deg(180))
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [-1, -1])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[0, 0], [1, -3], [-2, -4]])
trans = Affine2D().rotate_deg_around(*self.pivot, 90).scale(3, -2)
trans_added = (Affine2D().rotate_deg_around(*self.pivot, 90) +
Affine2D().scale(3, -2))
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [3, -2])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[0, 0], [-3, -6], [6, -8]])
trans = (Affine2D().rotate_deg_around(*self.pivot, 90)
.skew_deg(26.5650512, 14.0362435)) # ~atan(0.5), ~atan(0.25)
trans_added = (Affine2D().rotate_deg_around(*self.pivot, 90) +
Affine2D().skew_deg(26.5650512, 14.0362435))
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [1.5, 1.25])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[0, 0], [0.5, 2.75], [4, 4.5]])
trans = Affine2D().rotate_deg_around(*self.pivot, 90).translate(23, 42)
trans_added = (Affine2D().rotate_deg_around(*self.pivot, 90) +
Affine2D().translate(23, 42))
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [24, 43])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[23, 42], [22, 45], [25, 46]])
def test_scale_plus_other(self):
trans = Affine2D().scale(3, -2).rotate_deg(90)
trans_added = Affine2D().scale(3, -2) + Affine2D().rotate_deg(90)
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_equal(trans.transform(self.single_point), [2, 3])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[4, 0], [6, 9], [0, 12]])
trans = Affine2D().scale(3, -2).rotate_deg_around(*self.pivot, 90)
trans_added = (Affine2D().scale(3, -2) +
Affine2D().rotate_deg_around(*self.pivot, 90))
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_equal(trans.transform(self.single_point), [4, 3])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[6, 0], [8, 9], [2, 12]])
trans = (Affine2D().scale(3, -2)
.skew_deg(26.5650512, 14.0362435)) # ~atan(0.5), ~atan(0.25)
trans_added = (Affine2D().scale(3, -2) +
Affine2D().skew_deg(26.5650512, 14.0362435))
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [2, -1.25])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[-2, -4], [6, -3.75], [12, 3]])
trans = Affine2D().scale(3, -2).translate(23, 42)
trans_added = Affine2D().scale(3, -2) + Affine2D().translate(23, 42)
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_equal(trans.transform(self.single_point), [26, 40])
assert_array_equal(trans.transform(self.multiple_points),
[[23, 38], [32, 36], [35, 42]])
def test_skew_plus_other(self):
# Using ~atan(0.5), ~atan(0.25) produces roundish numbers on output.
trans = Affine2D().skew_deg(26.5650512, 14.0362435).rotate_deg(90)
trans_added = (Affine2D().skew_deg(26.5650512, 14.0362435) +
Affine2D().rotate_deg(90))
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [-1.25, 1.5])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[-2, 1], [-3.75, 4.5], [-1, 4]])
trans = (Affine2D().skew_deg(26.5650512, 14.0362435)
.rotate_deg_around(*self.pivot, 90))
trans_added = (Affine2D().skew_deg(26.5650512, 14.0362435) +
Affine2D().rotate_deg_around(*self.pivot, 90))
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [0.75, 1.5])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[0, 1], [-1.75, 4.5], [1, 4]])
trans = Affine2D().skew_deg(26.5650512, 14.0362435).scale(3, -2)
trans_added = (Affine2D().skew_deg(26.5650512, 14.0362435) +
Affine2D().scale(3, -2))
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [4.5, -2.5])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[3, -4], [13.5, -7.5], [12, -2]])
trans = Affine2D().skew_deg(26.5650512, 14.0362435).translate(23, 42)
trans_added = (Affine2D().skew_deg(26.5650512, 14.0362435) +
Affine2D().translate(23, 42))
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [24.5, 43.25])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[24, 44], [27.5, 45.75], [27, 43]])
def test_translate_plus_other(self):
trans = Affine2D().translate(23, 42).rotate_deg(90)
trans_added = Affine2D().translate(23, 42) + Affine2D().rotate_deg(90)
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [-43, 24])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[-44, 23], [-45, 26], [-42, 27]])
trans = Affine2D().translate(23, 42).rotate_deg_around(*self.pivot, 90)
trans_added = (Affine2D().translate(23, 42) +
Affine2D().rotate_deg_around(*self.pivot, 90))
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [-41, 24])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[-42, 23], [-43, 26], [-40, 27]])
trans = Affine2D().translate(23, 42).scale(3, -2)
trans_added = Affine2D().translate(23, 42) + Affine2D().scale(3, -2)
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [72, -86])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[69, -88], [78, -90], [81, -84]])
trans = (Affine2D().translate(23, 42)
.skew_deg(26.5650512, 14.0362435)) # ~atan(0.5), ~atan(0.25)
trans_added = (Affine2D().translate(23, 42) +
Affine2D().skew_deg(26.5650512, 14.0362435))
assert_array_equal(trans.get_matrix(), trans_added.get_matrix())
assert_array_almost_equal(trans.transform(self.single_point), [45.5, 49])
assert_array_almost_equal(trans.transform(self.multiple_points),
[[45, 49.75], [48.5, 51.5], [48, 48.75]])
def test_invalid_transform(self):
t = mtransforms.Affine2D()
# There are two different exceptions, since the wrong number of
# dimensions is caught when constructing an array_view, and that
# raises a ValueError, and a wrong shape with a possible number
# of dimensions is caught by our CALL_CPP macro, which always
# raises the less precise RuntimeError.
with pytest.raises(ValueError):
t.transform(1)
with pytest.raises(ValueError):
t.transform([[[1]]])
with pytest.raises(RuntimeError):
t.transform([])
with pytest.raises(RuntimeError):
t.transform([1])
with pytest.raises(ValueError):
t.transform([[1]])
with pytest.raises(ValueError):
t.transform([[1, 2, 3]])
def test_copy(self):
a = mtransforms.Affine2D()
b = mtransforms.Affine2D()
s = a + b
# Updating a dependee should invalidate a copy of the dependent.
s.get_matrix() # resolve it.
s1 = copy.copy(s)
assert not s._invalid and not s1._invalid
a.translate(1, 2)
assert s._invalid and s1._invalid
assert (s1.get_matrix() == a.get_matrix()).all()
# Updating a copy of a dependee shouldn't invalidate a dependent.
s.get_matrix() # resolve it.
b1 = copy.copy(b)
b1.translate(3, 4)
assert not s._invalid
assert_array_equal(s.get_matrix(), a.get_matrix())
def test_deepcopy(self):
a = mtransforms.Affine2D()
b = mtransforms.Affine2D()
s = a + b
# Updating a dependee shouldn't invalidate a deepcopy of the dependent.
s.get_matrix() # resolve it.
s1 = copy.deepcopy(s)
assert not s._invalid and not s1._invalid
a.translate(1, 2)
assert s._invalid and not s1._invalid
assert_array_equal(s1.get_matrix(), mtransforms.Affine2D().get_matrix())
# Updating a deepcopy of a dependee shouldn't invalidate a dependent.
s.get_matrix() # resolve it.
b1 = copy.deepcopy(b)
b1.translate(3, 4)
assert not s._invalid
assert_array_equal(s.get_matrix(), a.get_matrix())
| TestAffine2D |
python | ray-project__ray | rllib/models/torch/fcnet.py | {
"start": 482,
"end": 5903
} | class ____(TorchModelV2, nn.Module):
"""Generic fully connected network."""
def __init__(
self,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
num_outputs: int,
model_config: ModelConfigDict,
name: str,
):
TorchModelV2.__init__(
self, obs_space, action_space, num_outputs, model_config, name
)
nn.Module.__init__(self)
hiddens = list(model_config.get("fcnet_hiddens", [])) + list(
model_config.get("post_fcnet_hiddens", [])
)
activation = model_config.get("fcnet_activation")
if not model_config.get("fcnet_hiddens", []):
activation = model_config.get("post_fcnet_activation")
no_final_linear = model_config.get("no_final_linear")
self.vf_share_layers = model_config.get("vf_share_layers")
self.free_log_std = model_config.get("free_log_std")
# Generate free-floating bias variables for the second half of
# the outputs.
if self.free_log_std:
assert num_outputs % 2 == 0, (
"num_outputs must be divisible by two",
num_outputs,
)
num_outputs = num_outputs // 2
layers = []
prev_layer_size = int(np.prod(obs_space.shape))
self._logits = None
# Create layers 0 to second-last.
for size in hiddens[:-1]:
layers.append(
SlimFC(
in_size=prev_layer_size,
out_size=size,
initializer=normc_initializer(1.0),
activation_fn=activation,
)
)
prev_layer_size = size
# The last layer is adjusted to be of size num_outputs, but it's a
# layer with activation.
if no_final_linear and num_outputs:
layers.append(
SlimFC(
in_size=prev_layer_size,
out_size=num_outputs,
initializer=normc_initializer(1.0),
activation_fn=activation,
)
)
prev_layer_size = num_outputs
# Finish the layers with the provided sizes (`hiddens`), plus -
# iff num_outputs > 0 - a last linear layer of size num_outputs.
else:
if len(hiddens) > 0:
layers.append(
SlimFC(
in_size=prev_layer_size,
out_size=hiddens[-1],
initializer=normc_initializer(1.0),
activation_fn=activation,
)
)
prev_layer_size = hiddens[-1]
if num_outputs:
self._logits = SlimFC(
in_size=prev_layer_size,
out_size=num_outputs,
initializer=normc_initializer(0.01),
activation_fn=None,
)
else:
self.num_outputs = ([int(np.prod(obs_space.shape))] + hiddens[-1:])[-1]
# Layer to add the log std vars to the state-dependent means.
if self.free_log_std and self._logits:
self._append_free_log_std = AppendBiasLayer(num_outputs)
self._hidden_layers = nn.Sequential(*layers)
self._value_branch_separate = None
if not self.vf_share_layers:
# Build a parallel set of hidden layers for the value net.
prev_vf_layer_size = int(np.prod(obs_space.shape))
vf_layers = []
for size in hiddens:
vf_layers.append(
SlimFC(
in_size=prev_vf_layer_size,
out_size=size,
activation_fn=activation,
initializer=normc_initializer(1.0),
)
)
prev_vf_layer_size = size
self._value_branch_separate = nn.Sequential(*vf_layers)
self._value_branch = SlimFC(
in_size=prev_layer_size,
out_size=1,
initializer=normc_initializer(0.01),
activation_fn=None,
)
# Holds the current "base" output (before logits layer).
self._features = None
# Holds the last input, in case value branch is separate.
self._last_flat_in = None
@override(TorchModelV2)
def forward(
self,
input_dict: Dict[str, TensorType],
state: List[TensorType],
seq_lens: TensorType,
) -> (TensorType, List[TensorType]):
obs = input_dict["obs_flat"].float()
self._last_flat_in = obs.reshape(obs.shape[0], -1)
self._features = self._hidden_layers(self._last_flat_in)
logits = self._logits(self._features) if self._logits else self._features
if self.free_log_std:
logits = self._append_free_log_std(logits)
return logits, state
@override(TorchModelV2)
def value_function(self) -> TensorType:
assert self._features is not None, "must call forward() first"
if self._value_branch_separate:
out = self._value_branch(
self._value_branch_separate(self._last_flat_in)
).squeeze(1)
else:
out = self._value_branch(self._features).squeeze(1)
return out
| FullyConnectedNetwork |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/attrs/test_pretty.py | {
"start": 1897,
"end": 2243
} | class ____:
x: int
y: int = attrs.field(init=False)
def test_does_not_include_no_init_fields_in_attrs_printing():
record = AttrsClassWithNoInitField(x=1)
assert pretty.pretty(record) == "AttrsClassWithNoInitField(x=1)"
record.y = 1
assert pretty.pretty(record) == "AttrsClassWithNoInitField(x=1)"
| AttrsClassWithNoInitField |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-recharge/source_recharge/streams.py | {
"start": 592,
"end": 668
} | class ____(Enum):
DEPRECATED = "2021-01"
MODERN = "2021-11"
| ApiVersion |
python | pytorch__pytorch | torch/testing/_internal/optests/generate_tests.py | {
"start": 29458,
"end": 31762
} | class ____:
def __init__(self, path: str, data: FailuresDictData):
self.path = path
self.data = data
@staticmethod
def load(path, *, create_file=False) -> "FailuresDict":
if create_file and not os.path.exists(path):
result = FailuresDict(path, {})
FailuresDict.save()
return result
with open(path) as fp:
contents = fp.read()
if contents.strip() == "":
dct = {
"_description": DESCRIPTION,
"data": {},
"_version": VERSION,
}
else:
dct = json.loads(contents)
assert "data" in dct
assert "_version" in dct and dct["_version"] == VERSION
return FailuresDict(path, dct["data"])
def _save(self, to_str=False) -> Optional[str]:
to_dump = {
"_description": DESCRIPTION,
"data": self.data,
"_version": VERSION,
}
# json.dumps doesn't end with a newline. Let's add one because files
# should end in newlines.
serialized = json.dumps(to_dump, **DUMP_OPTIONS) + "\n"
if to_str:
return serialized
with open(self.path, "w") as fp:
fp.write(serialized)
return None
def save(self) -> None:
return self._save()
def get_status(self, qualname: str, test_name: str) -> str:
if qualname not in self.data:
return "xsuccess"
dct = self.data[qualname]
if test_name not in dct:
return "xsuccess"
return dct[test_name]["status"]
def set_status(
self,
qualname: str,
test_name: str,
status: str,
*,
comment: Optional[str] = None,
):
if qualname not in self.data:
self.data[qualname] = {}
dct = self.data[qualname]
if test_name not in dct:
dct[test_name] = {"status": None, "comment": ""}
if status == "xsuccess":
# The default status is "xsuccess".
del dct[test_name]
else:
dct[test_name]["status"] = status
if comment is not None:
dct[test_name]["comment"] = comment
| FailuresDict |
python | scikit-learn__scikit-learn | sklearn/ensemble/tests/test_bagging.py | {
"start": 24067,
"end": 24387
} | class ____(BaseEstimator):
"""Fake estimator accepting sample_weight"""
def fit(self, X, y, sample_weight=None):
"""Record values passed during fit"""
self.X_ = X
self.y_ = y
self.sample_weight_ = sample_weight
def predict(self, X):
pass
| EstimatorAcceptingSampleWeight |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_optim_state.py | {
"start": 3921,
"end": 9959
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.block0 = BlockB(5, 3)
self.block1 = BlockB(3, 7)
self.bias = torch.nn.Parameter(torch.randn((5,)))
self.block2 = torch.nn.Sequential(
BlockA(7, 9),
BlockA(9, 9),
BlockB(9, 5),
)
self.relu = torch.nn.ReLU()
def forward(self, x) -> torch.Tensor:
x = self.relu(self.block0(x))
x = self.relu(self.block1(x))
x = self.relu(self.block2(x))
x = x + self.bias
return x
def get_input(self, device):
BATCH_SIZE = 8
return (torch.randn((BATCH_SIZE, 5)).to(device),)
def get_loss(self, inp, output):
return output.sum()
def run_backward(self, loss):
loss.backward()
@staticmethod
def wrap(
model: torch.nn.Module,
group: Optional[dist.ProcessGroup] = None,
ignore_modules: bool = False,
fsdp_kwargs: Optional[dict[str, Any]] = None,
) -> torch.nn.Module:
if fsdp_kwargs is None:
fsdp_kwargs = {}
# Flatten Bias0; then flatten weight and Bias1 together into `block1`
model.block1.bias_module0 = FSDP(
model.block1.bias_module0,
process_group=group,
**fsdp_kwargs,
)
model.block1 = FSDP(model.block1, process_group=group, **fsdp_kwargs)
# Flatten Bias0; flatten Bias1; then flatten weight into `block2[1]`
model.block2[1].bias_module0 = FSDP(
model.block2[1].bias_module0,
process_group=group,
**fsdp_kwargs,
)
model.block2[1].bias_module1 = FSDP(
model.block2[1].bias_module1,
process_group=group,
**fsdp_kwargs,
)
model.block2[1] = FSDP(model.block2[1], process_group=group, **fsdp_kwargs)
# Flatten weight, Bias, bias into `block2[2]`
ignored_modules = [model.block2[2].bias_module0] if ignore_modules else None
model.block2[2] = FSDP(
model.block2[2],
process_group=group,
ignored_modules=ignored_modules,
**fsdp_kwargs,
)
return model
@staticmethod
def wrap_alt(
model: torch.nn.Module,
group: Optional[dist.ProcessGroup] = None,
fsdp_kwargs: Optional[dict[str, Any]] = None,
) -> torch.nn.Module:
if fsdp_kwargs is None:
fsdp_kwargs = {}
model.block0.bias_module0 = FSDP(
model.block0.bias_module0,
process_group=group,
**fsdp_kwargs,
)
model.block0 = FSDP(model.block0, process_group=group, **fsdp_kwargs)
return model
@staticmethod
def wrap_with_unmanaged_params(
model,
add_to_fsdp_module: bool,
group=None,
) -> tuple[torch.nn.Module, list[torch.nn.Parameter]]:
"""Registers unmanaged parameters before wrapping with :meth:`wrap`."""
device = next(model.parameters()).device
unmanaged_param = torch.nn.Parameter(torch.randn(5, 5, device=device))
# Either register the parameter to a module to be wrapped with FSDP
# (`model.block2[2]`) or a module not to be wrapped with FSDP (`model`)
register_module = model.block2[2] if add_to_fsdp_module else model
register_module.register_parameter(
"unmanaged_param",
unmanaged_param,
)
# For simplicity, we only add a single unmanaged parameter, but should
# be easy to generalize if needed
return NestedModel.wrap(model, group), [unmanaged_param]
@staticmethod
def add_unmanaged_param_entry(osd, unmanaged_param, step) -> None:
"""Adds an entry for the unmanaged parameter ``unmanaged_param``
assuming Adam optimizer and a single parameter group."""
# The unmanaged parameters should be passed to this method in
# `model.parameters()` order since their parameter IDs will be assigned
# in order of the skipped IDs
# Assign a parameter ID to the unmanaged parameter
unmanaged_param_id = -1
param_ids = osd["param_groups"][0]["params"]
for i in range(1, len(param_ids)):
diff = param_ids[i] - param_ids[i - 1]
if diff != 1:
assert diff > 1, f"Invalid IDs: {param_ids[i - 1]} {param_ids[i]}"
unmanaged_param_id = param_ids[i - 1] + 1
break
if unmanaged_param_id == -1:
unmanaged_param_id = len(param_ids) # last ID skipped
assert unmanaged_param_id >= 0, "One parameter ID should be skipped"
# Add a state entry for the unmanaged parameter
state_device = next(iter(next(iter(osd["state"].values())).values())).device
osd["state"][unmanaged_param_id] = {
"step": torch.tensor(float(step), device=state_device),
"exp_avg": torch.randn(unmanaged_param.shape, device=state_device),
"exp_avg_sq": torch.randn(unmanaged_param.shape, device=state_device),
}
# Insert the ID into the parameter group in order
bisect.insort(osd["param_groups"][0]["params"], unmanaged_param_id)
# NOTE: We exclude `self.bias` from either parameter group to test the
# case where the optimizer input does not include all model parameters
def param_group0(self) -> list[torch.nn.Parameter]:
# Use `block1`'s parameters for the first parameter group to deviate
# from the `model.parameters()` order
return list(self.block1.parameters())
def param_group1(self) -> list[torch.nn.Parameter]:
# Deviate from the `model.parameters()` order further by rearranging
# `block2`'s parameters to be before `block0`'s parameters
return list(self.block2.parameters()) + list(self.block0.parameters())
# Simple and boring model to test interface and some corner cases that do not
# require complicated wrapping strategy.
| NestedModel |
python | Farama-Foundation__Gymnasium | tests/test_core.py | {
"start": 4106,
"end": 4338
} | class ____(ObservationWrapper):
"""Example observation wrapper for testing."""
def observation(self, observation: ObsType) -> ObsType:
"""Observation function."""
return np.array([1])
| ExampleObservationWrapper |
python | sqlalchemy__sqlalchemy | test/orm/test_query.py | {
"start": 3572,
"end": 5554
} | class ____(QueryTest):
run_create_tables = None
run_inserts = None
def test_with_session(self):
User = self.classes.User
s1 = fixture_session()
s2 = fixture_session()
q1 = s1.query(User)
q2 = q1.with_session(s2)
assert q2.session is s2
assert q1.session is s1
@testing.combinations(
(lambda s, User: s.query(User)),
(lambda s, User: s.query(User).filter_by(name="x")),
(lambda s, User: s.query(User.id, User.name).filter_by(name="x")),
(
lambda s, User: s.query(func.count(User.id)).filter(
User.name == "x"
)
),
)
def test_rudimentary_statement_accessors(self, test_case):
User = self.classes.User
s = fixture_session()
q1 = testing.resolve_lambda(test_case, s=s, User=User)
is_true(
q1.statement.set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
).compare(q1.__clause_element__())
)
is_true(
q1.statement.set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
).compare(q1.selectable)
)
@testing.combinations(("session",), ("connection",), argnames="executor")
@testing.combinations(
("execute",), ("scalars",), ("scalar",), argnames="method"
)
def test_no_query_in_execute(self, executor, method, connection):
# even though this test is testing deprecations, these deprecations
# become errors when removed so we dont want to remove this test,
# just update it
if executor == "session":
exec_obj = Session(connection)
else:
exec_obj = connection
meth = getattr(exec_obj, method)
q = Session().query(literal_column("1"))
with testing.expect_raises_message(
sa_exc.ObjectNotExecutableError, "Not an executable object: .*"
):
meth(q)
| MiscTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/self2.py | {
"start": 993,
"end": 1165
} | class ____(Shape1): ...
x1 = Shape1().set_scale(3.4)
reveal_type(x1, expected_text="Shape1")
x2 = Circle1().set_scale(3.4)
reveal_type(x2, expected_text="Circle1")
| Circle1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.