language | repo | path | class_span | source | target
|---|---|---|---|---|---|
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/types.py | {
"start": 3777,
"end": 3957
} | class ____(sqltypes.TypeEngine[int]):
"""Provide the PostgreSQL OID type."""
__visit_name__ = "OID"
operator_classes = OperatorClass.BASE | OperatorClass.COMPARISON
| OID |
python | pypa__warehouse | tests/unit/oidc/forms/test_activestate.py | {
"start": 3347,
"end": 23523
} | class ____:
def test_validate(self, monkeypatch):
data = MultiDict(
{
"organization": "some-org",
"project": "some-project",
"actor": "someuser",
}
)
form = activestate.ActiveStatePublisherForm(MultiDict(data))
monkeypatch.setattr(form, "_lookup_organization", lambda o: None)
monkeypatch.setattr(form, "_lookup_actor", lambda o: fake_user_info)
assert form.validate(), str(form.errors)
def test_lookup_actor_404(self, monkeypatch):
response = pretend.stub(
status_code=404,
raise_for_status=pretend.raiser(HTTPError),
content=b"fake-content",
)
requests = pretend.stub(
post=pretend.call_recorder(lambda o, **kw: response),
            exceptions=_requests.exceptions,
Timeout=Timeout,
HTTPError=HTTPError,
ConnectionError=ConnectionError,
)
monkeypatch.setattr(activestate, "requests", requests)
form = activestate.ActiveStatePublisherForm()
with pytest.raises(wtforms.validators.ValidationError):
form._lookup_actor(fake_username)
assert requests.post.calls == [
pretend.call(
"https://platform.activestate.com/graphql/v1/graphql",
json={
"query": "query($username: String) {users(where: {username: {_eq: $username}}) {user_id}}", # noqa: E501
"variables": {"username": fake_username},
},
timeout=5,
)
]
def test_lookup_actor_other_http_error(self, monkeypatch):
response = pretend.stub(
# anything that isn't 404 or 403
status_code=422,
raise_for_status=pretend.raiser(HTTPError),
content=b"fake-content",
)
requests = pretend.stub(
post=pretend.call_recorder(lambda o, **kw: response),
            exceptions=_requests.exceptions,
Timeout=Timeout,
HTTPError=HTTPError,
ConnectionError=ConnectionError,
)
monkeypatch.setattr(activestate, "requests", requests)
sentry_sdk = pretend.stub(capture_message=pretend.call_recorder(lambda s: None))
monkeypatch.setattr(activestate, "sentry_sdk", sentry_sdk)
form = activestate.ActiveStatePublisherForm()
with pytest.raises(wtforms.validators.ValidationError):
form._lookup_actor(fake_username)
assert requests.post.calls == [
pretend.call(
"https://platform.activestate.com/graphql/v1/graphql",
json={
"query": "query($username: String) {users(where: {username: {_eq: $username}}) {user_id}}", # noqa: E501
"variables": {"username": fake_username},
},
timeout=5,
)
]
assert sentry_sdk.capture_message.calls == [
pretend.call("Unexpected 422 error from ActiveState API: b'fake-content'")
]
def test_lookup_actor_http_timeout(self, monkeypatch):
requests = pretend.stub(
post=pretend.raiser(Timeout),
Timeout=Timeout,
HTTPError=HTTPError,
ConnectionError=ConnectionError,
)
monkeypatch.setattr(activestate, "requests", requests)
sentry_sdk = pretend.stub(capture_message=pretend.call_recorder(lambda s: None))
monkeypatch.setattr(activestate, "sentry_sdk", sentry_sdk)
form = activestate.ActiveStatePublisherForm()
with pytest.raises(wtforms.validators.ValidationError):
form._lookup_actor(fake_username)
assert sentry_sdk.capture_message.calls == [
pretend.call("Connection error from ActiveState API")
]
def test_lookup_actor_connection_error(self, monkeypatch):
requests = pretend.stub(
post=pretend.raiser(ConnectionError),
Timeout=Timeout,
HTTPError=HTTPError,
ConnectionError=ConnectionError,
)
monkeypatch.setattr(activestate, "requests", requests)
sentry_sdk = pretend.stub(capture_message=pretend.call_recorder(lambda s: None))
monkeypatch.setattr(activestate, "sentry_sdk", sentry_sdk)
form = activestate.ActiveStatePublisherForm()
with pytest.raises(wtforms.validators.ValidationError):
form._lookup_actor(fake_username)
assert sentry_sdk.capture_message.calls == [
pretend.call("Connection error from ActiveState API")
]
def test_lookup_actor_non_json(self, monkeypatch):
response = pretend.stub(
status_code=200,
raise_for_status=pretend.call_recorder(lambda: None),
json=pretend.raiser(_requests.exceptions.JSONDecodeError("", "", 0)),
content=b"",
)
requests = pretend.stub(
post=pretend.call_recorder(lambda o, **kw: response),
HTTPError=HTTPError,
exceptions=_requests.exceptions,
)
monkeypatch.setattr(activestate, "requests", requests)
sentry_sdk = pretend.stub(capture_message=pretend.call_recorder(lambda s: None))
monkeypatch.setattr(activestate, "sentry_sdk", sentry_sdk)
form = activestate.ActiveStatePublisherForm()
with pytest.raises(wtforms.validators.ValidationError):
form._lookup_actor(fake_username)
assert sentry_sdk.capture_message.calls == [
pretend.call("Unexpected error from ActiveState API: b''")
]
def test_lookup_actor_gql_error(self, monkeypatch):
response = pretend.stub(
status_code=200,
raise_for_status=pretend.call_recorder(lambda: None),
json=lambda: {"errors": ["some error"]},
content=b"fake-content",
)
requests = pretend.stub(
post=pretend.call_recorder(lambda o, **kw: response),
HTTPError=HTTPError,
exceptions=_requests.exceptions,
)
monkeypatch.setattr(activestate, "requests", requests)
sentry_sdk = pretend.stub(capture_message=pretend.call_recorder(lambda s: None))
monkeypatch.setattr(activestate, "sentry_sdk", sentry_sdk)
form = activestate.ActiveStatePublisherForm()
with pytest.raises(wtforms.validators.ValidationError):
form._lookup_actor(fake_username)
assert requests.post.calls == [
pretend.call(
"https://platform.activestate.com/graphql/v1/graphql",
json={
"query": "query($username: String) {users(where: {username: {_eq: $username}}) {user_id}}", # noqa: E501
"variables": {"username": fake_username},
},
timeout=5,
)
]
assert sentry_sdk.capture_message.calls == [
pretend.call("Unexpected error from ActiveState API: ['some error']")
]
def test_lookup_actor_gql_no_data(self, monkeypatch):
response = pretend.stub(
status_code=200,
raise_for_status=pretend.call_recorder(lambda: None),
json=lambda: {"data": {"users": []}},
)
requests = pretend.stub(
post=pretend.call_recorder(lambda o, **kw: response),
HTTPError=HTTPError,
exceptions=_requests.exceptions,
)
monkeypatch.setattr(activestate, "requests", requests)
form = activestate.ActiveStatePublisherForm()
with pytest.raises(wtforms.validators.ValidationError):
form._lookup_actor(fake_username)
assert requests.post.calls == [
pretend.call(
"https://platform.activestate.com/graphql/v1/graphql",
json={
"query": "query($username: String) {users(where: {username: {_eq: $username}}) {user_id}}", # noqa: E501
"variables": {"username": fake_username},
},
timeout=5,
)
]
def test_lookup_actor_succeeds(self, monkeypatch):
response = pretend.stub(
status_code=200,
raise_for_status=pretend.call_recorder(lambda: None),
json=lambda: fake_gql_user_response,
)
requests = pretend.stub(
post=pretend.call_recorder(lambda o, **kw: response),
HTTPError=HTTPError,
requests=_requests.exceptions,
Timeout=_requests.Timeout,
ConnectionError=_requests.ConnectionError,
)
monkeypatch.setattr(activestate, "requests", requests)
form = activestate.ActiveStatePublisherForm()
info = form._lookup_actor(fake_username)
assert requests.post.calls == [
pretend.call(
"https://platform.activestate.com/graphql/v1/graphql",
json={
"query": "query($username: String) {users(where: {username: {_eq: $username}}) {user_id}}", # noqa: E501
"variables": {"username": fake_username},
},
timeout=5,
)
]
assert info == fake_user_info
# _lookup_organization
def test_lookup_organization_404(self, monkeypatch):
response = pretend.stub(
status_code=404,
raise_for_status=pretend.raiser(HTTPError),
content=b"fake-content",
)
requests = pretend.stub(
post=pretend.call_recorder(lambda o, **kw: response),
HTTPError=HTTPError,
exceptions=_requests.exceptions,
Timeout=_requests.Timeout,
ConnectionError=_requests.ConnectionError,
)
monkeypatch.setattr(activestate, "requests", requests)
form = activestate.ActiveStatePublisherForm()
with pytest.raises(wtforms.validators.ValidationError):
form._lookup_organization(fake_org_name)
assert requests.post.calls == [
pretend.call(
"https://platform.activestate.com/graphql/v1/graphql",
json={
"query": "query($orgname: String) {organizations(where: {display_name: {_eq: $orgname}}) {added}}", # noqa: E501
"variables": {"orgname": fake_org_name},
},
timeout=5,
)
]
def test_lookup_organization_other_http_error(self, monkeypatch):
response = pretend.stub(
# anything that isn't 404 or 403
status_code=422,
raise_for_status=pretend.raiser(HTTPError),
content=b"fake-content",
)
requests = pretend.stub(
post=pretend.call_recorder(lambda o, **kw: response),
HTTPError=HTTPError,
exceptions=_requests.exceptions,
Timeout=_requests.Timeout,
ConnectionError=_requests.ConnectionError,
)
monkeypatch.setattr(activestate, "requests", requests)
sentry_sdk = pretend.stub(capture_message=pretend.call_recorder(lambda s: None))
monkeypatch.setattr(activestate, "sentry_sdk", sentry_sdk)
form = activestate.ActiveStatePublisherForm()
with pytest.raises(wtforms.validators.ValidationError):
form._lookup_organization(fake_org_name)
assert requests.post.calls == [
pretend.call(
"https://platform.activestate.com/graphql/v1/graphql",
json={
"query": "query($orgname: String) {organizations(where: {display_name: {_eq: $orgname}}) {added}}", # noqa: E501
"variables": {"orgname": fake_org_name},
},
timeout=5,
)
]
assert sentry_sdk.capture_message.calls == [
pretend.call("Unexpected 422 error from ActiveState API: b'fake-content'")
]
def test_lookup_organization_http_timeout(self, monkeypatch):
requests = pretend.stub(
post=pretend.raiser(Timeout),
Timeout=Timeout,
HTTPError=HTTPError,
ConnectionError=ConnectionError,
)
monkeypatch.setattr(activestate, "requests", requests)
sentry_sdk = pretend.stub(capture_message=pretend.call_recorder(lambda s: None))
monkeypatch.setattr(activestate, "sentry_sdk", sentry_sdk)
form = activestate.ActiveStatePublisherForm()
with pytest.raises(wtforms.validators.ValidationError):
form._lookup_organization(fake_org_name)
assert sentry_sdk.capture_message.calls == [
pretend.call("Connection error from ActiveState API")
]
def test_lookup_organization_connection_error(self, monkeypatch):
requests = pretend.stub(
post=pretend.raiser(ConnectionError),
Timeout=Timeout,
HTTPError=HTTPError,
ConnectionError=ConnectionError,
)
monkeypatch.setattr(activestate, "requests", requests)
sentry_sdk = pretend.stub(capture_message=pretend.call_recorder(lambda s: None))
monkeypatch.setattr(activestate, "sentry_sdk", sentry_sdk)
form = activestate.ActiveStatePublisherForm()
with pytest.raises(wtforms.validators.ValidationError):
form._lookup_organization(fake_org_name)
assert sentry_sdk.capture_message.calls == [
pretend.call("Connection error from ActiveState API")
]
def test_lookup_organization_non_json(self, monkeypatch):
response = pretend.stub(
status_code=200,
raise_for_status=pretend.call_recorder(lambda: None),
json=pretend.raiser(_requests.exceptions.JSONDecodeError("", "", 0)),
content=b"",
)
requests = pretend.stub(
post=pretend.call_recorder(lambda o, **kw: response),
HTTPError=HTTPError,
exceptions=_requests.exceptions,
)
monkeypatch.setattr(activestate, "requests", requests)
sentry_sdk = pretend.stub(capture_message=pretend.call_recorder(lambda s: None))
monkeypatch.setattr(activestate, "sentry_sdk", sentry_sdk)
form = activestate.ActiveStatePublisherForm()
with pytest.raises(wtforms.validators.ValidationError):
form._lookup_organization(fake_org_name)
assert sentry_sdk.capture_message.calls == [
pretend.call("Unexpected error from ActiveState API: b''")
]
def test_lookup_organization_gql_error(self, monkeypatch):
response = pretend.stub(
status_code=200,
raise_for_status=pretend.call_recorder(lambda: None),
json=lambda: {"errors": ["some error"]},
content=b'{"errors": ["some error"]}',
)
requests = pretend.stub(
post=pretend.call_recorder(lambda o, **kw: response),
HTTPError=HTTPError,
exceptions=_requests.exceptions,
)
monkeypatch.setattr(activestate, "requests", requests)
sentry_sdk = pretend.stub(capture_message=pretend.call_recorder(lambda s: None))
monkeypatch.setattr(activestate, "sentry_sdk", sentry_sdk)
form = activestate.ActiveStatePublisherForm()
with pytest.raises(wtforms.validators.ValidationError):
form._lookup_organization(fake_org_name)
assert requests.post.calls == [
pretend.call(
"https://platform.activestate.com/graphql/v1/graphql",
json={
"query": "query($orgname: String) {organizations(where: {display_name: {_eq: $orgname}}) {added}}", # noqa: E501
"variables": {"orgname": fake_org_name},
},
timeout=5,
)
]
assert sentry_sdk.capture_message.calls == [
pretend.call("Unexpected error from ActiveState API: ['some error']")
]
def test_lookup_organization_gql_no_data(self, monkeypatch):
response = pretend.stub(
status_code=200,
raise_for_status=pretend.call_recorder(lambda: None),
json=lambda: {"data": {"organizations": []}},
content='{"data": {"organizations": []}}',
)
requests = pretend.stub(
post=pretend.call_recorder(lambda o, **kw: response),
HTTPError=HTTPError,
exceptions=_requests.exceptions,
)
monkeypatch.setattr(activestate, "requests", requests)
form = activestate.ActiveStatePublisherForm()
with pytest.raises(wtforms.validators.ValidationError):
form._lookup_organization(fake_org_name)
assert requests.post.calls == [
pretend.call(
"https://platform.activestate.com/graphql/v1/graphql",
json={
"query": "query($orgname: String) {organizations(where: {display_name: {_eq: $orgname}}) {added}}", # noqa: E501
"variables": {"orgname": fake_org_name},
},
timeout=5,
)
]
def test_lookup_organization_succeeds(self, monkeypatch):
response = pretend.stub(
status_code=200,
json=lambda: fake_gql_org_response,
)
requests = pretend.stub(
post=pretend.call_recorder(lambda o, **kw: response), HTTPError=HTTPError
)
monkeypatch.setattr(activestate, "requests", requests)
form = activestate.ActiveStatePublisherForm()
form._lookup_organization(fake_org_name)
assert requests.post.calls == [
pretend.call(
"https://platform.activestate.com/graphql/v1/graphql",
json={
"query": "query($orgname: String) {organizations(where: {display_name: {_eq: $orgname}}) {added}}", # noqa: E501
"variables": {"orgname": fake_org_name},
},
timeout=5,
)
]
@pytest.mark.parametrize(
"data",
[
# Organization
# Missing
# Empty
{"organization": "", "project": "good", "actor": "good"},
# Actor
# Missing
# Empty
{"actor": "", "project": "good", "organization": "good"},
{"actor": None, "project": "good", "organization": "good"},
# Project
# Too short
# Too long
# Invalid characters
# No leading or ending -
# No double --
# Missing
# Empty
{"project": "AB", "actor": "good", "organization": "good"},
{
"project": "abcdefghojklmnopqrstuvwxyz123456789012345",
"actor": "good",
"organization": "good",
},
{
"project": "invalid_characters@",
"actor": "good",
"organization": "good",
},
{"project": "-foo-", "actor": "good", "organization": "good"},
{"project": "---", "actor": "good", "organization": "good"},
{"project": "", "actor": "good", "organization": "good"},
{"project": None, "actor": "good", "organization": "good"},
],
)
def test_validate_basic_invalid_fields(self, monkeypatch, data):
print(data)
form = activestate.ActiveStatePublisherForm(MultiDict(data))
monkeypatch.setattr(form, "_lookup_actor", lambda o: fake_user_info)
monkeypatch.setattr(form, "_lookup_organization", lambda o: None)
assert not form.validate()
def test_validate_owner(self, monkeypatch):
form = activestate.ActiveStatePublisherForm()
monkeypatch.setattr(form, "_lookup_actor", lambda o: fake_user_info)
field = pretend.stub(data=fake_username)
form.validate_actor(field)
assert form.actor_id == "some-user-id"
| TestActiveStatePublisherForm |
python | pytorch__pytorch | test/dynamo/test_generator.py | {
"start": 33919,
"end": 41476
} | class ____(GeneratorTestsBase):
def test_throw(self):
def whoo(t):
try:
yield t.sin()
except RuntimeError:
yield t.cos()
def fn(t):
gen = whoo(t)
a = next(gen)
b = gen.throw(RuntimeError)
return a + b
t = torch.randn(2)
y = self._compile_check(fn, (t,))
self.assertEqual(y, t.sin() + t.cos())
def test_throw_with_finally(self):
z = 0
def whoo():
nonlocal z
z = 0
try:
try:
yield 1
except ValueError:
yield 2
finally:
z += 2
except ValueError:
z += 33
yield 4
finally:
z += 1
z += 10
def f(x):
gen = whoo()
next(gen)
gen.throw(ValueError)
return x.sin()
self._compile_check(f)
self.assertEqual(z, 3)
def test_throw_without_finally(self):
z = 0
def whoo(t):
nonlocal z
z = 0
try:
z += 1
yield t.sin()
z += 10
except RuntimeError:
z += 100
yield t.cos()
z += 1_000
z += 10_000
def fn(t):
gen = whoo(t)
a = next(gen)
b = gen.throw(RuntimeError)
return a + b
t = torch.randn(2)
y = self._compile_check(fn, (t,))
self.assertEqual(y, t.sin() + t.cos())
self.assertEqual(z, 101)
def test_throw_no_yield_after_throw(self):
z = 0
def whoo(t):
nonlocal z
z = 0
try:
z += 1
yield t.sin()
except ValueError:
z += 10
finally:
z += 100
def fn(t):
gen = whoo(t)
a = next(gen)
try:
gen.throw(ValueError)
except StopIteration:
return a
t = torch.randn(2)
y = self._compile_check(fn, (t,))
self.assertEqual(z, 111)
self.assertEqual(y, t.sin())
def test_throw_not_catch(self):
z = 0
def whoo(t):
nonlocal z
z = 0
try:
z += 1
yield t.sin()
except ValueError:
z += 10
yield t.cos()
finally:
z += 100
@torch.compile(backend="eager", fullgraph=True)
def fn(t):
gen = whoo(t)
a = next(gen)
b = gen.throw(RuntimeError)
return a + b
t = torch.randn(2)
with self.assertRaises(RuntimeError):
fn(t)
def test_throw_raise_difference_exc(self):
z = 0
def whoo(t):
nonlocal z
z = 0
try:
z += 1
yield t.sin()
except ValueError as e:
z += 10
raise RuntimeError from e
finally:
z += 100
@torch.compile(backend="eager", fullgraph=True)
def fn(t):
gen = whoo(t)
a = next(gen)
b = gen.throw(ValueError)
return a + b
t = torch.randn(2)
with self.assertRaises(RuntimeError):
fn(t)
def test_throw_yield_finally(self):
z = 0
def whoo(t):
nonlocal z
z = 0
try:
z += 1
yield t.sin()
except RuntimeError:
z += 10
yield t.cos()
finally:
z += 100
yield t.tan() # RuntimeError: generator ignored GeneratorExit
@torch.compile(backend="eager", fullgraph=True)
def fn(t):
gen = whoo(t)
a = next(gen)
b = gen.throw(RuntimeError)
return a + b
t = torch.randn(2)
with self.assertRaises(Unsupported):
fn(t)
def test_throw_try_except_finally(self):
z = 0
def whoo(t):
nonlocal z
z = 0
try:
z += 1
yield t.sin()
except ValueError:
z += 10
yield t.cos()
except RuntimeError:
z += 100
yield t.tan()
finally:
z += 1000
z += 10_000
def fn(t):
gen = whoo(t)
a = next(gen)
b = gen.throw(RuntimeError)
return a + b
t = torch.randn(2)
y = self._compile_check(fn, (t,))
self.assertEqual(y, t.sin() + t.tan())
self.assertEqual(z, 1 + 100 + 1000)
def test_exception_context_with_yield(self):
def f():
yield
def fn(t):
gen = f()
gen.send(None)
try:
gen.throw(ValueError)
except ValueError:
z = 1
except Exception as e:
raise AssertionError from e
assert z == 1
return t.sin()
self._compile_check(fn)
def test_return_const_value_in_except_and_finally(self):
def whoo():
try:
yield 1
except ValueError:
return 2 # noqa: B901
finally:
return 3 # noqa: B012, SIM107, B901
def fn(t):
gen = whoo()
next(gen)
try:
gen.throw(ValueError)
except StopIteration as e:
assert e.args[0] == 3
except Exception as e:
raise AssertionError from e
return t.sin()
self._compile_check(fn)
def test_return_value_in_except_and_finally(self):
class Foo:
def __init__(self, x):
self.x = x
def whoo():
try:
yield 1
except ValueError:
return Foo(2) # noqa: B901
finally:
return Foo(3) # noqa: B012, SIM107, B901
def fn(t):
gen = whoo()
next(gen)
try:
gen.throw(ValueError)
except StopIteration as e:
assert e.args[0].x == 3
except Exception as e:
raise AssertionError from e
return t.sin()
self._compile_check(fn)
def test_return_None_in_except_and_finally(self):
def whoo():
try:
yield 1
except ValueError:
return 2 # noqa: B901
finally:
return # noqa: B012, SIM107
def fn(t):
gen = whoo()
next(gen)
try:
gen.throw(ValueError)
except StopIteration as e:
assert len(e.args) == 0
except Exception as e:
raise AssertionError from e
return t.sin()
self._compile_check(fn)
instantiate_parametrized_tests(GeneratorTests)
instantiate_parametrized_tests(TestGeneratorSend)
instantiate_parametrized_tests(TestGeneratorClose)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| TestGeneratorThrow |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/final3.py | {
"start": 684,
"end": 2103
} | class ____:
member1: Final = 4
# This should generate an error because only
# one declaration can have a Final attribute.
member1: Final
member2: typing.Final[int] = 3
member4: Final[int]
# This should generate an error because there is
# no assignment.
member5: Final[str]
member6: Final[int]
_member7: Final = 6
__member8: Final = 6
member9: Final = 2
# This should generate an error.
member9 = 3
def __init__(self, a: bool):
# This should generate an error because a Final
# member outside of a stub file or a class body
# must have an initializer.
self.member3: Final
# This should generate an error because this symbol
# already has a final declaration.
self.member2: Final[int]
if a:
self.member4 = 5
else:
self.member4 = 6
self.member4 = 6
def another_method(self):
# This should generate an error because assignments
# can occur only within class bodies or __init__ methods.
self.member6 = 4
# This should generate an error because 'Final' cannot
# be used to annotate instance variables outside of
# an __init__ method.
self.member7: Final = 6
reveal_type(ClassA.member1, expected_text="Literal[4]")
reveal_type(ClassA(True).member1, expected_text="Literal[4]")
| ClassA |
python | langchain-ai__langchain | libs/standard-tests/langchain_tests/integration_tests/vectorstores.py | {
"start": 566,
"end": 31019
} | class ____(BaseStandardTests):
"""Base class for vector store integration tests.
Implementers should subclass this test suite and provide a fixture
that returns an empty vector store for each test.
The fixture should use the `get_embeddings` method to get a pre-defined
embeddings model that should be used for this test suite.
Here is a template:
```python
from typing import Generator
import pytest
from langchain_core.vectorstores import VectorStore
from langchain_parrot_link.vectorstores import ParrotVectorStore
from langchain_tests.integration_tests.vectorstores import VectorStoreIntegrationTests
class TestParrotVectorStore(VectorStoreIntegrationTests):
@pytest.fixture()
def vectorstore(self) -> Generator[VectorStore, None, None]: # type: ignore
\"\"\"Get an empty vectorstore.\"\"\"
store = ParrotVectorStore(self.get_embeddings())
# note: store should be EMPTY at this point
# if you need to delete data, you may do so here
try:
yield store
finally:
# cleanup operations, or deleting data
pass
```
In the fixture, before the `yield` we instantiate an empty vector store. In the
`finally` block, we call whatever logic is necessary to bring the vector store
to a clean state.
```python
from typing import Generator
import pytest
from langchain_core.vectorstores import VectorStore
from langchain_tests.integration_tests.vectorstores import VectorStoreIntegrationTests
from langchain_chroma import Chroma
class TestChromaStandard(VectorStoreIntegrationTests):
@pytest.fixture()
def vectorstore(self) -> Generator[VectorStore, None, None]: # type: ignore
\"\"\"Get an empty VectorStore for unit tests.\"\"\"
store = Chroma(embedding_function=self.get_embeddings())
try:
yield store
finally:
store.delete_collection()
pass
```
Note that by default we enable both sync and async tests. To disable either,
override the `has_sync` or `has_async` properties to `False` in the
subclass. For example:
```python
class TestParrotVectorStore(VectorStoreIntegrationTests):
@pytest.fixture()
def vectorstore(self) -> Generator[VectorStore, None, None]: # type: ignore
...
@property
def has_async(self) -> bool:
return False
```
!!! note
API references for individual test methods include troubleshooting tips.
""" # noqa: E501
@abstractmethod
@pytest.fixture
def vectorstore(self) -> VectorStore:
"""Get the `VectorStore` class to test.
The returned `VectorStore` should be empty.
"""
@property
def has_sync(self) -> bool:
"""Configurable property to enable or disable sync tests."""
return True
@property
def has_async(self) -> bool:
"""Configurable property to enable or disable async tests."""
return True
@property
def has_get_by_ids(self) -> bool:
"""Whether the `VectorStore` supports `get_by_ids`."""
return True
@staticmethod
def get_embeddings() -> Embeddings:
"""Get embeddings.
A pre-defined embeddings model that should be used for this test.
This currently uses `DeterministicFakeEmbedding` from `langchain-core`,
which uses numpy to generate random numbers based on a hash of the input text.
The resulting embeddings are not meaningful, but they are deterministic.
"""
return DeterministicFakeEmbedding(
size=EMBEDDING_SIZE,
)
def test_vectorstore_is_empty(self, vectorstore: VectorStore) -> None:
"""Test that the `VectorStore` is empty.
??? note "Troubleshooting"
If this test fails, check that the test class (i.e., sub class of
`VectorStoreIntegrationTests`) initializes an empty vector store in the
            `vectorstore` fixture.
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")
assert vectorstore.similarity_search("foo", k=1) == []
def test_add_documents(self, vectorstore: VectorStore) -> None:
"""Test adding documents into the `VectorStore`.
??? note "Troubleshooting"
If this test fails, check that:
            1. We correctly initialize an empty vector store in the `vectorstore`
fixture.
2. Calling `similarity_search` for the top `k` similar documents does
not threshold by score.
3. We do not mutate the original document object when adding it to the
vector store (e.g., by adding an ID).
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")
original_documents = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
ids = vectorstore.add_documents(original_documents)
documents = vectorstore.similarity_search("bar", k=2)
assert documents == [
Document(page_content="bar", metadata={"id": 2}, id=ids[1]),
Document(page_content="foo", metadata={"id": 1}, id=ids[0]),
]
# Verify that the original document object does not get mutated!
# (e.g., an ID is added to the original document object)
assert original_documents == [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
def test_vectorstore_still_empty(self, vectorstore: VectorStore) -> None:
"""Test that the `VectorStore` is still empty.
This test should follow a test that adds documents.
This just verifies that the fixture is set up properly to be empty
after each test.
??? note "Troubleshooting"
If this test fails, check that the test class (i.e., sub class of
`VectorStoreIntegrationTests`) correctly clears the vector store in the
`finally` block.
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")
assert vectorstore.similarity_search("foo", k=1) == []
def test_deleting_documents(self, vectorstore: VectorStore) -> None:
"""Test deleting documents from the `VectorStore`.
??? note "Troubleshooting"
If this test fails, check that `add_documents` preserves identifiers
passed in through `ids`, and that `delete` correctly removes
documents.
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")
documents = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
ids = vectorstore.add_documents(documents, ids=["1", "2"])
assert ids == ["1", "2"]
vectorstore.delete(["1"])
documents = vectorstore.similarity_search("foo", k=1)
assert documents == [Document(page_content="bar", metadata={"id": 2}, id="2")]
def test_deleting_bulk_documents(self, vectorstore: VectorStore) -> None:
"""Test that we can delete several documents at once.
??? note "Troubleshooting"
If this test fails, check that `delete` correctly removes multiple
documents when given a list of IDs.
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")
documents = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
Document(page_content="baz", metadata={"id": 3}),
]
vectorstore.add_documents(documents, ids=["1", "2", "3"])
vectorstore.delete(["1", "2"])
documents = vectorstore.similarity_search("foo", k=1)
assert documents == [Document(page_content="baz", metadata={"id": 3}, id="3")]
def test_delete_missing_content(self, vectorstore: VectorStore) -> None:
"""Deleting missing content should not raise an exception.
??? note "Troubleshooting"
If this test fails, check that `delete` does not raise an exception
when deleting IDs that do not exist.
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")
vectorstore.delete(["1"])
vectorstore.delete(["1", "2", "3"])
def test_add_documents_with_ids_is_idempotent(
self, vectorstore: VectorStore
) -> None:
"""Adding by ID should be idempotent.
??? note "Troubleshooting"
If this test fails, check that adding the same document twice with the
same IDs has the same effect as adding it once (i.e., it does not
duplicate the documents).
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")
documents = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
vectorstore.add_documents(documents, ids=["1", "2"])
vectorstore.add_documents(documents, ids=["1", "2"])
documents = vectorstore.similarity_search("bar", k=2)
assert documents == [
Document(page_content="bar", metadata={"id": 2}, id="2"),
Document(page_content="foo", metadata={"id": 1}, id="1"),
]
def test_add_documents_by_id_with_mutation(self, vectorstore: VectorStore) -> None:
"""Test that we can overwrite by ID using `add_documents`.
??? note "Troubleshooting"
If this test fails, check that when `add_documents` is called with an
ID that already exists in the vector store, the content is updated
rather than duplicated.
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")
documents = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
vectorstore.add_documents(documents=documents, ids=["1", "2"])
# Now over-write content of ID 1
new_documents = [
Document(
page_content="new foo", metadata={"id": 1, "some_other_field": "foo"}
),
]
vectorstore.add_documents(documents=new_documents, ids=["1"])
# Check that the content has been updated
documents = vectorstore.similarity_search("new foo", k=2)
assert documents == [
Document(
id="1",
page_content="new foo",
metadata={"id": 1, "some_other_field": "foo"},
),
Document(id="2", page_content="bar", metadata={"id": 2}),
]
def test_get_by_ids(self, vectorstore: VectorStore) -> None:
"""Test get by IDs.
This test requires that `get_by_ids` be implemented on the vector store.
??? note "Troubleshooting"
If this test fails, check that `get_by_ids` is implemented and returns
documents in the same order as the IDs passed in.
!!! note
`get_by_ids` was added to the `VectorStore` interface in
`langchain-core` version 0.2.11. If difficult to implement, this
test can be skipped by setting the `has_get_by_ids` property to
`False`.
```python
@property
def has_get_by_ids(self) -> bool:
return False
```
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")
if not self.has_get_by_ids:
pytest.skip("get_by_ids not implemented.")
documents = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
ids = vectorstore.add_documents(documents, ids=["1", "2"])
retrieved_documents = vectorstore.get_by_ids(ids)
assert _sort_by_id(retrieved_documents) == _sort_by_id(
[
Document(page_content="foo", metadata={"id": 1}, id=ids[0]),
Document(page_content="bar", metadata={"id": 2}, id=ids[1]),
]
)
def test_get_by_ids_missing(self, vectorstore: VectorStore) -> None:
"""Test get by IDs with missing IDs.
??? note "Troubleshooting"
If this test fails, check that `get_by_ids` is implemented and does not
raise an exception when given IDs that do not exist.
!!! note
`get_by_ids` was added to the `VectorStore` interface in
`langchain-core` version 0.2.11. If difficult to implement, this
test can be skipped by setting the `has_get_by_ids` property to
`False`.
```python
@property
def has_get_by_ids(self) -> bool:
return False
```
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")
if not self.has_get_by_ids:
pytest.skip("get_by_ids not implemented.")
# This should not raise an exception
documents = vectorstore.get_by_ids(["1", "2", "3"])
assert documents == []
def test_add_documents_documents(self, vectorstore: VectorStore) -> None:
"""Run `add_documents` tests.
??? note "Troubleshooting"
If this test fails, check that `get_by_ids` is implemented and returns
documents in the same order as the IDs passed in.
Check also that `add_documents` will correctly generate string IDs if
none are provided.
!!! note
`get_by_ids` was added to the `VectorStore` interface in
`langchain-core` version 0.2.11. If difficult to implement, this
test can be skipped by setting the `has_get_by_ids` property to
`False`.
```python
@property
def has_get_by_ids(self) -> bool:
return False
```
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")
if not self.has_get_by_ids:
pytest.skip("get_by_ids not implemented.")
documents = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
ids = vectorstore.add_documents(documents)
assert _sort_by_id(vectorstore.get_by_ids(ids)) == _sort_by_id(
[
Document(page_content="foo", metadata={"id": 1}, id=ids[0]),
Document(page_content="bar", metadata={"id": 2}, id=ids[1]),
]
)
def test_add_documents_with_existing_ids(self, vectorstore: VectorStore) -> None:
"""Test that `add_documents` with existing IDs is idempotent.
??? note "Troubleshooting"
If this test fails, check that `get_by_ids` is implemented and returns
documents in the same order as the IDs passed in.
This test also verifies that:
1. IDs specified in the `Document.id` field are assigned when adding
documents.
            2. If some documents include IDs and others don't, string IDs are generated
for the latter.
!!! note
`get_by_ids` was added to the `VectorStore` interface in
`langchain-core` version 0.2.11. If difficult to implement, this
test can be skipped by setting the `has_get_by_ids` property to
`False`.
```python
@property
def has_get_by_ids(self) -> bool:
return False
```
"""
if not self.has_sync:
pytest.skip("Sync tests not supported.")
if not self.has_get_by_ids:
pytest.skip("get_by_ids not implemented.")
documents = [
Document(id="foo", page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
ids = vectorstore.add_documents(documents)
assert "foo" in ids
assert _sort_by_id(vectorstore.get_by_ids(ids)) == _sort_by_id(
[
Document(page_content="foo", metadata={"id": 1}, id="foo"),
Document(page_content="bar", metadata={"id": 2}, id=ids[1]),
]
)
async def test_vectorstore_is_empty_async(self, vectorstore: VectorStore) -> None:
"""Test that the `VectorStore` is empty.
??? note "Troubleshooting"
If this test fails, check that the test class (i.e., sub class of
`VectorStoreIntegrationTests`) initializes an empty vector store in the
            `vectorstore` fixture.
"""
if not self.has_async:
pytest.skip("Async tests not supported.")
assert await vectorstore.asimilarity_search("foo", k=1) == []
async def test_add_documents_async(self, vectorstore: VectorStore) -> None:
"""Test adding documents into the `VectorStore`.
??? note "Troubleshooting"
If this test fails, check that:
            1. We correctly initialize an empty vector store in the `vectorstore`
fixture.
2. Calling `.asimilarity_search` for the top `k` similar documents does
not threshold by score.
3. We do not mutate the original document object when adding it to the
vector store (e.g., by adding an ID).
"""
if not self.has_async:
pytest.skip("Async tests not supported.")
original_documents = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
ids = await vectorstore.aadd_documents(original_documents)
documents = await vectorstore.asimilarity_search("bar", k=2)
assert documents == [
Document(page_content="bar", metadata={"id": 2}, id=ids[1]),
Document(page_content="foo", metadata={"id": 1}, id=ids[0]),
]
# Verify that the original document object does not get mutated!
# (e.g., an ID is added to the original document object)
assert original_documents == [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
async def test_vectorstore_still_empty_async(
self, vectorstore: VectorStore
) -> None:
"""Test that the `VectorStore` is still empty.
This test should follow a test that adds documents.
This just verifies that the fixture is set up properly to be empty
after each test.
??? note "Troubleshooting"
If this test fails, check that the test class (i.e., sub class of
`VectorStoreIntegrationTests`) correctly clears the vector store in the
`finally` block.
"""
if not self.has_async:
pytest.skip("Async tests not supported.")
assert await vectorstore.asimilarity_search("foo", k=1) == []
async def test_deleting_documents_async(self, vectorstore: VectorStore) -> None:
"""Test deleting documents from the `VectorStore`.
??? note "Troubleshooting"
If this test fails, check that `aadd_documents` preserves identifiers
passed in through `ids`, and that `delete` correctly removes
documents.
"""
if not self.has_async:
pytest.skip("Async tests not supported.")
documents = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
ids = await vectorstore.aadd_documents(documents, ids=["1", "2"])
assert ids == ["1", "2"]
await vectorstore.adelete(["1"])
documents = await vectorstore.asimilarity_search("foo", k=1)
assert documents == [Document(page_content="bar", metadata={"id": 2}, id="2")]
async def test_deleting_bulk_documents_async(
self, vectorstore: VectorStore
) -> None:
"""Test that we can delete several documents at once.
??? note "Troubleshooting"
If this test fails, check that `adelete` correctly removes multiple
documents when given a list of IDs.
"""
if not self.has_async:
pytest.skip("Async tests not supported.")
documents = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
Document(page_content="baz", metadata={"id": 3}),
]
await vectorstore.aadd_documents(documents, ids=["1", "2", "3"])
await vectorstore.adelete(["1", "2"])
documents = await vectorstore.asimilarity_search("foo", k=1)
assert documents == [Document(page_content="baz", metadata={"id": 3}, id="3")]
async def test_delete_missing_content_async(self, vectorstore: VectorStore) -> None:
"""Deleting missing content should not raise an exception.
??? note "Troubleshooting"
If this test fails, check that `adelete` does not raise an exception
when deleting IDs that do not exist.
"""
if not self.has_async:
pytest.skip("Async tests not supported.")
await vectorstore.adelete(["1"])
await vectorstore.adelete(["1", "2", "3"])
async def test_add_documents_with_ids_is_idempotent_async(
self, vectorstore: VectorStore
) -> None:
"""Adding by ID should be idempotent.
??? note "Troubleshooting"
If this test fails, check that adding the same document twice with the
same IDs has the same effect as adding it once (i.e., it does not
duplicate the documents).
"""
if not self.has_async:
pytest.skip("Async tests not supported.")
documents = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
await vectorstore.aadd_documents(documents, ids=["1", "2"])
await vectorstore.aadd_documents(documents, ids=["1", "2"])
documents = await vectorstore.asimilarity_search("bar", k=2)
assert documents == [
Document(page_content="bar", metadata={"id": 2}, id="2"),
Document(page_content="foo", metadata={"id": 1}, id="1"),
]
async def test_add_documents_by_id_with_mutation_async(
self, vectorstore: VectorStore
) -> None:
"""Test that we can overwrite by ID using `add_documents`.
??? note "Troubleshooting"
If this test fails, check that when `aadd_documents` is called with an
ID that already exists in the vector store, the content is updated
rather than duplicated.
"""
if not self.has_async:
pytest.skip("Async tests not supported.")
documents = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
await vectorstore.aadd_documents(documents=documents, ids=["1", "2"])
# Now over-write content of ID 1
new_documents = [
Document(
page_content="new foo", metadata={"id": 1, "some_other_field": "foo"}
),
]
await vectorstore.aadd_documents(documents=new_documents, ids=["1"])
# Check that the content has been updated
documents = await vectorstore.asimilarity_search("new foo", k=2)
assert documents == [
Document(
id="1",
page_content="new foo",
metadata={"id": 1, "some_other_field": "foo"},
),
Document(id="2", page_content="bar", metadata={"id": 2}),
]
async def test_get_by_ids_async(self, vectorstore: VectorStore) -> None:
"""Test get by IDs.
This test requires that `get_by_ids` be implemented on the vector store.
??? note "Troubleshooting"
If this test fails, check that `get_by_ids` is implemented and returns
documents in the same order as the IDs passed in.
!!! note
`get_by_ids` was added to the `VectorStore` interface in
`langchain-core` version 0.2.11. If difficult to implement, this
test can be skipped by setting the `has_get_by_ids` property to
`False`.
```python
@property
def has_get_by_ids(self) -> bool:
return False
```
"""
if not self.has_async:
pytest.skip("Async tests not supported.")
if not self.has_get_by_ids:
pytest.skip("get_by_ids not implemented.")
documents = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
ids = await vectorstore.aadd_documents(documents, ids=["1", "2"])
retrieved_documents = await vectorstore.aget_by_ids(ids)
assert _sort_by_id(retrieved_documents) == _sort_by_id(
[
Document(page_content="foo", metadata={"id": 1}, id=ids[0]),
Document(page_content="bar", metadata={"id": 2}, id=ids[1]),
]
)
async def test_get_by_ids_missing_async(self, vectorstore: VectorStore) -> None:
"""Test get by IDs with missing IDs.
??? note "Troubleshooting"
If this test fails, check that `get_by_ids` is implemented and does not
raise an exception when given IDs that do not exist.
!!! note
`get_by_ids` was added to the `VectorStore` interface in
`langchain-core` version 0.2.11. If difficult to implement, this
test can be skipped by setting the `has_get_by_ids` property to
`False`.
```python
@property
def has_get_by_ids(self) -> bool:
return False
```
"""
if not self.has_async:
pytest.skip("Async tests not supported.")
if not self.has_get_by_ids:
pytest.skip("get_by_ids not implemented.")
# This should not raise an exception
assert await vectorstore.aget_by_ids(["1", "2", "3"]) == []
async def test_add_documents_documents_async(
self, vectorstore: VectorStore
) -> None:
"""Run `add_documents` tests.
??? note "Troubleshooting"
If this test fails, check that `get_by_ids` is implemented and returns
documents in the same order as the IDs passed in.
Check also that `aadd_documents` will correctly generate string IDs if
none are provided.
!!! note
`get_by_ids` was added to the `VectorStore` interface in
`langchain-core` version 0.2.11. If difficult to implement, this
test can be skipped by setting the `has_get_by_ids` property to
`False`.
```python
@property
def has_get_by_ids(self) -> bool:
return False
```
"""
if not self.has_async:
pytest.skip("Async tests not supported.")
if not self.has_get_by_ids:
pytest.skip("get_by_ids not implemented.")
documents = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
ids = await vectorstore.aadd_documents(documents)
assert _sort_by_id(await vectorstore.aget_by_ids(ids)) == _sort_by_id(
[
Document(page_content="foo", metadata={"id": 1}, id=ids[0]),
Document(page_content="bar", metadata={"id": 2}, id=ids[1]),
]
)
async def test_add_documents_with_existing_ids_async(
self, vectorstore: VectorStore
) -> None:
"""Test that `add_documents` with existing IDs is idempotent.
??? note "Troubleshooting"
If this test fails, check that `get_by_ids` is implemented and returns
documents in the same order as the IDs passed in.
This test also verifies that:
1. IDs specified in the `Document.id` field are assigned when adding
documents.
            2. If some documents include IDs and others don't, string IDs are generated
for the latter.
!!! note
`get_by_ids` was added to the `VectorStore` interface in
`langchain-core` version 0.2.11. If difficult to implement, this
test can be skipped by setting the `has_get_by_ids` property to
`False`.
```python
@property
def has_get_by_ids(self) -> bool:
return False
```
"""
if not self.has_async:
pytest.skip("Async tests not supported.")
if not self.has_get_by_ids:
pytest.skip("get_by_ids not implemented.")
documents = [
Document(id="foo", page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
]
ids = await vectorstore.aadd_documents(documents)
assert "foo" in ids
assert _sort_by_id(await vectorstore.aget_by_ids(ids)) == _sort_by_id(
[
Document(page_content="foo", metadata={"id": 1}, id="foo"),
Document(page_content="bar", metadata={"id": 2}, id=ids[1]),
]
)
| VectorStoreIntegrationTests |
python | ray-project__ray | doc/source/train/doc_code/hvd_trainer.py | {
"start": 491,
"end": 2372
} | class ____(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
self.layer1 = nn.Linear(input_size, layer_size)
self.relu = nn.ReLU()
self.layer2 = nn.Linear(layer_size, output_size)
def forward(self, input):
return self.layer2(self.relu(self.layer1(input)))
def train_loop_per_worker():
hvd.init()
dataset_shard = train.get_dataset_shard("train")
model = NeuralNetwork()
device = train.torch.get_device()
model.to(device)
loss_fn = nn.MSELoss()
lr_scaler = 1
optimizer = torch.optim.SGD(model.parameters(), lr=0.1 * lr_scaler)
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
op=hvd.Average,
)
for epoch in range(num_epochs):
model.train()
for batch in dataset_shard.iter_torch_batches(
batch_size=32, dtypes=torch.float
):
inputs, labels = torch.unsqueeze(batch["x"], 1), batch["y"]
outputs = model(inputs)
loss = loss_fn(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(f"epoch: {epoch}, loss: {loss.item()}")
with tempfile.TemporaryDirectory() as tmpdir:
torch.save(model.state_dict(), os.path.join(tmpdir, "model.pt"))
train.report(
{"loss": loss.item()}, checkpoint=Checkpoint.from_directory(tmpdir)
)
train_dataset = ray.data.from_items([{"x": x, "y": x + 1} for x in range(32)])
scaling_config = ScalingConfig(num_workers=3, use_gpu=use_gpu)
trainer = HorovodTrainer(
train_loop_per_worker=train_loop_per_worker,
scaling_config=scaling_config,
datasets={"train": train_dataset},
)
result = trainer.fit()
| NeuralNetwork |
python | has2k1__plotnine | plotnine/mapping/aes.py | {
"start": 13766,
"end": 16815
} | class ____:
"""
    Repeat an aesthetic a given number of times
The methods in this class know how to create sequences of aesthetics
whose values may not be scalar.
Some aesthetics may have valid values that are not scalar. e.g.
    sequences. Inserting one such value in a dataframe as a column
    would either lead to the wrong input or fail.
"""
@staticmethod
def linetype(value: Any, n: int) -> Sequence[Any]:
"""
Repeat linetypes
"""
named = {
"solid",
"dashed",
"dashdot",
"dotted",
"_",
"--",
"-.",
":",
"none",
" ",
"",
}
if value in named:
return [value] * n
# tuple of the form (offset, (on, off, on, off, ...))
# e.g (0, (1, 2))
if (
isinstance(value, tuple)
and isinstance(value[0], int)
and isinstance(value[1], tuple)
and len(value[1]) % 2 == 0
and all(isinstance(x, int) for x in value[1])
):
return [value] * n
raise ValueError(f"{value} is not a known linetype.")
@staticmethod
def color(value: Any, n: int) -> Sequence[Any]:
"""
Repeat colors
"""
if isinstance(value, str):
return [value] * n
if is_color_tuple(value):
return [tuple(value)] * n
raise ValueError(f"{value} is not a known color.")
fill = color
@staticmethod
def shape(value: Any, n: int) -> Any:
"""
Repeat shapes
"""
if isinstance(value, str):
return [value] * n
# tuple of the form (numsides, style, angle)
# where style is in the range [0, 3]
# e.g (4, 1, 45)
if (
isinstance(value, tuple)
and all(isinstance(x, int) for x in value)
and 0 <= value[1] < 3
):
return [value] * n
if is_shape_points(value):
return [tuple(value)] * n
raise ValueError(f"{value} is not a know shape.")
def is_shape_points(obj: Any) -> bool:
"""
Return True if obj is like Sequence[tuple[float, float]]
"""
def is_numeric(obj) -> bool:
"""
Return True if obj is a python or numpy float or integer
"""
return isinstance(obj, (float, int, np.floating, np.integer))
if not iter(obj):
return False
try:
return all(is_numeric(a) and is_numeric(b) for a, b in obj)
except TypeError:
return False
def has_groups(data: pd.DataFrame) -> bool:
"""
Check if data is grouped
Parameters
----------
data :
Data
Returns
-------
out : bool
If True, the data has groups.
"""
    # If any row in the group column is equal to NO_GROUP, then
    # all of them are, and the data has no groups
return data["group"].iloc[0] != NO_GROUP
| RepeatAesthetic |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 883,
"end": 1004
} | class ____(models.Model):
name = models.CharField(max_length=50)
created = models.DateTimeField(auto_now=True)
| Club |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/registry_entry.py | {
"start": 1457,
"end": 25442
} | class ____:
entry_blob_path: str
metadata_file_path: str
def _apply_metadata_overrides(metadata_data: dict, registry_type: str, bucket_name: str, metadata_blob: storage.Blob) -> dict:
connector_type = metadata_data["connectorType"]
overridden_metadata_data = _apply_overrides_from_registry(metadata_data, registry_type)
# remove fields that are not needed in the registry
del overridden_metadata_data["registryOverrides"]
del overridden_metadata_data["connectorType"]
# rename field connectorSubtype to sourceType
connector_subtype = overridden_metadata_data.get("connectorSubtype")
if connector_subtype:
overridden_metadata_data["sourceType"] = overridden_metadata_data["connectorSubtype"]
del overridden_metadata_data["connectorSubtype"]
# rename definitionId field to sourceDefinitionId or destinationDefinitionId
id_field = "sourceDefinitionId" if connector_type == "source" else "destinationDefinitionId"
overridden_metadata_data[id_field] = overridden_metadata_data["definitionId"]
del overridden_metadata_data["definitionId"]
# add in useless fields that are currently required for porting to the actor definition spec
overridden_metadata_data["tombstone"] = False
overridden_metadata_data["custom"] = False
overridden_metadata_data["public"] = True
# Add generated fields for source file metadata and git
overridden_metadata_data["generated"] = _apply_generated_fields(overridden_metadata_data, bucket_name, metadata_blob)
# Add Dependency information
overridden_metadata_data["packageInfo"] = _apply_package_info_fields(overridden_metadata_data, bucket_name)
# Add language field
overridden_metadata_data = _apply_language_field(overridden_metadata_data)
# if there is no supportLevel, set it to "community"
if not overridden_metadata_data.get("supportLevel"):
overridden_metadata_data["supportLevel"] = "community"
# apply ab_internal defaults
overridden_metadata_data = _apply_ab_internal_defaults(overridden_metadata_data)
# apply icon url and releases
icon_blob = _get_icon_blob_from_gcs(bucket_name, metadata_data)
icon_url = get_public_url_for_gcs_file(icon_blob.bucket.name, icon_blob.name, METADATA_CDN_BASE_URL)
overridden_metadata_data["iconUrl"] = icon_url
overridden_metadata_data["releases"] = _apply_connector_releases(overridden_metadata_data)
return overridden_metadata_data
@deep_copy_params
def _apply_overrides_from_registry(metadata_data: dict, override_registry_key: str) -> dict:
"""Apply the overrides from the registry to the metadata data.
Args:
metadata_data (dict): The metadata data field.
override_registry_key (str): The key of the registry to override the metadata with.
Returns:
dict: The metadata data field with the overrides applied.
"""
override_registry = metadata_data["registryOverrides"][override_registry_key]
del override_registry["enabled"]
# remove any None values from the override registry
override_registry = {k: v for k, v in override_registry.items() if v is not None}
metadata_data.update(override_registry)
return metadata_data
def _apply_generated_fields(metadata_data: dict, bucket_name: str, metadata_blob: storage.Blob) -> dict:
"""Apply generated fields to the metadata data field.
Args:
metadata_data (dict): The metadata data field.
Returns:
dict: The metadata data field with the generated fields applied.
"""
# work on our own copy of everything
metadata_data = copy.deepcopy(metadata_data)
generated_fields = metadata_data.get("generated") or {}
# Add the source file metadata
# on a GCS blob, the "name" is actually the full path
generated_fields = set_with(generated_fields, "source_file_info.metadata_file_path", metadata_blob.name, default_none_to_dict)
generated_fields = set_with(generated_fields, "source_file_info.metadata_bucket_name", bucket_name, default_none_to_dict)
generated_fields = set_with(
generated_fields, "source_file_info.registry_entry_generated_at", datetime.datetime.now().isoformat(), default_none_to_dict
)
generated_fields = set_with(
generated_fields, "source_file_info.metadata_last_modified", metadata_blob.updated.isoformat(), default_none_to_dict
)
return generated_fields
@sentry_sdk.trace
@deep_copy_params
def _apply_package_info_fields(metadata_data: dict, bucket_name: str) -> dict:
"""Apply package info fields to the metadata data field.
Args:
metadata_data (dict): The metadata data field.
bucket_name (str): The name of the GCS bucket.
Returns:
dict: The metadata data field with the package info fields applied.
"""
sanitized_connector_technical_name = metadata_data["dockerRepository"].replace("airbyte/", "")
connector_version = metadata_data["dockerImageTag"]
dependencies_path = (
f"{CONNECTOR_DEPENDENCY_FOLDER}/{sanitized_connector_technical_name}/{connector_version}/{CONNECTOR_DEPENDENCY_FILE_NAME}"
)
package_info_fields = metadata_data.get("packageInfo") or {}
try:
logger.info(
f"Getting dependencies blob for `{sanitized_connector_technical_name}` `{connector_version}` at path `{dependencies_path}`"
)
gcs_client = get_gcs_storage_client(gcs_creds=os.environ.get("GCS_CREDENTIALS"))
bucket = gcs_client.bucket(bucket_name)
dependencies_blob = bucket.blob(dependencies_path)
dependencies_blob_contents = safe_read_gcs_file(dependencies_blob)
if dependencies_blob_contents is not None:
dependencies_json = json.loads(dependencies_blob_contents)
cdk_version = None
for package in dependencies_json.get("dependencies", []):
if package.get("package_name") == "airbyte-cdk":
# Note: Prefix the version with the python slug as the python cdk is the only one we have
# versions available for.
cdk_version = f'{PYTHON_CDK_SLUG}:{package.get("version")}'
break
package_info_fields = set_with(package_info_fields, "cdk_version", cdk_version, default_none_to_dict)
except Exception as e:
logger.warning(f"Error reading dependencies file for `{sanitized_connector_technical_name}`: {e}")
raise
logger.info("Added package info fields.")
return package_info_fields
@deep_copy_params
def _apply_language_field(metadata_data: dict) -> dict:
"""Transform the language tag into a top-level field, if it is not already present.
Args:
metadata_data (dict): The metadata data field.
Returns:
dict: The metadata data field with the language field applied.
"""
if metadata_data.get("language"):
return metadata_data
tags = metadata_data.get("tags", [])
languages = [tag.replace("language:", "") for tag in tags if tag.startswith("language:")]
metadata_data["language"] = languages[0] if languages else None
return metadata_data
@deep_copy_params
def _apply_ab_internal_defaults(metadata_data: dict) -> dict:
"""Apply ab_internal defaults to the metadata data field.
Args:
metadata_data (dict): The metadata data field.
Returns:
dict: The metadata data field with the ab_internal defaults applied.
"""
default_ab_internal_values = {
"sl": 100,
"ql": 100,
}
existing_ab_internal_values = metadata_data.get("ab_internal") or {}
ab_internal_values = {**default_ab_internal_values, **existing_ab_internal_values}
metadata_data["ab_internal"] = ab_internal_values
return metadata_data
@deep_copy_params
def _apply_connector_releases(metadata: dict) -> Optional[pd.DataFrame]:
documentation_url = metadata["documentationUrl"]
final_registry_releases = {}
releases = metadata.get("releases")
if releases is not None and releases.get("breakingChanges"):
# apply defaults for connector releases
final_registry_releases["migrationDocumentationUrl"] = _calculate_migration_documentation_url(
metadata["releases"], documentation_url
)
# releases has a dictionary field called breakingChanges, where the key is the version and the value is the data for the breaking change
# each breaking change has a migrationDocumentationUrl field that is optional, so we need to apply defaults to it
breaking_changes = metadata["releases"]["breakingChanges"]
if breaking_changes is not None:
for version, breaking_change in breaking_changes.items():
breaking_change["migrationDocumentationUrl"] = _calculate_migration_documentation_url(
breaking_change, documentation_url, version
)
final_registry_releases["breakingChanges"] = breaking_changes
if releases is not None and releases.get("rolloutConfiguration"):
final_registry_releases["rolloutConfiguration"] = metadata["releases"]["rolloutConfiguration"]
return final_registry_releases
def _calculate_migration_documentation_url(releases_or_breaking_change: dict, documentation_url: str, version: Optional[str] = None) -> str:
"""Calculate the migration documentation url for the connector releases.
Args:
        releases_or_breaking_change (dict): The releases dict, or a single breaking change entry.
        documentation_url (str): The connector documentation url.
        version (Optional[str]): The breaking change version, when building the url for a single breaking change.
Returns:
str: The migration documentation url.
"""
base_url = f"{documentation_url}-migrations"
default_migration_documentation_url = f"{base_url}#{version}" if version is not None else base_url
return releases_or_breaking_change.get("migrationDocumentationUrl", None) or default_migration_documentation_url
def _get_and_parse_yaml_file(file_path: pathlib.Path) -> dict:
"""Get and parse the metadata file.
Args:
metadata_file_path (pathlib.Path): The path to the metadata file.
Returns:
dict: The file dictionary.
"""
try:
logger.debug(f"Getting and parsing YAML file: `{file_path}`")
with open(file_path, "r") as f:
file_dict = yaml.safe_load(f)
    except Exception:
        logger.exception(f"Error parsing YAML file `{file_path}`")
        raise
logger.info("Parsed YAML file.")
return file_dict
@sentry_sdk.trace
def _get_icon_blob_from_gcs(bucket_name: str, metadata_entry: dict) -> storage.Blob:
"""Get the icon blob from the GCS bucket.
Args:
        bucket_name (str): The name of the GCS bucket.
metadata_entry (dict): The metadata entry.
Returns:
storage.Blob: The icon blob.
"""
connector_docker_repository = metadata_entry["dockerRepository"]
icon_file_path = f"{METADATA_FOLDER}/{connector_docker_repository}/latest/{ICON_FILE_NAME}"
try:
logger.info(f"Getting icon blob for {connector_docker_repository}")
gcs_client = get_gcs_storage_client(gcs_creds=os.environ.get("GCS_CREDENTIALS"))
bucket = gcs_client.bucket(bucket_name)
icon_blob = bucket.blob(icon_file_path)
if not icon_blob.exists():
raise ValueError(f"Icon file not found for `{connector_docker_repository}`")
    except Exception:
        logger.exception(f"Error getting icon blob at `{icon_file_path}`")
raise
return icon_blob
def _get_connector_type_from_registry_entry(registry_entry: dict) -> TaggedRegistryEntry:
"""Get the connector type from the registry entry.
Args:
registry_entry (dict): The registry entry.
Returns:
TaggedRegistryEntry: The connector type and model.
"""
if registry_entry.get(ConnectorTypePrimaryKey.SOURCE.value):
return (ConnectorTypes.SOURCE, ConnectorRegistrySourceDefinition)
elif registry_entry.get(ConnectorTypePrimaryKey.DESTINATION.value):
return (ConnectorTypes.DESTINATION, ConnectorRegistryDestinationDefinition)
else:
raise Exception("Could not determine connector type from registry entry")
def _get_registry_blob_information(
metadata_dict: dict, registry_type: str, metadata_data_with_overrides: dict, is_prerelease: bool
) -> List[RegistryEntryInfo]:
"""
Builds information for each registry blob: GCS path to write the registry entry into, along with
the correct "metadata blob path" for the entry.
Args:
metadata_dict (dict): The metadata dictionary.
        registry_type (str): The registry type.
        metadata_data_with_overrides (dict): The metadata data field with registry overrides applied.
        is_prerelease (bool): Whether this is a prerelease publish.
Returns:
List[RegistryEntryInfo]: Tuples of the path to write the registry entry to, and the metadata blob path to populate into the entry.
"""
docker_repository = metadata_dict["data"]["dockerRepository"]
registry_entry_paths: List[RegistryEntryInfo] = []
# The versioned registry entries always respect the registry overrides.
# For example, with destination-postgres: we'll push oss.json to `destination-postgres/<version>/oss.json`,
# but cloud.json goes to `destination-postgres-strict-encrypt/<version>/cloud.json`.
versioned_registry_entry_path = f"{METADATA_FOLDER}/{metadata_data_with_overrides['dockerRepository']}/{metadata_data_with_overrides['dockerImageTag']}/{registry_type}.json"
# However, the metadata blob path uses the non-overridden docker repo, for... reasons?
versioned_metadata_blob_path = f"{METADATA_FOLDER}/{docker_repository}/{metadata_data_with_overrides['dockerImageTag']}/metadata.yaml"
# We always publish the versioned registry entry.
registry_entry_paths.append(RegistryEntryInfo(versioned_registry_entry_path, versioned_metadata_blob_path))
# If we're not doing a prerelease, we have an extra file to push.
# Note that for these extra files, we point at a different metadata.yaml than the versioned file
# (e.g. the `latest` registry entry points at the `latest` metadata.yaml, instead of at the versioned metadata.yaml)
if not is_prerelease:
if "-rc" in metadata_dict["data"]["dockerImageTag"]:
# We're doing a release candidate publish. Push the RC registry entry.
# This intentionally uses the non-overridden docker_repository. We _always_ upload both cloud+oss registry entries
# to the non-overridden docker_repository path for the `release_candidate` entry.
release_candidate_registry_entry_path = f"{METADATA_FOLDER}/{docker_repository}/release_candidate/{registry_type}.json"
release_candidate_metadata_blob_path = f"{METADATA_FOLDER}/{docker_repository}/release_candidate/metadata.yaml"
registry_entry_paths.append(RegistryEntryInfo(release_candidate_registry_entry_path, release_candidate_metadata_blob_path))
else:
# This is a normal publish. Push the `latest` registry entry.
# This intentionally uses the non-overridden docker_repository. We _always_ upload both cloud+oss registry entries
# to the non-overridden docker_repository path for the `latest` entry.
latest_registry_entry_path = f"{METADATA_FOLDER}/{docker_repository}/latest/{registry_type}.json"
latest_registry_metadata_blob_path = f"{METADATA_FOLDER}/{docker_repository}/latest/metadata.yaml"
registry_entry_paths.append(RegistryEntryInfo(latest_registry_entry_path, latest_registry_metadata_blob_path))
return registry_entry_paths
@sentry_sdk.trace
def _persist_connector_registry_entry(bucket_name: str, registry_entry: PolymorphicRegistryEntry, registry_entry_path: str) -> None:
"""Persist the connector registry entry to the GCS bucket.
Args:
bucket_name (str): The name of the GCS bucket.
registry_entry (PolymorphicRegistryEntry): The registry entry.
registry_entry_path (str): The path to the registry entry.
"""
try:
logger.info(f"Persisting connector registry entry to {registry_entry_path}")
gcs_client = get_gcs_storage_client()
bucket = gcs_client.bucket(bucket_name)
registry_entry_blob = bucket.blob(registry_entry_path)
registry_entry_blob.upload_from_string(registry_entry.json(exclude_none=True))
except Exception as e:
logger.exception(f"Error persisting connector registry entry")
raise
@sentry_sdk.trace
def generate_and_persist_registry_entry(
bucket_name: str, repo_metadata_file_path: pathlib.Path, registry_type: str, docker_image_tag: str, is_prerelease: bool
) -> None:
"""Generate and persist the connector registry entry to the GCS bucket.
Args:
bucket_name (str): The name of the GCS bucket.
repo_metadata_file_path (pathlib.Path): The path to the spec file.
registry_type (str): The registry type.
docker_image_tag (str): The docker image tag associated with this release. Typically a semver string (e.g. '1.2.3'), possibly with a suffix (e.g. '1.2.3-dev.abcde12345')
is_prerelease (bool): Whether this is a prerelease, or a main release.
"""
# Read the repo metadata dict to bootstrap ourselves. We need the docker repository,
# so that we can read the metadata from GCS.
repo_metadata_dict = _get_and_parse_yaml_file(repo_metadata_file_path)
docker_repository = repo_metadata_dict["data"]["dockerRepository"]
try:
# Now that we have the docker repo, read the appropriate versioned metadata from GCS.
# This metadata will differ in a few fields (e.g. in prerelease mode, dockerImageTag will contain the actual prerelease tag `1.2.3-dev.abcde12345`),
# so we'll treat this as the source of truth (ish. See below for how we handle the registryOverrides field.)
gcs_client = get_gcs_storage_client(gcs_creds=os.environ.get("GCS_CREDENTIALS"))
bucket = gcs_client.bucket(bucket_name)
metadata_blob = bucket.blob(f"{METADATA_FOLDER}/{docker_repository}/{docker_image_tag}/{METADATA_FILE_NAME}")
# bucket.blob() returns a partially-loaded blob.
# reload() asks GCS to fetch the rest of the information.
# (this doesn't fetch the _contents_ of the blob, only its metadata - modified time, etc.)
metadata_blob.reload()
metadata_dict = yaml.safe_load(metadata_blob.download_as_string())
    except Exception:
logger.exception("Error loading metadata from GCS")
message = f"*🤖 🔴 _Registry Entry Generation_ FAILED*:\nRegistry Entry: `{registry_type}.json`\nConnector: `{repo_metadata_dict['data']['dockerRepository']}`\nGCS Bucket: `{bucket_name}`."
send_slack_message(PUBLISH_UPDATE_CHANNEL, message)
raise
message = f"*🤖 🟡 _Registry Entry Generation_ STARTED*:\nRegistry Entry: `{registry_type}.json`\nConnector: `{docker_repository}`\nGCS Bucket: `{bucket_name}`."
send_slack_message(PUBLISH_UPDATE_CHANNEL, message)
    # If the connector is not enabled on the given registry, skip generating and persisting the registry entry.
if metadata_dict["data"]["registryOverrides"][registry_type]["enabled"]:
metadata_data = metadata_dict["data"]
try:
overridden_metadata_data = _apply_metadata_overrides(metadata_data, registry_type, bucket_name, metadata_blob)
except Exception as e:
logger.exception(f"Error applying metadata overrides")
message = f"*🤖 🔴 _Registry Entry Generation_ FAILED*:\nRegistry Entry: `{registry_type}.json`\nConnector: `{metadata_data['dockerRepository']}`\nGCS Bucket: `{bucket_name}`."
send_slack_message(PUBLISH_UPDATE_CHANNEL, message)
raise
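        # Work out every GCS path this entry should be written to (the versioned path, plus latest or release_candidate).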
registry_entry_blob_paths = _get_registry_blob_information(metadata_dict, registry_type, overridden_metadata_data, is_prerelease)
logger.info("Parsing spec file.")
spec_cache = SpecCache()
# Use the overridden values here. This enables us to read from the appropriate spec cache for strict-encrypt connectors.
cached_spec = spec_cache.find_spec_cache_with_fallback(
overridden_metadata_data["dockerRepository"], overridden_metadata_data["dockerImageTag"], registry_type
)
overridden_metadata_data["spec"] = spec_cache.download_spec(cached_spec)
logger.info("Spec file parsed and added to metadata.")
logger.info("Parsing registry entry model.")
_, RegistryEntryModel = _get_connector_type_from_registry_entry(overridden_metadata_data)
registry_entry_model = RegistryEntryModel.parse_obj(overridden_metadata_data)
logger.info("Registry entry model parsed.")
# Persist the registry entry to the GCS bucket.
for registry_entry_info in registry_entry_blob_paths:
registry_entry_blob_path = registry_entry_info.entry_blob_path
metadata_blob_path = registry_entry_info.metadata_file_path
try:
logger.info(
f"Persisting `{metadata_data['dockerRepository']}` {registry_type} registry entry to `{registry_entry_blob_path}`"
)
# set the correct metadata blob path on the registry entry
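                # deep-copy the model so each blob gets its own metadata_file_path without mutating the shared instance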
registry_entry_model = copy.deepcopy(registry_entry_model)
generated_fields: Optional[GeneratedFields] = registry_entry_model.generated
if generated_fields is not None:
source_file_info: Optional[SourceFileInfo] = generated_fields.source_file_info
if source_file_info is not None:
source_file_info.metadata_file_path = metadata_blob_path
_persist_connector_registry_entry(bucket_name, registry_entry_model, registry_entry_blob_path)
message = f"*🤖 🟢 _Registry Entry Generation_ SUCCESS*:\nRegistry Entry: `{registry_type}.json`\nConnector: `{metadata_data['dockerRepository']}`\nGCS Bucket: `{bucket_name}`\nPath: `{registry_entry_blob_path}`."
send_slack_message(PUBLISH_UPDATE_CHANNEL, message)
logger.info("Success.")
except Exception as e:
logger.exception(f"Error persisting connector registry entry to")
message = f"*🤖 🔴 _Registry Entry Generation_ FAILED*:\nRegistry Entry: `{registry_type}.json`\nConnector: `{metadata_data['dockerRepository']}`\nGCS Bucket: `{bucket_name}`\nPath: `{registry_entry_blob_path}`."
send_slack_message(PUBLISH_UPDATE_CHANNEL, message)
try:
bucket.delete_blob(registry_entry_blob_path)
except Exception as cleanup_error:
logger.warning(f"Failed to clean up {registry_entry_blob_path}: {cleanup_error}")
raise
else:
logger.info(
f"Registry type {registry_type} is not enabled for `{metadata_dict['data']['dockerRepository']}`, skipping generation and upload."
)
message = f"*🤖 ⚫ _Registry Entry Generation_ NOOP*:\n_Note: Connector is not enabled on {registry_type} registry. No action required._\nRegistry Entry: `{registry_type}.json`\nConnector: `{metadata_dict['data']['dockerRepository']}`\nGCS Bucket: `{bucket_name}`."
send_slack_message(PUBLISH_UPDATE_CHANNEL, message)
# For latest versions that are disabled, delete any existing registry entry to remove it from the registry
if (
"-rc" not in metadata_dict["data"]["dockerImageTag"] and "-dev" not in metadata_dict["data"]["dockerImageTag"]
) and not metadata_dict["data"]["registryOverrides"][registry_type]["enabled"]:
logger.info(
f"{registry_type} is not enabled: deleting existing {registry_type} registry entry for {metadata_dict['data']['dockerRepository']} at latest path."
)
latest_registry_entry_path = f"{METADATA_FOLDER}/{metadata_dict['data']['dockerRepository']}/latest/{registry_type}.json"
existing_registry_entry = bucket.blob(latest_registry_entry_path)
if existing_registry_entry.exists():
bucket.delete_blob(latest_registry_entry_path)
| RegistryEntryInfo |
python | getsentry__sentry | src/sentry/backup/services/import_export/model.py | {
"start": 5254,
"end": 5748
} | class ____(str, Enum):
Unknown = "Unknown"
DatabaseError = "DatabaseError"
DeserializationFailed = "DeserializationFailed"
IncorrectSiloModeForModel = "IncorrectSiloModeForModel"
IntegrityError = "IntegrityError"
InvalidMinOrdinal = "InvalidMinOrdinal"
MissingImportUUID = "MissingImportUUID"
UnknownModel = "UnknownModel"
UnexpectedModel = "UnexpectedModel"
UnspecifiedScope = "UnspecifiedScope"
ValidationError = "ValidationError"
| RpcImportErrorKind |
python | ansible__ansible | packaging/cli-doc/build.py | {
"start": 8001,
"end": 9968
} | class ____:
"""Documentation for an action."""
desc: str | None
options: tuple[str, ...]
arg: str | None
def get_action_docs(parser: argparse.ArgumentParser) -> list[ActionDoc]:
"""Get action documentation from the given argument parser."""
action_docs = []
# noinspection PyProtectedMember
for action in parser._actions:
if action.help == argparse.SUPPRESS:
continue
# noinspection PyProtectedMember, PyUnresolvedReferences
args = action.dest.upper() if isinstance(action, argparse._StoreAction) else None
if args or action.option_strings:
action_docs.append(
ActionDoc(
desc=action.help,
options=tuple(action.option_strings),
arg=args,
)
)
return action_docs
def trim_docstring(docstring: str | None) -> str:
"""Trim and return the given docstring using the implementation from https://peps.python.org/pep-0257/#handling-docstring-indentation."""
if not docstring:
return '' # pragma: nocover
# Convert tabs to spaces (following the normal Python rules) and split into a list of lines
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count)
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special)
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string
return '\n'.join(trimmed)
if __name__ == '__main__':
main()
| ActionDoc |
python | lepture__authlib | authlib/oauth1/rfc5849/errors.py | {
"start": 1082,
"end": 1166
} | class ____(OAuth1Error):
error = "unsupported_parameter"
| UnsupportedParameterError |
python | scrapy__scrapy | tests/test_downloader_handlers.py | {
"start": 10584,
"end": 13344
} | class ____:
def setup_method(self):
crawler = get_crawler()
self.download_handler = build_from_crawler(DataURIDownloadHandler, crawler)
async def download_request(self, request: Request) -> Response:
return await maybe_deferred_to_future(
self.download_handler.download_request(request, DefaultSpider())
)
@deferred_f_from_coro_f
async def test_response_attrs(self):
uri = "data:,A%20brief%20note"
request = Request(uri)
response = await self.download_request(request)
assert response.url == uri
assert not response.headers
@deferred_f_from_coro_f
async def test_default_mediatype_encoding(self):
request = Request("data:,A%20brief%20note")
response = await self.download_request(request)
assert response.text == "A brief note"
assert type(response) is responsetypes.from_mimetype("text/plain") # pylint: disable=unidiomatic-typecheck
assert response.encoding == "US-ASCII"
@deferred_f_from_coro_f
async def test_default_mediatype(self):
request = Request("data:;charset=iso-8859-7,%be%d3%be")
response = await self.download_request(request)
assert response.text == "\u038e\u03a3\u038e"
assert type(response) is responsetypes.from_mimetype("text/plain") # pylint: disable=unidiomatic-typecheck
assert response.encoding == "iso-8859-7"
@deferred_f_from_coro_f
async def test_text_charset(self):
request = Request("data:text/plain;charset=iso-8859-7,%be%d3%be")
response = await self.download_request(request)
assert response.text == "\u038e\u03a3\u038e"
assert response.body == b"\xbe\xd3\xbe"
assert response.encoding == "iso-8859-7"
@deferred_f_from_coro_f
async def test_mediatype_parameters(self):
request = Request(
"data:text/plain;foo=%22foo;bar%5C%22%22;"
"charset=utf-8;bar=%22foo;%5C%22 foo ;/,%22"
",%CE%8E%CE%A3%CE%8E"
)
response = await self.download_request(request)
assert response.text == "\u038e\u03a3\u038e"
assert type(response) is responsetypes.from_mimetype("text/plain") # pylint: disable=unidiomatic-typecheck
assert response.encoding == "utf-8"
@deferred_f_from_coro_f
async def test_base64(self):
request = Request("data:text/plain;base64,SGVsbG8sIHdvcmxkLg%3D%3D")
response = await self.download_request(request)
assert response.text == "Hello, world."
@deferred_f_from_coro_f
async def test_protocol(self):
request = Request("data:,")
response = await self.download_request(request)
assert response.protocol is None
| TestDataURI |
python | huggingface__transformers | src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | {
"start": 20228,
"end": 20998
} | class ____(PreTrainedModel):
config: MobileViTV2Config
base_model_prefix = "mobilevitv2"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
_no_split_modules = ["MobileViTV2Layer"]
@torch.no_grad()
def _init_weights(self, module: nn.Module) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.GroupNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
@auto_docstring
| MobileViTV2PreTrainedModel |
python | pandas-dev__pandas | pandas/tests/io/json/test_json_table_schema.py | {
"start": 23684,
"end": 31436
} | class ____:
@pytest.mark.parametrize(
"index_nm",
[None, "idx", pytest.param("index", marks=pytest.mark.xfail), "level_0"],
)
@pytest.mark.parametrize(
"vals",
[
{"ints": [1, 2, 3, 4]},
{"objects": ["a", "b", "c", "d"]},
{"objects": ["1", "2", "3", "4"]},
{
"date_ranges": pd.date_range(
"2016-01-01", freq="D", periods=4, unit="ns"
)
},
{"categoricals": pd.Series(pd.Categorical(["a", "b", "c", "c"]))},
{
"ordered_cats": pd.Series(
pd.Categorical(["a", "b", "c", "c"], ordered=True)
)
},
{"floats": [1.0, 2.0, 3.0, 4.0]},
{"floats": [1.1, 2.2, 3.3, 4.4]},
{"bools": [True, False, False, True]},
{
"timezones": pd.date_range(
"2016-01-01", freq="D", periods=4, tz="US/Central", unit="ns"
) # added in # GH 35973
},
],
)
def test_read_json_table_orient(self, index_nm, vals):
df = DataFrame(vals, index=pd.Index(range(4), name=index_nm))
out = StringIO(df.to_json(orient="table"))
result = pd.read_json(out, orient="table")
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize(
"index_nm",
[
None,
"idx",
pytest.param(
"index",
marks=pytest.mark.filterwarnings("ignore:Index name:UserWarning"),
),
],
)
def test_read_json_table_orient_raises(self, index_nm):
vals = {"timedeltas": pd.timedelta_range("1h", periods=4, freq="min")}
df = DataFrame(vals, index=pd.Index(range(4), name=index_nm))
out = StringIO(df.to_json(orient="table"))
with pytest.raises(NotImplementedError, match="can not yet read "):
pd.read_json(out, orient="table")
@pytest.mark.parametrize(
"index_nm",
[None, "idx", pytest.param("index", marks=pytest.mark.xfail), "level_0"],
)
@pytest.mark.parametrize(
"vals",
[
{"ints": [1, 2, 3, 4]},
{"objects": ["a", "b", "c", "d"]},
{"objects": ["1", "2", "3", "4"]},
{
"date_ranges": pd.date_range(
"2016-01-01", freq="D", periods=4, unit="ns"
)
},
{"categoricals": pd.Series(pd.Categorical(["a", "b", "c", "c"]))},
{
"ordered_cats": pd.Series(
pd.Categorical(["a", "b", "c", "c"], ordered=True)
)
},
{"floats": [1.0, 2.0, 3.0, 4.0]},
{"floats": [1.1, 2.2, 3.3, 4.4]},
{"bools": [True, False, False, True]},
{
"timezones": pd.date_range(
"2016-01-01", freq="D", periods=4, tz="US/Central", unit="ns"
) # added in # GH 35973
},
],
)
def test_read_json_table_period_orient(self, index_nm, vals):
df = DataFrame(
vals,
index=pd.Index(
(pd.Period(f"2022Q{q}") for q in range(1, 5)), name=index_nm
),
)
out = StringIO(df.to_json(orient="table"))
result = pd.read_json(out, orient="table")
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize(
"idx",
[
pd.Index(range(4)),
pd.date_range(
"2020-08-30",
freq="D",
periods=4,
unit="ns",
)._with_freq(None),
pd.date_range(
"2020-08-30", freq="D", periods=4, tz="US/Central", unit="ns"
)._with_freq(None),
pd.MultiIndex.from_product(
[
pd.date_range(
"2020-08-30", freq="D", periods=2, tz="US/Central", unit="ns"
),
["x", "y"],
],
),
],
)
@pytest.mark.parametrize(
"vals",
[
{"floats": [1.1, 2.2, 3.3, 4.4]},
{"dates": pd.date_range("2020-08-30", freq="D", periods=4, unit="ns")},
{
"timezones": pd.date_range(
"2020-08-30", freq="D", periods=4, tz="Europe/London", unit="ns"
)
},
],
)
def test_read_json_table_timezones_orient(self, idx, vals):
# GH 35973
df = DataFrame(vals, index=idx)
out = StringIO(df.to_json(orient="table"))
result = pd.read_json(out, orient="table")
tm.assert_frame_equal(df, result)
def test_comprehensive(self):
df = DataFrame(
{
"A": [1, 2, 3, 4],
"B": ["a", "b", "c", "c"],
"C": pd.date_range("2016-01-01", freq="D", periods=4, unit="ns"),
# 'D': pd.timedelta_range('1h', periods=4, freq='min'),
"E": pd.Series(pd.Categorical(["a", "b", "c", "c"])),
"F": pd.Series(pd.Categorical(["a", "b", "c", "c"], ordered=True)),
"G": [1.1, 2.2, 3.3, 4.4],
"H": pd.date_range(
"2016-01-01", freq="D", periods=4, tz="US/Central", unit="ns"
),
"I": [True, False, False, True],
},
index=pd.Index(range(4), name="idx"),
)
out = StringIO(df.to_json(orient="table"))
result = pd.read_json(out, orient="table")
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize(
"index_names",
[[None, None], ["foo", "bar"], ["foo", None], [None, "foo"], ["index", "foo"]],
)
def test_multiindex(self, index_names):
# GH 18912
df = DataFrame(
[["Arr", "alpha", [1, 2, 3, 4]], ["Bee", "Beta", [10, 20, 30, 40]]],
index=[["A", "B"], ["Null", "Eins"]],
columns=["Aussprache", "Griechisch", "Args"],
)
df.index.names = index_names
out = StringIO(df.to_json(orient="table"))
result = pd.read_json(out, orient="table")
tm.assert_frame_equal(df, result)
def test_empty_frame_roundtrip(self):
# GH 21287
df = DataFrame(columns=["a", "b", "c"])
expected = df.copy()
out = StringIO(df.to_json(orient="table"))
result = pd.read_json(out, orient="table")
tm.assert_frame_equal(expected, result)
def test_read_json_orient_table_old_schema_version(self):
df_json = """
{
"schema":{
"fields":[
{"name":"index","type":"integer"},
{"name":"a","type":"string"}
],
"primaryKey":["index"],
"pandas_version":"0.20.0"
},
"data":[
{"index":0,"a":1},
{"index":1,"a":2.0},
{"index":2,"a":"s"}
]
}
"""
expected = DataFrame({"a": [1, 2.0, "s"]})
result = pd.read_json(StringIO(df_json), orient="table")
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize("freq", ["M", "2M", "Q", "2Q", "Y", "2Y"])
def test_read_json_table_orient_period_depr_freq(self, freq):
# GH#9586
df = DataFrame(
{"ints": [1, 2]},
index=pd.PeriodIndex(["2020-01", "2021-06"], freq=freq),
)
out = StringIO(df.to_json(orient="table"))
result = pd.read_json(out, orient="table")
tm.assert_frame_equal(df, result)
| TestTableOrientReader |
python | getsentry__sentry | src/sentry/integrations/jira/views/sentry_issue_details.py | {
"start": 3393,
"end": 6856
} | class ____(JiraSentryUIBaseView):
"""
Handles requests (from the Sentry integration in Jira) for HTML to display when you
click on "Sentry -> Linked Issues" in the RH sidebar of an issue in the Jira UI.
"""
html_file = "sentry/integrations/jira-issue.html"
def handle_groups(self, groups: QuerySet[Group]) -> Response:
response_context = {"groups": [build_context(group) for group in groups]}
logger.info(
"issue_hook.response",
extra={"issue_count": len(groups)},
)
return self.get_response(response_context)
def dispatch(self, request: HttpRequest, *args, **kwargs) -> HttpResponseBase:
try:
return super().dispatch(request, *args, **kwargs)
except ApiError as exc:
            # Sometimes set_badge() will fail to connect.
response_option = handle_jira_api_error(exc, " to set badge")
if response_option:
return self.get_response(response_option)
raise
def get(self, request: Request, issue_key, *args, **kwargs) -> Response:
scope = sentry_sdk.get_isolation_scope()
try:
integration = get_integration_from_request(request, "jira")
except AtlassianConnectValidationError as e:
scope.set_tag("failure", "AtlassianConnectValidationError")
logger.info(
"issue_hook.validation_error",
extra={
"issue_key": issue_key,
"error": str(e),
},
)
return self.get_response({"error_message": UNABLE_TO_VERIFY_INSTALLATION})
except ExpiredSignatureError:
scope.set_tag("failure", "ExpiredSignatureError")
return self.get_response({"refresh_required": True})
try:
external_issue = ExternalIssue.objects.get(integration_id=integration.id, key=issue_key)
organization = Organization.objects.get(id=external_issue.organization_id)
if (
integration_service.get_organization_integration(
organization_id=external_issue.organization_id,
integration_id=integration.id,
)
is None
):
set_badge(integration, issue_key, 0)
return self.get_response({"issue_not_linked": True})
groups = Group.objects.get_groups_by_external_issue(
integration=integration,
organizations=[organization],
external_issue_key=issue_key,
)
except (
ExternalIssue.DoesNotExist,
# Multiple ExternalIssues are returned if organizations share one integration.
# Since we cannot identify the organization from the request alone, for now, we just
# avoid crashing on the MultipleObjectsReturned error.
ExternalIssue.MultipleObjectsReturned,
) as e:
scope.set_tag("failure", e.__class__.__name__)
set_badge(integration, issue_key, 0)
return self.get_response({"issue_not_linked": True})
scope.set_tag("organization.slug", organization.slug)
response = self.handle_groups(groups)
scope.set_tag("status_code", response.status_code)
set_badge(integration, issue_key, len(groups))
return response
@control_silo_view
| JiraSentryIssueDetailsView |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 22589,
"end": 23309
} | class ____:
def test_format_timeframe(self):
assert self.locale._format_timeframe("hours", 2) == "2 മണിക്കൂർ"
assert self.locale._format_timeframe("hour", 0) == "ഒരു മണിക്കൂർ"
def test_format_relative_now(self):
result = self.locale._format_relative("ഇപ്പോൾ", "now", 0)
assert result == "ഇപ്പോൾ"
def test_format_relative_past(self):
result = self.locale._format_relative("ഒരു മണിക്കൂർ", "hour", 1)
assert result == "ഒരു മണിക്കൂർ ശേഷം"
def test_format_relative_future(self):
result = self.locale._format_relative("ഒരു മണിക്കൂർ", "hour", -1)
assert result == "ഒരു മണിക്കൂർ മുമ്പ്"
@pytest.mark.usefixtures("lang_locale")
| TestMalayalamLocale |
python | django__django | django/template/utils.py | {
"start": 368,
"end": 3571
} | class ____:
def __init__(self, templates=None):
"""
templates is an optional list of template engine definitions
(structured like settings.TEMPLATES).
"""
self._templates = templates
self._engines = {}
@cached_property
def templates(self):
if self._templates is None:
self._templates = settings.TEMPLATES
templates = {}
backend_names = []
for tpl in self._templates:
try:
# This will raise an exception if 'BACKEND' doesn't exist or
# isn't a string containing at least one dot.
default_name = tpl["BACKEND"].rsplit(".", 2)[-2]
except Exception:
invalid_backend = tpl.get("BACKEND", "<not defined>")
raise ImproperlyConfigured(
"Invalid BACKEND for a template engine: {}. Check "
"your TEMPLATES setting.".format(invalid_backend)
)
tpl = {
"NAME": default_name,
"DIRS": [],
"APP_DIRS": False,
"OPTIONS": {},
**tpl,
}
templates[tpl["NAME"]] = tpl
backend_names.append(tpl["NAME"])
counts = Counter(backend_names)
duplicates = [alias for alias, count in counts.most_common() if count > 1]
if duplicates:
raise ImproperlyConfigured(
"Template engine aliases aren't unique, duplicates: {}. "
"Set a unique NAME for each engine in settings.TEMPLATES.".format(
", ".join(duplicates)
)
)
return templates
def __getitem__(self, alias):
try:
return self._engines[alias]
except KeyError:
try:
params = self.templates[alias]
except KeyError:
raise InvalidTemplateEngineError(
"Could not find config for '{}' "
"in settings.TEMPLATES".format(alias)
)
# If importing or initializing the backend raises an exception,
# self._engines[alias] isn't set and this code may get executed
# again, so we must preserve the original params. See #24265.
params = params.copy()
backend = params.pop("BACKEND")
engine_cls = import_string(backend)
engine = engine_cls(params)
self._engines[alias] = engine
return engine
def __iter__(self):
return iter(self.templates)
def all(self):
return [self[alias] for alias in self]
@functools.lru_cache
def get_app_template_dirs(dirname):
"""
Return an iterable of paths of directories to load app templates from.
dirname is the name of the subdirectory containing templates inside
installed applications.
"""
# Immutable return value because it will be cached and shared by callers.
return tuple(
path
for app_config in apps.get_app_configs()
if app_config.path and (path := Path(app_config.path) / dirname).is_dir()
)
| EngineHandler |
python | pytorch__pytorch | torch/_inductor/cpu_vec_isa.py | {
"start": 5412,
"end": 6024
} | class ____(VecISA):
_bit_width = 128 # This is required to leverage the compute implemented in aten/src/ATen/cpu/vec/vec128/vec128_float_neon.h
_macro = ["CPU_CAPABILITY_NEON", "AT_BUILD_ARM_VEC256_WITH_SLEEF"]
_arch_flags = "" # Unused
_dtype_nelements = {torch.float: 4, torch.bfloat16: 8, torch.float16: 8}
def __str__(self) -> str:
if config.is_fbcode():
return "neon"
return "asimd" # detects the presence of advanced SIMD on armv8-a kernels
__hash__: Callable[[VecISA], Any] = VecISA.__hash__ # type: ignore[assignment]
@dataclasses.dataclass
| VecNEON |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/app_testing/tutorial001/main.py | {
"start": 165,
"end": 305
} | class ____(SQLModel):
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
| HeroBase |
python | numba__numba | numba/tests/test_dictobject.py | {
"start": 782,
"end": 28300
} | class ____(MemoryLeakMixin, TestCase):
def test_dict_bool(self):
"""
Exercise bool(dict)
"""
@njit
def foo(n):
d = dictobject.new_dict(int32, float32)
for i in range(n):
d[i] = i + 1
return bool(d)
# Insert nothing
self.assertEqual(foo(n=0), False)
# Insert 1 entry
self.assertEqual(foo(n=1), True)
# Insert 2 entries
self.assertEqual(foo(n=2), True)
# Insert 100 entries
self.assertEqual(foo(n=100), True)
def test_dict_create(self):
"""
Exercise dictionary creation, insertion and len
"""
@njit
def foo(n):
d = dictobject.new_dict(int32, float32)
for i in range(n):
d[i] = i + 1
return len(d)
# Insert nothing
self.assertEqual(foo(n=0), 0)
# Insert 1 entry
self.assertEqual(foo(n=1), 1)
# Insert 2 entries
self.assertEqual(foo(n=2), 2)
# Insert 100 entries
self.assertEqual(foo(n=100), 100)
def test_dict_get(self):
"""
Exercise dictionary creation, insertion and get
"""
@njit
def foo(n, targets):
d = dictobject.new_dict(int32, float64)
# insertion loop
for i in range(n):
d[i] = i
# retrieval loop
output = []
for t in targets:
output.append(d.get(t))
return output
self.assertEqual(foo(5, [0, 1, 9]), [0, 1, None])
self.assertEqual(foo(10, [0, 1, 9]), [0, 1, 9])
self.assertEqual(foo(10, [-1, 9, 1]), [None, 9, 1])
def test_dict_get_with_default(self):
"""
Exercise dict.get(k, d) where d is set
"""
@njit
def foo(n, target, default):
d = dictobject.new_dict(int32, float64)
# insertion loop
for i in range(n):
d[i] = i
# retrieval loop
return d.get(target, default)
self.assertEqual(foo(5, 3, -1), 3)
self.assertEqual(foo(5, 5, -1), -1)
def test_dict_getitem(self):
"""
Exercise dictionary __getitem__
"""
@njit
def foo(keys, vals, target):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
# lookup
return d[target]
keys = [1, 2, 3]
vals = [0.1, 0.2, 0.3]
self.assertEqual(foo(keys, vals, 1), 0.1)
self.assertEqual(foo(keys, vals, 2), 0.2)
self.assertEqual(foo(keys, vals, 3), 0.3)
# check no leak so far
self.assert_no_memory_leak()
# disable leak check for exception test
self.disable_leak_check()
with self.assertRaisesRegex(KeyError, "0"):
foo(keys, vals, 0)
with self.assertRaisesRegex(KeyError, "4"):
foo(keys, vals, 4)
def test_dict_popitem(self):
"""
Exercise dictionary .popitem
"""
@njit
def foo(keys, vals):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
# popitem
return d.popitem()
keys = [1, 2, 3]
vals = [0.1, 0.2, 0.3]
for i in range(1, len(keys)):
self.assertEqual(
foo(keys[:i], vals[:i]),
(keys[i - 1], vals[i - 1]),
)
def test_dict_popitem_many(self):
"""
Exercise dictionary .popitem
"""
@njit
def core(d, npop):
# popitem
keysum, valsum = 0, 0
for _ in range(npop):
k, v = d.popitem()
keysum += k
valsum -= v
return keysum, valsum
@njit
def foo(keys, vals, npop):
d = dictobject.new_dict(int32, int32)
# insertion
for k, v in zip(keys, vals):
d[k] = v
return core(d, npop)
keys = [1, 2, 3]
vals = [10, 20, 30]
for i in range(len(keys)):
self.assertEqual(
foo(keys, vals, npop=3),
core.py_func(dict(zip(keys, vals)), npop=3),
)
# check no leak so far
self.assert_no_memory_leak()
# disable leak check for exception test
self.disable_leak_check()
with self.assertRaises(KeyError):
foo(keys, vals, npop=4)
def test_dict_pop(self):
"""
Exercise dictionary .pop
"""
@njit
def foo(keys, vals, target):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
# popitem
return d.pop(target, None), len(d)
keys = [1, 2, 3]
vals = [0.1, 0.2, 0.3]
self.assertEqual(foo(keys, vals, 1), (0.1, 2))
self.assertEqual(foo(keys, vals, 2), (0.2, 2))
self.assertEqual(foo(keys, vals, 3), (0.3, 2))
self.assertEqual(foo(keys, vals, 0), (None, 3))
# check no leak so far
self.assert_no_memory_leak()
# disable leak check for exception test
self.disable_leak_check()
@njit
def foo():
d = dictobject.new_dict(int32, float64)
# popitem
return d.pop(0)
with self.assertRaises(KeyError):
foo()
def test_dict_pop_many(self):
"""
Exercise dictionary .pop
"""
@njit
def core(d, pops):
total = 0
for k in pops:
total += k + d.pop(k, 0.123) + len(d)
total *= 2
return total
@njit
def foo(keys, vals, pops):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
# popitem
return core(d, pops)
keys = [1, 2, 3]
vals = [0.1, 0.2, 0.3]
pops = [2, 3, 3, 1, 0, 2, 1, 0, -1]
self.assertEqual(
foo(keys, vals, pops),
core.py_func(dict(zip(keys, vals)), pops),
)
def test_dict_delitem(self):
@njit
def foo(keys, vals, target):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
del d[target]
return len(d), d.get(target)
keys = [1, 2, 3]
vals = [0.1, 0.2, 0.3]
self.assertEqual(foo(keys, vals, 1), (2, None))
self.assertEqual(foo(keys, vals, 2), (2, None))
self.assertEqual(foo(keys, vals, 3), (2, None))
# check no leak so far
self.assert_no_memory_leak()
# disable leak check for exception test
self.disable_leak_check()
with self.assertRaises(KeyError):
foo(keys, vals, 0)
def test_dict_clear(self):
"""
Exercise dict.clear
"""
@njit
def foo(keys, vals):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
b4 = len(d)
# clear
d.clear()
return b4, len(d)
keys = [1, 2, 3]
vals = [0.1, 0.2, 0.3]
self.assertEqual(foo(keys, vals), (3, 0))
def test_dict_items(self):
"""
Exercise dict.items
"""
@njit
def foo(keys, vals):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
out = []
for kv in d.items():
out.append(kv)
return out
keys = [1, 2, 3]
vals = [0.1, 0.2, 0.3]
self.assertEqual(
foo(keys, vals),
list(zip(keys, vals)),
)
# Test .items() on empty dict
@njit
def foo():
d = dictobject.new_dict(int32, float64)
out = []
for kv in d.items():
out.append(kv)
return out
self.assertEqual(foo(), [])
def test_dict_keys(self):
"""
Exercise dict.keys
"""
@njit
def foo(keys, vals):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
out = []
for k in d.keys():
out.append(k)
return out
keys = [1, 2, 3]
vals = [0.1, 0.2, 0.3]
self.assertEqual(
foo(keys, vals),
keys,
)
def test_dict_keys_len(self):
"""
Exercise len(dict.keys())
"""
@njit
def foo(keys, vals):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
return len(d.keys())
keys = [1, 2, 3]
vals = [0.1, 0.2, 0.3]
self.assertEqual(
foo(keys, vals),
len(keys),
)
def test_dict_values(self):
"""
Exercise dict.values
"""
@njit
def foo(keys, vals):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
out = []
for v in d.values():
out.append(v)
return out
keys = [1, 2, 3]
vals = [0.1, 0.2, 0.3]
self.assertEqual(
foo(keys, vals),
vals,
)
def test_dict_values_len(self):
"""
Exercise len(dict.values())
"""
@njit
def foo(keys, vals):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
return len(d.values())
keys = [1, 2, 3]
vals = [0.1, 0.2, 0.3]
self.assertEqual(
foo(keys, vals),
len(vals),
)
def test_dict_items_len(self):
"""
Exercise len(dict.items())
"""
@njit
def foo(keys, vals):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
return len(d.items())
keys = [1, 2, 3]
vals = [0.1, 0.2, 0.3]
self.assertPreciseEqual(
foo(keys, vals),
len(vals),
)
def test_dict_iter(self):
"""
Exercise iter(dict)
"""
@njit
def foo(keys, vals):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
out = []
for k in d:
out.append(k)
return out
keys = [1, 2, 3]
vals = [0.1, 0.2, 0.3]
self.assertEqual(
foo(keys, vals),
[1, 2, 3]
)
def test_dict_contains(self):
"""
Exercise operator.contains
"""
@njit
def foo(keys, vals, checklist):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
out = []
for k in checklist:
out.append(k in d)
return out
keys = [1, 2, 3]
vals = [0.1, 0.2, 0.3]
self.assertEqual(
foo(keys, vals, [2, 3, 4, 1, 0]),
[True, True, False, True, False],
)
def test_dict_copy(self):
"""
Exercise dict.copy
"""
@njit
def foo(keys, vals):
d = dictobject.new_dict(int32, float64)
# insertion
for k, v in zip(keys, vals):
d[k] = v
return list(d.copy().items())
keys = list(range(20))
vals = [x + i / 100 for i, x in enumerate(keys)]
out = foo(keys, vals)
self.assertEqual(out, list(zip(keys, vals)))
def test_dict_setdefault(self):
"""
Exercise dict.setdefault
"""
@njit
def foo():
d = dictobject.new_dict(int32, float64)
d.setdefault(1, 1.2) # used because key is not in
a = d.get(1)
d[1] = 2.3
b = d.get(1)
d[2] = 3.4
d.setdefault(2, 4.5) # not used because key is in
c = d.get(2)
return a, b, c
self.assertEqual(foo(), (1.2, 2.3, 3.4))
def test_dict_equality(self):
"""
Exercise dict.__eq__ and .__ne__
"""
@njit
def foo(na, nb, fa, fb):
da = dictobject.new_dict(int32, float64)
db = dictobject.new_dict(int32, float64)
for i in range(na):
da[i] = i * fa
for i in range(nb):
db[i] = i * fb
return da == db, da != db
# Same keys and values
self.assertEqual(foo(10, 10, 3, 3), (True, False))
# Same keys and diff values
self.assertEqual(foo(10, 10, 3, 3.1), (False, True))
# LHS has more keys
self.assertEqual(foo(11, 10, 3, 3), (False, True))
# RHS has more keys
self.assertEqual(foo(10, 11, 3, 3), (False, True))
def test_dict_equality_more(self):
"""
Exercise dict.__eq__
"""
@njit
def foo(ak, av, bk, bv):
# The key-value types are different in the two dictionaries
da = dictobject.new_dict(int32, float64)
db = dictobject.new_dict(int64, float32)
for i in range(len(ak)):
da[ak[i]] = av[i]
for i in range(len(bk)):
db[bk[i]] = bv[i]
return da == db
# Simple equal case
ak = [1, 2, 3]
av = [2, 3, 4]
bk = [1, 2, 3]
bv = [2, 3, 4]
self.assertTrue(foo(ak, av, bk, bv))
# Equal with replacement
ak = [1, 2, 3]
av = [2, 3, 4]
bk = [1, 2, 2, 3]
bv = [2, 1, 3, 4]
self.assertTrue(foo(ak, av, bk, bv))
# Diff values
ak = [1, 2, 3]
av = [2, 3, 4]
bk = [1, 2, 3]
bv = [2, 1, 4]
self.assertFalse(foo(ak, av, bk, bv))
# Diff keys
ak = [0, 2, 3]
av = [2, 3, 4]
bk = [1, 2, 3]
bv = [2, 3, 4]
self.assertFalse(foo(ak, av, bk, bv))
def test_dict_equality_diff_type(self):
"""
Exercise dict.__eq__
"""
@njit
def foo(na, b):
da = dictobject.new_dict(int32, float64)
for i in range(na):
da[i] = i
return da == b
# dict != int
self.assertFalse(foo(10, 1))
# dict != tuple[int]
self.assertFalse(foo(10, (1,)))
def test_dict_to_from_meminfo(self):
"""
Exercise dictobject.{_as_meminfo, _from_meminfo}
"""
@njit
def make_content(nelem):
for i in range(nelem):
yield i, i + (i + 1) / 100
@njit
def boxer(nelem):
d = dictobject.new_dict(int32, float64)
for k, v in make_content(nelem):
d[k] = v
return dictobject._as_meminfo(d)
dcttype = types.DictType(int32, float64)
@njit
def unboxer(mi):
d = dictobject._from_meminfo(mi, dcttype)
return list(d.items())
mi = boxer(10)
self.assertEqual(mi.refcount, 1)
got = unboxer(mi)
expected = list(make_content.py_func(10))
self.assertEqual(got, expected)
def test_001_cannot_downcast_key(self):
@njit
def foo(n):
d = dictobject.new_dict(int32, float64)
for i in range(n):
d[i] = i + 1
# bad key type
z = d.get(1j)
return z
with self.assertRaises(TypingError) as raises:
foo(10)
self.assertIn(
'cannot safely cast complex128 to int32',
str(raises.exception),
)
def test_002_cannot_downcast_default(self):
@njit
def foo(n):
d = dictobject.new_dict(int32, float64)
for i in range(n):
d[i] = i + 1
# bad default type
z = d.get(2 * n, 1j)
return z
with self.assertRaises(TypingError) as raises:
foo(10)
self.assertIn(
'cannot safely cast complex128 to float64',
str(raises.exception),
)
def test_003_cannot_downcast_key(self):
@njit
def foo(n):
d = dictobject.new_dict(int32, float64)
for i in range(n):
d[i] = i + 1
# bad cast!?
z = d.get(2.4)
return z
# should raise
with self.assertRaises(TypingError) as raises:
foo(10)
self.assertIn(
'cannot safely cast float64 to int32',
str(raises.exception),
)
def test_004_cannot_downcast_key(self):
@njit
def foo():
d = dictobject.new_dict(int32, float64)
# should raise TypingError
d[1j] = 7.
with self.assertRaises(TypingError) as raises:
foo()
self.assertIn(
'cannot safely cast complex128 to int32',
str(raises.exception),
)
def test_005_cannot_downcast_value(self):
@njit
def foo():
d = dictobject.new_dict(int32, float64)
# should raise TypingError
d[1] = 1j
with self.assertRaises(TypingError) as raises:
foo()
self.assertIn(
'cannot safely cast complex128 to float64',
str(raises.exception),
)
def test_006_cannot_downcast_key(self):
@njit
def foo():
d = dictobject.new_dict(int32, float64)
# raise TypingError
d[11.5]
with self.assertRaises(TypingError) as raises:
foo()
self.assertIn(
'cannot safely cast float64 to int32',
str(raises.exception),
)
@unittest.skipUnless(sys.maxsize > 2 ** 32, "64 bit test only")
def test_007_collision_checks(self):
# this checks collisions in real life for 64bit systems
@njit
def foo(v1, v2):
d = dictobject.new_dict(int64, float64)
c1 = np.uint64(2 ** 61 - 1)
c2 = np.uint64(0)
assert hash(c1) == hash(c2)
d[c1] = v1
d[c2] = v2
return (d[c1], d[c2])
a, b = 10., 20.
x, y = foo(a, b)
self.assertEqual(x, a)
self.assertEqual(y, b)
def test_008_lifo_popitem(self):
# check that (keys, vals) are LIFO .popitem()
@njit
def foo(n):
d = dictobject.new_dict(int32, float64)
for i in range(n):
d[i] = i + 1
keys = []
vals = []
for i in range(n):
tmp = d.popitem()
keys.append(tmp[0])
vals.append(tmp[1])
return keys, vals
z = 10
gk, gv = foo(z)
self.assertEqual(gk, [x for x in reversed(range(z))])
self.assertEqual(gv, [x + 1 for x in reversed(range(z))])
def test_010_cannot_downcast_default(self):
@njit
def foo():
d = dictobject.new_dict(int32, float64)
d[0] = 6.
d[1] = 7.
# pop'd default must have same type as value
d.pop(11, 12j)
with self.assertRaises(TypingError) as raises:
foo()
self.assertIn(
"cannot safely cast complex128 to float64",
str(raises.exception),
)
def test_011_cannot_downcast_key(self):
@njit
def foo():
d = dictobject.new_dict(int32, float64)
d[0] = 6.
d[1] = 7.
# pop'd key must have same type as key
d.pop(11j)
with self.assertRaises(TypingError) as raises:
foo()
self.assertIn(
"cannot safely cast complex128 to int32",
str(raises.exception),
)
def test_012_cannot_downcast_key(self):
@njit
def foo():
d = dictobject.new_dict(int32, float64)
d[0] = 6.
# invalid key type
return 1j in d
with self.assertRaises(TypingError) as raises:
foo()
self.assertIn(
"cannot safely cast complex128 to int32",
str(raises.exception),
)
def test_013_contains_empty_dict(self):
@njit
def foo():
d = dictobject.new_dict(int32, float64)
# contains on empty dict
return 1 in d
self.assertFalse(foo())
def test_014_not_contains_empty_dict(self):
@njit
def foo():
d = dictobject.new_dict(int32, float64)
# not contains empty dict
return 1 not in d
self.assertTrue(foo())
def test_015_dict_clear(self):
@njit
def foo(n):
d = dictobject.new_dict(int32, float64)
for i in range(n):
d[i] = i + 1
x = len(d)
d.clear()
y = len(d)
return x, y
m = 10
self.assertEqual(foo(m), (m, 0))
def test_016_cannot_downcast_key(self):
@njit
def foo():
d = dictobject.new_dict(int32, float64)
# key is wrong type
d.setdefault(1j, 12.)
with self.assertRaises(TypingError) as raises:
foo()
self.assertIn(
"cannot safely cast complex128 to int32",
str(raises.exception),
)
def test_017_cannot_downcast_default(self):
@njit
def foo():
d = dictobject.new_dict(int32, float64)
# default value is wrong type
d.setdefault(1, 12.j)
with self.assertRaises(TypingError) as raises:
foo()
self.assertIn(
"cannot safely cast complex128 to float64",
str(raises.exception),
)
def test_018_keys_iter_are_views(self):
# this is broken somewhere in llvmlite, intent of test is to check if
# keys behaves like a view or not
@njit
def foo():
d = dictobject.new_dict(int32, float64)
d[11] = 12.
k1 = d.keys()
d[22] = 9.
k2 = d.keys()
rk1 = [x for x in k1]
rk2 = [x for x in k2]
return rk1, rk2
a, b = foo()
self.assertEqual(a, b)
self.assertEqual(a, [11, 22])
# Not implemented yet
@unittest.expectedFailure
def test_019(self):
# should keys/vals be set-like?
@njit
def foo():
d = dictobject.new_dict(int32, float64)
d[11] = 12.
d[22] = 9.
k2 = d.keys() & {12, }
return k2
print(foo())
def test_020_string_key(self):
@njit
def foo():
d = dictobject.new_dict(types.unicode_type, float64)
d['a'] = 1.
d['b'] = 2.
d['c'] = 3.
d['d'] = 4.
out = []
for x in d.items():
out.append(x)
return out, d['a']
items, da = foo()
self.assertEqual(items, [('a', 1.), ('b', 2.), ('c', 3.), ('d', 4)])
self.assertEqual(da, 1.)
def test_021_long_str_key(self):
@njit
def foo():
d = dictobject.new_dict(types.unicode_type, float64)
tmp = []
for i in range(10000):
tmp.append('a')
s = ''.join(tmp)
d[s] = 1.
out = list(d.items())
return out
self.assertEqual(foo(), [('a' * 10000, 1)])
def test_022_references_juggle(self):
@njit
def foo():
d = dictobject.new_dict(int32, float64)
e = d
d[1] = 12.
e[2] = 14.
e = dictobject.new_dict(int32, float64)
e[1] = 100.
e[2] = 1000.
f = d
d = e
k1 = [x for x in d.items()]
k2 = [x for x in e.items()]
k3 = [x for x in f.items()]
return k1, k2, k3
k1, k2, k3 = foo()
self.assertEqual(k1, [(1, 100.0), (2, 1000.0)])
self.assertEqual(k2, [(1, 100.0), (2, 1000.0)])
self.assertEqual(k3, [(1, 12), (2, 14)])
def test_023_closure(self):
@njit
def foo():
d = dictobject.new_dict(int32, float64)
def bar():
d[1] = 12.
d[2] = 14.
bar()
return [x for x in d.keys()]
self.assertEqual(foo(), [1, 2])
def test_024_unicode_getitem_keys(self):
# See issue #6135
@njit
def foo():
s = 'a\u1234'
d = {s[0] : 1}
return d['a']
self.assertEqual(foo(), foo.py_func())
@njit
def foo():
s = 'abc\u1234'
d = {s[:1] : 1}
return d['a']
self.assertEqual(foo(), foo.py_func())
def test_issue6570_alignment_padding(self):
# Create a key type that is 12-bytes long on a 8-byte aligned system
# so that the a 4-byte padding is needed.
# If the 4-byte padding is not zero-filled, it will have garbage data
# that affects key matching in the lookup.
keyty = types.Tuple([types.uint64, types.float32])
@njit
def foo():
d = dictobject.new_dict(keyty, float64)
t1 = np.array([3], dtype=np.uint64)
t2 = np.array([5.67], dtype=np.float32)
v1 = np.array([10.23], dtype=np.float32)
d[(t1[0], t2[0])] = v1[0]
return (t1[0], t2[0]) in d
self.assertTrue(foo())
def test_dict_update(self):
"""
Tests dict.update works with various dictionaries.
"""
n = 10
def f1(n):
"""
Test update with a regular dictionary.
"""
d1 = {i: i + 1 for i in range(n)}
d2 = {3 * i: i for i in range(n)}
d1.update(d2)
return d1
py_func = f1
cfunc = njit()(f1)
a = py_func(n)
b = cfunc(n)
self.assertEqual(a, b)
def f2(n):
"""
Test update where one of the dictionaries
is created as a Python literal.
"""
d1 = {
1: 2,
3: 4,
5: 6
}
d2 = {3 * i: i for i in range(n)}
d1.update(d2)
return d1
py_func = f2
cfunc = njit()(f2)
a = py_func(n)
b = cfunc(n)
self.assertEqual(a, b)
| TestDictObject |
python | lepture__authlib | authlib/integrations/flask_oauth2/authorization_server.py | {
"start": 469,
"end": 5911
} | class ____(_AuthorizationServer):
"""Flask implementation of :class:`authlib.oauth2.rfc6749.AuthorizationServer`.
Initialize it with ``query_client``, ``save_token`` methods and Flask
app instance::
def query_client(client_id):
return Client.query.filter_by(client_id=client_id).first()
def save_token(token, request):
if request.user:
user_id = request.user.id
else:
user_id = None
client = request.client
tok = Token(client_id=client.client_id, user_id=user.id, **token)
db.session.add(tok)
db.session.commit()
server = AuthorizationServer(app, query_client, save_token)
# or initialize lazily
server = AuthorizationServer()
server.init_app(app, query_client, save_token)
"""
def __init__(self, app=None, query_client=None, save_token=None):
super().__init__()
self._query_client = query_client
self._save_token = save_token
self._error_uris = None
if app is not None:
self.init_app(app)
def init_app(self, app, query_client=None, save_token=None):
"""Initialize later with Flask app instance."""
if query_client is not None:
self._query_client = query_client
if save_token is not None:
self._save_token = save_token
self.load_config(app.config)
def load_config(self, config):
self.register_token_generator(
"default", self.create_bearer_token_generator(config)
)
self.scopes_supported = config.get("OAUTH2_SCOPES_SUPPORTED")
self._error_uris = config.get("OAUTH2_ERROR_URIS")
def query_client(self, client_id):
return self._query_client(client_id)
def save_token(self, token, request):
return self._save_token(token, request)
def get_error_uri(self, request, error):
if self._error_uris:
uris = dict(self._error_uris)
return uris.get(error.error)
def create_oauth2_request(self, request):
return FlaskOAuth2Request(flask_req)
def create_json_request(self, request):
return FlaskJsonRequest(flask_req)
def handle_response(self, status_code, payload, headers):
if isinstance(payload, dict):
payload = json.dumps(payload)
return Response(payload, status=status_code, headers=headers)
def send_signal(self, name, *args, **kwargs):
if name == "after_authenticate_client":
client_authenticated.send(self, *args, **kwargs)
elif name == "after_revoke_token":
token_revoked.send(self, *args, **kwargs)
def create_bearer_token_generator(self, config):
"""Create a generator function for generating ``token`` value. This
method will create a Bearer Token generator with
:class:`authlib.oauth2.rfc6750.BearerToken`.
Configurable settings:
1. OAUTH2_ACCESS_TOKEN_GENERATOR: Boolean or import string, default is True.
2. OAUTH2_REFRESH_TOKEN_GENERATOR: Boolean or import string, default is False.
3. OAUTH2_TOKEN_EXPIRES_IN: Dict or import string, default is None.
        By default, it will not generate ``refresh_token``, which can be turned on by
        configuring ``OAUTH2_REFRESH_TOKEN_GENERATOR``.
Here are some examples of the token generator::
OAUTH2_ACCESS_TOKEN_GENERATOR = "your_project.generators.gen_token"
# and in module `your_project.generators`, you can define:
def gen_token(client, grant_type, user, scope):
# generate token according to these parameters
token = create_random_token()
return f"{client.id}-{user.id}-{token}"
Here is an example of ``OAUTH2_TOKEN_EXPIRES_IN``::
OAUTH2_TOKEN_EXPIRES_IN = {
"authorization_code": 864000,
"urn:ietf:params:oauth:grant-type:jwt-bearer": 3600,
}
"""
conf = config.get("OAUTH2_ACCESS_TOKEN_GENERATOR", True)
access_token_generator = create_token_generator(conf, 42)
conf = config.get("OAUTH2_REFRESH_TOKEN_GENERATOR", False)
refresh_token_generator = create_token_generator(conf, 48)
expires_conf = config.get("OAUTH2_TOKEN_EXPIRES_IN")
expires_generator = create_token_expires_in_generator(expires_conf)
return BearerTokenGenerator(
access_token_generator, refresh_token_generator, expires_generator
)
def create_token_expires_in_generator(expires_in_conf=None):
if isinstance(expires_in_conf, str):
return import_string(expires_in_conf)
data = {}
data.update(BearerTokenGenerator.GRANT_TYPES_EXPIRES_IN)
if isinstance(expires_in_conf, dict):
data.update(expires_in_conf)
def expires_in(client, grant_type):
return data.get(grant_type, BearerTokenGenerator.DEFAULT_EXPIRES_IN)
return expires_in
def create_token_generator(token_generator_conf, length=42):
if callable(token_generator_conf):
return token_generator_conf
if isinstance(token_generator_conf, str):
return import_string(token_generator_conf)
elif token_generator_conf is True:
def token_generator(*args, **kwargs):
return generate_token(length)
return token_generator
| AuthorizationServer |
python | python-pillow__Pillow | src/PIL/ImageFile.py | {
"start": 14980,
"end": 15162
} | class ____(abc.ABC):
def open(self, im: StubImageFile) -> None:
pass
@abc.abstractmethod
def load(self, im: StubImageFile) -> Image.Image:
pass
| StubHandler |
python | doocs__leetcode | solution/0700-0799/0747.Largest Number At Least Twice of Others/Solution.py | {
"start": 0,
"end": 153
} | class ____:
def dominantIndex(self, nums: List[int]) -> int:
x, y = nlargest(2, nums)
return nums.index(x) if x >= 2 * y else -1
| Solution |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 28232,
"end": 28447
} | class ____(PrefectBaseModel):
"""Filter by `Worker.worker_config_id`."""
any_: Optional[List[UUID]] = Field(
default=None, description="A list of work pool ids to include"
)
| WorkerFilterWorkPoolId |
python | streamlit__streamlit | lib/tests/streamlit/elements/heading_test.py | {
"start": 11480,
"end": 16211
} | class ____(DeltaGeneratorTestCase):
"""Test ability to marshall title protos."""
def test_st_title(self):
"""Test st.title."""
st.title("some title")
el = self.get_delta_from_queue().new_element
assert el.heading.body == "some title"
assert el.heading.tag == "h1"
assert not el.heading.hide_anchor
assert not el.heading.divider
def test_st_title_with_anchor(self):
"""Test st.title with anchor."""
st.title("some title", anchor="some-anchor")
el = self.get_delta_from_queue().new_element
assert el.heading.body == "some title"
assert el.heading.tag == "h1"
assert el.heading.anchor == "some-anchor"
assert not el.heading.hide_anchor
assert not el.heading.divider
def test_st_title_with_hidden_anchor(self):
"""Test st.title with hidden anchor."""
st.title("some title", anchor=False)
el = self.get_delta_from_queue().new_element
assert el.heading.body == "some title"
assert el.heading.tag == "h1"
assert el.heading.anchor == ""
assert el.heading.hide_anchor
assert not el.heading.divider
def test_st_title_with_invalid_anchor(self):
"""Test st.title with invalid anchor."""
with pytest.raises(
StreamlitAPIException, match="Anchor parameter has invalid value:"
):
st.title("some header", anchor=True)
with pytest.raises(
StreamlitAPIException, match="Anchor parameter has invalid type:"
):
st.title("some header", anchor=6)
def test_st_title_with_help(self):
"""Test st.title with help."""
st.title("some title", help="help text")
el = self.get_delta_from_queue().new_element
assert el.heading.body == "some title"
assert el.heading.tag == "h1"
assert el.heading.help == "help text"
assert not el.heading.divider
def test_st_title_with_invalid_divider(self):
"""Test st.title with invalid divider."""
with pytest.raises(TypeError):
st.title("some header", divider=True)
with pytest.raises(TypeError):
st.title("some header", divider="blue")
def test_st_title_with_width(self):
"""Test st.title with different width types."""
test_cases = [
(500, WidthConfigFields.PIXEL_WIDTH.value, "pixel_width", 500),
("stretch", WidthConfigFields.USE_STRETCH.value, "use_stretch", True),
("content", WidthConfigFields.USE_CONTENT.value, "use_content", True),
]
for width_value, expected_width_spec, field_name, field_value in test_cases:
with self.subTest(width_value=width_value):
st.title("some title", width=width_value)
el = self.get_delta_from_queue().new_element
assert el.heading.body == "some title"
assert el.heading.tag == "h1"
assert el.width_config.WhichOneof("width_spec") == expected_width_spec
assert getattr(el.width_config, field_name) == field_value
def test_st_title_with_invalid_width(self):
"""Test st.title with invalid width values."""
test_cases = [
(
"invalid",
"Invalid width value: 'invalid'. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
-100,
"Invalid width value: -100. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
0,
"Invalid width value: 0. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
100.5,
"Invalid width value: 100.5. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
]
for width_value, expected_error_message in test_cases:
with self.subTest(width_value=width_value):
with pytest.raises(StreamlitAPIException) as exc:
st.title("some title", width=width_value)
assert str(exc.value) == expected_error_message
def test_st_title_default_width(self):
"""Test that st.title defaults to stretch width."""
st.title("some title")
el = self.get_delta_from_queue().new_element
assert el.heading.body == "some title"
assert el.heading.tag == "h1"
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.width_config.use_stretch is True
| StTitleTest |
python | celery__celery | celery/security/serialization.py | {
"start": 691,
"end": 3832
} | class ____:
"""Signed serializer."""
def __init__(self, key=None, cert=None, cert_store=None,
digest=DEFAULT_SECURITY_DIGEST, serializer='json'):
self._key = key
self._cert = cert
self._cert_store = cert_store
self._digest = get_digest_algorithm(digest)
self._serializer = serializer
def serialize(self, data):
"""Serialize data structure into string."""
assert self._key is not None
assert self._cert is not None
with reraise_errors('Unable to serialize: {0!r}', (Exception,)):
content_type, content_encoding, body = dumps(
data, serializer=self._serializer)
# What we sign is the serialized body, not the body itself.
# this way the receiver doesn't have to decode the contents
# to verify the signature (and thus avoiding potential flaws
# in the decoding step).
body = ensure_bytes(body)
return self._pack(body, content_type, content_encoding,
signature=self._key.sign(body, self._digest),
signer=self._cert.get_id())
def deserialize(self, data):
"""Deserialize data structure from string."""
assert self._cert_store is not None
with reraise_errors('Unable to deserialize: {0!r}', (Exception,)):
payload = self._unpack(data)
signature, signer, body = (payload['signature'],
payload['signer'],
payload['body'])
self._cert_store[signer].verify(body, signature, self._digest)
return loads(body, payload['content_type'],
payload['content_encoding'], force=True)
def _pack(self, body, content_type, content_encoding, signer, signature,
sep=DEFAULT_SEPARATOR):
fields = sep.join(
ensure_bytes(s) for s in [b64encode(signer), b64encode(signature),
content_type, content_encoding, body]
)
return b64encode(fields)
def _unpack(self, payload, sep=DEFAULT_SEPARATOR):
raw_payload = b64decode(ensure_bytes(payload))
v = raw_payload.split(sep, maxsplit=4)
return {
'signer': b64decode(v[0]),
'signature': b64decode(v[1]),
'content_type': bytes_to_str(v[2]),
'content_encoding': bytes_to_str(v[3]),
'body': v[4],
}
def register_auth(key=None, key_password=None, cert=None, store=None,
digest=DEFAULT_SECURITY_DIGEST,
serializer='json'):
"""Register security serializer."""
s = SecureSerializer(key and PrivateKey(key, password=key_password),
cert and Certificate(cert),
store and FSCertStore(store),
digest, serializer=serializer)
registry.register('auth', s.serialize, s.deserialize,
content_type='application/data',
content_encoding='utf-8')
| SecureSerializer |
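A minimal sketch of the separator-based framing used by _pack/_unpack above, with an HMAC standing in for the X.509 signature and an illustrative separator standing in for DEFAULT_SEPARATOR. Only the first four fields are split, so the body may safely contain the separator bytes:

import hashlib
import hmac
from base64 import b64decode, b64encode

SEP = b"\x00\x01"  # illustrative; base64 output never contains these bytes

def pack(body, content_type, content_encoding, signer, signature):
    fields = SEP.join(
        [b64encode(signer), b64encode(signature),
         content_type.encode(), content_encoding.encode(), body]
    )
    return b64encode(fields)

def unpack(payload):
    v = b64decode(payload).split(SEP, maxsplit=4)
    return {
        "signer": b64decode(v[0]),
        "signature": b64decode(v[1]),
        "content_type": v[2].decode(),
        "content_encoding": v[3].decode(),
        "body": v[4],
    }

key, body = b"shared-key", b'{"task": "add", "args": [2, 2]}'
signature = hmac.new(key, body, hashlib.sha256).digest()
packed = pack(body, "application/json", "utf-8", b"worker-cert-id", signature)
restored = unpack(packed)
assert restored["body"] == body
assert hmac.compare_digest(restored["signature"], signature)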
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/query.py | {
"start": 95872,
"end": 105933
} | class ____(ShopifyBulkQuery):
"""
{
productVariants(
query: "updatedAt:>='2019-04-13T00:00:00+00:00' AND updatedAt:<='2024-04-30T12:16:17.273363+00:00'"
) {
edges {
node {
__typename
id
title
price
sku
position
inventoryPolicy
compareAtPrice
inventoryManagement
createdAt
updatedAt
taxable
barcode
weight
weightUnit
inventoryQuantity
requiresShipping
availableForSale
displayName
taxCode
options: selectedOptions {
name
value
option_value: optionValue {
id
name
has_variants: hasVariants
swatch {
color
image {
id
image {
src
url
}
}
}
}
}
grams: weight
image {
image_id: id
image_src: src
image_url: url
}
old_inventory_quantity: inventoryQuantity
product {
product_id: id
}
fulfillmentService {
fulfillment_service: handle
}
inventoryItem {
inventory_item_id: id
}
presentmentPrices {
edges {
node {
__typename
price {
amount
currencyCode
}
compareAtPrice {
amount
currencyCode
}
}
}
}
}
}
}
"""
query_name = "productVariants"
@property
def _should_include_presentment_prices(self) -> bool:
return self.config.get("job_product_variants_include_pres_prices", True)
@property
def query_nodes(self) -> Optional[Union[List[Field], List[str]]]:
prices_fields: List[str] = ["amount", "currencyCode"]
presentment_prices_fields: List[Field] = [
Field(
name="edges",
fields=[
Field(
name="node",
fields=[
"__typename",
Field(name="price", fields=prices_fields),
Field(name="compareAtPrice", fields=prices_fields),
],
)
],
)
]
option_value_fields: List[Field] = [
"id",
"name",
Field(name="hasVariants", alias="has_variants"),
Field(name="swatch", fields=["color", Field(name="image", fields=["id", Field(name="image", fields=["src", "url"])])]),
]
option_fields: List[Field] = [
"name",
"value",
Field(name="optionValue", alias="option_value", fields=option_value_fields),
]
presentment_prices = (
[Field(name="presentmentPrices", fields=presentment_prices_fields)] if self._should_include_presentment_prices else []
)
image_fields = [
Field(name="id", alias="image_id"),
Field(name="src", alias="image_src"),
Field(name="url", alias="image_url"),
]
measurement_fields = [
Field(name="weight", fields=["value", "unit"]),
]
inventory_item_fields = [
Field(name="id", alias="inventory_item_id"),
Field(name="tracked", alias="tracked"),
Field(name="requiresShipping", alias="requires_shipping"),
Field(name="measurement", alias="measurement", fields=measurement_fields),
]
query_nodes: List[Field] = [
"__typename",
"id",
"title",
"price",
"sku",
"position",
"inventoryPolicy",
"compareAtPrice",
"createdAt",
"updatedAt",
"taxable",
"barcode",
"inventoryQuantity",
"availableForSale",
"displayName",
"taxCode",
Field(name="selectedOptions", alias="options", fields=option_fields),
Field(name="image", fields=image_fields),
Field(name="inventoryQuantity", alias="old_inventory_quantity"),
Field(name="product", fields=[Field(name="id", alias="product_id")]),
Field(name="inventoryItem", fields=inventory_item_fields),
] + presentment_prices
return query_nodes
record_composition = {
"new_record": "ProductVariant",
# each `ProductVariant` could have `ProductVariantPricePair` associated with the product variant.
"record_components": ["ProductVariantPricePair"],
}
def _process_presentment_prices(self, entity: List[dict]) -> List[dict]:
for item in entity:
# remove the `__parentId` from the object
if BULK_PARENT_KEY in item:
item.pop(BULK_PARENT_KEY)
            # these objects could be literally `Null/None` in the response; that would be
            # treated as a real value, so we need to assign the correct values instead
price: Optional[Mapping[str, Any]] = item.get("price", {})
if not price:
price = {}
# get the amount values
price_amount = price.get("amount") if price else None
            # bring the nested object's values in line with the schema (cast `str` to `float`)
item["price"]["amount"] = float(price_amount) if price_amount else None
# convert field names to snake case
item["price"] = self.tools.fields_names_to_snake_case(item.get("price"))
compare_at_price: Optional[Mapping[str, Any]] = item.get("compareAtPrice", {})
if not compare_at_price:
compare_at_price = {}
            # assign the correct value if there is no object in the response
item["compareAtPrice"] = compare_at_price
compare_at_price_amount = compare_at_price.get("amount") if compare_at_price else None
item["compareAtPrice"]["amount"] = float(compare_at_price_amount) if compare_at_price_amount else None
item["compare_at_price"] = self.tools.fields_names_to_snake_case(item["compareAtPrice"])
# remove leftovers
item.pop("compareAtPrice", None)
return entity
def _unnest_and_resolve_id(self, record: MutableMapping[str, Any], from_property: str, id_field: str) -> int:
entity = record.get(from_property, {})
return self.tools.resolve_str_id(entity.get(id_field)) if entity else None
def record_process_components(self, record: MutableMapping[str, Any]) -> Iterable[MutableMapping[str, Any]]:
"""
Defines how to process collected components.
"""
# get the joined record components collected for the record
record_components = record.get("record_components", {})
# process record components
if record_components:
record["presentment_prices"] = self._process_presentment_prices(record_components.get("ProductVariantPricePair", []))
record.pop("record_components")
# unnest mandatory fields from their placeholders
record["product_id"] = self._unnest_and_resolve_id(record, "product", "product_id")
record["inventory_item_id"] = self._unnest_and_resolve_id(record, "inventoryItem", "inventory_item_id")
inventory_item = record.get("inventoryItem")
measurement_weight = record.get("inventoryItem", {}).get("measurement", {}).get("weight")
record["weight"] = measurement_weight.get("value", 0.0) if measurement_weight is not None else 0.0
record["weight_unit"] = measurement_weight.get("unit") if measurement_weight else None
record["tracked"] = inventory_item.get("tracked") if inventory_item else None
record["requires_shipping"] = inventory_item.get("requires_shipping") if inventory_item else None
record["image_id"] = self._unnest_and_resolve_id(record, "image", "image_id")
image = record.get("image", {})
record["image_src"] = image.get("image_src") if image else None
record["image_url"] = image.get("image_url") if image else None
# cast the `price` to number, could be literally `None`
price = record.get("price")
record["price"] = float(price) if price else None
# cast the `grams` to integer
record["grams"] = int(record.get("weight", 0))
# convert date-time cursors
record["createdAt"] = self.tools.from_iso8601_to_rfc3339(record, "createdAt")
record["updatedAt"] = self.tools.from_iso8601_to_rfc3339(record, "updatedAt")
# clean up the leftovers
record.pop("image", None)
record.pop("product", None)
record.pop("inventoryItem", None)
yield record
| ProductVariant |
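A rough standalone sketch of the price normalization performed in _process_presentment_prices above. The to_snake_case helper here is a hypothetical stand-in for tools.fields_names_to_snake_case, and the record values are made up:

import re

def to_snake_case(obj):
    # hypothetical helper: camelCase keys -> snake_case keys
    return {re.sub(r"(?<!^)(?=[A-Z])", "_", k).lower(): v for k, v in obj.items()}

item = {
    "__parentId": "gid://shopify/ProductVariant/1",
    "price": {"amount": "19.99", "currencyCode": "USD"},
    "compareAtPrice": None,  # GraphQL may return a literal null here
}
item.pop("__parentId", None)

price = item.get("price") or {}
amount = price.get("amount")
item["price"] = to_snake_case({**price, "amount": float(amount) if amount else None})

compare = item.pop("compareAtPrice", None) or {}
amount = compare.get("amount")
item["compare_at_price"] = to_snake_case({**compare, "amount": float(amount) if amount else None})

assert item == {
    "price": {"amount": 19.99, "currency_code": "USD"},
    "compare_at_price": {"amount": None},
}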
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/service/local_workers_test.py | {
"start": 1288,
"end": 10891
} | class ____(data_service_test_base.TestBase, parameterized.TestCase):
"""Tests reading from local workers if `target_workers` is `local`."""
@combinations.generate(test_base.default_test_combinations())
def testOneLocalWorker(self):
cluster = multi_process_cluster.MultiProcessCluster(
num_local_workers=1, num_remote_workers=5)
num_elements = 10
ds = self.make_distributed_range_dataset(
num_elements, cluster, target_workers="local")
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
num_local_workers=[1, 3], num_remote_workers=[0, 3])))
def testLocalWorkers(self, num_local_workers, num_remote_workers):
cluster = multi_process_cluster.MultiProcessCluster(
num_local_workers=num_local_workers,
num_remote_workers=num_remote_workers)
num_elements = 10
ds = self.make_distributed_range_dataset(
num_elements, cluster, target_workers="LOCAL")
self.assertDatasetProduces(
ds,
num_local_workers * list(range(num_elements)),
assert_items_equal=True)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
num_local_workers=[1, 3], num_remote_workers=[0, 3])))
def testRepeatedDataset(self, num_local_workers, num_remote_workers):
cluster = multi_process_cluster.MultiProcessCluster(
num_local_workers=num_local_workers,
num_remote_workers=num_remote_workers)
num_elements = 10
num_repetitions = 5
ds = self.make_distributed_range_dataset(
num_elements, cluster, target_workers="LOCAL")
ds = ds.repeat(num_repetitions)
self.assertDatasetProduces(
ds,
expected_output=num_local_workers * num_repetitions *
list(range(num_elements)),
assert_items_equal=True)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
num_local_workers=[1, 3], num_remote_workers=[0, 3])))
def testPrefetchingDataset(self, num_local_workers, num_remote_workers):
cluster = multi_process_cluster.MultiProcessCluster(
num_local_workers=num_local_workers,
num_remote_workers=num_remote_workers)
num_elements = 10
ds = self.make_distributed_range_dataset(
num_elements, cluster, target_workers="LOCAL")
ds = ds.prefetch(10)
self.assertDatasetProduces(
ds,
expected_output=num_local_workers * list(range(num_elements)),
assert_items_equal=True)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
num_local_workers=[1, 3], num_remote_workers=[0, 3])))
def testMultipleEpochs(self, num_local_workers, num_remote_workers):
cluster = multi_process_cluster.MultiProcessCluster(
num_local_workers=num_local_workers,
num_remote_workers=num_remote_workers)
num_elements = 10
ds = self.make_distributed_range_dataset(
num_elements, cluster, target_workers="LOCAL")
for _ in range(10):
self.assertDatasetProduces(
ds,
num_local_workers * list(range(num_elements)),
assert_items_equal=True)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
num_local_workers=[1, 3], num_remote_workers=[0, 3])))
def testDynamicSharding(self, num_local_workers, num_remote_workers):
cluster = multi_process_cluster.MultiProcessCluster(
num_local_workers=num_local_workers,
num_remote_workers=num_remote_workers)
num_elements = 100
ds = self.make_distributed_range_dataset(
num_elements,
cluster,
processing_mode=data_service_ops.ShardingPolicy.DYNAMIC,
target_workers="LOCAL")
self.assertDatasetProduces(
ds, list(range(num_elements)), assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testMultipleConsumers(self):
num_local_workers, num_remote_workers = 1, 3
cluster = multi_process_cluster.MultiProcessCluster(
num_local_workers=num_local_workers,
num_remote_workers=num_remote_workers)
# Because the elements in datasets are prefetched one per
# CPU core, a static number here may be excessively large
# for small numbers of CPU cores, or too small for high
# CPU core count machines, or probably both.
# In this case the below formula should satisfy both needs.
num_elements = 50 + (multiprocessing.cpu_count() * 2)
num_consumers = 8
iterators = []
for _ in range(num_consumers):
dataset = self.make_distributed_range_dataset(
num_elements, cluster, job_name="shared_job")
iterators.append(self.getNext(dataset))
results = []
for _ in range(10):
for it in iterators:
results.append(self.evaluate(it()))
for it in iterators:
results.extend(self.getIteratorOutput(it))
self.assertCountEqual(results, (num_local_workers + num_remote_workers) *
list(range(num_elements)))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
num_local_workers=[1, 3], num_remote_workers=[0, 3])))
def testEmptyDataset(self, num_local_workers, num_remote_workers):
cluster = multi_process_cluster.MultiProcessCluster(
num_local_workers=num_local_workers,
num_remote_workers=num_remote_workers)
num_elements = 0
ds = self.make_distributed_range_dataset(
num_elements, cluster, target_workers="LOCAL")
self.assertDatasetProduces(ds, [])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
num_local_workers=[0, 3], num_remote_workers=[1, 3])))
def testNonLocalRead(self, num_local_workers, num_remote_workers):
"""This test ensures the remote workers are running and producing data."""
cluster = multi_process_cluster.MultiProcessCluster(
num_local_workers=num_local_workers,
num_remote_workers=num_remote_workers)
num_elements = 10
ds = self.make_distributed_range_dataset(num_elements, cluster)
num_workers = num_local_workers + num_remote_workers
self.assertDatasetProduces(
ds, num_workers * list(range(num_elements)), assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testNoLocalWorker(self):
cluster = multi_process_cluster.MultiProcessCluster(
num_local_workers=0, num_remote_workers=3)
num_elements = 10
ds = self.make_distributed_range_dataset(
num_elements, cluster, target_workers="LOCAL")
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Local reads require local tf.data workers, but no local worker is "
"found."):
self.getDatasetOutput(ds)
@combinations.generate(test_base.default_test_combinations())
def testInconsistentTargetWorkers(self):
cluster = multi_process_cluster.MultiProcessCluster(
num_local_workers=3, num_remote_workers=3)
ds = dataset_ops.Dataset.range(10)
datasets = [
self.make_distributed_dataset(
ds, cluster, job_name="test_job", target_workers=target_workers)
for target_workers in ["AUTO", "ANY", "LOCAL"]
]
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"but found an existing job with different parameters: "
"Existing target workers: <AUTO>"):
for dataset in datasets:
self.getDatasetOutput(dataset)
@combinations.generate(test_base.default_test_combinations())
def testAnonymousJobWithDifferentTargetWorkers(self):
num_local_workers, num_remote_workers = (3, 3)
cluster = multi_process_cluster.MultiProcessCluster(num_local_workers,
num_remote_workers)
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
datasets = {
target_workers: self.make_distributed_dataset(
ds, cluster, target_workers=target_workers)
for target_workers in ["AUTO", "ANY", "LOCAL"]
}
num_workers = num_local_workers + num_remote_workers
self.assertDatasetProduces(
datasets["AUTO"],
num_workers * list(range(num_elements)),
assert_items_equal=True)
self.assertDatasetProduces(
datasets["ANY"],
num_workers * list(range(num_elements)),
assert_items_equal=True)
self.assertDatasetProduces(
datasets["LOCAL"],
num_local_workers * list(range(num_elements)),
assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testCoordinatedRead(self):
cluster = multi_process_cluster.MultiProcessCluster(
num_local_workers=3, num_remote_workers=3)
ds = dataset_ops.Dataset.range(10).repeat()
ds = self.make_distributed_dataset(
ds,
cluster,
job_name="test_job",
consumer_index=0,
num_consumers=3,
target_workers="LOCAL")
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Coordinated reads require non-local workers"):
self.getDatasetOutput(ds)
| LocalWorkersTest |
python | huggingface__transformers | src/transformers/utils/metrics.py | {
"start": 182,
"end": 6291
} | class ____(Enum):
"""Status of a generation request through its lifecycle."""
PENDING = "pending"
PREFILLING = "prefilling"
PREFILLING_SPLIT = "prefilling_split"
SPLIT_PENDING_REMAINDER = "split_pending_remainder"
DECODING = "decoding"
FINISHED = "finished"
FAILED = "failed"
if is_opentelemetry_available():
from opentelemetry import metrics
from opentelemetry.trace import Status, StatusCode, get_tracer
_has_opentelemetry = True
else:
_has_opentelemetry = False
def attach_tracer(tracer_name_template=None):
"""
Decorator that attaches a tracer to a class.
This decorator should be applied to classes that need OpenTelemetry tracing.
It adds a tracer attribute to the class instance that can be used by the traced decorator.
Args:
tracer_name_template: Optional template string for the tracer name.
If provided, it should contain {module} which will be replaced with the class's full module path
and {class_name} for the class name.
If None, a default naming scheme will be used where:
- If the module already starts with "transformers.", it will use that directly
- Otherwise, it will prepend "transformers." to the module name
Returns:
Class decorator function
"""
if not _has_opentelemetry:
return lambda cls: cls
def decorator(cls):
original_init = cls.__init__
@functools.wraps(original_init)
def init_with_tracer(self, *args, **kwargs):
original_init(self, *args, **kwargs)
module_name = cls.__module__
class_name = cls.__qualname__
if tracer_name_template is None:
if module_name.startswith("transformers."):
tracer_name = f"{module_name}.{class_name}"
else:
tracer_name = f"transformers.{module_name}.{class_name}"
else:
tracer_name = tracer_name_template.format(module=module_name, class_name=class_name)
self.tracer = get_tracer(tracer_name)
cls.__init__ = init_with_tracer
return cls
return decorator
def traced(
func=None,
*,
span_name=None,
standalone=False,
additional_attributes: list[tuple[str, str, Any | Callable[[Any], Any]]] | None = None,
):
"""
Decorator to trace function calls with OpenTelemetry.
Can be used as @traced or @traced(span_name="custom_name")
Args:
func: The function to trace
span_name: Optional custom name for the span (defaults to function name)
standalone: If True, creates a parentless span
additional_attributes: Optional list of additional attributes to set on the span.
Each item is a tuple of (instance_attribute_name, span_attribute_key, value_or_transform_function)
where:
- instance_attribute_name: Name of the attribute to get from the class instance
- span_attribute_key: Key to use when setting the attribute on the span
- value_or_transform_function: Either a raw value to use directly, or a function to transform
the attribute value before setting it on the span
Returns:
Decorated function with tracing
"""
def decorator(func):
if not _has_opentelemetry:
return func
@functools.wraps(func)
def wrapper(*args, **kwargs):
instance = args[0] if args and (hasattr(func, "__self__") and func.__self__ is not None) else None
is_method = instance is not None
if is_method and hasattr(instance, "tracer"):
tracer = instance.tracer
else:
tracer = get_tracer(f"transformers.{func.__module__}.{func.__name__}")
name = span_name or func.__name__
span_fn = tracer.start_span if standalone else tracer.start_as_current_span
with span_fn(name) as span:
span.set_attribute("function.name", func.__name__)
span.set_attribute("function.module", func.__module__)
span.set_attribute("function.is_method", is_method)
if args:
for i, arg in enumerate(args):
if isinstance(arg, (str, int, float, bool)) or arg is None:
span.set_attribute(f"args.{i}", str(arg))
else:
span.set_attribute(f"args.{i}", str(type(arg)))
if kwargs:
for key, value in kwargs.items():
if isinstance(value, (str, int, float, bool)) or value is None:
span.set_attribute(f"kwargs.{key}", str(value))
else:
span.set_attribute(f"kwargs.{key}", str(type(value)))
if additional_attributes and is_method:
for attr_config in additional_attributes:
instance_attribute_name, span_attribute_key, value_or_transform_function = attr_config
if hasattr(instance, instance_attribute_name):
attribute_value = getattr(instance, instance_attribute_name)
if callable(value_or_transform_function):
transformed_value = value_or_transform_function(attribute_value)
else:
transformed_value = value_or_transform_function
span.set_attribute(span_attribute_key, transformed_value)
try:
result = func(*args, **kwargs)
return result
except Exception as e:
span.set_status(Status(StatusCode.ERROR))
span.record_exception(e)
raise
return wrapper
if func is None:
return decorator
return decorator(func)
logger = logging.getLogger(__name__)
@attach_tracer()
| RequestStatus |
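A minimal sketch of the decorator pattern used by traced above, with a fake context manager standing in for an OpenTelemetry span so it runs without the dependency; the span attribute names mirror the ones set in the code above:

import functools
from contextlib import contextmanager

@contextmanager
def fake_span(name):
    # stand-in for tracer.start_as_current_span(name)
    attributes = {}
    print(f"start span {name!r}")
    try:
        yield attributes
    finally:
        print(f"end span {name!r} attributes={attributes}")

def traced(func=None, *, span_name=None):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with fake_span(span_name or func.__name__) as span:
                span["function.module"] = func.__module__
                for i, arg in enumerate(args):
                    span[f"args.{i}"] = repr(arg)
                return func(*args, **kwargs)
        return wrapper
    # supports both @traced and @traced(span_name="...")
    return decorator if func is None else decorator(func)

@traced(span_name="tokenize")
def tokenize(text):
    return text.split()

assert tokenize("hello world") == ["hello", "world"]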
python | django-guardian__django-guardian | guardian/testapp/models.py | {
"start": 388,
"end": 519
} | class ____(models.Model):
title = models.CharField("title", max_length=64)
def __str__(self):
return self.title
| Post |
python | matplotlib__matplotlib | lib/matplotlib/widgets.py | {
"start": 113516,
"end": 133449
} | class ____(_SelectorWidget):
"""
Select a rectangular region of an Axes.
For the cursor to remain responsive you must keep a reference to it.
Press and release events triggered at the same coordinates outside the
selection will clear the selector, except when
``ignore_event_outside=True``.
%s
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import matplotlib.widgets as mwidgets
>>> fig, ax = plt.subplots()
>>> ax.plot([1, 2, 3], [10, 50, 100])
>>> def onselect(eclick, erelease):
... print(eclick.xdata, eclick.ydata)
... print(erelease.xdata, erelease.ydata)
>>> props = dict(facecolor='blue', alpha=0.5)
>>> rect = mwidgets.RectangleSelector(ax, onselect, interactive=True,
... props=props)
>>> fig.show()
>>> rect.add_state('square')
See also: :doc:`/gallery/widgets/rectangle_selector`
"""
def __init__(self, ax, onselect=None, *, minspanx=0,
minspany=0, useblit=False,
props=None, spancoords='data', button=None, grab_range=10,
handle_props=None, interactive=False,
state_modifier_keys=None, drag_from_anywhere=False,
ignore_event_outside=False, use_data_coordinates=False):
super().__init__(ax, onselect, useblit=useblit, button=button,
state_modifier_keys=state_modifier_keys,
use_data_coordinates=use_data_coordinates)
self._interactive = interactive
self.drag_from_anywhere = drag_from_anywhere
self.ignore_event_outside = ignore_event_outside
self._rotation = 0.0
self._aspect_ratio_correction = 1.0
# State to allow the option of an interactive selector that can't be
# interactively drawn. This is used in PolygonSelector as an
# interactive bounding box to allow the polygon to be easily resized
self._allow_creation = True
if props is None:
props = dict(facecolor='red', edgecolor='black',
alpha=0.2, fill=True)
props = {**props, 'animated': self.useblit}
self._visible = props.pop('visible', self._visible)
to_draw = self._init_shape(**props)
self.ax.add_patch(to_draw)
self._selection_artist = to_draw
self._set_aspect_ratio_correction()
self.minspanx = minspanx
self.minspany = minspany
_api.check_in_list(['data', 'pixels'], spancoords=spancoords)
self.spancoords = spancoords
self.grab_range = grab_range
if self._interactive:
self._handle_props = {
'markeredgecolor': (props or {}).get('edgecolor', 'black'),
**cbook.normalize_kwargs(handle_props, Line2D)}
self._corner_order = ['SW', 'SE', 'NE', 'NW']
xc, yc = self.corners
self._corner_handles = ToolHandles(self.ax, xc, yc,
marker_props=self._handle_props,
useblit=self.useblit)
self._edge_order = ['W', 'S', 'E', 'N']
xe, ye = self.edge_centers
self._edge_handles = ToolHandles(self.ax, xe, ye, marker='s',
marker_props=self._handle_props,
useblit=self.useblit)
xc, yc = self.center
self._center_handle = ToolHandles(self.ax, [xc], [yc], marker='s',
marker_props=self._handle_props,
useblit=self.useblit)
self._active_handle = None
self._extents_on_press = None
@property
def _handles_artists(self):
return (*self._center_handle.artists, *self._corner_handles.artists,
*self._edge_handles.artists)
def _init_shape(self, **props):
return Rectangle((0, 0), 0, 1, visible=False,
rotation_point='center', **props)
def _press(self, event):
"""Button press event handler."""
        # make the drawn box/line visible and get the click coordinates, button, ...
if self._interactive and self._selection_artist.get_visible():
self._set_active_handle(event)
else:
self._active_handle = None
if ((self._active_handle is None or not self._interactive) and
self._allow_creation):
# Clear previous rectangle before drawing new rectangle.
self.update()
if (self._active_handle is None and not self.ignore_event_outside and
self._allow_creation):
x, y = self._get_data_coords(event)
self._visible = False
self.extents = x, x, y, y
self._visible = True
else:
self.set_visible(True)
self._extents_on_press = self.extents
self._rotation_on_press = self._rotation
self._set_aspect_ratio_correction()
match self._get_action():
case _RectangleSelectorAction.ROTATE:
# TODO: set to a rotate cursor if possible?
pass
case _RectangleSelectorAction.MOVE:
self._set_cursor(backend_tools.cursors.MOVE)
case _RectangleSelectorAction.RESIZE:
# TODO: set to a resize cursor if possible?
pass
case _RectangleSelectorAction.CREATE:
self._set_cursor(backend_tools.cursors.SELECT_REGION)
return False
def _release(self, event):
"""Button release event handler."""
self._set_cursor(backend_tools.Cursors.POINTER)
if not self._interactive:
self._selection_artist.set_visible(False)
if (self._active_handle is None and self._selection_completed and
self.ignore_event_outside):
return
# update the eventpress and eventrelease with the resulting extents
x0, x1, y0, y1 = self.extents
self._eventpress.xdata = x0
self._eventpress.ydata = y0
xy0 = self.ax.transData.transform([x0, y0])
self._eventpress.x, self._eventpress.y = xy0
self._eventrelease.xdata = x1
self._eventrelease.ydata = y1
xy1 = self.ax.transData.transform([x1, y1])
self._eventrelease.x, self._eventrelease.y = xy1
# calculate dimensions of box or line
if self.spancoords == 'data':
spanx = abs(self._eventpress.xdata - self._eventrelease.xdata)
spany = abs(self._eventpress.ydata - self._eventrelease.ydata)
elif self.spancoords == 'pixels':
spanx = abs(self._eventpress.x - self._eventrelease.x)
spany = abs(self._eventpress.y - self._eventrelease.y)
else:
_api.check_in_list(['data', 'pixels'],
spancoords=self.spancoords)
# check if drawn distance (if it exists) is not too small in
# either x or y-direction
if spanx <= self.minspanx or spany <= self.minspany:
if self._selection_completed:
# Call onselect, only when the selection is already existing
self.onselect(self._eventpress, self._eventrelease)
self._clear_without_update()
else:
self.onselect(self._eventpress, self._eventrelease)
self._selection_completed = True
self.update()
self._active_handle = None
self._extents_on_press = None
return False
def _get_action(self):
state = self._state
if 'rotate' in state and self._active_handle in self._corner_order:
return _RectangleSelectorAction.ROTATE
elif self._active_handle == 'C':
return _RectangleSelectorAction.MOVE
elif self._active_handle:
return _RectangleSelectorAction.RESIZE
return _RectangleSelectorAction.CREATE
def _onmove(self, event):
"""
Motion notify event handler.
This can do one of four things:
- Translate
- Rotate
- Re-size
- Continue the creation of a new shape
"""
eventpress = self._eventpress
# The calculations are done for rotation at zero: we apply inverse
# transformation to events except when we rotate and move
state = self._state
action = self._get_action()
xdata, ydata = self._get_data_coords(event)
if action == _RectangleSelectorAction.RESIZE:
inv_tr = self._get_rotation_transform().inverted()
xdata, ydata = inv_tr.transform([xdata, ydata])
eventpress.xdata, eventpress.ydata = inv_tr.transform(
(eventpress.xdata, eventpress.ydata))
dx = xdata - eventpress.xdata
dy = ydata - eventpress.ydata
# refmax is used when moving the corner handle with the square state
# and is the maximum between refx and refy
refmax = None
if self._use_data_coordinates:
refx, refy = dx, dy
else:
# Get dx/dy in display coordinates
refx = event.x - eventpress.x
refy = event.y - eventpress.y
x0, x1, y0, y1 = self._extents_on_press
# rotate an existing shape
if action == _RectangleSelectorAction.ROTATE:
# calculate angle abc
a = (eventpress.xdata, eventpress.ydata)
b = self.center
c = (xdata, ydata)
angle = (np.arctan2(c[1]-b[1], c[0]-b[0]) -
np.arctan2(a[1]-b[1], a[0]-b[0]))
self.rotation = np.rad2deg(self._rotation_on_press + angle)
elif action == _RectangleSelectorAction.RESIZE:
size_on_press = [x1 - x0, y1 - y0]
center = (x0 + size_on_press[0] / 2, y0 + size_on_press[1] / 2)
# Keeping the center fixed
if 'center' in state:
# hh, hw are half-height and half-width
if 'square' in state:
# when using a corner, find which reference to use
if self._active_handle in self._corner_order:
refmax = max(refx, refy, key=abs)
if self._active_handle in ['E', 'W'] or refmax == refx:
hw = xdata - center[0]
hh = hw / self._aspect_ratio_correction
else:
hh = ydata - center[1]
hw = hh * self._aspect_ratio_correction
else:
hw = size_on_press[0] / 2
hh = size_on_press[1] / 2
# cancel changes in perpendicular direction
if self._active_handle in ['E', 'W'] + self._corner_order:
hw = abs(xdata - center[0])
if self._active_handle in ['N', 'S'] + self._corner_order:
hh = abs(ydata - center[1])
x0, x1, y0, y1 = (center[0] - hw, center[0] + hw,
center[1] - hh, center[1] + hh)
else:
# change sign of relative changes to simplify calculation
# Switch variables so that x1 and/or y1 are updated on move
if 'W' in self._active_handle:
x0 = x1
if 'S' in self._active_handle:
y0 = y1
if self._active_handle in ['E', 'W'] + self._corner_order:
x1 = xdata
if self._active_handle in ['N', 'S'] + self._corner_order:
y1 = ydata
if 'square' in state:
# when using a corner, find which reference to use
if self._active_handle in self._corner_order:
refmax = max(refx, refy, key=abs)
if self._active_handle in ['E', 'W'] or refmax == refx:
sign = np.sign(ydata - y0)
y1 = y0 + sign * abs(x1 - x0) / self._aspect_ratio_correction
else:
sign = np.sign(xdata - x0)
x1 = x0 + sign * abs(y1 - y0) * self._aspect_ratio_correction
elif action == _RectangleSelectorAction.MOVE:
x0, x1, y0, y1 = self._extents_on_press
dx = xdata - eventpress.xdata
dy = ydata - eventpress.ydata
x0 += dx
x1 += dx
y0 += dy
y1 += dy
else:
# Create a new shape
self._rotation = 0
# Don't create a new rectangle if there is already one when
# ignore_event_outside=True
if ((self.ignore_event_outside and self._selection_completed) or
not self._allow_creation):
return
center = [eventpress.xdata, eventpress.ydata]
dx = (xdata - center[0]) / 2
dy = (ydata - center[1]) / 2
# square shape
if 'square' in state:
refmax = max(refx, refy, key=abs)
if refmax == refx:
dy = np.sign(dy) * abs(dx) / self._aspect_ratio_correction
else:
dx = np.sign(dx) * abs(dy) * self._aspect_ratio_correction
# from center
if 'center' in state:
dx *= 2
dy *= 2
# from corner
else:
center[0] += dx
center[1] += dy
x0, x1, y0, y1 = (center[0] - dx, center[0] + dx,
center[1] - dy, center[1] + dy)
self.extents = x0, x1, y0, y1
@property
def _rect_bbox(self):
return self._selection_artist.get_bbox().bounds
def _set_aspect_ratio_correction(self):
aspect_ratio = self.ax._get_aspect_ratio()
self._selection_artist._aspect_ratio_correction = aspect_ratio
if self._use_data_coordinates:
self._aspect_ratio_correction = 1
else:
self._aspect_ratio_correction = aspect_ratio
def _get_rotation_transform(self):
aspect_ratio = self.ax._get_aspect_ratio()
return Affine2D().translate(-self.center[0], -self.center[1]) \
.scale(1, aspect_ratio) \
.rotate(self._rotation) \
.scale(1, 1 / aspect_ratio) \
.translate(*self.center)
@property
def corners(self):
"""
Corners of rectangle in data coordinates from lower left,
moving clockwise.
"""
x0, y0, width, height = self._rect_bbox
xc = x0, x0 + width, x0 + width, x0
yc = y0, y0, y0 + height, y0 + height
transform = self._get_rotation_transform()
coords = transform.transform(np.array([xc, yc]).T).T
return coords[0], coords[1]
@property
def edge_centers(self):
"""
Midpoint of rectangle edges in data coordinates from left,
moving anti-clockwise.
"""
x0, y0, width, height = self._rect_bbox
w = width / 2.
h = height / 2.
xe = x0, x0 + w, x0 + width, x0 + w
ye = y0 + h, y0, y0 + h, y0 + height
transform = self._get_rotation_transform()
coords = transform.transform(np.array([xe, ye]).T).T
return coords[0], coords[1]
@property
def center(self):
"""Center of rectangle in data coordinates."""
x0, y0, width, height = self._rect_bbox
return x0 + width / 2., y0 + height / 2.
@property
def extents(self):
"""
Return (xmin, xmax, ymin, ymax) in data coordinates as defined by the
bounding box before rotation.
"""
x0, y0, width, height = self._rect_bbox
xmin, xmax = sorted([x0, x0 + width])
ymin, ymax = sorted([y0, y0 + height])
return xmin, xmax, ymin, ymax
@extents.setter
def extents(self, extents):
# Update displayed shape
self._draw_shape(extents)
if self._interactive:
# Update displayed handles
self._corner_handles.set_data(*self.corners)
self._edge_handles.set_data(*self.edge_centers)
x, y = self.center
self._center_handle.set_data([x], [y])
self.set_visible(self._visible)
self.update()
@property
def rotation(self):
"""
        Rotation in degrees in the interval [-45°, 45°]. The rotation is limited
        to this range to keep the implementation simple.
"""
return np.rad2deg(self._rotation)
@rotation.setter
def rotation(self, value):
# Restrict to a limited range of rotation [-45°, 45°] to avoid changing
# order of handles
if -45 <= value and value <= 45:
self._rotation = np.deg2rad(value)
# call extents setter to draw shape and update handles positions
self.extents = self.extents
def _draw_shape(self, extents):
x0, x1, y0, y1 = extents
xmin, xmax = sorted([x0, x1])
ymin, ymax = sorted([y0, y1])
xlim = sorted(self.ax.get_xlim())
ylim = sorted(self.ax.get_ylim())
xmin = max(xlim[0], xmin)
ymin = max(ylim[0], ymin)
xmax = min(xmax, xlim[1])
ymax = min(ymax, ylim[1])
self._selection_artist.set_x(xmin)
self._selection_artist.set_y(ymin)
self._selection_artist.set_width(xmax - xmin)
self._selection_artist.set_height(ymax - ymin)
self._selection_artist.set_angle(self.rotation)
def _set_active_handle(self, event):
"""Set active handle based on the location of the mouse event."""
# Note: event.xdata/ydata in data coordinates, event.x/y in pixels
c_idx, c_dist = self._corner_handles.closest(event.x, event.y)
e_idx, e_dist = self._edge_handles.closest(event.x, event.y)
m_idx, m_dist = self._center_handle.closest(event.x, event.y)
if 'move' in self._state:
self._active_handle = 'C'
# Set active handle as closest handle, if mouse click is close enough.
elif m_dist < self.grab_range * 2:
# Prioritise center handle over other handles
self._active_handle = 'C'
elif c_dist > self.grab_range and e_dist > self.grab_range:
# Not close to any handles
if self.drag_from_anywhere and self._contains(event):
# Check if we've clicked inside the region
self._active_handle = 'C'
else:
self._active_handle = None
return
elif c_dist < e_dist:
# Closest to a corner handle
self._active_handle = self._corner_order[c_idx]
else:
# Closest to an edge handle
self._active_handle = self._edge_order[e_idx]
def _contains(self, event):
"""Return True if event is within the patch."""
return self._selection_artist.contains(event, radius=0)[0]
@property
def geometry(self):
"""
Return an array of shape (2, 5) containing the
x (``RectangleSelector.geometry[1, :]``) and
y (``RectangleSelector.geometry[0, :]``) data coordinates of the four
corners of the rectangle starting and ending in the top left corner.
"""
if hasattr(self._selection_artist, 'get_verts'):
xfm = self.ax.transData.inverted()
y, x = xfm.transform(self._selection_artist.get_verts()).T
return np.array([x, y])
else:
return np.array(self._selection_artist.get_data())
@_docstring.Substitution(_RECTANGLESELECTOR_PARAMETERS_DOCSTRING.replace(
'__ARTIST_NAME__', 'ellipse'))
| RectangleSelector |
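A standalone sketch of the transform composed in _get_rotation_transform above: rotate about the rectangle's center while compensating for a non-square axes aspect ratio. The center, aspect ratio, and corner values are made up, and aspect_ratio stands in for ax._get_aspect_ratio():

import numpy as np
from matplotlib.transforms import Affine2D

center = (2.0, 3.0)
aspect_ratio = 0.5          # stand-in for ax._get_aspect_ratio()
rotation = np.deg2rad(30)

tr = (Affine2D()
      .translate(-center[0], -center[1])  # move the center to the origin
      .scale(1, aspect_ratio)             # work in a visually square frame
      .rotate(rotation)                   # rotate about the origin
      .scale(1, 1 / aspect_ratio)         # undo the aspect correction
      .translate(*center))                # move back to the original center

corners = np.array([[1.0, 2.5], [3.0, 2.5], [3.0, 3.5], [1.0, 3.5]])
print(tr.transform(corners))              # rotated corner coordinates, shape (4, 2)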
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_privacy_urls.py | {
"start": 19146,
"end": 19708
} | class ____(URLAccessMixin):
def setUp(self):
super().setUp()
self.response_data.update(
{
"/accounts/tokens/create/": {"status_code": 405},
"/accounts/tokens/delete/": {"status_code": 405},
}
)
self.default_kwargs.update(
{
"username": self.tester.username,
}
)
def test_private_urls(self):
from readthedocs.profiles.urls.private import urlpatterns
self._test_url(urlpatterns)
| PrivateUserProfileMixin |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0082_disconnect_error_detector_cron_workflows.py | {
"start": 1333,
"end": 2670
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("workflow_engine", "0081_add_unique_constraint_to_detector_group"),
]
operations = [
migrations.RunPython(
disconnect_error_detector_cron_workflows,
migrations.RunPython.noop,
hints={"tables": ["sentry_rule"]},
)
]
| Migration |
python | jina-ai__jina | jina/logging/logger.py | {
"start": 391,
"end": 1903
} | class ____(_LogRender):
"""Override the original rich log record for more compact layout."""
def __call__(
self,
console,
renderables,
log_time=None,
time_format=None,
level=None,
path=None,
line_no=None,
link_path=None,
):
from rich.containers import Renderables
from rich.table import Table
from rich.text import Text
output = Table.grid(padding=(0, 1))
output.expand = True
if self.show_level:
output.add_column(style="log.level", width=5)
output.add_column(ratio=1, style='log.message', overflow='ellipsis')
if self.show_time:
output.add_column(style="log.path")
row = []
if self.show_level:
row.append(level)
row.append(Renderables(renderables))
if self.show_time:
log_time = log_time or console.get_datetime()
time_format = time_format or self.time_format
if callable(time_format):
log_time_display = time_format(log_time)
else:
log_time_display = Text(log_time.strftime(time_format))
if log_time_display == self._last_time and self.omit_repeated_times:
row.append(Text(" " * len(log_time_display)))
else:
row.append(log_time_display)
self._last_time = log_time_display
output.add_row(*row)
return output
| _MyLogRender |
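The same three-column grid can be assembled with rich's public API; a small sketch with made-up row values (the log.* style names are the ones already used in the code above and come from rich's default theme):

from rich.console import Console
from rich.table import Table
from rich.text import Text

output = Table.grid(padding=(0, 1))
output.expand = True
output.add_column(style="log.level", width=5)                          # level
output.add_column(ratio=1, style="log.message", overflow="ellipsis")   # message
output.add_column(style="log.path")                                    # time

output.add_row(Text("INFO"), Text("gateway started on port 51234"), Text("12:00:01"))
Console().print(output)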
python | pypa__hatch | tests/env/plugin/test_interface.py | {
"start": 32197,
"end": 50078
} | class ____:
def test_default(self, isolation, isolated_data_dir, platform, temp_application):
config = {
"project": {"name": "my_app", "version": "0.0.1", "dependencies": ["dep1"]},
"tool": {"hatch": {"envs": {"default": {"skip-install": False}}}},
}
project = Project(isolation, config=config)
project.set_app(temp_application)
temp_application.project = project
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
temp_application,
)
assert environment.dependencies == environment.dependencies == ["dep1"]
assert len(environment.dependencies) == len(environment.dependencies_complex)
def test_not_array(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1", "dependencies": ["dep1"]},
"tool": {"hatch": {"envs": {"default": {"dependencies": 9000}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(TypeError, match="Field `tool.hatch.envs.default.dependencies` must be an array"):
_ = environment.dependencies
def test_entry_not_string(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1", "dependencies": ["dep1"]},
"tool": {"hatch": {"envs": {"default": {"dependencies": [9000]}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
TypeError, match="Dependency #1 of field `tool.hatch.envs.default.dependencies` must be a string"
):
_ = environment.dependencies
def test_invalid(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1", "dependencies": ["dep1"]},
"tool": {"hatch": {"envs": {"default": {"dependencies": ["foo^1"]}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
ValueError, match="Dependency #1 of field `tool.hatch.envs.default.dependencies` is invalid: .+"
):
_ = environment.dependencies
def test_extra_not_array(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1", "dependencies": ["dep1"]},
"tool": {"hatch": {"envs": {"default": {"extra-dependencies": 9000}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(TypeError, match="Field `tool.hatch.envs.default.extra-dependencies` must be an array"):
_ = environment.dependencies
def test_extra_entry_not_string(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1", "dependencies": ["dep1"]},
"tool": {"hatch": {"envs": {"default": {"extra-dependencies": [9000]}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
TypeError, match="Dependency #1 of field `tool.hatch.envs.default.extra-dependencies` must be a string"
):
_ = environment.dependencies
def test_extra_invalid(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1", "dependencies": ["dep1"]},
"tool": {"hatch": {"envs": {"default": {"extra-dependencies": ["foo^1"]}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
ValueError, match="Dependency #1 of field `tool.hatch.envs.default.extra-dependencies` is invalid: .+"
):
_ = environment.dependencies
def test_full(self, isolation, isolated_data_dir, platform, temp_application):
config = {
"project": {"name": "my_app", "version": "0.0.1", "dependencies": ["dep1"]},
"tool": {
"hatch": {
"envs": {
"default": {"skip-install": False, "dependencies": ["dep2"], "extra-dependencies": ["dep3"]}
}
}
},
}
project = Project(isolation, config=config)
project.set_app(temp_application)
temp_application.project = project
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
temp_application,
)
assert environment.dependencies == ["dep2", "dep3", "dep1"]
def test_context_formatting(self, isolation, isolated_data_dir, platform, temp_application, uri_slash_prefix):
config = {
"project": {"name": "my_app", "version": "0.0.1", "dependencies": ["dep1"]},
"tool": {
"hatch": {
"envs": {
"default": {
"skip-install": False,
"dependencies": ["dep2"],
"extra-dependencies": ["proj @ {root:uri}"],
}
}
}
},
}
project = Project(isolation, config=config)
project.set_app(temp_application)
temp_application.project = project
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
temp_application,
)
normalized_path = str(isolation).replace("\\", "/")
assert environment.dependencies == ["dep2", f"proj@ file:{uri_slash_prefix}{normalized_path}", "dep1"]
def test_project_dependencies_context_formatting(
self, temp_dir, isolated_data_dir, platform, temp_application, uri_slash_prefix
):
"""
Regression test for context formatting in project dependencies.
        Ensures that dependencies in the [project] section with context variables
like {root:parent:uri} are properly formatted before creating Dependency objects.
"""
# Create a sibling project
sibling_project = temp_dir.parent / "sibling-project"
sibling_project.mkdir(exist_ok=True)
(sibling_project / "pyproject.toml").write_text(
"""\
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "sibling-project"
version = "0.0.1"
"""
)
config = {
"project": {
"name": "my_app",
"version": "0.0.1",
"dependencies": ["sibling-project @ {root:parent:uri}/sibling-project"],
},
"tool": {"hatch": {"envs": {"default": {"skip-install": False}}}},
}
project = Project(temp_dir, config=config)
project.set_app(temp_application)
temp_application.project = project
environment = MockEnvironment(
temp_dir,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
temp_application,
)
normalized_parent_path = str(temp_dir.parent).replace("\\", "/")
expected_dep = f"sibling-project@ file:{uri_slash_prefix}{normalized_parent_path}/sibling-project"
# Verify the dependency was formatted correctly
assert expected_dep in environment.dependencies
# Verify we can access the path property without errors
for dep in environment.project_dependencies_complex:
if dep.name == "sibling-project":
assert dep.path is not None
assert "sibling-project" in str(dep.path)
def test_full_skip_install(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1", "dependencies": ["dep1"]},
"tool": {
"hatch": {
"envs": {
"default": {"dependencies": ["dep2"], "extra-dependencies": ["dep3"], "skip-install": True}
}
}
},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
assert environment.dependencies == ["dep2", "dep3"]
def test_full_skip_install_and_features(self, isolation, isolated_data_dir, platform, temp_application):
config = {
"project": {
"name": "my_app",
"version": "0.0.1",
"dependencies": ["dep1"],
"optional-dependencies": {"feat": ["dep4"]},
},
"tool": {
"hatch": {
"envs": {
"default": {
"dependencies": ["dep2"],
"extra-dependencies": ["dep3"],
"skip-install": True,
"features": ["feat"],
}
}
}
},
}
project = Project(isolation, config=config)
project.set_app(temp_application)
temp_application.project = project
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
temp_application,
)
assert environment.dependencies == ["dep2", "dep3", "dep4"]
def test_full_skip_install_and_dependency_groups(self, isolation, isolated_data_dir, platform, temp_application):
config = {
"project": {
"name": "my_app",
"version": "0.0.1",
"dependencies": ["dep1"],
},
"dependency-groups": {
"foo": ["dep5"],
"bar": ["dep4", {"include-group": "foo"}],
},
"tool": {
"hatch": {
"envs": {
"default": {
"dependencies": ["dep2"],
"extra-dependencies": ["dep3"],
"skip-install": True,
"dependency-groups": ["bar"],
}
}
}
},
}
project = Project(isolation, config=config)
project.set_app(temp_application)
temp_application.project = project
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
temp_application,
)
assert environment.dependencies == ["dep2", "dep3", "dep4", "dep5"]
def test_full_no_dev_mode(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1", "dependencies": ["dep1"]},
"tool": {
"hatch": {
"envs": {"default": {"dependencies": ["dep2"], "extra-dependencies": ["dep3"], "dev-mode": False}}
}
},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
assert environment.dependencies == ["dep2", "dep3"]
def test_builder(self, isolation, isolated_data_dir, platform, global_application):
config = {
"build-system": {"requires": ["dep2"]},
"project": {"name": "my_app", "version": "0.0.1", "dependencies": ["dep1"]},
"tool": {
"hatch": {"envs": {"default": {"skip-install": False, "builder": True, "dependencies": ["dep3"]}}}
},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
assert environment.dependencies == ["dep3", "dep2"]
def test_workspace(self, temp_dir, isolated_data_dir, platform, temp_application):
for i in range(3):
project_file = temp_dir / f"foo{i}" / "pyproject.toml"
project_file.parent.mkdir()
project_file.write_text(
f"""\
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "foo{i}"
version = "0.0.1"
dependencies = ["pkg-{i}"]
[project.optional-dependencies]
feature1 = ["pkg-feature-1{i}"]
feature2 = ["pkg-feature-2{i}"]
feature3 = ["pkg-feature-3{i}"]
"""
)
config = {
"project": {"name": "my_app", "version": "0.0.1", "dependencies": ["dep1"]},
"tool": {
"hatch": {
"envs": {
"default": {
"skip-install": False,
"dependencies": ["dep2"],
"extra-dependencies": ["dep3"],
"workspace": {
"members": [
{"path": "foo0", "features": ["feature1"]},
{"path": "foo1", "features": ["feature1", "feature2"]},
{"path": "foo2", "features": ["feature1", "feature2", "feature3"]},
],
},
},
},
},
},
}
project = Project(temp_dir, config=config)
project.set_app(temp_application)
temp_application.project = project
environment = MockEnvironment(
temp_dir,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
temp_application,
)
assert environment.dependencies == [
"dep2",
"dep3",
"pkg-0",
"pkg-feature-10",
"pkg-1",
"pkg-feature-11",
"pkg-feature-21",
"pkg-2",
"pkg-feature-12",
"pkg-feature-22",
"pkg-feature-32",
"dep1",
]
| TestDependencies |
python | ZoranPandovski__al-go-rithms | data_structures/avl_tree/Python/avl.py | {
"start": 266,
"end": 8099
} | class ____():
def __init__(self, *args):
self.node = None
self.height = -1
        self.balance = 0
if len(args) == 1:
for i in args[0]:
self.insert(i)
def height(self):
if self.node:
return self.node.height
else:
return 0
def is_leaf(self):
return (self.height == 0)
def insert(self, key):
tree = self.node
newnode = Node(key)
if tree == None:
self.node = newnode
self.node.left = AVLTree()
self.node.right = AVLTree()
debug("Inserted key [" + str(key) + "]")
elif key < tree.key:
self.node.left.insert(key)
elif key > tree.key:
self.node.right.insert(key)
else:
debug("Key [" + str(key) + "] already in tree.")
self.rebalance()
def rebalance(self):
'''
Rebalance a particular (sub)tree
'''
# key inserted. Let's check if we're balanced
self.update_heights(False)
self.update_balances(False)
while self.balance < -1 or self.balance > 1:
if self.balance > 1:
if self.node.left.balance < 0:
self.node.left.lrotate() # we're in case II
self.update_heights()
self.update_balances()
self.rrotate()
self.update_heights()
self.update_balances()
if self.balance < -1:
if self.node.right.balance > 0:
self.node.right.rrotate() # we're in case III
self.update_heights()
self.update_balances()
self.lrotate()
self.update_heights()
self.update_balances()
def rrotate(self):
        # Rotate right pivoting on self
debug ('Rotating ' + str(self.node.key) + ' right')
A = self.node
B = self.node.left.node
T = B.right.node
self.node = B
B.right.node = A
A.left.node = T
def lrotate(self):
# Rotate left pivoting on self
debug ('Rotating ' + str(self.node.key) + ' left')
A = self.node
B = self.node.right.node
T = B.left.node
self.node = B
B.left.node = A
A.right.node = T
def update_heights(self, recurse=True):
if not self.node == None:
if recurse:
if self.node.left != None:
self.node.left.update_heights()
if self.node.right != None:
self.node.right.update_heights()
self.height = max(self.node.left.height,
self.node.right.height) + 1
else:
self.height = -1
def update_balances(self, recurse=True):
if not self.node == None:
if recurse:
if self.node.left != None:
self.node.left.update_balances()
if self.node.right != None:
self.node.right.update_balances()
self.balance = self.node.left.height - self.node.right.height
else:
self.balance = 0
def delete(self, key):
# debug("Trying to delete at node: " + str(self.node.key))
if self.node != None:
if self.node.key == key:
debug("Deleting ... " + str(key))
if self.node.left.node == None and self.node.right.node == None:
self.node = None # leaves can be killed at will
# if only one subtree, take that
elif self.node.left.node == None:
self.node = self.node.right.node
elif self.node.right.node == None:
self.node = self.node.left.node
# worst-case: both children present. Find logical successor
else:
replacement = self.logical_successor(self.node)
if replacement != None: # sanity check
debug("Found replacement for " + str(key) + " -> " + str(replacement.key))
self.node.key = replacement.key
# replaced. Now delete the key from right child
self.node.right.delete(replacement.key)
self.rebalance()
return
elif key < self.node.key:
self.node.left.delete(key)
elif key > self.node.key:
self.node.right.delete(key)
self.rebalance()
else:
return
def logical_predecessor(self, node):
'''
Find the biggest valued node in LEFT child
'''
node = node.left.node
if node != None:
while node.right != None:
if node.right.node == None:
return node
else:
node = node.right.node
return node
def logical_successor(self, node):
'''
        Find the smallest valued node in RIGHT child
'''
node = node.right.node
if node != None: # just a sanity check
while node.left != None:
debug("LS: traversing: " + str(node.key))
if node.left.node == None:
return node
else:
node = node.left.node
return node
def check_balanced(self):
if self == None or self.node == None:
return True
# We always need to make sure we are balanced
self.update_heights()
self.update_balances()
return ((abs(self.balance) < 2) and self.node.left.check_balanced() and self.node.right.check_balanced())
def inorder_traverse(self):
if self.node == None:
return []
inlist = []
l = self.node.left.inorder_traverse()
for i in l:
inlist.append(i)
inlist.append(self.node.key)
l = self.node.right.inorder_traverse()
for i in l:
inlist.append(i)
return inlist
def display(self, level=0, pref=''):
'''
Display the whole tree. Uses recursive def.
TODO: create a better display using breadth-first search
'''
self.update_heights() # Must update heights before balances
self.update_balances()
if(self.node != None):
print('-' * level * 2, pref, self.node.key, "[" + str(self.height) + ":" + str(self.balance) + "]", 'L' if self.is_leaf() else ' ')
if self.node.left != None:
self.node.left.display(level + 1, '<')
            if self.node.right != None:
self.node.right.display(level + 1, '>')
# Usage example
if __name__ == "__main__":
a = AVLTree()
print("----- Inserting -------")
#inlist = [5, 2, 12, -4, 3, 21, 19, 25]
inlist = [7, 5, 2, 6, 3, 4, 1, 8, 9, 0]
for i in inlist:
a.insert(i)
a.display()
print("----- Deleting -------")
a.delete(3)
a.delete(4)
# a.delete(5)
a.display()
print()
print("Input :", inlist)
print("deleting ... ", 3)
print("deleting ... ", 4)
print("Inorder traversal:", a.inorder_traverse())
| AVLTree |
python | PyCQA__pylint | doc/data/messages/p/property-with-parameters/bad.py | {
"start": 0,
"end": 96
} | class ____:
@property
def bore(self, depth): # [property-with-parameters]
pass
| Worm |
python | huggingface__transformers | src/transformers/models/x_clip/modeling_x_clip.py | {
"start": 24006,
"end": 27908
} | class ____(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`XCLIPEncoderLayer`].
Args:
config: XCLIPConfig
"""
def __init__(self, config: XCLIPConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([XCLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
| XCLIPEncoder |
python | python-pillow__Pillow | Tests/test_decompression_bomb.py | {
"start": 183,
"end": 2728
} | class ____:
def test_no_warning_small_file(self) -> None:
# Implicit assert: no warning.
# A warning would cause a failure.
with Image.open(TEST_FILE):
pass
def test_no_warning_no_limit(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Arrange
# Turn limit off
monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", None)
assert Image.MAX_IMAGE_PIXELS is None
# Act / Assert
# Implicit assert: no warning.
# A warning would cause a failure.
with Image.open(TEST_FILE):
pass
def test_warning(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Set limit to trigger warning on the test file
monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", 128 * 128 - 1)
assert Image.MAX_IMAGE_PIXELS == 128 * 128 - 1
with pytest.warns(Image.DecompressionBombWarning):
with Image.open(TEST_FILE):
pass
def test_exception(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Set limit to trigger exception on the test file
monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", 64 * 128 - 1)
assert Image.MAX_IMAGE_PIXELS == 64 * 128 - 1
with pytest.raises(Image.DecompressionBombError):
with Image.open(TEST_FILE):
pass
def test_exception_ico(self) -> None:
with pytest.raises(Image.DecompressionBombError):
with Image.open("Tests/images/decompression_bomb.ico"):
pass
def test_exception_gif(self) -> None:
with pytest.raises(Image.DecompressionBombError):
with Image.open("Tests/images/decompression_bomb.gif"):
pass
def test_exception_gif_extents(self) -> None:
with Image.open("Tests/images/decompression_bomb_extents.gif") as im:
with pytest.raises(Image.DecompressionBombError):
im.seek(1)
def test_exception_gif_zero_width(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Set limit to trigger exception on the test file
monkeypatch.setattr(Image, "MAX_IMAGE_PIXELS", 4 * 64 * 128)
assert Image.MAX_IMAGE_PIXELS == 4 * 64 * 128
with pytest.raises(Image.DecompressionBombError):
with Image.open("Tests/images/zero_width.gif"):
pass
def test_exception_bmp(self) -> None:
with pytest.raises(Image.DecompressionBombError):
with Image.open("Tests/images/bmp/b/reallybig.bmp"):
pass
| TestDecompressionBomb |
python | google__pytype | pytype/abstract/_singletons.py | {
"start": 9038,
"end": 9449
} | class ____(Empty):
"""Assigned to variables that have del called on them."""
def __init__(self, line: int, ctx: "context.Context") -> None:
super().__init__(ctx)
self.line = line
self.name = "deleted"
def get_special_attribute(
self, node: cfg.CFGNode, name: str, valself: cfg.Variable
) -> cfg.Variable:
del name, valself # unused
return self.ctx.new_unsolvable(node)
| Deleted |
python | astropy__astropy | astropy/io/votable/converters.py | {
"start": 7522,
"end": 11692
} | class ____(Converter):
"""
Handles the char datatype. (7-bit unsigned characters).
Missing values are not handled for string or unicode types.
"""
default = _empty_bytes
def __init__(self, field, config=None, pos=None):
if config is None:
config = {}
Converter.__init__(self, field, config, pos)
self.field_name = field.name
if field.arraysize is None:
vo_warn(W47, (), config, pos)
field.arraysize = "1"
if field.arraysize == "*":
self.format = "O"
self.binparse = self._binparse_var
self.binoutput = self._binoutput_var
self.arraysize = "*"
else:
# Check if this is a bounded variable-length field
is_variable = field.arraysize.endswith("*")
numeric_part = field.arraysize.removesuffix("*")
try:
self.arraysize = int(numeric_part)
except ValueError:
vo_raise(E01, (numeric_part, "char", field.ID), config)
self.format = f"U{self.arraysize:d}"
# For bounded variable-length fields use the variable methods
if is_variable:
self.binparse = self._binparse_var
self.binoutput = self._binoutput_var
else:
self.binparse = self._binparse_fixed
self.binoutput = self._binoutput_fixed
self._struct_format = f">{self.arraysize:d}s"
def supports_empty_values(self, config):
return True
def parse(self, value, config=None, pos=None):
if self.arraysize != "*" and len(value) > self.arraysize:
vo_warn(W46, ("char", self.arraysize), config, pos)
# Warn about non-ascii characters if warnings are enabled.
try:
value.encode("ascii")
except UnicodeEncodeError:
vo_warn(W55, (self.field_name, value), config, pos)
return value, False
def output(self, value, mask):
if mask:
return ""
# The output methods for Char assume that value is either str or bytes.
# This method needs to return a str, but needs to warn if the str contains
# non-ASCII characters.
try:
if isinstance(value, str):
value.encode("ascii")
else:
# Check for non-ASCII chars in the bytes object.
value = value.decode("ascii")
except (ValueError, UnicodeEncodeError):
warn_or_raise(E24, UnicodeEncodeError, (value, self.field_name))
finally:
if isinstance(value, bytes):
# Convert the bytes to str regardless of non-ASCII chars.
value = value.decode("utf-8")
return xml_escape_cdata(value)
def _binparse_var(self, read):
length = self._parse_length(read)
if self.arraysize != "*" and length > self.arraysize:
vo_warn(W46, ("char", self.arraysize), None, None)
return read(length).decode("ascii"), False
def _binparse_fixed(self, read):
s = struct.unpack(self._struct_format, read(self.arraysize))[0]
end = s.find(_zero_byte)
s = s.decode("ascii")
if end != -1:
return s[:end], False
return s, False
def _binoutput_var(self, value, mask):
if mask or value is None or value == "":
return _zero_int
if isinstance(value, str):
try:
value = value.encode("ascii")
except ValueError:
vo_raise(E24, (value, self.field_name))
if self.arraysize != "*" and len(value) > self.arraysize:
vo_warn(W46, ("char", self.arraysize), None, None)
return self._write_length(len(value)) + value
def _binoutput_fixed(self, value, mask):
if mask:
value = _empty_bytes
elif isinstance(value, str):
try:
value = value.encode("ascii")
except ValueError:
vo_raise(E24, (value, self.field_name))
return struct.pack(self._struct_format, value)
| Char |
python | pyca__cryptography | tests/hazmat/primitives/decrepit/test_rc2.py | {
"start": 653,
"end": 958
} | class ____:
test_kat = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "RC2"),
[
"rc2-cbc.txt",
],
lambda key, **kwargs: RC2(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),
)
| TestRC2ModeCBC |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 52658,
"end": 53002
} | class ____(sgqlc.types.Enum):
"""The possible roles within an organization for its members.
Enumeration Choices:
* `ADMIN`: The user is an administrator of the organization.
* `MEMBER`: The user is a member of the organization.
"""
__schema__ = github_schema
__choices__ = ("ADMIN", "MEMBER")
| OrganizationMemberRole |
python | pandas-dev__pandas | pandas/core/computation/parsing.py | {
"start": 4002,
"end": 7708
} | class ____(Enum):
DEFAULT = 0
IN_BACKTICK = 1
IN_SINGLE_QUOTE = 2
IN_DOUBLE_QUOTE = 3
def _split_by_backtick(s: str) -> list[tuple[bool, str]]:
"""
Splits a str into substrings along backtick characters (`).
Disregards backticks inside quotes.
Parameters
----------
s : str
The Python source code string.
Returns
-------
substrings: list[tuple[bool, str]]
List of tuples, where each tuple has two elements:
The first is a boolean indicating if the substring is backtick-quoted.
The second is the actual substring.
"""
substrings = []
substr: list[str] = [] # Will join into a string before adding to `substrings`
i = 0
parse_state = ParseState.DEFAULT
while i < len(s):
char = s[i]
match char:
case "`":
# start of a backtick-quoted string
if parse_state == ParseState.DEFAULT:
if substr:
substrings.append((False, "".join(substr)))
substr = [char]
i += 1
parse_state = ParseState.IN_BACKTICK
continue
elif parse_state == ParseState.IN_BACKTICK:
# escaped backtick inside a backtick-quoted string
next_char = s[i + 1] if (i != len(s) - 1) else None
if next_char == "`":
substr.append(char)
substr.append(next_char)
i += 2
continue
# end of the backtick-quoted string
else:
substr.append(char)
substrings.append((True, "".join(substr)))
substr = []
i += 1
parse_state = ParseState.DEFAULT
continue
case "'":
# start of a single-quoted string
if parse_state == ParseState.DEFAULT:
parse_state = ParseState.IN_SINGLE_QUOTE
# end of a single-quoted string
elif (parse_state == ParseState.IN_SINGLE_QUOTE) and (s[i - 1] != "\\"):
parse_state = ParseState.DEFAULT
case '"':
# start of a double-quoted string
if parse_state == ParseState.DEFAULT:
parse_state = ParseState.IN_DOUBLE_QUOTE
# end of a double-quoted string
elif (parse_state == ParseState.IN_DOUBLE_QUOTE) and (s[i - 1] != "\\"):
parse_state = ParseState.DEFAULT
substr.append(char)
i += 1
if substr:
substrings.append((False, "".join(substr)))
return substrings
def tokenize_string(source: str) -> Iterator[tuple[int, str]]:
"""
Tokenize a Python source code string.
Parameters
----------
source : str
The Python source code string.
Returns
-------
tok_generator : Iterator[Tuple[int, str]]
        An iterator yielding all tokens with only toknum and tokval (Tuple[int, str]).
"""
# GH 59285
# Escape characters, including backticks
source = "".join(
(
create_valid_python_identifier(substring[1:-1])
if is_backtick_quoted
else substring
)
for is_backtick_quoted, substring in _split_by_backtick(source)
)
line_reader = StringIO(source).readline
token_generator = tokenize.generate_tokens(line_reader)
for toknum, tokval, _, _, _ in token_generator:
yield toknum, tokval
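# Illustrative sketch, not part of pandas: a quick check of how the splitter above
# behaves. Backtick-quoted names come back as (True, ...) chunks, while a quoted
# literal keeps any backtick it contains; only the example query string is assumed.
def _demo_split_by_backtick() -> None:
    parts = _split_by_backtick("`col a` > 1 and b == 'tick ` inside'")
    assert parts == [(True, "`col a`"), (False, " > 1 and b == 'tick ` inside'")]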
| ParseState |
python | sympy__sympy | sympy/tensor/tensor.py | {
"start": 45757,
"end": 49066
} | class ____(Basic):
"""
Represents a tensor index
Parameters
==========
name : name of the index, or ``True`` if you want it to be automatically assigned
tensor_index_type : ``TensorIndexType`` of the index
is_up : flag for contravariant index (is_up=True by default)
Attributes
==========
``name``
``tensor_index_type``
``is_up``
Notes
=====
Tensor indices are contracted with the Einstein summation convention.
An index can be in contravariant or in covariant form; in the latter
case it is represented prepending a ``-`` to the index name. Adding
``-`` to a covariant (is_up=False) index makes it contravariant.
Dummy indices have a name with head given by
    ``tensor_index_type.dummy_name`` with underscore and a number.
Similar to ``symbols`` multiple contravariant indices can be created
at once using ``tensor_indices(s, typ)``, where ``s`` is a string
of names.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, TensorIndex, TensorHead, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> mu = TensorIndex('mu', Lorentz, is_up=False)
>>> nu, rho = tensor_indices('nu, rho', Lorentz)
>>> A = TensorHead('A', [Lorentz, Lorentz])
>>> A(mu, nu)
A(-mu, nu)
>>> A(-mu, -rho)
A(mu, -rho)
>>> A(mu, -mu)
A(-L_0, L_0)
"""
def __new__(cls, name, tensor_index_type, is_up=True):
if isinstance(name, str):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
elif name is True:
name = "_i{}".format(len(tensor_index_type._autogenerated))
name_symbol = Symbol(name)
tensor_index_type._autogenerated.append(name_symbol)
else:
raise ValueError("invalid name")
is_up = sympify(is_up)
return Basic.__new__(cls, name_symbol, tensor_index_type, is_up)
@property
def name(self):
return self.args[0].name
@property
def tensor_index_type(self):
return self.args[1]
@property
def is_up(self):
return self.args[2]
def _print(self):
s = self.name
if not self.is_up:
s = f'-{s}'
return s
def __lt__(self, other):
return ((self.tensor_index_type, self.name) <
(other.tensor_index_type, other.name))
def __neg__(self):
t1 = TensorIndex(self.name, self.tensor_index_type,
(not self.is_up))
return t1
def tensor_indices(s, typ):
"""
Returns list of tensor indices given their names and their types.
Parameters
==========
s : string of comma separated names of indices
typ : ``TensorIndexType`` of the indices
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
"""
if isinstance(s, str):
a = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
tilist = [TensorIndex(i, typ) for i in a]
if len(tilist) == 1:
return tilist[0]
return tilist
| TensorIndex |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_workflow.py | {
"start": 1903,
"end": 16144
} | class ____(BaseWorkflowTest):
def setUp(self) -> None:
(
self.workflow,
self.detector,
self.detector_workflow,
self.workflow_triggers,
) = self.create_detector_and_workflow()
self.error_workflow, self.error_detector, self.detector_workflow_error, _ = (
self.create_detector_and_workflow(
name_prefix="error",
workflow_triggers=self.create_data_condition_group(),
detector_type=ErrorGroupType.slug,
)
)
self.group, self.event, self.group_event = self.create_group_event()
self.event_data = WorkflowEventData(
event=self.group_event,
group=self.group,
group_state=GroupState(
id=1, is_new=False, is_regression=True, is_new_group_environment=False
),
)
self.issue_stream_detector = self.create_detector(
project=self.project,
type=IssueStreamGroupType.slug,
)
self.batch_client = DelayedWorkflowClient()
def test_skips_disabled_workflows(self) -> None:
workflow_triggers = self.create_data_condition_group()
self.create_data_condition(
condition_group=workflow_triggers,
type=Condition.EVENT_SEEN_COUNT,
comparison=1,
condition_result=True,
)
workflow = self.create_workflow(
name="disabled_workflow", when_condition_group=workflow_triggers, enabled=False
)
self.create_detector_workflow(
detector=self.error_detector,
workflow=workflow,
)
result = process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
assert result.data.triggered_workflows == {self.error_workflow}
def test_error_event(self) -> None:
result = process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
assert result.data.triggered_workflows == {self.error_workflow}
@patch("sentry.workflow_engine.processors.action.fire_actions")
def test_process_workflows_event(self, mock_fire_actions: MagicMock) -> None:
# Create an action so fire_actions will be called
self.create_workflow_action(workflow=self.error_workflow)
process_workflows_event(
project_id=self.project.id,
event_id=self.event.event_id,
group_id=self.group.id,
occurrence_id=self.group_event.occurrence_id,
group_state={
"id": 1,
"is_new": False,
"is_regression": True,
"is_new_group_environment": False,
},
has_reappeared=False,
has_escalated=False,
)
mock_fire_actions.assert_called_once()
@patch("sentry.workflow_engine.processors.action.filter_recently_fired_workflow_actions")
def test_populate_workflow_env_for_filters(self, mock_filter: MagicMock) -> None:
# this should not pass because the environment is not None
self.error_workflow.update(environment=self.group_event.get_environment())
error_workflow_filters = self.create_data_condition_group(
logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT
)
self.create_data_condition(
condition_group=error_workflow_filters,
type=Condition.FIRST_SEEN_EVENT,
comparison=True,
condition_result=True,
)
self.create_workflow_data_condition_group(
workflow=self.error_workflow, condition_group=error_workflow_filters
)
workflow_triggers = self.create_data_condition_group(
logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT
)
workflow_filters = self.create_data_condition_group(
logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT
)
# this should pass because the environment is None
self.create_data_condition(
condition_group=workflow_filters,
type=Condition.FIRST_SEEN_EVENT,
comparison=True,
condition_result=True,
)
workflow = self.create_workflow(
name="testy",
when_condition_group=workflow_triggers,
)
self.create_detector_workflow(
detector=self.error_detector,
workflow=workflow,
)
self.create_workflow_data_condition_group(
workflow=workflow, condition_group=workflow_filters
)
assert self.event_data.group_state
self.event_data.group_state["is_new"] = True
result = process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
mock_filter.assert_called_with({workflow_filters}, self.event_data)
assert result.tainted is False
def test_same_environment_only(self) -> None:
env = self.create_environment(project=self.project)
other_env = self.create_environment(project=self.project)
self.group, self.event, self.group_event = self.create_group_event(environment=env.name)
self.event_data = WorkflowEventData(
event=self.group_event,
group=self.group,
group_state=GroupState(
id=1, is_new=False, is_regression=True, is_new_group_environment=False
),
)
# only processes workflows with the same env or no env specified
self.error_workflow.update(environment=None)
dcg = self.create_data_condition_group()
non_matching_env_workflow = self.create_workflow(
when_condition_group=dcg, environment=self.create_environment()
)
self.create_detector_workflow(
detector=self.error_detector,
workflow=non_matching_env_workflow,
)
matching_dcg = self.create_data_condition_group()
matching_env_workflow = self.create_workflow(
when_condition_group=matching_dcg,
environment=env,
)
self.create_detector_workflow(
detector=self.error_detector,
workflow=matching_env_workflow,
)
mismatched_dcg = self.create_data_condition_group()
mismatched_env_workflow = self.create_workflow(
when_condition_group=mismatched_dcg, environment=other_env
)
self.create_detector_workflow(
detector=self.error_detector,
workflow=mismatched_env_workflow,
)
result = process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
assert result.data.triggered_workflows == {self.error_workflow, matching_env_workflow}
def test_issue_occurrence_event(self) -> None:
issue_occurrence = self.build_occurrence(evidence_data={"detector_id": self.detector.id})
self.group_event.occurrence = issue_occurrence
result = process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
assert result.data.triggered_workflows == {self.workflow}
def test_regressed_event(self) -> None:
dcg = self.create_data_condition_group()
self.create_data_condition(
type=Condition.REGRESSION_EVENT,
comparison=True,
condition_result=True,
condition_group=dcg,
)
workflow = self.create_workflow(when_condition_group=dcg)
self.create_detector_workflow(
detector=self.error_detector,
workflow=workflow,
)
result = process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
assert result.data.triggered_workflows == {self.error_workflow, workflow}
@patch("sentry.utils.metrics.incr")
@patch("sentry.workflow_engine.processors.detector.logger")
def test_no_detector(self, mock_logger: MagicMock, mock_incr: MagicMock) -> None:
self.issue_stream_detector.delete()
self.group_event.occurrence = self.build_occurrence(evidence_data={})
result = process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
assert result.msg == "No Detectors associated with the issue were found"
mock_incr.assert_called_with("workflow_engine.detectors.error") # called twice
mock_logger.exception.assert_called_with(
"Detector not found for event",
extra={
"event_id": self.event.event_id,
"group_id": self.group_event.group_id,
"detector_id": None,
},
) # exception is called twice for both missing detectors
@patch("sentry.utils.metrics.incr")
@patch("sentry.workflow_engine.processors.detector.logger")
def test_no_issue_stream_detector(self, mock_logger: MagicMock, mock_incr: MagicMock) -> None:
self.issue_stream_detector.delete()
process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
mock_incr.assert_any_call("workflow_engine.detectors.error")
mock_logger.exception.assert_called_once_with(
"Issue stream detector not found for event",
extra={
"project_id": self.group.project_id,
"group_id": self.group_event.group_id,
},
)
@patch("sentry.utils.metrics.incr")
@patch("sentry.workflow_engine.processors.workflow.logger")
def test_no_environment(self, mock_logger: MagicMock, mock_incr: MagicMock) -> None:
Environment.objects.all().delete()
cache.clear()
result = process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
assert not result.data.triggered_workflows
assert result.msg == "Environment for event not found"
mock_incr.assert_called_once_with(
"workflow_engine.process_workflows.error", 1, tags={"detector_type": "error"}
)
mock_logger.exception.assert_called_once_with(
"Missing environment for event",
extra={"event_id": self.event.event_id},
)
@patch("sentry.utils.metrics.incr")
@patch("sentry.workflow_engine.processors.detector.logger")
def test_no_metrics_triggered(self, mock_logger: MagicMock, mock_incr: MagicMock) -> None:
self.issue_stream_detector.delete()
self.error_detector.delete()
process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
mock_incr.assert_called_with("workflow_engine.detectors.error") # called twice
mock_logger.exception.assert_called() # called twice
@patch("sentry.utils.metrics.incr")
def test_metrics_with_workflows(self, mock_incr: MagicMock) -> None:
process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
mock_incr.assert_any_call(
"workflow_engine.process_workflows",
1,
tags={"detector_type": self.error_detector.type},
)
@patch("sentry.utils.metrics.incr")
def test_metrics_triggered_workflows(self, mock_incr: MagicMock) -> None:
process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
mock_incr.assert_any_call(
"workflow_engine.process_workflows.triggered_workflows",
1,
tags={"detector_type": self.error_detector.type},
)
@with_feature("organizations:workflow-engine-trigger-actions")
@patch("sentry.workflow_engine.processors.action.trigger_action.apply_async")
def test_workflow_fire_history_with_action_deduping(
self, mock_trigger_action: MagicMock
) -> None:
"""Fire a single action, but record that it was fired for multiple workflows"""
self.action_group, self.action = self.create_workflow_action(workflow=self.error_workflow)
error_workflow_2 = self.create_workflow(
name="error_workflow_2",
when_condition_group=self.create_data_condition_group(),
)
self.create_detector_workflow(
detector=self.error_detector,
workflow=error_workflow_2,
)
self.action_group_2, self.action_2 = self.create_workflow_action(workflow=error_workflow_2)
error_workflow_3 = self.create_workflow(
name="error_workflow_3",
when_condition_group=self.create_data_condition_group(),
)
self.create_detector_workflow(
detector=self.error_detector,
workflow=error_workflow_3,
)
self.action_group_3, self.action_3 = self.create_workflow_action(workflow=error_workflow_3)
process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
assert WorkflowFireHistory.objects.count() == 3
assert mock_trigger_action.call_count == 3
def test_uses_issue_stream_workflows(self) -> None:
issue_occurrence = self.build_occurrence()
self.group_event.occurrence = issue_occurrence
self.group.update(type=issue_occurrence.type.type_id)
self.error_workflow.delete()
issue_stream_workflow, _, _, _ = self.create_detector_and_workflow(
name_prefix="issue_stream",
workflow_triggers=self.create_data_condition_group(),
detector_type=IssueStreamGroupType.slug,
)
result = process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
assert result.tainted is True
assert result.data.triggered_workflows == {issue_stream_workflow}
assert result.data.triggered_actions is not None
assert len(result.data.triggered_actions) == 0
def test_multiple_detectors(self) -> None:
issue_stream_workflow, issue_stream_detector, _, _ = self.create_detector_and_workflow(
name_prefix="issue_stream",
workflow_triggers=self.create_data_condition_group(),
detector_type=IssueStreamGroupType.slug,
)
self.create_detector_workflow(
detector=issue_stream_detector,
workflow=self.error_workflow,
)
result = process_workflows(self.batch_client, self.event_data, FROZEN_TIME)
assert result.data.triggered_workflows == {self.error_workflow, issue_stream_workflow}
assert result.data.associated_detector == self.error_detector
| TestProcessWorkflows |
python | pypa__virtualenv | src/virtualenv/create/via_global_ref/venv.py | {
"start": 536,
"end": 3688
} | class ____(ViaGlobalRefApi):
def __init__(self, options, interpreter) -> None:
self.describe = options.describe
super().__init__(options, interpreter)
current = PythonInfo.current()
self.can_be_inline = interpreter is current and interpreter.executable == interpreter.system_executable
self._context = None
def _args(self):
return super()._args() + ([("describe", self.describe.__class__.__name__)] if self.describe else [])
@classmethod
def can_create(cls, interpreter):
if interpreter.has_venv:
if CPython3macOsBrew.can_describe(interpreter):
return CPython3macOsBrew.setup_meta(interpreter)
meta = ViaGlobalRefMeta()
if interpreter.platform == "win32":
meta = handle_store_python(meta, interpreter)
return meta
return None
def create(self):
if self.can_be_inline:
self.create_inline()
else:
self.create_via_sub_process()
for lib in self.libs:
ensure_dir(lib)
super().create()
self.executables_for_win_pypy_less_v37()
def executables_for_win_pypy_less_v37(self):
"""
PyPy <= 3.6 (v7.3.3) for Windows contains only pypy3.exe and pypy3w.exe
Venv does not handle non-existing exe sources, e.g. python.exe, so this
patch does it.
""" # noqa: D205
creator = self.describe
if isinstance(creator, Pypy3Windows) and creator.less_v37:
for exe in creator.executables(self.interpreter):
exe.run(creator, self.symlinks)
def create_inline(self):
from venv import EnvBuilder # noqa: PLC0415
builder = EnvBuilder(
system_site_packages=self.enable_system_site_package,
clear=False,
symlinks=self.symlinks,
with_pip=False,
)
builder.create(str(self.dest))
def create_via_sub_process(self):
cmd = self.get_host_create_cmd()
LOGGER.info("using host built-in venv to create via %s", " ".join(cmd))
code, out, err = run_cmd(cmd)
if code != 0:
raise ProcessCallFailedError(code, out, err, cmd)
def get_host_create_cmd(self):
cmd = [self.interpreter.system_executable, "-m", "venv", "--without-pip"]
if self.enable_system_site_package:
cmd.append("--system-site-packages")
cmd.extend(("--symlinks" if self.symlinks else "--copies", str(self.dest)))
return cmd
def set_pyenv_cfg(self):
# prefer venv options over ours, but keep our extra
venv_content = copy(self.pyenv_cfg.refresh())
super().set_pyenv_cfg()
self.pyenv_cfg.update(venv_content)
def __getattribute__(self, item):
describe = object.__getattribute__(self, "describe")
if describe is not None and hasattr(describe, item):
element = getattr(describe, item)
if not callable(element) or item == "script":
return element
return object.__getattribute__(self, item)
__all__ = [
"Venv",
]
| Venv |
python | readthedocs__readthedocs.org | readthedocs/embed/tests/test_api.py | {
"start": 1387,
"end": 1457
} | class ____(BaseTestEmbedAPI):
pass
@pytest.mark.proxito
| TestEmbedAPI |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride1.py | {
"start": 12185,
"end": 12228
} | class ____:
def a(self) -> int: ...
| Base4 |
python | wandb__wandb | wandb/vendor/pygments/lexers/diff.py | {
"start": 3270,
"end": 4873
} | class ____(RegexLexer):
"""
A `wdiff <https://www.gnu.org/software/wdiff/>`_ lexer.
Note that:
* only to normal output (without option like -l).
* if target files of wdiff contain "[-", "-]", "{+", "+}",
especially they are unbalanced, this lexer will get confusing.
.. versionadded:: 2.2
"""
name = 'WDiff'
aliases = ['wdiff']
filenames = ['*.wdiff']
mimetypes = []
flags = re.MULTILINE | re.DOTALL
    # We can only assume that "[-" appearing after "[-" and before "-]" is `nested`,
    # for instance in wdiff-of-wdiff outputs. We have no way to tell whether these
    # markers come from wdiff output or from the original text.
ins_op = r"\{\+"
ins_cl = r"\+\}"
del_op = r"\[\-"
del_cl = r"\-\]"
normal = r'[^{}[\]+-]+' # for performance
tokens = {
'root': [
(ins_op, Generic.Inserted, 'inserted'),
(del_op, Generic.Deleted, 'deleted'),
(normal, Text),
(r'.', Text),
],
'inserted': [
(ins_op, Generic.Inserted, '#push'),
(del_op, Generic.Inserted, '#push'),
(del_cl, Generic.Inserted, '#pop'),
(ins_cl, Generic.Inserted, '#pop'),
(normal, Generic.Inserted),
(r'.', Generic.Inserted),
],
'deleted': [
(del_op, Generic.Deleted, '#push'),
(ins_op, Generic.Deleted, '#push'),
(ins_cl, Generic.Deleted, '#pop'),
(del_cl, Generic.Deleted, '#pop'),
(normal, Generic.Deleted),
(r'.', Generic.Deleted),
],
}
| WDiffLexer |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-airbyte-zendesk-support/llama_index/readers/airbyte_zendesk_support/base.py | {
"start": 126,
"end": 753
} | class ____(AirbyteCDKReader):
"""
AirbyteZendeskSupportReader reader.
Retrieve documents from ZendeskSupport
Args:
config: The config object for the zendesk_support source.
"""
def __init__(
self,
config: Mapping[str, Any],
record_handler: Optional[RecordHandler] = None,
) -> None:
"""Initialize with parameters."""
import source_zendesk_support
super().__init__(
source_class=source_zendesk_support.SourceZendeskSupport,
config=config,
record_handler=record_handler,
)
| AirbyteZendeskSupportReader |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_any.py | {
"start": 29490,
"end": 31024
} | class ____(ipaddress.IPv6Interface):
def __str__(self):
return super().__str__() + '_subclassed'
@pytest.mark.parametrize(
('value', 'expected_json'),
[
(ipaddress.IPv4Address('192.168.1.1'), '192.168.1.1'),
(ipaddress.IPv6Address('2001:0db8:85a3:0000:0000:8a2e:0370:7334'), '2001:db8:85a3::8a2e:370:7334'),
(SubIpV4('192.168.1.1'), '192.168.1.1_subclassed'),
(SubIpV6('2001:0db8:85a3:0000:0000:8a2e:0370:7334'), '2001:db8:85a3::8a2e:370:7334_subclassed'),
(ipaddress.IPv4Network('192.168.1.0/24'), '192.168.1.0/24'),
(ipaddress.IPv6Network('2001:0db8:85a3:0000:0000:8a2e:0370:7334'), '2001:db8:85a3::8a2e:370:7334/128'),
(SubNetV4('192.168.1.0/24'), '192.168.1.0/24_subclassed'),
(SubNetV6('2001:0db8:85a3:0000:0000:8a2e:0370:7334'), '2001:db8:85a3::8a2e:370:7334/128_subclassed'),
(ipaddress.IPv4Interface('192.168.1.1/24'), '192.168.1.1/24'),
(ipaddress.IPv6Interface('2001:0db8:85a3:0000:0000:8a2e:0370:7334'), '2001:db8:85a3::8a2e:370:7334/128'),
(SubInterfaceV4('192.168.1.1/24'), '192.168.1.1/24_subclassed'),
(SubInterfaceV6('2001:0db8:85a3:0000:0000:8a2e:0370:7334'), '2001:db8:85a3::8a2e:370:7334/128_subclassed'),
],
)
def test_ipaddress_type_inference(any_serializer, value, expected_json):
assert any_serializer.to_python(value) == value
assert any_serializer.to_python(value, mode='json') == expected_json
assert any_serializer.to_json(value) == f'"{expected_json}"'.encode()
| SubInterfaceV6 |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 161345,
"end": 162442
} | class ____(Operation):
def __init__(self, dtype=None, *, name=None):
super().__init__(name=name)
self.dtype = None if dtype is None else backend.standardize_dtype(dtype)
def call(self, x):
return backend.numpy.zeros_like(x, dtype=self.dtype)
def compute_output_spec(self, x, dtype=None):
dtype = (
backend.standardize_dtype(x.dtype)
if self.dtype is None
else self.dtype
)
sparse = getattr(x, "sparse", False)
return KerasTensor(x.shape, dtype=dtype, sparse=sparse)
@keras_export(
[
"keras.ops.zeros_like",
"keras.ops.numpy.zeros_like",
]
)
def zeros_like(x, dtype=None):
"""Return a tensor of zeros with the same shape and type as `x`.
Args:
x: Input tensor.
dtype: Overrides the data type of the result.
Returns:
A tensor of zeros with the same shape and type as `x`.
"""
if any_symbolic_tensors((x,)):
return ZerosLike(dtype=dtype).symbolic_call(x)
return backend.numpy.zeros_like(x, dtype=dtype)
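# Illustrative sketch, not part of the module: zeros_like mirrors the input's shape
# (and dtype unless overridden). Assumes only the public keras.ops namespace.
def _zeros_like_example():
    from keras import ops  # local import, illustration only
    x = ops.ones((2, 3), dtype="float32")
    return ops.zeros_like(x).shape  # -> (2, 3)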
| ZerosLike |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 681631,
"end": 682389
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for IssueTimelineItem."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("IssueTimelineItemEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("IssueTimelineItem"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| IssueTimelineConnection |
python | django__django | tests/async/tests.py | {
"start": 493,
"end": 769
} | class ____(SimpleTestCase):
def test_caches_local(self):
@async_to_sync
async def async_cache():
return caches[DEFAULT_CACHE_ALIAS]
cache_1 = async_cache()
cache_2 = async_cache()
self.assertIs(cache_1, cache_2)
| CacheTest |
python | scrapy__scrapy | tests/test_spider.py | {
"start": 8318,
"end": 18662
} | class ____(TestSpider):
test_body = b"""<html><head><title>Page title</title></head>
<body>
<p><a href="item/12.html">Item 12</a></p>
<div class='links'>
<p><a href="/about.html">About us</a></p>
</div>
<div>
<p><a href="/nofollow.html">This shouldn't be followed</a></p>
</div>
</body></html>"""
spider_class = CrawlSpider
def test_rule_without_link_extractor(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (Rule(),)
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 3
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
"http://example.org/somepage/item/12.html",
"http://example.org/about.html",
"http://example.org/nofollow.html",
]
def test_process_links(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (Rule(LinkExtractor(), process_links="dummy_process_links"),)
def dummy_process_links(self, links):
return links
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 3
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
"http://example.org/somepage/item/12.html",
"http://example.org/about.html",
"http://example.org/nofollow.html",
]
def test_process_links_filter(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (Rule(LinkExtractor(), process_links="filter_process_links"),)
_test_regex = re.compile("nofollow")
def filter_process_links(self, links):
return [link for link in links if not self._test_regex.search(link.url)]
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 2
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
"http://example.org/somepage/item/12.html",
"http://example.org/about.html",
]
def test_process_links_generator(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (Rule(LinkExtractor(), process_links="dummy_process_links"),)
def dummy_process_links(self, links):
yield from links
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 3
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
"http://example.org/somepage/item/12.html",
"http://example.org/about.html",
"http://example.org/nofollow.html",
]
def test_process_request(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
def process_request_change_domain(request, response):
return request.replace(url=request.url.replace(".org", ".com"))
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (
Rule(LinkExtractor(), process_request=process_request_change_domain),
)
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 3
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
"http://example.com/somepage/item/12.html",
"http://example.com/about.html",
"http://example.com/nofollow.html",
]
def test_process_request_with_response(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
def process_request_meta_response_class(request, response):
request.meta["response_class"] = response.__class__.__name__
return request
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (
Rule(
LinkExtractor(), process_request=process_request_meta_response_class
),
)
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 3
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
"http://example.org/somepage/item/12.html",
"http://example.org/about.html",
"http://example.org/nofollow.html",
]
assert [r.meta["response_class"] for r in output] == [
"HtmlResponse",
"HtmlResponse",
"HtmlResponse",
]
def test_process_request_instance_method(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (Rule(LinkExtractor(), process_request="process_request_upper"),)
def process_request_upper(self, request, response):
return request.replace(url=request.url.upper())
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 3
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
safe_url_string("http://EXAMPLE.ORG/SOMEPAGE/ITEM/12.HTML"),
safe_url_string("http://EXAMPLE.ORG/ABOUT.HTML"),
safe_url_string("http://EXAMPLE.ORG/NOFOLLOW.HTML"),
]
def test_process_request_instance_method_with_response(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (
Rule(
LinkExtractor(),
process_request="process_request_meta_response_class",
),
)
def process_request_meta_response_class(self, request, response):
request.meta["response_class"] = response.__class__.__name__
return request
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 3
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
"http://example.org/somepage/item/12.html",
"http://example.org/about.html",
"http://example.org/nofollow.html",
]
assert [r.meta["response_class"] for r in output] == [
"HtmlResponse",
"HtmlResponse",
"HtmlResponse",
]
def test_follow_links_attribute_population(self):
crawler = get_crawler()
spider = self.spider_class.from_crawler(crawler, "example.com")
assert hasattr(spider, "_follow_links")
assert spider._follow_links
settings_dict = {"CRAWLSPIDER_FOLLOW_LINKS": False}
crawler = get_crawler(settings_dict=settings_dict)
spider = self.spider_class.from_crawler(crawler, "example.com")
assert hasattr(spider, "_follow_links")
assert not spider._follow_links
@inlineCallbacks
def test_start_url(self):
class TestSpider(self.spider_class):
name = "test"
start_url = "https://www.example.com"
crawler = get_crawler(TestSpider)
with LogCapture("scrapy.core.engine", propagate=False, level=ERROR) as log:
yield crawler.crawl()
assert "Error while reading start items and requests" in str(log)
assert "did you miss an 's'?" in str(log)
def test_parse_response_use(self):
class _CrawlSpider(CrawlSpider):
name = "test"
start_urls = "https://www.example.com"
_follow_links = False
with warnings.catch_warnings(record=True) as w:
spider = _CrawlSpider()
assert len(w) == 0
spider._parse_response(
TextResponse(spider.start_urls, body=b""), None, None
)
assert len(w) == 1
def test_parse_response_override(self):
class _CrawlSpider(CrawlSpider):
def _parse_response(self, response, callback, cb_kwargs, follow=True):
pass
name = "test"
start_urls = "https://www.example.com"
_follow_links = False
with warnings.catch_warnings(record=True) as w:
assert len(w) == 0
spider = _CrawlSpider()
assert len(w) == 1
spider._parse_response(
TextResponse(spider.start_urls, body=b""), None, None
)
assert len(w) == 1
def test_parse_with_rules(self):
class _CrawlSpider(CrawlSpider):
name = "test"
start_urls = "https://www.example.com"
with warnings.catch_warnings(record=True) as w:
spider = _CrawlSpider()
spider.parse_with_rules(
TextResponse(spider.start_urls, body=b""), None, None
)
assert len(w) == 0
| TestCrawlSpider |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 53668,
"end": 59150
} | class ____(PyObjectType):
# objstruct_cname string Name of PyObject struct
is_builtin_type = 1
has_attributes = 1
base_type = None
module_name = '__builtin__'
require_exact = True
is_exception_type = False
# fields that let it look like an extension type
vtabslot_cname = None
vtabstruct_cname = None
vtabptr_cname = None
typedef_flag = True
is_external = True
decl_type = 'PyObject'
def __init__(self, name, cname, objstruct_cname=None):
self.name = name
self.typeptr_cname = "(%s)" % cname
self.objstruct_cname = objstruct_cname
self.is_gc_simple = name in builtin_types_that_cannot_create_refcycles
self.builtin_trashcan = name in builtin_types_with_trashcan
if name == 'type':
# Special case the type type, as many C API calls (and other
# libraries) actually expect a PyTypeObject* for type arguments.
self.decl_type = objstruct_cname
if is_exception_type_name(name):
self.is_exception_type = True
self.require_exact = False
def set_scope(self, scope):
self.scope = scope
if scope:
scope.parent_type = self
def __str__(self):
return "%s object" % self.name
def __repr__(self):
return "<%s>"% self.typeptr_cname
def default_coerced_ctype(self):
if self.name in ('bytes', 'bytearray'):
return c_char_ptr_type
elif self.name == 'bool':
return c_bint_type
elif self.name == 'float':
return c_double_type
return None
def assignable_from(self, src_type):
if isinstance(src_type, BuiltinObjectType):
return src_type.name == self.name
elif src_type.is_extension_type:
# FIXME: This is an ugly special case that we currently
# keep supporting. It allows users to specify builtin
# types as external extension types, while keeping them
# compatible with the real builtin types. We already
# generate a warning for it. Big TODO: remove!
return (src_type.module_name == '__builtin__' and
src_type.name == self.name)
else:
return True
def typeobj_is_available(self):
return True
def attributes_known(self):
return True
def subtype_of(self, type):
return type.is_pyobject and type.assignable_from(self)
def type_check_function(self, exact=True):
type_name = self.name
if type_name in _special_type_check_functions:
type_check = _special_type_check_functions[type_name]
elif self.is_exception_type:
type_check = f"__Pyx_PyExc_{type_name}_Check"
else:
type_check = f'Py{type_name.capitalize()}_Check'
if exact and not self.is_exception_type and type_name not in ('bool', 'slice', 'memoryview'):
type_check += 'Exact'
return type_check
def isinstance_code(self, arg):
return '%s(%s)' % (self.type_check_function(exact=False), arg)
def type_test_code(self, scope, arg, allow_none=True, exact=True):
type_check = self.type_check_function(exact=exact)
check = f'likely({type_check}({arg}))'
scope.use_utility_code(UtilityCode.load_cached(
"RaiseUnexpectedTypeError", "ObjectHandling.c"))
if allow_none:
check += f'||(({arg}) == Py_None)'
return check + f' || __Pyx_RaiseUnexpectedTypeError("{self.name}", {arg})'
def convert_to_basetype(self, code, pos, arg_cname, allow_none=True, arg_name_cstring="NULL"):
"""Generate type checking code that converts compatible (number) types to the plain base type in-place.
Replaces the C value in 'arg_cname' on conversion or error, decrefing the original value.
"""
if self.name == 'float':
utility_code_name = "pyfloat_simplify"
cfunc = "__Pyx_PyFloat_FromNumber"
elif self.name == 'int':
utility_code_name = "pyint_simplify"
cfunc = "__Pyx_PyInt_FromNumber"
else:
# No conversion, simple type check.
type_test = self.type_test_code(code.globalstate, arg_cname, allow_none=allow_none)
code.putln(f"if (!({type_test})) {code.error_goto(pos)}")
return
code.globalstate.use_utility_code(
UtilityCode.load_cached(utility_code_name, "TypeConversion.c"))
code.put_error_if_neg(
pos, f"{cfunc}(&{arg_cname}, {arg_name_cstring}, {allow_none:d})"
)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.name
else:
base_code = public_decl(self.decl_type, dll_linkage)
entity_code = "*%s" % entity_code
return self.base_declaration_code(base_code, entity_code)
def as_pyobject(self, cname):
if self.decl_type == 'PyObject':
return cname
else:
return "(PyObject *)" + cname
def cast_code(self, expr_code, to_object_struct = False):
return "((%s*)%s)" % (
to_object_struct and self.objstruct_cname or self.decl_type, # self.objstruct_cname may be None
expr_code)
def py_type_name(self):
return self.name
| BuiltinObjectType |
python | doocs__leetcode | solution/0200-0299/0253.Meeting Rooms II/Solution.py | {
"start": 0,
"end": 332
} | class ____:
def minMeetingRooms(self, intervals: List[List[int]]) -> int:
m = max(e[1] for e in intervals)
d = [0] * (m + 1)
for l, r in intervals:
d[l] += 1
d[r] -= 1
ans = s = 0
for v in d:
s += v
ans = max(ans, s)
return ans
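# Illustrative check, not part of the original file (assumes List is importable from
# typing, as in the judge environment): the difference-array sweep counts how many
# meetings overlap at any instant, so these intervals need 2 rooms.
if __name__ == "__main__":
    assert Solution().minMeetingRooms([[0, 30], [5, 10], [15, 20]]) == 2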
| Solution |
python | google__jax | jax/_src/lax/slicing.py | {
"start": 8208,
"end": 10292
} | class ____(NamedTuple):
"""
Describes the dimension number arguments to an `XLA's Gather operator
<https://www.openxla.org/xla/operation_semantics#gather>`_. See the XLA
documentation for more details of what the dimension numbers mean.
Args:
offset_dims: the set of dimensions in the `gather` output that offset into
an array sliced from `operand`. Must be a tuple of integers in ascending
order, each representing a dimension number of the output.
collapsed_slice_dims: the set of dimensions `i` in `operand` that have
`slice_sizes[i] == 1` and that should not have a corresponding dimension
in the output of the gather. Must be a tuple of integers in ascending
order.
start_index_map: for each dimension in `start_indices`, gives the
corresponding dimension in the `operand` that is to be sliced. Must be a
tuple of integers with size equal to `start_indices.shape[-1]`.
operand_batching_dims: the set of batching dimensions `i` in `operand` that
have `slice_sizes[i] == 1` and that should have a corresponding dimension
in both the `start_indices` (at the same index in
`start_indices_batching_dims`) and output of the gather. Must be a tuple
of integers in ascending order.
start_indices_batching_dims: the set of batching dimensions `i` in
`start_indices` that should have a corresponding dimension in both the
`operand` (at the same index in `operand_batching_dims`) and output of the
gather. Must be a tuple of integers (order is fixed based on
correspondence with `operand_batching_dims`).
Unlike XLA's `GatherDimensionNumbers` structure, `index_vector_dim` is
implicit; there is always an index vector dimension and it must always be the
last dimension. To gather scalar indices, add a trailing dimension of size 1.
"""
offset_dims: tuple[int, ...]
collapsed_slice_dims: tuple[int, ...]
start_index_map: tuple[int, ...]
operand_batching_dims: tuple[int, ...] = ()
start_indices_batching_dims: tuple[int, ...] = ()
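def _gather_rows_example():
    # Illustrative sketch, not part of the public module: the common "pick whole
    # rows by index" gather. The concrete shapes and values here are assumed only
    # for the example.
    import jax.numpy as jnp
    from jax import lax
    operand = jnp.arange(12).reshape(3, 4)
    start_indices = jnp.array([[0], [2]])   # trailing dim is the index vector
    dnums = GatherDimensionNumbers(
        offset_dims=(1,),            # output dim 1 carries the length-4 row slice
        collapsed_slice_dims=(0,),   # drop the size-1 sliced row dimension
        start_index_map=(0,),        # the index selects along operand dim 0
    )
    out = lax.gather(operand, start_indices, dnums, slice_sizes=(1, 4))
    return out  # shape (2, 4): rows 0 and 2 of operand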
| GatherDimensionNumbers |
python | doocs__leetcode | solution/1500-1599/1539.Kth Missing Positive Number/Solution.py | {
"start": 0,
"end": 407
} | class ____:
def findKthPositive(self, arr: List[int], k: int) -> int:
if arr[0] > k:
return k
left, right = 0, len(arr)
while left < right:
mid = (left + right) >> 1
if arr[mid] - mid - 1 >= k:
right = mid
else:
left = mid + 1
return arr[left - 1] + k - (arr[left - 1] - (left - 1) - 1)
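# Illustrative check, not part of the original file (assumes List is importable from
# typing, as in the judge environment): arr[mid] - mid - 1 counts the positives
# missing before index mid, so the search locates where the k-th gap falls.
if __name__ == "__main__":
    assert Solution().findKthPositive([2, 3, 4, 7, 11], 5) == 9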
| Solution |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/batch_client.py | {
"start": 1591,
"end": 4951
} | class ____(Protocol):
"""
A structured Protocol for ``boto3.client('batch') -> botocore.client.Batch``.
This is used for type hints on :py:meth:`.BatchClient.client`; it covers
only the subset of client methods required.
.. seealso::
- https://mypy.readthedocs.io/en/latest/protocols.html
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html
"""
def describe_jobs(self, jobs: list[str]) -> dict:
"""
Get job descriptions from AWS Batch.
:param jobs: a list of JobId to describe
:return: an API response to describe jobs
"""
...
def get_waiter(self, waiterName: str) -> botocore.waiter.Waiter:
"""
Get an AWS Batch service waiter.
:param waiterName: The name of the waiter. The name should match
the name (including the casing) of the key name in the waiter
model file (typically this is CamelCasing).
:return: a waiter object for the named AWS Batch service
.. note::
AWS Batch might not have any waiters (until botocore PR-1307 is released).
.. code-block:: python
import boto3
boto3.client("batch").waiter_names == []
.. seealso::
- https://boto3.amazonaws.com/v1/documentation/api/latest/guide/clients.html#waiters
- https://github.com/boto/botocore/pull/1307
"""
...
def submit_job(
self,
jobName: str,
jobQueue: str,
jobDefinition: str,
arrayProperties: dict,
parameters: dict,
containerOverrides: dict,
ecsPropertiesOverride: dict,
eksPropertiesOverride: dict,
tags: dict,
) -> dict:
"""
Submit a Batch job.
:param jobName: the name for the AWS Batch job
:param jobQueue: the queue name on AWS Batch
:param jobDefinition: the job definition name on AWS Batch
:param arrayProperties: the same parameter that boto3 will receive
:param parameters: the same parameter that boto3 will receive
:param containerOverrides: the same parameter that boto3 will receive
:param ecsPropertiesOverride: the same parameter that boto3 will receive
:param eksPropertiesOverride: the same parameter that boto3 will receive
:param tags: the same parameter that boto3 will receive
:return: an API response
"""
...
def terminate_job(self, jobId: str, reason: str) -> dict:
"""
Terminate a Batch job.
:param jobId: a job ID to terminate
:param reason: a reason to terminate job ID
:return: an API response
"""
...
def create_compute_environment(self, **kwargs) -> dict:
"""
Create an AWS Batch compute environment.
:param kwargs: Arguments for boto3 create_compute_environment
.. seealso::
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch/client/create_compute_environment.html
"""
...
# Note that the use of invalid-name parameters should be restricted to the boto3 mappings only;
# all the Airflow wrappers of boto3 clients should not adopt invalid-names to match boto3.
| BatchProtocol |
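Because BatchProtocol is a typing.Protocol, any object with compatible methods satisfies it structurally. The sketch below is a hypothetical runtime stand-in for tests (canned responses, no AWS calls, only a subset of the methods), not a complete static match for the full protocol.

from typing import Any
class FakeBatchClient:
    # Structural stand-in for BatchProtocol with canned responses.
    def describe_jobs(self, jobs: list[str]) -> dict:
        return {"jobs": [{"jobId": job_id, "status": "SUCCEEDED"} for job_id in jobs]}
    def submit_job(self, jobName: str, jobQueue: str, jobDefinition: str, **overrides: Any) -> dict:
        return {"jobName": jobName, "jobId": "fake-job-id"}
    def terminate_job(self, jobId: str, reason: str) -> dict:
        return {"jobId": jobId, "reason": reason}
client = FakeBatchClient()
assert client.describe_jobs(["fake-job-id"])["jobs"][0]["status"] == "SUCCEEDED"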
python | pypa__pip | src/pip/_vendor/packaging/metadata.py | {
"start": 18089,
"end": 26320
} | class ____(Generic[T]):
"""Validate a metadata field.
All _process_*() methods correspond to a core metadata field. The method is
called with the field's raw value. If the raw value is valid it is returned
in its "enriched" form (e.g. ``version.Version`` for the ``Version`` field).
If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause
as appropriate).
"""
name: str
raw_name: str
added: _MetadataVersion
def __init__(
self,
*,
added: _MetadataVersion = "1.0",
) -> None:
self.added = added
def __set_name__(self, _owner: Metadata, name: str) -> None:
self.name = name
self.raw_name = _RAW_TO_EMAIL_MAPPING[name]
def __get__(self, instance: Metadata, _owner: type[Metadata]) -> T:
# With Python 3.8, the caching can be replaced with functools.cached_property().
# No need to check the cache as attribute lookup will resolve into the
# instance's __dict__ before __get__ is called.
cache = instance.__dict__
value = instance._raw.get(self.name)
# To make the _process_* methods easier, we'll check if the value is None
# and if this field is NOT a required attribute, and if both of those
        # things are true, we'll skip the converter. This will mean that the
# converters never have to deal with the None union.
if self.name in _REQUIRED_ATTRS or value is not None:
try:
converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}")
except AttributeError:
pass
else:
value = converter(value)
cache[self.name] = value
try:
del instance._raw[self.name] # type: ignore[misc]
except KeyError:
pass
return cast(T, value)
def _invalid_metadata(
self, msg: str, cause: Exception | None = None
) -> InvalidMetadata:
exc = InvalidMetadata(
self.raw_name, msg.format_map({"field": repr(self.raw_name)})
)
exc.__cause__ = cause
return exc
def _process_metadata_version(self, value: str) -> _MetadataVersion:
# Implicitly makes Metadata-Version required.
if value not in _VALID_METADATA_VERSIONS:
raise self._invalid_metadata(f"{value!r} is not a valid metadata version")
return cast(_MetadataVersion, value)
def _process_name(self, value: str) -> str:
if not value:
raise self._invalid_metadata("{field} is a required field")
# Validate the name as a side-effect.
try:
utils.canonicalize_name(value, validate=True)
except utils.InvalidName as exc:
raise self._invalid_metadata(
f"{value!r} is invalid for {{field}}", cause=exc
) from exc
else:
return value
def _process_version(self, value: str) -> version_module.Version:
if not value:
raise self._invalid_metadata("{field} is a required field")
try:
return version_module.parse(value)
except version_module.InvalidVersion as exc:
raise self._invalid_metadata(
f"{value!r} is invalid for {{field}}", cause=exc
) from exc
def _process_summary(self, value: str) -> str:
"""Check the field contains no newlines."""
if "\n" in value:
raise self._invalid_metadata("{field} must be a single line")
return value
def _process_description_content_type(self, value: str) -> str:
content_types = {"text/plain", "text/x-rst", "text/markdown"}
message = email.message.EmailMessage()
message["content-type"] = value
content_type, parameters = (
# Defaults to `text/plain` if parsing failed.
message.get_content_type().lower(),
message["content-type"].params,
)
# Check if content-type is valid or defaulted to `text/plain` and thus was
# not parseable.
if content_type not in content_types or content_type not in value.lower():
raise self._invalid_metadata(
f"{{field}} must be one of {list(content_types)}, not {value!r}"
)
charset = parameters.get("charset", "UTF-8")
if charset != "UTF-8":
raise self._invalid_metadata(
f"{{field}} can only specify the UTF-8 charset, not {list(charset)}"
)
markdown_variants = {"GFM", "CommonMark"}
variant = parameters.get("variant", "GFM") # Use an acceptable default.
if content_type == "text/markdown" and variant not in markdown_variants:
raise self._invalid_metadata(
f"valid Markdown variants for {{field}} are {list(markdown_variants)}, "
f"not {variant!r}",
)
return value
def _process_dynamic(self, value: list[str]) -> list[str]:
for dynamic_field in map(str.lower, value):
if dynamic_field in {"name", "version", "metadata-version"}:
raise self._invalid_metadata(
f"{dynamic_field!r} is not allowed as a dynamic field"
)
elif dynamic_field not in _EMAIL_TO_RAW_MAPPING:
raise self._invalid_metadata(
f"{dynamic_field!r} is not a valid dynamic field"
)
return list(map(str.lower, value))
def _process_provides_extra(
self,
value: list[str],
) -> list[utils.NormalizedName]:
normalized_names = []
try:
for name in value:
normalized_names.append(utils.canonicalize_name(name, validate=True))
except utils.InvalidName as exc:
raise self._invalid_metadata(
f"{name!r} is invalid for {{field}}", cause=exc
) from exc
else:
return normalized_names
def _process_requires_python(self, value: str) -> specifiers.SpecifierSet:
try:
return specifiers.SpecifierSet(value)
except specifiers.InvalidSpecifier as exc:
raise self._invalid_metadata(
f"{value!r} is invalid for {{field}}", cause=exc
) from exc
def _process_requires_dist(
self,
value: list[str],
) -> list[requirements.Requirement]:
reqs = []
try:
for req in value:
reqs.append(requirements.Requirement(req))
except requirements.InvalidRequirement as exc:
raise self._invalid_metadata(
f"{req!r} is invalid for {{field}}", cause=exc
) from exc
else:
return reqs
def _process_license_expression(
self, value: str
) -> NormalizedLicenseExpression | None:
try:
return licenses.canonicalize_license_expression(value)
except ValueError as exc:
raise self._invalid_metadata(
f"{value!r} is invalid for {{field}}", cause=exc
) from exc
def _process_license_files(self, value: list[str]) -> list[str]:
paths = []
for path in value:
if ".." in path:
raise self._invalid_metadata(
f"{path!r} is invalid for {{field}}, "
"parent directory indicators are not allowed"
)
if "*" in path:
raise self._invalid_metadata(
f"{path!r} is invalid for {{field}}, paths must be resolved"
)
if (
pathlib.PurePosixPath(path).is_absolute()
or pathlib.PureWindowsPath(path).is_absolute()
):
raise self._invalid_metadata(
f"{path!r} is invalid for {{field}}, paths must be relative"
)
if pathlib.PureWindowsPath(path).as_posix() != path:
raise self._invalid_metadata(
f"{path!r} is invalid for {{field}}, paths must use '/' delimiter"
)
paths.append(path)
return paths
| _Validator |
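The descriptor machinery _Validator leans on (field name bound via __set_name__, result cached in the instance __dict__ so __get__ only runs once) can be shown in miniature. The sketch below is a stripped-down illustration with invented names, not pip's vendored API.

class Positive:
    # Non-data descriptor: validates on first access, then caches the result.
    def __set_name__(self, owner, name):
        self.name = name
    def __get__(self, instance, owner):
        if instance is None:
            return self
        raw = instance._raw[self.name]
        if raw <= 0:
            raise ValueError(f"{self.name} must be positive, got {raw!r}")
        instance.__dict__[self.name] = raw  # later lookups bypass __get__
        return raw
class Measurement:
    width = Positive()
    def __init__(self, raw):
        self._raw = raw
m = Measurement({"width": 3})
assert m.width == 3 and "width" in m.__dict__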
python | vyperlang__vyper | vyper/builtins/functions.py | {
"start": 16811,
"end": 20297
} | class ____(BuiltinFunctionT):
_id = "concat"
def fetch_call_return(self, node):
arg_types = self.infer_arg_types(node)
length = 0
for arg_t in arg_types:
length += arg_t.length
if isinstance(arg_types[0], (StringT)):
return_type = StringT()
else:
return_type = BytesT()
return_type.set_length(length)
return return_type
def infer_arg_types(self, node, expected_return_typ=None):
if len(node.args) < 2:
raise ArgumentException("Invalid argument count: expected at least 2", node)
if node.keywords:
raise ArgumentException("Keyword arguments are not accepted here", node.keywords[0])
ret = []
prev_typeclass = None
for arg in node.args:
validate_expected_type(arg, (BytesT.any(), StringT.any(), BytesM_T.any()))
arg_t = get_possible_types_from_node(arg).pop()
current_typeclass = "String" if isinstance(arg_t, StringT) else "Bytes"
if prev_typeclass and current_typeclass != prev_typeclass:
raise TypeMismatch(
(
"Concat expects consistent use of string or bytes types, "
"use either string or bytes."
),
arg,
)
prev_typeclass = current_typeclass
ret.append(arg_t)
return ret
def build_IR(self, expr, context):
args = [Expr(arg, context).ir_node for arg in expr.args]
if len(args) < 2:
raise StructureException("Concat expects at least two arguments", expr)
# Maximum length of the output
dst_maxlen = sum(
[arg.typ.maxlen if isinstance(arg.typ, _BytestringT) else arg.typ.m for arg in args]
)
# TODO: try to grab these from semantic analysis
if isinstance(args[0].typ, StringT):
ret_typ = StringT(dst_maxlen)
else:
ret_typ = BytesT(dst_maxlen)
# respect API of copy_bytes
bufsize = dst_maxlen + 32
dst = context.new_internal_variable(BytesT(bufsize))
dst.annotation = "concat destination"
ret = ["seq"]
# stack item representing our current offset in the dst buffer
ofst = "concat_ofst"
# TODO: optimize for the case where all lengths are statically known.
for arg in args:
dst_data = add_ofst(bytes_data_ptr(dst), ofst)
if isinstance(arg.typ, _BytestringT):
with arg.cache_when_complex("arg") as (b1, arg):
argdata = bytes_data_ptr(arg)
with get_bytearray_length(arg).cache_when_complex("len") as (b2, arglen):
do_copy = [
"seq",
copy_bytes(dst_data, argdata, arglen, arg.typ.maxlen),
["set", ofst, ["add", ofst, arglen]],
]
ret.append(b1.resolve(b2.resolve(do_copy)))
else:
ret.append(STORE(dst_data, unwrap_location(arg)))
ret.append(["set", ofst, ["add", ofst, arg.typ.m]])
ret.append(STORE(dst, ofst))
# Memory location of the output
ret.append(dst)
return IRnode.from_list(
["with", ofst, 0, ret], typ=ret_typ, location=MEMORY, annotation="concat"
)
| Concat |
python | PyCQA__pylint | tests/functional/i/init_not_called.py | {
"start": 395,
"end": 430
} | class ____:
"""ancestor 3"""
| CCCC |
python | dask__distributed | distributed/utils.py | {
"start": 13799,
"end": 23055
} | class ____:
"""
A helper to start and stop an IO loop in a controlled way.
Several loop runners can associate safely to the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be reused, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops: ClassVar[
weakref.WeakKeyDictionary[IOLoop, tuple[int, LoopRunner | None]]
] = weakref.WeakKeyDictionary()
_lock = threading.Lock()
_loop_thread: _CollectErrorThread | None
def __init__(self, loop: IOLoop | None = None, asynchronous: bool = False):
if loop is None:
if asynchronous:
# raises RuntimeError if there's no running loop
try:
asyncio.get_running_loop()
except RuntimeError as e:
raise RuntimeError(
"Constructing LoopRunner(asynchronous=True) without a running loop is not supported"
) from e
loop = IOLoop.current()
elif not loop.asyncio_loop.is_running(): # type: ignore[attr-defined]
# LoopRunner is not responsible for starting a foreign IOLoop
raise RuntimeError(
"Constructing LoopRunner(loop=loop) without a running loop is not supported"
)
self._loop = loop
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
self._stop_event = LateLoopEvent()
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self) -> None:
assert not self._started
if self._loop is not None:
try:
count, real_runner = self._all_loops[self._loop]
except KeyError:
assert self._loop.asyncio_loop.is_running() # type: ignore[attr-defined]
self._started = True
return
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
start_evt = threading.Event()
start_exc = None
loop = None
async def amain() -> None:
nonlocal loop
loop = IOLoop.current()
start_evt.set()
await self._stop_event.wait()
def run_loop() -> None:
nonlocal start_exc
try:
asyncio_run(amain(), loop_factory=get_loop_factory())
except BaseException as e:
if start_evt.is_set():
raise
start_exc = e
start_evt.set()
self._loop_thread = _CollectErrorThread(
target=run_loop, daemon=True, name="IO loop"
)
start_evt.wait(timeout=10)
if start_exc is not None:
raise start_exc
assert loop is not None
self._loop = loop
self._started = True
self._all_loops[loop] = (1, self)
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
try:
count, real_runner = self._all_loops[self._loop]
except KeyError:
return
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
return
assert count == 1
del self._all_loops[self._loop]
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
try:
self._loop.add_callback(self._stop_event.set)
self._loop_thread.join(timeout=timeout)
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
loop = self._loop
if loop is None or not loop.asyncio_loop.is_running():
raise RuntimeError(
"Accessing the loop property while the loop is not running is not supported"
)
return self._loop
@contextlib.contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextlib.contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, "w") as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def is_kernel():
"""Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if "IPython" not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), "kernel", None) is not None
def key_split_group(x: object) -> str:
"""A more fine-grained version of key_split.
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x'
"""
if isinstance(x, tuple):
return x[0]
elif isinstance(x, str):
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return key_split(x)
elif isinstance(x, bytes):
return key_split_group(x.decode())
else:
return "Other"
@overload
def log_errors(func: Callable[P, T], /) -> Callable[P, T]: ...
@overload
def log_errors(*, pdb: bool = False, unroll_stack: int = 1) -> _LogErrors: ...
def log_errors(func=None, /, *, pdb=False, unroll_stack=0):
"""Log any errors and then reraise them.
This can be used:
- As a context manager::
with log_errors(...):
...
- As a bare function decorator::
@log_errors
def func(...):
...
- As a function decorator with parameters::
@log_errors(...)
def func(...):
...
Parameters
----------
pdb: bool, optional
Set to True to break into the debugger in case of exception
unroll_stack: int, optional
Number of levels of stack to unroll when determining the module's name for the
purpose of logging. Normally you should omit this. Set to 1 if you are writing a
helper function, context manager, or decorator.
"""
le = _LogErrors(pdb=pdb, unroll_stack=unroll_stack)
return le(func) if func else le
_getmodulename_with_path_map: dict[str, str] = {}
def _getmodulename_with_path(fname: str) -> str:
"""Variant of inspect.getmodulename that returns the full module path"""
try:
return _getmodulename_with_path_map[fname]
except KeyError:
pass
for modname, mod in sys.modules.copy().items():
fname2 = getattr(mod, "__file__", None)
if fname2:
_getmodulename_with_path_map[fname2] = modname
try:
return _getmodulename_with_path_map[fname]
except KeyError: # pragma: nocover
return os.path.splitext(os.path.basename(fname))[0]
| LoopRunner |
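The core move LoopRunner makes, running an event loop on a dedicated daemon thread and handing work to it from the calling thread, looks roughly like the plain-asyncio sketch below; it uses none of distributed's helpers and is only meant to illustrate the pattern.

import asyncio
import threading
loop = asyncio.new_event_loop()
thread = threading.Thread(target=loop.run_forever, daemon=True, name="IO loop")
thread.start()
async def add(a, b):
    await asyncio.sleep(0)
    return a + b
# Comparable in spirit to LoopRunner.run_sync: block until the coroutine finishes.
future = asyncio.run_coroutine_threadsafe(add(1, 2), loop)
assert future.result(timeout=5) == 3
loop.call_soon_threadsafe(loop.stop)
thread.join(timeout=5)
loop.close()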
python | getsentry__sentry | tests/sentry/testutils/helpers/test_features.py | {
"start": 2463,
"end": 3752
} | class ____(TestCase):
"""Test that with_feature works correctly when used as a class decorator."""
def test_with_feature_on_class_works(self) -> None:
"""Test that using with_feature as a class decorator enables features for all methods."""
@with_feature("organizations:session-replay")
class TestClassWithFeature(TestCase):
def test_method_1(self) -> None:
org = self.create_organization()
assert features.has("organizations:session-replay", org)
def test_method_2(self) -> None:
org = self.create_organization()
assert features.has("organizations:session-replay", org)
# Verify the fixture was created
fixture_found = False
for attr_name in dir(TestClassWithFeature):
if (
attr_name.startswith("_feature_fixture")
and "organizations:session-replay" in attr_name
):
fixture_found = True
break
assert fixture_found, "Feature fixture was not created on the class"
test_instance = TestClassWithFeature()
test_instance.setUp()
test_instance.test_method_1()
test_instance.test_method_2()
| TestWithFeatureClassDecorator |
python | joerick__pyinstrument | pyinstrument/vendor/decorator.py | {
"start": 2454,
"end": 10987
} | class ____(object):
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict and
methods update and make.
"""
# Atomic get-and-increment provided by the GIL
_compile_count = itertools.count()
# make pylint happy
args = varargs = varkw = defaults = kwonlyargs = kwonlydefaults = ()
def __init__(self, func=None, name=None, signature=None,
defaults=None, doc=None, module=None, funcdict=None):
self.shortsignature = signature
if func:
# func can be a class or a callable, but not an instance method
self.name = func.__name__
if self.name == '<lambda>': # small hack for lambda functions
self.name = '_lambda_'
self.doc = func.__doc__
self.module = func.__module__
if inspect.isfunction(func):
argspec = getfullargspec(func)
self.annotations = getattr(func, '__annotations__', {})
for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
'kwonlydefaults'):
setattr(self, a, getattr(argspec, a))
for i, arg in enumerate(self.args):
setattr(self, 'arg%d' % i, arg)
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
allargs.append('*' + self.varargs)
allshortargs.append('*' + self.varargs)
elif self.kwonlyargs:
allargs.append('*') # single star syntax
for a in self.kwonlyargs:
allargs.append('%s=None' % a)
allshortargs.append('%s=%s' % (a, a))
if self.varkw:
allargs.append('**' + self.varkw)
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
self.dict = func.__dict__.copy()
# func=None happens when decorating a caller
if name:
self.name = name
if signature is not None:
self.signature = signature
if defaults:
self.defaults = defaults
if doc:
self.doc = doc
if module:
self.module = module
if funcdict:
self.dict = funcdict
# check existence required attributes
assert hasattr(self, 'name')
if not hasattr(self, 'signature'):
raise TypeError('You are decorating a non function: %s' % func)
def update(self, func, **kw):
"Update the signature of func with the data in self"
func.__name__ = self.name
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.__defaults__ = self.defaults
func.__kwdefaults__ = self.kwonlydefaults or None
func.__annotations__ = getattr(self, 'annotations', None)
try:
frame = sys._getframe(3)
except AttributeError: # for IronPython and similar implementations
callermodule = '?'
else:
callermodule = frame.f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"Make a new function from a given template and update the signature"
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.search(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline for old Pythons
src += '\n'
# Ensure each generated function has a unique filename for profilers
# (such as cProfile) that depend on the tuple of (<filename>,
# <definition line>, <function name>) being unique.
filename = '<%s:decorator-gen-%d>' % (
__file__, next(self._compile_count))
try:
code = compile(src, filename, 'single')
exec(code, evaldict)
except Exception:
print('Error in generated code:', file=sys.stderr)
print(src, file=sys.stderr)
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature and body.
evaldict is the evaluation dictionary. If addsource is true an
attribute __source__ is added to the result. The attributes attrs
are added, if any.
"""
if isinstance(obj, str): # "name(signature)"
name, rest = obj.strip().split('(', 1)
signature = rest[:-1] # strip a right parens
func = None
else: # a function
name = None
signature = None
func = obj
self = cls(func, name, signature, defaults, doc, module)
ibody = '\n'.join(' ' + line for line in body.splitlines())
caller = evaldict.get('_call_') # when called from `decorate`
if caller and iscoroutinefunction(caller):
body = ('async def %(name)s(%(signature)s):\n' + ibody).replace(
'return', 'return await')
else:
body = 'def %(name)s(%(signature)s):\n' + ibody
return self.make(body, evaldict, addsource, **attrs)
def decorate(func, caller, extras=()):
"""
decorate(func, caller) decorates a function using a caller.
"""
evaldict = dict(_call_=caller, _func_=func)
es = ''
for i, extra in enumerate(extras):
ex = '_e%d_' % i
evaldict[ex] = extra
es += ex + ', '
fun = FunctionMaker.create(
func, "return _call_(_func_, %s%%(shortsignature)s)" % es,
evaldict, __wrapped__=func)
if hasattr(func, '__qualname__'):
fun.__qualname__ = func.__qualname__
return fun
def decorator(caller, _func=None):
"""decorator(caller) converts a caller function into a decorator"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller)
# else return a decorator function
defaultargs, defaults = '', ()
if inspect.isclass(caller):
name = caller.__name__.lower()
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
elif inspect.isfunction(caller):
if caller.__name__ == '<lambda>':
name = '_lambda_'
else:
name = caller.__name__
doc = caller.__doc__
nargs = caller.__code__.co_argcount
ndefs = len(caller.__defaults__ or ())
defaultargs = ', '.join(caller.__code__.co_varnames[nargs-ndefs:nargs])
if defaultargs:
defaultargs += ','
defaults = caller.__defaults__
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
doc = caller.__call__.__doc__
evaldict = dict(_call=caller, _decorate_=decorate)
dec = FunctionMaker.create(
'%s(%s func)' % (name, defaultargs),
'if func is None: return lambda func: _decorate_(func, _call, (%s))\n'
'return _decorate_(func, _call, (%s))' % (defaultargs, defaultargs),
evaldict, doc=doc, module=caller.__module__, __wrapped__=caller)
if defaults:
dec.__defaults__ = defaults + (None,)
return dec
# ####################### contextmanager ####################### #
try: # Python >= 3.2
from contextlib import _GeneratorContextManager
except ImportError: # Python >= 2.5
from contextlib import GeneratorContextManager as _GeneratorContextManager
| FunctionMaker |
python | automl__auto-sklearn | autosklearn/pipeline/components/feature_preprocessing/no_preprocessing.py | {
"start": 308,
"end": 1395
} | class ____(AutoSklearnPreprocessingAlgorithm):
def __init__(self, random_state):
"""This preprocessors does not change the data"""
def fit(self, X, Y=None):
self.preprocessor = "passthrough"
self.fitted_ = True
return self
def transform(self, X):
if self.preprocessor is None:
raise NotImplementedError()
return X
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "no",
"name": "NoPreprocessing",
"handles_regression": True,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": True,
"is_deterministic": True,
"input": (SPARSE, DENSE, UNSIGNED_DATA),
"output": (INPUT,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
return cs
| NoPreprocessing |
python | getsentry__sentry | src/sentry/api/serializers/models/organization_member/response.py | {
"start": 615,
"end": 771
} | class ____(TypedDict, total=False):
"""Sentry doesn't use this field but is expected by SCIM"""
active: bool
| OrganizationMemberSCIMSerializerOptional |
python | ansible__ansible | lib/ansible/playbook/role/requirement.py | {
"start": 1074,
"end": 4114
} | class ____(RoleDefinition):
"""
    Helper class for Galaxy, used to parse dependencies specified in both
    meta/main.yml and requirements.yml files.
"""
def __init__(self):
pass
@staticmethod
def repo_url_to_role_name(repo_url):
# gets the role name out of a repo like
# http://git.example.com/repos/repo.git" => "repo"
if '://' not in repo_url and '@' not in repo_url:
return repo_url
trailing_path = repo_url.split('/')[-1]
if trailing_path.endswith('.git'):
trailing_path = trailing_path[:-4]
if trailing_path.endswith('.tar.gz'):
trailing_path = trailing_path[:-7]
if ',' in trailing_path:
trailing_path = trailing_path.split(',')[0]
return trailing_path
@staticmethod
def role_yaml_parse(role):
if isinstance(role, str):
name = None
scm = None
src = None
version = None
if ',' in role:
if role.count(',') == 1:
(src, version) = role.strip().split(',', 1)
elif role.count(',') == 2:
(src, version, name) = role.strip().split(',', 2)
else:
raise AnsibleError("Invalid role line (%s). Proper format is 'role_name[,version[,name]]'" % role)
else:
src = role
if name is None:
name = RoleRequirement.repo_url_to_role_name(src)
if '+' in src:
(scm, src) = src.split('+', 1)
return dict(name=name, src=src, scm=scm, version=version)
if 'role' in role:
name = role['role']
if ',' in name:
raise AnsibleError("Invalid old style role requirement: %s" % name)
else:
del role['role']
role['name'] = name
else:
role = role.copy()
if 'src' in role:
# New style: { src: 'galaxy.role,version,name', other_vars: "here" }
if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
role["src"] = "git+" + role["src"]
if '+' in role["src"]:
role["scm"], dummy, role["src"] = role["src"].partition('+')
if 'name' not in role:
role["name"] = RoleRequirement.repo_url_to_role_name(role["src"])
if 'version' not in role:
role['version'] = ''
if 'scm' not in role:
role['scm'] = None
for key in list(role.keys()):
if key not in VALID_SPEC_KEYS:
role.pop(key)
return role
@staticmethod
def scm_archive_role(src, scm='git', name=None, version='HEAD', keep_scm_meta=False):
return scm_archive_resource(src, scm=scm, name=name, version=version, keep_scm_meta=keep_scm_meta)
| RoleRequirement |
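A rough illustration of role_yaml_parse on the old comma-separated string form, assuming ansible is importable; the expected dict follows the split-on-comma and scm-prefix logic above.

from ansible.playbook.role.requirement import RoleRequirement
spec = RoleRequirement.role_yaml_parse("git+https://git.example.com/repos/repo.git,v1.2.3")
# The 'git+' prefix becomes the scm, and the trailing 'repo.git' yields the role name.
assert spec == {"name": "repo", "src": "https://git.example.com/repos/repo.git", "scm": "git", "version": "v1.2.3"}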
python | apache__airflow | providers/standard/src/airflow/providers/standard/triggers/external_task.py | {
"start": 7588,
"end": 11808
} | class ____(BaseTrigger):
"""
Waits asynchronously for a dag to complete for a specific run_id.
:param dag_id: The dag_id that contains the task you want to wait for
:param states: allowed states, default is ``['success']``
    :param run_ids: The run IDs of the dag runs to wait for.
:param poll_interval: The time interval in seconds to check the state.
The default value is 5.0 sec.
"""
def __init__(
self,
dag_id: str,
states: list[DagRunState],
run_ids: list[str] | None = None,
execution_dates: list[datetime] | None = None,
poll_interval: float = 5.0,
):
super().__init__()
self.dag_id = dag_id
self.states = states
self.run_ids = run_ids
self.execution_dates = execution_dates
self.poll_interval = poll_interval
def serialize(self) -> tuple[str, dict[str, typing.Any]]:
"""Serialize DagStateTrigger arguments and classpath."""
data = {
"dag_id": self.dag_id,
"states": self.states,
"poll_interval": self.poll_interval,
"run_ids": self.run_ids,
"execution_dates": self.execution_dates,
}
return "airflow.providers.standard.triggers.external_task.DagStateTrigger", data
async def run(self) -> typing.AsyncIterator[TriggerEvent]:
"""Check periodically if the dag run exists, and has hit one of the states yet, or not."""
runs_ids_or_dates = 0
if self.run_ids:
runs_ids_or_dates = len(self.run_ids)
elif self.execution_dates:
runs_ids_or_dates = len(self.execution_dates)
cls_path, data = self.serialize()
if AIRFLOW_V_3_0_PLUS:
data.update( # update with {run_id: run_state} dict
await self.validate_count_dags_af_3(runs_ids_or_dates_len=runs_ids_or_dates)
)
yield TriggerEvent((cls_path, data))
return
else:
while True:
num_dags = await self.count_dags()
if num_dags == runs_ids_or_dates:
yield TriggerEvent((cls_path, data))
return
await asyncio.sleep(self.poll_interval)
async def validate_count_dags_af_3(self, runs_ids_or_dates_len: int = 0) -> dict[str, str]:
from airflow.sdk.execution_time.task_runner import RuntimeTaskInstance
run_states: dict[str, str] = {} # {run_id: run_state}
while True:
num_dags = await sync_to_async(RuntimeTaskInstance.get_dr_count)(
dag_id=self.dag_id,
run_ids=self.run_ids,
states=self.states, # type: ignore[arg-type]
logical_dates=self.execution_dates,
)
if num_dags == runs_ids_or_dates_len:
if isinstance(self.run_ids, list):
for run_id in self.run_ids:
state = await sync_to_async(RuntimeTaskInstance.get_dagrun_state)(
dag_id=self.dag_id,
run_id=run_id,
)
run_states[run_id] = state
return run_states
await asyncio.sleep(self.poll_interval)
if not AIRFLOW_V_3_0_PLUS:
from airflow.utils.session import NEW_SESSION, provide_session # type: ignore[misc]
@sync_to_async
@provide_session
def count_dags(self, *, session: Session = NEW_SESSION) -> int:
"""Count how many dag runs in the database match our criteria."""
_dag_run_date_condition = (
DagRun.run_id.in_(self.run_ids or [])
if AIRFLOW_V_3_0_PLUS
else DagRun.execution_date.in_(self.execution_dates)
)
stmt = (
select(func.count())
.select_from(DagRun)
.where(
DagRun.dag_id == self.dag_id,
DagRun.state.in_(self.states),
_dag_run_date_condition,
)
)
result = session.execute(stmt).scalar()
return result or 0
| DagStateTrigger |
python | dagster-io__dagster | integration_tests/test_suites/auto_materialize_perf_tests/perf_scenario.py | {
"start": 2956,
"end": 6479
} | class ____(NamedTuple):
name: str
n_assets: int
n_sources: int = 0
asset_partitions_def: Optional[PartitionsDefinition] = None
max_connectivity: int = 20
def build_definitions(self) -> Definitions:
"""Builds a random set of assets based on the given parameters."""
random.seed(11235711)
deps = {
f"asset_{i}": {
AssetKey(f"asset_{j}")
for j in random.sample(range(i), min(i, random.randint(1, self.max_connectivity)))
}
for i in range(self.n_assets)
}
sources: list[SourceAsset] = []
for i in range(self.n_sources):
_source_asset = SourceAsset(key=f"source_{i}")
for j in random.sample(
range(self.max_connectivity), random.randint(1, self.max_connectivity)
):
deps[f"asset_{j}"].add(_source_asset.key)
sources.append(_source_asset)
@multi_asset(
outs={
f"asset_{i}": AssetOut(
dagster_type=Nothing,
is_required=False,
auto_materialize_policy=AutoMaterializePolicy.eager(),
)
for i in range(self.n_assets)
},
deps=[*(f"source_{i}" for i in range(self.n_sources))],
internal_asset_deps=deps,
can_subset=True,
partitions_def=self.asset_partitions_def,
)
def _masset(context):
selected_outputs = context.selected_output_names
# ensure topological ordering of outputs
for i in range(self.n_assets):
if f"asset_{i}" in selected_outputs:
yield Output(None, f"asset_{i}")
return Definitions(assets=[*sources, _masset])
def build_scenario(
self,
max_execution_time_seconds: int,
n_runs: int = 1,
randomize_runs: bool = False,
partition_keys_to_backfill: Optional[Sequence[str]] = None,
) -> PerfScenario:
defs = self.build_definitions()
asset_graph = defs.resolve_asset_graph()
run_requests: list[RunRequest] = []
if self.asset_partitions_def:
for partition_key in partition_keys_to_backfill or []:
run_requests.append(
RunRequest(
asset_selection=list(asset_graph.materializable_asset_keys),
partition_key=partition_key,
)
)
else:
for i in range(n_runs):
if randomize_runs:
target_asset = random.randint(0, self.n_assets)
selection = AssetSelection.keys(AssetKey(f"asset_{target_asset}")).upstream()
to_materialize = selection.resolve(asset_graph)
else:
to_materialize = asset_graph.materializable_asset_keys
run_requests.append(RunRequest(asset_selection=list(to_materialize)))
if partition_keys_to_backfill:
runs_str = f"{len(partition_keys_to_backfill)}_partition_keys"
else:
runs_str = f"{n_runs}_random_runs" if randomize_runs else f"{n_runs}_runs"
name = f"{self.name}_{runs_str}"
return PerfScenario(
defs=defs,
activity_history=ActivityHistory(run_requests),
max_execution_time_seconds=max_execution_time_seconds,
name=name,
)
| RandomAssets |
python | spack__spack | lib/spack/spack/vendor/jinja2/nodes.py | {
"start": 9955,
"end": 10143
} | class ____(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ("body",)
body: t.List[Node]
| Template |
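In practice this node is what Environment.parse returns as the root of the AST. A small sketch against the public jinja2 package (rather than the vendored copy above):

from jinja2 import Environment
from jinja2.nodes import Name, Template
ast = Environment().parse("Hello {{ name }}!")
assert isinstance(ast, Template)
assert ast.fields == ("body",)
# find_all walks the tree; here it yields the single {{ name }} lookup.
assert [n.name for n in ast.find_all(Name)] == ["name"]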
python | pallets__werkzeug | tests/test_datastructures.py | {
"start": 10895,
"end": 12959
} | class ____(_MutableMultiDictTests):
storage_class = ds.MultiDict
def test_multidict_pop(self):
def make_d():
return self.storage_class({"foo": [1, 2, 3, 4]})
d = make_d()
assert d.pop("foo") == 1
assert not d
d = make_d()
assert d.pop("foo", 32) == 1
assert not d
d = make_d()
assert d.pop("foos", 32) == 32
assert d
with pytest.raises(KeyError):
d.pop("foos")
def test_multidict_pop_raise_badrequestkeyerror_for_empty_list_value(self):
mapping = [("a", "b"), ("a", "c")]
md = self.storage_class(mapping)
md.setlistdefault("empty", [])
with pytest.raises(KeyError):
md.pop("empty")
def test_multidict_popitem_raise_badrequestkeyerror_for_empty_list_value(self):
mapping = []
md = self.storage_class(mapping)
md.setlistdefault("empty", [])
with pytest.raises(BadRequestKeyError):
md.popitem()
def test_setlistdefault(self):
md = self.storage_class()
assert md.setlistdefault("u", [-1, -2]) == [-1, -2]
assert md.getlist("u") == [-1, -2]
assert md["u"] == -1
def test_iter_interfaces(self):
mapping = [
("a", 1),
("b", 2),
("a", 2),
("d", 3),
("a", 1),
("a", 3),
("d", 4),
("c", 3),
]
md = self.storage_class(mapping)
assert list(zip(md.keys(), md.listvalues())) == list(md.lists())
assert list(zip(md, md.listvalues())) == list(md.lists())
assert list(zip(md.keys(), md.listvalues())) == list(md.lists())
def test_getitem_raise_badrequestkeyerror_for_empty_list_value(self):
mapping = [("a", "b"), ("a", "c")]
md = self.storage_class(mapping)
md.setlistdefault("empty", [])
with pytest.raises(KeyError):
md["empty"]
@pytest.mark.filterwarnings("ignore:'OrderedMultiDict':DeprecationWarning")
| TestMultiDict |
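The behaviours these tests pin down can be exercised directly on werkzeug's MultiDict; a short sketch assuming werkzeug is installed:

from werkzeug.datastructures import MultiDict
d = MultiDict({"foo": [1, 2, 3, 4]})
assert d["foo"] == 1                  # plain item access returns the first value
assert d.getlist("foo") == [1, 2, 3, 4]
assert d.pop("foo") == 1 and not d    # pop returns the first value and drops the key entirely
d.setlistdefault("u", [-1, -2])
assert d.getlist("u") == [-1, -2] and d["u"] == -1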
python | numba__numba | numba/tests/test_struct_ref.py | {
"start": 383,
"end": 711
} | class ____(types.StructRef):
"""
    Tests associated with this type represent the lowest-level uses of structref.
"""
pass
my_struct_ty = MySimplerStructType(
fields=[("values", types.intp[:]), ("counter", types.intp)]
)
structref.define_boxing(MySimplerStructType, structref.StructRefProxy)
| MySimplerStructType |
python | kamyu104__LeetCode-Solutions | Python/length-of-last-word.py | {
"start": 362,
"end": 518
} | class ____(object):
# @param s, a string
# @return an integer
def lengthOfLastWord(self, s):
return len(s.strip().split(" ")[-1])
| Solution2 |
python | sympy__sympy | sympy/physics/vector/printing.py | {
"start": 1571,
"end": 3930
} | class ____(LatexPrinter):
"""Latex Printer for vector expressions. """
def _print_Function(self, expr, exp=None):
from sympy.physics.vector.functions import dynamicsymbols
func = expr.func.__name__
t = dynamicsymbols._t
if (hasattr(self, '_print_' + func) and not
isinstance(type(expr), UndefinedFunction)):
return getattr(self, '_print_' + func)(expr, exp)
elif isinstance(type(expr), UndefinedFunction) and (expr.args == (t,)):
# treat this function like a symbol
expr = Symbol(func)
if exp is not None:
# copied from LatexPrinter._helper_print_standard_power, which
# we can't call because we only have exp as a string.
base = self.parenthesize(expr, PRECEDENCE['Pow'])
base = self.parenthesize_super(base)
return r"%s^{%s}" % (base, exp)
else:
return super()._print(expr)
else:
return super()._print_Function(expr, exp)
def _print_Derivative(self, der_expr):
from sympy.physics.vector.functions import dynamicsymbols
# make sure it is in the right form
der_expr = der_expr.doit()
if not isinstance(der_expr, Derivative):
return r"\left(%s\right)" % self.doprint(der_expr)
# check if expr is a dynamicsymbol
t = dynamicsymbols._t
expr = der_expr.expr
red = expr.atoms(AppliedUndef)
syms = der_expr.variables
test1 = not all(True for i in red if i.free_symbols == {t})
test2 = not all(t == i for i in syms)
if test1 or test2:
return super()._print_Derivative(der_expr)
# done checking
dots = len(syms)
base = self._print_Function(expr)
base_split = base.split('_', 1)
base = base_split[0]
if dots == 1:
base = r"\dot{%s}" % base
elif dots == 2:
base = r"\ddot{%s}" % base
elif dots == 3:
base = r"\dddot{%s}" % base
elif dots == 4:
base = r"\ddddot{%s}" % base
else: # Fallback to standard printing
return super()._print_Derivative(der_expr)
if len(base_split) != 1:
base += '_' + base_split[1]
return base
| VectorLatexPrinter |
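This printer is what sympy.physics.vector.vlatex uses under the hood; a brief sketch (assuming sympy is installed, and that the printed strings match the dot-notation branch above):

from sympy.physics.vector import dynamicsymbols, vlatex
q = dynamicsymbols('q')
t = dynamicsymbols._t
assert vlatex(q) == 'q'                 # dynamicsymbols print like plain symbols
assert vlatex(q.diff(t)) == r'\dot{q}'  # one time derivative -> one dot
assert vlatex(q.diff(t, 2)) == r'\ddot{q}'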
python | dagster-io__dagster | python_modules/libraries/dagster-mysql/dagster_mysql/schedule_storage/schedule_storage.py | {
"start": 1363,
"end": 7899
} | class ____(SqlScheduleStorage, ConfigurableClass):
"""MySQL-backed run storage.
Users should not directly instantiate this class; it is instantiated by internal machinery when
``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in
``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.
.. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-mysql-legacy.yaml
:caption: dagster.yaml
:start-after: start_marker_schedules
:end-before: end_marker_schedules
:language: YAML
Note that the fields in this config are :py:class:`~dagster.StringSource` and
:py:class:`~dagster.IntSource` and can be configured from environment variables.
"""
def __init__(self, mysql_url: str, inst_data: Optional[ConfigurableClassData] = None):
self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
self.mysql_url = mysql_url
# Default to not holding any connections open to prevent accumulating connections per DagsterInstance
self._engine = create_engine(
self.mysql_url,
isolation_level=mysql_isolation_level(),
poolclass=db_pool.NullPool,
)
# Stamp and create tables if the main table does not exist (we can't check alembic
# revision because alembic config may be shared with other storage classes)
table_names = retry_mysql_connection_fn(db.inspect(self._engine).get_table_names)
if "jobs" not in table_names:
retry_mysql_creation_fn(self._init_db)
self._mysql_version = self.get_server_version()
super().__init__()
def _init_db(self) -> None:
with self.connect() as conn:
ScheduleStorageSqlMetadata.create_all(conn)
stamp_alembic_rev(mysql_alembic_config(__file__), conn)
# mark all the data migrations as applied
self.migrate()
self.optimize()
def optimize_for_webserver(
self, statement_timeout: int, pool_recycle: int, max_overflow: int
) -> None:
# When running in dagster-webserver, hold an open connection
# https://github.com/dagster-io/dagster/issues/3719
self._engine = create_engine(
self.mysql_url,
isolation_level=mysql_isolation_level(),
pool_size=1,
pool_recycle=pool_recycle,
max_overflow=max_overflow,
)
@property
def inst_data(self) -> Optional[ConfigurableClassData]:
return self._inst_data
@classmethod
def config_type(cls) -> UserConfigSchema:
return mysql_config()
@classmethod
def from_config_value( # pyright: ignore[reportIncompatibleMethodOverride]
cls, inst_data: Optional[ConfigurableClassData], config_value: MySqlStorageConfig
) -> "MySQLScheduleStorage":
return MySQLScheduleStorage(
inst_data=inst_data, mysql_url=mysql_url_from_config(config_value)
)
@staticmethod
def wipe_storage(mysql_url: str) -> None:
engine = create_engine(
mysql_url, isolation_level=mysql_isolation_level(), poolclass=db_pool.NullPool
)
try:
ScheduleStorageSqlMetadata.drop_all(engine)
finally:
engine.dispose()
@staticmethod
def create_clean_storage(mysql_url: str) -> "MySQLScheduleStorage":
MySQLScheduleStorage.wipe_storage(mysql_url)
return MySQLScheduleStorage(mysql_url)
def connect(self) -> ContextManager[Connection]:
return create_mysql_connection(self._engine, __file__, "schedule")
@property
def supports_batch_queries(self) -> bool:
if not self._mysql_version:
return False
return parse_mysql_version(self._mysql_version) >= parse_mysql_version(
MINIMUM_MYSQL_BATCH_VERSION
)
def get_server_version(self) -> Optional[str]:
with self.connect() as conn:
row = conn.execute(db.text("select version()")).fetchone()
if not row:
return None
return cast("str", row[0])
def upgrade(self) -> None:
with self.connect() as conn:
alembic_config = mysql_alembic_config(__file__)
run_alembic_upgrade(alembic_config, conn)
def _add_or_update_instigators_table(self, conn: Connection, state) -> None:
selector_id = state.selector_id
conn.execute(
db_dialects.mysql.insert(InstigatorsTable)
.values(
selector_id=selector_id,
repository_selector_id=state.repository_selector_id,
status=state.status.value,
instigator_type=state.instigator_type.value,
instigator_body=serialize_value(state),
)
.on_duplicate_key_update(
status=state.status.value,
instigator_type=state.instigator_type.value,
instigator_body=serialize_value(state),
update_timestamp=get_current_datetime(),
)
)
def add_auto_materialize_asset_evaluations(
self,
evaluation_id: int,
asset_evaluations: Sequence[AutomationConditionEvaluationWithRunIds[EntityKey]],
) -> None:
if not asset_evaluations:
return
# Define the base insert statement
insert_stmt = db_dialects.mysql.insert(AssetDaemonAssetEvaluationsTable).values(
[
{
"evaluation_id": evaluation_id,
"asset_key": evaluation.key.to_db_string(),
"asset_evaluation_body": serialize_value(evaluation),
"num_requested": evaluation.num_requested,
}
for evaluation in asset_evaluations
]
)
# Define the upsert statement using the ON DUPLICATE KEY UPDATE syntax for MySQL
upsert_stmt = insert_stmt.on_duplicate_key_update(
asset_evaluation_body=insert_stmt.inserted.asset_evaluation_body,
num_requested=insert_stmt.inserted.num_requested,
)
with self.connect() as conn:
conn.execute(upsert_stmt)
def alembic_version(self) -> AlembicVersion:
alembic_config = mysql_alembic_config(__file__)
with self.connect() as conn:
return check_alembic_revision(alembic_config, conn)
| MySQLScheduleStorage |
python | django-haystack__django-haystack | test_haystack/test_loading.py | {
"start": 3016,
"end": 6149
} | class ____(TestCase):
@override_settings()
def test_init(self):
del settings.HAYSTACK_ROUTERS
cr = loading.ConnectionRouter()
self.assertEqual(
[str(route.__class__) for route in cr.routers],
["<class 'haystack.routers.DefaultRouter'>"],
)
@override_settings(HAYSTACK_ROUTERS=["haystack.routers.DefaultRouter"])
def test_router_override1(self):
cr = loading.ConnectionRouter()
self.assertEqual(
[str(route.__class__) for route in cr.routers],
["<class 'haystack.routers.DefaultRouter'>"],
)
@override_settings(HAYSTACK_ROUTERS=[])
def test_router_override2(self):
cr = loading.ConnectionRouter()
self.assertEqual(
[str(route.__class__) for route in cr.routers],
["<class 'haystack.routers.DefaultRouter'>"],
)
@override_settings(
HAYSTACK_ROUTERS=[
"test_haystack.mocks.MockMasterSlaveRouter",
"haystack.routers.DefaultRouter",
]
)
def test_router_override3(self):
cr = loading.ConnectionRouter()
self.assertEqual(
[str(route.__class__) for route in cr.routers],
[
"<class 'test_haystack.mocks.MockMasterSlaveRouter'>",
"<class 'haystack.routers.DefaultRouter'>",
],
)
@override_settings()
def test_actions1(self):
del settings.HAYSTACK_ROUTERS
cr = loading.ConnectionRouter()
self.assertEqual(cr.for_read(), "default")
self.assertEqual(cr.for_write(), ["default"])
@override_settings(
HAYSTACK_ROUTERS=[
"test_haystack.mocks.MockMasterSlaveRouter",
"haystack.routers.DefaultRouter",
]
)
def test_actions2(self):
cr = loading.ConnectionRouter()
self.assertEqual(cr.for_read(), "slave")
self.assertEqual(cr.for_write(), ["master", "default"])
@override_settings(
HAYSTACK_ROUTERS=[
"test_haystack.mocks.MockPassthroughRouter",
"test_haystack.mocks.MockMasterSlaveRouter",
"haystack.routers.DefaultRouter",
]
)
def test_actions3(self):
cr = loading.ConnectionRouter()
# Demonstrate pass-through
self.assertEqual(cr.for_read(), "slave")
self.assertEqual(cr.for_write(), ["master", "default"])
# Demonstrate that hinting can change routing.
self.assertEqual(cr.for_read(pass_through=False), "pass")
self.assertEqual(
cr.for_write(pass_through=False), ["pass", "master", "default"]
)
@override_settings(
HAYSTACK_ROUTERS=[
"test_haystack.mocks.MockMultiRouter",
"haystack.routers.DefaultRouter",
]
)
def test_actions4(self):
cr = loading.ConnectionRouter()
# Demonstrate that a router can return multiple backends in the "for_write" method
self.assertEqual(cr.for_read(), "default")
self.assertEqual(cr.for_write(), ["multi1", "multi2", "default"])
| ConnectionRouterTestCase |
python | walkccc__LeetCode | solutions/2055. Plates Between Candles/2055.py | {
"start": 0,
"end": 520
} | class ____:
def platesBetweenCandles(self, s: str, queries: list[list[int]]) -> list[int]:
ans = []
indices = [i for i, c in enumerate(s) if c == '|'] # indices of '|'
for left, right in queries:
l = bisect.bisect_left(indices, left)
r = bisect.bisect_right(indices, right) - 1
if l < r:
lengthBetweenCandles = indices[r] - indices[l] + 1
numCandles = r - l + 1
ans.append(lengthBetweenCandles - numCandles)
else:
ans.append(0)
return ans
| Solution |
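A usage sketch matching the LeetCode examples (the record above assumes import bisect and from typing import List are in scope):

sol = Solution()
assert sol.platesBetweenCandles("**|**|***|", [[2, 5], [5, 9]]) == [2, 3]
assert sol.platesBetweenCandles("***|**|*****|**||**|*", [[1, 17], [4, 5], [14, 17], [5, 11], [15, 16]]) == [9, 0, 0, 0, 0]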
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0054_add_builds_index_for_addons.py | {
"start": 119,
"end": 472
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0053_alter_version_build_data"),
]
operations = [
migrations.AlterIndexTogether(
name="build",
index_together={
("version", "state", "date", "success"),
},
),
]
| Migration |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_getlimits.py | {
"start": 1368,
"end": 1602
} | class ____(TestCase):
def test_singleton(self):
ftype = finfo(single)
ftype2 = finfo(single)
assert_equal(id(ftype), id(ftype2))
@skip(reason="torch.finfo is not a singleton. Why demanding it is?")
| TestSingle |
python | readthedocs__readthedocs.org | readthedocs/core/tests/test_signup.py | {
"start": 201,
"end": 1746
} | class ____(TestCase):
def setUp(self):
self.form_data = {
"email": "test123@gmail.com",
"username": "test123",
"password1": "123456",
"password2": "123456",
}
self.form_data_plus_checkbox = self.form_data.copy()
self.form_data_plus_checkbox["receive_newsletter"] = True
@patch("readthedocs.core.signals.requests.post")
def test_signup_without_checkbox_does_not_subscribe(self, mock_requests_post):
response = self.client.post("/accounts/signup/", data=self.form_data)
email_confirmed.send(
sender=None,
request=None,
email_address=EmailAddress.objects.get(email=self.form_data["email"]),
)
mock_requests_post.assert_not_called()
@patch("readthedocs.core.signals.requests.post")
def test_signup_calls_subscribe_api(self, mock_requests_post):
response = self.client.post(
"/accounts/signup/", data=self.form_data_plus_checkbox
)
email_confirmed.send(
sender=None,
request=None,
email_address=EmailAddress.objects.get(email=self.form_data["email"]),
)
mock_requests_post.assert_called_with(
settings.MAILERLITE_API_SUBSCRIBERS_URL,
json={
"email": self.form_data["email"],
"resubscribe": True,
},
headers={"X-MailerLite-ApiKey": settings.MAILERLITE_API_KEY},
timeout=3,
)
| TestNewsletterSignup |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/components.py | {
"start": 45459,
"end": 46150
} | class ____(_YAMLViewer, UserComponent):
"""
A component for displaying YAML data with syntax highlighting and collapsible sections.
This component provides a rich view of YAML data with proper formatting and syntax highlighting.
Example:
```python
from metaflow.cards import YAMLViewer, EventsTimeline
from metaflow import current
# Use in events timeline
events = EventsTimeline(title="Configuration Changes")
events.update({
"action": "config_update",
"config": YAMLViewer({
"database": {"host": "localhost", "port": 5432},
"features": ["auth", "logging"]
})
})
```
"""
pass
| YAMLViewer |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/multimethod_diamond_parent/package.py | {
"start": 206,
"end": 628
} | class ____(MultimethodBase):
"""This package is designed for use with Spack's multimethod test.
It has a bunch of test cases for the @when decorator that the
test uses.
"""
@when("@3.0")
def diamond_inheritance(self):
return "second_parent"
@when("@4.0, 2.0")
def diamond_inheritance(self):
return "should never be reached by diamond inheritance test"
| MultimethodDiamondParent |
python | pypa__hatch | tests/backend/metadata/test_core.py | {
"start": 13547,
"end": 19956
} | class ____:
def test_dynamic(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"readme": 9000, "dynamic": ["readme"]}})
with pytest.raises(
ValueError,
match="Metadata field `readme` cannot be both statically defined and listed in field `project.dynamic`",
):
_ = metadata.core.readme
@pytest.mark.parametrize("attribute", ["readme", "readme_content_type"])
def test_unknown_type(self, isolation, attribute):
metadata = ProjectMetadata(str(isolation), None, {"project": {"readme": 9000}})
with pytest.raises(TypeError, match="Field `project.readme` must be a string or a table"):
_ = getattr(metadata.core, attribute)
def test_default(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {}})
assert metadata.core.readme == metadata.core.readme == ""
assert metadata.core.readme_content_type == metadata.core.readme_content_type == "text/markdown"
assert metadata.core.readme_path == metadata.core.readme_path == ""
def test_string_path_unknown_content_type(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"readme": "foo"}})
with pytest.raises(
TypeError, match="Unable to determine the content-type based on the extension of readme file: foo"
):
_ = metadata.core.readme
def test_string_path_nonexistent(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"readme": "foo/bar.md"}})
with pytest.raises(OSError, match="Readme file does not exist: foo/bar\\.md"):
_ = metadata.core.readme
@pytest.mark.parametrize(
("extension", "content_type"), [(".md", "text/markdown"), (".rst", "text/x-rst"), (".txt", "text/plain")]
)
def test_string_correct(self, extension, content_type, temp_dir):
metadata = ProjectMetadata(str(temp_dir), None, {"project": {"readme": f"foo/bar{extension}"}})
file_path = temp_dir / "foo" / f"bar{extension}"
file_path.ensure_parent_dir_exists()
file_path.write_text("test content")
assert metadata.core.readme == metadata.core.readme == "test content"
assert metadata.core.readme_content_type == metadata.core.readme_content_type == content_type
assert metadata.core.readme_path == metadata.core.readme_path == f"foo/bar{extension}"
def test_table_content_type_missing(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"readme": {}}})
with pytest.raises(ValueError, match="Field `content-type` is required in the `project.readme` table"):
_ = metadata.core.readme
def test_table_content_type_not_string(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"readme": {"content-type": 5}}})
with pytest.raises(TypeError, match="Field `content-type` in the `project.readme` table must be a string"):
_ = metadata.core.readme
def test_table_content_type_not_unknown(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"readme": {"content-type": "foo"}}})
with pytest.raises(
ValueError,
match=(
"Field `content-type` in the `project.readme` table must be one of the following: "
"text/markdown, text/x-rst, text/plain"
),
):
_ = metadata.core.readme
def test_table_multiple_options(self, isolation):
metadata = ProjectMetadata(
str(isolation), None, {"project": {"readme": {"content-type": "text/markdown", "file": "", "text": ""}}}
)
with pytest.raises(ValueError, match="Cannot specify both `file` and `text` in the `project.readme` table"):
_ = metadata.core.readme
def test_table_no_option(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"readme": {"content-type": "text/markdown"}}})
with pytest.raises(ValueError, match="Must specify either `file` or `text` in the `project.readme` table"):
_ = metadata.core.readme
def test_table_file_not_string(self, isolation):
metadata = ProjectMetadata(
str(isolation), None, {"project": {"readme": {"content-type": "text/markdown", "file": 4}}}
)
with pytest.raises(TypeError, match="Field `file` in the `project.readme` table must be a string"):
_ = metadata.core.readme
def test_table_file_nonexistent(self, isolation):
metadata = ProjectMetadata(
str(isolation), None, {"project": {"readme": {"content-type": "text/markdown", "file": "foo/bar.md"}}}
)
with pytest.raises(OSError, match="Readme file does not exist: foo/bar\\.md"):
_ = metadata.core.readme
def test_table_file_correct(self, temp_dir):
metadata = ProjectMetadata(
str(temp_dir), None, {"project": {"readme": {"content-type": "text/markdown", "file": "foo/bar.markdown"}}}
)
file_path = temp_dir / "foo" / "bar.markdown"
file_path.ensure_parent_dir_exists()
file_path.write_text("test content")
assert metadata.core.readme == metadata.core.readme == "test content"
assert metadata.core.readme_content_type == metadata.core.readme_content_type == "text/markdown"
assert metadata.core.readme_path == metadata.core.readme_path == "foo/bar.markdown"
def test_table_text_not_string(self, isolation):
metadata = ProjectMetadata(
str(isolation), None, {"project": {"readme": {"content-type": "text/markdown", "text": 4}}}
)
with pytest.raises(TypeError, match="Field `text` in the `project.readme` table must be a string"):
_ = metadata.core.readme
def test_table_text_correct(self, isolation):
metadata = ProjectMetadata(
str(isolation), None, {"project": {"readme": {"content-type": "text/markdown", "text": "test content"}}}
)
assert metadata.core.readme == metadata.core.readme == "test content"
assert metadata.core.readme_content_type == metadata.core.readme_content_type == "text/markdown"
assert metadata.core.readme_path == metadata.core.readme_path == ""
| TestReadme |
python | ray-project__ray | python/ray/autoscaler/v2/schema.py | {
"start": 846,
"end": 999
} | class ____:
# The node resource usage.
usage: List[ResourceUsage]
# How long the node has been idle.
idle_time_ms: int
@dataclass
| NodeUsage |
python | doocs__leetcode | solution/2500-2599/2580.Count Ways to Group Overlapping Ranges/Solution2.py | {
"start": 0,
"end": 297
} | class ____:
def countWays(self, ranges: List[List[int]]) -> int:
ranges.sort()
mx = -1
mod = 10**9 + 7
ans = 1
for start, end in ranges:
if start > mx:
ans = ans * 2 % mod
mx = max(mx, end)
return ans
| Solution |
python | google__pytype | pytype/tools/analyze_project/config_test.py | {
"start": 1931,
"end": 3008
} | class ____(TestBase):
"""Test FileConfig."""
def test_config_file(self):
with test_utils.Tempdir() as d:
f = d.create_file('test.cfg', PYTYPE_CFG)
conf = config.FileConfig()
path = conf.read_from_file(f)
self.assertEqual(path, f)
self._validate_file_contents(conf, d.path)
def test_missing_config_file_section(self):
with test_utils.Tempdir() as d:
f = d.create_file('test.cfg', RANDOM_CFG)
conf = config.FileConfig()
path = conf.read_from_file(f)
self.assertIsNone(path)
self._validate_empty_contents(conf)
def test_read_nonexistent(self):
conf = config.FileConfig()
self.assertIsNone(
conf.read_from_file(
file_utils.replace_separator('/does/not/exist/test.cfg')))
self._validate_empty_contents(conf)
def test_read_bad_format(self):
conf = config.FileConfig()
with test_utils.Tempdir() as d:
f = d.create_file('test.cfg', 'ladadeda := squirrels')
self.assertIsNone(conf.read_from_file(f))
self._validate_empty_contents(conf)
| TestFileConfig |
python | encode__django-rest-framework | tests/test_middleware.py | {
"start": 1683,
"end": 2046
} | class ____:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
assert hasattr(request, 'user'), '`user` is not set on request'
assert request.user.is_authenticated, '`user` is not authenticated'
return response
| RequestUserMiddleware |
python | allegroai__clearml | clearml/backend_api/services/v2_9/events.py | {
"start": 20024,
"end": 21301
} | class ____(NonStrictDataModel):
"""
:param task: Task ID
:type task: str
:param metric: Metric name
:type metric: str
"""
_schema = {
"properties": {
"metric": {"description": "Metric name", "type": "string"},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "metric"],
"type": "object",
}
def __init__(self, task: str, metric: str, **kwargs: Any) -> None:
super(TaskMetric, self).__init__(**kwargs)
self.task = task
self.metric = metric
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("metric")
def metric(self) -> str:
return self._property_metric
@metric.setter
def metric(self, value: str) -> None:
if value is None:
self._property_metric = None
return
self.assert_isinstance(value, "metric", six.string_types)
self._property_metric = value
| TaskMetric |
python | ansible__ansible | test/integration/targets/strategy_host_pinned/callback_plugins/callback_host_count.py | {
"start": 209,
"end": 1622
} | class ____(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'callback_host_count'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._executing_hosts_counter = 0
def v2_playbook_on_task_start(self, task, is_conditional):
self._display.display(task.name or task.action)
if task.name == "start":
self._executing_hosts_counter += 1
# NOTE assumes 2 forks
num_forks = 2
if self._executing_hosts_counter > num_forks:
# Exception is caught and turned into just a warning in TQM,
# so raise BaseException to fail the test
# To prevent seeing false positives in case the exception handling
# in TQM is changed and BaseException is swallowed, print something
# and ensure the test fails in runme.sh in such a case.
self._display.display("host_pinned_test_failed")
raise BaseException(
"host_pinned test failed, number of hosts executing: "
f"{self._executing_hosts_counter}, expected: {num_forks}"
)
def v2_playbook_on_handler_task_start(self, task):
self._display.display(task.name or task.action)
def v2_runner_on_ok(self, result):
if result.task.name == "end":
self._executing_hosts_counter -= 1
| CallbackModule |
python | plotly__plotly.py | tests/test_optional/test_utils/test_utils.py | {
"start": 2577,
"end": 14683
} | class ____(TestCase):
def test_encode_as_plotly(self):
# should *fail* when object doesn't have `to_plotly_json` attribute
objs_without_attr = [1, "one", set(["a", "set"]), {"a": "dict"}, ["a", "list"]]
for obj in objs_without_attr:
self.assertRaises(
utils.NotEncodable, utils.PlotlyJSONEncoder.encode_as_plotly, obj
)
# should return without exception when obj has `to_plotly_json` attr
expected_res = "wedidit"
class ObjWithAttr(object):
def to_plotly_json(self):
return expected_res
res = utils.PlotlyJSONEncoder.encode_as_plotly(ObjWithAttr())
self.assertEqual(res, expected_res)
def test_encode_as_list(self):
# should *fail* when object doesn't have `tolist` method
objs_without_attr = [1, "one", set(["a", "set"]), {"a": "dict"}, ["a", "list"]]
for obj in objs_without_attr:
self.assertRaises(
utils.NotEncodable, utils.PlotlyJSONEncoder.encode_as_list, obj
)
# should return without exception when obj has `tolist` attr
expected_res = ["some", "list"]
class ObjWithAttr(object):
def tolist(self):
return expected_res
res = utils.PlotlyJSONEncoder.encode_as_list(ObjWithAttr())
self.assertEqual(res, expected_res)
def test_encode_as_pandas(self):
# should *fail* on things that are not specific pandas objects
not_pandas = ["giraffe", 6, float("nan"), ["a", "list"]]
for obj in not_pandas:
self.assertRaises(
utils.NotEncodable, utils.PlotlyJSONEncoder.encode_as_pandas, obj
)
# should succeed when we've got specific pandas thingies
res = utils.PlotlyJSONEncoder.encode_as_pandas(pd.NaT)
self.assertTrue(res is None)
def test_encode_as_numpy(self):
# should *fail* on non-numpy-y things
not_numpy = ["hippo", 8, float("nan"), {"a": "dict"}]
for obj in not_numpy:
self.assertRaises(
utils.NotEncodable, utils.PlotlyJSONEncoder.encode_as_numpy, obj
)
# should succeed with numpy-y-thingies
res = utils.PlotlyJSONEncoder.encode_as_numpy(np.ma.core.masked)
self.assertTrue(math.isnan(res))
def test_encode_as_datetime(self):
# should succeed with 'utcoffset', 'isoformat' and '__sub__' attrs
res = utils.PlotlyJSONEncoder.encode_as_datetime(datetime.datetime(2013, 10, 1))
self.assertEqual(res, "2013-10-01T00:00:00")
def test_encode_as_datetime_with_microsecond(self):
# should not include extraneous microsecond info if DNE
res = utils.PlotlyJSONEncoder.encode_as_datetime(
datetime.datetime(2013, 10, 1, microsecond=0)
)
self.assertEqual(res, "2013-10-01T00:00:00")
# should include microsecond info if present
res = utils.PlotlyJSONEncoder.encode_as_datetime(
datetime.datetime(2013, 10, 1, microsecond=10)
)
self.assertEqual(res, "2013-10-01T00:00:00.000010")
def test_encode_as_datetime_with_localized_tz(self):
# should convert tzinfo to utc. Note that in october, we're in EDT!
# therefore the 4 hour difference is correct.
naive_datetime = datetime.datetime(2013, 10, 1)
aware_datetime = pytz.timezone("US/Eastern").localize(naive_datetime)
res = utils.PlotlyJSONEncoder.encode_as_datetime(aware_datetime)
self.assertEqual(res, "2013-10-01T00:00:00-04:00")
def test_encode_as_date(self):
# should *fail* without 'utcoffset' and 'isoformat' and '__sub__' attrs
non_datetimes = ["noon", 56, "00:00:00"]
for obj in non_datetimes:
self.assertRaises(
utils.NotEncodable, utils.PlotlyJSONEncoder.encode_as_date, obj
)
# should work with a date
a_date = datetime.date(2013, 10, 1)
res = utils.PlotlyJSONEncoder.encode_as_date(a_date)
self.assertEqual(res, "2013-10-01")
# should also work with a date time without a utc offset!
res = utils.PlotlyJSONEncoder.encode_as_date(
datetime.datetime(2013, 10, 1, microsecond=10)
)
self.assertEqual(res, "2013-10-01 00:00:00.000010")
def test_encode_as_decimal(self):
# should work with decimal values
res = utils.PlotlyJSONEncoder.encode_as_decimal(decimal.Decimal(1.023452))
self.assertAlmostEqual(res, 1.023452) # Checks upto 7 decimal places
self.assertIsInstance(res, float)
def test_figure_json_encoding(self):
df = pd.DataFrame(columns=["col 1"], data=[1, 2, 3])
s1 = Scatter3d(x=numeric_list, y=np_list, z=mixed_list)
s2 = Scatter(x=df["col 1"])
data = Data([s1, s2])
figure = Figure(data=data)
js1 = _json.dumps(s1, cls=utils.PlotlyJSONEncoder, sort_keys=True)
js2 = _json.dumps(s2, cls=utils.PlotlyJSONEncoder, sort_keys=True)
assert (
js1 == '{"type": "scatter3d", "x": [1, 2, 3], '
'"y": [1, 2, 3, null, null, "2014-01-05T00:00:00"], '
'"z": [1, "A", "2014-01-05T00:00:00", '
'"2014-01-05T01:01:01", "2014-01-05T01:01:01.000001"]}'
)
assert js2 == '{"type": "scatter", "x": [1, 2, 3]}'
# Test JSON encoding works
_json.dumps(data, cls=utils.PlotlyJSONEncoder, sort_keys=True)
_json.dumps(figure, cls=utils.PlotlyJSONEncoder, sort_keys=True)
# Test data wasn't mutated
np_array = np.array([1, 2, 3, np_nan(), np_inf(), dt(2014, 1, 5)])
for k in range(len(np_array)):
if k == 3:
# check NaN
assert np.isnan(np_list[k]) and np.isnan(np_array[k])
else:
# non-NaN
assert np_list[k] == np_array[k]
assert set(data[0]["z"]) == set(
[
1,
"A",
dt(2014, 1, 5),
dt(2014, 1, 5, 1, 1, 1),
dt(2014, 1, 5, 1, 1, 1, 1),
]
)
def test_datetime_json_encoding(self):
j1 = _json.dumps(dt_list, cls=utils.PlotlyJSONEncoder)
assert (
j1 == '["2014-01-05T00:00:00", '
'"2014-01-05T01:01:01", '
'"2014-01-05T01:01:01.000001"]'
)
j2 = _json.dumps({"x": dt_list}, cls=utils.PlotlyJSONEncoder)
assert (
j2 == '{"x": ["2014-01-05T00:00:00", '
'"2014-01-05T01:01:01", '
'"2014-01-05T01:01:01.000001"]}'
)
def test_pandas_json_encoding(self):
j1 = _json.dumps(df["col 1"], cls=utils.PlotlyJSONEncoder)
print(j1)
print("\n")
assert j1 == '[1, 2, 3, "2014-01-05T00:00:00", null, null, null]'
# Test that data wasn't mutated
assert_series_equal(
df["col 1"],
pd.Series(
[1, 2, 3, dt(2014, 1, 5), pd.NaT, np_nan(), np_inf()], name="col 1"
),
)
j2 = _json.dumps(df.index, cls=utils.PlotlyJSONEncoder)
assert j2 == "[0, 1, 2, 3, 4, 5, 6]"
nat = [pd.NaT]
j3 = _json.dumps(nat, cls=utils.PlotlyJSONEncoder)
assert j3 == "[null]"
assert nat[0] is pd.NaT
j4 = _json.dumps(rng, cls=utils.PlotlyJSONEncoder)
assert j4 == '["2011-01-01T00:00:00", "2011-01-01T01:00:00"]'
j5 = _json.dumps(ts, cls=utils.PlotlyJSONEncoder)
assert j5 == "[1.5, 2.5]"
assert_series_equal(ts, pd.Series([1.5, 2.5], index=rng))
j6 = _json.dumps(ts.index, cls=utils.PlotlyJSONEncoder)
assert j6 == '["2011-01-01T00:00:00", "2011-01-01T01:00:00"]'
def test_encode_customdata_datetime_series(self):
df = pd.DataFrame(dict(t=pd.to_datetime(["2010-01-01", "2010-01-02"])))
# 1D customdata
fig = Figure(
Scatter(x=df["t"], customdata=df["t"]), layout=dict(template="none")
)
fig_json = _json.dumps(
fig, cls=utils.PlotlyJSONEncoder, separators=(",", ":"), sort_keys=True
)
self.assertTrue(
fig_json.startswith(
'{"data":[{"customdata":["2010-01-01T00:00:00.000000000","2010-01-02T00:00:00.000000000"]'
)
)
def test_encode_customdata_datetime_homogeneous_dataframe(self):
df = pd.DataFrame(
dict(
t1=pd.to_datetime(["2010-01-01", "2010-01-02"]),
t2=pd.to_datetime(["2011-01-01", "2011-01-02"]),
)
)
# 2D customdata
fig = Figure(
Scatter(x=df["t1"], customdata=df[["t1", "t2"]]),
layout=dict(template="none"),
)
fig_json = _json.dumps(
fig, cls=utils.PlotlyJSONEncoder, separators=(",", ":"), sort_keys=True
)
self.assertTrue(
fig_json.startswith(
'{"data":[{"customdata":'
'[["2010-01-01T00:00:00.000000000","2011-01-01T00:00:00.000000000"],'
'["2010-01-02T00:00:00.000000000","2011-01-02T00:00:00.000000000"]'
)
)
def test_encode_customdata_datetime_inhomogeneous_dataframe(self):
df = pd.DataFrame(
dict(
t=pd.to_datetime(["2010-01-01", "2010-01-02"]),
v=np.arange(2),
)
)
# 2D customdata
fig = Figure(
Scatter(x=df["t"], customdata=df[["t", "v"]]), layout=dict(template="none")
)
fig_json = _json.dumps(
fig, cls=utils.PlotlyJSONEncoder, separators=(",", ":"), sort_keys=True
)
self.assertTrue(
fig_json.startswith(
'{"data":[{"customdata":'
'[["2010-01-01T00:00:00",0],["2010-01-02T00:00:00",1]]'
)
)
def test_numpy_masked_json_encoding(self):
temp = [1, 2, np.ma.core.masked]
j1 = _json.dumps(temp, cls=utils.PlotlyJSONEncoder)
print(j1)
assert j1 == "[1, 2, null]"
def test_numpy_dates(self):
a = np.arange(np.datetime64("2011-07-11"), np.datetime64("2011-07-18"))
j1 = _json.dumps(a, cls=utils.PlotlyJSONEncoder)
assert (
j1 == '["2011-07-11", "2011-07-12", "2011-07-13", '
'"2011-07-14", "2011-07-15", "2011-07-16", '
'"2011-07-17"]'
)
def test_datetime_dot_date(self):
a = [datetime.date(2014, 1, 1), datetime.date(2014, 1, 2)]
j1 = _json.dumps(a, cls=utils.PlotlyJSONEncoder)
assert j1 == '["2014-01-01", "2014-01-02"]'
def test_numpy_datetime64(self):
a = pd.date_range("2011-07-11", "2011-07-13", freq="D").values
j1 = _json.dumps(a, cls=utils.PlotlyJSONEncoder)
assert (
j1 == '["2011-07-11T00:00:00.000000000", '
'"2011-07-12T00:00:00.000000000", '
'"2011-07-13T00:00:00.000000000"]'
)
def test_pil_image_encoding(self):
img_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"..",
"..",
"test_plotly_utils",
"resources",
"1x1-black.png",
)
with open(img_path, "rb") as f:
hex_bytes = base64.b64encode(f.read()).decode("ascii")
expected_uri = "data:image/png;base64," + hex_bytes
img = Image.open(img_path)
j1 = _json.dumps({"source": img}, cls=utils.PlotlyJSONEncoder)
assert j1 == '{"source": "%s"}' % expected_uri
def test_nan_to_null(self):
array = [1, float("NaN"), float("Inf"), float("-Inf"), "platypus"]
result = _json.dumps(array, cls=utils.PlotlyJSONEncoder)
expected_result = '[1, null, null, null, "platypus"]'
self.assertEqual(result, expected_result)
def test_invalid_encode_exception(self):
with self.assertRaises(TypeError):
_json.dumps({"a": {1}}, cls=utils.PlotlyJSONEncoder)
| TestJSONEncoder |
python | apache__airflow | providers/openlineage/tests/system/openlineage/example_openlineage_base_complex_dag.py | {
"start": 2479,
"end": 2786
} | class ____(BashOperator):
def __init__(self, **kwargs):
# Just to test that these attrs are included in OL event
self.deferrable = True
self.external_dag_id = "external_dag_id"
self.external_task_id = "external_task_id"
super().__init__(**kwargs)
| SomeCustomOperator |
python | getsentry__sentry | tests/sentry/workflow_engine/models/test_json_config_base.py | {
"start": 2254,
"end": 3068
} | class ____(JSONConfigBaseTest):
def test_detector_no_registration(self) -> None:
with pytest.raises(ValueError):
self.create_detector(name="test_detector", type="no_registration")
def test_detector_schema(self) -> None:
self.create_detector(name="test_detector", type="test", config=self.correct_config)
with pytest.raises(ValidationError):
self.create_detector(name="test_detector", type="test", config={"hi": "there"})
def test_detector_empty_schema(self) -> None:
self.create_detector(name="example_detector", type="example", config={})
with pytest.raises(ValidationError):
self.create_detector(name="test_detector", type="example", config={"hi": "there"})
# TODO - Move this to the workflow model test
| TestDetectorConfig |
python | pyqtgraph__pyqtgraph | pyqtgraph/multiprocess/remoteproxy.py | {
"start": 545,
"end": 679
} | class ____(UserWarning):
"""Emitted when a request to a remote object results in an Exception """
pass
| RemoteExceptionWarning |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 70204,
"end": 71133
} | class ____:
xl3Arrows = 1 # from enum XlIconSet
xl3ArrowsGray = 2 # from enum XlIconSet
xl3Flags = 3 # from enum XlIconSet
xl3Signs = 6 # from enum XlIconSet
xl3Stars = 18 # from enum XlIconSet
xl3Symbols = 7 # from enum XlIconSet
xl3Symbols2 = 8 # from enum XlIconSet
xl3TrafficLights1 = 4 # from enum XlIconSet
xl3TrafficLights2 = 5 # from enum XlIconSet
xl3Triangles = 19 # from enum XlIconSet
xl4Arrows = 9 # from enum XlIconSet
xl4ArrowsGray = 10 # from enum XlIconSet
xl4CRV = 12 # from enum XlIconSet
xl4RedToBlack = 11 # from enum XlIconSet
xl4TrafficLights = 13 # from enum XlIconSet
xl5Arrows = 14 # from enum XlIconSet
xl5ArrowsGray = 15 # from enum XlIconSet
xl5Boxes = 20 # from enum XlIconSet
xl5CRV = 16 # from enum XlIconSet
xl5Quarters = 17 # from enum XlIconSet
xlCustomSet = -1 # from enum XlIconSet
| IconSet |
python | django__django | tests/introspection/models.py | {
"start": 3104,
"end": 3341
} | class ____(models.Model):
name = models.CharField(max_length=15, db_comment="'Name' column comment")
class Meta:
db_table_comment = "Custom table comment"
required_db_features = {"supports_comments"}
| DbCommentModel |
python | streamlit__streamlit | lib/streamlit/components/v2/component_registry.py | {
"start": 1470,
"end": 10657
} | class ____:
"""Definition of a bidirectional component V2.
The definition holds inline content or file references for HTML, CSS, and
JavaScript, plus metadata used by the runtime to serve assets. When CSS/JS
are provided as file paths, their asset-dir-relative URLs are exposed via
``css_url`` and ``js_url`` (or can be overridden with
``css_asset_relative_path``/``js_asset_relative_path``).
Parameters
----------
name : str
A short, descriptive name for the component.
html : str or None, optional
HTML content as a string.
css : str or None, optional
Inline CSS content or an absolute/relative path to a ``.css`` file.
Relative paths are interpreted as asset-dir-relative and validated to
reside within the component's ``asset_dir``. Absolute paths are rejected
by the API.
js : str or None, optional
Inline JavaScript content or an absolute/relative path to a ``.js``
file. Relative paths are interpreted as asset-dir-relative and validated
to reside within the component's ``asset_dir``. Absolute paths are
rejected by the API.
css_asset_relative_path : str or None, optional
Asset-dir-relative URL path to use when serving the CSS file. If not
provided, the filename from ``css`` is used when ``css`` is file-backed.
js_asset_relative_path : str or None, optional
Asset-dir-relative URL path to use when serving the JS file. If not
provided, the filename from ``js`` is used when ``js`` is file-backed.
"""
name: str
html: str | None = None
css: str | None = None
js: str | None = None
# Store processed content and metadata
_has_css_path: bool = field(default=False, init=False, repr=False)
_has_js_path: bool = field(default=False, init=False, repr=False)
_source_paths: dict[str, str] = field(default_factory=dict, init=False, repr=False)
# Asset-dir-relative paths used for frontend loading. These represent the
# URL path segment under the component's declared asset_dir (e.g. "build/index.js")
# and are independent of the on-disk absolute file path stored in css/js.
css_asset_relative_path: str | None = None
js_asset_relative_path: str | None = None
def __post_init__(self) -> None:
# Keep track of source paths for content loaded from files
source_paths = {}
# Store CSS and JS paths if provided
is_css_path, css_path = self._is_file_path(self.css)
is_js_path, js_path = self._is_file_path(self.js)
if css_path:
source_paths["css"] = os.path.dirname(css_path)
if js_path:
source_paths["js"] = os.path.dirname(js_path)
object.__setattr__(self, "_has_css_path", is_css_path)
object.__setattr__(self, "_has_js_path", is_js_path)
object.__setattr__(self, "_source_paths", source_paths)
# Allow empty definitions to support manifest-registered components that
# declare only an asset sandbox (asset_dir) without inline or file-backed
# entry content. Runtime API calls can later provide js/css/html.
def _is_file_path(self, content: str | None) -> tuple[bool, str | None]:
"""Determine whether ``content`` is a filesystem path and resolve it.
For string inputs that look like paths (contain separators, prefixes, or
have common asset extensions), values are normally provided by the v2
public API, which resolves and validates asset-dir-relative inputs and
passes absolute paths here. When this dataclass is constructed
internally, callers must supply already-resolved absolute paths that
have passed the same validation rules upstream. Relative paths are not
accepted here.
Parameters
----------
content : str or None
The potential inline content or path.
Returns
-------
tuple[bool, str | None]
``(is_path, abs_path)`` where ``is_path`` indicates whether the
input was treated as a path and ``abs_path`` is the resolved
absolute path if a path, otherwise ``None``.
Raises
------
ValueError
If ``content`` is treated as a path but the file does not exist, or
if a non-absolute, path-like string is provided.
"""
if content is None:
return False, None
# Determine if it's a file path or inline content for strings
if isinstance(content, str):
stripped = content.strip()
is_likely_path = not ComponentPathUtils.looks_like_inline_content(stripped)
if is_likely_path:
if os.path.isabs(content):
abs_path = content
if not os.path.exists(abs_path):
raise ValueError(f"File does not exist: {abs_path}")
return True, abs_path
# Relative, path-like strings are not accepted at this layer.
raise ValueError(
"Relative file paths are not accepted in BidiComponentDefinition; "
"pass absolute, pre-validated paths from the v2 API."
)
# If we get here, it's content, not a path
return False, None
@property
def is_placeholder(self) -> bool:
"""Return True if this definition is a placeholder (no content).
Placeholders are typically created during the manifest scanning phase
when we discover a component's existence but haven't yet loaded its
content via the public API.
"""
return self.html is None and self.css is None and self.js is None
@property
def css_url(self) -> str | None:
"""Return the asset-dir-relative URL path for CSS when file-backed.
When present, servers construct
``/_stcore/bidi-components/<component>/<css_url>`` using this value. If
``css_asset_relative_path`` is specified, it takes precedence over the
filename derived from ``css``.
"""
return self._derive_asset_url(
has_path=self._has_css_path,
value=self.css,
override=self.css_asset_relative_path,
)
@property
def js_url(self) -> str | None:
"""Return the asset-dir-relative URL path for JS when file-backed.
When present, servers construct
``/_stcore/bidi-components/<component>/<js_url>`` using this value. If
``js_asset_relative_path`` is specified, it takes precedence over the
filename derived from ``js``.
"""
return self._derive_asset_url(
has_path=self._has_js_path,
value=self.js,
override=self.js_asset_relative_path,
)
def _derive_asset_url(
self, *, has_path: bool, value: str | None, override: str | None
) -> str | None:
"""Compute asset-dir-relative URL for a file-backed asset.
Parameters
----------
has_path
Whether the value refers to a file path.
value
The css/js field value (inline string or path).
override
Optional explicit asset-dir-relative override.
Returns
-------
str or None
The derived URL path or ``None`` if not file-backed.
"""
if not has_path:
return None
# Prefer explicit URL override if provided (relative to asset_dir)
if override:
return override
# Fallback: preserve relative subpath if the provided path is relative;
# otherwise default to the basename for absolute paths. Normalize
# leading "./" to avoid awkward prefixes in URLs.
path_str = str(value)
if os.path.isabs(path_str):
return os.path.basename(path_str)
norm = path_str.replace("\\", "/").removeprefix("./")
# If there's a subpath remaining, preserve it; otherwise use basename
return norm if "/" in norm else os.path.basename(norm)
@property
def css_content(self) -> str | None:
"""Return inline CSS content or ``None`` if file-backed or missing."""
if self._has_css_path or self.css is None:
return None
# Return as string if it's not a path
return str(self.css)
@property
def js_content(self) -> str | None:
"""Return inline JavaScript content or ``None`` if file-backed or missing."""
if self._has_js_path or self.js is None:
return None
# Return as string if it's not a path
return str(self.js)
@property
def html_content(self) -> str | None:
"""Return inline HTML content or ``None`` if not provided."""
return self.html
@property
def source_paths(self) -> dict[str, str]:
"""Return source directories for file-backed CSS/JS content.
The returned mapping contains keys like ``"js"`` and ``"css"`` with the
directory path from which each was loaded.
"""
return self._source_paths
| BidiComponentDefinition |