sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
pydantic/pydantic:pydantic-core/tests/validators/test_literal.py | import re
from enum import Enum
from typing import Any, Callable
import pytest
from pydantic_core import SchemaError, SchemaValidator, ValidationError, core_schema
from pydantic_core import core_schema as cs
from ..conftest import Err, PyAndJson, plain_repr
@pytest.mark.parametrize(
    'kwarg_expected,input_value,expected',
    [
        ([1], 1, 1),
        pytest.param(
            [1],
            2,
            Err(
                'Input should be 1 [type=literal_error, input_value=2, input_type=int]',
                [
                    {
                        'type': 'literal_error',
                        'loc': (),
                        'msg': 'Input should be 1',
                        'input': 2,
                        'ctx': {'expected': '1'},
                    }
                ],
            ),
            id='wrong-single-int',
        ),
        (['foo'], 'foo', 'foo'),
        pytest.param(
            ['foo'],
            'bar',
            Err(
                "Input should be 'foo' [type=literal_error, input_value='bar', input_type=str]",
                [
                    {
                        'type': 'literal_error',
                        'loc': (),
                        'msg': "Input should be 'foo'",
                        'input': 'bar',
                        'ctx': {'expected': "'foo'"},
                    }
                ],
            ),
            id='wrong-single-str',
        ),
        ([1, 2], 1, 1),
        ([1, 2], 2, 2),
        pytest.param(
            [1, 2],
            3,
            Err('Input should be 1 or 2 [type=literal_error, input_value=3, input_type=int]'),
            id='wrong-multiple-int',
        ),
        ([1, 2, 3, 4], 4, 4),
        pytest.param(
            [1, 2, 3, 4],
            5,
            Err(
                'Input should be 1, 2, 3 or 4 [type=literal_error, input_value=5, input_type=int]',
                [
                    {
                        'type': 'literal_error',
                        'loc': (),
                        'msg': 'Input should be 1, 2, 3 or 4',
                        'input': 5,
                        'ctx': {'expected': '1, 2, 3 or 4'},
                    }
                ],
            ),
            # renamed from the duplicate id 'wrong-multiple-int' (also used above for the
            # [1, 2] case) so each param is individually selectable with `pytest -k`
            id='wrong-multiple-int-4',
        ),
        (['a', 'b'], 'a', 'a'),
        pytest.param(
            ['a', 'b'],
            'c',
            Err("Input should be 'a' or 'b' [type=literal_error, input_value='c', input_type=str]"),
            id='wrong-multiple-str',
        ),
        ([1, '1'], 1, 1),
        ([1, '1'], '1', '1'),
        pytest.param(
            [1, '1'],
            '2',
            Err(
                "Input should be 1 or '1' [type=literal_error, input_value='2', input_type=str]",
                [
                    {
                        'type': 'literal_error',
                        'loc': (),
                        'msg': "Input should be 1 or '1'",
                        'input': '2',
                        'ctx': {'expected': "1 or '1'"},
                    }
                ],
            ),
            id='wrong-str-int',
        ),
    ],
)
def test_literal_py_and_json(py_and_json: PyAndJson, kwarg_expected, input_value, expected):
    """Literal validation accepts exact members and raises literal_error otherwise, in both python and JSON modes.

    When `expected` is an `Err`, the raised message must match and (if given) the
    structured error list must match exactly; otherwise the validated value must
    round-trip unchanged.
    """
    v = py_and_json({'type': 'literal', 'expected': kwarg_expected})
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)) as exc_info:
            v.validate_test(input_value)
        if expected.errors is not None:
            # debug(exc_info.value.errors(include_url=False))
            assert exc_info.value.errors(include_url=False) == expected.errors
    else:
        assert v.validate_test(input_value) == expected
@pytest.mark.parametrize(
    'kwarg_expected,input_value,expected',
    [
        ([1, b'whatever'], b'whatever', b'whatever'),
        ([(1, 2), (3, 4)], (1, 2), (1, 2)),
        ([(1, 2), (3, 4)], (3, 4), (3, 4)),
        pytest.param(
            [1, b'whatever'],
            3,
            Err("Input should be 1 or b'whatever' [type=literal_error, input_value=3, input_type=int]"),
            id='wrong-general',
        ),
        ([b'bite'], b'bite', b'bite'),
        pytest.param(
            [b'bite'],
            'spoon',
            Err(
                "Input should be b'bite' [type=literal_error, input_value='spoon', input_type=str]",
                [
                    # fixed: this error list previously duplicated the 'wrong-str-int' case
                    # from test_literal_py_and_json (msg "Input should be 1 or '1'", input '2'),
                    # which contradicted the Err message asserted just above and would make
                    # the structured-errors comparison fail
                    {
                        'type': 'literal_error',
                        'loc': (),
                        'msg': "Input should be b'bite'",
                        'input': 'spoon',
                        'ctx': {'expected': "b'bite'"},
                    }
                ],
            ),
            id='single-byte',
        ),
    ],
)
def test_literal_not_json(kwarg_expected, input_value, expected):
    """Literals over non-JSON types (bytes, tuples) validate in python mode only."""
    v = SchemaValidator(cs.literal_schema(expected=kwarg_expected))
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)) as exc_info:
            v.validate_python(input_value)
        if expected.errors is not None:
            # debug(exc_info.value.errors(include_url=False))
            assert exc_info.value.errors(include_url=False) == expected.errors
    else:
        assert v.validate_python(input_value) == expected
def test_build_error():
    """An empty `expected` list must be rejected when the validator is built."""
    empty_literal = cs.literal_schema(expected=[])
    with pytest.raises(SchemaError, match='SchemaError: `expected` should have length > 0'):
        SchemaValidator(empty_literal)
def test_literal_none():
    """Literal[None] matches only None, and the validator title names the value."""
    validator = SchemaValidator(core_schema.literal_schema([None]))
    assert validator.isinstance_python(None) is True
    assert validator.isinstance_python(0) is False
    # the repr title encodes the literal value
    expected_prefix = 'SchemaValidator(title="literal[None]"'
    assert plain_repr(validator).startswith(expected_prefix)
def test_union():
    """When every union branch fails, each branch contributes one error tagged by its loc."""
    validator = SchemaValidator(
        core_schema.union_schema([core_schema.literal_schema(['a', 'b']), core_schema.int_schema()])
    )
    assert validator.validate_python('a') == 'a'
    assert validator.validate_python(4) == 4
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python('c')
    # insert_assert(exc_info.value.errors(include_url=False))
    errors = exc_info.value.errors(include_url=False)
    assert errors == [
        {
            'type': 'literal_error',
            'loc': ("literal['a','b']",),
            'msg': "Input should be 'a' or 'b'",
            'input': 'c',
            'ctx': {'expected': "'a' or 'b'"},
        },
        {
            'type': 'int_parsing',
            'loc': ('int',),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'c',
        },
    ]
def test_enum_value():
    """A literal over a plain Enum member accepts only the member itself — never its raw value."""

    class FooEnum(Enum):
        foo = 'foo_value'
        bar = 'bar_value'

    validator = SchemaValidator(core_schema.literal_schema([FooEnum.foo]))
    assert validator.validate_python(FooEnum.foo) == FooEnum.foo

    def expected_errors(bad_input):
        # the same literal_error shape is produced for every rejected input
        return [
            {
                'type': 'literal_error',
                'loc': (),
                'msg': "Input should be <FooEnum.foo: 'foo_value'>",
                'input': bad_input,
                'ctx': {'expected': "<FooEnum.foo: 'foo_value'>"},
            }
        ]

    # the member's raw value is rejected in python mode ...
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python('foo_value')
    assert exc_info.value.errors(include_url=False) == expected_errors('foo_value')
    # ... as is an unrelated string ...
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python('unknown')
    assert exc_info.value.errors(include_url=False) == expected_errors('unknown')
    # ... and the raw value is rejected in JSON mode too
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_json('"foo_value"')
    assert exc_info.value.errors(include_url=False) == expected_errors('foo_value')
def test_str_enum_values():
    """A str-subclass enum literal accepts both the member and its raw string (python and JSON)."""

    class Foo(str, Enum):
        foo = 'foo_value'
        bar = 'bar_value'

    validator = SchemaValidator(core_schema.literal_schema([Foo.foo]))
    for accepted in (Foo.foo, 'foo_value'):
        assert validator.validate_python(accepted) == Foo.foo
    assert validator.validate_json('"foo_value"') == Foo.foo
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python('unknown')
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'literal_error',
            'loc': (),
            'msg': "Input should be <Foo.foo: 'foo_value'>",
            'input': 'unknown',
            'ctx': {'expected': "<Foo.foo: 'foo_value'>"},
        }
    ]
def test_int_enum_values():
    """An int-subclass enum literal accepts both the member and its raw int (python and JSON)."""

    class Foo(int, Enum):
        foo = 2
        bar = 3

    validator = SchemaValidator(core_schema.literal_schema([Foo.foo]))
    for accepted in (Foo.foo, 2):
        assert validator.validate_python(accepted) == Foo.foo
    assert validator.validate_json('2') == Foo.foo
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python(4)
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'literal_error',
            'loc': (),
            'msg': 'Input should be <Foo.foo: 2>',
            'input': 4,
            'ctx': {'expected': '<Foo.foo: 2>'},
        }
    ]
@pytest.mark.parametrize(
    'reverse, err',
    [
        # enum member listed before the plain int — error message keeps that order
        (
            lambda x: list(reversed(x)),
            [
                {
                    'type': 'literal_error',
                    'loc': (),
                    'msg': 'Input should be <Foo.foo: 1> or 1',
                    'input': 2,
                    'ctx': {'expected': '<Foo.foo: 1> or 1'},
                }
            ],
        ),
        # plain int listed first
        (
            lambda x: x,
            [
                {
                    'type': 'literal_error',
                    'loc': (),
                    'msg': 'Input should be 1 or <Foo.foo: 1>',
                    'input': 2,
                    'ctx': {'expected': '1 or <Foo.foo: 1>'},
                }
            ],
        ),
    ],
)
def test_mix_int_enum_with_int(reverse: Callable[[list[Any]], list[Any]], err: Any):
    """Mixing an int enum member with the equal plain int: the enum member validates to
    itself, while a plain 1 (python or JSON) validates to a plain int, never the member —
    regardless of the order the two appear in `expected`."""
    class Foo(int, Enum):
        foo = 1
    v = SchemaValidator(core_schema.literal_schema(reverse([1, Foo.foo])))
    assert v.validate_python(Foo.foo) is Foo.foo
    # a plain int input stays a plain int even though Foo.foo == 1
    val = v.validate_python(1)
    assert val == 1 and val is not Foo.foo
    val = v.validate_json('1')
    assert val == 1 and val is not Foo.foo
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(2)
    assert exc_info.value.errors(include_url=False) == err
@pytest.mark.parametrize(
    'reverse, err',
    [
        # enum member listed before the plain str — error message keeps that order
        (
            lambda x: list(reversed(x)),
            [
                {
                    'type': 'literal_error',
                    'loc': (),
                    'msg': "Input should be <Foo.foo: 'foo_val'> or 'foo_val'",
                    'input': 'bar_val',
                    'ctx': {'expected': "<Foo.foo: 'foo_val'> or 'foo_val'"},
                }
            ],
        ),
        # plain str listed first
        (
            lambda x: x,
            [
                {
                    'type': 'literal_error',
                    'loc': (),
                    'msg': "Input should be 'foo_val' or <Foo.foo: 'foo_val'>",
                    'input': 'bar_val',
                    'ctx': {'expected': "'foo_val' or <Foo.foo: 'foo_val'>"},
                }
            ],
        ),
    ],
)
def test_mix_str_enum_with_str(reverse: Callable[[list[Any]], list[Any]], err: Any):
    """Mixing a str enum member with the equal plain str: the member validates to itself,
    a plain 'foo_val' (python or JSON) validates to a plain str, never the member —
    regardless of the order the two appear in `expected`."""
    class Foo(str, Enum):
        foo = 'foo_val'
    v = SchemaValidator(core_schema.literal_schema(reverse(['foo_val', Foo.foo])))
    assert v.validate_python(Foo.foo) is Foo.foo
    # a plain str input stays a plain str even though Foo.foo == 'foo_val'
    val = v.validate_python('foo_val')
    assert val == 'foo_val' and val is not Foo.foo
    val = v.validate_json('"foo_val"')
    assert val == 'foo_val' and val is not Foo.foo
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python('bar_val')
    assert exc_info.value.errors(include_url=False) == err
def test_big_int():
    """Int literals beyond 64 and 128 bits validate, and appear verbatim in error messages."""
    big_int = 2**64 + 1
    massive_int = 2**128 + 1
    validator = SchemaValidator(core_schema.literal_schema([big_int, massive_int]))
    for value in (big_int, massive_int):
        assert validator.validate_python(value) == value
    message = r'Input should be 18446744073709551617 or 340282366920938463463374607431768211457 \[type=literal_error'
    with pytest.raises(ValidationError, match=message):
        validator.validate_python(37)
def test_enum_for_str() -> None:
    """A str-enum literal and a plain-str literal treat 'a' and S.a identically."""

    class S(str, Enum):
        a = 'a'

    validators = [
        SchemaValidator(core_schema.literal_schema([S.a])),
        SchemaValidator(core_schema.literal_schema(['a'])),
    ]
    for validator in validators:
        assert validator.validate_python('a') == 'a'
        assert validator.validate_python(S.a) == 'a'
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_literal.py",
"license": "MIT License",
"lines": 361,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_model.py | import re
import sys
from collections import defaultdict
from copy import deepcopy
from decimal import Decimal
from typing import Any, Callable, Union
import pytest
from dirty_equals import HasRepr, IsInstance
from pydantic_core import CoreConfig, SchemaError, SchemaValidator, ValidationError, core_schema
from pydantic_core.core_schema import ExtraBehavior
def test_model_class():
    """A model schema builds MyModel instances, filling __dict__, __pydantic_fields_set__
    and leaving __pydantic_extra__ as None; strict mode accepts the same dict input."""
    class MyModel:
        # this is not required, but it avoids `__pydantic_fields_set__` being included in `__dict__`
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        field_b: int
    v = SchemaValidator(
        core_schema.model_schema(
            MyModel,
            core_schema.model_fields_schema(
                {
                    'field_a': core_schema.model_field(core_schema.str_schema()),
                    'field_b': core_schema.model_field(core_schema.int_schema()),
                }
            ),
        )
    )
    assert repr(v).startswith('SchemaValidator(title="MyModel", validator=Model(\n')
    m = v.validate_python({'field_a': 'test', 'field_b': 12})
    assert isinstance(m, MyModel)
    assert m.field_a == 'test'
    assert m.field_b == 12
    assert m.__pydantic_extra__ is None
    assert m.__pydantic_fields_set__ == {'field_a', 'field_b'}
    assert m.__dict__ == {'field_a': 'test', 'field_b': 12}
    # strict mode behaves the same for an already-correct dict input
    m2 = v.validate_python({'field_a': 'test', 'field_b': 12}, strict=True)
    assert isinstance(m2, MyModel)
    assert m2.field_a == 'test'
    assert m2.field_b == 12
    assert m2.__pydantic_extra__ is None
    assert m2.__pydantic_fields_set__ == {'field_a', 'field_b'}
    assert m2.__dict__ == {'field_a': 'test', 'field_b': 12}
@pytest.mark.parametrize(
    'schema_extra_behavior,validate_fn_extra_kw',
    [
        # extra='allow' either baked into the schema or passed at validate time
        ('allow', None),
        ('ignore', 'allow'),
    ],
)
def test_model_class_extra(schema_extra_behavior: ExtraBehavior, validate_fn_extra_kw: Union[ExtraBehavior, None]):
    """With extra allowed (via schema or validate kwarg), unknown keys land in
    __pydantic_extra__ and are counted in __pydantic_fields_set__, not in __dict__."""
    class MyModel:
        # this is not required, but it avoids `__pydantic_fields_set__` being included in `__dict__`
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        field_b: int
    v = SchemaValidator(
        core_schema.model_schema(
            MyModel,
            core_schema.model_fields_schema(
                {
                    'field_a': core_schema.model_field(core_schema.str_schema()),
                    'field_b': core_schema.model_field(core_schema.int_schema()),
                },
                extra_behavior=schema_extra_behavior,
            ),
        )
    )
    m = v.validate_python({'field_a': 'test', 'field_b': 12, 'field_c': 'extra'}, extra=validate_fn_extra_kw)
    assert isinstance(m, MyModel)
    assert m.field_a == 'test'
    assert m.field_b == 12
    assert m.__pydantic_extra__ == {'field_c': 'extra'}
    assert m.__pydantic_fields_set__ == {'field_a', 'field_b', 'field_c'}
    assert m.__dict__ == {'field_a': 'test', 'field_b': 12}
@pytest.mark.parametrize(
    'schema_extra_behavior,validate_fn_extra_kw',
    [
        # extra='forbid' either baked into the schema or passed at validate time
        ('forbid', None),
        ('ignore', 'forbid'),
    ],
)
def test_model_class_extra_forbid(
    schema_extra_behavior: ExtraBehavior, validate_fn_extra_kw: Union[ExtraBehavior, None]
):
    """With extra forbidden, unknown dict keys raise extra_forbidden, while
    from_attributes revalidation of a valid instance still succeeds."""
    class MyModel:
        # NOTE(review): nested class attribute — presumably here so from_attributes
        # iteration over class attributes doesn't treat it as an extra field; confirm
        class Meta:
            pass
        # this is not required, but it avoids `__pydantic_fields_set__` being included in `__dict__`
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        field_b: int
    # attribute-only proxy: forwards dir() and attribute access to the wrapped model
    class Wrapper:
        def __init__(self, inner):
            self._inner = inner
        def __dir__(self):
            return dir(self._inner)
        def __getattr__(self, key):
            return getattr(self._inner, key)
    v = SchemaValidator(
        core_schema.model_schema(
            MyModel,
            core_schema.model_fields_schema(
                {
                    'field_a': core_schema.model_field(core_schema.str_schema()),
                    'field_b': core_schema.model_field(core_schema.int_schema()),
                },
                extra_behavior=schema_extra_behavior,
            ),
        )
    )
    m = v.validate_python({'field_a': 'test', 'field_b': 12}, extra=validate_fn_extra_kw)
    assert isinstance(m, MyModel)
    assert m.field_a == 'test'
    assert m.field_b == 12
    # try revalidating from the model's attributes
    m = v.validate_python(Wrapper(m), from_attributes=True, extra=validate_fn_extra_kw)
    with pytest.raises(ValidationError) as exc_info:
        m = v.validate_python({'field_a': 'test', 'field_b': 12, 'field_c': 'extra'}, extra=validate_fn_extra_kw)
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'extra_forbidden', 'loc': ('field_c',), 'msg': 'Extra inputs are not permitted', 'input': 'extra'}
    ]
@pytest.mark.parametrize('extra_behavior', ['allow', 'ignore', 'forbid'])
def test_model_class_extra_forbid_from_attributes(extra_behavior: str):
    """With from_attributes, extra attributes on the input object are ignored for every
    extra_behavior setting — they never raise and never land on the model."""
    # iterating attributes includes much more than just __dict__, so need
    # careful interaction with __extra__
    class MyModel:
        # this is not required, but it avoids `__pydantic_fields_set__` being included in `__dict__`
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        field_b: int
    class Data:
        # https://github.com/pydantic/pydantic/issues/9242
        class Meta:
            pass
        def __init__(self, **values):
            self.__dict__.update(values)
    v = SchemaValidator(
        core_schema.model_schema(
            MyModel,
            core_schema.model_fields_schema(
                {
                    'field_a': core_schema.model_field(core_schema.str_schema()),
                    'field_b': core_schema.model_field(core_schema.int_schema()),
                },
                extra_behavior=extra_behavior,
                from_attributes=True,
            ),
        )
    )
    m = v.validate_python(Data(field_a='test', field_b=12))
    assert isinstance(m, MyModel)
    assert m.field_a == 'test'
    assert m.field_b == 12
    # with from_attributes, extra is basically ignored
    m = v.validate_python(Data(field_a='test', field_b=12, field_c='extra'))
    assert isinstance(m, MyModel)
    assert m.field_a == 'test'
    assert m.field_b == 12
    assert not hasattr(m, 'field_c')
def test_model_class_setattr():
    """Model construction writes fields directly and never routes through __setattr__."""
    setattr_calls = []
    class MyModel:
        field_a: str
        # record every attribute assignment attempt; intentionally store nothing
        def __setattr__(self, key, value):
            setattr_calls.append((key, value))
            # don't do anything
    # sanity check: normal attribute assignment does go through __setattr__
    m1 = MyModel()
    m1.foo = 'bar'
    assert not hasattr(m1, 'foo')
    assert setattr_calls == [('foo', 'bar')]
    setattr_calls.clear()
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            schema=core_schema.model_fields_schema(
                fields={'field_a': core_schema.model_field(schema=core_schema.str_schema())}
            ),
        )
    )
    m = v.validate_python({'field_a': 'test'})
    assert isinstance(m, MyModel)
    assert m.field_a == 'test'
    assert m.__pydantic_fields_set__ == {'field_a'}
    # validation populated the field without calling __setattr__
    assert setattr_calls == []
def test_model_class_root_validator_wrap():
    """A wrap validator around model_fields_schema sees the raw input dict; an assertion
    failure inside it surfaces as an assertion_error at the model root (loc ())."""
    class MyModel:
        def __init__(self, **kwargs: Any) -> None:
            self.__dict__.update(kwargs)
    def f(
        input_value: dict[str, Any],
        validator: Callable[[dict[str, Any]], dict[str, Any]],
        info: core_schema.ValidationInfo,
    ):
        # runs before the inner fields validation; `validator` invokes it
        assert input_value['field_a'] == 123
        output = validator(input_value)
        return output
    schema = core_schema.model_schema(
        MyModel,
        core_schema.with_info_wrap_validator_function(
            f, core_schema.model_fields_schema({'field_a': core_schema.model_field(core_schema.int_schema())})
        ),
    )
    v = SchemaValidator(schema)
    m = v.validate_python({'field_a': 123})
    assert m.field_a == 123
    with pytest.raises(ValidationError) as e:
        v.validate_python({'field_a': 456})
    assert e.value.errors(include_url=False) == [
        {
            'type': 'assertion_error',
            'loc': (),
            'msg': 'Assertion failed, assert 456 == 123',
            'input': {'field_a': 456},
            'ctx': {'error': HasRepr(repr(AssertionError('assert 456 == 123')))},
        }
    ]
def test_model_class_root_validator_before():
    """A before validator on a model sees the raw input dict; an assertion failure inside
    it surfaces as an assertion_error at the model root (loc ())."""
    class MyModel:
        def __init__(self, **kwargs: Any) -> None:
            self.__dict__.update(kwargs)
    def f(input_value: dict[str, Any], info: core_schema.ValidationInfo):
        assert input_value['field_a'] == 123
        return input_value
    schema = core_schema.model_schema(
        MyModel,
        core_schema.with_info_before_validator_function(
            f, core_schema.model_fields_schema({'field_a': core_schema.model_field(core_schema.int_schema())})
        ),
    )
    v = SchemaValidator(schema)
    m = v.validate_python({'field_a': 123})
    assert m.field_a == 123
    with pytest.raises(ValidationError) as e:
        v.validate_python({'field_a': 456})
    assert e.value.errors(include_url=False) == [
        {
            'type': 'assertion_error',
            'loc': (),
            'msg': 'Assertion failed, assert 456 == 123',
            'input': {'field_a': 456},
            'ctx': {'error': HasRepr(repr(AssertionError('assert 456 == 123')))},
        }
    ]
def test_model_class_root_validator_after():
    """An after validator on a model receives the validated fields as a 3-tuple; an
    assertion failure inside it surfaces as an assertion_error at the model root."""
    class MyModel:
        def __init__(self, **kwargs: Any) -> None:
            self.__dict__.update(kwargs)
    # the value is unpacked into three parts below: (fields dict, extra, fields_set);
    # annotation widened accordingly (was a 2-tuple, which contradicted the unpack)
    def f(input_value_and_fields_set: tuple[dict[str, Any], Any, set[str]]):
        input_value, _, _ = input_value_and_fields_set
        assert input_value['field_a'] == 123
        return input_value_and_fields_set
    schema = core_schema.model_schema(
        MyModel,
        core_schema.no_info_after_validator_function(
            f, core_schema.model_fields_schema({'field_a': core_schema.model_field(core_schema.int_schema())})
        ),
    )
    v = SchemaValidator(schema)
    m = v.validate_python({'field_a': 123})
    assert m.field_a == 123
    with pytest.raises(ValidationError) as e:
        v.validate_python({'field_a': 456})
    assert e.value.errors(include_url=False) == [
        {
            'type': 'assertion_error',
            'loc': (),
            'msg': 'Assertion failed, assert 456 == 123',
            'input': {'field_a': 456},
            'ctx': {'error': HasRepr(repr(AssertionError('assert 456 == 123')))},
        }
    ]
@pytest.mark.parametrize('mode', ['before', 'after', 'wrap'])
def test_function_ask(mode):
    """Building a model schema whose inner schema is a with-info function validator
    (before/after/wrap) must succeed — only construction is exercised here."""
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
    def f(input_value, info):
        return input_value
    SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            schema={
                'type': f'function-{mode}',
                'function': {'type': 'with-info', 'function': f},
                'schema': core_schema.model_fields_schema(
                    fields={'field_a': core_schema.model_field(schema=core_schema.str_schema())}
                ),
            },
        )
    )
def test_function_plain_ask():
    """A plain function returning (dict, extra, fields_set) populates the model directly."""

    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'

    def build(input_value):
        # (model __dict__, __pydantic_extra__, __pydantic_fields_set__)
        return input_value, {1: 2}, {'field_a'}

    validator = SchemaValidator(
        core_schema.model_schema(cls=MyModel, schema=core_schema.no_info_plain_validator_function(build))
    )
    instance = validator.validate_python({'field_a': 'test'})
    assert isinstance(instance, MyModel)
    assert instance.__dict__ == {'field_a': 'test'}
    assert instance.__pydantic_extra__ == {1: 2}
    assert instance.__pydantic_fields_set__ == {'field_a'}
def test_union_sub_schema():
    """A model whose inner schema is a union of fields schemas builds the model from
    whichever choice matches the input keys."""
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            schema=core_schema.union_schema(
                choices=[
                    core_schema.model_fields_schema(
                        fields={'foo': core_schema.model_field(schema=core_schema.int_schema())}
                    ),
                    core_schema.model_fields_schema(
                        fields={'bar': core_schema.model_field(schema=core_schema.int_schema())}
                    ),
                ]
            ),
        )
    )
    # first choice matches; '123' is coerced to int
    m = v.validate_python({'foo': '123'})
    assert isinstance(m, MyModel)
    assert m.__dict__ == {'foo': 123}
    assert m.__pydantic_fields_set__ == {'foo'}
    # second choice matches
    m = v.validate_python({'bar': '123'})
    assert isinstance(m, MyModel)
    assert m.__dict__ == {'bar': 123}
    assert m.__pydantic_fields_set__ == {'bar'}
def test_tagged_union_sub_schema():
    """A model whose inner schema is a tagged union dispatches on the 'foo' discriminator;
    without __slots__ the pydantic bookkeeping ends up inside __dict__ as well."""
    class MyModel:
        pass
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            schema=core_schema.tagged_union_schema(
                discriminator='foo',
                choices={
                    'apple': core_schema.model_fields_schema(
                        fields={
                            'foo': core_schema.model_field(schema=core_schema.str_schema()),
                            'bar': core_schema.model_field(schema=core_schema.int_schema()),
                        }
                    ),
                    'banana': core_schema.model_fields_schema(
                        fields={
                            'foo': core_schema.model_field(schema=core_schema.str_schema()),
                            'spam': core_schema.model_field(
                                schema=core_schema.list_schema(items_schema=core_schema.int_schema())
                            ),
                        }
                    ),
                },
            ),
        )
    )
    m = v.validate_python({'foo': 'apple', 'bar': '123'})
    assert isinstance(m, MyModel)
    # MyModel has no __slots__, so the dunder attributes land in __dict__ too
    assert m.__dict__ == {
        'foo': 'apple',
        'bar': 123,
        '__pydantic_fields_set__': {'foo', 'bar'},
        '__pydantic_extra__': None,
        '__pydantic_private__': None,
    }
    m = v.validate_python({'foo': 'banana', 'spam': [1, 2, 3]})
    assert isinstance(m, MyModel)
    # insert_assert(m.__dict__)
    assert m.__dict__ == {
        'foo': 'banana',
        'spam': [1, 2, 3],
        '__pydantic_fields_set__': {'spam', 'foo'},
        '__pydantic_extra__': None,
        '__pydantic_private__': None,
    }
def test_bad_sub_schema():
    """A model wrapping a non-fields inner schema fails with TypeError at validation time."""

    class MyModel:
        pass

    validator = SchemaValidator(core_schema.model_schema(cls=MyModel, schema=core_schema.int_schema()))
    with pytest.raises(TypeError):
        validator.validate_python(123)
def test_model_class_function_after():
    """An after validator can mutate the fields dict in place before the model is built;
    mutations appear in __dict__ but not in __pydantic_fields_set__."""
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
    def f(input_value, info):
        # input_value is the (fields dict, extra, fields_set) tuple; mutate the dict
        input_value[0]['x'] = 'y'
        return input_value
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            schema={
                'type': 'function-after',
                'function': {'type': 'with-info', 'function': f},
                'schema': core_schema.model_fields_schema(
                    fields={'field_a': core_schema.model_field(schema=core_schema.str_schema())}
                ),
            },
        )
    )
    m = v.validate_python({'field_a': 'test'})
    assert isinstance(m, MyModel)
    assert m.__dict__ == {'field_a': 'test', 'x': 'y'}
    assert m.__pydantic_fields_set__ == {'field_a'}
def test_model_class_not_type():
    """`cls` must be an actual type; passing an int is rejected at schema-build time."""
    expected_message = re.escape("TypeError: 'int' object is not an instance of 'type'")
    with pytest.raises(SchemaError, match=expected_message):
        SchemaValidator(
            schema=core_schema.model_schema(
                cls=123,
                schema=core_schema.model_fields_schema(
                    fields={'field_a': core_schema.model_field(schema=core_schema.str_schema())}
                ),
            )
        )
def test_model_class_instance_direct():
    """An existing instance of the model class passes through unrevalidated
    (default revalidate behavior), keeping its original attribute values."""
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        def __init__(self):
            self.field_a = 'init'
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            schema=core_schema.model_fields_schema(
                fields={'field_a': core_schema.model_field(schema=core_schema.str_schema())}
            ),
        )
    )
    m1 = v.validate_python({'field_a': 'test'})
    assert isinstance(m1, MyModel)
    assert m1.field_a == 'test'
    assert m1.__pydantic_fields_set__ == {'field_a'}
    # an existing instance is accepted as-is — 'init' is not revalidated away
    m2 = MyModel()
    m3 = v.validate_python(m2)
    assert m2 == m3
    assert m3.field_a == 'init'
def test_model_class_instance_subclass():
    """A subclass instance is returned unchanged (same object, post_init NOT called);
    dict input builds a fresh MyModel and does call post_init with the context."""
    post_init_calls = []
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        def __init__(self):
            self.field_a = 'init_a'
        def model_post_init(self, context):
            post_init_calls.append(context)
    class MySubModel(MyModel):
        field_b: str
        def __init__(self):
            super().__init__()
            self.field_b = 'init_b'
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            schema=core_schema.model_fields_schema(
                fields={'field_a': core_schema.model_field(schema=core_schema.str_schema())}
            ),
            post_init='model_post_init',
        )
    )
    m2 = MySubModel()
    assert m2.field_a
    m3 = v.validate_python(m2, context='call1')
    # identity preserved, no revalidation, no post_init for instance input
    assert m2 is m3
    assert m3.field_a == 'init_a'
    assert m3.field_b == 'init_b'
    assert post_init_calls == []
    m4 = v.validate_python({'field_a': b'hello'}, context='call2')
    assert isinstance(m4, MyModel)
    assert m4.field_a == 'hello'
    assert m4.__pydantic_fields_set__ == {'field_a'}
    assert post_init_calls == ['call2']
def test_model_class_instance_subclass_revalidate():
    """With revalidate_instances='always', a subclass instance is rebuilt as a new object:
    subclass-only fields are dropped, __pydantic_fields_set__ is carried over from the
    input instance, and post_init runs each time."""
    post_init_calls = []
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        def __init__(self):
            self.field_a = 'init_a'
        def model_post_init(self, context):
            post_init_calls.append(context)
    class MySubModel(MyModel):
        field_b: str
        # class-level defaults for the pydantic bookkeeping the revalidator reads
        __pydantic_fields_set__ = set()
        __pydantic_extra__ = None
        def __init__(self):
            super().__init__()
            self.field_b = 'init_b'
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            schema=core_schema.model_fields_schema(
                fields={'field_a': core_schema.model_field(schema=core_schema.str_schema())}
            ),
            post_init='model_post_init',
            revalidate_instances='always',
        )
    )
    m2 = MySubModel()
    assert m2.field_a
    m2.__pydantic_extra__ = {}
    m2.__pydantic_fields_set__ = set()
    m3 = v.validate_python(m2, context='call1')
    # revalidation produces a fresh object without the subclass-only field
    assert m2 is not m3
    assert m3.field_a == 'init_a'
    assert not hasattr(m3, 'field_b')
    assert post_init_calls == ['call1']
    m4 = MySubModel()
    m4.__pydantic_extra__ = {}
    m4.__pydantic_fields_set__ = {'fruit_loop'}
    m5 = v.validate_python(m4, context='call2')
    assert m4 is not m5
    # the input instance's fields_set is preserved verbatim, even if bogus
    assert m5.__pydantic_fields_set__ == {'fruit_loop'}
    assert m5.field_a == 'init_a'
    assert not hasattr(m5, 'field_b')
    assert post_init_calls == ['call1', 'call2']
def test_model_class_strict():
    """In strict mode instances (including subclass instances) pass through without
    revalidation, while dict input is still validated field by field."""
    class MyModel:
        def __init__(self):
            self.field_a = 'init_a'
            self.field_b = 'init_b'
    v = SchemaValidator(
        core_schema.model_schema(
            strict=True,
            cls=MyModel,
            schema=core_schema.model_fields_schema(
                fields={
                    'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                    'field_b': core_schema.model_field(schema=core_schema.int_schema()),
                }
            ),
        )
    )
    assert re.search(r'revalidate: \w+', repr(v)).group(0) == 'revalidate: Never'
    m = MyModel()
    m2 = v.validate_python(m)
    assert isinstance(m, MyModel)
    assert m is m2
    assert m.field_a == 'init_a'
    # note that since dict validation was not run here, there has been no check this is an int
    assert m.field_b == 'init_b'
    m3 = v.validate_python({'field_a': 'test', 'field_b': 12})
    assert isinstance(m3, MyModel)
    assert m3.field_a == 'test'
    assert m3.field_b == 12
    class MySubModel(MyModel):
        field_c: str
        def __init__(self):
            super().__init__()
            self.field_c = 'init_c'
    # instances of subclasses are allowed in strict mode
    m3 = MySubModel()
    m4 = v.validate_python(m3)
    assert m4 is m3
def test_model_class_strict_json():
    """Strict JSON validation still coerces "123" to int and applies field defaults;
    defaulted fields are excluded from __pydantic_fields_set__."""
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        field_b: int
        field_c: int
    v = SchemaValidator(
        core_schema.model_schema(
            strict=True,
            cls=MyModel,
            schema=core_schema.model_fields_schema(
                fields={
                    'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                    'field_b': core_schema.model_field(schema=core_schema.int_schema()),
                    'field_c': core_schema.model_field(
                        schema=core_schema.with_default_schema(default=42, schema=core_schema.int_schema())
                    ),
                }
            ),
        )
    )
    m = v.validate_json('{"field_a": "foobar", "field_b": "123"}')
    assert isinstance(m, MyModel)
    assert m.field_a == 'foobar'
    assert m.field_b == 123
    # default applied; field_c is therefore not in fields_set
    assert m.field_c == 42
    assert m.__pydantic_fields_set__ == {'field_a', 'field_b'}
def test_internal_error():
    """Using a cls that cannot take a __dict__ (int) surfaces an AttributeError during validation."""
    validator = SchemaValidator(
        core_schema.model_schema(
            cls=int,
            schema=core_schema.model_fields_schema(
                fields={'f': core_schema.model_field(schema=core_schema.int_schema())}
            ),
        )
    )
    with pytest.raises(AttributeError, match=re.escape("'int' object has no attribute '__dict__'")):
        validator.validate_python({'f': 123})
def test_revalidate_always():
    """revalidate_instances='always' rebuilds instance input as a new object, re-running
    field validation; it requires __pydantic_fields_set__ to be present on the input."""
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        def __init__(self, a, b, fields_set):
            self.field_a = a
            self.field_b = b
            self.__pydantic_extra__ = {}
            # fields_set=None leaves __pydantic_fields_set__ unset to test the error path
            if fields_set is not None:
                self.__pydantic_fields_set__ = fields_set
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            revalidate_instances='always',
            schema=core_schema.model_fields_schema(
                fields={
                    'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                    'field_b': core_schema.model_field(schema=core_schema.int_schema()),
                }
            ),
        )
    )
    assert re.search(r'revalidate: \w+', repr(v)).group(0) == 'revalidate: Always'
    m = v.validate_python({'field_a': 'test', 'field_b': 12})
    assert isinstance(m, MyModel)
    assert m.__dict__ == {'field_a': 'test', 'field_b': 12}
    assert m.__pydantic_fields_set__ == {'field_a', 'field_b'}
    m2 = MyModel('x', 42, {'field_a'})
    m3 = v.validate_python(m2)
    assert isinstance(m3, MyModel)
    # a new object is produced and the input's fields_set is carried over
    assert m3 is not m2
    assert m3.__dict__ == {'field_a': 'x', 'field_b': 42}
    assert m3.__pydantic_fields_set__ == {'field_a'}
    # revalidation re-runs field validation, so bad values now fail
    m4 = MyModel('x', 'not int', {'field_a'})
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(m4)
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('field_b',),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'not int',
        }
    ]
    m5 = MyModel('x', 5, None)
    with pytest.raises(AttributeError, match='__pydantic_fields_set__'):
        v.validate_python(m5)
def test_revalidate_subclass_instances():
    """revalidate_instances='subclass-instances' passes exact-class instances through
    untouched but rebuilds subclass instances as the base class, dropping extra fields
    and re-running field validation."""
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        def __init__(self):
            self.field_a = 'init_a'
            self.field_b = 123
    class MySubModel(MyModel):
        def __init__(self):
            super().__init__()
            self.field_c = 'init_c'
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            revalidate_instances='subclass-instances',
            schema=core_schema.model_fields_schema(
                fields={
                    'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                    'field_b': core_schema.model_field(schema=core_schema.int_schema()),
                }
            ),
        )
    )
    # exact class: identity preserved
    m1 = MyModel()
    m2 = v.validate_python(m1)
    assert m2 is m1
    # subclass: rebuilt as a plain MyModel without the subclass-only field
    m3 = MySubModel()
    m3.__pydantic_extra__ = {}
    m3.__pydantic_fields_set__ = set()
    assert hasattr(m3, 'field_c')
    m4 = v.validate_python(m3)
    assert m4 is not m3
    assert type(m4) is MyModel
    assert not hasattr(m4, 'field_c')
    # subclass revalidation re-runs field validation
    m5 = MySubModel()
    m5.__pydantic_extra__ = {}
    m5.__pydantic_fields_set__ = set()
    m5.field_b = 'not an int'
    with pytest.raises(ValidationError, match="type=int_parsing, input_value='not an int', input_type=str"):
        v.validate_python(m5)
def test_revalidate_extra():
    """With extra='allow' and config revalidate_instances='always', extras survive
    revalidation: the rebuilt object keeps the input's __pydantic_extra__ and fields_set."""
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            schema=core_schema.model_fields_schema(
                extra_behavior='allow',
                fields={
                    'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                    'field_b': core_schema.model_field(schema=core_schema.int_schema()),
                },
            ),
            config=CoreConfig(revalidate_instances='always'),
        )
    )
    m = v.validate_python({'field_a': 'test', 'field_b': 12, 'more': (1, 2, 3)})
    assert isinstance(m, MyModel)
    assert m.__dict__ == {'field_a': 'test', 'field_b': 12}
    assert m.__pydantic_extra__ == {'more': (1, 2, 3)}
    assert m.__pydantic_fields_set__ == {'field_a', 'field_b', 'more'}
    m2 = MyModel(field_a='x', field_b=42)
    m2.__pydantic_extra__ = {'another': 42.5}
    m2.__pydantic_fields_set__ = {'field_a', 'field_b', 'another'}
    m3 = v.validate_python(m2)
    assert isinstance(m3, MyModel)
    assert m3 is not m2
    assert m3.__dict__ == {'field_a': 'x', 'field_b': 42}
    assert m3.__pydantic_extra__ == {'another': 42.5}
    assert m3.__pydantic_fields_set__ == {'field_a', 'field_b', 'another'}
def test_post_init():
    """The configured post_init method is called exactly once after validation, with a
    single argument (the context, None here), and sees the fully-populated instance."""
    call_count = 0
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        field_b: int
        def call_me_maybe(self, *args):
            nonlocal call_count
            call_count += 1
            # only the context is passed, and fields are already set at this point
            assert len(args) == 1
            context = args[0]
            assert context is None
            assert self.field_a == 'test'
            assert self.field_b == 12
            assert self.__pydantic_fields_set__ == {'field_a', 'field_b'}
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            post_init='call_me_maybe',
            schema=core_schema.model_fields_schema(
                fields={
                    'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                    'field_b': core_schema.model_field(schema=core_schema.int_schema()),
                }
            ),
        )
    )
    m = v.validate_python({'field_a': 'test', 'field_b': 12})
    assert isinstance(m, MyModel)
    assert call_count == 1
def test_revalidate_post_init():
    """post_init runs on initial validation and again when an instance is revalidated (revalidate_instances='always')."""
    call_count = 0
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        def call_me_maybe(self, context):
            nonlocal call_count
            call_count += 1
            assert context is None
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            post_init='call_me_maybe',
            schema=core_schema.model_fields_schema(
                fields={
                    'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                    'field_b': core_schema.model_field(schema=core_schema.int_schema()),
                }
            ),
            config=CoreConfig(revalidate_instances='always'),
        )
    )
    # the validator repr reflects the configured revalidation mode
    assert re.search(r'revalidate: \w+', repr(v)).group(0) == 'revalidate: Always'
    m = v.validate_python({'field_a': 'test', 'field_b': 12})
    assert isinstance(m, MyModel)
    assert m.__dict__ == {'field_a': 'test', 'field_b': 12}
    assert m.__pydantic_fields_set__ == {'field_a', 'field_b'}
    assert call_count == 1
    m2 = MyModel()
    m2.field_a = 'x'
    m2.field_b = 42
    m2.__pydantic_extra__ = {}
    m2.__pydantic_fields_set__ = {'field_a'}
    m3 = v.validate_python(m2)
    assert isinstance(m3, MyModel)
    assert m3 is not m2
    assert m3.__dict__ == {'field_a': 'x', 'field_b': 42}
    assert m3.__pydantic_fields_set__ == {'field_a'}
    # the hook was invoked a second time for the revalidated instance
    assert call_count == 2
def test_post_init_validation_error():
    """A ValueError raised inside post_init surfaces as a ValidationError of type 'value_error'."""
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        def call_me_maybe(self, context, **kwargs):
            # only raise when the caller passes a context containing 'error'
            if context and 'error' in context:
                raise ValueError(f'this is broken: {self.field_a}')
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            post_init='call_me_maybe',
            schema=core_schema.model_fields_schema(
                fields={'field_a': core_schema.model_field(schema=core_schema.str_schema())}
            ),
        )
    )
    m = v.validate_python({'field_a': 'test'})
    assert isinstance(m, MyModel)
    assert m.field_a == 'test'
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python({'field_a': 'test'}, strict=None, context={'error': 1})
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'value_error',
            'loc': (),
            'msg': 'Value error, this is broken: test',
            'input': {'field_a': 'test'},
            'ctx': {'error': HasRepr(repr(ValueError('this is broken: test')))},
        }
    ]
def test_post_init_internal_error():
    """A post_init method that cannot accept the context argument raises TypeError, not ValidationError."""
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        def wrong_signature(self):
            pass
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            post_init='wrong_signature',
            schema=core_schema.model_fields_schema(
                fields={'field_a': core_schema.model_field(schema=core_schema.str_schema())}
            ),
        )
    )
    with pytest.raises(TypeError, match=r'wrong_signature\(\) takes 1 positional argument but 2 were given'):
        v.validate_python({'field_a': 'test'})
def test_post_init_mutate():
    """post_init may mutate the instance; changes to fields and __pydantic_fields_set__ are kept."""
    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        field_b: int
        def call_me_maybe(self, context, **kwargs):
            self.field_a *= 2
            self.__pydantic_fields_set__ = {'field_a'}
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            post_init='call_me_maybe',
            schema=core_schema.model_fields_schema(
                fields={
                    'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                    'field_b': core_schema.model_field(schema=core_schema.int_schema()),
                }
            ),
        )
    )
    m = v.validate_python({'field_a': 'test', 'field_b': 12})
    assert isinstance(m, MyModel)
    # the hook's mutations are visible on the returned model
    assert m.field_a == 'testtest'
    assert m.field_b == 12
    assert m.__pydantic_fields_set__ == {'field_a'}
    assert m.__dict__ == {'field_a': 'testtest', 'field_b': 12}
def test_validate_assignment():
    """validate_assignment revalidates a single field in place and extends __pydantic_fields_set__."""
    class MyModel:
        # this is not required, but it avoids `__pydantic_fields_set__` being included in `__dict__`
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        field_b: int
        def __init__(self):
            self.__pydantic_extra__ = None
    v = SchemaValidator(
        core_schema.model_schema(
            MyModel,
            core_schema.model_fields_schema(
                {
                    'field_a': core_schema.model_field(core_schema.str_schema()),
                    'field_b': core_schema.model_field(core_schema.int_schema()),
                },
                extra_behavior='allow',
            ),
            extra_behavior='allow',
        )
    )
    m = MyModel()
    m.field_a = 'hello'
    m.field_b = 123
    m.__pydantic_fields_set__ = {'field_a'}
    v.validate_assignment(m, 'field_b', '321')
    # fix: this line was `m.field_a = 'hello'` — an assignment instead of an assertion,
    # so the test never verified that field_a is untouched by assigning to field_b
    assert m.field_a == 'hello'
    assert m.field_b == 321
    assert m.__pydantic_fields_set__ == {'field_a', 'field_b'}
    v.validate_assignment(m, 'field_b', '322', from_attributes=True)
    assert m.field_b == 322
    # try deleting a field
    del m.field_b
    # assignment to `field_a` should not care about `field_b` missing
    v.validate_assignment(m, 'field_a', 'hello world', from_attributes=True)
    assert m.field_a == 'hello world'
def test_validate_assignment_function():
    """Per-field validator functions also run on validate_assignment; info.data then holds the other fields."""
    class MyModel:
        # this is not required, but it avoids `__pydantic_fields_set__` being included in `__dict__`
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        field_b: int
        field_c: int
    calls: list[Any] = []
    def func(x, info):
        calls.append(str(info))
        return x * 2
    v = SchemaValidator(
        core_schema.model_schema(
            MyModel,
            core_schema.model_fields_schema(
                {
                    'field_a': core_schema.model_field(core_schema.str_schema()),
                    'field_b': core_schema.model_field(
                        core_schema.with_info_after_validator_function(func, core_schema.int_schema())
                    ),
                    'field_c': core_schema.model_field(core_schema.int_schema()),
                }
            ),
        )
    )
    m = v.validate_python({'field_a': 'x', 'field_b': 123, 'field_c': 456})
    assert m.field_a == 'x'
    assert m.field_b == 246
    assert m.field_c == 456
    assert m.__pydantic_fields_set__ == {'field_a', 'field_b', 'field_c'}
    assert calls == ["ValidationInfo(config=None, context=None, data={'field_a': 'x'}, field_name='field_b')"]
    v.validate_assignment(m, 'field_b', '111')
    assert m.field_b == 222
    # on assignment, data contains the already-validated sibling fields
    assert calls == [
        "ValidationInfo(config=None, context=None, data={'field_a': 'x'}, field_name='field_b')",
        "ValidationInfo(config=None, context=None, data={'field_a': 'x', 'field_c': 456}, field_name='field_b')",
    ]
def test_validate_assignment_no_fields_set():
    """validate_assignment works on models without __pydantic_fields_set__ and does not create one."""
    class MyModel:
        __slots__ = ('__dict__', '__pydantic_extra__')
        def __init__(self):
            self.__pydantic_extra__ = None
    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            schema=core_schema.model_fields_schema(
                fields={
                    'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                    'field_b': core_schema.model_field(schema=core_schema.int_schema()),
                }
            ),
        )
    )
    m = MyModel()
    m.field_a = 'hello'
    m.field_b = 123
    assert not hasattr(m, '__pydantic_fields_set__')
    v.validate_assignment(m, 'field_a', b'different')
    # fix: this line was `m.field_a = 'different'` — it overwrote the validated value
    # instead of asserting that the bytes input was coerced to str by the assignment
    assert m.field_a == 'different'
    assert m.field_b == 123
    assert not hasattr(m, '__pydantic_fields_set__')
    # wrong arguments
    with pytest.raises(AttributeError, match="'str' object has no attribute '__dict__'"):
        v.validate_assignment('field_a', 'field_a', b'different')
def test_frozen():
    """frozen=True on the model schema makes validate_assignment fail with a 'frozen_instance' error."""
    class MyModel:
        __slots__ = {'__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'}
    v = SchemaValidator(
        core_schema.model_schema(
            MyModel,
            core_schema.model_fields_schema({'f': core_schema.model_field(core_schema.str_schema())}),
            frozen=True,
        )
    )
    m = v.validate_python({'f': 'x'})
    assert m.f == 'x'
    with pytest.raises(ValidationError) as exc_info:
        v.validate_assignment(m, 'f', 'y')
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'frozen_instance', 'loc': (), 'msg': 'Instance is frozen', 'input': 'y'}
    ]
@pytest.mark.parametrize(
    'function_schema,call1, call2',
    [
        (
            core_schema.with_info_after_validator_function,
            (({'a': 1, 'b': 2}, None, {'b'}), 'ValidationInfo(config=None, context=None, data=None, field_name=None)'),
            (({'a': 10, 'b': 2}, None, {'a'}), "ValidationInfo(config=None, context=None, data=None, field_name='a')"),
        ),
        (
            core_schema.with_info_before_validator_function,
            ({'b': 2}, 'ValidationInfo(config=None, context=None, data=None, field_name=None)'),
            ({'a': 10, 'b': 2}, "ValidationInfo(config=None, context=None, data=None, field_name='a')"),
        ),
        (
            core_schema.with_info_wrap_validator_function,
            ({'b': 2}, 'ValidationInfo(config=None, context=None, data=None, field_name=None)'),
            ({'a': 10, 'b': 2}, "ValidationInfo(config=None, context=None, data=None, field_name='a')"),
        ),
    ],
)
def test_validate_assignment_model_validator_function(function_schema: Any, call1: Any, call2: Any):
    """
    Test handling of values and fields_set for validator functions that wrap a model when using
    validate_assignment.
    Note that we are currently not exposing this functionality in conjunction with getting
    access to `fields_set` in a model validator, so the behavior of fields set here is
    provisional and may change.
    In particular, for function_after it is not clear if the fields set passed to
    the validator should be the fields that were assigned on this call to `validate_assignment`
    (currently always a single field) or the fields that have been assigned in the
    model since it was created.
    """
    class Model:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
    calls: list[Any] = []
    def f(values_or_values_and_fields_set: Any, *args: Any) -> Any:
        if len(args) == 2:
            # wrap
            handler, info = args
            calls.append((deepcopy(values_or_values_and_fields_set), str(info)))
            return handler(values_or_values_and_fields_set)
        else:
            info = args[0]
            calls.append((deepcopy(values_or_values_and_fields_set), str(info)))
            return values_or_values_and_fields_set
    v = SchemaValidator(
        core_schema.model_schema(
            Model,
            function_schema(
                f,
                core_schema.model_fields_schema(
                    {
                        'a': core_schema.model_field(
                            core_schema.with_default_schema(core_schema.int_schema(), default=1)
                        ),
                        'b': core_schema.model_field(core_schema.int_schema()),
                    }
                ),
            ),
        )
    )
    m = v.validate_python({'b': 2})
    assert m.a == 1
    assert m.b == 2
    assert m.__pydantic_fields_set__ == {'b'}
    assert calls == [call1]
    v.validate_assignment(m, 'a', 10)
    assert m.a == 10
    assert m.b == 2
    assert m.__pydantic_fields_set__ == {'a', 'b'}
    assert calls == [call1, call2]
def test_model_error():
    """Wrong input types produce 'model_type' errors; dict, instance and JSON object inputs all validate."""
    class MyModel:
        # this is not required, but it avoids `__pydantic_fields_set__` being included in `__dict__`
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        field_b: int
    v = SchemaValidator(
        core_schema.model_schema(
            MyModel,
            core_schema.model_fields_schema(
                {
                    'field_a': core_schema.model_field(core_schema.str_schema()),
                    'field_b': core_schema.model_field(core_schema.int_schema()),
                },
                model_name='MyModel',
            ),
        )
    )
    m = v.validate_python({'field_a': 'test', 'field_b': 12})
    assert isinstance(m, MyModel)
    assert m.__dict__ == {'field_a': 'test', 'field_b': 12}
    m2 = MyModel()
    m2.field_a = '1'
    m2.field_b = 2
    m3 = v.validate_python(m2)
    assert isinstance(m3, MyModel)
    assert m3.__dict__ == {'field_a': '1', 'field_b': 2}
    m4 = v.validate_json('{"field_a": "3", "field_b": 4}')
    assert isinstance(m4, MyModel)
    assert m4.__dict__ == {'field_a': '3', 'field_b': 4}
    class OtherModel:
        pass
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(OtherModel())
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'model_type',
            'loc': (),
            'msg': 'Input should be a valid dictionary or instance of MyModel',
            'input': IsInstance(OtherModel),
            'ctx': {'class_name': 'MyModel'},
        }
    ]
    with pytest.raises(ValidationError) as exc_info:
        v.validate_json('123')
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'model_type',
            'loc': (),
            'msg': 'Input should be an object',
            'input': 123,
            'ctx': {'class_name': 'MyModel'},
        }
    ]
@pytest.mark.skipif(
    sys.version_info >= (3, 13),
    reason='Python 3.13+ enum initialization is different, see https://github.com/python/cpython/blob/ec610069637d56101896803a70d418a89afe0b4b/Lib/enum.py#L1159-L1163',
)
def test_model_with_enum_int_field_validation_should_succeed_for_any_type_equality_checks():
    """Enum-valued fields accept any input that compares equal to a member (Decimal, custom __eq__ types)."""
    # GIVEN
    from enum import Enum
    class EnumClass(Enum):
        enum_value = 1
        enum_value_2 = 2
        enum_value_3 = 3
    class IntWrappable:
        # wrapper whose __eq__ delegates to the wrapped int, so it compares equal to enum values
        def __init__(self, value: int):
            self.value = value
        def __eq__(self, other: object) -> bool:
            return self.value == other
    class MyModel:
        __slots__ = (
            '__dict__',
            '__pydantic_fields_set__',
            '__pydantic_extra__',
            '__pydantic_private__',
        )
        enum_field: EnumClass
    # WHEN
    v = SchemaValidator(
        core_schema.model_schema(
            MyModel,
            core_schema.model_fields_schema(
                {
                    'enum_field': core_schema.model_field(
                        core_schema.enum_schema(EnumClass, list(EnumClass.__members__.values()))
                    ),
                    'enum_field_2': core_schema.model_field(
                        core_schema.enum_schema(EnumClass, list(EnumClass.__members__.values()))
                    ),
                    'enum_field_3': core_schema.model_field(
                        core_schema.enum_schema(EnumClass, list(EnumClass.__members__.values()))
                    ),
                }
            ),
        )
    )
    # THEN
    v.validate_json('{"enum_field": 1, "enum_field_2": 2, "enum_field_3": 3}')
    m = v.validate_python(
        {
            'enum_field': Decimal(1),
            'enum_field_2': Decimal(2),
            'enum_field_3': IntWrappable(3),
        }
    )
    v.validate_assignment(m, 'enum_field', Decimal(1))
    v.validate_assignment(m, 'enum_field_2', Decimal(2))
    v.validate_assignment(m, 'enum_field_3', IntWrappable(3))
def test_model_from_defaultdict():
    """A defaultdict input must not silently supply missing fields via its default_factory."""
    # https://github.com/pydantic/pydantic/issues/12376
    class MyModel:
        def __init__(self, **kwargs: Any) -> None:
            self.__dict__.update(kwargs)
    schema = core_schema.model_schema(
        MyModel, core_schema.model_fields_schema({'field_a': core_schema.model_field(core_schema.int_schema())})
    )
    v = SchemaValidator(schema)
    with pytest.raises(ValidationError) as exc_info:
        # the defaultdict should not provide default values for missing fields
        v.validate_python(defaultdict(int))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'missing',
            'loc': ('field_a',),
            'msg': 'Field required',
            'input': defaultdict(int),
        }
    ]
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_model.py",
"license": "MIT License",
"lines": 1164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_model_fields.py | import math
import os
import re
import sys
from collections.abc import Mapping
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Union
import pytest
from dirty_equals import FunctionCheck, HasRepr, IsStr
from pydantic_core import CoreConfig, SchemaError, SchemaValidator, ValidationError, core_schema
from pydantic_core.core_schema import ExtraBehavior
from ..conftest import Err, PyAndJson
class Cls:
    """Simple attribute bag used as a `from_attributes` input object in tests."""

    def __init__(self, **attributes):
        # plain instances: kwargs become instance attributes directly
        self.__dict__.update(attributes)

    def __repr__(self):
        body = ', '.join(f'{name}={value!r}' for name, value in self.__dict__.items())
        return f'Cls({body})'
class Map(Mapping):
    """Minimal read-only Mapping backed by keyword arguments, used to test mapping inputs."""

    def __init__(self, **kwargs):
        self._d = kwargs

    def __iter__(self):
        yield from self._d

    def __len__(self) -> int:
        return len(self._d)

    def __getitem__(self, k, /):
        return self._d[k]

    def __repr__(self):
        body = ', '.join(f'{key}={value!r}' for key, value in self._d.items())
        return f'Map({body})'
def test_simple():
    """A plain model-fields schema coerces each field and returns (fields, extra, fields_set)."""
    validator = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                'field_b': core_schema.model_field(schema=core_schema.int_schema()),
            }
        )
    )
    model_dict, model_extra, fields_set = validator.validate_python({'field_a': b'abc', 'field_b': 1})
    # bytes input for field_a is coerced to str
    assert model_dict == {'field_a': 'abc', 'field_b': 1}
    assert model_extra is None
    assert fields_set == {'field_a', 'field_b'}
def test_strict():
    """With strict=True in the config, str/int fields reject non-exact types and report per-field errors."""
    v = SchemaValidator(
        {
            'type': 'model-fields',
            'fields': {
                'field_a': {'type': 'model-field', 'schema': {'type': 'str'}},
                'field_b': {'type': 'model-field', 'schema': {'type': 'int'}},
            },
        },
        CoreConfig(strict=True),
    )
    assert v.validate_python({'field_a': 'hello', 'field_b': 12}) == (
        {'field_a': 'hello', 'field_b': 12},
        None,
        {'field_a', 'field_b'},
    )
    with pytest.raises(ValidationError) as exc_info:
        assert v.validate_python({'field_a': 123, 'field_b': '123'})
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'string_type', 'loc': ('field_a',), 'msg': 'Input should be a valid string', 'input': 123},
        {'type': 'int_type', 'loc': ('field_b',), 'msg': 'Input should be a valid integer', 'input': '123'},
    ]
def test_with_default():
    """A field wrapped in with_default_schema becomes optional; explicit values are recorded in fields_set."""
    validator = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                'field_b': core_schema.model_field(
                    schema=core_schema.with_default_schema(schema=core_schema.int_schema(), default=666)
                ),
            }
        )
    )
    # default applied; only field_a counts as "set"
    model_dict, model_extra, fields_set = validator.validate_python({'field_a': b'abc'})
    assert model_dict == {'field_a': 'abc', 'field_b': 666}
    assert model_extra is None
    assert fields_set == {'field_a'}
    # explicit value overrides the default and appears in fields_set
    model_dict, model_extra, fields_set = validator.validate_python({'field_a': b'abc', 'field_b': 1})
    assert model_dict == {'field_a': 'abc', 'field_b': 1}
    assert model_extra is None
    assert fields_set == {'field_b', 'field_a'}
def test_missing_error(pydantic_version):
    """A missing required field reports `missing`, appending the errors URL unless disabled by env var."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                'field_b': core_schema.model_field(schema=core_schema.int_schema()),
            }
        )
    )
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python({'field_a': b'abc'})
    assert (
        str(exc_info.value)
        == """\
1 validation error for model-fields
field_b
  Field required [type=missing, input_value={'field_a': b'abc'}, input_type=dict]"""
        + (
            f'\n    For further information visit https://errors.pydantic.dev/{pydantic_version}/v/missing'
            if os.environ.get('PYDANTIC_ERRORS_INCLUDE_URL', '1') != 'false'
            else ''
        )
    )
@pytest.mark.parametrize(
    'config,input_value,expected',
    [
        ({}, {'a': '123'}, ({'a': 123, 'b': 4.2}, None, {'a'})),
        ({}, Map(a=123), ({'a': 123, 'b': 4.2}, None, {'a'})),
        ({}, {b'a': '123'}, Err('Field required [type=missing,')),
        ({}, {'a': '123', 'c': 4}, ({'a': 123, 'b': 4.2}, None, {'a'})),
        (CoreConfig(extra_fields_behavior='allow'), {'a': '123', 'c': 4}, ({'a': 123, 'b': 4.2}, {'c': 4}, {'a', 'c'})),
        (
            CoreConfig(extra_fields_behavior='allow'),
            {'a': '123', b'c': 4},
            Err('Keys should be strings [type=invalid_key,'),
        ),
        (
            CoreConfig(strict=True),
            Map(a=123),
            Err('Input should be a valid dictionary or instance of Model [type=model_type,'),
        ),
        ({}, {'a': '123', 'b': '4.7'}, ({'a': 123, 'b': 4.7}, None, {'a', 'b'})),
        ({}, {'a': '123', 'b': 'nan'}, ({'a': 123, 'b': FunctionCheck(math.isnan)}, None, {'a', 'b'})),
        (
            CoreConfig(allow_inf_nan=False),
            {'a': '123', 'b': 'nan'},
            Err('Input should be a finite number [type=finite_number,'),
        ),
    ],
    ids=repr,
)
def test_config(config: CoreConfig, input_value, expected):
    """Core-config options (extra_fields_behavior, strict, allow_inf_nan) change model-fields validation."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'a': core_schema.model_field(schema=core_schema.int_schema()),
                'b': core_schema.model_field(
                    schema=core_schema.with_default_schema(schema=core_schema.float_schema(), default=4.2)
                ),
            }
        ),
        config=config,
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            val = v.validate_python(input_value)
            print(f'UNEXPECTED OUTPUT: {val!r}')
    else:
        result = v.validate_python(input_value)
        assert result == expected
def test_ignore_extra():
    """By default unknown input keys are silently dropped and excluded from fields_set."""
    validator = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                'field_b': core_schema.model_field(schema=core_schema.int_schema()),
            }
        )
    )
    model_dict, model_extra, fields_set = validator.validate_python({'field_a': b'123', 'field_b': 1, 'field_c': 123})
    # 'field_c' is ignored entirely
    assert model_dict == {'field_a': '123', 'field_b': 1}
    assert model_extra is None
    assert fields_set == {'field_b', 'field_a'}
def test_forbid_extra():
    """With extra_behavior='forbid', unknown input keys produce an 'extra_forbidden' error."""
    validator = SchemaValidator(
        core_schema.model_fields_schema(
            fields={'field_a': core_schema.model_field(schema=core_schema.str_schema())}, extra_behavior='forbid'
        )
    )
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python({'field_a': 'abc', 'field_b': 1})
    expected_error = {
        'type': 'extra_forbidden',
        'loc': ('field_b',),
        'msg': 'Extra inputs are not permitted',
        'input': 1,
    }
    assert exc_info.value.errors(include_url=False) == [expected_error]
def test_allow_extra_invalid():
    """extras_schema / extras_keys_schema are rejected at build time unless extra_behavior='allow'."""
    with pytest.raises(SchemaError, match='extras_schema can only be used if extra_behavior=allow'):
        SchemaValidator(
            schema=core_schema.model_fields_schema(
                fields={}, extras_schema=core_schema.int_schema(), extra_behavior='ignore'
            )
        )
    with pytest.raises(SchemaError, match='extras_keys_schema can only be used if extra_behavior=allow'):
        SchemaValidator(
            schema=core_schema.model_fields_schema(
                fields={}, extras_keys_schema=core_schema.int_schema(), extra_behavior='ignore'
            )
        )
def test_allow_extra_wrong():
    """An unknown extra_fields_behavior value in the config raises SchemaError at build time."""
    with pytest.raises(SchemaError, match='Invalid extra_behavior: `wrong`'):
        SchemaValidator(
            schema=core_schema.model_fields_schema(fields={}), config=CoreConfig(extra_fields_behavior='wrong')
        )
def test_allow_extra_fn_override_wrong():
    """An unknown `extra` override passed at call time raises ValueError (not SchemaError)."""
    v = SchemaValidator(schema=core_schema.model_fields_schema(fields={}))
    with pytest.raises(ValueError, match='Invalid extra_behavior: `wrong`'):
        v.validate_python({}, extra='wrong')
def test_str_config():
    """String constraints from the core config (str_max_length) apply to model fields."""
    fields = {'field_a': core_schema.model_field(schema=core_schema.str_schema())}
    validator = SchemaValidator(
        core_schema.model_fields_schema(fields=fields),
        config=CoreConfig(str_max_length=5),
    )
    assert validator.validate_python({'field_a': 'test'}) == ({'field_a': 'test'}, None, {'field_a'})
    # 9 characters exceeds the configured maximum of 5
    with pytest.raises(ValidationError, match='String should have at most 5 characters'):
        validator.validate_python({'field_a': 'test long'})
def test_validate_assignment():
    """validate_assignment coerces the assigned value and mutates the supplied dict in place."""
    field = core_schema.model_field(schema=core_schema.str_schema())
    validator = SchemaValidator(core_schema.model_fields_schema(fields={'field_a': field}))
    assert validator.validate_python({'field_a': 'test'}) == ({'field_a': 'test'}, None, {'field_a'})
    data = {'field_a': 'test'}
    result = validator.validate_assignment(data, 'field_a', b'abc')
    assert result == ({'field_a': 'abc'}, None, {'field_a'})
    # the input dict itself was updated
    assert data == {'field_a': 'abc'}
def test_validate_assignment_strict_field():
    """A field-level strict str schema also applies during validate_assignment."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={'field_a': core_schema.model_field(schema=core_schema.str_schema(strict=True))}
        )
    )
    assert v.validate_python({'field_a': 'test'}) == ({'field_a': 'test'}, None, {'field_a'})
    with pytest.raises(ValidationError) as exc_info:
        v.validate_assignment({'field_a': 'test'}, 'field_a', b'abc')
    assert exc_info.value.errors(include_url=False) == [
        {'input': b'abc', 'type': 'string_type', 'loc': ('field_a',), 'msg': 'Input should be a valid string'}
    ]
def test_validate_assignment_functions():
    """On validate_assignment only the assigned field's validator function re-runs, not its siblings'."""
    calls: list[Any] = []
    def func_a(input_value, info):
        calls.append(('func_a', input_value))
        return input_value * 2
    def func_b(input_value, info):
        calls.append(('func_b', input_value))
        return input_value / 2
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'field_a': core_schema.model_field(
                    schema={
                        'type': 'function-after',
                        'function': {'type': 'with-info', 'function': func_a},
                        'schema': core_schema.str_schema(),
                    }
                ),
                'field_b': core_schema.model_field(
                    schema={
                        'type': 'function-after',
                        'function': {'type': 'with-info', 'function': func_b},
                        'schema': core_schema.int_schema(),
                    }
                ),
            }
        )
    )
    assert v.validate_python({'field_a': 'test', 'field_b': 12.0}) == (
        {'field_a': 'testtest', 'field_b': 6},
        None,
        {'field_a', 'field_b'},
    )
    assert calls == [('func_a', 'test'), ('func_b', 12)]
    calls.clear()
    assert v.validate_assignment({'field_a': 'testtest', 'field_b': 6}, 'field_a', 'new-val') == (
        {'field_a': 'new-valnew-val', 'field_b': 6},
        None,
        {'field_a'},
    )
    # func_b is not re-invoked for the untouched field
    assert calls == [('func_a', 'new-val')]
def test_validate_assignment_ignore_extra():
    """Without extra_behavior='allow', assigning an unknown field fails with 'no_such_attribute'."""
    v = SchemaValidator(
        core_schema.model_fields_schema(fields={'field_a': core_schema.model_field(schema=core_schema.str_schema())})
    )
    assert v.validate_python({'field_a': 'test'}) == ({'field_a': 'test'}, None, {'field_a'})
    with pytest.raises(ValidationError) as exc_info:
        v.validate_assignment({'field_a': 'test'}, 'other_field', 456)
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'no_such_attribute',
            'loc': ('other_field',),
            'msg': "Object has no attribute 'other_field'",
            'input': 456,
            'ctx': {'attribute': 'other_field'},
        }
    ]
def test_validate_assignment_allow_extra():
    """With extra_behavior='allow', assigning an unknown field stores it in the extra dict."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={'field_a': core_schema.model_field(schema=core_schema.str_schema())}, extra_behavior='allow'
        )
    )
    assert v.validate_python({'field_a': 'test'}) == ({'field_a': 'test'}, {}, {'field_a'})
    assert v.validate_assignment({'field_a': 'test'}, 'other_field', 456) == (
        {'field_a': 'test'},
        {'other_field': 456},
        {'other_field'},
    )
def test_validate_assignment_allow_extra_validate():
    """extras_schema validates values assigned to unknown fields, coercing or rejecting them."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={'field_a': core_schema.model_field(schema=core_schema.str_schema())},
            extras_schema=core_schema.int_schema(),
            extra_behavior='allow',
        )
    )
    assert v.validate_assignment({'field_a': 'test'}, 'other_field', '456') == (
        {'field_a': 'test'},
        {'other_field': 456},
        {'other_field'},
    )
    with pytest.raises(ValidationError) as exc_info:
        assert v.validate_assignment({'field_a': 'test'}, 'other_field', 'xyz')
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('other_field',),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'xyz',
        }
    ]
def test_validate_assignment_with_strict():
    """The strict flag passed to validate_assignment overrides the schema's lax coercion."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'x': core_schema.model_field(schema=core_schema.str_schema()),
                'y': core_schema.model_field(schema=core_schema.int_schema()),
            }
        )
    )
    r, model_extra, fields_set = v.validate_python({'x': 'a', 'y': '123'})
    assert r == {'x': 'a', 'y': 123}
    assert model_extra is None
    assert fields_set == {'x', 'y'}
    v.validate_assignment(r, 'y', '124')
    assert r == {'x': 'a', 'y': 124}
    with pytest.raises(ValidationError) as exc_info:
        v.validate_assignment(r, 'y', '124', strict=True)
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'int_type', 'loc': ('y',), 'msg': 'Input should be a valid integer', 'input': '124'}
    ]
def test_json_error():
    """Errors inside nested values from validate_json carry the full location path (field, index)."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'field_a': core_schema.model_field(
                    schema=core_schema.list_schema(items_schema=core_schema.int_schema())
                )
            }
        )
    )
    with pytest.raises(ValidationError) as exc_info:
        v.validate_json('{"field_a": [123, "wrong"]}')
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('field_a', 1),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'wrong',
        }
    ]
def test_fields_required_by_default():
    """By default all fields should be required; omitting one yields a 'missing' error."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'x': core_schema.model_field(schema=core_schema.str_schema()),
                'y': core_schema.model_field(schema=core_schema.str_schema()),
            }
        )
    )
    assert v.validate_python({'x': 'pika', 'y': 'chu'}) == ({'x': 'pika', 'y': 'chu'}, None, {'x', 'y'})
    with pytest.raises(ValidationError) as exc_info:
        assert v.validate_python({'x': 'pika'})
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'missing', 'loc': ('y',), 'msg': 'Field required', 'input': {'x': 'pika'}}
    ]
def test_fields_required_by_default_with_default():
    """A with_default_schema wrapper makes an otherwise-required field optional."""
    validator = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'x': core_schema.model_field(schema=core_schema.str_schema()),
                'y': core_schema.model_field(
                    schema=core_schema.with_default_schema(schema=core_schema.str_schema(), default='bulbi')
                ),
            }
        )
    )
    explicit = validator.validate_python({'x': 'pika', 'y': 'chu'})
    assert explicit == ({'x': 'pika', 'y': 'chu'}, None, {'x', 'y'})
    # omitted 'y' falls back to the default and is not in fields_set
    defaulted = validator.validate_python({'x': 'pika'})
    assert defaulted == ({'x': 'pika', 'y': 'bulbi'}, None, {'x'})
def test_alias(py_and_json: PyAndJson):
    """With a validation_alias, only the alias key is accepted; the field name itself is rejected."""
    v = py_and_json(
        {
            'type': 'model-fields',
            'fields': {'field_a': {'validation_alias': 'FieldA', 'type': 'model-field', 'schema': {'type': 'int'}}},
        }
    )
    assert v.validate_test({'FieldA': '123'}) == ({'field_a': 123}, None, {'field_a'})
    with pytest.raises(ValidationError, match=r'FieldA\n +Field required \[type=missing,'):
        assert v.validate_test({'foobar': '123'})
    # the real field name does not match unless validate_by_name is enabled
    with pytest.raises(ValidationError, match=r'FieldA\n +Field required \[type=missing,'):
        assert v.validate_test({'field_a': '123'})
def test_empty_string_field_name(py_and_json: PyAndJson):
    """An empty string is a legal field name."""
    schema = {'type': 'model-fields', 'fields': {'': {'type': 'model-field', 'schema': {'type': 'int'}}}}
    validator = py_and_json(schema)
    assert validator.validate_test({'': 123}) == ({'': 123}, None, {''})
def test_empty_string_aliases(py_and_json: PyAndJson):
    """Empty strings are legal as aliases and as alias-path segments."""
    v = py_and_json(
        {
            'type': 'model-fields',
            'fields': {'field_a': {'validation_alias': '', 'type': 'model-field', 'schema': {'type': 'int'}}},
        }
    )
    assert v.validate_test({'': 123}) == ({'field_a': 123}, None, {'field_a'})
    # a path of two empty-string segments looks up input[''][''] 
    v = py_and_json(
        {
            'type': 'model-fields',
            'fields': {'field_a': {'validation_alias': ['', ''], 'type': 'model-field', 'schema': {'type': 'int'}}},
        }
    )
    assert v.validate_test({'': {'': 123}}) == ({'field_a': 123}, None, {'field_a'})
def test_alias_allow_pop(py_and_json: PyAndJson):
    """With validate_by_name=True both the alias and the field name are accepted; the alias wins on conflict."""
    v = py_and_json(
        {
            'type': 'model-fields',
            'fields': {'field_a': {'validation_alias': 'FieldA', 'type': 'model-field', 'schema': {'type': 'int'}}},
        },
        config=CoreConfig(validate_by_name=True),
    )
    assert v.validate_test({'FieldA': '123'}) == ({'field_a': 123}, None, {'field_a'})
    assert v.validate_test({'field_a': '123'}) == ({'field_a': 123}, None, {'field_a'})
    # alias always wins if both are present
    assert v.validate_test({'FieldA': '1', 'field_a': '2'}) == ({'field_a': 1}, None, {'field_a'})
    assert v.validate_test({'field_a': '1', 'FieldA': '2'}) == ({'field_a': 2}, None, {'field_a'})
    # even invalid values are ignored if alias is present
    assert v.validate_test({'FieldA': '1', 'field_a': 'q'}) == ({'field_a': 1}, None, {'field_a'})
    assert v.validate_test({'field_a': 'q', 'FieldA': '2'}) == ({'field_a': 2}, None, {'field_a'})
    # but if the alias is invalid, those errors are raised
    with pytest.raises(ValidationError, match=r'FieldA\n +Input should be a valid integer.+\[type=int_parsing,'):
        assert v.validate_test({'FieldA': 'q', 'field_a': '2'}) == ({'field_a': 1}, None, {'field_a'})
    with pytest.raises(ValidationError, match=r'FieldA\n +Input should be a valid integer.+\[type=int_parsing,'):
        assert v.validate_test({'field_a': 'q', 'FieldA': 'q'}) == ({'field_a': 2}, None, {'field_a'})
    with pytest.raises(ValidationError, match=r'FieldA\n +Field required \[type=missing,'):
        assert v.validate_test({'foobar': '123'})
def test_only_validate_by_name(py_and_json) -> None:
    """validate_by_name=True with validate_by_alias=False accepts only the field name, not the alias."""
    v = py_and_json(
        {
            'type': 'model-fields',
            'fields': {'field_a': {'validation_alias': 'FieldA', 'type': 'model-field', 'schema': {'type': 'int'}}},
        },
        config=CoreConfig(validate_by_name=True, validate_by_alias=False),
    )
    assert v.validate_test({'field_a': '123'}) == ({'field_a': 123}, None, {'field_a'})
    with pytest.raises(ValidationError, match=r'field_a\n +Field required \[type=missing,'):
        assert v.validate_test({'FieldA': '123'})
def test_only_allow_alias(py_and_json) -> None:
    """validate_by_name=False with validate_by_alias=True accepts only the alias, not the field name."""
    v = py_and_json(
        {
            'type': 'model-fields',
            'fields': {'field_a': {'validation_alias': 'FieldA', 'type': 'model-field', 'schema': {'type': 'int'}}},
        },
        config=CoreConfig(validate_by_name=False, validate_by_alias=True),
    )
    assert v.validate_test({'FieldA': '123'}) == ({'field_a': 123}, None, {'field_a'})
    with pytest.raises(ValidationError, match=r'FieldA\n +Field required \[type=missing,'):
        assert v.validate_test({'field_a': '123'})
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ({'foo': {'bar': '123'}}, ({'field_a': 123}, None, {'field_a'})),
        ({'x': '123'}, Err(r'foo.bar\n +Field required \[type=missing,')),
        ({'foo': '123'}, Err(r'foo.bar\n +Field required \[type=missing,')),
        ({'foo': [1, 2, 3]}, Err(r'foo.bar\n +Field required \[type=missing,')),
        ({'foo': {'bat': '123'}}, Err(r'foo.bar\n +Field required \[type=missing,')),
    ],
    ids=repr,
)
def test_alias_path(py_and_json: PyAndJson, input_value, expected):
    """A list-valued validation_alias is a lookup path ('foo' -> 'bar') into nested input."""
    v = py_and_json(
        {
            'type': 'model-fields',
            'fields': {
                'field_a': {'validation_alias': ['foo', 'bar'], 'type': 'model-field', 'schema': {'type': 'int'}}
            },
        }
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=expected.message):
            v.validate_test(input_value)
    else:
        output = v.validate_test(input_value)
        assert output == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ({'foo': {'bar': {'bat': '123'}}}, ({'field_a': 123}, None, {'field_a'})),
        ({'foo': [1, 2, 3, 4]}, ({'field_a': 4}, None, {'field_a'})),
        ({'foo': (1, 2, 3, 4)}, ({'field_a': 4}, None, {'field_a'})),
        ({'spam': 5}, ({'field_a': 5}, None, {'field_a'})),
        # when several paths resolve, the first listed path takes priority
        ({'spam': 1, 'foo': {'bar': {'bat': 2}}}, ({'field_a': 2}, None, {'field_a'})),
        ({'foo': {'x': 2}}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'x': '123'}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'x': {2: 33}}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'foo': '01234'}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'foo': [1]}, Err(r'field_a\n +Field required \[type=missing,')),
    ],
    ids=repr,
)
def test_aliases_path_multiple(py_and_json: PyAndJson, input_value, expected):
    """Multiple alias paths are tried in order; missing errors use the field name (loc_by_alias=False)."""
    v = py_and_json(
        {
            'type': 'model-fields',
            'fields': {
                'field_a': {
                    'validation_alias': [['foo', 'bar', 'bat'], ['foo', 3], ['spam']],
                    'type': 'model-field',
                    'schema': {'type': 'int'},
                }
            },
        },
        {'loc_by_alias': False},
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=expected.message):
            val = v.validate_test(input_value)
            print(f'UNEXPECTED OUTPUT: {val!r}')
    else:
        output = v.validate_test(input_value)
        assert output == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ({'foo': {-2: '123'}}, ({'field_a': 123}, None, {'field_a'})),
        # negative indexes work fine, both as dict keys and as list indices counted from the end
        ({'foo': [1, 42, 'xx']}, ({'field_a': 42}, None, {'field_a'})),
        ({'foo': [42, 'xxx', 42]}, Err(r'Input should be a valid integer,')),
        ({'foo': [42]}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'foo': {'xx': '123'}}, Err(r'field_a\n +Field required \[type=missing,')),
        # the string '-2' does not match the int key -2
        ({'foo': {'-2': '123'}}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'foo': {2: '123'}}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'foo': 'foobar'}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'foo': {0, 1, 2}}, Err(r'field_a\n +Field required \[type=missing,')),
    ],
    ids=repr,
)
def test_aliases_path_negative(input_value, expected):
    """A negative int in an alias path indexes from the end of sequences (Python input only here)."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={'field_a': core_schema.model_field(validation_alias=['foo', -2], schema=core_schema.int_schema())}
        ),
        config=CoreConfig(loc_by_alias=False),
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=expected.message):
            val = v.validate_python(input_value)
            print(f'UNEXPECTED OUTPUT: {val!r}')
    else:
        output = v.validate_python(input_value)
        assert output == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ({'foo': [1, 42, 'xx']}, ({'field_a': 42}, None, {'field_a'})),
        ({'foo': [42, 'xxx', 42]}, Err(r'Input should be a valid integer,')),
        # with the default loc_by_alias=True, the missing error is located at the alias path
        ({'foo': [42]}, Err(r'foo.-2\n +Field required \[type=missing,')),
    ],
    ids=repr,
)
def test_aliases_path_negative_json(py_and_json: PyAndJson, input_value, expected):
    """Negative list indices in alias paths also work for JSON input."""
    v = py_and_json(
        {
            'type': 'model-fields',
            'fields': {'field_a': {'validation_alias': ['foo', -2], 'type': 'model-field', 'schema': {'type': 'int'}}},
        }
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=expected.message):
            val = v.validate_test(input_value)
            print(f'UNEXPECTED OUTPUT: {val!r}')
    else:
        output = v.validate_test(input_value)
        assert output == expected
def test_aliases_debug():
    """The validator repr exposes alias lookup internals, including the "rest" of multi-step paths."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'field_a': core_schema.model_field(
                    validation_alias=[['foo', 'bar', 'bat'], ['foo', 3]], schema=core_schema.int_schema()
                )
            }
        )
    )
    print(repr(v))
    assert repr(v).startswith('SchemaValidator(title="model-fields", validator=ModelFields(')
    # check that aliases with non-empty "rest" are present, i.e. non-trivial paths
    assert 'rest: [\n' in repr(v)
def get_int_key():
    # NOTE(review): this function lacks the `test_` prefix, so pytest never collects it.
    # Before renaming it to enable it, confirm the expected model_extra value: other tests in
    # this file without extra_behavior='allow' expect None rather than {} for model_extra.
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'field_a': core_schema.model_field(
                    validation_alias=[['foo', 3], ['spam']], schema=core_schema.int_schema()
                )
            }
        )
    )
    # an int element (3) in an alias path is used directly as a dict key
    assert v.validate_python({'foo': {3: 33}}) == ({'field_a': 33}, {}, {'field_a'})
class GetItemThing:
    """Object whose ``[]`` lookup always yields 321, asserting the key asked for is ``'foo'``."""

    def __getitem__(self, key):
        # the aliased lookup is expected to request exactly this key
        assert key == 'foo'
        return 321
def get_custom_getitem():
    # NOTE(review): this function lacks the `test_` prefix, so pytest never collects it.
    # Verify the expectations (including model_extra == {}) still hold before enabling it.
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={'field_a': core_schema.model_field(validation_alias=['foo'], schema=core_schema.int_schema())}
        )
    )
    # alias lookup appears to go through __getitem__, on both top-level and nested values
    assert v.validate_python(GetItemThing()) == ({'field_a': 321}, {}, {'field_a'})
    assert v.validate_python({'bar': GetItemThing()}) == ({'field_a': 321}, {}, {'field_a'})
@pytest.mark.parametrize('input_value', [{'foo': {'bar': 42}}, {'foo': 42}, {'field_a': 42}], ids=repr)
def test_paths_allow_by_name(py_and_json: PyAndJson, input_value):
    """With validate_by_name=True, the plain field name works alongside the alias paths."""
    v = py_and_json(
        {
            'type': 'model-fields',
            'fields': {
                'field_a': {
                    'validation_alias': [['foo', 'bar'], ['foo']],
                    'type': 'model-field',
                    'schema': {'type': 'int'},
                }
            },
        },
        config=CoreConfig(validate_by_name=True),
    )
    assert v.validate_test(input_value) == ({'field_a': 42}, None, {'field_a'})
@pytest.mark.parametrize(
    'alias_schema,error',
    [
        ({'validation_alias': []}, 'Lookup paths should have at least one element'),
        ({'validation_alias': [[]]}, 'Each alias path should have at least one element'),
        ({'validation_alias': [123]}, "TypeError: 'int' object is not an instance of 'list'"),
        ({'validation_alias': [[1, 'foo']]}, 'TypeError: The first item in an alias path should be a string'),
    ],
    ids=repr,
)
def test_alias_build_error(alias_schema, error):
    """Malformed validation_alias values are rejected at schema-build time with a SchemaError."""
    with pytest.raises(SchemaError, match=error):
        SchemaValidator(
            schema={
                'type': 'model-fields',
                'fields': {'field_a': {'type': 'model-field', 'schema': {'type': 'int'}, **alias_schema}},
            }
        )
def test_alias_error_loc_alias(py_and_json: PyAndJson):
    """With loc_by_alias=True (the default), error locations use the alias path that matched."""
    v = py_and_json(
        {
            'type': 'model-fields',
            'fields': {
                'field_a': {
                    'type': 'model-field',
                    'schema': {'type': 'int'},
                    'validation_alias': [['foo', 'x'], ['bar', 1, -1]],
                }
            },
        },
        {'loc_by_alias': True},  # this is the default
    )
    assert v.validate_test({'foo': {'x': 42}}) == ({'field_a': 42}, None, {'field_a'})
    assert v.validate_python({'bar': ['x', {-1: 42}]}) == ({'field_a': 42}, None, {'field_a'})
    assert v.validate_test({'bar': ['x', [1, 2, 42]]}) == ({'field_a': 42}, None, {'field_a'})
    with pytest.raises(ValidationError) as exc_info:
        v.validate_test({'foo': {'x': 'not_int'}})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('foo', 'x'),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'not_int',
        }
    ]
    with pytest.raises(ValidationError) as exc_info:
        v.validate_test({'bar': ['x', [1, 2, 'not_int']]})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('bar', 1, -1),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'not_int',
        }
    ]
    with pytest.raises(ValidationError) as exc_info:
        v.validate_test({})
    # insert_assert(exc_info.value.errors(include_url=False))
    # a missing field is reported at the first alias path
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'missing', 'loc': ('foo', 'x'), 'msg': 'Field required', 'input': {}}
    ]
def test_alias_error_loc_field_names(py_and_json: PyAndJson):
    """With loc_by_alias=False, error locations use the field name instead of the alias path."""
    v = py_and_json(
        {
            'type': 'model-fields',
            'fields': {
                'field_a': {
                    'type': 'model-field',
                    'schema': {'type': 'int'},
                    'validation_alias': [['foo'], ['bar', 1, -1]],
                }
            },
        },
        {'loc_by_alias': False},
    )
    assert v.validate_test({'foo': 42}) == ({'field_a': 42}, None, {'field_a'})
    assert v.validate_test({'bar': ['x', [1, 2, 42]]}) == ({'field_a': 42}, None, {'field_a'})
    with pytest.raises(ValidationError) as exc_info:
        v.validate_test({'foo': 'not_int'})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('field_a',),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'not_int',
        }
    ]
    with pytest.raises(ValidationError) as exc_info:
        v.validate_test({'bar': ['x', [1, 2, 'not_int']]})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('field_a',),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'not_int',
        }
    ]
    with pytest.raises(ValidationError) as exc_info:
        v.validate_test({})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'missing', 'loc': ('field_a',), 'msg': 'Field required', 'input': {}}
    ]
def test_empty_model():
    """A model-fields schema with no fields accepts an empty dict and rejects non-dict input."""
    validator = SchemaValidator(core_schema.model_fields_schema(fields={}))
    assert validator.validate_python({}) == ({}, None, set())
    expected_message = re.escape('Input should be a valid dictionary or instance of Model [type=model_type,')
    with pytest.raises(ValidationError, match=expected_message):
        validator.validate_python('x')
def test_model_fields_deep():
    """Nested model-fields schemas validate recursively.

    Inner results are (dict, extra, fields_set) tuples embedded in the outer dict, and error
    locations include the full path through the nesting.
    """
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                'field_b': core_schema.model_field(
                    schema=core_schema.model_fields_schema(
                        fields={
                            'field_c': core_schema.model_field(schema=core_schema.str_schema()),
                            'field_d': core_schema.model_field(
                                schema=core_schema.model_fields_schema(
                                    fields={
                                        'field_e': core_schema.model_field(schema=core_schema.str_schema()),
                                        'field_f': core_schema.model_field(schema=core_schema.int_schema()),
                                    }
                                )
                            ),
                        }
                    )
                ),
            }
        )
    )
    model_dict, model_extra, fields_set = v.validate_python(
        {'field_a': '1', 'field_b': {'field_c': '2', 'field_d': {'field_e': '4', 'field_f': 4}}}
    )
    assert model_dict == {
        'field_a': '1',
        'field_b': (
            {'field_c': '2', 'field_d': ({'field_e': '4', 'field_f': 4}, None, {'field_f', 'field_e'})},
            None,
            {'field_d', 'field_c'},
        ),
    }
    assert model_extra is None
    assert fields_set == {'field_a', 'field_b'}
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python({'field_a': '1', 'field_b': {'field_c': '2', 'field_d': {'field_e': '4', 'field_f': 'xx'}}})
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('field_b', 'field_d', 'field_f'),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'xx',
        }
    ]
class ClassWithAttributes:
    """Plain object exposing two instance attributes and one property, for from_attributes tests."""

    def __init__(self):
        self.a, self.b = 1, 2

    @property
    def c(self):
        return 'ham'
@dataclass
class MyDataclass:
    """Dataclass with the same attribute values as ClassWithAttributes, for from_attributes tests."""

    a: int = 1
    b: int = 2
    c: str = 'ham'
@pytest.mark.parametrize(
    'input_value,expected',
    [
        (ClassWithAttributes(), ({'a': 1, 'b': 2, 'c': 'ham'}, None, {'a', 'b', 'c'})),
        (MyDataclass(), ({'a': 1, 'b': 2, 'c': 'ham'}, None, {'a', 'b', 'c'})),
        (Cls(a=1, b=2, c='ham'), ({'a': 1, 'b': 2, 'c': 'ham'}, None, {'a', 'b', 'c'})),
        (dict(a=1, b=2, c='ham'), ({'a': 1, 'b': 2, 'c': 'ham'}, None, {'a', 'b', 'c'})),
        (Map(a=1, b=2, c='ham'), ({'a': 1, 'b': 2, 'c': 'ham'}, None, {'a', 'b', 'c'})),
        # a two-tuple works as (object, dict-of-overrides)
        ((Cls(a=1, b=2), dict(c='ham')), ({'a': 1, 'b': 2, 'c': 'ham'}, None, {'a', 'b', 'c'})),
        ((Cls(a=1, b=2), dict(c='bacon')), ({'a': 1, 'b': 2, 'c': 'bacon'}, None, {'a', 'b', 'c'})),
        ((Cls(a=1, b=2, c='ham'), dict(c='bacon')), ({'a': 1, 'b': 2, 'c': 'bacon'}, None, {'a', 'b', 'c'})),
        ((Cls(a=1, b=2, c='ham'), dict(d='bacon')), ({'a': 1, 'b': 2, 'c': 'ham'}, None, {'a', 'b', 'c'})),
        # using type gives `__module__ == 'builtins'`
        (type('Testing', (), {}), Err('[type=model_attributes_type,')),
        (
            '123',
            Err('Input should be a valid dictionary or object to extract fields from [type=model_attributes_type,'),
        ),
        ([(1, 2)], Err('type=model_attributes_type,')),
        (((1, 2),), Err('type=model_attributes_type,')),
    ],
    ids=repr,
)
@pytest.mark.parametrize('from_attributes_mode', ['schema', 'validation'])
def test_from_attributes(input_value, expected, from_attributes_mode):
    """Attribute extraction works for objects, dataclasses, dicts and mappings.

    The behaviour is the same whether from_attributes is enabled in the schema or passed
    per validation call.
    """
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'a': core_schema.model_field(schema=core_schema.int_schema()),
                'b': core_schema.model_field(schema=core_schema.int_schema()),
                'c': core_schema.model_field(schema=core_schema.str_schema()),
            },
            from_attributes=from_attributes_mode == 'schema',
        )
    )
    kwargs = {}
    if from_attributes_mode == 'validation':
        kwargs['from_attributes'] = True
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            val = v.validate_python(input_value, **kwargs)
            print(f'UNEXPECTED OUTPUT: {val!r}')
    else:
        output = v.validate_python(input_value, **kwargs)
        assert output == expected
def test_from_attributes_type_error():
    """Non-object input gives model_attributes_type for Python, but model_type (with the
    configured model_name in ctx) for JSON, since JSON can never supply attributes."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'a': core_schema.model_field(schema=core_schema.int_schema()),
                'b': core_schema.model_field(schema=core_schema.int_schema()),
                'c': core_schema.model_field(schema=core_schema.str_schema()),
            },
            from_attributes=True,
            model_name='MyModel',
        )
    )
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python('123')
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'model_attributes_type',
            'loc': (),
            'msg': 'Input should be a valid dictionary or object to extract fields from',
            'input': '123',
        }
    ]
    with pytest.raises(ValidationError) as exc_info:
        v.validate_json('123')
    # insert_assert(exc_info.value.errors())
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'model_type',
            'loc': (),
            'msg': 'Input should be an object',
            'input': 123,
            'ctx': {'class_name': 'MyModel'},
        }
    ]
def test_from_attributes_by_name():
    """validate_by_name=True lets attribute extraction match the field name as well as the alias."""
    validator = SchemaValidator(
        core_schema.model_fields_schema(
            fields={'a': core_schema.model_field(schema=core_schema.int_schema(), validation_alias='a_alias')},
            from_attributes=True,
        ),
        config=CoreConfig(validate_by_name=True),
    )
    expected = ({'a': 1}, None, {'a'})
    assert validator.validate_python(Cls(a_alias=1)) == expected
    assert validator.validate_python(Cls(a=1)) == expected
def test_from_attributes_override_true():
    """from_attributes=False in the schema can be switched on per validation call."""
    validator = SchemaValidator(
        core_schema.model_fields_schema(
            fields={'a': core_schema.model_field(schema=core_schema.int_schema())}, from_attributes=False
        )
    )
    # by default attribute extraction is off, so an object is rejected
    with pytest.raises(ValidationError, match='Input should be a valid dictionary'):
        validator.validate_python(Cls(a=1))
    # the call-level flag overrides the schema setting
    assert validator.validate_python(Cls(a=1), from_attributes=True) == ({'a': 1}, None, {'a'})
    assert validator.isinstance_python(Cls(a=1), from_attributes=True) is True
    assert validator.isinstance_python(Cls(a=1)) is False
def test_from_attributes_override_false():
    """from_attributes=True in the schema can be switched off per validation call."""
    validator = SchemaValidator(
        core_schema.model_fields_schema(
            fields={'a': core_schema.model_field(schema=core_schema.int_schema())}, from_attributes=True
        )
    )
    # the call-level flag overrides the schema setting
    with pytest.raises(ValidationError, match='Input should be a valid dictionary'):
        validator.validate_python(Cls(a=1), from_attributes=False)
    # by default attribute extraction stays on
    assert validator.validate_python(Cls(a=1)) == ({'a': 1}, None, {'a'})
    assert validator.isinstance_python(Cls(a=1)) is True
    assert validator.isinstance_python(Cls(a=1), from_attributes=False) is False
def test_from_attributes_missing():
    """A missing attribute on the source object produces a `missing` error whose input is the object."""
    class Foobar:
        def __init__(self):
            self.a = 1
            self.b = 2
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'a': core_schema.model_field(schema=core_schema.int_schema()),
                'b': core_schema.model_field(schema=core_schema.int_schema()),
                'c': core_schema.model_field(schema=core_schema.str_schema()),
            },
            from_attributes=True,
        )
    )
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(Foobar())
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'missing',
            'loc': ('c',),
            'msg': 'Field required',
            'input': HasRepr(IsStr(regex='.+Foobar object at.+')),
        }
    ]
def test_from_attributes_error():
    """An exception raised by a property during extraction becomes a get_attribute_error."""
    class Foobar:
        def __init__(self):
            self.a = 1
        @property
        def b(self):
            raise RuntimeError('intentional error')
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'a': core_schema.model_field(schema=core_schema.int_schema()),
                'b': core_schema.model_field(schema=core_schema.int_schema()),
            },
            from_attributes=True,
        )
    )
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(Foobar())
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'get_attribute_error',
            'loc': ('b',),
            'msg': 'Error extracting attribute: RuntimeError: intentional error',
            'input': HasRepr(IsStr(regex='.+Foobar object at.+')),
            'ctx': {'error': 'RuntimeError: intentional error'},
        }
    ]
def test_from_attributes_extra():
    """With from_attributes and extra_behavior='allow', no attribute-based extras are collected.

    Every assert below shows model_extra == {} regardless of what extra attributes, properties,
    methods or function-valued attributes the source object carries.
    """
    def another_function(x):
        return x
    class Foobar:
        def __init__(self):
            self.a = 1
            self.b = 2
            self._private_attribute = 4
        @property
        def c(self):
            return 'ham'
        @property
        def _private_property(self):
            return 'wrong'
        @property
        def property_error(self):
            raise RuntimeError('xxx')
        def bound_method(self):
            return f'wrong {self.a}'
        @staticmethod
        def static_method():
            return 'wrong'
        # this is omitted along with the static method by the !PyFunction::is_type_of(attr) check in fields
        function_attribute = another_function
        @classmethod
        def class_method(cls):
            return 'wrong'
    @dataclass
    class MyDataclass:
        a: int = 1
        b: int = 2
        c: str = 'ham'
        _d: int = 4
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={'a': core_schema.model_field(schema=core_schema.int_schema())},
            from_attributes=True,
            extra_behavior='allow',
        )
    )
    assert v.validate_python(Foobar()) == ({'a': 1}, {}, {'a'})
    assert v.validate_python(MyDataclass()) == ({'a': 1}, {}, {'a'})
    assert v.validate_python(Cls(a=1, b=2, c='ham')) == ({'a': 1}, {}, {'a'})
    assert v.validate_python(Cls(a=1, b=datetime(2000, 1, 1))) == ({'a': 1}, {}, {'a'})
    assert v.validate_python(Cls(a=1, b=datetime.now, c=lambda: 42)) == ({'a': 1}, {}, {'a'})
def test_from_attributes_extra_ignore_no_attributes_accessed() -> None:
    """With extra_behavior='ignore', attributes not named in fields are never even read."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={'a': core_schema.model_field(schema=core_schema.int_schema())},
            from_attributes=True,
            extra_behavior='ignore',
        )
    )
    accessed: list[str] = []
    class Source:
        a = 1
        b = 2
        # record every attribute access so the test can verify what was touched
        def __getattribute__(self, name: str, /) -> Any:
            accessed.append(name)
            return super().__getattribute__(name)
    assert v.validate_python(Source()) == ({'a': 1}, None, {'a'})
    assert 'a' in accessed and 'b' not in accessed
def test_from_attributes_extra_forbid() -> None:
    """extra_behavior='forbid' does not reject unlisted attributes when extracting from an object."""

    class Source:
        a = 1
        b = 2

    validator = SchemaValidator(
        core_schema.model_fields_schema(
            fields={'a': core_schema.model_field(schema=core_schema.int_schema())},
            from_attributes=True,
            extra_behavior='forbid',
        )
    )
    # `b` exists on the source but causes no error
    assert validator.validate_python(Source()) == ({'a': 1}, None, {'a'})
def foobar():
    """No-op module-level function, used as an identity-comparable value in parametrized cases."""
    pass
@pytest.mark.parametrize(
    'input_value,expected',
    [
        (Cls(a=1), {'a': 1}),
        (Cls(a=datetime.now), {'a': datetime.now}),
        (Cls(a=lambda: 42), {'a': HasRepr(IsStr(regex='.+<lambda>.+'))}),
        (Cls(a=sys.path), {'a': sys.path}),
        (Cls(a=foobar), {'a': foobar}),
    ],
    ids=repr,
)
def test_from_attributes_function(input_value, expected):
    """Function, method and lambda attribute values pass through an `any` field unchanged."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={'a': core_schema.model_field(schema=core_schema.any_schema())}, from_attributes=True
        )
    )
    model_dict, model_extra, fields_set = v.validate_python(input_value)
    assert model_dict == expected
    assert model_extra is None
    assert fields_set == {'a'}
def test_from_attributes_error_error():
    """If the extraction exception itself cannot be stringified, the error message still renders."""
    class BadError(Exception):
        def __str__(self):
            raise RuntimeError('intentional error inside error')
    class Foobar:
        @property
        def x(self):
            raise BadError('intentional error')
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={'x': core_schema.model_field(schema=core_schema.int_schema())}, from_attributes=True
        )
    )
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(Foobar())
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'get_attribute_error',
            'loc': ('x',),
            'msg': IsStr(regex=r'Error extracting attribute: \S+\.<locals>\.BadError: <exception str\(\) failed>'),
            'input': HasRepr(IsStr(regex='.+Foobar object at.+')),
            'ctx': {'error': IsStr(regex=r'\S+\.<locals>\.BadError: <exception str\(\) failed>')},
        }
    ]
    # an exception raised with no message renders as just the exception type name
    class UnInitError:
        @property
        def x(self):
            raise RuntimeError
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(UnInitError())
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'get_attribute_error',
            'loc': ('x',),
            'msg': 'Error extracting attribute: RuntimeError',
            'input': HasRepr(IsStr(regex='.+UnInitError object at.+')),
            'ctx': {'error': 'RuntimeError'},
        }
    ]
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ({'foo': {'bar': {'bat': '123'}}}, {'my_field': 123}),
        (Cls(foo=Cls(bar=Cls(bat='123'))), {'my_field': 123}),
        (Cls(foo={'bar': {'bat': '123'}}), {'my_field': 123}),
        (Cls(foo=[1, 2, 3, 4]), {'my_field': 4}),
        (Cls(foo=(1, 2, 3, 4)), {'my_field': 4}),
        (Cls(spam=5), {'my_field': 5}),
        (Cls(spam=1, foo=Cls(bar=Cls(bat=2))), {'my_field': 2}),
        (Cls(x='123'), Err(r'my_field\n +Field required \[type=missing,')),
        (Cls(x={2: 33}), Err(r'my_field\n +Field required \[type=missing,')),
        (Cls(foo='01234'), Err(r'my_field\n +Field required \[type=missing,')),
        (Cls(foo=[1]), Err(r'my_field\n +Field required \[type=missing,')),
        (Cls, Err(r'Input should be a valid dictionary')),
    ],
    ids=repr,
)
def test_from_attributes_path(input_value, expected):
    """Alias paths traverse attribute access (mixed with dict/sequence lookup) when from_attributes is on."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'my_field': core_schema.model_field(
                    validation_alias=[['foo', 'bar', 'bat'], ['foo', 3], ['spam']], schema=core_schema.int_schema()
                )
            },
            from_attributes=True,
        ),
        config=CoreConfig(loc_by_alias=False),
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=expected.message):
            val = v.validate_python(input_value)
            print(f'UNEXPECTED OUTPUT: {val!r}')
    else:
        model_dict, model_extra, fields_set = v.validate_python(input_value)
        assert model_dict == expected
        assert model_extra is None
        assert fields_set == {'my_field'}
def test_from_attributes_path_error():
    """An exception raised while traversing an alias path becomes a get_attribute_error at the field name."""
    class PropertyError:
        @property
        def foo(self):
            raise RuntimeError('intentional error')
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'my_field': core_schema.model_field(
                    validation_alias=[['foo', 'bar', 'bat'], ['foo', 3], ['spam']], schema=core_schema.int_schema()
                )
            },
            from_attributes=True,
        )
    )
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(PropertyError())
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'get_attribute_error',
            'loc': ('my_field',),
            'msg': 'Error extracting attribute: RuntimeError: intentional error',
            'input': HasRepr(IsStr(regex='.+PropertyError object at.+')),
            'ctx': {'error': 'RuntimeError: intentional error'},
        }
    ]
def test_alias_extra(py_and_json: PyAndJson):
    """Keys consumed via an alias are not treated as extras, even when their value fails validation."""
    v = py_and_json(
        {
            'type': 'model-fields',
            'extra_behavior': 'allow',
            'fields': {
                'field_a': {
                    'validation_alias': [['FieldA'], ['foo', 2]],
                    'type': 'model-field',
                    'schema': {'type': 'int'},
                }
            },
        },
        {'loc_by_alias': False},
    )
    assert v.validate_test({'FieldA': 1}) == ({'field_a': 1}, {}, {'field_a'})
    assert v.validate_test({'foo': [1, 2, 3]}) == ({'field_a': 3}, {}, {'field_a'})
    # used_keys should be populated even though validation fails, so "FieldA" is skipped in extra
    with pytest.raises(ValidationError) as exc_info:
        assert v.validate_test({'FieldA': '...'}) == ({'field_a': 1}, {}, {'field_a'})
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('field_a',),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': '...',
        }
    ]
def test_alias_extra_from_attributes():
    """Alias paths work for both dict input and attribute extraction with extras allowed."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            extra_behavior='allow',
            from_attributes=True,
            fields={
                'field_a': core_schema.model_field(
                    validation_alias=[['FieldA'], ['foo', 2]], schema=core_schema.int_schema()
                )
            },
        )
    )
    assert v.validate_python({'FieldA': 1}) == ({'field_a': 1}, {}, {'field_a'})
    assert v.validate_python(Cls(FieldA=1)) == ({'field_a': 1}, {}, {'field_a'})
    assert v.validate_python(Cls(foo=[1, 2, 3])) == ({'field_a': 3}, {}, {'field_a'})
    assert v.validate_python({'foo': [1, 2, 3]}) == ({'field_a': 3}, {}, {'field_a'})
def test_alias_extra_by_name(py_and_json: PyAndJson):
    """validate_by_name lets both the alias and the field name match, for dicts and attributes."""
    v = py_and_json(
        {
            'type': 'model-fields',
            'extra_behavior': 'allow',
            'from_attributes': True,
            'fields': {'field_a': {'validation_alias': 'FieldA', 'type': 'model-field', 'schema': {'type': 'int'}}},
        },
        config=CoreConfig(validate_by_name=True),
    )
    assert v.validate_test({'FieldA': 1}) == ({'field_a': 1}, {}, {'field_a'})
    assert v.validate_test({'field_a': 1}) == ({'field_a': 1}, {}, {'field_a'})
    assert v.validate_python(Cls(FieldA=1)) == ({'field_a': 1}, {}, {'field_a'})
    assert v.validate_python(Cls(field_a=1)) == ({'field_a': 1}, {}, {'field_a'})
def test_alias_extra_forbid(py_and_json: PyAndJson):
    """A key consumed via its alias counts as used, so extra_behavior='forbid' does not flag it."""
    schema = {
        'type': 'model-fields',
        'extra_behavior': 'forbid',
        'fields': {'field_a': {'type': 'model-field', 'validation_alias': 'FieldA', 'schema': {'type': 'int'}}},
    }
    validator = py_and_json(schema)
    assert validator.validate_test({'FieldA': 1}) == ({'field_a': 1}, None, {'field_a'})
def test_with_default_factory():
    """A default_factory supplies the value for a missing field; provided values take precedence."""
    validator = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'x': core_schema.model_field(
                    schema=core_schema.with_default_schema(
                        schema=core_schema.str_schema(), default_factory=lambda: 'pikachu'
                    )
                )
            }
        )
    )
    # missing field: the factory output is used and 'x' is absent from fields_set
    assert validator.validate_python({}) == ({'x': 'pikachu'}, None, set())
    # supplied field: the factory is ignored
    assert validator.validate_python({'x': 'bulbi'}) == ({'x': 'bulbi'}, None, {'x'})
@pytest.mark.parametrize(
    'default_factory,error_message',
    [
        (lambda: 1 + 'a', "unsupported operand type(s) for +: 'int' and 'str'"),
        (lambda x: 'a' + x, "<lambda>() missing 1 required positional argument: 'x'"),
    ],
)
def test_bad_default_factory(default_factory, error_message):
    """A default_factory that raises propagates its TypeError out of validation."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'x': core_schema.model_field(
                    schema=core_schema.with_default_schema(
                        schema=core_schema.str_schema(), default_factory=default_factory
                    )
                )
            }
        )
    )
    with pytest.raises(TypeError, match=re.escape(error_message)):
        v.validate_python({})
class TestOnError:
    """Tests for the `on_error` setting of `with-default` schemas inside model fields."""

    def test_on_error_bad_default(self):
        """on_error='default' without a default or default_factory is a schema-build error."""
        with pytest.raises(SchemaError, match="'on_error = default' requires a `default` or `default_factory`"):
            SchemaValidator(
                schema=core_schema.model_fields_schema(
                    fields={
                        'x': core_schema.model_field(
                            schema=core_schema.with_default_schema(schema=core_schema.str_schema(), on_error='default')
                        )
                    }
                )
            )
    def test_on_error_raise_by_default(self, py_and_json: PyAndJson):
        """Without any with-default wrapper, validation errors propagate as usual."""
        v = py_and_json({'type': 'model-fields', 'fields': {'x': {'type': 'model-field', 'schema': {'type': 'str'}}}})
        assert v.validate_test({'x': 'foo'}) == ({'x': 'foo'}, None, {'x'})
        with pytest.raises(ValidationError) as exc_info:
            v.validate_test({'x': ['foo']})
        assert exc_info.value.errors(include_url=False) == [
            {'input': ['foo'], 'type': 'string_type', 'loc': ('x',), 'msg': 'Input should be a valid string'}
        ]
    def test_on_error_raise_explicit(self, py_and_json: PyAndJson):
        """on_error='raise' behaves the same as the default raising behaviour."""
        v = py_and_json(
            {
                'type': 'model-fields',
                'fields': {
                    'x': {
                        'type': 'model-field',
                        'schema': {'type': 'default', 'schema': {'type': 'str'}, 'on_error': 'raise'},
                    }
                },
            }
        )
        assert v.validate_test({'x': 'foo'}) == ({'x': 'foo'}, None, {'x'})
        with pytest.raises(ValidationError) as exc_info:
            v.validate_test({'x': ['foo']})
        assert exc_info.value.errors(include_url=False) == [
            {'input': ['foo'], 'type': 'string_type', 'loc': ('x',), 'msg': 'Input should be a valid string'}
        ]
    def test_on_error_default(self, py_and_json: PyAndJson):
        """on_error='default' substitutes the static default when validation fails."""
        v = py_and_json(
            {
                'type': 'model-fields',
                'fields': {
                    'x': {
                        'type': 'model-field',
                        'schema': {
                            'type': 'default',
                            'schema': {'type': 'str'},
                            'on_error': 'default',
                            'default': 'pika',
                        },
                    }
                },
            }
        )
        assert v.validate_test({'x': 'foo'}) == ({'x': 'foo'}, None, {'x'})
        assert v.validate_test({'x': ['foo']}) == ({'x': 'pika'}, None, {'x'})
    def test_on_error_default_factory(self, py_and_json: PyAndJson):
        """on_error='default' also works with a default_factory instead of a static default."""
        v = py_and_json(
            {
                'type': 'model-fields',
                'fields': {
                    'x': {
                        'type': 'model-field',
                        'schema': {
                            'type': 'default',
                            'schema': {'type': 'str'},
                            'on_error': 'default',
                            'default_factory': lambda: 'pika',
                        },
                    }
                },
            }
        )
        assert v.validate_test({'x': 'foo'}) == ({'x': 'foo'}, None, {'x'})
        assert v.validate_test({'x': ['foo']}) == ({'x': 'pika'}, None, {'x'})
    def test_wrap_on_error(self, py_and_json: PyAndJson):
        """A function-wrap validator can catch the inner error and recover, even with on_error='raise'."""
        def wrap_function(input_value, validator, info):
            try:
                return validator(input_value)
            except ValidationError:
                # recover with a value derived from the failing input
                if isinstance(input_value, list):
                    return str(len(input_value))
                else:
                    return repr(input_value)
        v = py_and_json(
            {
                'type': 'model-fields',
                'fields': {
                    'x': {
                        'type': 'model-field',
                        'schema': {
                            'type': 'default',
                            'on_error': 'raise',
                            'schema': {
                                'type': 'function-wrap',
                                'function': {'type': 'with-info', 'function': wrap_function},
                                'schema': {'type': 'str'},
                            },
                        },
                    }
                },
            }
        )
        assert v.validate_test({'x': 'foo'}) == ({'x': 'foo'}, None, {'x'})
        assert v.validate_test({'x': ['foo']}) == ({'x': '1'}, None, {'x'})
        assert v.validate_test({'x': ['foo', 'bar']}) == ({'x': '2'}, None, {'x'})
        assert v.validate_test({'x': {'a': 'b'}}) == ({'x': "{'a': 'b'}"}, None, {'x'})
def test_frozen_field():
    """A frozen field validates normally but rejects validate_assignment with a frozen_field error."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            fields={
                'name': core_schema.model_field(schema=core_schema.str_schema()),
                'age': core_schema.model_field(schema=core_schema.int_schema()),
                'is_developer': core_schema.model_field(
                    schema=core_schema.with_default_schema(schema=core_schema.bool_schema(), default=True), frozen=True
                ),
            }
        )
    )
    r1, model_extra, fields_set = v.validate_python({'name': 'Samuel', 'age': '36'})
    assert r1 == {'name': 'Samuel', 'age': 36, 'is_developer': True}
    assert model_extra is None
    assert fields_set == {'name', 'age'}
    # assigning to a non-frozen field works
    v.validate_assignment(r1, 'age', '35')
    assert r1 == {'name': 'Samuel', 'age': 35, 'is_developer': True}
    # assigning to the frozen field is rejected
    with pytest.raises(ValidationError) as exc_info:
        v.validate_assignment(r1, 'is_developer', False)
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'frozen_field', 'loc': ('is_developer',), 'msg': 'Field is frozen', 'input': False}
    ]
@pytest.mark.parametrize(
    'config,schema_extra_behavior_kw',
    [
        (core_schema.CoreConfig(extra_fields_behavior='allow'), {}),
        (core_schema.CoreConfig(extra_fields_behavior='allow'), {'extra_behavior': None}),
        (core_schema.CoreConfig(), {'extra_behavior': 'allow'}),
        (None, {'extra_behavior': 'allow'}),
        # the schema-level setting wins over the config-level one
        (core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': 'allow'}),
    ],
)
@pytest.mark.parametrize(
    'extras_schema_kw, expected_extra_value',
    [({}, '123'), ({'extras_schema': None}, '123'), ({'extras_schema': core_schema.int_schema()}, 123)],
    ids=['extras_schema=unset', 'extras_schema=None', 'extras_schema=int'],
)
def test_extra_behavior_allow(
    config: Union[core_schema.CoreConfig, None],
    schema_extra_behavior_kw: dict[str, Any],
    extras_schema_kw: dict[str, Any],
    expected_extra_value: Any,
):
    """extra_behavior='allow' collects unknown keys into model_extra, validated by extras_schema if set."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            {'f': core_schema.model_field(core_schema.str_schema())}, **schema_extra_behavior_kw, **extras_schema_kw
        ),
        config=config,
    )
    m, model_extra, fields_set = v.validate_python({'f': 'x', 'extra_field': '123'})
    assert m == {'f': 'x'}
    assert model_extra == {'extra_field': expected_extra_value}
    assert fields_set == {'f', 'extra_field'}
    v.validate_assignment(m, 'f', 'y')
    assert m == {'f': 'y'}
    # assigning an unknown key adds it to model_extra rather than the model dict
    new_m, new_model_extra, new_fields_set = v.validate_assignment({**m, **model_extra}, 'not_f', '123')
    assert new_m == {'f': 'y'}
    assert new_model_extra == {'extra_field': expected_extra_value, 'not_f': expected_extra_value}
    assert new_fields_set == {'not_f'}
# We can't test the extra parameter of the validate_* functions above, since the
# extras_schema parameter isn't valid unless the models are configured with extra='allow'.
# Test the validate_* extra parameter separately instead:
@pytest.mark.parametrize(
    'config,schema_extra_behavior_kw',
    [
        (core_schema.CoreConfig(extra_fields_behavior='forbid'), {}),
        (core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': None}),
        (core_schema.CoreConfig(), {'extra_behavior': 'forbid'}),
        (None, {'extra_behavior': 'forbid'}),
        (core_schema.CoreConfig(extra_fields_behavior='ignore'), {'extra_behavior': 'forbid'}),
        (core_schema.CoreConfig(), {}),
        (core_schema.CoreConfig(), {'extra_behavior': None}),
        (None, {'extra_behavior': None}),
    ],
)
def test_extra_behavior_allow_with_validate_fn_override(
    config: Union[core_schema.CoreConfig, None],
    schema_extra_behavior_kw: dict[str, Any],
):
    """`extra='allow'` passed to the validate_* calls overrides both schema and config settings."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            {'f': core_schema.model_field(core_schema.str_schema())}, **schema_extra_behavior_kw
        ),
        config=config,
    )
    m, model_extra, fields_set = v.validate_python({'f': 'x', 'extra_field': '123'}, extra='allow')
    assert m == {'f': 'x'}
    assert model_extra == {'extra_field': '123'}
    assert fields_set == {'f', 'extra_field'}
    v.validate_assignment(m, 'f', 'y', extra='allow')
    assert m == {'f': 'y'}
    new_m, new_model_extra, new_fields_set = v.validate_assignment({**m, **model_extra}, 'not_f', '123', extra='allow')
    assert new_m == {'f': 'y'}
    assert new_model_extra == {'extra_field': '123', 'not_f': '123'}
    assert new_fields_set == {'not_f'}
@pytest.mark.parametrize(
    'config,schema_extra_behavior_kw,validate_fn_extra_kw',
    [
        # 'forbid' configured via config and/or schema, no runtime override...
        (core_schema.CoreConfig(extra_fields_behavior='forbid'), {}, None),
        (core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': None}, None),
        (core_schema.CoreConfig(), {'extra_behavior': 'forbid'}, None),
        (None, {'extra_behavior': 'forbid'}, None),
        (core_schema.CoreConfig(extra_fields_behavior='allow'), {'extra_behavior': 'forbid'}, None),
        # ...or 'forbid' forced at runtime over a weaker configured behavior
        (core_schema.CoreConfig(extra_fields_behavior='ignore'), {}, 'forbid'),
        (core_schema.CoreConfig(extra_fields_behavior='ignore'), {'extra_behavior': None}, 'forbid'),
        (core_schema.CoreConfig(), {'extra_behavior': 'ignore'}, 'forbid'),
        (None, {'extra_behavior': 'ignore'}, 'forbid'),
        (core_schema.CoreConfig(extra_fields_behavior='allow'), {'extra_behavior': 'ignore'}, 'forbid'),
        (core_schema.CoreConfig(), {}, 'forbid'),
        (core_schema.CoreConfig(), {'extra_behavior': None}, 'forbid'),
        (None, {'extra_behavior': None}, 'forbid'),
    ],
)
def test_extra_behavior_forbid(
    config: Union[core_schema.CoreConfig, None],
    schema_extra_behavior_kw: dict[str, Any],
    validate_fn_extra_kw: Union[ExtraBehavior, None],
):
    """With effective behavior 'forbid', extra input keys raise ``extra_forbidden``
    and assigning unknown attributes raises ``no_such_attribute``."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            {'f': core_schema.model_field(core_schema.str_schema())}, **schema_extra_behavior_kw
        ),
        config=config,
    )
    m, model_extra, fields_set = v.validate_python({'f': 'x'}, extra=validate_fn_extra_kw)
    assert m == {'f': 'x'}
    assert fields_set == {'f'}
    # an unexpected key in the input must be rejected outright
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python({'f': 'x', 'extra_field': 123}, extra=validate_fn_extra_kw)
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'extra_forbidden', 'loc': ('extra_field',), 'msg': 'Extra inputs are not permitted', 'input': 123}
    ]
    # known fields are still assignable
    v.validate_assignment(m, 'f', 'y', extra=validate_fn_extra_kw)
    assert m['f'] == 'y'
    # unknown attributes are rejected on assignment and the dict is left unchanged
    with pytest.raises(ValidationError) as exc_info:
        v.validate_assignment(m, 'not_f', 'xyz', extra=validate_fn_extra_kw)
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'no_such_attribute',
            'loc': ('not_f',),
            'msg': "Object has no attribute 'not_f'",
            'input': 'xyz',
            'ctx': {'attribute': 'not_f'},
        }
    ]
    assert 'not_f' not in m
@pytest.mark.parametrize(
    'config,schema_extra_behavior_kw,validate_fn_extra_kw',
    [
        # 'ignore' configured (or the implicit default), no runtime override...
        (core_schema.CoreConfig(extra_fields_behavior='ignore'), {}, None),
        (core_schema.CoreConfig(), {'extra_behavior': 'ignore'}, None),
        (None, {'extra_behavior': 'ignore'}, None),
        (core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': 'ignore'}, None),
        (core_schema.CoreConfig(), {}, None),
        (core_schema.CoreConfig(), {'extra_behavior': None}, None),
        (None, {'extra_behavior': None}, None),
        # ...or 'ignore' forced at runtime over a configured 'allow'
        (core_schema.CoreConfig(extra_fields_behavior='allow'), {}, 'ignore'),
        (core_schema.CoreConfig(), {'extra_behavior': 'allow'}, 'ignore'),
        (None, {'extra_behavior': 'allow'}, 'ignore'),
        (core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': 'allow'}, 'ignore'),
    ],
)
def test_extra_behavior_ignore(
    config: Union[core_schema.CoreConfig, None],
    schema_extra_behavior_kw: dict[str, Any],
    validate_fn_extra_kw: Union[ExtraBehavior, None],
):
    """With effective behavior 'ignore', extra input keys are silently dropped
    during validation; unknown attribute assignment still fails (see inline note)."""
    v = SchemaValidator(
        core_schema.model_fields_schema(
            {'f': core_schema.model_field(core_schema.str_schema())}, **schema_extra_behavior_kw
        ),
        config=config,
    )
    # 'extra_field' is dropped: no extras dict and not in the fields-set
    m, model_extra, fields_set = v.validate_python({'f': 'x', 'extra_field': 123}, extra=validate_fn_extra_kw)
    assert m == {'f': 'x'}
    assert model_extra is None
    assert fields_set == {'f'}
    v.validate_assignment(m, 'f', 'y', extra=validate_fn_extra_kw)
    assert m['f'] == 'y'
    # even if we ignore extra attributes during initialization / validation
    # we never ignore them during assignment
    # instead if extra='ignore' was set (or nothing was set since that's the default)
    # we treat it as if it were extra='forbid'
    with pytest.raises(ValidationError) as exc_info:
        v.validate_assignment(m, 'not_f', 'xyz', extra=validate_fn_extra_kw)
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'no_such_attribute',
            'loc': ('not_f',),
            'msg': "Object has no attribute 'not_f'",
            'input': 'xyz',
            'ctx': {'attribute': 'not_f'},
        }
    ]
    assert 'not_f' not in m
def test_extra_behavior_allow_keys_validation() -> None:
    """With ``extra_behavior='allow'``, extra *keys* are checked against ``extras_keys_schema``."""
    validator = SchemaValidator(
        core_schema.model_fields_schema(
            {}, extra_behavior='allow', extras_keys_schema=core_schema.str_schema(max_length=3)
        )
    )

    # a three-character key satisfies max_length=3 and lands in the extras dict
    fields, extras, fields_set = validator.validate_python({'ext': 123})
    assert fields == {}
    assert extras == {'ext': 123}
    assert fields_set == {'ext'}

    # a longer key is rejected by the keys schema itself
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python({'extra_too_long': 123})
    assert exc_info.value.errors()[0]['type'] == 'string_too_long'
@pytest.mark.parametrize('config_by_alias', [None, True, False])
@pytest.mark.parametrize('config_by_name', [None, True, False])
@pytest.mark.parametrize('runtime_by_alias', [None, True, False])
@pytest.mark.parametrize('runtime_by_name', [None, True, False])
def test_by_alias_and_name_config_interaction(
    config_by_alias: Union[bool, None],
    config_by_name: Union[bool, None],
    runtime_by_alias: Union[bool, None],
    runtime_by_name: Union[bool, None],
) -> None:
    """This test reflects the priority that applies for config vs runtime validation alias configuration.
    Runtime values take precedence over config values, when set.
    By default, by_alias is True and by_name is False.
    """
    # a model that accepts neither its alias nor its name cannot validate anything
    if config_by_alias is False and config_by_name is False and runtime_by_alias is False and runtime_by_name is False:
        pytest.skip("Can't have both by_alias and by_name as effectively False")

    class Model:
        def __init__(self, my_field: int) -> None:
            self.my_field = my_field

    # only include config keys that were explicitly parametrized (None means "unset")
    core_config = {
        **({'validate_by_alias': config_by_alias} if config_by_alias is not None else {}),
        **({'validate_by_name': config_by_name} if config_by_name is not None else {}),
    }
    schema = core_schema.model_schema(
        Model,
        core_schema.model_fields_schema(
            {
                'my_field': core_schema.model_field(core_schema.int_schema(), validation_alias='my_alias'),
            }
        ),
        config=core_schema.CoreConfig(**core_config),
    )
    s = SchemaValidator(schema)

    # expected resolution: runtime overrides config, which overrides the defaults
    # (by_alias defaults to True, by_name to False)
    alias_allowed = next(x for x in (runtime_by_alias, config_by_alias, True) if x is not None)
    name_allowed = next(x for x in (runtime_by_name, config_by_name, False) if x is not None)
    if alias_allowed:
        assert s.validate_python({'my_alias': 1}, by_alias=runtime_by_alias, by_name=runtime_by_name).my_field == 1
    if name_allowed:
        assert s.validate_python({'my_field': 1}, by_alias=runtime_by_alias, by_name=runtime_by_name).my_field == 1
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_model_fields.py",
"license": "MIT License",
"lines": 1628,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_model_init.py | import platform
import weakref
import pytest
from dirty_equals import IsInstance
from pydantic_core import CoreConfig, SchemaValidator, core_schema
from ..conftest import assert_gc
class MyModel:
    """Minimal model stand-in shared by the tests in this module."""

    # this is not required, but it avoids `__pydantic_fields_set__` being included in `__dict__`
    __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
    field_a: str
    field_b: int
def test_model_init():
    """Validating a dict against a model schema yields a populated ``MyModel``.

    Also checks that ``self_instance`` fills a pre-existing instance in place.
    """
    fields = {
        'field_a': core_schema.model_field(schema=core_schema.str_schema()),
        'field_b': core_schema.model_field(schema=core_schema.int_schema()),
    }
    validator = SchemaValidator(
        core_schema.model_schema(cls=MyModel, schema=core_schema.model_fields_schema(fields=fields))
    )

    model = validator.validate_python({'field_a': 'test', 'field_b': 12})
    assert isinstance(model, MyModel)
    assert model.field_a == 'test'
    assert model.field_b == 12
    assert model.__pydantic_fields_set__ == {'field_a', 'field_b'}

    blank = MyModel()
    result = validator.validate_python({'field_a': 'test', 'field_b': 12}, self_instance=blank)
    assert result == blank
    assert result.field_a == 'test'
    assert result.field_b == 12
    assert result.__pydantic_fields_set__ == {'field_a', 'field_b'}
def test_model_init_nested():
    """A model field whose schema is itself a model schema validates recursively,
    both for plain validation and when populating a ``self_instance``."""

    class MyModel:
        # this is not required, but it avoids `__pydantic_fields_set__` being included in `__dict__`
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'

    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            schema=core_schema.model_fields_schema(
                fields={
                    'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                    # 'field_b' is a nested model (reusing MyModel as its class)
                    'field_b': core_schema.model_field(
                        schema=core_schema.model_schema(
                            cls=MyModel,
                            schema=core_schema.model_fields_schema(
                                fields={
                                    'x_a': core_schema.model_field(schema=core_schema.str_schema()),
                                    'x_b': core_schema.model_field(schema=core_schema.int_schema()),
                                }
                            ),
                        )
                    ),
                }
            ),
        )
    )
    m = v.validate_python({'field_a': 'test', 'field_b': {'x_a': 'foo', 'x_b': 12}})
    assert isinstance(m, MyModel)
    assert m.field_a == 'test'
    assert isinstance(m.field_b, MyModel)
    assert m.field_b.x_a == 'foo'
    assert m.field_b.x_b == 12
    # same validation, but populating a pre-built outer instance in place
    m2 = MyModel()
    v.validate_python({'field_a': 'test', 'field_b': {'x_a': 'foo', 'x_b': 12}}, self_instance=m2)
    assert m2.field_a == 'test'
    assert isinstance(m2.field_b, MyModel)
    assert m2.field_b.x_a == 'foo'
    assert m2.field_b.x_b == 12
    assert m2.__pydantic_fields_set__ == {'field_a', 'field_b'}
def test_function_before():
    """A 'function-before' wrapper around a model schema sees the raw input dict
    and may mutate it before model validation runs."""

    def f(input_value, _info):
        # before-validators receive the raw (pre-validation) input
        assert isinstance(input_value, dict)
        input_value['field_a'] += b' XX'
        return input_value

    v = SchemaValidator(
        {
            'type': 'function-before',
            'function': {'type': 'with-info', 'function': f},
            'schema': core_schema.model_schema(
                cls=MyModel,
                schema=core_schema.model_fields_schema(
                    fields={
                        'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                        'field_b': core_schema.model_field(schema=core_schema.int_schema()),
                    }
                ),
            ),
        }
    )
    m = v.validate_python({'field_a': b'321', 'field_b': '12'})
    assert isinstance(m, MyModel)
    assert m.field_a == '321 XX'
    assert m.field_b == 12
    # the before-function also runs when populating a self_instance
    m2 = MyModel()
    v.validate_python({'field_a': b'321', 'field_b': '12'}, self_instance=m2)
    assert m2.__dict__ == {'field_a': '321 XX', 'field_b': 12}
    assert m2.__pydantic_fields_set__ == {'field_a', 'field_b'}
def test_function_after():
    """A 'function-after' wrapper around a model schema receives the already
    constructed model instance and may mutate it."""

    def f(input_value, _info):
        # always a model here, because even with `self_instance` the validator returns a model, e.g. m2 here
        assert isinstance(input_value, MyModel)
        input_value.field_a += ' Changed'
        return input_value

    v = SchemaValidator(
        {
            'type': 'function-after',
            'function': {'type': 'with-info', 'function': f},
            'schema': core_schema.model_schema(
                cls=MyModel,
                schema=core_schema.model_fields_schema(
                    fields={
                        'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                        'field_b': core_schema.model_field(schema=core_schema.int_schema()),
                    }
                ),
            ),
        }
    )
    m = v.validate_python({'field_a': b'321', 'field_b': '12'})
    assert isinstance(m, MyModel)
    assert m.field_a == '321 Changed'
    assert m.field_b == 12
    # the after-function also runs when populating a self_instance
    m2 = MyModel()
    v.validate_python({'field_a': b'321', 'field_b': '12'}, self_instance=m2)
    assert m2.__dict__ == {'field_a': '321 Changed', 'field_b': 12}
    assert m2.__pydantic_fields_set__ == {'field_a', 'field_b'}
def test_function_wrap():
    """A 'function-wrap' validator sees the raw input, calls the inner handler to
    build the model, and may mutate the result before returning it."""

    def f(input_value, handler, _info):
        # wrap-validators receive the raw input before delegating to the handler
        assert isinstance(input_value, dict)
        v = handler(input_value)
        # always a model here, because even with `self_instance` the validator returns a model, e.g. m2 here
        assert isinstance(v, MyModel)
        v.field_a += ' Changed'
        return v

    v = SchemaValidator(
        {
            'type': 'function-wrap',
            'function': {'type': 'with-info', 'function': f},
            'schema': core_schema.model_schema(
                cls=MyModel,
                schema=core_schema.model_fields_schema(
                    fields={
                        'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                        'field_b': core_schema.model_field(schema=core_schema.int_schema()),
                    }
                ),
            ),
        }
    )
    m = v.validate_python({'field_a': b'321', 'field_b': '12'})
    assert isinstance(m, MyModel)
    assert m.field_a == '321 Changed'
    assert m.field_b == 12
    # the wrap-function also runs when populating a self_instance
    m2 = MyModel()
    v.validate_python({'field_a': b'321', 'field_b': '12'}, self_instance=m2)
    assert m2.__dict__ == {'field_a': '321 Changed', 'field_b': 12}
    assert m2.__pydantic_fields_set__ == {'field_a', 'field_b'}
def test_simple():
    """A plain (non-model) validator accepts but ignores ``self_instance``."""
    validator = SchemaValidator(core_schema.str_schema())

    # bytes coerce to str with or without a self_instance
    assert validator.validate_python(b'abc') == 'abc'
    assert validator.validate_python(b'abc', self_instance='foobar') == 'abc'
    assert validator.isinstance_python(b'abc') is True
    assert validator.isinstance_python(b'abc', self_instance='foobar') is True
    assert validator.validate_json('"abc"') == 'abc'
    assert validator.validate_json('"abc"', self_instance='foobar') == 'abc'
def test_model_custom_init():
    """With ``custom_init=True`` the validator calls ``Model.__init__`` with the
    raw input; the __init__ may transform it before re-entering validation."""
    calls = []

    class Model:
        def __init__(self, **kwargs):
            calls.append(repr(kwargs))
            # double 'a' before validation to prove __init__ runs first
            if 'a' in kwargs:
                kwargs['a'] *= 2
            self.__pydantic_validator__.validate_python(kwargs, self_instance=self)
            self.c = self.a + 2

    v = SchemaValidator(
        core_schema.model_schema(
            Model,
            core_schema.model_fields_schema(
                {
                    'a': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=1)),
                    'b': core_schema.model_field(core_schema.int_schema()),
                }
            ),
            custom_init=True,
        )
    )
    Model.__pydantic_validator__ = v
    # 'a' omitted: default applies and 'a' is not in the fields-set
    m = v.validate_python({'b': 2})
    assert m.a == 1
    assert m.b == 2
    assert m.c == 3
    assert m.__pydantic_fields_set__ == {'b'}
    assert calls == ["{'b': 2}"]
    # 'a' supplied: doubled by __init__ before field validation
    m2 = v.validate_python({'a': 5, 'b': 3})
    assert m2.a == 10
    assert m2.b == 3
    assert m2.c == 12
    assert m2.__pydantic_fields_set__ == {'a', 'b'}
    assert calls == ["{'b': 2}", "{'a': 5, 'b': 3}"]
    # JSON input also goes through the custom __init__
    m3 = v.validate_json('{"a":10, "b": 4}')
    assert m3.a == 20
    assert m3.b == 4
    assert m3.c == 22
    assert m3.__pydantic_fields_set__ == {'a', 'b'}
    assert calls == ["{'b': 2}", "{'a': 5, 'b': 3}", "{'a': 10, 'b': 4}"]
def test_model_custom_init_nested():
    """Nested models with ``custom_init=True`` each get their own ``__init__``
    call, outer first, then inner (driven by the outer's re-validation)."""
    calls = []

    class ModelInner:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        a: int
        b: int

        def __init__(self, **data):
            calls.append(f'inner: {data!r}')
            self.__pydantic_validator__.validate_python(data, self_instance=self)

    inner_schema = core_schema.model_schema(
        ModelInner,
        core_schema.model_fields_schema(
            {
                'a': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=1)),
                'b': core_schema.model_field(core_schema.int_schema()),
            }
        ),
        custom_init=True,
    )
    ModelInner.__pydantic_validator__ = SchemaValidator(inner_schema)

    class ModelOuter:
        __slots__ = '__dict__', '__pydantic_fields_set__'
        a: int
        b: ModelInner

        def __init__(self, **data):
            calls.append(f'outer: {data!r}')
            self.__pydantic_validator__.validate_python(data, self_instance=self)

    # the outer schema embeds inner_schema directly for field 'b'
    ModelOuter.__pydantic_validator__ = SchemaValidator(
        core_schema.model_schema(
            ModelOuter,
            core_schema.model_fields_schema(
                {
                    'a': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=1)),
                    'b': core_schema.model_field(inner_schema),
                }
            ),
            custom_init=True,
        )
    )
    m = ModelOuter(a=2, b={'b': 3})
    assert m.__pydantic_fields_set__ == {'a', 'b'}
    assert m.a == 2
    assert isinstance(m.b, ModelInner)
    assert m.b.a == 1
    assert m.b.b == 3
    # insert_assert(calls)
    assert calls == ["outer: {'a': 2, 'b': {'b': 3}}", "inner: {'b': 3}"]
def test_model_custom_init_extra():
    """With ``extra_fields_behavior='allow'`` and ``custom_init=True``, extra
    kwargs flow through the custom ``__init__`` into ``__pydantic_extra__``."""
    calls = []

    class ModelInner:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        a: int
        b: int

        # route unknown attribute access to the extras dict
        def __getattr__(self, item):
            return self.__pydantic_extra__[item]

        def __init__(self, **data):
            self.__pydantic_validator__.validate_python(data, self_instance=self)
            calls.append(('inner', self.__dict__, self.__pydantic_fields_set__, self.__pydantic_extra__))

    inner_schema = core_schema.model_schema(
        ModelInner,
        core_schema.model_fields_schema(
            {
                'a': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=1)),
                'b': core_schema.model_field(core_schema.int_schema()),
            }
        ),
        config=CoreConfig(extra_fields_behavior='allow'),
        custom_init=True,
    )
    ModelInner.__pydantic_validator__ = SchemaValidator(inner_schema)

    class ModelOuter:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        a: int
        b: ModelInner

        def __getattr__(self, item):
            return self.__pydantic_extra__[item]

        def __init__(self, **data):
            # inject an extra key into the nested payload before validation
            data['b']['z'] = 1
            self.__pydantic_validator__.validate_python(data, self_instance=self)
            calls.append(('outer', self.__dict__, self.__pydantic_fields_set__, self.__pydantic_extra__))

    ModelOuter.__pydantic_validator__ = SchemaValidator(
        core_schema.model_schema(
            ModelOuter,
            core_schema.model_fields_schema(
                {
                    'a': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=1)),
                    'b': core_schema.model_field(inner_schema),
                }
            ),
            config=CoreConfig(extra_fields_behavior='allow'),
            custom_init=True,
        )
    )
    m = ModelOuter(a=2, b={'b': 3}, c=1)
    assert m.__pydantic_fields_set__ == {'a', 'b', 'c'}
    assert m.a == 2
    assert m.c == 1
    assert isinstance(m.b, ModelInner)
    assert m.b.a == 1
    assert m.b.b == 3
    assert m.b.z == 1
    # insert_assert(calls)
    assert calls == [
        ('inner', {'a': 1, 'b': 3}, {'b', 'z'}, {'z': 1}),
        ('outer', {'a': 2, 'b': IsInstance(ModelInner)}, {'c', 'a', 'b'}, {'c': 1}),
    ]
def test_model_custom_init_revalidate():
    """With ``revalidate_instances='always'`` and a custom ``__init__`` that
    bypasses validation, revalidating an instance calls ``__init__`` again with
    the instance's current ``__dict__``."""
    calls = []

    class Model:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'

        def __init__(self, **kwargs):
            calls.append(repr(kwargs))
            # deliberately skip validation: copy kwargs verbatim (so 'a' stays a str)
            self.__dict__.update(kwargs)
            self.__pydantic_fields_set__ = {'custom'}
            self.__pydantic_extra__ = None

    v = SchemaValidator(
        core_schema.model_schema(
            Model,
            core_schema.model_fields_schema({'a': core_schema.model_field(core_schema.int_schema())}),
            custom_init=True,
            config=dict(revalidate_instances='always'),
        )
    )
    m = v.validate_python({'a': '1'})
    assert isinstance(m, Model)
    assert m.a == '1'
    assert m.__pydantic_fields_set__ == {'custom'}
    assert calls == ["{'a': '1'}"]
    m.x = 4
    # revalidation builds a fresh instance from m's __dict__ (including 'x')
    m2 = v.validate_python(m)
    assert m2 is not m
    assert isinstance(m2, Model)
    assert m2.a == '1'
    assert m2.__dict__ == {'a': '1', 'x': 4}
    assert m2.__pydantic_fields_set__ == {'custom'}
    assert calls == ["{'a': '1'}", "{'a': '1', 'x': 4}"]
@pytest.mark.xfail(
    condition=platform.python_implementation() == 'PyPy', reason='https://foss.heptapod.net/pypy/pypy/-/issues/3899'
)
@pytest.mark.skipif(platform.python_implementation() == 'GraalVM', reason='Cannot reliably trigger GC on GraalPy')
@pytest.mark.parametrize('validator', [None, 'field', 'model'])
def test_leak_model(validator):
    """A class whose ``__pydantic_validator__`` references its own methods must
    still be garbage-collectable (no uncollectable Rust-side cycle)."""

    def fn():
        class Model:
            a: int

            @classmethod
            def _validator(cls, v, info):
                return v

            @classmethod
            def _wrap_validator(cls, v, validator, info):
                return validator(v)

        field_schema = core_schema.int_schema()
        if validator == 'field':
            # wrap the field schema in before/wrap/after validators bound to Model
            field_schema = core_schema.with_info_before_validator_function(Model._validator, field_schema)
            field_schema = core_schema.with_info_wrap_validator_function(Model._wrap_validator, field_schema)
            field_schema = core_schema.with_info_after_validator_function(Model._validator, field_schema)

        model_schema = core_schema.model_schema(
            Model, core_schema.model_fields_schema({'a': core_schema.model_field(field_schema)})
        )

        if validator == 'model':
            # same wrapping, but at the model-schema level
            model_schema = core_schema.with_info_before_validator_function(Model._validator, model_schema)
            model_schema = core_schema.with_info_wrap_validator_function(Model._wrap_validator, model_schema)
            model_schema = core_schema.with_info_after_validator_function(Model._validator, model_schema)

        # If any of the Rust validators don't implement traversal properly,
        # there will be an undetectable cycle created by this assignment
        # which will keep Model alive
        Model.__pydantic_validator__ = SchemaValidator(model_schema)

        return Model

    klass = fn()
    ref = weakref.ref(klass)
    assert ref() is not None
    del klass
    # the weakref must clear once the only strong reference is gone
    assert_gc(lambda: ref() is None)
def test_model_custom_init_with_union() -> None:
    """In a union of models with ``custom_init=True``, the matching branch's
    custom ``__init__`` receives the raw input kwargs."""

    class A:
        def __init__(self, **kwargs):
            assert 'a' in kwargs
            self.a = kwargs.get('a')

    class B:
        def __init__(self, **kwargs):
            assert 'b' in kwargs
            self.b = kwargs.get('b')

    # raw core-schema dict form (not the core_schema helpers) on purpose
    schema = {
        'type': 'union',
        'choices': [
            {
                'type': 'model',
                'cls': A,
                'schema': {
                    'type': 'model-fields',
                    'fields': {'a': {'type': 'model-field', 'schema': {'type': 'bool'}}},
                    'model_name': 'A',
                },
                'custom_init': True,
                'ref': '__main__.A:4947206928',
            },
            {
                'type': 'model',
                'cls': B,
                'schema': {
                    'type': 'model-fields',
                    'fields': {'b': {'type': 'model-field', 'schema': {'type': 'bool'}}},
                    'model_name': 'B',
                },
                'custom_init': True,
                'ref': '__main__.B:4679932848',
            },
        ],
    }
    validator = SchemaValidator(schema)
    # each input dict selects exactly one union branch by its key
    assert validator.validate_python({'a': False}).a is False
    assert validator.validate_python({'b': True}).b is True
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_model_init.py",
"license": "MIT License",
"lines": 420,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_model_root.py | import pytest
from pydantic_core import PydanticUndefined, SchemaValidator, ValidationError, core_schema
def test_model_root():
    """A ``root_model=True`` schema validates the input directly into the
    ``root`` attribute, from both Python and JSON inputs."""

    class RootModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        root: list[int]

    v = SchemaValidator(
        core_schema.model_schema(RootModel, core_schema.list_schema(core_schema.int_schema()), root_model=True)
    )
    # repr of the validator reflects the model title and root-model structure
    assert repr(v).startswith('SchemaValidator(title="RootModel", validator=Model(\n')
    m = v.validate_python([1, 2, '3'])
    assert isinstance(m, RootModel)
    assert m.root == [1, 2, 3]
    assert m.__dict__ == {'root': [1, 2, 3]}
    m = v.validate_json('[1, 2, "3"]')
    assert isinstance(m, RootModel)
    assert m.root == [1, 2, 3]
    # errors are reported at the root, i.e. with an empty loc
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python('wrong')
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'list_type', 'loc': (), 'msg': 'Input should be a valid list', 'input': 'wrong'}
    ]
def test_revalidate():
    """With ``revalidate_instances='always'`` an already-validated root model is
    revalidated into a *new* instance rather than returned as-is."""

    class RootModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        root: list[int]

    v = SchemaValidator(
        core_schema.model_schema(
            RootModel, core_schema.list_schema(core_schema.int_schema()), root_model=True, revalidate_instances='always'
        )
    )
    m = RootModel()
    m = v.validate_python([1, '2'], self_instance=m)
    assert isinstance(m, RootModel)
    assert m.root == [1, 2]
    assert m.__pydantic_fields_set__ == {'root'}
    m2 = v.validate_python(m)
    assert m2 is not m
    assert isinstance(m2, RootModel)
    assert m2.root == [1, 2]
    # NOTE(review): this re-asserts `m` (already checked above); `m2` was
    # probably intended here — confirm m2.__pydantic_fields_set__ upstream
    assert m.__pydantic_fields_set__ == {'root'}
def test_revalidate_with_default():
    """Revalidation of a root model whose value came from a default keeps an
    empty fields-set (the field was never explicitly set)."""

    class RootModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        root: int = 42

    v = SchemaValidator(
        core_schema.model_schema(
            RootModel,
            core_schema.with_default_schema(core_schema.int_schema(), default=42),
            root_model=True,
            revalidate_instances='always',
        )
    )
    m = RootModel()
    # PydanticUndefined as input triggers the default
    m = v.validate_python(PydanticUndefined, self_instance=m)
    assert isinstance(m, RootModel)
    assert m.root == 42
    assert m.__pydantic_fields_set__ == set()
    m2 = v.validate_python(m)
    assert m2 is not m
    assert isinstance(m2, RootModel)
    assert m2.root == 42
    # NOTE(review): this re-asserts `m` (already checked above); `m2` was
    # probably intended here — confirm m2.__pydantic_fields_set__ upstream
    assert m.__pydantic_fields_set__ == set()
def test_init():
    """Root-model validation with ``self_instance`` fills the given object in place."""

    class RootModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        root: str

    validator = SchemaValidator(
        core_schema.model_schema(RootModel, core_schema.str_schema(), root_model=True, revalidate_instances='always')
    )
    instance = RootModel()
    result = validator.validate_python('foobar', self_instance=instance)
    # the very same object comes back, now populated
    assert result is instance
    assert result.root == 'foobar'
def test_assignment():
    """``validate_assignment`` on a root model updates ``root`` in place and
    rejects any other attribute name."""

    class RootModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        root: str

    v = SchemaValidator(core_schema.model_schema(RootModel, core_schema.str_schema(), root_model=True))
    m = v.validate_python('foobar')
    assert m.root == 'foobar'
    # assignment returns the same (mutated) instance
    m2 = v.validate_assignment(m, 'root', 'baz')
    assert m2 is m
    assert m.root == 'baz'
    with pytest.raises(ValidationError) as exc_info:
        v.validate_assignment(m, 'different', 'baz')
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'no_such_attribute',
            'loc': ('different',),
            'msg': "Object has no attribute 'different'",
            'input': 'baz',
            'ctx': {'attribute': 'different'},
        }
    ]
def test_field_function():
    """An after-validator on the root schema runs for both initial validation
    and assignment, and its ValidationInfo carries the call's context."""
    call_infos = []

    class RootModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        root: str

    def f(input_value: str, info):
        # record the info repr to assert on config/context/field_name below
        call_infos.append(repr(info))
        return input_value + ' validated'

    v = SchemaValidator(
        core_schema.model_schema(
            RootModel,
            core_schema.with_info_after_validator_function(f, core_schema.str_schema()),
            root_model=True,
        )
    )
    m = v.validate_python('foobar', context='call 1')
    assert isinstance(m, RootModel)
    assert m.root == 'foobar validated'
    assert call_infos == ["ValidationInfo(config=None, context='call 1', data=None, field_name='root')"]
    # assignment re-runs the validator function with the assignment's context
    m2 = v.validate_assignment(m, 'root', 'baz', context='assignment call')
    assert m2 is m
    assert m.root == 'baz validated'
    assert call_infos == [
        "ValidationInfo(config=None, context='call 1', data=None, field_name='root')",
        "ValidationInfo(config=None, context='assignment call', data=None, field_name='root')",
    ]
def test_extra():
    """Root models never populate ``__pydantic_extra__``; the slot stays unset."""

    class RootModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        root: int

    validator = SchemaValidator(core_schema.model_schema(RootModel, core_schema.int_schema(), root_model=True))
    model = validator.validate_python(1)
    # reading the never-assigned slot raises AttributeError
    with pytest.raises(AttributeError):
        model.__pydantic_extra__
def test_fields_set():
    """``__pydantic_fields_set__`` tracks whether ``root`` was supplied or defaulted."""
    assert core_schema.PydanticUndefined is PydanticUndefined

    class RootModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        root: int = 42

    root_schema = core_schema.with_default_schema(core_schema.int_schema(), default=42)
    validator = SchemaValidator(core_schema.model_schema(RootModel, root_schema, root_model=True))

    instance = RootModel()
    # an explicit value marks 'root' as set
    validator.validate_python(1, self_instance=instance)
    assert instance.root == 1
    assert instance.__pydantic_fields_set__ == {'root'}
    # PydanticUndefined applies the default and empties the fields-set
    validator.validate_python(PydanticUndefined, self_instance=instance)
    assert instance.root == 42
    assert instance.__pydantic_fields_set__ == set()
def test_construct_from_validate_default():
    """With ``validate_default=True``, a defaulted field whose schema is a root
    model is constructed (not just copied) when the field is omitted."""

    class RootModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        root: int

    class Model:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        value: RootModel = 42

    v = SchemaValidator(
        core_schema.model_schema(
            Model,
            core_schema.model_fields_schema(
                {
                    'value': core_schema.model_field(
                        # default=42 is validated through the root-model schema,
                        # so the attribute becomes a RootModel instance
                        core_schema.with_default_schema(
                            core_schema.model_schema(RootModel, core_schema.int_schema(), root_model=True),
                            default=42,
                            validate_default=True,
                        )
                    )
                }
            ),
        )
    )
    m = Model()
    v.validate_python({}, self_instance=m)
    assert m.value.root == 42
    assert m.value.__pydantic_fields_set__ == {'root'}
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_model_root.py",
"license": "MIT License",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_none.py | import pytest
from pydantic_core import SchemaValidator, ValidationError
from pydantic_core import core_schema as cs
def test_python_none():
    """``None`` passes a none-schema; any other Python value fails with ``none_required``."""
    validator = SchemaValidator(cs.none_schema())
    assert validator.validate_python(None) is None

    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python(1)
    errors = exc_info.value.errors(include_url=False)
    assert errors == [{'type': 'none_required', 'loc': (), 'msg': 'Input should be None', 'input': 1}]
def test_json_none():
    """JSON ``null`` passes a none-schema; other JSON values fail (message says 'null')."""
    validator = SchemaValidator(cs.none_schema())
    assert validator.validate_json('null') is None

    with pytest.raises(ValidationError) as exc_info:
        validator.validate_json('1')
    errors = exc_info.value.errors(include_url=False)
    assert errors == [{'type': 'none_required', 'loc': (), 'msg': 'Input should be null', 'input': 1}]
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_none.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_nullable.py | import platform
import weakref
import pytest
from pydantic_core import SchemaValidator, ValidationError, core_schema
from ..conftest import assert_gc
def test_nullable():
    """A nullable int schema accepts None, ints, and numeric strings; other
    strings fail with the inner schema's error (``int_parsing``)."""
    v = SchemaValidator(core_schema.nullable_schema(schema=core_schema.int_schema()))
    assert v.validate_python(None) is None
    assert v.validate_python(1) == 1
    assert v.validate_python('123') == 123
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python('hello')
    # the error comes from the wrapped int schema, not the nullable wrapper
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': (),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'hello',
        }
    ]
def test_union_nullable_bool_int():
    """A union of two nullable schemas accepts None as well as either branch's type."""
    branches = [
        core_schema.nullable_schema(schema=core_schema.bool_schema()),
        core_schema.nullable_schema(schema=core_schema.int_schema()),
    ]
    validator = SchemaValidator(core_schema.union_schema(choices=branches))

    assert validator.validate_python(None) is None
    assert validator.validate_python(True) is True
    assert validator.validate_python(1) == 1
@pytest.mark.xfail(
    condition=platform.python_implementation() == 'PyPy', reason='https://foss.heptapod.net/pypy/pypy/-/issues/3899'
)
@pytest.mark.skipif(platform.python_implementation() == 'GraalVM', reason='Cannot reliably trigger GC on GraalPy')
def test_leak_nullable():
    """A function whose ``__pydantic_validator__`` wraps that same function in a
    nullable schema must still be garbage-collectable (no Rust-side leak)."""

    def fn():
        def validate(v, info):
            return v

        schema = core_schema.with_info_plain_validator_function(validate)
        schema = core_schema.nullable_schema(schema)

        # If any of the Rust validators don't implement traversal properly,
        # there will be an undetectable cycle created by this assignment
        # which will keep Defaulted alive
        validate.__pydantic_validator__ = SchemaValidator(schema)

        return validate

    cycle = fn()
    ref = weakref.ref(cycle)
    assert ref() is not None
    del cycle
    # the weakref must clear once the only strong reference is gone
    assert_gc(lambda: ref() is None)
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_nullable.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_pickling.py | import pickle
import re
from datetime import datetime, timedelta, timezone
import pytest
from pydantic_core import core_schema
from pydantic_core._pydantic_core import SchemaValidator, ValidationError
def test_basic_schema_validator():
    """A strict-dict validator keeps working after a pickle round trip."""
    validator = SchemaValidator(
        {'type': 'dict', 'strict': True, 'keys_schema': {'type': 'int'}, 'values_schema': {'type': 'int'}}
    )
    restored = pickle.loads(pickle.dumps(validator))

    assert restored.validate_python({'1': 2, '3': 4}) == {1: 2, 3: 4}
    assert restored.validate_python({}) == {}
    # non-dict input is still rejected after the round trip
    with pytest.raises(ValidationError, match=re.escape('[type=dict_type, input_value=[], input_type=list]')):
        restored.validate_python([])
def test_schema_validator_containing_config():
    """
    Verify that the config object is not lost during (de)serialization.
    """
    v = SchemaValidator(
        core_schema.model_fields_schema({'f': core_schema.model_field(core_schema.str_schema())}),
        config=core_schema.CoreConfig(extra_fields_behavior='allow'),
    )
    # round-trip through pickle, then validate with the restored validator
    v = pickle.loads(pickle.dumps(v))
    m, model_extra, fields_set = v.validate_python({'f': 'x', 'extra_field': '123'})
    assert m == {'f': 'x'}
    # If the config was lost during (de)serialization, the below checks would fail as
    # the default behavior is to ignore extra fields.
    assert model_extra == {'extra_field': '123'}
    assert fields_set == {'f', 'extra_field'}
    v.validate_assignment(m, 'f', 'y')
    assert m == {'f': 'y'}
def test_schema_validator_tz_pickle() -> None:
    """
    https://github.com/pydantic/pydantic-core/issues/589
    """
    validator = SchemaValidator(core_schema.datetime_schema())
    tz = timezone(timedelta(hours=-12, minutes=-15))
    expected = datetime(2022, 6, 8, 12, 13, 14, tzinfo=tz)
    parsed = validator.validate_python('2022-06-08T12:13:14-12:15')
    assert parsed == expected
    # the custom fixed-offset tzinfo must survive pickling of the validated value
    assert pickle.loads(pickle.dumps(parsed)) == parsed == expected
# Should be defined at the module level for pickling to work:
class Model:
    # Assigned after class creation (see the unpickling test below); kept at
    # module level so pickle can locate the class by qualified name.
    __pydantic_validator__: SchemaValidator
    __pydantic_complete__ = True
def test_schema_validator_not_reused_when_unpickling() -> None:
    """Unpickling must build a fresh validator rather than reuse a prebuilt one."""
    model_schema = core_schema.model_schema(
        cls=Model,
        schema=core_schema.model_fields_schema(fields={}, model_name='Model'),
        config={'title': 'Model'},
        ref='Model:123',
    )
    Model.__pydantic_validator__ = SchemaValidator(model_schema)
    assert 'Prebuilt' not in str(Model.__pydantic_validator__)
    restored = pickle.loads(pickle.dumps(Model.__pydantic_validator__))
    assert 'Prebuilt' not in str(restored)
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_pickling.py",
"license": "MIT License",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_set.py | import re
from collections import deque
from typing import Any
import pytest
from pydantic_core import SchemaValidator, ValidationError
from pydantic_core import core_schema as cs
from ..conftest import Err, PyAndJson, infinite_generator
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ([], set()),
        ([1, 2, 3], {1, 2, 3}),
        ([1, 2, '3'], {1, 2, 3}),
        ([1, 2, 3, 2, 3], {1, 2, 3}),
        (5, Err('[type=set_type, input_value=5, input_type=int]')),
    ],
)
def test_set_ints_both(py_and_json: PyAndJson, input_value, expected):
    """set[int] coercion and duplicate removal, exercised on both the Python
    and JSON input paths via the py_and_json fixture."""
    v = py_and_json({'type': 'set', 'items_schema': {'type': 'int'}})
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_test(input_value)
    else:
        assert v.validate_test(input_value) == expected
@pytest.mark.parametrize('input_value,expected', [([1, 2.5, '3'], {1, 2.5, '3'})])
def test_set_no_validators_both(py_and_json: PyAndJson, input_value, expected):
    """Without an items schema, elements pass through into the set untouched."""
    validator = py_and_json({'type': 'set'})
    result = validator.validate_test(input_value)
    assert result == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ([1, 2.5, '3'], {1, 2.5, '3'}),
        ('foo', Err('[type=set_type, input_value=foo, input_type=str]')),
        (1, Err('[type=set_type, input_value=1.0, input_type=float]')),
        (1.0, Err('[type=set_type, input_value=1.0, input_type=float]')),
        (False, Err('[type=set_type, input_value=False, input_type=bool]')),
    ],
)
def test_frozenset_no_validators_both(py_and_json: PyAndJson, input_value, expected):
    """Non-iterable inputs are rejected with a set_type error."""
    # NOTE(review): unlike the sibling tests, `match=expected.message` is not
    # re.escape()d, so the bracketed text is treated as a regex character class
    # and matches very loosely. That also hides that the int case (1) reuses the
    # float message 'input_value=1.0, input_type=float' — looks like a
    # copy-paste slip; confirm the real message before adding re.escape here.
    v = py_and_json({'type': 'set'})
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=expected.message):
            v.validate_test(input_value)
    else:
        assert v.validate_test(input_value) == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ({1, 2, 3}, {1, 2, 3}),
        (set(), set()),
        ([1, 2, 3, 2, 3], {1, 2, 3}),
        ([], set()),
        ((1, 2, 3, 2, 3), {1, 2, 3}),
        ((), set()),
        (frozenset([1, 2, 3, 2, 3]), {1, 2, 3}),
        (deque((1, 2, '3')), {1, 2, 3}),
        ({1: 10, 2: 20, '3': '30'}.keys(), {1, 2, 3}),
        ({1: 10, 2: 20, '3': '30'}.values(), {10, 20, 30}),
        ({1: 10, 2: 20, '3': '30'}, Err('Input should be a valid set [type=set_type,')),
        ((x for x in [1, 2, '3']), {1, 2, 3}),
        ({'abc'}, Err('0\n Input should be a valid integer')),
        ({1: 2}, Err('1 validation error for set[int]\n Input should be a valid set')),
        ('abc', Err('Input should be a valid set')),
    ],
)
@pytest.mark.thread_unsafe  # generators in parameters not compatible with pytest-run-parallel, https://github.com/Quansight-Labs/pytest-run-parallel/issues/14
def test_set_ints_python(input_value, expected):
    """Python-only path: which iterables are accepted as set input.

    Note dicts and plain strings are deliberately rejected, while dict views,
    generators, deques, tuples and frozensets are accepted.
    """
    v = SchemaValidator(cs.set_schema(items_schema=cs.int_schema()))
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_python(input_value)
    else:
        assert v.validate_python(input_value) == expected
@pytest.mark.parametrize('input_value,expected', [([1, 2.5, '3'], {1, 2.5, '3'}), ([(1, 2), (3, 4)], {(1, 2), (3, 4)})])
def test_set_no_validators_python(input_value, expected):
    """Python-only path: a bare set schema keeps any hashable elements as-is."""
    validator = SchemaValidator(cs.set_schema())
    assert validator.validate_python(input_value) == expected
def test_set_multiple_errors():
    """Every invalid element is reported, each with its positional loc."""
    v = SchemaValidator(cs.set_schema(items_schema=cs.int_schema()))
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(['a', (1, 2), []])
    expected_errors = [
        {
            'type': 'int_parsing',
            'loc': (0,),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'a',
        },
        {'type': 'int_type', 'loc': (1,), 'msg': 'Input should be a valid integer', 'input': (1, 2)},
        {'type': 'int_type', 'loc': (2,), 'msg': 'Input should be a valid integer', 'input': []},
    ]
    assert exc_info.value.errors(include_url=False) == expected_errors
def test_list_with_unhashable_items():
    """Unhashable elements produce set_item_not_hashable errors, not a crash."""
    v = SchemaValidator(cs.set_schema())
    class Unhashable:
        # setting __hash__ to None is how Python marks a class unhashable
        __hash__ = None
    unhashable = Unhashable()
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python([{'a': 'b'}, unhashable])
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'set_item_not_hashable', 'loc': (0,), 'msg': 'Set items should be hashable', 'input': {'a': 'b'}},
        {'type': 'set_item_not_hashable', 'loc': (1,), 'msg': 'Set items should be hashable', 'input': unhashable},
    ]
def generate_repeats():
    """Yield 1, 1, 2, 2, 3, 3 — duplicated values for set-deduplication tests."""
    for value in (1, 2, 3):
        yield from (value, value)
@pytest.mark.parametrize(
    'kwargs,input_value,expected',
    [
        ({'strict': True}, {1, 2, 3}, {1, 2, 3}),
        ({'strict': True}, set(), set()),
        ({'strict': True}, [1, 2, 3, 2, 3], Err('Input should be a valid set [type=set_type,')),
        ({'strict': True}, [], Err('Input should be a valid set [type=set_type,')),
        ({'strict': True}, (), Err('Input should be a valid set [type=set_type,')),
        ({'strict': True}, (1, 2, 3), Err('Input should be a valid set [type=set_type,')),
        ({'strict': True}, frozenset([1, 2, 3]), Err('Input should be a valid set [type=set_type,')),
        ({'strict': True}, 'abc', Err('Input should be a valid set [type=set_type,')),
        ({'min_length': 3}, {1, 2, 3}, {1, 2, 3}),
        ({'min_length': 3}, {1, 2}, Err('Set should have at least 3 items after validation, not 2 [type=too_short,')),
        (
            {'max_length': 3},
            {1, 2, 3, 4},
            Err('Set should have at most 3 items after validation, not more [type=too_long,'),
        ),
        (
            {'max_length': 3},
            [1, 2, 3, 4],
            Err('Set should have at most 3 items after validation, not more [type=too_long,'),
        ),
        ({'max_length': 3, 'items_schema': {'type': 'int'}}, {1, 2, 3, 4}, Err('type=too_long,')),
        ({'max_length': 3, 'items_schema': {'type': 'int'}}, [1, 2, 3, 4], Err('type=too_long,')),
        # length check after set creation
        ({'max_length': 3}, [1, 1, 2, 2, 3, 3], {1, 2, 3}),
        ({'max_length': 3}, generate_repeats(), {1, 2, 3}),
        (
            {'max_length': 3},
            infinite_generator(),
            Err('Set should have at most 3 items after validation, not more [type=too_long,'),
        ),
    ],
)
@pytest.mark.thread_unsafe  # generators in parameters not compatible with pytest-run-parallel, https://github.com/Quansight-Labs/pytest-run-parallel/issues/14
def test_set_kwargs(kwargs: dict[str, Any], input_value, expected):
    """strict / min_length / max_length options on the set schema.

    The infinite_generator case shows max_length aborting iteration early
    instead of consuming the generator forever.
    """
    v = SchemaValidator(cs.set_schema(**kwargs))
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            r = v.validate_python(input_value)
            # only reached if the expected error was NOT raised
            print(f'unexpected result: {r!r}')
    else:
        assert v.validate_python(input_value) == expected
@pytest.mark.parametrize('input_value,expected', [({1, 2, 3}, {1, 2, 3}), ([1, 2, 3], [1, 2, 3])])
def test_union_set_list(input_value, expected):
    """A set | list union picks the branch matching the input's own type.

    Both parametrized cases succeed, so the previous `isinstance(expected, Err)`
    branch was unreachable dead code and has been removed.
    """
    v = SchemaValidator(cs.union_schema(choices=[cs.set_schema(), cs.list_schema()]))
    assert v.validate_python(input_value) == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ({1, 2, 3}, {1, 2, 3}),
        ({'a', 'b', 'c'}, {'a', 'b', 'c'}),
        (
            [1, 'a'],
            Err(
                '2 validation errors for union',
                errors=[
                    {
                        'type': 'int_type',
                        'loc': ('set[int]', 1),
                        'msg': 'Input should be a valid integer',
                        'input': 'a',
                    },
                    # second because validation on the string choice comes second
                    {
                        'type': 'string_type',
                        'loc': ('set[str]', 0),
                        'msg': 'Input should be a valid string',
                        'input': 1,
                    },
                ],
            ),
        ),
    ],
)
def test_union_set_int_set_str(input_value, expected):
    """set[int] | set[str] union: mixed input fails both choices and the error
    loc is prefixed with the failing choice's name."""
    v = SchemaValidator(
        cs.union_schema(
            choices=[
                cs.set_schema(items_schema=cs.int_schema(strict=True)),
                cs.set_schema(items_schema=cs.str_schema(strict=True)),
            ]
        )
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)) as exc_info:
            v.validate_python(input_value)
        if expected.errors is not None:
            assert exc_info.value.errors(include_url=False) == expected.errors
    else:
        assert v.validate_python(input_value) == expected
def test_set_as_dict_keys(py_and_json: PyAndJson):
    """A string dict key fails a set keys_schema with a set_type error."""
    validator = py_and_json({'type': 'dict', 'keys_schema': {'type': 'set'}, 'values_schema': {'type': 'int'}})
    pattern = re.escape("[type=set_type, input_value='foo', input_type=str]")
    with pytest.raises(ValidationError, match=pattern):
        validator.validate_test({'foo': 'bar'})
def test_generator_error():
    """An exception raised mid-iteration surfaces as an iteration_error."""
    def items(fail: bool):
        yield 1
        yield 2
        if fail:
            raise RuntimeError('my error')
        yield 3
    v = SchemaValidator(cs.set_schema(items_schema=cs.int_schema()))
    result = v.validate_python(items(False))
    assert isinstance(result, set)
    assert result == {1, 2, 3}
    msg = r'Error iterating over object, error: RuntimeError: my error \[type=iteration_error,'
    with pytest.raises(ValidationError, match=msg):
        v.validate_python(items(True))
@pytest.mark.parametrize(
    'input_value,items_schema,expected',
    [
        pytest.param(
            {1: 10, 2: 20, '3': '30'}.items(),
            {'type': 'tuple', 'items_schema': [{'type': 'any'}], 'variadic_item_index': 0},
            {(1, 10), (2, 20), ('3', '30')},
            id='Tuple[Any, Any]',
        ),
        pytest.param(
            {1: 10, 2: 20, '3': '30'}.items(),
            {'type': 'tuple', 'items_schema': [{'type': 'int'}], 'variadic_item_index': 0},
            {(1, 10), (2, 20), (3, 30)},
            id='Tuple[int, int]',
        ),
        pytest.param({1: 10, 2: 20, '3': '30'}.items(), {'type': 'any'}, {(1, 10), (2, 20), ('3', '30')}, id='Any'),
    ],
)
def test_set_from_dict_items(input_value, items_schema, expected):
    """A dict items() view validates as a set of (key, value) tuples; an int
    items schema coerces both members of each pair."""
    v = SchemaValidator(cs.set_schema(items_schema=items_schema))
    output = v.validate_python(input_value)
    assert isinstance(output, set)
    assert output == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ([], set()),
        ([1, '2', b'3'], {1, '2', b'3'}),
        ({1, '2', b'3'}, {1, '2', b'3'}),
        (frozenset([1, '2', b'3']), {1, '2', b'3'}),
        (deque([1, '2', b'3']), {1, '2', b'3'}),
    ],
)
def test_set_any(input_value, expected):
    """Any iterable of hashables becomes a plain set when no items schema is given."""
    validator = SchemaValidator(cs.set_schema())
    result = validator.validate_python(input_value)
    assert isinstance(result, set)
    assert result == expected
@pytest.mark.parametrize(
    'fail_fast,expected',
    [
        pytest.param(
            True,
            [
                {
                    'type': 'int_parsing',
                    'loc': (1,),
                    'msg': 'Input should be a valid integer, unable to parse string as an integer',
                    'input': 'not-num',
                },
            ],
            id='fail_fast',
        ),
        pytest.param(
            False,
            [
                {
                    'type': 'int_parsing',
                    'loc': (1,),
                    'msg': 'Input should be a valid integer, unable to parse string as an integer',
                    'input': 'not-num',
                },
                {
                    'type': 'int_parsing',
                    'loc': (2,),
                    'msg': 'Input should be a valid integer, unable to parse string as an integer',
                    'input': 'again',
                },
            ],
            id='not_fail_fast',
        ),
    ],
)
def test_set_fail_fast(fail_fast, expected):
    """fail_fast=True stops at the first bad element; otherwise all bad
    elements are collected into the error list."""
    v = SchemaValidator(cs.set_schema(items_schema=cs.int_schema(), fail_fast=fail_fast))
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python([1, 'not-num', 'again'])
    assert exc_info.value.errors(include_url=False) == expected
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_set.py",
"license": "MIT License",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_string.py | import platform
import re
import sys
from decimal import Decimal
from numbers import Number
from typing import Any, Union
import pytest
from pydantic_core import CoreConfig, SchemaError, SchemaValidator, ValidationError, core_schema
from ..conftest import Err, PyAndJson, plain_repr
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ('foobar', 'foobar'),
        (123, Err('Input should be a valid string [type=string_type, input_value=123, input_type=int]')),
        (123.456, Err('Input should be a valid string [type=string_type, input_value=123.456, input_type=float]')),
        (False, Err('Input should be a valid string [type=string_type')),
        (True, Err('Input should be a valid string [type=string_type')),
        ([], Err('Input should be a valid string [type=string_type, input_value=[], input_type=list]')),
    ],
)
def test_str(py_and_json: PyAndJson, input_value, expected):
    """Only actual strings pass a plain str schema; numbers, bools and lists
    all fail with string_type, on both Python and JSON paths."""
    v = py_and_json({'type': 'str'})
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_test(input_value)
    else:
        assert v.validate_test(input_value) == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ('foobar', 'foobar'),
        ('🐈 Hello \ud800World', '🐈 Hello \ud800World'),
        (b'foobar', 'foobar'),
        (bytearray(b'foobar'), 'foobar'),
        (
            b'\x81',
            Err('Input should be a valid string, unable to parse raw data as a unicode string [type=string_unicode'),
        ),
        (
            bytearray(b'\x81'),
            Err('Input should be a valid string, unable to parse raw data as a unicode string [type=string_unicode'),
        ),
        # null bytes are very annoying, but we can't really block them here
        (b'\x00', '\x00'),
        (123, Err('Input should be a valid string [type=string_type, input_value=123, input_type=int]')),
        (
            Decimal('123'),
            Err("Input should be a valid string [type=string_type, input_value=Decimal('123'), input_type=Decimal]"),
        ),
    ],
)
def test_str_not_json(input_value, expected):
    """Python-only coercions: bytes/bytearray decode as UTF-8 (failing with
    string_unicode on invalid data); Decimal and int are rejected."""
    v = SchemaValidator(core_schema.str_schema())
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_python(input_value)
    else:
        assert v.validate_python(input_value) == expected
@pytest.mark.parametrize(
    'kwargs,input_value,expected',
    [
        ({}, 'abc', 'abc'),
        ({'strict': True}, 'Foobar', 'Foobar'),
        ({'to_upper': True}, 'fooBar', 'FOOBAR'),
        ({'to_lower': True}, 'fooBar', 'foobar'),
        ({'strip_whitespace': True}, ' foobar  ', 'foobar'),
        ({'strip_whitespace': True, 'to_upper': True}, ' fooBar', 'FOOBAR'),
        ({'min_length': 5}, '12345', '12345'),
        ({'min_length': 5}, '1234', Err('String should have at least 5 characters [type=string_too_short')),
        ({'max_length': 5}, '12345', '12345'),
        ({'max_length': 5}, '123456', Err('String should have at most 5 characters [type=string_too_long')),
        ({'pattern': r'^\d+$'}, '12345', '12345'),
        ({'pattern': r'\d+$'}, 'foobar 123', 'foobar 123'),
        ({'pattern': r'^\d+$'}, '12345a', Err("String should match pattern '^\\d+$' [type=string_pattern_mismatch")),
        ({'pattern': r'[a-z]'}, 'Abc', 'Abc'),
        ({'pattern': re.compile(r'[a-z]')}, 'Abc', 'Abc'),
        # strip comes after length check
        ({'max_length': 5, 'strip_whitespace': True}, '1234   ', '1234'),
        # to_upper and strip comes after pattern check
        ({'to_upper': True, 'pattern': 'abc'}, 'abc', 'ABC'),
        ({'strip_whitespace': True, 'pattern': r'\d+$'}, 'foobar 123 ', 'foobar 123'),
        ({'min_length': 1}, '🐈 Hello', '🐈 Hello'),
    ],
)
def test_constrained_str(py_and_json: PyAndJson, kwargs: dict[str, Any], input_value, expected):
    """String constraints (case transforms, whitespace stripping, length
    bounds, regex patterns) and their evaluation order."""
    v = py_and_json({'type': 'str', **kwargs})
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_test(input_value)
    else:
        assert v.validate_test(input_value) == expected
@pytest.mark.parametrize(
    'kwargs,input_value,expected',
    [
        ({}, b'abc', 'abc'),
        ({'strict': True}, 'Foobar', 'Foobar'),
        (
            {'strict': True},
            123,
            Err('Input should be a valid string [type=string_type, input_value=123, input_type=int]'),
        ),
    ],
)
def test_constrained_str_py_only(kwargs: dict[str, Any], input_value, expected):
    """Python-only path: bytes decode in lax mode; strict mode rejects non-str."""
    v = SchemaValidator(core_schema.str_schema(**kwargs))
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_python(input_value)
    else:
        assert v.validate_python(input_value) == expected
def test_unicode_error():
    """A lone surrogate passes a plain str schema but fails once a constraint
    forces conversion to a Rust string."""
    # `.to_str()` Returns a `UnicodeEncodeError` if the input is not valid unicode (containing unpaired surrogates).
    # https://github.com/PyO3/pyo3/blob/6503128442b8f3e767c663a6a8d96376d7fb603d/src/types/string.rs#L477
    v = SchemaValidator(core_schema.str_schema(min_length=1))
    assert v.validate_python('🐈 Hello') == '🐈 Hello'
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python('🐈 Hello \ud800World')
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'string_unicode',
            'loc': (),
            'msg': 'Input should be a valid string, unable to parse raw data as a unicode string',
            'input': '🐈 Hello \ud800World',
        }
    ]
@pytest.mark.parametrize(
    ('data', 'max_length', 'error'),
    [
        pytest.param('test', 5, None, id='short string'),
        pytest.param('test long', 5, 'String should have at most 5 characters', id='long string'),
        pytest.param('␛⯋℃▤', 5, None, id='short string with unicode characters'),
        pytest.param(
            '␛⯋℃▤⩥⠫⳼⣪⨺✒⧐♳⩚⏭⏣⍥┙⧃Ⰴ┽⏏♜',
            5,
            'String should have at most 5 characters',
            id='long string with unicode characters',
        ),
        pytest.param('а' * 25, 32, None, id='a lot of `а`s'),
    ],
)
def test_str_constrained(data: str, max_length: int, error: Union[re.Pattern, None]):
    """max_length counts characters (code points), not bytes — multi-byte
    unicode strings under the limit must pass."""
    v = SchemaValidator(core_schema.str_schema(max_length=max_length))
    if error is None:
        assert v.validate_python(data) == data
    else:
        with pytest.raises(ValidationError, match=error):
            v.validate_python(data)
def test_str_constrained_config():
    """str_max_length from the config applies when the schema sets no limit."""
    validator = SchemaValidator(core_schema.str_schema(), config=CoreConfig(str_max_length=5))
    assert validator.validate_python('test') == 'test'
    with pytest.raises(ValidationError, match='String should have at most 5 characters'):
        validator.validate_python('test long')
@pytest.mark.parametrize('engine', [None, 'rust-regex', 'python-re'])
def test_invalid_regex(engine):
    """An unparseable pattern fails at validator build time with a SchemaError
    whose message depends on the regex engine in use."""
    if platform.python_implementation() == 'PyPy' and sys.version_info[:2] == (3, 11):
        # pypy 3.11 type formatting
        pytest.xfail()
    # TODO uncomment and fix once #150 is done
    # with pytest.raises(SchemaError) as exc_info:
    #     SchemaValidator({'type': 'str', 'pattern': 123})
    # assert exc_info.value.args[0] == (
    #     'Error building "str" validator:\n TypeError: \'int\' object is not an instance of \'str\''
    # )
    with pytest.raises(SchemaError) as exc_info:
        SchemaValidator(core_schema.str_schema(pattern='(abc', regex_engine=engine))
    # None falls back to the default engine, which is rust-regex
    if engine is None or engine == 'rust-regex':
        assert exc_info.value.args[0] == (
            'Error building "str" validator:\n SchemaError: regex parse error:\n (abc\n ^\nerror: unclosed group'
        )
    elif engine == 'python-re':
        # CPython 3.13 renamed re.error to re.PatternError
        prefix = 'PatternError' if sys.version_info >= (3, 13) else 'error'
        assert exc_info.value.args[0] == (
            f'Error building "str" validator:\n {prefix}: missing ), unterminated subpattern at position 0'
        )
@pytest.mark.parametrize('engine', [None, 'rust-regex', 'python-re'])
def test_regex_error(engine):
    """A pattern mismatch reports string_pattern_mismatch with the pattern in ctx."""
    validator = SchemaValidator(core_schema.str_schema(pattern='11', regex_engine=engine))
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python('12')
    expected = [
        {
            'type': 'string_pattern_mismatch',
            'loc': (),
            'msg': "String should match pattern '11'",
            'input': '12',
            'ctx': {'pattern': '11'},
        }
    ]
    assert exc_info.value.errors(include_url=False) == expected
def test_default_validator():
    """Pin the repr of a default-configured str validator (strict, no
    whitespace stripping, number coercion off, string caching on)."""
    v = SchemaValidator(
        core_schema.str_schema(strict=True, to_lower=False), config=CoreConfig(str_strip_whitespace=False)
    )
    assert (
        plain_repr(v)
        == 'SchemaValidator(title="str",validator=Str(StrValidator{strict:true,coerce_numbers_to_str:false}),definitions=[],cache_strings=True)'
    )
@pytest.fixture(scope='session', name='FruitEnum')
def fruit_enum_fixture():
    """Session-scoped str-based enum used by the subclass-coercion tests."""
    from enum import Enum
    class FruitEnum(str, Enum):
        pear = 'pear'
        banana = 'banana'
    return FruitEnum
@pytest.mark.parametrize('to_lower', [False, True], ids=repr)
def test_strict_subclass(to_lower: bool):
    """Strict mode accepts str subclasses and still applies to_lower.

    Bug fix: the previous `assert res == 'abc' if to_lower else 'ABC'` parsed
    as `assert (res == 'abc') if to_lower else 'ABC'`, i.e. for to_lower=False
    the whole statement evaluated the truthy string 'ABC' and asserted nothing.
    Parenthesizing the conditional restores the intended comparison.
    """
    v = SchemaValidator(core_schema.str_schema(strict=True, to_lower=to_lower))
    class StrSubclass(str):
        pass
    res = v.validate_python(StrSubclass('ABC'))
    assert res == ('abc' if to_lower else 'ABC')
@pytest.mark.parametrize('kwargs', [{}, {'to_lower': True}], ids=repr)
def test_lax_subclass(FruitEnum, kwargs):
    """Lax mode coerces str/bytes and str-based enum members to exact `str`."""
    v = SchemaValidator(core_schema.str_schema(**kwargs))
    assert v.validate_python('foobar') == 'foobar'
    assert v.validate_python(b'foobar') == 'foobar'
    p = v.validate_python(FruitEnum.pear)
    assert p == 'pear'
    # the result must be a plain str, not the enum subclass
    assert type(p) is str
    assert repr(p) == "'pear'"
@pytest.mark.parametrize('kwargs', [{}, {'to_lower': True}], ids=repr)
def test_lax_subclass_plain_enum(kwargs):
    """A plain (non-str) enum member is coerced to its exact str value."""
    from enum import Enum
    class PlainEnum(Enum):
        ONE = 'one'
    validator = SchemaValidator(core_schema.str_schema(**kwargs))
    result = validator.validate_python(PlainEnum.ONE)
    assert result == 'one'
    assert type(result) is str
    assert repr(result) == "'one'"
def test_subclass_preserved() -> None:
    # NOTE(review): despite the name, this asserts the subclass is NOT
    # preserved — validation always returns a plain `str`.
    class StrSubclass(str):
        pass
    v = SchemaValidator(core_schema.str_schema())
    assert not isinstance(v.validate_python(StrSubclass('')), StrSubclass)
    assert not isinstance(v.validate_python(StrSubclass(''), strict=True), StrSubclass)
    # unions do a first pass in strict mode
    # so verify that they don't match the str schema in strict mode
    # and preserve the type
    v = SchemaValidator(core_schema.union_schema([core_schema.str_schema(), core_schema.int_schema()]))
    assert not isinstance(v.validate_python(StrSubclass('')), StrSubclass)
    assert not isinstance(v.validate_python(StrSubclass(''), strict=True), StrSubclass)
@pytest.mark.parametrize('strict', [True, False])
def test_coerce_numbers_to_str_with_invalid_unicode_character(strict) -> None:
    """A lone surrogate passes through unchanged in both strict and lax mode.

    The parameter was previously named `string` although it actually toggles
    strict mode; renamed for clarity (parametrize id updated to match).
    """
    config = core_schema.CoreConfig(coerce_numbers_to_str=True)
    v = SchemaValidator(core_schema.str_schema(strict=strict), config=config)
    assert v.validate_python('\ud835') == '\ud835'
def test_coerce_numbers_to_str_disabled_in_strict_mode() -> None:
    """Number-to-string coercion from the config is ignored under strict=True."""
    validator = SchemaValidator(
        core_schema.str_schema(strict=True),
        config=core_schema.CoreConfig(coerce_numbers_to_str=True),
    )
    with pytest.raises(ValidationError):
        validator.validate_python(42)
    with pytest.raises(ValidationError):
        validator.validate_json('42')
def test_coerce_numbers_to_str_raises_for_bool() -> None:
    """bool is never coerced to str, even with coerce_numbers_to_str=True."""
    config = core_schema.CoreConfig(coerce_numbers_to_str=True)
    v = SchemaValidator(core_schema.str_schema(), config=config)
    with pytest.raises(ValidationError):
        v.validate_python(True)
    with pytest.raises(ValidationError):
        # NOTE(review): this passes the Python bool False rather than the JSON
        # text 'false' — it raises either way, but confirm this was intended.
        v.validate_json(False)
@pytest.mark.parametrize(
    ('number', 'expected_str'),
    [
        pytest.param(42, '42', id='42'),
        pytest.param(42.0, '42.0', id='42.0'),
        pytest.param(Decimal('42.0'), '42.0', id="Decimal('42.0')"),
    ],
)
def test_coerce_numbers_to_str(number: Number, expected_str: str) -> None:
    """With coerce_numbers_to_str in the config, int/float/Decimal inputs
    validate to their string form."""
    config = core_schema.CoreConfig(coerce_numbers_to_str=True)
    v = SchemaValidator(core_schema.str_schema(), config=config)
    assert v.validate_python(number) == expected_str
@pytest.mark.parametrize(
    ('number', 'expected_str'),
    [
        pytest.param('42', '42', id='42'),
        # note: the JSON number 42.0 stringifies as '42', unlike the Python path
        pytest.param('42.0', '42', id='42.0'),
        pytest.param('42.13', '42.13', id='42.13'),
    ],
)
def test_coerce_numbers_to_str_from_json(number: str, expected_str: str) -> None:
    """JSON numbers coerce to their string form when coerce_numbers_to_str is set."""
    config = core_schema.CoreConfig(coerce_numbers_to_str=True)
    v = SchemaValidator(core_schema.str_schema(), config=config)
    assert v.validate_json(number) == expected_str
@pytest.mark.parametrize('mode', (None, 'schema', 'config'))
@pytest.mark.xfail(
    platform.python_implementation() == 'PyPy' and sys.version_info[:2] == (3, 11), reason='pypy 3.11 type formatting'
)
def test_backtracking_regex_rust_unsupported(mode) -> None:
    """Backreferences are unsupported by the rust-regex engine, whether it is
    selected implicitly, via the schema, or via the config."""
    pattern = r'r(#*)".*?"\1'
    with pytest.raises(SchemaError) as exc_info:
        if mode is None:
            # rust-regex is the default
            SchemaValidator(core_schema.str_schema(pattern=pattern))
        elif mode == 'schema':
            SchemaValidator(core_schema.str_schema(pattern=pattern, regex_engine='rust-regex'))
        elif mode == 'config':
            SchemaValidator(
                schema=core_schema.str_schema(pattern=pattern), config=core_schema.CoreConfig(regex_engine='rust-regex')
            )
    assert exc_info.value.args[0] == (
        'Error building "str" validator:\n'
        ' SchemaError: regex parse error:\n'
        ' r(#*)".*?"\\1\n'
        ' ^^\n'
        'error: backreferences are not supported'
    )
@pytest.mark.parametrize('mode', ('schema', 'config'))
def test_backtracking_regex_python(mode) -> None:
    """The python-re engine supports backreferences (raw-string literals),
    selected either per-schema or via the config."""
    pattern = r'r(#*)".*?"\1'
    if mode == 'schema':
        v = SchemaValidator(core_schema.str_schema(pattern=pattern, regex_engine='python-re'))
    elif mode == 'config':
        v = SchemaValidator(
            schema=core_schema.str_schema(pattern=pattern), config=core_schema.CoreConfig(regex_engine='python-re')
        )
    assert v.validate_python('r""') == 'r""'
    assert v.validate_python('r#""#') == 'r#""#'
    with pytest.raises(ValidationError):
        # not a valid match for the pattern
        v.validate_python('r#"#')
@pytest.mark.parametrize('number', (42, 443, 10242))
def test_coerce_numbers_to_str_schema(number: int):
    """Schema-level coerce_numbers_to_str converts ints on both input paths."""
    validator = SchemaValidator(core_schema.str_schema(coerce_numbers_to_str=True))
    expected = str(number)
    assert validator.validate_python(number) == expected
    assert validator.validate_json(expected) == expected
@pytest.mark.parametrize('number', (42, 443, 10242))
def test_coerce_numbers_to_str_schema_precedence(number: int):
    """The schema-level coerce_numbers_to_str setting always overrides the
    config-level one, in both directions."""
    # schema True beats config False
    config = core_schema.CoreConfig(coerce_numbers_to_str=False)
    v = SchemaValidator(core_schema.str_schema(coerce_numbers_to_str=True), config=config)
    assert v.validate_python(number) == str(number)
    assert v.validate_json(str(number)) == str(number)
    # schema False beats config True
    config = core_schema.CoreConfig(coerce_numbers_to_str=True)
    v = SchemaValidator(core_schema.str_schema(coerce_numbers_to_str=False), config=config)
    with pytest.raises(ValidationError):
        v.validate_python(number)
    with pytest.raises(ValidationError):
        v.validate_json(str(number))
@pytest.mark.parametrize('number', (42, 443, 10242))
def test_coerce_numbers_to_str_schema_with_strict_mode(number: int):
    """strict=True wins over schema-level coercion: numbers are rejected."""
    validator = SchemaValidator(core_schema.str_schema(coerce_numbers_to_str=True, strict=True))
    attempts = [(validator.validate_python, number), (validator.validate_json, str(number))]
    for validate, value in attempts:
        with pytest.raises(ValidationError):
            validate(value)
@pytest.mark.parametrize('engine', [None, 'rust-regex', 'python-re'])
def test_compiled_regex(engine) -> None:
    """A pre-compiled pattern keeps its flags (IGNORECASE) under every engine."""
    compiled = re.compile('abc', re.IGNORECASE)
    validator = SchemaValidator(core_schema.str_schema(pattern=compiled, regex_engine=engine))
    for value in ('abc', 'ABC'):
        assert validator.validate_python(value) == value
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_string.py",
"license": "MIT License",
"lines": 350,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_tagged_union.py | from enum import Enum
import pytest
from dirty_equals import IsAnyStr
from pydantic_core import CoreConfig, SchemaValidator, ValidationError, core_schema
from ..conftest import Err, PyAndJson
from .test_typed_dict import Cls
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ({'foo': 'apple', 'bar': '123'}, {'foo': 'apple', 'bar': 123}),
        ({'foo': 'banana', 'spam': [1, 2, '3']}, {'foo': 'banana', 'spam': [1, 2, 3]}),
        (
            {'foo': 'apple', 'bar': 'wrong'},
            Err(
                'Input should be a valid integer',
                [
                    {
                        'type': 'int_parsing',
                        'loc': ('apple', 'bar'),
                        'msg': 'Input should be a valid integer, unable to parse string as an integer',
                        'input': 'wrong',
                    }
                ],
            ),
        ),
        (
            {'foo': 'banana'},
            Err(
                'Field required',
                [{'type': 'missing', 'loc': ('banana', 'spam'), 'msg': 'Field required', 'input': {'foo': 'banana'}}],
            ),
        ),
        (
            {'foo': 'other'},
            Err(
                'union_tag_invalid',
                [
                    {
                        'type': 'union_tag_invalid',
                        'loc': (),
                        'msg': (
                            "Input tag 'other' found using 'foo' does not match any "
                            "of the expected tags: 'apple', 'banana'"
                        ),
                        'input': {'foo': 'other'},
                        'ctx': {'discriminator': "'foo'", 'tag': 'other', 'expected_tags': "'apple', 'banana'"},
                    }
                ],
            ),
        ),
        (
            {},
            Err(
                'union_tag_not_found',
                [
                    {
                        'type': 'union_tag_not_found',
                        'loc': (),
                        'msg': "Unable to extract tag using discriminator 'foo'",
                        'input': {},
                        'ctx': {'discriminator': "'foo'"},
                    }
                ],
            ),
        ),
        (
            'not a dict',
            Err(
                'dict_type',
                [
                    {
                        'type': 'dict_type',
                        'loc': (),
                        'msg': IsAnyStr(regex='Input should be (a valid dictionary|an object)'),
                        'input': 'not a dict',
                    }
                ],
            ),
        ),
    ],
    ids=repr,
)
def test_simple_tagged_union(py_and_json: PyAndJson, input_value, expected):
    """Tagged union keyed on the 'foo' field: matched-choice errors get the tag
    prefixed to their loc; a bad or missing tag yields union_tag_invalid /
    union_tag_not_found at the top level."""
    v = py_and_json(
        {
            'type': 'tagged-union',
            'discriminator': 'foo',
            'from_attributes': False,
            'choices': {
                'apple': {
                    'type': 'typed-dict',
                    'fields': {
                        'foo': {'type': 'typed-dict-field', 'schema': {'type': 'str'}},
                        'bar': {'type': 'typed-dict-field', 'schema': {'type': 'int'}},
                    },
                },
                'banana': {
                    'type': 'typed-dict',
                    'fields': {
                        'foo': {'type': 'typed-dict-field', 'schema': {'type': 'str'}},
                        'spam': {
                            'type': 'typed-dict-field',
                            'schema': {'type': 'list', 'items_schema': {'type': 'int'}},
                        },
                    },
                },
            },
        }
    )
    # a plain string discriminator is compiled to a LookupPaths lookup
    assert 'discriminator: LookupPaths' in repr(v.validator)
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=expected.message) as exc_info:
            v.validate_test(input_value)
        # debug(exc_info.value.errors(include_url=False))
        assert exc_info.value.errors(include_url=False) == expected.errors
    else:
        assert v.validate_test(input_value) == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ({'foo': 123, 'bar': '123'}, {'foo': 123, 'bar': 123}),
        ({'foo': 'banana', 'spam': [1, 2, '3']}, {'foo': 'banana', 'spam': [1, 2, 3]}),
        (
            {'foo': 123, 'bar': 'wrong'},
            Err(
                'Input should be a valid integer',
                [
                    {
                        'type': 'int_parsing',
                        'loc': (123, 'bar'),
                        'msg': 'Input should be a valid integer, unable to parse string as an integer',
                        'input': 'wrong',
                    }
                ],
            ),
        ),
        (
            {'foo': 1234567, 'bar': '123'},
            Err(
                'union_tag_invalid',
                [
                    {
                        'type': 'union_tag_invalid',
                        'loc': (),
                        'msg': (
                            "Input tag '1234567' found using 'foo' does not match any of the "
                            "expected tags: 123, 'banana'"
                        ),
                        'input': {'foo': 1234567, 'bar': '123'},
                        'ctx': {'discriminator': "'foo'", 'tag': '1234567', 'expected_tags': "123, 'banana'"},
                    }
                ],
            ),
        ),
    ],
)
def test_int_choice_keys(py_and_json: PyAndJson, input_value, expected):
    """Choice keys may be ints as well as strings; the error message renders
    int tags without quotes."""
    v = py_and_json(
        {
            'type': 'tagged-union',
            'discriminator': 'foo',
            'choices': {
                123: {
                    'type': 'typed-dict',
                    'fields': {
                        'foo': {'type': 'typed-dict-field', 'schema': {'type': 'int'}},
                        'bar': {'type': 'typed-dict-field', 'schema': {'type': 'int'}},
                    },
                },
                'banana': {
                    'type': 'typed-dict',
                    'fields': {
                        'foo': {'type': 'typed-dict-field', 'schema': {'type': 'str'}},
                        'spam': {
                            'type': 'typed-dict-field',
                            'schema': {'type': 'list', 'items_schema': {'type': 'int'}},
                        },
                    },
                },
            },
        }
    )
    assert 'discriminator: LookupPaths' in repr(v.validator)
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=expected.message) as exc_info:
            v.validate_test(input_value)
        # debug(exc_info.value.errors(include_url=False))
        assert exc_info.value.errors(include_url=False) == expected.errors
    else:
        assert v.validate_test(input_value) == expected
def test_enum_keys():
    """Enum members (str-based, int-based and plain) work as choice keys; a
    non-member tag lists the expected members in repr form."""
    class FooEnum(str, Enum):
        APPLE = 'apple'
        BANANA = 'banana'
    class BarEnum(int, Enum):
        ONE = 1
    class PlainEnum(Enum):
        TWO = 'two'
    v = SchemaValidator(
        core_schema.tagged_union_schema(
            discriminator='foo',
            choices={
                BarEnum.ONE: core_schema.typed_dict_schema(
                    fields={
                        'foo': core_schema.typed_dict_field(schema=core_schema.int_schema()),
                        'bar': core_schema.typed_dict_field(schema=core_schema.int_schema()),
                    }
                ),
                FooEnum.BANANA: core_schema.typed_dict_schema(
                    fields={
                        'foo': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                        'spam': core_schema.typed_dict_field(
                            schema=core_schema.list_schema(items_schema=core_schema.int_schema())
                        ),
                    }
                ),
                PlainEnum.TWO: core_schema.typed_dict_schema(
                    fields={
                        'foo': core_schema.typed_dict_field(schema=core_schema.any_schema()),
                        'baz': core_schema.typed_dict_field(schema=core_schema.int_schema()),
                    }
                ),
            },
        )
    )
    assert v.validate_python({'foo': FooEnum.BANANA, 'spam': [1, 2, '3']}) == {'foo': FooEnum.BANANA, 'spam': [1, 2, 3]}
    assert v.validate_python({'foo': BarEnum.ONE, 'bar': '123'}) == {'foo': BarEnum.ONE, 'bar': 123}
    assert v.validate_python({'foo': PlainEnum.TWO, 'baz': '123'}) == {'foo': PlainEnum.TWO, 'baz': 123}
    # FooEnum.APPLE is a valid member but not one of the choice keys
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python({'foo': FooEnum.APPLE, 'spam': [1, 2, '3']})
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'union_tag_invalid',
            'loc': (),
            'msg': (
                "Input tag 'FooEnum.APPLE' found using 'foo' does not match any of the expected tags:"
                " <BarEnum.ONE: 1>, <FooEnum.BANANA: 'banana'>, <PlainEnum.TWO: 'two'>"
            ),
            'input': {'foo': FooEnum.APPLE, 'spam': [1, 2, '3']},
            'ctx': {
                'discriminator': "'foo'",
                'tag': 'FooEnum.APPLE',
                'expected_tags': "<BarEnum.ONE: 1>, <FooEnum.BANANA: 'banana'>, <PlainEnum.TWO: 'two'>",
            },
        }
    ]
def test_discriminator_path(py_and_json: PyAndJson):
    """A list-of-paths discriminator tries each lookup path ('food', then 'menu'[1]) in order."""
    v = py_and_json(
        {
            'type': 'tagged-union',
            'discriminator': [['food'], ['menu', 1]],
            'choices': {
                'apple': {
                    'type': 'typed-dict',
                    'fields': {
                        'a': {'type': 'typed-dict-field', 'schema': {'type': 'str'}},
                        'b': {'type': 'typed-dict-field', 'schema': {'type': 'int'}},
                    },
                },
                'banana': {
                    'type': 'typed-dict',
                    'fields': {
                        'c': {'type': 'typed-dict-field', 'schema': {'type': 'str'}},
                        'd': {'type': 'typed-dict-field', 'schema': {'type': 'list', 'items_schema': {'type': 'int'}}},
                    },
                },
            },
        }
    )
    # first path: top-level 'food' key; second path: index 1 of the 'menu' list
    assert v.validate_test({'food': 'apple', 'a': 'apple', 'b': '13'}) == {'a': 'apple', 'b': 13}
    assert v.validate_test({'menu': ['x', 'banana'], 'c': 'C', 'd': [1, '2']}) == {'c': 'C', 'd': [1, 2]}
    # neither path resolves -> union_tag_not_found with both paths rendered in the message
    with pytest.raises(ValidationError) as exc_info:
        v.validate_test({})
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'union_tag_not_found',
            'loc': (),
            'msg': "Unable to extract tag using discriminator 'food' | 'menu'.1",
            'input': {},
            'ctx': {'discriminator': "'food' | 'menu'.1"},
        }
    ]
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ('foo', 'foo'),
        (123, 123),
        (
            'baz',
            Err(
                'literal_error',
                [
                    {
                        'type': 'literal_error',
                        'loc': ('str',),
                        'msg': "Input should be 'foo' or 'bar'",
                        'input': 'baz',
                        'ctx': {'expected': "'foo' or 'bar'"},
                    }
                ],
            ),
        ),
        (
            None,
            Err(
                'union_tag_not_found',
                [
                    {
                        'type': 'union_tag_not_found',
                        'loc': (),
                        'msg': 'Unable to extract tag using discriminator discriminator_function()',
                        'input': None,
                        'ctx': {'discriminator': 'discriminator_function()'},
                    }
                ],
            ),
        ),
        (
            ['wrong type'],
            Err(
                'union_tag_invalid',
                [
                    {
                        'type': 'union_tag_invalid',
                        'loc': (),
                        'msg': (
                            "Input tag 'other' found using discriminator_function() "
                            "does not match any of the expected tags: 'str', 'int'"
                        ),
                        'input': ['wrong type'],
                        'ctx': {
                            'discriminator': 'discriminator_function()',
                            'tag': 'other',
                            'expected_tags': "'str', 'int'",
                        },
                    }
                ],
            ),
        ),
    ],
)
def test_discriminator_function(py_and_json: PyAndJson, input_value, expected):
    """A callable discriminator maps the raw input to a choice key; None means 'tag not found'."""
    def discriminator_function(obj):
        # returns the tag for str/int inputs, None to signal "no tag", 'other' otherwise
        if isinstance(obj, str):
            return 'str'
        elif isinstance(obj, int):
            return 'int'
        elif obj is None:
            return None
        else:
            return 'other'
    v = py_and_json(
        {
            'type': 'tagged-union',
            'discriminator': discriminator_function,
            'choices': {'str': {'type': 'literal', 'expected': ['foo', 'bar']}, 'int': {'type': 'int'}},
        }
    )
    assert 'discriminator: Function' in repr(v.validator)
    if isinstance(expected, Err):
        # NOTE(review): the error branch uses validate_python while the success branch uses
        # validate_test (python + JSON); looks intentional but worth confirming.
        with pytest.raises(ValidationError, match=expected.message) as exc_info:
            v.validate_python(input_value)
        # debug(exc_info.value.errors(include_url=False))
        assert exc_info.value.errors(include_url=False) == expected.errors
    else:
        assert v.validate_test(input_value) == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ('foo', 'foo'),
        (123, 123),
        (
            None,
            Err(
                'union_tag_not_found',
                [
                    {
                        'type': 'union_tag_not_found',
                        'loc': (),
                        'msg': 'Unable to extract tag using discriminator discriminator_function()',
                        'input': None,
                        'ctx': {'discriminator': 'discriminator_function()'},
                    }
                ],
            ),
        ),
        (
            ['wrong type'],
            Err(
                'union_tag_invalid',
                [
                    {
                        'ctx': {'discriminator': 'discriminator_function()', 'expected_tags': "'a', 1", 'tag': 'other'},
                        'input': ['wrong type'],
                        'loc': (),
                        'msg': "Input tag 'other' found using discriminator_function() does not "
                        "match any of the expected tags: 'a', 1",
                        'type': 'union_tag_invalid',
                    }
                ],
            ),
        ),
    ],
)
def test_int_discriminator_function(py_and_json: PyAndJson, input_value, expected):
    """Discriminator functions may return int tags as well as str tags (choice keys 'a' and 1)."""
    def discriminator_function(obj):
        # str -> 'a', int -> 1, None -> no tag, anything else -> unmatched tag 'other'
        if isinstance(obj, str):
            return 'a'
        elif isinstance(obj, int):
            return 1
        elif obj is None:
            return None
        else:
            return 'other'
    v = py_and_json(
        {
            'type': 'tagged-union',
            'discriminator': discriminator_function,
            'choices': {'a': {'type': 'str'}, 1: {'type': 'int'}},
        }
    )
    assert 'discriminator: Function' in repr(v.validator)
    if isinstance(expected, Err):
        # NOTE(review): error branch uses validate_python, success branch validate_test — confirm intentional
        with pytest.raises(ValidationError, match=expected.message) as exc_info:
            v.validate_python(input_value)
        # debug(exc_info.value.errors(include_url=False))
        assert exc_info.value.errors(include_url=False) == expected.errors
    else:
        assert v.validate_test(input_value) == expected
def test_from_attributes():
    """With from_attributes enabled, the discriminator is read from object attributes as well as dict keys."""
    # `Cls` and `CoreConfig` are defined/imported earlier in this file (outside this view) —
    # presumably Cls is a simple attribute-bag helper; confirm against the file header.
    v = SchemaValidator(
        core_schema.tagged_union_schema(
            discriminator='foobar',
            choices={
                'apple': core_schema.model_fields_schema(
                    fields={
                        'a': core_schema.model_field(schema=core_schema.str_schema()),
                        'b': core_schema.model_field(schema=core_schema.int_schema()),
                    }
                ),
                'banana': core_schema.model_fields_schema(
                    fields={
                        'c': core_schema.model_field(schema=core_schema.str_schema()),
                        'd': core_schema.model_field(schema=core_schema.int_schema()),
                    }
                ),
            },
        ),
        config=CoreConfig(from_attributes=True),
    )
    # model_fields_schema validation returns a (fields, extra, fields_set) triple
    assert v.validate_python({'foobar': 'apple', 'a': 'apple', 'b': '13'}) == (
        {'a': 'apple', 'b': 13},
        None,
        {'a', 'b'},
    )
    assert v.validate_python(Cls(foobar='apple', a='apple', b='13')) == ({'a': 'apple', 'b': 13}, None, {'a', 'b'})
    assert v.validate_python({'foobar': 'banana', 'c': 'banana', 'd': '31'}) == (
        {'c': 'banana', 'd': 31},
        None,
        {'c', 'd'},
    )
    assert v.validate_python(Cls(foobar='banana', c='banana', d='31')) == ({'c': 'banana', 'd': 31}, None, {'c', 'd'})
def test_use_ref():
    """Multiple tagged-union choices may point at the same schema via definition references."""
    v = SchemaValidator(
        core_schema.definitions_schema(
            core_schema.tagged_union_schema(
                discriminator='foobar',
                choices={
                    # 'apple' and 'apple2' both resolve to the shared 'apple' definition below
                    'apple': core_schema.definition_reference_schema('apple'),
                    'apple2': core_schema.definition_reference_schema('apple'),
                    'banana': core_schema.typed_dict_schema(
                        {'b': core_schema.typed_dict_field(core_schema.str_schema())}
                    ),
                },
            ),
            [core_schema.typed_dict_schema({'a': core_schema.typed_dict_field(core_schema.str_schema())}, ref='apple')],
        ),
        config=CoreConfig(from_attributes=True),
    )
    assert v.validate_python({'foobar': 'apple', 'a': 'apple'}) == {'a': 'apple'}
    assert v.validate_python({'foobar': 'apple2', 'a': 'apple'}) == {'a': 'apple'}
    assert v.validate_python({'foobar': 'banana', 'b': 'banana'}) == {'b': 'banana'}
def test_downcast_error():
    """A discriminator returning an int for str-keyed choices is stringified in the error message."""
    v = SchemaValidator(
        core_schema.tagged_union_schema(discriminator=lambda x: 123, choices={'str': core_schema.str_schema()})
    )
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python('x')
    # note: no 'ctx' entry in this error shape
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'union_tag_invalid',
            'loc': (),
            'msg': "Input tag '123' found using <lambda>() does not match any of the expected tags: 'str'",
            'input': 'x',
        }
    ]
def test_custom_error():
    """custom_error_type + custom_error_message replace both tag-not-found and tag-invalid errors."""
    v = SchemaValidator(
        core_schema.tagged_union_schema(
            discriminator='foo',
            custom_error_type='snap',
            custom_error_message='Input should be a foo or bar',
            choices={
                'apple': core_schema.typed_dict_schema(
                    fields={
                        'foo': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                        'bar': core_schema.typed_dict_field(schema=core_schema.int_schema()),
                    }
                ),
                'banana': core_schema.typed_dict_schema(
                    fields={
                        'foo': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                        'spam': core_schema.typed_dict_field(
                            schema=core_schema.list_schema(items_schema=core_schema.int_schema())
                        ),
                    }
                ),
            },
        )
    )
    assert v.validate_python({'foo': 'apple', 'bar': '123'}) == {'foo': 'apple', 'bar': 123}
    # missing discriminator key -> custom error
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python({'spam': 'apple', 'bar': 'Bar'})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'snap', 'loc': (), 'msg': 'Input should be a foo or bar', 'input': {'spam': 'apple', 'bar': 'Bar'}}
    ]
    # unmatched discriminator value -> same custom error
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python({'foo': 'other', 'bar': 'Bar'})
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'snap', 'loc': (), 'msg': 'Input should be a foo or bar', 'input': {'foo': 'other', 'bar': 'Bar'}}
    ]
def test_custom_error_type():
    """custom_error_type alone reuses the stock message of that error type ('finite_number' here)."""
    v = SchemaValidator(
        core_schema.tagged_union_schema(
            discriminator='foo',
            custom_error_type='finite_number',
            choices={
                'apple': core_schema.typed_dict_schema(
                    fields={
                        'foo': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                        'bar': core_schema.typed_dict_field(schema=core_schema.int_schema()),
                    }
                ),
                'banana': core_schema.typed_dict_schema(
                    fields={
                        'foo': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                        'spam': core_schema.typed_dict_field(
                            schema=core_schema.list_schema(items_schema=core_schema.int_schema())
                        ),
                    }
                ),
            },
        )
    )
    assert v.validate_python({'foo': 'apple', 'bar': '123'}) == {'foo': 'apple', 'bar': 123}
    # missing discriminator key -> stock 'finite_number' message
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python({'spam': 'apple', 'bar': 'Bar'})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'finite_number',
            'loc': (),
            'msg': 'Input should be a finite number',
            'input': {'spam': 'apple', 'bar': 'Bar'},
        }
    ]
    # unmatched discriminator value -> same error type
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python({'foo': 'other', 'bar': 'Bar'})
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'finite_number',
            'loc': (),
            'msg': 'Input should be a finite number',
            'input': {'foo': 'other', 'bar': 'Bar'},
        }
    ]
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_tagged_union.py",
"license": "MIT License",
"lines": 577,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_time.py | import re
from datetime import date, datetime, time, timedelta, timezone
from decimal import Decimal
from typing import Any
import pytest
from pydantic_core import SchemaError, SchemaValidator, ValidationError, core_schema
from ..conftest import Err, PyAndJson
@pytest.mark.parametrize(
    'constraint',
    ['le', 'lt', 'ge', 'gt'],
)
def test_constraints_schema_validation_error(constraint: str) -> None:
    """Each bound constraint rejects a non-time value at schema-build time."""
    with pytest.raises(SchemaError, match=f"'{constraint}' must be coercible to a time instance"):
        SchemaValidator(core_schema.time_schema(**{constraint: 'bad_value'}))
@pytest.mark.parametrize(
    'input_value,expected',
    [
        pytest.param(time(12, 13, 14), time(12, 13, 14), id='time'),
        pytest.param(time(12, 13, 14, 123), time(12, 13, 14, 123), id='time-micro'),
        pytest.param(time(12, 13, 14, tzinfo=timezone.utc), time(12, 13, 14, tzinfo=timezone.utc), id='time-tz'),
        pytest.param('12:13:14', time(12, 13, 14), id='str'),
        pytest.param('12:13:14Z', time(12, 13, 14, tzinfo=timezone.utc), id='str-tz'),
        pytest.param(b'12:13:14', time(12, 13, 14), id='bytes'),
        pytest.param((1,), Err('Input should be a valid time [type=time_type'), id='tuple'),
        pytest.param(date(2022, 6, 8), Err('Input should be a valid time [type=time_type'), id='date'),
        pytest.param(datetime(2022, 6, 8), Err('Input should be a valid time [type=time_type'), id='datetime'),
        pytest.param(123, time(0, 2, 3, tzinfo=timezone.utc), id='int'),
        pytest.param(float('nan'), Err('valid time format, NaN values not permitted [type=time_parsing,'), id='nan'),
        pytest.param(float('inf'), Err('valid time format, numeric times may not exceed 86,399 seconds'), id='inf'),
        pytest.param(float('-inf'), Err('valid time format, time in seconds should be positive'), id='-inf'),
        pytest.param(Decimal('123'), time(0, 2, 3, tzinfo=timezone.utc), id='decimal'),
        pytest.param(Decimal('123.123456'), time(0, 2, 3, 123456, tzinfo=timezone.utc), id='decimal-6dig'),
        pytest.param(Decimal('123.1234562'), time(0, 2, 3, 123456, tzinfo=timezone.utc), id='decimal-7dig-up'),
        pytest.param(Decimal('123.1234568'), time(0, 2, 3, 123457, tzinfo=timezone.utc), id='decimal-7dig-down'),
    ],
)
def test_time(input_value, expected):
    """Lax time validation: accepts time/str/bytes/int/Decimal, rejects other types and out-of-range numbers."""
    v = SchemaValidator(core_schema.time_schema())
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_python(input_value)
    else:
        output = v.validate_python(input_value)
        assert output == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        pytest.param('12:13:14', time(12, 13, 14), id='str'),
        pytest.param('12:13:14.123', time(12, 13, 14, 123_000), id='str-micro'),
        pytest.param('12:13:14.123456', time(12, 13, 14, 123_456), id='str-micro-6dig'),
        pytest.param('12:13:14.123456', time(12, 13, 14, 123_456), id='str-micro-6dig'),
        pytest.param('12:13:14.1234561', time(12, 13, 14, 123_456), id='str-micro-7dig'),
        pytest.param(123, time(0, 2, 3, tzinfo=timezone.utc), id='int'),
        pytest.param(123.4, time(0, 2, 3, 400_000, tzinfo=timezone.utc), id='float'),
        pytest.param(123.0, time(0, 2, 3, tzinfo=timezone.utc), id='float.0'),
        pytest.param(0, time(0, tzinfo=timezone.utc), id='int-zero'),
        pytest.param(
            86400,
            Err(
                'Input should be in a valid time format, numeric times may not exceed 86,399 seconds [type=time_parsing'
            ),
            id='too-high',
        ),
        pytest.param(
            -1, Err('Input should be in a valid time format, time in seconds should be positive'), id='negative'
        ),
        pytest.param(2**32, Err('numeric times may not exceed 86,399 seconds'), id='too-high-2**32'),
        pytest.param(2**64, Err('numeric times may not exceed 86,399 seconds'), id='too-high-2**64'),
        pytest.param(2**100, Err('numeric times may not exceed 86,399 seconds'), id='too-high-2**100'),
        pytest.param(True, Err('Input should be a valid time [type=time_type'), id='bool'),
    ],
)
def test_time_json(py_and_json: PyAndJson, input_value, expected):
    """Numeric and string times via python and JSON paths; seconds-since-midnight must be in [0, 86400)."""
    v = py_and_json({'type': 'time'})
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_test(input_value)
    else:
        output = v.validate_test(input_value)
        assert output == expected
def test_time_error_microseconds_overflow(py_and_json: PyAndJson) -> None:
    """With microseconds_precision='error', a 7-digit second fraction raises instead of truncating."""
    v = py_and_json(core_schema.time_schema(microseconds_precision='error'))
    with pytest.raises(ValidationError) as exc_info:
        v.validate_test('00:00:00.1234567')
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'time_parsing',
            'loc': (),
            'msg': 'Input should be in a valid time format, second fraction value is more than 6 digits long',
            'input': '00:00:00.1234567',
            'ctx': {'error': 'second fraction value is more than 6 digits long'},
        }
    ]
    # exactly 6 fraction digits is still accepted
    # insert_assert(v.validate_test('00:00:00.123456'))
    assert v.validate_test('00:00:00.123456') == time(0, 0, 0, 123456)
@pytest.mark.parametrize(
    'input_value,expected',
    [
        (time(12, 13, 14, 15), time(12, 13, 14, 15)),
        ('12:13:14', Err('Input should be a valid time [type=time_type')),
        (b'12:13:14', Err('Input should be a valid time [type=time_type')),
        (1654646400, Err('Input should be a valid time [type=time_type')),
        (True, Err('Input should be a valid time [type=time_type')),
        (date(2022, 6, 8), Err('Input should be a valid time [type=time_type')),
        (datetime(2022, 6, 8), Err('Input should be a valid time [type=time_type')),
    ],
)
def test_time_strict(input_value, expected):
    """Strict mode accepts only datetime.time instances — no str/bytes/int coercion."""
    v = SchemaValidator(core_schema.time_schema(strict=True))
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_python(input_value)
    else:
        output = v.validate_python(input_value)
        assert output == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ('"12:13:14"', time(12, 13, 14)),
        ('"foobar"', Err('Input should be in a valid time format, invalid character in hour [type=time_parsing,')),
        ('123', Err('Input should be a valid time [type=time_type')),
    ],
)
def test_time_strict_json(input_value, expected):
    """Strict JSON mode: only JSON strings are parsed as times; JSON numbers are rejected."""
    v = SchemaValidator(core_schema.time_schema(strict=True))
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_json(input_value)
    else:
        output = v.validate_json(input_value)
        assert output == expected
@pytest.mark.parametrize(
    'kwargs,input_value,expected',
    [
        ({}, '12:13:14', time(12, 13, 14)),
        ({'le': time(1)}, '00:12', time(0, 12)),
        ({'le': time(1)}, '01:00', time(1, 0)),
        ({'le': time(1)}, '01:01', Err('Input should be less than or equal to 01:00:00')),
        ({'le': time(1)}, time(1), time(1, 0)),
        ({'le': time(1)}, time(1, 1), Err('Input should be less than or equal to 01:00:00')),
        ({'lt': time(1)}, '00:59', time(0, 59)),
        ({'lt': time(1)}, '01:00', Err('Input should be less than 01:00:00')),
        ({'ge': time(1)}, '01:00', time(1)),
        ({'ge': time(1)}, '00:59', Err('Input should be greater than or equal to 01:00:00')),
        ({'gt': time(12, 13, 14, 123_456)}, '12:13:14.123457', time(12, 13, 14, 123_457)),
        ({'gt': time(12, 13, 14, 123_456)}, '12:13:14.123456', Err('Input should be greater than 12:13:14.123456')),
    ],
)
def test_time_kwargs(kwargs: dict[str, Any], input_value, expected):
    """le/lt/ge/gt bound constraints on time values, including microsecond-precision boundaries."""
    v = SchemaValidator(core_schema.time_schema(**kwargs))
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)) as exc_info:
            v.validate_python(input_value)
        errors = exc_info.value.errors(include_url=False)
        assert len(errors) == 1
        # the failing constraint name should be echoed in the error ctx
        if len(kwargs) == 1:
            key = list(kwargs.keys())[0]
            assert key in errors[0]['ctx']
    else:
        output = v.validate_python(input_value)
        assert output == expected
def test_time_bound_ctx():
    """The greater_than error carries the bound as an ISO-formatted string in ctx['gt']."""
    v = SchemaValidator(core_schema.time_schema(gt=time(12, 13, 14, 123_456)))
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python('12:13')
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'greater_than',
            'loc': (),
            'msg': 'Input should be greater than 12:13:14.123456',
            'input': '12:13',
            'ctx': {'gt': '12:13:14.123456'},
        }
    ]
def test_dict_py():
    """datetime.time objects are accepted directly as dict keys in python mode."""
    keys = core_schema.time_schema()
    values = core_schema.int_schema()
    validator = SchemaValidator(core_schema.dict_schema(keys_schema=keys, values_schema=values))
    sample = {time(12, 1, 1): 2, time(12, 1, 2): 4}
    assert validator.validate_python(sample) == sample
def test_dict(py_and_json: PyAndJson):
    """String dict keys are parsed into time objects in both python and JSON modes."""
    schema = {'type': 'dict', 'keys_schema': {'type': 'time'}, 'values_schema': {'type': 'int'}}
    validator = py_and_json(schema)
    expected = {time(12, 1, 1): 2, time(12, 1, 2): 4}
    assert validator.validate_test({'12:01:01': 2, '12:01:02': 4}) == expected
def test_union():
    """str|time unions, in either member order, pass strings and time objects through unchanged."""
    orderings = (
        [core_schema.str_schema(), core_schema.time_schema()],
        [core_schema.time_schema(), core_schema.str_schema()],
    )
    for choices in orderings:
        validator = SchemaValidator(core_schema.union_schema(choices=choices))
        assert validator.validate_python('12:01:02') == '12:01:02'
        assert validator.validate_python(time(12, 1, 2)) == time(12, 1, 2)
def test_aware():
    """tz_constraint='aware' requires timezone info on both time objects and parsed strings."""
    v = SchemaValidator(core_schema.time_schema(tz_constraint='aware'))
    value = time(12, 13, 15, tzinfo=timezone.utc)
    # aware input passes through by identity
    assert value is v.validate_python(value)
    assert v.validate_python('12:13:14Z') == time(12, 13, 14, tzinfo=timezone.utc)
    value = time(12, 13, 15)
    with pytest.raises(ValidationError, match=r'Input should have timezone info'):
        v.validate_python(value)
    with pytest.raises(ValidationError, match=r'Input should have timezone info'):
        v.validate_python('12:13:14')
def test_naive():
    """tz_constraint='naive' rejects any input carrying timezone info."""
    v = SchemaValidator(core_schema.time_schema(tz_constraint='naive'))
    value = time(12, 13, 15)
    # naive input passes through by identity
    assert value is v.validate_python(value)
    assert v.validate_python('12:13:14') == time(12, 13, 14)
    value = time(12, 13, 15, tzinfo=timezone.utc)
    with pytest.raises(ValidationError, match=r'Input should not have timezone info'):
        v.validate_python(value)
    with pytest.raises(ValidationError, match=r'Input should not have timezone info'):
        v.validate_python('12:13:14Z')
def test_aware_specific():
    """An integer tz_constraint requires awareness AND a specific UTC offset in seconds (0 here)."""
    v = SchemaValidator(core_schema.time_schema(tz_constraint=0))
    value = time(12, 13, 15, tzinfo=timezone.utc)
    assert value is v.validate_python(value)
    assert v.validate_python('12:13:14Z') == time(12, 13, 14, tzinfo=timezone.utc)
    # naive input fails the awareness requirement first
    value = time(12, 13, 14)
    with pytest.raises(ValidationError, match='Input should have timezone info'):
        v.validate_python(value)
    # aware input with the wrong offset fails with timezone_offset, reporting both offsets
    value = time(12, 13, 15, tzinfo=timezone(timedelta(hours=1)))
    with pytest.raises(ValidationError, match='Timezone offset of 0 required, got 3600') as exc_info:
        v.validate_python(value)
    # insert_assert(exc_info.value.errors())
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'timezone_offset',
            'loc': (),
            'msg': 'Timezone offset of 0 required, got 3600',
            'input': value,
            'ctx': {'tz_expected': 0, 'tz_actual': 3600},
        }
    ]
    with pytest.raises(ValidationError, match='Timezone offset of 0 required, got 3600'):
        v.validate_python('12:13:14+01:00')
def test_neg_7200():
    """Negative integer tz_constraint (-7200 = UTC-2) is enforced on objects and strings alike."""
    v = SchemaValidator(core_schema.time_schema(tz_constraint=-7200))
    value = time(12, 13, 15, tzinfo=timezone(timedelta(hours=-2)))
    assert value is v.validate_python(value)
    value = time(12, 13, 14)
    with pytest.raises(ValidationError, match='Input should have timezone info'):
        v.validate_python(value)
    value = time(12, 13, 15, tzinfo=timezone.utc)
    with pytest.raises(ValidationError, match='Timezone offset of -7200 required, got 0'):
        v.validate_python(value)
    with pytest.raises(ValidationError, match='Timezone offset of -7200 required, got 0'):
        v.validate_python('12:13:14Z')
def test_tz_constraint_too_high():
    """A tz_constraint that overflows the internal integer type fails at schema-build time."""
    with pytest.raises(SchemaError, match='OverflowError: Python int too large.*'):
        SchemaValidator(core_schema.time_schema(tz_constraint=2**64))
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_time.py",
"license": "MIT License",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_timedelta.py | import re
from datetime import timedelta
from decimal import Decimal
from typing import Any
import pytest
from pydantic_core import SchemaError, SchemaValidator, ValidationError, core_schema
from ..conftest import Err, PyAndJson
try:
import pandas
except ImportError:
pandas = None
@pytest.mark.parametrize(
    'constraint',
    ['le', 'lt', 'ge', 'gt'],
)
def test_constraints_schema_validation_error(constraint: str) -> None:
    """Each bound constraint rejects a non-timedelta value at schema-build time."""
    with pytest.raises(SchemaError, match=f"'{constraint}' must be coercible to a timedelta instance"):
        SchemaValidator(core_schema.timedelta_schema(**{constraint: 'bad_value'}))
def test_constraints_schema_validation() -> None:
    """A gt bound given as a plain int is coerced to a duration and then enforced."""
    validator = SchemaValidator(core_schema.timedelta_schema(gt=3))
    with pytest.raises(ValidationError):
        validator.validate_python(1)
@pytest.mark.parametrize(
    'input_value,expected',
    [
        (
            timedelta(days=-3, hours=2, seconds=1, milliseconds=500),
            timedelta(days=-3, hours=2, seconds=1, milliseconds=500),
        ),
        (
            timedelta(days=3, weeks=2, hours=1, minutes=2, seconds=3, milliseconds=500),
            timedelta(days=3, weeks=2, hours=1, minutes=2, seconds=3, milliseconds=500),
        ),
        ('P0Y0M3D2WT1H2M3.5S', timedelta(days=3, weeks=2, hours=1, minutes=2, seconds=3, milliseconds=500)),
        (b'P0Y0M3D2WT1H2M3.5S', timedelta(days=3, weeks=2, hours=1, minutes=2, seconds=3, milliseconds=500)),
        ((-1,), Err('Input should be a valid timedelta [type=time_delta_type')),
        (
            b'-1',
            Err(
                'Input should be a valid timedelta, "day" identifier in duration '
                'not correctly formatted [type=time_delta_parsing'
            ),
        ),
        (3601, timedelta(hours=1, seconds=1)),
        (Decimal('3601.123456'), timedelta(hours=1, seconds=1, microseconds=123456)),
        (Decimal('3601.1234562'), timedelta(hours=1, seconds=1, microseconds=123456)),
        (Decimal('3601.1234568'), timedelta(hours=1, seconds=1, microseconds=123457)),
        (-3601, timedelta(hours=-2, seconds=3599)),
        (Decimal('-3601.222222'), timedelta(hours=-2, seconds=3598, microseconds=777778)),
        (Decimal('-3601.2222222'), timedelta(hours=-2, seconds=3598, microseconds=777778)),
        (Decimal('-3601.2222227'), timedelta(hours=-2, seconds=3598, microseconds=777777)),
        (float('nan'), Err('Input should be a valid timedelta, NaN values not permitted')),
        (float('inf'), Err('Input should be a valid timedelta, durations may not exceed 999,999,999 days')),
        (float('-inf'), Err('Input should be a valid timedelta, durations may not exceed 999,999,999 days')),
        (timedelta.max, timedelta.max),
        ('02:03:04.05', timedelta(hours=2, seconds=184, microseconds=50_000)),
        (
            '02:03:04.05broken',
            Err('Input should be a valid timedelta, unexpected extra characters at the end of the input'),
        ),
    ],
)
def test_timedelta(input_value, expected):
    """Lax timedelta validation: timedelta/ISO-8601 str/bytes/numeric seconds; 7th fraction digit rounds."""
    v = SchemaValidator(core_schema.timedelta_schema())
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_python(input_value)
    else:
        output = v.validate_python(input_value)
        assert output == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ('"P0Y0M3D2WT1H2M3.5S"', timedelta(days=3, weeks=2, hours=1, minutes=2, seconds=3, milliseconds=500)),
        ('"errordata"', Err('Input should be a valid duration, invalid digit in duration [type=time_delta_parsing')),
        ('true', Err('Input should be a valid duration [type=time_delta_type')),
        ('3601', timedelta(hours=1, seconds=1)),
        ('3601.123456', timedelta(hours=1, seconds=1, microseconds=123456)),
        ('-3601', timedelta(hours=-2, seconds=3599)),
        ('-3601.222222', timedelta(hours=-2, seconds=3598, microseconds=777778)),
        ('-3601.2222222', timedelta(hours=-2, seconds=3598, microseconds=777778)),
        ('3600.999999', timedelta(seconds=3600, microseconds=999999)),
    ],
    ids=repr,
)
def test_timedelta_json(input_value, expected):
    """JSON path: ISO-8601 strings and JSON numbers (seconds) parse; bools and garbage fail."""
    v = SchemaValidator(core_schema.timedelta_schema())
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_json(input_value)
    else:
        output = v.validate_json(input_value)
        assert output == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        (
            timedelta(days=3, weeks=2, hours=1, minutes=2, seconds=3, milliseconds=500),
            timedelta(days=3, weeks=2, hours=1, minutes=2, seconds=3, milliseconds=500),
        ),
        ('P0Y0M3D2WT1H2M3.5S', Err('Input should be a valid timedelta [type=time_delta_type')),
        (b'P0Y0M3D2WT1H2M3.5S', Err('Input should be a valid timedelta [type=time_delta_type')),
    ],
)
def test_timedelta_strict(input_value, expected):
    """Strict mode accepts only datetime.timedelta instances — no str/bytes parsing."""
    v = SchemaValidator(core_schema.timedelta_schema(strict=True))
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_python(input_value)
    else:
        output = v.validate_python(input_value)
        assert output == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ('"P0Y0M3D2WT1H2M3.5S"', timedelta(days=3, weeks=2, hours=1, minutes=2, seconds=3, milliseconds=500)),
        ('"12345"', Err('Input should be a valid duration')),
        ('true', Err('Input should be a valid duration [type=time_delta_type')),
    ],
)
def test_timedelta_strict_json(input_value, expected):
    """Strict JSON mode: only ISO-8601 duration strings parse; numeric strings and bools fail."""
    v = SchemaValidator(core_schema.timedelta_schema(strict=True))
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_json(input_value)
    else:
        output = v.validate_json(input_value)
        assert output == expected
@pytest.mark.parametrize(
    'kwargs,input_value,expected',
    [
        ({}, 'P0Y0M3D2WT1H2M3S', timedelta(days=3, weeks=2, hours=1, minutes=2, seconds=3)),
        ({'le': timedelta(days=3)}, 'P2DT1H', timedelta(days=2, hours=1)),
        ({'le': timedelta(days=3)}, 'P3DT0H', timedelta(days=3)),
        ({'le': timedelta(days=3)}, 'P3DT1H', Err('Input should be less than or equal to 3 days')),
        ({'lt': timedelta(days=3)}, 'P2DT1H', timedelta(days=2, hours=1)),
        ({'lt': timedelta(days=3)}, 'P3DT1H', Err('Input should be less than 3 days')),
        ({'ge': timedelta(days=3)}, 'P3DT1H', timedelta(days=3, hours=1)),
        ({'ge': timedelta(days=3)}, 'P3D', timedelta(days=3)),
        ({'ge': timedelta(days=3)}, 'P2DT1H', Err('Input should be greater than or equal to 3 days')),
        ({'gt': timedelta(days=3)}, 'P3DT1H', timedelta(days=3, hours=1)),
        ({'le': timedelta(seconds=-86400.123)}, '-PT86400.123S', timedelta(seconds=-86400.123)),
        ({'le': timedelta(seconds=-86400.123)}, '-PT86400.124S', timedelta(seconds=-86400.124)),
        (
            {'le': timedelta(seconds=-86400.123)},
            '-PT86400.122S',
            Err(
                'Input should be less than or equal to -2 days and 23 hours and 59 minutes and 59 seconds and 877000 microseconds [type=less_than_equal'
            ),
        ),
        ({'gt': timedelta(seconds=-86400.123)}, timedelta(seconds=-86400.122), timedelta(seconds=-86400.122)),
        ({'gt': timedelta(seconds=-86400.123)}, '-PT86400.122S', timedelta(seconds=-86400.122)),
        (
            {'gt': timedelta(seconds=-86400.123)},
            '-PT86400.124S',
            Err(
                'Input should be greater than -2 days and 23 hours and 59 minutes and 59 seconds and 877000 microseconds [type=greater_than'
            ),
        ),
        (
            {'gt': timedelta(hours=1, minutes=30)},
            'PT180S',
            Err('Input should be greater than 1 hour and 30 minutes [type=greater_than'),
        ),
        ({'gt': timedelta()}, '-P0DT0.1S', Err('Input should be greater than 0 seconds [type=greater_than')),
        ({'gt': timedelta()}, 'P0DT0.0S', Err('Input should be greater than 0 seconds [type=greater_than')),
        ({'ge': timedelta()}, 'P0DT0.0S', timedelta()),
        ({'lt': timedelta()}, '-PT0S', timedelta()),
        (
            {'lt': timedelta(days=740, weeks=1, hours=48, minutes=60, seconds=61, microseconds=100000)},
            'P2Y1W10DT48H60M61.100000S',
            Err('Input should be less than 749 days and 1 hour and 1 minute and 1 second and 100000 microseconds'),
        ),
    ],
    ids=repr,
)
def test_timedelta_kwargs(kwargs: dict[str, Any], input_value, expected):
    """le/lt/ge/gt bounds on timedeltas, including negative durations and human-readable bound rendering."""
    v = SchemaValidator(core_schema.timedelta_schema(**kwargs))
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_python(input_value)
    else:
        output = v.validate_python(input_value)
        assert output == expected
def test_timedelta_kwargs_strict():
    """Bound constraints are still applied to exact timedelta input in strict mode."""
    validator = SchemaValidator(core_schema.timedelta_schema(strict=True, le=timedelta(days=3)))
    expected = timedelta(days=2, hours=1)
    assert validator.validate_python(timedelta(days=2, hours=1)) == expected
def test_dict_py():
    """datetime.timedelta objects are accepted directly as dict keys in python mode."""
    v = SchemaValidator(
        core_schema.dict_schema(keys_schema=core_schema.timedelta_schema(), values_schema=core_schema.int_schema())
    )
    assert v.validate_python({timedelta(days=2, hours=1): 2, timedelta(days=2, hours=2): 4}) == {
        timedelta(days=2, hours=1): 2,
        timedelta(days=2, hours=2): 4,
    }
def test_dict_key(py_and_json: PyAndJson):
    """ISO-8601 string dict keys parse to timedeltas; bad keys fail with time_delta_parsing."""
    v = py_and_json({'type': 'dict', 'keys_schema': {'type': 'timedelta'}, 'values_schema': {'type': 'int'}})
    assert v.validate_test({'P2DT1H': 2, 'P2DT2H': 4}) == {timedelta(days=2, hours=1): 2, timedelta(days=2, hours=2): 4}
    with pytest.raises(ValidationError, match=re.escape('[type=time_delta_parsing')):
        v.validate_test({'errordata': 2})
def test_dict_value(py_and_json: PyAndJson):
    """ISO-8601 string dict values parse to timedeltas; bad values fail with time_delta_parsing."""
    v = py_and_json({'type': 'dict', 'keys_schema': {'type': 'int'}, 'values_schema': {'type': 'timedelta'}})
    assert v.validate_test({2: 'P2DT1H', 4: 'P2DT2H'}) == {2: timedelta(days=2, hours=1), 4: timedelta(days=2, hours=2)}
    with pytest.raises(ValidationError, match=re.escape('[type=time_delta_parsing')):
        v.validate_test({4: 'errordata'})
def test_union():
    """str|timedelta unions, in either member order, pass both types through unchanged."""
    orderings = (
        [core_schema.str_schema(), core_schema.timedelta_schema()],
        [core_schema.timedelta_schema(), core_schema.str_schema()],
    )
    for choices in orderings:
        validator = SchemaValidator(core_schema.union_schema(choices=choices))
        assert validator.validate_python('P2DT1H') == 'P2DT1H'
        assert validator.validate_python(timedelta(days=2, hours=1)) == timedelta(days=2, hours=1)
@pytest.mark.parametrize(
    'constraint,expected_duration',
    [
        (timedelta(days=3), {'positive': True, 'day': 3, 'second': 0, 'microsecond': 0}),
        (timedelta(days=2, seconds=42.123), {'positive': True, 'day': 2, 'second': 42, 'microsecond': 123_000}),
        (timedelta(days=-1), {'positive': False, 'day': 1, 'second': 0, 'microsecond': 0}),
        (timedelta(seconds=86410), {'positive': True, 'day': 1, 'second': 10, 'microsecond': 0}),
        (timedelta(seconds=86410.123), {'positive': True, 'day': 1, 'second': 10, 'microsecond': 123_000}),
        (timedelta(seconds=-86410), {'positive': False, 'day': 1, 'second': 10, 'microsecond': 0}),
        (timedelta(seconds=-86410.123), {'positive': False, 'day': 1, 'second': 10, 'microsecond': 123_000}),
        (timedelta(days=-4, hours=12), {'positive': False, 'day': 3, 'second': 43200, 'microsecond': 0}),
        (timedelta(days=-4, microseconds=456), {'positive': False, 'day': 3, 'second': 86399, 'microsecond': 999544}),
        (timedelta(days=-1, seconds=20_000), {'positive': False, 'day': 0, 'second': 66_400, 'microsecond': 0}),
        (
            timedelta(days=-1, seconds=86_399, microseconds=1),
            {'positive': False, 'day': 0, 'second': 0, 'microsecond': 999_999},
        ),
        (timedelta.max, {'positive': True, 'day': 999999999, 'second': 86399, 'microsecond': 999999}),
        (timedelta.min, {'positive': False, 'day': 999999999, 'second': 0, 'microsecond': 0}),
    ],
    ids=repr,
)
def test_pytimedelta_as_timedelta(constraint, expected_duration):
    """Python's normalized negative timedeltas convert to the Rust Duration sign-magnitude form."""
    v = SchemaValidator(core_schema.timedelta_schema(gt=constraint))
    # simplest way to check `pytimedelta_as_timedelta` is correct is to extract duration from repr of the validator
    m = re.search(r'Duration ?\{\s+positive: ?(\w+),\s+day: ?(\d+),\s+second: ?(\d+),\s+microsecond: ?(\d+)', repr(v))
    pos, day, sec, micro = m.groups()
    duration = {'positive': pos == 'true', 'day': int(day), 'second': int(sec), 'microsecond': int(micro)}
    assert duration == pytest.approx(expected_duration), constraint
def test_large_value():
    """Durations parse right up to the 999,999,999-day cap and fail just past it."""
    max_days = 999_999_999
    validator = SchemaValidator(core_schema.timedelta_schema())
    assert validator.validate_python('123days, 12:34') == timedelta(days=123, hours=12, minutes=34)
    assert validator.validate_python(f'{max_days}days, 12:34') == timedelta(days=max_days, hours=12, minutes=34)
    with pytest.raises(ValidationError, match='should be a valid timedelta, durations may not exceed 999,999,999 days'):
        validator.validate_python(f'{max_days + 1}days, 12:34')
@pytest.mark.skipif(not pandas, reason='pandas not installed')
def test_pandas():
    """pandas.Timedelta values (and their pytimedelta conversions) respect the `ge` constraint."""
    validator = SchemaValidator(core_schema.timedelta_schema(ge=timedelta(hours=2)))
    epoch = pandas.Timestamp('2023-01-01T00:00:00Z')
    two_hours = pandas.Timestamp('2023-01-01T02:00:00Z') - epoch
    one_55 = pandas.Timestamp('2023-01-01T01:55:00Z') - epoch
    assert validator.validate_python(two_hours) == two_hours
    assert validator.validate_python(two_hours.to_pytimedelta()) == two_hours
    msg = r'Input should be greater than or equal to 2 hours'
    for too_short in (one_55, one_55.to_pytimedelta()):
        with pytest.raises(ValidationError, match=msg):
            validator.validate_python(too_short)
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_timedelta.py",
"license": "MIT License",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_tuple.py | import re
from collections import deque
from typing import Any
import pytest
from dirty_equals import IsNonNegative, IsTuple
from pydantic_core import SchemaValidator, ValidationError, core_schema
from ..conftest import Err, PyAndJson, infinite_generator
@pytest.mark.parametrize(
    'variadic_item_index,items,input_value,expected',
    [
        (0, [{'type': 'int'}], [1, 2, 3], (1, 2, 3)),
        (0, [{'type': 'int'}], 1, Err('[type=tuple_type, input_value=1, input_type=int]')),
        (None, [{'type': 'int'}, {'type': 'int'}, {'type': 'int'}], [1, 2, '3'], (1, 2, 3)),
        (
            None,
            [{'type': 'int'}, {'type': 'int'}, {'type': 'int'}],
            5,
            Err('[type=tuple_type, input_value=5, input_type=int]'),
        ),
    ],
    ids=repr,
)
def test_tuple_json(py_and_json: PyAndJson, variadic_item_index, items, input_value, expected):
    """Variadic and fixed tuple schemas behave the same for Python and JSON inputs."""
    v = py_and_json(core_schema.tuple_schema(items_schema=items, variadic_item_index=variadic_item_index))
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_test(input_value)
    else:
        assert v.validate_test(input_value) == expected
def test_any_no_copy():
    """A variadic tuple of `any` must return a new tuple object, not the input itself."""
    v = SchemaValidator(core_schema.tuple_schema(items_schema=[core_schema.any_schema()], variadic_item_index=0))
    input_value = (1, '2', b'3')
    output = v.validate_python(input_value)
    assert output == input_value
    # `is not` already proves the objects are distinct; the former
    # `assert id(output) != id(input_value)` was an exact duplicate and was removed.
    assert output is not input_value
@pytest.mark.parametrize(
    'variadic_item_index,items,input_value,expected',
    [
        (0, [{'type': 'int'}], (1, 2, '33'), (1, 2, 33)),
        (0, [{'type': 'str'}], (b'1', b'2', '33'), ('1', '2', '33')),
        (None, [{'type': 'int'}, {'type': 'str'}, {'type': 'float'}], (1, b'a', 33), (1, 'a', 33.0)),
    ],
)
def test_tuple_strict_passes_with_tuple(variadic_item_index, items, input_value, expected):
    """In strict mode a genuine tuple input is still accepted and item-validated."""
    schema = core_schema.tuple_schema(items_schema=items, variadic_item_index=variadic_item_index, strict=True)
    assert SchemaValidator(schema).validate_python(input_value) == expected
@pytest.mark.parametrize('fail_fast', [True, False])
def test_empty_positional_tuple(fail_fast):
    """A zero-field positional tuple accepts only empty input, whatever fail_fast is."""
    validator = SchemaValidator(core_schema.tuple_schema(items_schema=[], fail_fast=fail_fast))
    assert validator.validate_python(()) == ()
    assert validator.validate_python([]) == ()
    with pytest.raises(ValidationError) as exc:
        validator.validate_python((1,))
    expected_error = {
        'type': 'too_long',
        'loc': (),
        'msg': 'Tuple should have at most 0 items after validation, not 1',
        'input': (1,),
        'ctx': {'field_type': 'Tuple', 'max_length': 0, 'actual_length': 1},
    }
    assert exc.value.errors(include_url=False) == [expected_error]
@pytest.mark.parametrize(
    'variadic_item_index,items', [(0, [{'type': 'int'}]), (None, [{'type': 'int'}, {'type': 'int'}, {'type': 'int'}])]
)
@pytest.mark.parametrize('wrong_coll_type', [list, set, frozenset])
def test_tuple_strict_fails_without_tuple(wrong_coll_type: type[Any], variadic_item_index, items):
    """Strict mode rejects every non-tuple collection outright with a single tuple_type error."""
    validator = SchemaValidator(
        core_schema.tuple_schema(variadic_item_index=variadic_item_index, items_schema=items, strict=True)
    )
    bad_input = wrong_coll_type([1, 2, '33'])
    with pytest.raises(ValidationError) as exc:
        validator.validate_python(bad_input)
    assert exc.value.errors(include_url=False) == [
        {
            'type': 'tuple_type',
            'loc': (),
            'msg': 'Input should be a valid tuple',
            'input': wrong_coll_type([1, 2, '33']),
        }
    ]
@pytest.mark.parametrize(
    'kwargs,input_value,expected',
    [
        ({}, (1, 2, 3, 4), (1, 2, 3, 4)),
        ({'min_length': 3}, (1, 2, 3, 4), (1, 2, 3, 4)),
        ({'min_length': 3}, (1, 2), Err('Tuple should have at least 3 items after validation, not 2 [type=too_short,')),
        ({'max_length': 4}, (1, 2, 3, 4), (1, 2, 3, 4)),
        (
            {'max_length': 3},
            (1, 2, 3, 4),
            Err('Tuple should have at most 3 items after validation, not 4 [type=too_long,'),
        ),
        (
            {'max_length': 3},
            [1, 2, 3, 4, 5],
            Err('Tuple should have at most 3 items after validation, not 5 [type=too_long,'),
        ),
        (
            {'max_length': 3},
            infinite_generator(),
            Err('Tuple should have at most 3 items after validation, not more [type=too_long,'),
        ),
    ],
    ids=repr,
)
def test_tuple_var_len_kwargs(kwargs: dict[str, Any], input_value, expected):
    """min_length/max_length apply to variadic tuples; an infinite generator is cut off early ("not more")."""
    v = SchemaValidator(
        core_schema.tuple_schema(items_schema=[core_schema.any_schema()], variadic_item_index=0, **kwargs)
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_python(input_value)
    else:
        assert v.validate_python(input_value) == expected
@pytest.mark.parametrize(
    'variadic_item_index,items', [(0, [{'type': 'int'}]), (None, [{'type': 'int'}, {'type': 'int'}, {'type': 'int'}])]
)
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ((1, 2, '3'), (1, 2, 3)),
        ([1, 2, '3'], (1, 2, 3)),
        (deque((1, 2, '3')), (1, 2, 3)),
        ({1: 10, 2: 20, '3': '30'}.keys(), (1, 2, 3)),
        ({1: 10, 2: 20, '3': '30'}.values(), (10, 20, 30)),
        ({1: 10, 2: 20, '3': '30'}, Err('Input should be a valid tuple [type=tuple_type,')),
        # sets are accepted but iteration order is arbitrary, hence check_order=False
        ({1, 2, '3'}, IsTuple(1, 2, 3, check_order=False)),
        (frozenset([1, 2, '3']), IsTuple(1, 2, 3, check_order=False)),
    ],
    ids=repr,
)
def test_tuple_validate(input_value, expected, variadic_item_index, items):
    """Sequence-like inputs (lists, deques, dict views, sets) coerce to tuples; a plain dict does not."""
    v = SchemaValidator(core_schema.tuple_schema(items_schema=items, variadic_item_index=variadic_item_index))
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_python(input_value)
    else:
        assert v.validate_python(input_value) == expected
# Since `test_tuple_validate` is parametrized above, the generator is consumed
# on the first test run. This is a workaround to make sure the generator is
# always recreated.
@pytest.mark.parametrize(
    'variadic_item_index,items', [(0, [{'type': 'int'}]), (None, [{'type': 'int'}, {'type': 'int'}, {'type': 'int'}])]
)
def test_tuple_validate_iterator(variadic_item_index, items):
    """A generator input is drained and coerced into a tuple."""
    validator = SchemaValidator(core_schema.tuple_schema(items_schema=items, variadic_item_index=variadic_item_index))
    gen = (item for item in [1, 2, '3'])
    assert validator.validate_python(gen) == (1, 2, 3)
@pytest.mark.parametrize(
    'input_value,index',
    [
        (['wrong'], 0),
        (('wrong',), 0),
        ((1, 2, 3, 'wrong'), 3),
        ((1, 2, 3, 'wrong', 4), 3),
        ((1, 2, 'wrong'), IsNonNegative()),
    ],
)
def test_tuple_var_len_errors(input_value, index):
    """For a bad variadic item, the error loc points at that item's index."""
    validator = SchemaValidator(core_schema.tuple_schema(items_schema=[core_schema.int_schema()], variadic_item_index=0))
    with pytest.raises(ValidationError) as exc:
        validator.validate_python(input_value)
    expected_error = {
        'type': 'int_parsing',
        'loc': (index,),
        'msg': 'Input should be a valid integer, unable to parse string as an integer',
        'input': 'wrong',
    }
    assert exc.value.errors(include_url=False) == [expected_error]
@pytest.mark.parametrize(
    'input_value,items,index',
    [
        (['wrong'], [{'type': 'int'}], 0),
        (('wrong',), [{'type': 'int'}], 0),
        ((1, 2, 3, 'wrong'), [{'type': 'int'}, {'type': 'int'}, {'type': 'int'}, {'type': 'int'}], 3),
        (
            (1, 2, 3, 'wrong', 4),
            [{'type': 'int'}, {'type': 'int'}, {'type': 'int'}, {'type': 'int'}, {'type': 'int'}],
            3,
        ),
        ((1, 2, 'wrong'), [{'type': 'int'}, {'type': 'int'}, {'type': 'int'}], IsNonNegative()),
    ],
)
def test_tuple_fix_len_errors(input_value, items, index):
    """Fixed-length tuples report the failing item's position in the error loc."""
    v = SchemaValidator(core_schema.tuple_schema(items_schema=items))
    with pytest.raises(ValidationError) as exc_info:
        assert v.validate_python(input_value)
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': (index,),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'wrong',
        }
    ]
def test_multiple_missing(py_and_json: PyAndJson):
    """Every absent positional item gets its own `missing` error."""
    v = py_and_json(
        {'type': 'tuple', 'items_schema': [{'type': 'int'}, {'type': 'int'}, {'type': 'int'}, {'type': 'int'}]}
    )
    assert v.validate_test([1, 2, 3, 4]) == (1, 2, 3, 4)
    with pytest.raises(ValidationError) as exc:
        v.validate_test([1])
    assert exc.value.errors(include_url=False) == [
        {'type': 'missing', 'loc': (loc,), 'msg': 'Field required', 'input': [1]} for loc in (1, 2, 3)
    ]
    with pytest.raises(ValidationError) as exc:
        v.validate_test([1, 2, 3])
    assert exc.value.errors(include_url=False) == [
        {'type': 'missing', 'loc': (3,), 'msg': 'Field required', 'input': [1, 2, 3]}
    ]
def test_extra_arguments(py_and_json: PyAndJson):
    """Surplus items beyond the declared positional fields raise a single `too_long` error."""
    v = py_and_json({'type': 'tuple', 'items_schema': [{'type': 'int'}, {'type': 'int'}]})
    assert v.validate_test([1, 2]) == (1, 2)
    with pytest.raises(ValidationError) as exc:
        v.validate_test([1, 2, 3, 4])
    expected_error = {
        'type': 'too_long',
        'loc': (),
        'msg': 'Tuple should have at most 2 items after validation, not 4',
        'input': [1, 2, 3, 4],
        'ctx': {'field_type': 'Tuple', 'max_length': 2, 'actual_length': 4},
    }
    assert exc.value.errors(include_url=False) == [expected_error]
def test_positional_empty(py_and_json: PyAndJson):
    """An empty positional tuple schema only accepts empty input."""
    validator = py_and_json({'type': 'tuple', 'items_schema': []})
    assert validator.validate_test([]) == ()
    assert validator.validate_python(()) == ()
    with pytest.raises(ValidationError, match='type=too_long,'):
        validator.validate_test([1])
def test_positional_empty_extra(py_and_json: PyAndJson):
    """With a variadic item declared, any number of items — including none — is accepted."""
    validator = py_and_json({'type': 'tuple', 'items_schema': [{'type': 'int'}], 'variadic_item_index': 0})
    assert validator.validate_test([]) == ()
    assert validator.validate_python(()) == ()
    assert validator.validate_test([1]) == (1,)
    hundred = list(range(100))
    assert validator.validate_test(hundred) == tuple(hundred)
@pytest.mark.parametrize('input_value,expected', [((1, 2, 3), (1, 2, 3)), ([1, 2, 3], [1, 2, 3])])
def test_union_tuple_list(input_value, expected):
    """In a tuple|list union, each input keeps its own container type."""
    union = core_schema.union_schema(
        choices=[
            core_schema.tuple_schema(items_schema=[core_schema.any_schema()], variadic_item_index=0),
            core_schema.list_schema(),
        ]
    )
    assert SchemaValidator(union).validate_python(input_value) == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ((1, 2, 3), (1, 2, 3)),
        (('a', 'b', 'c'), ('a', 'b', 'c')),
        (('a', b'a', 'c'), ('a', 'a', 'c')),
        (
            [5],
            Err(
                '2 validation errors for union',
                errors=[
                    {
                        # first of all, not a tuple of ints ..
                        'type': 'tuple_type',
                        'loc': ('tuple[int, ...]',),
                        'msg': 'Input should be a valid tuple',
                        'input': [5],
                    },
                    # .. and not a tuple of strings, either
                    {
                        'type': 'tuple_type',
                        'loc': ('tuple[str, ...]',),
                        'msg': 'Input should be a valid tuple',
                        'input': [5],
                    },
                ],
            ),
        ),
    ],
    ids=repr,
)
def test_union_tuple_var_len(input_value, expected):
    """On failure, each strict member of a variadic-tuple union contributes its own tuple_type error."""
    v = SchemaValidator(
        core_schema.union_schema(
            choices=[
                core_schema.tuple_schema(items_schema=[core_schema.int_schema()], variadic_item_index=0, strict=True),
                core_schema.tuple_schema(items_schema=[core_schema.str_schema()], variadic_item_index=0, strict=True),
            ]
        )
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)) as exc_info:
            v.validate_python(input_value)
        if expected.errors is not None:
            assert exc_info.value.errors(include_url=False) == expected.errors
    else:
        assert v.validate_python(input_value) == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ((1, 2, 3), (1, 2, 3)),
        (('a', 'b', 'c'), ('a', 'b', 'c')),
        (
            [5, '1', 1],
            Err(
                '2 validation errors for union',
                errors=[
                    {
                        'type': 'tuple_type',
                        'loc': ('tuple[int, int, int]',),
                        'msg': 'Input should be a valid tuple',
                        'input': [5, '1', 1],
                    },
                    {
                        'type': 'tuple_type',
                        'loc': ('tuple[str, str, str]',),
                        'msg': 'Input should be a valid tuple',
                        'input': [5, '1', 1],
                    },
                ],
            ),
        ),
    ],
    ids=repr,
)
def test_union_tuple_fix_len(input_value, expected):
    """Same as test_union_tuple_var_len but for fixed-length strict tuple members."""
    v = SchemaValidator(
        core_schema.union_schema(
            choices=[
                core_schema.tuple_schema(
                    items_schema=[core_schema.int_schema(), core_schema.int_schema(), core_schema.int_schema()],
                    strict=True,
                ),
                core_schema.tuple_schema(
                    items_schema=[core_schema.str_schema(), core_schema.str_schema(), core_schema.str_schema()],
                    strict=True,
                ),
            ]
        )
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)) as exc_info:
            v.validate_python(input_value)
        if expected.errors is not None:
            assert exc_info.value.errors(include_url=False) == expected.errors
    else:
        assert v.validate_python(input_value) == expected
def test_tuple_fix_error():
    """A missing second positional item is reported as `missing` at loc (1,)."""
    validator = SchemaValidator(
        core_schema.tuple_schema(items_schema=[core_schema.int_schema(), core_schema.str_schema()])
    )
    with pytest.raises(ValidationError) as exc:
        validator.validate_python([1])
    assert exc.value.errors(include_url=False) == [
        {'type': 'missing', 'loc': (1,), 'msg': 'Field required', 'input': [1]}
    ]
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ([1, 'a'], (1, 'a')),
        ((1, 'a'), (1, 'a')),
        ((1, 'a', 'b'), (1, 'a', 'b')),
        ([1, 'a', 'b', 'c', 'd'], (1, 'a', 'b', 'c', 'd')),
        (deque([1, 'a', 'b', 'c', 'd']), (1, 'a', 'b', 'c', 'd')),
        ([1], Err('type=missing', errors=[{'type': 'missing', 'loc': (1,), 'msg': 'Field required', 'input': [1]}])),
    ],
)
def test_tuple_fix_extra(input_value, expected):
    """Positional items followed by a variadic tail: `int, str, str...`."""
    validator = SchemaValidator(
        core_schema.tuple_schema(
            items_schema=[core_schema.int_schema(), core_schema.str_schema(), core_schema.str_schema()],
            variadic_item_index=2,
        )
    )
    if not isinstance(expected, Err):
        assert validator.validate_python(input_value) == expected
    else:
        with pytest.raises(ValidationError, match=re.escape(expected.message)) as exc:
            validator.validate_python(input_value)
        assert exc.value.errors(include_url=False) == expected.errors
def test_tuple_fix_extra_any():
    """One required str item followed by a variadic `any` tail."""
    validator = SchemaValidator(
        core_schema.tuple_schema(
            items_schema=[core_schema.str_schema(), core_schema.any_schema()], variadic_item_index=1
        )
    )
    for raw, validated in [
        ([b'1'], ('1',)),
        ([b'1', 2], ('1', 2)),
        ((b'1', 2), ('1', 2)),
        ([b'1', 2, b'3'], ('1', 2, b'3')),
    ]:
        assert validator.validate_python(raw) == validated
    with pytest.raises(ValidationError) as exc:
        validator.validate_python([])
    assert exc.value.errors(include_url=False) == [
        {'type': 'missing', 'loc': (0,), 'msg': 'Field required', 'input': []}
    ]
def test_generator_error():
    """An exception raised while iterating the input surfaces as `iteration_error`."""

    def faulty_gen(error: bool):
        # yields 1, 2, then either 3 or a RuntimeError depending on `error`
        yield 1
        yield 2
        if error:
            raise RuntimeError('error')
        yield 3

    validator = SchemaValidator(core_schema.tuple_schema(items_schema=[core_schema.int_schema()], variadic_item_index=0))
    assert validator.validate_python(faulty_gen(False)) == (1, 2, 3)
    msg = r'Error iterating over object, error: RuntimeError: error \[type=iteration_error,'
    with pytest.raises(ValidationError, match=msg):
        validator.validate_python(faulty_gen(True))
@pytest.mark.parametrize(
    'input_value,items_schema,expected',
    [
        pytest.param(
            {1: 10, 2: 20, '3': '30'}.items(),
            {'type': 'tuple', 'items_schema': [{'type': 'any'}], 'variadic_item_index': 0},
            ((1, 10), (2, 20), ('3', '30')),
            id='Tuple[Any, Any]',
        ),
        pytest.param(
            {1: 10, 2: 20, '3': '30'}.items(),
            {'type': 'tuple', 'items_schema': [{'type': 'int'}], 'variadic_item_index': 0},
            ((1, 10), (2, 20), (3, 30)),
            id='Tuple[int, int]',
        ),
        pytest.param({1: 10, 2: 20, '3': '30'}.items(), {'type': 'any'}, ((1, 10), (2, 20), ('3', '30')), id='Any'),
    ],
)
def test_frozenset_from_dict_items(input_value, items_schema, expected):
    # NOTE(review): despite the `frozenset_` prefix (apparently a copy-paste
    # leftover from the frozenset test module), this asserts the output is a
    # *tuple* built from dict-items views — consider renaming.
    v = SchemaValidator(core_schema.tuple_schema(items_schema=[items_schema], variadic_item_index=0))
    output = v.validate_python(input_value)
    assert isinstance(output, tuple)
    assert output == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ([1, 2, 3, 4], (1, 2, 3, 4)),
        ([1, 2, 3, 4, 5], Err('Tuple should have at most 4 items after validation, not 5 [type=too_long,')),
        ([1, 2, 3, 'x', 4], (1, 2, 3, 4)),
    ],
)
def test_length_constraints_omit(input_value, expected):
    """max_length counts items *after* on_error='omit' drops the failing ones."""
    validator = SchemaValidator(
        core_schema.tuple_schema(
            items_schema=[core_schema.with_default_schema(schema=core_schema.int_schema(), on_error='omit')],
            variadic_item_index=0,
            max_length=4,
        )
    )
    if not isinstance(expected, Err):
        assert validator.validate_python(input_value) == expected
    else:
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            validator.validate_python(input_value)
@pytest.mark.parametrize(
    'fail_fast,expected',
    [
        pytest.param(
            True,
            [
                {
                    'type': 'int_parsing',
                    'loc': (1,),
                    'msg': 'Input should be a valid integer, unable to parse string as an integer',
                    'input': 'not-num',
                }
            ],
            id='fail_fast',
        ),
        pytest.param(
            False,
            [
                {
                    'type': 'int_parsing',
                    'loc': (1,),
                    'msg': 'Input should be a valid integer, unable to parse string as an integer',
                    'input': 'not-num',
                },
                {
                    'type': 'float_parsing',
                    'loc': (2,),
                    'msg': 'Input should be a valid number, unable to parse string as a number',
                    'input': 'again',
                },
            ],
            id='not_fail_fast',
        ),
    ],
)
def test_tuple_fail_fast(fail_fast, expected):
    """fail_fast=True stops at the first bad item; fail_fast=False collects every error."""
    s = core_schema.tuple_schema(
        [
            core_schema.str_schema(),
            core_schema.int_schema(),
            core_schema.float_schema(),
        ],
        variadic_item_index=None,
        fail_fast=fail_fast,
    )
    v = SchemaValidator(s)
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(['str', 'not-num', 'again'])
    assert exc_info.value.errors(include_url=False) == expected
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_tuple.py",
"license": "MIT License",
"lines": 502,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_typed_dict.py | import math
import os
import platform
import re
import weakref
from collections.abc import Mapping
from typing import Any, Union
import pytest
from dirty_equals import FunctionCheck
from pydantic_core import CoreConfig, SchemaError, SchemaValidator, ValidationError, core_schema
from pydantic_core.core_schema import ExtraBehavior
from ..conftest import Err, PyAndJson, assert_gc
class Cls:
    """Simple attribute bag: every keyword argument becomes an instance attribute."""

    def __init__(self, **attributes):
        self.__dict__.update(attributes)

    def __repr__(self):
        body = ', '.join(f'{name}={value!r}' for name, value in self.__dict__.items())
        return f'Cls({body})'
class Map(Mapping):
    """Minimal read-only Mapping backed by the keyword arguments it was built with."""

    def __init__(self, **kwargs):
        self._d = kwargs

    def __iter__(self):
        return iter(self._d)

    def __len__(self) -> int:
        return len(self._d)

    def __getitem__(self, k, /):
        return self._d[k]

    def __repr__(self):
        body = ', '.join(f'{key}={value!r}' for key, value in self._d.items())
        return f'Map({body})'
def test_simple():
    """Both fields coerce in lax mode: bytes -> str, int passes through."""
    validator = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'field_a': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                'field_b': core_schema.typed_dict_field(schema=core_schema.int_schema()),
            }
        )
    )
    assert validator.validate_python({'field_a': b'abc', 'field_b': 1}) == {'field_a': 'abc', 'field_b': 1}
def test_strict():
    """With strict config no coercion happens: each mistyped field errors individually."""
    validator = SchemaValidator(
        {
            'type': 'typed-dict',
            'fields': {
                'field_a': {'type': 'typed-dict-field', 'schema': {'type': 'str'}},
                'field_b': {'type': 'typed-dict-field', 'schema': {'type': 'int'}},
            },
            'config': CoreConfig(strict=True),
        }
    )
    assert validator.validate_python({'field_a': 'hello', 'field_b': 12}) == {'field_a': 'hello', 'field_b': 12}
    with pytest.raises(ValidationError) as exc:
        validator.validate_python({'field_a': 123, 'field_b': '123'})
    assert exc.value.errors(include_url=False) == [
        {'type': 'string_type', 'loc': ('field_a',), 'msg': 'Input should be a valid string', 'input': 123},
        {'type': 'int_type', 'loc': ('field_b',), 'msg': 'Input should be a valid integer', 'input': '123'},
    ]
def test_with_default():
    """An omitted field with a default is filled in; an explicit value wins."""
    validator = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'field_a': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                'field_b': core_schema.typed_dict_field(
                    schema=core_schema.with_default_schema(schema=core_schema.int_schema(), default=666)
                ),
            }
        )
    )
    assert validator.validate_python({'field_a': b'abc'}) == {'field_a': 'abc', 'field_b': 666}
    assert validator.validate_python({'field_a': b'abc', 'field_b': 1}) == {'field_a': 'abc', 'field_b': 1}
def test_missing_error(pydantic_version):
    """The rendered `missing` error includes the docs URL unless disabled via env var."""
    v = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'field_a': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                'field_b': core_schema.typed_dict_field(schema=core_schema.int_schema()),
            }
        )
    )
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python({'field_a': b'abc'})
    # insert_assert(str(exc_info.value))
    assert str(exc_info.value) == (
        '1 validation error for typed-dict\n'
        'field_b\n'
        "  Field required [type=missing, input_value={'field_a': b'abc'}, input_type=dict]"
        # the trailing URL line is suppressed when PYDANTIC_ERRORS_INCLUDE_URL=false
        + (
            f'\n    For further information visit https://errors.pydantic.dev/{pydantic_version}/v/missing'
            if os.environ.get('PYDANTIC_ERRORS_INCLUDE_URL', '1') != 'false'
            else ''
        )
    )
@pytest.mark.parametrize(
    'config,input_value,expected',
    [
        ({}, {'a': '123'}, {'a': 123}),
        ({}, Map(a=123), {'a': 123}),
        ({}, {b'a': '123'}, Err('Field required [type=missing,')),
        ({}, {'a': '123', 'c': 4}, {'a': 123}),
        (CoreConfig(extra_fields_behavior='allow'), {'a': '123', 'c': 4}, {'a': 123, 'c': 4}),
        (
            CoreConfig(extra_fields_behavior='allow'),
            {'a': '123', b'c': 4},
            Err('Keys should be strings [type=invalid_key,'),
        ),
        (CoreConfig(strict=True), Map(a=123), Err('Input should be a valid dictionary [type=dict_type,')),
        ({}, {'a': '123', 'b': '4.7'}, {'a': 123, 'b': 4.7}),
        ({}, {'a': '123', 'b': 'nan'}, {'a': 123, 'b': FunctionCheck(math.isnan)}),
        (
            CoreConfig(allow_inf_nan=False),
            {'a': '123', 'b': 'nan'},
            Err('Input should be a finite number [type=finite_number,'),
        ),
    ],
    ids=repr,
)
def test_config(config: CoreConfig, input_value, expected):
    """Exercise assorted CoreConfig options (extra behavior, strict, allow_inf_nan) on a typed dict."""
    v = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'a': core_schema.typed_dict_field(schema=core_schema.int_schema()),
                'b': core_schema.typed_dict_field(schema=core_schema.float_schema(), required=False),
            },
            config=config,
        )
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            val = v.validate_python(input_value)
            # only reached if the expected error was NOT raised
            print(f'UNEXPECTED OUTPUT: {val!r}')
    else:
        output_dict = v.validate_python(input_value)
        assert output_dict == expected
def test_ignore_extra():
    """Unknown keys are silently dropped under the default extra behavior."""
    validator = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'field_a': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                'field_b': core_schema.typed_dict_field(schema=core_schema.int_schema()),
            }
        )
    )
    result = validator.validate_python({'field_a': b'123', 'field_b': 1, 'field_c': 123})
    assert result == {'field_a': '123', 'field_b': 1}
@pytest.mark.parametrize(
    'schema_extra_behavior,validate_fn_extra_kw',
    [
        ({'extra_behavior': 'forbid'}, None),
        ({}, 'forbid'),
    ],
)
def test_forbid_extra(schema_extra_behavior: dict[str, Any], validate_fn_extra_kw: Union[ExtraBehavior, None]):
    """extra='forbid' rejects unknown keys, whether set on the schema or per call."""
    validator = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={'field_a': core_schema.typed_dict_field(schema=core_schema.str_schema())}, **schema_extra_behavior
        )
    )
    with pytest.raises(ValidationError) as exc:
        validator.validate_python({'field_a': 'abc', 'field_b': 1}, extra=validate_fn_extra_kw)
    assert exc.value.errors(include_url=False) == [
        {'type': 'extra_forbidden', 'loc': ('field_b',), 'msg': 'Extra inputs are not permitted', 'input': 1}
    ]
def test_allow_extra_invalid():
    """extras_schema is only meaningful with extra_behavior='allow'; anything else is a SchemaError."""
    bad_schema = core_schema.typed_dict_schema(
        fields={}, extras_schema=core_schema.int_schema(), extra_behavior='ignore'
    )
    with pytest.raises(SchemaError, match='extras_schema can only be used if extra_behavior=allow'):
        SchemaValidator(schema=bad_schema)
def test_str_config():
    """Config-level str_max_length applies to typed-dict string fields."""
    validator = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={'field_a': core_schema.typed_dict_field(schema=core_schema.str_schema())},
            config=CoreConfig(str_max_length=5),
        )
    )
    assert validator.validate_python({'field_a': 'test'}) == {'field_a': 'test'}
    with pytest.raises(ValidationError, match='String should have at most 5 characters'):
        validator.validate_python({'field_a': 'test long'})
def test_json_error():
    """Errors inside JSON input carry the full nested location (field name, then index)."""
    validator = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'field_a': core_schema.typed_dict_field(
                    schema=core_schema.list_schema(items_schema=core_schema.int_schema())
                )
            }
        )
    )
    with pytest.raises(ValidationError) as exc:
        validator.validate_json('{"field_a": [123, "wrong"]}')
    expected_error = {
        'type': 'int_parsing',
        'loc': ('field_a', 1),
        'msg': 'Input should be a valid integer, unable to parse string as an integer',
        'input': 'wrong',
    }
    assert exc.value.errors(include_url=False) == [expected_error]
def test_fields_required_by_default():
    """By default all fields should be required"""
    validator = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'x': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                'y': core_schema.typed_dict_field(schema=core_schema.str_schema()),
            }
        )
    )
    assert validator.validate_python({'x': 'pika', 'y': 'chu'}) == {'x': 'pika', 'y': 'chu'}
    with pytest.raises(ValidationError) as exc:
        validator.validate_python({'x': 'pika'})
    assert exc.value.errors(include_url=False) == [
        {'type': 'missing', 'loc': ('y',), 'msg': 'Field required', 'input': {'x': 'pika'}}
    ]
def test_fields_required_by_default_with_optional():
    """required=False makes a single field optional while the rest stay required."""
    validator = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'x': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                'y': core_schema.typed_dict_field(schema=core_schema.str_schema(), required=False),
            }
        )
    )
    assert validator.validate_python({'x': 'pika', 'y': 'chu'}) == {'x': 'pika', 'y': 'chu'}
    assert validator.validate_python({'x': 'pika'}) == {'x': 'pika'}
def test_fields_required_by_default_with_default():
    """A with_default wrapper makes the field effectively optional, filling in the default."""
    validator = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'x': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                'y': core_schema.typed_dict_field(
                    schema=core_schema.with_default_schema(schema=core_schema.str_schema(), default='bulbi')
                ),
            }
        )
    )
    assert validator.validate_python({'x': 'pika', 'y': 'chu'}) == {'x': 'pika', 'y': 'chu'}
    assert validator.validate_python({'x': 'pika'}) == {'x': 'pika', 'y': 'bulbi'}
def test_all_optional_fields():
    """By default all fields should be optional if `total` is set to `False`"""
    validator = SchemaValidator(
        core_schema.typed_dict_schema(
            total=False,
            fields={
                'x': core_schema.typed_dict_field(schema=core_schema.str_schema(strict=True)),
                'y': core_schema.typed_dict_field(schema=core_schema.str_schema()),
            },
        )
    )
    # any subset of the declared fields is accepted ...
    assert validator.validate_python({'x': 'pika', 'y': 'chu'}) == {'x': 'pika', 'y': 'chu'}
    assert validator.validate_python({'x': 'pika'}) == {'x': 'pika'}
    assert validator.validate_python({'y': 'chu'}) == {'y': 'chu'}
    # ... but fields that are present are still validated (x is a strict str)
    with pytest.raises(ValidationError) as exc:
        validator.validate_python({'x': 123})
    assert exc.value.errors(include_url=False) == [
        {'type': 'string_type', 'loc': ('x',), 'msg': 'Input should be a valid string', 'input': 123}
    ]
def test_all_optional_fields_with_required_fields():
    """With total=False, required=True on a single field still makes that field mandatory."""
    v = SchemaValidator(
        core_schema.typed_dict_schema(
            total=False,
            fields={
                'x': core_schema.typed_dict_field(schema=core_schema.str_schema(strict=True), required=True),
                'y': core_schema.typed_dict_field(schema=core_schema.str_schema()),
            },
        )
    )
    assert v.validate_python({'x': 'pika', 'y': 'chu'}) == {'x': 'pika', 'y': 'chu'}
    assert v.validate_python({'x': 'pika'}) == {'x': 'pika'}
    with pytest.raises(ValidationError) as exc_info:
        # the call raises, so the former `== ({'y': 'chu'}, {'y'})` comparison was
        # dead code (leftover from an older return shape) and has been removed
        v.validate_python({'y': 'chu'})
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'missing', 'loc': ('x',), 'msg': 'Field required', 'input': {'y': 'chu'}}
    ]
def test_field_required_and_default():
    """A field cannot be required and have a default value"""
    bad_schema = core_schema.typed_dict_schema(
        fields={
            'x': core_schema.typed_dict_field(
                schema=core_schema.with_default_schema(schema=core_schema.str_schema(), default='pika'),
                required=True,
            )
        }
    )
    with pytest.raises(SchemaError, match="Field 'x': a required field cannot have a default value"):
        SchemaValidator(schema=bad_schema)
def test_alias(py_and_json: PyAndJson):
    """With a validation_alias, only the alias key is accepted by default."""
    validator = py_and_json(
        {
            'type': 'typed-dict',
            'fields': {
                'field_a': {'validation_alias': 'FieldA', 'type': 'typed-dict-field', 'schema': {'type': 'int'}}
            },
        }
    )
    assert validator.validate_test({'FieldA': '123'}) == {'field_a': 123}
    # neither an unrelated key nor the field's own name satisfies the alias
    for bad_input in ({'foobar': '123'}, {'field_a': '123'}):
        with pytest.raises(ValidationError, match=r'FieldA\n +Field required \[type=missing,'):
            validator.validate_test(bad_input)
def test_empty_string_field_name(py_and_json: PyAndJson):
    """An empty string is a legal field name."""
    validator = py_and_json(
        {'type': 'typed-dict', 'fields': {'': {'type': 'typed-dict-field', 'schema': {'type': 'int'}}}}
    )
    assert validator.validate_test({'': 123}) == {'': 123}
def test_empty_string_aliases(py_and_json: PyAndJson):
    """Empty strings also work as aliases, both flat and as nested path segments."""
    flat = py_and_json(
        {
            'type': 'typed-dict',
            'fields': {'field_a': {'validation_alias': '', 'type': 'typed-dict-field', 'schema': {'type': 'int'}}},
        }
    )
    assert flat.validate_test({'': 123}) == {'field_a': 123}
    nested = py_and_json(
        {
            'type': 'typed-dict',
            'fields': {
                'field_a': {'validation_alias': ['', ''], 'type': 'typed-dict-field', 'schema': {'type': 'int'}}
            },
        }
    )
    assert nested.validate_test({'': {'': 123}}) == {'field_a': 123}
def test_alias_allow_pop(py_and_json: PyAndJson):
    """With both validate_by_name and validate_by_alias, either key works and the alias wins on conflict."""
    validator = py_and_json(
        {
            'type': 'typed-dict',
            'fields': {
                'field_a': {'validation_alias': 'FieldA', 'type': 'typed-dict-field', 'schema': {'type': 'int'}}
            },
            'config': {'validate_by_name': True, 'validate_by_alias': True},
        },
    )
    assert validator.validate_test({'FieldA': '123'}) == {'field_a': 123}
    assert validator.validate_test({'field_a': '123'}) == {'field_a': 123}
    # when both keys are supplied, the alias takes precedence
    assert validator.validate_test({'FieldA': '1', 'field_a': '2'}) == {'field_a': 1}
    with pytest.raises(ValidationError, match=r'FieldA\n +Field required \[type=missing,'):
        validator.validate_test({'foobar': '123'})
def test_only_validate_by_name(py_and_json) -> None:
    """validate_by_name only: the attribute name works, the alias does not."""
    validator = py_and_json(
        {
            'type': 'typed-dict',
            'fields': {
                'field_a': {'validation_alias': 'FieldA', 'type': 'typed-dict-field', 'schema': {'type': 'int'}}
            },
            'config': {'validate_by_name': True, 'validate_by_alias': False},
        }
    )
    assert validator.validate_test({'field_a': '123'}) == {'field_a': 123}
    with pytest.raises(ValidationError, match=r'field_a\n +Field required \[type=missing,'):
        validator.validate_test({'FieldA': '123'})
def test_only_allow_alias(py_and_json) -> None:
    """validate_by_alias only: the alias works, the attribute name does not."""
    validator = py_and_json(
        {
            'type': 'typed-dict',
            'fields': {
                'field_a': {'validation_alias': 'FieldA', 'type': 'typed-dict-field', 'schema': {'type': 'int'}}
            },
            'config': {'validate_by_name': False, 'validate_by_alias': True},
        }
    )
    assert validator.validate_test({'FieldA': '123'}) == {'field_a': 123}
    with pytest.raises(ValidationError, match=r'FieldA\n +Field required \[type=missing,'):
        validator.validate_test({'field_a': '123'})
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ({'foo': {'bar': '123'}}, {'field_a': 123}),
        ({'x': '123'}, Err(r'foo.bar\n +Field required \[type=missing,')),
        ({'foo': '123'}, Err(r'foo.bar\n +Field required \[type=missing,')),
        ({'foo': [1, 2, 3]}, Err(r'foo.bar\n +Field required \[type=missing,')),
        ({'foo': {'bat': '123'}}, Err(r'foo.bar\n +Field required \[type=missing,')),
    ],
    ids=repr,
)
def test_alias_path(py_and_json: PyAndJson, input_value, expected):
    """A list-form alias is a lookup path into nested containers."""
    v = py_and_json(
        {
            'type': 'typed-dict',
            'fields': {
                'field_a': {'validation_alias': ['foo', 'bar'], 'type': 'typed-dict-field', 'schema': {'type': 'int'}}
            },
        }
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=expected.message):
            v.validate_test(input_value)
    else:
        output = v.validate_test(input_value)
        assert output == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ({'foo': {'bar': {'bat': '123'}}}, {'field_a': 123}),
        ({'foo': [1, 2, 3, 4]}, {'field_a': 4}),
        ({'foo': (1, 2, 3, 4)}, {'field_a': 4}),
        ({'spam': 5}, {'field_a': 5}),
        ({'spam': 1, 'foo': {'bar': {'bat': 2}}}, {'field_a': 2}),
        ({'foo': {'x': 2}}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'x': '123'}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'x': {2: 33}}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'foo': '01234'}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'foo': [1]}, Err(r'field_a\n +Field required \[type=missing,')),
    ],
    ids=repr,
)
def test_aliases_path_multiple(py_and_json: PyAndJson, input_value, expected):
    """Multiple alias paths are tried in order; the first one that resolves supplies the value."""
    v = py_and_json(
        {
            'type': 'typed-dict',
            'fields': {
                'field_a': {
                    'validation_alias': [['foo', 'bar', 'bat'], ['foo', 3], ['spam']],
                    'type': 'typed-dict-field',
                    'schema': {'type': 'int'},
                }
            },
            'config': {'loc_by_alias': False},  # errors are located by field name, not by alias path
        }
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=expected.message):
            val = v.validate_test(input_value)
            print(f'UNEXPECTED OUTPUT: {val!r}')
    else:
        output = v.validate_test(input_value)
        assert output == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ({'foo': {-2: '123'}}, {'field_a': 123}),
        # negative indexes work fine, both as exact int dict keys and as from-the-end list indexes
        ({'foo': [1, 42, 'xx']}, {'field_a': 42}),
        ({'foo': [42, 'xxx', 42]}, Err(r'Input should be a valid integer,')),
        ({'foo': [42]}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'foo': {'xx': '123'}}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'foo': {'-2': '123'}}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'foo': {2: '123'}}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'foo': 'foobar'}, Err(r'field_a\n +Field required \[type=missing,')),
        ({'foo': {0, 1, 2}}, Err(r'field_a\n +Field required \[type=missing,')),
    ],
    ids=repr,
)
def test_aliases_path_negative(input_value, expected):
    """A negative int in an alias path indexes sequences from the end; string '-2' keys do not match."""
    v = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'field_a': core_schema.typed_dict_field(validation_alias=['foo', -2], schema=core_schema.int_schema())
            },
            config=CoreConfig(loc_by_alias=False),
        )
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=expected.message):
            val = v.validate_python(input_value)
            print(f'UNEXPECTED OUTPUT: {val!r}')
    else:
        output = v.validate_python(input_value)
        assert output == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ({'foo': [1, 42, 'xx']}, {'field_a': 42}),
        ({'foo': [42, 'xxx', 42]}, Err(r'Input should be a valid integer,')),
        ({'foo': [42]}, Err(r'foo.-2\n +Field required \[type=missing,')),
    ],
    ids=repr,
)
def test_aliases_path_negative_json(py_and_json: PyAndJson, input_value, expected):
    """Negative alias-path indexes also resolve against JSON arrays; locations use the alias path here."""
    v = py_and_json(
        {
            'type': 'typed-dict',
            'fields': {
                'field_a': {'validation_alias': ['foo', -2], 'type': 'typed-dict-field', 'schema': {'type': 'int'}}
            },
        }
    )
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=expected.message):
            val = v.validate_test(input_value)
            print(f'UNEXPECTED OUTPUT: {val!r}')
    else:
        output = v.validate_test(input_value)
        assert output == expected
def test_aliases_debug():
    """The validator repr exposes alias paths with a non-empty "rest" (i.e. non-trivial paths)."""
    validator = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'field_a': core_schema.typed_dict_field(
                    validation_alias=[['foo', 'bar', 'bat'], ['foo', 3]], schema=core_schema.int_schema()
                )
            }
        )
    )
    debug = repr(validator)
    print(debug)
    assert debug.startswith('SchemaValidator(title="typed-dict", validator=TypedDict(')
    # Paths longer than one element keep their tail in "rest", which must show up in the repr.
    assert 'rest: [\n' in debug
def get_int_key():
    """Exercise an alias path whose last element is an int dict key.

    NOTE(review): this function has no ``test_`` prefix, so pytest never collects it;
    the tuple expected value below looks like a stale ``(value, fields_set)``-era
    expectation — confirm the expected output before renaming/re-enabling.
    """
    v = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'field_a': core_schema.typed_dict_field(
                    validation_alias=[['foo', 3], ['spam']], schema=core_schema.int_schema()
                )
            }
        )
    )
    assert v.validate_python({'foo': {3: 33}}) == ({'field_a': 33}, {'field_a'})
class GetItemThing:
    """Minimal object supporting only ``__getitem__``, used to exercise alias path lookups."""

    def __getitem__(self, v):
        # Only the 'foo' key is ever expected during these tests.
        assert v == 'foo'
        return 321
def get_custom_getitem():
    """Exercise alias lookup through an arbitrary object implementing ``__getitem__``.

    NOTE(review): no ``test_`` prefix, so pytest never collects this; the tuple expected
    values look like stale ``(value, fields_set)``-era expectations — confirm before
    renaming/re-enabling.
    """
    v = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={'field_a': core_schema.typed_dict_field(validation_alias=['foo'], schema=core_schema.int_schema())}
        )
    )
    assert v.validate_python(GetItemThing()) == ({'field_a': 321}, {'field_a'})
    assert v.validate_python({'bar': GetItemThing()}) == ({'field_a': 321}, {'field_a'})
@pytest.mark.parametrize('input_value', [{'foo': {'bar': 42}}, {'foo': 42}, {'field_a': 42}], ids=repr)
def test_paths_allow_by_name(py_and_json: PyAndJson, input_value):
    """With validate_by_name=True, the bare field name is accepted alongside the alias paths."""
    v = py_and_json(
        {
            'type': 'typed-dict',
            'fields': {
                'field_a': {
                    'validation_alias': [['foo', 'bar'], ['foo']],
                    'type': 'typed-dict-field',
                    'schema': {'type': 'int'},
                }
            },
            'config': {'validate_by_name': True},
        },
    )
    assert v.validate_test(input_value) == {'field_a': 42}
@pytest.mark.parametrize(
    'alias_schema,error',
    [
        ({'validation_alias': []}, 'Lookup paths should have at least one element'),
        ({'validation_alias': [[]]}, 'Each alias path should have at least one element'),
        ({'validation_alias': [123]}, "TypeError: 'int' object is not an instance of 'list'"),
        ({'validation_alias': [[1, 'foo']]}, 'TypeError: The first item in an alias path should be a string'),
    ],
    ids=repr,
)
def test_alias_build_error(alias_schema, error):
    """Malformed validation_alias values are rejected at validator-build time with a SchemaError."""
    with pytest.raises(SchemaError, match=error):
        SchemaValidator(
            schema={
                'type': 'typed-dict',
                'fields': {'field_a': {'type': 'typed-dict-field', 'schema': {'type': 'int'}, **alias_schema}},
            }
        )
def test_alias_error_loc_alias(py_and_json: PyAndJson):
    """With loc_by_alias=True (the default), error locations use the alias path that matched."""
    v = py_and_json(
        {
            'type': 'typed-dict',
            'fields': {
                'field_a': {
                    'type': 'typed-dict-field',
                    'schema': {'type': 'int'},
                    'validation_alias': [['foo', 'x'], ['bar', 1, -1]],
                }
            },
        },
        {'loc_by_alias': True},  # this is the default
    )
    assert v.validate_test({'foo': {'x': 42}}) == {'field_a': 42}
    assert v.validate_python({'bar': ['x', {-1: 42}]}) == {'field_a': 42}
    assert v.validate_test({'bar': ['x', [1, 2, 42]]}) == {'field_a': 42}
    # value found via the first alias path -> error located at ('foo', 'x')
    with pytest.raises(ValidationError) as exc_info:
        v.validate_test({'foo': {'x': 'not_int'}})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('foo', 'x'),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'not_int',
        }
    ]
    # value found via the second alias path -> error located at ('bar', 1, -1)
    with pytest.raises(ValidationError) as exc_info:
        v.validate_test({'bar': ['x', [1, 2, 'not_int']]})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('bar', 1, -1),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'not_int',
        }
    ]
    # nothing matched at all -> the first alias path is used for the missing-field location
    with pytest.raises(ValidationError) as exc_info:
        v.validate_test({})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'missing', 'loc': ('foo', 'x'), 'msg': 'Field required', 'input': {}}
    ]
def test_alias_error_loc_field_names(py_and_json: PyAndJson):
    """With loc_by_alias=False, error locations use the field name regardless of which alias matched."""
    v = py_and_json(
        {
            'type': 'typed-dict',
            'fields': {
                'field_a': {
                    'type': 'typed-dict-field',
                    'schema': {'type': 'int'},
                    'validation_alias': [['foo'], ['bar', 1, -1]],
                }
            },
            'config': {'loc_by_alias': False},
        }
    )
    assert v.validate_test({'foo': 42}) == {'field_a': 42}
    assert v.validate_test({'bar': ['x', [1, 2, 42]]}) == {'field_a': 42}
    # error via the first alias path still reports loc ('field_a',)
    with pytest.raises(ValidationError) as exc_info:
        v.validate_test({'foo': 'not_int'})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('field_a',),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'not_int',
        }
    ]
    # error via the second alias path reports the same field-name loc
    with pytest.raises(ValidationError) as exc_info:
        v.validate_test({'bar': ['x', [1, 2, 'not_int']]})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('field_a',),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'not_int',
        }
    ]
    # missing field also reported by field name
    with pytest.raises(ValidationError) as exc_info:
        v.validate_test({})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'missing', 'loc': ('field_a',), 'msg': 'Field required', 'input': {}}
    ]
def test_empty_model():
    """A typed dict with no fields accepts an empty mapping and rejects non-mapping input."""
    validator = SchemaValidator(core_schema.typed_dict_schema(fields={}))
    assert validator.validate_python({}) == {}
    message = re.escape('Input should be a valid dictionary [type=dict_type,')
    with pytest.raises(ValidationError, match=message):
        validator.validate_python('x')
def test_model_deep():
    """Errors inside nested typed dicts carry the full location path down to the failing leaf."""
    v = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'field_a': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                'field_b': core_schema.typed_dict_field(
                    schema=core_schema.typed_dict_schema(
                        fields={
                            'field_c': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                            'field_d': core_schema.typed_dict_field(
                                schema=core_schema.typed_dict_schema(
                                    fields={
                                        'field_e': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                                        'field_f': core_schema.typed_dict_field(schema=core_schema.int_schema()),
                                    }
                                )
                            ),
                        }
                    )
                ),
            }
        )
    )
    output = v.validate_python({'field_a': '1', 'field_b': {'field_c': '2', 'field_d': {'field_e': '4', 'field_f': 4}}})
    assert output == {'field_a': '1', 'field_b': ({'field_c': '2', 'field_d': {'field_e': '4', 'field_f': 4}})}
    # failure three levels down reports loc ('field_b', 'field_d', 'field_f')
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python({'field_a': '1', 'field_b': {'field_c': '2', 'field_d': {'field_e': '4', 'field_f': 'xx'}}})
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('field_b', 'field_d', 'field_f'),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'xx',
        }
    ]
def test_alias_extra(py_and_json: PyAndJson):
    """With extra_behavior='allow', keys consumed via an alias are not duplicated into extras."""
    v = py_and_json(
        {
            'type': 'typed-dict',
            'extra_behavior': 'allow',
            'fields': {
                'field_a': {
                    'validation_alias': [['FieldA'], ['foo', 2]],
                    'type': 'typed-dict-field',
                    'schema': {'type': 'int'},
                }
            },
            'config': {'loc_by_alias': False},
        }
    )
    assert v.validate_test({'FieldA': 1}) == {'field_a': 1}
    assert v.validate_test({'foo': [1, 2, 3]}) == {'field_a': 3}
    # used_keys should be populated even though validation fails, so "FieldA" is skipped in extra
    with pytest.raises(ValidationError) as exc_info:
        assert v.validate_test({'FieldA': '...'}) == {'field_a': 1}
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('field_a',),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': '...',
        }
    ]
def test_alias_extra_by_name(py_and_json: PyAndJson):
    """extra_behavior='allow' plus validate_by_name: both alias and field name populate the field."""
    schema = {
        'type': 'typed-dict',
        'extra_behavior': 'allow',
        'fields': {
            'field_a': {'validation_alias': 'FieldA', 'type': 'typed-dict-field', 'schema': {'type': 'int'}}
        },
        'config': {'validate_by_name': True},
    }
    validator = py_and_json(schema)
    # Neither spelling ends up duplicated in the extras.
    for payload in ({'FieldA': 1}, {'field_a': 1}):
        assert validator.validate_test(payload) == {'field_a': 1}
def test_alias_extra_forbid(py_and_json: PyAndJson):
    """A key consumed via its alias must not be reported as forbidden extra input."""
    schema = {
        'type': 'typed-dict',
        'extra_behavior': 'forbid',
        'fields': {
            'field_a': {'type': 'typed-dict-field', 'validation_alias': 'FieldA', 'schema': {'type': 'int'}}
        },
    }
    validator = py_and_json(schema)
    assert validator.validate_test({'FieldA': 1}) == {'field_a': 1}
def test_with_default_factory():
    """A default_factory supplies the value when the key is absent and is ignored otherwise."""
    field_schema = core_schema.with_default_schema(
        schema=core_schema.str_schema(), default_factory=lambda: 'pikachu'
    )
    validator = SchemaValidator(
        core_schema.typed_dict_schema(fields={'x': core_schema.typed_dict_field(schema=field_schema)})
    )
    # Absent key -> factory result; present key -> input value wins.
    assert validator.validate_python({}) == {'x': 'pikachu'}
    assert validator.validate_python({'x': 'bulbi'}) == {'x': 'bulbi'}
def test_field_required_and_default_factory():
    """A field cannot be required and have a default factory"""
    # the contradiction is detected at build time (SchemaError), not at validation time
    with pytest.raises(SchemaError, match="Field 'x': a required field cannot have a default value"):
        SchemaValidator(
            schema=core_schema.typed_dict_schema(
                fields={
                    'x': core_schema.typed_dict_field(
                        schema=core_schema.with_default_schema(
                            schema=core_schema.str_schema(), default_factory=lambda: 'pika'
                        ),
                        required=True,
                    )
                }
            )
        )
@pytest.mark.parametrize(
    'default_factory,error_message',
    [
        (lambda: 1 + 'a', "unsupported operand type(s) for +: 'int' and 'str'"),
        (lambda x: 'a' + x, "<lambda>() missing 1 required positional argument: 'x'"),
    ],
)
def test_bad_default_factory(default_factory, error_message):
    """Exceptions raised inside a default_factory propagate unchanged to the caller."""
    v = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'x': core_schema.typed_dict_field(
                    schema=core_schema.with_default_schema(
                        schema=core_schema.str_schema(), default_factory=default_factory
                    )
                )
            }
        )
    )
    # the factory only runs when the key is absent, so the failure surfaces here
    with pytest.raises(TypeError, match=re.escape(error_message)):
        v.validate_python({})
class TestOnError:
    """Behavior of the with-default schema's `on_error` setting within a typed dict."""

    def test_on_error_bad_omit(self):
        """on_error='omit' is rejected at build time for required fields."""
        with pytest.raises(SchemaError, match="Field 'x': 'on_error = omit' cannot be set for required fields"):
            SchemaValidator(
                schema=core_schema.typed_dict_schema(
                    fields={
                        'x': core_schema.typed_dict_field(
                            schema=core_schema.with_default_schema(schema=core_schema.str_schema(), on_error='omit')
                        )
                    }
                )
            )

    def test_on_error_bad_default(self):
        """on_error='default' requires a default or default_factory to fall back to."""
        with pytest.raises(SchemaError, match="'on_error = default' requires a `default` or `default_factory`"):
            SchemaValidator(
                schema=core_schema.typed_dict_schema(
                    fields={
                        'x': core_schema.typed_dict_field(
                            schema=core_schema.with_default_schema(schema=core_schema.str_schema(), on_error='default')
                        )
                    }
                )
            )

    def test_on_error_raise_by_default(self, py_and_json: PyAndJson):
        """Without an on_error setting, validation errors propagate."""
        v = py_and_json(
            {'type': 'typed-dict', 'fields': {'x': {'type': 'typed-dict-field', 'schema': {'type': 'str'}}}}
        )
        assert v.validate_test({'x': 'foo'}) == {'x': 'foo'}
        with pytest.raises(ValidationError) as exc_info:
            v.validate_test({'x': ['foo']})
        assert exc_info.value.errors(include_url=False) == [
            {'input': ['foo'], 'type': 'string_type', 'loc': ('x',), 'msg': 'Input should be a valid string'}
        ]

    def test_on_error_raise_explicit(self, py_and_json: PyAndJson):
        """on_error='raise' behaves the same as the default: errors propagate."""
        v = py_and_json(
            {
                'type': 'typed-dict',
                'fields': {
                    'x': {
                        'type': 'typed-dict-field',
                        'schema': {'type': 'default', 'schema': {'type': 'str'}, 'on_error': 'raise'},
                    }
                },
            }
        )
        assert v.validate_test({'x': 'foo'}) == {'x': 'foo'}
        with pytest.raises(ValidationError) as exc_info:
            v.validate_test({'x': ['foo']})
        assert exc_info.value.errors(include_url=False) == [
            {'input': ['foo'], 'type': 'string_type', 'loc': ('x',), 'msg': 'Input should be a valid string'}
        ]

    def test_on_error_omit(self, py_and_json: PyAndJson):
        """on_error='omit' drops the field from the output when validation fails."""
        v = py_and_json(
            {
                'type': 'typed-dict',
                'fields': {
                    'x': {
                        'type': 'typed-dict-field',
                        'schema': {'type': 'default', 'schema': {'type': 'str'}, 'on_error': 'omit'},
                        'required': False,
                    }
                },
            }
        )
        assert v.validate_test({'x': 'foo'}) == {'x': 'foo'}
        assert v.validate_test({}) == {}
        assert v.validate_test({'x': ['foo']}) == {}

    def test_on_error_omit_with_default(self, py_and_json: PyAndJson):
        """on_error='omit' drops the field on failure even when a default exists; the default only fills absent keys."""
        v = py_and_json(
            {
                'type': 'typed-dict',
                'fields': {
                    'x': {
                        'type': 'typed-dict-field',
                        'schema': {'type': 'default', 'schema': {'type': 'str'}, 'on_error': 'omit', 'default': 'pika'},
                        'required': False,
                    }
                },
            }
        )
        assert v.validate_test({'x': 'foo'}) == {'x': 'foo'}
        assert v.validate_test({}) == {'x': 'pika'}
        assert v.validate_test({'x': ['foo']}) == {}

    def test_on_error_default(self, py_and_json: PyAndJson):
        """on_error='default' substitutes the static default when validation fails."""
        v = py_and_json(
            {
                'type': 'typed-dict',
                'fields': {
                    'x': {
                        'type': 'typed-dict-field',
                        'schema': {
                            'type': 'default',
                            'schema': {'type': 'str'},
                            'on_error': 'default',
                            'default': 'pika',
                        },
                    }
                },
            }
        )
        assert v.validate_test({'x': 'foo'}) == {'x': 'foo'}
        assert v.validate_test({'x': ['foo']}) == {'x': 'pika'}

    def test_on_error_default_factory(self, py_and_json: PyAndJson):
        """on_error='default' also works with a default_factory."""
        v = py_and_json(
            {
                'type': 'typed-dict',
                'fields': {
                    'x': {
                        'type': 'typed-dict-field',
                        'schema': {
                            'type': 'default',
                            'schema': {'type': 'str'},
                            'on_error': 'default',
                            'default_factory': lambda: 'pika',
                        },
                    }
                },
            }
        )
        assert v.validate_test({'x': 'foo'}) == {'x': 'foo'}
        assert v.validate_test({'x': ['foo']}) == {'x': 'pika'}

    def test_wrap_on_error(self, py_and_json: PyAndJson):
        """A wrap validator may recover from the inner error itself, so on_error='raise' never fires."""

        def wrap_function(input_value, validator, info):
            # Fall back to a value derived from the input when inner validation fails.
            try:
                return validator(input_value)
            except ValidationError:
                if isinstance(input_value, list):
                    return str(len(input_value))
                else:
                    return repr(input_value)

        v = py_and_json(
            {
                'type': 'typed-dict',
                'fields': {
                    'x': {
                        'type': 'typed-dict-field',
                        'schema': {
                            'type': 'default',
                            'on_error': 'raise',
                            'schema': {
                                'type': 'function-wrap',
                                'function': {'type': 'with-info', 'function': wrap_function},
                                'schema': {'type': 'str'},
                            },
                        },
                    }
                },
            }
        )
        assert v.validate_test({'x': 'foo'}) == {'x': 'foo'}
        assert v.validate_test({'x': ['foo']}) == {'x': '1'}
        assert v.validate_test({'x': ['foo', 'bar']}) == {'x': '2'}
        assert v.validate_test({'x': {'a': 'b'}}) == {'x': "{'a': 'b'}"}
@pytest.mark.parametrize(
    'config,schema_extra_behavior_kw',
    [
        (core_schema.CoreConfig(extra_fields_behavior='allow'), {}),
        (core_schema.CoreConfig(extra_fields_behavior='allow'), {'extra_behavior': None}),
        (core_schema.CoreConfig(), {'extra_behavior': 'allow'}),
        (None, {'extra_behavior': 'allow'}),
        (core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': 'allow'}),  # schema overrides config
    ],
)
@pytest.mark.parametrize(
    'extras_schema_kw, expected_extra_value',
    [({}, '123'), ({'extras_schema': None}, '123'), ({'extras_schema': core_schema.int_schema()}, 123)],
    ids=['extras_schema=unset', 'extras_schema=None', 'extras_schema=int'],
)
def test_extra_behavior_allow(
    config: Union[core_schema.CoreConfig, None],
    schema_extra_behavior_kw: dict[str, Any],
    extras_schema_kw: dict[str, Any],
    expected_extra_value: Any,
):
    """extra='allow' keeps unknown keys in the output; extras_schema, when set, validates their values."""
    v = SchemaValidator(
        core_schema.typed_dict_schema(
            {'f': core_schema.typed_dict_field(core_schema.str_schema())},
            **schema_extra_behavior_kw,
            **extras_schema_kw,
            config=config,
        )
    )
    m: dict[str, Any] = v.validate_python({'f': 'x', 'extra_field': '123'})
    assert m == {'f': 'x', 'extra_field': expected_extra_value}
# We can't test the extra parameter of the validate_* functions above, since the
# extras_schema parameter isn't valid unless the models are configured with extra='allow'.
# Test the validate_* extra parameter separately here instead:
@pytest.mark.parametrize(
    'config,schema_extra_behavior_kw',
    [
        (core_schema.CoreConfig(extra_fields_behavior='forbid'), {}),
        (core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': None}),
        (core_schema.CoreConfig(), {'extra_behavior': 'forbid'}),
        (None, {'extra_behavior': 'forbid'}),
        (core_schema.CoreConfig(extra_fields_behavior='ignore'), {'extra_behavior': 'forbid'}),
        (core_schema.CoreConfig(), {}),
        (core_schema.CoreConfig(), {'extra_behavior': None}),
        (None, {'extra_behavior': None}),
    ],
)
def test_extra_behavior_allow_with_validate_fn_override(
    config: Union[core_schema.CoreConfig, None],
    schema_extra_behavior_kw: dict[str, Any],
):
    """extra='allow' passed at validate time overrides whatever the schema/config specified."""
    v = SchemaValidator(
        core_schema.typed_dict_schema(
            {'f': core_schema.typed_dict_field(core_schema.str_schema())},
            **schema_extra_behavior_kw,
            config=config,
        )
    )
    m: dict[str, Any] = v.validate_python({'f': 'x', 'extra_field': '123'}, extra='allow')
    assert m == {'f': 'x', 'extra_field': '123'}
@pytest.mark.parametrize(
    'config,schema_extra_behavior_kw,validate_fn_extra_kw',
    [
        (core_schema.CoreConfig(extra_fields_behavior='forbid'), {}, None),
        (core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': None}, None),
        (core_schema.CoreConfig(), {'extra_behavior': 'forbid'}, None),
        (None, {'extra_behavior': 'forbid'}, None),
        (core_schema.CoreConfig(extra_fields_behavior='allow'), {'extra_behavior': 'forbid'}, None),
        (core_schema.CoreConfig(extra_fields_behavior='ignore'), {}, 'forbid'),
        (core_schema.CoreConfig(extra_fields_behavior='ignore'), {'extra_behavior': None}, 'forbid'),
        (core_schema.CoreConfig(), {'extra_behavior': 'ignore'}, 'forbid'),
        (None, {'extra_behavior': 'ignore'}, 'forbid'),
        (core_schema.CoreConfig(extra_fields_behavior='allow'), {'extra_behavior': 'ignore'}, 'forbid'),
        (core_schema.CoreConfig(), {}, 'forbid'),
        (core_schema.CoreConfig(), {'extra_behavior': None}, 'forbid'),
        (None, {'extra_behavior': None}, 'forbid'),
    ],
)
def test_extra_behavior_forbid(
    config: Union[core_schema.CoreConfig, None],
    schema_extra_behavior_kw: dict[str, Any],
    validate_fn_extra_kw: Union[ExtraBehavior, None],
):
    """extra='forbid' — whether from schema, config, or the validate call — rejects unknown keys."""
    v = SchemaValidator(
        core_schema.typed_dict_schema(
            {'f': core_schema.typed_dict_field(core_schema.str_schema())},
            **schema_extra_behavior_kw,
            config=config,
        )
    )
    m: dict[str, Any] = v.validate_python({'f': 'x'}, extra=validate_fn_extra_kw)
    assert m == {'f': 'x'}
    # an unknown key is an error, located at the offending key
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python({'f': 'x', 'extra_field': 123}, extra=validate_fn_extra_kw)
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'extra_forbidden', 'loc': ('extra_field',), 'msg': 'Extra inputs are not permitted', 'input': 123}
    ]
@pytest.mark.parametrize(
    'config,schema_extra_behavior_kw,validate_fn_extra_kw',
    [
        (core_schema.CoreConfig(extra_fields_behavior='ignore'), {}, None),
        (core_schema.CoreConfig(), {'extra_behavior': 'ignore'}, None),
        (None, {'extra_behavior': 'ignore'}, None),
        (core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': 'ignore'}, None),
        (core_schema.CoreConfig(), {}, None),
        (core_schema.CoreConfig(), {'extra_behavior': None}, None),
        (None, {'extra_behavior': None}, None),
        (core_schema.CoreConfig(extra_fields_behavior='allow'), {}, 'ignore'),
        (core_schema.CoreConfig(), {'extra_behavior': 'allow'}, 'ignore'),
        (None, {'extra_behavior': 'allow'}, 'ignore'),
        (core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': 'allow'}, 'ignore'),
    ],
)
def test_extra_behavior_ignore(
    config: Union[core_schema.CoreConfig, None],
    schema_extra_behavior_kw: dict[str, Any],
    validate_fn_extra_kw: Union[ExtraBehavior, None],
):
    """extra='ignore' (the default behavior) silently drops unknown keys."""
    # NOTE(review): unlike the allow/forbid variants, `config` is passed to SchemaValidator
    # here rather than to typed_dict_schema — presumably equivalent; confirm.
    v = SchemaValidator(
        core_schema.typed_dict_schema(
            {'f': core_schema.typed_dict_field(core_schema.str_schema())}, **schema_extra_behavior_kw
        ),
        config=config,
    )
    m: dict[str, Any] = v.validate_python({'f': 'x', 'extra_field': 123}, extra=validate_fn_extra_kw)
    assert m == {'f': 'x'}
@pytest.mark.xfail(
    condition=platform.python_implementation() == 'PyPy', reason='https://foss.heptapod.net/pypy/pypy/-/issues/3899'
)
@pytest.mark.skipif(platform.python_implementation() == 'GraalVM', reason='Cannot reliably trigger GC on GraalPy')
def test_leak_typed_dict():
    """The typed-dict validator must participate in GC traversal so reference cycles are collectable."""

    def fn():
        def validate(v, info):
            return v

        # the schema references `validate` both as field validator and extras_schema
        schema = core_schema.with_info_plain_validator_function(validate)
        schema = core_schema.typed_dict_schema(
            {'f': core_schema.typed_dict_field(schema)}, extra_behavior='allow', extras_schema=schema
        )
        # If any of the Rust validators don't implement traversal properly,
        # there will be an undetectable cycle created by this assignment
        # which will keep Defaulted alive
        # NOTE(review): "Defaulted" looks copy-pasted from another leak test; the cycle
        # here is validate -> SchemaValidator -> schema -> validate — confirm wording.
        validate.__pydantic_validator__ = SchemaValidator(schema)
        return validate

    cycle = fn()
    ref = weakref.ref(cycle)
    assert ref() is not None
    del cycle
    # once the last external reference is dropped, GC must be able to reclaim the cycle
    assert_gc(lambda: ref() is None)
@pytest.mark.parametrize('config_by_alias', [None, True, False])
@pytest.mark.parametrize('config_by_name', [None, True, False])
@pytest.mark.parametrize('runtime_by_alias', [None, True, False])
@pytest.mark.parametrize('runtime_by_name', [None, True, False])
def test_by_alias_and_name_config_interaction(
    config_by_alias: Union[bool, None],
    config_by_name: Union[bool, None],
    runtime_by_alias: Union[bool, None],
    runtime_by_name: Union[bool, None],
) -> None:
    """This test reflects the priority that applies for config vs runtime validation alias configuration.

    Runtime values take precedence over config values, when set.
    By default, by_alias is True and by_name is False.
    """
    if config_by_alias is False and config_by_name is False and runtime_by_alias is False and runtime_by_name is False:
        pytest.skip("Can't have both by_alias and by_name as effectively False")
    # only include keys the test case actually sets, so `None` means "unset"
    core_config = {
        **({'validate_by_alias': config_by_alias} if config_by_alias is not None else {}),
        **({'validate_by_name': config_by_name} if config_by_name is not None else {}),
    }
    schema = core_schema.typed_dict_schema(
        fields={
            'my_field': core_schema.typed_dict_field(schema=core_schema.int_schema(), validation_alias='my_alias'),
        },
        config=core_schema.CoreConfig(**core_config),
    )
    s = SchemaValidator(schema)
    # effective setting: first non-None of (runtime, config, default)
    alias_allowed = next(x for x in (runtime_by_alias, config_by_alias, True) if x is not None)
    name_allowed = next(x for x in (runtime_by_name, config_by_name, False) if x is not None)
    if alias_allowed:
        assert s.validate_python({'my_alias': 1}, by_alias=runtime_by_alias, by_name=runtime_by_name) == {'my_field': 1}
    if name_allowed:
        assert s.validate_python({'my_field': 1}, by_alias=runtime_by_alias, by_name=runtime_by_name) == {'my_field': 1}
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_typed_dict.py",
"license": "MIT License",
"lines": 1118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_union.py | import platform
import sys
from dataclasses import dataclass
from datetime import date, time
from enum import Enum, IntEnum
from itertools import permutations
from typing import Any, Optional, Union
from uuid import UUID
import pytest
from dirty_equals import IsFloat, IsInt
from pydantic_core import CoreConfig, SchemaError, SchemaValidator, ValidationError, core_schema
from ..conftest import plain_repr
@pytest.mark.parametrize(
    'input_value,expected_value',
    [
        (True, True),
        (False, False),
        ('true', True),
        ('false', False),
        (1, 1),
        (0, 0),
        (123, 123),
        ('123', 123),
        ('0', False),  # this case is different depending on the order of the choices
        ('1', True),  # this case is different depending on the order of the choices
    ],
)
def test_union_bool_int(input_value, expected_value):
    """bool|int union with bool first: ambiguous strings like '0'/'1' coerce to bool."""
    validator = SchemaValidator(
        core_schema.union_schema(choices=[core_schema.bool_schema(), core_schema.int_schema()])
    )
    result = validator.validate_python(input_value)
    assert result == expected_value
@pytest.mark.parametrize(
    'input_value,expected_value',
    [
        (True, True),
        (False, False),
        ('true', True),
        ('false', False),
        (1, 1),
        (0, 0),
        (123, 123),
        ('123', 123),
        ('0', 0),  # this case is different depending on the order of the choices
        ('1', 1),  # this case is different depending on the order of the choices
    ],
)
def test_union_int_bool(input_value, expected_value):
    """int|bool union with int first: ambiguous strings like '0'/'1' coerce to int."""
    validator = SchemaValidator(
        core_schema.union_schema(choices=[core_schema.int_schema(), core_schema.bool_schema()])
    )
    result = validator.validate_python(input_value)
    assert result == expected_value
class TestModelClass:
    """Union of two model classes with disjoint fields: the matching model is constructed."""

    class ModelA:
        pass

    class ModelB:
        pass

    @pytest.fixture(scope='class')
    def schema_validator(self) -> SchemaValidator:
        """Build a validator for Union[ModelA, ModelB]."""
        return SchemaValidator(
            schema=core_schema.union_schema(
                choices=[
                    core_schema.model_schema(
                        cls=self.ModelA,
                        schema=core_schema.model_fields_schema(
                            fields={
                                'a': core_schema.model_field(schema=core_schema.int_schema()),
                                'b': core_schema.model_field(schema=core_schema.str_schema()),
                            }
                        ),
                    ),
                    core_schema.model_schema(
                        cls=self.ModelB,
                        schema=core_schema.model_fields_schema(
                            fields={
                                'c': core_schema.model_field(schema=core_schema.int_schema()),
                                'd': core_schema.model_field(schema=core_schema.str_schema()),
                            }
                        ),
                    ),
                ]
            )
        )

    def test_model_a(self, schema_validator: SchemaValidator):
        """Input matching ModelA's fields produces a ModelA instance."""
        m_a = schema_validator.validate_python({'a': 1, 'b': 'hello'})
        assert isinstance(m_a, self.ModelA)
        assert m_a.a == 1
        assert m_a.b == 'hello'

    def test_model_b(self, schema_validator: SchemaValidator):
        """Input matching ModelB's fields produces a ModelB instance."""
        m_b = schema_validator.validate_python({'c': 2, 'd': 'again'})
        assert isinstance(m_b, self.ModelB)
        assert m_b.c == 2
        assert m_b.d == 'again'

    def test_exact_check(self, schema_validator: SchemaValidator):
        """An already-validated model instance is passed through as the same object."""
        m_b = schema_validator.validate_python({'c': 2, 'd': 'again'})
        assert isinstance(m_b, self.ModelB)
        m_b2 = schema_validator.validate_python(m_b)
        assert m_b2 is m_b

    def test_error(self, schema_validator: SchemaValidator):
        """Non-matching input reports missing fields for every candidate, loc-prefixed by model name."""
        with pytest.raises(ValidationError) as exc_info:
            schema_validator.validate_python({'a': 2})
        assert exc_info.value.errors(include_url=False) == [
            {'type': 'missing', 'loc': ('ModelA', 'b'), 'msg': 'Field required', 'input': {'a': 2}},
            {'type': 'missing', 'loc': ('ModelB', 'c'), 'msg': 'Field required', 'input': {'a': 2}},
            {'type': 'missing', 'loc': ('ModelB', 'd'), 'msg': 'Field required', 'input': {'a': 2}},
        ]
class TestModelClassSimilar:
    """Union of two model classes with overlapping fields: smart union prefers the better match."""

    class ModelA:
        pass

    class ModelB:
        pass

    @pytest.fixture(scope='class')
    def schema_validator(self) -> SchemaValidator:
        """ModelB has ModelA's fields plus an optional float field `c` (default 1.0)."""
        return SchemaValidator(
            schema=core_schema.union_schema(
                choices=[
                    core_schema.model_schema(
                        cls=self.ModelA,
                        schema=core_schema.model_fields_schema(
                            fields={
                                'a': core_schema.model_field(schema=core_schema.int_schema()),
                                'b': core_schema.model_field(schema=core_schema.str_schema()),
                            }
                        ),
                    ),
                    core_schema.model_schema(
                        cls=self.ModelB,
                        schema=core_schema.model_fields_schema(
                            fields={
                                'a': core_schema.model_field(schema=core_schema.int_schema()),
                                'b': core_schema.model_field(schema=core_schema.str_schema()),
                                'c': core_schema.model_field(
                                    schema=core_schema.with_default_schema(
                                        schema=core_schema.float_schema(), default=1.0
                                    )
                                ),
                            }
                        ),
                    ),
                ]
            )
        )

    def test_model_a(self, schema_validator: SchemaValidator):
        """Without `c` in the input, ModelA wins and no default `c` is attached."""
        m = schema_validator.validate_python({'a': 1, 'b': 'hello'})
        assert isinstance(m, self.ModelA)
        assert m.a == 1
        assert m.b == 'hello'
        assert not hasattr(m, 'c')

    def test_model_b_preferred(self, schema_validator: SchemaValidator):
        # Note, this is a different behavior to previous smart union behavior,
        # where the first match would be preferred. However, we believe it is better
        # to prefer the match with the greatest number of valid fields set.
        m = schema_validator.validate_python({'a': 1, 'b': 'hello', 'c': 2.0})
        assert isinstance(m, self.ModelB)
        assert m.a == 1
        assert m.b == 'hello'
        assert m.c == 2.0

    def test_model_b_not_ignored(self, schema_validator: SchemaValidator):
        """An existing ModelB instance is accepted as-is even though ModelA is listed first."""
        m1 = self.ModelB()
        m1.a = 1
        m1.b = 'hello'
        m1.c = 2.0
        m2 = schema_validator.validate_python(m1)
        assert isinstance(m2, self.ModelB)
        assert m2.a == 1
        assert m2.b == 'hello'
        assert m2.c == 2.0
def test_nullable_via_union():
    """Optional-style unions (None | int) accept None and ints, rejecting everything else."""
    validator = SchemaValidator(
        core_schema.union_schema(choices=[core_schema.none_schema(), core_schema.int_schema()])
    )
    assert validator.validate_python(None) is None
    assert validator.validate_python(1) == 1
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python('hello')
    # One error per union member, each located under the member's name.
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'none_required', 'loc': ('none',), 'msg': 'Input should be None', 'input': 'hello'},
        {
            'type': 'int_parsing',
            'loc': ('int',),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'hello',
        },
    ]
def test_union_list_bool_int():
    """A union of list[bool] and list[int] picks the member whose items validate; failures report both."""
    v = SchemaValidator(
        core_schema.union_schema(
            choices=[
                core_schema.list_schema(items_schema=core_schema.bool_schema()),
                core_schema.list_schema(items_schema=core_schema.int_schema()),
            ]
        )
    )
    assert v.validate_python(['true', True, 'no']) == [True, True, False]
    assert v.validate_python([5, 6, '789']) == [5, 6, 789]
    assert v.validate_python(['1', '0']) == [1, 0]
    # input fitting neither member -> one error per member, loc names the member
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python([3, 'true'])
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'bool_parsing',
            'loc': ('list[bool]', 0),
            'msg': 'Input should be a valid boolean, unable to interpret input',
            'input': 3,
        },
        {
            'type': 'int_parsing',
            'loc': ('list[int]', 1),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'true',
        },
    ]
@pytest.mark.xfail(
    platform.python_implementation() == 'PyPy' and sys.version_info[:2] == (3, 11), reason='pypy 3.11 type formatting'
)
def test_empty_choices():
    """Building a union with no choices is rejected with a SchemaError."""
    message = r'Error building "union" validator:\s+SchemaError: One or more union choices required'
    with pytest.raises(SchemaError, match=message):
        SchemaValidator(core_schema.union_schema(choices=[]))
def test_one_choice():
    """A single-choice union collapses to the choice itself — no Union wrapper in the repr."""
    v = SchemaValidator(core_schema.union_schema(choices=[core_schema.str_schema()]))
    assert (
        plain_repr(v)
        == 'SchemaValidator(title="str",validator=Str(StrValidator{strict:false,coerce_numbers_to_str:false}),definitions=[],cache_strings=True)'
    )
    assert v.validate_python('hello') == 'hello'
def test_strict_union_flag() -> None:
    """strict=True passed at call time makes every union member validate strictly."""
    v = SchemaValidator(core_schema.union_schema(choices=[core_schema.bool_schema(), core_schema.int_schema()]))
    assert v.validate_python(1, strict=True) == 1
    assert v.validate_python(123, strict=True) == 123
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python('123', strict=True)
    # strict mode: the string '123' is not coerced by either branch
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'bool_type', 'loc': ('bool',), 'msg': 'Input should be a valid boolean', 'input': '123'},
        {'type': 'int_type', 'loc': ('int',), 'msg': 'Input should be a valid integer', 'input': '123'},
    ]
def test_strict_union_config_level() -> None:
    """strict=True set via CoreConfig makes every union member validate strictly."""
    v = SchemaValidator(
        core_schema.union_schema(choices=[core_schema.bool_schema(), core_schema.int_schema()]),
        config=CoreConfig(strict=True),
    )
    assert v.validate_python(1) == 1
    assert v.validate_python(123) == 123
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python('123')
    # same failure shape as the call-level strict flag
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'bool_type', 'loc': ('bool',), 'msg': 'Input should be a valid boolean', 'input': '123'},
        {'type': 'int_type', 'loc': ('int',), 'msg': 'Input should be a valid integer', 'input': '123'},
    ]
def test_strict_union_member_level() -> None:
    """strict=True set on each member schema individually behaves like union-wide strictness."""
    v = SchemaValidator(
        core_schema.union_schema(choices=[core_schema.bool_schema(strict=True), core_schema.int_schema(strict=True)])
    )
    assert v.validate_python(1) == 1
    assert v.validate_python(123) == 123
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python('123')
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'bool_type', 'loc': ('bool',), 'msg': 'Input should be a valid boolean', 'input': '123'},
        {'type': 'int_type', 'loc': ('int',), 'msg': 'Input should be a valid integer', 'input': '123'},
    ]
def test_custom_error():
    """custom_error_type + custom_error_message replace the per-member errors with a single custom error."""
    v = SchemaValidator(
        core_schema.union_schema(
            choices=[core_schema.str_schema(), core_schema.bytes_schema()],
            custom_error_type='my_error',
            custom_error_message='Input should be a string or bytes',
        )
    )
    assert v.validate_python('hello') == 'hello'
    assert v.validate_python(b'hello') == b'hello'
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(123)
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'my_error', 'loc': (), 'msg': 'Input should be a string or bytes', 'input': 123}
    ]
def test_custom_error_type():
    """A known error type given as custom_error_type reuses that type's standard message."""
    v = SchemaValidator(
        core_schema.union_schema(
            choices=[core_schema.str_schema(), core_schema.bytes_schema()], custom_error_type='string_type'
        )
    )
    assert v.validate_python('hello') == 'hello'
    assert v.validate_python(b'hello') == b'hello'
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(123)
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'string_type', 'loc': (), 'msg': 'Input should be a valid string', 'input': 123}
    ]
def test_custom_error_type_context():
    """custom_error_context supplies the ctx values a templated error type needs (here 'lt' for less_than)."""
    v = SchemaValidator(
        core_schema.union_schema(
            choices=[core_schema.str_schema(), core_schema.bytes_schema()],
            custom_error_type='less_than',
            custom_error_context={'lt': 42},
        )
    )
    assert v.validate_python('hello') == 'hello'
    assert v.validate_python(b'hello') == b'hello'
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(123)
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'less_than', 'loc': (), 'msg': 'Input should be less than 42', 'input': 123, 'ctx': {'lt': 42.0}}
    ]
def test_dirty_behaviour():
    """
    Check dirty-equals does what we expect.

    IsInt/IsFloat with delta=0 must distinguish 1 from 1.0 by type; the
    int/float smart-union tests below rely on that.
    """
    assert 1 == IsInt(approx=1, delta=0)
    assert 1.0 != IsInt(approx=1, delta=0)
    assert 1 != IsFloat(approx=1, delta=0)
    assert 1.0 == IsFloat(approx=1, delta=0)
def test_int_float():
    """Smart union preserves the exact numeric type of the input regardless of choice order."""
    v = SchemaValidator(core_schema.union_schema([core_schema.int_schema(), core_schema.float_schema()]))
    assert v.validate_python(1) == IsInt(approx=1, delta=0)
    assert v.validate_json('1') == IsInt(approx=1, delta=0)
    assert v.validate_python(1.0) == IsFloat(approx=1, delta=0)
    assert v.validate_json('1.0') == IsFloat(approx=1, delta=0)
    # reversed choice order must give identical results
    v = SchemaValidator(core_schema.union_schema([core_schema.float_schema(), core_schema.int_schema()]))
    assert v.validate_python(1) == IsInt(approx=1, delta=0)
    assert v.validate_json('1') == IsInt(approx=1, delta=0)
    assert v.validate_python(1.0) == IsFloat(approx=1, delta=0)
    assert v.validate_json('1.0') == IsFloat(approx=1, delta=0)
def test_str_float():
    """Smart union of str and float: numbers stay floats, strings stay strings, in both orders."""
    v = SchemaValidator(core_schema.union_schema([core_schema.str_schema(), core_schema.float_schema()]))
    assert v.validate_python(1) == IsFloat(approx=1, delta=0)
    assert v.validate_json('1') == IsFloat(approx=1, delta=0)
    assert v.validate_python(1.0) == IsFloat(approx=1, delta=0)
    assert v.validate_json('1.0') == IsFloat(approx=1, delta=0)
    # numeric-looking strings are NOT coerced to float
    assert v.validate_python('1.0') == '1.0'
    assert v.validate_python('1') == '1'
    assert v.validate_json('"1.0"') == '1.0'
    assert v.validate_json('"1"') == '1'
    # reversed choice order must give identical results
    v = SchemaValidator(core_schema.union_schema([core_schema.float_schema(), core_schema.str_schema()]))
    assert v.validate_python(1) == IsFloat(approx=1, delta=0)
    assert v.validate_json('1') == IsFloat(approx=1, delta=0)
    assert v.validate_python(1.0) == IsFloat(approx=1, delta=0)
    assert v.validate_json('1.0') == IsFloat(approx=1, delta=0)
    assert v.validate_python('1.0') == '1.0'
    assert v.validate_python('1') == '1'
    assert v.validate_json('"1.0"') == '1.0'
    assert v.validate_json('"1"') == '1'
def test_no_strict_check():
    """A union of is-instance(int) and JSON schemas accepts both a raw int and a JSON document string."""
    validator = SchemaValidator(
        core_schema.union_schema([core_schema.is_instance_schema(int), core_schema.json_schema()])
    )
    assert validator.validate_python(123) == 123
    assert validator.validate_python('[1, 2, 3]') == [1, 2, 3]
def test_strict_reference():
    """A self-referential union built via definition references validates arbitrarily nested tuples."""
    v = SchemaValidator(
        core_schema.definitions_schema(
            core_schema.definition_reference_schema(schema_ref='tuple-ref'),
            [
                # tuple[float, int | <this same tuple>] — recursion through 'tuple-ref'
                core_schema.tuple_positional_schema(
                    [
                        core_schema.float_schema(),
                        core_schema.union_schema(
                            [core_schema.int_schema(), core_schema.definition_reference_schema('tuple-ref')]
                        ),
                    ],
                    ref='tuple-ref',
                )
            ],
        )
    )
    assert repr(v.validate_python((1, 2))) == '(1.0, 2)'
    assert repr(v.validate_python((1.0, (2.0, 3)))) == '(1.0, (2.0, 3))'
def test_case_labels():
    """A (schema, label) tuple choice uses the label in error locations and the union title."""
    v = SchemaValidator(
        core_schema.union_schema(
            choices=[core_schema.none_schema(), ({'type': 'int'}, 'my_label'), core_schema.str_schema()]
        )
    )
    assert v.validate_python(None) is None
    assert v.validate_python(1) == 1
    with pytest.raises(ValidationError, match=r'3 validation errors for union\[none,my_label,str]') as exc_info:
        v.validate_python(1.5)
    assert exc_info.value.errors(include_url=False) == [
        {'input': 1.5, 'loc': ('none',), 'msg': 'Input should be None', 'type': 'none_required'},
        {
            'input': 1.5,
            'loc': ('my_label',),
            'msg': 'Input should be a valid integer, got a number with a fractional part',
            'type': 'int_from_float',
        },
        {'input': 1.5, 'loc': ('str',), 'msg': 'Input should be a valid string', 'type': 'string_type'},
    ]
def test_left_to_right_doesnt_care_about_strict_check():
    """left_to_right mode does not build the strict/ultra-strict pass machinery that smart mode uses."""
    v = SchemaValidator(
        core_schema.union_schema([core_schema.int_schema(), core_schema.json_schema()], mode='left_to_right')
    )
    assert 'strict_required' not in plain_repr(v)
    assert 'ultra_strict_required' not in plain_repr(v)
def test_left_to_right_union():
    """In left_to_right mode the first matching choice wins, so choice order decides the output type."""
    choices = [core_schema.int_schema(), core_schema.float_schema()]
    # smart union prefers float
    v = SchemaValidator(core_schema.union_schema(choices, mode='smart'))
    out = v.validate_python(1.0)
    assert out == 1.0
    assert isinstance(out, float)
    # left_to_right union will select int
    v = SchemaValidator(core_schema.union_schema(choices, mode='left_to_right'))
    out = v.validate_python(1)
    assert out == 1
    assert isinstance(out, int)
    out = v.validate_python(1.0)
    assert out == 1
    assert isinstance(out, int)
    # reversing them will select float
    v = SchemaValidator(core_schema.union_schema(list(reversed(choices)), mode='left_to_right'))
    out = v.validate_python(1.0)
    assert out == 1.0
    assert isinstance(out, float)
    out = v.validate_python(1)
    assert out == 1.0
    assert isinstance(out, float)
def test_left_to_right_union_strict():
    """left_to_right with strict members: the first choice that accepts the input in strict mode wins."""
    choices = [core_schema.int_schema(strict=True), core_schema.float_schema(strict=True)]
    # left_to_right union will select not cast if int first (strict int will not accept float)
    v = SchemaValidator(core_schema.union_schema(choices, mode='left_to_right'))
    out = v.validate_python(1)
    assert out == 1
    assert isinstance(out, int)
    out = v.validate_python(1.0)
    assert out == 1.0
    assert isinstance(out, float)
    # reversing union will select float always (as strict float will accept int)
    v = SchemaValidator(
        core_schema.union_schema(
            list(reversed(choices)),
            mode='left_to_right',
        )
    )
    out = v.validate_python(1.0)
    assert out == 1.0
    assert isinstance(out, float)
    out = v.validate_python(1)
    assert out == 1.0
    assert isinstance(out, float)
def test_union_function_before_called_once():
    """A before-validator in a union member must run exactly once per validation."""
    # See https://github.com/pydantic/pydantic/issues/6830 - in particular the
    # smart union validator used to call `remove_prefix` twice, which is not
    # ideal from a user perspective.
    class SpecialValues(str, Enum):
        DEFAULT = 'default'
        OTHER = 'other'
    special_values_schema = core_schema.no_info_after_validator_function(SpecialValues, core_schema.str_schema())
    validator_called_count = 0
    def remove_prefix(v: str):
        # counts invocations so we can assert the single-call guarantee
        nonlocal validator_called_count
        validator_called_count += 1
        if v.startswith('uuid::'):
            return v[6:]
        return v
    prefixed_uuid_schema = core_schema.no_info_before_validator_function(remove_prefix, core_schema.uuid_schema())
    v = SchemaValidator(core_schema.union_schema([special_values_schema, prefixed_uuid_schema]))
    assert v.validate_python('uuid::12345678-1234-5678-1234-567812345678') == UUID(
        '12345678-1234-5678-1234-567812345678'
    )
    assert validator_called_count == 1
@pytest.mark.parametrize(
    ('schema', 'input_value', 'expected_value'),
    (
        (
            core_schema.uuid_schema(),
            '12345678-1234-5678-1234-567812345678',
            UUID('12345678-1234-5678-1234-567812345678'),
        ),
        (core_schema.date_schema(), '2020-01-01', date(2020, 1, 1)),
        (core_schema.time_schema(), '00:00:00', time(0, 0, 0)),
        # In V2.4 these already returned strings, so we keep this behaviour in V2
        (core_schema.datetime_schema(), '2020-01-01:00:00:00', '2020-01-01:00:00:00'),
        (core_schema.url_schema(), 'https://foo.com', 'https://foo.com'),
        (core_schema.multi_host_url_schema(), 'https://bar.com,foo.com', 'https://bar.com,foo.com'),
    ),
)
def test_smart_union_json_string_types(schema: core_schema.CoreSchema, input_value: str, expected_value: Any):
    """In JSON mode, string-encoded types (uuid/date/time) beat plain str; in Python mode str wins."""
    # Many types have to be represented in strings as JSON, we make sure that
    # when parsing in JSON mode these types are preferred
    # TODO: in V3 we will make str win in all these cases.
    validator = SchemaValidator(core_schema.union_schema([schema, core_schema.str_schema()]))
    assert validator.validate_json(f'"{input_value}"') == expected_value
    # in Python mode the string will be preferred
    assert validator.validate_python(input_value) == input_value
@pytest.mark.parametrize(
    ('schema', 'input_value'),
    (
        pytest.param(
            core_schema.uuid_schema(),
            '12345678-1234-5678-1234-567812345678',
            marks=pytest.mark.xfail(reason='TODO: V3'),
        ),
        (core_schema.date_schema(), '2020-01-01'),
        (core_schema.time_schema(), '00:00:00'),
        (core_schema.datetime_schema(), '2020-01-01:00:00:00'),
        (core_schema.url_schema(), 'https://foo.com'),
        (core_schema.multi_host_url_schema(), 'https://bar.com,foo.com'),
    ),
)
def test_smart_union_json_string_types_str_first(schema: core_schema.CoreSchema, input_value: str):
    """With str as the first choice, the raw string wins in both JSON and Python modes."""
    # As above, but reversed order; str should always win
    validator = SchemaValidator(core_schema.union_schema([core_schema.str_schema(), schema]))
    assert validator.validate_json(f'"{input_value}"') == input_value
    assert validator.validate_python(input_value) == input_value
def test_smart_union_default_fallback():
    """Using a default value does not affect the exactness of the smart union match."""
    class ModelA:
        x: int
        y: int = 1
    class ModelB:
        x: int
    schema = core_schema.union_schema(
        [
            core_schema.model_schema(
                ModelA,
                core_schema.model_fields_schema(
                    {
                        'x': core_schema.model_field(core_schema.int_schema()),
                        'y': core_schema.model_field(
                            core_schema.with_default_schema(core_schema.int_schema(), default=1)
                        ),
                    }
                ),
            ),
            core_schema.model_schema(
                ModelB, core_schema.model_fields_schema({'x': core_schema.model_field(core_schema.int_schema())})
            ),
        ]
    )
    validator = SchemaValidator(schema)
    # {'x': 1} matches ModelA (first choice) even though y falls back to its default
    result = validator.validate_python({'x': 1})
    assert isinstance(result, ModelA)
    assert result.x == 1
    assert result.y == 1
    # passing a ModelB explicitly will not match the default value
    b = ModelB()
    assert validator.validate_python(b) is b
def test_smart_union_model_field():
    """Smart union picks the model whose field type matches the input exactly (int vs str)."""
    class ModelA:
        x: int
    class ModelB:
        x: str
    schema = core_schema.union_schema(
        [
            core_schema.model_schema(
                ModelA, core_schema.model_fields_schema({'x': core_schema.model_field(core_schema.int_schema())})
            ),
            core_schema.model_schema(
                ModelB, core_schema.model_fields_schema({'x': core_schema.model_field(core_schema.str_schema())})
            ),
        ]
    )
    validator = SchemaValidator(schema)
    result = validator.validate_python({'x': 1})
    assert isinstance(result, ModelA)
    assert result.x == 1
    result = validator.validate_python({'x': '1'})
    assert isinstance(result, ModelB)
    assert result.x == '1'
def test_smart_union_dataclass_field():
@dataclass
class ModelA:
x: int
@dataclass
class ModelB:
x: str
schema = core_schema.union_schema(
[
core_schema.dataclass_schema(
ModelA,
core_schema.dataclass_args_schema(
'ModelA', [core_schema.dataclass_field('x', core_schema.int_schema())]
),
['x'],
),
core_schema.dataclass_schema(
ModelB,
core_schema.dataclass_args_schema(
'ModelB', [core_schema.dataclass_field('x', core_schema.str_schema())]
),
['x'],
),
]
)
validator = SchemaValidator(schema)
result = validator.validate_python({'x': 1})
assert isinstance(result, ModelA)
assert result.x == 1
result = validator.validate_python({'x': '1'})
assert isinstance(result, ModelB)
assert result.x == '1'
def test_smart_union_with_any():
    """any is preferred over lax validations"""
    # str not coerced to int
    schema = core_schema.union_schema([core_schema.int_schema(), core_schema.any_schema()])
    validator = SchemaValidator(schema)
    assert validator.validate_python('1') == '1'
    # int *is* coerced to float, this is a strict validation
    schema = core_schema.union_schema([core_schema.float_schema(), core_schema.any_schema()])
    validator = SchemaValidator(schema)
    assert repr(validator.validate_python(1)) == '1.0'
def test_smart_union_validator_function():
    """adding a validator function should not change smart union behaviour"""
    inner_schema = core_schema.union_schema([core_schema.int_schema(), core_schema.float_schema()])
    validator = SchemaValidator(inner_schema)
    assert repr(validator.validate_python(1)) == '1'
    assert repr(validator.validate_python(1.0)) == '1.0'
    # after-validator wrapping the inner union: numeric type selection is preserved
    schema = core_schema.union_schema(
        [core_schema.no_info_after_validator_function(lambda v: v * 2, inner_schema), core_schema.str_schema()]
    )
    validator = SchemaValidator(schema)
    assert repr(validator.validate_python(1)) == '2'
    assert repr(validator.validate_python(1.0)) == '2.0'
    assert validator.validate_python('1') == '1'
    # same check with a wrap-validator
    schema = core_schema.union_schema(
        [
            core_schema.no_info_wrap_validator_function(lambda v, handler: handler(v) * 2, inner_schema),
            core_schema.str_schema(),
        ]
    )
    validator = SchemaValidator(schema)
    assert repr(validator.validate_python(1)) == '2'
    assert repr(validator.validate_python(1.0)) == '2.0'
    assert validator.validate_python('1') == '1'
def test_smart_union_validator_function_one_arm():
    """adding a validator function should not change smart union behaviour"""
    # after-validator on the int arm only: ints still route to the int arm
    schema = core_schema.union_schema(
        [
            core_schema.float_schema(),
            core_schema.no_info_after_validator_function(lambda v: v * 2, core_schema.int_schema()),
        ]
    )
    validator = SchemaValidator(schema)
    assert repr(validator.validate_python(1)) == '2'
    assert repr(validator.validate_python(1.0)) == '1.0'
    # same check with a wrap-validator
    schema = core_schema.union_schema(
        [
            core_schema.float_schema(),
            core_schema.no_info_wrap_validator_function(lambda v, handler: handler(v) * 2, core_schema.int_schema()),
        ]
    )
    validator = SchemaValidator(schema)
    assert repr(validator.validate_python(1)) == '2'
    assert repr(validator.validate_python(1.0)) == '1.0'
def test_int_not_coerced_to_enum():
    """Plain ints in a union with an IntEnum branch stay plain ints; enum members pass through unchanged."""
    class BinaryEnum(IntEnum):
        ZERO = 0
        ONE = 1
    enum_schema = core_schema.lax_or_strict_schema(
        core_schema.no_info_after_validator_function(BinaryEnum, core_schema.int_schema()),
        core_schema.is_instance_schema(BinaryEnum),
    )
    schema = core_schema.union_schema([enum_schema, core_schema.int_schema()])
    validator = SchemaValidator(schema)
    # identity checks: 0/1 match the int branch, not the lax enum conversion
    assert validator.validate_python(0) is not BinaryEnum.ZERO
    assert validator.validate_python(1) is not BinaryEnum.ONE
    assert validator.validate_python(BinaryEnum.ZERO) is BinaryEnum.ZERO
    assert validator.validate_python(BinaryEnum.ONE) is BinaryEnum.ONE
def test_model_and_literal_union() -> None:
    """An unhashable input must not abort the whole union when a Literal branch fails on it."""
    # see https://github.com/pydantic/pydantic/issues/8183
    class ModelA:
        pass
    validator = SchemaValidator(
        core_schema.union_schema(
            choices=[
                core_schema.model_schema(
                    cls=ModelA,
                    schema=core_schema.model_fields_schema(
                        fields={'a': core_schema.model_field(schema=core_schema.int_schema())}
                    ),
                ),
                core_schema.literal_schema(expected=[True]),
            ]
        )
    )
    # validation against Literal[True] fails bc of the unhashable dict
    # A ValidationError is raised, not a ValueError, which allows the validation against the union to continue
    m = validator.validate_python({'a': 42})
    assert isinstance(m, ModelA)
    assert m.a == 42
    assert validator.validate_python(True) is True
def permute_choices(choices: list[core_schema.CoreSchema]) -> list[list[core_schema.CoreSchema]]:
    """Return every ordering of ``choices``, each ordering as a list (used to check order-independence)."""
    return list(map(list, permutations(choices)))
class TestSmartUnionWithSubclass:
    """Smart union should pick the member consuming more input fields, even across a
    subclass relationship and regardless of choice order."""

    class ModelA:
        a: int

    class ModelB(ModelA):
        b: int

    model_a_schema = core_schema.model_schema(
        ModelA, core_schema.model_fields_schema(fields={'a': core_schema.model_field(core_schema.int_schema())})
    )
    model_b_schema = core_schema.model_schema(
        ModelB,
        core_schema.model_fields_schema(
            fields={
                'a': core_schema.model_field(core_schema.int_schema()),
                'b': core_schema.model_field(core_schema.int_schema()),
            }
        ),
    )

    @pytest.mark.parametrize('choices', permute_choices([model_a_schema, model_b_schema]))
    def test_more_specific_data_matches_subclass(self, choices) -> None:
        """Data with both fields selects ModelB; data with only 'a' selects ModelA."""
        validator = SchemaValidator(core_schema.union_schema(choices))
        assert isinstance(validator.validate_python({'a': 1}), self.ModelA)
        # NOTE: the original asserted this exact line twice; the duplicate was removed
        assert isinstance(validator.validate_python({'a': 1, 'b': 2}), self.ModelB)
        # confirm that a model that matches in lax mode with 2 fields
        # is preferred over a model that matches in strict mode with 1 field
        assert isinstance(validator.validate_python({'a': '1', 'b': '2'}), self.ModelB)
        assert isinstance(validator.validate_python({'a': '1', 'b': 2}), self.ModelB)
        assert isinstance(validator.validate_python({'a': 1, 'b': '2'}), self.ModelB)
class TestSmartUnionWithDefaults:
    """Smart union tie-breaking when every member field has a default value."""
    class ModelA:
        a: int = 0
    class ModelB:
        b: int = 0
    model_a_schema = core_schema.model_schema(
        ModelA,
        core_schema.model_fields_schema(
            fields={'a': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0))}
        ),
    )
    model_b_schema = core_schema.model_schema(
        ModelB,
        core_schema.model_fields_schema(
            fields={'b': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0))}
        ),
    )
    @pytest.mark.parametrize('choices', permute_choices([model_a_schema, model_b_schema]))
    def test_fields_set_ensures_best_match(self, choices) -> None:
        """The member whose field is actually present in the input wins."""
        validator = SchemaValidator(core_schema.union_schema(choices))
        assert isinstance(validator.validate_python({'a': 1}), self.ModelA)
        assert isinstance(validator.validate_python({'b': 1}), self.ModelB)
        # defaults to leftmost choice if there's a tie
        assert isinstance(validator.validate_python({}), choices[0]['cls'])
    @pytest.mark.parametrize('choices', permute_choices([model_a_schema, model_b_schema]))
    def test_optional_union_with_members_having_defaults(self, choices) -> None:
        """Wrapping the union in Optional-with-default keeps the same best-match behaviour."""
        class WrapModel:
            val: Optional[Union[self.ModelA, self.ModelB]] = None
        val = SchemaValidator(
            schema=core_schema.model_schema(
                WrapModel,
                core_schema.model_fields_schema(
                    fields={
                        'val': core_schema.model_field(
                            core_schema.with_default_schema(
                                core_schema.union_schema(choices),
                                default=None,
                            )
                        )
                    }
                ),
            )
        )
        assert isinstance(val.validate_python({'val': {'a': 1}}).val, self.ModelA)
        assert isinstance(val.validate_python({'val': {'b': 1}}).val, self.ModelB)
        assert val.validate_python({}).val is None
def test_dc_smart_union_by_fields_set() -> None:
    """Dataclass variant of the subclass test: more consumed fields wins, lax matches included."""
    @dataclass
    class ModelA:
        x: int
    @dataclass
    class ModelB(ModelA):
        y: int
    dc_a_schema = core_schema.dataclass_schema(
        ModelA,
        core_schema.dataclass_args_schema('ModelA', [core_schema.dataclass_field('x', core_schema.int_schema())]),
        ['x'],
    )
    dc_b_schema = core_schema.dataclass_schema(
        ModelB,
        core_schema.dataclass_args_schema(
            'ModelB',
            [
                core_schema.dataclass_field('x', core_schema.int_schema()),
                core_schema.dataclass_field('y', core_schema.int_schema()),
            ],
        ),
        ['x', 'y'],
    )
    # result must be the same for either choice order
    for choices in permute_choices([dc_a_schema, dc_b_schema]):
        validator = SchemaValidator(core_schema.union_schema(choices=choices))
        assert isinstance(validator.validate_python({'x': 1}), ModelA)
        assert isinstance(validator.validate_python({'x': '1'}), ModelA)
        assert isinstance(validator.validate_python({'x': 1, 'y': 2}), ModelB)
        assert isinstance(validator.validate_python({'x': 1, 'y': '2'}), ModelB)
        assert isinstance(validator.validate_python({'x': '1', 'y': 2}), ModelB)
        assert isinstance(validator.validate_python({'x': '1', 'y': '2'}), ModelB)
def test_dc_smart_union_with_defaults() -> None:
    """Dataclass variant of the defaults test: the member whose field is present in the input wins."""
    @dataclass
    class ModelA:
        a: int = 0
    @dataclass
    class ModelB:
        b: int = 0
    dc_a_schema = core_schema.dataclass_schema(
        ModelA,
        core_schema.dataclass_args_schema(
            'ModelA',
            [
                core_schema.dataclass_field(
                    'a', core_schema.with_default_schema(schema=core_schema.int_schema(), default=0)
                )
            ],
        ),
        ['a'],
    )
    dc_b_schema = core_schema.dataclass_schema(
        ModelB,
        core_schema.dataclass_args_schema(
            'ModelB',
            [
                core_schema.dataclass_field(
                    'b', core_schema.with_default_schema(schema=core_schema.int_schema(), default=0)
                )
            ],
        ),
        ['b'],
    )
    # result must be the same for either choice order
    for choices in permute_choices([dc_a_schema, dc_b_schema]):
        validator = SchemaValidator(core_schema.union_schema(choices=choices))
        assert isinstance(validator.validate_python({'a': 1}), ModelA)
        assert isinstance(validator.validate_python({'b': 1}), ModelB)
def test_td_smart_union_by_fields_set() -> None:
    """TypedDict variant: the dict consuming more of the input fields wins, lax matches included."""
    td_a_schema = core_schema.typed_dict_schema(
        fields={'x': core_schema.typed_dict_field(core_schema.int_schema())},
    )
    td_b_schema = core_schema.typed_dict_schema(
        fields={
            'x': core_schema.typed_dict_field(core_schema.int_schema()),
            'y': core_schema.typed_dict_field(core_schema.int_schema()),
        },
    )
    # result must be the same for either choice order
    for choices in permute_choices([td_a_schema, td_b_schema]):
        validator = SchemaValidator(core_schema.union_schema(choices=choices))
        assert set(validator.validate_python({'x': 1}).keys()) == {'x'}
        assert set(validator.validate_python({'x': '1'}).keys()) == {'x'}
        assert set(validator.validate_python({'x': 1, 'y': 2}).keys()) == {'x', 'y'}
        assert set(validator.validate_python({'x': 1, 'y': '2'}).keys()) == {'x', 'y'}
        assert set(validator.validate_python({'x': '1', 'y': 2}).keys()) == {'x', 'y'}
        assert set(validator.validate_python({'x': '1', 'y': '2'}).keys()) == {'x', 'y'}
def test_smart_union_does_nested_model_field_counting() -> None:
    """Field counting for smart-union selection recurses into nested model fields."""
    class SubModelA:
        x: int = 1
    class SubModelB:
        y: int = 2
    class ModelA:
        sub: SubModelA
    class ModelB:
        sub: SubModelB
    model_a_schema = core_schema.model_schema(
        ModelA,
        core_schema.model_fields_schema(
            fields={
                'sub': core_schema.model_field(
                    core_schema.model_schema(
                        SubModelA,
                        core_schema.model_fields_schema(
                            fields={
                                'x': core_schema.model_field(
                                    core_schema.with_default_schema(core_schema.int_schema(), default=1)
                                )
                            }
                        ),
                    )
                )
            }
        ),
    )
    model_b_schema = core_schema.model_schema(
        ModelB,
        core_schema.model_fields_schema(
            fields={
                'sub': core_schema.model_field(
                    core_schema.model_schema(
                        SubModelB,
                        core_schema.model_fields_schema(
                            fields={
                                'y': core_schema.model_field(
                                    core_schema.with_default_schema(core_schema.int_schema(), default=2)
                                )
                            }
                        ),
                    )
                )
            }
        ),
    )
    # the only distinguishing field is one level down, inside 'sub'
    for choices in permute_choices([model_a_schema, model_b_schema]):
        validator = SchemaValidator(core_schema.union_schema(choices=choices))
        assert isinstance(validator.validate_python({'sub': {'x': 1}}), ModelA)
        assert isinstance(validator.validate_python({'sub': {'y': 3}}), ModelB)
        # defaults to leftmost choice if there's a tie
        assert isinstance(validator.validate_python({'sub': {}}), choices[0]['cls'])
def test_smart_union_does_nested_dataclass_field_counting() -> None:
    """Field counting for smart-union selection recurses into nested dataclass fields."""
    @dataclass
    class SubModelA:
        x: int = 1
    @dataclass
    class SubModelB:
        y: int = 2
    @dataclass
    class ModelA:
        sub: SubModelA
    @dataclass
    class ModelB:
        sub: SubModelB
    dc_a_schema = core_schema.dataclass_schema(
        ModelA,
        core_schema.dataclass_args_schema(
            'ModelA',
            [
                core_schema.dataclass_field(
                    'sub',
                    core_schema.with_default_schema(
                        core_schema.dataclass_schema(
                            SubModelA,
                            core_schema.dataclass_args_schema(
                                'SubModelA',
                                [
                                    core_schema.dataclass_field(
                                        'x', core_schema.with_default_schema(core_schema.int_schema(), default=1)
                                    )
                                ],
                            ),
                            ['x'],
                        ),
                        default=SubModelA(),
                    ),
                )
            ],
        ),
        ['sub'],
    )
    dc_b_schema = core_schema.dataclass_schema(
        ModelB,
        core_schema.dataclass_args_schema(
            'ModelB',
            [
                core_schema.dataclass_field(
                    'sub',
                    core_schema.with_default_schema(
                        core_schema.dataclass_schema(
                            SubModelB,
                            core_schema.dataclass_args_schema(
                                'SubModelB',
                                [
                                    core_schema.dataclass_field(
                                        'y', core_schema.with_default_schema(core_schema.int_schema(), default=2)
                                    )
                                ],
                            ),
                            ['y'],
                        ),
                        default=SubModelB(),
                    ),
                )
            ],
        ),
        ['sub'],
    )
    # the only distinguishing field is one level down, inside 'sub'
    for choices in permute_choices([dc_a_schema, dc_b_schema]):
        validator = SchemaValidator(core_schema.union_schema(choices=choices))
        assert isinstance(validator.validate_python({'sub': {'x': 1}}), ModelA)
        assert isinstance(validator.validate_python({'sub': {'y': 3}}), ModelB)
        # defaults to leftmost choice if there's a tie
        assert isinstance(validator.validate_python({'sub': {}}), choices[0]['cls'])
def test_smart_union_does_nested_typed_dict_field_counting() -> None:
    """Field counting for smart-union selection recurses into nested TypedDict fields."""
    td_a_schema = core_schema.typed_dict_schema(
        fields={
            'sub': core_schema.typed_dict_field(
                core_schema.typed_dict_schema(fields={'x': core_schema.typed_dict_field(core_schema.int_schema())})
            )
        }
    )
    td_b_schema = core_schema.typed_dict_schema(
        fields={
            'sub': core_schema.typed_dict_field(
                core_schema.typed_dict_schema(fields={'y': core_schema.typed_dict_field(core_schema.int_schema())})
            )
        }
    )
    # the only distinguishing field is one level down, inside 'sub'
    for choices in permute_choices([td_a_schema, td_b_schema]):
        validator = SchemaValidator(core_schema.union_schema(choices=choices))
        assert set(validator.validate_python({'sub': {'x': 1}})['sub'].keys()) == {'x'}
        assert set(validator.validate_python({'sub': {'y': 2}})['sub'].keys()) == {'y'}
def test_nested_unions_bubble_up_field_count() -> None:
    """A union nested inside a model field propagates its best member's field count to the outer union."""
    class SubModelX:
        x1: int = 0
        x2: int = 0
        x3: int = 0
    class SubModelY:
        x1: int = 0
        x2: int = 0
        x3: int = 0
    class SubModelZ:
        z1: int = 0
        z2: int = 0
        z3: int = 0
    class SubModelW:
        w1: int = 0
        w2: int = 0
        w3: int = 0
    class ModelA:
        a: Union[SubModelX, SubModelY]
    class ModelB:
        b: Union[SubModelZ, SubModelW]
    model_x_schema = core_schema.model_schema(
        SubModelX,
        core_schema.model_fields_schema(
            fields={
                'x1': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
                'x2': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
                'x3': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
            }
        ),
    )
    model_y_schema = core_schema.model_schema(
        SubModelY,
        core_schema.model_fields_schema(
            fields={
                'x1': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
                'x2': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
                'x3': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
            }
        ),
    )
    model_z_schema = core_schema.model_schema(
        SubModelZ,
        core_schema.model_fields_schema(
            fields={
                'z1': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
                'z2': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
                'z3': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
            }
        ),
    )
    model_w_schema = core_schema.model_schema(
        SubModelW,
        core_schema.model_fields_schema(
            fields={
                'w1': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
                'w2': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
                'w3': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
            }
        ),
    )
    # exercise every combination of inner-union orderings to prove order-independence
    model_a_schema_options = [
        core_schema.union_schema([model_x_schema, model_y_schema]),
        core_schema.union_schema([model_y_schema, model_x_schema]),
    ]
    model_b_schema_options = [
        core_schema.union_schema([model_z_schema, model_w_schema]),
        core_schema.union_schema([model_w_schema, model_z_schema]),
    ]
    for model_a_schema in model_a_schema_options:
        for model_b_schema in model_b_schema_options:
            validator = SchemaValidator(
                schema=core_schema.union_schema(
                    [
                        core_schema.model_schema(
                            ModelA,
                            core_schema.model_fields_schema(fields={'a': core_schema.model_field(model_a_schema)}),
                        ),
                        core_schema.model_schema(
                            ModelB,
                            core_schema.model_fields_schema(fields={'b': core_schema.model_field(model_b_schema)}),
                        ),
                    ]
                )
            )
            # ModelB consumes all three 'w*' fields; ModelA's branch consumes fewer of its input keys
            result = validator.validate_python(
                {'a': {'x1': 1, 'x2': 2, 'y1': 1, 'y2': 2}, 'b': {'w1': 1, 'w2': 2, 'w3': 3}}
            )
            assert isinstance(result, ModelB)
            assert isinstance(result.b, SubModelW)
@pytest.mark.parametrize('extra_behavior', ['forbid', 'ignore', 'allow'])
def test_smart_union_extra_behavior(extra_behavior) -> None:
    """The model extra_behavior setting does not change which union member is selected."""
    class Foo:
        foo: str = 'foo'
    class Bar:
        bar: str = 'bar'
    class Model:
        x: Union[Foo, Bar]
    validator = SchemaValidator(
        core_schema.model_schema(
            Model,
            core_schema.model_fields_schema(
                fields={
                    'x': core_schema.model_field(
                        core_schema.union_schema(
                            [
                                core_schema.model_schema(
                                    Foo,
                                    core_schema.model_fields_schema(
                                        fields={
                                            'foo': core_schema.model_field(
                                                core_schema.with_default_schema(core_schema.str_schema(), default='foo')
                                            )
                                        }
                                    ),
                                    extra_behavior=extra_behavior,
                                ),
                                core_schema.model_schema(
                                    Bar,
                                    core_schema.model_fields_schema(
                                        fields={
                                            'bar': core_schema.model_field(
                                                core_schema.with_default_schema(core_schema.str_schema(), default='bar')
                                            )
                                        }
                                    ),
                                    extra_behavior=extra_behavior,
                                ),
                            ]
                        )
                    )
                }
            ),
        )
    )
    assert isinstance(validator.validate_python({'x': {'foo': 'foo'}}).x, Foo)
    assert isinstance(validator.validate_python({'x': {'bar': 'bar'}}).x, Bar)
def test_smart_union_wrap_validator_should_not_change_nested_model_field_counts() -> None:
    """Adding a wrap validator on a union member should not affect smart union behavior"""
    class SubModel:
        x: str = 'x'
    class ModelA:
        type: str = 'A'
        sub: SubModel
    class ModelB:
        type: str = 'B'
        sub: SubModel
    submodel_schema = core_schema.model_schema(
        SubModel,
        core_schema.model_fields_schema(fields={'x': core_schema.model_field(core_schema.str_schema())}),
    )
    # identity wrap validator: must be transparent to the union's field counting
    wrapped_submodel_schema = core_schema.no_info_wrap_validator_function(
        lambda v, handler: handler(v), submodel_schema
    )
    model_a_schema = core_schema.model_schema(
        ModelA,
        core_schema.model_fields_schema(
            fields={
                'type': core_schema.model_field(
                    core_schema.with_default_schema(core_schema.literal_schema(['A']), default='A'),
                ),
                'sub': core_schema.model_field(wrapped_submodel_schema),
            },
        ),
    )
    model_b_schema = core_schema.model_schema(
        ModelB,
        core_schema.model_fields_schema(
            fields={
                'type': core_schema.model_field(
                    core_schema.with_default_schema(core_schema.literal_schema(['B']), default='B'),
                ),
                'sub': core_schema.model_field(submodel_schema),
            },
        ),
    )
    for choices in permute_choices([model_a_schema, model_b_schema]):
        schema = core_schema.union_schema(choices)
        validator = SchemaValidator(schema)
        assert isinstance(validator.validate_python({'type': 'A', 'sub': {'x': 'x'}}), ModelA)
        assert isinstance(validator.validate_python({'type': 'B', 'sub': {'x': 'x'}}), ModelB)
        # defaults to leftmost choice if there's a tie
        assert isinstance(validator.validate_python({'sub': {'x': 'x'}}), choices[0]['cls'])
    # test validate_assignment
    class RootModel:
        ab: Union[ModelA, ModelB]
    root_model = core_schema.model_schema(
        RootModel,
        core_schema.model_fields_schema(
            fields={'ab': core_schema.model_field(core_schema.union_schema([model_a_schema, model_b_schema]))}
        ),
    )
    validator = SchemaValidator(root_model)
    m = validator.validate_python({'ab': {'type': 'B', 'sub': {'x': 'x'}}})
    assert isinstance(m, RootModel)
    assert isinstance(m.ab, ModelB)
    assert m.ab.sub.x == 'x'
    # reassigning without a 'type' key re-runs the union and falls back to ModelA (leftmost)
    m = validator.validate_assignment(m, 'ab', {'sub': {'x': 'y'}})
    assert isinstance(m, RootModel)
    assert isinstance(m.ab, ModelA)
    assert m.ab.sub.x == 'y'
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_union.py",
"license": "MIT License",
"lines": 1182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_url.py | import re
from copy import deepcopy
from typing import Optional, Union
import pytest
from dirty_equals import HasRepr, IsInstance
from pydantic_core import CoreConfig, MultiHostUrl, SchemaError, SchemaValidator, Url, ValidationError, core_schema
from ..conftest import Err, PyAndJson
def test_url_ok(py_and_json: PyAndJson):
    """A well-formed https URL validates and exposes each of its components."""
    validator = py_and_json(core_schema.url_schema())
    parsed = validator.validate_test('https://example.com/foo/bar?baz=qux#quux')

    assert isinstance(parsed, Url)
    # All string forms round-trip the input unchanged.
    assert str(parsed) == 'https://example.com/foo/bar?baz=qux#quux'
    assert repr(parsed) == "Url('https://example.com/foo/bar?baz=qux#quux')"
    assert parsed.unicode_string() == 'https://example.com/foo/bar?baz=qux#quux'
    # Individual accessors.
    assert parsed.scheme == 'https'
    assert parsed.host == 'example.com'
    assert parsed.unicode_host() == 'example.com'
    assert parsed.path == '/foo/bar'
    assert parsed.query == 'baz=qux'
    assert parsed.query_params() == [('baz', 'qux')]
    assert parsed.fragment == 'quux'
    # No userinfo in the input; port is 443, the https default.
    assert parsed.username is None
    assert parsed.password is None
    assert parsed.port == 443
def test_url_from_constructor_ok():
    """Constructing Url directly (no SchemaValidator) parses the same way."""
    parsed = Url('https://example.com/foo/bar?baz=qux#quux')
    assert isinstance(parsed, Url)
    # str() and unicode_string() agree and round-trip the input.
    for rendered in (str(parsed), parsed.unicode_string()):
        assert rendered == 'https://example.com/foo/bar?baz=qux#quux'
    assert repr(parsed) == "Url('https://example.com/foo/bar?baz=qux#quux')"
    # Component accessors.
    assert parsed.scheme == 'https'
    assert parsed.host == 'example.com'
    assert parsed.unicode_host() == 'example.com'
    assert parsed.path == '/foo/bar'
    assert parsed.query == 'baz=qux'
    assert parsed.query_params() == [('baz', 'qux')]
    assert parsed.fragment == 'quux'
    # No credentials supplied; port is the https default.
    assert parsed.username is None
    assert parsed.password is None
    assert parsed.port == 443
@pytest.fixture(scope='module', name='url_validator')
def url_validator_fixture():
    """Module-scoped validator for a plain url_schema()."""
    schema = core_schema.url_schema()
    return SchemaValidator(schema)
# Dispatch tags for url_test_case_helper: validate through a SchemaValidator,
# the Url class constructor, or the MultiHostUrl class constructor.
# These values are only ever compared via the constant names, so their exact
# text is free to match the names.
SCHEMA_VALIDATOR_MODE = 'SCHEMA_VALIDATOR'
URL_CLASS_MODE = 'URL_CLASS'  # was 'URI_CLASS': typo, inconsistent with the constant's name
MULTI_URL_CLASS_MODE = 'MULTI_URL_CLASS'
def url_test_case_helper(
    url: str,
    expected: Union[Err, str, dict],
    validator_mode: str,
    url_validator: Optional[SchemaValidator] = None,
):
    """Run one URL test case in the given mode and check the outcome.

    ``expected`` may be:
    - an ``Err``: expect a single ``url_parsing`` error carrying its message;
    - a ``str``: expect ``str(output)`` to equal it;
    - a ``dict``: map attribute names — or ``'name()'`` keys meaning "call the
      method" — to their expected values.

    ``url_validator`` is only consulted in ``SCHEMA_VALIDATOR_MODE``.
    """
    if isinstance(expected, Err):
        with pytest.raises(ValidationError) as exc_info:
            if validator_mode == SCHEMA_VALIDATOR_MODE:
                url_validator.validate_python(url)
            elif validator_mode == URL_CLASS_MODE:
                Url(url)
            else:  # validator_mode == MULTI_URL_CLASS_MODE:
                MultiHostUrl(url)
        assert exc_info.value.error_count() == 1
        error = exc_info.value.errors(include_url=False)[0]
        assert error['type'] == 'url_parsing'
        assert error['ctx']['error'] == expected.message
    else:
        if validator_mode == SCHEMA_VALIDATOR_MODE:
            output_url = url_validator.validate_python(url)
        elif validator_mode == URL_CLASS_MODE:
            output_url = Url(url)
        elif validator_mode == MULTI_URL_CLASS_MODE:
            output_url = MultiHostUrl(url)
        else:
            raise ValueError(f'Unknown validator mode: {validator_mode}')
        assert isinstance(output_url, (Url, MultiHostUrl))
        if isinstance(expected, str):
            assert str(output_url) == expected
        else:
            assert isinstance(expected, dict)
            # Collect actual values for every expected key, then compare in one
            # assert so a failure shows the full picture.
            output_parts = {}
            for key in expected:
                if key == 'str()':
                    output_parts[key] = str(output_url)
                elif key.endswith('()'):
                    # keys like 'unicode_host()' mean: call the method
                    output_parts[key] = getattr(output_url, key[:-2])()
                else:
                    output_parts[key] = getattr(output_url, key)
            assert output_parts == expected
@pytest.mark.parametrize('mode', [SCHEMA_VALIDATOR_MODE, URL_CLASS_MODE])
@pytest.mark.parametrize(
'url,expected',
[
('', Err('input is empty')),
(':,', Err('relative URL without a base')),
(
'http://example.com',
{
'str()': 'http://example.com/',
'host': 'example.com',
'unicode_host()': 'example.com',
'unicode_string()': 'http://example.com/',
},
),
('http://exa\nmple.com', {'str()': 'http://example.com/', 'host': 'example.com'}),
('xxx', Err('relative URL without a base')),
('http://', Err('empty host')),
('https://xn---', Err('invalid international domain name')),
('http://example.com:65535', 'http://example.com:65535/'),
('http:\\\\example.com', 'http://example.com/'),
('http:example.com', 'http://example.com/'),
('http:example.com/path', 'http://example.com/path'),
('http:example.com/path/', 'http://example.com/path/'),
('http:example.com?query=nopath', 'http://example.com/?query=nopath'),
('http:example.com/?query=haspath', 'http://example.com/?query=haspath'),
('http:example.com#nopath', 'http://example.com/#nopath'),
('http:example.com/#haspath', 'http://example.com/#haspath'),
('http://example.com:65536', Err('invalid port number')),
('http://1...1', Err('invalid IPv4 address')),
('https://[2001:0db8:85a3:0000:0000:8a2e:0370:7334[', Err('invalid IPv6 address')),
('https://[', Err('invalid IPv6 address')),
('https://example com', Err('invalid international domain name')),
('http://exam%ple.com', Err('invalid international domain name')),
('http:// /', Err('invalid international domain name')),
('/more', Err('relative URL without a base')),
('http://example.com./foobar', {'str()': 'http://example.com./foobar'}),
# works since we're in lax mode
(b'http://example.com', {'str()': 'http://example.com/', 'unicode_host()': 'example.com'}),
('http:/foo', {'str()': 'http://foo/'}),
('http:///foo', {'str()': 'http://foo/'}),
('http://exam_ple.com', {'str()': 'http://exam_ple.com/'}),
('http://exam-ple.com', {'str()': 'http://exam-ple.com/'}),
('http://example-.com', {'str()': 'http://example-.com/'}),
('https://£££.com', {'str()': 'https://xn--9aaa.com/'}),
('https://foobar.£££.com', {'str()': 'https://foobar.xn--9aaa.com/'}),
('https://foo.£$.money.com', {'str()': 'https://foo.xn--$-9ba.money.com/'}),
('https://xn--9aaa.com/', {'str()': 'https://xn--9aaa.com/'}),
('https://münchen/', {'str()': 'https://xn--mnchen-3ya/'}),
('http://à.א̈.com', {'str()': 'http://xn--0ca.xn--ssa73l.com/'}),
('ssh://xn--9aaa.com/', 'ssh://xn--9aaa.com/'),
('ssh://münchen.com/', 'ssh://m%C3%BCnchen.com/'),
('ssh://example/', 'ssh://example/'),
('ssh://£££/', 'ssh://%C2%A3%C2%A3%C2%A3/'),
('ssh://%C2%A3%C2%A3%C2%A3/', 'ssh://%C2%A3%C2%A3%C2%A3/'),
('ftp://127.0.0.1', {'str()': 'ftp://127.0.0.1/', 'path': '/'}),
('wss://1.1.1.1', {'str()': 'wss://1.1.1.1/', 'host': '1.1.1.1', 'unicode_host()': '1.1.1.1'}),
('snap://[::1]', {'str()': 'snap://[::1]', 'host': '[::1]', 'unicode_host()': '[::1]'}),
(
'ftp://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]',
{
'str()': 'ftp://[2001:db8:85a3::8a2e:370:7334]/',
'host': '[2001:db8:85a3::8a2e:370:7334]',
'unicode_host()': '[2001:db8:85a3::8a2e:370:7334]',
},
),
('foobar://127.0.0.1', {'str()': 'foobar://127.0.0.1', 'path': None}),
(
'mysql://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]',
{'str()': 'mysql://[2001:db8:85a3::8a2e:370:7334]', 'path': None},
),
(
'mysql://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]/thing',
{'str()': 'mysql://[2001:db8:85a3::8a2e:370:7334]/thing', 'path': '/thing'},
),
('https:/more', {'str()': 'https://more/', 'host': 'more'}),
('https:more', {'str()': 'https://more/', 'host': 'more'}),
('file:///foobar', {'str()': 'file:///foobar', 'host': None, 'unicode_host()': None}),
('file:///:80', {'str()': 'file:///:80'}),
('file://:80', Err('invalid international domain name')),
('foobar://:80', Err('empty host')),
# with bashslashes
('file:\\\\foobar\\more', {'str()': 'file://foobar/more', 'host': 'foobar', 'path': '/more'}),
('http:\\\\foobar\\more', {'str()': 'http://foobar/more', 'host': 'foobar', 'path': '/more'}),
('mongo:\\\\foobar\\more', {'str()': 'mongo:\\\\foobar\\more', 'host': None, 'path': '\\\\foobar\\more'}),
('mongodb+srv://server.example.com/', 'mongodb+srv://server.example.com/'),
('http://example.com.', {'host': 'example.com.', 'unicode_host()': 'example.com.'}),
('http:/example.com', {'host': 'example.com', 'unicode_host()': 'example.com'}),
('http:/foo', {'host': 'foo', 'unicode_host()': 'foo'}),
('http://foo', {'host': 'foo', 'unicode_host()': 'foo'}),
('http:///foo', {'host': 'foo', 'unicode_host()': 'foo'}),
('http:////foo', {'host': 'foo', 'unicode_host()': 'foo'}),
('http://-', {'host': '-', 'unicode_host()': '-'}),
('http:////example.com', {'host': 'example.com', 'unicode_host()': 'example.com'}),
('https://£££.com', {'host': 'xn--9aaa.com', 'unicode_host()': '£££.com'}),
('https://£££.com.', {'host': 'xn--9aaa.com.', 'unicode_host()': '£££.com.'}),
('https://xn--9aaa.com/', {'host': 'xn--9aaa.com', 'unicode_host()': '£££.com'}),
(
'https://münchen/',
{'host': 'xn--mnchen-3ya', 'unicode_host()': 'münchen', 'unicode_string()': 'https://münchen/'},
),
('http://à.א̈.com', {'host': 'xn--0ca.xn--ssa73l.com', 'unicode_host()': 'à.א̈.com'}),
('ftp://xn--0ca.xn--ssa73l.com', {'host': 'xn--0ca.xn--ssa73l.com', 'unicode_host()': 'à.א̈.com'}),
('https://foobar.£££.com/', {'host': 'foobar.xn--9aaa.com', 'unicode_host()': 'foobar.£££.com'}),
('https://£££.com', {'unicode_string()': 'https://£££.com/'}),
('https://xn--9aaa.com/', {'unicode_string()': 'https://£££.com/'}),
('wss://1.1.1.1', {'unicode_string()': 'wss://1.1.1.1/'}),
('file:///foobar', {'unicode_string()': 'file:///foobar'}),
(
'postgresql+py-postgresql://user:pass@localhost:5432/app',
{
'str()': 'postgresql+py-postgresql://user:pass@localhost:5432/app',
'username': 'user',
'password': 'pass',
},
),
('https://https/', {'host': 'https', 'unicode_host()': 'https'}),
('http://user:@example.org', {'str()': 'http://user@example.org/', 'username': 'user', 'password': None}),
(
'http://us@er:p[ass@example.org',
{'str()': 'http://us%40er:p%5Bass@example.org/', 'username': 'us%40er', 'password': 'p%5Bass'},
),
(
'http://us%40er:p%5Bass@example.org',
{'str()': 'http://us%40er:p%5Bass@example.org/', 'username': 'us%40er', 'password': 'p%5Bass'},
),
(
'http://us[]er:p,ass@example.org',
{'str()': 'http://us%5B%5Der:p,ass@example.org/', 'username': 'us%5B%5Der', 'password': 'p,ass'},
),
('http://%2F:@example.org', {'str()': 'http://%2F@example.org/', 'username': '%2F', 'password': None}),
('foo://user:@example.org', {'str()': 'foo://user@example.org', 'username': 'user', 'password': None}),
(
'foo://us@er:p[ass@example.org',
{'str()': 'foo://us%40er:p%5Bass@example.org', 'username': 'us%40er', 'password': 'p%5Bass'},
),
(
'foo://us%40er:p%5Bass@example.org',
{'str()': 'foo://us%40er:p%5Bass@example.org', 'username': 'us%40er', 'password': 'p%5Bass'},
),
(
'foo://us[]er:p,ass@example.org',
{'str()': 'foo://us%5B%5Der:p,ass@example.org', 'username': 'us%5B%5Der', 'password': 'p,ass'},
),
('foo://%2F:@example.org', {'str()': 'foo://%2F@example.org', 'username': '%2F', 'password': None}),
('HTTP://EXAMPLE.ORG', {'str()': 'http://example.org/'}),
('HTTP://EXAMPLE.org', {'str()': 'http://example.org/'}),
('POSTGRES://EXAMPLE.ORG', {'str()': 'postgres://EXAMPLE.ORG'}),
('https://twitter.com/@handle', {'str()': 'https://twitter.com/@handle', 'path': '/@handle'}),
(' https://www.example.com \n', 'https://www.example.com/'),
# https://www.xudongz.com/blog/2017/idn-phishing/ accepted but converted
('https://www.аррӏе.com/', 'https://www.xn--80ak6aa92e.com/'),
('https://exampl£e.org', 'https://xn--example-gia.org/'),
('https://example.珠宝', 'https://example.xn--pbt977c/'),
('https://example.vermögensberatung', 'https://example.xn--vermgensberatung-pwb/'),
('https://example.рф', 'https://example.xn--p1ai/'),
('https://exampl£e.珠宝', 'https://xn--example-gia.xn--pbt977c/'),
('ht💣tp://example.org', Err('relative URL without a base')),
(
'http://usßer:pasℝs@a💣b.com:123/c?d=e&d=f#g',
{
'str()': 'http://us%C3%9Fer:pas%E2%84%9Ds@xn--ab-qt72a.com:123/c?d=e&d=f#g',
'username': 'us%C3%9Fer',
'password': 'pas%E2%84%9Ds',
'host': 'xn--ab-qt72a.com',
'port': 123,
'path': '/c',
'query': 'd=e&d=f',
'query_params()': [('d', 'e'), ('d', 'f')],
'fragment': 'g',
},
),
],
)
def test_url_cases(url_validator, url, expected, mode):
    """Exercise one (url, expected) case in both SchemaValidator and Url-class modes."""
    url_test_case_helper(url, expected, mode, url_validator)
@pytest.mark.parametrize(
('url', 'expected', 'expected_path'),
[
('http://example.com', 'http://example.com', None),
('http:example.com', 'http://example.com', None),
('http:/example.com', 'http://example.com', None),
('http://example.com/', 'http://example.com/', '/'),
('http:example.com/', 'http://example.com/', '/'),
('http:/example.com/', 'http://example.com/', '/'),
('http://example.com?x=1', 'http://example.com?x=1', None),
('http://example.com/?x=1', 'http://example.com/?x=1', '/'),
('http://example.com#foo', 'http://example.com#foo', None),
('http://example.com/#foo', 'http://example.com/#foo', '/'),
('http://example.com/path', 'http://example.com/path', '/path'),
('http://example.com/path/', 'http://example.com/path/', '/path/'),
('http://example.com/path?x=1', 'http://example.com/path?x=1', '/path'),
('http://example.com/path/?x=1', 'http://example.com/path/?x=1', '/path/'),
],
)
def test_trailing_slash(url: str, expected: str, expected_path: Optional[str]):
    """preserve_empty_path keeps a missing path distinct from '/'.

    The flag can be enabled on the Url constructor, on the schema, or via the
    core config; all three paths must agree.
    """
    candidates = [
        Url(url, preserve_empty_path=True),
        SchemaValidator(core_schema.url_schema(preserve_empty_path=True)).validate_python(url),
        SchemaValidator(core_schema.url_schema(), CoreConfig(url_preserve_empty_path=True)).validate_python(url),
    ]
    for candidate in candidates:
        assert str(candidate) == expected
        assert candidate.unicode_string() == expected
        assert candidate.path == expected_path
@pytest.mark.parametrize(
('url', 'expected', 'expected_path'),
[
('http://example.com', 'http://example.com', None),
('http://example.com/', 'http://example.com/', '/'),
('http://example.com/path', 'http://example.com/path', '/path'),
('http://example.com/path/', 'http://example.com/path/', '/path/'),
('http://example.com,example.org', 'http://example.com,example.org', None),
('http://example.com,example.org/', 'http://example.com,example.org/', '/'),
('http://localhost,127.0.0.1', 'http://localhost,127.0.0.1', None),
('http://localhost,127.0.0.1/', 'http://localhost,127.0.0.1/', '/'),
('http:localhost,127.0.0.1', 'http://localhost,127.0.0.1', None),
('http://localhost,127.0.0.1/path', 'http://localhost,127.0.0.1/path', '/path'),
('http://localhost,127.0.0.1/path/', 'http://localhost,127.0.0.1/path/', '/path/'),
],
)
def test_multi_trailing_slash(url: str, expected: str, expected_path: Optional[str]):
    """preserve_empty_path behaves the same for multi-host URLs.

    Enabled via the MultiHostUrl constructor, the schema, or the core config —
    all three must agree.
    """
    candidates = [
        MultiHostUrl(url, preserve_empty_path=True),
        SchemaValidator(core_schema.multi_host_url_schema(preserve_empty_path=True)).validate_python(url),
        SchemaValidator(
            core_schema.multi_host_url_schema(), CoreConfig(url_preserve_empty_path=True)
        ).validate_python(url),
    ]
    for candidate in candidates:
        assert str(candidate) == expected
        assert candidate.unicode_string() == expected
        assert candidate.path == expected_path
@pytest.mark.parametrize(
'validator_kwargs,url,expected',
[
(
dict(default_port=1234, default_path='/baz'),
'http://example.org',
{'str()': 'http://example.org:1234/baz', 'host': 'example.org', 'port': 1234, 'path': '/baz'},
),
(dict(default_host='localhost'), 'redis://', {'str()': 'redis://localhost', 'host': 'localhost'}),
],
)
def test_url_defaults_single_url(validator_kwargs, url, expected):
    """Defaults (port/path/host) supplied via url_schema kwargs are applied."""
    validator = SchemaValidator(core_schema.url_schema(**validator_kwargs))
    url_test_case_helper(url, expected, SCHEMA_VALIDATOR_MODE, validator)
def test_url_host_required():
    """host_required=True rejects URLs whose host is empty."""
    validator = SchemaValidator(core_schema.url_schema(host_required=True))
    for hostless in ('test:', 'sftp://'):
        url_test_case_helper(hostless, Err('empty host'), SCHEMA_VALIDATOR_MODE, validator)
@pytest.mark.parametrize(
'validator_kwargs,url,expected',
[
(
dict(default_port=1234, default_path='/baz'),
'http://example.org',
{
'str()': 'http://example.org:1234/baz',
'hosts()': [{'host': 'example.org', 'password': None, 'port': 1234, 'username': None}],
'path': '/baz',
},
),
(
dict(default_host='localhost'),
'redis://',
{
'str()': 'redis://localhost',
'hosts()': [{'host': 'localhost', 'password': None, 'port': None, 'username': None}],
},
),
(
{},
'redis://localhost,127.0.0.1',
{
'str()': 'redis://localhost,127.0.0.1',
'hosts()': [
{'host': 'localhost', 'password': None, 'port': None, 'username': None},
{'host': '127.0.0.1', 'password': None, 'port': None, 'username': None},
],
},
),
({}, 'redis://', {'str()': 'redis://', 'hosts()': []}),
],
)
def test_url_defaults_multi_host_url(validator_kwargs, url, expected):
    """Defaults supplied via multi_host_url_schema kwargs are applied per host."""
    validator = SchemaValidator(core_schema.multi_host_url_schema(**validator_kwargs))
    url_test_case_helper(url, expected, SCHEMA_VALIDATOR_MODE, validator)
@pytest.mark.parametrize(
'url,expected',
[
(
'http://example.org:1234/baz',
{
'str()': 'http://example.org:1234/baz',
'hosts()': [{'host': 'example.org', 'password': None, 'port': 1234, 'username': None}],
'path': '/baz',
},
),
(
'redis://localhost,127.0.0.1',
{
'str()': 'redis://localhost,127.0.0.1',
'hosts()': [
{'host': 'localhost', 'password': None, 'port': None, 'username': None},
{'host': '127.0.0.1', 'password': None, 'port': None, 'username': None},
],
},
),
('redis://', {'str()': 'redis://', 'hosts()': []}),
],
)
def test_multi_host_url(url, expected):
    """Construct a MultiHostUrl directly and compare against the expected parts."""
    url_test_case_helper(url, expected, MULTI_URL_CLASS_MODE, None)
def test_multi_host_default_host_no_comma():
    """A default_host containing a comma is rejected at schema-build time."""
    # core_schema.multi_host_url_schema only builds the schema dict; the error
    # is raised when SchemaValidator processes it.
    schema = core_schema.multi_host_url_schema(default_host='foo,bar')
    with pytest.raises(SchemaError, match='default_host cannot contain a comma, see pydantic-core#326'):
        SchemaValidator(schema)
@pytest.fixture(scope='module', name='strict_url_validator')
def strict_url_validator_fixture():
    """Module-scoped URL validator running with strict=True."""
    strict_config = CoreConfig(strict=True)
    return SchemaValidator(core_schema.url_schema(), config=strict_config)
@pytest.mark.parametrize(
'url,expected',
[
('http://example.com', {'str()': 'http://example.com/', 'host': 'example.com'}),
('http://exa\nmple.com', Err('tabs or newlines are ignored in URLs', 'url_syntax_violation')),
('xxx', Err('relative URL without a base', 'url_parsing')),
('http:/foo', Err('expected //', 'url_syntax_violation')),
('http:///foo', Err('expected //', 'url_syntax_violation')),
('http:////foo', Err('expected //', 'url_syntax_violation')),
('http://exam_ple.com', {'str()': 'http://exam_ple.com/'}),
('https:/more', Err('expected //', 'url_syntax_violation')),
('https:more', Err('expected //', 'url_syntax_violation')),
('file:///foobar', {'str()': 'file:///foobar', 'host': None, 'unicode_host()': None}),
('file://:80', Err('invalid international domain name', 'url_parsing')),
('file:/xx', Err('expected // after file:', 'url_syntax_violation')),
('foobar://:80', Err('empty host', 'url_parsing')),
('mongodb+srv://server.example.com/', 'mongodb+srv://server.example.com/'),
('http://user:@example.org', 'http://user@example.org/'),
('http://us[er:@example.org', Err('non-URL code point', 'url_syntax_violation')),
('http://us%5Ber:bar@example.org', 'http://us%5Ber:bar@example.org/'),
('http://user:@example.org', 'http://user@example.org/'),
('mongodb://us%5Ber:bar@example.org', 'mongodb://us%5Ber:bar@example.org'),
('mongodb://us@er@example.org', Err('unencoded @ sign in username or password', 'url_syntax_violation')),
],
)
def test_url_error(strict_url_validator, url, expected):
    """Strict-mode parsing: Err cases check message and error type, others check parts."""
    if isinstance(expected, Err):
        with pytest.raises(ValidationError) as exc_info:
            strict_url_validator.validate_python(url)
        assert exc_info.value.error_count() == 1
        error = exc_info.value.errors(include_url=False)[0]
        assert error['ctx']['error'] == expected.message
        # In this test, Err.errors carries the expected error *type* string.
        assert error['type'] == expected.errors
        return

    output_url = strict_url_validator.validate_python(url)
    assert isinstance(output_url, Url)
    if isinstance(expected, str):
        assert str(output_url) == expected
        return

    assert isinstance(expected, dict)

    def part(key: str):
        # 'str()' and other 'name()' keys mean "call it"; plain keys are attributes.
        if key == 'str()':
            return str(output_url)
        if key.endswith('()'):
            return getattr(output_url, key[:-2])()
        return getattr(output_url, key)

    assert {key: part(key) for key in expected} == expected
def test_no_host(url_validator):
    """A data: URL has no host; everything after the scheme becomes the path."""
    parsed = url_validator.validate_python('data:text/plain,Stuff')
    assert str(parsed) == 'data:text/plain,Stuff'
    assert parsed.host is None
    assert parsed.scheme == 'data'
    assert parsed.path == 'text/plain,Stuff'
def test_max_length():
    """max_length is enforced on the raw input string, with a url_too_long error."""
    validator = SchemaValidator(core_schema.url_schema(max_length=25))
    assert str(validator.validate_python('https://example.com')) == 'https://example.com/'

    too_long = 'https://example.com/foo/bar'
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python(too_long)
    expected_error = {
        'type': 'url_too_long',
        'loc': (),
        'msg': 'URL should have at most 25 characters',
        'input': too_long,
        'ctx': {'max_length': 25},
    }
    assert exc_info.value.errors(include_url=False) == [expected_error]
def test_allowed_schemes_ok():
    """Both allowed schemes validate; surrounding whitespace is stripped."""
    validator = SchemaValidator(core_schema.url_schema(allowed_schemes=['http', 'https']))
    https_url = validator.validate_python(' https://example.com ')
    assert https_url.host == 'example.com'
    assert https_url.scheme == 'https'
    assert str(https_url) == 'https://example.com/'
    assert str(validator.validate_python('http://other.com')) == 'http://other.com/'
def test_allowed_schemes_error():
    """A scheme outside a two-entry allow-list yields a url_scheme error ('x' or 'y')."""
    validator = SchemaValidator(core_schema.url_schema(allowed_schemes=['http', 'https']))
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python('unix:/run/foo.socket')
    expected_error = {
        'type': 'url_scheme',
        'loc': (),
        'msg': "URL scheme should be 'http' or 'https'",
        'input': 'unix:/run/foo.socket',
        'ctx': {'expected_schemes': "'http' or 'https'"},
    }
    assert exc_info.value.errors(include_url=False) == [expected_error]
def test_allowed_schemes_errors():
    """Three allowed schemes are formatted as a comma list: 'a', 'b' or 'c'."""
    validator = SchemaValidator(core_schema.url_schema(allowed_schemes=['a', 'b', 'c']))
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python('unix:/run/foo.socket')
    expected_error = {
        'type': 'url_scheme',
        'loc': (),
        'msg': "URL scheme should be 'a', 'b' or 'c'",
        'input': 'unix:/run/foo.socket',
        'ctx': {'expected_schemes': "'a', 'b' or 'c'"},
    }
    assert exc_info.value.errors(include_url=False) == [expected_error]
def test_url_query_repeat(url_validator):
    """Repeated query keys are preserved in order, not collapsed."""
    parsed: Url = url_validator.validate_python('https://example.com/foo/bar?a=1&a=2')
    assert str(parsed) == 'https://example.com/foo/bar?a=1&a=2'
    assert parsed.query_params() == [('a', '1'), ('a', '2')]
def test_url_to_url(url_validator, multi_host_url_validator):
    """Url instances revalidate as-is; MultiHostUrl instances convert to Url."""
    first = url_validator.validate_python('https://example.com')
    assert isinstance(first, Url)
    assert str(first) == 'https://example.com/'

    revalidated = url_validator.validate_python(first)
    assert isinstance(revalidated, Url)
    assert str(revalidated) == 'https://example.com/'
    # Revalidating an existing Url returns the same instance.
    assert first is revalidated

    single_host = multi_host_url_validator.validate_python('https://example.com')
    assert isinstance(single_host, MultiHostUrl)
    converted = url_validator.validate_python(single_host)
    assert isinstance(converted, Url)
    assert str(converted) == 'https://example.com/'

    two_hosts = multi_host_url_validator.validate_python('foobar://x:y@foo,x:y@bar.com')
    assert isinstance(two_hosts, MultiHostUrl)
    collapsed = url_validator.validate_python(two_hosts)
    assert isinstance(collapsed, Url)
    # Note the expected string: the earlier host run is percent-encoded into
    # the userinfo of the single-host form, and bar.com becomes the host.
    assert str(collapsed) == 'foobar://x:y%40foo,x%3Ay@bar.com'
    assert collapsed.host == 'bar.com'
def test_url_to_constraint():
    """Constraints (max_length, allowed_schemes) apply when revalidating an existing Url."""
    plain = SchemaValidator(core_schema.url_schema())
    parsed: Url = plain.validate_python('http://example.com/foobar/bar')
    assert str(parsed) == 'http://example.com/foobar/bar'

    # Length limit is checked against the Url instance too.
    limited = SchemaValidator(core_schema.url_schema(max_length=25))
    with pytest.raises(ValidationError) as exc_info:
        limited.validate_python(parsed)
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'url_too_long',
            'loc': (),
            'msg': 'URL should have at most 25 characters',
            'input': IsInstance(Url) & HasRepr("Url('http://example.com/foobar/bar')"),
            'ctx': {'max_length': 25},
        }
    ]

    # Scheme allow-list likewise.
    https_only = SchemaValidator(core_schema.url_schema(allowed_schemes=['https']))
    with pytest.raises(ValidationError) as exc_info:
        https_only.validate_python(parsed)
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'url_scheme',
            'loc': (),
            'msg': "URL scheme should be 'https'",
            'input': IsInstance(Url) & HasRepr("Url('http://example.com/foobar/bar')"),
            'ctx': {'expected_schemes': "'https'"},
        }
    ]
def test_wrong_type_lax(url_validator):
    """Lax mode accepts str and bytes; non-string input and strict-mode bytes fail."""
    assert str(url_validator.validate_python('http://example.com/foobar/bar')) == 'http://example.com/foobar/bar'
    assert str(url_validator.validate_python(b'http://example.com/foobar/bar')) == 'http://example.com/foobar/bar'
    wrong_type = r'URL input should be a string or URL \[type=url_type,'
    with pytest.raises(ValidationError, match=wrong_type):
        url_validator.validate_python(123)
    # runtime strict: bytes are no longer accepted
    with pytest.raises(ValidationError, match=wrong_type):
        url_validator.validate_python(b'http://example.com/foobar/bar', strict=True)
def test_wrong_type_strict(strict_url_validator):
    """Strict mode accepts str and existing Url instances; bytes and ints fail."""
    parsed = strict_url_validator.validate_python('http://example.com/foobar/bar')
    assert str(parsed) == 'http://example.com/foobar/bar'
    assert str(strict_url_validator.validate_python(parsed)) == 'http://example.com/foobar/bar'
    wrong_type = r'URL input should be a string or URL \[type=url_type,'
    for bad_input in (b'http://example.com/foobar/bar', 123):
        with pytest.raises(ValidationError, match=wrong_type):
            strict_url_validator.validate_python(bad_input)
@pytest.mark.parametrize(
'input_value,expected,username,password',
[
('https://apple:pie@example.com/foo', 'https://apple:pie@example.com/foo', 'apple', 'pie'),
('https://apple:@example.com/foo', 'https://apple@example.com/foo', 'apple', None),
('https://app$le:pie@example.com/foo', 'https://app$le:pie@example.com/foo', 'app$le', 'pie'),
('https://app le:pie@example.com/foo', 'https://app%20le:pie@example.com/foo', 'app%20le', 'pie'),
],
)
def test_username(url_validator, input_value, expected, username, password):
    """Userinfo is percent-encoded as needed and split into username/password."""
    parsed: Url = url_validator.validate_python(input_value)
    assert isinstance(parsed, Url)
    assert str(parsed) == expected
    assert parsed.username == username
    assert parsed.password == password
def test_strict_not_strict(url_validator, strict_url_validator, multi_host_url_validator):
    """Instances produced by lax parsing revalidate under strict mode — with one exception."""
    lenient = url_validator.validate_python('http:/example.com/foobar/bar')
    assert str(lenient) == 'http://example.com/foobar/bar'
    # The already-parsed Url passes strict validation even though its source text wouldn't.
    assert str(strict_url_validator.validate_python(lenient)) == 'http://example.com/foobar/bar'

    single = multi_host_url_validator.validate_python('https://example.com')
    assert isinstance(single, MultiHostUrl)
    as_url = strict_url_validator.validate_python(single)
    assert isinstance(as_url, Url)
    assert str(as_url) == 'https://example.com/'

    multi = multi_host_url_validator.validate_python('foobar://x:y@foo,x:y@bar.com')
    assert isinstance(multi, MultiHostUrl)
    # A genuine multi-host URL can't be re-read as a single-host Url in strict mode.
    with pytest.raises(ValidationError, match=r'unencoded @ sign in username or password \[type=url_syntax_violation'):
        strict_url_validator.validate_python(multi)
def test_multi_host_url_ok_single(py_and_json: PyAndJson):
    """multi_host_url_schema also accepts ordinary single-host URLs."""
    validator = py_and_json(core_schema.multi_host_url_schema())

    with_query: MultiHostUrl = validator.validate_test('https://example.com/foo/bar?a=b')
    assert isinstance(with_query, MultiHostUrl)
    assert str(with_query) == 'https://example.com/foo/bar?a=b'
    assert repr(with_query) == "MultiHostUrl('https://example.com/foo/bar?a=b')"
    assert with_query.scheme == 'https'
    assert with_query.path == '/foo/bar'
    assert with_query.query == 'a=b'
    assert with_query.query_params() == [('a', 'b')]
    assert with_query.fragment is None
    # Single host, https default port filled in.
    assert with_query.hosts() == [{'username': None, 'password': None, 'host': 'example.com', 'port': 443}]

    with_auth: MultiHostUrl = validator.validate_test('postgres://foo:bar@example.com:1234')
    assert isinstance(with_auth, MultiHostUrl)
    assert str(with_auth) == 'postgres://foo:bar@example.com:1234'
    assert with_auth.scheme == 'postgres'
    # Credentials and explicit port appear in the host entry.
    assert with_auth.hosts() == [{'username': 'foo', 'password': 'bar', 'host': 'example.com', 'port': 1234}]
def test_multi_host_url_ok_2(py_and_json: PyAndJson):
    """Two comma-separated hosts both appear in hosts(), with the https default port."""
    validator = py_and_json(core_schema.multi_host_url_schema())
    parsed: MultiHostUrl = validator.validate_test('https://foo.com,bar.com/path')
    assert isinstance(parsed, MultiHostUrl)
    assert str(parsed) == 'https://foo.com,bar.com/path'
    assert parsed.scheme == 'https'
    assert parsed.path == '/path'
    expected_hosts = [
        {'username': None, 'password': None, 'host': host, 'port': 443} for host in ('foo.com', 'bar.com')
    ]
    assert parsed.hosts() == expected_hosts
@pytest.fixture(scope='module', name='multi_host_url_validator')
def multi_host_url_validator_fixture():
    """Module-scoped validator for a plain multi_host_url_schema()."""
    schema = core_schema.multi_host_url_schema()
    return SchemaValidator(schema)
@pytest.mark.parametrize(
    'url,expected',
    [
        ('', Err('input is empty')),
        (
            'http://example.com',
            {
                'str()': 'http://example.com/',
                'hosts()': [{'host': 'example.com', 'password': None, 'port': 80, 'username': None}],
                'unicode_string()': 'http://example.com/',
            },
        ),
        (
            'postgres://example.com',
            {
                'str()': 'postgres://example.com',
                'scheme': 'postgres',
                'hosts()': [{'host': 'example.com', 'password': None, 'port': None, 'username': None}],
            },
        ),
        (
            'mongodb://foo,bar,spam/xxx',
            {
                'str()': 'mongodb://foo,bar,spam/xxx',
                'scheme': 'mongodb',
                'hosts()': [
                    {'host': 'foo', 'password': None, 'port': None, 'username': None},
                    {'host': 'bar', 'password': None, 'port': None, 'username': None},
                    {'host': 'spam', 'password': None, 'port': None, 'username': None},
                ],
            },
        ),
        # leading/trailing whitespace and control characters are stripped in lax mode
        (' mongodb://foo,bar,spam/xxx ', 'mongodb://foo,bar,spam/xxx'),
        (' \n\r\t mongodb://foo,bar,spam/xxx', 'mongodb://foo,bar,spam/xxx'),
        (
            'mongodb+srv://foo,bar,spam/xxx',
            {
                'str()': 'mongodb+srv://foo,bar,spam/xxx',
                'scheme': 'mongodb+srv',
                'hosts()': [
                    {'host': 'foo', 'password': None, 'port': None, 'username': None},
                    {'host': 'bar', 'password': None, 'port': None, 'username': None},
                    {'host': 'spam', 'password': None, 'port': None, 'username': None},
                ],
            },
        ),
        (
            'https://foo:bar@example.com,foo%20o:bar@example.com',
            {
                'str()': 'https://foo:bar@example.com,foo%20o:bar@example.com/',
                'scheme': 'https',
                'hosts()': [
                    {'host': 'example.com', 'password': 'bar', 'port': 443, 'username': 'foo'},
                    {'host': 'example.com', 'password': 'bar', 'port': 443, 'username': 'foo%20o'},
                ],
            },
        ),
        (
            'postgres://foo:bar@example.com,foo%20o:bar@example.com',
            {
                'str()': 'postgres://foo:bar@example.com,foo%20o:bar@example.com',
                'scheme': 'postgres',
                'hosts()': [
                    {'host': 'example.com', 'password': 'bar', 'port': None, 'username': 'foo'},
                    {'host': 'example.com', 'password': 'bar', 'port': None, 'username': 'foo%20o'},
                ],
            },
        ),
        ('postgres://', {'str()': 'postgres://', 'scheme': 'postgres', 'hosts()': []}),
        # empty host segments are rejected, for special and non-special schemes alike
        ('postgres://,', Err('empty host')),
        ('postgres://,,', Err('empty host')),
        ('postgres://foo,\n,bar', Err('empty host')),
        ('postgres://\n,bar', Err('empty host')),
        ('postgres://foo,\n', Err('empty host')),
        ('postgres://foo,', Err('empty host')),
        ('postgres://,foo', Err('empty host')),
        ('http://', Err('empty host')),
        ('http://,', Err('empty host')),
        ('http://,,', Err('empty host')),
        ('http://foo,\n,bar', Err('empty host')),
        ('http://\n,bar', Err('empty host')),
        ('http://foo,\n', Err('empty host')),
        ('http://foo,', Err('empty host')),
        ('http://,foo', Err('empty host')),
        ('http@foobar', Err('relative URL without a base')),
        # embedded newlines inside host lists are stripped
        (
            'mongodb://foo\n,b\nar,\nspam/xxx',
            {
                'str()': 'mongodb://foo,bar,spam/xxx',
                'scheme': 'mongodb',
                'hosts()': [
                    {'host': 'foo', 'password': None, 'port': None, 'username': None},
                    {'host': 'bar', 'password': None, 'port': None, 'username': None},
                    {'host': 'spam', 'password': None, 'port': None, 'username': None},
                ],
            },
        ),
        (
            'postgres://user:pass@host1.db.net:4321,host2.db.net:6432/app',
            {
                'str()': 'postgres://user:pass@host1.db.net:4321,host2.db.net:6432/app',
                'scheme': 'postgres',
                'hosts()': [
                    {'host': 'host1.db.net', 'password': 'pass', 'port': 4321, 'username': 'user'},
                    {'host': 'host2.db.net', 'password': None, 'port': 6432, 'username': None},
                ],
                'path': '/app',
            },
        ),
        (
            'postgresql+py-postgresql://user:pass@localhost:5432/app',
            {
                'str()': 'postgresql+py-postgresql://user:pass@localhost:5432/app',
                'hosts()': [{'host': 'localhost', 'password': 'pass', 'port': 5432, 'username': 'user'}],
            },
        ),
        # special schemes (http) get a '/' path inserted before fragment/query; others don't
        ('http://foo#bar', 'http://foo/#bar'),
        ('mongodb://foo#bar', 'mongodb://foo#bar'),
        ('http://foo,bar#spam', 'http://foo,bar/#spam'),
        ('mongodb://foo,bar#spam', 'mongodb://foo,bar#spam'),
        ('http://foo,bar?x=y', 'http://foo,bar/?x=y'),
        ('mongodb://foo,bar?x=y', 'mongodb://foo,bar?x=y'),
        ('foo://foo,bar?x=y', 'foo://foo,bar?x=y'),
        (
            (
                'mongodb://mongodb1.example.com:27317,mongodb2.example.com:27017/'
                'mydatabase?replicaSet=mySet&authSource=authDB'
            ),
            {
                'str()': (
                    'mongodb://mongodb1.example.com:27317,mongodb2.example.com:27017/'
                    'mydatabase?replicaSet=mySet&authSource=authDB'
                ),
                'hosts()': [
                    {'host': 'mongodb1.example.com', 'password': None, 'port': 27317, 'username': None},
                    {'host': 'mongodb2.example.com', 'password': None, 'port': 27017, 'username': None},
                ],
                'query_params()': [('replicaSet', 'mySet'), ('authSource', 'authDB')],
            },
        ),
        # with backslashes (normalised to forward slashes in lax mode for special schemes)
        (
            'FILE:\\\\foo,bar\\more',
            {
                'str()': 'file://foo,bar/more',
                'path': '/more',
                'hosts()': [
                    {'host': 'foo', 'password': None, 'port': None, 'username': None},
                    {'host': 'bar', 'password': None, 'port': None, 'username': None},
                ],
            },
        ),
        (
            'http:\\\\foo,bar\\more',
            {
                'str()': 'http://foo,bar/more',
                'path': '/more',
                'hosts()': [
                    {'host': 'foo', 'password': None, 'port': 80, 'username': None},
                    {'host': 'bar', 'password': None, 'port': 80, 'username': None},
                ],
            },
        ),
        ('mongo:\\\\foo,bar\\more', Err('empty host')),
        (
            'foobar://foo[]bar:x@y@whatever,foo[]bar:x@y@whichever',
            {
                'str()': 'foobar://foo%5B%5Dbar:x%40y@whatever,foo%5B%5Dbar:x%40y@whichever',
                'hosts()': [
                    {'host': 'whatever', 'password': 'x%40y', 'port': None, 'username': 'foo%5B%5Dbar'},
                    {'host': 'whichever', 'password': 'x%40y', 'port': None, 'username': 'foo%5B%5Dbar'},
                ],
            },
        ),
        (
            'foobar://foo%2Cbar:x@y@whatever,snap',
            {
                'str()': 'foobar://foo%2Cbar:x%40y@whatever,snap',
                'hosts()': [
                    {'host': 'whatever', 'password': 'x%40y', 'port': None, 'username': 'foo%2Cbar'},
                    {'host': 'snap', 'password': None, 'port': None, 'username': None},
                ],
            },
        ),
        (
            'mongodb://x:y@[::1],1.1.1.1:888/xxx',
            {
                'str()': 'mongodb://x:y@[::1],1.1.1.1:888/xxx',
                'scheme': 'mongodb',
                'hosts()': [
                    {'host': '[::1]', 'password': 'y', 'port': None, 'username': 'x'},
                    {'host': '1.1.1.1', 'password': None, 'port': 888, 'username': None},
                ],
            },
        ),
        (
            'http://foo.co.uk,bar.spam.things.com',
            {
                'str()': 'http://foo.co.uk,bar.spam.things.com/',
                'hosts()': [
                    {'host': 'foo.co.uk', 'password': None, 'port': 80, 'username': None},
                    {'host': 'bar.spam.things.com', 'password': None, 'port': 80, 'username': None},
                ],
            },
        ),
        ('ht💣tp://example.com', Err('relative URL without a base')),
        # non-ASCII hosts: special schemes (http) get punycode, non-special get percent-encoding
        (
            'http://£££.com',
            {
                'str()': 'http://xn--9aaa.com/',
                'hosts()': [{'host': 'xn--9aaa.com', 'password': None, 'port': 80, 'username': None}],
                'unicode_string()': 'http://£££.com/',
            },
        ),
        (
            'http://£££.co.uk,münchen.com/foo?bar=baz#qux',
            {
                'str()': 'http://xn--9aaa.co.uk,xn--mnchen-3ya.com/foo?bar=baz#qux',
                'hosts()': [
                    {'host': 'xn--9aaa.co.uk', 'password': None, 'port': 80, 'username': None},
                    {'host': 'xn--mnchen-3ya.com', 'password': None, 'port': 80, 'username': None},
                ],
                'unicode_string()': 'http://£££.co.uk,münchen.com/foo?bar=baz#qux',
            },
        ),
        (
            'postgres://£££.co.uk,münchen.com/foo?bar=baz#qux',
            {
                'str()': 'postgres://%C2%A3%C2%A3%C2%A3.co.uk,m%C3%BCnchen.com/foo?bar=baz#qux',
                'hosts()': [
                    {'host': '%C2%A3%C2%A3%C2%A3.co.uk', 'password': None, 'port': None, 'username': None},
                    {'host': 'm%C3%BCnchen.com', 'password': None, 'port': None, 'username': None},
                ],
                'unicode_string()': 'postgres://%C2%A3%C2%A3%C2%A3.co.uk,m%C3%BCnchen.com/foo?bar=baz#qux',
            },
        ),
    ],
)
def test_multi_url_cases(multi_host_url_validator, url, expected):
    """Validate `url` and check the result against `expected`.

    `expected` may be an `Err` (message of the expected `url_parsing` error), a plain
    string (the normalised URL), or a dict mapping attribute names / method calls
    ('name()') to their expected values.
    """
    if isinstance(expected, Err):
        with pytest.raises(ValidationError) as exc_info:
            multi_host_url_validator.validate_python(url)
        assert exc_info.value.error_count() == 1
        error = exc_info.value.errors(include_url=False)[0]
        assert error['type'] == 'url_parsing'
        assert error['ctx']['error'] == expected.message
    else:
        output_url = multi_host_url_validator.validate_python(url)
        assert isinstance(output_url, MultiHostUrl)
        if isinstance(expected, str):
            assert str(output_url) == expected
        else:
            assert isinstance(expected, dict)
            output_parts = {}
            # keys ending in '()' name methods to call; 'str()' means str(output_url)
            for key in expected:
                if key == 'str()':
                    output_parts[key] = str(output_url)
                elif key.endswith('()'):
                    output_parts[key] = getattr(output_url, key[:-2])()
                else:
                    output_parts[key] = getattr(output_url, key)
            assert output_parts == expected
@pytest.fixture(scope='module', name='strict_multi_host_url_validator')
def strict_multi_host_url_validator_fixture():
    """Module-scoped validator for strict multi-host URL schemas."""
    schema = core_schema.multi_host_url_schema(strict=True)
    return SchemaValidator(schema)
@pytest.mark.parametrize(
    'url,expected',
    [
        ('http://example.com', 'http://example.com/'),
        (
            ' mongodb://foo,bar,spam/xxx ',
            Err('leading or trailing control or space character are ignored in URLs', 'url_syntax_violation'),
        ),
        (
            ' \n\r\t mongodb://foo,bar,spam/xxx',
            Err('leading or trailing control or space character are ignored in URLs', 'url_syntax_violation'),
        ),
        # with backslashes
        ('file:\\\\foo,bar\\more', Err('backslash', 'url_syntax_violation')),
        ('http:\\\\foo,bar\\more', Err('backslash', 'url_syntax_violation')),
        ('mongo:\\\\foo,bar\\more', Err('non-URL code point', 'url_syntax_violation')),
        ('foobar://foo[]bar:x@y@whatever,foo[]bar:x@y@whichever', Err('non-URL code point', 'url_syntax_violation')),
        (
            'foobar://foo%2Cbar:x@y@whatever,snap',
            Err('unencoded @ sign in username or password', 'url_syntax_violation'),
        ),
        ('foobar://foo%2Cbar:x%40y@whatever,snap', 'foobar://foo%2Cbar:x%40y@whatever,snap'),
    ],
)
def test_multi_url_cases_strict(strict_multi_host_url_validator, url, expected):
    """In strict mode, inputs that would need lax fix-ups are rejected with `url_syntax_violation`."""
    if isinstance(expected, Err):
        with pytest.raises(ValidationError) as exc_info:
            strict_multi_host_url_validator.validate_python(url)
        assert exc_info.value.error_count() == 1
        error = exc_info.value.errors(include_url=False)[0]
        # in these cases `Err.errors` holds the expected error *type* string
        assert error['type'] == expected.errors
        assert error['ctx']['error'] == expected.message
    else:
        output_url = strict_multi_host_url_validator.validate_python(url)
        assert isinstance(output_url, MultiHostUrl)
        if isinstance(expected, str):
            assert str(output_url) == expected
        else:
            assert isinstance(expected, dict)
            output_parts = {}
            # keys ending in '()' name methods to call; 'str()' means str(output_url)
            for key in expected:
                if key == 'str()':
                    output_parts[key] = str(output_url)
                elif key.endswith('()'):
                    output_parts[key] = getattr(output_url, key[:-2])()
                else:
                    output_parts[key] = getattr(output_url, key)
            assert output_parts == expected
def test_url_to_multi_url(url_validator, multi_host_url_validator):
    """A Url re-validates into a new MultiHostUrl; a MultiHostUrl passes through unchanged."""
    single: Url = url_validator.validate_python('https://example.com')
    assert isinstance(single, Url)
    assert str(single) == 'https://example.com/'

    converted = multi_host_url_validator.validate_python(single)
    assert isinstance(converted, MultiHostUrl)
    assert str(converted) == 'https://example.com/'
    # converting a Url produces a brand-new MultiHostUrl object
    assert single is not converted

    revalidated = multi_host_url_validator.validate_python(converted)
    assert isinstance(revalidated, MultiHostUrl)
    assert str(revalidated) == 'https://example.com/'
    # an existing MultiHostUrl is returned as-is, not copied
    assert converted is revalidated
def test_multi_wrong_type(multi_host_url_validator):
    """Non-string/non-URL input is rejected with a url_type error."""
    validate = multi_host_url_validator.validate_python
    assert str(validate('http://example.com')) == 'http://example.com/'
    with pytest.raises(ValidationError, match=r'URL input should be a string or URL \[type=url_type,'):
        validate(42)
def test_multi_allowed_schemas():
    """Only schemes listed in `allowed_schemes` are accepted."""
    validator = SchemaValidator(core_schema.multi_host_url_schema(allowed_schemes=['http', 'foo']))
    # special scheme http gets a trailing-slash path; unknown scheme foo does not
    for raw, normalised in [('http://example.com', 'http://example.com/'), ('foo://example.com', 'foo://example.com')]:
        assert str(validator.validate_python(raw)) == normalised
    with pytest.raises(ValidationError, match=r"URL scheme should be 'http' or 'foo' \[type=url_scheme,"):
        validator.validate_python('https://example.com')
def test_multi_max_length(url_validator):
    """`max_length` is enforced for string input and for Url/MultiHostUrl instances alike."""
    validator = SchemaValidator(core_schema.multi_host_url_schema(max_length=25))
    too_long = r'URL should have at most 25 characters \[type=url_too_long,'

    assert str(validator.validate_python('http://example.com')) == 'http://example.com/'
    with pytest.raises(ValidationError, match=too_long):
        validator.validate_python('https://example.com/this-is-too-long')

    # revalidating an already-parsed MultiHostUrl still passes the length check
    short_multi = validator.validate_python('http://example.com')
    assert str(validator.validate_python(short_multi)) == 'http://example.com/'

    # a plain Url instance is accepted as input and checked the same way
    short_single = url_validator.validate_python('http://example.com')
    assert isinstance(short_single, Url)
    assert str(validator.validate_python(short_single)) == 'http://example.com/'

    long_single = url_validator.validate_python('http://example.com/this-is-too-long')
    with pytest.raises(ValidationError, match=too_long):
        validator.validate_python(long_single)
def test_zero_schemas():
    """An empty `allowed_schemes` list is rejected when building the validator."""
    empty_schema = core_schema.multi_host_url_schema(allowed_schemes=[])
    with pytest.raises(SchemaError, match='`allowed_schemes` should have length > 0'):
        SchemaValidator(empty_schema)
@pytest.mark.parametrize(
    'url,expected',
    [
        # urlparse doesn't follow RFC 3986 Section 3.2
        (
            'http://google.com#@evil.com/',
            dict(
                scheme='http',
                host='google.com',
                # path='', CHANGED
                path='/',
                fragment='@evil.com/',
            ),
        ),
        # CVE-2016-5699
        (
            'http://127.0.0.1%0d%0aConnection%3a%20keep-alive',
            # dict(scheme='http', host='127.0.0.1%0d%0aconnection%3a%20keep-alive'), CHANGED
            Err('Input should be a valid URL, invalid international domain name [type=url_parsing,'),
        ),
        # NodeJS unicode -> double dot
        ('http://google.com/\uff2e\uff2e/abc', dict(scheme='http', host='google.com', path='/%EF%BC%AE%EF%BC%AE/abc')),
        # Scheme without ://
        (
            "javascript:a='@google.com:12345/';alert(0)",
            dict(scheme='javascript', path="a='@google.com:12345/';alert(0)"),
        ),
        (
            '//google.com/a/b/c',
            # dict(host='google.com', path='/a/b/c'),
            Err('Input should be a valid URL, relative URL without a base [type=url_parsing,'),
        ),
        # International URLs
        (
            'http://ヒ:キ@ヒ.abc.ニ/ヒ?キ#ワ',
            dict(
                scheme='http',
                host='xn--pdk.abc.xn--idk',
                auth='%E3%83%92:%E3%82%AD',
                path='/%E3%83%92',
                query='%E3%82%AD',
                fragment='%E3%83%AF',
            ),
        ),
        # Injected headers (CVE-2016-5699, CVE-2019-9740, CVE-2019-9947)
        (
            '10.251.0.83:7777?a=1 HTTP/1.1\r\nX-injected: header',
            # dict( CHANGED
            #     host='10.251.0.83',
            #     port=7777,
            #     path='',
            #     query='a=1%20HTTP/1.1%0D%0AX-injected:%20header',
            # ),
            Err('Input should be a valid URL, relative URL without a base [type=url_parsing,'),
        ),
        # ADDED, similar to the above with scheme added
        (
            'http://10.251.0.83:7777?a=1 HTTP/1.1\r\nX-injected: header',
            dict(
                host='10.251.0.83',
                port=7777,
                path='/',
                # query='a=1%20HTTP/1.1%0D%0AX-injected:%20header', CHANGED
                query='a=1%20HTTP/1.1X-injected:%20header',
            ),
        ),
        (
            'http://127.0.0.1:6379?\r\nSET test failure12\r\n:8080/test/?test=a',
            dict(
                scheme='http',
                host='127.0.0.1',
                port=6379,
                # path='',
                path='/',
                # query='%0D%0ASET%20test%20failure12%0D%0A:8080/test/?test=a', CHANGED
                query='SET%20test%20failure12:8080/test/?test=a',
            ),
        ),
        # See https://bugs.xdavidhu.me/google/2020/03/08/the-unexpected-google-wide-domain-check-bypass/
        (
            'https://user:pass@xdavidhu.me\\test.corp.google.com:8080/path/to/something?param=value#hash',
            dict(
                scheme='https',
                auth='user:pass',
                host='xdavidhu.me',
                # path='/%5Ctest.corp.google.com:8080/path/to/something', CHANGED
                path='/test.corp.google.com:8080/path/to/something',
                query='param=value',
                fragment='hash',
            ),
        ),
        # tons of '@' causing backtracking
        (
            'https://' + ('@' * 10000) + '[',
            # False, CHANGED
            Err('Input should be a valid URL, invalid IPv6 address [type=url_parsing,'),
        ),
        (
            'https://user:' + ('@' * 10000) + 'example.com',
            dict(scheme='https', auth='user:' + ('%40' * 9999), host='example.com'),
        ),
    ],
)
def test_url_vulnerabilities(url_validator, url, expected):
    """
    Test cases from
    https://github.com/urllib3/urllib3/blob/7ef7444fd0fc22a825be6624af85343cefa36fef/test/test_util.py#L422

    `expected` is either an `Err` (message the validator must raise) or a dict of
    Url attribute names -> expected values; the synthetic 'auth' key is checked as
    '<username>:<password>'. '# ... CHANGED' comments record where this parser's
    behaviour intentionally differs from urllib3's expectations.
    """
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            url_validator.validate_python(url)
    else:
        output_url = url_validator.validate_python(url)
        assert isinstance(output_url, Url)
        output_parts = {}
        for key in expected:
            # one tweak required to match urllib3 logic
            if key == 'auth':
                output_parts[key] = f'{output_url.username}:{output_url.password}'
            else:
                output_parts[key] = getattr(output_url, key)
        assert output_parts == expected
def test_multi_host_url_comparison() -> None:
    """MultiHostUrl equality/ordering behaves like comparison of the normalised URL string."""
    base = MultiHostUrl('http://example.com,www.example.com')
    with_slash = MultiHostUrl('http://example.com,www.example.com/')
    with_path = MultiHostUrl('http://example.com,www.example.com/123')

    assert base == MultiHostUrl('http://example.com,www.example.com')
    # the bare and trailing-slash forms normalise to the same URL
    assert base == with_slash
    assert base != with_path
    assert with_path > base
    assert with_path >= base
    assert base >= MultiHostUrl('http://example.com,www.example.com')
    assert base < with_path
    assert base <= with_path
    assert base <= MultiHostUrl('http://example.com')
def test_multi_host_url_bool() -> None:
    """A MultiHostUrl instance is always truthy."""
    url = MultiHostUrl('http://example.com,www.example.com')
    assert bool(url) is True
def test_multi_host_url_hash() -> None:
    """Equal MultiHostUrls hash equally, so equivalent dict keys overwrite each other."""
    counts: dict[MultiHostUrl, int] = {}

    counts[MultiHostUrl('http://example.com,www.example.com')] = 1
    # the trailing-slash form is the same key (equal value, equal hash)
    assert counts == {MultiHostUrl('http://example.com,www.example.com/'): 1}

    counts[MultiHostUrl('http://example.com,www.example.com/123')] = 2
    assert counts == {
        MultiHostUrl('http://example.com,www.example.com/'): 1,
        MultiHostUrl('http://example.com,www.example.com/123'): 2,
    }

    # writing through the original key overwrites, it doesn't add a third entry
    counts[MultiHostUrl('http://example.com,www.example.com')] = 3
    assert counts == {
        MultiHostUrl('http://example.com,www.example.com/'): 3,
        MultiHostUrl('http://example.com,www.example.com/123'): 2,
    }
def test_multi_host_url_deepcopy() -> None:
    """deepcopy yields an equal MultiHostUrl."""
    original = MultiHostUrl('http://example.com')
    assert deepcopy(original) == MultiHostUrl('http://example.com/')
def test_url_comparison() -> None:
    """Url equality/ordering behaves like comparison of the normalised URL string."""
    base = Url('http://example.com')
    with_slash = Url('http://example.com/')
    with_path = Url('http://example.com/123')

    assert base == Url('http://example.com')
    # the bare and trailing-slash forms normalise to the same URL
    assert base == with_slash
    assert base != with_path
    assert with_path > base
    assert with_path >= base
    assert base >= Url('http://example.com')
    assert base < with_path
    assert base <= with_path
    assert base <= Url('http://example.com')
def test_url_bool() -> None:
    """A Url instance is always truthy."""
    url = Url('http://example.com')
    assert bool(url) is True
def test_url_hash() -> None:
    """Equal Urls hash equally, so equivalent dict keys overwrite each other."""
    counts: dict[Url, int] = {}

    counts[Url('http://example.com')] = 1
    # the trailing-slash form is the same key (equal value, equal hash)
    assert counts == {Url('http://example.com/'): 1}

    counts[Url('http://example.com/123')] = 2
    assert counts == {Url('http://example.com/'): 1, Url('http://example.com/123'): 2}

    # writing through the original key overwrites, it doesn't add a third entry
    counts[Url('http://example.com')] = 3
    assert counts == {Url('http://example.com/'): 3, Url('http://example.com/123'): 2}
def test_url_deepcopy() -> None:
    """deepcopy yields an equal Url."""
    original = Url('http://example.com')
    assert deepcopy(original) == Url('http://example.com/')
def test_multi_url_build() -> None:
    """MultiHostUrl.build with single-host kwargs assembles the full URL."""
    built = MultiHostUrl.build(
        scheme='postgresql',
        username='testuser',
        password='testpassword',
        host='127.0.0.1',
        port=5432,
        path='database',
        query='sslmode=require',
        fragment='test',
    )
    expected = 'postgresql://testuser:testpassword@127.0.0.1:5432/database?sslmode=require#test'
    assert str(built) == expected
    assert built == MultiHostUrl(expected)
@pytest.mark.parametrize('url_type', [Url, MultiHostUrl])
@pytest.mark.parametrize(
    'include_kwarg',
    [
        pytest.param(
            True,
            marks=pytest.mark.xfail(
                reason='semantics of `encode_credentials` need to be fully decided, not enabled yet'
            ),
        ),
        False,
    ],
)
def test_url_build_not_encode_credentials(url_type: type[Union[Url, MultiHostUrl]], include_kwarg: bool) -> None:
    """Without credential encoding, reserved characters in user/password bleed into other URL parts."""
    build_kwargs = {'encode_credentials': False} if include_kwarg else {}
    built = url_type.build(
        scheme='postgresql',
        username='user name',
        password='p@ss/word?#__',
        host='example.com',
        port=5432,
        **build_kwargs,
    )
    assert built == url_type('postgresql://user%20name:p@ss/word?#__@example.com:5432')
    assert str(built) == 'postgresql://user%20name:p@ss/word?#__@example.com:5432'
    # NB without encoding, the special characters can seriously affect the URL
    # parts, probably not what users want.
    #
    # TODO: in v3, probably should set `encode_credentials=True` by default
    #
    # FIXME: I guess there are similar issues with query containing #, for
    # example? Potentially for all of these cases we could just raise an error?
    if url_type is Url:
        # the '@' in the password made 'ss' the host and truncated the password
        assert built.host == 'ss'
        assert built.username == 'user%20name'
        assert built.password == 'p'
        assert built.port is None
    else:
        assert built.hosts() == [{'username': 'user%20name', 'password': 'p', 'host': 'ss', 'port': None}]
    assert built.path == '/word'
    assert built.query == ''
    assert built.fragment == '__@example.com:5432'
@pytest.mark.xfail(reason='semantics of `encode_credentials` need to be fully decided, not enabled yet')
@pytest.mark.parametrize('url_type', [Url, MultiHostUrl])
def test_url_build_encode_credentials(url_type: type[Union[Url, MultiHostUrl]]) -> None:
    """With encode_credentials=True, reserved characters stay percent-encoded inside the credentials."""
    built = url_type.build(
        scheme='postgresql',
        username='user name',
        password='p@ss/word?#__',
        host='example.com',
        port=5432,
        encode_credentials=True,
    )
    encoded = 'postgresql://user%20name:p%40ss%2Fword%3F%23__@example.com:5432'
    assert built == url_type(encoded)
    assert str(built) == encoded
    if url_type is Url:
        assert built.username == 'user%20name'
        assert built.password == 'p%40ss%2Fword%3F%23__'
    else:
        assert built.hosts() == [
            {'username': 'user%20name', 'password': 'p%40ss%2Fword%3F%23__', 'host': 'example.com', 'port': 5432}
        ]
@pytest.mark.parametrize(
    'include_kwarg',
    [
        pytest.param(
            True,
            marks=pytest.mark.xfail(
                reason='semantics of `encode_credentials` need to be fully decided, not enabled yet'
            ),
        ),
        False,
    ],
)
def test_multi_url_build_hosts_not_encode_credentials(include_kwarg: bool) -> None:
    """Un-encoded credentials in `hosts` entries corrupt the parsed URL parts."""
    build_kwargs = {'encode_credentials': False} if include_kwarg else {}
    hosts = [
        {'host': 'example.com', 'password': 'p@ss/word?#__', 'username': 'user name', 'port': 5431},
        {'host': 'example.org', 'password': 'p@%ss__', 'username': 'other', 'port': 5432},
    ]
    built = MultiHostUrl.build(scheme='postgresql', hosts=hosts, **build_kwargs)
    # NB: see comment in `test_url_build_not_encode_credentials` about not
    # encoding credentials leading to VERY broken results
    assert str(built) == 'postgresql://user%20name:p@ss/word?#__@example.com:5431,other:p@%ss__@example.org:5432'
    # everything after the first '@' in the password got re-parsed as host/path/fragment
    assert built.hosts() == [
        {'username': 'user%20name', 'password': 'p', 'host': 'ss', 'port': None},
    ]
    assert built.path == '/word'
    assert built.query == ''
    assert built.fragment == '__@example.com:5431,other:p@%ss__@example.org:5432'
@pytest.mark.xfail(reason='semantics of `encode_credentials` need to be fully decided, not enabled yet')
def test_multi_url_build_hosts_encode_credentials() -> None:
    """Credentials in each `hosts` entry are percent-encoded when requested."""
    hosts = [
        {'host': 'example.com', 'password': 'p@ss/word?#__', 'username': 'user name', 'port': 5431},
        {'host': 'example.org', 'password': 'p@%ss__', 'username': 'other', 'port': 5432},
    ]
    built = MultiHostUrl.build(scheme='postgresql', hosts=hosts, encode_credentials=True)
    expected = 'postgresql://user%20name:p%40ss%2Fword%3F%23__@example.com:5431,other:p%40%25ss__@example.org:5432'
    assert str(built) == expected
    assert built.hosts() == [
        {'username': 'user%20name', 'password': 'p%40ss%2Fword%3F%23__', 'host': 'example.com', 'port': 5431},
        {'username': 'other', 'password': 'p%40%25ss__', 'host': 'example.org', 'port': 5432},
    ]
@pytest.mark.parametrize('field', ['host', 'password', 'username', 'port'])
def test_multi_url_build_hosts_set_with_single_value(field) -> None:
    """Providing `hosts` together with any single-host kwarg raises ValueError."""
    hosts = [
        {'host': '127.0.0.1', 'password': 'testpassword', 'username': 'testuser', 'port': 5432},
        {'host': '127.0.0.1', 'password': 'testpassword', 'username': 'testuser', 'port': 5432},
    ]
    kwargs = dict(scheme='postgresql', hosts=hosts, path='database', query='sslmode=require', fragment='test')
    # add one conflicting single-host value of the right type
    kwargs[field] = 5432 if field == 'port' else 'test'
    with pytest.raises(ValueError):
        MultiHostUrl.build(**kwargs)
def test_multi_url_build_hosts_empty_host() -> None:
    """An empty host dict inside `hosts` raises ValueError."""
    hosts = [{}]
    with pytest.raises(ValueError):
        MultiHostUrl.build(scheme='postgresql', hosts=hosts, path='database', query='sslmode=require', fragment='test')
def test_multi_url_build_hosts() -> None:
    """Building from a list of host dicts joins them comma-separated in order."""
    hosts = [
        {'host': '127.0.0.1', 'password': 'testpassword', 'username': 'testuser', 'port': 5431},
        {'host': '127.0.0.1', 'password': 'testpassword', 'username': 'testuser', 'port': 5433},
    ]
    built = MultiHostUrl.build(
        scheme='postgresql', hosts=hosts, path='database', query='sslmode=require', fragment='test'
    )
    expected = (
        'postgresql://testuser:testpassword@127.0.0.1:5431,testuser:testpassword@127.0.0.1:5433'
        '/database?sslmode=require#test'
    )
    assert built == MultiHostUrl(expected)
    assert str(built) == expected
def test_multi_url_build_neither_host_and_hosts_set() -> None:
    """Omitting both `host` and `hosts` raises ValueError."""
    kwargs = dict(
        scheme='postgresql',
        username='testuser',
        password='testpassword',
        port=5432,
        path='database',
        query='sslmode=require',
        fragment='test',
    )
    with pytest.raises(ValueError):
        MultiHostUrl.build(**kwargs)
def test_url_build() -> None:
    """Url.build assembles all components into the expected URL string."""
    built = Url.build(
        scheme='postgresql',
        username='testuser',
        password='testpassword',
        host='127.0.0.1',
        port=5432,
        path='database',
        query='sslmode=require',
        fragment='test',
    )
    expected = 'postgresql://testuser:testpassword@127.0.0.1:5432/database?sslmode=require#test'
    assert built == Url(expected)
    assert str(built) == expected
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_url.py",
"license": "MIT License",
"lines": 1352,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_uuid.py | import copy
import re
from uuid import UUID, SafeUUID
import pytest
from pydantic_core import SchemaValidator, ValidationError, core_schema
from ..conftest import Err, PyAndJson
class MyStr(str): ...
@pytest.mark.parametrize(
    'input_value,expected',
    [
        # Valid UUIDs
        ('12345678-1234-1234-1234-567812345678', UUID('12345678-1234-1234-1234-567812345678')),
        ('550e8400-e29b-41d4-a716-446655440000', UUID('550e8400-e29b-41d4-a716-446655440000')),
        ('f47ac10b-58cc-4372-a567-0e02b2c3d479', UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')),
        ('123e4567-e89b-12d3-a456-426655440000', UUID('123e4567-e89b-12d3-a456-426655440000')),
        ('de305d54-75b4-431b-adb2-eb6b9e546014', UUID('de305d54-75b4-431b-adb2-eb6b9e546014')),
        ('00000000-0000-0000-0000-000000000000', UUID('00000000-0000-0000-0000-000000000000')),
        ('1b4e28ba-2fa1-11d2-883f-0016d3cca427', UUID('1b4e28ba-2fa1-11d2-883f-0016d3cca427')),
        ('6ba7b810-9dad-11d1-80b4-00c04fd430c8', UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')),
        ('886313e1-3b8a-5372-9b90-0c9aee199e5d', UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')),
        ('c0a8f9a8-aa5e-482b-a067-9cb3a51f5c11', UUID('c0a8f9a8-aa5e-482b-a067-9cb3a51f5c11')),
        ('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05', UUID('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05')),
        ('0194fdc2-5d6a-733c-97f9-2feeb9d2a609', UUID('0194fdc2-5d6a-733c-97f9-2feeb9d2a609')),
        ('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11', UUID('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11')),
        ('00000000-8000-4000-8000-000000000000', UUID('00000000-8000-4000-8000-000000000000')),
        ('00000000-0000-4000-0000-000000000000', UUID('00000000-0000-4000-0000-000000000000')),
        # str subclasses are accepted too
        (MyStr('00000000-0000-4000-0000-000000000000'), UUID('00000000-0000-4000-0000-000000000000')),
        # 16 raw bytes are interpreted as the UUID's big-endian integer value
        (b'\x12\x34\x56\x78' * 4, UUID('12345678-1234-5678-1234-567812345678')),
        (b'\x00\x00\x00\x00' * 4, UUID('00000000-0000-0000-0000-000000000000')),
        # bytes may also hold the ASCII string form
        (b'ebcdab58-6eb8-46fb-a190-d07a33e9eac8', UUID('ebcdab58-6eb8-46fb-a190-d07a33e9eac8')),
        (UUID('12345678-1234-5678-1234-567812345678'), UUID('12345678-1234-5678-1234-567812345678')),
        (UUID('550e8400-e29b-41d4-a716-446655440000'), UUID('550e8400-e29b-41d4-a716-446655440000')),
        # Invalid UUIDs
        (
            'not-a-valid-uuid',
            Err(
                'Input should be a valid UUID, invalid character: expected an optional prefix of'
                + ' `urn:uuid:` followed by [0-9a-fA-F-], found `n` at 1'
            ),
        ),
        (
            '12345678-1234-5678-1234-5678123456789',
            Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 13'),
        ),
        (
            '12345678-1234-1234-1234-1234567890123',
            Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 13'),
        ),
        (b'\x00\x00\x00\x000' * 4, Err('Input should be a valid UUID, invalid length: expected 16 bytes, found 20')),
        ('550e8400-e29b-41d4-a716', Err('Input should be a valid UUID, invalid group count: expected 5, found 4')),
        (
            'f47ac10b-58cc-4372-a567-0e02b2c3d47',
            Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 11'),
        ),
        (
            'de305d54-75b4-431b-adb2-eb6b9e54601',
            Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 11'),
        ),
        (
            '1b4e28ba-2fa1-11d2-883f-0016d3cca42',
            Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 11'),
        ),
        (
            '6ba7b810-9dad-11d1-80b4-00c04fd430c',
            Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 11'),
        ),
        (
            '886313e1-3b8a-5372-9b90-0c9aee199e5',
            Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 11'),
        ),
        (
            'c0a8f9a8-aa5e-482b-a067-9cb3a51f5c1',
            Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 11'),
        ),
        # integers are rejected even if they could encode a UUID value
        (0xA1A2A3A4B1B2C1C2D1D2D3D4D5D6D7D8, Err('UUID input should be a string, bytes or UUID object')),
        (00000000000000000000000000, Err('UUID input should be a string, bytes or UUID object')),
    ],
)
def test_uuid(input_value, expected):
    """Lax-mode UUID validation: `expected` is either the parsed UUID or an `Err` message."""
    v = SchemaValidator(core_schema.uuid_schema())
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            result = v.validate_python(input_value)
            # only reached if validation unexpectedly succeeds; aids debugging the failure
            print(f'input_value={input_value} result={result}')
    else:
        output = v.validate_python(input_value)
        assert output == expected
        assert isinstance(output, UUID)
@pytest.mark.parametrize(
    'input_value,expected',
    [
        (UUID('12345678-1234-5678-1234-567812345678'), UUID('12345678-1234-5678-1234-567812345678')),
        ('12345678-1234-5678-1234-567812345678', Err('Input should be an instance of UUID [type=is_instance_of,')),
        (b'12345678-1234-5678-1234-567812345678', Err('Input should be an instance of UUID [type=is_instance_of,')),
        (1654646400, Err('Input should be an instance of UUID [type=is_instance_of')),
    ],
)
def test_uuid_strict(input_value, expected):
    """In strict mode, only actual UUID instances are accepted — not str/bytes/int forms."""
    validator = SchemaValidator(core_schema.uuid_schema(strict=True))
    if not isinstance(expected, Err):
        validated = validator.validate_python(input_value)
        assert isinstance(validated, UUID)
        assert validated == expected
    else:
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            validator.validate_python(input_value)
@pytest.mark.parametrize(
    'input_value, version, expected',
    [
        # Valid UUIDs
        ('a6cc5730-2261-11ee-9c43-2eb5a363657c', 1, UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c')),
        (UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c'), 1, UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c')),
        ('04e4aeb3-8f20-30d0-8852-d295e1265eed', 3, UUID('04e4aeb3-8f20-30d0-8852-d295e1265eed')),
        (UUID('04e4aeb3-8f20-30d0-8852-d295e1265eed'), 3, UUID('04e4aeb3-8f20-30d0-8852-d295e1265eed')),
        ('0e7ac198-9acd-4c0c-b4b4-761974bf71d7', 4, UUID('0e7ac198-9acd-4c0c-b4b4-761974bf71d7')),
        (UUID('0e7ac198-9acd-4c0c-b4b4-761974bf71d7'), 4, UUID('0e7ac198-9acd-4c0c-b4b4-761974bf71d7')),
        ('0e7ac198-9acd-4c0c-b4b4-761974bf71d7', 4, UUID('0e7ac198-9acd-4c0c-b4b4-761974bf71d7')),
        (UUID('0e7ac198-9acd-4c0c-b4b4-761974bf71d7'), 4, UUID('0e7ac198-9acd-4c0c-b4b4-761974bf71d7')),
        ('0194fdc2-5d6a-733c-97f9-2feeb9d2a609', 7, UUID('0194fdc2-5d6a-733c-97f9-2feeb9d2a609')),
        (UUID('0194fdc2-5d6a-733c-97f9-2feeb9d2a609'), 7, UUID('0194fdc2-5d6a-733c-97f9-2feeb9d2a609')),
        ('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05', 6, UUID('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05')),
        (UUID('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05'), 6, UUID('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05')),
        ('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11', 8, UUID('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11')),
        (UUID('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11'), 8, UUID('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11')),
        # Cases from pydantic#7355 and pydantic#7537
        # `UUID.version` makes sense for RFC 4122 UUIDs only. For non RFC 4122 UUIDs Python uses `UUID.version=None`
        ('00000000-8000-4000-8000-000000000000', 4, UUID('00000000-8000-4000-8000-000000000000')),
        (UUID('00000000-8000-4000-8000-000000000000'), 4, UUID('00000000-8000-4000-8000-000000000000')),
        ('00000000-0000-4000-0000-000000000000', None, UUID('00000000-0000-4000-0000-000000000000')),
        (UUID('00000000-0000-4000-0000-000000000000'), None, UUID('00000000-0000-4000-0000-000000000000')),
        ('00000000-7fff-4000-7fff-000000000000', None, UUID('00000000-7fff-4000-7fff-000000000000')),
        (UUID('00000000-7fff-4000-7fff-000000000000'), None, UUID('00000000-7fff-4000-7fff-000000000000')),
        (UUID('00000000-7fff-4000-7fff-000000000000'), 4, Err('UUID version 4 expected')),
        ('b34b6755-f49c-3bd2-6f06-131a708c2bf3', None, UUID('b34b6755-f49c-3bd2-6f06-131a708c2bf3')),
        (UUID('b34b6755-f49c-3bd2-6f06-131a708c2bf3'), None, UUID('b34b6755-f49c-3bd2-6f06-131a708c2bf3')),
        (UUID('b34b6755-f49c-3bd2-6f06-131a708c2bf3'), 4, Err('UUID version 4 expected')),
        # Invalid UUIDs
        ('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05', 8, Err('UUID version 8 expected')),
        (UUID('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05'), 8, Err('UUID version 8 expected')),
        ('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11', 6, Err('UUID version 6 expected')),
        (UUID('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11'), 6, Err('UUID version 6 expected')),
        ('a6cc5730-2261-11ee-9c43-2eb5a363657c', 7, Err('UUID version 7 expected')),
        (UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c'), 7, Err('UUID version 7 expected')),
        ('a6cc5730-2261-11ee-9c43-2eb5a363657c', 5, Err('UUID version 5 expected')),
        (UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c'), 5, Err('UUID version 5 expected')),
        ('04e4aeb3-8f20-30d0-8852-d295e1265eed', 4, Err('UUID version 4 expected')),
        (UUID('04e4aeb3-8f20-30d0-8852-d295e1265eed'), 4, Err('UUID version 4 expected')),
        ('0e7ac198-9acd-4c0c-b4b4-761974bf71d7', 3, Err('UUID version 3 expected')),
        (UUID('0e7ac198-9acd-4c0c-b4b4-761974bf71d7'), 3, Err('UUID version 3 expected')),
        ('08ed0736-fb95-5cc5-85ed-37e4f3df9b29', 1, Err('UUID version 1 expected')),
        (UUID('08ed0736-fb95-5cc5-85ed-37e4f3df9b29'), 1, Err('UUID version 1 expected')),
        ('00000000-0000-4000-0000-000000000000', 4, Err('UUID version 4 expected')),
        (UUID('00000000-0000-4000-0000-000000000000'), 4, Err('UUID version 4 expected')),
    ],
)
def test_uuid_version(input_value, version, expected):
    """Check version-constrained UUID schemas accept matching versions and
    reject mismatches; ``version=None`` means an unconstrained schema."""
    schema = core_schema.uuid_schema()
    if version is not None:
        schema = core_schema.uuid_schema(version=version)
    v = SchemaValidator(schema)
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_python(input_value)
    else:
        output = v.validate_python(input_value)
        assert output == expected
        assert isinstance(output, UUID)
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ('a6cc5730-2261-11ee-9c43-2eb5a363657c', UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c')),
        ('12345678123456781234567812345678', UUID('12345678-1234-5678-1234-567812345678')),
        (
            'c0a8f9a8-aa5e-482b-a067-9cb3a51f5c1',
            Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 11'),
        ),
        (1e1, Err('input should be a string, bytes or UUID object')),
        (None, Err('input should be a string, bytes or UUID object')),
        (True, Err('input should be a string, bytes or UUID object')),
        (0xA1A2A3A4B1B2C1C2D1D2D3D4D5D6D7D8, Err('input should be a string, bytes or UUID object')),
        (0x12345678123456781234567812345678, Err('input should be a string, bytes or UUID object')),
    ],
)
def test_uuid_json(py_and_json: PyAndJson, input_value, expected):
    """Validate UUIDs through both the Python and JSON code paths; non-string
    inputs are rejected with a type error."""
    v = py_and_json({'type': 'uuid'})
    if isinstance(expected, Err):
        with pytest.raises(ValidationError, match=re.escape(expected.message)):
            v.validate_test(input_value)
    else:
        output = v.validate_test(input_value)
        assert output == expected
        assert isinstance(output, UUID)
def test_uuid_deepcopy():
    """A validated UUID survives ``copy.deepcopy`` and compares equal to the original."""
    validated = SchemaValidator(core_schema.uuid_schema()).validate_python('a6cc5730-2261-11ee-9c43-2eb5a363657c')
    duplicate = copy.deepcopy(validated)
    assert repr(validated) == "UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c')"
    assert duplicate == validated
    assert isinstance(validated, UUID)
def test_uuid_copy():
    """A validated UUID survives ``copy.copy`` and compares equal to the original."""
    validated = SchemaValidator(core_schema.uuid_schema()).validate_python('a6cc5730-2261-11ee-9c43-2eb5a363657c')
    shallow = copy.copy(validated)
    assert repr(validated) == "UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c')"
    assert shallow == validated
    assert isinstance(validated, UUID)
def test_uuid_wrap_json():
    """Regression test for pydantic/pydantic#8147: a wrap validator around a
    UUID schema must accept strict input on both Python and JSON paths."""
    wrapped = core_schema.no_info_wrap_validator_function(lambda v, handler: handler(v), core_schema.uuid_schema())
    validator = SchemaValidator(wrapped)
    expected = UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c')
    assert validator.validate_python(UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c'), strict=True) == expected
    assert validator.validate_json('"a6cc5730-2261-11ee-9c43-2eb5a363657c"', strict=True) == expected
def test_uuid_safety_unknown():
    """UUIDs parsed by the validator report an unknown ``is_safe`` status.

    Bug fix: the function was named ``uuid_safety_unknown`` (no ``test_``
    prefix), so pytest never collected or ran it.
    """
    # local import: because the test never ran, a missing module-level import
    # of SafeUUID would have gone unnoticed — guarantee the name is in scope
    from uuid import SafeUUID

    output = SchemaValidator(core_schema.uuid_schema()).validate_python('a6cc5730-2261-11ee-9c43-2eb5a363657c')
    assert output.is_safe is SafeUUID.unknown
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_uuid.py",
"license": "MIT License",
"lines": 217,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/tests/validators/test_with_default.py | import os
import platform
import sys
import weakref
from collections import deque
from dataclasses import dataclass
from typing import Any, Callable, Union, cast
import pytest
from pydantic_core import (
ArgsKwargs,
PydanticUndefined,
PydanticUseDefault,
SchemaError,
SchemaValidator,
Some,
ValidationError,
core_schema,
)
from pydantic_core._pydantic_core import SchemaSerializer
from ..conftest import PyAndJson, assert_gc
def test_typed_dict_default():
    """A typed-dict field with a default is filled in when absent from the input."""
    y_schema = core_schema.with_default_schema(schema=core_schema.str_schema(), default='[default]')
    validator = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'x': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                'y': core_schema.typed_dict_field(schema=y_schema),
            }
        )
    )
    # explicit value wins; missing key falls back to the default
    assert validator.validate_python({'x': 'x', 'y': 'y'}) == {'x': 'x', 'y': 'y'}
    assert validator.validate_python({'x': 'x'}) == {'x': 'x', 'y': '[default]'}
def test_typed_dict_omit():
    """An optional field with ``on_error='omit'`` is dropped when its value fails validation."""
    y_schema = core_schema.with_default_schema(schema=core_schema.str_schema(), on_error='omit')
    validator = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'x': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                'y': core_schema.typed_dict_field(schema=y_schema, required=False),
            }
        )
    )
    assert validator.validate_python({'x': 'x', 'y': 'y'}) == {'x': 'x', 'y': 'y'}
    assert validator.validate_python({'x': 'x'}) == {'x': 'x'}
    # 42 is not a valid str, so 'y' is silently omitted rather than erroring
    assert validator.validate_python({'x': 'x', 'y': 42}) == {'x': 'x'}
def test_arguments():
    """An arguments schema applies ``default_factory`` when the argument is absent."""
    arg_a = {
        'name': 'a',
        'mode': 'positional_or_keyword',
        'schema': core_schema.with_default_schema(schema=core_schema.int_schema(), default_factory=lambda: 1),
    }
    validator = SchemaValidator(core_schema.arguments_schema(arguments=[arg_a]))
    assert validator.validate_python({'a': 2}) == ((), {'a': 2})
    assert validator.validate_python(ArgsKwargs((2,))) == ((2,), {})
    assert validator.validate_python(ArgsKwargs((2,), {})) == ((2,), {})
    # no value supplied -> the factory default is used
    assert validator.validate_python(()) == ((), {'a': 1})
def test_arguments_omit():
    """``on_error='omit'`` is rejected at schema-build time for arguments."""
    bad_arg = {
        'name': 'a',
        'mode': 'positional_or_keyword',
        'schema': core_schema.with_default_schema(schema=core_schema.int_schema(), default=1, on_error='omit'),
    }
    with pytest.raises(SchemaError, match="Parameter 'a': omit_on_error cannot be used with arguments"):
        SchemaValidator(schema=core_schema.arguments_schema(arguments=[bad_arg]))
@pytest.mark.parametrize(
    'input_value,expected', [([1, 2, 3], [1, 2, 3]), ([1, '2', 3], [1, 2, 3]), ([1, 'wrong', 3], [1, 3])]
)
def test_list_json(py_and_json: PyAndJson, input_value, expected):
    """List items that fail int validation are omitted (Python and JSON paths)."""
    items = {'type': 'default', 'schema': {'type': 'int'}, 'on_error': 'omit'}
    v = py_and_json({'type': 'list', 'items_schema': items})
    assert v.validate_test(input_value) == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ([1, '2', 3], [1, 2, 3]),
        ([1, 'wrong', 3], [1, 3]),
        ((1, '2', 3), [1, 2, 3]),
        ((1, 'wrong', 3), [1, 3]),
        (deque([1, '2', 3]), [1, 2, 3]),
        (deque([1, 'wrong', 3]), [1, 3]),
    ],
)
def test_list(input_value, expected):
    """Invalid items in any list-like input are silently dropped with ``on_error='omit'``."""
    item_schema = core_schema.with_default_schema(schema=core_schema.int_schema(), on_error='omit')
    validator = SchemaValidator(core_schema.list_schema(items_schema=item_schema))
    assert validator.validate_python(input_value) == expected
@pytest.mark.parametrize(
    'input_value,expected',
    [
        ({1, '2', 3}, {1, 2, 3}),
        ([1, '2', 3], {1, 2, 3}),
        ([1, 'wrong', 3], {1, 3}),
        (deque([1, '2', 3]), {1, 2, 3}),
        (deque([1, 'wrong', 3]), {1, 3}),
    ],
)
def test_set(input_value, expected):
    """Invalid items in set-like input are silently dropped with ``on_error='omit'``."""
    item_schema = core_schema.with_default_schema(schema=core_schema.int_schema(), on_error='omit')
    validator = SchemaValidator(core_schema.set_schema(items_schema=item_schema))
    assert validator.validate_python(input_value) == expected
def test_dict_values(py_and_json: PyAndJson):
    """Dict entries whose *value* fails validation are dropped with ``on_error='omit'``."""
    values_schema = {'type': 'default', 'schema': {'type': 'int'}, 'on_error': 'omit'}
    v = py_and_json({'type': 'dict', 'keys_schema': {'type': 'str'}, 'values_schema': values_schema})
    for data, validated in [
        ({'a': 1, 'b': '2'}, {'a': 1, 'b': 2}),
        ({'a': 1, 'b': 'wrong'}, {'a': 1}),
        ({'a': 1, 'b': 'wrong', 'c': '3'}, {'a': 1, 'c': 3}),
    ]:
        assert v.validate_test(data) == validated
def test_dict_keys():
    """Dict entries whose *key* fails validation are dropped with ``on_error='omit'``."""
    validator = SchemaValidator(
        core_schema.dict_schema(
            keys_schema=core_schema.with_default_schema(schema=core_schema.int_schema(), on_error='omit'),
            values_schema=core_schema.str_schema(),
        )
    )
    for data, validated in [
        ({1: 'a', '2': 'b'}, {1: 'a', 2: 'b'}),
        ({1: 'a', 'wrong': 'b'}, {1: 'a'}),
        ({1: 'a', 'wrong': 'b', 3: 'c'}, {1: 'a', 3: 'c'}),
    ]:
        assert validator.validate_python(data) == validated
def test_tuple_variable(py_and_json: PyAndJson):
    """Variadic tuple items that fail validation are omitted."""
    v = py_and_json(
        {
            'type': 'tuple',
            'items_schema': [{'type': 'default', 'schema': {'type': 'int'}, 'on_error': 'omit'}],
            'variadic_item_index': 0,
        }
    )
    for data, validated in [((1, 2, 3), (1, 2, 3)), ([1, '2', 3], (1, 2, 3)), ([1, 'wrong', 3], (1, 3))]:
        assert v.validate_python(data) == validated
def test_tuple_positional():
    """A positional tuple item with a default is filled in when the item is missing."""
    second = core_schema.with_default_schema(schema=core_schema.int_schema(), default=42)
    validator = SchemaValidator(core_schema.tuple_schema(items_schema=[core_schema.int_schema(), second]))
    assert validator.validate_python((1, '2')) == (1, 2)
    assert validator.validate_python([1, '2']) == (1, 2)
    assert validator.validate_json('[1, "2"]') == (1, 2)
    # the second item is absent, so its default applies
    assert validator.validate_python((1,)) == (1, 42)
def test_tuple_positional_omit():
    """Variadic tail items that fail validation are dropped with ``on_error='omit'``."""
    tail = core_schema.with_default_schema(schema=core_schema.int_schema(), on_error='omit')
    validator = SchemaValidator(
        core_schema.tuple_schema(
            items_schema=[core_schema.int_schema(), core_schema.int_schema(), tail],
            variadic_item_index=2,
        )
    )
    for data, validated in [
        ((1, '2'), (1, 2)),
        ((1, '2', 3, '4'), (1, 2, 3, 4)),
        ((1, '2', 'wrong', '4'), (1, 2, 4)),
        ((1, '2', 3, 'x4'), (1, 2, 3)),
    ]:
        assert validator.validate_python(data) == validated
    assert validator.validate_json('[1, "2", 3, "x4"]') == (1, 2, 3)
def test_on_error_default():
    """When validation fails and ``on_error='default'``, the default is returned instead."""
    schema = core_schema.with_default_schema(schema=core_schema.int_schema(), default=2, on_error='default')
    validator = SchemaValidator(schema)
    for raw, validated in [(42, 42), ('42', 42), ('wrong', 2)]:
        assert validator.validate_python(raw) == validated
def test_factory_runtime_error():
    """Exceptions raised by a ``default_factory`` propagate to the caller."""

    def broken():
        raise RuntimeError('this is broken')

    validator = SchemaValidator(
        core_schema.with_default_schema(schema=core_schema.int_schema(), on_error='default', default_factory=broken)
    )
    # the factory is only invoked when validation fails
    for good in (42, '42'):
        assert validator.validate_python(good) == 42
    with pytest.raises(RuntimeError, match='this is broken'):
        validator.validate_python('wrong')
def test_factory_missing_arg():
    """A factory expecting an argument raises ``TypeError`` when
    ``default_factory_takes_data=False`` makes pydantic-core call it with none."""

    def broken(x):
        return 7

    validator = SchemaValidator(
        core_schema.with_default_schema(
            schema=core_schema.int_schema(),
            on_error='default',
            default_factory=broken,
            default_factory_takes_data=False,
        )
    )
    for good in (42, '42'):
        assert validator.validate_python(good) == 42
    with pytest.raises(TypeError, match=r"broken\(\) missing 1 required positional argument: 'x'"):
        validator.validate_python('wrong')
def test_typed_dict_error():
    """A ``default_factory`` lambda expecting an argument fails with ``TypeError``
    when the default is needed and pydantic-core calls it without arguments."""
    y_schema = core_schema.with_default_schema(schema=core_schema.str_schema(), default_factory=lambda y: y * 2)
    validator = SchemaValidator(
        core_schema.typed_dict_schema(
            fields={
                'x': core_schema.typed_dict_field(schema=core_schema.str_schema()),
                'y': core_schema.typed_dict_field(schema=y_schema),
            }
        )
    )
    assert validator.validate_python({'x': 'x', 'y': 'y'}) == {'x': 'x', 'y': 'y'}
    with pytest.raises(TypeError, match=r"<lambda>\(\) missing 1 required positional argument: 'y'"):
        validator.validate_python({'x': 'x'})
def test_on_error_default_not_int():
    """The fallback default need not match the schema's own type."""
    validator = SchemaValidator(
        core_schema.with_default_schema(schema=core_schema.int_schema(), default=[1, 2, 3], on_error='default')
    )
    assert validator.validate_python(42) == 42
    assert validator.validate_python('42') == 42
    # on failure the (unvalidated) list default is returned as-is
    assert validator.validate_python('wrong') == [1, 2, 3]
def test_on_error_default_factory():
    """``default_factory`` supplies the fallback when validation fails."""
    validator = SchemaValidator(
        core_schema.with_default_schema(schema=core_schema.int_schema(), default_factory=lambda: 17, on_error='default')
    )
    for raw, validated in [(42, 42), ('42', 42), ('wrong', 17)]:
        assert validator.validate_python(raw) == validated
def test_on_error_omit():
    """``on_error='omit'`` on a top-level value has nowhere to omit to and errors out."""
    validator = SchemaValidator(core_schema.with_default_schema(schema=core_schema.int_schema(), on_error='omit'))
    assert validator.validate_python(42) == 42
    with pytest.raises(SchemaError, match='Uncaught Omit error, please check your usage of `default` validators.'):
        validator.validate_python('wrong')
def test_on_error_wrong():
    """``on_error='default'`` without any default is a schema-build error."""
    bad_schema = core_schema.with_default_schema(schema=core_schema.int_schema(), on_error='default')
    with pytest.raises(SchemaError, match="'on_error = default' requires a `default` or `default_factory`"):
        SchemaValidator(bad_schema)
def test_build_default_and_default_factory():
    """Supplying both ``default`` and ``default_factory`` is rejected at build time."""
    bad_schema = core_schema.with_default_schema(
        schema=core_schema.int_schema(), default_factory=lambda: 1, default=2
    )
    with pytest.raises(SchemaError, match="'default' and 'default_factory' cannot be used together"):
        SchemaValidator(schema=bad_schema)
def test_model_class():
    """A model-level default — the (fields dict, extra, fields_set) triple — is
    applied wholesale when inner model-fields validation fails with
    ``on_error='default'``."""

    class MyModel:
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        field_b: int

    v = SchemaValidator(
        core_schema.model_schema(
            cls=MyModel,
            schema=core_schema.with_default_schema(
                schema=core_schema.model_fields_schema(
                    fields={
                        'field_a': core_schema.model_field(schema=core_schema.str_schema()),
                        'field_b': core_schema.model_field(schema=core_schema.int_schema()),
                    }
                ),
                # the default is the (model_dict, model_extra, fields_set) triple
                default=({'field_a': '[default-a]', 'field_b': '[default-b]'}, None, set()),
                on_error='default',
            ),
        )
    )
    m = v.validate_python({'field_a': 'test', 'field_b': 12})
    assert isinstance(m, MyModel)
    assert m.field_a == 'test'
    assert m.field_b == 12
    assert m.__pydantic_fields_set__ == {'field_a', 'field_b'}
    # 'wrong' fails int validation -> the whole default triple replaces the input
    m = v.validate_python({'field_a': 'test', 'field_b': 'wrong'})
    assert isinstance(m, MyModel)
    assert m.field_a == '[default-a]'
    assert m.field_b == '[default-b]'
    assert m.__pydantic_fields_set__ == set()
@pytest.mark.parametrize('config_validate_default', [True, False, None])
@pytest.mark.parametrize('schema_validate_default', [True, False, None])
@pytest.mark.parametrize(
    'inner_schema',
    [
        core_schema.no_info_after_validator_function(lambda x: x * 2, core_schema.int_schema()),
        core_schema.no_info_before_validator_function(lambda x: str(int(x) * 2), core_schema.int_schema()),
        core_schema.no_info_wrap_validator_function(lambda x, h: h(str(int(x) * 2)), core_schema.int_schema()),
        core_schema.no_info_wrap_validator_function(lambda x, h: h(x) * 2, core_schema.int_schema()),
    ],
    ids=['after', 'before', 'wrap-before', 'wrap-after'],
)
def test_validate_default(
    config_validate_default: Union[bool, None],
    schema_validate_default: Union[bool, None],
    inner_schema: core_schema.CoreSchema,
):
    """The schema-level ``validate_default`` setting overrides the config-level
    one; the default only passes through the inner (doubling) validator when
    default validation is effectively enabled."""
    if config_validate_default is not None:
        config = core_schema.CoreConfig(validate_default=config_validate_default)
    else:
        config = None
    v = SchemaValidator(
        core_schema.typed_dict_schema(
            {
                'x': core_schema.typed_dict_field(
                    core_schema.with_default_schema(
                        inner_schema, default='42', validate_default=schema_validate_default
                    )
                )
            },
            config=config,
        )
    )
    assert v.validate_python({'x': '2'}) == {'x': 4}
    # '42' is doubled to 84 only when default validation is enabled: either the
    # schema enables it, or the config enables it and the schema doesn't disable it
    expected = (
        84
        if (config_validate_default is True and schema_validate_default is not False or schema_validate_default is True)
        else '42'
    )
    assert v.validate_python({}) == {'x': expected}
def test_validate_default_factory():
    """Config-level ``validate_default`` also applies to factory-produced defaults."""
    item = core_schema.with_default_schema(core_schema.int_schema(), default_factory=lambda: '42')
    validator = SchemaValidator(
        core_schema.tuple_positional_schema([item]),
        config=dict(validate_default=True),
    )
    assert validator.validate_python(('2',)) == (2,)
    # the factory's '42' is validated into the int 42
    assert validator.validate_python(()) == (42,)
def test_validate_default_error_tuple():
    """An invalid default in a positional tuple item surfaces as a
    ``ValidationError`` located at that item's index when ``validate_default=True``."""
    v = SchemaValidator(
        core_schema.tuple_positional_schema(
            [core_schema.with_default_schema(core_schema.int_schema(), default='wrong', validate_default=True)]
        )
    )
    assert v.validate_python(('2',)) == (2,)
    with pytest.raises(ValidationError, match='Input should be a valid integer,') as exc_info:
        v.validate_python(())
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': (0,),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'wrong',
        }
    ]
def test_validate_default_error_typed_dict():
    """An invalid default in a typed-dict field surfaces as a ``ValidationError``
    located at that field's key when ``validate_default=True``."""
    v = SchemaValidator(
        core_schema.typed_dict_schema(
            {
                'x': core_schema.typed_dict_field(
                    core_schema.with_default_schema(core_schema.int_schema(), default='xx', validate_default=True)
                )
            }
        )
    )
    assert v.validate_python({'x': '2'}) == {'x': 2}
    with pytest.raises(ValidationError, match='Input should be a valid integer,') as exc_info:
        v.validate_python({})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('x',),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'xx',
        }
    ]
def test_deepcopy_mutable_defaults():
    """Mutable defaults are copied per validation, so instances never share
    state with each other or with the stored default object."""
    stored_empty_list = []
    stored_empty_dict = {}

    class Model:
        int_list_with_default: list[int] = stored_empty_list
        str_dict_with_default: dict[str, str] = stored_empty_dict

    v = SchemaValidator(
        core_schema.model_schema(
            cls=Model,
            schema=core_schema.model_fields_schema(
                fields={
                    'int_list_with_default': core_schema.model_field(
                        schema=core_schema.with_default_schema(
                            schema=core_schema.list_schema(items_schema=core_schema.int_schema()),
                            default=stored_empty_list,
                        )
                    ),
                    'str_dict_with_default': core_schema.model_field(
                        schema=core_schema.with_default_schema(
                            schema=core_schema.dict_schema(
                                keys_schema=core_schema.str_schema(), values_schema=core_schema.str_schema()
                            ),
                            default=stored_empty_dict,
                        )
                    ),
                }
            ),
        )
    )
    m1 = v.validate_python({})
    assert m1.int_list_with_default == []
    assert m1.str_dict_with_default == {}
    # each instance gets its own copy, not the stored default object itself
    assert m1.int_list_with_default is not stored_empty_list
    assert m1.str_dict_with_default is not stored_empty_dict
    m1.int_list_with_default.append(1)
    m1.str_dict_with_default['a'] = 'abc'
    # mutating m1's fields must not leak into a freshly validated instance
    m2 = v.validate_python({})
    assert m2.int_list_with_default == []
    assert m2.str_dict_with_default == {}
    assert m2.int_list_with_default is not m1.int_list_with_default
    assert m2.str_dict_with_default is not m1.str_dict_with_default
def test_default_value() -> None:
    """``get_default_value`` exposes the stored default wrapped in ``Some``."""
    validator = SchemaValidator(
        core_schema.with_default_schema(core_schema.list_schema(core_schema.int_schema()), default=[1, 2, 3])
    )
    some = validator.get_default_value()
    assert some is not None
    assert some.value == [1, 2, 3]
def test_default_value_validate_default() -> None:
    """With ``validate_default=True`` the default is coerced before being returned."""
    validator = SchemaValidator(
        core_schema.with_default_schema(core_schema.list_schema(core_schema.int_schema()), default=['1', '2', '3']),
        config=core_schema.CoreConfig(validate_default=True),
    )
    some = validator.get_default_value()
    assert some is not None
    assert some.value == [1, 2, 3]
def test_default_value_validate_default_fail() -> None:
    """Requesting the default raises ``ValidationError`` when the default itself is invalid."""
    validator = SchemaValidator(
        core_schema.with_default_schema(core_schema.list_schema(core_schema.int_schema()), default=['a']),
        config=core_schema.CoreConfig(validate_default=True),
    )
    with pytest.raises(ValidationError) as exc_info:
        validator.get_default_value()
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': (0,),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'a',
        }
    ]
def test_default_value_validate_default_strict_pass() -> None:
    """A default that already satisfies strict validation passes in strict mode."""
    validator = SchemaValidator(
        core_schema.with_default_schema(core_schema.list_schema(core_schema.int_schema()), default=[1, 2, 3]),
        config=core_schema.CoreConfig(validate_default=True),
    )
    some = validator.get_default_value(strict=True)
    assert some is not None
    assert some.value == [1, 2, 3]
def test_default_value_validate_default_strict_fail() -> None:
    """A lax-only default ('1' for an int schema) fails strict default validation."""
    validator = SchemaValidator(
        core_schema.with_default_schema(core_schema.list_schema(core_schema.int_schema()), default=['1']),
        config=core_schema.CoreConfig(validate_default=True),
    )
    with pytest.raises(ValidationError) as exc_info:
        validator.get_default_value(strict=True)
    assert exc_info.value.errors(include_url=False) == [
        {'type': 'int_type', 'loc': (0,), 'msg': 'Input should be a valid integer', 'input': '1'}
    ]
@pytest.mark.parametrize('validate_default', [True, False])
def test_no_default_value(validate_default: bool) -> None:
    """A schema without a wrapping default validator has no default value."""
    validator = SchemaValidator(
        core_schema.list_schema(core_schema.int_schema()),
        config=core_schema.CoreConfig(validate_default=validate_default),
    )
    assert validator.get_default_value() is None
@pytest.mark.parametrize('validate_default', [True, False])
def test_some(validate_default: bool) -> None:
    """``get_default_value`` returns a ``Some`` wrapper exposing ``.value`` and a useful repr."""

    def get_default() -> Union[Some[int], None]:
        schema = core_schema.with_default_schema(core_schema.int_schema(), default=42)
        return SchemaValidator(schema).get_default_value()

    some = get_default()
    assert some is not None
    assert some.value == 42
    assert repr(some) == 'Some(42)'
@pytest.mark.skipif(sys.version_info < (3, 10), reason='pattern matching was added in 3.10')
def test_some_pattern_match() -> None:
    """``Some`` supports structural pattern matching: by positional value, by the
    ``value`` keyword, with a nested capture pattern, and as a plain capture.

    The match statement lives in an exec'd string so this file still parses on
    Python < 3.10, where the test is skipped.
    """
    code = """\
def f(v: Union[Some[Any], None]) -> str:
    match v:
        case Some(1):
            return 'case1'
        case Some(value=2):
            return 'case2'
        case Some(int(value)):
            return f'case3: {value}'
        case Some(value):
            return f'case4: {type(value).__name__}({value})'
        case None:
            return 'case5'
"""
    local_vars = {}
    exec(code, globals(), local_vars)
    f = cast(Callable[[Union[Some[Any], None]], str], local_vars['f'])
    res = f(SchemaValidator(core_schema.with_default_schema(core_schema.int_schema(), default=1)).get_default_value())
    assert res == 'case1'
    res = f(SchemaValidator(core_schema.with_default_schema(core_schema.int_schema(), default=2)).get_default_value())
    assert res == 'case2'
    res = f(SchemaValidator(core_schema.with_default_schema(core_schema.int_schema(), default=3)).get_default_value())
    assert res == 'case3: 3'
    res = f(
        SchemaValidator(
            schema=core_schema.with_default_schema(core_schema.int_schema(), default='4')
        ).get_default_value()
    )
    assert res == 'case4: str(4)'
    # no default at all -> get_default_value() is None -> the final case
    res = f(SchemaValidator(core_schema.int_schema()).get_default_value())
    assert res == 'case5'
def test_use_default_error() -> None:
    """Raising ``PydanticUseDefault`` inside a wrap validator substitutes the
    default value; when no default is available the error escalates to a
    ``SchemaError``."""

    def val_func(v: Any, handler: core_schema.ValidatorFunctionWrapHandler) -> Any:
        # treat the empty string as "use the default instead"
        if isinstance(v, str) and v == '':
            raise PydanticUseDefault
        return handler(v)

    validator = SchemaValidator(
        core_schema.with_default_schema(
            core_schema.no_info_wrap_validator_function(val_func, core_schema.int_schema()), default=10
        )
    )
    assert validator.validate_python('1') == 1
    assert validator.validate_python('') == 10
    # without a default value the error bubbles up
    # the error message is the same as the error message produced by PydanticOmit
    validator = SchemaValidator(
        core_schema.with_default_schema(core_schema.no_info_wrap_validator_function(val_func, core_schema.int_schema()))
    )
    with pytest.raises(
        SchemaError,
        match='Uncaught `PydanticUseDefault` exception: the error was raised in a field validator and no default value is available for that field.',
    ):
        validator.validate_python('')
    # same if there is no WithDefault validator
    validator = SchemaValidator(core_schema.no_info_wrap_validator_function(val_func, core_schema.int_schema()))
    with pytest.raises(
        SchemaError,
        match='Uncaught `PydanticUseDefault` exception: the error was raised in a field validator and no default value is available for that field.',
    ):
        validator.validate_python('')
@pytest.mark.xfail(
    condition=platform.python_implementation() == 'PyPy', reason='https://foss.heptapod.net/pypy/pypy/-/issues/3899'
)
@pytest.mark.skipif(platform.python_implementation() == 'GraalVM', reason='Cannot reliably trigger GC on GraalPy')
def test_leak_with_default():
    """A class referenced from its own validator's default must still be
    garbage-collectable — guards against reference cycles hidden inside the
    Rust validators."""

    def fn():
        class Defaulted(int):
            @classmethod
            def _validator(cls, v, info):
                return Defaulted(v)

        schema = core_schema.with_info_plain_validator_function(Defaulted._validator)
        schema = core_schema.with_default_schema(schema, default=Defaulted(0))

        # If any of the Rust validators don't implement traversal properly,
        # there will be an undetectable cycle created by this assignment
        # which will keep Defaulted alive
        Defaulted.__pydantic_validator__ = SchemaValidator(schema)

        return Defaulted

    klass = fn()
    ref = weakref.ref(klass)
    assert ref() is not None
    del klass
    assert_gc(lambda: ref() is None)
# (input, expected ValidationError.errors()) pairs shared by the
# test_validate_default_raises* tests below: fields 'x' and 'y' have defaults
# whose validation always fails, 'z' is a required str field with no default.
validate_default_raises_examples = [
    (
        {},
        [
            {'type': 'assertion_error', 'loc': ('x',), 'msg': 'Assertion failed, ', 'input': None},
            {'type': 'assertion_error', 'loc': ('y',), 'msg': 'Assertion failed, ', 'input': None},
            {'type': 'missing', 'loc': ('z',), 'msg': 'Field required', 'input': {}},
        ],
    ),
    (
        {'z': 'some str'},
        [
            {'type': 'assertion_error', 'loc': ('x',), 'msg': 'Assertion failed, ', 'input': None},
            {'type': 'assertion_error', 'loc': ('y',), 'msg': 'Assertion failed, ', 'input': None},
        ],
    ),
    (
        {'x': None},
        [
            {'type': 'assertion_error', 'loc': ('x',), 'msg': 'Assertion failed, ', 'input': None},
            {'type': 'assertion_error', 'loc': ('y',), 'msg': 'Assertion failed, ', 'input': None},
            {'type': 'missing', 'loc': ('z',), 'msg': 'Field required', 'input': {'x': None}},
        ],
    ),
    (
        {'x': None, 'z': 'some str'},
        [
            {'type': 'assertion_error', 'loc': ('x',), 'msg': 'Assertion failed, ', 'input': None},
            {'type': 'assertion_error', 'loc': ('y',), 'msg': 'Assertion failed, ', 'input': None},
        ],
    ),
    (
        {'y': None},
        [
            {'type': 'assertion_error', 'loc': ('x',), 'msg': 'Assertion failed, ', 'input': None},
            {'type': 'assertion_error', 'loc': ('y',), 'msg': 'Assertion failed, ', 'input': None},
            {'type': 'missing', 'loc': ('z',), 'msg': 'Field required', 'input': {'y': None}},
        ],
    ),
    (
        {'y': None, 'z': 'some str'},
        [
            {'type': 'assertion_error', 'loc': ('x',), 'msg': 'Assertion failed, ', 'input': None},
            {'type': 'assertion_error', 'loc': ('y',), 'msg': 'Assertion failed, ', 'input': None},
        ],
    ),
    (
        {'x': None, 'y': None},
        [
            {'type': 'assertion_error', 'loc': ('x',), 'msg': 'Assertion failed, ', 'input': None},
            {'type': 'assertion_error', 'loc': ('y',), 'msg': 'Assertion failed, ', 'input': None},
            {'type': 'missing', 'loc': ('z',), 'msg': 'Field required', 'input': {'x': None, 'y': None}},
        ],
    ),
    (
        {'x': None, 'y': None, 'z': 'some str'},
        [
            {'type': 'assertion_error', 'loc': ('x',), 'msg': 'Assertion failed, ', 'input': None},
            {'type': 'assertion_error', 'loc': ('y',), 'msg': 'Assertion failed, ', 'input': None},
        ],
    ),
    (
        {'x': 1, 'y': None, 'z': 'some str'},
        [
            {'type': 'assertion_error', 'loc': ('x',), 'msg': 'Assertion failed, ', 'input': 1},
            {'type': 'assertion_error', 'loc': ('y',), 'msg': 'Assertion failed, ', 'input': None},
        ],
    ),
    (
        {'x': None, 'y': 1, 'z': 'some str'},
        [
            {'type': 'assertion_error', 'loc': ('x',), 'msg': 'Assertion failed, ', 'input': None},
            {'type': 'assertion_error', 'loc': ('y',), 'msg': 'Assertion failed, ', 'input': 1},
        ],
    ),
    (
        {'x': 1, 'y': 1, 'z': 'some str'},
        [
            {'type': 'assertion_error', 'loc': ('x',), 'msg': 'Assertion failed, ', 'input': 1},
            {'type': 'assertion_error', 'loc': ('y',), 'msg': 'Assertion failed, ', 'input': 1},
        ],
    ),
]
@pytest.mark.parametrize(
    'core_schema_constructor,field_constructor',
    [
        (core_schema.model_fields_schema, core_schema.model_field),
        (core_schema.typed_dict_schema, core_schema.typed_dict_field),
    ],
)
@pytest.mark.parametrize('input_value,expected', validate_default_raises_examples)
def test_validate_default_raises(
    core_schema_constructor: Union[core_schema.ModelFieldsSchema, core_schema.TypedDictSchema],
    field_constructor: Union[core_schema.model_field, core_schema.typed_dict_field],
    input_value: dict,
    expected: Any,
) -> None:
    """Errors raised while validating defaults are collected alongside regular
    field errors for both model-fields and typed-dict containers."""

    def _raise(ex: Exception) -> None:
        raise ex()

    # inner validator always raises AssertionError, for provided and default values alike
    inner_schema = core_schema.no_info_after_validator_function(
        lambda x: _raise(AssertionError), core_schema.nullable_schema(core_schema.int_schema())
    )
    v = SchemaValidator(
        core_schema_constructor(
            {
                'x': field_constructor(
                    core_schema.with_default_schema(inner_schema, default=None, validate_default=True)
                ),
                'y': field_constructor(
                    core_schema.with_default_schema(inner_schema, default=None, validate_default=True)
                ),
                'z': field_constructor(core_schema.str_schema()),
            }
        )
    )
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(input_value)
    assert exc_info.value.errors(include_url=False, include_context=False) == expected
@pytest.mark.parametrize('input_value,expected', validate_default_raises_examples)
def test_validate_default_raises_dataclass(input_value: dict, expected: Any) -> None:
    """Same as test_validate_default_raises, but for dataclass-args containers."""

    def _raise(ex: Exception) -> None:
        raise ex()

    # inner validator always raises AssertionError, for provided and default values alike
    inner_schema = core_schema.no_info_after_validator_function(
        lambda x: _raise(AssertionError), core_schema.nullable_schema(core_schema.int_schema())
    )
    x = core_schema.dataclass_field(
        name='x', schema=core_schema.with_default_schema(inner_schema, default=None, validate_default=True)
    )
    y = core_schema.dataclass_field(
        name='y', schema=core_schema.with_default_schema(inner_schema, default=None, validate_default=True)
    )
    z = core_schema.dataclass_field(name='z', schema=core_schema.str_schema())
    v = SchemaValidator(core_schema.dataclass_args_schema('XYZ', [x, y, z]))
    with pytest.raises(ValidationError) as exc_info:
        v.validate_python(input_value)
    assert exc_info.value.errors(include_url=False, include_context=False) == expected
@pytest.fixture(params=['model', 'typed_dict', 'dataclass', 'arguments_v3'])
def container_schema_builder(
    request: pytest.FixtureRequest,
) -> Callable[[dict[str, core_schema.CoreSchema]], core_schema.CoreSchema]:
    """Parametrized fixture: returns a builder that wraps a mapping of field
    schemas into a model / typed-dict / dataclass container schema, so the
    same test body runs against each container kind."""
    if request.param == 'model':
        return lambda fields: core_schema.model_schema(
            cls=type('Test', (), {}),
            schema=core_schema.model_fields_schema(
                fields={k: core_schema.model_field(schema=v) for k, v in fields.items()},
            ),
        )
    elif request.param == 'typed_dict':
        return lambda fields: core_schema.typed_dict_schema(
            fields={k: core_schema.typed_dict_field(schema=v) for k, v in fields.items()}
        )
    elif request.param == 'dataclass':
        return lambda fields: core_schema.dataclass_schema(
            cls=dataclass(type('Test', (), {})),
            schema=core_schema.dataclass_args_schema(
                'Test',
                fields=[core_schema.dataclass_field(name=k, schema=v) for k, v in fields.items()],
            ),
            fields=[k for k in fields.keys()],
        )
    elif request.param == 'arguments_v3':
        # TODO: open an issue for this
        raise pytest.xfail('arguments v3 does not yet support default_factory_takes_data properly')
    else:
        raise ValueError(f'Unknown container type {request.param}')
def test_default_factory_not_called_if_existing_error(container_schema_builder, pydantic_version) -> None:
    """A data-dependent default factory must be skipped (with a dedicated error)
    when an earlier field already failed validation, since its input data would
    be incomplete."""
    schema = container_schema_builder(
        {
            'a': core_schema.int_schema(),
            # 'b' defaults from validated 'a', so it cannot be computed if 'a' fails.
            'b': core_schema.with_default_schema(
                schema=core_schema.int_schema(), default_factory=lambda data: data['a'], default_factory_takes_data=True
            ),
        }
    )
    v = SchemaValidator(schema)
    with pytest.raises(ValidationError) as e:
        v.validate_python({'a': 'not_an_int'})
    assert e.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('a',),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'not_an_int',
        },
        {
            'input': PydanticUndefined,
            'loc': ('b',),
            'msg': 'The default factory uses validated data, but at least one validation error occurred',
            'type': 'default_factory_not_called',
        },
    ]
    # The error URLs are appended only unless explicitly disabled via env var.
    include_urls = os.environ.get('PYDANTIC_ERRORS_INCLUDE_URL', '1') != 'false'
    expected = (
        f"""2 validation errors for {v.title}
a
  Input should be a valid integer, unable to parse string as an integer [type=int_parsing, input_value='not_an_int', input_type=str]"""
        + (
            f"""
    For further information visit https://errors.pydantic.dev/{pydantic_version}/v/int_parsing"""
            if include_urls
            else ''
        )
        + """
b
  The default factory uses validated data, but at least one validation error occurred [type=default_factory_not_called]"""
        + (
            f"""
    For further information visit https://errors.pydantic.dev/{pydantic_version}/v/default_factory_not_called"""
            if include_urls
            else ''
        )
    )
    assert str(e.value) == expected
    # repeat with the first field being a default which validates incorrectly
    schema = container_schema_builder(
        {
            'a': core_schema.with_default_schema(
                schema=core_schema.int_schema(), default='not_an_int', validate_default=True
            ),
            'b': core_schema.with_default_schema(
                schema=core_schema.int_schema(), default_factory=lambda data: data['a'], default_factory_takes_data=True
            ),
        }
    )
    v = SchemaValidator(schema)
    with pytest.raises(ValidationError) as e:
        v.validate_python({})
    assert e.value.errors(include_url=False) == [
        {
            'type': 'int_parsing',
            'loc': ('a',),
            'msg': 'Input should be a valid integer, unable to parse string as an integer',
            'input': 'not_an_int',
        },
        {
            'input': PydanticUndefined,
            'loc': ('b',),
            'msg': 'The default factory uses validated data, but at least one validation error occurred',
            'type': 'default_factory_not_called',
        },
    ]
    assert str(e.value) == expected
def test_default_factory_not_called_union_ok(container_schema_builder) -> None:
    """In a union, a member whose data-dependent default factory cannot run is
    rejected in favor of a member that validates cleanly."""
    failing_member = container_schema_builder(
        {
            'a': core_schema.none_schema(),
            'b': core_schema.with_default_schema(
                schema=core_schema.int_schema(),
                default_factory=lambda data: data['a'],
                default_factory_takes_data=True,
            ),
        }
    )
    passing_member = container_schema_builder(
        {
            'a': core_schema.int_schema(),
            'b': core_schema.with_default_schema(
                schema=core_schema.int_schema(),
                default_factory=lambda data: data['a'] + 1,
                default_factory_takes_data=True,
            ),
            # this is used to show that this union member was selected
            'c': core_schema.with_default_schema(schema=core_schema.int_schema(), default=3),
        }
    )
    union = core_schema.union_schema([failing_member, passing_member])
    validator = SchemaValidator(union)
    serializer = SchemaSerializer(union)
    validated = validator.validate_python({'a': 1})
    assert serializer.to_python(validated, mode='json') == {'a': 1, 'b': 2, 'c': 3}
def test_default_validate_default_after_validator_field_name() -> None:
    """`info.field_name` is populated while validating a default value under
    config-level `validate_default`."""

    class Model:
        pass

    field_name: str | None = None

    def val_func(value, info: core_schema.ValidationInfo):
        # Record which field the validator ran for.
        nonlocal field_name
        field_name = info.field_name
        return value

    a_schema = core_schema.with_default_schema(
        schema=core_schema.with_info_after_validator_function(
            val_func,
            schema=core_schema.str_schema(),
        ),
        default='default',
    )
    schema = core_schema.model_schema(
        cls=Model,
        schema=core_schema.model_fields_schema(
            fields={'a': core_schema.model_field(schema=a_schema)}
        ),
        config={'validate_default': True},
    )
    SchemaValidator(schema).validate_python({})
    assert field_name == 'a'
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/tests/validators/test_with_default.py",
"license": "MIT License",
"lines": 854,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:pydantic-core/wasm-preview/run_tests.py | import base64
import importlib
import re
import sys
import traceback
from io import BytesIO
from pathlib import Path
from zipfile import ZipFile
import micropip
import pyodide
import pytest
# this seems to be required for me on M1 Mac
# NOTE(review): lowers the default recursion limit (normally 1000) to 200 to
# avoid blowing the small wasm stack under pyodide — confirm still required.
sys.setrecursionlimit(200)
async def main(tests_zip: str, tag_name: str):
    """Unpack the test suite, install dependencies and run pytest under pyodide.

    Args:
        tests_zip: base64-encoded zip archive of the pydantic-core repo.
        tag_name: release git tag (e.g. 'v2.0.0') used to locate the wheel.
    """
    print(f'Using pyodide version: {pyodide.__version__}')
    print(f'Extracting test files (size: {len(tests_zip):,})...')
    # File saved on the GH release
    pydantic_core_wheel = (
        'https://githubproxy.samuelcolvin.workers.dev/pydantic/pydantic-core/releases/'
        f'download/{tag_name}/pydantic_core-{tag_name.lstrip("v")}-cp312-cp312-emscripten_3_1_58_wasm32.whl'
    )
    zip_file = ZipFile(BytesIO(base64.b64decode(tests_zip)))
    count = 0
    for name in zip_file.namelist():
        if name.endswith('.py'):
            # Strip the 'pydantic-core-<ref>/tests/' prefix so files land in ./tests/.
            path, subs = re.subn(r'^pydantic-core-.+?/tests/', 'tests/', name)
            if subs:
                count += 1
                path = Path(path)
                path.parent.mkdir(parents=True, exist_ok=True)
                with zip_file.open(name, 'r') as f:
                    path.write_bytes(f.read())
    print(f'Mounted {count} test files, installing dependencies...')
    await micropip.install(
        [
            'dirty-equals',
            'hypothesis',
            'pytest-speed',
            'pytest-mock',
            'tzdata',
            'inline-snapshot<0.21',
            'typing-extensions>=4.14.1',
            'typing-inspection',
            pydantic_core_wheel,
        ]
    )
    importlib.invalidate_caches()
    # print('installed packages:')
    # print(micropip.list())
    print('Running tests...')
    pytest.main()
# Entry point: `tests_zip` and `pydantic_core_version` are injected into the
# global namespace by the page that loads this script inside pyodide, hence
# the F821 suppressions. Top-level `await` is valid in pyodide's runner (F704).
try:
    await main(tests_zip, pydantic_core_version)  # noqa: F821,F704
except Exception:
    traceback.print_exc()
    raise
| {
"repo_id": "pydantic/pydantic",
"file_path": "pydantic-core/wasm-preview/run_tests.py",
"license": "MIT License",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
pydantic/pydantic:tests/test_missing_sentinel.py | import pickle
from typing import Union
import pytest
from pydantic_core import MISSING, PydanticSerializationUnexpectedValue
from pydantic import BaseModel, TypeAdapter, ValidationError
def test_missing_sentinel_model() -> None:
    """Fields defaulting to the MISSING sentinel are omitted from dumps until set."""

    class Model(BaseModel):
        f: Union[int, MISSING] = MISSING
        g: MISSING = MISSING

    unset = Model()
    assert unset.model_dump() == {}
    assert unset.model_dump_json() == '{}'

    explicit = Model.model_validate({'f': MISSING, 'g': MISSING})
    assert explicit.f is MISSING
    assert explicit.g is MISSING

    with_value = Model(f=1)
    assert with_value.model_dump() == {'f': 1}
    assert with_value.model_dump_json() == '{"f":1}'
def test_missing_sentinel_type_adapter() -> None:
    """Note that this usage isn't explicitly supported (and useless in practice)."""
    # TODO Remove annotation with PEP 747:
    adapter: TypeAdapter[object] = TypeAdapter(MISSING)

    # Validation accepts only the sentinel itself.
    assert adapter.validate_python(MISSING) is MISSING
    with pytest.raises(ValidationError) as exc_info:
        adapter.validate_python(1)
    assert exc_info.value.errors()[0]['type'] == 'missing_sentinel_error'

    # Serialization likewise only handles the sentinel.
    assert adapter.dump_python(MISSING) is MISSING
    with pytest.raises(PydanticSerializationUnexpectedValue):
        adapter.dump_python(1)
# Defined in module to be picklable:
class ModelPickle(BaseModel):
    # Optional int field; MISSING means "not provided".
    f: Union[int, MISSING] = MISSING
@pytest.mark.xfail(reason="PEP 661 sentinels aren't picklable yet in the experimental typing-extensions implementation")
def test_missing_sentinel_pickle() -> None:
    """Pickling a model should keep the MISSING sentinel by identity."""
    roundtripped = pickle.loads(pickle.dumps(ModelPickle()))
    assert roundtripped.f is MISSING
def test_missing_sentinel_json_schema() -> None:
    """Fields typed solely as MISSING are dropped from the JSON schema."""

    class Model(BaseModel):
        f: Union[int, MISSING] = MISSING
        g: MISSING = MISSING
        h: MISSING

    properties = Model.model_json_schema()['properties']
    assert properties == {'f': {'title': 'F', 'type': 'integer'}}
def test_model_construct_with_missing_default_does_not_crash() -> None:
    """`model_construct()` must tolerate a MISSING default without raising."""

    class M(BaseModel):
        a: Union[int, MISSING] = MISSING

    # Should not raise
    constructed = M.model_construct()
    assert hasattr(constructed, 'a')
    # Keep sentinel by identity
    assert constructed.a is MISSING
def test_no_warning_when_excluded_in_nested_model() -> None:
    """https://github.com/pydantic/pydantic/issues/12628"""

    class Inner(BaseModel):
        f1: Union[int, MISSING] = MISSING
        f2: Union[int, MISSING] = MISSING

    class Outer(BaseModel):
        inner: Union[Inner, MISSING] = MISSING

    outer = Outer(inner={'f1': 1})
    # Shouldn't raise a serialization warning about missing fields:
    assert outer.model_dump() == {'inner': {'f1': 1}}
| {
"repo_id": "pydantic/pydantic",
"file_path": "tests/test_missing_sentinel.py",
"license": "MIT License",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:tests/test_deferred_annotations.py | """Tests related to deferred evaluation of annotations introduced in Python 3.14 by PEP 649 and 749."""
import sys
from dataclasses import field
from typing import Annotated
import pytest
from annotated_types import MaxLen
from pydantic import (
BaseModel,
Field,
TypeAdapter,
ValidationError,
field_serializer,
field_validator,
model_serializer,
model_validator,
)
from pydantic.dataclasses import dataclass
# Deferred annotation semantics (PEP 649/749) only exist on Python >= 3.14,
# so the whole module is skipped on older interpreters.
pytestmark = pytest.mark.skipif(
    sys.version_info < (3, 14), reason='Requires deferred evaluation of annotations introduced in Python 3.14'
)
def test_deferred_annotations_model() -> None:
    """Annotations referencing names bound later in the scope still resolve."""

    class Model(BaseModel):
        a: Int
        b: Str = 'a'

    # Deliberately bound *after* the class body — PEP 649 defers evaluation.
    Int = int
    Str = str

    instance = Model(a='1', b=b'test')
    assert instance.a == 1
    assert instance.b == 'test'
def test_deferred_annotations_nested_model() -> None:
    """Annotations in a doubly-nested function resolve names from every enclosing scope."""

    def outer():
        def inner():
            class Model(BaseModel):
                ann: Annotated[List[Dict[str, str]], MaxLen(1)]

            # Bound after the class body; deferred evaluation still finds it.
            Dict = dict
            return Model

        # Bound in the outer closure scope, after `inner` is defined.
        List = list
        Model = inner()
        return Model

    Model = outer()
    with pytest.raises(ValidationError) as exc_info:
        # Two entries violate MaxLen(1).
        Model(ann=[{'a': 'b'}, {'c': 'd'}])
    assert exc_info.value.errors()[0]['type'] == 'too_long'
def test_deferred_annotations_pydantic_dataclass() -> None:
    """Pydantic dataclasses resolve annotations bound after the class body."""

    @dataclass
    class A:
        a: Int = field(default=1)

    # Name only exists after the dataclass is created.
    Int = int

    assert A(a='1').a == 1
def test_deferred_annotations_pydantic_dataclass_pydantic_field() -> None:
    """When initial support for Python 3.14 was added, this failed as support for the Pydantic
    `Field()` function was implemented by writing directly to `__annotations__`.
    """

    @dataclass
    class A:
        # Uses Pydantic's Field() (not dataclasses.field) on purpose — see docstring.
        a: Int = Field(default=1)

    # Bound after the class: resolution must be deferred.
    Int = int

    assert A(a='1').a == 1
def test_deferred_annotations_return_values() -> None:
    """Validator/serializer return annotations may reference names defined later."""

    class Model(BaseModel):
        a: int

        @model_validator(mode='after')
        def check(self) -> Model:
            return self

        @model_validator(mode='before')
        def before(cls, data) -> MyDict:
            return data

        @model_serializer(mode='plain')
        def ser(self) -> MyDict:
            return {'a': self.a}

        @field_validator('a', mode='before')
        def validate_a(cls, v) -> MyInt:
            return v

        @field_serializer('a', mode='plain')
        def serialize_a(self, v) -> MyInt:
            return v

    # Aliases intentionally bound only after the class: return annotations are lazy.
    MyDict = dict
    MyInt = int
def test_deferred_annotations_pydantic_extra() -> None:
    """https://github.com/pydantic/pydantic/issues/12393"""

    class Foo(BaseModel, extra='allow'):
        a: MyInt
        __pydantic_extra__: MyDict[str, int]

    # Names referenced above are only bound here; deferred evaluation resolves them.
    MyInt = int
    MyDict = dict

    foo = Foo(a='1', extra='1')
    assert foo.a == 1
    assert foo.extra == 1
def test_deferred_annotations_json_schema_extra() -> None:
    """A json_schema_extra callable with an unresolvable annotation is accepted."""

    # `Anything` is never defined — the annotation must never be evaluated.
    def json_schema_extra(js_schema: Anything):
        return js_schema

    adapter = TypeAdapter(int, config={'json_schema_extra': json_schema_extra})
    assert adapter.json_schema() == {'type': 'integer'}
def test_deferred_annotations_default_factory() -> None:
    """A default factory may carry an annotation that is never resolved."""

    # `Anything` is intentionally undefined; the annotation stays lazy.
    def make_default(validated_data: Anything):
        return 1

    class Model(BaseModel):
        f: int = Field(default_factory=make_default)

    assert Model().f == 1
def test_deferred_annotations_custom_init() -> None:
    """A custom __init__ with an unresolvable annotation still yields a signature."""

    class Model(BaseModel):
        def __init__(self, a: Anything) -> None: ...

    signature = Model.__signature__
    assert len(signature.parameters) == 1
| {
"repo_id": "pydantic/pydantic",
"file_path": "tests/test_deferred_annotations.py",
"license": "MIT License",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:tests/typechecking/secret.py | from pydantic import Secret
# Type-check-only helper: accepts any scalar-typed Secret; body intentionally empty.
def takes_secret(scalar_secret: Secret[str | int | float | bool]) -> None: ...
def secret_usage() -> None:
    # A Secret[str] must be assignable to the wider Secret[str | int | float | bool]
    # parameter — this file exists only to be checked by a static type checker.
    secret: Secret[str] = Secret('my secret')
    takes_secret(secret)
| {
"repo_id": "pydantic/pydantic",
"file_path": "tests/typechecking/secret.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:test/datasets/test_tag_dataset.py | from torch_geometric.datasets import TAGDataset
from torch_geometric.testing import onlyFullTest, withPackage
@onlyFullTest
@withPackage('ogb')
def test_tag_dataset() -> None:
    """Smoke-test TAGDataset wrapping of ogbn-arxiv: node/edge counts and the
    text/explanation lists must all agree in length."""
    from ogb.nodeproppred import PygNodePropPredDataset
    root = './data/ogb'
    hf_model = 'prajjwal1/bert-tiny'
    token_on_disk = True
    # Downloads ogbn-arxiv on first run — hence the onlyFullTest gate.
    dataset = PygNodePropPredDataset('ogbn-arxiv', root=root)
    tag_dataset = TAGDataset(root, dataset, hf_model,
                             token_on_disk=token_on_disk)
    # ogbn-arxiv's published node count; one text + one explanation per node.
    assert 169343 == tag_dataset[0].num_nodes \
        == len(tag_dataset.text) \
        == len(tag_dataset.llm_explanation)
    assert 1166243 == tag_dataset[0].num_edges
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/datasets/test_tag_dataset.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:examples/llm/txt2kg_rag.py | import argparse
import gc
import json
import os
import random
import re
import sys
from datetime import datetime
from glob import glob
from itertools import chain
from pathlib import Path
import yaml
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
import torch
from g_retriever import (
adjust_learning_rate,
get_loss,
inference_step,
load_params_dict,
save_params_dict,
)
from huggingface_hub import hf_hub_download
from torch.nn.utils import clip_grad_norm_
from tqdm import tqdm
from torch_geometric import seed_everything
from torch_geometric.llm import RAGQueryLoader
from torch_geometric.llm.models import (
LLM,
TXT2KG,
GRetriever,
LLMJudge,
SentenceTransformer,
)
from torch_geometric.llm.models.txt2kg import _chunk_text
from torch_geometric.llm.utils.backend_utils import (
create_graph_from_triples,
create_remote_backend_from_graph_data,
make_pcst_filter,
preprocess_triplet,
)
from torch_geometric.llm.utils.feature_store import KNNRAGFeatureStore
from torch_geometric.llm.utils.graph_store import NeighborSamplingRAGGraphStore
from torch_geometric.llm.utils.vectorrag import DocumentRetriever
from torch_geometric.loader import DataLoader
from torch_geometric.nn import GAT, SGFormer
# Define constants for better readability
# Default NIM model used for TXT2KG triple extraction and LLM-as-judge eval.
NV_NIM_MODEL_DEFAULT = "nvidia/llama-3.1-nemotron-ultra-253b-v1"
# Default generator LLM used by G-Retriever.
LLM_GENERATOR_NAME_DEFAULT = "meta-llama/Meta-Llama-3.1-8B-Instruct"
# Sentence-embedding model shared by vector and graph retrieval.
ENCODER_MODEL_NAME_DEFAULT = "Alibaba-NLP/gte-modernbert-base"
KG_CHUNK_SIZE_DEFAULT = 512
GNN_HID_CHANNELS_DEFAULT = 1024
GNN_LAYERS_DEFAULT = 4
LR_DEFAULT = 1e-5
EPOCHS_DEFAULT = 2
BATCH_SIZE_DEFAULT = 1
EVAL_BATCH_SIZE_DEFAULT = 2
LLM_GEN_MODE_DEFAULT = "full"
DEFAULT_ENDPOINT_URL = "https://integrate.api.nvidia.com/v1"
# Running maximum of answer lengths; updated as a global inside make_dataset().
max_chars_in_train_answer = 128
def parse_args():
    """Parse CLI arguments, merge in per-dataset config.yaml overrides, apply
    TechQA-specific fallbacks, and validate required settings.

    Returns:
        argparse.Namespace with all options resolved (doc_chunk_size and
        k_for_docs guaranteed non-None).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gnn_model', type=str, default="GAT",
                        choices=["GAT", "SGFormer"],
                        help="The GNN model to use. Default is GAT.")
    parser.add_argument('--NV_NIM_MODEL', type=str,
                        default=NV_NIM_MODEL_DEFAULT,
                        help="The NIM LLM to use for TXT2KG for LLMJudge")
    parser.add_argument('--NV_NIM_KEY', type=str, help="NVIDIA API key")
    parser.add_argument(
        '--ENDPOINT_URL', type=str, default=DEFAULT_ENDPOINT_URL,
        help="The URL hosting your model, \
            in case you are not using the public NIM.")
    parser.add_argument(
        '--kg_chunk_size', type=int, default=KG_CHUNK_SIZE_DEFAULT,
        help="When splitting context documents for txt2kg,\
            the maximum number of characters per chunk.")
    parser.add_argument('--gnn_hidden_channels', type=int,
                        default=GNN_HID_CHANNELS_DEFAULT,
                        help="Hidden channels for GNN")
    parser.add_argument('--num_gnn_layers', type=int,
                        default=GNN_LAYERS_DEFAULT,
                        help="Number of GNN layers")
    parser.add_argument('--lr', type=float, default=LR_DEFAULT,
                        help="Learning rate")
    parser.add_argument('--epochs', type=int, default=EPOCHS_DEFAULT,
                        help="Number of epochs")
    parser.add_argument('--batch_size', type=int, default=BATCH_SIZE_DEFAULT,
                        help="Batch size")
    parser.add_argument('--eval_batch_size', type=int,
                        default=EVAL_BATCH_SIZE_DEFAULT,
                        help="Evaluation batch size")
    parser.add_argument('--llm_generator_name', type=str,
                        default=LLM_GENERATOR_NAME_DEFAULT,
                        help="The LLM to use for Generation")
    parser.add_argument(
        '--llm_generator_mode', type=str, default=LLM_GEN_MODE_DEFAULT,
        choices=["frozen", "lora",
                 "full"], help="Whether to freeze the Generator LLM,\
            use LORA, or fully finetune")
    parser.add_argument('--dont_save_model', action="store_true",
                        help="Whether to skip model saving.")
    parser.add_argument('--log_steps', type=int, default=30,
                        help="Log to wandb every N steps")
    parser.add_argument('--wandb_project', type=str, default="techqa",
                        help="Weights & Biases project name")
    parser.add_argument('--wandb', action="store_true",
                        help="Enable wandb logging")
    parser.add_argument(
        '--num_gpus', type=int, default=None,
        help="Number of GPUs to use. If not specified,"
        "will determine automatically based on model size.")
    parser.add_argument('--regenerate_dataset', action="store_true",
                        help="Regenerate the dataset")
    parser.add_argument(
        '--doc_parsing_mode', type=str, default=None,
        choices=["paragraph",
                 "file"], help="How to parse documents: 'paragraph' splits "
        "files by paragraphs, 'file' treats each file as"
        "one document. "
        "This will override any value set in the config file.")
    parser.add_argument(
        '--k_for_docs', type=int, default=None,
        help="Number of docs to retrieve for each question. "
        "This will override any value set in the config file.")
    parser.add_argument(
        '--doc_chunk_size', type=int, default=None,
        help="The chunk size to use VectorRAG (document retrieval). "
        "This will override any value set in the config file.")
    parser.add_argument(
        '--dataset', type=str, default="techqa", help="Dataset folder name, "
        "should contain corpus and train.json files."
        "extracted triples, processed dataset, "
        "document retriever, and model checkpoints "
        "will be saved in the dataset folder")
    parser.add_argument(
        '--skip_graph_rag', action="store_true",
        help="Skip the graph RAG step. "
        "Used to compare the performance of Vector+Graph RAG vs Vector RAG.")
    parser.add_argument(
        '--use_x_percent_corpus', default=100.0, type=float,
        help="Debug flag that allows user to only use a random percentage "
        "of available knowledge base corpus for RAG")
    args = parser.parse_args()
    assert args.NV_NIM_KEY, "NVIDIA API key is required for TXT2KG and eval"
    assert args.use_x_percent_corpus <= 100 and \
        args.use_x_percent_corpus > 0, "Please provide a value in (0,100]"
    # Graph RAG disabled => no GNN layers are built in train().
    if args.skip_graph_rag:
        print("Skipping graph RAG step, setting GNN layers to 0...")
        args.num_gnn_layers = 0
    # Optional per-dataset overrides; CLI values (non-None) take precedence.
    config_path = os.path.join(args.dataset, "config.yaml")
    if os.path.exists(config_path):
        print(f"Loading config from {config_path}...")
        with open(config_path) as config_file:
            config = yaml.safe_load(config_file)
        if config is not None:
            # Use a loop to check and apply config values for each parameter
            config_params = [
                'doc_parsing_mode', 'doc_chunk_size', 'k_for_docs'
            ]
            for param in config_params:
                if param in config and getattr(args, param) is None:
                    setattr(args, param, config[param])
                    print(f"Using config value for {param}: {config[param]}")
    else:
        print("Skipping config loading...")
    # TechQA fallbacks when neither CLI nor config provided values.
    if args.dataset == "techqa":
        if args.doc_chunk_size is None:
            args.doc_chunk_size = 1024
        if args.k_for_docs is None:
            args.k_for_docs = 14
    assert args.doc_chunk_size is not None, "doc_chunk_size has not been set"
    assert args.k_for_docs is not None, "k_for_docs has not been set"
    return args
# System prompt handed to the generator LLM (runtime string — edit with care).
sys_prompt = (
    "You are an expert assistant that can answer "
    "any question from its knowledge, given a knowledge graph embedding and "
    "it's textualized context. Just give the answer, without explanation.")
# User-prompt template, filled with the question and the retrieved contexts.
prompt_template = """
[QUESTION]
{question}
[END_QUESTION]
[RETRIEVED_CONTEXTS]
{context}
[END_RETRIEVED_CONTEXTS]
"""
def _process_and_chunk_text(text, chunk_size, doc_parsing_mode):
    """Split raw document text into retrieval-sized chunks.

    Some corpora of docs are grouped into chunked files, typically by
    paragraph; only split into individual documents when
    ``doc_parsing_mode == 'paragraph'``.  (In the original code this note
    was a no-op bare-string statement after the first assignment; it is now
    a proper docstring.)

    Args:
        text: Raw document text.
        chunk_size: Maximum characters per chunk, or None to use the
            ``_chunk_text`` default (512).
        doc_parsing_mode: 'paragraph' splits on runs of blank lines;
            'file' (or None) treats the whole text as one document.

    Returns:
        list[str]: the text chunks, in document order.
    """
    if doc_parsing_mode == "paragraph":
        # Two-or-more consecutive newlines delimit paragraphs.
        paragraphs = re.split(r'\n{2,}', text)
    else:
        # doc_parsing_mode == 'file' or doc_parsing_mode is None
        paragraphs = [text]

    full_chunks = []
    for paragraph in paragraphs:
        if chunk_size is not None:
            chunks = _chunk_text(paragraph, chunk_size)
        else:
            # defaults to 512 in _chunk_text
            chunks = _chunk_text(paragraph)
        full_chunks.extend(chunks)
    return full_chunks
def get_data(args):
    """Load Q&A pairs and the chunked knowledge-base corpus for a dataset.

    Offers to download the TechQA demo data when it is missing. Prefers
    ``corpus/*.json`` extraction files, falling back to plain-text files.

    Returns:
        (json_obj, text_contexts): the parsed train.json object and the list
        of corpus text chunks.
    """
    # need a JSON dict of Questions and answers, see below for how its used
    json_path = Path(args.dataset) / "train.json"
    corpus_path = Path(args.dataset) / "corpus"
    # techqa specified but neither corpus or train.json exists
    if "techqa" in args.dataset.lower() and not (json_path.exists()
                                                 or corpus_path.exists()):
        print("Could not find Q&A pairs and/or knowledge base corpus")
        print("Would you like to download the TechQA dataset for demo?")
        user_input = input("Y/N: ")
        if user_input.lower() == "y" or user_input.lower() == "yes":
            print("Downloading data...")
            # downloads
            zip_path = hf_hub_download(
                repo_id="nvidia/TechQA-RAG-Eval",
                repo_type="dataset",
                filename="corpus.zip",
            )
            json_path = hf_hub_download(
                repo_id="nvidia/TechQA-RAG-Eval",
                repo_type="dataset",
                filename="train.json",
            )
            # move to working dir
            if not os.path.exists(args.dataset):
                os.mkdir(args.dataset)
            import zipfile
            with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                zip_ref.extractall(args.dataset)
            import shutil
            shutil.copy(json_path, os.path.join(args.dataset, "train.json"))
        elif user_input.lower() == "n" or user_input.lower() == "no":
            sys.exit("No selected, no data to work with... exiting.")
        else:
            sys.exit("Invalid user input, exiting.")
    with open(os.path.join(args.dataset, "train.json")) as file:
        json_obj = json.load(file)
    text_contexts = []
    # Read corpus data to create the KG and for document retrieval (RAG).
    # Prefer *.json files, fall back to txt files.
    # TODO: add support for additional corpus file formats: PDF, CSV, XML,
    # HTML, possibly others.
    # corpus folder is simply a folder with context documents in it.
    file_paths = glob(os.path.join(args.dataset, "corpus", "*.json"))
    if len(file_paths) > 0:
        for file_path in file_paths:
            with open(file_path, "r+") as f:
                data = json.load(f)
                doc_type = data[0]["document_type"]
                if doc_type != "text":
                    raise ValueError(f"Bad extraction for {file_path}, expecting "
                                     f"text only but got {doc_type}")
                text_contexts.extend(
                    _process_and_chunk_text(data[0]["metadata"]["content"],
                                            args.doc_chunk_size,
                                            args.doc_parsing_mode))
    else:
        for file_path in glob(os.path.join(args.dataset, "corpus", "*")):
            with open(file_path, "r+") as f:
                text_context = f.read()
                text_contexts.extend(
                    _process_and_chunk_text(text_context, args.doc_chunk_size,
                                            args.doc_parsing_mode))
    # Debug option: keep only a random slice of the corpus.
    if args.use_x_percent_corpus < 100:
        random.shuffle(text_contexts)
        text_contexts = text_contexts[
            0:int(len(text_contexts) * args.use_x_percent_corpus / 100.0)]
    return json_obj, text_contexts
def index_kg(args, context_docs):
    """Extract KG triples from corpus docs via TXT2KG, with periodic checkpoints.

    Resumes from a ``*--*--checkpoint_kg.pt`` file when one exists (and was
    produced by the same NIM model). On completion, deduplicated triples are
    saved as ``*--*--raw_triples.pt`` and all checkpoints are removed.

    Returns:
        list of preprocessed, de-duplicated (subject, relation, object) triples.
    """
    kg_maker = TXT2KG(NVIDIA_NIM_MODEL=args.NV_NIM_MODEL,
                      NVIDIA_API_KEY=args.NV_NIM_KEY,
                      ENDPOINT_URL=args.ENDPOINT_URL,
                      chunk_size=args.kg_chunk_size)
    print(
        "Note that if the TXT2KG process is too slow for you're liking using "
        "the public NIM, consider deploying yourself using local_lm flag of "
        "TXT2KG or using https://build.nvidia.com/nvidia/llama-3_1-nemotron-70b-instruct "  # noqa
        "to deploy to a private endpoint, which you can pass to this script "
        "w/ --ENDPOINT_URL flag.")
    print(
        "Guide for deploying NIM: https://developer.nvidia.com/blog/a-simple-guide-to-deploying-generative-ai-with-nvidia-nim/"  # noqa
    )
    total_tqdm_count = len(context_docs)
    initial_tqdm_count = 0
    checkpoint_file = list(Path(args.dataset).glob("*--*--checkpoint_kg.pt"))
    if len(checkpoint_file) > 1:
        raise RuntimeError("Error: more than one checkpoint file found")
    if len(checkpoint_file) == 1:
        print("Restoring KG from checkpoint")
        checkpoint_file = checkpoint_file[0]
        # Filename layout: <model>--<timestamp>--checkpoint_kg.pt
        checkpoint_model_name = checkpoint_file.name.split('--')[0]
        # check if triples generation are using the correct model
        if args.NV_NIM_MODEL.split('/')[-1] != checkpoint_model_name:
            raise RuntimeError(
                "Error: stored triples were generated using a different model")
        saved_relevant_triples = torch.load(checkpoint_file,
                                            weights_only=False)
        kg_maker.relevant_triples = saved_relevant_triples
        kg_maker.doc_id_counter = len(saved_relevant_triples)
        initial_tqdm_count = kg_maker.doc_id_counter
        # Skip docs already processed in the checkpoint.
        context_docs = context_docs[kg_maker.doc_id_counter:]
    chkpt_interval = 10
    chkpt_count = 0
    for context_doc in tqdm(context_docs, total=total_tqdm_count,
                            initial=initial_tqdm_count,
                            desc="Extracting KG triples"):
        kg_maker.add_doc_2_KG(txt=context_doc)
        chkpt_count += 1
        # Checkpoint every `chkpt_interval` documents so progress survives crashes.
        if chkpt_count == chkpt_interval:
            chkpt_count = 0
            path = args.dataset + "/{m}--{t}--checkpoint_kg.pt"
            model = kg_maker.NIM_MODEL.split(
                '/')[-1] if not kg_maker.local_LM else "local"
            path = path.format(m=model,
                               t=datetime.now().strftime("%Y%m%d_%H%M%S"))
            torch.save(kg_maker.relevant_triples, path)
    relevant_triples = kg_maker.relevant_triples
    # Flatten per-doc triple sets, normalize, then dedupe preserving order.
    triples = list(
        chain.from_iterable(triple_set
                            for triple_set in relevant_triples.values()))
    triples = [preprocess_triplet(triplet) for triplet in triples]
    triples = list(dict.fromkeys(triples))
    raw_triples_path = args.dataset + "/{m}--{t}--raw_triples.pt"
    model_name = kg_maker.NIM_MODEL.split(
        '/')[-1] if not kg_maker.local_LM else "local"
    torch.save(
        triples,
        raw_triples_path.format(m=model_name,
                                t=datetime.now().strftime("%Y%m%d_%H%M%S")))
    # Checkpoints are redundant once the final triples file exists.
    for old_checkpoint_file in Path(
            args.dataset).glob("*--*--checkpoint_kg.pt"):
        os.remove(old_checkpoint_file)
    return triples
def update_data_lists(args, data_lists):
    """Refresh each datapoint's text_context when --k_for_docs changed.

    No-op (returns data_lists unchanged) when the saved retriever already uses
    the requested k. Raises ValueError when no saved retriever exists.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # creating the embedding model
    sent_trans_batch_size = 256
    model = SentenceTransformer(
        model_name=ENCODER_MODEL_NAME_DEFAULT).to(device).eval()
    model_kwargs = {
        "output_device": device,
        "batch_size": int(sent_trans_batch_size / 4),
    }
    doc_retriever_path = os.path.join(args.dataset, "document_retriever.pt")
    if os.path.exists(doc_retriever_path):
        print("Loading document retriever from checkpoint...")
        vector_retriever = DocumentRetriever.load(doc_retriever_path,
                                                  model=model.encode,
                                                  model_kwargs=model_kwargs)
        if args.k_for_docs != vector_retriever.k_for_docs:
            vector_retriever.k_for_docs = args.k_for_docs
        else:
            # k unchanged: stored contexts are still valid.
            return data_lists
    else:
        raise ValueError("Document retriever not found")
    print("k_for_docs changed, updating data lists...")
    total_points = sum(len(data_list) for data_list in data_lists.values())
    progress_bar = tqdm(total=total_points, desc="Updating text contexts")
    for data_list in data_lists.values():
        for data_point in data_list:
            q = data_point["question"]
            data_point["text_context"] = vector_retriever.query(q)
            progress_bar.update(1)
    progress_bar.close()
    vector_retriever.save(doc_retriever_path)
    del vector_retriever
    gc.collect()
    torch.cuda.empty_cache()
    dataset_name = os.path.basename(args.dataset)
    dataset_path = os.path.join(args.dataset, f"{dataset_name}.pt")
    # NOTE(review): make_dataset() saves a (data_lists, max_chars_in_train_answer)
    # tuple to this same path, but here only data_lists is saved — confirm the
    # loader handles both layouts, otherwise this overwrite changes the format.
    torch.save(data_lists, dataset_path)
    return data_lists
def make_dataset(args):
    """Build the full RAG dataset: KG triples, stores, retrievers, and per-question
    subgraph contexts, split 60/20/20 into train/validation/test.

    Side effects: writes raw triples, the document retriever, and the final
    ``<dataset>.pt`` file; updates the global max_chars_in_train_answer.

    Returns:
        dict with 'train'/'validation'/'test' lists of retrieved subgraph Data.
    """
    qa_pairs, context_docs = get_data(args)
    print("Number of Docs in our VectorDB =", len(context_docs))
    data_lists = {"train": [], "validation": [], "test": []}
    triples = []
    # Reuse previously extracted triples when present (and from the same model).
    raw_triples_file = list(Path(args.dataset).glob("*--*--raw_triples.pt"))
    if len(raw_triples_file) > 1:
        raise RuntimeError("Error: multiple raw_triples files found")
    if len(raw_triples_file) == 1:
        raw_triples_file = raw_triples_file[0]
        stored_model_name = raw_triples_file.name.split('--')[0]
        if args.NV_NIM_MODEL.split('/')[-1] != stored_model_name:
            raise RuntimeError(
                "Error: stored triples were generated using a different model")
        print(f" -> Saved triples generated with: {stored_model_name}")
        triples = torch.load(raw_triples_file)
    else:
        triples = index_kg(args, context_docs)
    print("Number of triples in our GraphDB =", len(triples))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # creating the embedding model
    sent_trans_batch_size = 256
    model = SentenceTransformer(
        model_name=ENCODER_MODEL_NAME_DEFAULT).to(device)
    print("Creating the graph data from raw triples...")
    # create the graph data from raw triples
    graph_data = create_graph_from_triples(
        triples=triples, embedding_model=model.encode,
        embedding_method_kwargs={
            "batch_size": min(len(triples), sent_trans_batch_size),
            "verbose": True
        })
    print("Creating the graph and feature stores...")
    # creating the graph and feature stores
    fs, gs = create_remote_backend_from_graph_data(
        graph_data=graph_data, path="backend",
        graph_db=NeighborSamplingRAGGraphStore,
        feature_db=KNNRAGFeatureStore).load()
    """
    NOTE: these retriever hyperparams are very important.
    Tuning may be needed for custom data...
    """
    model_kwargs = {
        "output_device": device,
        "batch_size": int(sent_trans_batch_size / 4),
        "verbose": True
    }
    doc_retriever_path = os.path.join(args.dataset, "document_retriever.pt")
    if os.path.exists(doc_retriever_path):
        print("Loading document retriever from checkpoint...")
        vector_retriever = DocumentRetriever.load(doc_retriever_path,
                                                  model=model.encode,
                                                  model_kwargs=model_kwargs)
        if args.k_for_docs != vector_retriever.k_for_docs:
            vector_retriever.k_for_docs = args.k_for_docs
    else:
        print("Creating document retriever...")
        vector_retriever = DocumentRetriever(context_docs,
                                             k_for_docs=args.k_for_docs,
                                             model=model.encode,
                                             model_kwargs=model_kwargs)
        vector_retriever.save(doc_retriever_path)
    subgraph_filter = make_pcst_filter(
        triples,
        model,
        topk=5,  # nodes
        topk_e=5,  # edges
        cost_e=.5,  # edge cost
        num_clusters=10)  # num clusters
    # number of neighbors for each seed node selected by KNN
    fanout = 100
    # number of hops for neighborsampling
    num_hops = 2
    query_loader_config = {
        "k_nodes": 1024,  # k for Graph KNN
        "num_neighbors": [fanout] * num_hops,  # number of sampled neighbors
        "encoder_model": model,
    }
    # GraphDB retrieval done with KNN+NeighborSampling+PCST
    # PCST = Prize Collecting Steiner Tree
    # VectorDB retrieval just vanilla vector RAG
    print("Now to retrieve context for each query from "
          "our Vector and Graph DBs...")
    query_loader = RAGQueryLoader(graph_data=(fs, gs),
                                  subgraph_filter=subgraph_filter,
                                  vector_retriever=vector_retriever,
                                  config=query_loader_config)
    # pre-process the dataset
    total_data_list = []
    extracted_triple_sizes = []
    global max_chars_in_train_answer
    for data_point in tqdm(qa_pairs, desc="Building un-split dataset"):
        # Unanswerable questions are excluded from the dataset.
        if data_point["is_impossible"]:
            continue
        QA_pair = (data_point["question"], data_point["answer"])
        q = QA_pair[0]
        max_chars_in_train_answer = max(len(QA_pair[1]),
                                        max_chars_in_train_answer)
        # (TODO) make this batch queries for retrieving w/ CuVS+CuGraph
        subgraph = query_loader.query(q)
        subgraph.label = QA_pair[1]
        total_data_list.append(subgraph)
        extracted_triple_sizes.append(len(subgraph.triples))
    random.shuffle(total_data_list)
    # stats
    print("Min # of Retrieved Triples =", min(extracted_triple_sizes))
    print("Max # of Retrieved Triples =", max(extracted_triple_sizes))
    print("Average # of Retrieved Triples =",
          sum(extracted_triple_sizes) / len(extracted_triple_sizes))
    # 60:20:20 split
    data_lists["train"] = total_data_list[:int(.6 * len(total_data_list))]
    data_lists["validation"] = total_data_list[int(.6 * len(total_data_list)
                                                   ):int(.8 *
                                                         len(total_data_list))]
    data_lists["test"] = total_data_list[int(.8 * len(total_data_list)):]
    dataset_name = os.path.basename(args.dataset)
    dataset_path = os.path.join(args.dataset, f"{dataset_name}.pt")
    torch.save((data_lists, max_chars_in_train_answer), dataset_path)
    # Free the encoder before LLM training claims GPU memory.
    del model
    gc.collect()
    torch.cuda.empty_cache()
    return data_lists
def train(args, train_loader, val_loader):
    """Train (or reload) a G-Retriever model and return it in eval mode.

    Builds an optional GNN encoder, wraps the chosen LLM generator
    (full / LoRA / frozen fine-tuning mode), then either reloads a
    previously saved checkpoint or runs the training loop with a
    validation pass per epoch.

    Args:
        args: Parsed CLI arguments (model choices, lr, epochs, flags).
        train_loader: DataLoader over the training split.
        val_loader: DataLoader over the validation split.

    Returns:
        The trained (or reloaded) GRetriever model, in eval mode.
    """
    # Optional experiment tracking.
    if args.wandb:
        wandb.init(project=args.wandb_project,
                   name=f"run_{datetime.now().strftime('%Y-%m-%d_%H:%M')}",
                   config=vars(args))
    hidden_channels = args.gnn_hidden_channels
    num_gnn_layers = args.num_gnn_layers
    # Select the graph encoder; 0 layers means no GNN (LLM-only pipeline).
    if args.num_gnn_layers > 0:
        if args.gnn_model == "GAT":
            gnn = GAT(in_channels=768, hidden_channels=hidden_channels,
                      out_channels=1024, num_layers=num_gnn_layers, heads=4)
        elif args.gnn_model == "SGFormer":
            gnn = SGFormer(in_channels=768, hidden_channels=hidden_channels,
                           out_channels=1024, trans_num_heads=1,
                           trans_dropout=0.5, gnn_num_layers=num_gnn_layers,
                           gnn_dropout=0.5)
        else:
            raise ValueError(f"Invalid GNN model: {args.gnn_model}")
    else:
        gnn = None
    # Instantiate the LLM generator in the requested fine-tuning mode.
    if args.llm_generator_mode == "full":
        llm = LLM(model_name=args.llm_generator_name, sys_prompt=sys_prompt,
                  n_gpus=args.num_gpus)
    elif args.llm_generator_mode == "lora":
        llm = LLM(model_name=args.llm_generator_name, sys_prompt=sys_prompt,
                  dtype=torch.float32, n_gpus=args.num_gpus)
    else:
        # frozen
        llm = LLM(model_name=args.llm_generator_name, sys_prompt=sys_prompt,
                  dtype=torch.float32, n_gpus=args.num_gpus).eval()
        for _, p in llm.named_parameters():
            p.requires_grad = False
    model = GRetriever(llm=llm, gnn=gnn,
                       use_lora=args.llm_generator_mode == "lora")
    save_name = os.path.join(args.dataset, "model.pt")
    # Frozen LLM with no GNN has no trainable parameters: nothing to train.
    if args.llm_generator_mode == "frozen" and args.num_gnn_layers == 0:
        if not args.dont_save_model:
            save_params_dict(model, save_path=save_name)
        return model
    # NOTE(review): checkpoint reuse is keyed off ``regenerate_dataset``,
    # not a dedicated retrain flag — confirm that coupling is intended.
    if os.path.exists(save_name) and not args.regenerate_dataset:
        print("Re-using saved G-retriever model for testing...")
        model = load_params_dict(model, save_name)
    else:
        # Only optimize parameters that still require gradients.
        params = [p for _, p in model.named_parameters() if p.requires_grad]
        lr = args.lr
        optimizer = torch.optim.AdamW([{
            'params': params,
            'lr': lr,
            'weight_decay': 0.05
        }], betas=(0.9, 0.95))
        num_oom_errors = 0
        for epoch in range(args.epochs):
            model.train()
            epoch_loss = 0
            epoch_str = f'Epoch: {epoch + 1}|{args.epochs}'
            loader = tqdm(train_loader, desc=epoch_str)
            for step, batch in enumerate(loader):
                new_qs = []
                for i, q in enumerate(batch["question"]):
                    # insert VectorRAG context
                    new_qs.append(
                        prompt_template.format(
                            question=q,
                            context="\n".join(batch.text_context[i])))
                batch.question = new_qs
                if args.skip_graph_rag:
                    batch.desc = ""
                optimizer.zero_grad()
                try:
                    loss = get_loss(model, batch)
                    loss.backward()
                    clip_grad_norm_(optimizer.param_groups[0]['params'], 0.1)
                    # Adjust the LR every other step via the external
                    # schedule helper; ``lr`` tracks the latest value.
                    if (step + 1) % 2 == 0:
                        adjust_learning_rate(optimizer.param_groups[0], lr,
                                             step / len(train_loader) + epoch,
                                             args.epochs)
                    optimizer.step()
                    epoch_loss += float(loss.detach())
                    if args.wandb and (step + 1) % args.log_steps == 0:
                        wandb.log({
                            "train/loss": float(loss.detach()),
                            "train/lr": optimizer.param_groups[0]['lr'],
                        })
                    if (step + 1) % 2 == 0:
                        lr = optimizer.param_groups[0]['lr']
                except torch.cuda.OutOfMemoryError:
                    # Best-effort: skip the offending batch, keep stats.
                    torch.cuda.empty_cache()
                    print("Sequence length of last batch: ",
                          model.seq_length_stats[-1])
                    # TODO: Implement CPU fallback (WIP)
                    num_oom_errors += 1
            # Per-epoch sequence-length / OOM diagnostics.
            print("Sequence length stats: ")
            print("seq_len avg: ",
                  sum(model.seq_length_stats) / len(model.seq_length_stats))
            print("seq_len min: ", min(model.seq_length_stats))
            print("seq_len max: ", max(model.seq_length_stats))
            print("Percent of OOM errors: ",
                  num_oom_errors / len(train_loader))
            train_loss = epoch_loss / len(train_loader)
            print(epoch_str + f', Train Loss: {train_loss:4f}')
            # Eval Step
            val_loss = 0
            model.eval()
            with torch.no_grad():
                for batch in val_loader:
                    new_qs = []
                    for i, q in enumerate(batch["question"]):
                        # insert VectorRAG context
                        new_qs.append(
                            prompt_template.format(
                                question=q,
                                context="\n".join(batch.text_context[i])))
                    batch.question = new_qs
                    if args.skip_graph_rag:
                        batch.desc = ""
                    loss = get_loss(model, batch)
                    val_loss += loss.item()
                val_loss = val_loss / len(val_loader)
                print(epoch_str + f", Val Loss: {val_loss:4f}")
                if args.wandb:
                    wandb.log({
                        "val/loss": val_loss,
                        "train/epoch_loss": train_loss,
                        "epoch": epoch + 1
                    })
        if args.wandb:
            wandb.finish()
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    model.eval()
    if not args.dont_save_model:
        save_params_dict(model, save_path=save_name)
    return model
def test(model, test_loader, args):
    """Evaluate ``model`` on ``test_loader`` using an LLM-as-a-judge metric.

    Generates an answer per test question (with VectorRAG context inserted
    into the prompt), then scores each (question, prediction, label) triple
    with :class:`LLMJudge` and prints the average score.

    Args:
        model: Trained G-Retriever model used for generation.
        test_loader: DataLoader over the held-out test split.
        args: Parsed CLI arguments carrying NIM credentials and flags.
    """
    llm_judge = LLMJudge(args.NV_NIM_MODEL, args.NV_NIM_KEY, args.ENDPOINT_URL)

    def judge_score(question: str, pred: str, correct_answer: str):
        # Score the prediction against the ground truth with the LLM judge.
        # (Renamed from ``eval`` to avoid shadowing the builtin.)
        return llm_judge.score(question, pred, correct_answer)

    scores = []
    eval_tuples = []
    for test_batch in tqdm(test_loader, desc="Testing"):
        new_qs = []
        raw_qs = test_batch["question"]
        for i, q in enumerate(test_batch["question"]):
            # insert VectorRAG context
            new_qs.append(
                prompt_template.format(
                    question=q, context="\n".join(test_batch.text_context[i])))
        test_batch.question = new_qs
        if args.skip_graph_rag:
            test_batch.desc = ""
        # Token budgets must be integral: use floor division instead of the
        # float that ``/ 2`` produced.
        preds = inference_step(model, test_batch,
                               max_out_tokens=max_chars_in_train_answer // 2)
        for question, pred, label in zip(raw_qs, preds, test_batch.label):
            eval_tuples.append((question, pred, label))
    for question, pred, label in tqdm(eval_tuples, desc="Eval"):
        scores.append(judge_score(question, pred, label))
    avg_scores = sum(scores) / len(scores)
    print("Avg marlin accuracy=", avg_scores)
    print("*" * 5 + "NOTE" + "*" * 5)
    print("Marlin Accuracy is Estimated by LLM as a Judge!")
    print("Improvement of this estimation process is WIP...")
if __name__ == '__main__':
    # for reproducibility
    seed_everything(50)
    args = parse_args()
    if args.wandb and not wandb_available:
        print("Error: wandb package not found but --wandb flag was used.")
        print("Please install wandb and rerun the script.")
        sys.exit(1)
    # Need to sanitize sensitive keys
    # (mask the API key only for the duration of the args printout).
    saved_NIM_KEY = args.NV_NIM_KEY
    args.NV_NIM_KEY = "********"
    print(f"Starting {args.dataset} training with args: ", args)
    args.NV_NIM_KEY = saved_NIM_KEY
    dataset_name = os.path.basename(args.dataset)
    dataset_path = os.path.join(args.dataset, f"{dataset_name}.pt")
    # Reuse the cached KG-RAG dataset unless regeneration was requested.
    if os.path.exists(dataset_path) and not args.regenerate_dataset:
        print(f"Re-using Saved {dataset_name} KG-RAG Dataset...")
        # NOTE(review): ``weights_only=False`` unpickles arbitrary objects —
        # only load checkpoints this script produced itself.
        data_lists, max_chars_in_train_answer = torch.load(
            dataset_path, weights_only=False)
        doc_retriever_path = os.path.join(args.dataset,
                                          "document_retriever.pt")
        if os.path.exists(doc_retriever_path):
            print("Updating data lists with document retriever...")
            data_lists = update_data_lists(args, data_lists)
    else:
        data_lists = make_dataset(args)
    batch_size = args.batch_size
    eval_batch_size = args.eval_batch_size
    # Train/val/test loaders; only training shuffles and drops the last
    # (possibly short) batch.
    train_loader = DataLoader(data_lists["train"], batch_size=batch_size,
                              drop_last=True, pin_memory=True, shuffle=True)
    val_loader = DataLoader(data_lists["validation"],
                            batch_size=eval_batch_size, drop_last=False,
                            pin_memory=True, shuffle=False)
    test_loader = DataLoader(data_lists["test"], batch_size=eval_batch_size,
                             drop_last=False, pin_memory=True, shuffle=False)
    model = train(args, train_loader, val_loader)
    test(model, test_loader, args)
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "examples/llm/txt2kg_rag.py",
"license": "MIT License",
"lines": 703,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:test/llm/test_rag_loader.py | import os
from typing import Any, Dict
from unittest.mock import Mock
import pytest
import torch
from torch_geometric.data import Data
from torch_geometric.llm.models import SentenceTransformer
from torch_geometric.llm.rag_loader import RAGQueryLoader
from torch_geometric.llm.utils.backend_utils import (
create_graph_from_triples,
create_remote_backend_from_graph_data,
)
from torch_geometric.llm.utils.feature_store import KNNRAGFeatureStore
from torch_geometric.llm.utils.graph_store import NeighborSamplingRAGGraphStore
from torch_geometric.llm.utils.vectorrag import VectorRetriever
from torch_geometric.sampler import SamplerOutput
from torch_geometric.testing import onlyRAG
class MockRAGFeatureStore:
    """Stand-in for the RAGFeatureStore protocol used by loader tests.

    Returns fixed seed nodes/edges and random features so tests can
    exercise :class:`RAGQueryLoader` without a real backend.
    """
    def __init__(self):
        self._config = {}
        # Random features for 10 nodes, 64 dims each.
        self.x = torch.randn(10, 64)

    def retrieve_seed_nodes(self, query: Any, **kwargs):
        """Return fixed seed node ids plus a random query encoding."""
        return torch.tensor([0, 1, 2, 3, 4]), torch.randn(1, 64)

    @property
    def config(self) -> Dict[str, Any]:
        return self._config

    @config.setter
    def config(self, config: Dict[str, Any]):
        # Mimic a real store's validation: reject None and require key 'a'.
        if config is None:
            raise ValueError("Config cannot be None")
        if 'a' not in config:
            raise ValueError("Required config parameter 'a' not found")
        self._config = config

    def retrieve_seed_edges(self, query: Any, **kwargs):
        """Return a fixed seed edge list."""
        return torch.tensor([[0, 1], [1, 2], [2, 3]])

    def load_subgraph(self, sample):
        """Return a small Data object carrying node/edge index attributes."""
        subgraph = Data()
        subgraph.edge_idx = torch.arange(3)
        subgraph.node_idx = torch.arange(5)
        return subgraph
class MockRAGGraphStore:
    """Stand-in for the RAGGraphStore protocol used by loader tests."""
    def __init__(self):
        self._config = {}
        # Ring over five nodes: 0->1->2->3->4->0.
        self.edge_index = torch.tensor([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])

    def sample_subgraph(self, seed_nodes, seed_edges=None, **kwargs):
        """Return a canned SamplerOutput rooted at ``seed_nodes``."""
        return SamplerOutput(
            node=seed_nodes,
            row=torch.tensor([0, 1, 2]),
            col=torch.tensor([1, 2, 3]),
            edge=torch.tensor([0, 1, 2]),
            batch=None,
        )

    @property
    def config(self) -> Dict[str, Any]:
        return self._config

    @config.setter
    def config(self, config: Dict[str, Any]):
        # Mimic a real store's validation: reject None and require key 'b'.
        if config is None:
            raise ValueError("Config cannot be None")
        if 'b' not in config:
            raise ValueError("Required config parameter 'b' not found")
        self._config = config

    def register_feature_store(self, feature_store):
        """No-op; present only to satisfy the protocol."""
class TestRAGQueryLoader:
    """Test suite for RAGQueryLoader."""
    def setup_method(self):
        """Set up test fixtures before each test method."""
        self.mock_feature_store = MockRAGFeatureStore()
        self.mock_graph_store = MockRAGGraphStore()
        self.graph_data = (self.mock_feature_store, self.mock_graph_store)
        # Sample config
        # Keys 'a' and 'b' satisfy the two mock stores' required-key checks.
        self.sample_config = {"a": 5, "b": [10, 5], "c": "test_value"}
    def test_initialization_basic(self):
        """Test basic initialization of RAGQueryLoader."""
        loader = RAGQueryLoader(self.graph_data, config=self.sample_config)
        assert loader.feature_store == self.mock_feature_store
        assert loader.graph_store == self.mock_graph_store
        # Optional collaborators default to None/False when not supplied.
        assert loader.vector_retriever is None
        assert loader.augment_query is False
        assert loader.subgraph_filter is None
        assert loader.config == self.sample_config
    def test_initialization_with_all_params(self):
        """Test initialization with all parameters."""
        mock_vector_retriever = Mock(spec=VectorRetriever)
        mock_subgraph_filter = Mock()
        loader = RAGQueryLoader(graph_data=self.graph_data,
                                subgraph_filter=mock_subgraph_filter,
                                augment_query=True,
                                vector_retriever=mock_vector_retriever,
                                config=self.sample_config)
        assert loader.feature_store == self.mock_feature_store
        assert loader.graph_store == self.mock_graph_store
        assert loader.vector_retriever == mock_vector_retriever
        assert loader.augment_query is True
        assert loader.subgraph_filter == mock_subgraph_filter
        assert loader.config == self.sample_config
    def test_bad_config(self):
        """Test bad config initialization."""
        # No config at all, then a config missing the stores' required keys:
        # both must raise during construction.
        with pytest.raises(ValueError):
            RAGQueryLoader(self.graph_data)
        with pytest.raises(ValueError):
            RAGQueryLoader(self.graph_data, config={'d': 'foobar'})
    def test_config_propagation(self):
        """Test that config is propagated during initialization."""
        loader = RAGQueryLoader(self.graph_data, config=self.sample_config)
        # The loader pushes its config down into both stores.
        assert loader.feature_store.config == self.sample_config
        assert loader.graph_store.config == self.sample_config
    def test_basic_query_without_vector_retriever(self):
        """Test basic query functionality without vector retriever."""
        loader = RAGQueryLoader(self.graph_data, config=self.sample_config)
        query = "test query"
        result = loader.query(query)
        # Verify result is a Data object
        assert isinstance(result, Data)
        # Verify the data has expected attributes
        assert hasattr(result, 'node_idx')
        assert hasattr(result, 'num_nodes')
        assert hasattr(result, 'x')
        assert hasattr(result, 'edge_index')
    def test_query_with_vector_retriever(self):
        """Test query functionality with vector retriever."""
        mock_vector_retriever = Mock(spec=VectorRetriever)
        mock_vector_retriever.query.return_value = [
            "retrieved doc 1", "retrieved doc 2"
        ]
        loader = RAGQueryLoader(self.graph_data,
                                vector_retriever=mock_vector_retriever,
                                config=self.sample_config)
        query = "test query"
        result = loader.query(query)
        # Verify vector retriever was called
        mock_vector_retriever.query.assert_called_once_with(query)
        # Verify result has text_context
        # (the retrieved documents are attached to the returned Data).
        assert hasattr(result, 'text_context')
        assert result.text_context == ["retrieved doc 1", "retrieved doc 2"]
    def test_query_with_subgraph_filter(self):
        """Test query functionality with subgraph filter."""
        mock_filter_result = Data()
        mock_filter_result.filtered = True
        mock_subgraph_filter = Mock(return_value=mock_filter_result)
        loader = RAGQueryLoader(self.graph_data,
                                subgraph_filter=mock_subgraph_filter,
                                config=self.sample_config)
        query = "test query"
        result = loader.query(query)
        # Verify subgraph filter was called
        # with (subgraph, query) positional arguments.
        mock_subgraph_filter.assert_called_once()
        call_args = mock_subgraph_filter.call_args[0]
        assert len(call_args) == 2
        assert call_args[1] == query
        # Verify result is the filtered result
        assert result == mock_filter_result
        assert hasattr(result, 'filtered')
        assert result.filtered is True
@onlyRAG
def test_rag_loader_integration(tmp_path):
    """Test RAGQueryLoader with real feature and graph stores from triples."""
    # Define test triplets - simple knowledge graph about cities/countries
    triplets = [
        ["Paris", "capital_of", "France"],
        ["London", "capital_of", "UK"],
        ["Berlin", "capital_of", "Germany"],
        ["France", "in_continent", "Europe"],
        ["UK", "in_continent", "Europe"],
        ["Germany", "in_continent", "Europe"],
        ["Rome", "capital_of", "Italy"],
        ["Italy", "in_continent", "Europe"],
        ["Madrid", "capital_of", "Spain"],
        ["Spain", "in_continent", "Europe"],
    ]
    # Tiny encoder keeps the test fast while exercising real embeddings.
    encoder_model = SentenceTransformer('prajjwal1/bert-tiny')
    # Create graph from triplets
    graph_data = create_graph_from_triples(triplets, encoder_model.encode)
    save_path = os.path.join(tmp_path, "test_graph.pt")
    loader = create_remote_backend_from_graph_data(
        graph_data=graph_data, path=save_path, n_parts=1,
        graph_db=NeighborSamplingRAGGraphStore, feature_db=KNNRAGFeatureStore)
    feature_store, graph_store = loader.load()
    # Configuration
    config = {
        "k_nodes": 1,
        "encoder_model": encoder_model,
        "num_neighbors": [10]  # 10 neighbors only one hop
    }
    # Create RAG loader
    rag_data = (feature_store, graph_store)
    loader = RAGQueryLoader(rag_data, config=config)
    # Test query about European capitals
    query = "countries in Europe"
    result = loader.query(query)
    # Verify result structure
    assert isinstance(result, Data)
    # With k_nodes=1 the KNN seed is expected to be "Europe" (node 0 in the
    # induced subgraph); one hop then pulls in the five countries pointing
    # at it via "in_continent" — hence five edges all targeting node 0.
    assert torch.equal(result.edge_index,
                       torch.tensor([[1, 2, 3, 4, 5], [0, 0, 0, 0, 0]]))
    expected_x = encoder_model.encode(
        ["Europe", "France", "UK", "Germany", "Italy", "Spain"]).cpu()
    expected_edge_attr = encoder_model.encode(["in_continent"] * 5).cpu()
    assert torch.allclose(result.x, expected_x, atol=1e-6)
    assert torch.allclose(result.edge_attr, expected_edge_attr, atol=1e-6)
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/llm/test_rag_loader.py",
"license": "MIT License",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:test/llm/utils/test_rag_backend_utils.py | import os
import tempfile
from typing import List
import torch
from torch_geometric.data import Data
from torch_geometric.llm.utils.backend_utils import (
create_graph_from_triples,
create_remote_backend_from_graph_data,
)
class MockEmbeddingModel:
    """Deterministic embedding stub for tests.

    Each text is hashed into an RNG seed, so within one process the same
    text always maps to the same vector (hash randomization makes vectors
    differ across processes, but determinism per-process is all the tests
    need).
    """
    def __init__(self, embed_dim: int = 64):
        self.embed_dim = embed_dim

    def _embed_one(self, text: str) -> torch.Tensor:
        # Seed from the text hash so identical texts embed identically.
        torch.manual_seed(abs(hash(text)) % 2**31)
        return torch.randn(self.embed_dim)

    def __call__(self, texts: List[str], **kwargs) -> torch.Tensor:
        """Return a (len(texts), embed_dim) tensor of mock embeddings."""
        if not texts:
            return torch.empty(0, self.embed_dim)
        return torch.stack([self._embed_one(t) for t in texts])
class TestCreateGraphFromTriples:
    """Test suite for create_graph_from_triples function."""
    def setup_method(self):
        """Set up test fixtures."""
        # Three triples over four distinct entities and two relation types.
        self.sample_triples = [('Alice', 'works with', 'Bob'),
                               ('Alice', 'leads', 'Carol'),
                               ('Carol', 'works with', 'Dave')]
        self.mock_embedding_model = MockEmbeddingModel(embed_dim=32)
    def test_create_graph_basic_functionality(self):
        """Test basic functionality of create_graph_from_triples."""
        result = create_graph_from_triples(
            triples=self.sample_triples,
            embedding_model=self.mock_embedding_model)
        # Verify result is a Data object
        assert isinstance(result, Data)
        x = result.x
        edge_attr = result.edge_attr
        # 4 unique entities -> 4 node rows; 3 triples -> 3 edge rows.
        assert x.shape == (4, 32)
        assert edge_attr.shape == (3, 32)
        # NOTE(review): ``tensor in tensor`` uses broadcast equality, which
        # is weaker than an exact row-membership check — confirm it is
        # strict enough for this assertion.
        for t in self.sample_triples:
            assert self.mock_embedding_model([t[0]]) in x
            assert self.mock_embedding_model([t[2]]) in x
            assert self.mock_embedding_model([t[1]]) in edge_attr
        # Node ids follow first-seen order: Alice=0, Bob=1, Carol=2, Dave=3.
        expected_edge_index = torch.tensor([[0, 0, 2], [1, 2, 3]])
        assert torch.allclose(result.edge_index, expected_edge_index)
    def test_create_graph_empty_triples(self):
        """Test create_graph_from_triples with empty triples list."""
        empty_triples = []
        result = create_graph_from_triples(
            triples=empty_triples, embedding_model=self.mock_embedding_model)
        # Should create an empty graph
        assert isinstance(result, Data)
        assert result.num_nodes == 0
        assert result.num_edges == 0
class TestCreateRemoteBackendFromGraphData:
    """Test suite for create_remote_backend_from_graph_data function."""
    def setup_method(self):
        """Set up test fixtures."""
        self.sample_triples = [('Alice', 'works with', 'Bob'),
                               ('Alice', 'leads', 'Carol'),
                               ('Carol', 'works with', 'Dave')]
        self.mock_embedding_model = MockEmbeddingModel(embed_dim=32)
        # Create sample graph data using create_graph_from_triples
        self.sample_graph_data = create_graph_from_triples(
            triples=self.sample_triples,
            embedding_model=self.mock_embedding_model)
    def test_create_backend_data_load(self):
        """Test that data integrity is preserved in backend creation."""
        with tempfile.TemporaryDirectory() as temp_dir:
            save_path = os.path.join(temp_dir, "test_graph.pt")
            loader = create_remote_backend_from_graph_data(
                graph_data=self.sample_graph_data, path=save_path, n_parts=1)
            # Load and verify data
            feature_store, graph_store = loader.load()
            # Check that the original graph structure is preserved
            # by reading back the file the backend wrote to disk.
            loaded_data = torch.load(save_path, weights_only=False)
            # Verify basic properties match
            assert loaded_data.num_nodes == self.sample_graph_data.num_nodes
            assert loaded_data.num_edges == self.sample_graph_data.num_edges
            # Verify tensors match
            assert torch.allclose(loaded_data.x, self.sample_graph_data.x)
            assert torch.allclose(loaded_data.edge_index,
                                  self.sample_graph_data.edge_index)
            assert torch.allclose(loaded_data.edge_attr,
                                  self.sample_graph_data.edge_attr)
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/llm/utils/test_rag_backend_utils.py",
"license": "MIT License",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:test/llm/utils/test_rag_feature_store.py | from unittest.mock import Mock, patch
import pytest
import torch
from torch_geometric.data import Data
from torch_geometric.llm.utils.feature_store import KNNRAGFeatureStore
from torch_geometric.sampler import SamplerOutput
from torch_geometric.testing.decorators import onlyRAG
class TestKNNRAGFeatureStore:
    """Test suite for KNNRAGFeatureStore methods."""
    def setup_method(self):
        """Set up test fixtures."""
        # Encoder is fully mocked; ``to``/``eval`` are stubbed so the store
        # can move the model and switch it to eval mode without side effects.
        self.mock_encoder = Mock()
        self.mock_encoder.encode = Mock()
        self.mock_encoder.to = Mock(return_value=self.mock_encoder)
        self.mock_encoder.eval = Mock()
        self.config = {"k_nodes": 5, "encoder_model": self.mock_encoder}
        self.sample_x = torch.randn(40, 128)  # 40 nodes, 128 features
        self.sample_edge_attr = torch.randn(40, 64)  # 40 edges, 64 features
    def test_bad_config(self):
        """Test bad config initialization."""
        # An empty config is missing the required keys and must be rejected.
        with pytest.raises(ValueError, match="Required config parameter"):
            store = KNNRAGFeatureStore()
            store.config = {}
    def create_feature_store(self):
        """Create a FeatureStore with mocked dependencies."""
        store = KNNRAGFeatureStore()
        store.config = self.config
        # Mock the tensor storage
        store.put_tensor(self.sample_x, group_name=None, attr_name='x')
        store.put_tensor(self.sample_edge_attr, group_name=(None, None),
                         attr_name='edge_attr')
        return store
    @onlyRAG
    def test_retrieve_seed_nodes_single_query(self):
        """Test retrieve_seed_nodes with a single query."""
        store = self.create_feature_store()
        # Mock the encoder output and batch_knn
        query_text = "test query"
        mock_query_enc = torch.randn(1, 128)
        self.mock_encoder.encode.return_value = mock_query_enc
        expected_indices = torch.tensor([0, 3, 7, 2, 9])
        with patch('torch_geometric.llm.utils.feature_store.batch_knn'
                   ) as mock_batch_knn:
            # Mock batch_knn to return an iterator
            def mock_generator():
                yield (expected_indices, mock_query_enc)
            mock_batch_knn.return_value = mock_generator()
            result, query_enc = store.retrieve_seed_nodes(query_text)
            # Verify encoder was called correctly
            # (a single query is wrapped in a one-element list).
            self.mock_encoder.encode.assert_called_once_with([query_text])
            # Verify batch_knn was called correctly
            mock_batch_knn.assert_called_once()
            args = mock_batch_knn.call_args[0]
            assert torch.equal(args[0], mock_query_enc)
            assert torch.equal(args[1], self.sample_x)
            assert args[2] == 5  # k_nodes
            # Verify results
            assert torch.equal(result, expected_indices)
            assert torch.equal(query_enc, mock_query_enc)
    @onlyRAG
    def test_retrieve_seed_nodes_multiple_queries(self):
        """Test retrieve_seed_nodes with multiple queries."""
        store = self.create_feature_store()
        queries = ["query 1", "query 2"]
        mock_query_enc = torch.randn(2, 128)
        self.mock_encoder.encode.return_value = mock_query_enc
        expected_indices = [
            torch.tensor([1, 4, 6, 8, 0]),
            torch.tensor([0, 3, 7, 2, 9])
        ]
        with patch('torch_geometric.llm.utils.feature_store.batch_knn'
                   ) as mock_batch_knn:
            def mock_generator():
                for i in range(len(expected_indices)):
                    yield (expected_indices[i], mock_query_enc[i])
            mock_batch_knn.return_value = mock_generator()
            out_dict = store.retrieve_seed_nodes(queries)
            # Verify encoder was called with the list directly
            self.mock_encoder.encode.assert_called_once_with(queries)
            # Verify results
            # (multi-query input yields a dict keyed by query string).
            for i, query in enumerate(queries):
                result, query_enc = out_dict[query]
                assert torch.equal(result, expected_indices[i])
                assert torch.equal(query_enc, mock_query_enc[i])
    @pytest.mark.parametrize("induced", [True, False])
    def test_load_subgraph_valid_sample(self, induced):
        """Test load_subgraph with valid SamplerOutput."""
        store = self.create_feature_store()
        # Create a mock SamplerOutput
        sample = SamplerOutput(node=torch.tensor([6, 7, 8, 9]),
                               row=torch.tensor([0, 1, 2]),
                               col=torch.tensor([1, 2, 3]),
                               edge=torch.tensor([0, 1, 2]), batch=None)
        # induced=True keeps local (relabeled) indices; induced=False maps
        # row/col back to global node ids via ``sample.node``.
        expected_edge_indices = torch.tensor([[0, 1, 2], [1, 2, 3]]) \
            if induced else torch.tensor([[6, 7, 8], [7, 8, 9]])
        result = store.load_subgraph(sample, induced=induced)
        # Verify result is a Data object
        assert isinstance(result, Data)
        # Verify edge attributes are correctly extracted
        expected_edge_attr = self.sample_edge_attr[torch.tensor([0, 1, 2])]
        assert torch.equal(result.edge_attr, expected_edge_attr)
        assert torch.equal(result.edge_index, expected_edge_indices)
        if induced:
            assert torch.equal(result.node_idx, sample.node)
            assert torch.equal(result.edge_idx, sample.edge)
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/llm/utils/test_rag_feature_store.py",
"license": "MIT License",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:test/llm/utils/test_rag_graph_store.py | from unittest.mock import Mock, patch
import pytest
import torch
from torch_geometric.data import FeatureStore
from torch_geometric.llm.utils.graph_store import NeighborSamplingRAGGraphStore
from torch_geometric.sampler import BidirectionalNeighborSampler, SamplerOutput
def setup_test_fixtures():
    """Build a mocked feature store plus a minimal sampler config."""
    mocked_store = Mock(spec=FeatureStore)
    sampler_config = {"num_neighbors": [10, 5]}
    return mocked_store, sampler_config
def test_sample_subgraph_with_valid_tensor_input():
    """Test sample_subgraph with valid tensor input."""
    # Create graph store and set config
    feature_store, config = setup_test_fixtures()
    graph_store = NeighborSamplingRAGGraphStore(feature_store=feature_store,
                                                replace=True, disjoint=False)
    graph_store.config = config
    # Create mock sampler and its output
    mock_sampler = Mock(spec=BidirectionalNeighborSampler)
    expected_output = SamplerOutput(node=torch.tensor([0, 1, 2, 3]),
                                    row=torch.tensor([0, 1, 1]),
                                    col=torch.tensor([1, 2, 3]),
                                    edge=torch.tensor([0, 1, 2]), batch=None,
                                    num_sampled_nodes=[2, 2],
                                    num_sampled_edges=[3])
    mock_sampler.sample_from_nodes.return_value = expected_output
    # Intentionally not sorted
    # (the store should not require a sorted edge_index).
    graph_store.edge_index = torch.tensor([[3, 1, 1, 0], [4, 2, 3, 1]])
    # Initially sampler should not be initialized
    # (lazy init happens on the first sample_subgraph call).
    assert not graph_store._sampler_is_initialized
    # Mock the _init_sampler method to set our mock sampler
    with patch.object(graph_store, '_init_sampler') as mock_init:
        def set_sampler():
            graph_store.sampler = mock_sampler
            graph_store._sampler_is_initialized = True
        mock_init.side_effect = set_sampler
        # Test input
        seed_nodes = torch.tensor([0])
        result = graph_store.sample_subgraph(seed_nodes)
        # Verify sampler was initialized
        mock_init.assert_called_once()
        # Verify sample_from_nodes was called with correct input
        mock_sampler.sample_from_nodes.assert_called_once()
        assert result == expected_output
def test_bad_config():
    """An empty config must be rejected: required keys are missing."""
    with pytest.raises(ValueError, match="Required config parameter"):
        graph_store = NeighborSamplingRAGGraphStore()
        graph_store.config = {}
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/llm/utils/test_rag_graph_store.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:test/llm/utils/test_vectorrag.py | import pytest
import torch
from torch_geometric.llm.utils.vectorrag import DocumentRetriever
from torch_geometric.testing import onlyRAG
@pytest.fixture
def sample_documents():
    """Three short, distinct documents used as a retrieval corpus."""
    ordinals = ["first", "second", "third"]
    return [f"This is the {word} test document." for word in ordinals]
@pytest.fixture
def sample_model():
    """Mock encoder yielding a fixed sequence of (1, 384) tensors."""
    from unittest.mock import Mock
    model = Mock()
    # Successive calls return constant vectors of 0s, 1s, 2s and 1s.
    model.side_effect = [
        torch.full((1, 384), float(value)) for value in (0, 1, 2, 1)
    ]
    return model
def test_save_load(sample_documents, sample_model, tmp_path):
    """Test whether saving/loading a DocumentRetriever maintains state."""
    retriever = DocumentRetriever(sample_documents, model=sample_model)
    retriever.save(tmp_path / "retriever.pth")
    # NOTE(review): the model is passed back into ``load``, which suggests
    # it is not serialized with the retriever state — confirm.
    loaded_retriever = DocumentRetriever.load(tmp_path / "retriever.pth",
                                              sample_model)
    # Round-trip must preserve docs, embeddings, k and the attached model.
    assert retriever.raw_docs == loaded_retriever.raw_docs
    assert torch.allclose(retriever.embedded_docs,
                          loaded_retriever.embedded_docs)
    assert retriever.k_for_docs == loaded_retriever.k_for_docs
    assert retriever.model == loaded_retriever.model
@onlyRAG
def test_query(sample_documents, sample_model):
    """Test query functionality of DocumentRetriever."""
    retriever = DocumentRetriever(sample_documents, model=sample_model)
    query = "What is the first test document?"
    # With the mocked embedding sequence, the retriever is expected to rank
    # the first document highest (depends on its similarity metric).
    retrieved_docs = retriever.query(query)
    assert retrieved_docs == [sample_documents[0]]
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/llm/utils/test_vectorrag.py",
"license": "MIT License",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:torch_geometric/llm/models/llm_judge.py | from math import isnan
from typing import Optional
from torch_geometric.llm.models.txt2kg import \
_chunk_to_triples_str_cloud as call_NIM
# Credit for original "Marlin Accuracy" system goes to:
# Gilberto Titericz (NVIDIA)
# This work is an adaptation of his for PyG
# The two prompts phrase the same 0/2/4 rubric differently. SYSTEM_PROMPT_2
# also intentionally permutes which answer fills the "Reference" vs "User"
# slot: the judge is run both ways and the scores are combined (see
# LLMJudge._average_scores).
# Fixed here: three missing spaces at concatenation seams in
# SYSTEM_PROMPT_1 ("Reference Answerin all terms" x2, "topics,numbers").
SYSTEM_PROMPT_1 = (
    "Instruction: You are a world class state of the art " +
    "assistant for rating " +
    "a User Answer given a Question. The Question is completely" +
    " answered by the Reference Answer.\n" +
    "Say 4, if User Answer is full contained and equivalent to" +
    " Reference Answer " +
    "in all terms, topics, numbers, metrics, dates and units.\n" +
    "Say 2, if User Answer is partially contained and almost " +
    "equivalent to Reference Answer " +
    "in all terms, topics, numbers, metrics, dates and units.\n" +
    "Say 0, if User Answer is not contained in Reference Answer" +
    " or not accurate in all terms, topics, " +
    "numbers, metrics, dates and units or the User Answer do not" +
    " answer the question.\n" +
    "Do not explain or justify your rating. Your rating must be " +
    "only 4, 2 or 0 according to the instructions above.\n" +
    "### Question: \"{question}\"\n" + "### User Answer: \"{model_pred}\"\n" +
    "### Reference Answer: \"{correct_answer}\"\n" + "The rating is:\n")
SYSTEM_PROMPT_2 = (
    "I will rate the User Answer in comparison to the Reference " +
    "Answer for a given Question.\n" +
    "A rating of 4 indicates that the User Answer is entirely " +
    "consistent with the Reference Answer, covering all aspects," +
    " topics, numbers, metrics, dates, and units.\n" +
    "A rating of 2 signifies that the User Answer is mostly " +
    "aligned with the Reference Answer, with minor discrepancies" +
    " in some areas.\n" +
    "A rating of 0 means that the User Answer is either " +
    "inaccurate, incomplete, or unrelated to the Reference " +
    "Answer, or it fails to address the Question.\n" +
    "I will provide the rating without any explanation or " +
    "justification, adhering to the following scale: " +
    "0 (no match), 2 (partial match), 4 (exact match).\n" +
    "Do not explain or justify my rating. My rating must" +
    " be only 4, 2 or 0 only.\n\n" + "Question: \"{question}\"\n\n" +
    # Slots are intentionally swapped relative to SYSTEM_PROMPT_1: the
    # model's prediction is presented as the "Reference Answer" here.
    "Reference Answer: \"{model_pred}\"\n\n" +
    "User Answer: \"{correct_answer}\"\n\n" + "Rating: ")
# TODO: add support for Local LM
# TODO: add multiproc support like txt2kg
class LLMJudge():
    """Uses NIMs to score a triple of (question, model_pred, correct_answer)

    This whole class is an adaptation of Gilberto's work for PyG.

    Args:
        NVIDIA_NIM_MODEL : (str, optional)
            The name of the NVIDIA NIM model to use.
            (default: "nvidia/llama-3.1-nemotron-70b-instruct").
        NVIDIA_API_KEY : (str, optional)
            The API key for accessing NVIDIA's NIM models.
            (default: "").
        ENDPOINT_URL : (str, optional)
            The URL hosting your model, in case you are not using
            the public NIM.
            (default: "https://integrate.api.nvidia.com/v1").
    """
    def __init__(
        self,
        NVIDIA_NIM_MODEL: Optional[
            str] = "nvidia/llama-3.1-nemotron-70b-instruct",
        NVIDIA_API_KEY: Optional[str] = "",
        ENDPOINT_URL: Optional[str] = "https://integrate.api.nvidia.com/v1",
    ) -> None:
        self.NVIDIA_API_KEY = NVIDIA_API_KEY
        self.NIM_MODEL = NVIDIA_NIM_MODEL
        self.ENDPOINT_URL = ENDPOINT_URL

    def _process_score(self, response: str) -> float:
        """Map the judge's free-text reply to a score in [0, 1].

        Scans for the first digit among 4, 3, 2, 1, 0 (in that order) and
        normalizes by 4. Uses 3 and 1 even though the prompt says only
        0, 2, 4, because LLMs don't always follow instructions.
        Returns NaN when no rating digit is present.
        Credit to Gilberto.
        """
        for i in [4, 3, 2, 1, 0]:
            if str(i) in response:
                return i / 4
        return float("nan")

    def _average_scores(self, score0: float, score1: float) -> float:
        """Combine the two judge scores, discarding failed (NaN) ones.

        Sometimes the LLM fails to respond or has no score in the response;
        such failed scores come through as NaN and are discarded.
        Credit to Gilberto.

        Args:
            score0 (float): judge accuracy score.
            score1 (float): judge accuracy score by permuting agent answer
                and ground truth.

        Returns:
            (float) average of both scores when both are valid, the valid
            one when only one is, NaN when both failed.
        """
        # NaN comparisons are always False, so validity is tested
        # explicitly: the previous ``max(score0, score1)`` fallback
        # returned NaN whenever ``score0`` was NaN, silently dropping a
        # valid ``score1``.
        valid = [s for s in (score0, score1) if not isnan(s)]
        if not valid:
            return float("nan")
        return sum(valid) / len(valid)

    def _call_judge(self, prompt: str, max_retries: int) -> float:
        """Query the NIM judge with ``prompt``, retrying on failure.

        Returns the parsed score, or NaN if every attempt failed.
        """
        result = float("nan")
        for _retry in range(max_retries):
            try:
                result = self._process_score(
                    call_NIM(prompt, self.NVIDIA_API_KEY, self.NIM_MODEL,
                             self.ENDPOINT_URL, post_text=""))
                if not isnan(result):
                    break
            except ImportError:
                # A missing dependency is not transient; surface it.
                raise
            except:  # noqa
                # Network/NIM hiccup: retry best-effort.
                pass
        return result

    def score(
        self,
        question: str,
        model_pred: str,
        correct_answer: str,
    ) -> float:
        """Args:
            question (str): The original question asked to the model.
            model_pred (str): The prediction made by the model.
            correct_answer (str): The actual correct answer to the question.

        Returns:
            score (float): score of 0-1, may be nan due to LLM judge failure.
                Evals should skip nan's when aggregating score.
        """
        prompt1 = SYSTEM_PROMPT_1.format(question=question,
                                         model_pred=model_pred,
                                         correct_answer=correct_answer)
        prompt2 = SYSTEM_PROMPT_2.format(question=question,
                                         model_pred=model_pred,
                                         correct_answer=correct_answer)
        # NOTE(review): the asymmetric retry budgets (200 vs 20) mirror the
        # original code; possibly a typo — confirm intent.
        score1 = self._call_judge(prompt1, max_retries=200)
        score2 = self._call_judge(prompt2, max_retries=20)
        return self._average_scores(score1, score2)
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/llm/models/llm_judge.py",
"license": "MIT License",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:torch_geometric/llm/models/txt2kg.py | import os
import time
from typing import List, Optional, Tuple
import torch
import torch.multiprocessing as mp
# Lazily-initialized, per-process OpenAI-compatible client (see
# _chunk_to_triples_str_cloud).
CLIENT_INITD = False
CLIENT = None
# NOTE(review): appears unused — the key is passed explicitly to
# _chunk_to_triples_str_cloud as a parameter of the same name; confirm.
GLOBAL_NIM_KEY = ""
SYSTEM_PROMPT = "Please convert the above text into a list of knowledge triples with the form ('entity', 'relation', 'entity'). Separate each with a new line. Do not output anything else. Try to focus on key triples that form a connected graph." # noqa
class TXT2KG():
    """A class to convert text data into a Knowledge Graph (KG) format.
    Uses NVIDIA NIMs + Prompt engineering by default.
    Default model `nvidia/llama-3.1-nemotron-70b-instruct`
    is on par or better than GPT4o in benchmarks.
    We need a high quality model to ensure high quality KG.
    Otherwise we have garbage in garbage out for the rest of the
    GNN+LLM RAG pipeline.

    Use local_lm flag for local debugging/dev. You still need to be able to
    inference a 14B param LLM, 'VAGOsolutions/SauerkrautLM-v2-14b-DPO'.
    Smaller LLMs did not work at all in testing.
    Note this 14B model requires a considerable amount of GPU memory.
    See examples/llm/txt2kg_rag.py for an example.

    Args:
        NVIDIA_NIM_MODEL : str, optional
            The name of the NVIDIA NIM model to use.
            (default: "nvidia/llama-3.1-nemotron-70b-instruct").
        NVIDIA_API_KEY : str, optional
            The API key for accessing NVIDIA's NIM models (default: "").
        ENDPOINT_URL : str, optional
            The URL hosting your model, in case you are not using
            the public NIM.
            (default: "https://integrate.api.nvidia.com/v1").
        local_LM : bool, optional
            A flag indicating whether a local Language Model (LM)
            should be used. This uses HuggingFace and will be slower
            than deploying your own private NIM endpoint. This flag
            is mainly recommended for dev/debug.
            (default: False).
        chunk_size : int, optional
            The size of the chunks in which the text data is processed
            (default: 512).
    """
    def __init__(
        self,
        NVIDIA_NIM_MODEL: Optional[
            str] = "nvidia/llama-3.1-nemotron-70b-instruct",
        NVIDIA_API_KEY: Optional[str] = "",
        ENDPOINT_URL: Optional[str] = "https://integrate.api.nvidia.com/v1",
        local_LM: bool = False,
        chunk_size: int = 512,
    ) -> None:
        self.local_LM = local_LM
        if self.local_LM:
            # Local HF model is loaded lazily on first use.
            self.initd_LM = False
        else:
            # Cloud path: store the provided NIM connection info.
            self.NVIDIA_API_KEY = NVIDIA_API_KEY
            self.NIM_MODEL = NVIDIA_NIM_MODEL
            self.ENDPOINT_URL = ENDPOINT_URL
        # Set the chunk size for processing text data
        self.chunk_size = chunk_size
        # Initialize counters and storage for parsing results
        self.doc_id_counter = 0
        self.relevant_triples = {}
        # Debug stats for the local-LM path.
        self.total_chars_parsed = 0
        self.time_to_parse = 0.0

    def save_kg(self, path: str) -> None:
        """Saves the relevant triples in the knowledge graph (KG) to a file.

        Args:
            path (str): The file path where the KG will be saved.
        """
        torch.save(self.relevant_triples, path)

    def _chunk_to_triples_str_local(self, txt: str) -> str:
        """Run the local LM on one text chunk and return its raw response."""
        chunk_start_time = time.time()
        if not self.initd_LM:
            from torch_geometric.nn.nlp import LLM
            LM_name = "VAGOsolutions/SauerkrautLM-v2-14b-DPO"
            self.model = LLM(LM_name).eval()
            self.initd_LM = True
        out_str = self.model.inference(question=[txt + '\n' + SYSTEM_PROMPT],
                                       max_tokens=self.chunk_size)[0]
        # for debug
        self.total_chars_parsed += len(txt)
        self.time_to_parse += round(time.time() - chunk_start_time, 2)
        self.avg_chars_parsed_per_sec = self.total_chars_parsed / self.time_to_parse  # noqa
        return out_str

    def add_doc_2_KG(
        self,
        txt: str,
        QA_pair: Optional[Tuple[str, str]] = None,
    ) -> None:
        """Add a document to the Knowledge Graph (KG).

        Args:
            txt (str): The text to extract triples from.
            QA_pair (Tuple[str, str]], optional):
                A QA pair to associate with the extracted triples.
                Useful for downstream evaluation.
        """
        if not self.local_LM:
            # Ensure NVIDIA_API_KEY is set before proceeding
            assert self.NVIDIA_API_KEY != '', \
                "Please init TXT2KG w/ NVIDIA_API_KEY or set local_lm=True"
        if QA_pair:
            # QA_pairs should be unique keys, check if already exists in KG
            if QA_pair in self.relevant_triples.keys():
                print("Warning: QA_Pair was already added to the set")
                print("Q=", QA_pair[0])
                print("A=", QA_pair[1])
                print("Previously parsed triples=",
                      self.relevant_triples[QA_pair])
                print("Skipping...")
            key = QA_pair
        else:
            # If no QA_pair, use the current doc_id_counter as the key
            key = self.doc_id_counter
        # Handle empty text (context-less QA pairs)
        if txt == "":
            self.relevant_triples[key] = []
        else:
            # Chunk the text into smaller pieces for processing
            chunks = _chunk_text(txt, chunk_size=self.chunk_size)
            if self.local_LM:
                # For debugging purposes...
                # process chunks sequentially on the local LM
                self.relevant_triples[key] = _llm_then_python_parse(
                    chunks, _parse_n_check_triples,
                    self._chunk_to_triples_str_local)
            else:
                # Process chunks in parallel using multiple processes
                num_procs = min(len(chunks), _get_num_procs())
                # Ceil division: the previous floor division silently
                # dropped the trailing chunks whenever len(chunks) was not
                # an exact multiple of num_procs.
                meta_chunk_size = -(len(chunks) // -num_procs)
                in_chunks_per_proc = {
                    j:
                    chunks[j *
                           meta_chunk_size:min((j + 1) *
                                               meta_chunk_size, len(chunks))]
                    for j in range(num_procs)
                }
                for _retry_j in range(5):
                    try:
                        for _retry_i in range(200):
                            try:
                                # Spawn multiple processes
                                # process chunks in parallel
                                mp.spawn(
                                    _multiproc_helper,
                                    args=(in_chunks_per_proc,
                                          _parse_n_check_triples,
                                          _chunk_to_triples_str_cloud,
                                          self.NVIDIA_API_KEY, self.NIM_MODEL,
                                          self.ENDPOINT_URL),
                                    nprocs=num_procs)
                                break
                            except:  # noqa
                                # keep retrying...
                                # txt2kg is costly -> stoppage is costly
                                pass
                        # Collect the results from each process
                        self.relevant_triples[key] = []
                        for rank in range(num_procs):
                            self.relevant_triples[key] += torch.load(
                                "/tmp/outs_for_proc_" + str(rank))
                            os.remove("/tmp/outs_for_proc_" + str(rank))
                        break
                    except:  # noqa
                        pass
        # Increment the doc_id_counter for the next document
        self.doc_id_counter += 1
# Substrings of NIM model names that support the "detailed thinking on"
# system message; matched against NIM_MODEL in _chunk_to_triples_str_cloud.
known_reasoners = [
    "llama-3.1-nemotron-ultra-253b-v1",
    "kimi-k2-instruct",
    "nemotron-super-49b-v1_5",
    "gpt-oss",
]
def _chunk_to_triples_str_cloud(
        txt: str, GLOBAL_NIM_KEY='',
        NIM_MODEL="nvidia/llama-3.1-nemotron-ultra-253b-v1",
        ENDPOINT_URL="https://integrate.api.nvidia.com/v1",
        post_text=SYSTEM_PROMPT) -> str:
    """Send one text chunk to an OpenAI-compatible NIM endpoint and return
    the fully assembled streamed response.

    Args:
        txt (str): The text chunk to process.
        GLOBAL_NIM_KEY (str, optional): API key (passed explicitly by the
            multiprocessing workers; shadows the module global of the same
            name). (default: '')
        NIM_MODEL (str, optional): Model to query.
        ENDPOINT_URL (str, optional): OpenAI-compatible base URL.
        post_text (str, optional): Instruction appended after the chunk;
            pass "" to send the chunk alone.

    Returns:
        str: The concatenated streamed completion text.
    """
    global CLIENT_INITD
    if not CLIENT_INITD:
        # We use NIMs since most PyG users may not be able to run a 70B+ model
        try:
            from openai import OpenAI
        except ImportError as e:
            # Raise instead of quit(): quit() raises SystemExit (and is not
            # guaranteed to exist under `python -S`), which is a surprising
            # failure mode for library code.
            raise ImportError(
                "Failed to import `openai` package, please install it and rerun the script"  # noqa
            ) from e
        global CLIENT
        CLIENT = OpenAI(base_url=ENDPOINT_URL, api_key=GLOBAL_NIM_KEY)
        CLIENT_INITD = True
    txt_input = txt
    if post_text != "":
        txt_input += '\n' + post_text
    messages = []
    # Reasoning-capable models get the NIM "detailed thinking" switch.
    if any(model_name_str in NIM_MODEL
           for model_name_str in known_reasoners):
        messages.append({"role": "system", "content": "detailed thinking on"})
    messages.append({"role": "user", "content": txt_input})
    completion = CLIENT.chat.completions.create(model=NIM_MODEL,
                                                messages=messages,
                                                temperature=0, top_p=1,
                                                max_tokens=1024, stream=True)
    # Re-assemble the streamed deltas into one string.
    out_str = ""
    for chunk in completion:
        if chunk.choices[0].delta.content is not None:
            out_str += chunk.choices[0].delta.content
    return out_str
def _parse_n_check_triples(triples_str: str) -> List[Tuple[str, str, str]]:
# use pythonic checks for triples
processed = []
split_by_newline = triples_str.split("\n")
# sometimes LLM fails to obey the prompt
if len(split_by_newline) > 1:
split_triples = split_by_newline
llm_obeyed = True
else:
# handles form "(e, r, e) (e, r, e) ... (e, r, e)""
split_triples = triples_str[1:-1].split(") (")
llm_obeyed = False
for triple_str in split_triples:
try:
if llm_obeyed:
# remove parenthesis and single quotes for parsing
triple_str = triple_str.replace("(", "").replace(")",
"").replace(
"'", "")
split_trip = triple_str.split(',')
# remove blank space at beginning or end
split_trip = [(i[1:] if i[0] == " " else i) for i in split_trip]
split_trip = [(i[:-1].lower() if i[-1] == " " else i)
for i in split_trip]
potential_trip = tuple(split_trip)
except: # noqa
continue
if 'tuple' in str(type(potential_trip)) and len(
potential_trip
) == 3 and "note:" not in potential_trip[0].lower():
# additional check for empty node/edge attrs
if potential_trip[0] != '' and potential_trip[
1] != '' and potential_trip[2] != '':
processed.append(potential_trip)
return processed
def _llm_then_python_parse(chunks, py_fn, llm_fn, **kwargs):
relevant_triples = []
for chunk in chunks:
relevant_triples += py_fn(llm_fn(chunk, **kwargs))
return relevant_triples
# Worker entry point for mp.spawn: parse this rank's chunk slice and stash
# the result on disk for the parent to collect (spawned workers cannot
# easily return values directly).
def _multiproc_helper(rank, in_chunks_per_proc, py_fn, llm_fn, NIM_KEY,
                      NIM_MODEL, ENDPOINT_URL):
    out = _llm_then_python_parse(in_chunks_per_proc[rank], py_fn, llm_fn,
                                 GLOBAL_NIM_KEY=NIM_KEY, NIM_MODEL=NIM_MODEL,
                                 ENDPOINT_URL=ENDPOINT_URL)
    torch.save(out, "/tmp/outs_for_proc_" + str(rank))
def _get_num_procs():
if hasattr(os, "sched_getaffinity"):
try:
num_proc = len(os.sched_getaffinity(0)) / (2)
except Exception:
pass
if num_proc is None:
num_proc = os.cpu_count() / (2)
return int(num_proc)
def _chunk_text(text: str, chunk_size: int = 512) -> list[str]:
"""Function to chunk text into sentence-based segments.
Co-authored with Claude AI.
"""
# If the input text is empty or None, return an empty list
if not text:
return []
# List of punctuation marks that typically end sentences
sentence_endings = '.!?'
# List to store the resulting chunks
chunks = []
# Continue processing the entire text
while text:
# If the remaining text is shorter than chunk_size, add it and break
if len(text) <= chunk_size:
chunks.append(text.strip())
break
# Start with the maximum possible chunk
chunk = text[:chunk_size]
# Try to find the last sentence ending within the chunk
best_split = chunk_size
for ending in sentence_endings:
# Find the last occurrence of the ending punctuation
last_ending = chunk.rfind(ending)
if last_ending != -1:
# Ensure we include the punctuation and any following space
best_split = min(
best_split, last_ending + 1 +
(1 if last_ending + 1 < len(chunk)
and chunk[last_ending + 1].isspace() else 0))
# Adjust to ensure we don't break words
# If the next character is a letter, find the last space
if best_split < len(text) and text[best_split].isalpha():
# Find the last space before the current split point
space_split = text[:best_split].rfind(' ')
if space_split != -1:
best_split = space_split
# Append the chunk, ensuring it's stripped
chunks.append(text[:best_split].strip())
# Remove the processed part from the text
text = text[best_split:].lstrip()
return chunks
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/llm/models/txt2kg.py",
"license": "MIT License",
"lines": 311,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:torch_geometric/llm/rag_loader.py | from abc import abstractmethod
from typing import Any, Callable, Dict, Optional, Protocol, Tuple, Union
from torch_geometric.data import Data, FeatureStore, HeteroData
from torch_geometric.llm.utils.vectorrag import VectorRetriever
from torch_geometric.sampler import HeteroSamplerOutput, SamplerOutput
from torch_geometric.typing import InputEdges, InputNodes
class RAGFeatureStore(Protocol):
    """Feature store template for remote GNN RAG backend.

    Implementations retrieve query-relevant seed nodes/edges and materialize
    sampled subgraphs together with their feature attributes.
    """
    @abstractmethod
    def retrieve_seed_nodes(self, query: Any, **kwargs) -> InputNodes:
        """Makes a comparison between the query and all the nodes to get all
        the closest nodes. Return the indices of the nodes that are to be seeds
        for the RAG Sampler.
        """
        ...
    @property
    @abstractmethod
    def config(self) -> Dict[str, Any]:
        """Get the config for the RAGFeatureStore."""
        ...
    @config.setter
    @abstractmethod
    def config(self, config: Dict[str, Any]):
        """Set the config for the RAGFeatureStore."""
        ...
    @abstractmethod
    def retrieve_seed_edges(self, query: Any, **kwargs) -> InputEdges:
        """Makes a comparison between the query and all the edges to get all
        the closest edges. Returns the edge indices that are to be the seeds
        for the RAG Sampler.
        """
        ...
    @abstractmethod
    def load_subgraph(
        self, sample: Union[SamplerOutput, HeteroSamplerOutput]
    ) -> Union[Data, HeteroData]:
        """Combines sampled subgraph output with features in a Data object."""
        ...
class RAGGraphStore(Protocol):
    """Graph store template for remote GNN RAG backend.

    Implementations sample subgraphs around the seed nodes/edges chosen by a
    companion :class:`RAGFeatureStore`.
    """
    @abstractmethod
    def sample_subgraph(self, seed_nodes: InputNodes, seed_edges: InputEdges,
                        **kwargs) -> Union[SamplerOutput, HeteroSamplerOutput]:
        """Sample a subgraph using the seeded nodes and edges."""
        ...
    @property
    @abstractmethod
    def config(self) -> Dict[str, Any]:
        """Get the config for the RAGGraphStore."""
        ...
    @config.setter
    @abstractmethod
    def config(self, config: Dict[str, Any]):
        """Set the config for the RAGGraphStore."""
        ...
    @abstractmethod
    def register_feature_store(self, feature_store: FeatureStore):
        """Register a feature store to be used with the sampler. Samplers need
        info from the feature store in order to work properly on HeteroGraphs.
        """
        ...
# TODO: Make compatible with Heterographs
class RAGQueryLoader:
    """Loader meant for making RAG queries from a remote backend."""
    def __init__(self, graph_data: Tuple[RAGFeatureStore, RAGGraphStore],
                 subgraph_filter: Optional[Callable[[Data, Any], Data]] = None,
                 augment_query: bool = False,
                 vector_retriever: Optional[VectorRetriever] = None,
                 config: Optional[Dict[str, Any]] = None):
        """Loader meant for making queries from a remote backend.

        Args:
            graph_data (Tuple[RAGFeatureStore, RAGGraphStore]):
                Remote FeatureStore and GraphStore to load from.
                Assumed to conform to the protocols listed above.
            subgraph_filter (Optional[Callable[[Data, Any], Data]], optional):
                Optional local transform to apply to data after retrieval.
                Defaults to None.
            augment_query (bool, optional): Whether to augment the query with
                retrieved documents. Defaults to False.
            vector_retriever (Optional[VectorRetriever], optional):
                VectorRetriever to use for retrieving documents.
                Defaults to None.
            config (Optional[Dict[str, Any]], optional): Config to pass into
                the RAGQueryLoader. Defaults to None.
        """
        fstore, gstore = graph_data
        self.vector_retriever = vector_retriever
        self.augment_query = augment_query
        self.feature_store = fstore
        self.graph_store = gstore
        self.graph_store.edge_index = self.graph_store.edge_index.contiguous()
        self.graph_store.register_feature_store(self.feature_store)
        self.subgraph_filter = subgraph_filter
        self.config = config

    def _propagate_config(self, config: Dict[str, Any]):
        """Propagate the config to the relevant components."""
        self.feature_store.config = config
        self.graph_store.config = config

    @property
    def config(self):
        """Get the config for the RAGQueryLoader."""
        return self._config

    @config.setter
    def config(self, config: Dict[str, Any]):
        """Set the config for the RAGQueryLoader.

        Args:
            config (Dict[str, Any]): The config to set. ``None`` (the
                constructor default) is stored but not propagated.
        """
        # Only propagate a real config: pushing the default `None` into the
        # stores would crash their config setters, which expect a dict.
        if config is not None:
            self._propagate_config(config)
        self._config = config

    def query(self, query: Any) -> Data:
        """Retrieve a subgraph associated with the query with all its feature
        attributes.
        """
        if self.vector_retriever:
            retrieved_docs = self.vector_retriever.query(query)
            if self.augment_query:
                query = [query] + retrieved_docs
        # NOTE(review): the encoded query returned here is currently unused.
        seed_nodes, query_enc = self.feature_store.retrieve_seed_nodes(query)
        subgraph_sample = self.graph_store.sample_subgraph(seed_nodes)
        data = self.feature_store.load_subgraph(sample=subgraph_sample)
        # apply local filter
        if self.subgraph_filter:
            data = self.subgraph_filter(data, query)
        if self.vector_retriever:
            data.text_context = retrieved_docs
        return data
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/llm/rag_loader.py",
"license": "MIT License",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:torch_geometric/llm/utils/backend_utils.py | import os
from dataclasses import dataclass
from enum import Enum, auto
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Protocol,
Tuple,
Type,
Union,
no_type_check,
runtime_checkable,
)
import numpy as np
import torch
from torch import Tensor
from torch.nn import Module
from torch_geometric.data import Data, FeatureStore, GraphStore
from torch_geometric.distributed import (
LocalFeatureStore,
LocalGraphStore,
Partitioner,
)
from torch_geometric.llm.large_graph_indexer import (
EDGE_RELATION,
LargeGraphIndexer,
TripletLike,
)
from torch_geometric.llm.models import SentenceTransformer
from torch_geometric.typing import EdgeType, NodeType
try:
from pandas import DataFrame
except ImportError:
DataFrame = None
RemoteGraphBackend = Tuple[FeatureStore, GraphStore]
# TODO: Make everything compatible with Hetero graphs as well
def preprocess_triplet(triplet: TripletLike) -> TripletLike:
    """Normalize a (head, relation, tail) triple by stringifying and
    lower-casing each element.
    """
    head, relation, tail = triplet
    return str(head).lower(), str(relation).lower(), str(tail).lower()
@no_type_check
def retrieval_via_pcst(
    data: Data,
    q_emb: Tensor,
    textual_nodes: Any,
    textual_edges: Any,
    topk: int = 3,
    topk_e: int = 5,
    cost_e: float = 0.5,
    num_clusters: int = 1,
) -> Tuple[Data, str]:
    """Prune ``data`` to a query-relevant subgraph via Prize-Collecting
    Steiner Tree (PCST) and return it with a CSV description of its
    nodes and edges.

    Args:
        data (Data): Graph to prune. Expects ``x``, ``edge_attr`` and
            ``edge_index``, plus ``node_idx``/``edge_idx`` bookkeeping
            attributes for tracking the selected subset.
        q_emb (Tensor): Query embedding compared (cosine) against node and
            edge features.
        textual_nodes (Any): DataFrame-like table of node text (indexed
            positionally via ``.iloc``).
        textual_edges (Any): DataFrame-like table of edge text with columns
            ``src``, ``edge_attr``, ``dst``.
        topk (int, optional): Number of nodes given prizes. (default: 3)
        topk_e (int, optional): Number of distinct edge-similarity values
            given prizes. (default: 5)
        cost_e (float, optional): Base edge cost. (default: 0.5)
        num_clusters (int, optional): Number of connected components asked
            of PCST. (default: 1)

    Returns:
        Tuple[Data, str]: The pruned graph (or the untouched input for
        degenerate graphs) and its CSV description.
    """
    # skip PCST for bad graphs
    booly = data.edge_attr is None or data.edge_attr.numel() == 0
    booly = booly or data.x is None or data.x.numel() == 0
    booly = booly or data.edge_index is None or data.edge_index.numel() == 0
    if not booly:
        c = 0.01
        from pcst_fast import pcst_fast
        root = -1
        pruning = 'gw'
        verbosity_level = 0
        # Node prizes: rank the top-k most query-similar nodes k..1, zero
        # elsewhere.
        if topk > 0:
            n_prizes = torch.nn.CosineSimilarity(dim=-1)(q_emb, data.x)
            topk = min(topk, data.num_nodes)
            _, topk_n_indices = torch.topk(n_prizes, topk, largest=True)
            n_prizes = torch.zeros_like(n_prizes)
            n_prizes[topk_n_indices] = torch.arange(topk, 0, -1,
                                                    device=n_prizes.device,
                                                    dtype=n_prizes.dtype)
        else:
            n_prizes = torch.zeros(data.num_nodes)
        # Edge prizes: keep only the top-k_e distinct similarity values,
        # spread each value's prize across its ties, strictly decreasing.
        if topk_e > 0:
            e_prizes = torch.nn.CosineSimilarity(dim=-1)(q_emb, data.edge_attr)
            topk_e = min(topk_e, e_prizes.unique().size(0))
            topk_e_values, _ = torch.topk(e_prizes.unique(), topk_e,
                                          largest=True)
            e_prizes[e_prizes < topk_e_values[-1]] = 0.0
            last_topk_e_value = topk_e
            for k in range(topk_e):
                indices = e_prizes == topk_e_values[k]
                value = min((topk_e - k) / sum(indices), last_topk_e_value - c)
                e_prizes[indices] = value
                last_topk_e_value = value * (1 - c)
            # reduce the cost of the edges so that at least one edge is chosen
            cost_e = min(cost_e, e_prizes.max().item() * (1 - c / 2))
        else:
            e_prizes = torch.zeros(data.num_edges)
        costs = []
        edges = []
        virtual_n_prizes = []
        virtual_edges = []
        virtual_costs = []
        mapping_n = {}
        mapping_e = {}
        # PCST prizes live on vertices only: an edge whose prize exceeds its
        # cost is rewritten as a zero-cost virtual node carrying the surplus.
        for i, (src, dst) in enumerate(data.edge_index.t().numpy()):
            prize_e = e_prizes[i]
            if prize_e <= cost_e:
                mapping_e[len(edges)] = i
                edges.append((src, dst))
                costs.append(cost_e - prize_e)
            else:
                virtual_node_id = data.num_nodes + len(virtual_n_prizes)
                mapping_n[virtual_node_id] = i
                virtual_edges.append((src, virtual_node_id))
                virtual_edges.append((virtual_node_id, dst))
                virtual_costs.append(0)
                virtual_costs.append(0)
                virtual_n_prizes.append(prize_e - cost_e)
        prizes = np.concatenate([n_prizes, np.array(virtual_n_prizes)])
        num_edges = len(edges)
        if len(virtual_costs) > 0:
            costs = np.array(costs + virtual_costs)
            edges = np.array(edges + virtual_edges)
        vertices, edges = pcst_fast(edges, prizes, costs, root, num_clusters,
                                    pruning, verbosity_level)
        # Map PCST output back: real vertices/edges directly, virtual
        # vertices back to the original edge they encoded.
        selected_nodes = vertices[vertices < data.num_nodes]
        selected_edges = [mapping_e[e] for e in edges if e < num_edges]
        virtual_vertices = vertices[vertices >= data.num_nodes]
        if len(virtual_vertices) > 0:
            virtual_vertices = vertices[vertices >= data.num_nodes]
            virtual_edges = [mapping_n[i] for i in virtual_vertices]
            selected_edges = np.array(selected_edges + virtual_edges)
        edge_index = data.edge_index[:, selected_edges]
        # Ensure every endpoint of a selected edge is kept as a node.
        selected_nodes = np.unique(
            np.concatenate(
                [selected_nodes, edge_index[0].numpy(),
                 edge_index[1].numpy()]))
        n = textual_nodes.iloc[selected_nodes]
        e = textual_edges.iloc[selected_edges]
    else:
        n = textual_nodes
        e = textual_edges
    desc = n.to_csv(index=False) + '\n' + e.to_csv(
        index=False, columns=['src', 'edge_attr', 'dst'])
    if booly:
        return data, desc
    # Re-index the surviving nodes to a dense 0..N-1 range.
    mapping = {n: i for i, n in enumerate(selected_nodes.tolist())}
    src = [mapping[i] for i in edge_index[0].tolist()]
    dst = [mapping[i] for i in edge_index[1].tolist()]
    # HACK Added so that the subset of nodes and edges selected can be tracked
    node_idx = np.array(data.node_idx)[selected_nodes]
    edge_idx = np.array(data.edge_idx)[selected_edges]
    data = Data(
        x=data.x[selected_nodes],
        edge_index=torch.tensor([src, dst]).to(torch.long),
        edge_attr=data.edge_attr[selected_edges],
        # HACK: track subset of selected nodes/edges
        node_idx=node_idx,
        edge_idx=edge_idx,
    )
    return data, desc
def batch_knn(query_enc: Tensor, embeds: Tensor,
              k: int) -> Iterator[Tuple[Tensor, Tensor]]:
    """For each query embedding, yield the indices of its ``k`` most
    cosine-similar rows in ``embeds`` together with that query embedding
    (unsqueezed to shape ``(1, dim)``).
    """
    from torchmetrics.functional import pairwise_cosine_similarity
    sim_matrix = pairwise_cosine_similarity(query_enc,
                                            embeds.to(query_enc.device))
    # Never ask for more neighbors than there are candidates.
    neighbors = min(k, len(embeds))
    for row_idx, sims in enumerate(sim_matrix):
        _, top_idx = torch.topk(sims, neighbors, largest=True)
        yield top_idx, query_enc[row_idx].unsqueeze(0)
# Adapted from LocalGraphStore
@runtime_checkable
class ConvertableGraphStore(Protocol):
    """Structural interface for GraphStore classes constructible from
    in-memory (hetero)graph data or from an on-disk partition.
    """
    @classmethod
    def from_data(
        cls,
        edge_id: Tensor,
        edge_index: Tensor,
        num_nodes: int,
        is_sorted: bool = False,
    ) -> GraphStore:
        ...
    @classmethod
    def from_hetero_data(
        cls,
        edge_id_dict: Dict[EdgeType, Tensor],
        edge_index_dict: Dict[EdgeType, Tensor],
        num_nodes_dict: Dict[NodeType, int],
        is_sorted: bool = False,
    ) -> GraphStore:
        ...
    @classmethod
    def from_partition(cls, root: str, pid: int) -> GraphStore:
        ...
# Adapted from LocalFeatureStore
@runtime_checkable
class ConvertableFeatureStore(Protocol):
    """Structural interface for FeatureStore classes constructible from
    in-memory (hetero)graph features or from an on-disk partition.
    """
    @classmethod
    def from_data(
        cls,
        node_id: Tensor,
        x: Optional[Tensor] = None,
        y: Optional[Tensor] = None,
        edge_id: Optional[Tensor] = None,
        edge_attr: Optional[Tensor] = None,
    ) -> FeatureStore:
        ...
    @classmethod
    def from_hetero_data(
        cls,
        node_id_dict: Dict[NodeType, Tensor],
        x_dict: Optional[Dict[NodeType, Tensor]] = None,
        y_dict: Optional[Dict[NodeType, Tensor]] = None,
        edge_id_dict: Optional[Dict[EdgeType, Tensor]] = None,
        edge_attr_dict: Optional[Dict[EdgeType, Tensor]] = None,
    ) -> FeatureStore:
        ...
    @classmethod
    def from_partition(cls, root: str, pid: int) -> FeatureStore:
        ...
class RemoteDataType(Enum):
    """On-disk layout of a saved RAG backend."""
    DATA = auto()  # single serialized Data object
    PARTITION = auto()  # directory produced by Partitioner
@dataclass
class RemoteGraphBackendLoader:
    """Utility class to load triplets into a RAG Backend.

    Holds the on-disk location of a saved graph (or partition root) plus the
    store classes to rehydrate it with; deletes the artifact on destruction.
    """
    path: str
    datatype: RemoteDataType
    graph_store_type: Type[ConvertableGraphStore]
    feature_store_type: Type[ConvertableFeatureStore]

    def load(self, pid: Optional[int] = None) -> RemoteGraphBackend:
        """Instantiate the ``(feature_store, graph_store)`` pair from disk.

        Args:
            pid (Optional[int]): Partition ID; required when loading from a
                partitioned store.

        Raises:
            ValueError: If ``pid`` is missing for a partitioned store.
            NotImplementedError: For an unknown ``datatype``.
        """
        if self.datatype == RemoteDataType.DATA:
            data_obj = torch.load(self.path, weights_only=False)
            # is_sorted=True since we assume nodes come sorted from indexer
            graph_store = self.graph_store_type.from_data(
                edge_id=data_obj['edge_id'], edge_index=data_obj.edge_index,
                num_nodes=data_obj.num_nodes, is_sorted=True)
            feature_store = self.feature_store_type.from_data(
                node_id=data_obj['node_id'], x=data_obj.x,
                edge_id=data_obj['edge_id'], edge_attr=data_obj.edge_attr)
        elif self.datatype == RemoteDataType.PARTITION:
            if pid is None:
                # Explicit raise instead of `assert`: asserts are stripped
                # under `python -O`, which would let a None pid through.
                raise ValueError(
                    "Partition ID must be defined for loading from a "
                    "partitioned store.")
            graph_store = self.graph_store_type.from_partition(self.path, pid)
            feature_store = self.feature_store_type.from_partition(
                self.path, pid)
        else:
            raise NotImplementedError
        return (feature_store, graph_store)

    def __del__(self) -> None:
        # Best-effort cleanup of the on-disk artifact when the loader dies.
        if os.path.exists(self.path):
            os.remove(self.path)
def create_graph_from_triples(
    triples: Iterable[TripletLike],
    embedding_model: Union[Module, Callable],
    embedding_method_kwargs: Optional[Dict[str, Any]] = None,
    pre_transform: Optional[Callable[[TripletLike], TripletLike]] = None,
) -> Data:
    """Utility function that can be used to create a graph from triples."""
    # Default to no extra embedding arguments.
    embed_kwargs = dict() if embedding_method_kwargs is None \
        else embedding_method_kwargs
    # Index the triples into a node/edge structure.
    indexer = LargeGraphIndexer.from_triplets(triples,
                                              pre_transform=pre_transform)
    # Embed the unique node texts and attach them as node features.
    node_embeddings = embedding_model(indexer.get_unique_node_features(),
                                      **embed_kwargs)
    indexer.add_node_feature('x', node_embeddings)
    # Embed the unique relation texts and attach them as edge features.
    edge_embeddings = embedding_model(
        indexer.get_unique_edge_features(feature_name=EDGE_RELATION),
        **embed_kwargs)
    indexer.add_edge_feature(new_feature_name="edge_attr",
                             new_feature_vals=edge_embeddings,
                             map_from_feature=EDGE_RELATION)
    graph = indexer.to_data(node_feature_name='x',
                            edge_feature_name='edge_attr')
    return graph.to("cpu")
def create_remote_backend_from_graph_data(
    graph_data: Data,
    graph_db: Type[ConvertableGraphStore] = LocalGraphStore,
    feature_db: Type[ConvertableFeatureStore] = LocalFeatureStore,
    path: str = '',
    n_parts: int = 1,
) -> RemoteGraphBackendLoader:
    """Utility function that can be used to create a RAG Backend from triples.

    Args:
        graph_data (Data): Graph data to load into the RAG Backend.
        graph_db (Type[ConvertableGraphStore], optional): GraphStore class to
            use. Defaults to LocalGraphStore.
        feature_db (Type[ConvertableFeatureStore], optional): FeatureStore
            class to use. Defaults to LocalFeatureStore.
        path (str, optional): path to save resulting stores. Defaults to ''.
        n_parts (int, optional): Number of partitions to store in.
            Defaults to 1.

    Returns:
        RemoteGraphBackendLoader: Loader to load RAG backend from disk or
        memory.
    """
    # Touch the required constructors up front so a store class missing part
    # of the Convertable* interface fails fast with an AttributeError.
    # (These checks used to be chained with `elif`, which skipped the
    # feature-store check whenever the graph-store check ran.)
    if not issubclass(graph_db, ConvertableGraphStore):
        _ = graph_db.from_data
        _ = graph_db.from_hetero_data
        _ = graph_db.from_partition
    if not issubclass(feature_db, ConvertableFeatureStore):
        _ = feature_db.from_data
        _ = feature_db.from_hetero_data
        _ = feature_db.from_partition
    if n_parts == 1:
        torch.save(graph_data, path)
        return RemoteGraphBackendLoader(path, RemoteDataType.DATA, graph_db,
                                        feature_db)
    else:
        partitioner = Partitioner(data=graph_data, num_parts=n_parts,
                                  root=path)
        partitioner.generate_partition()
        return RemoteGraphBackendLoader(path, RemoteDataType.PARTITION,
                                        graph_db, feature_db)
def make_pcst_filter(triples: List[Tuple[str, str,
                                         str]], model: SentenceTransformer,
                     topk: int = 5, topk_e: int = 5, cost_e: float = 0.5,
                     num_clusters: int = 1) -> Callable[[Data, str], Data]:
    """Creates a PCST (Prize Collecting Tree) filter.

    :param triples: List of triples (head, relation, tail) representing KG data
    :param model: SentenceTransformer model for embedding text
    :param topk: Number of top-K results to return (default: 5)
    :param topk_e: Number of top-K entity results to return (default: 5)
    :param cost_e: Cost of edges (default: 0.5)
    :param num_clusters: Number of connected components in the PCST output.
    :return: PCST Filter function
    """
    if DataFrame is None:
        raise Exception("PCST requires `pip install pandas`"
                        ) # Check if pandas is installed
    # Remove duplicate triples to ensure unique set
    triples = list(dict.fromkeys(triples))
    # Initialize empty list to store nodes (entities) from triples
    nodes = []
    # Iterate over triples to extract unique nodes (entities)
    for h, _, t in triples:
        for node in (h, t): # Extract head and tail entities from each triple
            nodes.append(node)
    # Remove duplicates and create final list of unique nodes
    nodes = list(dict.fromkeys(nodes))
    # Create full list of textual nodes (entities) for filtering
    # (captured by the closure below; indexed by graph["node_idx"])
    full_textual_nodes = nodes

    def apply_retrieval_via_pcst(
        graph: Data, # Input graph data
        query: str, # Search query
    ) -> Data:
        """Applies PCST filtering for retrieval.

        :param graph: Input graph data
        :param query: Search query
        :return: Retrieved graph/query data
        """
        # PCST relies on numpy and pcst_fast pypi libs, hence to("cpu")
        with torch.no_grad():
            q_emb = model.encode([query]).to("cpu")
        # Rebuild the textual node/edge tables for just this graph's subset.
        textual_nodes = [(int(i), full_textual_nodes[i])
                         for i in graph["node_idx"]]
        textual_nodes = DataFrame(textual_nodes,
                                  columns=["node_id", "node_attr"])
        textual_edges = [triples[i] for i in graph["edge_idx"]]
        textual_edges = DataFrame(textual_edges,
                                  columns=["src", "edge_attr", "dst"])
        out_graph, desc = retrieval_via_pcst(graph.to(q_emb.device), q_emb,
                                             textual_nodes, textual_edges,
                                             topk=topk, topk_e=topk_e,
                                             cost_e=cost_e,
                                             num_clusters=num_clusters)
        out_graph["desc"] = desc
        # Parse the triples back out of the CSV description;
        # 18 == len("src,edge_attr,dst\n"), i.e. skip the CSV header row.
        where_trips_start = desc.find("src,edge_attr,dst")
        parsed_trips = []
        for trip in desc[where_trips_start + 18:-1].split("\n"):
            parsed_trips.append(tuple(trip.split(",")))
        # Handle case where PCST returns an isolated node
        """
        TODO find a better solution since these failed subgraphs
        severely hurt accuracy.
        """
        if str(parsed_trips) == "[('',)]" or out_graph.edge_index.numel() == 0:
            out_graph["triples"] = []
        else:
            out_graph["triples"] = parsed_trips
        out_graph["question"] = query
        return out_graph

    return apply_retrieval_via_pcst
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/llm/utils/backend_utils.py",
"license": "MIT License",
"lines": 381,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:torch_geometric/llm/utils/feature_store.py | import gc
from collections.abc import Iterable, Iterator
from typing import Any, Dict, List, Tuple, Union
import torch
from torch import Tensor
from torch_geometric.data import Data, HeteroData
from torch_geometric.distributed.local_feature_store import LocalFeatureStore
from torch_geometric.llm.utils.backend_utils import batch_knn
from torch_geometric.sampler import HeteroSamplerOutput, SamplerOutput
from torch_geometric.typing import InputNodes
# NOTE: Only compatible with Homogeneous graphs for now
class KNNRAGFeatureStore(LocalFeatureStore):
"""A feature store that uses a KNN-based retrieval."""
    def __init__(self) -> None:
        """Initializes the feature store."""
        # to be set by the config (see the `config` setter)
        self.encoder_model = None
        self.k_nodes = None
        self._config: Dict[str, Any] = {}
        # NOTE(review): attributes are assigned before LocalFeatureStore's
        # __init__ runs — presumably intentional; confirm the base class
        # does not overwrite them.
        super().__init__()
    @property
    def config(self) -> Dict[str, Any]:
        """Get the config for the feature store.

        Returns:
            Dict[str, Any]: The last config applied via the setter
            (empty dict until one is set).
        """
        return self._config
def _set_from_config(self, config: Dict[str, Any], attr_name: str) -> None:
"""Set an attribute from the config.
Args:
config (Dict[str, Any]): Config dictionary
attr_name (str): Name of attribute to set
Raises:
ValueError: If required attribute not found in config
"""
if attr_name not in config:
raise ValueError(
f"Required config parameter '{attr_name}' not found")
setattr(self, attr_name, config[attr_name])
@config.setter # type: ignore
def config(self, config: Dict[str, Any]) -> None:
"""Set the config for the feature store.
Args:
config (Dict[str, Any]):
Config dictionary containing required parameters
Raises:
ValueError: If required parameters missing from config
"""
self._set_from_config(config, "k_nodes")
self._set_from_config(config, "encoder_model")
assert self.encoder_model is not None, \
"Need to define encoder model from config"
self.encoder_model.eval()
self._config = config
@property
def x(self) -> Tensor:
"""Returns the node features."""
return Tensor(self.get_tensor(group_name=None, attr_name='x'))
@property
def edge_attr(self) -> Tensor:
"""Returns the edge attributes."""
return Tensor(
self.get_tensor(group_name=(None, None), attr_name='edge_attr'))
def retrieve_seed_nodes( # noqa: D417
self, query: Union[str, List[str],
Tuple[str]]) -> Tuple[InputNodes, Tensor]:
"""Retrieves the k_nodes most similar nodes to the given query.
Args:
query (Union[str, List[str], Tuple[str]]): The query
or list of queries to search for.
Returns:
The indices of the most similar nodes and the encoded query
"""
if not isinstance(query, (list, tuple)):
query = [query]
assert self.k_nodes is not None, "please set k_nodes via config"
if len(query) == 1:
result, query_enc = next(
self._retrieve_seed_nodes_batch(query, self.k_nodes))
gc.collect()
torch.cuda.empty_cache()
return result, query_enc
else:
out_dict = {}
for i, out in enumerate(
self._retrieve_seed_nodes_batch(query, self.k_nodes)):
out_dict[query[i]] = out
gc.collect()
torch.cuda.empty_cache()
return out_dict
def _retrieve_seed_nodes_batch( # noqa: D417
self, query: Iterable[Any],
k_nodes: int) -> Iterator[Tuple[InputNodes, Tensor]]:
"""Retrieves the k_nodes most similar nodes to each query in the batch.
Args:
- query (Iterable[Any]: The batch of queries to search for.
- k_nodes (int): The number of nodes to retrieve.
Yields:
- The indices of the most similar nodes for each query.
"""
if isinstance(self.meta, dict) and self.meta.get("is_hetero", False):
raise NotImplementedError
assert self.encoder_model is not None, \
"Need to define encoder model from config"
query_enc = self.encoder_model.encode(query)
return batch_knn(query_enc, self.x, k_nodes)
def load_subgraph( # noqa
self,
sample: Union[SamplerOutput, HeteroSamplerOutput],
induced: bool = True,
) -> Union[Data, HeteroData]:
"""Loads a subgraph from the given sample.
Args:
sample: The sample to load the subgraph from.
induced: Whether to return the induced subgraph.
Resets node and edge ids.
Returns:
The loaded subgraph.
"""
if isinstance(sample, HeteroSamplerOutput):
raise NotImplementedError
"""
NOTE: torch_geometric.loader.utils.filter_custom_store
can be used here if it supported edge features.
"""
edge_id = sample.edge
x = self.x[sample.node]
edge_attr = self.edge_attr[edge_id]
edge_idx = torch.stack(
[sample.row, sample.col], dim=0) if induced else torch.stack(
[sample.global_row, sample.global_col], dim=0)
result = Data(x=x, edge_attr=edge_attr, edge_index=edge_idx)
# useful for tracking what subset of the graph was sampled
result.node_idx = sample.node
result.edge_idx = edge_id
return result
"""
TODO: make class CuVSKNNRAGFeatureStore(KNNRAGFeatureStore)
include a approximate knn flag for the CuVS.
Connect this with a CuGraphGraphStore
for enabling a accelerated boolean flag for RAGQueryLoader.
On by default if CuGraph+CuVS avail.
If not raise note mentioning its speedup.
"""
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/llm/utils/feature_store.py",
"license": "MIT License",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:torch_geometric/llm/utils/graph_store.py | from typing import Any, Dict, Optional, Tuple, Union
import torch
from torch import Tensor
from torch_geometric.data import FeatureStore
from torch_geometric.distributed.local_graph_store import LocalGraphStore
from torch_geometric.sampler import (
BidirectionalNeighborSampler,
NodeSamplerInput,
SamplerOutput,
)
from torch_geometric.utils import index_sort
# A representation of an edge index, following the possible formats:
# * default: Tensor, size = [2, num_edges]
# * Tensor[0, :] == row, Tensor[1, :] == col
# * COO: (row, col)
# * CSC: (row, colptr)
# * CSR: (rowptr, col)
_EdgeTensorType = Union[Tensor, Tuple[Tensor, Tensor]]
class NeighborSamplingRAGGraphStore(LocalGraphStore):
"""Neighbor sampling based graph-store to store & retrieve graph data."""
def __init__( # type: ignore[no-untyped-def]
self,
feature_store: Optional[FeatureStore] = None,
**kwargs,
):
"""Initializes the graph store.
Optional feature store and neighbor sampling settings.
Args:
feature_store (optional): The feature store to use.
None if not yet registered.
**kwargs (optional):
Additional keyword arguments for neighbor sampling.
"""
self.feature_store = feature_store
self.sample_kwargs = kwargs
self._sampler_is_initialized = False
self._config: Dict[str, Any] = {}
# to be set by the config
self.num_neighbors = None
super().__init__()
@property
def config(self) -> Dict[str, Any]:
"""Get the config for the feature store."""
return self._config
def _set_from_config(self, config: Dict[str, Any], attr_name: str) -> None:
"""Set an attribute from the config.
Args:
config (Dict[str, Any]): Config dictionary
attr_name (str): Name of attribute to set
Raises:
ValueError: If required attribute not found in config
"""
if attr_name not in config:
raise ValueError(
f"Required config parameter '{attr_name}' not found")
setattr(self, attr_name, config[attr_name])
@config.setter # type: ignore
def config(self, config: Dict[str, Any]) -> None:
"""Set the config for the feature store.
Args:
config (Dict[str, Any]):
Config dictionary containing required parameters
Raises:
ValueError: If required parameters missing from config
"""
self._set_from_config(config, "num_neighbors")
if hasattr(self, 'sampler'):
self.sampler.num_neighbors = ( # type: ignore[has-type]
self.num_neighbors)
self._config = config
def _init_sampler(self) -> None:
"""Initializes neighbor sampler with the registered feature store."""
if self.feature_store is None:
raise AttributeError("Feature store not registered yet.")
assert self.num_neighbors is not None, \
"Please set num_neighbors through config"
self.sampler = BidirectionalNeighborSampler(
data=(self.feature_store, self), num_neighbors=self.num_neighbors,
**self.sample_kwargs)
self._sampler_is_initialized = True
def register_feature_store(self, feature_store: FeatureStore) -> None:
"""Registers a feature store with the graph store.
:param feature_store: The feature store to register.
"""
self.feature_store = feature_store
self._sampler_is_initialized = False
def put_edge_id( # type: ignore[no-untyped-def]
self, edge_id: Tensor, *args, **kwargs) -> bool:
"""Stores an edge ID in the graph store.
:param edge_id: The edge ID to store.
:return: Whether the operation was successful.
"""
ret = super().put_edge_id(edge_id.contiguous(), *args, **kwargs)
self._sampler_is_initialized = False
return ret
@property
def edge_index(self) -> _EdgeTensorType:
"""Gets the edge index of the graph.
:return: The edge index as a tensor.
"""
return self.get_edge_index(*self.edge_idx_args, **self.edge_idx_kwargs)
def put_edge_index( # type: ignore[no-untyped-def]
self, edge_index: _EdgeTensorType, *args, **kwargs) -> bool:
"""Stores an edge index in the graph store.
:param edge_index: The edge index to store.
:return: Whether the operation was successful.
"""
ret = super().put_edge_index(edge_index, *args, **kwargs)
# HACK
self.edge_idx_args = args
self.edge_idx_kwargs = kwargs
self._sampler_is_initialized = False
return ret
# HACKY
@edge_index.setter # type: ignore
def edge_index(self, edge_index: _EdgeTensorType) -> None:
"""Sets the edge index of the graph.
:param edge_index: The edge index to set.
"""
# correct since we make node list from triples
if isinstance(edge_index, Tensor):
num_nodes = int(edge_index.max()) + 1
else:
assert isinstance(edge_index, tuple) \
and isinstance(edge_index[0], Tensor) \
and isinstance(edge_index[1], Tensor), \
"edge_index must be a Tensor of [2, num_edges] \
or a tuple of Tensors, (row, col)."
num_nodes = int(edge_index[0].max()) + 1
attr = dict(
edge_type=None,
layout='coo',
size=(num_nodes, num_nodes),
is_sorted=False,
)
# edge index needs to be sorted here and the perm saved for later
col_sorted, self.perm = index_sort(edge_index[1], num_nodes,
stable=True)
row_sorted = edge_index[0][self.perm]
edge_index_sorted = torch.stack([row_sorted, col_sorted], dim=0)
self.put_edge_index(edge_index_sorted, **attr)
def sample_subgraph(
self,
seed_nodes: Tensor,
) -> SamplerOutput:
"""Sample the graph starting from the given nodes using the
in-built NeighborSampler.
Args:
seed_nodes (InputNodes): Seed nodes to start sampling from.
num_neighbors (Optional[NumNeighborsType], optional): Parameters
to determine how many hops and number of neighbors per hop.
Defaults to None.
Returns:
Union[SamplerOutput, HeteroSamplerOutput]: NeighborSamplerOutput
for the input.
"""
# TODO add support for Hetero
if not self._sampler_is_initialized:
self._init_sampler()
seed_nodes = seed_nodes.unique().contiguous()
node_sample_input = NodeSamplerInput(input_id=None, node=seed_nodes)
out = self.sampler.sample_from_nodes( # type: ignore[has-type]
node_sample_input)
# edge ids need to be remapped to the original indices
out.edge = self.perm[out.edge]
return out
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/llm/utils/graph_store.py",
"license": "MIT License",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:torch_geometric/llm/utils/vectorrag.py | # mypy: ignore-errors
import os
from abc import abstractmethod
from typing import Any, Callable, Dict, List, Optional, Protocol, Union
import torch
from torch import Tensor
from torch_geometric.data import Data
from torch_geometric.llm.models import SentenceTransformer
from torch_geometric.llm.utils.backend_utils import batch_knn
class VectorRetriever(Protocol):
"""Protocol for VectorRAG."""
@abstractmethod
def query(self, query: Any, **kwargs: Optional[Dict[str, Any]]) -> Data:
"""Retrieve a context for a given query."""
...
class DocumentRetriever(VectorRetriever):
"""Retrieve documents from a vector database."""
def __init__(self, raw_docs: List[str],
embedded_docs: Optional[Tensor] = None, k_for_docs: int = 2,
model: Optional[Union[SentenceTransformer, torch.nn.Module,
Callable]] = None,
model_kwargs: Optional[Dict[str, Any]] = None):
"""Retrieve documents from a vector database.
Args:
raw_docs: List[str]: List of raw documents.
embedded_docs: Optional[Tensor]: Embedded documents.
k_for_docs: int: Number of documents to retrieve.
model: Optional[Union[SentenceTransformer, torch.nn.Module]]:
Model to use for encoding.
model_kwargs: Optional[Dict[str, Any]]:
Keyword arguments to pass to the model.
"""
self.raw_docs = raw_docs
self.embedded_docs = embedded_docs
self.k_for_docs = k_for_docs
self.model = model
if self.model is not None:
self.encoder = self.model
self.model_kwargs = model_kwargs
if self.embedded_docs is None:
assert self.model is not None, \
"Model must be provided if embedded_docs is not provided"
self.model_kwargs = model_kwargs or {}
self.embedded_docs = self.encoder(self.raw_docs,
**self.model_kwargs)
# we don't want to print the verbose output in `query`
self.model_kwargs.pop("verbose", None)
def query(self, query: Union[str, Tensor]) -> List[str]:
"""Retrieve documents from the vector database.
Args:
query: Union[str, Tensor]: Query to retrieve documents for.
Returns:
List[str]: Documents retrieved from the vector database.
"""
if isinstance(query, str):
with torch.no_grad():
query_enc = self.encoder(query, **self.model_kwargs)
else:
query_enc = query
selected_doc_idxs, _ = next(
batch_knn(query_enc, self.embedded_docs, self.k_for_docs))
return [self.raw_docs[i] for i in selected_doc_idxs]
def save(self, path: str) -> None:
"""Save the DocumentRetriever instance to disk.
Args:
path: str: Path where to save the retriever.
"""
os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)
# Prepare data to save
save_dict = {
'raw_docs': self.raw_docs,
'embedded_docs': self.embedded_docs,
'k_for_docs': self.k_for_docs,
}
# We do not serialize the model
torch.save(save_dict, path)
@classmethod
def load(cls, path: str, model: Union[SentenceTransformer, torch.nn.Module,
Callable],
model_kwargs: Optional[Dict[str, Any]] = None) -> VectorRetriever:
"""Load a DocumentRetriever instance from disk.
Args:
path: str: Path to the saved retriever.
model: Union[SentenceTransformer, torch.nn.Module, Callable]:
Model to use for encoding.
If None, the saved model will be used if available.
model_kwargs: Optional[Dict[str, Any]]
Key word args to be passed to model
Returns:
DocumentRetriever: The loaded retriever.
"""
if not os.path.exists(path):
raise FileNotFoundError(
f"No saved document retriever found at {path}")
save_dict = torch.load(path, weights_only=False)
if save_dict['embedded_docs'] is not None \
and isinstance(save_dict['embedded_docs'], Tensor)\
and model_kwargs is not None:
model_kwargs.pop("verbose", None)
# Create a new DocumentRetriever with the loaded data
return cls(raw_docs=save_dict['raw_docs'],
embedded_docs=save_dict['embedded_docs'],
k_for_docs=save_dict['k_for_docs'], model=model,
model_kwargs=model_kwargs)
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/llm/utils/vectorrag.py",
"license": "MIT License",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:examples/lpformer.py | import random
from argparse import ArgumentParser
from collections import defaultdict
import numpy as np
import torch
from ogb.linkproppred import Evaluator, PygLinkPropPredDataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from torch_geometric.nn.models import LPFormer
parser = ArgumentParser()
parser.add_argument('--data_name', type=str, default='ogbl-ppa')
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--runs', help="# random seeds to run over", type=int,
default=5)
parser.add_argument('--batch_size', type=int, default=32768)
parser.add_argument('--hidden_channels', type=int, default=64)
parser.add_argument('--gnn_layers', type=int, default=3)
parser.add_argument('--dropout', help="Applies to GNN and Transformer",
type=float, default=0.1)
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--eps', help="PPR precision", type=float, default=5e-5)
parser.add_argument('--thresholds',
help="List of cn, 1-hop, >1-hop (in that order)",
nargs="+", default=[0, 1e-4, 1e-2])
args = parser.parse_args()
device = torch.device(args.device)
dataset = PygLinkPropPredDataset(name=args.data_name)
data = dataset[0].to(device)
data.edge_index = data.edge_index.to(device)
if hasattr(data, 'x') and data.x is not None:
data.x = data.x.to(device).to(torch.float)
split_edge = dataset.get_edge_split()
split_data = {
"train_pos": split_edge['train']['edge'].to(device),
"valid_pos": split_edge['valid']['edge'].to(device),
"valid_neg": split_edge['valid']['edge_neg'].to(device),
"test_pos": split_edge['test']['edge'].to(device),
"test_neg": split_edge['test']['edge_neg'].to(device)
}
if hasattr(data, 'edge_weight') and data.edge_weight is not None:
edge_weight = data.edge_weight.to(torch.float)
data.edge_weight = data.edge_weight.view(-1).to(torch.float)
else:
edge_weight = torch.ones(data.edge_index.size(1)).to(device).float()
# Convert edge_index to SparseTensor for efficiency
# adj_prop = SparseTensor.from_edge_index(
# data.edge_index, edge_weight.squeeze(-1),
# [data.num_nodes, data.num_nodes]).to(device)
adj_prop = torch.sparse_coo_tensor(data.edge_index, edge_weight.squeeze(-1),
[data.num_nodes, data.num_nodes]).to(device)
evaluator_hit = Evaluator(name=args.data_name)
model = LPFormer(data.x.size(-1), args.hidden_channels,
num_gnn_layers=args.gnn_layers,
ppr_thresholds=args.thresholds, gnn_dropout=args.dropout,
transformer_dropout=args.dropout, gcn_cache=True).to(device)
# Get PPR matrix in sparse format
ppr_matrix = model.calc_sparse_ppr(data.edge_index, data.num_nodes,
eps=args.eps).to(device)
def train_epoch():
model.train()
train_pos = split_data['train_pos'].to(device)
adjt_mask = torch.ones(train_pos.size(0), dtype=torch.bool, device=device)
total_loss = total_examples = 0
d = DataLoader(range(train_pos.size(0)), args.batch_size, shuffle=True)
for perm in tqdm(d, "Epoch"):
edges = train_pos[perm].t()
# Mask positive input samples - Common strategy during training
adjt_mask[perm] = 0
edge2keep = train_pos[adjt_mask, :].t()
# masked_adj_prop = SparseTensor.from_edge_index(
# edge2keep.t(), sparse_sizes=(data['num_nodes'],
# data['num_nodes'])).to_device(device)
# masked_adj_prop = masked_adj_prop.to_symmetric()
# Ensure symmetric
edge2keep = torch.cat((edge2keep, edge2keep[[1, 0]]), dim=1)
masked_adj_prop = torch.sparse_coo_tensor(
edge2keep,
torch.ones(edge2keep.size(1)).to(device),
(data['num_nodes'], data['num_nodes'])).to(device)
# For next batch
adjt_mask[perm] = 1
pos_out = model(edges, data.x, masked_adj_prop, ppr_matrix)
pos_loss = -torch.log(torch.sigmoid(pos_out) + 1e-6).mean()
# Trivial random sampling
neg_edges = torch.randint(0, data['num_nodes'],
(edges.size(0), edges.size(1)),
dtype=torch.long, device=edges.device)
neg_out = model(neg_edges, data.x, adj_prop, ppr_matrix)
neg_loss = -torch.log(1 - torch.sigmoid(neg_out) + 1e-6).mean()
loss = pos_loss + neg_loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
num_examples = pos_out.size(0)
total_loss += loss.item() * num_examples
total_examples += num_examples
return total_loss / total_examples
@torch.no_grad()
def test():
# NOTE: Eval for ogbl-citation2 is different
# See `train.py` in https://github.com/HarryShomer/LPFormer/ for more
# Also see there for how to eval under the HeaRT setting
# HeaRT = https://arxiv.org/abs/2306.10453
model.eval()
all_preds = defaultdict(list)
for split_key, split_vals in split_data.items():
if "train" not in split_key:
preds = []
for perm in DataLoader(range(split_vals.size(0)), args.batch_size):
edges = split_vals[perm].t()
perm_logits = model(edges, data.x, adj_prop, ppr_matrix)
preds += [torch.sigmoid(perm_logits).cpu()]
all_preds[split_key] = torch.cat(preds, dim=0)
val_hits = evaluator_hit.eval({
'y_pred_pos': all_preds['valid_pos'],
'y_pred_neg': all_preds['valid_neg']
})[f'hits@{evaluator_hit.K}']
test_hits = evaluator_hit.eval({
'y_pred_pos': all_preds['test_pos'],
'y_pred_neg': all_preds['test_neg']
})[f'hits@{evaluator_hit.K}']
return val_hits, test_hits
def set_seeds(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# Train over args.runs seeds and average results
# Best result for reach run chosen via validation
val_perf_runs = []
test_perf_runs = []
for run in range(1, args.runs + 1):
print("=" * 75)
print(f"RUNNING run={run}")
print("=" * 75)
set_seeds(run)
model.reset_parameters()
optimizer = torch.optim.Adam(list(model.parameters()), lr=args.lr)
best_valid = 0
best_valid_test = 0
for epoch in range(1, 1 + args.epochs):
loss = train_epoch()
print(f"Epoch {epoch} Loss: {loss:.4f}\n")
if epoch % 5 == 0:
print("Evaluating model...\n", flush=True)
eval_val, eval_test = test()
print(f"Valid Hits@{evaluator_hit.K} = {eval_val}")
print(f"Test Hits@{evaluator_hit.K} = {eval_test}")
if eval_val > best_valid:
best_valid = eval_val
best_valid_test = eval_test
print(
f"\nBest Performance:\n Valid={best_valid}\n Test={best_valid_test}")
val_perf_runs.append(best_valid)
test_perf_runs.append(best_valid_test)
if args.runs > 1:
print("\n\n")
print(f"Results over {args.runs} runs:")
print(f" Valid = {np.mean(val_perf_runs)} +/- {np.std(val_perf_runs)}")
print(f" Test = {np.mean(test_perf_runs)} +/- {np.std(test_perf_runs)}")
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "examples/lpformer.py",
"license": "MIT License",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:test/nn/models/test_lpformer.py | import torch
from torch_geometric.nn import LPFormer
from torch_geometric.testing import withPackage
from torch_geometric.utils import to_undirected
@withPackage('numba') # For ppr calculation
def test_lpformer():
model = LPFormer(16, 32, num_gnn_layers=2, num_transformer_layers=1)
assert str(
model
) == 'LPFormer(16, 32, num_gnn_layers=2, num_transformer_layers=1)'
num_nodes = 20
x = torch.randn(num_nodes, 16)
edges = torch.randint(0, num_nodes - 1, (2, 110))
edge_index, test_edges = edges[:, :100], edges[:, 100:]
edge_index = to_undirected(edge_index)
ppr_matrix = model.calc_sparse_ppr(edge_index, num_nodes, eps=1e-4)
assert ppr_matrix.is_sparse
assert ppr_matrix.size() == (num_nodes, num_nodes)
assert ppr_matrix.sum().item() > 0
# Test with dense edge_index
out = model(test_edges, x, edge_index, ppr_matrix)
assert out.size() == (10, )
# Test with sparse edge_index
adj = torch.sparse_coo_tensor(edge_index, torch.ones(edge_index.size(1)),
[num_nodes, num_nodes])
out2 = model(test_edges, x, adj, ppr_matrix)
assert out2.size() == (10, )
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/nn/models/test_lpformer.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:torch_geometric/nn/models/lpformer.py | import math
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Parameter
from ...nn.conv import MessagePassing
from ...nn.dense.linear import Linear
from ...nn.inits import glorot, zeros
from ...typing import Adj, OptTensor, Tuple
from ...utils import get_ppr, is_sparse, scatter, softmax
from .basic_gnn import GCN
class LPFormer(nn.Module):
r"""The LPFormer model from the
`"LPFormer: An Adaptive Graph Transformer for Link Prediction"
<https://arxiv.org/abs/2310.11009>`_ paper.
.. note::
For an example of using LPFormer, see
`examples/lpformer.py
<https://github.com/pyg-team/pytorch_geometric/blob/master/examples/
lpformer.py>`_.
Args:
in_channels (int): Size of input dimension
hidden_channels (int): Size of hidden dimension
num_gnn_layers (int, optional): Number of GNN layers
(default: :obj:`2`)
gnn_dropout(float, optional): Dropout used for GNN
(default: :obj:`0.1`)
num_transformer_layers (int, optional): Number of Transformer layers
(default: :obj:`1`)
num_heads (int, optional): Number of heads to use in MHA
(default: :obj:`1`)
transformer_dropout (float, optional): Dropout used for Transformer
(default: :obj:`0.1`)
ppr_thresholds (list): PPR thresholds for different types of nodes.
Types include (in order) common neighbors, 1-Hop nodes
(that aren't CNs), and all other nodes.
(default: :obj:`[0, 1e-4, 1e-2]`)
gcn_cache (bool, optional): Whether to cache edge indices
during message passing. (default: :obj:`False`)
"""
def __init__(
self,
in_channels: int,
hidden_channels: int,
num_gnn_layers: int = 2,
gnn_dropout: float = 0.1,
num_transformer_layers: int = 1,
num_heads: int = 1,
transformer_dropout: float = 0.1,
ppr_thresholds: list = None,
gcn_cache=False,
):
super().__init__()
# Default thresholds
if ppr_thresholds is None:
ppr_thresholds = [0, 1e-4, 1e-2]
if len(ppr_thresholds) == 3:
self.thresh_cn = ppr_thresholds[0]
self.thresh_1hop = ppr_thresholds[1]
self.thresh_non1hop = ppr_thresholds[2]
else:
raise ValueError(
"Argument 'ppr_thresholds' must only be length 3!")
self.in_dim = in_channels
self.hid_dim = hidden_channels
self.gnn_drop = gnn_dropout
self.trans_drop = transformer_dropout
self.gnn = GCN(in_channels, hidden_channels, num_gnn_layers,
dropout=gnn_dropout, norm="layer_norm",
cached=gcn_cache)
self.gnn_norm = nn.LayerNorm(hidden_channels)
# Create Transformer Layers
self.att_layers = nn.ModuleList()
for il in range(num_transformer_layers):
if il == 0:
node_dim = None
self.out_dim = self.hid_dim * 2 if num_transformer_layers > 1 \
else self.hid_dim
elif il == self.num_layers - 1:
node_dim = self.hid_dim
else:
self.out_dim = node_dim = self.hid_dim
self.att_layers.append(
LPAttLayer(self.hid_dim, self.out_dim, node_dim, num_heads,
self.trans_drop))
self.elementwise_lin = MLP(self.hid_dim, self.hid_dim, self.hid_dim)
# Relative Positional Encodings
self.ppr_encoder_cn = MLP(2, self.hid_dim, self.hid_dim)
self.ppr_encoder_onehop = MLP(2, self.hid_dim, self.hid_dim)
self.ppr_encoder_non1hop = MLP(2, self.hid_dim, self.hid_dim)
# thresh=1 implies ignoring some set of nodes
# Also allows us to be more efficient later
if self.thresh_non1hop == 1 and self.thresh_1hop == 1:
self.mask = "cn"
elif self.thresh_non1hop == 1 and self.thresh_1hop < 1:
self.mask = "1-hop"
else:
self.mask = "all"
# 4 is for counts of diff nodes
pairwise_dim = self.hid_dim * num_heads + 4
self.pairwise_lin = MLP(pairwise_dim, pairwise_dim, self.hid_dim)
self.score_func = MLP(self.hid_dim * 2, self.hid_dim * 2, 1, norm=None)
def __repr__(self) -> str:
return (f'{self.__class__.__name__}({self.in_dim}, '
f'{self.hid_dim}, num_gnn_layers={self.gnn.num_layers}, '
f'num_transformer_layers={len(self.att_layers)})')
def reset_parameters(self):
r"""Resets all learnable parameters of the module."""
self.gnn.reset_parameters()
self.gnn_norm.reset_parameters()
self.elementwise_lin.reset_parameters()
self.pairwise_lin.reset_parameters()
self.ppr_encoder_cn.reset_parameters()
self.ppr_encoder_onehop.reset_parameters()
self.ppr_encoder_non1hop.reset_parameters()
self.score_func.reset_parameters()
for i in range(len(self.att_layers)):
self.att_layers[i].reset_parameters()
def forward(
self,
batch: Tensor,
x: Tensor,
edge_index: Adj,
ppr_matrix: Tensor,
) -> Tensor:
r"""Forward Pass of LPFormer.
Returns raw logits for each link
Args:
batch (Tensor): The batch vector.
Denotes which node pairs to predict.
x (Tensor): Input node features
edge_index (torch.Tensor, SparseTensor): The edge indices.
Either in COO or SparseTensor format
ppr_matrix (Tensor): PPR matrix
"""
batch = batch.to(x.device)
X_node = self.propagate(x, edge_index)
x_i, x_j = X_node[batch[0]], X_node[batch[1]]
elementwise_edge_feats = self.elementwise_lin(x_i * x_j)
# Ensure in sparse format
# Need as native torch.sparse for later computations
# (necessary operations are not supported by PyG SparseTensor)
if not edge_index.is_sparse:
num_nodes = ppr_matrix.size(1)
vals = torch.ones(len(edge_index[0]), device=edge_index.device)
edge_index = torch.sparse_coo_tensor(edge_index, vals,
[num_nodes, num_nodes])
# Checks if SparseTensor, if so the convert
if is_sparse(edge_index) and not edge_index.is_sparse:
edge_index = edge_index.to_torch_sparse_coo_tensor()
# Ensure {0, 1}
edge_index = edge_index.coalesce().bool().int()
pairwise_feats = self.calc_pairwise(batch, X_node, edge_index,
ppr_matrix)
combined_feats = torch.cat((elementwise_edge_feats, pairwise_feats),
dim=-1)
logits = self.score_func(combined_feats)
return logits
def propagate(self, x: Tensor, adj: Adj) -> Tensor:
"""Propagate via GNN.
Args:
x (Tensor): Node features
adj (torch.Tensor, SparseTensor): Adjacency matrix
"""
x = F.dropout(x, p=self.gnn_drop, training=self.training)
X_node = self.gnn(x, adj)
X_node = self.gnn_norm(X_node)
return X_node
def calc_pairwise(self, batch: Tensor, X_node: Tensor, adj_mask: Tensor,
ppr_matrix: Tensor) -> Tensor:
r"""Calculate the pairwise features for the node pairs.
Args:
batch (Tensor): The batch vector.
Denotes which node pairs to predict.
X_node (Tensor): Node representations
adj_mask (Tensor): Mask of adjacency matrix used for computing the
different node types.
ppr_matrix (Tensor): PPR matrix
"""
k_i, k_j = X_node[batch[0]], X_node[batch[1]]
pairwise_feats = torch.cat((k_i, k_j), dim=-1)
cn_info, onehop_info, non1hop_info = self.compute_node_mask(
batch, adj_mask, ppr_matrix)
all_mask = cn_info[0]
if onehop_info is not None:
all_mask = torch.cat((all_mask, onehop_info[0]), dim=-1)
if non1hop_info is not None:
all_mask = torch.cat((all_mask, non1hop_info[0]), dim=-1)
pes = self.get_pos_encodings(cn_info[1:], onehop_info[1:],
non1hop_info[1:])
for lay in range(len(self.att_layers)):
pairwise_feats = self.att_layers[lay](all_mask, pairwise_feats,
X_node, pes)
num_cns, num_1hop, num_non1hop, num_neigh = self.get_structure_cnts(
batch, cn_info, onehop_info, non1hop_info)
pairwise_feats = torch.cat(
(pairwise_feats, num_cns, num_1hop, num_non1hop, num_neigh),
dim=-1)
pairwise_feats = self.pairwise_lin(pairwise_feats)
return pairwise_feats
def get_pos_encodings(
self, cn_ppr: Tuple[Tensor, Tensor],
onehop_ppr: Optional[Tuple[Tensor, Tensor]] = None,
non1hop_ppr: Optional[Tuple[Tensor, Tensor]] = None) -> Tensor:
r"""Calculate the PPR-based relative positional encodings.
Due to thresholds, sometimes we don't have 1-hop or >1-hop nodes.
In those cases, the value of onehop_ppr and/or non1hop_ppr should
be `None`.
Args:
cn_ppr (tuple, optional): PPR scores of CNs.
onehop_ppr (tuple, optional): PPR scores of 1-Hop.
(default: :obj:`None`)
non1hop_ppr (tuple, optional): PPR scores of >1-Hop.
(default: :obj:`None`)
"""
cn_a = self.ppr_encoder_cn(torch.stack((cn_ppr[0], cn_ppr[1])).t())
cn_b = self.ppr_encoder_cn(torch.stack((cn_ppr[1], cn_ppr[0])).t())
cn_pe = cn_a + cn_b
if onehop_ppr is None:
return cn_pe
onehop_a = self.ppr_encoder_onehop(
torch.stack((onehop_ppr[0], onehop_ppr[1])).t())
onehop_b = self.ppr_encoder_onehop(
torch.stack((onehop_ppr[1], onehop_ppr[0])).t())
onehop_pe = onehop_a + onehop_b
if non1hop_ppr is None:
return torch.cat((cn_pe, onehop_pe), dim=0)
non1hop_a = self.ppr_encoder_non1hop(
torch.stack((non1hop_ppr[0], non1hop_ppr[1])).t())
non1hop_b = self.ppr_encoder_non1hop(
torch.stack((non1hop_ppr[1], non1hop_ppr[0])).t())
non1hop_pe = non1hop_a + non1hop_b
return torch.cat((cn_pe, onehop_pe, non1hop_pe), dim=0)
def compute_node_mask(
self, batch: Tensor, adj: Tensor, ppr_matrix: Tensor
) -> Tuple[Tuple, Optional[Tuple], Optional[Tuple]]:
r"""Get mask based on type of node.
When mask_type is not "cn", also return the ppr vals for both
the source and target.
Args:
batch (Tensor): The batch vector.
Denotes which node pairs to predict.
adj (SparseTensor): Adjacency matrix
ppr_matrix (Tensor): PPR matrix
"""
src_adj = torch.index_select(adj, 0, batch[0])
tgt_adj = torch.index_select(adj, 0, batch[1])
if self.mask == "cn":
# 1 when CN, 0 otherwise
pair_adj = src_adj * tgt_adj
else:
# Equals: {0: ">1-Hop", 1: "1-Hop (Non-CN)", 2: "CN"}
pair_adj = src_adj + tgt_adj
pair_ix, node_type, src_ppr, tgt_ppr = self.get_ppr_vals(
batch, pair_adj, ppr_matrix)
cn_filt_cond = (src_ppr >= self.thresh_cn) & (tgt_ppr
>= self.thresh_cn)
onehop_filt_cond = (src_ppr >= self.thresh_1hop) & (
tgt_ppr >= self.thresh_1hop)
if self.mask != "cn":
filt_cond = torch.where(node_type == 1, onehop_filt_cond,
cn_filt_cond)
else:
filt_cond = torch.where(node_type == 0, onehop_filt_cond,
cn_filt_cond)
pair_ix, node_type = pair_ix[:, filt_cond], node_type[filt_cond]
src_ppr, tgt_ppr = src_ppr[filt_cond], tgt_ppr[filt_cond]
# >1-Hop mask is gotten separately
if self.mask == "all":
non1hop_ix, non1hop_sppr, non1hop_tppr = self.get_non_1hop_ppr(
batch, adj, ppr_matrix)
# Dropout
if self.training and self.trans_drop > 0:
pair_ix, src_ppr, tgt_ppr, node_type = self.drop_pairwise(
pair_ix, src_ppr, tgt_ppr, node_type)
if self.mask == "all":
non1hop_ix, non1hop_sppr, non1hop_tppr, _ = self.drop_pairwise(
non1hop_ix, non1hop_sppr, non1hop_tppr)
# Separate out CN and 1-Hop
if self.mask != "cn":
cn_ind = node_type == 2
cn_ix = pair_ix[:, cn_ind]
cn_src_ppr = src_ppr[cn_ind]
cn_tgt_ppr = tgt_ppr[cn_ind]
one_hop_ind = node_type == 1
onehop_ix = pair_ix[:, one_hop_ind]
onehop_src_ppr = src_ppr[one_hop_ind]
onehop_tgt_ppr = tgt_ppr[one_hop_ind]
if self.mask == "cn":
return (pair_ix, src_ppr, tgt_ppr), None, None
elif self.mask == "1-hop":
return (cn_ix, cn_src_ppr, cn_tgt_ppr), (onehop_ix, onehop_src_ppr,
onehop_tgt_ppr), None
else:
return (cn_ix, cn_src_ppr,
cn_tgt_ppr), (onehop_ix, onehop_src_ppr,
onehop_tgt_ppr), (non1hop_ix, non1hop_sppr,
non1hop_tppr)
    def get_ppr_vals(
            self, batch: Tensor, pair_diff_adj: Tensor,
            ppr_matrix: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        r"""Get the src and tgt ppr vals.

        Returns the: link the node belongs to, type of node
        (e.g., CN), PPR relative to src, PPR relative to tgt.

        Args:
            batch (Tensor): The batch vector.
                Denotes which node pairs to predict.
            pair_diff_adj (SparseTensor): Combination of rows in
                adjacency for src and tgt nodes (e.g., X1 + X2)
            ppr_matrix (Tensor): PPR matrix
        """
        # Additional terms for also choosing scores when ppr=0:
        # - Multiplication removes any values for nodes not in batch.
        # - Addition then adds an offset (the ``pair_diff_adj`` value, i.e.
        #   the node type) to ensure we select entries even when ppr=0.
        # All selected scores are therefore offset above their true value;
        # the offset is removed again at the end of this method.
        src_ppr_adj = torch.index_select(
            ppr_matrix, 0, batch[0]) * pair_diff_adj + pair_diff_adj
        tgt_ppr_adj = torch.index_select(
            ppr_matrix, 0, batch[1]) * pair_diff_adj + pair_diff_adj
        # Can now convert ppr scores to dense
        ppr_ix = src_ppr_adj.coalesce().indices()
        src_ppr = src_ppr_adj.coalesce().values()
        tgt_ppr = tgt_ppr_adj.coalesce().values()
        # TODO: Needed due to a bug in recent torch versions
        # see here for more - https://github.com/pytorch/pytorch/issues/114529
        # note that if one is 0 so is the other
        zero_vals = (src_ppr != 0)
        src_ppr = src_ppr[zero_vals]
        # Relies on src/tgt zeros coinciding (see note above) so that the
        # two filtered value tensors stay aligned element-wise.
        tgt_ppr = tgt_ppr[tgt_ppr != 0]
        ppr_ix = ppr_ix[:, zero_vals]
        pair_diff_adj = pair_diff_adj.coalesce().values()
        # NOTE(review): ``src_ppr`` was already filtered above, so this mask
        # appears all-True and only lines up with ``pair_diff_adj`` when no
        # entries were dropped by ``zero_vals`` — TODO confirm intent.
        node_type = pair_diff_adj[src_ppr != 0]
        # Remove additional offset from each ppr val (the offset added at the
        # top equals the node type, hence subtract-then-divide):
        src_ppr = (src_ppr - node_type) / node_type
        tgt_ppr = (tgt_ppr - node_type) / node_type
        return ppr_ix, node_type, src_ppr, tgt_ppr
def drop_pairwise(
self,
pair_ix: Tensor,
src_ppr: Optional[Tensor] = None,
tgt_ppr: Optional[Tensor] = None,
node_indicator: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
r"""Perform dropout on pairwise information
by randomly dropping a percentage of nodes.
Done before performing attention for efficiency
Args:
pair_ix (Tensor): Link node belongs to
src_ppr (Tensor, optional): PPR relative to src
(default: :obj:`None`)
tgt_ppr (Tensor, optional): PPR relative to tgt
(default: :obj:`None`)
node_indicator (Tensor, optional): Type of node (e.g., CN)
(default: :obj:`None`)
"""
num_indices = math.ceil(pair_ix.size(1) * (1 - self.trans_drop))
indices = torch.randperm(pair_ix.size(1))[:num_indices]
pair_ix = pair_ix[:, indices]
if src_ppr is not None:
src_ppr = src_ppr[indices]
if tgt_ppr is not None:
tgt_ppr = tgt_ppr[indices]
if node_indicator is not None:
node_indicator = node_indicator[indices]
return pair_ix, src_ppr, tgt_ppr, node_indicator
def get_structure_cnts(
self,
batch: Tensor,
cn_info: Tuple[Tensor, Tensor],
onehop_info: Tuple[Tensor, Tensor],
non1hop_info: Optional[Tuple[Tensor, Tensor]],
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Counts for CNs, 1-Hop, and >1-Hop that satisfy PPR threshold.
Also include total # of neighbors
Args:
batch (Tensor): The batch vector.
Denotes which node pairs to predict.
cn_info (tuple): Information of CN nodes
Contains (ID of node, src ppr, tgt ppr)
onehop_info (tuple): Information of 1-Hop nodes.
Contains (ID of node, src ppr, tgt ppr)
non1hop_info (tuple): Information of >1-Hop nodes.
Contains (ID of node, src ppr, tgt ppr)
"""
num_cns = self.get_num_ppr_thresh(batch, cn_info[0], cn_info[1],
cn_info[2], self.thresh_cn)
num_1hop = self.get_num_ppr_thresh(batch, onehop_info[0],
onehop_info[1], onehop_info[2],
self.thresh_1hop)
# TOTAL num of 1-hop neighbors union
num_ppr_ones = self.get_num_ppr_thresh(batch, onehop_info[0],
onehop_info[1], onehop_info[2],
thresh=0)
num_neighbors = num_cns + num_ppr_ones
# Process for >1-hop is different which is why we use get_count below
if non1hop_info is None:
return num_cns, num_1hop, 0, num_neighbors
else:
num_non1hop = self.get_count(non1hop_info[0], batch)
return num_cns, num_1hop, num_non1hop, num_neighbors
def get_num_ppr_thresh(self, batch: Tensor, node_mask: Tensor,
src_ppr: Tensor, tgt_ppr: Tensor,
thresh: float) -> Tensor:
"""Get # of nodes `v` where `ppr(a, v) >= eta` & `ppr(b, v) >= eta`.
Args:
batch (Tensor): The batch vector.
Denotes which node pairs to predict.
node_mask (Tensor): IDs of nodes
src_ppr (Tensor): PPR relative to src node
tgt_ppr (Tensor): PPR relative to tgt node
thresh (float): PPR threshold for nodes (`eta`)
"""
weight = torch.ones(node_mask.size(1), device=node_mask.device)
ppr_above_thresh = (src_ppr >= thresh) & (tgt_ppr >= thresh)
num_ppr = scatter(ppr_above_thresh.float() * weight,
node_mask[0].long(), dim=0, dim_size=batch.size(1),
reduce="sum")
num_ppr = num_ppr.unsqueeze(-1)
return num_ppr
def get_count(
self,
node_mask: Tensor,
batch: Tensor,
) -> Tensor:
"""# of nodes for each sample in batch.
They node have already filtered by PPR beforehand
Args:
node_mask (Tensor): IDs of nodes
batch (Tensor): The batch vector.
Denotes which node pairs to predict.
"""
weight = torch.ones(node_mask.size(1), device=node_mask.device)
num_nodes = scatter(weight, node_mask[0].long(), dim=0,
dim_size=batch.size(1), reduce="sum")
num_nodes = num_nodes.unsqueeze(-1)
return num_nodes
def get_non_1hop_ppr(self, batch: Tensor, adj: Tensor,
ppr_matrix: Tensor) -> Tensor:
r"""Get PPR scores for non-1hop nodes.
Args:
batch (Tensor): Links in batch
adj (Tensor): Adjacency matrix
ppr_matrix (Tensor): Sparse PPR matrix
"""
# NOTE: Use original adj (one pass in forward() removes links in batch)
# Done since removing them converts src/tgt nodes to >1-hop nodes.
# Therefore removing CN and 1-hop will also remove the batch links.
# During training we add back in the links in the batch
# (we're removed from adjacency before being passed to model)
# Done since otherwise they will be mistakenly seen as >1-Hop nodes
# Instead they're 1-Hop, and get ignored accordingly
# Ignored during eval since we know the links aren't in the adj
adj2 = adj
if self.training:
n = adj.size(0)
batch_flip = torch.cat(
(batch, torch.flip(batch, (0, )).to(batch.device)), dim=-1)
batch_ones = torch.ones_like(batch_flip[0], device=batch.device)
adj_edges = torch.sparse_coo_tensor(batch_flip, batch_ones, [n, n],
device=batch.device)
adj_edges = adj_edges
adj2 = (adj + adj_edges).coalesce().bool().int()
src_adj = torch.index_select(adj2, 0, batch[0])
tgt_adj = torch.index_select(adj2, 0, batch[1])
src_ppr = torch.index_select(ppr_matrix, 0, batch[0])
tgt_ppr = torch.index_select(ppr_matrix, 0, batch[1])
# Remove CN scores
src_ppr = src_ppr - src_ppr * (src_adj * tgt_adj)
tgt_ppr = tgt_ppr - tgt_ppr * (src_adj * tgt_adj)
# Also need to remove CN entries in Adj
# Otherwise they leak into next computation
src_adj = src_adj - src_adj * (src_adj * tgt_adj)
tgt_adj = tgt_adj - tgt_adj * (src_adj * tgt_adj)
# Remove 1-Hop scores
src_ppr = src_ppr - src_ppr * (src_adj + tgt_adj)
tgt_ppr = tgt_ppr - tgt_ppr * (src_adj + tgt_adj)
# Make sure we include both when we convert to dense so indices align
# Do so by adding 1 to each based on the other
src_ppr_add = src_ppr + torch.sign(tgt_ppr)
tgt_ppr_add = tgt_ppr + torch.sign(src_ppr)
src_ix = src_ppr_add.coalesce().indices()
src_vals = src_ppr_add.coalesce().values()
tgt_vals = tgt_ppr_add.coalesce().values()
# Now we can remove value which is just 1
# Technically creates -1 scores for ppr scores that were 0
# Doesn't matter as they'll be filtered out by condition later
src_vals = src_vals - 1
tgt_vals = tgt_vals - 1
ppr_condition = (src_vals >= self.thresh_non1hop) & (
tgt_vals >= self.thresh_non1hop)
src_ix, src_vals, tgt_vals = src_ix[:, ppr_condition], src_vals[
ppr_condition], tgt_vals[ppr_condition]
return src_ix, src_vals, tgt_vals
def calc_sparse_ppr(self, edge_index: Tensor, num_nodes: int,
alpha: float = 0.15, eps: float = 5e-5) -> Tensor:
r"""Calculate the PPR of the graph in sparse format.
Args:
edge_index: The edge indices
num_nodes: Number of nodes
alpha (float, optional): The alpha value of the PageRank algorithm.
(default: :obj:`0.15`)
eps (float, optional): Threshold for stopping the PPR calculation
(default: :obj:`5e-5`)
"""
ei, ei_w = get_ppr(edge_index.cpu(), alpha=alpha, eps=eps,
num_nodes=num_nodes)
ppr_matrix = torch.sparse_coo_tensor(ei, ei_w, [num_nodes, num_nodes])
return ppr_matrix
class LPAttLayer(MessagePassing):
    r"""Attention Layer for pairwise interaction module.

    Each link (an edge representation holding the concatenated src/tgt
    embeddings) attends over a set of nodes; see :meth:`message`.

    Args:
        in_channels (int): Size of input dimension
        out_channels (int): Size of output dimension
        node_dim (int): Dimension of nodes being aggregated
        num_heads (int): Number of heads to use in MHA
        dropout (float): Dropout on attention values
        concat (bool, optional): Whether to concat attention
            heads. Otherwise sum (default: :obj:`True`)
    """
    # Cache of the most recent attention coefficients, written by message()
    # and consumed (then cleared) by forward().
    _alpha: OptTensor
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        node_dim: int,
        num_heads: int,
        dropout: float,
        concat: bool = True,
        **kwargs,
    ):
        # ``node_dim=0`` here is MessagePassing's aggregation axis, distinct
        # from the ``node_dim`` constructor argument above.
        super().__init__(node_dim=0, flow="target_to_source", **kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.heads = num_heads
        self.concat = concat
        self.dropout = dropout
        self.negative_slope = 0.2  # LeakyRelu
        # Factor 2 — presumably because message() concatenates node features
        # with their PPR-based relative PEs (see below), doubling the input
        # width of ``lin_r``; TODO confirm ``ppr_rpes`` has ``node_dim``
        # features.
        out_dim = 2
        if node_dim is None:
            node_dim = in_channels * out_dim
        else:
            node_dim = node_dim * out_dim
        self.lin_l = Linear(in_channels, self.heads * out_channels,
                            weight_initializer='glorot')
        self.lin_r = Linear(node_dim, self.heads * out_channels,
                            weight_initializer='glorot')
        att_out = out_channels
        # ``att``/``bias`` are allocated uninitialized; reset_parameters()
        # below fills them (glorot / zeros).
        self.att = Parameter(Tensor(1, self.heads, att_out))
        if concat:
            self.bias = Parameter(Tensor(self.heads * out_channels))
        else:
            self.bias = Parameter(Tensor(out_channels))
        self._alpha = None
        # NOTE(review): ``self.dropout`` was already assigned above —
        # redundant but harmless.
        self.dropout = dropout
        self.post_att_norm = nn.LayerNorm(out_channels)
        self.reset_parameters()
    def __repr__(self) -> str:
        return (f'{self.__class__.__name__}({self.in_channels}, '
                f'{self.out_channels}, heads={self.heads})')
    def reset_parameters(self):
        """Re-initialize all learnable parameters."""
        self.lin_l.reset_parameters()
        self.lin_r.reset_parameters()
        self.post_att_norm.reset_parameters()
        glorot(self.att)
        zeros(self.bias)
    def forward(
        self,
        edge_index: Tensor,
        edge_feats: Tensor,
        node_feats: Tensor,
        ppr_rpes: Tensor,
    ) -> Tensor:
        """Runs the forward pass of the module.

        Args:
            edge_index (Tensor): The edge indices.
            edge_feats (Tensor): Concatenated representations
                of src and target nodes for each link
            node_feats (Tensor): Representations for individual
                nodes
            ppr_rpes (Tensor): Relative PEs for each node
        """
        out = self.propagate(edge_index, x=(edge_feats, node_feats),
                             ppr_rpes=ppr_rpes, size=None)
        # Attention coefficients were stashed by message(); consume & clear.
        alpha = self._alpha
        assert alpha is not None
        self._alpha = None
        # Either concatenate or average the attention heads:
        if self.concat:
            out = out.view(-1, self.heads * self.out_channels)
        else:
            out = out.mean(dim=1)
        if self.bias is not None:
            out = out + self.bias
        out = self.post_att_norm(out)
        out = F.dropout(out, p=self.dropout, training=self.training)
        return out
    def message(self, x_i: Tensor, x_j: Tensor, ppr_rpes: Tensor,
                index: Tensor, ptr: Tensor, size_i: Optional[int]) -> Tensor:
        """Compute attention-weighted messages from nodes to links."""
        H, C = self.heads, self.out_channels
        # Augment node features with their relative positional encodings:
        x_j = torch.cat((x_j, ppr_rpes), dim=-1)
        x_j = self.lin_r(x_j).view(-1, H, C)
        # e=(a, b) attending to v: split the link embedding into its two
        # endpoint halves, project each with the shared ``lin_l``.
        e1, e2 = x_i.chunk(2, dim=-1)
        e1 = self.lin_l(e1).view(-1, H, C)
        e2 = self.lin_l(e2).view(-1, H, C)
        x = x_j * (e1 + e2)
        x = F.leaky_relu(x, self.negative_slope)
        alpha = (x * self.att).sum(dim=-1)
        # Normalize attention scores per destination link:
        alpha = softmax(alpha, index, ptr, size_i)
        self._alpha = alpha
        return x_j * alpha.unsqueeze(-1)
class MLP(nn.Module):
    r"""An :math:`L`-layer perceptron with optional normalization.

    Args:
        in_channels (int): Size of the input features.
        hid_channels (int): Size of the hidden features.
        out_channels (int): Size of the output features.
        num_layers (int, optional): Number of linear layers.
            (default: :obj:`2`)
        drop (float, optional): Dropout probability applied after every
            hidden layer. (default: :obj:`0`)
        norm (str, optional): Normalization applied after every hidden
            layer: :obj:`"layer"`, :obj:`"batch"`, or any other value for
            no normalization. (default: :obj:`"layer"`)
    """
    def __init__(self, in_channels: int, hid_channels: int, out_channels: int,
                 num_layers: int = 2, drop: float = 0, norm: str = "layer"):
        # Fix: ``drop`` is a probability, so it is annotated ``float``
        # (it was previously annotated ``int``).
        super().__init__()
        self.dropout = drop
        # A single normalization module shared by all hidden layers:
        if norm == "batch":
            self.norm = nn.BatchNorm1d(hid_channels)
        elif norm == "layer":
            self.norm = nn.LayerNorm(hid_channels)
        else:
            self.norm = None
        self.linears = torch.nn.ModuleList()
        if num_layers == 1:
            self.linears.append(nn.Linear(in_channels, out_channels))
        else:
            self.linears.append(nn.Linear(in_channels, hid_channels))
            for _ in range(num_layers - 2):
                self.linears.append(nn.Linear(hid_channels, hid_channels))
            self.linears.append(nn.Linear(hid_channels, out_channels))
    def reset_parameters(self):
        """Re-initialize all linear layers and the normalization layer."""
        for lin in self.linears:
            lin.reset_parameters()
        if self.norm is not None:
            self.norm.reset_parameters()
    def forward(self, x: Tensor) -> Tensor:
        """Apply all layers. The trailing dimension is squeezed, so with
        ``out_channels == 1`` a 1-D tensor is returned."""
        for lin in self.linears[:-1]:
            x = lin(x)
            x = self.norm(x) if self.norm is not None else x
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.linears[-1](x)
        return x.squeeze(-1)
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/nn/models/lpformer.py",
"license": "MIT License",
"lines": 645,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:test/test_onnx.py | import os
import tempfile
import warnings
from typing import Any
from unittest.mock import patch
import pytest
import torch
from torch_geometric import is_in_onnx_export, safe_onnx_export
# Module-wide warnings filter: the ONNX export path can emit
# DeprecationWarnings; silence them for every test here. Real ONNX calls
# are additionally mocked out inside the individual tests.
pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning")
class SimpleModel(torch.nn.Module):
    """Tiny linear model used as a fixture for the ONNX-export tests."""
    def __init__(self) -> None:
        super().__init__()
        self.linear = torch.nn.Linear(4, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.linear(x)
        return out
def test_is_in_onnx_export() -> None:
    """Outside of an active ONNX export pass the flag must report False."""
    assert not is_in_onnx_export()
def test_safe_onnx_export_ci_resilient() -> None:
    """With skip_on_error=True the export helper never fails, even in CI."""
    model = SimpleModel()
    inputs = torch.randn(3, 4)
    # Mock out torch.onnx.export entirely so no real export (and none of
    # its deprecation warnings) can run.
    with patch('torch.onnx.export', return_value=None) as export_mock:
        with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmp:
            try:
                assert safe_onnx_export(model, (inputs, ), tmp.name,
                                        skip_on_error=True) is True
                # The mock must have been driven with the expected arguments.
                export_mock.assert_called_once()
                passed = export_mock.call_args[0]
                assert passed[0] is model
                assert isinstance(passed[1], tuple)
                assert passed[2] == tmp.name
            finally:
                if os.path.exists(tmp.name):
                    try:
                        os.unlink(tmp.name)
                    except (PermissionError, OSError):
                        pass  # Ignore file lock issues (e.g., on Windows).
def test_safe_onnx_export_success() -> None:
    """Test successful ONNX export with pure mocking."""
    model = SimpleModel()
    x = torch.randn(3, 4)
    # Use comprehensive mocking to avoid any real ONNX calls:
    with patch('torch.onnx.export', return_value=None) as mock_export:
        with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as f:
            try:
                # Tuple args should be forwarded as-is:
                result = safe_onnx_export(model, (x, ), f.name)
                assert result is True
                mock_export.assert_called()
                call_args = mock_export.call_args[0]
                assert call_args[0] is model  # model
                assert isinstance(call_args[1], tuple)  # args as tuple
                assert call_args[2] == f.name  # file path
                # Reset mock for second test:
                mock_export.reset_mock()
                # A bare tensor should be converted into a tuple:
                result = safe_onnx_export(model, x, f.name)
                assert result is True
                call_args = mock_export.call_args[0]
                assert isinstance(call_args[1], tuple)
            finally:
                # Fix: the cleanup used a redundant doubly-nested try/except
                # catching the same exceptions; one handler suffices.
                if os.path.exists(f.name):
                    try:
                        os.unlink(f.name)
                    except (PermissionError, OSError):
                        pass  # Ignore file lock issues (e.g., on Windows).
def test_safe_onnx_export_with_skip_on_error() -> None:
    """A SerdeError is swallowed and reported as False with skip_on_error."""
    model = SimpleModel()
    inputs = torch.randn(3, 4)
    # Simulate the known serialization bug inside torch.onnx.export:
    with patch('torch.onnx.export') as export_mock:
        export_mock.side_effect = Exception(
            "onnx_ir.serde.SerdeError: allowzero")
        with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmp:
            try:
                # No exception may escape; failure is signalled via False.
                assert safe_onnx_export(model, (inputs, ), tmp.name,
                                        skip_on_error=True) is False
            finally:
                if os.path.exists(tmp.name):
                    try:
                        os.unlink(tmp.name)
                    except (PermissionError, OSError):
                        pass
def test_serde_error_patterns() -> None:
    """Test detection of various SerdeError patterns."""
    model = SimpleModel()
    x = torch.randn(3, 4)
    error_patterns = [
        "onnx_ir.serde.SerdeError: allowzero attribute",
        "ValueError: Value out of range: 1", "serialize_model_into failed",
        "serialize_attribute_into failed"
    ]
    for error_msg in error_patterns:
        # Patch both import paths so no real export can slip through:
        with patch('torch.onnx.export') as mock_export, \
                patch('torch_geometric._onnx.torch.onnx.export') as mock_export2:
            mock_export.side_effect = Exception(error_msg)
            mock_export2.side_effect = Exception(error_msg)
            with tempfile.NamedTemporaryFile(suffix='.onnx',
                                             delete=False) as f:
                try:
                    result = safe_onnx_export(model, (x, ), f.name,
                                              skip_on_error=True)
                    assert result is False
                finally:
                    # Fix: flattened a redundant doubly-nested try/except
                    # that caught the same exception types twice.
                    if os.path.exists(f.name):
                        try:
                            os.unlink(f.name)
                        except (PermissionError, OSError):
                            pass  # Ignore file lock issues
def test_non_serde_error_reraise() -> None:
    """Errors unrelated to the Serde bug must propagate unchanged."""
    model = SimpleModel()
    inputs = torch.randn(3, 4)
    with patch('torch.onnx.export') as export_mock:
        export_mock.side_effect = ValueError("Some other error")
        with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmp:
            try:
                # The original ValueError must surface to the caller:
                with pytest.raises(ValueError, match="Some other error"):
                    safe_onnx_export(model, (inputs, ), tmp.name)
            finally:
                if os.path.exists(tmp.name):
                    try:
                        os.unlink(tmp.name)
                    except (PermissionError, OSError):
                        pass
def test_dynamo_fallback() -> None:
    """The dynamo=False retry succeeds after an initial SerdeError."""
    model = SimpleModel()
    inputs = torch.randn(3, 4)
    calls = []

    def fake_export(*_args: Any, **kwargs: Any) -> None:
        calls.append(kwargs)
        if len(calls) == 1:
            # First attempt fails with the known serialization bug:
            raise Exception("onnx_ir.serde.SerdeError: allowzero")
        if len(calls) == 2 and not kwargs.get('dynamo', True):
            # Retry with dynamo=False succeeds:
            return None
        raise Exception("Unexpected call")

    with patch('torch.onnx.export', side_effect=fake_export):
        with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmp:
            try:
                assert safe_onnx_export(model, (inputs, ), tmp.name,
                                        dynamo=True) is True
                assert len(calls) == 2
            finally:
                if os.path.exists(tmp.name):
                    try:
                        os.unlink(tmp.name)
                    except (PermissionError, OSError):
                        pass
def test_opset_fallback() -> None:
    """Test opset version fallback strategy."""
    model = SimpleModel()
    x = torch.randn(3, 4)
    call_count = 0

    def mock_export_side_effect(*_args: Any, **kwargs: Any) -> None:
        nonlocal call_count
        call_count += 1
        # Fail until the fallback reaches opset_version=17:
        if kwargs.get('opset_version') == 17:
            return None
        raise Exception("onnx_ir.serde.SerdeError: allowzero")

    with patch('torch.onnx.export', side_effect=mock_export_side_effect):
        with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as f:
            try:
                result = safe_onnx_export(model, (x, ), f.name,
                                          opset_version=18)
                # Should succeed once opset_version=17 is tried:
                assert result is True
            finally:
                # Fix: flattened a redundant doubly-nested try/except that
                # caught the same exception types twice.
                if os.path.exists(f.name):
                    try:
                        os.unlink(f.name)
                    except (PermissionError, OSError):
                        pass
def test_all_strategies_fail() -> None:
    """When every workaround fails, behavior depends on skip_on_error."""
    model = SimpleModel()
    inputs = torch.randn(3, 4)
    with patch('torch.onnx.export') as export_mock:
        export_mock.side_effect = Exception(
            "onnx_ir.serde.SerdeError: allowzero")
        with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmp:
            try:
                # skip_on_error=False: the failure surfaces as RuntimeError.
                with pytest.raises(RuntimeError,
                                   match="Failed to export model to ONNX"):
                    safe_onnx_export(model, (inputs, ), tmp.name,
                                     skip_on_error=False)
                # skip_on_error=True: the failure is reported as False.
                assert safe_onnx_export(model, (inputs, ), tmp.name,
                                        skip_on_error=True) is False
            finally:
                if os.path.exists(tmp.name):
                    try:
                        os.unlink(tmp.name)
                    except (PermissionError, OSError):
                        pass
def test_pytest_environment_detection() -> None:
    """Error messages contain pytest-specific guidance under pytest."""
    model = SimpleModel()
    inputs = torch.randn(3, 4)
    with patch('torch.onnx.export') as export_mock:
        export_mock.side_effect = Exception(
            "onnx_ir.serde.SerdeError: allowzero")
        # Pretend we are running under pytest:
        with patch.dict(os.environ, {'PYTEST_CURRENT_TEST': 'test_something'}):
            with tempfile.NamedTemporaryFile(suffix='.onnx',
                                             delete=False) as tmp:
                try:
                    with pytest.raises(RuntimeError) as exc_info:
                        safe_onnx_export(model, (inputs, ), tmp.name,
                                         skip_on_error=False)
                    message = str(exc_info.value)
                    assert "pytest environments" in message
                    assert "torch.jit.script()" in message
                finally:
                    if os.path.exists(tmp.name):
                        try:
                            os.unlink(tmp.name)
                        except (PermissionError, OSError):
                            pass
def test_warnings_emitted() -> None:
    """The workaround path emits warnings describing what happened."""
    model = SimpleModel()
    inputs = torch.randn(3, 4)
    calls = []

    def fake_export(*_args: Any, **_kwargs: Any) -> None:
        calls.append(None)
        if len(calls) == 1:
            raise Exception("onnx_ir.serde.SerdeError: allowzero")
        if len(calls) == 2:
            return None  # Success on the dynamo fallback.
        raise Exception("Unexpected call")

    with patch('torch.onnx.export', side_effect=fake_export):
        with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmp:
            try:
                with warnings.catch_warnings(record=True) as caught:
                    warnings.simplefilter("always")
                    assert safe_onnx_export(model, (inputs, ), tmp.name,
                                            dynamo=True) is True
                messages = [str(w.message) for w in caught]
                # At least the initial error plus the dynamo fallback:
                assert len(messages) >= 2
                assert any("allowzero boolean attribute bug" in m
                           for m in messages)
                assert any("dynamo=False as workaround" in m
                           for m in messages)
            finally:
                if os.path.exists(tmp.name):
                    try:
                        os.unlink(tmp.name)
                    except (PermissionError, OSError):
                        pass
@pytest.mark.parametrize(
    "args_input",
    [
        torch.randn(3, 4),  # single tensor
        (torch.randn(3, 4), ),  # one-element tuple
        (torch.randn(3, 4), torch.randn(3, 2)),  # multi-element tuple
    ])
def test_args_conversion(args_input: Any) -> None:
    """Args of any accepted shape are normalized into a tuple."""
    model = SimpleModel()
    with patch('torch.onnx.export', return_value=None) as export_mock:
        with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmp:
            try:
                assert safe_onnx_export(model, args_input, tmp.name) is True
                # torch.onnx.export must always receive tuple args:
                export_mock.assert_called_once()
                assert isinstance(export_mock.call_args[0][1], tuple)
            finally:
                if os.path.exists(tmp.name):
                    try:
                        os.unlink(tmp.name)
                    except (PermissionError, OSError):
                        pass
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/test_onnx.py",
"license": "MIT License",
"lines": 313,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:test/nn/attention/test_polynormer_attention.py | import torch
from torch_geometric.nn.attention import PolynormerAttention
def test_performer_attention():
    # NOTE(review): the name says "performer" but the test exercises
    # PolynormerAttention — presumably a copy/paste leftover.
    x = torch.randn(1, 4, 16)
    mask = torch.ones([1, 4], dtype=torch.bool)
    layer = PolynormerAttention(channels=16, heads=4)
    out = layer(x, mask)
    # 4 heads x 64 default head channels = 256 output features:
    assert out.shape == (1, 4, 256)
    assert str(layer) == 'PolynormerAttention(heads=4, head_channels=64)'
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/nn/attention/test_polynormer_attention.py",
"license": "MIT License",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:test/nn/models/test_polynormer.py | import pytest
import torch
from torch_geometric.nn.models import Polynormer
@pytest.mark.parametrize('local_attn', [True, False])
@pytest.mark.parametrize('qk_shared', [True, False])
@pytest.mark.parametrize('pre_ln', [True, False])
@pytest.mark.parametrize('post_bn', [True, False])
def test_polynormer(local_attn, qk_shared, pre_ln, post_bn):
    x = torch.randn(10, 16)
    edge_index = torch.tensor([
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        [1, 2, 3, 4, 0, 6, 7, 8, 9, 5],
    ])
    batch = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
    model = Polynormer(
        in_channels=16,
        hidden_channels=128,
        out_channels=40,
        qk_shared=qk_shared,
        pre_ln=pre_ln,
        post_bn=post_bn,
        local_attn=local_attn,
    )
    # Exercise both the local-only stage and the global attention stage:
    for use_global in (False, True):
        model._global = use_global
        assert model(x, edge_index, batch).size() == (10, 40)
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/nn/models/test_polynormer.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:torch_geometric/nn/attention/polynormer.py | from typing import Optional
import torch
import torch.nn.functional as F
from torch import Tensor
class PolynormerAttention(torch.nn.Module):
    r"""The polynomial-expressive attention mechanism from the
    `"Polynormer: Polynomial-Expressive Graph Transformer in Linear Time"
    <https://arxiv.org/abs/2403.01232>`_ paper.

    Args:
        channels (int): Size of each input sample.
        heads (int, optional): Number of parallel attention heads.
        head_channels (int, optional): Size of each attention head.
            (default: :obj:`64.`)
        beta (float, optional): Polynormer beta initialization.
            (default: :obj:`0.9`)
        qkv_bias (bool, optional): If specified, add bias to query, key
            and value in the self attention. (default: :obj:`False`)
        qk_shared (bool, optional): Whether the weights of query and key
            are shared. (default: :obj:`True`)
        dropout (float, optional): Dropout probability of the final
            attention output. (default: :obj:`0.0`)
    """
    def __init__(
        self,
        channels: int,
        heads: int,
        head_channels: int = 64,
        beta: float = 0.9,
        qkv_bias: bool = False,
        qk_shared: bool = True,
        dropout: float = 0.0,
    ) -> None:
        super().__init__()
        self.head_channels = head_channels
        self.heads = heads
        self.beta = beta
        self.qk_shared = qk_shared
        inner_channels = heads * head_channels
        # Submodule creation order is kept stable for reproducible seeded
        # initialization; ``q`` only exists when the weights are not shared.
        self.h_lins = torch.nn.Linear(channels, inner_channels)
        if not self.qk_shared:
            self.q = torch.nn.Linear(channels, inner_channels, bias=qkv_bias)
        self.k = torch.nn.Linear(channels, inner_channels, bias=qkv_bias)
        self.v = torch.nn.Linear(channels, inner_channels, bias=qkv_bias)
        self.lns = torch.nn.LayerNorm(inner_channels)
        self.lin_out = torch.nn.Linear(inner_channels, inner_channels)
        self.dropout = torch.nn.Dropout(dropout)

    def forward(self, x: Tensor, mask: Optional[Tensor] = None) -> Tensor:
        r"""Forward pass.

        Args:
            x (torch.Tensor): Node feature tensor
                :math:`\mathbf{X} \in \mathbb{R}^{B \times N \times F}`, with
                batch-size :math:`B`, (maximum) number of nodes :math:`N` for
                each graph, and feature dimension :math:`F`.
            mask (torch.Tensor, optional): Mask matrix
                :math:`\mathbf{M} \in {\{ 0, 1 \}}^{B \times N}` indicating
                the valid nodes for each graph. (default: :obj:`None`)
        """
        batch_size, num_nodes = x.shape[0], x.shape[1]
        shape = (batch_size, num_nodes, self.head_channels, self.heads)
        h = self.h_lins(x)
        k = torch.sigmoid(self.k(x)).view(shape)
        q = k if self.qk_shared else torch.sigmoid(self.q(x)).view(shape)
        v = self.v(x).view(shape)
        if mask is not None:
            # Zero out values of padded (invalid) nodes, in place:
            v.masked_fill_(~mask[:, :, None, None], 0.)
        # Numerator of the kernelized (linear-time) attention:
        kv = torch.einsum('bndh, bnmh -> bdmh', k, v)
        numer = torch.einsum('bndh, bdmh -> bnmh', q, kv)
        # Denominator (normalization term):
        k_sum = torch.einsum('bndh -> bdh', k)
        denom = torch.einsum('bndh, bdh -> bnh', q, k_sum).unsqueeze(2)
        out = (numer / (denom + 1e-6)).reshape(batch_size, num_nodes, -1)
        out = self.lns(out) * (h + self.beta)
        out = F.relu(self.lin_out(out))
        return self.dropout(out)

    def reset_parameters(self) -> None:
        """Re-initialize all learnable sub-modules."""
        self.h_lins.reset_parameters()
        if not self.qk_shared:
            self.q.reset_parameters()
        self.k.reset_parameters()
        self.v.reset_parameters()
        self.lns.reset_parameters()
        self.lin_out.reset_parameters()

    def __repr__(self) -> str:
        return (f'{self.__class__.__name__}('
                f'heads={self.heads}, '
                f'head_channels={self.head_channels})')
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/nn/attention/polynormer.py",
"license": "MIT License",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
pyg-team/pytorch_geometric:torch_geometric/nn/models/polynormer.py | from typing import Optional
import torch
import torch.nn.functional as F
from torch import Tensor
from torch_geometric.nn import GATConv, GCNConv
from torch_geometric.nn.attention import PolynormerAttention
from torch_geometric.utils import to_dense_batch
class Polynormer(torch.nn.Module):
r"""The polynormer module from the
`"Polynormer: polynomial-expressive graph
transformer in linear time"
<https://arxiv.org/abs/2403.01232>`_ paper.
Args:
in_channels (int): Input channels.
hidden_channels (int): Hidden channels.
out_channels (int): Output channels.
local_layers (int): The number of local attention layers.
(default: :obj:`7`)
global_layers (int): The number of global attention layers.
(default: :obj:`2`)
in_dropout (float): Input dropout rate.
(default: :obj:`0.15`)
dropout (float): Dropout rate.
(default: :obj:`0.5`)
global_dropout (float): Global dropout rate.
(default: :obj:`0.5`)
heads (int): The number of heads.
(default: :obj:`1`)
beta (float): Aggregate type.
(default: :obj:`0.9`)
qk_shared (bool optional): Whether weight of query and key are shared.
(default: :obj:`True`)
pre_ln (bool): Pre layer normalization.
(default: :obj:`False`)
post_bn (bool): Post batch normalization.
(default: :obj:`True`)
local_attn (bool): Whether use local attention.
(default: :obj:`False`)
"""
def __init__(
self,
in_channels: int,
hidden_channels: int,
out_channels: int,
local_layers: int = 7,
global_layers: int = 2,
in_dropout: float = 0.15,
dropout: float = 0.5,
global_dropout: float = 0.5,
heads: int = 1,
beta: float = 0.9,
qk_shared: bool = False,
pre_ln: bool = False,
post_bn: bool = True,
local_attn: bool = False,
) -> None:
super().__init__()
self._global = False
self.in_drop = in_dropout
self.dropout = dropout
self.pre_ln = pre_ln
self.post_bn = post_bn
self.beta = beta
self.h_lins = torch.nn.ModuleList()
self.local_convs = torch.nn.ModuleList()
self.lins = torch.nn.ModuleList()
self.lns = torch.nn.ModuleList()
if self.pre_ln:
self.pre_lns = torch.nn.ModuleList()
if self.post_bn:
self.post_bns = torch.nn.ModuleList()
# first layer
inner_channels = heads * hidden_channels
self.h_lins.append(torch.nn.Linear(in_channels, inner_channels))
if local_attn:
self.local_convs.append(
GATConv(in_channels, hidden_channels, heads=heads, concat=True,
add_self_loops=False, bias=False))
else:
self.local_convs.append(
GCNConv(in_channels, inner_channels, cached=False,
normalize=True))
self.lins.append(torch.nn.Linear(in_channels, inner_channels))
self.lns.append(torch.nn.LayerNorm(inner_channels))
if self.pre_ln:
self.pre_lns.append(torch.nn.LayerNorm(in_channels))
if self.post_bn:
self.post_bns.append(torch.nn.BatchNorm1d(inner_channels))
# following layers
for _ in range(local_layers - 1):
self.h_lins.append(torch.nn.Linear(inner_channels, inner_channels))
if local_attn:
self.local_convs.append(
GATConv(inner_channels, hidden_channels, heads=heads,
concat=True, add_self_loops=False, bias=False))
else:
self.local_convs.append(
GCNConv(inner_channels, inner_channels, cached=False,
normalize=True))
self.lins.append(torch.nn.Linear(inner_channels, inner_channels))
self.lns.append(torch.nn.LayerNorm(inner_channels))
if self.pre_ln:
self.pre_lns.append(torch.nn.LayerNorm(heads *
hidden_channels))
if self.post_bn:
self.post_bns.append(torch.nn.BatchNorm1d(inner_channels))
self.lin_in = torch.nn.Linear(in_channels, inner_channels)
self.ln = torch.nn.LayerNorm(inner_channels)
self.global_attn = torch.nn.ModuleList()
for _ in range(global_layers):
self.global_attn.append(
PolynormerAttention(
channels=hidden_channels,
heads=heads,
head_channels=hidden_channels,
beta=beta,
dropout=global_dropout,
qk_shared=qk_shared,
))
self.pred_local = torch.nn.Linear(inner_channels, out_channels)
self.pred_global = torch.nn.Linear(inner_channels, out_channels)
self.reset_parameters()
def reset_parameters(self) -> None:
for local_conv in self.local_convs:
local_conv.reset_parameters()
for attn in self.global_attn:
attn.reset_parameters()
for lin in self.lins:
lin.reset_parameters()
for h_lin in self.h_lins:
h_lin.reset_parameters()
for ln in self.lns:
ln.reset_parameters()
if self.pre_ln:
for p_ln in self.pre_lns:
p_ln.reset_parameters()
if self.post_bn:
for p_bn in self.post_bns:
p_bn.reset_parameters()
self.lin_in.reset_parameters()
self.ln.reset_parameters()
self.pred_local.reset_parameters()
self.pred_global.reset_parameters()
    def forward(
        self,
        x: Tensor,
        edge_index: Tensor,
        batch: Optional[Tensor],
    ) -> Tensor:
        r"""Forward pass.
        Args:
            x (torch.Tensor): The input node features.
            edge_index (torch.Tensor or SparseTensor): The edge indices.
            batch (torch.Tensor, optional): The batch vector
                :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns
                each element to a specific example.
        Returns:
            torch.Tensor: Per-node log-probabilities of shape
            ``[num_nodes, out_channels]``.
        """
        x = F.dropout(x, p=self.in_drop, training=self.training)
        # Equivariant local attention: accumulate the output of every local
        # message-passing layer into `x_local`.
        x_local = 0
        for i, local_conv in enumerate(self.local_convs):
            if self.pre_ln:
                x = self.pre_lns[i](x)
            h = self.h_lins[i](x)
            h = F.relu(h)
            # Local convolution plus a linear skip connection:
            x = local_conv(x, edge_index) + self.lins[i](x)
            if self.post_bn:
                x = self.post_bns[i](x)
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
            # Mix the gated representation `h * x` with the plain one,
            # weighted by `beta`:
            x = (1 - self.beta) * self.lns[i](h * x) + self.beta * x
            x_local = x_local + x
        # Equivariant global attention (only runs when `self._global` is set):
        if self._global:
            # Sort nodes by example so `to_dense_batch` can group them, and
            # remember the inverse permutation to restore the input order.
            batch, indices = batch.sort()
            rev_perm = torch.empty_like(indices)
            rev_perm[indices] = torch.arange(len(indices),
                                             device=indices.device)
            x_local = self.ln(x_local[indices])
            x_global, mask = to_dense_batch(x_local, batch)
            for attn in self.global_attn:
                x_global = attn(x_global, mask)
            # Undo the dense batching and the sorting permutation:
            x = x_global[mask][rev_perm]
            x = self.pred_global(x)
        else:
            x = self.pred_local(x_local)
        return F.log_softmax(x, dim=-1)
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/nn/models/polynormer.py",
"license": "MIT License",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:examples/llm/protein_mpnn.py | """This example implements the ProteinMPNN model
(https://www.biorxiv.org/content/10.1101/2022.06.03.494563v1) using PyG.
"""
import argparse
import time
import numpy as np
import psutil
import torch
from torch_geometric import seed_everything
from torch_geometric.datasets import ProteinMPNNDataset
from torch_geometric.llm.models import ProteinMPNN
from torch_geometric.loader import DataLoader
def loss_smoothed(y, logits, mask, weight=0.1):
    """Return label-smoothed negative log probabilities.

    Args:
        y: Integer residue labels (21 classes).
        logits: Per-position log-probabilities over the 21 classes.
        mask: Float mask selecting the positions that count.
        weight: Label-smoothing weight. (default: :obj:`0.1`)
    """
    target = torch.nn.functional.one_hot(y, 21).float()
    # Smooth the one-hot targets and renormalize them to sum to one:
    target = target + weight / float(target.size(-1))
    target = target / target.sum(-1, keepdim=True)
    loss = -(target * logits).sum(-1)
    # Average over a fixed constant (2000) instead of the mask sum:
    loss_av = torch.sum(loss * mask) / 2000.0
    return loss, loss_av
def loss_nll(y, logits, mask):
    """Return per-position NLL, its masked average, and prediction hits.

    Args:
        y: Integer labels of shape ``[B, L]``.
        logits: Per-position log-probabilities of shape ``[B, L, C]``.
        mask: Float mask selecting the positions that count.
    """
    num_classes = logits.size(-1)
    flat_logits = logits.contiguous().view(-1, num_classes)
    flat_labels = y.contiguous().view(-1)
    nll = torch.nn.NLLLoss(reduction='none')
    loss = nll(flat_logits, flat_labels).view(y.size())
    # 1.0 wherever the argmax prediction matches the label, 0.0 otherwise:
    predictions = torch.argmax(logits, -1)  # [B, L]
    true_false = (y == predictions).float()
    loss_av = torch.sum(loss * mask) / torch.sum(mask)
    return loss, loss_av, true_false
class NoamOpt:
    """Optimizer wrapper implementing the Noam learning-rate schedule.

    The rate grows linearly for ``warmup`` steps and then decays
    proportionally to the inverse square root of the step number.
    """
    def __init__(self, model_size, factor, warmup, optimizer, step):
        self.optimizer = optimizer
        self._step = step
        self.warmup = warmup
        self.factor = factor
        self.model_size = model_size
        self._rate = 0

    @property
    def param_groups(self):
        """Parameter groups of the wrapped optimizer."""
        return self.optimizer.param_groups

    def step(self):
        """Advance one step, update the learning rate and the parameters."""
        self._step += 1
        new_rate = self.rate()
        for group in self.param_groups:
            group['lr'] = new_rate
        self._rate = new_rate
        self.optimizer.step()

    def rate(self, step=None):
        """Return the learning rate for ``step`` (current step if None)."""
        step = self._step if step is None else step
        scale = min(step**(-0.5), step * self.warmup**(-1.5))
        return self.factor * self.model_size**(-0.5) * scale

    def zero_grad(self):
        """Clear the gradients of the wrapped optimizer."""
        self.optimizer.zero_grad()
def train(model, optimizer, data_loader, device, scaler):
    """Run one training epoch and return ``(perplexity, accuracy)``.

    NOTE(review): reads the module-level ``args`` namespace
    (``args.mixed_precision`` and ``args.gradient_norm``), so this function
    is only callable after the argparse block at the bottom of this file
    has run.
    """
    model.train()
    # Running sums over all batches: total NLL, total correct predictions,
    # and total weight (number of residues counted by the mask).
    train_sum = 0.0
    train_acc = 0.0
    train_weights = 0.0
    for batch in data_loader:
        optimizer.zero_grad()
        batch = batch.to(device)
        # Only positions that are both valid (`mask`) and selected for
        # prediction (`chain_mask_all`) contribute to the loss.
        mask_for_loss = batch.mask * batch.chain_mask_all
        y = batch.chain_seq_label
        if torch.cuda.is_available() and args.mixed_precision:
            # Mixed-precision path: forward + loss under autocast, scaled
            # backward, then the usual optional gradient clipping.
            with torch.amp.autocast('cuda'):
                logits = model(batch.x, batch.chain_seq_label, batch.mask,
                               batch.chain_mask_all, batch.residue_idx,
                               batch.chain_encoding_all, batch.batch)
                _, loss = loss_smoothed(y, logits, mask_for_loss)
            scaler.scale(loss).backward()
            if args.gradient_norm > 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(),
                                               args.gradient_norm)
            scaler.step(optimizer)
            scaler.update()
        else:
            logits = model(batch.x, batch.chain_seq_label, batch.mask,
                           batch.chain_mask_all, batch.residue_idx,
                           batch.chain_encoding_all, batch.batch)
            _, loss = loss_smoothed(y, logits, mask_for_loss)
            loss.backward()
            if args.gradient_norm > 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(),
                                               args.gradient_norm)
            optimizer.step()
        # Monitoring uses the un-smoothed NLL metrics:
        loss, _, true_false = loss_nll(y, logits, mask_for_loss)
        train_sum += torch.sum(loss * mask_for_loss).cpu().data.numpy()
        train_acc += torch.sum(true_false * mask_for_loss).cpu().data.numpy()
        train_weights += torch.sum(mask_for_loss).cpu().data.numpy()
    train_loss = train_sum / train_weights
    train_accuracy = train_acc / train_weights
    # Perplexity is the exponential of the average per-residue NLL.
    train_perplexity = np.exp(train_loss)
    return train_perplexity, train_accuracy
@torch.no_grad()
def eval(model, data_loader, device):
    """Evaluate ``model`` on ``data_loader``; return (perplexity, accuracy).

    NOTE(review): shadows the ``eval`` builtin; harmless inside this script,
    but renaming (e.g. to ``evaluate``) would be cleaner.
    """
    model.eval()
    # Running sums over all batches: total NLL, total weight, total hits.
    valid_sum = 0.
    valid_weights = 0.
    valid_acc = 0.
    for batch in data_loader:
        batch = batch.to(device)
        logits = model(batch.x, batch.chain_seq_label, batch.mask,
                       batch.chain_mask_all, batch.residue_idx,
                       batch.chain_encoding_all, batch.batch)
        # Only positions that are both valid and selected for prediction
        # contribute to the metrics.
        mask_for_loss = batch.mask * batch.chain_mask_all
        y = batch.chain_seq_label
        loss, _, true_false = loss_nll(y, logits, mask_for_loss)
        valid_sum += torch.sum(loss * mask_for_loss).cpu().data.numpy()
        valid_acc += torch.sum(true_false * mask_for_loss).cpu().data.numpy()
        valid_weights += torch.sum(mask_for_loss).cpu().data.numpy()
    valid_loss = valid_sum / valid_weights
    valid_accuracy = valid_acc / valid_weights
    # Perplexity is the exponential of the average per-residue NLL.
    valid_perplexity = np.exp(valid_loss)
    return valid_perplexity, valid_accuracy
def main(args):
    """Build datasets/loaders/model, train for ``args.num_epochs`` epochs,
    and report validation and test metrics.
    """
    wall_clock_start = time.perf_counter()
    seed_everything(123)
    scaler = torch.amp.GradScaler()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # The 'large' variant of the dataset is ~64 GB; warn early if the host
    # likely cannot hold it.
    if args.size == 'large' and psutil.virtual_memory().total < 64.1 * 1024**3:
        print('Warning: may not have enough RAM to run this example.')
        print('Consider upgrading RAM if an error occurs.')
        print('Estimated RAM Needed: ~64.1GB.')
    train_dataset = ProteinMPNNDataset(
        root=args.data_path,
        size=args.size,
        split='train',
        rescut=args.rescut,
        max_length=args.max_protein_length,
    )
    valid_dataset = ProteinMPNNDataset(
        root=args.data_path,
        size=args.size,
        split='valid',
        rescut=args.rescut,
        max_length=args.max_protein_length,
    )
    test_dataset = ProteinMPNNDataset(
        root=args.data_path,
        size=args.size,
        split='test',
        rescut=args.rescut,
        max_length=args.max_protein_length,
    )
    train_loader = DataLoader(train_dataset, batch_size=args.train_batch_size,
                              shuffle=True, num_workers=6)
    valid_loader = DataLoader(valid_dataset, batch_size=args.eval_batch_size,
                              shuffle=False, num_workers=6)
    test_loader = DataLoader(test_dataset, batch_size=args.eval_batch_size,
                             shuffle=False, num_workers=6)
    model = ProteinMPNN(
        hidden_dim=args.hidden_dim,
        num_encoder_layers=args.num_encoder_layers,
        num_decoder_layers=args.num_decoder_layers,
        num_neighbors=args.num_neighbors,
        dropout=args.dropout,
        augment_eps=args.backbone_noise,
        num_positional_embedding=16,
    ).to(device)
    total_step = 0
    # Adam with lr=0: the effective learning rate is fully controlled by the
    # NoamOpt warmup/decay schedule.
    optimizer = NoamOpt(
        model_size=args.hidden_dim, factor=2, warmup=4000,
        optimizer=torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98),
                                   eps=1e-9), step=total_step)
    times = []
    for e in range(args.num_epochs):
        start = time.perf_counter()
        train_perplexity, train_accuracy = train(model, optimizer,
                                                 train_loader, device, scaler)
        valid_perplexity, valid_accuracy = eval(model, valid_loader, device)
        # NOTE(review): `total_step` is never updated inside this loop, so
        # the printed step stays 0; the optimizer tracks its own `_step`.
        print(
            f'epoch: {e:03d}, step: {total_step}, '
            f'train: {train_perplexity:.3f}, valid: {valid_perplexity:.3f}, '
            f'train_acc: {train_accuracy:.3f}, valid_acc: {valid_accuracy:.3f}'
        )
        times.append(time.perf_counter() - start)
    print(f'Average Epoch Time: {torch.tensor(times).mean():.4f}s')
    print(f'Median Epoch Time: {torch.tensor(times).median():.4f}s')
    print(f'Total Program Runtime: '
          f'{time.perf_counter() - wall_clock_start:.4f}s')
    # Test
    test_perplexity, test_accuracy = eval(model, test_loader, device)
    print(f'test: {test_perplexity:.3f}, test_acc: {test_accuracy:.3f}')
if __name__ == '__main__':
    # Command-line interface for the ProteinMPNN training example.
    parser = argparse.ArgumentParser()
    # dataset config
    parser.add_argument('--data_path', type=str, default='data/ProteinMPNN',
                        help='path for loading training data')
    parser.add_argument(
        '--size', type=str, default='small', choices=['small', 'large'],
        help='Use of "small (229.4 MB)" or "large (64.1 GB)" dataset')
    parser.add_argument('--max_protein_length', type=int, default=10000,
                        help='maximum length of the protein complex')
    parser.add_argument('--rescut', type=float, default=3.5,
                        help='PDB resolution cutoff')
    # training config
    parser.add_argument('--num_epochs', type=int, default=50,
                        help='number of epochs to train for')
    parser.add_argument('--train_batch_size', type=int, default=4,
                        help='number of tokens for one train batch')
    parser.add_argument('--eval_batch_size', type=int, default=8,
                        help='number of tokens for one valid or test batch')
    parser.add_argument(
        '--gradient_norm', type=float, default=-1.0,
        help='clip gradient norm, set to negative to omit clipping')
    # Bug fix: `type=bool` treats ANY non-empty string (including 'False')
    # as True, so `--mixed_precision False` silently enabled mixed
    # precision.  Parse the value explicitly instead; accepts 1/true/yes
    # (case-insensitive) as True, everything else as False.
    parser.add_argument(
        '--mixed_precision',
        type=lambda s: s.strip().lower() in ('1', 'true', 'yes'),
        default=True, help='train with mixed precision')
    # model config
    parser.add_argument('--hidden_dim', type=int, default=128,
                        help='hidden model dimension')
    parser.add_argument('--num_encoder_layers', type=int, default=3,
                        help='number of encoder layers')
    parser.add_argument('--num_decoder_layers', type=int, default=3,
                        help='number of decoder layers')
    parser.add_argument('--num_neighbors', type=int, default=30,
                        help='number of neighbors for the sparse graph')
    parser.add_argument('--num_rbf', type=int, default=16,
                        help='number of radial basis functions')
    parser.add_argument('--dropout', type=float, default=0.1,
                        help='dropout level; 0.0 means no dropout')
    parser.add_argument(
        '--backbone_noise', type=float, default=0.2,
        help='amount of noise added to backbone during training')
    args = parser.parse_args()
    main(args)
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "examples/llm/protein_mpnn.py",
"license": "MIT License",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:test/datasets/test_protein_mpnn_dataset.py | from torch_geometric.datasets import ProteinMPNNDataset
from torch_geometric.testing import onlyOnline, withPackage
@onlyOnline
@withPackage('pandas')
def test_protein_mpnn_dataset():
    # Downloads and processes the small sample split on first use.
    dataset = ProteinMPNNDataset(root='./data/ProteinMPNN')
    assert len(dataset) == 150

    data = dataset[0]
    assert data.x.size() == (229, 4, 3)
    # All per-residue attributes share the same length as the sequence:
    for attr in ['chain_seq_label', 'mask', 'chain_mask_all', 'residue_idx',
                 'chain_encoding_all']:
        assert getattr(data, attr).size() == (229, )
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/datasets/test_protein_mpnn_dataset.py",
"license": "MIT License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:torch_geometric/datasets/protein_mpnn_dataset.py | import os
import pickle
import random
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from tqdm import tqdm
from torch_geometric.data import (
Data,
InMemoryDataset,
download_url,
extract_tar,
)
class ProteinMPNNDataset(InMemoryDataset):
    r"""The ProteinMPNN dataset from the `"Robust deep learning based protein
    sequence design using ProteinMPNN"
    <https://www.biorxiv.org/content/10.1101/2022.06.03.494563v1>`_ paper.
    Args:
        root (str): Root directory where the dataset should be saved.
        size (str): Size of the PDB information to train the model.
            If :obj:`"small"`, loads the small dataset (229.4 MB).
            If :obj:`"large"`, loads the large dataset (64.1 GB).
            (default: :obj:`"small"`)
        split (str, optional): If :obj:`"train"`, loads the training dataset.
            If :obj:`"valid"`, loads the validation dataset.
            If :obj:`"test"`, loads the test dataset.
            (default: :obj:`"train"`)
        datacut (str, optional): Date cutoff to filter the dataset.
            (default: :obj:`"2030-01-01"`)
        rescut (float, optional): PDB resolution cutoff.
            (default: :obj:`3.5`)
        homo (float, optional): Homology cutoff.
            (default: :obj:`0.70`)
        max_length (int, optional): Maximum length of the protein complex.
            (default: :obj:`10000`)
        num_units (int, optional): Number of units of the protein complex.
            (default: :obj:`150`)
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        pre_filter (callable, optional): A function that takes in an
            :obj:`torch_geometric.data.Data` object and returns a boolean
            value, indicating whether the data object should be included in the
            final dataset. (default: :obj:`None`)
        force_reload (bool, optional): Whether to re-process the dataset.
            (default: :obj:`False`)
    """
    # Upstream tarballs keyed by dataset size:
    raw_url = {
        'small':
        'https://files.ipd.uw.edu/pub/training_sets/'
        'pdb_2021aug02_sample.tar.gz',
        'large':
        'https://files.ipd.uw.edu/pub/training_sets/'
        'pdb_2021aug02.tar.gz',
    }
    # Maps a split name to its index in `processed_file_names` /
    # `processed_paths` (index 0 is the shared 'splits.pkl').
    splits = {
        'train': 1,
        'valid': 2,
        'test': 3,
    }
    def __init__(
        self,
        root: str,
        size: str = 'small',
        split: str = 'train',
        datacut: str = '2030-01-01',
        rescut: float = 3.5,
        homo: float = 0.70,
        max_length: int = 10000,
        num_units: int = 150,
        transform: Optional[Callable] = None,
        pre_transform: Optional[Callable] = None,
        pre_filter: Optional[Callable] = None,
        force_reload: bool = False,
    ) -> None:
        self.size = size
        self.split = split
        self.datacut = datacut
        self.rescut = rescut
        self.homo = homo
        self.max_length = max_length
        self.num_units = num_units
        # Name of the directory inside the extracted tarball, derived from
        # the archive file name (e.g. 'pdb_2021aug02_sample'):
        self.sub_folder = self.raw_url[self.size].split('/')[-1].split('.')[0]
        super().__init__(root, transform, pre_transform, pre_filter,
                         force_reload=force_reload)
        # Load the processed file belonging to the requested split:
        self.load(self.processed_paths[self.splits[self.split]])
    @property
    def raw_file_names(self) -> List[str]:
        return [
            f'{self.sub_folder}/{f}'
            for f in ['list.csv', 'valid_clusters.txt', 'test_clusters.txt']
        ]
    @property
    def processed_file_names(self) -> List[str]:
        return ['splits.pkl', 'train.pt', 'valid.pt', 'test.pt']
    def download(self) -> None:
        """Download and extract the tarball for the selected size."""
        file_path = download_url(self.raw_url[self.size], self.raw_dir)
        extract_tar(file_path, self.raw_dir)
        os.unlink(file_path)
    def process(self) -> None:
        """Convert raw PDB chains of the current split into `Data` objects.

        Collects at most `num_units` complexes; entries that fail to load,
        are too long, or contain characters outside the 21-letter alphabet
        are skipped.
        """
        alphabet_set = set(list('ACDEFGHIKLMNPQRSTVWYX'))
        cluster_ids = self._process_split()
        total_items = sum(len(items) for items in cluster_ids.values())
        data_list = []
        with tqdm(total=total_items, desc="Processing") as pbar:
            for _, items in cluster_ids.items():
                for chain_id, _ in items:
                    item = self._process_pdb1(chain_id)
                    # A missing 'label' key is the sentinel for a failed
                    # load (see `_process_pdb1`):
                    if 'label' not in item:
                        pbar.update(1)
                        continue
                    # Skip complexes with too many chains.
                    # NOTE(review): 352 appears to be an upstream limit --
                    # confirm against the reference ProteinMPNN code.
                    if len(list(np.unique(item['idx']))) >= 352:
                        pbar.update(1)
                        continue
                    my_dict = self._process_pdb2(item)
                    if len(my_dict['seq']) > self.max_length:
                        pbar.update(1)
                        continue
                    # Reject sequences with residues outside the alphabet:
                    bad_chars = set(list(
                        my_dict['seq'])).difference(alphabet_set)
                    if len(bad_chars) > 0:
                        pbar.update(1)
                        continue
                    x_chain_all, chain_seq_label_all, mask, chain_mask_all, residue_idx, chain_encoding_all = self._process_pdb3( # noqa: E501
                        my_dict)
                    data = Data(
                        x=x_chain_all, # [seq_len, 4, 3]
                        chain_seq_label=chain_seq_label_all, # [seq_len]
                        mask=mask, # [seq_len]
                        chain_mask_all=chain_mask_all, # [seq_len]
                        residue_idx=residue_idx, # [seq_len]
                        chain_encoding_all=chain_encoding_all, # [seq_len]
                    )
                    if self.pre_filter is not None and not self.pre_filter(
                            data):
                        continue
                    if self.pre_transform is not None:
                        data = self.pre_transform(data)
                    data_list.append(data)
                    if len(data_list) >= self.num_units:
                        pbar.update(total_items - pbar.n)
                        break
                    pbar.update(1)
                # for/else + break: propagate the inner `break` (budget
                # reached) out of the outer loop as well.
                else:
                    continue
                break
        self.save(data_list, self.processed_paths[self.splits[self.split]])
    def _process_split(self) -> Dict[int, List[Tuple[str, int]]]:
        """Partition chains into train/valid/test clusters (cached on disk).

        Returns the mapping ``cluster_id -> [(chain_id, hash_id), ...]`` for
        the current split only.
        """
        import pandas as pd
        save_path = self.processed_paths[0]
        if os.path.exists(save_path):
            print('Load split')
            with open(save_path, 'rb') as f:
                data = pickle.load(f)
        else:
            # CHAINID, DEPOSITION, RESOLUTION, HASH, CLUSTER, SEQUENCE
            df = pd.read_csv(self.raw_paths[0])
            # Apply resolution and deposition-date cutoffs:
            df = df[(df['RESOLUTION'] <= self.rescut)
                    & (df['DEPOSITION'] <= self.datacut)]
            val_ids = pd.read_csv(self.raw_paths[1], header=None)[0].tolist()
            test_ids = pd.read_csv(self.raw_paths[2], header=None)[0].tolist()
            # compile training and validation sets
            data = {
                'train': defaultdict(list),
                'valid': defaultdict(list),
                'test': defaultdict(list),
            }
            for _, r in tqdm(df.iterrows(), desc='Processing split',
                             total=len(df)):
                cluster_id = r['CLUSTER']
                hash_id = r['HASH']
                chain_id = r['CHAINID']
                if cluster_id in val_ids:
                    data['valid'][cluster_id].append((chain_id, hash_id))
                elif cluster_id in test_ids:
                    data['test'][cluster_id].append((chain_id, hash_id))
                else:
                    data['train'][cluster_id].append((chain_id, hash_id))
            with open(save_path, 'wb') as f:
                pickle.dump(data, f)
        return data[self.split]
    def _process_pdb1(self, chain_id: str) -> Dict[str, Any]:
        """Load one chain and assemble its biological complex.

        Returns a dict with keys ``seq``, ``xyz``, ``idx``, ``masked`` and
        ``label``.  A dict *without* the ``label`` key (``{'seq':
        np.zeros(5)}``) is the sentinel for a failed load and tells
        `process` to skip the entry.
        """
        pdbid, chid = chain_id.split('_')
        prefix = f'{self.raw_dir}/{self.sub_folder}/pdb/{pdbid[1:3]}/{pdbid}'
        # load metadata
        if not os.path.isfile(f'{prefix}.pt'):
            return {'seq': np.zeros(5)}
        meta = torch.load(f'{prefix}.pt')
        asmb_ids = meta['asmb_ids']
        asmb_chains = meta['asmb_chains']
        chids = np.array(meta['chains'])
        # find candidate assemblies which contain chid chain
        asmb_candidates = {
            a
            for a, b in zip(asmb_ids, asmb_chains) if chid in b.split(',')
        }
        # if the chain is missing from all the assemblies,
        # then return this chain alone
        if len(asmb_candidates) < 1:
            chain = torch.load(f'{prefix}_{chid}.pt')
            L = len(chain['seq'])
            return {
                'seq': chain['seq'],
                'xyz': chain['xyz'],
                'idx': torch.zeros(L).int(),
                'masked': torch.Tensor([0]).int(),
                'label': chain_id,
            }
        # randomly pick one assembly from candidates
        asmb_i = random.sample(list(asmb_candidates), 1)
        # indices of selected transforms
        idx = np.where(np.array(asmb_ids) == asmb_i)[0]
        # load relevant chains
        # NOTE(review): this iterates the *characters* of the
        # comma-separated string `asmb_chains[i]`; commas are filtered out
        # by the membership test, but multi-character chain ids would
        # break -- confirm ids are always single letters.
        chains = {
            c: torch.load(f'{prefix}_{c}.pt')
            for i in idx
            for c in asmb_chains[i] if c in meta['chains']
        }
        # generate assembly
        asmb = {}
        for k in idx:
            # pick k-th xform
            xform = meta[f'asmb_xform{k}']
            u = xform[:, :3, :3]
            r = xform[:, :3, 3]
            # select chains which k-th xform should be applied to
            s1 = set(meta['chains'])
            s2 = set(asmb_chains[k].split(','))
            chains_k = s1 & s2
            # transform selected chains
            for c in chains_k:
                try:
                    xyz = chains[c]['xyz']
                    # Apply every rotation u / translation r of the xform
                    # batch to the chain coordinates at once:
                    xyz_ru = torch.einsum('bij,raj->brai', u, xyz) + r[:, None,
                                                                      None, :]
                    asmb.update({
                        (c, k, i): xyz_i
                        for i, xyz_i in enumerate(xyz_ru)
                    })
                except KeyError:
                    return {'seq': np.zeros(5)}
        # select chains which share considerable similarity to chid
        seqid = meta['tm'][chids == chid][0, :, 1]
        homo = {
            ch_j
            for seqid_j, ch_j in zip(seqid, chids) if seqid_j > self.homo
        }
        # stack all chains in the assembly together
        seq: str = ''
        xyz_all: List[torch.Tensor] = []
        idx_all: List[torch.Tensor] = []
        masked: List[int] = []
        seq_list = []
        for counter, (k, v) in enumerate(asmb.items()):
            seq += chains[k[0]]['seq']
            seq_list.append(chains[k[0]]['seq'])
            xyz_all.append(v)
            idx_all.append(torch.full((v.shape[0], ), counter))
            if k[0] in homo:
                masked.append(counter)
        return {
            'seq': seq,
            'xyz': torch.cat(xyz_all, dim=0),
            'idx': torch.cat(idx_all, dim=0),
            'masked': torch.Tensor(masked).int(),
            'label': chain_id,
        }
    def _process_pdb2(self, t: Dict[str, Any]) -> Dict[str, Any]:
        """Split an assembled complex into per-chain sequence/coordinate
        entries keyed by a chain letter.
        """
        init_alphabet = list(
            'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz')
        extra_alphabet = [str(item) for item in list(np.arange(300))]
        chain_alphabet = init_alphabet + extra_alphabet
        my_dict: Dict[str, Union[str, int, Dict[str, Any], List[Any]]] = {}
        concat_seq = ''
        mask_list = []
        visible_list = []
        for idx in list(np.unique(t['idx'])):
            letter = chain_alphabet[idx]
            res = np.argwhere(t['idx'] == idx)
            initial_sequence = "".join(list(
                np.array(list(t['seq']))[res][
                    0,
                ]))
            # Trim runs of 'HHHHHH' near either terminus.
            # NOTE(review): presumably removes expression His-tags --
            # confirm against the reference ProteinMPNN implementation.
            if initial_sequence[-6:] == "HHHHHH":
                res = res[:, :-6]
            if initial_sequence[0:6] == "HHHHHH":
                res = res[:, 6:]
            if initial_sequence[-7:-1] == "HHHHHH":
                res = res[:, :-7]
            if initial_sequence[-8:-2] == "HHHHHH":
                res = res[:, :-8]
            if initial_sequence[-9:-3] == "HHHHHH":
                res = res[:, :-9]
            if initial_sequence[-10:-4] == "HHHHHH":
                res = res[:, :-10]
            if initial_sequence[1:7] == "HHHHHH":
                res = res[:, 7:]
            if initial_sequence[2:8] == "HHHHHH":
                res = res[:, 8:]
            if initial_sequence[3:9] == "HHHHHH":
                res = res[:, 9:]
            if initial_sequence[4:10] == "HHHHHH":
                res = res[:, 10:]
            # Keep chains with at least four residues left after trimming:
            if res.shape[1] >= 4:
                chain_seq = "".join(list(np.array(list(t['seq']))[res][0]))
                my_dict[f'seq_chain_{letter}'] = chain_seq
                concat_seq += chain_seq
                # Chains homologous to the query go to the masked list,
                # all others stay visible:
                if idx in t['masked']:
                    mask_list.append(letter)
                else:
                    visible_list.append(letter)
                coords_dict_chain = {}
                all_atoms = np.array(t['xyz'][res])[0] # [L, 14, 3]
                # Keep only the four backbone atoms per residue:
                for i, c in enumerate(['N', 'CA', 'C', 'O']):
                    coords_dict_chain[
                        f'{c}_chain_{letter}'] = all_atoms[:, i, :].tolist()
                my_dict[f'coords_chain_{letter}'] = coords_dict_chain
        my_dict['name'] = t['label']
        my_dict['masked_list'] = mask_list
        my_dict['visible_list'] = visible_list
        my_dict['num_of_chains'] = len(mask_list) + len(visible_list)
        my_dict['seq'] = concat_seq
        return my_dict
    def _process_pdb3(
        self, b: Dict[str, Any]
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor,
               torch.Tensor, torch.Tensor]:
        """Convert a parsed complex into the tensors stored on the
        :class:`~torch_geometric.data.Data` object (coordinates, labels,
        masks, residue indices and chain encodings).
        """
        L = len(b['seq'])
        # residue idx with jumps across chains
        residue_idx = -100 * np.ones([L], dtype=np.int32)
        # get the list of masked / visible chains
        masked_chains, visible_chains = b['masked_list'], b['visible_list']
        visible_temp_dict, masked_temp_dict = {}, {}
        for letter in masked_chains + visible_chains:
            chain_seq = b[f'seq_chain_{letter}']
            if letter in visible_chains:
                visible_temp_dict[letter] = chain_seq
            elif letter in masked_chains:
                masked_temp_dict[letter] = chain_seq
        # check for duplicate chains (same sequence but different identity)
        for _, vm in masked_temp_dict.items():
            for kv, vv in visible_temp_dict.items():
                if vm == vv:
                    if kv not in masked_chains:
                        masked_chains.append(kv)
                    if kv in visible_chains:
                        visible_chains.remove(kv)
        # build protein data structures
        all_chains = masked_chains + visible_chains
        np.random.shuffle(all_chains)
        x_chain_list = []
        chain_mask_list = []
        chain_seq_list = []
        chain_encoding_list = []
        # c: 1-based chain counter; l0/l1: running sequence offsets.
        c, l0, l1 = 1, 0, 0
        for letter in all_chains:
            chain_seq = b[f'seq_chain_{letter}']
            chain_length = len(chain_seq)
            chain_coords = b[f'coords_chain_{letter}']
            x_chain = np.stack([
                chain_coords[c] for c in [
                    f'N_chain_{letter}', f'CA_chain_{letter}',
                    f'C_chain_{letter}', f'O_chain_{letter}'
                ]
            ], 1) # [chain_length, 4, 3]
            x_chain_list.append(x_chain)
            chain_seq_list.append(chain_seq)
            if letter in visible_chains:
                chain_mask = np.zeros(chain_length) # 0 for visible chains
            elif letter in masked_chains:
                chain_mask = np.ones(chain_length) # 1 for masked chains
            chain_mask_list.append(chain_mask)
            chain_encoding_list.append(c * np.ones(chain_length))
            l1 += chain_length
            # Offset by 100 per chain so residue indices never overlap
            # across chain boundaries:
            residue_idx[l0:l1] = 100 * (c - 1) + np.arange(l0, l1)
            l0 += chain_length
            c += 1
        x_chain_all = np.concatenate(x_chain_list, 0) # [L, 4, 3]
        chain_seq_all = "".join(chain_seq_list)
        # [L,] 1.0 for places that need to be predicted
        chain_mask_all = np.concatenate(chain_mask_list, 0)
        chain_encoding_all = np.concatenate(chain_encoding_list, 0)
        # Convert to labels
        alphabet = 'ACDEFGHIKLMNPQRSTVWYX'
        chain_seq_label_all = np.asarray(
            [alphabet.index(a) for a in chain_seq_all], dtype=np.int32)
        # Zero out NaN coordinates and mark those positions invalid:
        isnan = np.isnan(x_chain_all)
        mask = np.isfinite(np.sum(x_chain_all, (1, 2))).astype(np.float32)
        x_chain_all[isnan] = 0.
        # Conversion
        return (
            torch.from_numpy(x_chain_all).to(dtype=torch.float32),
            torch.from_numpy(chain_seq_label_all).to(dtype=torch.long),
            torch.from_numpy(mask).to(dtype=torch.float32),
            torch.from_numpy(chain_mask_all).to(dtype=torch.float32),
            torch.from_numpy(residue_idx).to(dtype=torch.long),
            torch.from_numpy(chain_encoding_all).to(dtype=torch.long),
        )
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/datasets/protein_mpnn_dataset.py",
"license": "MIT License",
"lines": 404,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:test/datasets/test_teeth3ds.py | from torch_geometric.data import Data
from torch_geometric.datasets import Teeth3DS
from torch_geometric.testing import withPackage
@withPackage('trimesh', 'fpsample')
def test_teeth3ds(tmp_path) -> None:
    # The 'sample' split downloads only a small subset of the dataset.
    dataset = Teeth3DS(root=tmp_path, split='sample', train=True)
    assert len(dataset) > 0

    data = dataset[0]
    assert isinstance(data, Data)
    num_points = data.pos.size(0)
    assert data.pos.size(1) == 3
    # Normals and labels are aligned with the sampled points:
    assert data.x.size(0) == num_points
    assert data.y.size(0) == num_points
    assert isinstance(data.jaw, str)
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/datasets/test_teeth3ds.py",
"license": "MIT License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:torch_geometric/datasets/teeth3ds.py | import json
import os
import os.path as osp
from glob import glob
from typing import Callable, Dict, List, Optional
import numpy as np
import torch
from tqdm import tqdm
from torch_geometric.data import (
Data,
InMemoryDataset,
download_url,
extract_zip,
)
class Teeth3DS(InMemoryDataset):
r"""The Teeth3DS+ dataset from the `"An Extended Benchmark for Intra-oral
3D Scans Analysis" <https://crns-smartvision.github.io/teeth3ds/>`_ paper.
This dataset is the first comprehensive public benchmark designed to
advance the field of intra-oral 3D scan analysis developed as part of the
3DTeethSeg 2022 and 3DTeethLand 2024 MICCAI challenges, aiming to drive
research in teeth identification, segmentation, labeling, 3D modeling,
and dental landmark identification.
The dataset includes at least 1,800 intra-oral scans (containing 23,999
annotated teeth) collected from 900 patients, covering both upper and lower
jaws separately.
Args:
root (str): Root directory where the dataset should be saved.
split (str): The split name (one of :obj:`"Teeth3DS"`,
:obj:`"3DTeethSeg22_challenge"` or :obj:`"3DTeethLand_challenge"`).
train (bool, optional): If :obj:`True`, loads the training dataset,
otherwise the test dataset. (default: :obj:`True`)
num_samples (int, optional): Number of points to sample from each mesh.
(default: :obj:`30000`)
transform (callable, optional): A function/transform that takes in an
:obj:`torch_geometric.data.Data` object and returns a transformed
version. The data object will be transformed before every access.
(default: :obj:`None`)
pre_transform (callable, optional): A function/transform that takes in
an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
being saved to disk. (default: :obj:`None`)
force_reload (bool, optional): Whether to re-process the dataset.
(default: :obj:`False`)
"""
urls = {
'data_part_1.zip':
'https://osf.io/download/qhprs/',
'data_part_2.zip':
'https://osf.io/download/4pwnr/',
'data_part_3.zip':
'https://osf.io/download/frwdp/',
'data_part_4.zip':
'https://osf.io/download/2arn4/',
'data_part_5.zip':
'https://osf.io/download/xrz5f/',
'data_part_6.zip':
'https://osf.io/download/23hgq/',
'data_part_7.zip':
'https://osf.io/download/u83ad/',
'train_test_split':
'https://files.de-1.osf.io/v1/'
'resources/xctdy/providers/osfstorage/?zip='
}
sample_url = {
'teeth3ds_sample': 'https://osf.io/download/vr38s/',
}
landmarks_urls = {
'3DTeethLand_landmarks_train.zip': 'https://osf.io/download/k5hbj/',
'3DTeethLand_landmarks_test.zip': 'https://osf.io/download/sqw5e/',
}
def __init__(
self,
root: str,
split:
str = 'Teeth3DS', # [3DTeethSeg22_challenge, 3DTeethLand_challenge]
train: bool = True,
num_samples: int = 30000,
transform: Optional[Callable] = None,
pre_transform: Optional[Callable] = None,
force_reload: bool = False,
) -> None:
self.mode = 'training' if train else 'testing'
self.split = split
self.num_samples = num_samples
super().__init__(root, transform, pre_transform,
force_reload=force_reload)
@property
def processed_dir(self) -> str:
return os.path.join(self.root, f'processed_{self.split}_{self.mode}')
@property
def raw_file_names(self) -> List[str]:
return ['license.txt']
@property
def processed_file_names(self) -> List[str]:
# Directory containing train/test split files:
split_subdir = 'teeth3ds_sample' if self.split == 'sample' else ''
split_dir = osp.join(
self.raw_dir,
split_subdir,
f'{self.split}_train_test_split',
)
split_files = glob(osp.join(split_dir, f'{self.mode}*.txt'))
# Collect all file names from the split files:
combined_list = []
for file_path in split_files:
with open(file_path) as file:
combined_list.extend(file.read().splitlines())
# Generate the list of processed file paths:
return [f'{file_name}.pt' for file_name in combined_list]
def download(self) -> None:
if self.split == 'sample':
for key, url in self.sample_url.items():
path = download_url(url, self.root, filename=key)
extract_zip(path, self.raw_dir)
os.unlink(path)
else:
for key, url in self.urls.items():
path = download_url(url, self.root, filename=key)
extract_zip(path, self.raw_dir)
os.unlink(path)
for key, url in self.landmarks_urls.items():
path = download_url(url, self.root, filename=key)
extract_zip(path, self.raw_dir) # Extract each downloaded part
os.unlink(path)
def process_file(self, file_path: str) -> Optional[Data]:
"""Processes the input file path to load mesh data, annotations,
and prepare the input features for a graph-based deep learning model.
"""
import trimesh
from fpsample import bucket_fps_kdline_sampling
mesh = trimesh.load_mesh(file_path)
if isinstance(mesh, list):
# Handle the case where a list of Geometry objects is returned
mesh = mesh[0]
vertices = mesh.vertices
vertex_normals = mesh.vertex_normals
# Perform sampling on mesh vertices:
if len(vertices) < self.num_samples:
sampled_indices = np.random.choice(
len(vertices),
self.num_samples,
replace=True,
)
else:
sampled_indices = bucket_fps_kdline_sampling(
vertices,
self.num_samples,
h=5,
start_idx=0,
)
if len(sampled_indices) != self.num_samples:
raise RuntimeError(f"Sampled points mismatch, expected "
f"{self.num_samples} points, but got "
f"{len(sampled_indices)} for '{file_path}'")
# Extract features and annotations for the sampled points:
pos = torch.tensor(vertices[sampled_indices], dtype=torch.float)
x = torch.tensor(vertex_normals[sampled_indices], dtype=torch.float)
# Load segmentation annotations:
seg_annotation_path = file_path.replace('.obj', '.json')
if osp.exists(seg_annotation_path):
with open(seg_annotation_path) as f:
seg_annotations = json.load(f)
y = torch.tensor(
np.asarray(seg_annotations['labels'])[sampled_indices],
dtype=torch.float)
instances = torch.tensor(
np.asarray(seg_annotations['instances'])[sampled_indices],
dtype=torch.float)
else:
y = torch.empty(0, 3)
instances = torch.empty(0, 3)
# Load landmarks annotations:
landmarks_annotation_path = file_path.replace('.obj', '__kpt.json')
# Parse keypoint annotations into structured tensors:
keypoints_dict: Dict[str, List] = {
key: []
for key in [
'Mesial', 'Distal', 'Cusp', 'InnerPoint', 'OuterPoint',
'FacialPoint'
]
}
keypoint_tensors: Dict[str, torch.Tensor] = {
key: torch.empty(0, 3)
for key in [
'Mesial', 'Distal', 'Cusp', 'InnerPoint', 'OuterPoint',
'FacialPoint'
]
}
if osp.exists(landmarks_annotation_path):
with open(landmarks_annotation_path) as f:
landmarks_annotations = json.load(f)
for keypoint in landmarks_annotations['objects']:
keypoints_dict[keypoint['class']].extend(keypoint['coord'])
keypoint_tensors = {
k: torch.tensor(np.asarray(v),
dtype=torch.float).reshape(-1, 3)
for k, v in keypoints_dict.items()
}
data = Data(
pos=pos,
x=x,
y=y,
instances=instances,
jaw=file_path.split('.obj')[0].split('_')[1],
mesial=keypoint_tensors['Mesial'],
distal=keypoint_tensors['Distal'],
cusp=keypoint_tensors['Cusp'],
inner_point=keypoint_tensors['InnerPoint'],
outer_point=keypoint_tensors['OuterPoint'],
facial_point=keypoint_tensors['FacialPoint'],
)
if self.pre_transform is not None:
data = self.pre_transform(data)
return data
def process(self) -> None:
    """Convert every raw ``.obj`` mesh into a processed ``.pt`` sample.

    For each expected processed file name, the matching raw mesh is
    located with a recursive glob under ``raw_dir``; the mesh is only
    processed and saved when exactly one candidate path matches.
    """
    for out_name in tqdm(self.processed_file_names):
        stem = out_name.split('.')[0]
        pattern = osp.join(self.raw_dir, '**', '*', stem + '.obj')
        matches = glob(pattern)
        if len(matches) != 1:
            # Skip missing or ambiguous raw meshes.
            continue
        sample = self.process_file(matches[0])
        torch.save(sample, osp.join(self.processed_dir, out_name))
def len(self) -> int:
    """Return the number of samples in the dataset."""
    file_names = self.processed_file_names
    return len(file_names)
def get(self, idx: int) -> Data:
    """Load and return the processed sample stored at index ``idx``."""
    path = osp.join(self.processed_dir, self.processed_file_names[idx])
    # ``weights_only=False`` is required to deserialize full ``Data``
    # objects (not just tensors).
    return torch.load(path, weights_only=False)
def __repr__(self) -> str:
    """Summarize the dataset: class name, size, mode and split."""
    cls_name = self.__class__.__name__
    return f'{cls_name}({len(self)}, mode={self.mode}, split={self.split})'
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/datasets/teeth3ds.py",
"license": "MIT License",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:test/nn/conv/test_meshcnn_conv.py | import pytest
import torch
from torch.nn import Linear, ModuleList, Sequential, Sigmoid
from torch_geometric.nn import MeshCNNConv
@pytest.mark.parametrize('in_channels, out_channels', [
    (1, 1),
    (1, 2),
    (8, 3),
    (8, 3),  # NOTE(review): duplicate of the previous case — intentional?
    (42, 40),
])
def test_meshcnn_conv(in_channels: int, out_channels: int):
    """Smoke-test :class:`MeshCNNConv` on a tetrahedron — the smallest
    manifold triangular mesh — with both default and custom kernels."""
    num_edges = 6  # |E|: a tetrahedron has 6 edges
    feats = torch.randn(num_edges, in_channels)  # X^(k), prior-layer output
    # Edge adjacency A: four ordered neighbors (a, b, c, d) per edge.
    edge_index = torch.tensor([[
        0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5
    ], [
        1, 2, 3, 4, 2, 0, 4, 5, 5, 3, 0, 1, 2, 5, 4, 0, 0, 3, 5, 1, 1, 4, 3, 2
    ]], dtype=torch.int64)

    # in_channels is `Dim-Out(k)`, out_channels is `Dim-Out(k+1)`.
    layer = MeshCNNConv(in_channels, out_channels)
    # WARN: For now the repr does not account for the 5 default kernels.
    assert str(layer) == f"MeshCNNConv({in_channels}, {out_channels})"

    out = layer(feats, edge_index)
    assert out.size() == (num_edges, out_channels)
    # The layer must be deterministic.
    assert torch.allclose(layer(feats, edge_index), out)

    # Custom kernels MUST be a ModuleList of length 5: kernels[0] is
    # W_0^{(k+1)}, kernels[1] is W_1^{(k+1)}, and so on.
    custom_kernels = ModuleList([
        Sequential(Linear(in_channels, out_channels), Sigmoid())
        for _ in range(5)
    ])
    # Sequential exposes no in/out dimension attributes, so the layer
    # warns that it cannot verify the kernel shapes.
    with pytest.warns(UserWarning, match="does not have attribute"):
        layer = MeshCNNConv(in_channels, out_channels, custom_kernels)
    # WARN: For now the repr does not account for the custom kernels.
    assert str(layer) == f"MeshCNNConv({in_channels}, {out_channels})"
    out = layer(feats, edge_index)
    assert out.size() == (num_edges, out_channels)
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/nn/conv/test_meshcnn_conv.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:torch_geometric/nn/conv/meshcnn_conv.py | # The below is to suppress the warning on torch.nn.conv.MeshCNNConv::update
# pyright: reportIncompatibleMethodOverride=false
import warnings
from typing import Optional
import torch
from torch.nn import Linear, Module, ModuleList
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.typing import Tensor
class MeshCNNConv(MessagePassing):
r"""The convolutional layer introduced by the paper
`"MeshCNN: A Network With An Edge" <https://arxiv.org/abs/1809.05910>`_.
Recall that, given a set of categories :math:`C`,
MeshCNN is a function that takes as its input
a triangular mesh
:math:`\mathcal{m} = (V, F) \in \mathbb{R}^{|V| \times 3} \times
\{0,...,|V|-1\}^{3 \times |F|}`, and returns as its output
a :math:`|C|`-dimensional vector, whose :math:`i` th component denotes
the probability of the input mesh belonging to category :math:`c_i \in C`.
Let :math:`X^{(k)} \in \mathbb{R}^{|E| \times \text{Dim-Out}(k)}`
denote the output value of the prior (e.g. :math:`k` th )
layer of our neural network. The :math:`i` th row of :math:`X^{(k)}` is a
:math:`\text{Dim-Out}(k)`-dimensional vector that represents the features
computed by the :math:`k` th layer for edge :math:`e_i` of the input mesh
:math:`\mathcal{m}`. Let :math:`A \in \{0, ..., |E|-1\}^{2 \times 4*|E|}`
denote the *edge adjacency* matrix of our input mesh :math:`\mathcal{m}`.
The :math:`j` th column of :math:`A` returns a pair of indices
:math:`k,l \in \{0,...,|E|-1\}`, which means that edge
:math:`e_k` is adjacent to edge :math:`e_l`
in our input mesh :math:`\mathcal{m}`.
The definition of edge adjacency in a triangular
mesh is illustrated in Figure 1.
In a triangular
mesh, each edge :math:`e_i` is expected to be adjacent to exactly :math:`4`
neighboring edges, hence the number of columns of :math:`A`: :math:`4*|E|`.
We write *the neighborhood* of edge :math:`e_i` as
:math:`\mathcal{N}(i) = (a(i), b(i), c(i), d(i))` where
1. :math:`a(i)` denotes the index of the *first* counter-clockwise
edge of the face *above* :math:`e_i`.
2. :math:`b(i)` denotes the index of the *second* counter-clockwise
edge of the face *above* :math:`e_i`.
3. :math:`c(i)` denotes the index of the *first* counter-clockwise edge
of the face *below* :math:`e_i`.
4. :math:`d(i)` denotes the index of the *second*
counter-clockwise edge of the face *below* :math:`e_i`.
.. figure:: ../_figures/meshcnn_edge_adjacency.svg
:align: center
:width: 80%
**Figure 1:** The neighbors of edge :math:`\mathbf{e_1}`
are :math:`\mathbf{e_2}, \mathbf{e_3}, \mathbf{e_4}` and
:math:`\mathbf{e_5}`, respectively.
We write this as
:math:`\mathcal{N}(1) = (a(1), b(1), c(1), d(1)) = (2, 3, 4, 5)`
Because of this ordering constraint, :obj:`MeshCNNConv` **requires
that the columns of** :math:`A`
**be ordered in the following way**:
.. math::
&A[:,0] = (0, \text{The index of the "a" edge for edge } 0) \\
&A[:,1] = (0, \text{The index of the "b" edge for edge } 0) \\
&A[:,2] = (0, \text{The index of the "c" edge for edge } 0) \\
&A[:,3] = (0, \text{The index of the "d" edge for edge } 0) \\
\vdots \\
&A[:,4*|E|-4] =
\bigl(|E|-1,
a\bigl(|E|-1\bigr)\bigr) \\
&A[:,4*|E|-3] =
\bigl(|E|-1,
b\bigl(|E|-1\bigr)\bigr) \\
&A[:,4*|E|-2] =
\bigl(|E|-1,
c\bigl(|E|-1\bigr)\bigr) \\
&A[:,4*|E|-1] =
\bigl(|E|-1,
d\bigl(|E|-1\bigr)\bigr)
Stated a bit more compactly, for every edge :math:`e_i` in the input mesh,
:math:`A`, should have the following entries
.. math::
A[:, 4*i] &= (i, a(i)) \\
A[:, 4*i + 1] &= (i, b(i)) \\
A[:, 4*i + 2] &= (i, c(i)) \\
A[:, 4*i + 3] &= (i, d(i))
To summarize so far, we have defined 3 things:
1. The activation of the prior (e.g. :math:`k` th) layer,
:math:`X^{(k)} \in \mathbb{R}^{|E| \times \text{Dim-Out}(k)}`
2. The edge adjacency matrix and the definition of edge adjacency.
:math:`A \in \{0,...,|E|-1\}^{2 \times 4*|E|}`
3. The ways the columns of :math:`A` must be ordered.
We are now finally able to define the :obj:`MeshCNNConv` class/layer.
In the following definition
we assume :obj:`MeshCNNConv` is at the :math:`k+1` th layer of our
neural network.
The :obj:`MeshCNNConv` layer is a function,
.. math::
\text{MeshCNNConv}^{(k+1)}(X^{(k)}, A) = X^{(k+1)},
that, given the prior layer's output
:math:`X^{(k)} \in \mathbb{R}^{|E| \times \text{Dim-Out}(k)}`
and the edge adjacency matrix :math:`A`
of the input mesh (graph) :math:`\mathcal{m}` ,
returns a new edge feature tensor
:math:`X^{(k+1)} \in \mathbb{R}^{|E| \times \text{Dim-Out}(k+1)}`,
where the :math:`i` th row of :math:`X^{(k+1)}`, denoted by
:math:`x^{(k+1)}_i`,
represents the :math:`\text{Dim-Out}(k+1)`-dimensional feature vector
of edge :math:`e_i`, **and is defined as follows**:
.. math::
x^{(k+1)}_i &= W^{(k+1)}_0 x^{(k)}_i \\
&+ W^{(k+1)}_1 \bigl| x^{(k)}_{a(i)} - x^{(k)}_{c(i)} \bigr| \\
&+ W^{(k+1)}_2 \bigl( x^{(k)}_{a(i)} + x^{(k)}_{c(i)} \bigr) \\
&+ W^{(k+1)}_3 \bigl| x^{(k)}_{b(i)} - x^{(k)}_{d(i)} \bigr| \\
&+ W^{(k+1)}_4 \bigl( x^{(k)}_{b(i)} + x^{(k)}_{d(i)} \bigr).
:math:`W_0^{(k+1)},W_1^{(k+1)},W_2^{(k+1)},W_3^{(k+1)}, W_4^{(k+1)}
\in \mathbb{R}^{\text{Dim-Out}(k+1) \times \text{Dim-Out}(k)}`
are trainable linear functions (i.e. "the weights" of this layer).
:math:`x_i` is the :math:`\text{Dim-Out}(k)`-dimensional feature of
edge :math:`e_i` vector computed by the prior (e.g. :math:`k`) th layer.
:math:`x^{(k)}_{a(i)}, x^{(k)}_{b(i)}, x^{(k)}_{c(i)}`, and
:math:`x^{(k)}_{d(i)}` are the :math:`\text{Dim-Out}(k)`-feature vectors,
computed in the :math:`k` th layer, that are associated with the :math:`4`
neighboring edges of :math:`e_i`.
Args:
in_channels (int): Corresponds to :math:`\text{Dim-Out}(k)`
in the above overview. This
represents the output dimension of the prior layer. For the given
input mesh :math:`\mathcal{m} = (V, F)`, the prior layer is
expected to output a
:math:`X \in \mathbb{R}^{|E| \times \textit{in_channels}}`
feature matrix.
Assuming the instance of this class
is situated at layer :math:`k+1`, we write that
:math:`X^{(k)} \in \mathbb{R}^{|E| \times \textit{in_channels}}`.
out_channels (int): Corresponds to :math:`\text{Dim-Out}(k+1)` in the
above overview. This represents the output dimension of this layer.
Assuming the instance of this class
is situated at layer :math:`k+1`, we write that
:math:`X^{(k+1)}
\in \mathbb{R}^{|E| \times \textit{out_channels}}`.
kernels (torch.nn.ModuleList, optional): A list of length of 5,
where each
element is a :class:`torch.nn.module` (i.e a neural network),
that each MUST take as input a vector
of dimension :obj:`in_channels` and return a vector of dimension
:obj:`out_channels`. In particular,
:obj:`kernels[0]` is :math:`W^{(k+1)}_0` in the above overview
(see :obj:`MeshCNNConv`), :obj:`kernels[1]` is :math:`W^{(k+1)}_1`,
:obj:`kernels[2]` is :math:`W^{(k+1)}_2`,
:obj:`kernels[3]` is :math:`W^{(k+1)}_3`, and
:obj:`kernels[4]` is :math:`W^{(k+1)}_4`.
Note that this input is optional, in which case
each of the 5 elements in the kernels will be a linear
neural network :class:`torch.nn.modules.Linear`
correctly configured to take as input
:attr:`in_channels`-dimensional vectors and return
a vector of dimensions :attr:`out_channels`.
Discussion:
The key difference that separates :obj:`MeshCNNConv` from a traditional
message passing graph neural network is that :obj:`MeshCNNConv`
requires the set of neighbors for a node
:math:`\mathcal{N}(u) = (v_1, v_2, ...)`
to *be an ordered set* (i.e. a tuple). In
fact, :obj:`MeshCNNConv` goes further, requiring
that :math:`\mathcal{N}(u)` always return a set of size :math:`4`.
This is different from most message passing graph neural networks,
which assume that :math:`\mathcal{N}(u) = \{v_1, v_2, ...\}` returns an
unordered set. This lends :obj:`MeshCNNConv` more expressive power,
at the cost of no longer being permutation invariant to
:math:`\mathbb{S}_4`. Put more plainly, in traditional message passing
GNNs, the network is *unable* to distinguish one neighboring node
from another.
In contrast, in :obj:`MeshCNNConv`, each of the 4 neighbors has a
"role", either the "a", "b", "c", or "d" neighbor. We encode this fact
by requiring that :math:`\mathcal{N}` return the 4-tuple,
where the first component is the "a" neighbor, and so on.
To summarize this comparison, we may re-define
:obj:`MeshCNNConv` in terms of :math:`\text{UPDATE}` and
:math:`\text{AGGREGATE}`
functions, which is a general way to define a traditional GNN layer.
If we let :math:`x_i^{(k+1)}`
denote the output of a GNN layer for node :math:`i` at
layer :math:`k+1`, and let
:math:`\mathcal{N}(i)` denote the set of nodes adjacent
to node :math:`i`,
then we can describe the :math:`k+1` th layer as traditional GNN
as
.. math::
x_i^{(k+1)} = \text{UPDATE}^{(k+1)}\bigl(x^{(k)}_i,
\text{AGGREGATE}^{(k+1)}\bigl(\mathcal{N}(i)\bigr)\bigr).
Here, :math:`\text{UPDATE}^{(k+1)}` is a function of :math:`2`
:math:`\text{Dim-Out}(k)`-dimensional vectors, and returns a
:math:`\text{Dim-Out}(k+1)`-dimensional vector.
:math:`\text{AGGREGATE}^{(k+1)}` function
is a function of a *unordered set*
of nodes that are neighbors of node :math:`i`, as defined by
:math:`\mathcal{N}(i)`. Usually the size of this set varies across
different nodes :math:`i`, and one of the most basic examples
of such a function is the "sum aggregation", defined as
:math:`\text{AGGREGATE}^{(k+1)}(\mathcal{N}(i)) =
\sum_{j \in \mathcal{N}(i)} x^{(k)}_j`.
See
:class:`SumAggregation <torch_geometric.nn.aggr.basic.SumAggregation>`
for more.
In contrast, while :obj:`MeshCNNConv` 's :math:`\text{UPDATE}`
function follows
a traditional GNN's, its :math:`\text{AGGREGATE}` is a function of a tuple
(i.e. an ordered set) of neighbors
rather than a unordered set of neighbors.
In particular, while the :math:`\text{UPDATE}`
function of :obj:`MeshCNNConv` for :math:`e_i` is
.. math::
x_i^{(k+1)} = \text{UPDATE}^{(k+1)}(x_i^{(k)}, s_i^{(k+1)})
= W_0^{(k+1)}x_i^{(k)} + s_i^{(k+1)},
in contrast, :obj:`MeshCNNConv` 's :math:`\text{AGGREGATE}` function is
.. math::
s_i^{(k+1)} = \text{AGGREGATE}^{(k+1)}(A, B, C, D)
&= W_1^{(k+1)}\bigl|A - C \bigr| \\
&+ W_2^{(k+1)}\bigl(A + C \bigr) \\
&+ W_3^{(k+1)}\bigl|B - D \bigr| \\
&+ W_4^{(k+1)}\bigl(B + D \bigr),
where :math:`A=x_{a(i)}^{(k)}, B=x_{b(i)}^{(k)}, C=x_{c(i)}^{(k)},`
and :math:`D=x_{d(i)}^{(k)}`.
..
The :math:`i` th row of
:math:`V \in \mathbb{R}^{|V| \times 3}`
holds the cartesian :math:`xyz`
coordinates for node :math:`v_i` in the mesh, and the :math:`j` th
column in :math:`F \in \{1,...,|V|\}^{3 \times |V|}`
holds the :math:`3` indices
:math:`(k,l,m)` that correspond to the :math:`3` nodes
:math:`(v_k, v_l, v_m)` that construct face :math:`j` of the mesh.
"""
def __init__(self, in_channels: int, out_channels: int,
             kernels: Optional[ModuleList] = None):
    r"""Initialize the layer with its 5 kernels :math:`W_0, \dots, W_4`.

    If ``kernels`` is omitted, each kernel defaults to a single
    :class:`torch.nn.Linear` mapping :obj:`in_channels`-dimensional
    inputs to :obj:`out_channels`-dimensional outputs.
    """
    super().__init__(aggr='add')
    self.in_channels = in_channels
    self.out_channels = out_channels
    if kernels is not None:
        # Validate the user-supplied kernels; raises (or warns) on
        # malformed input.
        self._assert_kernels(kernels)
        self.kernels = kernels
    else:
        self.kernels = ModuleList(
            [Linear(in_channels, out_channels) for _ in range(5)])
def forward(self, x: Tensor, edge_index: Tensor) -> Tensor:
    r"""Compute :math:`X^{(k+1)}` from the prior features and adjacency.

    Args:
        x (torch.Tensor): :math:`X^{(k)}`, the edge feature tensor
            returned by the prior layer, of shape
            :obj:`(|E|, self.in_channels)`.
        edge_index (torch.Tensor): The edge adjacency tensor
            :math:`A \in \{0,...,|E|-1\}^{2 \times 4*|E|}`. Its columns
            **MUST** be grouped in blocks of four per edge, listing the
            "a", "b", "c" and "d" neighbors of edge :math:`i` at columns
            :math:`4i, 4i+1, 4i+2, 4i+3` respectively. See
            :obj:`MeshCNNConv` for the definition of edge adjacency and
            the precise required layout (also given in the
            `paper <https://arxiv.org/abs/1809.05910>`_ itself).

    Returns:
        torch.Tensor: :math:`X^{(k+1)}` of shape
        :obj:`(|E|, self.out_channels)`, whose :math:`i` th row is

        .. math::
            x^{(k+1)}_i = W_0 x^{(k)}_i
            + W_1 \bigl| x^{(k)}_{a(i)} - x^{(k)}_{c(i)} \bigr|
            + W_2 \bigl( x^{(k)}_{a(i)} + x^{(k)}_{c(i)} \bigr)
            + W_3 \bigl| x^{(k)}_{b(i)} - x^{(k)}_{d(i)} \bigr|
            + W_4 \bigl( x^{(k)}_{b(i)} + x^{(k)}_{d(i)} \bigr).
    """
    # Message passing over the 4-regular edge-adjacency structure; the
    # neighbor terms are built in message() and the W_0 term in update().
    return self.propagate(edge_index, x=x)
def message(self, x_j: Tensor) -> Tensor:
    r"""The message passing step of :obj:`MeshCNNConv`.

    Args:
        x_j (torch.Tensor): A :obj:`[4*|E|, in_channels]` tensor. Rows
            come in blocks of four per target edge, holding the prior
            features of its (a, b, c, d) neighbors in that order: a and b
            are the two counter-clockwise edges of the face above the
            target edge, c and d those of the face below.

    Returns:
        torch.Tensor: A :obj:`[4*|E|, out_channels]` tensor whose four
        rows per target edge, summed by the ``'add'`` aggregation, yield

        .. math::
            W_1 \bigl|x_a - x_c\bigr| + W_2 (x_a + x_c)
            + W_3 \bigl|x_b - x_d\bigr| + W_4 (x_b + x_d).
    """
    E4 = x_j.size(0)  # 4*|E|: number of edges in the line graph
    n_a = x_j[0::4]  # shape: |E| x in_channels
    n_b = x_j[1::4]
    n_c = x_j[2::4]
    n_d = x_j[3::4]
    # Allocate on the same device/dtype as the input so the layer also
    # works on CUDA and under reduced precision (a bare torch.empty
    # would default to CPU/float32).
    m = torch.empty(E4, self.out_channels, dtype=x_j.dtype,
                    device=x_j.device)
    # Call the kernels as modules (not .forward) so hooks are respected.
    m[0::4] = self.kernels[1](torch.abs(n_a - n_c))
    m[1::4] = self.kernels[2](n_a + n_c)
    m[2::4] = self.kernels[3](torch.abs(n_b - n_d))
    m[3::4] = self.kernels[4](n_b + n_d)
    return m
def update(self, inputs: Tensor, x: Tensor) -> Tensor:
    r"""The UPDATE step, in reference to the UPDATE and AGGREGATE
    formulation of message passing convolution.

    Args:
        inputs (torch.Tensor): The aggregated neighborhood term
            :math:`s^{(k+1)}` returned by aggregation, of shape
            :obj:`(|E|, out_channels)`.
        x (torch.Tensor): :math:`X^{(k)}`, the original input to this
            layer.

    Returns:
        torch.Tensor: :math:`X^{(k+1)} = W_0^{(k+1)} X^{(k)} + s^{(k+1)}`,
        of shape :obj:`(|E|, out_channels)`.
    """
    # Apply the root kernel W_0 to the layer input, then add the
    # aggregated neighborhood contribution.
    transformed_root = self.kernels[0].forward(x)
    return transformed_root + inputs
def _assert_kernels(self, kernels: ModuleList):
r"""Ensures that :obj:`kernels` is a list of 5 :obj:`torch.nn.Module`
modules (i.e. networks). In addition, it also ensures that each network
takes in input of dimension :attr:`in_channels`, and returns output
of dimension :attr:`out_channels`.
This method throws an error otherwise.
.. warn::
This method throws an error if :obj:`kernels` is
not valid. (Otherwise this method returns nothing)
"""
assert isinstance(kernels, ModuleList), \
f"Parameter 'kernels' must be a \
torch.nn.module.ModuleList with 5 members, but we got \
{type(kernels)}."
assert len(kernels) == 5, "Parameter 'kernels' must be a \
torch.nn.module.ModuleList of with exactly 5 members"
for i, network in enumerate(kernels):
assert isinstance(network, Module), \
f"kernels[{i}] must be torch.nn.Module, got \
{type(network)}"
if not hasattr(network, "in_channels") and \
not hasattr(network, "in_features"):
warnings.warn(
f"kernel[{i}] does not have attribute 'in_channels' nor "
f"'out_features'. The network must take as input a "
f"{self.in_channels}-dimensional tensor.", stacklevel=2)
else:
input_dimension = getattr(network, "in_channels",
network.in_features)
assert input_dimension == self.in_channels, f"The input \
dimension of the neural network in kernel[{i}] must \
be \
equal to 'in_channels', but input_dimension = \
{input_dimension}, and \
self.in_channels={self.in_channels}."
if not hasattr(network, "out_channels") and \
not hasattr(network, "out_features"):
warnings.warn(
f"kernel[{i}] does not have attribute 'in_channels' nor "
f"'out_features'. The network must take as input a "
f"{self.in_channels}-dimensional tensor.", stacklevel=2)
else:
output_dimension = getattr(network, "out_channels",
network.out_features)
assert output_dimension == self.out_channels, f"The output \
dimension of the neural network in kernel[{i}] must \
be \
equal to 'out_channels', but out_dimension = \
{output_dimension}, and \
self.out_channels={self.out_channels}."
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/nn/conv/meshcnn_conv.py",
"license": "MIT License",
"lines": 417,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
pyg-team/pytorch_geometric:test/utils/test_total_influence.py | import torch
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv
from torch_geometric.utils import total_influence
class GNN(torch.nn.Module):
    """Two-layer GCN used as a fixture for influence computations."""
    def __init__(self):
        super().__init__()
        self.conv1 = GCNConv(5, 6)
        self.conv2 = GCNConv(6, 7)

    def forward(self, x0, edge_index):
        # Chain both convolutions over the same connectivity.
        return self.conv2(self.conv1(x0, edge_index), edge_index)
def test_total_influence_smoke():
    """total_influence runs end-to-end and returns sane shapes/ranges."""
    max_hops = 2
    num_samples = 4
    graph = Data(
        x=torch.randn(6, 5),
        edge_index=torch.tensor([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]]),
    )
    model = GNN()

    # Averaged per-hop influence: one entry per hop, including hop 0.
    I, R = total_influence(
        model,
        graph,
        max_hops=max_hops,
        num_samples=num_samples,
    )
    assert I.shape == (max_hops + 1, )
    assert 0.0 <= R <= max_hops

    # Per-node influence matrix when averaging is disabled.
    I, R = total_influence(
        model,
        graph,
        max_hops=max_hops,
        num_samples=num_samples,
        average=False,
    )
    assert I.shape == torch.Size([num_samples, max_hops + 1])
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/utils/test_total_influence.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:torch_geometric/utils/influence.py | from typing import List, Tuple, Union, cast
import torch
from torch import Tensor
from torch.autograd.functional import jacobian
from tqdm.auto import tqdm
from torch_geometric.data import Data
from torch_geometric.utils import k_hop_subgraph
def k_hop_subsets_rough(
    node_idx: int,
    num_hops: int,
    edge_index: Tensor,
    num_nodes: int,
) -> List[Tensor]:
    r"""Return *rough* (possibly overlapping) *k*-hop node subsets.

    A thin re-implementation of the expansion loop inside
    :pyfunc:`torch_geometric.utils.k_hop_subgraph` that keeps **every**
    intermediate hop subset instead of only their union.

    Parameters
    ----------
    node_idx: int
        Index of the seed node.
    num_hops: int
        Number of hops *k*.
    edge_index: Tensor
        Edge index in COO format with shape :math:`[2, \text{num_edges}]`.
    num_nodes: int
        Total number of nodes in the graph. Required to allocate the masks.

    Returns:
    -------
    List[Tensor]
        A list ``[H₀, H₁, …, H_k]`` where ``H₀`` contains the seed node
        and ``H_i`` (for *i*>0) contains **all** nodes reached at
        expansion step *i* — overlaps with earlier hops are *not*
        removed.
    """
    col, row = edge_index

    # Reusable masks over nodes and edges, allocated once.
    node_mask = row.new_empty(num_nodes, dtype=torch.bool)
    edge_mask = row.new_empty(row.size(0), dtype=torch.bool)

    frontier = torch.tensor([node_idx], device=row.device)
    hop_subsets = [frontier]
    for _ in range(num_hops):
        node_mask.zero_()
        node_mask[hop_subsets[-1]] = True
        # Select edges whose endpoint lies in the current frontier and
        # collect the nodes on the other side.
        torch.index_select(node_mask, 0, row, out=edge_mask)
        hop_subsets.append(col[edge_mask])
    return hop_subsets
def k_hop_subsets_exact(
    node_idx: int,
    num_hops: int,
    edge_index: Tensor,
    num_nodes: int,
    device: Union[torch.device, str],
) -> List[Tensor]:
    """Return **disjoint** *k*-hop subsets.

    Refines :pyfunc:`k_hop_subsets_rough` by dropping every node that
    already appeared in an earlier hop, so that subset *i* contains
    exactly the nodes at hop distance *i* from the seed.
    """
    rough = k_hop_subsets_rough(node_idx, num_hops, edge_index, num_nodes)

    exact: List[List[int]] = [rough[0].tolist()]
    seen: set[int] = set(exact[0])
    for hop_subset in rough[1:]:
        new_nodes = set(hop_subset.tolist()) - seen
        seen |= new_nodes
        exact.append(list(new_nodes))

    return [
        torch.tensor(subset, device=device, dtype=edge_index.dtype)
        for subset in exact
    ]
def jacobian_l1(
    model: torch.nn.Module,
    data: Data,
    max_hops: int,
    node_idx: int,
    device: Union[torch.device, str],
    *,
    vectorize: bool = True,
) -> Tensor:
    """Compute the **L1 norm** of the Jacobian for a given node.

    The Jacobian of the model output at ``node_idx`` is evaluated w.r.t.
    the node features of the *k*-hop induced sub-graph centred at that
    node, then *folded back* onto the **original** node index space: the
    returned tensor has length ``data.num_nodes`` and is zero for nodes
    outside the *k*-hop sub-graph.

    Notes:
    -----
    * The function assumes that the model *and* ``data.x`` share the same
      floating-point precision (e.g. both ``float32`` or both
      ``float16``).
    """
    edge_index = cast(Tensor, data.edge_index)
    x = cast(Tensor, data.x)

    # Induced k-hop sub-graph, with nodes re-labelled to 0..n_sub-1.
    sub_nodes, sub_edge_index, mapping, _ = k_hop_subgraph(
        node_idx, max_hops, edge_index, relabel_nodes=True)
    # Position of the seed node inside the sub-graph.
    center = cast(int, mapping[0])

    # Move tensors & model to the requested device.
    target_device = torch.device(device)
    sub_x = x[sub_nodes].to(target_device)
    sub_edge_index = sub_edge_index.to(target_device)
    model = model.to(target_device)

    def _forward(feats: Tensor) -> Tensor:
        return model(feats, sub_edge_index)[center]

    jac = jacobian(_forward, sub_x, vectorize=vectorize)
    # Sum |J| over the output and feature dimensions -> one score per
    # sub-graph node.
    per_node_l1 = jac.abs().sum(dim=(0, 2))

    # Scatter the sub-graph scores back to the *global* node space.
    total_nodes = cast(int, data.num_nodes)
    influence_full = torch.zeros(total_nodes, dtype=per_node_l1.dtype,
                                 device=target_device)
    influence_full[sub_nodes] = per_node_l1
    return influence_full
def jacobian_l1_agg_per_hop(
    model: torch.nn.Module,
    data: Data,
    max_hops: int,
    node_idx: int,
    device: Union[torch.device, str],
    vectorize: bool = True,
) -> Tensor:
    """Aggregate Jacobian L1 norms **per hop** for ``node_idx``.

    Returns a vector ``[I_0, I_1, …, I_k]`` where ``I_i`` is the *total*
    influence exerted by nodes that are exactly *i* hops away from
    ``node_idx``.
    """
    num_nodes = cast(int, data.num_nodes)
    edge_index = cast(Tensor, data.edge_index)

    per_node = jacobian_l1(model, data, max_hops, node_idx, device,
                           vectorize=vectorize)
    # Disjoint hop subsets let us sum each node's score exactly once.
    hop_subsets = k_hop_subsets_exact(node_idx, max_hops, edge_index,
                                      num_nodes, per_node.device)
    hop_totals = [per_node[subset].sum() for subset in hop_subsets]
    return torch.tensor(hop_totals, device=per_node.device)
def avg_total_influence(
    influence_all_nodes: Tensor,
    normalize: bool = True,
) -> Tensor:
    """Average the per-hop influence vectors over all sampled nodes.

    Args:
        influence_all_nodes: Influence matrix of shape ``[N, k+1]``; the
            i-th row holds the per-hop influences of sampled node *i*.
        normalize: If ``True``, divide by the hop-0 entry (the Jacobian
            of the center node w.r.t. its own features) so that the
            result starts at 1.

    Returns:
        A tensor of shape ``[k+1]`` containing the (optionally
        normalized) hop-wise mean influence.
    """
    avg_total_influences = torch.mean(influence_all_nodes, dim=0)
    if normalize:  # normalize by hop_0 (jacobian of the center node feature)
        avg_total_influences = avg_total_influences / avg_total_influences[0]
    return avg_total_influences
def influence_weighted_receptive_field(T: Tensor) -> float:
    """Compute the *influence-weighted receptive field* ``R``.

    Given an influence matrix ``T`` of shape ``[N, k+1]`` (i-th row
    contains the per-hop influences of node *i*), the receptive field
    breadth *R* is defined as the expected hop distance when weighting
    by (row-normalized) influence. A larger *R* indicates that, on
    average, influence comes from **farther** hops.
    """
    normalised = T / torch.sum(T, dim=1, keepdim=True)
    # Match the normalized matrix's device/dtype so this also works for
    # CUDA or float64 inputs (arange(...).float() is always CPU/float32).
    hops = torch.arange(T.shape[1], dtype=normalised.dtype,
                        device=normalised.device)  # 0 … k
    breadth = normalised @ hops  # shape (N,)
    return breadth.mean().item()
def total_influence(
    model: torch.nn.Module,
    data: Data,
    max_hops: int,
    num_samples: Union[int, None] = None,
    normalize: bool = True,
    average: bool = True,
    device: Union[torch.device, str] = "cpu",
    vectorize: bool = True,
) -> Tuple[Tensor, float]:
    r"""Compute Jacobian‑based influence aggregates for *multiple* seed nodes,
    as introduced in the
    `"Towards Quantifying Long-Range Interactions in Graph Machine Learning:
    a Large Graph Dataset and a Measurement"
    <https://arxiv.org/abs/2503.09008>`_ paper.

    This measurement quantifies how a GNN model's output at a node is
    influenced by features of other nodes at increasing hop distances.
    Specifically, for every sampled node :math:`v`, this method

    1. evaluates the **L1‑norm** of the Jacobian of the model output at
       :math:`v` w.r.t. the node features of its *k*-hop induced sub‑graph;
    2. sums these scores **per hop** to obtain the influence vector
       :math:`(I_{0}, I_{1}, \dots, I_{k})`;
    3. optionally averages those vectors over all sampled nodes and
       optionally normalises them by :math:`I_{0}`.

    Please refer to Section 4 of the paper for a more detailed definition.

    Args:
        model (torch.nn.Module): A PyTorch Geometric‑compatible model with
            forward signature ``model(x, edge_index) -> Tensor``.
        data (torch_geometric.data.Data): Graph data object providing at least
            :obj:`x` (node features) and :obj:`edge_index` (connectivity).
        max_hops (int): Maximum hop distance :math:`k`.
        num_samples (int, optional): Number of random seed nodes to evaluate.
            If :obj:`None`, all nodes are used. (default: :obj:`None`)
        normalize (bool, optional): If :obj:`True`, normalize each hop‑wise
            influence by the influence of hop 0. (default: :obj:`True`)
        average (bool, optional): If :obj:`True`, return the hop‑wise **mean**
            over all seed nodes (shape ``[k+1]``).
            If :obj:`False`, return the full influence matrix of shape
            ``[N, k+1]``. (default: :obj:`True`)
        device (torch.device or str, optional): Device on which to perform the
            computation. (default: :obj:`"cpu"`)
        vectorize (bool, optional): Forwarded to
            :func:`torch.autograd.functional.jacobian`. Keeping this
            :obj:`True` is often faster but increases memory usage.
            (default: :obj:`True`)

    Returns:
        Tuple[Tensor, float]:
            * **avg_influence** (*Tensor*):
              shape ``[k+1]`` if :obj:`average=True`;
              shape ``[N, k+1]`` otherwise.
            * **R** (*float*): Influence‑weighted receptive‑field breadth
              returned by :func:`influence_weighted_receptive_field`.

    Example::
        >>> avg_I, R = total_influence(model, data, max_hops=3,
        ...                            num_samples=1000)
        >>> avg_I
        tensor([1.0000, 0.1273, 0.0142, 0.0019])
        >>> R
        0.216
    """
    # Fall back to evaluating every node when no sample budget is given.
    num_samples = data.num_nodes if num_samples is None else num_samples
    num_nodes = cast(int, data.num_nodes)
    # Draw seed nodes uniformly at random, without replacement.
    nodes = torch.randperm(num_nodes)[:num_samples].tolist()

    # One per-hop influence vector [I_0, ..., I_k] per sampled seed node.
    influence_all_nodes: List[Tensor] = [
        jacobian_l1_agg_per_hop(model, data, max_hops, n, device,
                                vectorize=vectorize)
        for n in tqdm(nodes, desc="Influence")
    ]
    allnodes = torch.vstack(influence_all_nodes).detach().cpu()

    # Average total influence at each hop
    if average:
        avg_influence = avg_total_influence(allnodes, normalize=normalize)
    else:
        avg_influence = allnodes

    # Influence‑weighted receptive field
    # NOTE: R is computed from the raw per-node matrix, independent of
    # the ``normalize``/``average`` flags.
    R = influence_weighted_receptive_field(allnodes)
    return avg_influence, R
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/utils/influence.py",
"license": "MIT License",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
pyg-team/pytorch_geometric:test/datasets/test_medshapenet.py | import torch
from torch_geometric.data import Data
from torch_geometric.datasets import MedShapeNet
from torch_geometric.testing import withPackage
@withPackage('MedShapeNet')
def test_medshapenet():
    """Download one shape per class and sanity-check the first sample."""
    dataset = MedShapeNet(root="./data/MedShapeNet", size=1)
    assert str(dataset) == f'MedShapeNet({len(dataset)})'
    assert dataset.num_classes == 8

    sample = dataset[0]
    assert isinstance(sample, Data)
    assert isinstance(sample.pos, torch.Tensor)
    assert len(sample.pos) > 0
    assert isinstance(sample.face, torch.Tensor)
    assert len(sample.face) == 3  # triangular faces: 3 index rows
    assert isinstance(sample.y, torch.Tensor)
    assert len(sample.y) == 1  # one class label per mesh
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "test/datasets/test_medshapenet.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pyg-team/pytorch_geometric:torch_geometric/datasets/medshapenet.py | import os
import os.path as osp
from typing import Callable, List, Optional
import numpy as np
import torch
from torch_geometric.data import Data, InMemoryDataset
class MedShapeNet(InMemoryDataset):
    r"""The MedShapeNet datasets from the `"MedShapeNet -- A Large-Scale
    Dataset of 3D Medical Shapes for Computer Vision"
    <https://arxiv.org/abs/2308.16139>`_ paper,
    containing 8 different types of structures (classes).

    .. note::
        Data objects hold mesh faces instead of edge indices.
        To convert the mesh to a graph, use the
        :obj:`torch_geometric.transforms.FaceToEdge` as :obj:`pre_transform`.
        To convert the mesh to a point cloud, use the
        :obj:`torch_geometric.transforms.SamplePoints` as :obj:`transform` to
        sample a fixed number of points on the mesh faces according to their
        face area.

    Args:
        root (str): Root directory where the dataset should be saved.
        size (int): Number of individual 3D structures to download per
            type (class). (default: :obj:`100`)
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        pre_filter (callable, optional): A function that takes in an
            :obj:`torch_geometric.data.Data` object and returns a boolean
            value, indicating whether the data object should be included in the
            final dataset. (default: :obj:`None`)
        force_reload (bool, optional): Whether to re-process the dataset.
            (default: :obj:`False`)
    """
    def __init__(
        self,
        root: str,
        size: int = 100,
        transform: Optional[Callable] = None,
        pre_transform: Optional[Callable] = None,
        pre_filter: Optional[Callable] = None,
        force_reload: bool = False,
    ) -> None:
        # Number of .stl shapes fetched per class; consumed by `process()`.
        self.size = size
        super().__init__(root, transform, pre_transform, pre_filter,
                         force_reload=force_reload)
        path = self.processed_paths[0]
        self.load(path)

    @property
    def raw_file_names(self) -> List[str]:
        # One directory per anatomical structure (class).
        return [
            '3DTeethSeg', 'CoronaryArteries', 'FLARE', 'KITS', 'PULMONARY',
            'SurgicalInstruments', 'ThoracicAorta_Saitta', 'ToothFairy'
        ]

    @property
    def processed_file_names(self) -> List[str]:
        # Single collated file produced by `process()`.
        return ['dataset.pt']

    @property
    def raw_paths(self) -> List[str]:
        r"""The absolute filepaths that must be present in order to skip
        downloading.
        """
        return [osp.join(self.raw_dir, f) for f in self.raw_file_names]

    def process(self) -> None:
        # Downloads happen here (instead of `download()`), via the
        # `MedShapeNet` client package.
        import urllib3
        from MedShapeNet import MedShapeNet as msn
        msn_instance = msn(timeout=120)
        # NOTE(review): this pool object is created but never used; it looks
        # like an attempt to enlarge the connection pool for the downloads
        # below — confirm whether it actually has any effect.
        urllib3.HTTPConnectionPool("medshapenet.ddns.net", maxsize=50)
        # All available remote buckets, minus the ones not used by this
        # dataset.
        list_of_datasets = msn_instance.datasets(False)
        list_of_datasets = list(
            filter(
                lambda x: x not in [
                    'medshapenetcore/ASOCA', 'medshapenetcore/AVT',
                    'medshapenetcore/AutoImplantCraniotomy',
                    'medshapenetcore/FaceVR'
                ], list_of_datasets))
        subset = []
        for dataset in list_of_datasets:
            # Bucket names look like 'medshapenetcore/<ClassName>'; the class
            # name becomes the local download directory.
            parts = dataset.split("/")
            self.newpath = self.root + '/' + parts[1 if len(parts) > 1 else 0]
            if not os.path.exists(self.newpath):
                os.makedirs(self.newpath)
            # Fetch the first `self.size` meshes per class; the client stores
            # them locally as .npz archives of vertices/faces.
            stl_files = msn_instance.dataset_files(dataset, '.stl')
            subset.extend(stl_files[:self.size])
            for stl_file in stl_files[:self.size]:
                msn_instance.download_stl_as_numpy(bucket_name=dataset,
                                                   stl_file=stl_file,
                                                   output_dir=self.newpath,
                                                   print_output=False)
        # Map class (directory) names to integer labels, matching
        # `raw_file_names` order.
        class_mapping = {
            '3DTeethSeg': 0,
            'CoronaryArteries': 1,
            'FLARE': 2,
            'KITS': 3,
            'PULMONARY': 4,
            'SurgicalInstruments': 5,
            'ThoracicAorta_Saitta': 6,
            'ToothFairy': 7
        }
        # There is exactly one processed path; `zip([subset], ...)` pairs the
        # single file list with it.
        for dataset, path in zip([subset], self.processed_paths):
            data_list = []
            for item in dataset:
                # Items look like '<ClassName>/<mesh>.stl'.
                class_name = item.split("/")[0]
                # Swap the '.stl' suffix for '.npz' (the downloaded format).
                item = item.split("stl")[0]
                target = class_mapping[class_name]
                file = osp.join(self.root, item + 'npz')
                data = np.load(file)
                pre_data_list = Data(
                    pos=torch.tensor(data["vertices"], dtype=torch.float),
                    face=torch.tensor(data["faces"],
                                      dtype=torch.long).t().contiguous())
                pre_data_list.y = torch.tensor([target], dtype=torch.long)
                data_list.append(pre_data_list)
            if self.pre_filter is not None:
                data_list = [d for d in data_list if self.pre_filter(d)]
            if self.pre_transform is not None:
                data_list = [self.pre_transform(d) for d in data_list]
            self.save(data_list, path)
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/datasets/medshapenet.py",
"license": "MIT License",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:examples/rdl.py | """This example demonstrates how to train a Relational Deep Learning model
using RelBench.
Please refer to:
1. https://arxiv.org/abs/2407.20060 for RelBench, and
2. https://github.com/snap-stanford/relbench for reproducing the results
   reported in the RelBench paper.
"""
import argparse
import math
import operator
import os
from typing import Any, Dict, List, NamedTuple, Optional, Tuple
import numpy as np
import pandas as pd
import torch
import torch_frame
from relbench.base import EntityTask, Table, TaskType
from relbench.datasets import get_dataset, get_dataset_names
from relbench.modeling.graph import make_pkey_fkey_graph
from relbench.modeling.utils import get_stype_proposal
from relbench.tasks import get_task, get_task_names
from sentence_transformers import SentenceTransformer
from torch import Tensor
from torch_frame.config.text_embedder import TextEmbedderConfig
from torch_frame.data.stats import StatType
from torch_frame.nn.models import ResNet
from tqdm import tqdm
from torch_geometric.data import HeteroData
from torch_geometric.loader import NeighborLoader
from torch_geometric.nn import (
MLP,
HeteroConv,
LayerNorm,
PositionalEncoding,
SAGEConv,
)
from torch_geometric.seed import seed_everything
from torch_geometric.typing import EdgeType, NodeType
class GloveTextEmbedding:
    """Sentence embedder backed by averaged 300-dim GloVe vectors via
    :class:`SentenceTransformer`.
    """
    def __init__(self, device: Optional[torch.device] = None) -> None:
        # Pre-trained mean-pooled GloVe model from the sentence-transformers
        # model hub.
        model_name = "sentence-transformers/average_word_embeddings_glove.6B.300d"
        self.model = SentenceTransformer(model_name, device=device)

    def __call__(self, sentences: List[str]) -> Tensor:
        # `encode` returns a numpy array of shape [num_sentences, 300].
        embeddings = self.model.encode(sentences)
        return torch.from_numpy(embeddings)
class HeteroEncoder(torch.nn.Module):
    r"""Encode per-node-type :class:`torch_frame.TensorFrame` features.

    One :class:`~torch_frame.nn.models.ResNet` is instantiated per node
    type, equipped with a semantic-type encoder for every stype that may
    appear in its columns (categorical, numerical, multicategorical,
    embedding, timestamp).

    Args:
        channels: Output embedding size for every node type.
        num_layers: Depth of each per-type ResNet.
        col_names_dict: Node type -> PyTorch Frame column-name dictionary.
        stats_dict: Node type -> per-column statistics used for feature
            normalization and encoding.
    """
    def __init__(
        self,
        channels: int,
        num_layers: int,
        col_names_dict: Dict[NodeType, Dict[torch_frame.stype, List[str]]],
        stats_dict: Dict[NodeType, Dict[str, Dict[StatType, Any]]],
    ) -> None:
        super().__init__()
        self.encoders = torch.nn.ModuleDict()
        for node_type in col_names_dict:
            # Fresh stype-encoder instances per node type (not shared).
            stype_encoder_dict = {
                torch_frame.categorical:
                torch_frame.nn.EmbeddingEncoder(),
                torch_frame.numerical:
                torch_frame.nn.LinearEncoder(),
                torch_frame.multicategorical:
                torch_frame.nn.MultiCategoricalEmbeddingEncoder(),
                torch_frame.embedding:
                torch_frame.nn.LinearEmbeddingEncoder(),
                torch_frame.timestamp:
                torch_frame.nn.TimestampEncoder(),
            }
            self.encoders[node_type] = ResNet(
                channels=channels,
                num_layers=num_layers,
                out_channels=channels,
                col_stats=stats_dict[node_type],
                col_names_dict=col_names_dict[node_type],
                stype_encoder_dict=stype_encoder_dict,
            )

    def reset_parameters(self) -> None:
        """Reset every per-type encoder model."""
        for encoder in self.encoders.values():
            encoder.reset_parameters()

    def forward(
        self,
        tf_dict: Dict[NodeType, torch_frame.TensorFrame],
    ) -> Dict[NodeType, Tensor]:
        """Encode each node type's TensorFrame into a
        ``[num_nodes, channels]`` tensor.
        """
        out_dict: Dict[NodeType, Tensor] = {}
        for node_type, tf in tf_dict.items():
            out_dict[node_type] = self.encoders[node_type](tf)
        return out_dict
class HeteroTemporalEncoder(torch.nn.Module):
    """Embed node timestamps relative to a seed time.

    For every node type the difference between the seed node's timestamp
    and the node's own timestamp is converted from seconds to days, passed
    through a :class:`PositionalEncoding`, and projected with a linear
    layer.

    Args:
        node_types: Node types that carry temporal information.
        channels: Dimensionality of the produced embeddings.

    Example:
        >>> encoder = HeteroTemporalEncoder(['user', 'item'], channels=64)
        >>> seed_time = torch.tensor([1000])  # Reference timestamp
        >>> time_dict = {'user': torch.tensor([800, 900]),
        >>>              'item': torch.tensor([700, 850])}
        >>> batch_dict = {'user': torch.tensor([0, 0]),
        >>>               'item': torch.tensor([0, 0])}
        >>> encoder(seed_time, time_dict, batch_dict)['user'].shape
        torch.Size([2, 64])
    """
    def __init__(self, node_types: List[NodeType], channels: int) -> None:
        super().__init__()
        self.encoder_dict = torch.nn.ModuleDict({
            node_type: PositionalEncoding(channels)
            for node_type in node_types
        })
        self.lin_dict = torch.nn.ModuleDict({
            node_type: torch.nn.Linear(channels, channels)
            for node_type in node_types
        })

    def reset_parameters(self) -> None:
        """Reset all positional encoders and linear projections."""
        for module in self.encoder_dict.values():
            module.reset_parameters()
        for module in self.lin_dict.values():
            module.reset_parameters()

    def forward(
        self,
        seed_time: Tensor,
        time_dict: Dict[NodeType, Tensor],
        batch_dict: Dict[NodeType, Tensor],
    ) -> Dict[NodeType, Tensor]:
        """Return one temporal embedding tensor per node type.

        Args:
            seed_time: Reference timestamps, one per seed node.
            time_dict: Node type -> node timestamps (seconds).
            batch_dict: Node type -> index of each node's seed node.
        """
        seconds_per_day = 60 * 60 * 24
        out_dict: Dict[NodeType, Tensor] = {}
        for node_type, timestamp in time_dict.items():
            # Relative time of each node w.r.t. its seed, in days.
            delta = seed_time[batch_dict[node_type]] - timestamp
            delta = delta / seconds_per_day
            out_dict[node_type] = self.lin_dict[node_type](
                self.encoder_dict[node_type](delta))
        return out_dict
class HeteroGraphSAGE(torch.nn.Module):
    """GraphSAGE for heterogeneous graphs with per-node-type LayerNorm.

    Each of the ``num_layers`` rounds applies a :class:`HeteroConv` holding
    one :class:`SAGEConv` per edge type (summed across relations), followed
    by node-wise :class:`LayerNorm` and a ReLU.

    Args:
        node_types: Node types of the graph.
        edge_types: Edge types of the graph.
        channels: Feature dimensionality.
        aggr: Neighbor aggregation of each :class:`SAGEConv`.
        num_layers: Number of message-passing rounds.

    Example:
        >>> model = HeteroGraphSAGE(
        >>>     node_types=['user', 'item'],
        >>>     edge_types=[('user', 'rates', 'item')],
        >>>     channels=64)
        >>> out_dict = model(x_dict, edge_index_dict)
    """
    def __init__(
        self,
        node_types: List[NodeType],
        edge_types: List[EdgeType],
        channels: int,
        aggr: str = "mean",
        num_layers: int = 2,
    ) -> None:
        super().__init__()
        # Convs first, norms second — construction order is kept so that
        # parameter-initialization RNG consumption matches a fixed seed.
        self.convs = torch.nn.ModuleList()
        for _ in range(num_layers):
            conv_dict = {
                edge_type: SAGEConv((channels, channels), channels, aggr=aggr)
                for edge_type in edge_types
            }
            self.convs.append(HeteroConv(conv_dict, aggr="sum"))
        self.norms = torch.nn.ModuleList()
        for _ in range(num_layers):
            self.norms.append(
                torch.nn.ModuleDict({
                    node_type: LayerNorm(channels, mode="node")
                    for node_type in node_types
                }))

    def reset_parameters(self) -> None:
        """Reset all convolution and normalization layers."""
        for conv in self.convs:
            conv.reset_parameters()
        for norm_dict in self.norms:
            for norm in norm_dict.values():
                norm.reset_parameters()

    def forward(
        self,
        x_dict: Dict[NodeType, Tensor],
        edge_index_dict: Dict[NodeType, Tensor],
    ) -> Dict[NodeType, Tensor]:
        """Run all message-passing rounds and return updated features."""
        for conv, norm_dict in zip(self.convs, self.norms):
            x_dict = conv(x_dict, edge_index_dict)
            # Per-type normalization followed by ReLU.
            x_dict = {
                node_type: norm_dict[node_type](x).relu()
                for node_type, x in x_dict.items()
            }
        return x_dict
class Model(torch.nn.Module):
    """End-to-end heterogeneous temporal GNN.

    Four stages wired in sequence:

    1. :class:`HeteroEncoder` for tabular node features.
    2. :class:`HeteroTemporalEncoder` for seed-relative timestamps.
    3. :class:`HeteroGraphSAGE` for message passing.
    4. An :class:`MLP` head producing the final predictions.

    Args:
        node_types: Node types of the graph.
        edge_types: Edge types of the graph.
        col_names_dict: Node type -> column names/types dictionary.
        temporal_node_types: Node types that carry temporal features.
        col_stats_dict: Per-column statistics for feature encoding.
        num_layers: Number of GNN layers.
        channels: Hidden dimension size.
        out_channels: Output dimension size.
        aggr: Aggregation method for the GNN.
        norm: Normalization method for the MLP head.
    """
    def __init__(
        self,
        node_types: List[NodeType],
        edge_types: List[EdgeType],
        col_names_dict: Dict[NodeType, Dict[torch_frame.stype, List[str]]],
        temporal_node_types: List[NodeType],
        col_stats_dict: Dict[NodeType, Dict[str, Dict[StatType, Any]]],
        num_layers: int,
        channels: int,
        out_channels: int,
        aggr: str,
        norm: str,
    ) -> None:
        super().__init__()
        # Sub-modules are created in a fixed order so parameter
        # initialization is reproducible under a fixed seed.
        self.encoder = HeteroEncoder(
            channels=channels,
            num_layers=num_layers,
            col_names_dict=col_names_dict,
            stats_dict=col_stats_dict,
        )
        self.temporal_encoder = HeteroTemporalEncoder(
            node_types=temporal_node_types,
            channels=channels,
        )
        self.gnn = HeteroGraphSAGE(
            node_types=node_types,
            edge_types=edge_types,
            channels=channels,
            aggr=aggr,
            num_layers=num_layers,
        )
        self.head = MLP(
            channels,
            out_channels=out_channels,
            norm=norm,
            num_layers=1,
        )
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Reset all four sub-modules."""
        self.encoder.reset_parameters()
        self.temporal_encoder.reset_parameters()
        self.gnn.reset_parameters()
        self.head.reset_parameters()

    def forward(
        self,
        batch: HeteroData,
        entity_table: NodeType,
    ) -> Tensor:
        """Predict for the seed nodes of ``entity_table`` in ``batch``."""
        seed_time = batch[entity_table].seed_time
        # Encode tabular features, then shift temporal node types by their
        # seed-relative time embedding.
        x_dict = self.encoder(batch.tf_dict)
        time_emb_dict = self.temporal_encoder(
            seed_time,
            batch.time_dict,
            batch.batch_dict,
        )
        for node_type, time_emb in time_emb_dict.items():
            x_dict[node_type] = x_dict[node_type] + time_emb
        # Message passing, then apply the head to the seed nodes only
        # (seed nodes occupy the first rows of the entity table's matrix).
        x_dict = self.gnn(x_dict, batch.edge_index_dict)
        num_seeds = seed_time.size(0)
        return self.head(x_dict[entity_table][:num_seeds])
class AttachTargetTransform:
    r"""Attach training targets to a sampled mini-batch.

    With temporal neighbor sampling the same input node may appear in
    several disjoint subgraphs with different seed times — and therefore
    different labels — so labels cannot be stored on the graph object
    itself.  Instead, this transform looks them up via ``input_id`` right
    after each batch is created.
    """
    def __init__(self, entity: str, target: Tensor) -> None:
        self.entity = entity
        self.target = target

    def __call__(self, batch: HeteroData) -> HeteroData:
        store = batch[self.entity]
        store.y = self.target[store.input_id]
        return batch
class TrainingTableInput(NamedTuple):
    r"""Training table input for node prediction tasks.

    A container for organizing input data needed for node-level predictions.

    Attributes:
        nodes: Tuple of (node_type, indices_tensor) containing the node type
            identifier and Tensor of node IDs to predict on.
        time: Optional Tensor of timestamps for temporal sampling. Shape
            matches node indices. None if task is not temporal.
        target: Optional Tensor of ground truth labels/values. Shape matches
            node indices. None during inference.
        transform: Optional transform that attaches target labels to batches
            during training. Needed for temporal sampling where nodes can
            appear multiple times with different labels.
    """
    # (node type, node-id tensor) fed to NeighborLoader as `input_nodes`.
    nodes: Tuple[NodeType, Tensor]
    # Seed timestamps for temporal sampling; None for static tasks.
    time: Optional[Tensor]
    # Ground-truth labels; None at inference time.
    target: Optional[Tensor]
    # Per-batch label-attaching transform; set together with `target`.
    transform: Optional[AttachTargetTransform]
def get_task_type_params(
        task: EntityTask) -> Tuple[int, torch.nn.Module, str, bool]:
    r"""Return task-specific optimization parameters.

    Args:
        task: Task specification containing the task type.

    Returns:
        Tuple of ``(out_channels, loss_fn, tune_metric, higher_is_better)``.

    Raises:
        ValueError: If the task type is neither regression nor binary
            classification.
    """
    task_type = task.task_type
    if task_type == TaskType.REGRESSION:
        # Single regressed value; lower MAE is better.
        return 1, torch.nn.L1Loss(), "mae", False
    if task_type == TaskType.BINARY_CLASSIFICATION:
        # Single logit; higher ROC-AUC is better.
        return 1, torch.nn.BCEWithLogitsLoss(), "roc_auc", True
    raise ValueError(f"Unsupported task type: {task.task_type}")
def to_unix_time(ser: pd.Series) -> np.ndarray:
    r"""Convert a datetime series to UNIX timestamps in seconds.

    Generalizes the original second/nanosecond-only handling to any fixed
    :class:`numpy.datetime64` resolution (``s``, ``ms``, ``us``, ``ns``).

    Args:
        ser: Input pandas Series holding naive ``datetime64`` values.

    Returns:
        ``int64`` array of UNIX timestamps in seconds.

    Raises:
        AssertionError: If the series does not hold a numpy datetime64 dtype.
        KeyError: If the datetime64 resolution is coarser than seconds.
    """
    assert isinstance(ser.dtype, np.dtype) and ser.dtype.kind == "M", (
        f"Expected a naive datetime64 dtype, got {ser.dtype}")
    # Number of integer ticks per second for the series' resolution.
    unit, _ = np.datetime_data(ser.dtype)
    ticks_per_second = {"s": 1, "ms": 10**3, "us": 10**6, "ns": 10**9}[unit]
    # Floor division matches the original `//= 10**9` behavior, including
    # for pre-epoch (negative) timestamps.
    return ser.astype("int64").values // ticks_per_second
def get_train_table_input(
    split_table: Table,
    task: EntityTask,
) -> TrainingTableInput:
    r"""Build the loader input for one split of an entity-prediction task.

    Extracts from the split table: the entity node ids, the optional seed
    timestamps (for temporal sampling), and — when the split carries labels
    — the target tensor plus the transform that attaches those labels to
    each sampled batch.

    Args:
        split_table: Table with entity ids and, optionally, timestamps and
            target values.
        task: Task describing the entity table/column and target column.

    Returns:
        A :class:`TrainingTableInput` ready to configure a loader.
    """
    df = split_table.df
    nodes = torch.from_numpy(df[task.entity_col].astype(int).values)

    # Seed timestamps are present only for temporal tasks.
    time: Optional[Tensor] = None
    if split_table.time_col is not None:
        time = torch.from_numpy(to_unix_time(df[split_table.time_col]))

    # Targets (and the label-attaching transform) exist only for labelled
    # splits, i.e. during training/validation.
    target: Optional[Tensor] = None
    transform: Optional[AttachTargetTransform] = None
    if task.target_col in df:
        target = torch.from_numpy(df[task.target_col].values.astype(float))
        transform = AttachTargetTransform(task.entity_table, target)

    return TrainingTableInput(
        nodes=(task.entity_table, nodes),
        time=time,
        target=target,
        transform=transform,
    )
def train(
    model: Model,
    train_loader: NeighborLoader,
    task: EntityTask,
    optimizer: torch.optim.Optimizer,
    loss_fn: torch.nn.Module,
    device: torch.device,
) -> float:
    """Run one training epoch and return the mean per-example loss.

    Args:
        model: Model to optimize.
        train_loader: Loader yielding temporal mini-batches.
        task: Task providing the entity table name.
        optimizer: Optimizer stepping on every batch.
        loss_fn: Loss comparing predictions to ``batch[...].y``.
        device: Device to move batches to.

    Returns:
        Sum of per-example losses divided by the number of examples.
    """
    model.train()
    loss_accum = torch.zeros(1, device=device).squeeze_()
    count_accum = 0
    for batch in tqdm(train_loader):
        batch = batch.to(device)
        optimizer.zero_grad()
        pred = model(batch, task.entity_table)
        # Squeeze a single-output column to match 1-D targets.
        pred = pred.view(-1) if pred.size(1) == 1 else pred
        loss = loss_fn(pred, batch[task.entity_table].y.float())
        loss.backward()
        optimizer.step()
        # BUG FIX: detach before accumulating — accumulating the live
        # `loss` tensor kept every batch's autograd graph (and activations)
        # alive for the whole epoch, steadily growing memory usage.
        batch_size = pred.size(0)
        loss_accum += loss.detach() * batch_size
        count_accum += batch_size
    return loss_accum.item() / count_accum
@torch.no_grad()
def test(
    test_loader: NeighborLoader,
    model: Model,
    task: EntityTask,
    device: torch.device,
) -> np.ndarray:
    """Return model predictions over ``test_loader`` as one numpy array."""
    model.eval()
    pred_list = []
    for batch in tqdm(test_loader):
        batch = batch.to(device)
        pred = model(batch, task.entity_table)
        # Squeeze a single-output column into a 1-D vector.
        if pred.size(1) == 1:
            pred = pred.view(-1)
        pred_list.append(pred.detach().cpu())
    return torch.cat(pred_list, dim=0).numpy()
def main():
    """Train and evaluate a Relational Deep Learning model on a RelBench
    entity-prediction task (binary classification or regression).

    Pipeline: load dataset/task, materialize the relational database as a
    heterogeneous graph, train a GNN with temporal neighbor sampling,
    checkpoint on the validation metric, and report test metrics.
    """
    seed_everything(42)
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--dataset", type=str, default="rel-f1",
                        choices=get_dataset_names())
    parser.add_argument(
        "--task", type=str, default=None,
        help="See available tasks at https://relbench.stanford.edu/")
    parser.add_argument("--batch_size", type=int, default=512)
    parser.add_argument("--temporal_strategy", type=str, default="uniform",
                        choices=["uniform", "last"])
    # BUG FIX: `type=list` split a command-line value into single characters
    # (e.g. "64" -> ['6', '4']); accept one int per hop instead, e.g.
    # `--num_neighbors 64 64`. The default is unchanged.
    parser.add_argument("--num_neighbors", type=int, nargs="+",
                        default=[128, 128])
    parser.add_argument("--channels", type=int, default=128)
    parser.add_argument("--aggr", type=str, default="sum")
    parser.add_argument("--norm", type=str, default="batch_norm")
    parser.add_argument("--epochs", type=int, default=10)
    parser.add_argument("--lr", type=float, default=0.005)
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using device:", device)
    print("Loading dataset and task...")
    assert args.task in get_task_names(args.dataset), (
        f"Invalid --task '{args.task}' for --dataset '{args.dataset}'. "
        f"Available tasks: {get_task_names(args.dataset)}")
    dataset = get_dataset(name=args.dataset, download=True)
    task = get_task(
        dataset_name=args.dataset,
        task_name=args.task,
        download=True,
    )
    print(f"Task type: {task.task_type}")
    print(f"Target column: '{task.target_col}'")
    print(f"Entity table: '{task.entity_table}'")
    print("Getting column to stype dictionary...")
    db = dataset.get_db()
    col_to_stype_dict = get_stype_proposal(db)
    print("Column to stype dictionary: ", col_to_stype_dict)
    print("Defining text embedder...")
    text_embedder_cfg = TextEmbedderConfig(
        text_embedder=GloveTextEmbedding(device=device),
        batch_size=256,
    )
    # Transform the dataset into a HeteroData object with torch_frame features
    # See also:
    # https://github.com/snap-stanford/relbench/blob/v1.1.0/relbench/modeling/graph.py#L20-L111 # noqa: E501
    print("Transforming dataset into HeteroData object...")
    data, col_stats_dict = make_pkey_fkey_graph(
        db,
        col_to_stype_dict=col_to_stype_dict,  # specified column types
        text_embedder_cfg=text_embedder_cfg,  # our chosen text encoder
        cache_dir=os.path.join(  # store materialized graph for convenience
            "./data",
            f"{args.dataset}_{args.task}_materialized_cache",
        ),
    )
    print("Preparing data loaders...")
    loader_dict = {}
    # Same fan-out for every edge type; one entry per hop.
    num_neighbors_dict = {
        edge_type: args.num_neighbors
        for edge_type in data.edge_types
    }
    for split in ["train", "val", "test"]:
        table = task.get_table(split)
        print(f"Creating '{split}' dataloader with columns: "
              f"{list(table.df.columns)}")
        table_input = get_train_table_input(split_table=table, task=task)
        loader_dict[split] = NeighborLoader(
            data=data,
            num_neighbors=num_neighbors_dict,
            input_nodes=table_input.nodes,
            input_time=table_input.time,
            time_attr="time",
            transform=table_input.transform,
            batch_size=args.batch_size,
            temporal_strategy=args.temporal_strategy,
            shuffle=split == "train",
            num_workers=4,
            persistent_workers=True,
        )
    print("Getting task-specific parameters...")
    out_channels, loss_fn, tune_metric, higher_is_better = \
        get_task_type_params(task)
    print("out_channels: ", out_channels)
    print("loss_fn: ", loss_fn)
    print("tune_metric: ", tune_metric)
    print("higher_is_better: ", higher_is_better)
    print("Initializing the model...")
    col_names_dict = {
        node_type: data[node_type].tf.col_names_dict
        for node_type in data.node_types
    }
    temporal_node_types = [
        node_type for node_type in data.node_types if "time" in data[node_type]
    ]
    model = Model(
        node_types=data.node_types,  # Include all node types
        edge_types=data.edge_types,  # Include all edge types
        col_names_dict=col_names_dict,
        col_stats_dict=col_stats_dict,
        temporal_node_types=temporal_node_types,
        num_layers=len(args.num_neighbors),
        channels=args.channels,
        out_channels=out_channels,
        aggr=args.aggr,
        norm=args.norm,
    ).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    print("Training the model...")
    best_val_metric = -math.inf if higher_is_better else math.inf
    for epoch in range(1, args.epochs + 1):
        train_loss = train(
            model=model,
            train_loader=loader_dict["train"],
            task=task,
            optimizer=optimizer,
            loss_fn=loss_fn,
            device=device,
        )
        val_pred = test(
            test_loader=loader_dict["val"],
            model=model,
            task=task,
            device=device,
        )
        val_metrics = task.evaluate(val_pred, task.get_table("val"))
        print(
            f"Epoch: {epoch:02d}, "
            f"train_loss: {train_loss:.4f}, "
            f"{', '.join([f'val_{k}: {v:.4f}' for k, v in val_metrics.items()])}"  # noqa: E501
        )
        # Checkpoint whenever the validation tuning metric improves.
        is_better_op = operator.gt if higher_is_better else operator.lt
        if is_better_op(val_metrics[tune_metric], best_val_metric):
            best_val_metric = val_metrics[tune_metric]
            torch.save(model.state_dict(), "best_model.pt")
    print("Testing the best model...")
    model.load_state_dict(torch.load("best_model.pt"))
    test_pred = test(
        test_loader=loader_dict["test"],
        model=model,
        task=task,
        device=device,
    )
    test_metrics = task.evaluate(test_pred)
    print(
        f"{', '.join([f'test_{k}: {v:.4f}' for k, v in test_metrics.items()])}"
    )


if __name__ == "__main__":
    main()
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "examples/rdl.py",
"license": "MIT License",
"lines": 631,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pyg-team/pytorch_geometric:torch_geometric/datasets/city.py | import os.path as osp
from typing import Callable, Optional
from torch_geometric.data import (
Data,
InMemoryDataset,
download_url,
extract_tar,
)
from torch_geometric.io import fs
class CityNetwork(InMemoryDataset):
    r"""The City-Networks are introduced in
    `"Towards Quantifying Long-Range Interactions in Graph Machine Learning:
    a Large Graph Dataset and a Measurement"
    <https://arxiv.org/abs/2503.09008>`_ paper.
    The dataset contains four city networks: `paris`, `shanghai`, `la`,
    and `london`, where nodes represent junctions and edges represent
    undirected road segments. The task is to predict each node's eccentricity
    score, which is approximated based on its 16-hop neighborhood and naturally
    requires long-range information. The score indicates how accessible one
    node is in the network, and is mapped to 10 quantiles for transductive
    classification. See the original
    `source code <https://github.com/LeonResearch/City-Networks>`_ for more
    details on the individual networks.

    Args:
        root (str): Root directory where the dataset should be saved.
        name (str): The name of the dataset (``"paris"``, ``"shanghai"``,
            ``"la"``, ``"london"``).
        augmented (bool, optional): Whether to use the augmented node features
            from edge features. (default: :obj:`True`)
        transform (callable, optional): A function/transform that takes in an
            :class:`~torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            every access. (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :class:`~torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        force_reload (bool, optional): Whether to re-process the dataset.
            (default: :obj:`False`)
        delete_raw (bool, optional): Whether to delete the extracted raw
            data directory after processing. (default: :obj:`False`)

    **STATS:**

    .. list-table::
        :widths: 10 10 10 10 10
        :header-rows: 1

        * - Name
          - #nodes
          - #edges
          - #features
          - #classes
        * - paris
          - 114,127
          - 182,511
          - 37
          - 10
        * - shanghai
          - 183,917
          - 262,092
          - 37
          - 10
        * - la
          - 240,587
          - 341,523
          - 37
          - 10
        * - london
          - 568,795
          - 756,502
          - 37
          - 10
    """
    # Per-city tarballs are fetched from the authors' GitHub repository.
    url = "https://github.com/LeonResearch/City-Networks/raw/refs/heads/main/data/"  # noqa: E501

    def __init__(
        self,
        root: str,
        name: str,
        augmented: bool = True,
        transform: Optional[Callable] = None,
        pre_transform: Optional[Callable] = None,
        force_reload: bool = False,
        delete_raw: bool = False,
    ) -> None:
        self.name = name.lower()
        assert self.name in ["paris", "shanghai", "la", "london"]
        self.augmented = augmented
        self.delete_raw = delete_raw
        super().__init__(
            root,
            transform,
            pre_transform,
            force_reload=force_reload,
        )
        self.load(self.processed_paths[0])

    @property
    def raw_dir(self) -> str:
        # e.g. <root>/london/raw
        return osp.join(self.root, self.name, "raw")

    @property
    def processed_dir(self) -> str:
        # e.g. <root>/london/processed
        return osp.join(self.root, self.name, "processed")

    @property
    def raw_file_names(self) -> str:
        return f"{self.name}.json"

    @property
    def processed_file_names(self) -> str:
        return "data.pt"

    def download(self) -> None:
        # Remember the tarball location for `process()` to extract.
        self.download_path = download_url(
            self.url + f"{self.name}.tar.gz",
            self.raw_dir,
        )

    def process(self) -> None:
        # NOTE(review): `self.download_path` is only set inside `download()`;
        # if the raw files already exist and the download step is skipped,
        # this line raises AttributeError — TODO confirm against the base
        # Dataset's download/process flow.
        extract_tar(self.download_path, self.raw_dir)
        data_path = osp.join(self.raw_dir, self.name)
        # Augmented features additionally fold edge attributes into nodes.
        node_feat = fs.torch_load(
            osp.join(
                data_path,
                f"node_features{'_augmented' if self.augmented else ''}.pt",
            ))
        edge_index = fs.torch_load(osp.join(data_path, "edge_indices.pt"))
        # Labels: 16-hop approximated eccentricity mapped to 10 quantiles.
        label = fs.torch_load(
            osp.join(data_path, "10-chunk_16-hop_node_labels.pt"))
        train_mask = fs.torch_load(osp.join(data_path, "train_mask.pt"))
        val_mask = fs.torch_load(osp.join(data_path, "valid_mask.pt"))
        test_mask = fs.torch_load(osp.join(data_path, "test_mask.pt"))
        data = Data(
            x=node_feat,
            edge_index=edge_index,
            y=label,
            train_mask=train_mask,
            val_mask=val_mask,
            test_mask=test_mask,
        )
        if self.pre_transform is not None:
            data = self.pre_transform(data)
        self.save([data], self.processed_paths[0])
        # Optionally reclaim disk space once the processed file is saved.
        if self.delete_raw:
            fs.rm(data_path)

    def __repr__(self) -> str:
        return (f"{self.__class__.__name__}("
                f"root='{self.root}', "
                f"name='{self.name}', "
                f"augmented={self.augmented})")
| {
"repo_id": "pyg-team/pytorch_geometric",
"file_path": "torch_geometric/datasets/city.py",
"license": "MIT License",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
pypa/pipenv:tests/unit/test_install_error_context.py | import queue
import pytest
def test_install_error_formats_list():
    """
    If `InstallError` receives a list/tuple of deps, it should format it into
    a readable comma-separated string instead of dumping the Python repr.
    """
    from pipenv.exceptions import InstallError
    exc = InstallError(["requests==2.32.0", "flask==3.0.0"])
    text = str(exc)
    # Each dep appears verbatim, alongside the generic failure prefix.
    assert "requests==2.32.0" in text
    assert "flask==3.0.0" in text
    assert "Couldn't install package" in text
def test_cleanup_procs_raises_install_error_with_deps():
    """
    _cleanup_procs() should include the deps context from the subprocess
    result in the raised `InstallError` message when the pip subprocess
    fails.
    """
    from pipenv.exceptions import InstallError
    from pipenv.routines.install import _cleanup_procs
    # Minimal project stub: _cleanup_procs only consults verbosity settings.
    class _Settings:
        def is_verbose(self):
            return False
        def is_quiet(self):
            return False
    class _Project:
        s = _Settings()
    # Stub of a failed pip subprocess carrying the deps it was installing.
    class _DummyProc:
        def __init__(self):
            self.returncode = 1
            self.stdout = "pip stdout error details"
            self.stderr = "pip stderr"
            self.deps = ["requests==0.0.0", "flask==3.0.0"]
        def communicate(self):
            return (self.stdout, self.stderr)
    procs = queue.Queue(maxsize=1)
    procs.put(_DummyProc())
    with pytest.raises(InstallError) as ctx:
        _cleanup_procs(_Project(), procs)
    # Both failing deps must surface in the error text.
    text = str(ctx.value)
    assert "requests==0.0.0" in text
    assert "flask==3.0.0" in text
def test_pip_install_deps_attaches_deps_to_subprocess(monkeypatch, tmp_path):
    """
    pip_install_deps() should attach the deps list to the returned subprocess
    result so error handling can display it.
    """
    from pipenv.utils import pip as pip_utils

    class _StubSettings:
        PIPENV_CACHE_DIR = str(tmp_path)
        PIP_EXISTS_ACTION = None

        def is_verbose(self):
            return False

    class _StubProject:
        s = _StubSettings()
        settings = {}
        virtualenv_src_location = str(tmp_path / "src")

    # Patch helpers used to build the pip command so this stays a pure unit test
    monkeypatch.setattr(pip_utils, "project_python", lambda project, system=False: "python")
    monkeypatch.setattr(pip_utils, "get_runnable_pip", lambda: "pip")
    monkeypatch.setattr(pip_utils, "get_pip_args", lambda *a, **k: [])
    monkeypatch.setattr(pip_utils, "prepare_pip_source_args", lambda sources: [])
    monkeypatch.setattr(pip_utils, "normalize_path", lambda p: p)

    class _StubProc:
        # Successful no-op subprocess stand-in returned instead of real pip.
        def __init__(self):
            self.returncode = 0
            self.stdout = ""
            self.stderr = ""

        def communicate(self):
            return (self.stdout, self.stderr)

    monkeypatch.setattr(
        pip_utils, "subprocess_run", lambda *args, **kwargs: _StubProc()
    )

    deps = ["requests==2.32.0"]
    results = pip_utils.pip_install_deps(
        project=_StubProject(),
        deps=deps,
        sources=[],
        allow_global=False,
        ignore_hashes=True,
        no_deps=True,
        requirements_dir=str(tmp_path),
        use_pep517=True,
        extra_pip_args=None,
    )

    assert results
    first = results[0]
    assert hasattr(first, "deps")
    assert first.deps == deps
| {
"repo_id": "pypa/pipenv",
"file_path": "tests/unit/test_install_error_context.py",
"license": "MIT License",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pypa/pipenv:pipenv/routines/audit.py | """
Audit command implementation using pip-audit for vulnerability scanning.
This module provides the preferred way to audit Python packages for known
security vulnerabilities using pip-audit, which queries the Python Packaging
Advisory Database (PyPI) or Open Source Vulnerabilities (OSV) database.
"""
import logging
import subprocess
import sys
from pipenv.utils import console, err
from pipenv.utils.processes import run_command
from pipenv.utils.project import ensure_project
from pipenv.utils.shell import project_python
def is_pip_audit_installed(project=None, system=False):
    """Return True when ``python -m pip_audit`` can be run, False otherwise."""
    # Probe the interpreter that would actually run the audit: the project's
    # python when a project is given, otherwise the current interpreter.
    interpreter = project_python(project, system=system) if project else sys.executable
    try:
        probe = subprocess.run(
            [interpreter, "-m", "pip_audit", "--version"],
            capture_output=True,
            text=True,
            check=False,
        )
    except Exception:
        # Failing to even launch the probe means pip-audit is unusable.
        return False
    return probe.returncode == 0
def install_pip_audit(project, system=False):
    """Prompt for and perform installation of pip-audit into the project.

    Returns True when pip-audit ends up installed, False when the user
    declines or the pip installation fails.
    """
    from pipenv.vendor import click

    python = project_python(project, system=system)
    console.print(
        "[yellow bold]pip-audit is required for vulnerability scanning but not installed.[/yellow bold]"
    )
    if not click.confirm(
        "Would you like to install pip-audit? This will not modify your Pipfile/lockfile.",
        default=True,
    ):
        console.print(
            "[yellow]Vulnerability scanning skipped. Install pip-audit with 'pip install pip-audit'[/yellow]"
        )
        return False

    console.print("[green]Installing pip-audit...[/green]")
    result = run_command(
        [python, "-m", "pip", "install", "pip-audit>=2.7.0", "--quiet"]
    )
    if result.returncode != 0:
        err.print(
            "[red]Failed to install pip-audit. Please install it manually with 'pip install pip-audit'[/red]"
        )
        return False

    console.print("[green]pip-audit installed successfully![/green]")
    return True
def build_audit_options(
    output="columns",
    strict=False,
    ignore=None,
    fix=False,
    dry_run=False,
    skip_editable=False,
    no_deps=False,
    local_only=False,
    vulnerability_service="pypi",
    descriptions=False,
    aliases=False,
    output_file=None,
    requirements_file=None,
    use_lockfile=False,
):
    """Translate keyword settings into a pip-audit argument list.

    The defaults ("columns" output, "pypi" service) match pip-audit's own
    defaults and therefore produce no flags.
    """
    options = []
    # Non-default output format and vulnerability service.
    if output and output != "columns":
        options += ["-f", output]
    if vulnerability_service and vulnerability_service != "pypi":
        options += ["-s", vulnerability_service]
    # Boolean switches, emitted in a fixed order.
    for enabled, flag in (
        (strict, "--strict"),
        (fix, "--fix"),
        (dry_run, "--dry-run"),
        (skip_editable, "--skip-editable"),
        (no_deps, "--no-deps"),
        (local_only, "--local"),
        (descriptions, "--desc"),
        (aliases, "--aliases"),
        (use_lockfile, "--locked"),  # audit pyproject.toml / pylock.toml
    ):
        if enabled:
            options.append(flag)
    # File arguments.
    if requirements_file:
        options += ["-r", requirements_file]
    if output_file:
        options += ["-o", output_file]
    # Specific vulnerability IDs to suppress.
    for vuln_id in ignore or ():
        options += ["--ignore-vuln", vuln_id]
    return options
def do_audit(  # noqa: PLR0913
    project,
    python=False,
    system=False,
    output="columns",
    quiet=False,
    verbose=False,
    strict=False,
    ignore=None,
    fix=False,
    dry_run=False,
    skip_editable=False,
    no_deps=False,
    local_only=False,
    vulnerability_service="pypi",
    descriptions=False,
    aliases=False,
    output_file=None,
    pypi_mirror=None,
    categories="",
    use_installed=False,
    use_lockfile=False,
):
    """Audit packages for known security vulnerabilities using pip-audit.
    This is the preferred method for vulnerability scanning in pipenv.
    It uses the Python Packaging Advisory Database (PyPI) or OSV database.
    Supports auditing from:
    - The current virtualenv (default)
    - pyproject.toml / pylock.toml files (with --locked flag)
    On the normal path this function does not return: it terminates the
    process via ``sys.exit`` with pip-audit's return code (or 1 if launching
    pip-audit raised). It returns early only when pip-audit is unavailable
    and could not be installed.
    """
    # NOTE(review): `categories` and `use_installed` are accepted but never
    # referenced in this body — presumably retained for CLI signature
    # compatibility; confirm against the caller.
    if not verbose:
        # Default to warnings only; with --quiet, show errors only.
        logging.getLogger("pipenv").setLevel(logging.ERROR if quiet else logging.WARN)
    if not system:
        # Make sure the virtualenv exists before probing/running pip-audit in it.
        ensure_project(
            project,
            python=python,
            validate=False,
            warn=False,
            pypi_mirror=pypi_mirror,
        )
    if not quiet and not project.s.is_quiet():
        if use_lockfile:
            console.print(
                "[bold]Auditing lockfile packages for vulnerabilities...[/bold]"
            )
        else:
            console.print("[bold]Auditing packages for vulnerabilities...[/bold]")
    # Check if pip-audit is installed
    if not is_pip_audit_installed(project, system=system):
        if not install_pip_audit(project, system=system):
            console.print("[yellow]Vulnerability audit aborted.[/yellow]")
            return
        # Check again after installation
        if not is_pip_audit_installed(project, system=system):
            err.print(
                "[red]pip-audit installation was reported successful but module not found. "
                "Please try again or install manually with 'pip install pip-audit'[/red]"
            )
            return
    # Build options for pip-audit
    options = build_audit_options(
        output=output,
        strict=strict,
        ignore=ignore,
        fix=fix,
        dry_run=dry_run,
        skip_editable=skip_editable,
        no_deps=no_deps,
        local_only=local_only,
        vulnerability_service=vulnerability_service,
        descriptions=descriptions,
        aliases=aliases,
        output_file=output_file,
        use_lockfile=use_lockfile,
    )
    # Build the command
    python_path = project_python(project, system=system)
    cmd = [python_path, "-m", "pip_audit"] + options
    # If using lockfile mode, add the project directory path
    if use_lockfile:
        cmd.append(project.project_directory)
    if not quiet and not project.s.is_quiet() and verbose:
        console.print(f"[dim]Running: {' '.join(cmd)}[/dim]")
    # Run pip-audit
    try:
        result = subprocess.run(
            cmd,
            capture_output=False,  # Let output go directly to terminal
            check=False,
        )
        sys.exit(result.returncode)
    except Exception as e:
        err.print(f"[red]Error running pip-audit: {str(e)}[/red]")
        sys.exit(1)
| {
"repo_id": "pypa/pipenv",
"file_path": "pipenv/routines/audit.py",
"license": "MIT License",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pypa/pipenv:tests/integration/test_uninstall_dev.py | import pytest
@pytest.mark.install
@pytest.mark.uninstall
def test_uninstall_dev_flag(pipenv_instance_private_pypi):
    """Ensure that running `pipenv uninstall --dev` properly removes packages from dev-packages.

    Checks the Pipfile, the lockfile, and actual importability of the packages
    before and after the uninstall.
    """
    with pipenv_instance_private_pypi() as p:
        with open(p.pipfile_path, "w") as f:
            contents = """
[packages]
six = "*"
[dev-packages]
pytest = "*"
            """.strip()
            f.write(contents)
        # Install both packages
        c = p.pipenv("install --dev")
        assert c.returncode == 0
        # Pipfile and lockfile both list the two packages in their sections.
        assert "six" in p.pipfile["packages"]
        assert "pytest" in p.pipfile["dev-packages"]
        assert "six" in p.lockfile["default"]
        assert "pytest" in p.lockfile["develop"]
        # Verify both packages are installed
        c = p.pipenv('run python -c "import six, pytest"')
        assert c.returncode == 0
        # Uninstall pytest with --dev flag
        c = p.pipenv("uninstall pytest --dev")
        assert c.returncode == 0
        # Verify pytest was removed from dev-packages
        assert "six" in p.pipfile["packages"]
        assert "pytest" not in p.pipfile["dev-packages"]
        assert "six" in p.lockfile["default"]
        assert "pytest" not in p.lockfile["develop"]
        # Verify pytest is no longer importable
        c = p.pipenv('run python -c "import pytest"')
        assert c.returncode != 0
        # Verify six is still importable
        c = p.pipenv('run python -c "import six"')
        assert c.returncode == 0
@pytest.mark.install
@pytest.mark.uninstall
def test_uninstall_dev_flag_with_categories(pipenv_instance_private_pypi):
    """Ensure that running `pipenv uninstall --dev` works the same as `--categories dev-packages`.

    Two identical projects are set up; one uninstalls via `--categories
    dev-packages`, the other via `--dev`, and the resulting Pipfiles and
    lockfiles must match.
    """
    with pipenv_instance_private_pypi() as p:
        with open(p.pipfile_path, "w") as f:
            contents = """
[packages]
six = "*"
[dev-packages]
pytest = "*"
            """.strip()
            f.write(contents)
        # Install both packages
        c = p.pipenv("install --dev")
        assert c.returncode == 0
        # Create a second project to test with categories
        with pipenv_instance_private_pypi() as p2:
            with open(p2.pipfile_path, "w") as f:
                contents = """
[packages]
six = "*"
[dev-packages]
pytest = "*"
            """.strip()
                f.write(contents)
            # Install both packages
            c = p2.pipenv("install --dev")
            assert c.returncode == 0
            # Uninstall pytest with --categories
            c = p2.pipenv("uninstall pytest --categories dev-packages")
            assert c.returncode == 0
            # Verify pytest was removed from dev-packages
            assert "six" in p2.pipfile["packages"]
            assert "pytest" not in p2.pipfile["dev-packages"]
            assert "six" in p2.lockfile["default"]
            assert "pytest" not in p2.lockfile["develop"]
            # Compare with first project
            c = p.pipenv("uninstall pytest --dev")
            assert c.returncode == 0
            # Verify both approaches have the same result
            assert p.pipfile["packages"] == p2.pipfile["packages"]
            assert p.pipfile["dev-packages"] == p2.pipfile["dev-packages"]
            assert p.lockfile["default"] == p2.lockfile["default"]
            assert p.lockfile["develop"] == p2.lockfile["develop"]
| {
"repo_id": "pypa/pipenv",
"file_path": "tests/integration/test_uninstall_dev.py",
"license": "MIT License",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pypa/pipenv:tests/integration/test_editable_vcs.py | import shutil
from pathlib import Path
import pytest
@pytest.mark.integration
@pytest.mark.install
@pytest.mark.editable
@pytest.mark.vcs
def test_editable_vcs_reinstall(pipenv_instance_private_pypi):
    """Test that editable VCS dependencies are reinstalled when the source checkout is missing.

    Installs an editable git dependency, deletes its src checkout, reruns
    `pipenv install`, and verifies the checkout and importability come back.
    """

    def _find_src_dir(project):
        # The editable src checkout may live in the project directory or in
        # the virtualenv directory; prefer the project one when it exists.
        src_dir_project = Path(project.path) / "src"
        venv_location = project.virtualenv_location
        src_dir_venv = Path(venv_location) / "src" if venv_location else None
        src_dir = src_dir_project if src_dir_project.exists() else src_dir_venv
        return src_dir, src_dir_project, src_dir_venv

    with pipenv_instance_private_pypi() as p:
        # Create a Pipfile with an editable VCS dependency
        with open(p.pipfile_path, "w") as f:
            f.write("""
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[packages]
gunicorn = {git = "https://github.com/benoitc/gunicorn", ref = "23.0.0", editable = true}
            """.strip())
        # Install the dependency
        c = p.pipenv("install")
        assert c.returncode == 0, f"Failed to install: {c.stderr}"
        # Verify the src directory was created in one of the candidate locations
        src_dir, src_dir_project, src_dir_venv = _find_src_dir(p)
        assert src_dir.exists(), f"src directory was not created in either {src_dir_project} or {src_dir_venv}"
        assert any(src_dir.iterdir()), "src directory is empty"
        # Import the package to verify it's installed correctly
        c = p.pipenv('run python -c "import gunicorn"')
        assert c.returncode == 0, f"Failed to import gunicorn: {c.stderr}"
        # Remove the src directory to simulate the issue
        # Note: shutil.rmtree is patched by the fixture to handle Windows read-only files
        shutil.rmtree(src_dir)
        assert not src_dir.exists(), "Failed to remove src directory"
        # Run pipenv install again to see if it reinstalls the dependency
        c = p.pipenv("install")
        assert c.returncode == 0, f"Failed to reinstall: {c.stderr}"
        # Verify the src directory was recreated
        src_dir, src_dir_project, src_dir_venv = _find_src_dir(p)
        assert src_dir.exists(), f"src directory was not recreated in either {src_dir_project} or {src_dir_venv}"
        assert any(src_dir.iterdir()), "recreated src directory is empty"
        # Import the package again to verify it's reinstalled correctly
        c = p.pipenv('run python -c "import gunicorn"')
        assert c.returncode == 0, f"Failed to import gunicorn after reinstall: {c.stderr}"
| {
"repo_id": "pypa/pipenv",
"file_path": "tests/integration/test_editable_vcs.py",
"license": "MIT License",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pypa/pipenv:pipenv/utils/pylock.py | """
PEP 751 pylock.toml file handling utilities.
This module provides functionality for reading and parsing pylock.toml files
as specified in PEP 751 (A file format to record Python dependencies for
installation reproducibility).
"""
import datetime
import json
import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Union
from pipenv.utils import err
from pipenv.utils.locking import atomic_open_for_write
from pipenv.vendor import tomlkit
class PylockError(Exception):
    """Base exception for pylock.toml related errors."""
class PylockVersionError(PylockError):
    """Raised when the lock-version is not supported."""
class PylockFormatError(PylockError):
    """Raised when the pylock.toml file format is invalid."""
@dataclass
class PylockFile:
"""Represents a pylock.toml file as specified in PEP 751."""
path: Path
data: Dict[str, Any] = field(default_factory=dict)
@classmethod
def from_lockfile(
cls,
lockfile_path: Union[str, Path],
pylock_path: Union[str, Path] = None,
dev_groups: Optional[List[str]] = None,
) -> "PylockFile":
"""Create a PylockFile from a Pipfile.lock file.
Args:
lockfile_path: Path to the Pipfile.lock file
pylock_path: Path to save the pylock.toml file, defaults to pylock.toml in the same directory
dev_groups: List of dependency group names for develop packages (default: ['dev'])
Returns:
A PylockFile instance
Raises:
FileNotFoundError: If the Pipfile.lock file doesn't exist
ValueError: If the Pipfile.lock file is invalid
"""
if isinstance(lockfile_path, str):
lockfile_path = Path(lockfile_path)
if not lockfile_path.exists():
raise FileNotFoundError(f"Pipfile.lock not found: {lockfile_path}")
if pylock_path is None:
pylock_path = lockfile_path.parent / "pylock.toml"
elif isinstance(pylock_path, str):
pylock_path = Path(pylock_path)
# Default dev groups
if dev_groups is None:
dev_groups = ["dev"]
try:
with open(lockfile_path, encoding="utf-8") as f:
lockfile_data = json.load(f)
except Exception as e:
raise ValueError(f"Invalid Pipfile.lock file: {e}")
# Create the basic pylock.toml structure
pylock_data = {
"lock-version": "1.0",
"environments": [],
"extras": [],
"dependency-groups": dev_groups.copy(), # Include dev groups
"default-groups": ["default"], # Default group for non-dev packages
"created-by": "pipenv",
"packages": [],
}
# Add Python version requirement if present
meta = lockfile_data.get("_meta", {})
requires = meta.get("requires", {})
if "python_version" in requires:
pylock_data["requires-python"] = f">={requires['python_version']}"
elif "python_full_version" in requires:
pylock_data["requires-python"] = f"=={requires['python_full_version']}"
# Ensure all values are properly formatted for TOML
# Convert None values to empty strings or arrays
for key in ["environments", "extras", "dependency-groups", "default-groups"]:
if key in pylock_data and pylock_data[key] is None:
pylock_data[key] = []
# Extract sources and build index URL map
sources = meta.get("sources", [])
index_url_map = {} # Map source name to URL
default_index = "https://pypi.org/simple/"
if sources:
for source in sources:
source_name = source.get("name", "")
source_url = source.get("url", "")
if source_name and source_url:
# Convert to simple API URL format
index_url_map[source_name] = source_url.rstrip("/") + "/"
# Use the first source as default
if sources and sources[0].get("url"):
default_index = sources[0]["url"].rstrip("/") + "/"
# Build dev marker expression: 'group1' in dependency_groups or 'group2' in ...
dev_marker = " or ".join(f"'{g}' in dependency_groups" for g in dev_groups)
# Process packages
for section in ["default", "develop"]:
packages = lockfile_data.get(section, {})
for name, package_data in packages.items():
package = {"name": name}
# Add version if present and not a wildcard
if "version" in package_data:
version = package_data["version"]
if version == "*":
# Skip wildcard versions - they don't belong in pylock.toml
pass
elif version.startswith("=="):
package["version"] = version[2:]
else:
package["version"] = version
# Add markers if present
# PEP 751 marker syntax: 'group' in dependency_groups
if "markers" in package_data:
# For develop packages, add dependency_groups marker
if section == "develop":
package["marker"] = (
f"({dev_marker}) and ({package_data['markers']})"
)
else:
package["marker"] = package_data["markers"]
elif section == "develop":
package["marker"] = dev_marker
# Add package index URL (PEP 751 packages.index)
# Use the index from package_data if specified, otherwise default
if "index" in package_data:
index_name = package_data["index"]
package["index"] = index_url_map.get(index_name, default_index)
else:
package["index"] = default_index
# Add hashes if present, with proper wheel/sdist structure
if "hashes" in package_data:
wheels = []
for hash_value in package_data["hashes"]:
if hash_value.startswith("sha256:"):
hash_hex = hash_value[7:] # Remove "sha256:" prefix
version_str = package.get("version", "0.0.0")
wheel_name = (
f"{name.replace('-', '_')}-{version_str}-py3-none-any.whl"
)
wheel = {
"name": wheel_name,
"url": f"{package['index']}{name}/{wheel_name}",
"hashes": {"sha256": hash_hex},
}
wheels.append(wheel)
if wheels:
package["wheels"] = wheels
pylock_data["packages"].append(package)
# Add tool.pipenv section with metadata
pylock_data["tool"] = {
"pipenv": {
"generated_from": "Pipfile.lock",
"generation_date": datetime.datetime.now(
datetime.timezone.utc
).isoformat(),
}
}
instance = cls(path=pylock_path, data=pylock_data)
return instance
@classmethod
def from_path(cls, path: Union[str, Path]) -> "PylockFile":
"""Load a pylock.toml file from the given path.
Args:
path: Path to the pylock.toml file
Returns:
A PylockFile instance
Raises:
FileNotFoundError: If the file doesn't exist
PylockFormatError: If the file is not a valid pylock.toml file
PylockVersionError: If the lock-version is not supported
"""
if isinstance(path, str):
path = Path(path)
if not path.exists():
raise FileNotFoundError(f"Pylock file not found: {path}")
try:
with open(path, encoding="utf-8") as f:
content = f.read()
data = tomlkit.parse(content)
# Convert tomlkit objects to Python native types
data_dict = {}
for key, value in data.items():
if isinstance(
value,
(tomlkit.items.Table, tomlkit.items.AoT, tomlkit.items.Array),
):
data_dict[key] = value.unwrap()
else:
data_dict[key] = value
except Exception as e:
raise PylockFormatError(f"Invalid pylock.toml file: {e}")
# Validate lock-version
lock_version = data_dict.get("lock-version")
if not lock_version:
raise PylockFormatError("Missing required field: lock-version")
# Currently, we only support version 1.0
if lock_version != "1.0":
raise PylockVersionError(
f"Unsupported lock-version: {lock_version}. Only version 1.0 is supported."
)
return cls(path=path, data=data_dict)
@classmethod
def from_pyproject(
cls,
pyproject_path: Union[str, Path],
pylock_path: Union[str, Path] = None,
) -> "PylockFile":
"""Create a PylockFile skeleton from a pyproject.toml file.
This reads the dependencies from pyproject.toml and creates a pylock.toml
structure. Note: This does NOT resolve dependencies - it only reads
the declared dependencies. Use a locker to resolve and populate full details.
Args:
pyproject_path: Path to the pyproject.toml file
pylock_path: Path to save the pylock.toml file
Returns:
A PylockFile instance with declared dependencies (unresolved)
Raises:
FileNotFoundError: If the pyproject.toml file doesn't exist
ValueError: If the pyproject.toml file is invalid
"""
if isinstance(pyproject_path, str):
pyproject_path = Path(pyproject_path)
if not pyproject_path.exists():
raise FileNotFoundError(f"pyproject.toml not found: {pyproject_path}")
if pylock_path is None:
pylock_path = pyproject_path.parent / "pylock.toml"
elif isinstance(pylock_path, str):
pylock_path = Path(pylock_path)
try:
with open(pyproject_path, encoding="utf-8") as f:
pyproject_data = tomlkit.parse(f.read())
except Exception as e:
raise ValueError(f"Invalid pyproject.toml file: {e}")
# Extract project metadata (PEP 621)
project = pyproject_data.get("project", {})
project_name = project.get("name", "")
requires_python = project.get("requires-python", "")
# Create the basic pylock.toml structure
pylock_data: Dict[str, Any] = {
"lock-version": "1.0",
"environments": [],
"created-by": "pipenv",
"packages": [],
}
if requires_python:
pylock_data["requires-python"] = requires_python
# Extract dependencies from [project.dependencies] (PEP 621)
dependencies = project.get("dependencies", [])
# Extract optional dependencies for extras (PEP 621)
optional_deps = project.get("optional-dependencies", {})
extras = list(optional_deps.keys())
if extras:
pylock_data["extras"] = extras
# Extract dependency groups (PEP 735)
dependency_groups_data = pyproject_data.get("dependency-groups", {})
dependency_groups = list(dependency_groups_data.keys())
if dependency_groups:
pylock_data["dependency-groups"] = dependency_groups
# Default groups (main dependencies)
pylock_data["default-groups"] = ["default"] if dependencies else []
# Parse main dependencies
for dep in dependencies:
package = cls._parse_dependency_string(dep)
if package:
pylock_data["packages"].append(package)
# Parse optional dependencies (extras)
for extra_name, extra_deps in optional_deps.items():
for dep in extra_deps:
package = cls._parse_dependency_string(dep)
if package:
# Add marker for extra
existing_marker = package.get("marker", "")
extra_marker = f"'{extra_name}' in extras"
if existing_marker:
package["marker"] = f"({extra_marker}) and ({existing_marker})"
else:
package["marker"] = extra_marker
pylock_data["packages"].append(package)
# Parse dependency groups (PEP 735)
for group_name, group_deps in dependency_groups_data.items():
for dep in group_deps:
# Skip include directives like {include-group = "..."}
if isinstance(dep, dict):
continue
package = cls._parse_dependency_string(dep)
if package:
# Add marker for dependency group
existing_marker = package.get("marker", "")
group_marker = f"'{group_name}' in dependency_groups"
if existing_marker:
package["marker"] = f"({group_marker}) and ({existing_marker})"
else:
package["marker"] = group_marker
pylock_data["packages"].append(package)
# Add tool.pipenv section with metadata
pylock_data["tool"] = {
"pipenv": {
"generated_from": "pyproject.toml",
"project_name": project_name,
"generation_date": datetime.datetime.now(
datetime.timezone.utc
).isoformat(),
}
}
return cls(path=pylock_path, data=pylock_data)
@staticmethod
def _parse_dependency_string(dep_string: str) -> Optional[Dict[str, Any]]:
"""Parse a PEP 508 dependency string into a package dict.
Args:
dep_string: A PEP 508 dependency specifier (e.g., "requests>=2.28.0")
Returns:
A dict with 'name' and optionally 'marker', or None if parsing fails
"""
if not dep_string or not isinstance(dep_string, str):
return None
try:
# Use pip's requirement parser
from pipenv.patched.pip._vendor.packaging.requirements import Requirement
req = Requirement(dep_string)
package: Dict[str, Any] = {"name": req.name}
# Add marker if present
if req.marker:
package["marker"] = str(req.marker)
return package
except Exception:
# Fallback: simple name extraction
import re
match = re.match(r"^([a-zA-Z0-9][-a-zA-Z0-9._]*)", dep_string)
if match:
return {"name": match.group(1).lower()}
return None
def write(self) -> None:
"""Write the pylock.toml file to disk.
Raises:
OSError: If there is an error writing the file
"""
try:
# Ensure all values are properly formatted for TOML
# Create a deep copy of the data to avoid modifying the original
data_copy = {}
for key, value in self.data.items():
if isinstance(value, dict):
data_copy[key] = value.copy()
elif isinstance(value, list):
data_copy[key] = value.copy()
else:
data_copy[key] = value
# Convert None values to empty strings or arrays
for key in ["environments", "extras", "dependency-groups", "default-groups"]:
if key in data_copy:
if data_copy[key] is None:
data_copy[key] = []
# Convert the data to a TOML document
doc = tomlkit.document()
# Add top-level keys in a specific order for readability
for key in [
"lock-version",
"environments",
"requires-python",
"extras",
"dependency-groups",
"default-groups",
"created-by",
]:
if key in data_copy:
doc[key] = data_copy[key]
# Add packages
if "packages" in data_copy:
doc["packages"] = tomlkit.aot()
for package in data_copy["packages"]:
pkg_table = tomlkit.table()
# Add basic package info first for better readability
for key in ["name", "version", "marker", "requires-python"]:
if key in package:
pkg_table[key] = package[key]
# Add remaining keys except wheels and sdist
for k, v in package.items():
if k not in {
"name",
"version",
"marker",
"requires-python",
"wheels",
"sdist",
}:
pkg_table[k] = v
# Add wheels as an array of tables with better formatting
if "wheels" in package:
wheels_array = tomlkit.array()
wheels_array.multiline(True)
for wheel in package["wheels"]:
wheel_table = tomlkit.inline_table()
# Add wheel properties in a specific order
for key in ["name", "upload-time", "url", "size"]:
if key in wheel:
wheel_table[key] = wheel[key]
# Add hashes as a table
if "hashes" in wheel:
hashes_table = tomlkit.inline_table()
for hash_algo, hash_value in wheel["hashes"].items():
hashes_table[hash_algo] = hash_value
wheel_table["hashes"] = hashes_table
wheels_array.append(wheel_table)
pkg_table["wheels"] = wheels_array
# Add sdist as a table
if "sdist" in package:
sdist_table = tomlkit.inline_table()
# Add sdist properties in a specific order
for key in ["name", "upload-time", "url", "size"]:
if key in package["sdist"]:
sdist_table[key] = package["sdist"][key]
# Add hashes as a table
if "hashes" in package["sdist"]:
hashes_table = tomlkit.inline_table()
for hash_algo, hash_value in package["sdist"][
"hashes"
].items():
hashes_table[hash_algo] = hash_value
sdist_table["hashes"] = hashes_table
pkg_table["sdist"] = sdist_table
doc["packages"].append(pkg_table)
# Add tool section
if "tool" in data_copy:
tool_table = tomlkit.table()
for tool_name, tool_data in data_copy["tool"].items():
tool_section = tomlkit.table()
for k, v in tool_data.items():
tool_section[k] = v
tool_table[tool_name] = tool_section
doc["tool"] = tool_table
# Write the document to the file with proper formatting
with atomic_open_for_write(self.path, encoding="utf-8") as f:
content = tomlkit.dumps(doc)
# Ensure there's a blank line between package entries for readability
content = content.replace("[[packages]]\n", "\n[[packages]]\n")
f.write(content)
except Exception as e:
err.print(f"[bold red]Error writing pylock.toml: {e}[/bold red]")
raise OSError(f"Error writing pylock.toml: {e}")
    @property
    def lock_version(self) -> str:
        """Get the lock-version field; empty string when absent."""
        return self.data.get("lock-version", "")
    @property
    def environments(self) -> List[str]:
        """Get the environments list; empty list when absent."""
        return self.data.get("environments", [])
    @property
    def requires_python(self) -> Optional[str]:
        """Get the requires-python value; None when absent."""
        return self.data.get("requires-python")
    @property
    def extras(self) -> List[str]:
        """Get the extras list; empty list when absent."""
        return self.data.get("extras", [])
    @property
    def dependency_groups(self) -> List[str]:
        """Get the dependency-groups list; empty list when absent."""
        return self.data.get("dependency-groups", [])
    @property
    def default_groups(self) -> List[str]:
        """Get the default-groups list; empty list when absent."""
        return self.data.get("default-groups", [])
    @property
    def created_by(self) -> str:
        """Get the created-by value; empty string when absent."""
        return self.data.get("created-by", "")
    @property
    def packages(self) -> List[Dict[str, Any]]:
        """Get the packages list; empty list when absent."""
        return self.data.get("packages", [])
    @property
    def tool(self) -> Dict[str, Any]:
        """Get the tool table; empty dict when absent."""
        return self.data.get("tool", {})
def get_packages_for_environment(
self,
extras: Optional[Set[str]] = None,
dependency_groups: Optional[Set[str]] = None,
) -> List[Dict[str, Any]]:
"""Get packages that should be installed for the given environment.
Args:
extras: Set of extras to include
dependency_groups: Set of dependency groups to include
Returns:
List of package dictionaries that should be installed
"""
from pipenv.patched.pip._vendor.packaging.markers import (
InvalidMarker,
Marker,
)
# Set up extras and dependency_groups for marker evaluation
_extras = frozenset(extras) if extras is not None else frozenset()
_dependency_groups = (
frozenset(dependency_groups)
if dependency_groups is not None
else frozenset(self.default_groups)
)
result = []
for package in self.packages:
# Check if the package has a marker
marker_str = package.get("marker")
if marker_str:
try:
marker = Marker(marker_str)
# Evaluate the marker with the lock_file context
# which supports extras and dependency_groups as sets
environment = {
"extras": _extras,
"dependency_groups": _dependency_groups,
}
if not marker.evaluate(environment=environment, context="lock_file"):
# Marker does not match, skip this package
continue
except InvalidMarker:
# If the marker is invalid, include the package anyway
# to be safe and let the installer handle it
pass
result.append(package)
return result
def convert_to_pipenv_lockfile(self) -> Dict[str, Any]:
    """Translate this pylock.toml content into Pipfile.lock structure.

    Returns:
        A dict shaped like a Pipfile.lock: ``_meta`` plus ``default``
        and ``develop`` package sections.
    """

    def _looks_like_dev(marker_text: str) -> bool:
        # Heuristic only: a dependency_groups marker that mentions
        # "dev" or "test" is routed to the develop section.
        if not marker_text or "dependency_groups" not in marker_text:
            return False
        lowered = marker_text.lower()
        return "dev" in lowered or "test" in lowered

    def _collect_hashes(pkg: Dict[str, Any]) -> List[str]:
        # Gather sha256 digests from every wheel entry, then the sdist.
        digests = []
        for wheel in pkg.get("wheels", []):
            if "hashes" in wheel and "sha256" in wheel["hashes"]:
                digests.append(f"sha256:{wheel['hashes']['sha256']}")
        if (
            "sdist" in pkg
            and "hashes" in pkg["sdist"]
            and "sha256" in pkg["sdist"]["hashes"]
        ):
            digests.append(f"sha256:{pkg['sdist']['hashes']['sha256']}")
        return digests

    meta = {
        "hash": {"sha256": ""},  # pylock.toml carries no Pipfile hash
        "pipfile-spec": 6,
        "requires": {},
        "sources": [],
    }
    converted = {"_meta": meta, "default": {}, "develop": {}}

    if self.requires_python:
        meta["requires"]["python_version"] = self.requires_python

    # Prefer the sources recorded in pylock.toml; fall back to PyPI.
    if "sources" in self.data:
        meta["sources"] = self.data["sources"]
    else:
        meta["sources"] = [
            {
                "name": "pypi",
                "url": "https://pypi.org/simple",
                "verify_ssl": True,
            }
        ]

    for pkg in self.packages:
        name = pkg.get("name")
        if not name:
            # A nameless entry cannot be keyed into the lockfile.
            continue
        marker_text = pkg.get("marker", "")
        entry = {}
        # Pin the exact version; "*" keeps Pipfile.lock compatible when
        # pylock.toml recorded no version at all.
        entry["version"] = f"=={pkg['version']}" if "version" in pkg else "*"
        digests = _collect_hashes(pkg)
        if digests:
            entry["hashes"] = digests
        if marker_text:
            entry["markers"] = marker_text
        section = "develop" if _looks_like_dev(marker_text) else "default"
        converted[section][name] = entry
    return converted
def find_pylock_file(directory: Optional[Union[str, Path]] = None) -> Optional[Path]:
    """Locate a PEP 751 lock file in *directory*.

    The canonical ``pylock.toml`` takes precedence; otherwise the first
    named lock file (``pylock.*.toml``) in sorted order is returned, so
    repeated calls are deterministic even though ``Path.glob`` ordering
    is filesystem-dependent.

    Args:
        directory: Directory to search in; defaults to the current
            working directory.

    Returns:
        Path to the lock file if found, None otherwise.
    """
    if directory is None:
        directory = os.getcwd()
    if isinstance(directory, str):
        directory = Path(directory)
    # First, look for the canonical pylock.toml
    pylock_path = directory / "pylock.toml"
    if pylock_path.exists():
        return pylock_path
    # Then, look for named pylock files (pylock.*.toml); sort so the
    # choice is stable across runs and platforms.
    named = sorted(directory.glob("pylock.*.toml"))
    if named:
        return named[0]
    return None
| {
"repo_id": "pypa/pipenv",
"file_path": "pipenv/utils/pylock.py",
"license": "MIT License",
"lines": 620,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pypa/pipenv:tests/integration/test_pylock.py | import os
import shutil
from pathlib import Path
import pytest
from pipenv.project import Project
from pipenv.utils.pylock import PylockFile, find_pylock_file
@pytest.fixture
def pylock_project(tmp_path):
    """Yield a temp project dir holding a Pipfile plus the example pylock.toml."""
    source_pylock = Path(__file__).parent.parent.parent / "examples" / "pylock.toml"
    target_pylock = tmp_path / "pylock.toml"
    pipfile_content = """
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[packages]
requests = "*"
[dev-packages]
[requires]
python_version = "3.8"
"""
    (tmp_path / "Pipfile").write_text(pipfile_content)
    shutil.copy(source_pylock, target_pylock)
    # Run the test from inside the project directory, restoring the
    # previous cwd on teardown.
    previous_cwd = os.getcwd()
    os.chdir(tmp_path)
    try:
        yield tmp_path
    finally:
        os.chdir(previous_cwd)
def test_find_pylock_file(pylock_project):
    """find_pylock_file should locate pylock.toml inside the project dir."""
    located = find_pylock_file(pylock_project)
    assert located is not None
    assert located.exists()
    assert located.name == "pylock.toml"
def test_pylock_file_loading(pylock_project):
    """Loading the example pylock.toml exposes the expected metadata."""
    loaded = PylockFile.from_path(pylock_project / "pylock.toml")
    assert loaded.lock_version == "1.0"
    assert loaded.created_by == "pipenv"
    assert loaded.requires_python == ">=3.8"
    # The example file ships five packages; requests is listed first.
    assert len(loaded.packages) == 5
    first = loaded.packages[0]
    assert first["name"] == "requests"
    assert first["version"] == "2.28.1"
    # PEP 751 packages.index field is populated in the example.
    assert first.get("index") == "https://pypi.org/simple/"
def test_project_pylock_integration(pylock_project):
    """Project should detect pylock.toml and serve its converted content."""
    project = Project(chdir=False)
    # Lock file detection.
    assert project.pylock_exists
    assert project.pylock_location is not None
    assert Path(project.pylock_location).name == "pylock.toml"
    # The converted lockfile mirrors the pylock content.
    content = project.lockfile_content
    assert "_meta" in content
    assert "default" in content
    for expected in ("requests", "urllib3", "certifi"):
        assert expected in content["default"]
    # The dependency_groups marker routes pytest into develop.
    assert "develop" in content
    assert "pytest" in content["develop"]
    # Converted entries carry pinned versions and sha256 hashes.
    requests_entry = content["default"]["requests"]
    assert requests_entry["version"] == "==2.28.1"
    hashes = requests_entry["hashes"]
    assert len(hashes) == 1
    assert hashes[0].startswith("sha256:")
@pytest.fixture
def pylock_write_project(tmp_path):
    """Create a temporary project with a Pipfile that has use_pylock enabled.

    Writes a Pipfile (with ``[pipenv] use_pylock = true``) and a matching
    Pipfile.lock into *tmp_path*, chdirs into it, yields the directory,
    and restores the previous working directory on teardown.
    """
    # Create a simple Pipfile with use_pylock enabled
    pipfile_content = """
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[packages]
requests = "*"
[dev-packages]
[requires]
python_version = "3.8"
[pipenv]
use_pylock = true
"""
    # Create a simple Pipfile.lock
    lockfile_content = """
{
"_meta": {
"hash": {
"sha256": "b8c2e1580c53e383cfe4254c1f16560b855d984c674dc07bcce19a8b5b28c6b2"
},
"pipfile-spec": 6,
"requires": {
"python_version": "3.8"
},
"sources": [
{
"name": "pypi",
"url": "https://pypi.org/simple",
"verify_ssl": true
}
]
},
"default": {
"certifi": {
"hashes": [
"sha256:0d9c601124e5a6ba9712dbc60d9c53c21e34f5f641fe83002317394311bdce14"
],
"version": "==2022.9.24"
},
"charset-normalizer": {
"hashes": [
"sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f"
],
"version": "==2.1.1"
},
"idna": {
"hashes": [
"sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"
],
"version": "==3.4"
},
"requests": {
"hashes": [
"sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7"
],
"index": "pypi",
"version": "==2.28.1"
},
"urllib3": {
"hashes": [
"sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997"
],
"version": "==1.26.12"
}
},
"develop": {}
}
"""
    with open(tmp_path / "Pipfile", "w") as f:
        f.write(pipfile_content)
    with open(tmp_path / "Pipfile.lock", "w") as f:
        f.write(lockfile_content)
    # Change to the temporary directory
    old_cwd = os.getcwd()
    os.chdir(tmp_path)
    try:
        yield tmp_path
    finally:
        # Always restore the caller's working directory.
        os.chdir(old_cwd)
def test_write_pylock_file(pylock_write_project):
    """Writing the lockfile should also emit a pylock.toml file."""
    project = Project(chdir=False)
    assert project.use_pylock is True
    expected_path = str(pylock_write_project / "pylock.toml")
    assert project.pylock_output_path == expected_path
    # Writing the Pipfile.lock content triggers pylock.toml generation.
    content = project.lockfile_content
    project.write_lockfile(content)
    output = pylock_write_project / "pylock.toml"
    assert output.exists()
    # Reload the generated file and check what was written.
    written = PylockFile.from_path(output)
    assert written.lock_version == "1.0"
    assert written.created_by == "pipenv"
    names = {pkg["name"] for pkg in written.packages}
    assert names >= {"requests", "urllib3", "certifi", "charset-normalizer", "idna"}
    # Provenance is recorded under tool.pipenv.
    assert "pipenv" in written.tool
    assert written.tool["pipenv"].get("generated_from") == "Pipfile.lock"
@pytest.fixture
def pylock_write_named_project(tmp_path):
    """Create a temporary project with a Pipfile that has use_pylock and pylock_name enabled.

    Same layout as ``pylock_write_project``, but the Pipfile also sets
    ``pylock_name = "dev"`` so the named output ``pylock.dev.toml`` is used.
    """
    # Create a simple Pipfile with use_pylock and pylock_name enabled
    pipfile_content = """
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[packages]
requests = "*"
[dev-packages]
[requires]
python_version = "3.8"
[pipenv]
use_pylock = true
pylock_name = "dev"
"""
    # Create a simple Pipfile.lock (same as in pylock_write_project)
    lockfile_content = """
{
"_meta": {
"hash": {
"sha256": "b8c2e1580c53e383cfe4254c1f16560b855d984c674dc07bcce19a8b5b28c6b2"
},
"pipfile-spec": 6,
"requires": {
"python_version": "3.8"
},
"sources": [
{
"name": "pypi",
"url": "https://pypi.org/simple",
"verify_ssl": true
}
]
},
"default": {
"certifi": {
"hashes": [
"sha256:0d9c601124e5a6ba9712dbc60d9c53c21e34f5f641fe83002317394311bdce14"
],
"version": "==2022.9.24"
},
"charset-normalizer": {
"hashes": [
"sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f"
],
"version": "==2.1.1"
},
"idna": {
"hashes": [
"sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"
],
"version": "==3.4"
},
"requests": {
"hashes": [
"sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7"
],
"index": "pypi",
"version": "==2.28.1"
},
"urllib3": {
"hashes": [
"sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997"
],
"version": "==1.26.12"
}
},
"develop": {}
}
"""
    with open(tmp_path / "Pipfile", "w") as f:
        f.write(pipfile_content)
    with open(tmp_path / "Pipfile.lock", "w") as f:
        f.write(lockfile_content)
    # Change to the temporary directory
    old_cwd = os.getcwd()
    os.chdir(tmp_path)
    try:
        yield tmp_path
    finally:
        # Always restore the caller's working directory.
        os.chdir(old_cwd)
def test_write_named_pylock_file(pylock_write_named_project):
    """A configured pylock_name should produce pylock.<name>.toml."""
    project = Project(chdir=False)
    assert project.use_pylock is True
    # The fixture configures a "dev" named lock file.
    assert project.settings.get("pylock_name") == "dev"
    expected_path = str(pylock_write_named_project / "pylock.dev.toml")
    assert project.pylock_output_path == expected_path
    # Writing the lockfile also emits the named pylock file.
    content = project.lockfile_content
    project.write_lockfile(content)
    named_output = pylock_write_named_project / "pylock.dev.toml"
    assert named_output.exists()
    # Reload and verify the generated content.
    written = PylockFile.from_path(named_output)
    assert written.lock_version == "1.0"
    assert written.created_by == "pipenv"
    names = {pkg["name"] for pkg in written.packages}
    assert names >= {"requests", "urllib3", "certifi", "charset-normalizer", "idna"}
| {
"repo_id": "pypa/pipenv",
"file_path": "tests/integration/test_pylock.py",
"license": "MIT License",
"lines": 304,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pypa/pipenv:tests/unit/test_pylock.py | import os
import tempfile
from pathlib import Path
import pytest
from pipenv.utils.pylock import PylockFile, PylockFormatError, PylockVersionError, find_pylock_file
@pytest.fixture
def valid_pylock_content():
    """Return minimal valid PEP 751 pylock.toml text.

    Two packages: ``requests`` (unconditional) and ``pytest`` (guarded by
    a ``dependency_groups`` marker), each with a single wheel entry.
    """
    return """
lock-version = '1.0'
environments = ["sys_platform == 'win32'", "sys_platform == 'linux'"]
requires-python = '==3.12'
created-by = 'test-tool'
[[packages]]
name = 'requests'
version = '2.28.1'
requires-python = '>=3.7'
[[packages.wheels]]
name = 'requests-2.28.1-py3-none-any.whl'
upload-time = '2022-07-13T14:00:00Z'
url = 'https://files.pythonhosted.org/packages/example/requests-2.28.1-py3-none-any.whl'
size = 61000
hashes = {sha256 = 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890'}
[[packages]]
name = 'pytest'
version = '7.0.0'
marker = "'dev' in dependency_groups or 'test' in dependency_groups"
requires-python = '>=3.6'
[[packages.wheels]]
name = 'pytest-7.0.0-py3-none-any.whl'
upload-time = '2022-02-03T12:00:00Z'
url = 'https://files.pythonhosted.org/packages/example/pytest-7.0.0-py3-none-any.whl'
size = 45000
hashes = {sha256 = '1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef'}
"""
@pytest.fixture
def invalid_version_pylock_content():
    """Return pylock.toml text carrying an unsupported lock-version (2.0)."""
    return """
lock-version = '2.0'
created-by = 'test-tool'
[[packages]]
name = 'requests'
version = '2.28.1'
"""
@pytest.fixture
def missing_version_pylock_content():
    """Return pylock.toml text that omits the mandatory lock-version key."""
    return """
created-by = 'test-tool'
[[packages]]
name = 'requests'
version = '2.28.1'
"""
@pytest.fixture
def pylock_file(valid_pylock_content):
    """Write the valid pylock content to a temp .toml file; yield its path."""
    with tempfile.NamedTemporaryFile(mode='w+', suffix='.toml', delete=False) as handle:
        handle.write(valid_pylock_content)
        handle.flush()
        tmp_name = handle.name
    yield tmp_name
    # Clean up the temp file once the test finishes.
    if os.path.exists(tmp_name):
        os.unlink(tmp_name)
def test_pylock_file_from_path(pylock_file):
    """PylockFile.from_path parses metadata and both package tables."""
    parsed = PylockFile.from_path(pylock_file)
    assert parsed.lock_version == "1.0"
    assert parsed.created_by == "test-tool"
    assert parsed.requires_python == "==3.12"
    assert len(parsed.packages) == 2
    requests_pkg, pytest_pkg = parsed.packages
    assert requests_pkg["name"] == "requests"
    assert requests_pkg["version"] == "2.28.1"
    assert pytest_pkg["name"] == "pytest"
    assert pytest_pkg["marker"] == "'dev' in dependency_groups or 'test' in dependency_groups"
def test_pylock_file_invalid_version(invalid_version_pylock_content):
    """An unsupported lock-version must raise PylockVersionError."""
    handle = tempfile.NamedTemporaryFile(mode='w+', suffix='.toml', delete=False)
    try:
        handle.write(invalid_version_pylock_content)
        handle.flush()
        handle.close()
        with pytest.raises(PylockVersionError):
            PylockFile.from_path(handle.name)
    finally:
        # Remove the temp file whether or not the assertion held.
        if os.path.exists(handle.name):
            os.unlink(handle.name)
def test_pylock_file_missing_version(missing_version_pylock_content):
    """A file without lock-version must raise PylockFormatError."""
    handle = tempfile.NamedTemporaryFile(mode='w+', suffix='.toml', delete=False)
    try:
        handle.write(missing_version_pylock_content)
        handle.flush()
        handle.close()
        with pytest.raises(PylockFormatError):
            PylockFile.from_path(handle.name)
    finally:
        # Remove the temp file whether or not the assertion held.
        if os.path.exists(handle.name):
            os.unlink(handle.name)
def test_find_pylock_file():
    """find_pylock_file prefers pylock.toml, then named pylock.*.toml files."""
    with tempfile.TemporaryDirectory() as workdir:
        # Empty directory: nothing to find.
        assert find_pylock_file(workdir) is None
        # The canonical file is found first.
        canonical = Path(workdir) / "pylock.toml"
        canonical.write_text("lock-version = '1.0'")
        assert find_pylock_file(workdir) == canonical
        # With the canonical file gone, a named variant is picked up.
        canonical.unlink()
        named = Path(workdir) / "pylock.dev.toml"
        named.write_text("lock-version = '1.0'")
        assert find_pylock_file(workdir) == named
def test_convert_to_pipenv_lockfile(pylock_file):
    """Conversion yields Pipfile.lock sections with pinned versions."""
    converted = PylockFile.from_path(pylock_file).convert_to_pipenv_lockfile()
    # Top-level structure.
    for section in ("_meta", "default", "develop"):
        assert section in converted
    # Package placement: requests is default, pytest routes to develop.
    assert "requests" in converted["default"]
    assert "pytest" in converted["develop"]
    # Entry details.
    requests_entry = converted["default"]["requests"]
    assert requests_entry["version"] == "==2.28.1"
    assert "hashes" in requests_entry
    pytest_entry = converted["develop"]["pytest"]
    assert pytest_entry["version"] == "==7.0.0"
    assert pytest_entry["markers"] == "'dev' in dependency_groups or 'test' in dependency_groups"
def test_from_lockfile(tmp_path):
    """Test creating a PylockFile from a Pipfile.lock file.

    Covers metadata carry-over (requires-python), conversion of both the
    default and develop sections, and the tool.pipenv provenance table.
    """
    # Create a simple Pipfile.lock
    lockfile_content = """
{
"_meta": {
"hash": {
"sha256": "b8c2e1580c53e383cfe4254c1f16560b855d984c674dc07bcce19a8b5b28c6b2"
},
"pipfile-spec": 6,
"requires": {
"python_version": "3.8"
},
"sources": [
{
"name": "pypi",
"url": "https://pypi.org/simple",
"verify_ssl": true
}
]
},
"default": {
"requests": {
"hashes": [
"sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7"
],
"index": "pypi",
"version": "==2.28.1"
}
},
"develop": {
"pytest": {
"hashes": [
"sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
],
"version": "==7.0.0"
}
}
}
"""
    lockfile_path = tmp_path / "Pipfile.lock"
    pylock_path = tmp_path / "pylock.toml"
    with open(lockfile_path, "w") as f:
        f.write(lockfile_content)
    # Create a PylockFile from the Pipfile.lock
    pylock = PylockFile.from_lockfile(lockfile_path, pylock_path)
    # Check basic properties
    assert pylock.lock_version == "1.0"
    assert pylock.created_by == "pipenv"
    # python_version "3.8" is carried over as the ">=3.8" specifier
    assert pylock.requires_python == ">=3.8"
    # Check that packages were correctly converted
    package_names = [p["name"] for p in pylock.packages]
    assert "requests" in package_names
    assert "pytest" in package_names
    # Check that the tool.pipenv section exists
    assert "pipenv" in pylock.tool
    assert "generated_from" in pylock.tool["pipenv"]
    assert pylock.tool["pipenv"]["generated_from"] == "Pipfile.lock"
def test_wildcard_version_handling(tmp_path):
    """Test that wildcard versions are handled correctly.

    When converting from Pipfile.lock to pylock.toml, wildcard versions
    should be skipped. When converting back, packages without versions
    should get wildcard version.
    """
    # Create a Pipfile.lock with a wildcard version
    lockfile_content = """
{
"_meta": {
"hash": {"sha256": "test"},
"pipfile-spec": 6,
"requires": {"python_version": "3.10"},
"sources": []
},
"default": {
"legacy-cgi": {
"markers": "python_version >= '3.13'",
"version": "*"
},
"requests": {
"version": "==2.28.1"
}
},
"develop": {}
}
"""
    lockfile_path = tmp_path / "Pipfile.lock"
    pylock_path = tmp_path / "pylock.toml"
    with open(lockfile_path, "w") as f:
        f.write(lockfile_content)
    # Create a PylockFile from the Pipfile.lock
    pylock = PylockFile.from_lockfile(lockfile_path, pylock_path)
    # Check that legacy-cgi has no version (wildcard was skipped)
    legacy_cgi_pkg = next((p for p in pylock.packages if p["name"] == "legacy-cgi"), None)
    assert legacy_cgi_pkg is not None
    assert "version" not in legacy_cgi_pkg  # Wildcard version should not be stored
    # Check that requests has a version
    requests_pkg = next((p for p in pylock.packages if p["name"] == "requests"), None)
    assert requests_pkg is not None
    assert requests_pkg["version"] == "2.28.1"
    # Now write and reload the pylock.toml to exercise the round trip
    pylock.write()
    loaded_pylock = PylockFile.from_path(pylock_path)
    # Convert back to Pipfile.lock format
    converted_lockfile = loaded_pylock.convert_to_pipenv_lockfile()
    # Check that legacy-cgi gets wildcard version back
    assert "legacy-cgi" in converted_lockfile["default"]
    assert converted_lockfile["default"]["legacy-cgi"]["version"] == "*"
    # Check that requests keeps its pinned version
    assert "requests" in converted_lockfile["default"]
    assert converted_lockfile["default"]["requests"]["version"] == "==2.28.1"
def test_write_method(tmp_path):
    """PylockFile.write should round-trip its data through disk."""
    wheel = {
        "name": "requests-2.28.1-py3-none-any.whl",
        "hashes": {"sha256": "test-hash"},
    }
    pylock_data = {
        "lock-version": "1.0",
        "environments": ["sys_platform == 'linux'"],
        "requires-python": ">=3.8",
        "extras": [],
        "dependency-groups": [],
        "default-groups": [],
        "created-by": "test",
        "packages": [{"name": "requests", "version": "2.28.1", "wheels": [wheel]}],
        "tool": {"pipenv": {"generated_from": "test"}},
    }
    target = tmp_path / "pylock.toml"
    # Write the file to disk.
    PylockFile(path=target, data=pylock_data).write()
    assert target.exists()
    # Reload and verify the round trip preserved everything we set.
    reloaded = PylockFile.from_path(target)
    assert reloaded.lock_version == "1.0"
    assert reloaded.created_by == "test"
    assert reloaded.requires_python == ">=3.8"
    assert len(reloaded.packages) == 1
    only_pkg = reloaded.packages[0]
    assert only_pkg["name"] == "requests"
    assert only_pkg["version"] == "2.28.1"
    assert "pipenv" in reloaded.tool
    assert reloaded.tool["pipenv"]["generated_from"] == "test"
def test_get_packages_for_environment_marker_evaluation(tmp_path):
    """Test that get_packages_for_environment correctly evaluates markers.

    This test verifies that:
    - Packages without markers are always included
    - Packages with dependency_groups markers are filtered based on provided groups
    - Packages with extras markers are filtered based on provided extras

    Note: PEP 751 marker syntax uses 'value' in marker_variable, e.g.:
    - 'dev' in dependency_groups
    - 'crypto' in extras
    """
    # Create a pylock file with various markers using PEP 751 syntax
    pylock_content = """
lock-version = '1.0'
created-by = 'test-tool'
[[packages]]
name = 'requests'
version = '2.28.1'
[[packages]]
name = 'pytest'
version = '7.0.0'
marker = "'dev' in dependency_groups or 'test' in dependency_groups"
[[packages]]
name = 'sphinx'
version = '6.0.0'
marker = "'docs' in dependency_groups"
[[packages]]
name = 'cryptography'
version = '41.0.0'
marker = "'crypto' in extras"
[[packages]]
name = 'validators'
version = '0.22.0'
marker = "'validation' in extras"
[[packages]]
name = 'dev-only-tool'
version = '1.0.0'
marker = "'dev' in dependency_groups"
"""
    pylock_path = tmp_path / "pylock.toml"
    with open(pylock_path, "w") as f:
        f.write(pylock_content)
    pylock = PylockFile.from_path(pylock_path)
    # Test 1: No extras, no dependency_groups - only packages without markers
    packages = pylock.get_packages_for_environment(extras=set(), dependency_groups=set())
    package_names = [p["name"] for p in packages]
    assert "requests" in package_names
    assert "pytest" not in package_names
    assert "sphinx" not in package_names
    assert "cryptography" not in package_names
    assert "validators" not in package_names
    assert "dev-only-tool" not in package_names
    # Test 2: With 'dev' dependency_group
    packages = pylock.get_packages_for_environment(extras=set(), dependency_groups={"dev"})
    package_names = [p["name"] for p in packages]
    assert "requests" in package_names
    assert "pytest" in package_names  # 'dev' in dependency_groups evaluates to True
    assert "sphinx" not in package_names  # 'docs' not provided
    assert "dev-only-tool" in package_names  # 'dev' in dependency_groups evaluates to True
    assert "cryptography" not in package_names
    # Test 3: With 'docs' dependency_group
    packages = pylock.get_packages_for_environment(extras=set(), dependency_groups={"docs"})
    package_names = [p["name"] for p in packages]
    assert "requests" in package_names
    assert "pytest" not in package_names
    assert "sphinx" in package_names  # 'docs' in dependency_groups evaluates to True
    assert "dev-only-tool" not in package_names
    # Test 4: With 'crypto' extra
    packages = pylock.get_packages_for_environment(extras={"crypto"}, dependency_groups=set())
    package_names = [p["name"] for p in packages]
    assert "requests" in package_names
    assert "cryptography" in package_names  # 'crypto' in extras evaluates to True
    assert "validators" not in package_names  # 'validation' not provided
    assert "pytest" not in package_names
    # Test 5: With multiple dependency_groups and extras - everything matches
    packages = pylock.get_packages_for_environment(
        extras={"crypto", "validation"},
        dependency_groups={"dev", "docs"}
    )
    package_names = [p["name"] for p in packages]
    assert "requests" in package_names
    assert "pytest" in package_names
    assert "sphinx" in package_names
    assert "cryptography" in package_names
    assert "validators" in package_names
    assert "dev-only-tool" in package_names
def test_from_lockfile_with_custom_dev_groups(tmp_path):
    """Custom dev_groups should flow into dependency-groups and markers."""
    import json

    lock_data = {
        "_meta": {
            "sources": [
                {"name": "pypi", "url": "https://pypi.org/simple/", "verify_ssl": True}
            ],
            "requires": {"python_version": "3.10"},
        },
        "default": {
            "requests": {"version": "==2.28.1", "hashes": ["sha256:abc123"]},
        },
        "develop": {
            "pytest": {"version": "==7.0.0", "hashes": ["sha256:def456"]},
        },
    }
    lock_path = tmp_path / "Pipfile.lock"
    lock_path.write_text(json.dumps(lock_data))
    # Convert with custom dev group names.
    converted = PylockFile.from_lockfile(
        lock_path, dev_groups=["testing", "development"]
    )
    # Both custom groups are declared...
    assert "testing" in converted.dependency_groups
    assert "development" in converted.dependency_groups
    # ...and referenced by the develop-section package's marker.
    pytest_entry = next(p for p in converted.packages if p["name"] == "pytest")
    assert "'testing' in dependency_groups" in pytest_entry["marker"]
    assert "'development' in dependency_groups" in pytest_entry["marker"]
def test_from_lockfile_adds_package_index(tmp_path):
    """from_lockfile should record the PEP 751 packages.index field."""
    import json

    lock_data = {
        "_meta": {
            "sources": [
                {"name": "pypi", "url": "https://pypi.org/simple/", "verify_ssl": True}
            ],
            "requires": {"python_version": "3.10"},
        },
        "default": {
            "requests": {"version": "==2.28.1", "hashes": ["sha256:abc123"]},
        },
        "develop": {},
    }
    lock_path = tmp_path / "Pipfile.lock"
    lock_path.write_text(json.dumps(lock_data))
    converted = PylockFile.from_lockfile(lock_path)
    # The source URL is carried through as the package's index.
    entry = next(p for p in converted.packages if p["name"] == "requests")
    assert "index" in entry
    assert entry["index"] == "https://pypi.org/simple/"
def test_from_pyproject(tmp_path):
    """Test creating a PylockFile from pyproject.toml.

    Covers base dependencies, optional-dependencies (extras), and PEP 735
    dependency-groups, including the generated extras/dependency_groups
    markers.
    """
    pyproject_content = '''
[project]
name = "my-project"
version = "1.0.0"
requires-python = ">=3.9"
dependencies = [
"requests>=2.28.0",
"click>=8.0.0",
]
[project.optional-dependencies]
crypto = [
"cryptography>=40.0.0",
]
[dependency-groups]
dev = [
"pytest>=7.0.0",
"black>=23.0.0",
]
'''
    pyproject_path = tmp_path / "pyproject.toml"
    with open(pyproject_path, "w") as f:
        f.write(pyproject_content)
    pylock = PylockFile.from_pyproject(pyproject_path)
    # Check basic metadata
    assert pylock.lock_version == "1.0"
    assert pylock.created_by == "pipenv"
    assert pylock.requires_python == ">=3.9"
    # Check extras
    assert "crypto" in pylock.extras
    # Check dependency groups
    assert "dev" in pylock.dependency_groups
    # Check packages from all three sections are present
    package_names = [p["name"] for p in pylock.packages]
    assert "requests" in package_names
    assert "click" in package_names
    assert "cryptography" in package_names
    assert "pytest" in package_names
    assert "black" in package_names
    # Check markers for extras
    crypto_pkg = next(p for p in pylock.packages if p["name"] == "cryptography")
    assert "'crypto' in extras" in crypto_pkg["marker"]
    # Check markers for dependency groups
    pytest_pkg = next(p for p in pylock.packages if p["name"] == "pytest")
    assert "'dev' in dependency_groups" in pytest_pkg["marker"]
def test_from_pyproject_missing_file(tmp_path):
    """A nonexistent pyproject.toml must raise FileNotFoundError."""
    missing = tmp_path / "pyproject.toml"
    with pytest.raises(FileNotFoundError):
        PylockFile.from_pyproject(missing)
| {
"repo_id": "pypa/pipenv",
"file_path": "tests/unit/test_pylock.py",
"license": "MIT License",
"lines": 475,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pypa/pipenv:benchmarks/benchmark.py | #!/usr/bin/env python3
"""
Pipenv benchmark runner based on python-package-manager-shootout.
"""
import csv
import os
import shutil
import subprocess
import sys
import time
import urllib.request
from pathlib import Path
from typing import List, Tuple
def subprocess_env():
    """Build a copy of os.environ tuned for non-interactive pipenv runs."""
    env = dict(os.environ)
    env.update(
        {
            # Never wait for interactive confirmation.
            "PIPENV_YES": "1",
            "PIPENV_NOSPIN": "1",
            # Always create a fresh venv instead of reusing an active one.
            "PIPENV_IGNORE_VIRTUALENVS": "1",
            # Silence courtesy notices.
            "PIPENV_VERBOSITY": "-1",
        }
    )
    return env
class PipenvBenchmark:
def __init__(self, benchmark_dir: Path):
    """Record paths and workload settings used by the benchmark steps."""
    # Pinned snapshot of sentry's base requirements used as the workload.
    self.requirements_url = "https://raw.githubusercontent.com/getsentry/sentry/51281a6abd8ff4a93d2cebc04e1d5fc7aa9c4c11/requirements-base.txt"
    self.test_package = "goodconf"
    self.benchmark_dir = benchmark_dir
    # Each benchmark step writes its timing record into this directory.
    self.timings_dir = benchmark_dir / "timings"
    self.timings_dir.mkdir(exist_ok=True)
def run_timed_command(
    self, command: List[str], timing_file: str, cwd: Path = None, timeout: int = 600
) -> Tuple[float, int]:
    """Run a command and measure execution time.

    Args:
        command: argv list executed without a shell.
        timing_file: name of the CSV record written under ``self.timings_dir``.
        cwd: working directory for the command; defaults to ``self.benchmark_dir``.
        timeout: seconds allowed before the process is killed.

    Returns:
        Tuple of (elapsed seconds, return code) on success.

    Raises:
        subprocess.CalledProcessError: when the command exits non-zero.
        subprocess.TimeoutExpired: when the command exceeds ``timeout``.
    """
    if cwd is None:
        cwd = self.benchmark_dir
    # Set environment to prevent interactive prompts
    env = subprocess_env()
    print(f" Running: {' '.join(command)}", flush=True)
    start_time = time.time()
    # Use Popen with communicate() to avoid pipe buffer deadlock
    # that can occur with capture_output=True on commands with lots of output
    process = subprocess.Popen(
        command,
        cwd=cwd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
        env=env,
    )
    try:
        stdout, stderr = process.communicate(timeout=timeout)
        elapsed = time.time() - start_time
        returncode = process.returncode
        if returncode != 0:
            # Surface a short excerpt of the failing command's output.
            print(f" ✗ Command failed after {elapsed:.3f}s: {' '.join(command)}")
            print(f" Return code: {returncode}")
            if stderr and stderr.strip():
                print(" Error output:")
                for line in stderr.strip().split("\n")[:5]:
                    print(f" {line}")
            if stdout and stdout.strip():
                print(" Stdout:")
                for line in stdout.strip().split("\n")[:3]:
                    print(f" {line}")
            raise subprocess.CalledProcessError(returncode, command, stdout, stderr)
        # Write timing info (simplified format for cross-platform compatibility)
        timing_path = self.timings_dir / timing_file
        with open(timing_path, "w") as f:
            f.write(
                f"{elapsed:.3f},0,0,0,0,0,0\n"
            )  # elapsed,system,user,cpu%,maxrss,inputs,outputs
        print(f" ✓ Completed in {elapsed:.3f}s")
        if stdout and stdout.strip():
            # Show first few lines of output
            output_lines = stdout.strip().split("\n")[:3]
            for line in output_lines:
                print(f" {line[:100]}")
            if len(stdout.strip().split("\n")) > 3:
                print(" ...")
        return elapsed, returncode
    except subprocess.TimeoutExpired:
        # Kill the hung process and report whatever output it produced
        # before the deadline.
        process.kill()
        stdout, stderr = process.communicate()
        elapsed = time.time() - start_time
        print(f" ✗ Command timed out after {elapsed:.3f}s: {' '.join(command)}")
        print(f" Timeout was set to {timeout}s")
        if stdout and stdout.strip():
            print(" Stdout before timeout:")
            for line in stdout.strip().split("\n")[-10:]:
                print(f" {line}")
        if stderr and stderr.strip():
            print(" Stderr before timeout:")
            for line in stderr.strip().split("\n")[-5:]:
                print(f" {line}")
        raise
def setup_requirements(self):
    """Download the pinned requirements file, dropping --index-url lines."""
    print("Setting up requirements.txt...")
    target = self.benchmark_dir / "requirements.txt"
    try:
        with urllib.request.urlopen(self.requirements_url) as response:
            raw = response.read().decode("utf-8")
        # Strip index overrides so the default index is used.
        kept = [
            line
            for line in raw.splitlines()
            if not line.strip().startswith("--index-url")
        ]
        with open(target, "w") as f:
            f.write("\n".join(kept))
        print(f"Downloaded {len(kept)} requirements")
    except Exception as e:
        print(f"Failed to download requirements: {e}")
        raise
def clean_cache(self):
    """Remove the user-level pip and pipenv download caches (best effort)."""
    print("Cleaning caches...")
    home_cache = Path.home() / ".cache"
    for tool_name in ("pip", "pipenv"):
        target = home_cache / tool_name
        if target.exists():
            # ignore_errors: a partially-removed cache is fine for benchmarking
            shutil.rmtree(target, ignore_errors=True)
def clean_venv(self):
    """Clean virtual environment."""
    print("Cleaning virtual environment...")
    try:
        # Ask pipenv where the project's venv lives. check=False because
        # `pipenv --venv` exits non-zero when no venv exists — that is an
        # expected outcome here, not an error.
        result = subprocess.run(
            ["pipenv", "--venv"],
            cwd=self.benchmark_dir,
            capture_output=True,
            text=True,
            check=False,
            timeout=30,
            env=subprocess_env(),
        )
        if result.returncode == 0:
            venv_path = Path(result.stdout.strip())
            if venv_path.exists():
                print(f" Removing venv: {venv_path}")
                # ignore_errors keeps cleanup best-effort across platforms
                shutil.rmtree(venv_path, ignore_errors=True)
        else:
            print(" No virtual environment found")
    except subprocess.TimeoutExpired:
        print(" Warning: pipenv --venv timed out")
    except Exception as e:
        # Deliberately broad: venv cleanup must never abort the benchmark run.
        print(f" Warning: Could not clean venv: {e}")
        pass  # Ignore errors if venv doesn't exist
def clean_lock(self):
    """Remove Pipfile.lock so the next lock run starts from scratch."""
    print("Cleaning lock file...")
    lock_file = self.benchmark_dir / "Pipfile.lock"
    # FIX: unlink(missing_ok=True) replaces the exists()/unlink() pair,
    # closing the race where the file disappears between check and delete.
    lock_file.unlink(missing_ok=True)
def benchmark_tooling(self):
    """Benchmark pipenv installation (using current dev version)."""
    print("Benchmarking tooling...")
    # Install current development version: `-e <repo root>` installs the
    # checkout that contains this benchmarks/ directory in editable mode.
    parent_dir = self.benchmark_dir.parent
    elapsed, _ = self.run_timed_command(
        [sys.executable, "-m", "pip", "install", "-e", str(parent_dir)], "tooling.txt"
    )
    print(f"Tooling completed in {elapsed:.3f}s")
def benchmark_import(self):
    """Benchmark importing requirements.txt to Pipfile."""
    print("Benchmarking import...")
    # `pipenv install -r` converts requirements.txt entries into the Pipfile;
    # elapsed time is recorded to timings/import.txt by run_timed_command.
    elapsed, _ = self.run_timed_command(
        ["pipenv", "install", "-r", "requirements.txt"], "import.txt"
    )
    print(f"Import completed in {elapsed:.3f}s")
def benchmark_lock(self, timing_file: str):
    """Time `pipenv lock` and record the result under *timing_file*."""
    print(f"Benchmarking lock ({timing_file})...")
    duration, _returncode = self.run_timed_command(["pipenv", "lock"], timing_file)
    print(f"Lock completed in {duration:.3f}s")
def benchmark_install(self, timing_file: str):
    """Time `pipenv sync` (install from the lock file) into *timing_file*."""
    print(f"Benchmarking install ({timing_file})...")
    duration, _returncode = self.run_timed_command(["pipenv", "sync"], timing_file)
    print(f"Install completed in {duration:.3f}s")
def benchmark_update(self, timing_file: str):
    """Time `pipenv update` and record the result under *timing_file*."""
    print(f"Benchmarking update ({timing_file})...")
    duration, _returncode = self.run_timed_command(["pipenv", "update"], timing_file)
    print(f"Update completed in {duration:.3f}s")
def benchmark_add_package(self):
    """Time installing one additional package (``self.test_package``)."""
    print("Benchmarking add package...")
    duration, _returncode = self.run_timed_command(
        ["pipenv", "install", self.test_package], "add-package.txt"
    )
    print(f"Add package completed in {duration:.3f}s")
def get_pipenv_version(self) -> str:
    """Get pipenv version."""
    try:
        result = subprocess.run(
            ["pipenv", "--version"],
            capture_output=True,
            text=True,
            check=True,
            timeout=30,
            env=subprocess_env(),
        )
        # Extract version from "pipenv, version X.X.X"
        return result.stdout.split()[-1]
    except Exception:
        # Deliberate best-effort: any failure (missing binary, non-zero exit,
        # timeout, empty output) degrades to a sentinel instead of aborting
        # stats generation.
        return "unknown"
def generate_stats(self):
    """Aggregate per-stage timing files into ``stats.csv``.

    Writes one header row (11 columns) followed by one row per benchmark
    stage whose timing file exists under ``self.timings_dir``. Each timing
    file holds one comma-separated line in the order
    ``elapsed,system,user,cpu%,maxrss,inputs,outputs``.
    """
    print("Generating stats...")
    version = self.get_pipenv_version()
    timestamp = int(time.time())
    stats_file = self.benchmark_dir / "stats.csv"
    # newline="" is required by the csv module to avoid blank rows on Windows
    with open(stats_file, "w", newline="") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(
            [
                "tool",
                "version",
                "timestamp",
                "stat",
                "elapsed time",
                "system",
                "user",
                "cpu percent",
                "max rss",
                "inputs",
                "outputs",
            ]
        )
        stats = [
            "tooling",
            "import",
            "lock-cold",
            "lock-warm",
            "install-cold",
            "install-warm",
            "update-cold",
            "update-warm",
            "add-package",
        ]
        for stat in stats:
            timing_file = self.timings_dir / f"{stat}.txt"
            if timing_file.exists():
                with open(timing_file) as f:
                    timing_data = f.read().strip()
                # FIX: timing_data is itself comma-separated; split it so each
                # value lands in its own CSV column. Previously the whole
                # string went into one quoted field, producing 5-column rows
                # under an 11-column header.
                writer.writerow(
                    ["pipenv", version, timestamp, stat, *timing_data.split(",")]
                )
    print(f"Stats written to {stats_file}")
def run_full_benchmark(self):
    """Run every benchmark stage in order, printing a numbered banner per stage."""
    banner = "=" * 60
    print(banner)
    print("Starting pipenv benchmark suite...")
    print(banner)

    # Each stage: (title, ordered actions). Cold stages purge caches/venv
    # first so the subsequent measurement starts from a clean slate.
    stages = [
        ("Setup", [self.setup_requirements]),
        ("Tooling", [self.benchmark_tooling]),
        ("Import", [self.benchmark_import]),
        (
            "Lock (cold)",
            [
                self.clean_cache,
                self.clean_venv,
                self.clean_lock,
                lambda: self.benchmark_lock("lock-cold.txt"),
            ],
        ),
        ("Lock (warm)", [self.clean_lock, lambda: self.benchmark_lock("lock-warm.txt")]),
        (
            "Install (cold)",
            [
                self.clean_cache,
                self.clean_venv,
                lambda: self.benchmark_install("install-cold.txt"),
            ],
        ),
        (
            "Install (warm)",
            [self.clean_venv, lambda: self.benchmark_install("install-warm.txt")],
        ),
        (
            "Update (cold)",
            [self.clean_cache, lambda: self.benchmark_update("update-cold.txt")],
        ),
        ("Update (warm)", [lambda: self.benchmark_update("update-warm.txt")]),
        ("Add package", [self.benchmark_add_package]),
        ("Generate stats", [self.generate_stats]),
    ]

    total_steps = len(stages)
    for number, (title, actions) in enumerate(stages, start=1):
        print(f"\n[{number}/{total_steps}] {title}")
        print("-" * 40)
        for action in actions:
            action()

    print("\n" + banner)
    print("Benchmark suite completed!")
    print(banner)
def main():
    """CLI entry point: run one named operation, or the full suite if none given."""
    benchmark_dir = Path(__file__).parent
    benchmark = PipenvBenchmark(benchmark_dir)

    # Multi-step operations get small named helpers so the dispatch table
    # below stays flat and readable.
    def _lock_cold():
        benchmark.clean_cache()
        benchmark.clean_venv()
        benchmark.clean_lock()
        benchmark.benchmark_lock("lock-cold.txt")

    def _lock_warm():
        benchmark.clean_lock()
        benchmark.benchmark_lock("lock-warm.txt")

    def _install_cold():
        benchmark.clean_cache()
        benchmark.clean_venv()
        benchmark.benchmark_install("install-cold.txt")

    def _install_warm():
        benchmark.clean_venv()
        benchmark.benchmark_install("install-warm.txt")

    def _update_cold():
        benchmark.clean_cache()
        benchmark.benchmark_update("update-cold.txt")

    dispatch = {
        "setup": benchmark.setup_requirements,
        "tooling": benchmark.benchmark_tooling,
        "import": benchmark.benchmark_import,
        "lock-cold": _lock_cold,
        "lock-warm": _lock_warm,
        "install-cold": _install_cold,
        "install-warm": _install_warm,
        "update-cold": _update_cold,
        "update-warm": lambda: benchmark.benchmark_update("update-warm.txt"),
        "add-package": benchmark.benchmark_add_package,
        "stats": benchmark.generate_stats,
    }

    if len(sys.argv) > 1:
        operation = sys.argv[1]
        handler = dispatch.get(operation)
        if handler is None:
            print(f"Unknown operation: {operation}")
            sys.exit(1)
        handler()
    else:
        benchmark.run_full_benchmark()


if __name__ == "__main__":
    main()
| {
"repo_id": "pypa/pipenv",
"file_path": "benchmarks/benchmark.py",
"license": "MIT License",
"lines": 352,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pypa/pipenv:pipenv/patched/pip/_internal/commands/lock.py | from __future__ import annotations
import sys
from optparse import Values
from pathlib import Path
from pipenv.patched.pip._internal.cache import WheelCache
from pipenv.patched.pip._internal.cli import cmdoptions
from pipenv.patched.pip._internal.cli.req_command import (
RequirementCommand,
with_cleanup,
)
from pipenv.patched.pip._internal.cli.status_codes import SUCCESS
from pipenv.patched.pip._internal.models.pylock import Pylock, is_valid_pylock_file_name
from pipenv.patched.pip._internal.operations.build.build_tracker import get_build_tracker
from pipenv.patched.pip._internal.utils.logging import getLogger
from pipenv.patched.pip._internal.utils.misc import (
get_pip_version,
)
from pipenv.patched.pip._internal.utils.temp_dir import TempDirectory
logger = getLogger(__name__)
class LockCommand(RequirementCommand):
    """
    EXPERIMENTAL - Lock packages and their dependencies from:

    - PyPI (and other indexes) using requirement specifiers.
    - VCS project urls.
    - Local project directories.
    - Local or remote source archives.

    pip also supports locking from "requirements files", which provide an easy
    way to specify a whole environment to be installed.

    The generated lock file is only guaranteed to be valid for the current
    python version and platform.
    """

    usage = """
      %prog [options] [-e] <local project path> ...
      %prog [options] <requirement specifier> [package-index-options] ...
      %prog [options] -r <requirements file> [package-index-options] ...
      %prog [options] <archive url/path> ..."""

    def add_options(self) -> None:
        # Output destination; "-" streams the TOML to stdout instead of a file.
        self.cmd_opts.add_option(
            cmdoptions.PipOption(
                "--output",
                "-o",
                dest="output_file",
                metavar="path",
                type="path",
                default="pylock.toml",
                help="Lock file name (default=pylock.toml). Use - for stdout.",
            )
        )
        # Standard resolution/build options shared with `pip install`.
        self.cmd_opts.add_option(cmdoptions.requirements())
        self.cmd_opts.add_option(cmdoptions.constraints())
        self.cmd_opts.add_option(cmdoptions.build_constraints())
        self.cmd_opts.add_option(cmdoptions.no_deps())
        self.cmd_opts.add_option(cmdoptions.pre())
        self.cmd_opts.add_option(cmdoptions.editable())
        self.cmd_opts.add_option(cmdoptions.src())
        self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
        self.cmd_opts.add_option(cmdoptions.no_build_isolation())
        self.cmd_opts.add_option(cmdoptions.use_pep517())
        self.cmd_opts.add_option(cmdoptions.check_build_deps())
        self.cmd_opts.add_option(cmdoptions.config_settings())
        self.cmd_opts.add_option(cmdoptions.no_binary())
        self.cmd_opts.add_option(cmdoptions.only_binary())
        self.cmd_opts.add_option(cmdoptions.prefer_binary())
        self.cmd_opts.add_option(cmdoptions.require_hashes())
        self.cmd_opts.add_option(cmdoptions.progress_bar())

        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )

        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, self.cmd_opts)

    @with_cleanup
    def run(self, options: Values, args: list[str]) -> int:
        """Resolve the requested requirements and emit a pylock TOML document."""
        logger.verbose("Using %s", get_pip_version())

        # The command is explicitly experimental; warn on every invocation.
        logger.warning(
            "pip lock is currently an experimental command. "
            "It may be removed/changed in a future release "
            "without prior warning."
        )

        cmdoptions.check_build_constraints(options)
        session = self.get_default_session(options)

        finder = self._build_package_finder(
            options=options,
            session=session,
            ignore_requires_python=options.ignore_requires_python,
        )
        # Build tracker + temp dir are cleaned up by enter_context /
        # globally_managed when the command exits.
        build_tracker = self.enter_context(get_build_tracker())

        directory = TempDirectory(
            delete=not options.no_clean,
            kind="install",
            globally_managed=True,
        )

        reqs = self.get_requirements(args, options, finder, session)

        wheel_cache = WheelCache(options.cache_dir)

        # Only when installing is it permitted to use PEP 660.
        # In other circumstances (pip wheel, pip download) we generate
        # regular (i.e. non editable) metadata and wheels.
        for req in reqs:
            req.permit_editable_wheels = True

        preparer = self.make_requirement_preparer(
            temp_build_dir=directory,
            options=options,
            build_tracker=build_tracker,
            session=session,
            finder=finder,
            use_user_site=False,
            verbosity=self.verbosity,
        )
        resolver = self.make_resolver(
            preparer=preparer,
            finder=finder,
            options=options,
            wheel_cache=wheel_cache,
            use_user_site=False,
            ignore_installed=True,
            ignore_requires_python=options.ignore_requires_python,
            upgrade_strategy="to-satisfy-only",
        )

        self.trace_basic_info(finder)

        requirement_set = resolver.resolve(reqs, check_supported_wheels=True)

        # base_dir anchors relative paths in the lock file: the CWD when
        # writing to stdout, otherwise the output file's directory.
        if options.output_file == "-":
            base_dir = Path.cwd()
        else:
            output_file_path = Path(options.output_file)
            if not is_valid_pylock_file_name(output_file_path):
                # Non-fatal: the file is still written under the given name.
                logger.warning(
                    "%s is not a valid lock file name.",
                    output_file_path,
                )
            base_dir = output_file_path.parent
        pylock_toml = Pylock.from_install_requirements(
            requirement_set.requirements.values(), base_dir=base_dir
        ).as_toml()
        if options.output_file == "-":
            sys.stdout.write(pylock_toml)
        else:
            output_file_path.write_text(pylock_toml, encoding="utf-8")

        return SUCCESS
| {
"repo_id": "pypa/pipenv",
"file_path": "pipenv/patched/pip/_internal/commands/lock.py",
"license": "MIT License",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
pypa/pipenv:pipenv/patched/pip/_internal/models/pylock.py | from __future__ import annotations
import dataclasses
import re
from collections.abc import Iterable
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Any
from pipenv.patched.pip._vendor import tomli_w
from pipenv.patched.pip._internal.models.direct_url import ArchiveInfo, DirInfo, VcsInfo
from pipenv.patched.pip._internal.models.link import Link
from pipenv.patched.pip._internal.req.req_install import InstallRequirement
from pipenv.patched.pip._internal.utils.urls import url_to_path
if TYPE_CHECKING:
from typing_extensions import Self
# File names of the form "pylock.<name>.toml", where <name> contains no dots.
PYLOCK_FILE_NAME_RE = re.compile(r"^pylock\.([^.]+)\.toml$")


def is_valid_pylock_file_name(path: Path) -> bool:
    """Return True for ``pylock.toml`` or any ``pylock.<name>.toml`` file name."""
    name = path.name
    if name == "pylock.toml":
        return True
    return PYLOCK_FILE_NAME_RE.match(name) is not None
def _toml_dict_factory(data: list[tuple[str, Any]]) -> dict[str, Any]:
return {key.replace("_", "-"): value for key, value in data if value is not None}
# --- Package source tables -------------------------------------------------
# Each dataclass below mirrors one package-source table of the lock file.
# Fields this implementation does not emit are kept as "(not supported)"
# comments; None-valued fields are dropped at serialization time by
# _toml_dict_factory.


@dataclass
class PackageVcs:
    # VCS checkout source; commit_id pins the exact revision.
    type: str
    url: str | None
    # (not supported) path: Optional[str]
    requested_revision: str | None
    commit_id: str
    subdirectory: str | None


@dataclass
class PackageDirectory:
    # Local project directory source; editable is None (omitted) when False.
    path: str
    editable: bool | None
    subdirectory: str | None


@dataclass
class PackageArchive:
    # Archive (URL) source; hashes maps hash name -> digest.
    url: str | None
    # (not supported) path: Optional[str]
    # (not supported) size: Optional[int]
    # (not supported) upload_time: Optional[datetime]
    hashes: dict[str, str]
    subdirectory: str | None


@dataclass
class PackageSdist:
    # Source distribution file fetched from an index.
    name: str
    # (not supported) upload_time: Optional[datetime]
    url: str | None
    # (not supported) path: Optional[str]
    # (not supported) size: Optional[int]
    hashes: dict[str, str]


@dataclass
class PackageWheel:
    # Built wheel file fetched from an index.
    name: str
    # (not supported) upload_time: Optional[datetime]
    url: str | None
    # (not supported) path: Optional[str]
    # (not supported) size: Optional[int]
    hashes: dict[str, str]
@dataclass
class Package:
    # One entry of the lock file's packages list. At most one source kind
    # (vcs / directory / archive, or sdist / wheels) is populated per package.
    name: str
    version: str | None = None
    # (not supported) marker: Optional[str]
    # (not supported) requires_python: Optional[str]
    # (not supported) dependencies
    vcs: PackageVcs | None = None
    directory: PackageDirectory | None = None
    archive: PackageArchive | None = None
    # (not supported) index: Optional[str]
    sdist: PackageSdist | None = None
    wheels: list[PackageWheel] | None = None
    # (not supported) attestation_identities: Optional[List[Dict[str, Any]]]
    # (not supported) tool: Optional[Dict[str, Any]]

    @classmethod
    def from_install_requirement(cls, ireq: InstallRequirement, base_dir: Path) -> Self:
        """Build a lock-file Package entry from a resolved InstallRequirement.

        Direct requirements (URL/path/VCS) become vcs/directory/archive
        entries and carry no version; index requirements record the version
        plus either a wheel or an sdist file. Raises NotImplementedError for
        archive sources without hashes and for unrecognized download info.
        """
        base_dir = base_dir.resolve()
        dist = ireq.get_dist()
        download_info = ireq.download_info
        # download_info is expected to be populated once the requirement has
        # been prepared by the resolver.
        assert download_info
        package = cls(name=dist.canonical_name)
        if ireq.is_direct:
            if isinstance(download_info.info, VcsInfo):
                package.vcs = PackageVcs(
                    type=download_info.info.vcs,
                    url=download_info.url,
                    requested_revision=download_info.info.requested_revision,
                    commit_id=download_info.info.commit_id,
                    subdirectory=download_info.subdirectory,
                )
            elif isinstance(download_info.info, DirInfo):
                package.directory = PackageDirectory(
                    # Store the directory relative to base_dir, POSIX-style,
                    # so the lock file is location-independent.
                    path=(
                        Path(url_to_path(download_info.url))
                        .resolve()
                        .relative_to(base_dir)
                        .as_posix()
                    ),
                    # None (not False) so the key is omitted from the TOML
                    # output when the install is not editable.
                    editable=(
                        download_info.info.editable
                        if download_info.info.editable
                        else None
                    ),
                    subdirectory=download_info.subdirectory,
                )
            elif isinstance(download_info.info, ArchiveInfo):
                if not download_info.info.hashes:
                    raise NotImplementedError()
                package.archive = PackageArchive(
                    url=download_info.url,
                    hashes=download_info.info.hashes,
                    subdirectory=download_info.subdirectory,
                )
            else:
                # should never happen
                raise NotImplementedError()
        else:
            package.version = str(dist.version)
            if isinstance(download_info.info, ArchiveInfo):
                if not download_info.info.hashes:
                    raise NotImplementedError()
                link = Link(download_info.url)
                # The filename decides whether the artifact is a wheel or sdist.
                if link.is_wheel:
                    package.wheels = [
                        PackageWheel(
                            name=link.filename,
                            url=download_info.url,
                            hashes=download_info.info.hashes,
                        )
                    ]
                else:
                    package.sdist = PackageSdist(
                        name=link.filename,
                        url=download_info.url,
                        hashes=download_info.info.hashes,
                    )
            else:
                # should never happen
                raise NotImplementedError()
        return package
@dataclass
class Pylock:
    # Top-level lock file document.
    lock_version: str = "1.0"
    # (not supported) environments: Optional[List[str]]
    # (not supported) requires_python: Optional[str]
    # (not supported) extras: List[str] = []
    # (not supported) dependency_groups: List[str] = []
    created_by: str = "pip"
    packages: list[Package] = dataclasses.field(default_factory=list)
    # (not supported) tool: Optional[Dict[str, Any]]

    def as_toml(self) -> str:
        """Serialize to a TOML string; _toml_dict_factory kebab-cases the keys
        and drops None-valued (omitted) fields."""
        return tomli_w.dumps(dataclasses.asdict(self, dict_factory=_toml_dict_factory))

    @classmethod
    def from_install_requirements(
        cls, install_requirements: Iterable[InstallRequirement], base_dir: Path
    ) -> Self:
        """Create a Pylock from resolved requirements; packages are sorted by
        name so the output is deterministic across runs."""
        return cls(
            packages=sorted(
                (
                    Package.from_install_requirement(ireq, base_dir)
                    for ireq in install_requirements
                ),
                key=lambda p: p.name,
            )
        )
| {
"repo_id": "pypa/pipenv",
"file_path": "pipenv/patched/pip/_internal/models/pylock.py",
"license": "MIT License",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pypa/pipenv:pipenv/patched/pip/_internal/req/req_dependency_group.py | from __future__ import annotations
from collections.abc import Iterable, Iterator
from typing import Any
from pipenv.patched.pip._vendor.dependency_groups import DependencyGroupResolver
from pipenv.patched.pip._internal.exceptions import InstallationError
from pipenv.patched.pip._internal.utils.compat import tomllib
def parse_dependency_groups(groups: list[tuple[str, str]]) -> list[str]:
    """
    Parse dependency groups data as provided via the CLI, in a `[path:]group` syntax.

    Raises InstallationErrors if anything goes wrong.
    """
    # One resolver per distinct pyproject path, then resolve every
    # (path, group) pair against its resolver.
    unique_paths = (path for path, _group in groups)
    resolvers = _build_resolvers(unique_paths)
    resolved = _resolve_all_groups(resolvers, groups)
    return list(resolved)
def _resolve_all_groups(
    resolvers: dict[str, DependencyGroupResolver], groups: list[tuple[str, str]]
) -> Iterator[str]:
    """
    Run all resolution, converting any error from `DependencyGroupResolver` into
    an InstallationError.
    """
    for path, groupname in groups:
        resolver = resolvers[path]
        try:
            for requirement in resolver.resolve(groupname):
                yield str(requirement)
        except (ValueError, TypeError, LookupError) as e:
            # Surface resolver failures as pip's own error type so the CLI
            # reports them uniformly.
            raise InstallationError(
                f"[dependency-groups] resolution failed for '{groupname}' "
                f"from '{path}': {e}"
            ) from e
def _build_resolvers(paths: Iterable[str]) -> dict[str, Any]:
    """Create one DependencyGroupResolver per unique pyproject path.

    Raises InstallationError when a pyproject lacks a usable
    [dependency-groups] table.
    """
    resolvers: dict[str, Any] = {}
    for path in paths:
        # Paths may repeat across CLI options; build each resolver once.
        if path in resolvers:
            continue

        pyproject = _load_pyproject(path)

        if "dependency-groups" not in pyproject:
            raise InstallationError(
                f"[dependency-groups] table was missing from '{path}'. "
                "Cannot resolve '--group' option."
            )
        raw_dependency_groups = pyproject["dependency-groups"]
        if not isinstance(raw_dependency_groups, dict):
            raise InstallationError(
                f"[dependency-groups] table was malformed in {path}. "
                "Cannot resolve '--group' option."
            )

        resolvers[path] = DependencyGroupResolver(raw_dependency_groups)
    return resolvers
def _load_pyproject(path: str) -> dict[str, Any]:
    """
    This helper loads a pyproject.toml as TOML.

    It raises an InstallationError if the operation fails.
    """
    try:
        with open(path, "rb") as fp:
            return tomllib.load(fp)
    # NOTE: FileNotFoundError must be caught before OSError (its parent
    # class) so the more specific message wins.
    except FileNotFoundError:
        raise InstallationError(f"{path} not found. Cannot resolve '--group' option.")
    except tomllib.TOMLDecodeError as e:
        raise InstallationError(f"Error parsing {path}: {e}") from e
    except OSError as e:
        raise InstallationError(f"Error reading {path}: {e}") from e
| {
"repo_id": "pypa/pipenv",
"file_path": "pipenv/patched/pip/_internal/req/req_dependency_group.py",
"license": "MIT License",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
python-poetry/poetry:tests/repositories/test_cached_repository.py | from __future__ import annotations
from typing import Any
import pytest
from packaging.utils import NormalizedName
from packaging.utils import canonicalize_name
from poetry.core.constraints.version import Version
from poetry.inspection.info import PackageInfo
from poetry.repositories.cached_repository import CachedRepository
class MockCachedRepository(CachedRepository):
    # Minimal concrete subclass: the tests monkey-patch _get_release_info per
    # case, so this body is never expected to run.
    def _get_release_info(
        self, name: NormalizedName, version: Version
    ) -> dict[str, Any]:
        raise NotImplementedError
@pytest.fixture
def release_info() -> PackageInfo:
    # "Fresh" release data: two files (wheel + sdist), in contrast to the
    # single-file outdated_release_info fixture below.
    return PackageInfo(
        name="mylib",
        version="1.0",
        summary="",
        requires_dist=[],
        requires_python=">=3.9",
        files=[
            {
                "file": "mylib-1.0-py3-none-any.whl",
                "hash": "sha256:dummyhashvalue1234567890abcdef",
            },
            {
                "file": "mylib-1.0.tar.gz",
                "hash": "sha256:anotherdummyhashvalueabcdef1234567890",
            },
        ],
        cache_version=str(CachedRepository.CACHE_VERSION),
    )
@pytest.fixture
def outdated_release_info() -> PackageInfo:
    # "Stale" release data: only the wheel file, so tests can tell by file
    # count whether the cached or the fresh value was returned.
    return PackageInfo(
        name="mylib",
        version="1.0",
        summary="",
        requires_dist=[],
        requires_python=">=3.9",
        files=[
            {
                "file": "mylib-1.0-py3-none-any.whl",
                "hash": "sha256:dummyhashvalue1234567890abcdef",
            }
        ],
        cache_version=str(CachedRepository.CACHE_VERSION),
    )
@pytest.mark.parametrize("disable_cache", [False, True])
def test_get_release_info_cache(
    release_info: PackageInfo, outdated_release_info: PackageInfo, disable_cache: bool
) -> None:
    repo = MockCachedRepository("mock", disable_cache=disable_cache)

    # First call populates the cache with the one-file (outdated) data.
    repo._get_release_info = lambda name, version: outdated_release_info.asdict()  # type: ignore[method-assign]
    name = canonicalize_name("mylib")
    version = Version.parse("1.0")
    assert len(repo.get_release_info(name, version).files) == 1

    # without disable_cache: cached value is returned even if the underlying data has changed
    # with disable_cache: cached value is ignored and updated data is returned
    repo._get_release_info = lambda name, version: release_info.asdict()  # type: ignore[method-assign]
    assert len(repo.get_release_info(name, version).files) == (
        2 if disable_cache else 1
    )

    # after clearing the cache entry, updated data is returned
    repo.forget(name, version)
    assert len(repo.get_release_info(name, version).files) == 2
| {
"repo_id": "python-poetry/poetry",
"file_path": "tests/repositories/test_cached_repository.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-poetry/poetry:tests/console/commands/debug/test_info.py | from __future__ import annotations
import sys
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
from poetry.__version__ import __version__
from poetry.utils.env import MockEnv
if TYPE_CHECKING:
from cleo.testers.command_tester import CommandTester
from pytest_mock import MockerFixture
from tests.types import CommandTesterFactory
@pytest.fixture(autouse=True)
def setup(mocker: MockerFixture) -> None:
    # Pin the virtualenv and the interpreter paths so the `debug info`
    # output asserted below is deterministic across machines.
    mocker.patch(
        "poetry.utils.env.EnvManager.get",
        return_value=MockEnv(
            path=Path("/prefix"), base=Path("/base/prefix"), is_venv=True
        ),
    )
    mocker.patch(
        "sys.prefix",
        "/poetry/prefix",
    )
    mocker.patch(
        "sys.executable",
        "/poetry/prefix/bin/python",
    )
@pytest.fixture
def tester(command_tester_factory: CommandTesterFactory) -> CommandTester:
    # CommandTester wired to the `debug info` command under test.
    return command_tester_factory("debug info")
def test_debug_info_displays_complete_info(tester: CommandTester) -> None:
    # `debug info` takes no arguments; environment data comes from the
    # autouse `setup` fixture above.
    tester.execute()

    # Expected output interpolates the real poetry version and the real
    # interpreter version, so the test is stable across releases.
    expected = f"""
Poetry
Version: {__version__}
Python: {".".join(str(v) for v in sys.version_info[:3])}
Path: {Path("/poetry/prefix")}
Executable: {Path("/poetry/prefix/bin/python")}
Virtualenv
Python: 3.7.0
Implementation: CPython
Path: {Path("/prefix")}
Executable: {Path(sys.executable)}
Valid: True
Base
Platform: darwin
OS: posix
Python: {".".join(str(v) for v in sys.version_info[:3])}
Path: {Path("/base/prefix")}
Executable: python
"""

    assert tester.io.fetch_output() == expected
| {
"repo_id": "python-poetry/poetry",
"file_path": "tests/console/commands/debug/test_info.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-poetry/poetry:tests/console/commands/self/test_show.py | from __future__ import annotations
import json
from typing import TYPE_CHECKING
import pytest
import tomlkit
from poetry.__version__ import __version__
from poetry.console.commands.self.self_command import SelfCommand
if TYPE_CHECKING:
from cleo.testers.command_tester import CommandTester
from tests.types import CommandTesterFactory
@pytest.fixture()
def tester(command_tester_factory: CommandTesterFactory) -> CommandTester:
    # CommandTester wired to the `self show` command under test.
    return command_tester_factory("self show")
@pytest.mark.parametrize("options", ["", "--format json", "--format text"])
def test_show_format(tester: CommandTester, options: str) -> None:
    # `self show` reads poetry's own (system) pyproject/lock pair, so the
    # test fabricates both files before invoking the command.
    pyproject_content = {
        "tool": {
            "poetry": {
                "name": "poetry-instance",
                "version": __version__,
                "dependencies": {"python": "^3.9", "poetry": __version__},
            }
        }
    }
    lock_content = {
        "package": [
            {
                "name": "poetry",
                "version": __version__,
                "optional": False,
                "platform": "*",
                "python-versions": "*",
                "files": [],
            },
        ],
        "metadata": {
            "lock-version": "2.0",
            "python-versions": "^3.9",
            "content-hash": "123456789",
        },
    }

    # Expected output depends on the --format flag: a JSON payload for
    # "--format json", plain "poetry <version>" otherwise (text is default).
    if "json" in options:
        expected = json.dumps(
            [
                {
                    "name": "poetry",
                    "installed_status": "installed",
                    "version": __version__,
                    "description": "",
                }
            ]
        )
    else:
        expected = f"poetry {__version__}"

    system_pyproject_file = SelfCommand.get_default_system_pyproject_file()
    system_pyproject_file.write_text(tomlkit.dumps(pyproject_content), encoding="utf-8")
    system_pyproject_file.parent.joinpath("poetry.lock").write_text(
        tomlkit.dumps(lock_content), encoding="utf-8"
    )

    assert tester.execute(options) == 0
    assert tester.io.fetch_output().strip() == expected
def test_self_show_errors_without_lock_file(tester: CommandTester) -> None:
    # Create the system pyproject but make sure no poetry.lock exists:
    # the command must fail with a helpful error instead of resolving.
    system_pyproject_file = SelfCommand.get_default_system_pyproject_file()
    system_pyproject_file.write_text(
        tomlkit.dumps(
            {
                "tool": {
                    "poetry": {
                        "name": "poetry-instance",
                        "version": __version__,
                        "dependencies": {"python": "^3.9", "poetry": __version__},
                    }
                }
            }
        ),
        encoding="utf-8",
    )
    system_pyproject_file.parent.joinpath("poetry.lock").unlink(missing_ok=True)

    assert tester.execute() == 1
    assert (
        tester.io.fetch_error()
        == "Error: poetry.lock not found. Run `poetry self lock` to create it.\n"
    )
| {
"repo_id": "python-poetry/poetry",
"file_path": "tests/console/commands/self/test_show.py",
"license": "MIT License",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-poetry/poetry:src/poetry/utils/log_utils.py | from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from poetry.core.packages.package import Package
from poetry.utils.env import Env
def format_build_wheel_log(package: Package, env: Env) -> str:
    """Build the log line announcing a wheel build for *package* in *env*.

    Missing marker-environment entries fall back to ``<unknown...>``
    placeholders rather than raising.
    """
    marker_env = env.marker_env

    version_info = marker_env.get(
        "version_info", ("<unknown>", "<unknown>", "<unknown>")
    )
    python_version = ".".join(str(part) for part in version_info[:3])

    platform = marker_env.get("sys_platform", "<unknown-platform>")
    architecture = marker_env.get("platform_machine", "<unknown-arch>")

    return (
        f" <info>Building a wheel file for {package.pretty_name} "
        f"for Python {python_version} on {platform}-{architecture}</info>"
    )
| {
"repo_id": "python-poetry/poetry",
"file_path": "src/poetry/utils/log_utils.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
python-poetry/poetry:tests/utils/test_log_utils.py | from __future__ import annotations
from poetry.core.packages.package import Package
from poetry.utils.env.mock_env import MockEnv
from poetry.utils.log_utils import format_build_wheel_log
def test_format_build_wheel_log() -> None:
    # Fixed env data (version/platform/arch) so the formatted line is exact.
    env = MockEnv(version_info=(3, 13, 1), platform="win32", platform_machine="AMD64")
    package = Package(name="demo", version="1.2.3")
    result = format_build_wheel_log(package, env)
    expected = (
        " <info>Building a wheel file for demo for Python 3.13.1 on win32-AMD64</info>"
    )
    assert result == expected
| {
"repo_id": "python-poetry/poetry",
"file_path": "tests/utils/test_log_utils.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:src/telegram/_userrating.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram user rating."""
from telegram._telegramobject import TelegramObject
from telegram._utils.types import JSONDict
class UserRating(TelegramObject):
    """
    This object describes the rating of a user based on their Telegram Star spendings.

    Objects of this class are comparable in terms of equality. Two objects of this class are
    considered equal, if their :attr:`level` and :attr:`rating` are equal.

    .. versionadded:: 22.6

    Args:
        level (:obj:`int`): Current level of the user, indicating their reliability when purchasing
            digital goods and services. A higher level suggests a more trustworthy customer; a
            negative level is likely reason for concern.
        rating (:obj:`int`): Numerical value of the user's rating; the higher the rating, the
            better
        current_level_rating (:obj:`int`): The rating value required to get the current level
        next_level_rating (:obj:`int`, optional): The rating value required to get to the next
            level; omitted if the maximum level was reached

    Attributes:
        level (:obj:`int`): Current level of the user, indicating their reliability when purchasing
            digital goods and services. A higher level suggests a more trustworthy customer; a
            negative level is likely reason for concern.
        rating (:obj:`int`): Numerical value of the user's rating; the higher the rating, the
            better
        current_level_rating (:obj:`int`): The rating value required to get the current level
        next_level_rating (:obj:`int`): Optional. The rating value required to get to the next
            level; omitted if the maximum level was reached
    """

    # Slotted (alphabetical order) — no per-instance __dict__.
    __slots__ = ("current_level_rating", "level", "next_level_rating", "rating")

    def __init__(
        self,
        level: int,
        rating: int,
        current_level_rating: int,
        next_level_rating: int | None = None,
        *,
        api_kwargs: JSONDict | None = None,
    ) -> None:
        super().__init__(api_kwargs=api_kwargs)
        self.level: int = level
        self.rating: int = rating
        self.current_level_rating: int = current_level_rating
        self.next_level_rating: int | None = next_level_rating

        # Equality is driven by (level, rating), matching the class docstring.
        self._id_attrs = (self.level, self.rating)

        # NOTE(review): _freeze() presumably makes the instance read-only per
        # TelegramObject convention — confirm against the base class.
        self._freeze()
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "src/telegram/_userrating.py",
"license": "GNU General Public License v3.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
python-telegram-bot/python-telegram-bot:tests/test_userrating.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram import BotCommand, UserRating
from tests.auxil.slots import mro_slots
@pytest.fixture(scope="module")
def user_rating():
    """Return a module-scoped ``UserRating`` built from the test-base defaults."""
    defaults = UserRatingTestBase
    return UserRating(
        level=defaults.level,
        rating=defaults.rating,
        current_level_rating=defaults.current_level_rating,
        next_level_rating=defaults.next_level_rating,
    )
class UserRatingTestBase:
    # Expected attribute values shared between the ``user_rating`` fixture and
    # the assertions in the test class below.
    level = 2
    rating = 120
    current_level_rating = 100
    next_level_rating = 180
class TestUserRatingWithoutRequest(UserRatingTestBase):
    def test_slot_behaviour(self, user_rating):
        # Every declared slot must be readable, and no slot may appear twice in the MRO.
        for attr in user_rating.__slots__:
            assert getattr(user_rating, attr, "err") != "err", f"got extra slot '{attr}'"
        slot_list = mro_slots(user_rating)
        assert len(slot_list) == len(set(slot_list)), "duplicate slot"

    def test_de_json_with_next(self, offline_bot):
        data = {
            "level": self.level,
            "rating": self.rating,
            "current_level_rating": self.current_level_rating,
            "next_level_rating": self.next_level_rating,
        }
        rating = UserRating.de_json(data, offline_bot)
        assert rating.api_kwargs == {}
        assert rating.level == self.level
        assert rating.rating == self.rating
        assert rating.current_level_rating == self.current_level_rating
        assert rating.next_level_rating == self.next_level_rating

    def test_de_json_no_optional(self, offline_bot):
        data = {
            "level": self.level,
            "rating": self.rating,
            "current_level_rating": self.current_level_rating,
        }
        rating = UserRating.de_json(data, offline_bot)
        assert rating.api_kwargs == {}
        assert rating.level == self.level
        assert rating.rating == self.rating
        assert rating.current_level_rating == self.current_level_rating
        assert rating.next_level_rating is None

    def test_to_dict(self, user_rating):
        serialized = user_rating.to_dict()
        assert isinstance(serialized, dict)
        assert serialized["level"] == user_rating.level
        assert serialized["rating"] == user_rating.rating
        assert serialized["current_level_rating"] == user_rating.current_level_rating
        assert serialized["next_level_rating"] == user_rating.next_level_rating

    def test_equality(self):
        a = UserRating(3, 200, 150, 300)
        b = UserRating(3, 200, 100, None)
        c = UserRating(3, 201, 150, 300)
        d = UserRating(4, 200, 150, 300)
        e = BotCommand("start", "description")

        # Same level and rating -> equal, regardless of the level-boundary ratings.
        assert a == b
        assert hash(a) == hash(b)

        # Different rating, different level, or different type -> unequal.
        for other in (c, d, e):
            assert a != other
            assert hash(a) != hash(other)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/test_userrating.py",
"license": "GNU General Public License v3.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:tests/ext/_utils/test_networkloop.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains tests for the network_retry_loop function.
Note:
Most of the retry loop functionality is already covered in test_updater and test_application.
These tests focus specifically on the max_retries behavior for different exception types
and the error callback handling, which were added as part of the bug fix in #5030.
"""
import pytest
from telegram.error import InvalidToken, RetryAfter, TelegramError, TimedOut
from telegram.ext._utils.networkloop import network_retry_loop
class TestNetworkRetryLoop:
    """Tests for the network_retry_loop function.
    Note:
        The general retry loop functionality is extensively tested in test_updater and
        test_application. These tests focus on the specific max_retries behavior for
        different exception types.
    """
    # RetryAfter and TimedOut are exercised together throughout: both are
    # expected to share the same retry-until-max_retries semantics.
    @pytest.mark.parametrize(
        ("exception_class", "exception_args"),
        [
            (RetryAfter, (1,)),
            (TimedOut, ("Test timeout",)),
        ],
        ids=["RetryAfter", "TimedOut"],
    )
    async def test_exception_respects_max_retries(self, exception_class, exception_args):
        """Test that RetryAfter and TimedOut exceptions respect max_retries limit."""
        call_count = 0
        async def action_with_exception():
            nonlocal call_count
            call_count += 1
            raise exception_class(*exception_args)
        # The loop must eventually re-raise once the retry budget is exhausted.
        with pytest.raises(exception_class):
            await network_retry_loop(
                action_cb=action_with_exception,
                description=f"Test {exception_class.__name__}",
                interval=0,
                max_retries=2,
            )
        # Should be called 3 times: initial call + 2 retries
        assert call_count == 3
    @pytest.mark.parametrize(
        ("exception_class", "exception_args"),
        [
            (RetryAfter, (1,)),
            (TimedOut, ("Test timeout",)),
        ],
        ids=["RetryAfter", "TimedOut"],
    )
    async def test_exception_with_zero_max_retries(self, exception_class, exception_args):
        """Test that RetryAfter and TimedOut with max_retries=0 don't retry."""
        call_count = 0
        async def action_with_exception():
            nonlocal call_count
            call_count += 1
            raise exception_class(*exception_args)
        with pytest.raises(exception_class):
            await network_retry_loop(
                action_cb=action_with_exception,
                description=f"Test {exception_class.__name__} no retries",
                interval=0,
                max_retries=0,
            )
        # Should be called only once with max_retries=0
        assert call_count == 1
    async def test_invalid_token_aborts_immediately(self):
        """Test that InvalidToken exceptions abort immediately without retries."""
        call_count = 0
        async def action_with_invalid_token():
            nonlocal call_count
            call_count += 1
            raise InvalidToken("Invalid token")
        # max_retries is deliberately generous to prove it is ignored for InvalidToken.
        with pytest.raises(InvalidToken):
            await network_retry_loop(
                action_cb=action_with_invalid_token,
                description="Test InvalidToken",
                interval=0,
                max_retries=5,
            )
        # Should be called only once, no retries for invalid token
        assert call_count == 1
    async def test_telegram_error_respects_max_retries(self):
        """Test that general TelegramError exceptions respect max_retries limit."""
        call_count = 0
        async def action_with_telegram_error():
            nonlocal call_count
            call_count += 1
            raise TelegramError("Test error")
        with pytest.raises(TelegramError):
            await network_retry_loop(
                action_cb=action_with_telegram_error,
                description="Test TelegramError",
                interval=0,
                max_retries=3,
            )
        # Should be called 4 times: initial call + 3 retries
        assert call_count == 4
    @pytest.mark.parametrize(
        ("exception_class", "exception_args"),
        [
            (RetryAfter, (1,)),
            (TimedOut, ("Test timeout",)),
            (InvalidToken, ("Invalid token",)),
        ],
        ids=["RetryAfter", "TimedOut", "InvalidToken"],
    )
    async def test_error_callback_not_called_for_specific_exceptions(
        self, exception_class, exception_args
    ):
        """Test that error callback is not called for RetryAfter, TimedOut, or InvalidToken."""
        error_callback_called = False
        def error_callback(exc):
            nonlocal error_callback_called
            error_callback_called = True
        async def action_with_exception():
            raise exception_class(*exception_args)
        with pytest.raises(exception_class):
            await network_retry_loop(
                action_cb=action_with_exception,
                on_err_cb=error_callback,
                description=f"Test {exception_class.__name__} callback",
                interval=0,
                max_retries=1,
            )
        assert not error_callback_called
    async def test_error_callback_called_for_telegram_error(self):
        """Test that error callback is called for general TelegramError exceptions."""
        error_callback_count = 0
        caught_exception = None
        def error_callback(exc):
            nonlocal error_callback_count, caught_exception
            error_callback_count += 1
            caught_exception = exc
        async def action_with_telegram_error():
            raise TelegramError("Test error")
        with pytest.raises(TelegramError):
            await network_retry_loop(
                action_cb=action_with_telegram_error,
                on_err_cb=error_callback,
                description="Test TelegramError callback",
                interval=0,
                max_retries=2,
            )
        # Should be called 3 times (initial + 2 retries)
        assert error_callback_count == 3
        assert isinstance(caught_exception, TelegramError)
    async def test_success_after_retries(self):
        """Test that action succeeds after some retries."""
        call_count = 0
        async def action_succeeds_on_third_try():
            nonlocal call_count
            call_count += 1
            if call_count < 3:
                raise TimedOut("Test timeout")
            # Success on third try
        # No exception expected: the loop must return normally once the action succeeds.
        await network_retry_loop(
            action_cb=action_succeeds_on_third_try,
            description="Test success after retries",
            interval=0,
            max_retries=5,
        )
        assert call_count == 3
    @pytest.mark.parametrize(
        ("exception_class", "exception_args", "success_after"),
        [
            (RetryAfter, (0.01,), 5),
            (TimedOut, ("Test timeout",), 4),
        ],
        ids=["RetryAfter", "TimedOut"],
    )
    async def test_exception_with_negative_max_retries(
        self, exception_class, exception_args, success_after
    ):
        """Test that exceptions with max_retries=-1 retry indefinitely until success."""
        call_count = 0
        async def action_succeeds_after_few_tries():
            nonlocal call_count
            call_count += 1
            if call_count < success_after:
                raise exception_class(*exception_args)
            # Success after specified tries
        # max_retries=-1 requests unlimited retries; the eventual success ends the loop.
        await network_retry_loop(
            action_cb=action_succeeds_after_few_tries,
            description=f"Test {exception_class.__name__} infinite retries",
            interval=0,
            max_retries=-1,
        )
        assert call_count == success_after
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/ext/_utils/test_networkloop.py",
"license": "GNU General Public License v3.0",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:src/telegram/_directmessagestopic.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the DirectMessagesTopic class."""
from typing import TYPE_CHECKING, Optional
from telegram._telegramobject import TelegramObject
from telegram._user import User
from telegram._utils.argumentparsing import de_json_optional
from telegram._utils.types import JSONDict
if TYPE_CHECKING:
from telegram._bot import Bot
class DirectMessagesTopic(TelegramObject):
    """
    Represents a topic for direct messages in a chat.

    Objects of this class are comparable in terms of equality. Two objects of this class are
    considered equal, if their :attr:`topic_id` and :attr:`user` is equal.

    .. versionadded:: 22.4

    Args:
        topic_id (:obj:`int`): Unique identifier of the topic. This number may have more than 32
            significant bits and some programming languages may have difficulty/silent defects in
            interpreting it. But it has at most 52 significant bits, so a 64-bit integer or
            double-precision float type are safe for storing this identifier.
        user (:class:`telegram.User`, optional): Information about the user that created the topic.

            .. hint::
                According to Telegram, this field is always present as of Bot API 9.2.

    Attributes:
        topic_id (:obj:`int`): Unique identifier of the topic. This number may have more than 32
            significant bits and some programming languages may have difficulty/silent defects in
            interpreting it. But it has at most 52 significant bits, so a 64-bit integer or
            double-precision float type are safe for storing this identifier.
        user (:class:`telegram.User`): Optional. Information about the user that created the topic.

            .. hint::
                According to Telegram, this field is always present as of Bot API 9.2.
    """

    __slots__ = ("topic_id", "user")

    def __init__(
        self, topic_id: int, user: User | None = None, *, api_kwargs: JSONDict | None = None
    ):
        super().__init__(api_kwargs=api_kwargs)
        # Required
        self.topic_id: int = topic_id
        # Optional
        self.user: User | None = user

        self._id_attrs = (self.topic_id, self.user)
        self._freeze()

    @classmethod
    def de_json(cls, data: JSONDict, bot: Optional["Bot"] = None) -> "DirectMessagesTopic":
        """See :meth:`telegram.TelegramObject.de_json`."""
        parsed = cls._parse_data(data)
        parsed["user"] = de_json_optional(parsed.get("user"), User, bot)
        return super().de_json(data=parsed, bot=bot)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "src/telegram/_directmessagestopic.py",
"license": "GNU General Public License v3.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
python-telegram-bot/python-telegram-bot:src/telegram/_suggestedpost.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains objects related to Telegram suggested posts."""
import datetime as dtm
from typing import TYPE_CHECKING, Final, Optional
from telegram import constants
from telegram._message import Message
from telegram._payment.stars.staramount import StarAmount
from telegram._telegramobject import TelegramObject
from telegram._utils import enum
from telegram._utils.argumentparsing import de_json_optional
from telegram._utils.datetime import extract_tzinfo_from_defaults, from_timestamp
from telegram._utils.types import JSONDict
if TYPE_CHECKING:
from telegram import Bot
class SuggestedPostPrice(TelegramObject):
    """
    Describes the price of a suggested post.
    Objects of this class are comparable in terms of equality. Two objects of this class are
    considered equal, if their :attr:`currency` and :attr:`amount` are equal.
    .. versionadded:: 22.4
    Args:
        currency (:obj:`str`):
            Currency in which the post will be paid. Currently, must be one of ``“XTR”`` for
            Telegram Stars or ``“TON”`` for toncoins.
        amount (:obj:`int`):
            The amount of the currency that will be paid for the post in the smallest units of the
            currency, i.e. Telegram Stars or nanotoncoins. Currently, price in Telegram Stars must
            be between :tg-const:`telegram.constants.SuggestedPost.MIN_PRICE_STARS`
            and :tg-const:`telegram.constants.SuggestedPost.MAX_PRICE_STARS`, and price in
            nanotoncoins must be between
            :tg-const:`telegram.constants.SuggestedPost.MIN_PRICE_NANOTONCOINS`
            and :tg-const:`telegram.constants.SuggestedPost.MAX_PRICE_NANOTONCOINS`.
    Attributes:
        currency (:obj:`str`):
            Currency in which the post will be paid. Currently, must be one of ``“XTR”`` for
            Telegram Stars or ``“TON”`` for toncoins.
        amount (:obj:`int`):
            The amount of the currency that will be paid for the post in the smallest units of the
            currency, i.e. Telegram Stars or nanotoncoins. Currently, price in Telegram Stars must
            be between :tg-const:`telegram.constants.SuggestedPost.MIN_PRICE_STARS`
            and :tg-const:`telegram.constants.SuggestedPost.MAX_PRICE_STARS`, and price in
            nanotoncoins must be between
            :tg-const:`telegram.constants.SuggestedPost.MIN_PRICE_NANOTONCOINS`
            and :tg-const:`telegram.constants.SuggestedPost.MAX_PRICE_NANOTONCOINS`.
    """
    __slots__ = ("amount", "currency")
    def __init__(
        self,
        currency: str,
        amount: int,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        # Required
        self.currency: str = currency
        self.amount: int = amount
        # Both fields participate in equality/hashing.
        self._id_attrs = (self.currency, self.amount)
        self._freeze()
class SuggestedPostParameters(TelegramObject):
    """
    Contains parameters of a post that is being suggested by the bot.
    Objects of this class are comparable in terms of equality. Two objects of this class are
    considered equal, if their :attr:`price` and :attr:`send_date` are equal.
    .. versionadded:: 22.4
    Args:
        price (:class:`telegram.SuggestedPostPrice`, optional):
            Proposed price for the post. If the field is omitted, then the post is unpaid.
        send_date (:class:`datetime.datetime`, optional):
            Proposed send date of the post. If specified, then the date
            must be between :tg-const:`telegram.constants.SuggestedPost.MIN_SEND_DATE`
            second and :tg-const:`telegram.constants.SuggestedPost.MAX_SEND_DATE` seconds (30 days)
            in the future. If the field is omitted, then the post can be published at any time
            within :tg-const:`telegram.constants.SuggestedPost.MAX_SEND_DATE` seconds (30 days) at
            the sole discretion of the user who approves it.
            |datetime_localization|
    Attributes:
        price (:class:`telegram.SuggestedPostPrice`):
            Optional. Proposed price for the post. If the field is omitted, then the post
            is unpaid.
        send_date (:class:`datetime.datetime`):
            Optional. Proposed send date of the post. If specified, then the date
            must be between :tg-const:`telegram.constants.SuggestedPost.MIN_SEND_DATE`
            second and :tg-const:`telegram.constants.SuggestedPost.MAX_SEND_DATE` seconds (30 days)
            in the future. If the field is omitted, then the post can be published at any time
            within :tg-const:`telegram.constants.SuggestedPost.MAX_SEND_DATE` seconds (30 days) at
            the sole discretion of the user who approves it.
            |datetime_localization|
    """
    __slots__ = ("price", "send_date")
    def __init__(
        self,
        price: SuggestedPostPrice | None = None,
        send_date: dtm.datetime | None = None,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        # Optionals
        self.price: SuggestedPostPrice | None = price
        self.send_date: dtm.datetime | None = send_date
        self._id_attrs = (self.price, self.send_date)
        self._freeze()
    @classmethod
    def de_json(cls, data: JSONDict, bot: Optional["Bot"] = None) -> "SuggestedPostParameters":
        """See :meth:`telegram.TelegramObject.de_json`."""
        data = cls._parse_data(data)
        data["price"] = de_json_optional(data.get("price"), SuggestedPostPrice, bot)
        # Get the local timezone from the bot if it has defaults
        loc_tzinfo = extract_tzinfo_from_defaults(bot)
        data["send_date"] = from_timestamp(data.get("send_date"), tzinfo=loc_tzinfo)
        return super().de_json(data=data, bot=bot)
class SuggestedPostInfo(TelegramObject):
    """
    Contains information about a suggested post.
    Objects of this class are comparable in terms of equality. Two objects of this class are
    considered equal, if their :attr:`state` and :attr:`price` are equal.
    .. versionadded:: 22.4
    Args:
        state (:obj:`str`):
            State of the suggested post. Currently, it can be one of
            :tg-const:`~telegram.constants.SuggestedPostInfoState.PENDING`,
            :tg-const:`~telegram.constants.SuggestedPostInfoState.APPROVED`,
            :tg-const:`~telegram.constants.SuggestedPostInfoState.DECLINED`.
        price (:class:`SuggestedPostPrice`, optional):
            Proposed price of the post. If the field is omitted, then the post is unpaid.
        send_date (:class:`datetime.datetime`, optional):
            Proposed send date of the post. If the field is omitted, then the post can be published
            at any time within 30 days at the sole discretion of the user or administrator who
            approves it.
            |datetime_localization|
    Attributes:
        state (:obj:`str`):
            State of the suggested post. Currently, it can be one of
            :tg-const:`~telegram.constants.SuggestedPostInfoState.PENDING`,
            :tg-const:`~telegram.constants.SuggestedPostInfoState.APPROVED`,
            :tg-const:`~telegram.constants.SuggestedPostInfoState.DECLINED`.
        price (:class:`SuggestedPostPrice`):
            Optional. Proposed price of the post. If the field is omitted, then the post is unpaid.
        send_date (:class:`datetime.datetime`):
            Optional. Proposed send date of the post. If the field is omitted, then the post can be
            published at any time within 30 days at the sole discretion of the user or
            administrator who approves it.
            |datetime_localization|
    """
    __slots__ = ("price", "send_date", "state")
    PENDING: Final[str] = constants.SuggestedPostInfoState.PENDING
    """:const:`telegram.constants.SuggestedPostInfoState.PENDING`"""
    APPROVED: Final[str] = constants.SuggestedPostInfoState.APPROVED
    """:const:`telegram.constants.SuggestedPostInfoState.APPROVED`"""
    DECLINED: Final[str] = constants.SuggestedPostInfoState.DECLINED
    """:const:`telegram.constants.SuggestedPostInfoState.DECLINED`"""
    def __init__(
        self,
        state: str,
        price: SuggestedPostPrice | None = None,
        send_date: dtm.datetime | None = None,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        # Required
        # Normalize to a SuggestedPostInfoState member where possible; unknown
        # values are kept as the plain input string (third argument is the fallback).
        self.state: str = enum.get_member(constants.SuggestedPostInfoState, state, state)
        # Optionals
        self.price: SuggestedPostPrice | None = price
        self.send_date: dtm.datetime | None = send_date
        self._id_attrs = (self.state, self.price)
        self._freeze()
    @classmethod
    def de_json(cls, data: JSONDict, bot: Optional["Bot"] = None) -> "SuggestedPostInfo":
        """See :meth:`telegram.TelegramObject.de_json`."""
        data = cls._parse_data(data)
        # Get the local timezone from the bot if it has defaults
        loc_tzinfo = extract_tzinfo_from_defaults(bot)
        data["price"] = de_json_optional(data.get("price"), SuggestedPostPrice, bot)
        data["send_date"] = from_timestamp(data.get("send_date"), tzinfo=loc_tzinfo)
        return super().de_json(data=data, bot=bot)
class SuggestedPostDeclined(TelegramObject):
    """
    Service message announcing that a suggested post was rejected.

    Objects of this class are comparable in terms of equality. Two objects of this class are
    considered equal, if their :attr:`suggested_post_message` and :attr:`comment` are equal.

    .. versionadded:: 22.4

    Args:
        suggested_post_message (:class:`telegram.Message`, optional):
            Message containing the suggested post. The :class:`~telegram.Message` object in this
            field will not contain the :attr:`~telegram.Message.reply_to_message` field even if it
            itself is a reply.
        comment (:obj:`str`, optional):
            Comment with which the post was declined.

    Attributes:
        suggested_post_message (:class:`telegram.Message`):
            Optional. Message containing the suggested post. The :class:`~telegram.Message` object
            in this field will not contain the :attr:`~telegram.Message.reply_to_message` field
            even if it itself is a reply.
        comment (:obj:`str`):
            Optional. Comment with which the post was declined.
    """

    __slots__ = ("comment", "suggested_post_message")

    def __init__(
        self,
        suggested_post_message: Message | None = None,
        comment: str | None = None,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        # Both fields are optional.
        self.suggested_post_message: Message | None = suggested_post_message
        self.comment: str | None = comment

        self._id_attrs = (self.suggested_post_message, self.comment)
        self._freeze()

    @classmethod
    def de_json(cls, data: JSONDict, bot: Optional["Bot"] = None) -> "SuggestedPostDeclined":
        """See :meth:`telegram.TelegramObject.de_json`."""
        parsed = cls._parse_data(data)
        parsed["suggested_post_message"] = de_json_optional(
            parsed.get("suggested_post_message"), Message, bot
        )
        return super().de_json(data=parsed, bot=bot)
class SuggestedPostPaid(TelegramObject):
    """
    Describes a service message about a successful payment for a suggested post.
    Objects of this class are comparable in terms of equality. Two objects of this class are
    considered equal, if all of their attributes are equal.
    .. versionadded:: 22.4
    Args:
        suggested_post_message (:class:`telegram.Message`, optional):
            Message containing the suggested post. Note that the :class:`~telegram.Message` object
            in this field will not contain the :attr:`~telegram.Message.reply_to_message` field
            even if it itself is a reply.
        currency (:obj:`str`):
            Currency in which the payment was made. Currently, one of ``“XTR”`` for Telegram Stars
            or ``“TON”`` for toncoins.
        amount (:obj:`int`, optional):
            The amount of the currency that was received by the channel in nanotoncoins; for
            payments in toncoins only.
        star_amount (:class:`telegram.StarAmount`, optional):
            The amount of Telegram Stars that was received by the channel; for payments in Telegram
            Stars only.
    Attributes:
        suggested_post_message (:class:`telegram.Message`):
            Optional. Message containing the suggested post. Note that the
            :class:`~telegram.Message` object in this field will not contain
            the :attr:`~telegram.Message.reply_to_message` field even if it itself is a reply.
        currency (:obj:`str`):
            Currency in which the payment was made. Currently, one of ``“XTR”`` for Telegram Stars
            or ``“TON”`` for toncoins.
        amount (:obj:`int`):
            Optional. The amount of the currency that was received by the channel in nanotoncoins;
            for payments in toncoins only.
        star_amount (:class:`telegram.StarAmount`):
            Optional. The amount of Telegram Stars that was received by the channel; for payments
            in Telegram Stars only.
    """
    __slots__ = ("amount", "currency", "star_amount", "suggested_post_message")
    def __init__(
        self,
        currency: str,
        suggested_post_message: Message | None = None,
        amount: int | None = None,
        star_amount: StarAmount | None = None,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        # Required
        self.currency: str = currency
        # Optionals
        self.suggested_post_message: Message | None = suggested_post_message
        # ``amount`` is used for TON payments, ``star_amount`` for Telegram Stars payments.
        self.amount: int | None = amount
        self.star_amount: StarAmount | None = star_amount
        # All attributes participate in equality/hashing.
        self._id_attrs = (
            self.currency,
            self.suggested_post_message,
            self.amount,
            self.star_amount,
        )
        self._freeze()
    @classmethod
    def de_json(cls, data: JSONDict, bot: Optional["Bot"] = None) -> "SuggestedPostPaid":
        """See :meth:`telegram.TelegramObject.de_json`."""
        data = cls._parse_data(data)
        data["suggested_post_message"] = de_json_optional(
            data.get("suggested_post_message"), Message, bot
        )
        data["star_amount"] = de_json_optional(data.get("star_amount"), StarAmount, bot)
        return super().de_json(data=data, bot=bot)
class SuggestedPostRefunded(TelegramObject):
    """
    Service message about a refunded payment for a suggested post.

    Objects of this class are comparable in terms of equality. Two objects of this class are
    considered equal, if their :attr:`suggested_post_message` and :attr:`reason` are equal.

    .. versionadded:: 22.4

    Args:
        suggested_post_message (:class:`telegram.Message`, optional):
            Message containing the suggested post. The :class:`~telegram.Message` object in this
            field will not contain the :attr:`~telegram.Message.reply_to_message` field even if it
            itself is a reply.
        reason (:obj:`str`):
            Reason for the refund. Currently, one of
            :tg-const:`telegram.constants.SuggestedPostRefunded.POST_DELETED` if the post was
            deleted within 24 hours of being posted or removed from scheduled messages without
            being posted, or :tg-const:`telegram.constants.SuggestedPostRefunded.PAYMENT_REFUNDED`
            if the payer refunded their payment.

    Attributes:
        suggested_post_message (:class:`telegram.Message`):
            Optional. Message containing the suggested post. The :class:`~telegram.Message` object
            in this field will not contain the :attr:`~telegram.Message.reply_to_message` field
            even if it itself is a reply.
        reason (:obj:`str`):
            Reason for the refund. Currently, one of
            :tg-const:`telegram.constants.SuggestedPostRefunded.POST_DELETED` if the post was
            deleted within 24 hours of being posted or removed from scheduled messages without
            being posted, or :tg-const:`telegram.constants.SuggestedPostRefunded.PAYMENT_REFUNDED`
            if the payer refunded their payment.
    """

    __slots__ = ("reason", "suggested_post_message")

    def __init__(
        self,
        reason: str,
        suggested_post_message: Message | None = None,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        # Required
        self.reason: str = reason
        # Optional
        self.suggested_post_message: Message | None = suggested_post_message

        self._id_attrs = (self.reason, self.suggested_post_message)
        self._freeze()

    @classmethod
    def de_json(cls, data: JSONDict, bot: Optional["Bot"] = None) -> "SuggestedPostRefunded":
        """See :meth:`telegram.TelegramObject.de_json`."""
        parsed = cls._parse_data(data)
        parsed["suggested_post_message"] = de_json_optional(
            parsed.get("suggested_post_message"), Message, bot
        )
        return super().de_json(data=parsed, bot=bot)
class SuggestedPostApproved(TelegramObject):
    """
    Describes a service message about the approval of a suggested post.
    Objects of this class are comparable in terms of equality. Two objects of this class are
    considered equal, if all of their attributes are equal.
    .. versionadded:: 22.4
    Args:
        suggested_post_message (:class:`telegram.Message`, optional):
            Message containing the suggested post. Note that the :class:`~telegram.Message` object
            in this field will not contain the :attr:`~telegram.Message.reply_to_message` field
            even if it itself is a reply.
        price (:class:`SuggestedPostPrice`, optional):
            Amount paid for the post.
        send_date (:class:`datetime.datetime`):
            Date when the post will be published.
            |datetime_localization|
    Attributes:
        suggested_post_message (:class:`telegram.Message`):
            Optional. Message containing the suggested post. Note that the
            :class:`~telegram.Message` object in this field will not contain
            the :attr:`~telegram.Message.reply_to_message` field even if it itself is a reply.
        price (:class:`SuggestedPostPrice`):
            Optional. Amount paid for the post.
        send_date (:class:`datetime.datetime`):
            Date when the post will be published.
            |datetime_localization|
    """
    __slots__ = ("price", "send_date", "suggested_post_message")
    def __init__(
        self,
        send_date: dtm.datetime,
        suggested_post_message: Message | None = None,
        price: SuggestedPostPrice | None = None,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        # Required
        self.send_date: dtm.datetime = send_date
        # Optionals
        self.suggested_post_message: Message | None = suggested_post_message
        self.price: SuggestedPostPrice | None = price
        # All attributes participate in equality/hashing.
        self._id_attrs = (self.send_date, self.suggested_post_message, self.price)
        self._freeze()
    @classmethod
    def de_json(cls, data: JSONDict, bot: Optional["Bot"] = None) -> "SuggestedPostApproved":
        """See :meth:`telegram.TelegramObject.de_json`."""
        data = cls._parse_data(data)
        # Get the local timezone from the bot if it has defaults
        loc_tzinfo = extract_tzinfo_from_defaults(bot)
        data["send_date"] = from_timestamp(data.get("send_date"), tzinfo=loc_tzinfo)
        data["price"] = de_json_optional(data.get("price"), SuggestedPostPrice, bot)
        data["suggested_post_message"] = de_json_optional(
            data.get("suggested_post_message"), Message, bot
        )
        return super().de_json(data=data, bot=bot)
class SuggestedPostApprovalFailed(TelegramObject):
    """
    Describes a service message about the failed approval of a suggested post. Currently, only
    caused by insufficient user funds at the time of approval.
    Objects of this class are comparable in terms of equality. Two objects of this class are
    considered equal, if their :attr:`suggested_post_message` and :attr:`price` are equal.
    .. versionadded:: 22.4
    Args:
        suggested_post_message (:class:`telegram.Message`, optional):
            Message containing the suggested post. Note that the :class:`~telegram.Message` object
            in this field will not contain the :attr:`~telegram.Message.reply_to_message` field
            even if it itself is a reply.
        price (:class:`SuggestedPostPrice`):
            Expected price of the post.
    Attributes:
        suggested_post_message (:class:`telegram.Message`):
            Optional. Message containing the suggested post. Note that the
            :class:`~telegram.Message` object in this field will not contain
            the :attr:`~telegram.Message.reply_to_message` field even if it itself is a reply.
        price (:class:`SuggestedPostPrice`):
            Expected price of the post.
    """
    __slots__ = ("price", "suggested_post_message")
    def __init__(
        self,
        price: SuggestedPostPrice,
        suggested_post_message: Message | None = None,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        # Required
        self.price: SuggestedPostPrice = price
        # Optionals
        self.suggested_post_message: Message | None = suggested_post_message
        self._id_attrs = (self.price, self.suggested_post_message)
        self._freeze()
    @classmethod
    def de_json(cls, data: JSONDict, bot: Optional["Bot"] = None) -> "SuggestedPostApprovalFailed":
        """See :meth:`telegram.TelegramObject.de_json`."""
        data = cls._parse_data(data)
        data["price"] = de_json_optional(data.get("price"), SuggestedPostPrice, bot)
        data["suggested_post_message"] = de_json_optional(
            data.get("suggested_post_message"), Message, bot
        )
        return super().de_json(data=data, bot=bot)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "src/telegram/_suggestedpost.py",
"license": "GNU General Public License v3.0",
"lines": 456,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
python-telegram-bot/python-telegram-bot:tests/test_directmessagestopic.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the TestDirectMessagesTopic class."""
import pytest
from telegram import DirectMessagesTopic, User
from tests.auxil.slots import mro_slots
@pytest.fixture(scope="module")
def direct_messages_topic(offline_bot):
    """Module-scoped ``DirectMessagesTopic`` bound to the offline bot and unfrozen for tests."""
    topic = DirectMessagesTopic(
        topic_id=DirectMessagesTopicTestBase.topic_id,
        user=DirectMessagesTopicTestBase.user,
    )
    topic.set_bot(offline_bot)
    topic._unfreeze()
    return topic
class DirectMessagesTopicTestBase:
    """Shared expected values for the ``DirectMessagesTopic`` tests below."""
    topic_id = 12345
    user = User(id=67890, is_bot=False, first_name="Test")
class TestDirectMessagesTopicWithoutRequest(DirectMessagesTopicTestBase):
    """Offline tests for ``DirectMessagesTopic`` (no Bot API requests are made)."""

    def test_slot_behaviour(self, direct_messages_topic):
        dmt = direct_messages_topic
        for attr in dmt.__slots__:
            assert getattr(dmt, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(dmt)) == len(set(mro_slots(dmt))), "duplicate slot"

    def test_de_json(self, offline_bot):
        json_dict = {"topic_id": self.topic_id, "user": self.user.to_dict()}
        dmt = DirectMessagesTopic.de_json(json_dict, offline_bot)
        assert dmt.topic_id == self.topic_id
        assert dmt.user == self.user
        assert dmt.api_kwargs == {}

    def test_to_dict(self, direct_messages_topic):
        as_dict = direct_messages_topic.to_dict()
        assert isinstance(as_dict, dict)
        assert as_dict["topic_id"] == direct_messages_topic.topic_id
        assert as_dict["user"] == direct_messages_topic.user.to_dict()

    def test_equality(self, direct_messages_topic):
        first = direct_messages_topic
        same = DirectMessagesTopic(topic_id=first.topic_id, user=first.user)
        other_topic = DirectMessagesTopic(topic_id=8371, user=first.user)
        unrelated = User(id=99999, is_bot=False, first_name="Random")

        assert first == same
        assert hash(first) == hash(same)

        assert unrelated != same
        assert hash(unrelated) != hash(same)

        assert first != other_topic
        assert hash(first) != hash(other_topic)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/test_directmessagestopic.py",
"license": "GNU General Public License v3.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:tests/test_suggestedpost.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import datetime as dtm
import pytest
from telegram import Dice
from telegram._chat import Chat
from telegram._message import Message
from telegram._payment.stars.staramount import StarAmount
from telegram._suggestedpost import (
SuggestedPostApprovalFailed,
SuggestedPostApproved,
SuggestedPostDeclined,
SuggestedPostInfo,
SuggestedPostPaid,
SuggestedPostParameters,
SuggestedPostPrice,
SuggestedPostRefunded,
)
from telegram._utils.datetime import UTC, to_timestamp
from telegram.constants import SuggestedPostInfoState
from tests.auxil.slots import mro_slots
@pytest.fixture(scope="module")
def suggested_post_parameters():
    """Module-scoped ``SuggestedPostParameters`` instance built from the shared test values."""
    return SuggestedPostParameters(
        price=SuggestedPostParametersTestBase.price,
        send_date=SuggestedPostParametersTestBase.send_date,
    )
class SuggestedPostParametersTestBase:
    """Shared expected values for the ``SuggestedPostParameters`` tests."""
    price = SuggestedPostPrice(currency="XTR", amount=100)
    send_date = dtm.datetime.now(tz=UTC).replace(microsecond=0)
class TestSuggestedPostParametersWithoutRequest(SuggestedPostParametersTestBase):
    """Offline tests for ``SuggestedPostParameters`` (no Bot API requests are made)."""
    def test_slot_behaviour(self, suggested_post_parameters):
        for attr in suggested_post_parameters.__slots__:
            assert getattr(suggested_post_parameters, attr, "err") != "err", (
                f"got extra slot '{attr}'"
            )
        assert len(mro_slots(suggested_post_parameters)) == len(
            set(mro_slots(suggested_post_parameters))
        ), "duplicate slot"
    def test_de_json(self, offline_bot):
        json_dict = {
            "price": self.price.to_dict(),
            "send_date": to_timestamp(self.send_date),
        }
        spp = SuggestedPostParameters.de_json(json_dict, offline_bot)
        assert spp.price == self.price
        assert spp.send_date == self.send_date
        assert spp.api_kwargs == {}
    def test_de_json_localization(self, offline_bot, raw_bot, tz_bot):
        json_dict = {
            "price": self.price.to_dict(),
            "send_date": to_timestamp(self.send_date),
        }
        spp_bot = SuggestedPostParameters.de_json(json_dict, offline_bot)
        spp_bot_raw = SuggestedPostParameters.de_json(json_dict, raw_bot)
        spp_bot_tz = SuggestedPostParameters.de_json(json_dict, tz_bot)
        # comparing utcoffsets because comparing tzinfo objects is not reliable
        send_date_offset = spp_bot_tz.send_date.utcoffset()
        send_date_offset_tz = tz_bot.defaults.tzinfo.utcoffset(
            spp_bot_tz.send_date.replace(tzinfo=None)
        )
        assert spp_bot.send_date.tzinfo == UTC
        assert spp_bot_raw.send_date.tzinfo == UTC
        assert send_date_offset_tz == send_date_offset
    def test_to_dict(self, suggested_post_parameters):
        spp_dict = suggested_post_parameters.to_dict()
        assert isinstance(spp_dict, dict)
        assert spp_dict["price"] == self.price.to_dict()
        assert spp_dict["send_date"] == to_timestamp(self.send_date)
    def test_equality(self, suggested_post_parameters):
        a = suggested_post_parameters
        b = SuggestedPostParameters(price=self.price, send_date=self.send_date)
        c = SuggestedPostParameters(
            price=self.price, send_date=self.send_date + dtm.timedelta(seconds=1)
        )
        e = Dice(4, "emoji")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != e
        assert hash(a) != hash(e)
@pytest.fixture(scope="module")
def suggested_post_info():
    """Module-scoped ``SuggestedPostInfo`` instance built from the shared test values."""
    return SuggestedPostInfo(
        state=SuggestedPostInfoTestBase.state,
        price=SuggestedPostInfoTestBase.price,
        send_date=SuggestedPostInfoTestBase.send_date,
    )
class SuggestedPostInfoTestBase:
    """Shared expected values for the ``SuggestedPostInfo`` tests."""
    state = SuggestedPostInfoState.PENDING
    price = SuggestedPostPrice(currency="XTR", amount=100)
    send_date = dtm.datetime.now(tz=UTC).replace(microsecond=0)
class TestSuggestedPostInfoWithoutRequest(SuggestedPostInfoTestBase):
    """Offline tests for ``SuggestedPostInfo`` (no Bot API requests are made)."""
    def test_slot_behaviour(self, suggested_post_info):
        for attr in suggested_post_info.__slots__:
            assert getattr(suggested_post_info, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(suggested_post_info)) == len(set(mro_slots(suggested_post_info))), (
            "duplicate slot"
        )
    def test_type_enum_conversion(self):
        # Known state strings are converted to the enum; unknown ones stay plain strings.
        assert type(SuggestedPostInfo("pending").state) is SuggestedPostInfoState
        assert SuggestedPostInfo("unknown").state == "unknown"
    def test_de_json(self, offline_bot):
        json_dict = {
            "state": self.state,
            "price": self.price.to_dict(),
            "send_date": to_timestamp(self.send_date),
        }
        spi = SuggestedPostInfo.de_json(json_dict, offline_bot)
        assert spi.state == self.state
        assert spi.price == self.price
        assert spi.send_date == self.send_date
        assert spi.api_kwargs == {}
    def test_de_json_localization(self, offline_bot, raw_bot, tz_bot):
        json_dict = {
            "state": self.state,
            "price": self.price.to_dict(),
            "send_date": to_timestamp(self.send_date),
        }
        spi_bot = SuggestedPostInfo.de_json(json_dict, offline_bot)
        spi_bot_raw = SuggestedPostInfo.de_json(json_dict, raw_bot)
        spi_bot_tz = SuggestedPostInfo.de_json(json_dict, tz_bot)
        # comparing utcoffsets because comparing tzinfo objects is not reliable
        send_date_offset = spi_bot_tz.send_date.utcoffset()
        send_date_offset_tz = tz_bot.defaults.tzinfo.utcoffset(
            spi_bot_tz.send_date.replace(tzinfo=None)
        )
        assert spi_bot.send_date.tzinfo == UTC
        assert spi_bot_raw.send_date.tzinfo == UTC
        assert send_date_offset_tz == send_date_offset
    def test_to_dict(self, suggested_post_info):
        spi_dict = suggested_post_info.to_dict()
        assert isinstance(spi_dict, dict)
        assert spi_dict["state"] == self.state
        assert spi_dict["price"] == self.price.to_dict()
        assert spi_dict["send_date"] == to_timestamp(self.send_date)
    def test_equality(self, suggested_post_info):
        a = suggested_post_info
        b = SuggestedPostInfo(state=self.state, price=self.price)
        c = SuggestedPostInfo(state=SuggestedPostInfoState.DECLINED, price=self.price)
        e = Dice(4, "emoji")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != e
        assert hash(a) != hash(e)
@pytest.fixture(scope="module")
def suggested_post_price():
    """Module-scoped ``SuggestedPostPrice`` instance built from the shared test values."""
    return SuggestedPostPrice(
        currency=SuggestedPostPriceTestBase.currency,
        amount=SuggestedPostPriceTestBase.amount,
    )
class SuggestedPostPriceTestBase:
    """Shared expected values for the ``SuggestedPostPrice`` tests."""
    currency = "XTR"
    amount = 100
class TestSuggestedPostPriceWithoutRequest(SuggestedPostPriceTestBase):
    """Offline tests for ``SuggestedPostPrice`` (no Bot API requests are made)."""

    def test_slot_behaviour(self, suggested_post_price):
        price = suggested_post_price
        for attr in price.__slots__:
            assert getattr(price, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(price)) == len(set(mro_slots(price))), "duplicate slot"

    def test_de_json(self, offline_bot):
        json_dict = {"currency": self.currency, "amount": self.amount}
        price = SuggestedPostPrice.de_json(json_dict, offline_bot)
        assert price.currency == self.currency
        assert price.amount == self.amount
        assert price.api_kwargs == {}

    def test_to_dict(self, suggested_post_price):
        as_dict = suggested_post_price.to_dict()
        assert isinstance(as_dict, dict)
        assert as_dict["currency"] == self.currency
        assert as_dict["amount"] == self.amount

    def test_equality(self, suggested_post_price):
        first = suggested_post_price
        same = SuggestedPostPrice(currency=self.currency, amount=self.amount)
        other_currency = SuggestedPostPrice(currency="TON", amount=self.amount)
        unrelated = Dice(4, "emoji")

        assert first == same
        assert hash(first) == hash(same)

        assert first != other_currency
        assert hash(first) != hash(other_currency)

        assert first != unrelated
        assert hash(first) != hash(unrelated)
@pytest.fixture(scope="module")
def suggested_post_declined():
    """Module-scoped ``SuggestedPostDeclined`` instance built from the shared test values."""
    return SuggestedPostDeclined(
        suggested_post_message=SuggestedPostDeclinedTestBase.suggested_post_message,
        comment=SuggestedPostDeclinedTestBase.comment,
    )
class SuggestedPostDeclinedTestBase:
    """Shared expected values for the ``SuggestedPostDeclined`` tests."""
    suggested_post_message = Message(1, dtm.datetime.now(), Chat(1, ""), text="post this pls.")
    comment = "another time"
class TestSuggestedPostDeclinedWithoutRequest(SuggestedPostDeclinedTestBase):
    """Offline tests for ``SuggestedPostDeclined`` (no Bot API requests are made)."""
    def test_slot_behaviour(self, suggested_post_declined):
        for attr in suggested_post_declined.__slots__:
            assert getattr(suggested_post_declined, attr, "err") != "err", (
                f"got extra slot '{attr}'"
            )
        assert len(mro_slots(suggested_post_declined)) == len(
            set(mro_slots(suggested_post_declined))
        ), "duplicate slot"
    def test_de_json(self, offline_bot):
        json_dict = {
            "suggested_post_message": self.suggested_post_message.to_dict(),
            "comment": self.comment,
        }
        spd = SuggestedPostDeclined.de_json(json_dict, offline_bot)
        assert spd.suggested_post_message == self.suggested_post_message
        assert spd.comment == self.comment
        assert spd.api_kwargs == {}
    def test_to_dict(self, suggested_post_declined):
        spd_dict = suggested_post_declined.to_dict()
        assert isinstance(spd_dict, dict)
        assert spd_dict["suggested_post_message"] == self.suggested_post_message.to_dict()
        assert spd_dict["comment"] == self.comment
    def test_equality(self, suggested_post_declined):
        a = suggested_post_declined
        b = SuggestedPostDeclined(
            suggested_post_message=self.suggested_post_message, comment=self.comment
        )
        c = SuggestedPostDeclined(suggested_post_message=self.suggested_post_message, comment="no")
        e = Dice(4, "emoji")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != e
        assert hash(a) != hash(e)
@pytest.fixture(scope="module")
def suggested_post_paid():
    """Module-scoped ``SuggestedPostPaid`` instance built from the shared test values."""
    return SuggestedPostPaid(
        currency=SuggestedPostPaidTestBase.currency,
        suggested_post_message=SuggestedPostPaidTestBase.suggested_post_message,
        amount=SuggestedPostPaidTestBase.amount,
        star_amount=SuggestedPostPaidTestBase.star_amount,
    )
class SuggestedPostPaidTestBase:
    """Shared expected values for the ``SuggestedPostPaid`` tests."""
    suggested_post_message = Message(1, dtm.datetime.now(), Chat(1, ""), text="post this pls.")
    currency = "XTR"
    amount = 100
    star_amount = StarAmount(100)
class TestSuggestedPostPaidWithoutRequest(SuggestedPostPaidTestBase):
    """Offline tests for ``SuggestedPostPaid`` (no Bot API requests are made)."""
    def test_slot_behaviour(self, suggested_post_paid):
        for attr in suggested_post_paid.__slots__:
            assert getattr(suggested_post_paid, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(suggested_post_paid)) == len(set(mro_slots(suggested_post_paid))), (
            "duplicate slot"
        )
    def test_de_json(self, offline_bot):
        json_dict = {
            "suggested_post_message": self.suggested_post_message.to_dict(),
            "currency": self.currency,
            "amount": self.amount,
            "star_amount": self.star_amount.to_dict(),
        }
        spp = SuggestedPostPaid.de_json(json_dict, offline_bot)
        assert spp.suggested_post_message == self.suggested_post_message
        assert spp.currency == self.currency
        assert spp.amount == self.amount
        assert spp.star_amount == self.star_amount
        assert spp.api_kwargs == {}
    def test_to_dict(self, suggested_post_paid):
        spp_dict = suggested_post_paid.to_dict()
        assert isinstance(spp_dict, dict)
        assert spp_dict["suggested_post_message"] == self.suggested_post_message.to_dict()
        assert spp_dict["currency"] == self.currency
        assert spp_dict["amount"] == self.amount
        assert spp_dict["star_amount"] == self.star_amount.to_dict()
    def test_equality(self, suggested_post_paid):
        a = suggested_post_paid
        b = SuggestedPostPaid(
            suggested_post_message=self.suggested_post_message,
            currency=self.currency,
            amount=self.amount,
            star_amount=self.star_amount,
        )
        c = SuggestedPostPaid(
            suggested_post_message=self.suggested_post_message,
            currency=self.currency,
            amount=self.amount - 1,
            star_amount=StarAmount(self.amount - 1),
        )
        e = Dice(4, "emoji")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != e
        assert hash(a) != hash(e)
@pytest.fixture(scope="module")
def suggested_post_refunded():
    """Module-scoped ``SuggestedPostRefunded`` instance built from the shared test values."""
    return SuggestedPostRefunded(
        reason=SuggestedPostRefundedTestBase.reason,
        suggested_post_message=SuggestedPostRefundedTestBase.suggested_post_message,
    )
class SuggestedPostRefundedTestBase:
    """Shared expected values for the ``SuggestedPostRefunded`` tests."""
    reason = "post_deleted"
    suggested_post_message = Message(1, dtm.datetime.now(), Chat(1, ""), text="post this pls.")
class TestSuggestedPostRefundedWithoutRequest(SuggestedPostRefundedTestBase):
    """Offline tests for ``SuggestedPostRefunded`` (no Bot API requests are made)."""
    def test_slot_behaviour(self, suggested_post_refunded):
        for attr in suggested_post_refunded.__slots__:
            assert getattr(suggested_post_refunded, attr, "err") != "err", (
                f"got extra slot '{attr}'"
            )
        assert len(mro_slots(suggested_post_refunded)) == len(
            set(mro_slots(suggested_post_refunded))
        ), "duplicate slot"
    def test_de_json(self, offline_bot):
        json_dict = {
            "suggested_post_message": self.suggested_post_message.to_dict(),
            "reason": self.reason,
        }
        spr = SuggestedPostRefunded.de_json(json_dict, offline_bot)
        assert spr.suggested_post_message == self.suggested_post_message
        assert spr.reason == self.reason
        assert spr.api_kwargs == {}
    def test_to_dict(self, suggested_post_refunded):
        spr_dict = suggested_post_refunded.to_dict()
        assert isinstance(spr_dict, dict)
        assert spr_dict["suggested_post_message"] == self.suggested_post_message.to_dict()
        assert spr_dict["reason"] == self.reason
    def test_equality(self, suggested_post_refunded):
        a = suggested_post_refunded
        b = SuggestedPostRefunded(
            suggested_post_message=self.suggested_post_message, reason=self.reason
        )
        c = SuggestedPostRefunded(
            suggested_post_message=self.suggested_post_message, reason="payment_refunded"
        )
        e = Dice(4, "emoji")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != e
        assert hash(a) != hash(e)
@pytest.fixture(scope="module")
def suggested_post_approved():
    """Module-scoped ``SuggestedPostApproved`` instance built from the shared test values."""
    return SuggestedPostApproved(
        send_date=SuggestedPostApprovedTestBase.send_date,
        suggested_post_message=SuggestedPostApprovedTestBase.suggested_post_message,
        price=SuggestedPostApprovedTestBase.price,
    )
class SuggestedPostApprovedTestBase:
    """Shared expected values for the ``SuggestedPostApproved`` tests."""
    send_date = dtm.datetime.now(tz=UTC).replace(microsecond=0)
    suggested_post_message = Message(1, dtm.datetime.now(), Chat(1, ""), text="post this pls.")
    price = SuggestedPostPrice(currency="XTR", amount=100)
class TestSuggestedPostApprovedWithoutRequest(SuggestedPostApprovedTestBase):
    """Offline tests for ``SuggestedPostApproved`` (no Bot API requests are made)."""
    def test_slot_behaviour(self, suggested_post_approved):
        for attr in suggested_post_approved.__slots__:
            assert getattr(suggested_post_approved, attr, "err") != "err", (
                f"got extra slot '{attr}'"
            )
        assert len(mro_slots(suggested_post_approved)) == len(
            set(mro_slots(suggested_post_approved))
        ), "duplicate slot"
    def test_de_json(self, offline_bot):
        json_dict = {
            "send_date": to_timestamp(self.send_date),
            "suggested_post_message": self.suggested_post_message.to_dict(),
            "price": self.price.to_dict(),
        }
        spa = SuggestedPostApproved.de_json(json_dict, offline_bot)
        assert spa.send_date == self.send_date
        assert spa.suggested_post_message == self.suggested_post_message
        assert spa.price == self.price
        assert spa.api_kwargs == {}
    def test_de_json_localization(self, offline_bot, raw_bot, tz_bot):
        json_dict = {
            "send_date": to_timestamp(self.send_date),
            "suggested_post_message": self.suggested_post_message.to_dict(),
            "price": self.price.to_dict(),
        }
        spa_bot = SuggestedPostApproved.de_json(json_dict, offline_bot)
        spa_bot_raw = SuggestedPostApproved.de_json(json_dict, raw_bot)
        # fixed copy-pasted name ``spi_bot_tz`` (from the SuggestedPostInfo tests) for
        # consistency with the other ``spa_*`` locals in this method
        spa_bot_tz = SuggestedPostApproved.de_json(json_dict, tz_bot)
        # comparing utcoffsets because comparing tzinfo objects is not reliable
        send_date_offset = spa_bot_tz.send_date.utcoffset()
        send_date_offset_tz = tz_bot.defaults.tzinfo.utcoffset(
            spa_bot_tz.send_date.replace(tzinfo=None)
        )
        assert spa_bot.send_date.tzinfo == UTC
        assert spa_bot_raw.send_date.tzinfo == UTC
        assert send_date_offset_tz == send_date_offset
    def test_to_dict(self, suggested_post_approved):
        spa_dict = suggested_post_approved.to_dict()
        assert isinstance(spa_dict, dict)
        assert spa_dict["send_date"] == to_timestamp(self.send_date)
        assert spa_dict["suggested_post_message"] == self.suggested_post_message.to_dict()
        assert spa_dict["price"] == self.price.to_dict()
    def test_equality(self, suggested_post_approved):
        a = suggested_post_approved
        b = SuggestedPostApproved(
            send_date=self.send_date,
            suggested_post_message=self.suggested_post_message,
            price=self.price,
        )
        c = SuggestedPostApproved(
            send_date=self.send_date,
            suggested_post_message=self.suggested_post_message,
            price=SuggestedPostPrice(currency="XTR", amount=self.price.amount - 1),
        )
        e = Dice(4, "emoji")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != e
        assert hash(a) != hash(e)
@pytest.fixture(scope="module")
def suggested_post_approval_failed():
    """Module-scoped ``SuggestedPostApprovalFailed`` instance built from the shared test values."""
    return SuggestedPostApprovalFailed(
        price=SuggestedPostApprovalFailedTestBase.price,
        suggested_post_message=SuggestedPostApprovalFailedTestBase.suggested_post_message,
    )
class SuggestedPostApprovalFailedTestBase:
    """Shared expected values for the ``SuggestedPostApprovalFailed`` tests."""
    price = SuggestedPostPrice(currency="XTR", amount=100)
    suggested_post_message = Message(1, dtm.datetime.now(), Chat(1, ""), text="post this pls.")
class TestSuggestedPostApprovalFailedWithoutRequest(SuggestedPostApprovalFailedTestBase):
    """Offline tests for ``SuggestedPostApprovalFailed`` (no Bot API requests are made)."""
    def test_slot_behaviour(self, suggested_post_approval_failed):
        for attr in suggested_post_approval_failed.__slots__:
            assert getattr(suggested_post_approval_failed, attr, "err") != "err", (
                f"got extra slot '{attr}'"
            )
        assert len(mro_slots(suggested_post_approval_failed)) == len(
            set(mro_slots(suggested_post_approval_failed))
        ), "duplicate slot"
    def test_de_json(self, offline_bot):
        json_dict = {
            "price": self.price.to_dict(),
            "suggested_post_message": self.suggested_post_message.to_dict(),
        }
        spaf = SuggestedPostApprovalFailed.de_json(json_dict, offline_bot)
        assert spaf.price == self.price
        assert spaf.suggested_post_message == self.suggested_post_message
        assert spaf.api_kwargs == {}
    def test_to_dict(self, suggested_post_approval_failed):
        spaf_dict = suggested_post_approval_failed.to_dict()
        assert isinstance(spaf_dict, dict)
        assert spaf_dict["price"] == self.price.to_dict()
        assert spaf_dict["suggested_post_message"] == self.suggested_post_message.to_dict()
    def test_equality(self, suggested_post_approval_failed):
        a = suggested_post_approval_failed
        b = SuggestedPostApprovalFailed(
            price=self.price,
            suggested_post_message=self.suggested_post_message,
        )
        c = SuggestedPostApprovalFailed(
            price=SuggestedPostPrice(currency="XTR", amount=self.price.amount - 1),
            suggested_post_message=self.suggested_post_message,
        )
        e = Dice(4, "emoji")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != e
        assert hash(a) != hash(e)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/test_suggestedpost.py",
"license": "GNU General Public License v3.0",
"lines": 480,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:src/telegram/_utils/usernames.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""Helper utilities around Telegram Objects first_name, last_name and username.
.. versionadded:: 22.4
Warning:
Contents of this module are intended to be used internally by the library and *not* by the
user. Changes to this module are not considered breaking changes and may not be documented in
the changelog.
"""
from typing import TYPE_CHECKING, Protocol, TypeVar, overload
# TypeVars bound to the structural protocols below. The overloads on get_name /
# get_full_name use them to preserve whether ``first_name`` is guaranteed on the
# concrete argument type (and hence whether the return can be None).
TeleUserLike = TypeVar("TeleUserLike", bound="UserLike")
TeleUserLikeOptional = TypeVar("TeleUserLikeOptional", bound="UserLikeOptional")
if TYPE_CHECKING:
    from typing import type_check_only
    @type_check_only
    class UserLike(Protocol):
        # Objects whose ``first_name`` is always set.
        first_name: str
        last_name: str | None
        username: str | None
    @type_check_only
    class UserLikeOptional(Protocol):
        # Objects where even ``first_name`` may be missing.
        first_name: str | None
        last_name: str | None
        username: str | None
@overload
def get_name(userlike: TeleUserLike) -> str: ...
@overload
def get_name(userlike: TeleUserLikeOptional) -> str | None: ...
def get_name(userlike: TeleUserLike | TeleUserLikeOptional) -> str | None:
    """Returns ``username`` prefixed with "@". If ``username`` is not available, falls back to
    :func:`get_full_name` below.
    """
    if userlike.username:
        return f"@{userlike.username}"
    return get_full_name(userlike=userlike)
@overload
def get_full_name(userlike: TeleUserLike) -> str: ...
@overload
def get_full_name(userlike: TeleUserLikeOptional) -> str | None: ...
def get_full_name(userlike: TeleUserLike | TeleUserLikeOptional) -> str | None:
    """
    Returns ``first_name`` followed by ``last_name`` when the latter is available.
    If ``first_name`` is not set, returns :obj:`None`.
    """
    first = userlike.first_name
    if not first:
        return None
    last = userlike.last_name
    return f"{first} {last}" if last else first
# We isolate these TypeVars to accommodate telegram objects with ``username``
# and no ``first_name`` or ``last_name`` (e.g. ``ChatShared``)
TeleLinkable = TypeVar("TeleLinkable", bound="Linkable")
TeleLinkableOptional = TypeVar("TeleLinkableOptional", bound="LinkableOptional")
if TYPE_CHECKING:
    @type_check_only
    class Linkable(Protocol):
        # Objects whose ``username`` is always set.
        username: str
    @type_check_only
    class LinkableOptional(Protocol):
        # Objects where ``username`` may be missing.
        username: str | None
@overload
def get_link(linkable: TeleLinkable) -> str: ...
@overload
def get_link(linkable: TeleLinkableOptional) -> str | None: ...
def get_link(linkable: TeleLinkable | TeleLinkableOptional) -> str | None:
    """Returns the ``https://t.me/<username>`` link of the user/chat, or :obj:`None` if
    ``username`` is not available.
    """
    username = linkable.username
    return f"https://t.me/{username}" if username else None
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "src/telegram/_utils/usernames.py",
"license": "GNU General Public License v3.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
python-telegram-bot/python-telegram-bot:src/telegram/_checklists.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an objects related to Telegram checklists."""
import datetime as dtm
from collections.abc import Sequence
from typing import TYPE_CHECKING, Optional
from telegram._chat import Chat
from telegram._messageentity import MessageEntity
from telegram._telegramobject import TelegramObject
from telegram._user import User
from telegram._utils.argumentparsing import de_json_optional, de_list_optional, parse_sequence_arg
from telegram._utils.datetime import extract_tzinfo_from_defaults, from_timestamp
from telegram._utils.entities import parse_message_entities, parse_message_entity
from telegram._utils.types import JSONDict
from telegram.constants import ZERO_DATE
if TYPE_CHECKING:
from telegram import Bot, Message
class ChecklistTask(TelegramObject):
    """
    Describes a task in a checklist.
    Objects of this class are comparable in terms of equality.
    Two objects of this class are considered equal, if their :attr:`id` is equal.
    .. versionadded:: 22.3
    Args:
        id (:obj:`int`): Unique identifier of the task.
        text (:obj:`str`): Text of the task.
        text_entities (Sequence[:class:`telegram.MessageEntity`], optional): Special
            entities that appear in the task text.
        completed_by_user (:class:`telegram.User`, optional): User that completed the task; omitted
            if the task wasn't completed
        completed_by_chat (:class:`telegram.Chat`, optional): Chat that completed the task; omitted
            if the task wasn't completed by a chat
            .. versionadded:: 22.6
        completion_date (:class:`datetime.datetime`, optional): Point in time when
            the task was completed; :attr:`~telegram.constants.ZERO_DATE` if the task wasn't
            completed
            |datetime_localization|
    Attributes:
        id (:obj:`int`): Unique identifier of the task.
        text (:obj:`str`): Text of the task.
        text_entities (Tuple[:class:`telegram.MessageEntity`]): Optional. Special
            entities that appear in the task text.
        completed_by_user (:class:`telegram.User`): Optional. User that completed the task; omitted
            if the task wasn't completed
        completed_by_chat (:class:`telegram.Chat`): Optional. Chat that completed the task; omitted
            if the task wasn't completed by a chat
            .. versionadded:: 22.6
        completion_date (:class:`datetime.datetime`): Optional. Point in time when
            the task was completed; :attr:`~telegram.constants.ZERO_DATE` if the task wasn't
            completed
            |datetime_localization|
    """
    __slots__ = (
        "completed_by_chat",
        "completed_by_user",
        "completion_date",
        "id",
        "text",
        "text_entities",
    )
    def __init__(
        self,
        id: int,  # pylint: disable=redefined-builtin
        text: str,
        text_entities: Sequence[MessageEntity] | None = None,
        completed_by_user: User | None = None,
        completion_date: dtm.datetime | None = None,
        completed_by_chat: Chat | None = None,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        self.id: int = id
        self.text: str = text
        self.text_entities: tuple[MessageEntity, ...] = parse_sequence_arg(text_entities)
        self.completed_by_user: User | None = completed_by_user
        self.completed_by_chat: Chat | None = completed_by_chat
        self.completion_date: dtm.datetime | None = completion_date
        # Equality and hashing are based on ``id`` only (see class docstring).
        self._id_attrs = (self.id,)
        self._freeze()
    @classmethod
    def de_json(cls, data: JSONDict, bot: Optional["Bot"] = None) -> "ChecklistTask":
        """See :meth:`telegram.TelegramObject.de_json`."""
        data = cls._parse_data(data)
        # Get the local timezone from the bot if it has defaults
        loc_tzinfo = extract_tzinfo_from_defaults(bot)
        # A completion_date of 0 marks a task that was never completed (see class docstring);
        # map it to the ZERO_DATE sentinel instead of treating it as the epoch timestamp.
        if (date := data.get("completion_date")) == 0:
            data["completion_date"] = ZERO_DATE
        else:
            data["completion_date"] = from_timestamp(date, tzinfo=loc_tzinfo)
        data["completed_by_user"] = de_json_optional(data.get("completed_by_user"), User, bot)
        data["completed_by_chat"] = de_json_optional(data.get("completed_by_chat"), Chat, bot)
        data["text_entities"] = de_list_optional(data.get("text_entities"), MessageEntity, bot)
        return super().de_json(data=data, bot=bot)
    def parse_entity(self, entity: MessageEntity) -> str:
        """Returns the text in :attr:`text`
        from a given :class:`telegram.MessageEntity` of :attr:`text_entities`.
        Note:
            This method is present because Telegram calculates the offset and length in
            UTF-16 codepoint pairs, which some versions of Python don't handle automatically.
            (That is, you can't just slice ``ChecklistTask.text`` with the offset and length.)
        Args:
            entity (:class:`telegram.MessageEntity`): The entity to extract the text from. It must
                be an entity that belongs to :attr:`text_entities`.
        Returns:
            :obj:`str`: The text of the given entity.
        """
        return parse_message_entity(self.text, entity)
    def parse_entities(self, types: list[str] | None = None) -> dict[MessageEntity, str]:
        """
        Returns a :obj:`dict` that maps :class:`telegram.MessageEntity` to :obj:`str`.
        It contains entities from this checklist task filtered by their ``type`` attribute as
        the key, and the text that each entity belongs to as the value of the :obj:`dict`.
        Note:
            This method should always be used instead of the :attr:`text_entities`
            attribute, since it calculates the correct substring from the message text based on
            UTF-16 codepoints. See :attr:`parse_entity` for more info.
        Args:
            types (list[:obj:`str`], optional): List of ``MessageEntity`` types as strings. If the
                ``type`` attribute of an entity is contained in this list, it will be returned.
                Defaults to :attr:`telegram.MessageEntity.ALL_TYPES`.
        Returns:
            dict[:class:`telegram.MessageEntity`, :obj:`str`]: A dictionary of entities mapped to
                the text that belongs to them, calculated based on UTF-16 codepoints.
        """
        return parse_message_entities(self.text, self.text_entities, types)
class Checklist(TelegramObject):
    """
    Describes a checklist.

    Objects of this class are comparable in terms of equality.
    Two objects of this class are considered equal, if all their :attr:`tasks` are equal.

    .. versionadded:: 22.3

    Args:
        title (:obj:`str`): Title of the checklist.
        title_entities (Sequence[:class:`telegram.MessageEntity`], optional): Special
            entities that appear in the checklist title.
        tasks (Sequence[:class:`telegram.ChecklistTask`]): List of tasks in the checklist.
        others_can_add_tasks (:obj:`bool`, optional): :obj:`True` if users other than the creator
            of the list can add tasks to the list
        others_can_mark_tasks_as_done (:obj:`bool`, optional): :obj:`True` if users other than the
            creator of the list can mark tasks as done or not done

    Attributes:
        title (:obj:`str`): Title of the checklist.
        title_entities (Tuple[:class:`telegram.MessageEntity`]): Optional. Special
            entities that appear in the checklist title.
        tasks (Tuple[:class:`telegram.ChecklistTask`]): List of tasks in the checklist.
        others_can_add_tasks (:obj:`bool`): Optional. :obj:`True` if users other than the creator
            of the list can add tasks to the list
        others_can_mark_tasks_as_done (:obj:`bool`): Optional. :obj:`True` if users other than the
            creator of the list can mark tasks as done or not done
    """

    __slots__ = (
        "others_can_add_tasks",
        "others_can_mark_tasks_as_done",
        "tasks",
        "title",
        "title_entities",
    )

    def __init__(
        self,
        title: str,
        tasks: Sequence[ChecklistTask],
        title_entities: Sequence[MessageEntity] | None = None,
        others_can_add_tasks: bool | None = None,
        others_can_mark_tasks_as_done: bool | None = None,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        # Required fields
        self.title: str = title
        self.tasks: tuple[ChecklistTask, ...] = parse_sequence_arg(tasks)
        # Optional fields
        self.title_entities: tuple[MessageEntity, ...] = parse_sequence_arg(title_entities)
        self.others_can_add_tasks: bool | None = others_can_add_tasks
        self.others_can_mark_tasks_as_done: bool | None = others_can_mark_tasks_as_done

        # Equality is based solely on the tasks.
        self._id_attrs = (self.tasks,)
        self._freeze()

    @classmethod
    def de_json(cls, data: JSONDict, bot: Optional["Bot"] = None) -> "Checklist":
        """See :meth:`telegram.TelegramObject.de_json`."""
        data = cls._parse_data(data)

        for key, kls in (("title_entities", MessageEntity), ("tasks", ChecklistTask)):
            data[key] = de_list_optional(data.get(key), kls, bot)

        return super().de_json(data=data, bot=bot)

    def parse_entity(self, entity: MessageEntity) -> str:
        """Extract the substring of :attr:`title` that the given entity refers to.

        Note:
            Telegram specifies entity offsets and lengths in UTF-16 code units,
            so slicing :attr:`title` directly with them is not correct in
            general; this helper performs the UTF-16-aware extraction.

        Args:
            entity (:class:`telegram.MessageEntity`): The entity to extract the text from. It must
                be an entity that belongs to :attr:`title_entities`.

        Returns:
            :obj:`str`: The text of the given entity.
        """
        return parse_message_entity(self.title, entity)

    def parse_entities(self, types: list[str] | None = None) -> dict[MessageEntity, str]:
        """Map entities of :attr:`title_entities` to the title text they cover.

        Entities are filtered by their ``type`` attribute: an entity is included
        if and only if its type is contained in *types*.

        Note:
            Prefer this method over reading :attr:`title_entities` directly, as
            it computes the correct substrings based on UTF-16 code points. See
            :attr:`parse_entity` for more info.

        Args:
            types (list[:obj:`str`], optional): List of ``MessageEntity`` types as strings.
                Defaults to :attr:`telegram.MessageEntity.ALL_TYPES`.

        Returns:
            dict[:class:`telegram.MessageEntity`, :obj:`str`]: A dictionary of entities mapped to
            the text that belongs to them, calculated based on UTF-16 codepoints.
        """
        return parse_message_entities(self.title, self.title_entities, types)
class ChecklistTasksDone(TelegramObject):
    """
    Describes a service message about checklist tasks marked as done or not done.

    Objects of this class are comparable in terms of equality.
    Two objects of this class are considered equal, if their :attr:`marked_as_done_task_ids` and
    :attr:`marked_as_not_done_task_ids` are equal.

    .. versionadded:: 22.3

    Args:
        checklist_message (:class:`telegram.Message`, optional): Message containing the checklist
            whose tasks were marked as done or not done. Note that the
            :class:`~telegram.Message` object in this field will not contain the
            :attr:`~telegram.Message.reply_to_message` field even if it itself is a reply.
        marked_as_done_task_ids (Sequence[:obj:`int`], optional): Identifiers of the tasks that
            were marked as done
        marked_as_not_done_task_ids (Sequence[:obj:`int`], optional): Identifiers of the tasks that
            were marked as not done

    Attributes:
        checklist_message (:class:`telegram.Message`): Optional. Message containing the checklist
            whose tasks were marked as done or not done. Note that the
            :class:`~telegram.Message` object in this field will not contain the
            :attr:`~telegram.Message.reply_to_message` field even if it itself is a reply.
        marked_as_done_task_ids (Tuple[:obj:`int`]): Optional. Identifiers of the tasks that were
            marked as done
        marked_as_not_done_task_ids (Tuple[:obj:`int`]): Optional. Identifiers of the tasks that
            were marked as not done
    """

    __slots__ = (
        "checklist_message",
        "marked_as_done_task_ids",
        "marked_as_not_done_task_ids",
    )

    def __init__(
        self,
        checklist_message: Optional["Message"] = None,
        marked_as_done_task_ids: Sequence[int] | None = None,
        marked_as_not_done_task_ids: Sequence[int] | None = None,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        done_ids = parse_sequence_arg(marked_as_done_task_ids)
        not_done_ids = parse_sequence_arg(marked_as_not_done_task_ids)

        self.checklist_message: Message | None = checklist_message
        self.marked_as_done_task_ids: tuple[int, ...] = done_ids
        self.marked_as_not_done_task_ids: tuple[int, ...] = not_done_ids

        # Equality is based on both id tuples; the message is ignored.
        self._id_attrs = (self.marked_as_done_task_ids, self.marked_as_not_done_task_ids)
        self._freeze()

    @classmethod
    def de_json(cls, data: JSONDict, bot: Optional["Bot"] = None) -> "ChecklistTasksDone":
        """See :meth:`telegram.TelegramObject.de_json`."""
        data = cls._parse_data(data)

        # needs to be imported here to avoid circular import issues
        from telegram import Message  # pylint: disable=import-outside-toplevel # noqa: PLC0415

        data["checklist_message"] = de_json_optional(data.get("checklist_message"), Message, bot)
        return super().de_json(data=data, bot=bot)
class ChecklistTasksAdded(TelegramObject):
    """
    Describes a service message about tasks added to a checklist.

    Objects of this class are comparable in terms of equality.
    Two objects of this class are considered equal, if their :attr:`tasks` are equal.

    .. versionadded:: 22.3

    Args:
        checklist_message (:class:`telegram.Message`, optional): Message containing the checklist
            to which tasks were added. Note that the :class:`~telegram.Message`
            object in this field will not contain the :attr:`~telegram.Message.reply_to_message`
            field even if it itself is a reply.
        tasks (Sequence[:class:`telegram.ChecklistTask`]): List of tasks added to the checklist

    Attributes:
        checklist_message (:class:`telegram.Message`): Optional. Message containing the checklist
            to which tasks were added. Note that the :class:`~telegram.Message`
            object in this field will not contain the :attr:`~telegram.Message.reply_to_message`
            field even if it itself is a reply.
        tasks (Tuple[:class:`telegram.ChecklistTask`]): List of tasks added to the checklist
    """

    __slots__ = ("checklist_message", "tasks")

    def __init__(
        self,
        tasks: Sequence[ChecklistTask],
        checklist_message: Optional["Message"] = None,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        self.checklist_message: Message | None = checklist_message
        self.tasks: tuple[ChecklistTask, ...] = parse_sequence_arg(tasks)

        # Equality is based solely on the added tasks.
        self._id_attrs = (self.tasks,)
        self._freeze()

    @classmethod
    def de_json(cls, data: JSONDict, bot: Optional["Bot"] = None) -> "ChecklistTasksAdded":
        """See :meth:`telegram.TelegramObject.de_json`."""
        data = cls._parse_data(data)

        # needs to be imported here to avoid circular import issues
        from telegram import Message  # pylint: disable=import-outside-toplevel # noqa: PLC0415

        data["checklist_message"] = de_json_optional(data.get("checklist_message"), Message, bot)
        # Use de_list_optional for consistency with Checklist.de_json; it
        # produces an empty tuple for a missing key, matching the previous
        # ``ChecklistTask.de_list(data.get("tasks", []), bot)`` call.
        data["tasks"] = de_list_optional(data.get("tasks"), ChecklistTask, bot)
        return super().de_json(data=data, bot=bot)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "src/telegram/_checklists.py",
"license": "GNU General Public License v3.0",
"lines": 327,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
python-telegram-bot/python-telegram-bot:src/telegram/_directmessagepricechanged.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Direct Message Price."""
from telegram._telegramobject import TelegramObject
from telegram._utils.types import JSONDict
class DirectMessagePriceChanged(TelegramObject):
    """
    Describes a service message about a change in the price of direct messages sent to a channel
    chat.

    .. versionadded:: 22.3

    Objects of this class are comparable in terms of equality. Two objects of this class are
    considered equal, if their :attr:`are_direct_messages_enabled`, and
    :attr:`direct_message_star_count` are equal.

    Args:
        are_direct_messages_enabled (:obj:`bool`):
            :obj:`True`, if direct messages are enabled for the channel chat; :obj:`False`
            otherwise.
        direct_message_star_count (:obj:`int`, optional):
            The new number of Telegram Stars that must be paid by users for each direct message
            sent to the channel. Does not apply to users who have been exempted by administrators.
            Defaults to ``0``.

    Attributes:
        are_direct_messages_enabled (:obj:`bool`):
            :obj:`True`, if direct messages are enabled for the channel chat; :obj:`False`
            otherwise.
        direct_message_star_count (:obj:`int`):
            Optional. The new number of Telegram Stars that must be paid by users for each direct
            message sent to the channel. Does not apply to users who have been exempted by
            administrators. Defaults to ``0``.
    """

    __slots__ = ("are_direct_messages_enabled", "direct_message_star_count")

    def __init__(
        self,
        are_direct_messages_enabled: bool,
        direct_message_star_count: int | None = None,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        # Required field
        self.are_direct_messages_enabled: bool = are_direct_messages_enabled
        # Optional field
        self.direct_message_star_count: int | None = direct_message_star_count

        # Both attributes participate in equality/hashing.
        self._id_attrs = (self.are_direct_messages_enabled, self.direct_message_star_count)
        self._freeze()
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "src/telegram/_directmessagepricechanged.py",
"license": "GNU General Public License v3.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
python-telegram-bot/python-telegram-bot:src/telegram/_inputchecklist.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an objects that are related to Telegram input checklists."""
from collections.abc import Sequence
from telegram._messageentity import MessageEntity
from telegram._telegramobject import TelegramObject
from telegram._utils.argumentparsing import parse_sequence_arg
from telegram._utils.defaultvalue import DEFAULT_NONE
from telegram._utils.types import JSONDict, ODVInput
class InputChecklistTask(TelegramObject):
    """
    Describes a task to add to a checklist.

    Objects of this class are comparable in terms of equality.
    Two objects of this class are considered equal if their :attr:`id` is equal.

    .. versionadded:: 22.3

    Args:
        id (:obj:`int`):
            Unique identifier of the task; must be positive and unique among all task identifiers
            currently present in the checklist.
        text (:obj:`str`):
            Text of the task;
            :tg-const:`telegram.constants.InputChecklistLimit.MIN_TEXT_LENGTH`\
-:tg-const:`telegram.constants.InputChecklistLimit.MAX_TEXT_LENGTH` characters after
            entities parsing.
        parse_mode (:obj:`str`, optional):
            |parse_mode|
        text_entities (Sequence[:class:`telegram.MessageEntity`], optional):
            List of special entities that appear in the text, which can be specified instead of
            parse_mode. Currently, only bold, italic, underline, strikethrough, spoiler, and
            custom_emoji entities are allowed.

    Attributes:
        id (:obj:`int`):
            Unique identifier of the task; must be positive and unique among all task identifiers
            currently present in the checklist.
        text (:obj:`str`):
            Text of the task;
            :tg-const:`telegram.constants.InputChecklistLimit.MIN_TEXT_LENGTH`\
-:tg-const:`telegram.constants.InputChecklistLimit.MAX_TEXT_LENGTH` characters after
            entities parsing.
        parse_mode (:obj:`str`):
            Optional. |parse_mode|
        text_entities (Sequence[:class:`telegram.MessageEntity`]):
            Optional. List of special entities that appear in the text, which can be specified
            instead of parse_mode. Currently, only bold, italic, underline, strikethrough, spoiler,
            and custom_emoji entities are allowed.
    """

    __slots__ = (
        "id",
        "parse_mode",
        "text",
        "text_entities",
    )

    def __init__(
        self,
        id: int,  # pylint: disable=redefined-builtin
        text: str,
        parse_mode: ODVInput[str] = DEFAULT_NONE,
        text_entities: Sequence[MessageEntity] | None = None,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        # Required fields
        self.id: int = id
        self.text: str = text
        # Optional fields; parse_mode defaults to the bot-wide setting.
        self.text_entities: tuple[MessageEntity, ...] = parse_sequence_arg(text_entities)
        self.parse_mode: ODVInput[str] = parse_mode

        # Equality is based solely on the task id.
        self._id_attrs = (self.id,)
        self._freeze()
class InputChecklist(TelegramObject):
    """
    Describes a checklist to create.

    Objects of this class are comparable in terms of equality.
    Two objects of this class are considered equal if their :attr:`tasks` is equal.

    .. versionadded:: 22.3

    Args:
        title (:obj:`str`):
            Title of the checklist;
            :tg-const:`telegram.constants.InputChecklistLimit.MIN_TITLE_LENGTH`\
-:tg-const:`telegram.constants.InputChecklistLimit.MAX_TITLE_LENGTH` characters after
            entities parsing.
        parse_mode (:obj:`str`, optional):
            |parse_mode|
        title_entities (Sequence[:class:`telegram.MessageEntity`], optional):
            List of special entities that appear in the title, which
            can be specified instead of :paramref:`parse_mode`. Currently, only bold, italic,
            underline, strikethrough, spoiler, and custom_emoji entities are allowed.
        tasks (Sequence[:class:`telegram.InputChecklistTask`]):
            List of
            :tg-const:`telegram.constants.InputChecklistLimit.MIN_TASK_NUMBER`\
-:tg-const:`telegram.constants.InputChecklistLimit.MAX_TASK_NUMBER` tasks in
            the checklist.
        others_can_add_tasks (:obj:`bool`, optional):
            Pass :obj:`True` if other users can add tasks to the checklist.
        others_can_mark_tasks_as_done (:obj:`bool`, optional):
            Pass :obj:`True` if other users can mark tasks as done or not done in the checklist.

    Attributes:
        title (:obj:`str`):
            Title of the checklist;
            :tg-const:`telegram.constants.InputChecklistLimit.MIN_TITLE_LENGTH`\
-:tg-const:`telegram.constants.InputChecklistLimit.MAX_TITLE_LENGTH` characters after
            entities parsing.
        parse_mode (:obj:`str`):
            Optional. |parse_mode|
        title_entities (Sequence[:class:`telegram.MessageEntity`]):
            Optional. List of special entities that appear in the title, which
            can be specified instead of :paramref:`parse_mode`. Currently, only bold, italic,
            underline, strikethrough, spoiler, and custom_emoji entities are allowed.
        tasks (Sequence[:class:`telegram.InputChecklistTask`]):
            List of
            :tg-const:`telegram.constants.InputChecklistLimit.MIN_TASK_NUMBER`\
-:tg-const:`telegram.constants.InputChecklistLimit.MAX_TASK_NUMBER` tasks in
            the checklist.
        others_can_add_tasks (:obj:`bool`):
            Optional. Pass :obj:`True` if other users can add tasks to the checklist.
        others_can_mark_tasks_as_done (:obj:`bool`):
            Optional. Pass :obj:`True` if other users can mark tasks as done or not done in
            the checklist.
    """

    __slots__ = (
        "others_can_add_tasks",
        "others_can_mark_tasks_as_done",
        "parse_mode",
        "tasks",
        "title",
        "title_entities",
    )

    def __init__(
        self,
        title: str,
        tasks: Sequence[InputChecklistTask],
        parse_mode: ODVInput[str] = DEFAULT_NONE,
        title_entities: Sequence[MessageEntity] | None = None,
        others_can_add_tasks: bool | None = None,
        others_can_mark_tasks_as_done: bool | None = None,
        *,
        api_kwargs: JSONDict | None = None,
    ):
        super().__init__(api_kwargs=api_kwargs)
        # Required fields
        self.title: str = title
        self.tasks: tuple[InputChecklistTask, ...] = parse_sequence_arg(tasks)
        # Optional fields; parse_mode defaults to the bot-wide setting.
        self.title_entities: tuple[MessageEntity, ...] = parse_sequence_arg(title_entities)
        self.parse_mode: ODVInput[str] = parse_mode
        self.others_can_add_tasks: bool | None = others_can_add_tasks
        self.others_can_mark_tasks_as_done: bool | None = others_can_mark_tasks_as_done

        # Equality is based solely on the tasks.
        self._id_attrs = (self.tasks,)
        self._freeze()
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "src/telegram/_inputchecklist.py",
"license": "GNU General Public License v3.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
python-telegram-bot/python-telegram-bot:tests/test_checklists.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import datetime as dtm
import pytest
from telegram import (
Chat,
Checklist,
ChecklistTask,
ChecklistTasksAdded,
ChecklistTasksDone,
Dice,
MessageEntity,
User,
)
from telegram._utils.datetime import UTC, to_timestamp
from telegram.constants import ZERO_DATE
from tests.auxil.build_messages import make_message
from tests.auxil.slots import mro_slots
class ChecklistTaskTestBase:
    # Shared constant fixture values for the ChecklistTask tests below.
    id = 42
    text = "here is a text"
    # "here" is bold, "is" is italic — exercised by the parse_entity tests.
    text_entities = [
        MessageEntity(type="bold", offset=0, length=4),
        MessageEntity(type="italic", offset=5, length=2),
    ]
    completed_by_user = User(id=1, first_name="Test", last_name="User", is_bot=False)
    completed_by_chat = Chat(id=-100, type=Chat.SUPERGROUP, title="Test Chat")
    # Microseconds dropped because Telegram timestamps have second resolution.
    completion_date = dtm.datetime.now(tz=UTC).replace(microsecond=0)
@pytest.fixture(scope="module")
def checklist_task():
    """Module-scoped ``ChecklistTask`` built from ``ChecklistTaskTestBase`` values."""
    base = ChecklistTaskTestBase
    return ChecklistTask(
        id=base.id,
        text=base.text,
        text_entities=base.text_entities,
        completed_by_user=base.completed_by_user,
        completed_by_chat=base.completed_by_chat,
        completion_date=base.completion_date,
    )
class TestChecklistTaskWithoutRequest(ChecklistTaskTestBase):
    # Offline tests for telegram.ChecklistTask: slots, (de)serialization,
    # timezone handling, entity parsing and equality. No API requests are made.

    def test_slot_behaviour(self, checklist_task):
        for attr in checklist_task.__slots__:
            assert getattr(checklist_task, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(checklist_task)) == len(set(mro_slots(checklist_task))), (
            "duplicate slot"
        )

    def test_to_dict(self, checklist_task):
        clt_dict = checklist_task.to_dict()

        assert isinstance(clt_dict, dict)
        assert clt_dict["id"] == self.id
        assert clt_dict["text"] == self.text
        assert clt_dict["text_entities"] == [entity.to_dict() for entity in self.text_entities]
        assert clt_dict["completed_by_user"] == self.completed_by_user.to_dict()
        assert clt_dict["completed_by_chat"] == self.completed_by_chat.to_dict()
        assert clt_dict["completion_date"] == to_timestamp(self.completion_date)

    def test_de_json(self, offline_bot):
        json_dict = {
            "id": self.id,
            "text": self.text,
            "text_entities": [entity.to_dict() for entity in self.text_entities],
            "completed_by_user": self.completed_by_user.to_dict(),
            "completed_by_chat": self.completed_by_chat.to_dict(),
            "completion_date": to_timestamp(self.completion_date),
        }
        clt = ChecklistTask.de_json(json_dict, offline_bot)

        assert isinstance(clt, ChecklistTask)
        assert clt.id == self.id
        assert clt.text == self.text
        assert clt.text_entities == tuple(self.text_entities)
        assert clt.completed_by_user == self.completed_by_user
        assert clt.completed_by_chat == self.completed_by_chat
        assert clt.completion_date == self.completion_date
        assert clt.api_kwargs == {}

    def test_de_json_required_fields(self, offline_bot):
        json_dict = {
            "id": self.id,
            "text": self.text,
        }
        clt = ChecklistTask.de_json(json_dict, offline_bot)

        assert isinstance(clt, ChecklistTask)
        assert clt.id == self.id
        assert clt.text == self.text
        assert clt.text_entities == ()
        assert clt.completed_by_user is None
        # Added: the chat counterpart must default to None as well.
        assert clt.completed_by_chat is None
        assert clt.completion_date is None
        assert clt.api_kwargs == {}

    def test_de_json_localization(self, offline_bot, raw_bot, tz_bot):
        json_dict = {
            "id": self.id,
            "text": self.text,
            "completion_date": to_timestamp(self.completion_date),
        }
        clt_bot = ChecklistTask.de_json(json_dict, offline_bot)
        clt_bot_raw = ChecklistTask.de_json(json_dict, raw_bot)
        clt_bot_tz = ChecklistTask.de_json(json_dict, tz_bot)

        # comparing utcoffsets because comparing tzinfo objects is not reliable
        completion_date_offset = clt_bot_tz.completion_date.utcoffset()
        completion_date_offset_tz = tz_bot.defaults.tzinfo.utcoffset(
            clt_bot_tz.completion_date.replace(tzinfo=None)
        )

        assert clt_bot.completion_date.tzinfo == UTC
        assert clt_bot_raw.completion_date.tzinfo == UTC
        assert completion_date_offset_tz == completion_date_offset

    @pytest.mark.parametrize(
        ("completion_date", "expected"),
        [
            (None, None),
            (0, ZERO_DATE),  # 0 is Telegram's sentinel for the zero date
            (1735689600, dtm.datetime(2025, 1, 1, tzinfo=UTC)),
        ],
    )
    def test_de_json_completion_date(self, offline_bot, completion_date, expected):
        json_dict = {
            "id": self.id,
            "text": self.text,
            "completion_date": completion_date,
        }
        clt = ChecklistTask.de_json(json_dict, offline_bot)

        assert isinstance(clt, ChecklistTask)
        assert clt.completion_date == expected

    def test_parse_entity(self, checklist_task):
        assert checklist_task.parse_entity(checklist_task.text_entities[0]) == "here"

    def test_parse_entities(self, checklist_task):
        assert checklist_task.parse_entities(MessageEntity.BOLD) == {
            checklist_task.text_entities[0]: "here"
        }
        assert checklist_task.parse_entities() == {
            checklist_task.text_entities[0]: "here",
            checklist_task.text_entities[1]: "is",
        }

    def test_equality(self, checklist_task):
        clt1 = checklist_task
        # Same id, different text -> equal (equality is id-based).
        clt2 = ChecklistTask(
            id=self.id,
            text="other text",
        )
        # Different id -> not equal.
        clt3 = ChecklistTask(
            id=self.id + 1,
            text=self.text,
        )
        clt4 = Dice(value=1, emoji="🎲")

        assert clt1 == clt2
        assert hash(clt1) == hash(clt2)
        assert clt1 != clt3
        assert hash(clt1) != hash(clt3)
        assert clt1 != clt4
        assert hash(clt1) != hash(clt4)
class ChecklistTestBase:
    # Shared constant fixture values for the Checklist tests below.
    title = "Checklist Title"
    # "Checklist" is bold, "Title" is italic — exercised by parse_entity tests.
    title_entities = [
        MessageEntity(type="bold", offset=0, length=9),
        MessageEntity(type="italic", offset=10, length=5),
    ]
    tasks = [
        ChecklistTask(
            id=1,
            text="Task 1",
        ),
        ChecklistTask(
            id=2,
            text="Task 2",
        ),
    ]
    others_can_add_tasks = True
    others_can_mark_tasks_as_done = False
@pytest.fixture(scope="module")
def checklist():
    """Module-scoped ``Checklist`` built from ``ChecklistTestBase`` values."""
    base = ChecklistTestBase
    return Checklist(
        title=base.title,
        title_entities=base.title_entities,
        tasks=base.tasks,
        others_can_add_tasks=base.others_can_add_tasks,
        others_can_mark_tasks_as_done=base.others_can_mark_tasks_as_done,
    )
class TestChecklistWithoutRequest(ChecklistTestBase):
    # Offline tests for telegram.Checklist: slots, (de)serialization,
    # entity parsing and equality. No API requests are made.

    def test_slot_behaviour(self, checklist):
        for attr in checklist.__slots__:
            assert getattr(checklist, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(checklist)) == len(set(mro_slots(checklist))), "duplicate slot"

    def test_to_dict(self, checklist):
        cl_dict = checklist.to_dict()

        assert isinstance(cl_dict, dict)
        assert cl_dict["title"] == self.title
        assert cl_dict["title_entities"] == [entity.to_dict() for entity in self.title_entities]
        assert cl_dict["tasks"] == [task.to_dict() for task in self.tasks]
        assert cl_dict["others_can_add_tasks"] is self.others_can_add_tasks
        assert cl_dict["others_can_mark_tasks_as_done"] is self.others_can_mark_tasks_as_done

    def test_de_json(self, offline_bot):
        json_dict = {
            "title": self.title,
            "title_entities": [entity.to_dict() for entity in self.title_entities],
            "tasks": [task.to_dict() for task in self.tasks],
            "others_can_add_tasks": self.others_can_add_tasks,
            "others_can_mark_tasks_as_done": self.others_can_mark_tasks_as_done,
        }
        cl = Checklist.de_json(json_dict, offline_bot)

        assert isinstance(cl, Checklist)
        assert cl.title == self.title
        assert cl.title_entities == tuple(self.title_entities)
        assert cl.tasks == tuple(self.tasks)
        assert cl.others_can_add_tasks is self.others_can_add_tasks
        assert cl.others_can_mark_tasks_as_done is self.others_can_mark_tasks_as_done
        assert cl.api_kwargs == {}

    def test_de_json_required_fields(self, offline_bot):
        json_dict = {
            "title": self.title,
            "tasks": [task.to_dict() for task in self.tasks],
        }
        cl = Checklist.de_json(json_dict, offline_bot)

        assert isinstance(cl, Checklist)
        assert cl.title == self.title
        assert cl.title_entities == ()
        assert cl.tasks == tuple(self.tasks)
        # Absent optional bools become falsy (None).
        assert not cl.others_can_add_tasks
        assert not cl.others_can_mark_tasks_as_done

    def test_parse_entity(self, checklist):
        assert checklist.parse_entity(checklist.title_entities[0]) == "Checklist"
        assert checklist.parse_entity(checklist.title_entities[1]) == "Title"

    def test_parse_entities(self, checklist):
        assert checklist.parse_entities(MessageEntity.BOLD) == {
            checklist.title_entities[0]: "Checklist"
        }
        assert checklist.parse_entities() == {
            checklist.title_entities[0]: "Checklist",
            checklist.title_entities[1]: "Title",
        }

    def test_equality(self, checklist, checklist_task):
        cl1 = checklist
        # Same task ids, different titles -> equal (equality is tasks-based).
        cl2 = Checklist(
            title=self.title + " other",
            tasks=[ChecklistTask(id=1, text="something"), ChecklistTask(id=2, text="something")],
        )
        # Different task ids -> not equal.
        cl3 = Checklist(
            title=self.title + " other",
            tasks=[ChecklistTask(id=42, text="Task 2")],
        )
        cl4 = checklist_task

        assert cl1 == cl2
        assert hash(cl1) == hash(cl2)
        assert cl1 != cl3
        assert hash(cl1) != hash(cl3)
        assert cl1 != cl4
        assert hash(cl1) != hash(cl4)
class ChecklistTasksDoneTestBase:
    # Shared constant fixture values for the ChecklistTasksDone tests below.
    checklist_message = make_message("Checklist message")
    marked_as_done_task_ids = [1, 2, 3]
    marked_as_not_done_task_ids = [4, 5]
@pytest.fixture(scope="module")
def checklist_tasks_done():
    """Module-scoped ``ChecklistTasksDone`` built from the shared base values."""
    base = ChecklistTasksDoneTestBase
    return ChecklistTasksDone(
        checklist_message=base.checklist_message,
        marked_as_done_task_ids=base.marked_as_done_task_ids,
        marked_as_not_done_task_ids=base.marked_as_not_done_task_ids,
    )
class TestChecklistTasksDoneWithoutRequest(ChecklistTasksDoneTestBase):
    # Offline tests for telegram.ChecklistTasksDone: slots, (de)serialization
    # and equality. No API requests are made.

    def test_slot_behaviour(self, checklist_tasks_done):
        for attr in checklist_tasks_done.__slots__:
            assert getattr(checklist_tasks_done, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(checklist_tasks_done)) == len(set(mro_slots(checklist_tasks_done))), (
            "duplicate slot"
        )

    def test_to_dict(self, checklist_tasks_done):
        cltd_dict = checklist_tasks_done.to_dict()

        assert isinstance(cltd_dict, dict)
        assert cltd_dict["checklist_message"] == self.checklist_message.to_dict()
        assert cltd_dict["marked_as_done_task_ids"] == self.marked_as_done_task_ids
        assert cltd_dict["marked_as_not_done_task_ids"] == self.marked_as_not_done_task_ids

    def test_de_json(self, offline_bot):
        json_dict = {
            "checklist_message": self.checklist_message.to_dict(),
            "marked_as_done_task_ids": self.marked_as_done_task_ids,
            "marked_as_not_done_task_ids": self.marked_as_not_done_task_ids,
        }
        cltd = ChecklistTasksDone.de_json(json_dict, offline_bot)

        assert isinstance(cltd, ChecklistTasksDone)
        assert cltd.checklist_message == self.checklist_message
        assert cltd.marked_as_done_task_ids == tuple(self.marked_as_done_task_ids)
        assert cltd.marked_as_not_done_task_ids == tuple(self.marked_as_not_done_task_ids)
        assert cltd.api_kwargs == {}

    def test_de_json_required_fields(self, offline_bot):
        # All fields are optional; an empty payload must still deserialize.
        cltd = ChecklistTasksDone.de_json({}, offline_bot)

        assert isinstance(cltd, ChecklistTasksDone)
        assert cltd.checklist_message is None
        assert cltd.marked_as_done_task_ids == ()
        assert cltd.marked_as_not_done_task_ids == ()
        assert cltd.api_kwargs == {}

    def test_equality(self, checklist_tasks_done):
        cltd1 = checklist_tasks_done
        # Same id tuples, message ignored -> equal.
        cltd2 = ChecklistTasksDone(
            checklist_message=None,
            marked_as_done_task_ids=[1, 2, 3],
            marked_as_not_done_task_ids=[4, 5],
        )
        # Missing not-done ids -> not equal.
        cltd3 = ChecklistTasksDone(
            checklist_message=make_message("Checklist message"),
            marked_as_done_task_ids=[1, 2, 3],
        )
        cltd4 = make_message("Not a checklist tasks done")

        assert cltd1 == cltd2
        assert hash(cltd1) == hash(cltd2)
        assert cltd1 != cltd3
        assert hash(cltd1) != hash(cltd3)
        assert cltd1 != cltd4
        assert hash(cltd1) != hash(cltd4)
class ChecklistTasksAddedTestBase:
    # Shared constant fixture values for the ChecklistTasksAdded tests below.
    checklist_message = make_message("Checklist message")
    tasks = [
        ChecklistTask(id=1, text="Task 1"),
        ChecklistTask(id=2, text="Task 2"),
        ChecklistTask(id=3, text="Task 3"),
    ]
@pytest.fixture(scope="module")
def checklist_tasks_added():
    """Module-scoped ``ChecklistTasksAdded`` built from the shared base values."""
    base = ChecklistTasksAddedTestBase
    return ChecklistTasksAdded(
        checklist_message=base.checklist_message,
        tasks=base.tasks,
    )
class TestChecklistTasksAddedWithoutRequest(ChecklistTasksAddedTestBase):
    """Offline tests for ChecklistTasksAdded (slots, (de-)serialisation, equality)."""

    def test_slot_behaviour(self, checklist_tasks_added):
        inst = checklist_tasks_added
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_to_dict(self, checklist_tasks_added):
        as_dict = checklist_tasks_added.to_dict()
        assert isinstance(as_dict, dict)
        assert as_dict["checklist_message"] == self.checklist_message.to_dict()
        assert as_dict["tasks"] == [task.to_dict() for task in self.tasks]

    def test_de_json(self, offline_bot):
        payload = {
            "checklist_message": self.checklist_message.to_dict(),
            "tasks": [task.to_dict() for task in self.tasks],
        }
        restored = ChecklistTasksAdded.de_json(payload, offline_bot)
        assert isinstance(restored, ChecklistTasksAdded)
        assert restored.api_kwargs == {}
        assert restored.checklist_message == self.checklist_message
        # de_json converts JSON arrays into immutable tuples
        assert restored.tasks == tuple(self.tasks)

    def test_de_json_required_fields(self, offline_bot):
        # `tasks` is the only field supplied here; the message defaults to None
        restored = ChecklistTasksAdded.de_json(
            {"tasks": [task.to_dict() for task in self.tasks]}, offline_bot
        )
        assert isinstance(restored, ChecklistTasksAdded)
        assert restored.api_kwargs == {}
        assert restored.checklist_message is None
        assert restored.tasks == tuple(self.tasks)

    def test_equality(self, checklist_tasks_added):
        first = checklist_tasks_added
        same_task_ids = ChecklistTasksAdded(
            checklist_message=None,
            tasks=[
                ChecklistTask(id=1, text="Other Task 1"),
                ChecklistTask(id=2, text="Other Task 2"),
                ChecklistTask(id=3, text="Other Task 3"),
            ],
        )
        fewer_tasks = ChecklistTasksAdded(
            checklist_message=make_message("Checklist message"),
            tasks=[ChecklistTask(id=1, text="Task 1")],
        )
        unrelated = make_message("Not a checklist tasks added")
        assert first == same_task_ids
        assert hash(first) == hash(same_task_ids)
        for other in (fewer_tasks, unrelated):
            assert first != other
            assert hash(first) != hash(other)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/test_checklists.py",
"license": "GNU General Public License v3.0",
"lines": 383,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:tests/test_directmessagepricechanged.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object for testing a Direct Message Price."""
from typing import TYPE_CHECKING
import pytest
from telegram import DirectMessagePriceChanged, User
from tests.auxil.slots import mro_slots
if TYPE_CHECKING:
from telegram._utils.types import JSONDict
@pytest.fixture
def direct_message_price_changed():
    """Fresh DirectMessagePriceChanged built from the shared test-base values."""
    base = DirectMessagePriceChangedTestBase
    return DirectMessagePriceChanged(
        are_direct_messages_enabled=base.are_direct_messages_enabled,
        direct_message_star_count=base.direct_message_star_count,
    )
class DirectMessagePriceChangedTestBase:
    """Shared fixture values for the DirectMessagePriceChanged tests."""

    are_direct_messages_enabled: bool = True
    direct_message_star_count: int = 100
class TestDirectMessagePriceChangedWithoutRequest(DirectMessagePriceChangedTestBase):
    """Offline tests for DirectMessagePriceChanged (slots, de_json, to_dict, equality)."""

    def test_slot_behaviour(self, direct_message_price_changed):
        inst = direct_message_price_changed
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_de_json(self, offline_bot):
        payload: JSONDict = {
            "are_direct_messages_enabled": self.are_direct_messages_enabled,
            "direct_message_star_count": self.direct_message_star_count,
        }
        restored = DirectMessagePriceChanged.de_json(payload, offline_bot)
        assert restored.api_kwargs == {}
        assert restored.are_direct_messages_enabled == self.are_direct_messages_enabled
        assert restored.direct_message_star_count == self.direct_message_star_count

    def test_to_dict(self, direct_message_price_changed):
        as_dict = direct_message_price_changed.to_dict()
        assert as_dict["are_direct_messages_enabled"] == self.are_direct_messages_enabled
        assert as_dict["direct_message_star_count"] == self.direct_message_star_count

    def test_equality(self, direct_message_price_changed):
        first = direct_message_price_changed
        twin = DirectMessagePriceChanged(
            are_direct_messages_enabled=self.are_direct_messages_enabled,
            direct_message_star_count=self.direct_message_star_count,
        )
        disabled = DirectMessagePriceChanged(
            are_direct_messages_enabled=False,
            direct_message_star_count=self.direct_message_star_count,
        )
        not_a_dmpc = User(id=1, first_name="wrong", is_bot=False)
        assert first == twin
        assert hash(first) == hash(twin)
        for other in (disabled, not_a_dmpc):
            assert first != other
            assert hash(first) != hash(other)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/test_directmessagepricechanged.py",
"license": "GNU General Public License v3.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:tests/test_inputchecklist.py | #!/usr/bin/env python
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram import Dice, InputChecklist, InputChecklistTask, MessageEntity
from tests.auxil.slots import mro_slots
@pytest.fixture(scope="module")
def input_checklist_task():
    """Module-wide InputChecklistTask built from the shared base-class values."""
    base = InputChecklistTaskTestBase
    return InputChecklistTask(
        id=base.id,
        text=base.text,
        parse_mode=base.parse_mode,
        text_entities=base.text_entities,
    )
class InputChecklistTaskTestBase:
    """Shared fixture values for the InputChecklistTask tests."""

    id = 1
    text = "buy food"
    parse_mode = "MarkdownV2"
    # Two entities so tuple conversion of the sequence argument is exercised.
    text_entities = [
        MessageEntity(type="bold", offset=0, length=3),
        MessageEntity(type="italic", offset=4, length=4),
    ]
class TestInputChecklistTaskWithoutRequest(InputChecklistTaskTestBase):
    """Offline tests for InputChecklistTask (slots, attributes, to_dict, equality)."""

    def test_slot_behaviour(self, input_checklist_task):
        inst = input_checklist_task
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_expected_values(self, input_checklist_task):
        inst = input_checklist_task
        assert inst.id == self.id
        assert inst.text == self.text
        assert inst.parse_mode == self.parse_mode
        # sequence arguments are stored as immutable tuples
        assert inst.text_entities == tuple(self.text_entities)

    def test_to_dict(self, input_checklist_task):
        as_dict = input_checklist_task.to_dict()
        assert isinstance(as_dict, dict)
        assert as_dict["id"] == self.id
        assert as_dict["text"] == self.text
        assert as_dict["parse_mode"] == self.parse_mode
        assert as_dict["text_entities"] == [entity.to_dict() for entity in self.text_entities]
        # Test that default-value parameter `parse_mode` is handled correctly
        minimal = InputChecklistTask(id=1, text="text")
        assert "parse_mode" not in minimal.to_dict()

    def test_equality(self, input_checklist_task):
        reference = input_checklist_task
        same_id = InputChecklistTask(id=self.id, text=f"other {self.text}")
        other_id = InputChecklistTask(id=self.id + 1, text=self.text)
        unrelated = Dice(value=1, emoji="🎲")
        assert reference == same_id
        assert hash(reference) == hash(same_id)
        for other in (other_id, unrelated):
            assert reference != other
            assert hash(reference) != hash(other)
@pytest.fixture(scope="module")
def input_checklist():
    """Module-wide InputChecklist built from the shared base-class values."""
    base = InputChecklistTestBase
    return InputChecklist(
        title=base.title,
        tasks=base.tasks,
        parse_mode=base.parse_mode,
        title_entities=base.title_entities,
        others_can_add_tasks=base.others_can_add_tasks,
        others_can_mark_tasks_as_done=base.others_can_mark_tasks_as_done,
    )
class InputChecklistTestBase:
    """Shared fixture values for the InputChecklist tests."""

    title = "test list"
    # Two tasks so tuple conversion of the sequence argument is exercised.
    tasks = [
        InputChecklistTask(id=1, text="eat"),
        InputChecklistTask(id=2, text="sleep"),
    ]
    parse_mode = "MarkdownV2"
    title_entities = [
        MessageEntity(type="bold", offset=0, length=4),
        MessageEntity(type="italic", offset=5, length=4),
    ]
    others_can_add_tasks = True
    others_can_mark_tasks_as_done = False
class TestInputChecklistWithoutRequest(InputChecklistTestBase):
    """Offline tests for InputChecklist (slots, attributes, to_dict, equality)."""

    def test_slot_behaviour(self, input_checklist):
        inst = input_checklist
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_expected_values(self, input_checklist):
        inst = input_checklist
        assert inst.title == self.title
        assert inst.parse_mode == self.parse_mode
        # sequence arguments are stored as immutable tuples
        assert inst.tasks == tuple(self.tasks)
        assert inst.title_entities == tuple(self.title_entities)
        assert inst.others_can_add_tasks == self.others_can_add_tasks
        assert inst.others_can_mark_tasks_as_done == self.others_can_mark_tasks_as_done

    def test_to_dict(self, input_checklist):
        as_dict = input_checklist.to_dict()
        assert isinstance(as_dict, dict)
        assert as_dict["title"] == self.title
        assert as_dict["tasks"] == [task.to_dict() for task in self.tasks]
        assert as_dict["parse_mode"] == self.parse_mode
        assert as_dict["title_entities"] == [entity.to_dict() for entity in self.title_entities]
        assert as_dict["others_can_add_tasks"] == self.others_can_add_tasks
        assert as_dict["others_can_mark_tasks_as_done"] == self.others_can_mark_tasks_as_done
        # Test that default-value parameter `parse_mode` is handled correctly
        minimal = InputChecklist(title=self.title, tasks=self.tasks)
        assert "parse_mode" not in minimal.to_dict()

    def test_equality(self, input_checklist):
        reference = input_checklist
        same_tasks = InputChecklist(
            title=f"other {self.title}",
            tasks=[InputChecklistTask(id=1, text="eat"), InputChecklistTask(id=2, text="sleep")],
        )
        other_tasks = InputChecklist(
            title=self.title,
            tasks=[InputChecklistTask(id=9, text="Other Task")],
        )
        unrelated = Dice(value=1, emoji="🎲")
        assert reference == same_tasks
        assert hash(reference) == hash(same_tasks)
        for other in (other_tasks, unrelated):
            assert reference != other
            assert hash(reference) != hash(other)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/test_inputchecklist.py",
"license": "GNU General Public License v3.0",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:tests/_files/test_inputprofilephoto.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import datetime as dtm
import pytest
from telegram import (
InputFile,
InputProfilePhoto,
InputProfilePhotoAnimated,
InputProfilePhotoStatic,
)
from telegram.constants import InputProfilePhotoType
from tests.auxil.files import data_file
from tests.auxil.slots import mro_slots
class TestInputProfilePhotoWithoutRequest:
    """Offline tests for the InputProfilePhoto base class."""

    def test_type_enum_conversion(self):
        # Known values are normalised to InputProfilePhotoType members ...
        for value, member in (
            ("static", InputProfilePhotoType.STATIC),
            ("animated", InputProfilePhotoType.ANIMATED),
        ):
            instance = InputProfilePhoto(type=value)
            assert isinstance(instance.type, InputProfilePhotoType)
            assert instance.type is member
        # ... while unknown values are kept as plain strings
        instance = InputProfilePhoto(type="unknown")
        assert isinstance(instance.type, str)
        assert instance.type == "unknown"
@pytest.fixture(scope="module")
def input_profile_photo_static():
    """Module-wide InputProfilePhotoStatic built from in-memory photo bytes."""
    photo_bytes = InputProfilePhotoStaticTestBase.photo.read_bytes()
    return InputProfilePhotoStatic(photo=photo_bytes)
class InputProfilePhotoStaticTestBase:
    """Shared fixture values for the InputProfilePhotoStatic tests."""

    type_ = "static"
    # Path to the test image; the fixture reads its bytes.
    photo = data_file("telegram.jpg")
class TestInputProfilePhotoStaticWithoutRequest(InputProfilePhotoStaticTestBase):
    """Offline tests for InputProfilePhotoStatic."""

    def test_slot_behaviour(self, input_profile_photo_static):
        instance = input_profile_photo_static
        for attr in instance.__slots__:
            assert getattr(instance, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(instance)) == len(set(mro_slots(instance))), "duplicate slot"

    def test_expected_values(self, input_profile_photo_static):
        assert input_profile_photo_static.type == self.type_
        # raw bytes are wrapped into an InputFile on construction
        assert isinstance(input_profile_photo_static.photo, InputFile)

    def test_to_dict(self, input_profile_photo_static):
        as_dict = input_profile_photo_static.to_dict()
        assert as_dict["type"] == self.type_
        assert as_dict["photo"] == input_profile_photo_static.photo

    def test_with_local_file(self):
        # local paths are converted to file URIs rather than read eagerly
        instance = InputProfilePhotoStatic(photo=data_file("telegram.jpg"))
        assert instance.photo == data_file("telegram.jpg").as_uri()

    def test_type_enum_conversion(self, input_profile_photo_static):
        assert input_profile_photo_static.type is InputProfilePhotoType.STATIC
@pytest.fixture(scope="module")
def input_profile_photo_animated():
    """Module-wide InputProfilePhotoAnimated built from in-memory video bytes."""
    base = InputProfilePhotoAnimatedTestBase
    return InputProfilePhotoAnimated(
        animation=base.animation.read_bytes(),
        main_frame_timestamp=base.main_frame_timestamp,
    )
class InputProfilePhotoAnimatedTestBase:
    """Shared fixture values for the InputProfilePhotoAnimated tests."""

    type_ = "animated"
    animation = data_file("telegram2.mp4")
    # Non-integer seconds so total_seconds() serialisation is exercised.
    main_frame_timestamp = dtm.timedelta(seconds=42, milliseconds=43)
class TestInputProfilePhotoAnimatedWithoutRequest(InputProfilePhotoAnimatedTestBase):
    """Offline tests for InputProfilePhotoAnimated."""

    def test_slot_behaviour(self, input_profile_photo_animated):
        instance = input_profile_photo_animated
        for attr in instance.__slots__:
            assert getattr(instance, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(instance)) == len(set(mro_slots(instance))), "duplicate slot"

    def test_expected_values(self, input_profile_photo_animated):
        instance = input_profile_photo_animated
        assert instance.type == self.type_
        # raw bytes are wrapped into an InputFile on construction
        assert isinstance(instance.animation, InputFile)
        assert instance.main_frame_timestamp == self.main_frame_timestamp

    def test_to_dict(self, input_profile_photo_animated):
        as_dict = input_profile_photo_animated.to_dict()
        assert as_dict["type"] == self.type_
        assert as_dict["animation"] == input_profile_photo_animated.animation
        # timedeltas are serialised as plain seconds
        assert as_dict["main_frame_timestamp"] == self.main_frame_timestamp.total_seconds()

    def test_with_local_file(self):
        # local paths are converted to file URIs rather than read eagerly
        instance = InputProfilePhotoAnimated(
            animation=data_file("telegram2.mp4"),
            main_frame_timestamp=self.main_frame_timestamp,
        )
        assert instance.animation == data_file("telegram2.mp4").as_uri()

    def test_type_enum_conversion(self, input_profile_photo_animated):
        assert input_profile_photo_animated.type is InputProfilePhotoType.ANIMATED

    @pytest.mark.parametrize(
        "timestamp",
        [
            dtm.timedelta(days=2),
            dtm.timedelta(seconds=2 * 24 * 60 * 60),
            2 * 24 * 60 * 60,
            float(2 * 24 * 60 * 60),
        ],
    )
    def test_main_frame_timestamp_conversion(self, timestamp):
        # ints, floats and timedeltas are all normalised to timedelta ...
        instance = InputProfilePhotoAnimated(
            animation=self.animation,
            main_frame_timestamp=timestamp,
        )
        assert isinstance(instance.main_frame_timestamp, dtm.timedelta)
        assert instance.main_frame_timestamp == dtm.timedelta(days=2)
        # ... while None is passed through untouched
        instance = InputProfilePhotoAnimated(animation=self.animation, main_frame_timestamp=None)
        assert instance.main_frame_timestamp is None
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/_files/test_inputprofilephoto.py",
"license": "GNU General Public License v3.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:tests/_files/test_inputstorycontent.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import datetime as dtm
import pytest
from telegram import InputFile, InputStoryContent, InputStoryContentPhoto, InputStoryContentVideo
from telegram.constants import InputStoryContentType
from tests.auxil.files import data_file
from tests.auxil.slots import mro_slots
@pytest.fixture(scope="module")
def input_story_content():
    """Module-wide plain InputStoryContent instance."""
    return InputStoryContent(type=InputStoryContentTestBase.type)
class InputStoryContentTestBase:
    """Shared fixture values for the InputStoryContent tests."""

    type = InputStoryContent.PHOTO
class TestInputStoryContent(InputStoryContentTestBase):
    """Offline tests for the InputStoryContent base class."""

    def test_slot_behaviour(self, input_story_content):
        instance = input_story_content
        for attr in instance.__slots__:
            assert getattr(instance, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(instance)) == len(set(mro_slots(instance))), "duplicate slot"

    def test_type_enum_conversion(self):
        # known values become enum members, unknown ones stay plain strings
        assert type(InputStoryContent(type="video").type) is InputStoryContentType
        assert InputStoryContent(type="unknown").type == "unknown"
@pytest.fixture(scope="module")
def input_story_content_photo():
    """Module-wide InputStoryContentPhoto built from in-memory photo bytes."""
    photo_bytes = InputStoryContentPhotoTestBase.photo.read_bytes()
    return InputStoryContentPhoto(photo=photo_bytes)
class InputStoryContentPhotoTestBase:
    """Shared fixture values for the InputStoryContentPhoto tests."""

    type = InputStoryContentType.PHOTO
    # Path to the test image; the fixture reads its bytes.
    photo = data_file("telegram.jpg")
class TestInputStoryContentPhotoWithoutRequest(InputStoryContentPhotoTestBase):
    """Offline tests for InputStoryContentPhoto."""

    def test_slot_behaviour(self, input_story_content_photo):
        instance = input_story_content_photo
        for attr in instance.__slots__:
            assert getattr(instance, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(instance)) == len(set(mro_slots(instance))), "duplicate slot"

    def test_expected_values(self, input_story_content_photo):
        assert input_story_content_photo.type is self.type
        # raw bytes are wrapped into an InputFile on construction
        assert isinstance(input_story_content_photo.photo, InputFile)

    def test_to_dict(self, input_story_content_photo):
        json_dict = input_story_content_photo.to_dict()
        assert json_dict["type"] is self.type
        assert json_dict["photo"] == input_story_content_photo.photo

    def test_with_photo_file(self, photo_file):
        instance = InputStoryContentPhoto(photo=photo_file)
        assert instance.type is self.type
        assert isinstance(instance.photo, InputFile)

    def test_with_local_files(self):
        # local paths are converted to file URIs rather than read eagerly
        instance = InputStoryContentPhoto(photo=data_file("telegram.jpg"))
        assert instance.photo == data_file("telegram.jpg").as_uri()
@pytest.fixture(scope="module")
def input_story_content_video():
    """Module-wide InputStoryContentVideo built from the shared base-class values."""
    base = InputStoryContentVideoTestBase
    return InputStoryContentVideo(
        video=base.video.read_bytes(),
        duration=base.duration,
        cover_frame_timestamp=base.cover_frame_timestamp,
        is_animation=base.is_animation,
    )
class InputStoryContentVideoTestBase:
    """Shared fixture values for the InputStoryContentVideo tests."""

    type = InputStoryContentType.VIDEO
    # Path to the test video; the fixture reads its bytes.
    video = data_file("telegram.mp4")
    duration = dtm.timedelta(seconds=30)
    cover_frame_timestamp = dtm.timedelta(seconds=15)
    is_animation = False
class TestInputStoryContentVideoWithoutRequest(InputStoryContentVideoTestBase):
    """Offline tests for InputStoryContentVideo."""

    def test_slot_behaviour(self, input_story_content_video):
        instance = input_story_content_video
        for attr in instance.__slots__:
            assert getattr(instance, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(instance)) == len(set(mro_slots(instance))), "duplicate slot"

    def test_expected_values(self, input_story_content_video):
        instance = input_story_content_video
        assert instance.type is self.type
        # raw bytes are wrapped into an InputFile on construction
        assert isinstance(instance.video, InputFile)
        assert instance.duration == self.duration
        assert instance.cover_frame_timestamp == self.cover_frame_timestamp
        assert instance.is_animation is self.is_animation

    def test_to_dict(self, input_story_content_video):
        json_dict = input_story_content_video.to_dict()
        assert json_dict["type"] is self.type
        assert json_dict["video"] == input_story_content_video.video
        # timedeltas are serialised as plain seconds
        assert json_dict["duration"] == self.duration.total_seconds()
        assert json_dict["cover_frame_timestamp"] == self.cover_frame_timestamp.total_seconds()
        assert json_dict["is_animation"] is self.is_animation

    @pytest.mark.parametrize(
        ("argument", "expected"),
        [(4, 4), (4.0, 4), (dtm.timedelta(seconds=4), 4), (4.5, 4.5)],
    )
    def test_to_dict_float_time_period(self, argument, expected):
        # We test that whole number conversion works properly. Only tested here but
        # relevant for some other classes too (e.g InputProfilePhotoAnimated.main_frame_timestamp)
        instance = InputStoryContentVideo(
            video=self.video.read_bytes(),
            duration=argument,
            cover_frame_timestamp=argument,
        )
        json_dict = instance.to_dict()
        for key in ("duration", "cover_frame_timestamp"):
            assert json_dict[key] == expected
            assert type(json_dict[key]) is type(expected)

    def test_with_video_file(self, video_file):
        instance = InputStoryContentVideo(video=video_file)
        assert instance.type is self.type
        assert isinstance(instance.video, InputFile)

    def test_with_local_files(self):
        # local paths are converted to file URIs rather than read eagerly
        instance = InputStoryContentVideo(video=data_file("telegram.mp4"))
        assert instance.video == data_file("telegram.mp4").as_uri()

    @pytest.mark.parametrize("timestamp", [dtm.timedelta(seconds=60), 60, float(60)])
    @pytest.mark.parametrize("field", ["duration", "cover_frame_timestamp"])
    def test_time_period_arg_conversion(self, field, timestamp):
        # ints, floats and timedeltas are all normalised to timedelta ...
        instance = InputStoryContentVideo(video=self.video, **{field: timestamp})
        converted = getattr(instance, field)
        assert isinstance(converted, dtm.timedelta)
        assert converted == dtm.timedelta(seconds=60)
        # ... while None is passed through untouched
        instance = InputStoryContentVideo(video=self.video, **{field: None})
        assert getattr(instance, field) is None
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/_files/test_inputstorycontent.py",
"license": "GNU General Public License v3.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:tests/_payment/stars/test_staramount.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram import StarAmount
from tests.auxil.slots import mro_slots
@pytest.fixture(scope="module")
def star_amount():
    """Module-wide StarAmount built from the shared base-class values."""
    base = StarTransactionTestBase
    return StarAmount(amount=base.amount, nanostar_amount=base.nanostar_amount)
class StarTransactionTestBase:
    """Shared fixture values for the StarAmount tests."""

    # NOTE(review): the name says "StarTransaction" but this base only feeds the
    # StarAmount tests — likely a copy-paste leftover; renaming would touch all users.
    amount = 100
    nanostar_amount = 356
class TestStarAmountWithoutRequest(StarTransactionTestBase):
    """Offline tests for StarAmount (slots, de_json, to_dict, equality)."""

    def test_slot_behaviour(self, star_amount):
        instance = star_amount
        for attr in instance.__slots__:
            assert getattr(instance, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(instance)) == len(set(mro_slots(instance))), "duplicate slot"

    def test_de_json(self, offline_bot):
        payload = {
            "amount": self.amount,
            "nanostar_amount": self.nanostar_amount,
        }
        restored = StarAmount.de_json(payload, offline_bot)
        assert restored.api_kwargs == {}
        assert restored.amount == self.amount
        assert restored.nanostar_amount == self.nanostar_amount

    def test_to_dict(self, star_amount):
        assert star_amount.to_dict() == {
            "amount": self.amount,
            "nanostar_amount": self.nanostar_amount,
        }

    def test_equality(self, star_amount):
        reference = star_amount
        twin = StarAmount(amount=self.amount, nanostar_amount=self.nanostar_amount)
        different = StarAmount(amount=99, nanostar_amount=99)
        assert reference == twin
        assert hash(reference) == hash(twin)
        assert reference != different
        assert hash(reference) != hash(different)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/_payment/stars/test_staramount.py",
"license": "GNU General Public License v3.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:tests/test_business_methods.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import datetime as dtm
import pytest
from telegram import (
BusinessBotRights,
BusinessConnection,
Chat,
InputProfilePhotoStatic,
InputStoryContentPhoto,
MessageEntity,
StarAmount,
Story,
StoryAreaTypeLink,
StoryAreaTypeUniqueGift,
User,
)
from telegram._files._inputstorycontent import InputStoryContentVideo
from telegram._files.sticker import Sticker
from telegram._gifts import AcceptedGiftTypes, Gift
from telegram._inline.inlinekeyboardbutton import InlineKeyboardButton
from telegram._inline.inlinekeyboardmarkup import InlineKeyboardMarkup
from telegram._inputchecklist import InputChecklist, InputChecklistTask
from telegram._message import Message
from telegram._ownedgift import OwnedGiftRegular, OwnedGifts
from telegram._reply import ReplyParameters
from telegram._utils.datetime import UTC
from telegram._utils.defaultvalue import DEFAULT_NONE
from telegram.constants import InputProfilePhotoType, InputStoryContentType
from tests.auxil.files import data_file
class BusinessMethodsTestBase:
    """Shared fixture values for the business-method tests."""

    # Business connection id echoed through every mocked request.
    bci = "42"
class TestBusinessMethodsWithoutRequest(BusinessMethodsTestBase):
async def test_get_business_connection(self, offline_bot, monkeypatch):
user = User(1, "first", False)
user_chat_id = 1
date = dtm.datetime.utcnow()
rights = BusinessBotRights(can_reply=True)
is_enabled = True
bc = BusinessConnection(
self.bci,
user,
user_chat_id,
date,
is_enabled,
rights=rights,
).to_json()
async def do_request(*args, **kwargs):
data = kwargs.get("request_data")
obj = data.parameters.get("business_connection_id")
if obj == self.bci:
return 200, f'{{"ok": true, "result": {bc}}}'.encode()
return 400, b'{"ok": false, "result": []}'
monkeypatch.setattr(offline_bot.request, "do_request", do_request)
obj = await offline_bot.get_business_connection(business_connection_id=self.bci)
assert isinstance(obj, BusinessConnection)
@pytest.mark.parametrize("bool_param", [True, False, None])
async def test_get_business_account_gifts(self, offline_bot, monkeypatch, bool_param):
offset = 50
limit = 50
owned_gifts = OwnedGifts(
total_count=1,
gifts=[
OwnedGiftRegular(
gift=Gift(
id="id1",
sticker=Sticker(
"file_id", "file_unique_id", 512, 512, False, False, "regular"
),
star_count=5,
),
send_date=dtm.datetime.now(tz=UTC).replace(microsecond=0),
owned_gift_id="some_id_1",
)
],
).to_json()
async def do_request_and_make_assertions(*args, **kwargs):
data = kwargs.get("request_data").parameters
assert data.get("business_connection_id") == self.bci
assert data.get("exclude_unsaved") is bool_param
assert data.get("exclude_saved") is bool_param
assert data.get("exclude_unlimited") is bool_param
assert data.get("exclude_limited_upgradable") is bool_param
assert data.get("exclude_limited_non_upgradable") is bool_param
assert data.get("exclude_unique") is bool_param
assert data.get("exclude_from_blockchain") is bool_param
assert data.get("sort_by_price") is bool_param
assert data.get("offset") == offset
assert data.get("limit") == limit
return 200, f'{{"ok": true, "result": {owned_gifts}}}'.encode()
monkeypatch.setattr(offline_bot.request, "do_request", do_request_and_make_assertions)
obj = await offline_bot.get_business_account_gifts(
business_connection_id=self.bci,
exclude_unsaved=bool_param,
exclude_saved=bool_param,
exclude_unlimited=bool_param,
exclude_limited_upgradable=bool_param,
exclude_limited_non_upgradable=bool_param,
exclude_unique=bool_param,
exclude_from_blockchain=bool_param,
sort_by_price=bool_param,
offset=offset,
limit=limit,
)
assert isinstance(obj, OwnedGifts)
async def test_get_business_account_star_balance(self, offline_bot, monkeypatch):
star_amount_json = StarAmount(amount=100, nanostar_amount=356).to_json()
async def do_request(*args, **kwargs):
data = kwargs.get("request_data")
obj = data.parameters.get("business_connection_id")
if obj == self.bci:
return 200, f'{{"ok": true, "result": {star_amount_json}}}'.encode()
return 400, b'{"ok": false, "result": []}'
monkeypatch.setattr(offline_bot.request, "do_request", do_request)
obj = await offline_bot.get_business_account_star_balance(business_connection_id=self.bci)
assert isinstance(obj, StarAmount)
async def test_read_business_message(self, offline_bot, monkeypatch):
chat_id = 43
message_id = 44
async def make_assertion(*args, **kwargs):
data = kwargs.get("request_data").parameters
assert data.get("business_connection_id") == self.bci
assert data.get("chat_id") == chat_id
assert data.get("message_id") == message_id
return True
monkeypatch.setattr(offline_bot.request, "post", make_assertion)
assert await offline_bot.read_business_message(
business_connection_id=self.bci, chat_id=chat_id, message_id=message_id
)
async def test_delete_business_messages(self, offline_bot, monkeypatch):
message_ids = [1, 2, 3]
async def make_assertion(*args, **kwargs):
data = kwargs.get("request_data").parameters
assert data.get("business_connection_id") == self.bci
assert data.get("message_ids") == message_ids
return True
monkeypatch.setattr(offline_bot.request, "post", make_assertion)
assert await offline_bot.delete_business_messages(
business_connection_id=self.bci, message_ids=message_ids
)
@pytest.mark.parametrize("last_name", [None, "last_name"])
async def test_set_business_account_name(self, offline_bot, monkeypatch, last_name):
first_name = "Test Business Account"
async def make_assertion(*args, **kwargs):
data = kwargs.get("request_data").parameters
assert data.get("business_connection_id") == self.bci
assert data.get("first_name") == first_name
assert data.get("last_name") == last_name
return True
monkeypatch.setattr(offline_bot.request, "post", make_assertion)
assert await offline_bot.set_business_account_name(
business_connection_id=self.bci, first_name=first_name, last_name=last_name
)
@pytest.mark.parametrize("username", ["username", None])
async def test_set_business_account_username(self, offline_bot, monkeypatch, username):
async def make_assertion(*args, **kwargs):
data = kwargs.get("request_data").parameters
assert data.get("business_connection_id") == self.bci
assert data.get("username") == username
return True
monkeypatch.setattr(offline_bot.request, "post", make_assertion)
assert await offline_bot.set_business_account_username(
business_connection_id=self.bci, username=username
)
@pytest.mark.parametrize("bio", ["bio", None])
async def test_set_business_account_bio(self, offline_bot, monkeypatch, bio):
async def make_assertion(*args, **kwargs):
data = kwargs.get("request_data").parameters
assert data.get("business_connection_id") == self.bci
assert data.get("bio") == bio
return True
monkeypatch.setattr(offline_bot.request, "post", make_assertion)
assert await offline_bot.set_business_account_bio(business_connection_id=self.bci, bio=bio)
    async def test_set_business_account_gift_settings(self, offline_bot, monkeypatch):
        """set_business_account_gift_settings must JSON-encode the nested settings object."""
        show_gift_button = True
        accepted_gift_types = AcceptedGiftTypes(True, True, True, True, True)
        async def make_assertion(*args, **kwargs):
            # json_parameters holds the wire representation, hence the string "true"
            # and the to_json() comparison below.
            data = kwargs.get("request_data").json_parameters
            assert data.get("business_connection_id") == self.bci
            assert data.get("show_gift_button") == "true"
            assert data.get("accepted_gift_types") == accepted_gift_types.to_json()
            return True
        monkeypatch.setattr(offline_bot.request, "post", make_assertion)
        assert await offline_bot.set_business_account_gift_settings(
            business_connection_id=self.bci,
            show_gift_button=show_gift_button,
            accepted_gift_types=accepted_gift_types,
        )
async def test_convert_gift_to_stars(self, offline_bot, monkeypatch):
owned_gift_id = "some_id"
async def make_assertion(*args, **kwargs):
data = kwargs.get("request_data").parameters
assert data.get("business_connection_id") == self.bci
assert data.get("owned_gift_id") == owned_gift_id
return True
monkeypatch.setattr(offline_bot.request, "post", make_assertion)
assert await offline_bot.convert_gift_to_stars(
business_connection_id=self.bci,
owned_gift_id=owned_gift_id,
)
@pytest.mark.parametrize("keep_original_details", [True, None])
@pytest.mark.parametrize("star_count", [100, None])
async def test_upgrade_gift(self, offline_bot, monkeypatch, keep_original_details, star_count):
owned_gift_id = "some_id"
async def make_assertion(*args, **kwargs):
data = kwargs.get("request_data").parameters
assert data.get("business_connection_id") == self.bci
assert data.get("owned_gift_id") == owned_gift_id
assert data.get("keep_original_details") is keep_original_details
assert data.get("star_count") == star_count
return True
monkeypatch.setattr(offline_bot.request, "post", make_assertion)
assert await offline_bot.upgrade_gift(
business_connection_id=self.bci,
owned_gift_id=owned_gift_id,
keep_original_details=keep_original_details,
star_count=star_count,
)
@pytest.mark.parametrize("star_count", [100, None])
async def test_transfer_gift(self, offline_bot, monkeypatch, star_count):
owned_gift_id = "some_id"
new_owner_chat_id = 123
async def make_assertion(*args, **kwargs):
data = kwargs.get("request_data").parameters
assert data.get("business_connection_id") == self.bci
assert data.get("owned_gift_id") == owned_gift_id
assert data.get("new_owner_chat_id") == new_owner_chat_id
assert data.get("star_count") == star_count
return True
monkeypatch.setattr(offline_bot.request, "post", make_assertion)
assert await offline_bot.transfer_gift(
business_connection_id=self.bci,
owned_gift_id=owned_gift_id,
new_owner_chat_id=new_owner_chat_id,
star_count=star_count,
)
async def test_transfer_business_account_stars(self, offline_bot, monkeypatch):
star_count = 100
async def make_assertion(*args, **kwargs):
data = kwargs.get("request_data").parameters
assert data.get("business_connection_id") == self.bci
assert data.get("star_count") == star_count
return True
monkeypatch.setattr(offline_bot.request, "post", make_assertion)
assert await offline_bot.transfer_business_account_stars(
business_connection_id=self.bci,
star_count=star_count,
)
    @pytest.mark.parametrize("is_public", [True, False, None, DEFAULT_NONE])
    async def test_set_business_account_profile_photo(self, offline_bot, monkeypatch, is_public):
        """In-memory photo bytes must be uploaded via multipart; is_public is omitted
        from the request when left at the DEFAULT_NONE sentinel."""
        async def make_assertion(*args, **kwargs):
            request_data = kwargs.get("request_data")
            params = request_data.parameters
            assert params.get("business_connection_id") == self.bci
            if is_public is DEFAULT_NONE:
                # The sentinel means "not passed", so the key must be absent.
                assert "is_public" not in params
            else:
                assert params.get("is_public") == is_public
            assert (photo_dict := params.get("photo")).get("type") == InputProfilePhotoType.STATIC
            # Raw bytes are referenced via attach:// and shipped in the multipart body.
            assert (photo_attach := photo_dict["photo"]).startswith("attach://")
            assert isinstance(
                request_data.multipart_data.get(photo_attach.removeprefix("attach://")), tuple
            )
            return True
        monkeypatch.setattr(offline_bot.request, "post", make_assertion)
        kwargs = {
            "business_connection_id": self.bci,
            "photo": InputProfilePhotoStatic(
                photo=data_file("telegram.jpg").read_bytes(),
            ),
        }
        if is_public is not DEFAULT_NONE:
            kwargs["is_public"] = is_public
        assert await offline_bot.set_business_account_profile_photo(**kwargs)
    async def test_set_business_account_profile_photo_local_file(self, offline_bot, monkeypatch):
        """A local file path must be sent as a file:// URI with no multipart upload."""
        async def make_assertion(*args, **kwargs):
            request_data = kwargs.get("request_data")
            params = request_data.parameters
            assert params.get("business_connection_id") == self.bci
            assert (photo_dict := params.get("photo")).get("type") == InputProfilePhotoType.STATIC
            # Local files are referenced by URI instead of being uploaded.
            assert photo_dict["photo"] == data_file("telegram.jpg").as_uri()
            assert not request_data.multipart_data
            return True
        monkeypatch.setattr(offline_bot.request, "post", make_assertion)
        kwargs = {
            "business_connection_id": self.bci,
            "photo": InputProfilePhotoStatic(
                photo=data_file("telegram.jpg"),
            ),
        }
        assert await offline_bot.set_business_account_profile_photo(**kwargs)
    @pytest.mark.parametrize("is_public", [True, False, None, DEFAULT_NONE])
    async def test_remove_business_account_profile_photo(
        self, offline_bot, monkeypatch, is_public
    ):
        """is_public must be omitted from the request when left at DEFAULT_NONE."""
        async def make_assertion(*args, **kwargs):
            data = kwargs.get("request_data").parameters
            assert data.get("business_connection_id") == self.bci
            if is_public is DEFAULT_NONE:
                # The sentinel means "not passed", so the key must be absent.
                assert "is_public" not in data
            else:
                assert data.get("is_public") == is_public
            return True
        monkeypatch.setattr(offline_bot.request, "post", make_assertion)
        kwargs = {"business_connection_id": self.bci}
        if is_public is not DEFAULT_NONE:
            kwargs["is_public"] = is_public
        assert await offline_bot.remove_business_account_profile_photo(**kwargs)
@pytest.mark.parametrize("active_period", [dtm.timedelta(seconds=30), 30])
async def test_post_story_all_args(self, offline_bot, monkeypatch, active_period):
content = InputStoryContentPhoto(photo=data_file("telegram.jpg").read_bytes())
caption = "test caption"
caption_entities = [
MessageEntity(MessageEntity.BOLD, 0, 3),
MessageEntity(MessageEntity.ITALIC, 5, 11),
]
parse_mode = "Markdown"
areas = [StoryAreaTypeLink("http_url"), StoryAreaTypeUniqueGift("unique_gift_name")]
post_to_chat_page = True
protect_content = True
json_story = Story(chat=Chat(123, "private"), id=123).to_json()
async def do_request_and_make_assertions(*args, **kwargs):
request_data = kwargs.get("request_data")
params = kwargs.get("request_data").parameters
assert params.get("business_connection_id") == self.bci
assert params.get("active_period") == 30
assert params.get("caption") == caption
assert params.get("caption_entities") == [e.to_dict() for e in caption_entities]
assert params.get("parse_mode") == parse_mode
assert params.get("areas") == [area.to_dict() for area in areas]
assert params.get("post_to_chat_page") is post_to_chat_page
assert params.get("protect_content") is protect_content
assert (content_dict := params.get("content")).get(
"type"
) == InputStoryContentType.PHOTO
assert (photo_attach := content_dict["photo"]).startswith("attach://")
assert isinstance(
request_data.multipart_data.get(photo_attach.removeprefix("attach://")), tuple
)
return 200, f'{{"ok": true, "result": {json_story}}}'.encode()
monkeypatch.setattr(offline_bot.request, "do_request", do_request_and_make_assertions)
obj = await offline_bot.post_story(
business_connection_id=self.bci,
content=content,
active_period=active_period,
caption=caption,
caption_entities=caption_entities,
parse_mode=parse_mode,
areas=areas,
post_to_chat_page=post_to_chat_page,
protect_content=protect_content,
)
assert isinstance(obj, Story)
    @pytest.mark.parametrize("active_period", [dtm.timedelta(seconds=30), 30])
    async def test_post_story_local_file(self, offline_bot, monkeypatch, active_period):
        """A local photo file must be sent as a file:// URI with no multipart upload."""
        json_story = Story(chat=Chat(123, "private"), id=123).to_json()
        async def make_assertion(*args, **kwargs):
            request_data = kwargs.get("request_data")
            params = request_data.parameters
            assert params.get("business_connection_id") == self.bci
            assert (content_dict := params.get("content")).get(
                "type"
            ) == InputStoryContentType.PHOTO
            # Local files are referenced by URI instead of being uploaded.
            assert content_dict["photo"] == data_file("telegram.jpg").as_uri()
            assert not request_data.multipart_data
            return 200, f'{{"ok": true, "result": {json_story}}}'.encode()
        monkeypatch.setattr(offline_bot.request, "do_request", make_assertion)
        kwargs = {
            "business_connection_id": self.bci,
            "content": InputStoryContentPhoto(
                photo=data_file("telegram.jpg"),
            ),
            "active_period": active_period,
        }
        assert await offline_bot.post_story(**kwargs)
    @pytest.mark.parametrize("default_bot", [{"parse_mode": "Markdown"}], indirect=True)
    @pytest.mark.parametrize(
        ("passed_value", "expected_value"),
        [(DEFAULT_NONE, "Markdown"), ("HTML", "HTML"), (None, None)],
    )
    async def test_post_story_default_parse_mode(
        self, default_bot, monkeypatch, passed_value, expected_value
    ):
        """The bot-level default parse_mode applies unless explicitly overridden."""
        async def make_assertion(url, request_data, *args, **kwargs):
            assert request_data.parameters.get("parse_mode") == expected_value
            return Story(chat=Chat(123, "private"), id=123).to_dict()
        monkeypatch.setattr(default_bot.request, "post", make_assertion)
        kwargs = {
            "business_connection_id": self.bci,
            "content": InputStoryContentPhoto(photo=data_file("telegram.jpg").read_bytes()),
            "active_period": dtm.timedelta(seconds=20),
            "caption": "caption",
        }
        # Only pass parse_mode when the parametrized value is not the sentinel.
        if passed_value is not DEFAULT_NONE:
            kwargs["parse_mode"] = passed_value
        await default_bot.post_story(**kwargs)
    @pytest.mark.parametrize("default_bot", [{"protect_content": True}], indirect=True)
    @pytest.mark.parametrize(
        ("passed_value", "expected_value"),
        [(DEFAULT_NONE, True), (False, False), (None, None)],
    )
    async def test_post_story_default_protect_content(
        self, default_bot, monkeypatch, passed_value, expected_value
    ):
        """The bot-level default protect_content applies unless explicitly overridden."""
        async def make_assertion(url, request_data, *args, **kwargs):
            assert request_data.parameters.get("protect_content") == expected_value
            return Story(chat=Chat(123, "private"), id=123).to_dict()
        monkeypatch.setattr(default_bot.request, "post", make_assertion)
        kwargs = {
            "business_connection_id": self.bci,
            "content": InputStoryContentPhoto(bytes("photo", encoding="utf-8")),
            "active_period": dtm.timedelta(seconds=20),
        }
        # Only pass protect_content when the parametrized value is not the sentinel.
        if passed_value is not DEFAULT_NONE:
            kwargs["protect_content"] = passed_value
        await default_bot.post_story(**kwargs)
    @pytest.mark.parametrize(
        ("argument", "expected"),
        [(4, 4), (4.0, 4), (dtm.timedelta(seconds=4), 4), (4.5, 4.5)],
    )
    async def test_post_story_float_time_period(
        self, offline_bot, monkeypatch, argument, expected
    ):
        """Whole-number floats and timedeltas serialize as int; fractional values stay float."""
        # We test that whole number conversion works properly. Only tested here but
        # relevant for some other methods too (e.g bot.set_business_account_profile_photo)
        async def make_assertion(url, request_data, *args, **kwargs):
            data = request_data.parameters
            content = data["content"]
            assert content["duration"] == expected
            # The type is asserted too: 4.0 must become the int 4, not float 4.0.
            assert type(content["duration"]) is type(expected)
            assert content["cover_frame_timestamp"] == expected
            assert type(content["cover_frame_timestamp"]) is type(expected)
            return Story(chat=Chat(123, "private"), id=123).to_dict()
        monkeypatch.setattr(offline_bot.request, "post", make_assertion)
        kwargs = {
            "business_connection_id": self.bci,
            "content": InputStoryContentVideo(
                video=data_file("telegram.mp4"),
                duration=argument,
                cover_frame_timestamp=argument,
            ),
            "active_period": dtm.timedelta(seconds=20),
        }
        assert await offline_bot.post_story(**kwargs)
async def test_edit_story_all_args(self, offline_bot, monkeypatch):
story_id = 1234
content = InputStoryContentPhoto(photo=data_file("telegram.jpg").read_bytes())
caption = "test caption"
caption_entities = [
MessageEntity(MessageEntity.BOLD, 0, 3),
MessageEntity(MessageEntity.ITALIC, 5, 11),
]
parse_mode = "Markdown"
areas = [StoryAreaTypeLink("http_url"), StoryAreaTypeUniqueGift("unique_gift_name")]
json_story = Story(chat=Chat(123, "private"), id=123).to_json()
async def do_request_and_make_assertions(*args, **kwargs):
request_data = kwargs.get("request_data")
params = kwargs.get("request_data").parameters
assert params.get("business_connection_id") == self.bci
assert params.get("story_id") == story_id
assert params.get("caption") == caption
assert params.get("caption_entities") == [e.to_dict() for e in caption_entities]
assert params.get("parse_mode") == parse_mode
assert params.get("areas") == [area.to_dict() for area in areas]
assert (content_dict := params.get("content")).get(
"type"
) == InputStoryContentType.PHOTO
assert (photo_attach := content_dict["photo"]).startswith("attach://")
assert isinstance(
request_data.multipart_data.get(photo_attach.removeprefix("attach://")), tuple
)
return 200, f'{{"ok": true, "result": {json_story}}}'.encode()
monkeypatch.setattr(offline_bot.request, "do_request", do_request_and_make_assertions)
obj = await offline_bot.edit_story(
business_connection_id=self.bci,
story_id=story_id,
content=content,
caption=caption,
caption_entities=caption_entities,
parse_mode=parse_mode,
areas=areas,
)
assert isinstance(obj, Story)
    async def test_edit_story_local_file(self, offline_bot, monkeypatch):
        """A local photo file must be sent as a file:// URI with no multipart upload."""
        json_story = Story(chat=Chat(123, "private"), id=123).to_json()
        async def make_assertion(*args, **kwargs):
            request_data = kwargs.get("request_data")
            params = request_data.parameters
            assert params.get("business_connection_id") == self.bci
            assert (content_dict := params.get("content")).get(
                "type"
            ) == InputStoryContentType.PHOTO
            # Local files are referenced by URI instead of being uploaded.
            assert content_dict["photo"] == data_file("telegram.jpg").as_uri()
            assert not request_data.multipart_data
            return 200, f'{{"ok": true, "result": {json_story}}}'.encode()
        monkeypatch.setattr(offline_bot.request, "do_request", make_assertion)
        kwargs = {
            "business_connection_id": self.bci,
            "story_id": 1234,
            "content": InputStoryContentPhoto(
                photo=data_file("telegram.jpg"),
            ),
        }
        assert await offline_bot.edit_story(**kwargs)
    @pytest.mark.parametrize("default_bot", [{"parse_mode": "Markdown"}], indirect=True)
    @pytest.mark.parametrize(
        ("passed_value", "expected_value"),
        [(DEFAULT_NONE, "Markdown"), ("HTML", "HTML"), (None, None)],
    )
    async def test_edit_story_default_parse_mode(
        self, default_bot, monkeypatch, passed_value, expected_value
    ):
        """The bot-level default parse_mode applies unless explicitly overridden."""
        async def make_assertion(url, request_data, *args, **kwargs):
            assert request_data.parameters.get("parse_mode") == expected_value
            return Story(chat=Chat(123, "private"), id=123).to_dict()
        monkeypatch.setattr(default_bot.request, "post", make_assertion)
        kwargs = {
            "business_connection_id": self.bci,
            "story_id": 1234,
            "content": InputStoryContentPhoto(photo=data_file("telegram.jpg").read_bytes()),
            "caption": "caption",
        }
        # Only pass parse_mode when the parametrized value is not the sentinel.
        if passed_value is not DEFAULT_NONE:
            kwargs["parse_mode"] = passed_value
        await default_bot.edit_story(**kwargs)
async def test_delete_story(self, offline_bot, monkeypatch):
story_id = 123
async def make_assertion(*args, **kwargs):
data = kwargs.get("request_data").parameters
assert data.get("business_connection_id") == self.bci
assert data.get("story_id") == story_id
return True
monkeypatch.setattr(offline_bot.request, "post", make_assertion)
assert await offline_bot.delete_story(business_connection_id=self.bci, story_id=story_id)
    async def test_send_checklist_all_args(self, offline_bot, monkeypatch):
        """send_checklist must forward every argument in its serialized form."""
        chat_id = 123
        checklist = InputChecklist(
            title="My Checklist",
            tasks=[InputChecklistTask(1, "Task 1"), InputChecklistTask(2, "Task 2")],
        )
        disable_notification = True
        protect_content = False
        message_effect_id = 42
        reply_parameters = ReplyParameters(23, chat_id, allow_sending_without_reply=True)
        reply_markup = InlineKeyboardMarkup(
            [[InlineKeyboardButton(text="test", callback_data="test2")]]
        )
        json_message = Message(1, dtm.datetime.now(), Chat(1, ""), text="test").to_json()
        async def make_assertions(*args, **kwargs):
            # Nested objects must arrive as their to_dict() representation.
            params = kwargs.get("request_data").parameters
            assert params.get("business_connection_id") == self.bci
            assert params.get("chat_id") == chat_id
            assert params.get("checklist") == checklist.to_dict()
            assert params.get("disable_notification") is disable_notification
            assert params.get("protect_content") is protect_content
            assert params.get("message_effect_id") == message_effect_id
            assert params.get("reply_parameters") == reply_parameters.to_dict()
            assert params.get("reply_markup") == reply_markup.to_dict()
            return 200, f'{{"ok": true, "result": {json_message}}}'.encode()
        monkeypatch.setattr(offline_bot.request, "do_request", make_assertions)
        obj = await offline_bot.send_checklist(
            business_connection_id=self.bci,
            chat_id=chat_id,
            checklist=checklist,
            disable_notification=disable_notification,
            protect_content=protect_content,
            message_effect_id=message_effect_id,
            reply_parameters=reply_parameters,
            reply_markup=reply_markup,
        )
        assert isinstance(obj, Message)
    @pytest.mark.parametrize("default_bot", [{"disable_notification": True}], indirect=True)
    @pytest.mark.parametrize(
        ("passed_value", "expected_value"),
        [(DEFAULT_NONE, True), (False, False), (None, None)],
    )
    async def test_send_checklist_default_disable_notification(
        self, default_bot, monkeypatch, passed_value, expected_value
    ):
        """The bot-level default disable_notification applies unless explicitly overridden."""
        async def make_assertion(url, request_data, *args, **kwargs):
            assert request_data.parameters.get("disable_notification") is expected_value
            return Message(1, dtm.datetime.now(), Chat(1, ""), text="test").to_dict()
        monkeypatch.setattr(default_bot.request, "post", make_assertion)
        kwargs = {
            "business_connection_id": self.bci,
            "chat_id": 123,
            "checklist": InputChecklist(
                title="My Checklist",
                tasks=[InputChecklistTask(1, "Task 1")],
            ),
        }
        # Only pass disable_notification when the parametrized value is not the sentinel.
        if passed_value is not DEFAULT_NONE:
            kwargs["disable_notification"] = passed_value
        await default_bot.send_checklist(**kwargs)
    @pytest.mark.parametrize("default_bot", [{"protect_content": True}], indirect=True)
    @pytest.mark.parametrize(
        ("passed_value", "expected_value"),
        [(DEFAULT_NONE, True), (False, False), (None, None)],
    )
    async def test_send_checklist_default_protect_content(
        self, default_bot, monkeypatch, passed_value, expected_value
    ):
        """The bot-level default protect_content applies unless explicitly overridden."""
        async def make_assertion(url, request_data, *args, **kwargs):
            assert request_data.parameters.get("protect_content") is expected_value
            return Message(1, dtm.datetime.now(), Chat(1, ""), text="test").to_dict()
        monkeypatch.setattr(default_bot.request, "post", make_assertion)
        kwargs = {
            "business_connection_id": self.bci,
            "chat_id": 123,
            "checklist": InputChecklist(
                title="My Checklist",
                tasks=[InputChecklistTask(1, "Task 1")],
            ),
        }
        # Only pass protect_content when the parametrized value is not the sentinel.
        if passed_value is not DEFAULT_NONE:
            kwargs["protect_content"] = passed_value
        await default_bot.send_checklist(**kwargs)
async def test_send_checklist_mutually_exclusive_reply_parameters(self, offline_bot):
"""Test that reply_to_message_id and allow_sending_without_reply are mutually exclusive
with reply_parameters."""
with pytest.raises(ValueError, match="`reply_to_message_id` and"):
await offline_bot.send_checklist(
self.bci,
123,
InputChecklist(title="My Checklist", tasks=[InputChecklistTask(1, "Task 1")]),
reply_to_message_id=1,
reply_parameters=True,
)
with pytest.raises(ValueError, match="`allow_sending_without_reply` and"):
await offline_bot.send_checklist(
self.bci,
123,
InputChecklist(title="My Checklist", tasks=[InputChecklistTask(1, "Task 1")]),
allow_sending_without_reply=True,
reply_parameters=True,
)
    async def test_edit_message_checklist_all_args(self, offline_bot, monkeypatch):
        """edit_message_checklist must forward every argument in its serialized form."""
        chat_id = 123
        message_id = 45
        checklist = InputChecklist(
            title="My Checklist",
            tasks=[InputChecklistTask(1, "Task 1"), InputChecklistTask(2, "Task 2")],
        )
        reply_markup = InlineKeyboardMarkup(
            [[InlineKeyboardButton(text="test", callback_data="test2")]]
        )
        json_message = Message(1, dtm.datetime.now(), Chat(1, ""), text="test").to_json()
        async def make_assertions(*args, **kwargs):
            # Nested objects must arrive as their to_dict() representation.
            params = kwargs.get("request_data").parameters
            assert params.get("business_connection_id") == self.bci
            assert params.get("chat_id") == chat_id
            assert params.get("message_id") == message_id
            assert params.get("checklist") == checklist.to_dict()
            assert params.get("reply_markup") == reply_markup.to_dict()
            return 200, f'{{"ok": true, "result": {json_message}}}'.encode()
        monkeypatch.setattr(offline_bot.request, "do_request", make_assertions)
        obj = await offline_bot.edit_message_checklist(
            business_connection_id=self.bci,
            chat_id=chat_id,
            message_id=message_id,
            checklist=checklist,
            reply_markup=reply_markup,
        )
        assert isinstance(obj, Message)
    async def test_repost_story(self, offline_bot, monkeypatch):
        """No way to test this without stories. Each argument is passed as its own
        parameter name so the loop below can verify a 1:1 pass-through."""
        async def make_assertion(url, request_data, *args, **kwargs):
            for param in (
                "business_connection_id",
                "from_chat_id",
                "from_story_id",
                "active_period",
                "post_to_chat_page",
                "protect_content",
            ):
                # Each value was set to the parameter's own name above.
                assert request_data.parameters.get(param) == param
            return Story(chat=Chat(id=1, type=Chat.PRIVATE), id=42).to_dict()
        monkeypatch.setattr(offline_bot.request, "post", make_assertion)
        story = await offline_bot.repost_story(
            business_connection_id="business_connection_id",
            from_chat_id="from_chat_id",
            from_story_id="from_story_id",
            active_period="active_period",
            post_to_chat_page="post_to_chat_page",
            protect_content="protect_content",
        )
        assert story.chat.id == 1
        assert story.id == 42
    @pytest.mark.parametrize("default_bot", [{"protect_content": True}], indirect=True)
    @pytest.mark.parametrize(
        ("passed_value", "expected_value"),
        [(DEFAULT_NONE, True), (False, False), (None, None)],
    )
    async def test_repost_story_default_protect_content(
        self, default_bot, monkeypatch, passed_value, expected_value
    ):
        """The bot-level default protect_content applies unless explicitly overridden."""
        async def make_assertion(url, request_data, *args, **kwargs):
            assert request_data.parameters.get("protect_content") == expected_value
            return Story(chat=Chat(123, "private"), id=123).to_dict()
        monkeypatch.setattr(default_bot.request, "post", make_assertion)
        kwargs = {
            "business_connection_id": self.bci,
            "from_chat_id": 123,
            "from_story_id": 456,
            "active_period": dtm.timedelta(seconds=20),
        }
        # Only pass protect_content when the parametrized value is not the sentinel.
        if passed_value is not DEFAULT_NONE:
            kwargs["protect_content"] = passed_value
        await default_bot.repost_story(**kwargs)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/test_business_methods.py",
"license": "GNU General Public License v3.0",
"lines": 725,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:tests/test_ownedgift.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import datetime as dtm
from copy import deepcopy
import pytest
from telegram import Dice, User
from telegram._files.sticker import Sticker
from telegram._gifts import Gift
from telegram._messageentity import MessageEntity
from telegram._ownedgift import OwnedGift, OwnedGiftRegular, OwnedGifts, OwnedGiftUnique
from telegram._uniquegift import (
UniqueGift,
UniqueGiftBackdrop,
UniqueGiftBackdropColors,
UniqueGiftModel,
UniqueGiftSymbol,
)
from telegram._utils.datetime import UTC, to_timestamp
from telegram.constants import OwnedGiftType
from tests.auxil.slots import mro_slots
@pytest.fixture
def owned_gift():
    """Plain OwnedGift built with the type shared by the test base class."""
    base_type = OwnedGiftTestBase.type
    return OwnedGift(type=base_type)
class OwnedGiftTestBase:
    """Shared fixture values for the OwnedGift* test classes below."""

    # Subclasses override this with the gift type they exercise.
    type = OwnedGiftType.REGULAR
    gift = Gift(
        id="some_id",
        sticker=Sticker(
            file_id="file_id",
            file_unique_id="file_unique_id",
            width=512,
            height=512,
            is_animated=False,
            is_video=False,
            type="regular",
        ),
        star_count=5,
    )
    unique_gift = UniqueGift(
        gift_id="gift_id",
        base_name="human_readable",
        name="unique_name",
        number=10,
        model=UniqueGiftModel(
            name="model_name",
            sticker=Sticker("file_id1", "file_unique_id1", 512, 512, False, False, "regular"),
            rarity_per_mille=10,
        ),
        symbol=UniqueGiftSymbol(
            name="symbol_name",
            sticker=Sticker("file_id2", "file_unique_id2", 512, 512, True, True, "mask"),
            rarity_per_mille=20,
        ),
        backdrop=UniqueGiftBackdrop(
            name="backdrop_name",
            colors=UniqueGiftBackdropColors(0x00FF00, 0xEE00FF, 0xAA22BB, 0x20FE8F),
            rarity_per_mille=30,
        ),
    )
    # Timestamps are truncated to whole seconds so they survive the
    # to_timestamp()/de_json round-trip used in the tests.
    send_date = dtm.datetime.now(tz=UTC).replace(microsecond=0)
    owned_gift_id = "not_real_id"
    sender_user = User(1, "test user", False)
    text = "test text"
    entities = (
        MessageEntity(MessageEntity.BOLD, 0, 4),
        MessageEntity(MessageEntity.ITALIC, 5, 8),
    )
    is_private = True
    is_saved = True
    can_be_upgraded = True
    was_refunded = False
    convert_star_count = 100
    prepaid_upgrade_star_count = 200
    can_be_transferred = True
    transfer_star_count = 300
    next_transfer_date = dtm.datetime.now(tz=UTC).replace(microsecond=0)
    is_upgrade_separate = False
    unique_gift_number = 37
class TestOwnedGiftWithoutRequest(OwnedGiftTestBase):
    """Offline tests for the OwnedGift base class."""

    def test_slot_behaviour(self, owned_gift):
        instance = owned_gift
        for attr in instance.__slots__:
            assert getattr(instance, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(instance)) == len(set(mro_slots(instance))), "duplicate slot"

    def test_type_enum_conversion(self, owned_gift):
        # Known type strings normalize to the enum, unknown ones stay plain strings.
        assert type(OwnedGift("regular").type) is OwnedGiftType
        assert OwnedGift("unknown").type == "unknown"

    def test_de_json(self, offline_bot):
        owned = OwnedGift.de_json({"type": "unknown"}, offline_bot)
        assert owned.api_kwargs == {}
        assert owned.type == "unknown"

    @pytest.mark.parametrize(
        ("og_type", "subclass", "gift"),
        [
            ("regular", OwnedGiftRegular, OwnedGiftTestBase.gift),
            ("unique", OwnedGiftUnique, OwnedGiftTestBase.unique_gift),
        ],
    )
    def test_de_json_subclass(self, offline_bot, og_type, subclass, gift):
        json_dict = {
            "type": og_type,
            "gift": gift.to_dict(),
            "send_date": to_timestamp(self.send_date),
            "owned_gift_id": self.owned_gift_id,
            "sender_user": self.sender_user.to_dict(),
            "text": self.text,
            "entities": [entity.to_dict() for entity in self.entities],
            "is_private": self.is_private,
            "is_saved": self.is_saved,
            "can_be_upgraded": self.can_be_upgraded,
            "was_refunded": self.was_refunded,
            "convert_star_count": self.convert_star_count,
            "prepaid_upgrade_star_count": self.prepaid_upgrade_star_count,
            "can_be_transferred": self.can_be_transferred,
            "transfer_star_count": self.transfer_star_count,
            "next_transfer_date": to_timestamp(self.next_transfer_date),
        }
        deserialized = OwnedGift.de_json(json_dict, offline_bot)
        assert type(deserialized) is subclass
        # Fields the concrete subclass does not declare end up in api_kwargs.
        expected_extra = set(json_dict.keys()) - set(subclass.__slots__) - {"type"}
        assert set(deserialized.api_kwargs.keys()) == expected_extra
        assert deserialized.type == og_type

    def test_to_dict(self, owned_gift):
        assert owned_gift.to_dict() == {"type": owned_gift.type}

    def test_equality(self, owned_gift):
        same_type = OwnedGift(self.type)
        other_type = OwnedGift("unknown")
        not_a_gift = Dice(5, "test")
        assert owned_gift == same_type
        assert hash(owned_gift) == hash(same_type)
        assert owned_gift != other_type
        assert hash(owned_gift) != hash(other_type)
        assert owned_gift != not_a_gift
        assert hash(owned_gift) != hash(not_a_gift)
@pytest.fixture
def owned_gift_regular():
    """OwnedGiftRegular populated from the shared test base values."""
    base = TestOwnedGiftRegularWithoutRequest
    return OwnedGiftRegular(
        gift=base.gift,
        send_date=base.send_date,
        owned_gift_id=base.owned_gift_id,
        sender_user=base.sender_user,
        text=base.text,
        entities=base.entities,
        is_private=base.is_private,
        is_saved=base.is_saved,
        can_be_upgraded=base.can_be_upgraded,
        was_refunded=base.was_refunded,
        convert_star_count=base.convert_star_count,
        prepaid_upgrade_star_count=base.prepaid_upgrade_star_count,
        is_upgrade_separate=base.is_upgrade_separate,
        unique_gift_number=base.unique_gift_number,
    )
class TestOwnedGiftRegularWithoutRequest(OwnedGiftTestBase):
type = OwnedGiftType.REGULAR
def test_slot_behaviour(self, owned_gift_regular):
inst = owned_gift_regular
for attr in inst.__slots__:
assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"
def test_de_json(self, offline_bot):
json_dict = {
"gift": self.gift.to_dict(),
"send_date": to_timestamp(self.send_date),
"owned_gift_id": self.owned_gift_id,
"sender_user": self.sender_user.to_dict(),
"text": self.text,
"entities": [e.to_dict() for e in self.entities],
"is_private": self.is_private,
"is_saved": self.is_saved,
"can_be_upgraded": self.can_be_upgraded,
"was_refunded": self.was_refunded,
"convert_star_count": self.convert_star_count,
"prepaid_upgrade_star_count": self.prepaid_upgrade_star_count,
"is_upgrade_separate": self.is_upgrade_separate,
"unique_gift_number": self.unique_gift_number,
}
ogr = OwnedGiftRegular.de_json(json_dict, offline_bot)
assert ogr.gift == self.gift
assert ogr.send_date == self.send_date
assert ogr.owned_gift_id == self.owned_gift_id
assert ogr.sender_user == self.sender_user
assert ogr.text == self.text
assert ogr.entities == self.entities
assert ogr.is_private == self.is_private
assert ogr.is_saved == self.is_saved
assert ogr.can_be_upgraded == self.can_be_upgraded
assert ogr.was_refunded == self.was_refunded
assert ogr.convert_star_count == self.convert_star_count
assert ogr.prepaid_upgrade_star_count == self.prepaid_upgrade_star_count
assert ogr.is_upgrade_separate == self.is_upgrade_separate
assert ogr.unique_gift_number == self.unique_gift_number
assert ogr.api_kwargs == {}
def test_to_dict(self, owned_gift_regular):
json_dict = owned_gift_regular.to_dict()
assert isinstance(json_dict, dict)
assert json_dict["type"] == self.type
assert json_dict["gift"] == self.gift.to_dict()
assert json_dict["send_date"] == to_timestamp(self.send_date)
assert json_dict["owned_gift_id"] == self.owned_gift_id
assert json_dict["sender_user"] == self.sender_user.to_dict()
assert json_dict["text"] == self.text
assert json_dict["entities"] == [e.to_dict() for e in self.entities]
assert json_dict["is_private"] == self.is_private
assert json_dict["is_saved"] == self.is_saved
assert json_dict["can_be_upgraded"] == self.can_be_upgraded
assert json_dict["was_refunded"] == self.was_refunded
assert json_dict["convert_star_count"] == self.convert_star_count
assert json_dict["prepaid_upgrade_star_count"] == self.prepaid_upgrade_star_count
assert json_dict["is_upgrade_separate"] == self.is_upgrade_separate
assert json_dict["unique_gift_number"] == self.unique_gift_number
    def test_parse_entity(self, owned_gift_regular):
        """parse_entity() returns the substring the entity covers; an instance
        constructed without text raises RuntimeError."""
        entity = MessageEntity(MessageEntity.BOLD, 0, 4)
        assert owned_gift_regular.parse_entity(entity) == "test"
        # Only gift and send_date set -> no text to parse against
        with pytest.raises(RuntimeError, match="OwnedGiftRegular has no"):
            OwnedGiftRegular(
                gift=self.gift,
                send_date=self.send_date,
            ).parse_entity(entity)
    def test_parse_entities(self, owned_gift_regular):
        """parse_entities() maps entities to their substrings, optionally
        filtered by entity type; raises without text."""
        entity = MessageEntity(MessageEntity.BOLD, 0, 4)
        entity_2 = MessageEntity(MessageEntity.ITALIC, 5, 8)
        # With a type filter only matching entities are returned
        assert owned_gift_regular.parse_entities(MessageEntity.BOLD) == {entity: "test"}
        assert owned_gift_regular.parse_entities() == {entity: "test", entity_2: "text"}
        with pytest.raises(RuntimeError, match="OwnedGiftRegular has no"):
            OwnedGiftRegular(
                gift=self.gift,
                send_date=self.send_date,
            ).parse_entities()
    def test_equality(self, owned_gift_regular):
        """b (same gift/send_date, optional attrs omitted) compares equal to a;
        a shifted send_date (c) or an unrelated type (d) does not."""
        a = owned_gift_regular
        b = OwnedGiftRegular(deepcopy(self.gift), deepcopy(self.send_date))
        c = OwnedGiftRegular(self.gift, self.send_date + dtm.timedelta(seconds=1))
        d = Dice(5, "test")
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def owned_gift_unique():
    """Return a fresh ``OwnedGiftUnique`` built from the test-base constants."""
    base = TestOwnedGiftUniqueWithoutRequest
    return OwnedGiftUnique(
        gift=base.unique_gift,
        send_date=base.send_date,
        owned_gift_id=base.owned_gift_id,
        sender_user=base.sender_user,
        is_saved=base.is_saved,
        can_be_transferred=base.can_be_transferred,
        transfer_star_count=base.transfer_star_count,
        next_transfer_date=base.next_transfer_date,
    )
class TestOwnedGiftUniqueWithoutRequest(OwnedGiftTestBase):
    """Offline tests for OwnedGiftUnique (no Bot API requests)."""

    type = OwnedGiftType.UNIQUE

    def test_slot_behaviour(self, owned_gift_unique):
        """Every slot is populated and no slot is duplicated along the MRO."""
        inst = owned_gift_unique
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_de_json(self, offline_bot):
        """de_json() round-trips all fields and leaves api_kwargs empty."""
        json_dict = {
            "gift": self.unique_gift.to_dict(),
            "send_date": to_timestamp(self.send_date),
            "owned_gift_id": self.owned_gift_id,
            "sender_user": self.sender_user.to_dict(),
            "is_saved": self.is_saved,
            "can_be_transferred": self.can_be_transferred,
            "transfer_star_count": self.transfer_star_count,
            "next_transfer_date": to_timestamp(self.next_transfer_date),
        }
        ogu = OwnedGiftUnique.de_json(json_dict, offline_bot)
        assert ogu.gift == self.unique_gift
        assert ogu.send_date == self.send_date
        assert ogu.owned_gift_id == self.owned_gift_id
        assert ogu.sender_user == self.sender_user
        assert ogu.is_saved == self.is_saved
        assert ogu.can_be_transferred == self.can_be_transferred
        assert ogu.transfer_star_count == self.transfer_star_count
        assert ogu.next_transfer_date == self.next_transfer_date
        assert ogu.api_kwargs == {}

    def test_to_dict(self, owned_gift_unique):
        """to_dict() serializes every attribute; nested objects become dicts."""
        json_dict = owned_gift_unique.to_dict()
        assert isinstance(json_dict, dict)
        assert json_dict["type"] == self.type
        assert json_dict["gift"] == self.unique_gift.to_dict()
        assert json_dict["send_date"] == to_timestamp(self.send_date)
        assert json_dict["owned_gift_id"] == self.owned_gift_id
        assert json_dict["sender_user"] == self.sender_user.to_dict()
        assert json_dict["is_saved"] == self.is_saved
        assert json_dict["can_be_transferred"] == self.can_be_transferred
        assert json_dict["transfer_star_count"] == self.transfer_star_count
        assert json_dict["next_transfer_date"] == to_timestamp(self.next_transfer_date)

    def test_equality(self, owned_gift_unique):
        """b (same gift/send_date) equals a; changed send_date or foreign type do not."""
        a = owned_gift_unique
        b = OwnedGiftUnique(deepcopy(self.unique_gift), deepcopy(self.send_date))
        c = OwnedGiftUnique(self.unique_gift, self.send_date + dtm.timedelta(seconds=1))
        d = Dice(5, "test")
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def owned_gifts(request):
    """Return a fresh ``OwnedGifts`` built from the test-base constants."""
    base = OwnedGiftsTestBase
    return OwnedGifts(
        total_count=base.total_count,
        gifts=base.gifts,
        next_offset=base.next_offset,
    )
class OwnedGiftsTestBase:
    """Shared fixture constants for the OwnedGifts tests."""

    total_count = 2
    next_offset = "next_offset_str"
    # One regular and one unique gift, so de_json type-dispatch can be verified
    gifts: list[OwnedGift] = [
        OwnedGiftRegular(
            gift=Gift(
                id="id1",
                sticker=Sticker(
                    file_id="file_id",
                    file_unique_id="file_unique_id",
                    width=512,
                    height=512,
                    is_animated=False,
                    is_video=False,
                    type="regular",
                ),
                star_count=5,
                total_count=5,
                remaining_count=5,
                upgrade_star_count=5,
            ),
            # microsecond=0 because timestamps round-trip at second precision
            send_date=dtm.datetime.now(tz=UTC).replace(microsecond=0),
            owned_gift_id="some_id_1",
        ),
        OwnedGiftUnique(
            gift=UniqueGift(
                gift_id="gift_id",
                base_name="human_readable",
                name="unique_name",
                number=10,
                model=UniqueGiftModel(
                    name="model_name",
                    sticker=Sticker(
                        "file_id1", "file_unique_id1", 512, 512, False, False, "regular"
                    ),
                    rarity_per_mille=10,
                ),
                symbol=UniqueGiftSymbol(
                    name="symbol_name",
                    sticker=Sticker("file_id2", "file_unique_id2", 512, 512, True, True, "mask"),
                    rarity_per_mille=20,
                ),
                backdrop=UniqueGiftBackdrop(
                    name="backdrop_name",
                    colors=UniqueGiftBackdropColors(0x00FF00, 0xEE00FF, 0xAA22BB, 0x20FE8F),
                    rarity_per_mille=30,
                ),
            ),
            send_date=dtm.datetime.now(tz=UTC).replace(microsecond=0),
            owned_gift_id="some_id_2",
        ),
    ]
class TestOwnedGiftsWithoutRequest(OwnedGiftsTestBase):
    """Offline tests for the OwnedGifts container (no Bot API requests)."""

    def test_slot_behaviour(self, owned_gifts):
        """Every slot is populated and no slot is duplicated along the MRO."""
        for attr in owned_gifts.__slots__:
            assert getattr(owned_gifts, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(owned_gifts)) == len(set(mro_slots(owned_gifts))), "duplicate slot"

    def test_de_json(self, offline_bot):
        """de_json() round-trips the fields and dispatches gift subclasses by type."""
        json_dict = {
            "total_count": self.total_count,
            "gifts": [gift.to_dict() for gift in self.gifts],
            "next_offset": self.next_offset,
        }
        owned_gifts = OwnedGifts.de_json(json_dict, offline_bot)
        assert owned_gifts.api_kwargs == {}
        assert owned_gifts.total_count == self.total_count
        # sequence attributes are stored as tuples
        assert owned_gifts.gifts == tuple(self.gifts)
        # de_json must pick the concrete subclass, not the OwnedGift base
        assert type(owned_gifts.gifts[0]) is OwnedGiftRegular
        assert type(owned_gifts.gifts[1]) is OwnedGiftUnique
        assert owned_gifts.next_offset == self.next_offset

    def test_to_dict(self, owned_gifts):
        """to_dict() serializes the counter, the gift list and the offset."""
        gifts_dict = owned_gifts.to_dict()
        assert isinstance(gifts_dict, dict)
        assert gifts_dict["total_count"] == self.total_count
        assert gifts_dict["gifts"] == [gift.to_dict() for gift in self.gifts]
        assert gifts_dict["next_offset"] == self.next_offset

    def test_equality(self, owned_gifts):
        """Same total_count/gifts compare equal; differing data or type do not."""
        a = owned_gifts
        b = OwnedGifts(self.total_count, self.gifts)
        c = OwnedGifts(self.total_count - 1, self.gifts[:1])
        d = Dice(5, "test")
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/test_ownedgift.py",
"license": "GNU General Public License v3.0",
"lines": 418,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:tests/test_paidmessagepricechanged.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram import Dice, PaidMessagePriceChanged
from tests.auxil.slots import mro_slots
class PaidMessagePriceChangedTestBase:
    """Shared fixture constants for the PaidMessagePriceChanged tests."""

    paid_message_star_count = 291
@pytest.fixture(scope="module")
def paid_message_price_changed():
    """Module-scoped PaidMessagePriceChanged instance under test."""
    return PaidMessagePriceChanged(PaidMessagePriceChangedTestBase.paid_message_star_count)
class TestPaidMessagePriceChangedWithoutRequest(PaidMessagePriceChangedTestBase):
    """Offline tests for PaidMessagePriceChanged (no Bot API requests)."""

    def test_slot_behaviour(self, paid_message_price_changed):
        """Every slot is populated and no slot is duplicated along the MRO."""
        for attr in paid_message_price_changed.__slots__:
            assert getattr(paid_message_price_changed, attr, "err") != "err", (
                f"got extra slot '{attr}'"
            )
        assert len(mro_slots(paid_message_price_changed)) == len(
            set(mro_slots(paid_message_price_changed))
        ), "duplicate slot"

    def test_to_dict(self, paid_message_price_changed):
        """to_dict() serializes the star count."""
        pmpc_dict = paid_message_price_changed.to_dict()
        assert isinstance(pmpc_dict, dict)
        assert pmpc_dict["paid_message_star_count"] == self.paid_message_star_count

    def test_de_json(self, offline_bot):
        """de_json() round-trips the star count and leaves api_kwargs empty."""
        json_dict = {"paid_message_star_count": self.paid_message_star_count}
        pmpc = PaidMessagePriceChanged.de_json(json_dict, offline_bot)
        assert isinstance(pmpc, PaidMessagePriceChanged)
        assert pmpc.paid_message_star_count == self.paid_message_star_count
        assert pmpc.api_kwargs == {}

    def test_equality(self):
        """Same star count compares equal; differing count or foreign type do not."""
        pmpc1 = PaidMessagePriceChanged(self.paid_message_star_count)
        pmpc2 = PaidMessagePriceChanged(self.paid_message_star_count)
        pmpc3 = PaidMessagePriceChanged(3)
        dice = Dice(5, "emoji")
        assert pmpc1 == pmpc2
        assert hash(pmpc1) == hash(pmpc2)
        assert pmpc1 != pmpc3
        assert hash(pmpc1) != hash(pmpc3)
        assert pmpc1 != dice
        assert hash(pmpc1) != hash(dice)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/test_paidmessagepricechanged.py",
"license": "GNU General Public License v3.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:tests/test_storyarea.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram._dice import Dice
from telegram._reaction import ReactionTypeEmoji
from telegram._storyarea import (
LocationAddress,
StoryArea,
StoryAreaPosition,
StoryAreaType,
StoryAreaTypeLink,
StoryAreaTypeLocation,
StoryAreaTypeSuggestedReaction,
StoryAreaTypeUniqueGift,
StoryAreaTypeWeather,
)
from telegram.constants import StoryAreaTypeType
from tests.auxil.slots import mro_slots
@pytest.fixture
def story_area_position():
    """Return a fresh ``StoryAreaPosition`` built from the test-base constants."""
    base = StoryAreaPositionTestBase
    return StoryAreaPosition(
        x_percentage=base.x_percentage,
        y_percentage=base.y_percentage,
        width_percentage=base.width_percentage,
        height_percentage=base.height_percentage,
        rotation_angle=base.rotation_angle,
        corner_radius_percentage=base.corner_radius_percentage,
    )
class StoryAreaPositionTestBase:
    """Shared fixture constants for the StoryAreaPosition tests."""

    x_percentage = 50.0
    y_percentage = 10.0
    width_percentage = 15
    height_percentage = 15
    rotation_angle = 0.0
    corner_radius_percentage = 8.0
class TestStoryAreaPositionWithoutRequest(StoryAreaPositionTestBase):
    """Offline tests for StoryAreaPosition (no Bot API requests)."""

    def test_slot_behaviour(self, story_area_position):
        """Every slot is populated and no slot is duplicated along the MRO."""
        inst = story_area_position
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_expected_values(self, story_area_position):
        """Constructor arguments are exposed unchanged as attributes."""
        assert story_area_position.x_percentage == self.x_percentage
        assert story_area_position.y_percentage == self.y_percentage
        assert story_area_position.width_percentage == self.width_percentage
        assert story_area_position.height_percentage == self.height_percentage
        assert story_area_position.rotation_angle == self.rotation_angle
        assert story_area_position.corner_radius_percentage == self.corner_radius_percentage

    def test_to_dict(self, story_area_position):
        """to_dict() serializes every attribute."""
        json_dict = story_area_position.to_dict()
        assert json_dict["x_percentage"] == self.x_percentage
        assert json_dict["y_percentage"] == self.y_percentage
        assert json_dict["width_percentage"] == self.width_percentage
        assert json_dict["height_percentage"] == self.height_percentage
        assert json_dict["rotation_angle"] == self.rotation_angle
        assert json_dict["corner_radius_percentage"] == self.corner_radius_percentage

    def test_equality(self, story_area_position):
        """Identical field values compare equal; a changed x or foreign type do not."""
        a = story_area_position
        b = StoryAreaPosition(
            self.x_percentage,
            self.y_percentage,
            self.width_percentage,
            self.height_percentage,
            self.rotation_angle,
            self.corner_radius_percentage,
        )
        c = StoryAreaPosition(
            self.x_percentage + 10.0,
            self.y_percentage,
            self.width_percentage,
            self.height_percentage,
            self.rotation_angle,
            self.corner_radius_percentage,
        )
        d = Dice(5, "test")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def location_address():
    """Return a fresh ``LocationAddress`` built from the test-base constants."""
    base = LocationAddressTestBase
    return LocationAddress(
        country_code=base.country_code,
        state=base.state,
        city=base.city,
        street=base.street,
    )
class LocationAddressTestBase:
    """Shared fixture constants for the LocationAddress tests."""

    country_code = "CC"
    state = "State"
    city = "City"
    street = "12 downtown"
class TestLocationAddressWithoutRequest(LocationAddressTestBase):
    """Offline tests for LocationAddress (no Bot API requests)."""

    def test_slot_behaviour(self, location_address):
        """Every slot is populated and no slot is duplicated along the MRO."""
        inst = location_address
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_expected_values(self, location_address):
        """Constructor arguments are exposed unchanged as attributes."""
        assert location_address.country_code == self.country_code
        assert location_address.state == self.state
        assert location_address.city == self.city
        assert location_address.street == self.street

    def test_to_dict(self, location_address):
        """to_dict() serializes every attribute."""
        json_dict = location_address.to_dict()
        assert json_dict["country_code"] == self.country_code
        assert json_dict["state"] == self.state
        assert json_dict["city"] == self.city
        assert json_dict["street"] == self.street

    def test_equality(self, location_address):
        """Identical fields compare equal; a changed code or foreign type do not."""
        a = location_address
        b = LocationAddress(self.country_code, self.state, self.city, self.street)
        c = LocationAddress("some_other_code", self.state, self.city, self.street)
        d = Dice(5, "test")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def story_area():
    """StoryArea instance built from the StoryAreaTestBase constants."""
    return StoryArea(
        position=StoryAreaTestBase.position,
        type=StoryAreaTestBase.type,
    )
class StoryAreaTestBase:
    """Shared fixture constants for the StoryArea tests."""

    position = StoryAreaPosition(
        x_percentage=50.0,
        y_percentage=10.0,
        width_percentage=15,
        height_percentage=15,
        rotation_angle=0.0,
        corner_radius_percentage=8.0,
    )
    type = StoryAreaTypeLink(url="some_url")
class TestStoryAreaWithoutRequest(StoryAreaTestBase):
    """Offline tests for StoryArea (no Bot API requests)."""

    def test_slot_behaviour(self, story_area):
        """Every slot is populated and no slot is duplicated along the MRO."""
        inst = story_area
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_expected_values(self, story_area):
        """Constructor arguments are exposed unchanged as attributes."""
        assert story_area.position == self.position
        assert story_area.type == self.type

    def test_to_dict(self, story_area):
        """to_dict() serializes position and type as nested dicts."""
        json_dict = story_area.to_dict()
        assert json_dict["position"] == self.position.to_dict()
        assert json_dict["type"] == self.type.to_dict()

    def test_equality(self, story_area):
        """Identical position/type compare equal; changed type or foreign type do not."""
        a = story_area
        b = StoryArea(self.position, self.type)
        c = StoryArea(self.position, StoryAreaTypeLink(url="some_other_url"))
        d = Dice(5, "test")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def story_area_type():
    """StoryAreaType (base class) instance built from the test-base type."""
    return StoryAreaType(type=StoryAreaTypeTestBase.type)
class StoryAreaTypeTestBase:
    """Shared constants for all StoryAreaType* subclass tests.

    Each Test*WithoutRequest subclass overrides ``type`` and uses the subset
    of these attributes relevant to the concrete area type.
    """

    type = StoryAreaTypeType.LOCATION
    latitude = 100.5
    longitude = 200.5
    address = LocationAddress(
        country_code="cc",
        state="State",
        city="City",
        street="12 downtown",
    )
    reaction_type = ReactionTypeEmoji(emoji="emoji")
    is_dark = False
    is_flipped = False
    url = "http_url"
    temperature = 35.0
    emoji = "emoji"
    background_color = 0xFF66CCFF
    name = "unique_gift_name"
class TestStoryAreaTypeWithoutRequest(StoryAreaTypeTestBase):
    """Offline tests for the StoryAreaType base class (no Bot API requests)."""

    def test_slot_behaviour(self, story_area_type):
        """Every slot is populated and no slot is duplicated along the MRO."""
        inst = story_area_type
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_expected_values(self, story_area_type):
        """The type argument is exposed unchanged."""
        assert story_area_type.type == self.type

    def test_type_enum_conversion(self, story_area_type):
        """Known type strings are coerced to the enum; unknown strings pass through."""
        # NOTE(review): the story_area_type fixture argument is unused here
        assert type(StoryAreaType("location").type) is StoryAreaTypeType
        assert StoryAreaType("unknown").type == "unknown"

    def test_to_dict(self, story_area_type):
        """to_dict() serializes just the type field."""
        assert story_area_type.to_dict() == {"type": self.type}

    def test_equality(self, story_area_type):
        """Same type compares equal; a different type string or foreign type do not."""
        a = story_area_type
        b = StoryAreaType(self.type)
        c = StoryAreaType("unknown")
        d = Dice(5, "test")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def story_area_type_location():
    """StoryAreaTypeLocation instance built from the test-class constants."""
    return StoryAreaTypeLocation(
        latitude=TestStoryAreaTypeLocationWithoutRequest.latitude,
        longitude=TestStoryAreaTypeLocationWithoutRequest.longitude,
        address=TestStoryAreaTypeLocationWithoutRequest.address,
    )
class TestStoryAreaTypeLocationWithoutRequest(StoryAreaTypeTestBase):
    """Offline tests for StoryAreaTypeLocation (no Bot API requests)."""

    type = StoryAreaTypeType.LOCATION

    def test_slot_behaviour(self, story_area_type_location):
        """Every slot is populated and no slot is duplicated along the MRO."""
        inst = story_area_type_location
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_expected_values(self, story_area_type_location):
        """Constructor arguments are exposed unchanged as attributes."""
        assert story_area_type_location.type == self.type
        assert story_area_type_location.latitude == self.latitude
        assert story_area_type_location.longitude == self.longitude
        assert story_area_type_location.address == self.address

    def test_to_dict(self, story_area_type_location):
        """to_dict() serializes every attribute; address becomes a nested dict."""
        json_dict = story_area_type_location.to_dict()
        assert isinstance(json_dict, dict)
        assert json_dict["type"] == self.type
        assert json_dict["latitude"] == self.latitude
        assert json_dict["longitude"] == self.longitude
        assert json_dict["address"] == self.address.to_dict()

    def test_equality(self, story_area_type_location):
        """Identical fields compare equal; a shifted latitude or foreign type do not."""
        a = story_area_type_location
        b = StoryAreaTypeLocation(self.latitude, self.longitude, self.address)
        c = StoryAreaTypeLocation(self.latitude + 0.5, self.longitude, self.address)
        d = Dice(5, "test")
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def story_area_type_suggested_reaction():
    """StoryAreaTypeSuggestedReaction instance built from the test-class constants."""
    return StoryAreaTypeSuggestedReaction(
        reaction_type=TestStoryAreaTypeSuggestedReactionWithoutRequest.reaction_type,
        is_dark=TestStoryAreaTypeSuggestedReactionWithoutRequest.is_dark,
        is_flipped=TestStoryAreaTypeSuggestedReactionWithoutRequest.is_flipped,
    )
class TestStoryAreaTypeSuggestedReactionWithoutRequest(StoryAreaTypeTestBase):
    """Offline tests for StoryAreaTypeSuggestedReaction (no Bot API requests)."""

    type = StoryAreaTypeType.SUGGESTED_REACTION

    def test_slot_behaviour(self, story_area_type_suggested_reaction):
        """Every slot is populated and no slot is duplicated along the MRO."""
        inst = story_area_type_suggested_reaction
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_expected_values(self, story_area_type_suggested_reaction):
        """Constructor arguments are exposed unchanged; flags keep identity (is)."""
        assert story_area_type_suggested_reaction.type == self.type
        assert story_area_type_suggested_reaction.reaction_type == self.reaction_type
        assert story_area_type_suggested_reaction.is_dark is self.is_dark
        assert story_area_type_suggested_reaction.is_flipped is self.is_flipped

    def test_to_dict(self, story_area_type_suggested_reaction):
        """to_dict() serializes every attribute; reaction_type becomes a nested dict."""
        json_dict = story_area_type_suggested_reaction.to_dict()
        assert isinstance(json_dict, dict)
        assert json_dict["type"] == self.type
        assert json_dict["reaction_type"] == self.reaction_type.to_dict()
        assert json_dict["is_dark"] is self.is_dark
        assert json_dict["is_flipped"] is self.is_flipped

    def test_equality(self, story_area_type_suggested_reaction):
        """Identical fields compare equal; a flipped is_dark or foreign type do not."""
        a = story_area_type_suggested_reaction
        b = StoryAreaTypeSuggestedReaction(self.reaction_type, self.is_dark, self.is_flipped)
        c = StoryAreaTypeSuggestedReaction(self.reaction_type, not self.is_dark, self.is_flipped)
        d = Dice(5, "test")
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def story_area_type_link():
    """Return a fresh ``StoryAreaTypeLink`` built from the test-class url."""
    return StoryAreaTypeLink(url=TestStoryAreaTypeLinkWithoutRequest.url)
class TestStoryAreaTypeLinkWithoutRequest(StoryAreaTypeTestBase):
    """Offline tests for StoryAreaTypeLink (no Bot API requests)."""

    type = StoryAreaTypeType.LINK

    def test_slot_behaviour(self, story_area_type_link):
        """Every slot is populated and no slot is duplicated along the MRO."""
        inst = story_area_type_link
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_expected_values(self, story_area_type_link):
        """Constructor arguments are exposed unchanged as attributes."""
        assert story_area_type_link.type == self.type
        assert story_area_type_link.url == self.url

    def test_to_dict(self, story_area_type_link):
        """to_dict() serializes type and url."""
        json_dict = story_area_type_link.to_dict()
        assert isinstance(json_dict, dict)
        assert json_dict["type"] == self.type
        assert json_dict["url"] == self.url

    def test_equality(self, story_area_type_link):
        """Same url compares equal; a different url or foreign type do not."""
        a = story_area_type_link
        b = StoryAreaTypeLink(self.url)
        c = StoryAreaTypeLink("other_http_url")
        d = Dice(5, "test")
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def story_area_type_weather():
    """StoryAreaTypeWeather instance built from the test-class constants."""
    return StoryAreaTypeWeather(
        temperature=TestStoryAreaTypeWeatherWithoutRequest.temperature,
        emoji=TestStoryAreaTypeWeatherWithoutRequest.emoji,
        background_color=TestStoryAreaTypeWeatherWithoutRequest.background_color,
    )
class TestStoryAreaTypeWeatherWithoutRequest(StoryAreaTypeTestBase):
    """Offline tests for StoryAreaTypeWeather (no Bot API requests)."""

    type = StoryAreaTypeType.WEATHER

    def test_slot_behaviour(self, story_area_type_weather):
        """Every slot is populated and no slot is duplicated along the MRO."""
        inst = story_area_type_weather
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_expected_values(self, story_area_type_weather):
        """Constructor arguments are exposed unchanged as attributes."""
        assert story_area_type_weather.type == self.type
        assert story_area_type_weather.temperature == self.temperature
        assert story_area_type_weather.emoji == self.emoji
        assert story_area_type_weather.background_color == self.background_color

    def test_to_dict(self, story_area_type_weather):
        """to_dict() serializes every attribute."""
        json_dict = story_area_type_weather.to_dict()
        assert isinstance(json_dict, dict)
        assert json_dict["type"] == self.type
        assert json_dict["temperature"] == self.temperature
        assert json_dict["emoji"] == self.emoji
        assert json_dict["background_color"] == self.background_color

    def test_equality(self, story_area_type_weather):
        """Identical fields compare equal; a changed temperature or foreign type do not."""
        a = story_area_type_weather
        b = StoryAreaTypeWeather(self.temperature, self.emoji, self.background_color)
        c = StoryAreaTypeWeather(self.temperature - 5.0, self.emoji, self.background_color)
        d = Dice(5, "test")
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def story_area_type_unique_gift():
    """Return a fresh ``StoryAreaTypeUniqueGift`` built from the test-class name."""
    return StoryAreaTypeUniqueGift(name=TestStoryAreaTypeUniqueGiftWithoutRequest.name)
class TestStoryAreaTypeUniqueGiftWithoutRequest(StoryAreaTypeTestBase):
    """Offline tests for StoryAreaTypeUniqueGift (no Bot API requests)."""

    type = StoryAreaTypeType.UNIQUE_GIFT

    def test_slot_behaviour(self, story_area_type_unique_gift):
        """Every slot is populated and no slot is duplicated along the MRO."""
        inst = story_area_type_unique_gift
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_expected_values(self, story_area_type_unique_gift):
        """Constructor arguments are exposed unchanged as attributes."""
        assert story_area_type_unique_gift.type == self.type
        assert story_area_type_unique_gift.name == self.name

    def test_to_dict(self, story_area_type_unique_gift):
        """to_dict() serializes type and name."""
        json_dict = story_area_type_unique_gift.to_dict()
        assert isinstance(json_dict, dict)
        assert json_dict["type"] == self.type
        assert json_dict["name"] == self.name

    def test_equality(self, story_area_type_unique_gift):
        """Same name compares equal; a different name or foreign type do not."""
        a = story_area_type_unique_gift
        b = StoryAreaTypeUniqueGift(self.name)
        c = StoryAreaTypeUniqueGift("other_name")
        d = Dice(5, "test")
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/test_storyarea.py",
"license": "GNU General Public License v3.0",
"lines": 401,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
python-telegram-bot/python-telegram-bot:tests/test_uniquegift.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import datetime as dtm
import pytest
from telegram import (
BotCommand,
Chat,
Sticker,
UniqueGift,
UniqueGiftBackdrop,
UniqueGiftBackdropColors,
UniqueGiftColors,
UniqueGiftInfo,
UniqueGiftModel,
UniqueGiftSymbol,
)
from telegram._utils.datetime import UTC, to_timestamp
from telegram.constants import UniqueGiftInfoOrigin
from tests.auxil.slots import mro_slots
@pytest.fixture
def unique_gift_colors():
    """UniqueGiftColors instance built from the UniqueGiftColorsTestBase constants."""
    return UniqueGiftColors(
        model_custom_emoji_id=UniqueGiftColorsTestBase.model_custom_emoji_id,
        symbol_custom_emoji_id=UniqueGiftColorsTestBase.symbol_custom_emoji_id,
        light_theme_main_color=UniqueGiftColorsTestBase.light_theme_main_color,
        light_theme_other_colors=UniqueGiftColorsTestBase.light_theme_other_colors,
        dark_theme_main_color=UniqueGiftColorsTestBase.dark_theme_main_color,
        dark_theme_other_colors=UniqueGiftColorsTestBase.dark_theme_other_colors,
    )
class UniqueGiftColorsTestBase:
    """Shared fixture constants for the UniqueGiftColors tests."""

    model_custom_emoji_id = "model_emoji_id"
    symbol_custom_emoji_id = "symbol_emoji_id"
    light_theme_main_color = 0xFFFFFF
    light_theme_other_colors = [0xAAAAAA, 0xBBBBBB]
    dark_theme_main_color = 0x000000
    dark_theme_other_colors = [0x111111, 0x222222]
class TestUniqueGiftColorsWithoutRequest(UniqueGiftColorsTestBase):
    """Offline tests for UniqueGiftColors (no Bot API requests)."""

    def test_slot_behaviour(self, unique_gift_colors):
        """Every slot is populated and no slot is duplicated along the MRO."""
        for attr in unique_gift_colors.__slots__:
            assert getattr(unique_gift_colors, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(unique_gift_colors)) == len(set(mro_slots(unique_gift_colors))), (
            "duplicate slot"
        )

    def test_de_json(self, offline_bot):
        """de_json() round-trips all fields; color sequences become tuples."""
        json_dict = {
            "model_custom_emoji_id": self.model_custom_emoji_id,
            "symbol_custom_emoji_id": self.symbol_custom_emoji_id,
            "light_theme_main_color": self.light_theme_main_color,
            "light_theme_other_colors": self.light_theme_other_colors,
            "dark_theme_main_color": self.dark_theme_main_color,
            "dark_theme_other_colors": self.dark_theme_other_colors,
        }
        unique_gift_colors = UniqueGiftColors.de_json(json_dict, offline_bot)
        assert unique_gift_colors.api_kwargs == {}
        assert unique_gift_colors.model_custom_emoji_id == self.model_custom_emoji_id
        assert unique_gift_colors.symbol_custom_emoji_id == self.symbol_custom_emoji_id
        assert unique_gift_colors.light_theme_main_color == self.light_theme_main_color
        assert unique_gift_colors.light_theme_other_colors == tuple(self.light_theme_other_colors)
        assert unique_gift_colors.dark_theme_main_color == self.dark_theme_main_color
        assert unique_gift_colors.dark_theme_other_colors == tuple(self.dark_theme_other_colors)

    def test_to_dict(self, unique_gift_colors):
        """to_dict() serializes every attribute."""
        json_dict = unique_gift_colors.to_dict()
        assert json_dict["model_custom_emoji_id"] == self.model_custom_emoji_id
        assert json_dict["symbol_custom_emoji_id"] == self.symbol_custom_emoji_id
        assert json_dict["light_theme_main_color"] == self.light_theme_main_color
        assert json_dict["light_theme_other_colors"] == self.light_theme_other_colors
        assert json_dict["dark_theme_main_color"] == self.dark_theme_main_color
        assert json_dict["dark_theme_other_colors"] == self.dark_theme_other_colors

    def test_equality(self, unique_gift_colors):
        """Identical fields compare equal; a changed emoji id or foreign type do not."""
        a = unique_gift_colors
        b = UniqueGiftColors(
            self.model_custom_emoji_id,
            self.symbol_custom_emoji_id,
            self.light_theme_main_color,
            self.light_theme_other_colors,
            self.dark_theme_main_color,
            self.dark_theme_other_colors,
        )
        c = UniqueGiftColors(
            "other_model_emoji_id",
            self.symbol_custom_emoji_id,
            self.light_theme_main_color,
            self.light_theme_other_colors,
            self.dark_theme_main_color,
            self.dark_theme_other_colors,
        )
        d = BotCommand("start", "description")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def unique_gift():
    """UniqueGift instance built from the UniqueGiftTestBase constants."""
    return UniqueGift(
        gift_id=UniqueGiftTestBase.gift_id,
        base_name=UniqueGiftTestBase.base_name,
        name=UniqueGiftTestBase.name,
        number=UniqueGiftTestBase.number,
        model=UniqueGiftTestBase.model,
        symbol=UniqueGiftTestBase.symbol,
        backdrop=UniqueGiftTestBase.backdrop,
        publisher_chat=UniqueGiftTestBase.publisher_chat,
        is_premium=UniqueGiftTestBase.is_premium,
        is_from_blockchain=UniqueGiftTestBase.is_from_blockchain,
        colors=UniqueGiftTestBase.colors,
    )
class UniqueGiftTestBase:
    """Shared fixture constants for the UniqueGift tests."""

    gift_id = "gift_id"
    base_name = "human_readable"
    name = "unique_name"
    number = 10
    model = UniqueGiftModel(
        name="model_name",
        sticker=Sticker("file_id1", "file_unique_id1", 512, 512, False, False, "regular"),
        rarity_per_mille=10,
    )
    symbol = UniqueGiftSymbol(
        name="symbol_name",
        sticker=Sticker("file_id2", "file_unique_id2", 512, 512, True, True, "mask"),
        rarity_per_mille=20,
    )
    backdrop = UniqueGiftBackdrop(
        name="backdrop_name",
        colors=UniqueGiftBackdropColors(0x00FF00, 0xEE00FF, 0xAA22BB, 0x20FE8F),
        rarity_per_mille=30,
    )
    publisher_chat = Chat(1, Chat.PRIVATE)
    is_premium = False
    is_from_blockchain = True
    colors = UniqueGiftColors(
        model_custom_emoji_id="M",
        symbol_custom_emoji_id="S",
        light_theme_main_color=0xFFFFFF,
        light_theme_other_colors=[0xAAAAAA],
        dark_theme_main_color=0x000000,
        dark_theme_other_colors=[0x111111],
    )
class TestUniqueGiftWithoutRequest(UniqueGiftTestBase):
    """Offline tests for UniqueGift (no Bot API requests)."""

    def test_slot_behaviour(self, unique_gift):
        """Every slot is populated and no slot is duplicated along the MRO."""
        for attr in unique_gift.__slots__:
            assert getattr(unique_gift, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(unique_gift)) == len(set(mro_slots(unique_gift))), "duplicate slot"

    def test_de_json(self, offline_bot):
        """de_json() round-trips all fields and leaves api_kwargs empty."""
        json_dict = {
            "gift_id": self.gift_id,
            "base_name": self.base_name,
            "name": self.name,
            "number": self.number,
            "model": self.model.to_dict(),
            "symbol": self.symbol.to_dict(),
            "backdrop": self.backdrop.to_dict(),
            "publisher_chat": self.publisher_chat.to_dict(),
            "is_premium": self.is_premium,
            "is_from_blockchain": self.is_from_blockchain,
            "colors": self.colors.to_dict(),
        }
        unique_gift = UniqueGift.de_json(json_dict, offline_bot)
        assert unique_gift.api_kwargs == {}
        # Fix: gift_id was present in the input dict but never asserted, so a
        # regression in its deserialization would have gone undetected.
        assert unique_gift.gift_id == self.gift_id
        assert unique_gift.base_name == self.base_name
        assert unique_gift.name == self.name
        assert unique_gift.number == self.number
        assert unique_gift.model == self.model
        assert unique_gift.symbol == self.symbol
        assert unique_gift.backdrop == self.backdrop
        assert unique_gift.publisher_chat == self.publisher_chat
        assert unique_gift.is_premium == self.is_premium
        assert unique_gift.is_from_blockchain == self.is_from_blockchain
        assert unique_gift.colors == self.colors

    def test_to_dict(self, unique_gift):
        """to_dict() serializes every attribute; nested objects become dicts."""
        gift_dict = unique_gift.to_dict()
        assert isinstance(gift_dict, dict)
        assert gift_dict["gift_id"] == self.gift_id
        assert gift_dict["base_name"] == self.base_name
        assert gift_dict["name"] == self.name
        assert gift_dict["number"] == self.number
        assert gift_dict["model"] == self.model.to_dict()
        assert gift_dict["symbol"] == self.symbol.to_dict()
        assert gift_dict["backdrop"] == self.backdrop.to_dict()
        assert gift_dict["publisher_chat"] == self.publisher_chat.to_dict()
        assert gift_dict["is_premium"] == self.is_premium
        assert gift_dict["is_from_blockchain"] == self.is_from_blockchain
        assert gift_dict["colors"] == self.colors.to_dict()

    def test_equality(self, unique_gift):
        """b (optional attrs omitted) equals a; a changed base_name or foreign type do not."""
        a = unique_gift
        b = UniqueGift(
            gift_id=self.gift_id,
            base_name=self.base_name,
            name=self.name,
            number=self.number,
            model=self.model,
            symbol=self.symbol,
            backdrop=self.backdrop,
            publisher_chat=self.publisher_chat,
        )
        c = UniqueGift(
            gift_id=self.gift_id,
            base_name="other_base_name",
            name=self.name,
            number=self.number,
            model=self.model,
            symbol=self.symbol,
            backdrop=self.backdrop,
            publisher_chat=self.publisher_chat,
        )
        d = BotCommand("start", "description")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def unique_gift_model():
    """A ``UniqueGiftModel`` built from the shared test-base values."""
    base = UniqueGiftModelTestBase
    return UniqueGiftModel(
        name=base.name,
        sticker=base.sticker,
        rarity_per_mille=base.rarity_per_mille,
    )
class UniqueGiftModelTestBase:
    # Shared expected values for the UniqueGiftModel tests below.
    name = "model_name"
    sticker = Sticker("file_id", "file_unique_id", 512, 512, False, False, "regular")
    rarity_per_mille = 10
class TestUniqueGiftModelWithoutRequest(UniqueGiftModelTestBase):
    """Offline tests for ``UniqueGiftModel``."""

    def test_slot_behaviour(self, unique_gift_model):
        """Every declared slot is set and no slot is duplicated in the MRO."""
        for attr in unique_gift_model.__slots__:
            assert getattr(unique_gift_model, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(unique_gift_model)) == len(set(mro_slots(unique_gift_model))), (
            "duplicate slot"
        )

    def test_de_json(self, offline_bot):
        """``de_json`` reconstructs every attribute from a Bot API dict."""
        json_dict = {
            "name": self.name,
            "sticker": self.sticker.to_dict(),
            "rarity_per_mille": self.rarity_per_mille,
        }
        unique_gift_model = UniqueGiftModel.de_json(json_dict, offline_bot)
        assert unique_gift_model.api_kwargs == {}
        assert unique_gift_model.name == self.name
        assert unique_gift_model.sticker == self.sticker
        assert unique_gift_model.rarity_per_mille == self.rarity_per_mille

    def test_to_dict(self, unique_gift_model):
        """``to_dict`` serializes every attribute."""
        json_dict = unique_gift_model.to_dict()
        assert json_dict["name"] == self.name
        assert json_dict["sticker"] == self.sticker.to_dict()
        assert json_dict["rarity_per_mille"] == self.rarity_per_mille

    def test_equality(self, unique_gift_model):
        """Equal args compare equal; different name or different type do not."""
        a = unique_gift_model
        b = UniqueGiftModel(self.name, self.sticker, self.rarity_per_mille)
        c = UniqueGiftModel("other_name", self.sticker, self.rarity_per_mille)
        # Same args but a different class, so it must not compare equal.
        d = UniqueGiftSymbol(self.name, self.sticker, self.rarity_per_mille)
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def unique_gift_symbol():
    """A ``UniqueGiftSymbol`` built from the shared test-base values."""
    base = UniqueGiftSymbolTestBase
    return UniqueGiftSymbol(
        name=base.name,
        sticker=base.sticker,
        rarity_per_mille=base.rarity_per_mille,
    )
class UniqueGiftSymbolTestBase:
    # Shared expected values for the UniqueGiftSymbol tests below.
    name = "symbol_name"
    sticker = Sticker("file_id", "file_unique_id", 512, 512, False, False, "regular")
    rarity_per_mille = 20
class TestUniqueGiftSymbolWithoutRequest(UniqueGiftSymbolTestBase):
    """Offline tests for ``UniqueGiftSymbol``."""

    def test_slot_behaviour(self, unique_gift_symbol):
        """Every declared slot is set and no slot is duplicated in the MRO."""
        for attr in unique_gift_symbol.__slots__:
            assert getattr(unique_gift_symbol, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(unique_gift_symbol)) == len(set(mro_slots(unique_gift_symbol))), (
            "duplicate slot"
        )

    def test_de_json(self, offline_bot):
        """``de_json`` reconstructs every attribute from a Bot API dict."""
        json_dict = {
            "name": self.name,
            "sticker": self.sticker.to_dict(),
            "rarity_per_mille": self.rarity_per_mille,
        }
        unique_gift_symbol = UniqueGiftSymbol.de_json(json_dict, offline_bot)
        assert unique_gift_symbol.api_kwargs == {}
        assert unique_gift_symbol.name == self.name
        assert unique_gift_symbol.sticker == self.sticker
        assert unique_gift_symbol.rarity_per_mille == self.rarity_per_mille

    def test_to_dict(self, unique_gift_symbol):
        """``to_dict`` serializes every attribute."""
        json_dict = unique_gift_symbol.to_dict()
        assert json_dict["name"] == self.name
        assert json_dict["sticker"] == self.sticker.to_dict()
        assert json_dict["rarity_per_mille"] == self.rarity_per_mille

    def test_equality(self, unique_gift_symbol):
        """Equal args compare equal; different name or different type do not."""
        a = unique_gift_symbol
        b = UniqueGiftSymbol(self.name, self.sticker, self.rarity_per_mille)
        c = UniqueGiftSymbol("other_name", self.sticker, self.rarity_per_mille)
        # Same args but a different class, so it must not compare equal.
        d = UniqueGiftModel(self.name, self.sticker, self.rarity_per_mille)
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def unique_gift_backdrop():
    """A ``UniqueGiftBackdrop`` built from the shared test-base values."""
    base = UniqueGiftBackdropTestBase
    return UniqueGiftBackdrop(
        name=base.name,
        colors=base.colors,
        rarity_per_mille=base.rarity_per_mille,
    )
class UniqueGiftBackdropTestBase:
    # Shared expected values for the UniqueGiftBackdrop tests below.
    name = "backdrop_name"
    colors = UniqueGiftBackdropColors(0x00FF00, 0xEE00FF, 0xAA22BB, 0x20FE8F)
    rarity_per_mille = 30
class TestUniqueGiftBackdropWithoutRequest(UniqueGiftBackdropTestBase):
    """Offline tests for ``UniqueGiftBackdrop``."""

    def test_slot_behaviour(self, unique_gift_backdrop):
        """Every declared slot is set and no slot is duplicated in the MRO."""
        for attr in unique_gift_backdrop.__slots__:
            assert getattr(unique_gift_backdrop, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(unique_gift_backdrop)) == len(set(mro_slots(unique_gift_backdrop))), (
            "duplicate slot"
        )

    def test_de_json(self, offline_bot):
        """``de_json`` reconstructs every attribute from a Bot API dict."""
        json_dict = {
            "name": self.name,
            "colors": self.colors.to_dict(),
            "rarity_per_mille": self.rarity_per_mille,
        }
        unique_gift_backdrop = UniqueGiftBackdrop.de_json(json_dict, offline_bot)
        assert unique_gift_backdrop.api_kwargs == {}
        assert unique_gift_backdrop.name == self.name
        assert unique_gift_backdrop.colors == self.colors
        assert unique_gift_backdrop.rarity_per_mille == self.rarity_per_mille

    def test_to_dict(self, unique_gift_backdrop):
        """``to_dict`` serializes every attribute."""
        json_dict = unique_gift_backdrop.to_dict()
        assert json_dict["name"] == self.name
        assert json_dict["colors"] == self.colors.to_dict()
        assert json_dict["rarity_per_mille"] == self.rarity_per_mille

    def test_equality(self, unique_gift_backdrop):
        """Equal args compare equal; different name or different type do not."""
        a = unique_gift_backdrop
        b = UniqueGiftBackdrop(self.name, self.colors, self.rarity_per_mille)
        c = UniqueGiftBackdrop("other_name", self.colors, self.rarity_per_mille)
        # Different telegram object type entirely.
        d = BotCommand("start", "description")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def unique_gift_backdrop_colors():
    """A ``UniqueGiftBackdropColors`` built from the shared test-base values."""
    base = UniqueGiftBackdropColorsTestBase
    return UniqueGiftBackdropColors(
        center_color=base.center_color,
        edge_color=base.edge_color,
        symbol_color=base.symbol_color,
        text_color=base.text_color,
    )
class UniqueGiftBackdropColorsTestBase:
    # Shared expected RGB values for the UniqueGiftBackdropColors tests below.
    center_color = 0x00FF00
    edge_color = 0xEE00FF
    symbol_color = 0xAA22BB
    text_color = 0x20FE8F
class TestUniqueGiftBackdropColorsWithoutRequest(UniqueGiftBackdropColorsTestBase):
    """Offline tests for ``UniqueGiftBackdropColors``."""

    def test_slot_behaviour(self, unique_gift_backdrop_colors):
        """Every declared slot is set and no slot is duplicated in the MRO."""
        for attr in unique_gift_backdrop_colors.__slots__:
            assert getattr(unique_gift_backdrop_colors, attr, "err") != "err", (
                f"got extra slot '{attr}'"
            )
        assert len(mro_slots(unique_gift_backdrop_colors)) == len(
            set(mro_slots(unique_gift_backdrop_colors))
        ), "duplicate slot"

    def test_de_json(self, offline_bot):
        """``de_json`` reconstructs every attribute from a Bot API dict."""
        json_dict = {
            "center_color": self.center_color,
            "edge_color": self.edge_color,
            "symbol_color": self.symbol_color,
            "text_color": self.text_color,
        }
        unique_gift_backdrop_colors = UniqueGiftBackdropColors.de_json(json_dict, offline_bot)
        assert unique_gift_backdrop_colors.api_kwargs == {}
        assert unique_gift_backdrop_colors.center_color == self.center_color
        assert unique_gift_backdrop_colors.edge_color == self.edge_color
        assert unique_gift_backdrop_colors.symbol_color == self.symbol_color
        assert unique_gift_backdrop_colors.text_color == self.text_color

    def test_to_dict(self, unique_gift_backdrop_colors):
        """``to_dict`` serializes every attribute."""
        json_dict = unique_gift_backdrop_colors.to_dict()
        assert json_dict["center_color"] == self.center_color
        assert json_dict["edge_color"] == self.edge_color
        assert json_dict["symbol_color"] == self.symbol_color
        assert json_dict["text_color"] == self.text_color

    def test_equality(self, unique_gift_backdrop_colors):
        """Equal args compare equal; a changed color or different type do not."""
        a = unique_gift_backdrop_colors
        b = UniqueGiftBackdropColors(
            center_color=self.center_color,
            edge_color=self.edge_color,
            symbol_color=self.symbol_color,
            text_color=self.text_color,
        )
        c = UniqueGiftBackdropColors(
            center_color=0x000000,
            edge_color=self.edge_color,
            symbol_color=self.symbol_color,
            text_color=self.text_color,
        )
        # Different telegram object type entirely.
        d = BotCommand("start", "description")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
@pytest.fixture
def unique_gift_info():
    """A ``UniqueGiftInfo`` built from the shared test-base values."""
    base = UniqueGiftInfoTestBase
    return UniqueGiftInfo(
        gift=base.gift,
        origin=base.origin,
        owned_gift_id=base.owned_gift_id,
        transfer_star_count=base.transfer_star_count,
        last_resale_currency=base.last_resale_currency,
        last_resale_amount=base.last_resale_amount,
        next_transfer_date=base.next_transfer_date,
    )
class UniqueGiftInfoTestBase:
    # Shared expected values for the UniqueGiftInfo tests below.
    gift = UniqueGift(
        gift_id="gift_id",
        base_name="human_readable_name",
        name="unique_name",
        number=10,
        model=UniqueGiftModel(
            name="model_name",
            sticker=Sticker("file_id1", "file_unique_id1", 512, 512, False, False, "regular"),
            rarity_per_mille=10,
        ),
        symbol=UniqueGiftSymbol(
            name="symbol_name",
            sticker=Sticker("file_id2", "file_unique_id2", 512, 512, True, True, "mask"),
            rarity_per_mille=20,
        ),
        backdrop=UniqueGiftBackdrop(
            name="backdrop_name",
            colors=UniqueGiftBackdropColors(0x00FF00, 0xEE00FF, 0xAA22BB, 0x20FE8F),
            rarity_per_mille=2,
        ),
    )
    origin = UniqueGiftInfo.UPGRADE
    owned_gift_id = "some_id"
    transfer_star_count = 10
    last_resale_currency = "XTR"
    last_resale_amount = 1234
    # Aware UTC datetime; microseconds are dropped because Bot API timestamps
    # have whole-second resolution.
    next_transfer_date = dtm.datetime.now(tz=UTC).replace(microsecond=0)
class TestUniqueGiftInfoWithoutRequest(UniqueGiftInfoTestBase):
    """Offline tests for ``UniqueGiftInfo``."""

    def test_slot_behaviour(self, unique_gift_info):
        """Every declared slot is set and no slot is duplicated in the MRO."""
        for attr in unique_gift_info.__slots__:
            assert getattr(unique_gift_info, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(unique_gift_info)) == len(set(mro_slots(unique_gift_info))), (
            "duplicate slot"
        )

    def test_de_json(self, offline_bot):
        """``de_json`` reconstructs every attribute from a Bot API dict."""
        json_dict = {
            "gift": self.gift.to_dict(),
            "origin": self.origin,
            "owned_gift_id": self.owned_gift_id,
            "transfer_star_count": self.transfer_star_count,
            "last_resale_currency": self.last_resale_currency,
            "last_resale_amount": self.last_resale_amount,
            "next_transfer_date": to_timestamp(self.next_transfer_date),
        }
        unique_gift_info = UniqueGiftInfo.de_json(json_dict, offline_bot)
        assert unique_gift_info.api_kwargs == {}
        assert unique_gift_info.gift == self.gift
        assert unique_gift_info.origin == self.origin
        assert unique_gift_info.owned_gift_id == self.owned_gift_id
        assert unique_gift_info.transfer_star_count == self.transfer_star_count
        assert unique_gift_info.last_resale_currency == self.last_resale_currency
        assert unique_gift_info.last_resale_amount == self.last_resale_amount
        assert unique_gift_info.next_transfer_date == self.next_transfer_date

    def test_de_json_localization(self, tz_bot, offline_bot, raw_bot):
        """``next_transfer_date`` honours the bot's default timezone."""
        json_dict = {
            "gift": self.gift.to_dict(),
            "origin": self.origin,
            "owned_gift_id": self.owned_gift_id,
            "transfer_star_count": self.transfer_star_count,
            "last_resale_currency": self.last_resale_currency,
            "last_resale_amount": self.last_resale_amount,
            "next_transfer_date": to_timestamp(self.next_transfer_date),
        }
        unique_gift_info_raw = UniqueGiftInfo.de_json(json_dict, raw_bot)
        unique_gift_info_offline = UniqueGiftInfo.de_json(json_dict, offline_bot)
        unique_gift_info_tz = UniqueGiftInfo.de_json(json_dict, tz_bot)
        # comparing utcoffsets because comparing timezones is unpredictable
        unique_gift_info_tz_offset = unique_gift_info_tz.next_transfer_date.utcoffset()
        tz_bot_offset = tz_bot.defaults.tzinfo.utcoffset(
            unique_gift_info_tz.next_transfer_date.replace(tzinfo=None)
        )
        assert unique_gift_info_raw.next_transfer_date.tzinfo == UTC
        assert unique_gift_info_offline.next_transfer_date.tzinfo == UTC
        assert unique_gift_info_tz_offset == tz_bot_offset

    def test_to_dict(self, unique_gift_info):
        """``to_dict`` serializes every attribute."""
        json_dict = unique_gift_info.to_dict()
        assert json_dict["gift"] == self.gift.to_dict()
        assert json_dict["origin"] == self.origin
        assert json_dict["owned_gift_id"] == self.owned_gift_id
        assert json_dict["transfer_star_count"] == self.transfer_star_count
        assert json_dict["last_resale_currency"] == self.last_resale_currency
        assert json_dict["last_resale_amount"] == self.last_resale_amount
        assert json_dict["next_transfer_date"] == to_timestamp(self.next_transfer_date)

    def test_enum_type_conversion(self, unique_gift_info):
        """The raw origin string is converted to the UniqueGiftInfoOrigin enum."""
        assert type(unique_gift_info.origin) is UniqueGiftInfoOrigin
        assert unique_gift_info.origin == UniqueGiftInfoOrigin.UPGRADE

    def test_equality(self, unique_gift_info):
        """Equal args compare equal; a changed origin or different type do not."""
        a = unique_gift_info
        b = UniqueGiftInfo(self.gift, self.origin, self.owned_gift_id, self.transfer_star_count)
        c = UniqueGiftInfo(
            self.gift, UniqueGiftInfo.TRANSFER, self.owned_gift_id, self.transfer_star_count
        )
        # Different telegram object type entirely.
        d = BotCommand("start", "description")
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
| {
"repo_id": "python-telegram-bot/python-telegram-bot",
"file_path": "tests/test_uniquegift.py",
"license": "GNU General Public License v3.0",
"lines": 537,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pytorch/examples:distributed/FSDP2/checkpoint.py | import os
import time
import torch
import torch.nn as nn
from torch.distributed.checkpoint.state_dict import (
_init_optim_state,
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.fsdp import FSDPModule
from torch.distributed.tensor import distribute_tensor, DTensor
# File names used inside each timestamped checkpoint folder.
MODEL_CHECKPOINT = "model_state_dict.pt"
OPTIM_CHECKPOINT = "optim_state_dict.pt"
# Key of the parameter-id list inside an optimizer param group.
PARAMS = "params"
def get_latest_checkpoint_folder(path):
    """Return the largest integer-named subdirectory of ``path``, or None.

    Checkpoint folders are named by a numeric timestamp, so the maximum is
    the most recent one. Non-numeric names and plain files are ignored, and
    None is returned when ``path`` does not exist or has no numeric folders.
    """
    if not os.path.exists(path):
        return None
    numeric_folders = []
    for entry in os.listdir(path):
        if not os.path.isdir(os.path.join(path, entry)):
            continue
        try:
            numeric_folders.append(int(entry))
        except ValueError:
            continue  # skip non-numeric folder names
    return max(numeric_folders, default=None)
class Checkpointer:
    """Saves/loads full (unsharded) model and optimizer checkpoints for an
    FSDP2 module.

    Two modes, chosen by ``dcp_api``:
      * True  -- use torch.distributed.checkpoint's state-dict helpers, which
                 handle full<->sharded conversion and rank-0 broadcast.
      * False -- do the conversion manually with DTensor primitives.

    Checkpoints live under ``<folder>/<dcp_api|dtensor_api>/<millis-timestamp>/``.
    """

    def __init__(self, folder: str, dcp_api: bool):
        self.folder = folder
        self.dcp_api = dcp_api
        # Millisecond timestamp of the newest existing checkpoint, or None.
        self.last_training_time = get_latest_checkpoint_folder(
            f"{folder}/{'dcp_api' if dcp_api else 'dtensor_api'}"
        )

    def is_empty(self):
        """Return True when no checkpoint has been written yet."""
        return self.last_training_time is None

    def load_model(self, model: FSDPModule):
        """Load the latest full model checkpoint into the sharded model."""
        last_model_checkpoint = (
            f"{self.folder}/{'dcp_api' if self.dcp_api else 'dtensor_api'}"
            f"/{self.last_training_time}/{MODEL_CHECKPOINT}"
        )
        # mmap avoids reading the whole file into memory at once.
        full_sd = torch.load(
            last_model_checkpoint, mmap=True, weights_only=True, map_location="cpu"
        )
        if self.dcp_api:
            # DCP shards the rank-0 full state dict and broadcasts the shards.
            set_model_state_dict(
                model=model,
                model_state_dict=full_sd,
                options=StateDictOptions(
                    full_state_dict=True,
                    broadcast_from_rank0=True,
                ),
            )
            return
        # Manual path: shard each full tensor into a DTensor that matches the
        # corresponding (meta-device) sharded parameter's mesh and placements.
        meta_sharded_sd = model.state_dict()
        sharded_sd = {}
        for param_name, full_tensor in full_sd.items():
            sharded_meta_param = meta_sharded_sd.get(param_name)
            sharded_tensor = distribute_tensor(
                full_tensor,
                sharded_meta_param.device_mesh,
                sharded_meta_param.placements,
            )
            sharded_sd[param_name] = nn.Parameter(sharded_tensor)
        # choose `assign=True` since we cannot call `copy_` on meta tensor
        model.load_state_dict(sharded_sd, strict=False, assign=True)

    def load_optim(self, model: FSDPModule, opt: torch.optim.Optimizer):
        """Load the latest full optimizer checkpoint into the sharded optimizer."""
        last_optim_checkpoint = (
            f"{self.folder}/{'dcp_api' if self.dcp_api else 'dtensor_api'}"
            f"/{self.last_training_time}/{OPTIM_CHECKPOINT}"
        )
        full_sd = torch.load(
            last_optim_checkpoint, mmap=True, weights_only=True, map_location="cpu"
        )
        if self.dcp_api:
            set_optimizer_state_dict(
                model=model,
                optimizers=opt,
                optim_state_dict=full_sd,
                options=StateDictOptions(
                    full_state_dict=True,
                    broadcast_from_rank0=True,
                ),
            )
            return
        # Materialize the optimizer's lazy state so its layout can be filled in.
        _init_optim_state(opt)
        param_groups = opt.state_dict()["param_groups"]
        state = opt.state_dict()["state"]
        full_param_groups = full_sd["param_groups"]
        full_state = full_sd["state"]
        # Copy hyperparameters per group, then per-parameter state tensors.
        for param_group, full_param_group in zip(param_groups, full_param_groups):
            for key, value in full_param_group.items():
                if key == PARAMS:
                    continue
                param_group[key] = value
            for pid, full_pid in zip(param_group[PARAMS], full_param_group[PARAMS]):
                if pid not in state:
                    continue
                param_state = state[pid]
                full_param_state = full_state[full_pid]
                for attr, full_tensor in full_param_state.items():
                    sharded_tensor = param_state[attr]
                    if isinstance(sharded_tensor, DTensor):
                        # exp_avg is DTensor
                        param_state[attr] = distribute_tensor(
                            full_tensor,
                            sharded_tensor.device_mesh,
                            sharded_tensor.placements,
                        )
                    else:
                        # step is plain tensor
                        param_state[attr] = full_tensor
        opt.load_state_dict(
            {
                "param_groups": param_groups,
                "state": state,
            }
        )

    def _get_full_model_state_dict(self, model: FSDPModule):
        """Gather a full, CPU model state dict (populated on rank 0 only in
        the manual path)."""
        if self.dcp_api:
            return get_model_state_dict(
                model=model,
                options=StateDictOptions(
                    full_state_dict=True,
                    cpu_offload=True,
                ),
            )
        sharded_sd = model.state_dict()
        cpu_state_dict = {}
        for param_name, sharded_param in sharded_sd.items():
            # All ranks participate in the all-gather behind full_tensor().
            full_param = sharded_param.full_tensor()
            if torch.distributed.get_rank() == 0:
                cpu_state_dict[param_name] = full_param.cpu()
            else:
                # Free the gathered copy promptly on non-saving ranks.
                del full_param
        return cpu_state_dict

    def _get_full_optimizer_state_dict(
        self,
        model: FSDPModule,
        opt: torch.optim.Optimizer,
    ):
        """Gather a full, CPU optimizer state dict (rank 0 only in the manual
        path; other ranks return {})."""
        if self.dcp_api:
            return get_optimizer_state_dict(
                model=model,
                optimizers=opt,
                options=StateDictOptions(
                    full_state_dict=True,
                    cpu_offload=True,
                ),
            )
        is_rank_zero = torch.distributed.get_rank() == 0
        sharded_sd = opt.state_dict()
        sharded_state = sharded_sd["state"]
        full_state = {}
        for group_id, sharded_group in sharded_state.items():
            group_state = {}
            for attr, sharded_tensor in sharded_group.items():
                if isinstance(sharded_tensor, DTensor):
                    # "exp_avg" in AdamW is `DTensor`
                    full_tensor = sharded_tensor.full_tensor()
                else:
                    # "step" in AdamW is plain tensor
                    full_tensor = sharded_tensor
                if is_rank_zero:
                    group_state[attr] = full_tensor.cpu()
                else:
                    del full_tensor
            if is_rank_zero:
                full_state[group_id] = group_state
            else:
                del group_state
        if is_rank_zero:
            return {
                "param_groups": sharded_sd["param_groups"],
                "state": full_state,
            }
        else:
            return {}

    def save(self, model: FSDPModule, optim: torch.optim.Optimizer):
        """Gather full state dicts on every rank, then write them from rank 0
        into a new millisecond-timestamped folder."""
        model_state_dict = self._get_full_model_state_dict(model)
        optim_state_dict = self._get_full_optimizer_state_dict(model, optim)
        if torch.distributed.get_rank() == 0:
            new_training_time = int(time.time() * 1000)
            new_checkpoint_folder = f"{self.folder}/{'dcp_api' if self.dcp_api else 'dtensor_api'}/{new_training_time}"
            new_model_checkpoint = f"{new_checkpoint_folder}/{MODEL_CHECKPOINT}"
            new_optim_checkpoint = f"{new_checkpoint_folder}/{OPTIM_CHECKPOINT}"
            os.makedirs(new_checkpoint_folder, exist_ok=True)
            torch.save(model_state_dict, new_model_checkpoint)
            torch.save(optim_state_dict, new_optim_checkpoint)
| {
"repo_id": "pytorch/examples",
"file_path": "distributed/FSDP2/checkpoint.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pytorch/examples:distributed/FSDP2/model.py | from dataclasses import dataclass
import torch
import torch.nn as nn
import torch.nn.functional as F
@dataclass
class ModelArgs:
    """Hyperparameters for the toy Transformer below."""

    n_layers: int = 2       # number of TransformerBlock layers
    vocab_size: int = 8     # token vocabulary size (embedding rows / logit width)
    max_seq_len: int = 16   # maximum sequence length (positional table size)
    dim: int = 16           # model/embedding width
    n_heads: int = 4        # attention heads; must divide `dim`
    dropout_p: float = 0.1  # dropout probability used throughout
class Attention(nn.Module):
    """Multi-head self-attention over a (batch, seq, dim) input."""

    def __init__(self, args: ModelArgs):
        super().__init__()
        assert args.dim % args.n_heads == 0
        self.head_dim = args.dim // args.n_heads
        self.n_heads = args.n_heads
        self.dropout_p = args.dropout_p
        self.resid_dropout = nn.Dropout(args.dropout_p)
        self.wq = nn.Linear(args.dim, args.dim, bias=False)
        self.wk = nn.Linear(args.dim, args.dim, bias=False)
        self.wv = nn.Linear(args.dim, args.dim, bias=False)
        self.wo = nn.Linear(args.dim, args.dim, bias=False)

    def forward(self, x):
        batch, seq, _ = x.size()

        def split_heads(t):
            # (batch, seq, dim) -> (batch, n_heads, seq, head_dim)
            return t.view(batch, seq, self.n_heads, self.head_dim).transpose(1, 2)

        q = split_heads(self.wq(x))
        k = split_heads(self.wk(x))
        v = split_heads(self.wv(x))
        # Attention dropout is active only in training mode.
        attn = F.scaled_dot_product_attention(
            q,
            k,
            v,
            None,
            self.dropout_p if self.training else 0,
        )
        # Merge heads back: (batch, n_heads, seq, head_dim) -> (batch, seq, dim).
        merged = attn.transpose(1, 2).contiguous().view(batch, seq, -1)
        return self.resid_dropout(self.wo(merged))

    def reset_parameters(self):
        for projection in (self.wq, self.wk, self.wv, self.wo):
            projection.reset_parameters()
class FeedForward(nn.Module):
    """Two-layer GELU MLP with dropout on the output projection."""

    def __init__(self, dim, hidden_dim, dropout_p):
        super().__init__()
        self.w1 = nn.Linear(dim, hidden_dim)
        self.gelu = nn.GELU()
        self.w2 = nn.Linear(hidden_dim, dim)
        self.resid_dropout = nn.Dropout(dropout_p)

    def forward(self, x):
        hidden = self.gelu(self.w1(x))
        projected = self.w2(hidden)
        return self.resid_dropout(projected)

    def reset_parameters(self):
        for layer in (self.w1, self.w2):
            layer.reset_parameters()
class TransformerBlock(nn.Module):
    """Pre-norm transformer block: attention then MLP, each with a residual."""

    def __init__(self, args: ModelArgs):
        super().__init__()
        self.attention_norm = nn.LayerNorm(args.dim)
        self.attention = Attention(args)
        self.ffn_norm = nn.LayerNorm(args.dim)
        self.feed_forward = FeedForward(
            args.dim, hidden_dim=4 * args.dim, dropout_p=args.dropout_p
        )

    def forward(self, x):
        attended = x + self.attention(self.attention_norm(x))
        return attended + self.feed_forward(self.ffn_norm(attended))

    def reset_parameters(self):
        for child in (
            self.attention_norm,
            self.attention,
            self.ffn_norm,
            self.feed_forward,
        ):
            child.reset_parameters()
# A toy transformer model, partly inspired by the nanoGPT model:
# https://github.com/karpathy/nanoGPT.
class Transformer(nn.Module):
    """Token + positional embeddings, a stack of blocks, and an LM head."""

    def __init__(self, args: ModelArgs):
        super().__init__()
        assert args.vocab_size is not None
        assert args.max_seq_len is not None
        self.model_args = args
        self.max_seq_len = args.max_seq_len
        self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim)
        self.pos_embeddings = nn.Embedding(args.max_seq_len, args.dim)
        self.dropout = nn.Dropout(args.dropout_p)
        self.layers = nn.ModuleList(
            TransformerBlock(args) for _ in range(args.n_layers)
        )
        self.norm = nn.LayerNorm(args.dim)
        self.output = nn.Linear(args.dim, args.vocab_size, bias=False)

    def forward(self, tokens):
        _bsz, seq_len = tokens.size()
        assert seq_len <= self.max_seq_len
        positions = torch.arange(0, seq_len, device=tokens.device)
        # Positional embeddings have shape (seq_len, dim) and broadcast over batch.
        h = self.dropout(self.tok_embeddings(tokens) + self.pos_embeddings(positions))
        for block in self.layers:
            h = block(h)
        # Cast logits to float32 regardless of the compute dtype.
        return self.output(self.norm(h)).float()

    def reset_parameters(self):
        # Note: blocks in self.layers reset themselves; only the modules owned
        # directly by this class are reset here, matching the original behavior.
        for module in (
            self.tok_embeddings,
            self.pos_embeddings,
            self.norm,
            self.output,
        ):
            module.reset_parameters()
| {
"repo_id": "pytorch/examples",
"file_path": "distributed/FSDP2/model.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
pytorch/examples:distributed/FSDP2/utils.py | import torch
from model import Transformer
from torch.distributed.fsdp import FSDPModule
from torch.distributed.tensor import Shard
def inspect_model(model: FSDPModule):
    """Sanity-check a fully_shard-ed toy Transformer.

    Asserts the module is both a ``Transformer`` and an ``FSDPModule``,
    prints it on rank 0, and verifies every parameter is a float32 DTensor
    sharded along dim 0.
    """
    assert isinstance(model, Transformer)
    assert isinstance(model, FSDPModule)
    if torch.distributed.get_rank() == 0:
        print(model)
    for param in model.parameters():
        assert param.placements == (Shard(0),)
        assert param.dtype == torch.float32
        # print(param.get_local_tensor())
def inspect_mixed_precision(model: FSDPModule):
    """Unshard the root module, check its direct (non-recursive) parameters
    are bfloat16 — presumably fully_shard was configured with a bf16
    param_dtype (confirm against the caller) — then reshard.
    """
    model.unshard()
    for param in model.parameters(recurse=False):
        assert param.dtype == torch.bfloat16
    model.reshard()
| {
"repo_id": "pytorch/examples",
"file_path": "distributed/FSDP2/utils.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ranaroussi/yfinance:doc/source/reference/examples/calendars.py | import yfinance as yf
from datetime import datetime, timedelta
# Default init (today + 7 days)
calendar = yf.Calendars()
# Today's events: calendar of 1 day
tomorrow = datetime.now() + timedelta(days=1)
calendar = yf.Calendars(end=tomorrow)
# Default calendar queries - accessing the properties will fetch the data from YF
calendar.earnings_calendar
calendar.ipo_info_calendar
calendar.splits_calendar
calendar.economic_events_calendar
# Manual queries
calendar.get_earnings_calendar()
calendar.get_ipo_info_calendar()
calendar.get_splits_calendar()
calendar.get_economic_events_calendar()
# Earnings calendar custom filters
calendar.get_earnings_calendar(
market_cap=100_000_000, # filter out small-cap
filter_most_active=True, # show only actively traded. Uses: `screen(query="MOST_ACTIVES")`
)
# Example of real use case:
# Get inminent unreported earnings events
today = datetime.now()
is_friday = today.weekday() == 4
day_after_tomorrow = today + timedelta(days=4 if is_friday else 2)
calendar = yf.Calendars(today, day_after_tomorrow)
df = calendar.get_earnings_calendar(limit=100)
unreported_df = df[df["Reported EPS"].isnull()]
| {
"repo_id": "ranaroussi/yfinance",
"file_path": "doc/source/reference/examples/calendars.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.