language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
encode__django-rest-framework
tests/test_validators.py
{ "start": 6332, "end": 21136 }
class ____(TestCase): def setUp(self): self.instance = UniquenessTogetherModel.objects.create( race_name='example', position=1 ) UniquenessTogetherModel.objects.create( race_name='example', position=2 ) UniquenessTogetherModel.objects.create( race_name='other', position=1 ) def test_repr(self): serializer = UniquenessTogetherSerializer() expected = dedent(r""" UniquenessTogetherSerializer\(\): id = IntegerField\(label='ID', read_only=True\) race_name = CharField\(max_length=100, required=True\) position = IntegerField\(.*required=True\) class Meta: validators = \[<UniqueTogetherValidator\(queryset=UniquenessTogetherModel.objects.all\(\), fields=\('race_name', 'position'\)\)>\] """) assert re.search(expected, repr(serializer)) is not None def test_is_not_unique_together(self): """ Failing unique together validation should result in non field errors. """ data = {'race_name': 'example', 'position': 2} serializer = UniquenessTogetherSerializer(data=data) assert not serializer.is_valid() assert serializer.errors == { 'non_field_errors': [ 'The fields race_name, position must make a unique set.' ] } def test_is_unique_together(self): """ In a unique together validation, one field may be non-unique so long as the set as a whole is unique. """ data = {'race_name': 'other', 'position': 2} serializer = UniquenessTogetherSerializer(data=data) assert serializer.is_valid() assert serializer.validated_data == { 'race_name': 'other', 'position': 2 } def test_updated_instance_excluded_from_unique_together(self): """ When performing an update, the existing instance does not count as a match against uniqueness. """ data = {'race_name': 'example', 'position': 1} serializer = UniquenessTogetherSerializer(self.instance, data=data) assert serializer.is_valid() assert serializer.validated_data == { 'race_name': 'example', 'position': 1 } def test_unique_together_is_required(self): """ In a unique together validation, all fields are required. 
""" data = {'position': 2} serializer = UniquenessTogetherSerializer(data=data, partial=True) assert not serializer.is_valid() assert serializer.errors == { 'race_name': ['This field is required.'] } def test_ignore_excluded_fields(self): """ When model fields are not included in a serializer, then uniqueness validators should not be added for that field. """ class ExcludedFieldSerializer(serializers.ModelSerializer): class Meta: model = UniquenessTogetherModel fields = ('id', 'race_name',) serializer = ExcludedFieldSerializer() expected = dedent(""" ExcludedFieldSerializer(): id = IntegerField(label='ID', read_only=True) race_name = CharField(max_length=100) """) assert repr(serializer) == expected def test_ignore_read_only_fields(self): """ When serializer fields are read only, then uniqueness validators should not be added for that field. """ class ReadOnlyFieldSerializer(serializers.ModelSerializer): class Meta: model = UniquenessTogetherModel fields = ('id', 'race_name', 'position') read_only_fields = ('race_name',) serializer = ReadOnlyFieldSerializer() expected = dedent(r""" ReadOnlyFieldSerializer\(\): id = IntegerField\(label='ID', read_only=True\) race_name = CharField\(read_only=True\) position = IntegerField\(.*required=True\) """) assert re.search(expected, repr(serializer)) is not None def test_read_only_fields_with_default(self): """ Special case of read_only + default DOES validate unique_together. 
""" class ReadOnlyFieldWithDefaultSerializer(serializers.ModelSerializer): race_name = serializers.CharField(max_length=100, read_only=True, default='example') class Meta: model = UniquenessTogetherModel fields = ('id', 'race_name', 'position') data = {'position': 2} serializer = ReadOnlyFieldWithDefaultSerializer(data=data) assert len(serializer.validators) == 1 assert isinstance(serializer.validators[0], UniqueTogetherValidator) assert serializer.validators[0].fields == ('race_name', 'position') assert not serializer.is_valid() assert serializer.errors == { 'non_field_errors': [ 'The fields race_name, position must make a unique set.' ] } def test_read_only_fields_with_default_and_source(self): class ReadOnlySerializer(serializers.ModelSerializer): name = serializers.CharField(source='race_name', default='test', read_only=True) class Meta: model = UniquenessTogetherModel fields = ['name', 'position'] validators = [ UniqueTogetherValidator( queryset=UniquenessTogetherModel.objects.all(), fields=['name', 'position'] ) ] serializer = ReadOnlySerializer(data={'position': 1}) assert serializer.is_valid(raise_exception=True) def test_writeable_fields_with_source(self): class WriteableSerializer(serializers.ModelSerializer): name = serializers.CharField(source='race_name') class Meta: model = UniquenessTogetherModel fields = ['name', 'position'] validators = [ UniqueTogetherValidator( queryset=UniquenessTogetherModel.objects.all(), fields=['name', 'position'] ) ] serializer = WriteableSerializer(data={'name': 'test', 'position': 1}) assert serializer.is_valid(raise_exception=True) # Validation error should use seriazlier field name, not source serializer = WriteableSerializer(data={'position': 1}) assert not serializer.is_valid() assert serializer.errors == { 'name': [ 'This field is required.' 
] } def test_default_validator_with_fields_with_source(self): class TestSerializer(serializers.ModelSerializer): name = serializers.CharField(source='race_name') class Meta: model = UniquenessTogetherModel fields = ['name', 'position'] serializer = TestSerializer() expected = dedent(r""" TestSerializer\(\): name = CharField\(source='race_name'\) position = IntegerField\(.*\) class Meta: validators = \[<UniqueTogetherValidator\(queryset=UniquenessTogetherModel.objects.all\(\), fields=\('name', 'position'\)\)>\] """) assert re.search(expected, repr(serializer)) is not None def test_default_validator_with_multiple_fields_with_same_source(self): class TestSerializer(serializers.ModelSerializer): name = serializers.CharField(source='race_name') other_name = serializers.CharField(source='race_name') class Meta: model = UniquenessTogetherModel fields = ['name', 'other_name', 'position'] serializer = TestSerializer(data={ 'name': 'foo', 'other_name': 'foo', 'position': 1, }) with pytest.raises(AssertionError) as excinfo: serializer.is_valid() expected = ( "Unable to create `UniqueTogetherValidator` for " "`UniquenessTogetherModel.race_name` as `TestSerializer` has " "multiple fields (name, other_name) that map to this model field. " "Either remove the extra fields, or override `Meta.validators` " "with a `UniqueTogetherValidator` using the desired field names.") assert str(excinfo.value) == expected def test_allow_explicit_override(self): """ Ensure validators can be explicitly removed.. 
""" class NoValidatorsSerializer(serializers.ModelSerializer): class Meta: model = UniquenessTogetherModel fields = ('id', 'race_name', 'position') validators = [] serializer = NoValidatorsSerializer() expected = dedent(r""" NoValidatorsSerializer\(\): id = IntegerField\(label='ID', read_only=True.*\) race_name = CharField\(max_length=100\) position = IntegerField\(.*\) """) assert re.search(expected, repr(serializer)) is not None def test_ignore_validation_for_null_fields(self): # None values that are on fields which are part of the uniqueness # constraint cause the instance to ignore uniqueness validation. NullUniquenessTogetherModel.objects.create( date_of_birth=datetime.date(2000, 1, 1), race_name='Paris Marathon', position=None ) data = { 'date': datetime.date(2000, 1, 1), 'race_name': 'Paris Marathon', 'position': None } serializer = NullUniquenessTogetherSerializer(data=data) assert serializer.is_valid() def test_ignore_validation_for_missing_nullable_fields(self): data = { 'date': datetime.date(2000, 1, 1), 'race_name': 'Paris Marathon', } serializer = NullUniquenessTogetherSerializer(data=data) assert serializer.is_valid(), serializer.errors def test_do_not_ignore_validation_for_null_fields(self): # None values that are not on fields part of the uniqueness constraint # do not cause the instance to skip validation. NullUniquenessTogetherModel.objects.create( date_of_birth=datetime.date(2000, 1, 1), race_name='Paris Marathon', position=1 ) data = {'date': None, 'race_name': 'Paris Marathon', 'position': 1} serializer = NullUniquenessTogetherSerializer(data=data) assert not serializer.is_valid() def test_ignore_validation_for_unchanged_fields(self): """ If all fields in the unique together constraint are unchanged, then the instance should skip uniqueness validation. 
""" instance = UniquenessTogetherModel.objects.create( race_name="Paris Marathon", position=1 ) data = {"race_name": "Paris Marathon", "position": 1} serializer = UniquenessTogetherSerializer(data=data, instance=instance) with patch( "rest_framework.validators.qs_exists" ) as mock: assert serializer.is_valid() assert not mock.called @patch("rest_framework.validators.qs_exists") def test_unique_together_with_source(self, mock_qs_exists): class UniqueTogetherWithSourceSerializer(serializers.ModelSerializer): name = serializers.CharField(source="race_name") pos = serializers.IntegerField(source="position") class Meta: model = UniquenessTogetherModel fields = ["name", "pos"] data = {"name": "Paris Marathon", "pos": 1} instance = UniquenessTogetherModel.objects.create( race_name="Paris Marathon", position=1 ) serializer = UniqueTogetherWithSourceSerializer(data=data) assert not serializer.is_valid() assert mock_qs_exists.called mock_qs_exists.reset_mock() serializer = UniqueTogetherWithSourceSerializer(data=data, instance=instance) assert serializer.is_valid() assert not mock_qs_exists.called def test_filter_queryset_do_not_skip_existing_attribute(self): """ filter_queryset should add value from existing instance attribute if it is not provided in attributes dict """ class MockQueryset: def filter(self, **kwargs): self.called_with = kwargs data = {'race_name': 'bar'} queryset = MockQueryset() serializer = UniquenessTogetherSerializer(instance=self.instance) validator = UniqueTogetherValidator(queryset, fields=('race_name', 'position')) validator.filter_queryset(attrs=data, queryset=queryset, serializer=serializer) assert queryset.called_with == {'race_name': 'bar', 'position': 1} def test_uniq_together_validation_uses_model_fields_method_field(self): class TestSerializer(serializers.ModelSerializer): position = serializers.SerializerMethodField() def get_position(self, obj): return obj.position or 0 class Meta: model = NullUniquenessTogetherModel fields = ['race_name', 
'position'] serializer = TestSerializer() expected = dedent(""" TestSerializer(): race_name = CharField(max_length=100) position = SerializerMethodField() """) assert repr(serializer) == expected def test_uniq_together_validation_uses_model_fields_with_source_field(self): class TestSerializer(serializers.ModelSerializer): pos = serializers.IntegerField(source='position') class Meta: model = NullUniquenessTogetherModel fields = ['race_name', 'pos'] serializer = TestSerializer() expected = dedent(""" TestSerializer(): race_name = CharField(max_length=100, required=True) pos = IntegerField(source='position') class Meta: validators = [<UniqueTogetherValidator(queryset=NullUniquenessTogetherModel.objects.all(), fields=('race_name', 'pos'))>] """) assert repr(serializer) == expected
TestUniquenessTogetherValidation
python
ray-project__ray
python/ray/autoscaler/v2/instance_manager/node_provider.py
{ "start": 8660, "end": 8943 }
class ____: """ The arguments to launch a node. """ # The node type to launch. node_type: NodeType # Number of nodes to launch. count: int # A unique id that identifies the request. request_id: str @dataclass(frozen=True)
CloudInstanceLaunchRequest
python
sqlalchemy__sqlalchemy
test/orm/test_deprecations.py
{ "start": 52424, "end": 53848 }
class ____(CacheKeyFixture, _poly_fixtures._Polymorphic): run_setup_mappers = "once" run_inserts = None run_deletes = None def _stmt_20(self, *elements): return tuple( elem._statement_20() if isinstance(elem, sa.orm.Query) else elem for elem in elements ) def test_wp_queries(self): Person, Manager, Engineer, Boss = self.classes( "Person", "Manager", "Engineer", "Boss" ) def two(): wp = with_polymorphic(Person, [Manager, Engineer]) return fixture_session().query(wp) def three(): wp = with_polymorphic(Person, [Manager, Engineer]) return fixture_session().query(wp).filter(wp.name == "asdfo") def three_a(): wp = with_polymorphic(Person, [Manager, Engineer], flat=True) return fixture_session().query(wp).filter(wp.name == "asdfo") def five(): subq = ( select(Person) .outerjoin(Manager) .outerjoin(Engineer) .subquery() ) wp = with_polymorphic(Person, [Manager, Engineer], subq) return fixture_session().query(wp).filter(wp.name == "asdfo") self._run_cache_key_fixture( lambda: self._stmt_20(two(), three(), three_a(), five()), compare_values=True, )
PolyCacheKeyTest
python
dagster-io__dagster
python_modules/dagster/dagster/_core/executor/child_process_executor.py
{ "start": 1185, "end": 1643 }
class ____(ABC): """Inherit from this class in order to use this library. The object must be picklable; instantiate it and pass it to _execute_command_in_child_process. """ @abstractmethod def execute(self) -> Iterator[Union[ChildProcessEvent, "DagsterEvent"]]: """This method is invoked in the child process. Yields a sequence of events to be handled by _execute_command_in_child_process. """
ChildProcessCommand
python
openai__openai-python
tests/test_models.py
{ "start": 437, "end": 27204 }
class ____(BaseModel): foo: str @pytest.mark.parametrize("value", ["hello", 1], ids=["correct type", "mismatched"]) def test_basic(value: object) -> None: m = BasicModel.construct(foo=value) assert m.foo == value def test_directly_nested_model() -> None: class NestedModel(BaseModel): nested: BasicModel m = NestedModel.construct(nested={"foo": "Foo!"}) assert m.nested.foo == "Foo!" # mismatched types m = NestedModel.construct(nested="hello!") assert cast(Any, m.nested) == "hello!" def test_optional_nested_model() -> None: class NestedModel(BaseModel): nested: Optional[BasicModel] m1 = NestedModel.construct(nested=None) assert m1.nested is None m2 = NestedModel.construct(nested={"foo": "bar"}) assert m2.nested is not None assert m2.nested.foo == "bar" # mismatched types m3 = NestedModel.construct(nested={"foo"}) assert isinstance(cast(Any, m3.nested), set) assert cast(Any, m3.nested) == {"foo"} def test_list_nested_model() -> None: class NestedModel(BaseModel): nested: List[BasicModel] m = NestedModel.construct(nested=[{"foo": "bar"}, {"foo": "2"}]) assert m.nested is not None assert isinstance(m.nested, list) assert len(m.nested) == 2 assert m.nested[0].foo == "bar" assert m.nested[1].foo == "2" # mismatched types m = NestedModel.construct(nested=True) assert cast(Any, m.nested) is True m = NestedModel.construct(nested=[False]) assert cast(Any, m.nested) == [False] def test_optional_list_nested_model() -> None: class NestedModel(BaseModel): nested: Optional[List[BasicModel]] m1 = NestedModel.construct(nested=[{"foo": "bar"}, {"foo": "2"}]) assert m1.nested is not None assert isinstance(m1.nested, list) assert len(m1.nested) == 2 assert m1.nested[0].foo == "bar" assert m1.nested[1].foo == "2" m2 = NestedModel.construct(nested=None) assert m2.nested is None # mismatched types m3 = NestedModel.construct(nested={1}) assert cast(Any, m3.nested) == {1} m4 = NestedModel.construct(nested=[False]) assert cast(Any, m4.nested) == [False] def 
test_list_optional_items_nested_model() -> None: class NestedModel(BaseModel): nested: List[Optional[BasicModel]] m = NestedModel.construct(nested=[None, {"foo": "bar"}]) assert m.nested is not None assert isinstance(m.nested, list) assert len(m.nested) == 2 assert m.nested[0] is None assert m.nested[1] is not None assert m.nested[1].foo == "bar" # mismatched types m3 = NestedModel.construct(nested="foo") assert cast(Any, m3.nested) == "foo" m4 = NestedModel.construct(nested=[False]) assert cast(Any, m4.nested) == [False] def test_list_mismatched_type() -> None: class NestedModel(BaseModel): nested: List[str] m = NestedModel.construct(nested=False) assert cast(Any, m.nested) is False def test_raw_dictionary() -> None: class NestedModel(BaseModel): nested: Dict[str, str] m = NestedModel.construct(nested={"hello": "world"}) assert m.nested == {"hello": "world"} # mismatched types m = NestedModel.construct(nested=False) assert cast(Any, m.nested) is False def test_nested_dictionary_model() -> None: class NestedModel(BaseModel): nested: Dict[str, BasicModel] m = NestedModel.construct(nested={"hello": {"foo": "bar"}}) assert isinstance(m.nested, dict) assert m.nested["hello"].foo == "bar" # mismatched types m = NestedModel.construct(nested={"hello": False}) assert cast(Any, m.nested["hello"]) is False def test_unknown_fields() -> None: m1 = BasicModel.construct(foo="foo", unknown=1) assert m1.foo == "foo" assert cast(Any, m1).unknown == 1 m2 = BasicModel.construct(foo="foo", unknown={"foo_bar": True}) assert m2.foo == "foo" assert cast(Any, m2).unknown == {"foo_bar": True} assert model_dump(m2) == {"foo": "foo", "unknown": {"foo_bar": True}} def test_strict_validation_unknown_fields() -> None: class Model(BaseModel): foo: str model = parse_obj(Model, dict(foo="hello!", user="Robert")) assert model.foo == "hello!" 
assert cast(Any, model).user == "Robert" assert model_dump(model) == {"foo": "hello!", "user": "Robert"} def test_aliases() -> None: class Model(BaseModel): my_field: int = Field(alias="myField") m = Model.construct(myField=1) assert m.my_field == 1 # mismatched types m = Model.construct(myField={"hello": False}) assert cast(Any, m.my_field) == {"hello": False} def test_repr() -> None: model = BasicModel(foo="bar") assert str(model) == "BasicModel(foo='bar')" assert repr(model) == "BasicModel(foo='bar')" def test_repr_nested_model() -> None: class Child(BaseModel): name: str age: int class Parent(BaseModel): name: str child: Child model = Parent(name="Robert", child=Child(name="Foo", age=5)) assert str(model) == "Parent(name='Robert', child=Child(name='Foo', age=5))" assert repr(model) == "Parent(name='Robert', child=Child(name='Foo', age=5))" def test_optional_list() -> None: class Submodel(BaseModel): name: str class Model(BaseModel): items: Optional[List[Submodel]] m = Model.construct(items=None) assert m.items is None m = Model.construct(items=[]) assert m.items == [] m = Model.construct(items=[{"name": "Robert"}]) assert m.items is not None assert len(m.items) == 1 assert m.items[0].name == "Robert" def test_nested_union_of_models() -> None: class Submodel1(BaseModel): bar: bool class Submodel2(BaseModel): thing: str class Model(BaseModel): foo: Union[Submodel1, Submodel2] m = Model.construct(foo={"thing": "hello"}) assert isinstance(m.foo, Submodel2) assert m.foo.thing == "hello" def test_nested_union_of_mixed_types() -> None: class Submodel1(BaseModel): bar: bool class Model(BaseModel): foo: Union[Submodel1, Literal[True], Literal["CARD_HOLDER"]] m = Model.construct(foo=True) assert m.foo is True m = Model.construct(foo="CARD_HOLDER") assert m.foo == "CARD_HOLDER" m = Model.construct(foo={"bar": False}) assert isinstance(m.foo, Submodel1) assert m.foo.bar is False def test_nested_union_multiple_variants() -> None: class Submodel1(BaseModel): bar: bool class 
Submodel2(BaseModel): thing: str class Submodel3(BaseModel): foo: int class Model(BaseModel): foo: Union[Submodel1, Submodel2, None, Submodel3] m = Model.construct(foo={"thing": "hello"}) assert isinstance(m.foo, Submodel2) assert m.foo.thing == "hello" m = Model.construct(foo=None) assert m.foo is None m = Model.construct() assert m.foo is None m = Model.construct(foo={"foo": "1"}) assert isinstance(m.foo, Submodel3) assert m.foo.foo == 1 def test_nested_union_invalid_data() -> None: class Submodel1(BaseModel): level: int class Submodel2(BaseModel): name: str class Model(BaseModel): foo: Union[Submodel1, Submodel2] m = Model.construct(foo=True) assert cast(bool, m.foo) is True m = Model.construct(foo={"name": 3}) if PYDANTIC_V1: assert isinstance(m.foo, Submodel2) assert m.foo.name == "3" else: assert isinstance(m.foo, Submodel1) assert m.foo.name == 3 # type: ignore def test_list_of_unions() -> None: class Submodel1(BaseModel): level: int class Submodel2(BaseModel): name: str class Model(BaseModel): items: List[Union[Submodel1, Submodel2]] m = Model.construct(items=[{"level": 1}, {"name": "Robert"}]) assert len(m.items) == 2 assert isinstance(m.items[0], Submodel1) assert m.items[0].level == 1 assert isinstance(m.items[1], Submodel2) assert m.items[1].name == "Robert" m = Model.construct(items=[{"level": -1}, 156]) assert len(m.items) == 2 assert isinstance(m.items[0], Submodel1) assert m.items[0].level == -1 assert cast(Any, m.items[1]) == 156 def test_union_of_lists() -> None: class SubModel1(BaseModel): level: int class SubModel2(BaseModel): name: str class Model(BaseModel): items: Union[List[SubModel1], List[SubModel2]] # with one valid entry m = Model.construct(items=[{"name": "Robert"}]) assert len(m.items) == 1 assert isinstance(m.items[0], SubModel2) assert m.items[0].name == "Robert" # with two entries pointing to different types m = Model.construct(items=[{"level": 1}, {"name": "Robert"}]) assert len(m.items) == 2 assert isinstance(m.items[0], 
SubModel1) assert m.items[0].level == 1 assert isinstance(m.items[1], SubModel1) assert cast(Any, m.items[1]).name == "Robert" # with two entries pointing to *completely* different types m = Model.construct(items=[{"level": -1}, 156]) assert len(m.items) == 2 assert isinstance(m.items[0], SubModel1) assert m.items[0].level == -1 assert cast(Any, m.items[1]) == 156 def test_dict_of_union() -> None: class SubModel1(BaseModel): name: str class SubModel2(BaseModel): foo: str class Model(BaseModel): data: Dict[str, Union[SubModel1, SubModel2]] m = Model.construct(data={"hello": {"name": "there"}, "foo": {"foo": "bar"}}) assert len(list(m.data.keys())) == 2 assert isinstance(m.data["hello"], SubModel1) assert m.data["hello"].name == "there" assert isinstance(m.data["foo"], SubModel2) assert m.data["foo"].foo == "bar" # TODO: test mismatched type def test_double_nested_union() -> None: class SubModel1(BaseModel): name: str class SubModel2(BaseModel): bar: str class Model(BaseModel): data: Dict[str, List[Union[SubModel1, SubModel2]]] m = Model.construct(data={"foo": [{"bar": "baz"}, {"name": "Robert"}]}) assert len(m.data["foo"]) == 2 entry1 = m.data["foo"][0] assert isinstance(entry1, SubModel2) assert entry1.bar == "baz" entry2 = m.data["foo"][1] assert isinstance(entry2, SubModel1) assert entry2.name == "Robert" # TODO: test mismatched type def test_union_of_dict() -> None: class SubModel1(BaseModel): name: str class SubModel2(BaseModel): foo: str class Model(BaseModel): data: Union[Dict[str, SubModel1], Dict[str, SubModel2]] m = Model.construct(data={"hello": {"name": "there"}, "foo": {"foo": "bar"}}) assert len(list(m.data.keys())) == 2 assert isinstance(m.data["hello"], SubModel1) assert m.data["hello"].name == "there" assert isinstance(m.data["foo"], SubModel1) assert cast(Any, m.data["foo"]).foo == "bar" def test_iso8601_datetime() -> None: class Model(BaseModel): created_at: datetime expected = datetime(2019, 12, 27, 18, 11, 19, 117000, tzinfo=timezone.utc) if 
PYDANTIC_V1: expected_json = '{"created_at": "2019-12-27T18:11:19.117000+00:00"}' else: expected_json = '{"created_at":"2019-12-27T18:11:19.117000Z"}' model = Model.construct(created_at="2019-12-27T18:11:19.117Z") assert model.created_at == expected assert model_json(model) == expected_json model = parse_obj(Model, dict(created_at="2019-12-27T18:11:19.117Z")) assert model.created_at == expected assert model_json(model) == expected_json def test_does_not_coerce_int() -> None: class Model(BaseModel): bar: int assert Model.construct(bar=1).bar == 1 assert Model.construct(bar=10.9).bar == 10.9 assert Model.construct(bar="19").bar == "19" # type: ignore[comparison-overlap] assert Model.construct(bar=False).bar is False def test_int_to_float_safe_conversion() -> None: class Model(BaseModel): float_field: float m = Model.construct(float_field=10) assert m.float_field == 10.0 assert isinstance(m.float_field, float) m = Model.construct(float_field=10.12) assert m.float_field == 10.12 assert isinstance(m.float_field, float) # number too big m = Model.construct(float_field=2**53 + 1) assert m.float_field == 2**53 + 1 assert isinstance(m.float_field, int) def test_deprecated_alias() -> None: class Model(BaseModel): resource_id: str = Field(alias="model_id") @property def model_id(self) -> str: return self.resource_id m = Model.construct(model_id="id") assert m.model_id == "id" assert m.resource_id == "id" assert m.resource_id is m.model_id m = parse_obj(Model, {"model_id": "id"}) assert m.model_id == "id" assert m.resource_id == "id" assert m.resource_id is m.model_id def test_omitted_fields() -> None: class Model(BaseModel): resource_id: Optional[str] = None m = Model.construct() assert m.resource_id is None assert "resource_id" not in m.model_fields_set m = Model.construct(resource_id=None) assert m.resource_id is None assert "resource_id" in m.model_fields_set m = Model.construct(resource_id="foo") assert m.resource_id == "foo" assert "resource_id" in m.model_fields_set def 
test_to_dict() -> None: class Model(BaseModel): foo: Optional[str] = Field(alias="FOO", default=None) m = Model(FOO="hello") assert m.to_dict() == {"FOO": "hello"} assert m.to_dict(use_api_names=False) == {"foo": "hello"} m2 = Model() assert m2.to_dict() == {} assert m2.to_dict(exclude_unset=False) == {"FOO": None} assert m2.to_dict(exclude_unset=False, exclude_none=True) == {} assert m2.to_dict(exclude_unset=False, exclude_defaults=True) == {} m3 = Model(FOO=None) assert m3.to_dict() == {"FOO": None} assert m3.to_dict(exclude_none=True) == {} assert m3.to_dict(exclude_defaults=True) == {} class Model2(BaseModel): created_at: datetime time_str = "2024-03-21T11:39:01.275859" m4 = Model2.construct(created_at=time_str) assert m4.to_dict(mode="python") == {"created_at": datetime.fromisoformat(time_str)} assert m4.to_dict(mode="json") == {"created_at": time_str} if PYDANTIC_V1: with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): m.to_dict(warnings=False) def test_forwards_compat_model_dump_method() -> None: class Model(BaseModel): foo: Optional[str] = Field(alias="FOO", default=None) m = Model(FOO="hello") assert m.model_dump() == {"foo": "hello"} assert m.model_dump(include={"bar"}) == {} assert m.model_dump(exclude={"foo"}) == {} assert m.model_dump(by_alias=True) == {"FOO": "hello"} m2 = Model() assert m2.model_dump() == {"foo": None} assert m2.model_dump(exclude_unset=True) == {} assert m2.model_dump(exclude_none=True) == {} assert m2.model_dump(exclude_defaults=True) == {} m3 = Model(FOO=None) assert m3.model_dump() == {"foo": None} assert m3.model_dump(exclude_none=True) == {} if PYDANTIC_V1: with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"): m.model_dump(round_trip=True) with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): m.model_dump(warnings=False) def test_compat_method_no_error_for_warnings() -> None: class Model(BaseModel): foo: Optional[str] m = 
Model(foo="hello") assert isinstance(model_dump(m, warnings=False), dict) def test_to_json() -> None: class Model(BaseModel): foo: Optional[str] = Field(alias="FOO", default=None) m = Model(FOO="hello") assert json.loads(m.to_json()) == {"FOO": "hello"} assert json.loads(m.to_json(use_api_names=False)) == {"foo": "hello"} if PYDANTIC_V1: assert m.to_json(indent=None) == '{"FOO": "hello"}' else: assert m.to_json(indent=None) == '{"FOO":"hello"}' m2 = Model() assert json.loads(m2.to_json()) == {} assert json.loads(m2.to_json(exclude_unset=False)) == {"FOO": None} assert json.loads(m2.to_json(exclude_unset=False, exclude_none=True)) == {} assert json.loads(m2.to_json(exclude_unset=False, exclude_defaults=True)) == {} m3 = Model(FOO=None) assert json.loads(m3.to_json()) == {"FOO": None} assert json.loads(m3.to_json(exclude_none=True)) == {} if PYDANTIC_V1: with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): m.to_json(warnings=False) def test_forwards_compat_model_dump_json_method() -> None: class Model(BaseModel): foo: Optional[str] = Field(alias="FOO", default=None) m = Model(FOO="hello") assert json.loads(m.model_dump_json()) == {"foo": "hello"} assert json.loads(m.model_dump_json(include={"bar"})) == {} assert json.loads(m.model_dump_json(include={"foo"})) == {"foo": "hello"} assert json.loads(m.model_dump_json(by_alias=True)) == {"FOO": "hello"} assert m.model_dump_json(indent=2) == '{\n "foo": "hello"\n}' m2 = Model() assert json.loads(m2.model_dump_json()) == {"foo": None} assert json.loads(m2.model_dump_json(exclude_unset=True)) == {} assert json.loads(m2.model_dump_json(exclude_none=True)) == {} assert json.loads(m2.model_dump_json(exclude_defaults=True)) == {} m3 = Model(FOO=None) assert json.loads(m3.model_dump_json()) == {"foo": None} assert json.loads(m3.model_dump_json(exclude_none=True)) == {} if PYDANTIC_V1: with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"): 
m.model_dump_json(round_trip=True) with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): m.model_dump_json(warnings=False) def test_type_compat() -> None: # our model type can be assigned to Pydantic's model type def takes_pydantic(model: pydantic.BaseModel) -> None: # noqa: ARG001 ... class OurModel(BaseModel): foo: Optional[str] = None takes_pydantic(OurModel()) def test_annotated_types() -> None: class Model(BaseModel): value: str m = construct_type( value={"value": "foo"}, type_=cast(Any, Annotated[Model, "random metadata"]), ) assert isinstance(m, Model) assert m.value == "foo" def test_discriminated_unions_invalid_data() -> None: class A(BaseModel): type: Literal["a"] data: str class B(BaseModel): type: Literal["b"] data: int m = construct_type( value={"type": "b", "data": "foo"}, type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="type")]), ) assert isinstance(m, B) assert m.type == "b" assert m.data == "foo" # type: ignore[comparison-overlap] m = construct_type( value={"type": "a", "data": 100}, type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="type")]), ) assert isinstance(m, A) assert m.type == "a" if PYDANTIC_V1: # pydantic v1 automatically converts inputs to strings # if the expected type is a str assert m.data == "100" else: assert m.data == 100 # type: ignore[comparison-overlap] def test_discriminated_unions_unknown_variant() -> None: class A(BaseModel): type: Literal["a"] data: str class B(BaseModel): type: Literal["b"] data: int m = construct_type( value={"type": "c", "data": None, "new_thing": "bar"}, type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="type")]), ) # just chooses the first variant assert isinstance(m, A) assert m.type == "c" # type: ignore[comparison-overlap] assert m.data == None # type: ignore[unreachable] assert m.new_thing == "bar" def test_discriminated_unions_invalid_data_nested_unions() -> None: class A(BaseModel): type: Literal["a"] data: str class 
B(BaseModel): type: Literal["b"] data: int class C(BaseModel): type: Literal["c"] data: bool m = construct_type( value={"type": "b", "data": "foo"}, type_=cast(Any, Annotated[Union[Union[A, B], C], PropertyInfo(discriminator="type")]), ) assert isinstance(m, B) assert m.type == "b" assert m.data == "foo" # type: ignore[comparison-overlap] m = construct_type( value={"type": "c", "data": "foo"}, type_=cast(Any, Annotated[Union[Union[A, B], C], PropertyInfo(discriminator="type")]), ) assert isinstance(m, C) assert m.type == "c" assert m.data == "foo" # type: ignore[comparison-overlap] def test_discriminated_unions_with_aliases_invalid_data() -> None: class A(BaseModel): foo_type: Literal["a"] = Field(alias="type") data: str class B(BaseModel): foo_type: Literal["b"] = Field(alias="type") data: int m = construct_type( value={"type": "b", "data": "foo"}, type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="foo_type")]), ) assert isinstance(m, B) assert m.foo_type == "b" assert m.data == "foo" # type: ignore[comparison-overlap] m = construct_type( value={"type": "a", "data": 100}, type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="foo_type")]), ) assert isinstance(m, A) assert m.foo_type == "a" if PYDANTIC_V1: # pydantic v1 automatically converts inputs to strings # if the expected type is a str assert m.data == "100" else: assert m.data == 100 # type: ignore[comparison-overlap] def test_discriminated_unions_overlapping_discriminators_invalid_data() -> None: class A(BaseModel): type: Literal["a"] data: bool class B(BaseModel): type: Literal["a"] data: int m = construct_type( value={"type": "a", "data": "foo"}, type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="type")]), ) assert isinstance(m, B) assert m.type == "a" assert m.data == "foo" # type: ignore[comparison-overlap] def test_discriminated_unions_invalid_data_uses_cache() -> None: class A(BaseModel): type: Literal["a"] data: str class B(BaseModel): type: Literal["b"] 
data: int UnionType = cast(Any, Union[A, B]) assert not DISCRIMINATOR_CACHE.get(UnionType) m = construct_type( value={"type": "b", "data": "foo"}, type_=cast(Any, Annotated[UnionType, PropertyInfo(discriminator="type")]) ) assert isinstance(m, B) assert m.type == "b" assert m.data == "foo" # type: ignore[comparison-overlap] discriminator = DISCRIMINATOR_CACHE.get(UnionType) assert discriminator is not None m = construct_type( value={"type": "b", "data": "foo"}, type_=cast(Any, Annotated[UnionType, PropertyInfo(discriminator="type")]) ) assert isinstance(m, B) assert m.type == "b" assert m.data == "foo" # type: ignore[comparison-overlap] # if the discriminator details object stays the same between invocations then # we hit the cache assert DISCRIMINATOR_CACHE.get(UnionType) is discriminator @pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1") def test_type_alias_type() -> None: Alias = TypeAliasType("Alias", str) # pyright: ignore class Model(BaseModel): alias: Alias union: Union[int, Alias] m = construct_type(value={"alias": "foo", "union": "bar"}, type_=Model) assert isinstance(m, Model) assert isinstance(m.alias, str) assert m.alias == "foo" assert isinstance(m.union, str) assert m.union == "bar" @pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1") def test_field_named_cls() -> None: class Model(BaseModel): cls: str m = construct_type(value={"cls": "foo"}, type_=Model) assert isinstance(m, Model) assert isinstance(m.cls, str) def test_discriminated_union_case() -> None: class A(BaseModel): type: Literal["a"] data: bool class B(BaseModel): type: Literal["b"] data: List[Union[A, object]] class ModelA(BaseModel): type: Literal["modelA"] data: int class ModelB(BaseModel): type: Literal["modelB"] required: str data: Union[A, B] # when constructing ModelA | ModelB, value data doesn't match ModelB exactly - missing `required` m = construct_type( value={"type": "modelB", "data": {"type": "a", "data": 
True}}, type_=cast(Any, Annotated[Union[ModelA, ModelB], PropertyInfo(discriminator="type")]), ) assert isinstance(m, ModelB) def test_nested_discriminated_union() -> None: class InnerType1(BaseModel): type: Literal["type_1"] class InnerModel(BaseModel): inner_value: str class InnerType2(BaseModel): type: Literal["type_2"] some_inner_model: InnerModel class Type1(BaseModel): base_type: Literal["base_type_1"] value: Annotated[ Union[ InnerType1, InnerType2, ], PropertyInfo(discriminator="type"), ] class Type2(BaseModel): base_type: Literal["base_type_2"] T = Annotated[ Union[ Type1, Type2, ], PropertyInfo(discriminator="base_type"), ] model = construct_type( type_=T, value={ "base_type": "base_type_1", "value": { "type": "type_2", }, }, ) assert isinstance(model, Type1) assert isinstance(model.value, InnerType2) @pytest.mark.skipif(PYDANTIC_V1, reason="this is only supported in pydantic v2 for now") def test_extra_properties() -> None: class Item(BaseModel): prop: int class Model(BaseModel): __pydantic_extra__: Dict[str, Item] = Field(init=False) # pyright: ignore[reportIncompatibleVariableOverride] other: str if TYPE_CHECKING: def __getattr__(self, attr: str) -> Item: ... model = construct_type( type_=Model, value={ "a": {"prop": 1}, "other": "foo", }, ) assert isinstance(model, Model) assert model.a.prop == 1 assert isinstance(model.a, Item) assert model.other == "foo"
BasicModel
python
spyder-ide__spyder
spyder/plugins/findinfiles/widgets/main_widget.py
{ "start": 2619, "end": 29862 }
class ____(PluginMainWidget): """ Find in files main widget. """ # PluginMainWidget constants ENABLE_SPINNER = True MARGIN_TOP = AppStyle.MarginSize + 5 SHOW_MESSAGE_WHEN_EMPTY = True IMAGE_WHEN_EMPTY = "find_empty" MESSAGE_WHEN_EMPTY = _("Nothing searched for yet") DESCRIPTION_WHEN_EMPTY = _( "Search the content of text files in any directory using the search " "box." ) # Other constants REGEX_ERROR = _("Regular expression error") # Signals sig_edit_goto_requested = Signal(str, int, str, int, int) """ This signal will request to open a file in a given row and column using a code editor. Parameters ---------- path: str Path to file. row: int Cursor starting row position. word: str Word to select on given row. start_column: int Starting column of found word. end_column: Ending column of found word. """ sig_finished = Signal() """ This signal is emitted to inform the search process has finished. """ sig_max_results_reached = Signal() """ This signal is emitted to inform the search process has finished due to reaching the maximum number of results. 
""" def __init__(self, name=None, plugin=None, parent=None): if not PYSIDE2: super().__init__(name, plugin, parent=parent) else: PluginMainWidget.__init__(self, name, plugin, parent=parent) self.set_conf('text_color', MAIN_TEXT_COLOR) self.set_conf('hist_limit', MAX_PATH_HISTORY) # Attributes self.text_color = self.get_conf('text_color') self.supported_encodings = self.get_conf('supported_encodings') self.search_thread = None self.running = False self.more_options_action = None self.extras_toolbar = None self._search_in_label_width = None self._exclude_label_width = None self._is_shown = False self._is_first_time = False self.error_icon = ima.icon('error') search_text = self.get_conf('search_text', '') path_history = self.get_conf('path_history', []) exclude = self.get_conf('exclude') if not isinstance(search_text, (list, tuple)): search_text = [search_text] if not isinstance(exclude, (list, tuple)): exclude = [exclude] if not isinstance(path_history, (list, tuple)): path_history = [path_history] # Widgets self.search_text_edit = PatternComboBox( self, items=search_text, adjust_to_minimum=False, id_=FindInFilesWidgetToolbarItems.SearchPatternCombo, items_elide_mode=Qt.ElideMiddle ) self.search_text_edit.lineEdit().setPlaceholderText( _('Write text to search')) self.search_text_edit.setMinimumSize( MIN_COMBOBOX_WIDTH, AppStyle.FindHeight ) # This is necessary to prevent the width of search_text_edit to control # the width of the pane. 
# Fixes spyder-ide/spyder#24188 self.search_text_edit.setMaximumWidth(MAX_COMBOBOX_WIDTH) self.messages_action = QAction(self) self.messages_action.setVisible(False) self.search_text_edit.lineEdit().addAction( self.messages_action, QLineEdit.TrailingPosition ) self.messages_button = ( self.search_text_edit.lineEdit().findChildren(QToolButton)[1] ) self.search_in_label = QLabel(_('Search in:')) self.search_in_label.ID = FindInFilesWidgetToolbarItems.SearchInLabel self.exclude_label = QLabel(_('Exclude:')) self.exclude_label.ID = FindInFilesWidgetToolbarItems.ExcludeLabel self.path_selection_combo = SearchInComboBox( path_history, self, id_=FindInFilesWidgetToolbarItems.SearchInCombo ) self.path_selection_combo.setMinimumSize( MIN_COMBOBOX_WIDTH, AppStyle.FindHeight ) self.path_selection_combo.setMaximumWidth(MAX_COMBOBOX_WIDTH) self.exclude_pattern_edit = PatternComboBox( self, exclude, _("Exclude pattern"), id_=FindInFilesWidgetToolbarItems.ExcludePatternCombo ) self.exclude_pattern_edit.setMinimumSize( MIN_COMBOBOX_WIDTH, AppStyle.FindHeight ) self.exclude_pattern_edit.setMaximumWidth(MAX_COMBOBOX_WIDTH) self.messages_exclude_action = QAction(self) self.messages_exclude_action.setVisible(False) self.exclude_pattern_edit.lineEdit().addAction( self.messages_exclude_action, QLineEdit.TrailingPosition ) self.messages_button_exclude = ( self.exclude_pattern_edit.lineEdit().findChildren(QToolButton)[1] ) self.result_browser = ResultsBrowser( self, text_color=self.text_color, max_results=self.get_conf('max_results'), ) self.set_content_widget(self.result_browser) # Setup exclude_idx = self.get_conf('exclude_index', None) if (exclude_idx is not None and exclude_idx >= 0 and exclude_idx < self.exclude_pattern_edit.count()): self.exclude_pattern_edit.setCurrentIndex(exclude_idx) search_in_index = self.get_conf('search_in_index', None) self.path_selection_combo.set_current_searchpath_index( search_in_index) # Install event filter for search_text_edit & exclude_pattern_edit 
self.search_text_edit.installEventFilter(self) self.exclude_pattern_edit.installEventFilter(self) # Signals self.path_selection_combo.sig_redirect_stdio_requested.connect( self.sig_redirect_stdio_requested) self.search_text_edit.valid.connect(lambda valid: self.find()) self.exclude_pattern_edit.valid.connect(lambda valid: self.find()) self.result_browser.sig_edit_goto_requested.connect( self.sig_edit_goto_requested) self.result_browser.sig_max_results_reached.connect( self.sig_max_results_reached) self.result_browser.sig_max_results_reached.connect( self._stop_and_reset_thread) def eventFilter(self, widget, event): """ Event filter for search_text_edit & exclude_pattern_edit widget. Notes ----- * Reduce space between the messages_button and the clear one. """ # Type check: Prevent error in PySide where 'event' may be of type # QtGui.QPainter (for whatever reason). if not isinstance(event, QEvent): return True if event.type() == QEvent.Paint: if widget == self.exclude_pattern_edit: self.messages_button_exclude.move( self.exclude_pattern_edit.lineEdit().width() - 42, self.messages_button_exclude.y() ) elif widget == self.search_text_edit: self.messages_button.move( self.search_text_edit.lineEdit().width() - 42, self.messages_button.y() ) return super().eventFilter(widget, event) # ---- PluginMainWidget API # ------------------------------------------------------------------------ def get_title(self): return _("Find") def get_focus_widget(self): return self.search_text_edit def setup(self): self.search_regexp_action = self.create_action( FindInFilesWidgetActions.ToggleSearchRegex, text=_('Regular expression'), tip=_('Use regular expressions'), icon=self.create_icon('regex'), toggled=True, initial=self.get_conf('search_text_regexp'), option='search_text_regexp' ) self.case_action = self.create_action( FindInFilesWidgetActions.ToggleExcludeCase, text=_("Case sensitive"), tip=_("Case sensitive search"), icon=self.create_icon("format_letter_case"), toggled=True, 
initial=self.get_conf('case_sensitive'), option='case_sensitive' ) self.find_action = self.create_action( FindInFilesWidgetActions.Find, text=_("&Find in files"), tip=_("Search text"), icon=self.create_icon('find'), triggered=self.find, register_shortcut=False, ) self.exclude_regexp_action = self.create_action( FindInFilesWidgetActions.ToggleExcludeRegex, text=_('Regular expression'), tip=_('Use regular expressions'), icon=self.create_icon('regex'), toggled=True, initial=self.get_conf('exclude_regexp'), option='exclude_regexp' ) self.exclude_case_action = self.create_action( FindInFilesWidgetActions.ToggleCase, text=_("Exclude case sensitive"), tip=_("Exclude case sensitive"), icon=self.create_icon("format_letter_case"), toggled=True, initial=self.get_conf('exclude_case_sensitive'), option='exclude_case_sensitive' ) self.more_options_action = self.create_action( FindInFilesWidgetActions.ToggleMoreOptions, text=_('Show advanced options'), tip=_('Show advanced options'), icon=self.create_icon("options_more"), toggled=True, initial=self.get_conf('more_options'), option='more_options' ) self.set_max_results_action = self.create_action( FindInFilesWidgetActions.MaxResults, text=_('Set maximum number of results'), icon=self.create_icon("transparent"), tip=_('Set maximum number of results'), triggered=lambda x=None: self.set_max_results(), ) # Toolbar toolbar = self.get_main_toolbar() for item in [self.search_text_edit, self.find_action, self.search_regexp_action, self.case_action, self.more_options_action]: self.add_item_to_toolbar( item, toolbar=toolbar, section=FindInFilesWidgetMainToolbarSections.Main, ) # Exclude Toolbar self.extras_toolbar = self.create_toolbar( FindInFilesWidgetToolbars.Exclude) stretcher1 = self.create_stretcher( FindInFilesWidgetToolbarItems.Stretcher1) for item in [self.exclude_label, self.exclude_pattern_edit, self.exclude_regexp_action, stretcher1]: self.add_item_to_toolbar( item, toolbar=self.extras_toolbar, 
section=FindInFilesWidgetExcludeToolbarSections.Main, ) # Location toolbar location_toolbar = self.create_toolbar( FindInFilesWidgetToolbars.Location) stretcher2 = self.create_stretcher( FindInFilesWidgetToolbarItems.Stretcher2) for item in [self.search_in_label, self.path_selection_combo, stretcher2]: self.add_item_to_toolbar( item, toolbar=location_toolbar, section=FindInFilesWidgetLocationToolbarSections.Main, ) menu = self.get_options_menu() self.add_item_to_menu( self.set_max_results_action, menu=menu, ) def update_actions(self): self.find_action.setIcon(self.create_icon( 'stop' if self.running else 'find') ) if self.extras_toolbar and self.more_options_action: self.extras_toolbar.setVisible( self.more_options_action.isChecked()) @on_conf_change(option='more_options') def on_more_options_update(self, value): if value: icon = self.create_icon('options_less') tip = _('Hide advanced options') else: icon = self.create_icon('options_more') tip = _('Show advanced options') if self.extras_toolbar: self.extras_toolbar.setVisible(value) # These adjustments can only be done when the widget is visible. if self._is_shown: if value: # Resize either the search_in or exclude label so that their # comboboxes are aligned to the left. if self._search_in_label_width > self._exclude_label_width: self.exclude_label.setMinimumSize( self._exclude_label_width + (self._search_in_label_width - self._exclude_label_width), AppStyle.FindHeight ) else: self.search_in_label.setMinimumSize( self._search_in_label_width + (self._exclude_label_width - self._search_in_label_width), AppStyle.FindHeight ) else: # Restore initial search_in label width when it's shorter than # exclude_label to not show an empty space next to it. 
if self._search_in_label_width < self._exclude_label_width: self.search_in_label.setMinimumSize( self._search_in_label_width, AppStyle.FindHeight ) if self.more_options_action: self.more_options_action.setIcon(icon) self.more_options_action.setToolTip(tip) @on_conf_change(option='max_results') def on_max_results_update(self, value): self.result_browser.set_max_results(value) # ---- Qt methods # ------------------------------------------------------------------------ def showEvent(self, event): """Adjustments when the widget is shown.""" if not self._is_shown: # Save default widths of search_in and exclude labels self._search_in_label_width = self.search_in_label.size().width() if not self.extras_toolbar.isVisible(): # This correctly computes the exclude label width when the # extras_toolbar is not visible. metrics = QFontMetricsF(self.font()) exclude_text_width = metrics.width(self.exclude_label.text()) self._exclude_label_width = ( math.ceil(exclude_text_width) + self.font().pointSize() ) else: self._exclude_label_width = self.exclude_label.size().width() self._is_shown = True super().showEvent(event) def resizeEvent(self, event): """Adjustments when the widget is resized.""" super().resizeEvent(event) # This is necessary to prevent the width of search_text_edit to control # the width of the pane. # Fixes spyder-ide/spyder#24188 self.search_text_edit.setMaximumWidth(self.width()) # This recomputes the result items width according to this widget's # width, which makes the UI be rendered as expected. # NOTE: Don't debounce or throttle `set_width` because then it wouldn't # do its job as expected. self.result_browser.set_width() # ---- Private API # ------------------------------------------------------------------------ def _get_options(self): """ Get search options. 
""" text_re = self.search_regexp_action.isChecked() exclude_re = self.exclude_regexp_action.isChecked() case_sensitive = self.case_action.isChecked() # Clear fields self.messages_action.setVisible(False) self.messages_exclude_action.setVisible(False) utext = str(self.search_text_edit.currentText()) if not utext: return try: texts = [(utext.encode('utf-8'), 'utf-8')] except UnicodeEncodeError: texts = [] for enc in self.supported_encodings: try: texts.append((utext.encode(enc), enc)) except UnicodeDecodeError: pass exclude = str(self.exclude_pattern_edit.currentText()) if not case_sensitive: texts = [(text[0].lower(), text[1]) for text in texts] file_search = self.path_selection_combo.is_file_search() path = self.path_selection_combo.get_current_searchpath() if not exclude_re: items = [fnmatch.translate(item.strip()) for item in exclude.split(",") if item.strip() != ''] exclude = '|'.join(items) # Validate exclude regular expression if exclude: error_msg = regexp_error_msg(exclude) if error_msg: self._show_error(str(error_msg), True) return None else: exclude = re.compile(exclude) # Validate text regular expression if text_re: error_msg = regexp_error_msg(texts[0][0]) if error_msg: self._show_error(str(error_msg), False) return None else: texts = [(re.compile(x[0]), x[1]) for x in texts] return (path, file_search, exclude, texts, text_re, case_sensitive) def _update_options(self): """ Extract search options from widgets and set the corresponding option. 
""" hist_limit = self.get_conf('hist_limit') search_texts = [str(self.search_text_edit.itemText(index)) for index in range(self.search_text_edit.count())] excludes = [str(self.exclude_pattern_edit.itemText(index)) for index in range(self.exclude_pattern_edit.count())] path_history = self.path_selection_combo.get_external_paths() self.set_conf('path_history', path_history) self.set_conf('search_text', search_texts[:hist_limit]) self.set_conf('exclude', excludes[:hist_limit]) self.set_conf('path_history', path_history[-hist_limit:]) self.set_conf( 'exclude_index', self.exclude_pattern_edit.currentIndex()) self.set_conf( 'search_in_index', self.path_selection_combo.currentIndex()) def _handle_search_complete(self, completed): """ Current search thread has finished. """ self.result_browser.set_sorting(ON) self.result_browser.set_width() self.result_browser.expandAll() if self.search_thread is None: return self.sig_finished.emit() found = self.search_thread.get_results() self._stop_and_reset_thread() if found is not None: self.result_browser.show() self.stop_spinner() self.update_actions() def _stop_and_reset_thread(self, ignore_results=False): """Stop current search thread and clean-up.""" if self.search_thread is not None: if self.search_thread.isRunning(): if ignore_results: self.search_thread.sig_finished.disconnect( self._handle_search_complete) self.search_thread.stop() self.search_thread.wait() self.search_thread.setParent(None) self.search_thread = None self.running = False self.stop_spinner() self.update_actions() # ---- Public API # ------------------------------------------------------------------------ @property def path(self): """Return the current path.""" return self.path_selection_combo.path @property def project_path(self): """Return the current project path.""" return self.path_selection_combo.project_path @property def file_path(self): """Return the current file path.""" return self.path_selection_combo.file_path def set_directory(self, directory): 
""" Set directory as current path. Parameters ---------- directory: str Directory path string. """ self.path_selection_combo.path = osp.abspath(directory) def set_project_path(self, path): """ Set path as current project path. Parameters ---------- path: str Project path string. """ self.path_selection_combo.set_project_path(path) def disable_project_search(self): """Disable project search path in combobox.""" self.path_selection_combo.set_project_path(None) def set_file_path(self, path): """ Set path as current file path. Parameters ---------- path: str File path string. """ self.path_selection_combo.file_path = path def set_search_text(self, text): """ Set current search text. Parameters ---------- text: str Search string. Notes ----- If `text` is empty, focus will be given to the search lineedit and no search will be performed. """ if text: self.search_text_edit.add_text(text) self.search_text_edit.lineEdit().selectAll() self.search_text_edit.setFocus() def find(self): """ Start/stop find action. Notes ----- If there is no search running, this will start the search. If there is a search running, this will stop it. """ # Show result_browser the first time a user performs a search and leave # it shown afterwards. 
if not self._is_first_time: self.show_content_widget() self._is_first_time = True if self.running: self.stop() else: self.start() def stop(self): """Stop find thread.""" self._stop_and_reset_thread() def start(self): """Start find thread.""" options = self._get_options() if options is None: return self._stop_and_reset_thread(ignore_results=True) search_text = self.search_text_edit.currentText() # Update and set options self._update_options() # Setup result_browser self.result_browser.set_path(options[0]) self.result_browser.longest_file_item = '' self.result_browser.longest_line_item = '' # Start self.running = True self.start_spinner() self.search_thread = SearchThread( None, search_text, self.text_color, self.get_conf('max_results') ) self.search_thread.sig_finished.connect(self._handle_search_complete) self.search_thread.sig_file_match.connect( self.result_browser.append_file_result ) self.search_thread.sig_line_match.connect( self.result_browser.append_result ) self.result_browser.clear_title(search_text) self.search_thread.initialize(*self._get_options()) self.search_thread.start() self.update_actions() def add_external_path(self, path): """ Parameters ---------- path: str Path to add to combobox. """ self.path_selection_combo.add_external_path(path) def set_max_results(self, value=None): """ Set maximum amount of results to add to the result browser. Parameters ---------- value: int, optional Number of results. If None an input dialog will be used. Default is None. """ if value is None: # Create dialog dialog = QInputDialog(self) # Set dialog properties dialog.setModal(False) dialog.setWindowTitle(_('Max results')) dialog.setLabelText(_('Set maximum number of results: ')) dialog.setInputMode(QInputDialog.IntInput) dialog.setIntStep(1) dialog.setIntValue(self.get_conf('max_results')) # In order to show the right number of results when max_results is # reached, we can't allow users to introduce less than 2 in this # dialog. 
Since that value seems a bit arbitrary, we decided to set # it to 5. # See spyder-ide/spyder#16256 dialog.setIntRange(5, 10000) # Connect slot dialog.intValueSelected.connect( lambda value: self.set_conf('max_results', value)) dialog.show() else: self.set_conf('max_results', value) # ---- Private API # ------------------------------------------------------------------------ def _show_error(self, error_msg, exclude): """ Show a regexp error message with an icon. Parameters ---------- error_msg: Message to add to the action icon's tooltip. exclude: bool Whether to show the error in the exclude pattern combobox. """ tooltip = self.REGEX_ERROR + ': ' + error_msg icon = self.error_icon if exclude: self.messages_exclude_action.setIcon(icon) self.messages_exclude_action.setToolTip(tooltip) self.messages_exclude_action.setVisible(True) tooltip_search = _("Regular expression error in Exclude field") self.messages_action.setIcon(icon) self.messages_action.setToolTip(tooltip_search) self.messages_action.setVisible(True) else: self.messages_action.setIcon(icon) self.messages_action.setToolTip(tooltip) self.messages_action.setVisible(True) # ---- Test # ----------------------------------------------------------------------------- def test(): """ Run Find in Files widget test. """ # Standard library imports from os.path import dirname import sys from unittest.mock import MagicMock # Local imports from spyder.utils.qthelpers import qapplication app = qapplication() plugin_mock = MagicMock() plugin_mock.CONF_SECTION = 'find_in_files' widget = FindInFilesWidget('find_in_files', plugin=plugin_mock) widget.CONF_SECTION = 'find_in_files' widget._setup() widget.setup() widget.resize(640, 480) widget.show() external_paths = [ dirname(__file__), dirname(dirname(__file__)), dirname(dirname(dirname(__file__))), dirname(dirname(dirname(dirname(__file__)))), ] for path in external_paths: widget.add_external_path(path) sys.exit(app.exec_()) if __name__ == '__main__': test()
FindInFilesWidget
python
PyCQA__pylint
pylint/config/callback_actions.py
{ "start": 7527, "end": 8116 }
class ____(_AccessRunObjectAction): """Turn on errors-only mode. Error mode: * disable all but error messages * disable the 'miscellaneous' checker which can be safely deactivated in debug * disable reports * do not save execution information """ def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: str | Sequence[Any] | None, option_string: str | None = "--errors-only", ) -> None: self.run.linter._error_mode = True
_ErrorsOnlyModeAction
python
vyperlang__vyper
vyper/ast/pre_parser.py
{ "start": 4273, "end": 4619 }
class ____(enum.Enum): NOT_RUNNING = enum.auto() START_SOON = enum.auto() RUNNING = enum.auto() # a simple state machine which allows us to handle loop variable annotations # (which are rejected by the python parser due to pep-526, so we scoop up the # tokens between `:` and `in` and parse them and add them back in later).
ParserState
python
pydantic__pydantic
pydantic/_internal/_decorators.py
{ "start": 5142, "end": 6088 }
class ____: """A container for data from `@model_validator` so that we can access it while building the pydantic-core schema. Attributes: decorator_repr: A class variable representing the decorator string, '@model_validator'. mode: The proposed serializer mode. """ decorator_repr: ClassVar[str] = '@model_validator' mode: Literal['wrap', 'before', 'after'] DecoratorInfo: TypeAlias = """Union[ ValidatorDecoratorInfo, FieldValidatorDecoratorInfo, RootValidatorDecoratorInfo, FieldSerializerDecoratorInfo, ModelSerializerDecoratorInfo, ModelValidatorDecoratorInfo, ComputedFieldInfo, ]""" ReturnType = TypeVar('ReturnType') DecoratedType: TypeAlias = ( 'Union[classmethod[Any, Any, ReturnType], staticmethod[Any, ReturnType], Callable[..., ReturnType], property]' ) @dataclass # can't use slots here since we set attributes on `__post_init__`
ModelValidatorDecoratorInfo
python
python__mypy
mypy/fswatcher.py
{ "start": 309, "end": 3985 }
class ____: """Watcher for file system changes among specific paths. All file system access is performed using FileSystemCache. We detect changed files by stat()ing them all and comparing hashes of potentially changed files. If a file has both size and mtime unmodified, the file is assumed to be unchanged. An important goal of this class is to make it easier to eventually use file system events to detect file changes. Note: This class doesn't flush the file system cache. If you don't manually flush it, changes won't be seen. """ # TODO: Watching directories? # TODO: Handle non-files def __init__(self, fs: FileSystemCache) -> None: self.fs = fs self._paths: set[str] = set() self._file_data: dict[str, FileData | None] = {} def dump_file_data(self) -> dict[str, tuple[float, int, str]]: return {k: v for k, v in self._file_data.items() if v is not None} def set_file_data(self, path: str, data: FileData) -> None: self._file_data[path] = data def add_watched_paths(self, paths: Iterable[str]) -> None: for path in paths: if path not in self._paths: # By storing None this path will get reported as changed by # find_changed if it exists. self._file_data[path] = None self._paths |= set(paths) def remove_watched_paths(self, paths: Iterable[str]) -> None: for path in paths: if path in self._file_data: del self._file_data[path] self._paths -= set(paths) def _update(self, path: str, st: os.stat_result) -> None: hash_digest = self.fs.hash_digest(path) self._file_data[path] = FileData(st.st_mtime, st.st_size, hash_digest) def _find_changed(self, paths: Iterable[str]) -> AbstractSet[str]: changed = set() for path in paths: old = self._file_data[path] st = self.fs.stat_or_none(path) if st is None: if old is not None: # File was deleted. changed.add(path) self._file_data[path] = None else: if old is None: # File is new. 
changed.add(path) self._update(path, st) # Round mtimes down, to match the mtimes we write to meta files elif st.st_size != old.st_size or int(st.st_mtime) != int(old.st_mtime): # Only look for changes if size or mtime has changed as an # optimization, since calculating hash is expensive. new_hash = self.fs.hash_digest(path) self._update(path, st) if st.st_size != old.st_size or new_hash != old.hash: # Changed file. changed.add(path) return changed def find_changed(self) -> AbstractSet[str]: """Return paths that have changes since the last call, in the watched set.""" return self._find_changed(self._paths) def update_changed(self, remove: list[str], update: list[str]) -> AbstractSet[str]: """Alternative to find_changed() given explicit changes. This only calls self.fs.stat() on added or updated files, not on all files. It believes all other files are unchanged! Implies add_watched_paths() for add and update, and remove_watched_paths() for remove. """ self.remove_watched_paths(remove) self.add_watched_paths(update) return self._find_changed(update)
FileSystemWatcher
python
wandb__wandb
wandb/sdk/artifacts/_generated/project_artifact_types.py
{ "start": 277, "end": 369 }
class ____(GQLResult): project: Optional[ProjectArtifactTypesProject]
ProjectArtifactTypes
python
facebookresearch__faiss
tests/test_extra_distances.py
{ "start": 436, "end": 8242 }
class ____(unittest.TestCase): """ check wrt. the scipy implementation """ def make_example(self): rs = np.random.RandomState(123) x = rs.rand(5, 32).astype('float32') y = rs.rand(3, 32).astype('float32') return x, y def run_simple_dis_test(self, ref_func, metric_type): xq, yb = self.make_example() ref_dis = np.array([ [ref_func(x, y) for y in yb] for x in xq ]) new_dis = faiss.pairwise_distances(xq, yb, metric_type) self.assertTrue(np.allclose(ref_dis, new_dis)) def test_L1(self): self.run_simple_dis_test(scipy.spatial.distance.cityblock, faiss.METRIC_L1) def test_Linf(self): self.run_simple_dis_test(scipy.spatial.distance.chebyshev, faiss.METRIC_Linf) def test_L2(self): xq, yb = self.make_example() ref_dis = np.array([ [scipy.spatial.distance.sqeuclidean(x, y) for y in yb] for x in xq ]) new_dis = faiss.pairwise_distances(xq, yb, faiss.METRIC_L2) self.assertTrue(np.allclose(ref_dis, new_dis)) ref_dis = np.array([ [scipy.spatial.distance.euclidean(x, y) for y in yb] for x in xq ]) new_dis = np.sqrt(new_dis) # post processing self.assertTrue(np.allclose(ref_dis, new_dis)) def test_Lp(self): p = 1.5 xq, yb = self.make_example() ref_dis = np.array([ [scipy.spatial.distance.minkowski(x, y, p) for y in yb] for x in xq ]) new_dis = faiss.pairwise_distances(xq, yb, faiss.METRIC_Lp, p) new_dis = new_dis ** (1 / p) # post processing self.assertTrue(np.allclose(ref_dis, new_dis)) def test_canberra(self): self.run_simple_dis_test(scipy.spatial.distance.canberra, faiss.METRIC_Canberra) def test_braycurtis(self): self.run_simple_dis_test(scipy.spatial.distance.braycurtis, faiss.METRIC_BrayCurtis) def xx_test_jensenshannon(self): # this distance does not seem to be implemented in scipy # vectors should probably be L1 normalized self.run_simple_dis_test(scipy.spatial.distance.jensenshannon, faiss.METRIC_JensenShannon) def test_jaccard(self): xq, yb = self.make_example() ref_dis = np.array([ [ (np.min([x, y], axis=0).sum() / np.max([x, y], axis=0).sum()) for y in yb ] for x in xq 
]) new_dis = faiss.pairwise_distances(xq, yb, faiss.METRIC_Jaccard) self.assertTrue(np.allclose(ref_dis, new_dis)) def test_nan_euclidean(self): xq, yb = self.make_example() ref_dis = np.array([ [scipy.spatial.distance.sqeuclidean(x, y) for y in yb] for x in xq ]) new_dis = faiss.pairwise_distances(xq, yb, faiss.METRIC_NaNEuclidean) self.assertTrue(np.allclose(ref_dis, new_dis)) x = [[3, np.nan, np.nan, 6]] q = [[1, np.nan, np.nan, 5]] dis = [(4 / 2 * ((3 - 1)**2 + (6 - 5)**2))] new_dis = faiss.pairwise_distances(x, q, faiss.METRIC_NaNEuclidean) self.assertTrue(np.allclose(new_dis, dis)) x = [[np.nan] * 4] q = [[np.nan] * 4] new_dis = faiss.pairwise_distances(x, q, faiss.METRIC_NaNEuclidean) self.assertTrue(np.isnan(new_dis[0])) def test_gower(self): # Create test data with mixed numeric and categorical features # First 2 dimensions are numeric (0-1), last 2 are categorical # (negative integers) xq = np.array( [ [0.5, 0.3, -1, -2], # First query vector [0.7, 0.8, -1, -3], # Second query vector ], dtype="float32", ) yb = np.array( [ [0.4, 0.2, -1, -2], # Same categories, similar numeric values [0.9, 0.1, -1, -2], # Same categories, different numeric values [0.5, 0.3, -2, -2], # Different first category, same second [0.5, 0.3, -2, -3], # Different categories ], dtype="float32", ) # Compute distances using FAISS dis = faiss.pairwise_distances(xq, yb, faiss.METRIC_GOWER) # Expected distances: # For first query [0.5, 0.3, -1, -2]: # - [0.4, 0.2, -1, -2]: (|0.5-0.4| + |0.3-0.2| + 0 + 0) / 4 = 0.05 # - [0.9, 0.1, -1, -2]: (|0.5-0.9| + |0.3-0.1| + 0 + 0) / 4 = 0.15 # - [0.5, 0.3, -2, -2]: (|0.5-0.5| + |0.3-0.3| + 1 + 0) / 4 = 0.25 # - [0.5, 0.3, -2, -3]: (|0.5-0.5| + |0.3-0.3| + 1 + 1) / 4 = 0.5 # For second query [0.7, 0.8, -1, -3]: # - [0.4, 0.2, -1, -2]: (|0.7-0.4| + |0.8-0.2| + 0 + 1) / 4 = 0.475 # - [0.9, 0.1, -1, -2]: (|0.7-0.9| + |0.8-0.1| + 0 + 1) / 4 = 0.475 # - [0.5, 0.3, -2, -2]: (|0.7-0.5| + |0.8-0.3| + 1 + 1) / 4 = 0.675 # - [0.5, 0.3, -2, -3]: (|0.7-0.5| + 
|0.8-0.3| + 1 + 0) / 4 = 0.425 expected = np.array( [ [0.05, 0.15, 0.25, 0.50], # Distances for first query [0.475, 0.475, 0.675, 0.425], # Distances for second query ], dtype="float32", ) self.assertTrue(np.allclose(dis, expected, rtol=1e-5)) # Test with NaN values xq_nan = np.array( [ [0.5, np.nan, -1, -2], [0.7, 0.8, -1, -3], ], dtype="float32", ) yb_nan = np.array( [ [0.4, 0.2, -1, -2], [0.9, np.nan, -1, -2], ], dtype="float32", ) dis_nan = faiss.pairwise_distances(xq_nan, yb_nan, faiss.METRIC_GOWER) # For first query [0.5, nan, -1, -2]: # - [0.4, 0.2, -1, -2]: (|0.5-0.4| + 0 + 0) / 3 = 0.0333... # - [0.9, nan, -1, -2]: (|0.5-0.9| + 0 + 0) / 3 = 0.1333... # For second query [0.7, 0.8, -1, -3]: # - [0.4, 0.2, -1, -2]: (|0.7-0.4| + |0.8-0.2| + 0 + 1) / 4 = 0.475 # - [0.9, nan, -1, -2]: (|0.7-0.9| + 0 + 0 + 1) / 3 = 0.4 expected_nan = np.array( [ [0.033333, 0.133333], [0.475, 0.4], ], dtype="float32", ) self.assertTrue(np.allclose(dis_nan, expected_nan, rtol=1e-5)) # Test error case: mixing numeric and categorical values xq_mixed = np.array( [ # Second value is categorical but first is numeric [0.5, -1, -1, -2], ], dtype="float32", ) yb_mixed = np.array( [ [0.4, 0.2, -1, -2], # Second value is numeric ], dtype="float32", ) dis_mixed = faiss.pairwise_distances(xq_mixed, yb_mixed, faiss.METRIC_GOWER) self.assertTrue(np.all(np.isnan(dis_mixed))) # Test error case: numeric values outside [0,1] range xq_out_of_range = np.array( [ [1.5, 0.3, -1, -2], # First value is outside [0,1] ], dtype="float32", ) yb_out_of_range = np.array( [ [0.4, 0.2, -1, -2], ], dtype="float32", ) # Should return NaN for invalid data (consistent with other metrics) dis_out_of_range = faiss.pairwise_distances( xq_out_of_range, yb_out_of_range, faiss.METRIC_GOWER ) self.assertTrue(np.all(np.isnan(dis_out_of_range)))
TestExtraDistances
python
langchain-ai__langchain
libs/langchain/langchain_classic/agents/agent_toolkits/vectorstore/toolkit.py
{ "start": 309, "end": 553 }
class ____(BaseModel): """Information about a `VectorStore`.""" vectorstore: VectorStore = Field(exclude=True) name: str description: str model_config = ConfigDict( arbitrary_types_allowed=True, )
VectorStoreInfo
python
ansible__ansible
test/lib/ansible_test/_internal/completion.py
{ "start": 978, "end": 1289 }
class ____(metaclass=abc.ABCMeta): """Base class for completion configuration.""" name: str @property @abc.abstractmethod def is_default(self) -> bool: """True if the completion entry is only used for defaults, otherwise False.""" @dataclasses.dataclass(frozen=True)
CompletionConfig
python
django-extensions__django-extensions
tests/management/commands/test_show_urls.py
{ "start": 1972, "end": 7700 }
class ____(TestCase): @patch("sys.stdout", new_callable=StringIO) def test_should_show_urls_unsorted_but_same_order_as_found_in_url_patterns( self, m_stdout ): call_command("show_urls", "-u", verbosity=3) lines = m_stdout.getvalue().splitlines() self.assertIn( "/lambda/view\ttests.management.commands.test_show_urls.<lambda>", lines[0] ) self.assertIn( "/function/based/\ttests.management.commands.test_show_urls.function_based_view\tfunction-based-view", lines[1], ) self.assertIn( "/class/based/\ttests.management.commands.test_show_urls.ClassView\tclass-based-view", lines[2], ) @patch("sys.stdout", new_callable=StringIO) def test_should_show_urls_sorted_alphabetically(self, m_stdout): call_command("show_urls", verbosity=3) lines = m_stdout.getvalue().splitlines() self.assertEqual( "/class/based/\ttests.management.commands.test_show_urls.ClassView\tclass-based-view", lines[0], ) self.assertEqual( "/function/based/\ttests.management.commands.test_show_urls.function_based_view\tfunction-based-view", lines[1], ) self.assertEqual( "/lambda/view\ttests.management.commands.test_show_urls.<lambda>", lines[2] ) @patch("sys.stdout", new_callable=StringIO) def test_should_show_urls_in_json_format(self, m_stdout): call_command("show_urls", "--format=json") self.assertJSONEqual( m_stdout.getvalue(), [ { "url": "/lambda/view", "module": "tests.management.commands.test_show_urls.<lambda>", "name": "", "decorators": "", }, { "url": "/function/based/", "module": "tests.management.commands.test_show_urls.function_based_view", "name": "function-based-view", "decorators": "", }, { "url": "/class/based/", "module": "tests.management.commands.test_show_urls.ClassView", "name": "class-based-view", "decorators": "", }, ], ) self.assertEqual(len(m_stdout.getvalue().splitlines()), 1) @patch("sys.stdout", new_callable=StringIO) def test_should_show_urls_in_pretty_json_format(self, m_stdout): call_command("show_urls", "--format=pretty-json") self.assertJSONEqual( m_stdout.getvalue(), [ { "url": 
"/lambda/view", "module": "tests.management.commands.test_show_urls.<lambda>", "name": "", "decorators": "", }, { "url": "/function/based/", "module": "tests.management.commands.test_show_urls.function_based_view", "name": "function-based-view", "decorators": "", }, { "url": "/class/based/", "module": "tests.management.commands.test_show_urls.ClassView", "name": "class-based-view", "decorators": "", }, ], ) self.assertEqual(len(m_stdout.getvalue().splitlines()), 20) @patch("sys.stdout", new_callable=StringIO) def test_should_show_urls_in_table_format(self, m_stdout): call_command("show_urls", "--format=table") self.assertIn( "/class/based/ | tests.management.commands.test_show_urls.ClassView | class-based-view |", m_stdout.getvalue(), ) self.assertIn( "/function/based/ | tests.management.commands.test_show_urls.function_based_view | function-based-view |", m_stdout.getvalue(), ) self.assertIn( "/lambda/view | tests.management.commands.test_show_urls.<lambda> | |", m_stdout.getvalue(), ) @patch("sys.stdout", new_callable=StringIO) def test_should_show_urls_in_aligned_format(self, m_stdout): call_command("show_urls", "--format=aligned") lines = m_stdout.getvalue().splitlines() self.assertEqual( "/class/based/ tests.management.commands.test_show_urls.ClassView class-based-view ", lines[0], ) self.assertEqual( "/function/based/ tests.management.commands.test_show_urls.function_based_view function-based-view ", lines[1], ) self.assertEqual( "/lambda/view tests.management.commands.test_show_urls.<lambda> ", lines[2], ) @patch("sys.stdout", new_callable=StringIO) def test_should_show_urls_with_no_color_option(self, m_stdout): call_command("show_urls", "--no-color") lines = m_stdout.getvalue().splitlines() self.assertEqual( "/class/based/\ttests.management.commands.test_show_urls.ClassView\tclass-based-view", lines[0], ) self.assertEqual( "/function/based/\ttests.management.commands.test_show_urls.function_based_view\tfunction-based-view", lines[1], ) self.assertEqual( 
"/lambda/view\ttests.management.commands.test_show_urls.<lambda>", lines[2] )
ShowUrlsTests
python
jina-ai__jina
tests/integration/reduce/test_reduce.py
{ "start": 3671, "end": 3834 }
class ____(Executor): @requests def endpoint(self, docs: DocumentArray, **kwargs): for doc in docs: doc.embedding = np.zeros(3)
Executor3
python
tensorflow__tensorflow
tensorflow/python/framework/error_interpolation_test.py
{ "start": 11323, "end": 11924 }
class ____(test.TestCase): def testAllowsUnitTests(self): self.assertFalse( error_interpolation._is_framework_filename( error_interpolation._FRAMEWORK_PATH_PREFIXES[0] + "foobar_test.py" ) ) def testFrameworkPythonFile(self): self.assertTrue( error_interpolation._is_framework_filename(error_interpolation.__file__) ) def testEmbedded(self): self.assertTrue( error_interpolation._is_framework_filename( "<embedded stdlib>/context_lib.py" ) ) if __name__ == "__main__": test.main()
IsFrameworkFilenameTest
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_image51.py
{ "start": 381, "end": 1735 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("image51.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.insert_image( "E9", self.image_dir + "red.png", {"url": "https://duckduckgo.com/?q=1"} ) worksheet.insert_image( "E13", self.image_dir + "red2.png", {"url": "https://duckduckgo.com/?q=2"} ) workbook.close() self.assertExcelEqual() def test_create_file_with_image_and_url_objects(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() url1 = Url("https://duckduckgo.com/?q=1") url2 = Url("https://duckduckgo.com/?q=2") image1 = Image(self.image_dir + "red.png") image2 = Image(self.image_dir + "red2.png") image1.url = url1 image2.url = url2 worksheet.insert_image("E9", image1) worksheet.insert_image("E13", image2) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
bokeh__bokeh
src/bokeh/core/serialization.py
{ "start": 2546, "end": 2653 }
class ____(TypedDict): type: Literal["number"] value: Literal["nan", "-inf", "+inf"] | float
NumberRep
python
pola-rs__polars
py-polars/src/polars/_utils/cache.py
{ "start": 561, "end": 5927 }
class ____(MutableMapping[K, V]): def __init__(self, maxsize: int) -> None: """ Initialize an LRU (Least Recently Used) cache with a specified maximum size. Parameters ---------- maxsize : int The maximum number of items the cache can hold. Examples -------- >>> from polars._utils.cache import LRUCache >>> cache = LRUCache[str, int](maxsize=3) >>> cache["a"] = 1 >>> cache["b"] = 2 >>> cache["c"] = 3 >>> cache["d"] = 4 # evicts the least recently used item ("a"), as maxsize=3 >>> print(cache["b"]) # accessing "b" marks it as recently used 2 >>> print(list(cache.keys())) # show the current keys in LRU order ['c', 'd', 'b'] >>> cache.get("xyz", "not found") 'not found' """ self._items: OrderedDict[K, V] = OrderedDict() self.maxsize = maxsize def __bool__(self) -> bool: """Returns True if the cache is not empty, False otherwise.""" return bool(self._items) def __contains__(self, key: Any) -> bool: """Check if the key is in the cache.""" return key in self._items def __delitem__(self, key: K) -> None: """Remove the item with the specified key from the cache.""" if key not in self._items: msg = f"{key!r} not found in cache" raise KeyError(msg) del self._items[key] def __getitem__(self, key: K) -> V: """Raises KeyError if the key is not found.""" if key not in self._items: msg = f"{key!r} not found in cache" raise KeyError(msg) # moving accessed items to the end marks them as recently used self._items.move_to_end(key) return self._items[key] def __iter__(self) -> Iterator[K]: """Iterate over the keys in the cache.""" yield from self._items def __len__(self) -> int: """Number of items in the cache.""" return len(self._items) def __setitem__(self, key: K, value: V) -> None: """Insert a value into the cache.""" if self._max_size == 0: return while len(self) >= self._max_size: self.popitem() if key in self: # moving accessed items to the end marks them as recently used self._items.move_to_end(key) self._items[key] = value def __repr__(self) -> str: """Return a string 
representation of the cache.""" all_items = list(self._items.items()) if len(self) > 4: items = ( ", ".join(f"{k!r}: {v!r}" for k, v in all_items[:2]) + " ..., " + ", ".join(f"{k!r}: {v!r}" for k, v in all_items[-2:]) ) else: items = ", ".join(f"{k!r}: {v!r}" for k, v in all_items) return f"{self.__class__.__name__}({{{items}}}, maxsize={self._max_size}, currsize={len(self)})" def clear(self) -> None: """Clear the cache, removing all items.""" self._items.clear() @overload def get(self, key: K, default: None = None) -> V | None: ... @overload def get(self, key: K, default: D = ...) -> V | D: ... def get(self, key: K, default: D | V | None = None) -> V | D | None: """Return value associated with `key` if present, otherwise return `default`.""" if key in self: # moving accessed items to the end marks them as recently used self._items.move_to_end(key) return self._items[key] return default @classmethod def fromkeys(cls, maxsize: int, *, keys: Iterable[K], value: V) -> Self: """Initialize cache with keys from an iterable, all set to the same value.""" cache = cls(maxsize) for key in keys: cache[key] = value return cache def items(self) -> ItemsView[K, V]: """Return an iterable view of the cache's items (keys and values).""" return self._items.items() def keys(self) -> KeysView[K]: """Return an iterable view of the cache's keys.""" return self._items.keys() @property def maxsize(self) -> int: return self._max_size @maxsize.setter def maxsize(self, n: int) -> None: """Set new maximum cache size; cache is trimmed if value is smaller.""" if n < 0: msg = f"`maxsize` cannot be negative; found {n}" raise ValueError(msg) while len(self) > n: self.popitem() self._max_size = n def pop(self, key: K, default: D | NoDefault = no_default) -> V | D: """ Remove specified key from the cache and return the associated value. If the key is not found, `default` is returned (if given). Otherwise, a KeyError is raised. 
""" if (item := self._items.pop(key, default)) is no_default: msg = f"{key!r} not found in cache" raise KeyError(msg) return item def popitem(self) -> tuple[K, V]: """Remove the least recently used value; raises KeyError if cache is empty.""" return self._items.popitem(last=False) def values(self) -> ValuesView[V]: """Return an iterable view of the cache's values.""" return self._items.values()
LRUCache
python
ipython__ipython
tests/test_pretty.py
{ "start": 1127, "end": 1176 }
class ____(Dummy1): _repr_pretty_ = None
Dummy2
python
jazzband__django-oauth-toolkit
oauth2_provider/migrations/0003_auto_20201211_1314.py
{ "start": 92, "end": 386 }
class ____(migrations.Migration): dependencies = [ ('oauth2_provider', '0002_auto_20190406_1805'), ] operations = [ migrations.AlterField( model_name='grant', name='redirect_uri', field=models.TextField(), ), ]
Migration
python
google__pytype
pytype/overlays/flax_overlay.py
{ "start": 3590, "end": 5247 }
class ____(abstract.PyTDClass): """Construct a dataclass for any class inheriting from Module.""" IMPLICIT_FIELDS = ("name", "parent") # 'Module' can also be imported through an alias in flax.linen, but we always # want to use its full, unaliased name. _MODULE = "flax.linen.module" def __init__(self, ctx, module): del module # unused pytd_cls = ctx.loader.lookup_pytd(self._MODULE, "Module") # flax.linen.Module loads as a LateType, we need to convert it and then get # the pytd.Class back out to use in our own constructor. if isinstance(pytd_cls, pytd.Constant): pytd_cls = ctx.convert.constant_to_value(pytd_cls).pytd_cls super().__init__("Module", pytd_cls, ctx) def init_subclass(self, node, cls): # Subclasses of Module call self.setup() when creating instances. cls.additional_init_methods.append("setup") dc = ModuleDataclass.make(self.ctx) cls_var = cls.to_variable(node) args = function.Args(posargs=(cls_var,), namedargs={}) node, _ = dc.call(node, None, args) return node def to_pytd_type_of_instance( self, node=None, instance=None, seen=None, view=None ): """Get the type an instance of us would have.""" # The class is imported as flax.linen.Module but aliases # flax.linen.module.Module internally return pytd.NamedType(self.full_name) @property def full_name(self): # Override the full name here rather than overriding the module name in the # overlay because we might want to overlay other things from flax.linen. return f"{self._MODULE}.{self.name}" def __repr__(self): return f"Overlay({self.full_name})"
Module
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/cache_key.py
{ "start": 1454, "end": 1667 }
class ____(enum.Enum): NO_CACHE = 0 PARAMS = 1 NO_CACHE: Final = CacheConst.NO_CACHE _CacheKeyTraversalType = Union[ "_TraverseInternalsType", Literal[CacheConst.NO_CACHE], Literal[None] ]
CacheConst
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/uninitializedVariable2.py
{ "start": 428, "end": 466 }
class ____(Abstract1): pass @final
B
python
conda__conda
conda/cli/actions.py
{ "start": 640, "end": 1558 }
class ____(Action): """ A derivative of _AppendConstAction and Python 3.8's _ExtendAction """ def __init__( self, option_strings, dest, const, default=None, type=None, choices=None, required=False, help=None, metavar=None, ): super().__init__( option_strings=option_strings, dest=dest, nargs="*", const=const, default=default, type=type, choices=choices, required=required, help=help, metavar=metavar, ) def __call__(self, parser, namespace, values, option_string=None): items = getattr(namespace, self.dest, None) items = [] if items is None else items[:] items.extend(values or [self.const]) setattr(namespace, self.dest, items)
ExtendConstAction
python
apache__avro
lang/py/avro/compatibility.py
{ "start": 1491, "end": 1646 }
class ____(Enum): compatible = "compatible" incompatible = "incompatible" recursion_in_progress = "recursion_in_progress"
SchemaCompatibilityType
python
tensorflow__tensorflow
tensorflow/core/function/trace_type/trace_type_builder.py
{ "start": 4122, "end": 7512 }
class ____(trace.CastContext): """Default casting behaviors.""" def __init__(self, allow_specs=False): self._allow_specs = allow_specs @property def allow_specs(self) -> bool: """Allow TypeSpecs to be casted (instead of the actual CompositeTensors).""" # Public APIs like get_concrete_function allow users to pass in specs # instead which need to pass through input binding etc. return self._allow_specs def from_value(value: Any, context: trace.TracingContext = None) -> trace.TraceType: """Returns a TraceType corresponding to the value based on the context. Args: value: The value to generate a TraceType for. context: The TracingContext to be shared during protocol calls. Returns: A TraceType object representing the given value. """ if context is None: context = InternalTracingContext() if context.is_legacy_signature and isinstance(value, trace.TraceType): return value elif isinstance(value, trace.SupportsTracingProtocol): generated_type = value.__tf_tracing_type__(context) if not isinstance(generated_type, trace.TraceType): raise TypeError( "Expected an instance of TraceType for Tracing Protocol call to " + str(value) + " but got " + str(generated_type)) return generated_type # TODO(b/183107079): Allow these once they're handled properly. if isinstance(value, weakref.ref): raise TypeError( f"weakref input {value} not supported for tf.function." 
) if hasattr(value, "__wrapped__"): return from_value(value.__wrapped__, context) if isinstance(value, list): return default_types.List(*(from_value(c, context) for c in value)) if isinstance(value, tuple): if util.is_namedtuple(value): named_tuple_type = type(value) return default_types.NamedTuple.from_type_and_attributes( named_tuple_type, tuple(from_value(c, context) for c in value)) else: return default_types.Tuple(*(from_value(c, context) for c in value)) if isinstance(value, collections.abc.Mapping): mapping_type = type(value) return default_types.Dict( {k: from_value(value[k], context) for k in value}, mapping_type) if util.is_attrs(value): return default_types.Attrs.from_type_and_attributes( type(value), tuple( from_value(getattr(value, a.name), context) for a in value.__attrs_attrs__)) if util.is_np_ndarray(value): ndarray = value.__array__() return default_types.TENSOR(ndarray.shape, ndarray.dtype) if isinstance(value, custom_nest_protocol.CustomNestProtocol): metadata, components = value.__tf_flatten__() return custom_nest_trace_type.CustomNestTraceType( type(value), metadata, tuple(from_value(c, context) for c in components) ) try: ref = weakref.ref(value) if ref is None: raise TypeError( f"Deleted objects are not valid tf.function arguments, Got {value!r}") else: return default_types.Weakref(ref) except TypeError: try: return default_types.Literal(value) except: raise TypeError( # pylint: disable=raise-missing-from f"Could not generate a generic TraceType for {value!r}." f"Please verify that it is immutable/hashable. Otherwise, consider " f"implementing the Tracing Protocol for it.")
InternalCastContext
python
apache__airflow
providers/google/tests/unit/google/suite/hooks/test_sheets.py
{ "start": 1535, "end": 14352 }
class ____: def setup_method(self): with mock.patch( "airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__", new=mock_base_gcp_hook_default_project_id, ): self.hook = GSheetsHook(gcp_conn_id=GCP_CONN_ID) @mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook._authorize") @mock.patch("airflow.providers.google.suite.hooks.sheets.build") def test_gsheets_client_creation(self, mock_build, mock_authorize): result = self.hook.get_conn() mock_build.assert_called_once() args, kwargs = mock_build.call_args assert kwargs["http"] == mock_authorize.return_value assert kwargs["cache_discovery"] is False assert mock_build.return_value == result @mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook._authorize") @mock.patch("airflow.providers.google.suite.hooks.sheets.build") def test_gsheets_hook_custom_endpoint(self, mock_build, mock_authorize): self.hook.api_endpoint = "https://private.googleapis.com" self.hook.get_conn() mock_build.assert_called_once() _, kwargs = mock_build.call_args client_options = kwargs.get("client_options") if client_options is None: api_endpoint = None else: api_endpoint = getattr(client_options, "api_endpoint", None) assert api_endpoint == "https://private.googleapis.com" @mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn") def test_get_values(self, get_conn): get_method = get_conn.return_value.spreadsheets.return_value.values.return_value.get execute_method = get_method.return_value.execute execute_method.return_value = {"values": VALUES} result = self.hook.get_values( spreadsheet_id=SPREADSHEET_ID, range_=RANGE_, major_dimension=MAJOR_DIMENSION, value_render_option=VALUE_RENDER_OPTION, date_time_render_option=DATE_TIME_RENDER_OPTION, ) assert result is VALUES execute_method.assert_called_once_with(num_retries=NUM_RETRIES) get_method.assert_called_once_with( spreadsheetId=SPREADSHEET_ID, range=RANGE_, majorDimension=MAJOR_DIMENSION, valueRenderOption=VALUE_RENDER_OPTION, 
dateTimeRenderOption=DATE_TIME_RENDER_OPTION, ) @mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn") def test_get_values_empty(self, get_conn): get_method = get_conn.return_value.spreadsheets.return_value.values.return_value.get execute_method = get_method.return_value.execute execute_method.return_value = {} result = self.hook.get_values( spreadsheet_id=SPREADSHEET_ID, range_=RANGE_, major_dimension=MAJOR_DIMENSION, value_render_option=VALUE_RENDER_OPTION, date_time_render_option=DATE_TIME_RENDER_OPTION, ) assert result == [] execute_method.assert_called_once_with(num_retries=NUM_RETRIES) get_method.assert_called_once_with( spreadsheetId=SPREADSHEET_ID, range=RANGE_, majorDimension=MAJOR_DIMENSION, valueRenderOption=VALUE_RENDER_OPTION, dateTimeRenderOption=DATE_TIME_RENDER_OPTION, ) @mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn") def test_batch_get_values(self, get_conn): batch_get_method = get_conn.return_value.spreadsheets.return_value.values.return_value.batchGet execute_method = batch_get_method.return_value.execute execute_method.return_value = API_RESPONSE result = self.hook.batch_get_values( spreadsheet_id=SPREADSHEET_ID, ranges=RANGES, major_dimension=MAJOR_DIMENSION, value_render_option=VALUE_RENDER_OPTION, date_time_render_option=DATE_TIME_RENDER_OPTION, ) assert result is API_RESPONSE execute_method.assert_called_once_with(num_retries=NUM_RETRIES) batch_get_method.assert_called_once_with( spreadsheetId=SPREADSHEET_ID, ranges=RANGES, majorDimension=MAJOR_DIMENSION, valueRenderOption=VALUE_RENDER_OPTION, dateTimeRenderOption=DATE_TIME_RENDER_OPTION, ) @mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn") def test_update_values(self, get_conn): update_method = get_conn.return_value.spreadsheets.return_value.values.return_value.update execute_method = update_method.return_value.execute execute_method.return_value = API_RESPONSE result = self.hook.update_values( 
spreadsheet_id=SPREADSHEET_ID, range_=RANGE_, values=VALUES, major_dimension=MAJOR_DIMENSION, value_input_option=VALUE_INPUT_OPTION, include_values_in_response=INCLUDE_VALUES_IN_RESPONSE, value_render_option=VALUE_RENDER_OPTION, date_time_render_option=DATE_TIME_RENDER_OPTION, ) body = {"range": RANGE_, "majorDimension": MAJOR_DIMENSION, "values": VALUES} assert result is API_RESPONSE execute_method.assert_called_once_with(num_retries=NUM_RETRIES) update_method.assert_called_once_with( spreadsheetId=SPREADSHEET_ID, range=RANGE_, valueInputOption=VALUE_INPUT_OPTION, includeValuesInResponse=INCLUDE_VALUES_IN_RESPONSE, responseValueRenderOption=VALUE_RENDER_OPTION, responseDateTimeRenderOption=DATE_TIME_RENDER_OPTION, body=body, ) @mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn") def test_batch_update_values(self, get_conn): batch_update_method = get_conn.return_value.spreadsheets.return_value.values.return_value.batchUpdate execute_method = batch_update_method.return_value.execute execute_method.return_value = API_RESPONSE result = self.hook.batch_update_values( spreadsheet_id=SPREADSHEET_ID, ranges=RANGES, values=VALUES_BATCH, major_dimension=MAJOR_DIMENSION, value_input_option=VALUE_INPUT_OPTION, include_values_in_response=INCLUDE_VALUES_IN_RESPONSE, value_render_option=VALUE_RENDER_OPTION, date_time_render_option=DATE_TIME_RENDER_OPTION, ) data = [] for idx, range_ in enumerate(RANGES): value_range = {"range": range_, "majorDimension": MAJOR_DIMENSION, "values": VALUES_BATCH[idx]} data.append(value_range) body = { "valueInputOption": VALUE_INPUT_OPTION, "data": data, "includeValuesInResponse": INCLUDE_VALUES_IN_RESPONSE, "responseValueRenderOption": VALUE_RENDER_OPTION, "responseDateTimeRenderOption": DATE_TIME_RENDER_OPTION, } assert result is API_RESPONSE execute_method.assert_called_once_with(num_retries=NUM_RETRIES) batch_update_method.assert_called_once_with(spreadsheetId=SPREADSHEET_ID, body=body) 
@mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn") def test_batch_update_values_with_bad_data(self, get_conn): batch_update_method = get_conn.return_value.spreadsheets.return_value.values.return_value.batchUpdate execute_method = batch_update_method.return_value.execute execute_method.return_value = API_RESPONSE with pytest.raises(AirflowException) as ctx: self.hook.batch_update_values( spreadsheet_id=SPREADSHEET_ID, ranges=["test!A1:B2", "test!C1:C2"], values=[[1, 2, 3]], # bad data major_dimension=MAJOR_DIMENSION, value_input_option=VALUE_INPUT_OPTION, include_values_in_response=INCLUDE_VALUES_IN_RESPONSE, value_render_option=VALUE_RENDER_OPTION, date_time_render_option=DATE_TIME_RENDER_OPTION, ) batch_update_method.assert_not_called() execute_method.assert_not_called() err = ctx.value assert "must be of equal length." in str(err) @mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn") def test_append_values(self, get_conn): append_method = get_conn.return_value.spreadsheets.return_value.values.return_value.append execute_method = append_method.return_value.execute execute_method.return_value = API_RESPONSE result = self.hook.append_values( spreadsheet_id=SPREADSHEET_ID, range_=RANGE_, values=VALUES, major_dimension=MAJOR_DIMENSION, value_input_option=VALUE_INPUT_OPTION, insert_data_option=INSERT_DATA_OPTION, include_values_in_response=INCLUDE_VALUES_IN_RESPONSE, value_render_option=VALUE_RENDER_OPTION, date_time_render_option=DATE_TIME_RENDER_OPTION, ) body = {"range": RANGE_, "majorDimension": MAJOR_DIMENSION, "values": VALUES} assert result is API_RESPONSE execute_method.assert_called_once_with(num_retries=NUM_RETRIES) append_method.assert_called_once_with( spreadsheetId=SPREADSHEET_ID, range=RANGE_, valueInputOption=VALUE_INPUT_OPTION, insertDataOption=INSERT_DATA_OPTION, includeValuesInResponse=INCLUDE_VALUES_IN_RESPONSE, responseValueRenderOption=VALUE_RENDER_OPTION, 
responseDateTimeRenderOption=DATE_TIME_RENDER_OPTION, body=body, ) @mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn") def test_clear_values(self, get_conn): clear_method = get_conn.return_value.spreadsheets.return_value.values.return_value.clear execute_method = clear_method.return_value.execute execute_method.return_value = API_RESPONSE result = self.hook.clear(spreadsheet_id=SPREADSHEET_ID, range_=RANGE_) assert result is API_RESPONSE execute_method.assert_called_once_with(num_retries=NUM_RETRIES) clear_method.assert_called_once_with(spreadsheetId=SPREADSHEET_ID, range=RANGE_) @mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn") def test_batch_clear_values(self, get_conn): batch_clear_method = get_conn.return_value.spreadsheets.return_value.values.return_value.batchClear execute_method = batch_clear_method.return_value.execute execute_method.return_value = API_RESPONSE result = self.hook.batch_clear(spreadsheet_id=SPREADSHEET_ID, ranges=RANGES) body = {"ranges": RANGES} assert result is API_RESPONSE execute_method.assert_called_once_with(num_retries=NUM_RETRIES) batch_clear_method.assert_called_once_with(spreadsheetId=SPREADSHEET_ID, body=body) @mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn") def test_get_spreadsheet(self, mock_get_conn): get_mock = mock_get_conn.return_value.spreadsheets.return_value.get get_mock.return_value.execute.return_value = API_RESPONSE result = self.hook.get_spreadsheet(spreadsheet_id=SPREADSHEET_ID) get_mock.assert_called_once_with(spreadsheetId=SPREADSHEET_ID) assert result == API_RESPONSE @mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_spreadsheet") def test_get_sheet_titles(self, mock_get_spreadsheet): sheet1 = {"properties": {"title": "title1"}} sheet2 = {"properties": {"title": "title2"}} mock_get_spreadsheet.return_value = {"sheets": [sheet1, sheet2]} result = self.hook.get_sheet_titles(spreadsheet_id=SPREADSHEET_ID) 
mock_get_spreadsheet.assert_called_once_with(spreadsheet_id=SPREADSHEET_ID) assert result == ["title1", "title2"] result = self.hook.get_sheet_titles(spreadsheet_id=SPREADSHEET_ID, sheet_filter=["title1"]) assert result == ["title1"] @mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn") def test_create_spreadsheet(self, mock_get_conn): spreadsheet = mock.MagicMock() create_mock = mock_get_conn.return_value.spreadsheets.return_value.create create_mock.return_value.execute.return_value = API_RESPONSE result = self.hook.create_spreadsheet(spreadsheet=spreadsheet) create_mock.assert_called_once_with(body=spreadsheet) assert result == API_RESPONSE
TestGSheetsHook
python
pytorch__pytorch
test/dynamo/test_autograd_function.py
{ "start": 4802, "end": 4922 }
class ____(torch.nn.Module):
    # Minimal nn.Module wrapper around the custom autograd Function
    # CustomFuncSaveForBwd (defined elsewhere in this test file), so its
    # save-for-backward behavior can be exercised through a module call.
    def forward(self, foo):
        # Function.apply runs the custom forward and registers its backward.
        return CustomFuncSaveForBwd().apply(foo)
SaveForBwdModule
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-gitbook/tests/test_simple_gitbook_reader.py
{ "start": 390, "end": 2428 }
class ____: """Mock class that simulates the GitBook API client.""" def __init__(self, api_token: str, api_url: Optional[str] = None): self.pages_data = [ { "id": "page1", "title": "Getting Started", "path": "/getting-started", "description": "Guide to get started", "parent": None, }, { "id": "page2", "title": "Advanced Features", "path": "/advanced", "description": "Advanced usage guide", "parent": "page1", }, ] self.page_content = { "page1": { "title": "Getting Started", "path": "/getting-started", "description": "Guide to get started", "parent": None, "markdown": "# Getting Started\n\nThis is a guide to get started.\n\n## Introduction\nLearn the basic usage.", }, "page2": { "title": "Advanced Features", "path": "/advanced", "description": "Advanced usage guide", "parent": "page1", "markdown": "# Advanced Features\n\nThis document is for advanced users.\n\n## Detailed Settings\nLearn about advanced configuration options.", }, } def list_pages(self, space_id: str) -> List[Dict]: """Returns list of pages in a given space.""" if space_id == "non_existent_space": return [] return self.pages_data def get_page(self, space_id: str, page_id: str) -> Dict: """Returns the content of a specific page.""" return self.page_content.get(page_id, {"markdown": ""}) def get_page_markdown(self, space_id, page_id) -> str: """Returns the content of a specific page in Markdown format.""" page_content = self.get_page(space_id, page_id) return page_content.get("markdown")
MockGitbookClient
python
PyCQA__pylint
doc/data/messages/t/too-many-public-methods/bad.py
{ "start": 0, "end": 528 }
class ____:  # [too-many-public-methods]
    """Deliberate lint example: a class exposing more public methods than the
    configured ``max-public-methods`` limit, used to demonstrate pylint's
    ``too-many-public-methods`` message (the marker comment above is the
    expected diagnostic)."""

    def __init__(self):
        pass

    def fire_laser_beam(self):
        pass

    def deploy_shield(self):
        pass

    def launch_missile(self):
        pass

    def activate_super_laser(self):
        pass

    def summon_mothership(self):
        pass

    def destroy_planet(self):
        pass

    def teleport(self):
        pass

    def invoke_aliens(self):
        pass

    def invade_earth(self):
        pass

    def takeover_galaxy(self):
        pass
SpaceInvaders
python
getsentry__sentry
src/sentry/db/deletion.py
{ "start": 397, "end": 4221 }
class ____:
    """Chunked bulk deleter for large Django tables.

    Builds a raw ``DELETE ... WHERE id = any(array(SELECT ... LIMIT n))``
    statement so rows are removed in bounded batches instead of one huge
    statement. Optionally scoped to a project/organization and to rows whose
    ``dtfield`` is older than ``days``.
    """

    def __init__(
        self,
        model: type[BaseModel],
        project_id: int | None = None,
        organization_id: int | None = None,
        dtfield: str | None = None,
        days: int | None = None,
        order_by: str | None = None,
    ):
        self.model = model
        # Coerce ids to int defensively (they may arrive as strings); any
        # falsy value collapses to None.
        self.project_id = int(project_id) if project_id else None
        self.organization_id = int(organization_id) if organization_id else None
        self.dtfield = dtfield
        self.days = int(days) if days is not None else None
        self.order_by = order_by
        # Use the database this model's writes are routed to.
        self.using = router.db_for_write(model)

    def execute(self, chunk_size: int = 10000) -> None:
        """Repeatedly delete up to ``chunk_size`` matching rows until none remain."""
        quote_name = connections[self.using].ops.quote_name

        where = []
        if self.dtfield and self.days is not None:
            # The cutoff is computed once up front; rows strictly older than
            # it are eligible for deletion.
            where.append(
                "{} < '{}'::timestamptz".format(
                    quote_name(self.dtfield),
                    (timezone.now() - timedelta(days=self.days)).isoformat(),
                )
            )
        if self.project_id:
            where.append(f"project_id = {self.project_id}")
        if self.organization_id:
            where.append(f"organization_id = {self.organization_id}")

        if where:
            where_clause = "where {}".format(" and ".join(where))
        else:
            where_clause = ""

        if self.order_by:
            # A leading "-" means descending, mirroring Django's order_by syntax.
            if self.order_by[0] == "-":
                direction = "desc"
                order_field = self.order_by[1:]
            else:
                direction = "asc"
                order_field = self.order_by
            order_clause = f"order by {quote_name(order_field)} {direction}"
        else:
            order_clause = ""

        # Postgres-specific batched delete: select a bounded id set, delete by
        # id, and loop (via _continuous_query) until a pass affects no rows.
        query = """
            delete from {table}
            where id = any(array(
                select id
                from {table}
                {where}
                {order}
                limit {chunk_size}
            ));
        """.format(
            table=self.model._meta.db_table,
            chunk_size=chunk_size,
            where=where_clause,
            order=order_clause,
        )

        return self._continuous_query(query)

    def _continuous_query(self, query: str) -> None:
        # Re-run the chunked delete until rowcount reports zero affected rows.
        results = True
        cursor = connections[self.using].cursor()
        while results:
            cursor.execute(query)
            results = cursor.rowcount > 0

    def iterator(
        self, chunk_size: int = 100, batch_size: int = 10000
    ) -> Generator[tuple[int, ...]]:
        """Yield tuples of primary keys matching the cutoff, ``chunk_size`` at a time.

        Read-only counterpart of :meth:`execute`; requires ``days`` and
        ``dtfield`` to be configured.
        """
        assert self.days is not None
        assert self.dtfield is not None
        cutoff = timezone.now() - timedelta(days=self.days)

        queryset = self.model.objects.filter(**{f"{self.dtfield}__lt": cutoff})
        if self.project_id:
            queryset = queryset.filter(project_id=self.project_id)  # type: ignore[misc]
        if self.organization_id:
            queryset = queryset.filter(organization_id=self.organization_id)  # type: ignore[misc]

        # RangeQuerySetWrapper encodes descending traversal as a negative step.
        step = batch_size
        order_field = "id"
        if self.order_by:
            if self.order_by[0] == "-":
                step = -batch_size
                order_field = self.order_by[1:]
            else:
                order_field = self.order_by

        # values_list returns tuples of (id, order_field_value)
        queryset_values: QuerySet[Any, tuple[Any, Any]] = queryset.values_list("id", order_field)
        wrapper = RangeQuerySetWrapper(
            queryset_values,
            step=step,
            order_by=order_field,
            override_unique_safety_check=True,
            result_value_getter=lambda item: item[1],
            query_timeout_retries=10,
        )
        for batch in itertools.batched(wrapper, chunk_size):
            yield tuple(item[0] for item in batch)
BulkDeleteQuery
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 48898, "end": 50954 }
class ____(BaseModel, extra="forbid"):
    """Partial (diff) HNSW index configuration.

    Every field is optional: unset fields leave the corresponding server-side
    setting unchanged. Unknown fields are rejected (``extra="forbid"``).
    """

    m: Optional[int] = Field(
        default=None,
        description="Number of edges per node in the index graph. Larger the value - more accurate the search, more space required.",
    )
    ef_construct: Optional[int] = Field(
        default=None,
        description="Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index.",
    )
    full_scan_threshold: Optional[int] = Field(
        default=None,
        description="Minimal size threshold (in KiloBytes) below which full-scan is preferred over HNSW search. This measures the total size of vectors being queried against. When the maximum estimated amount of points that a condition satisfies is smaller than `full_scan_threshold_kb`, the query planner will use full-scan search instead of HNSW index traversal for better performance. Note: 1Kb = 1 vector of size 256",
    )
    max_indexing_threads: Optional[int] = Field(
        default=None,
        description="Number of parallel threads used for background index building. If 0 - automatically select from 8 to 16. Best to keep between 8 and 16 to prevent likelihood of building broken/inefficient HNSW graphs. On small CPUs, less threads are used.",
    )
    on_disk: Optional[bool] = Field(
        default=None,
        description="Store HNSW index on disk. If set to false, the index will be stored in RAM. Default: false",
    )
    payload_m: Optional[int] = Field(
        default=None,
        description="Custom M param for additional payload-aware HNSW links. If not set, default M will be used.",
    )
    inline_storage: Optional[bool] = Field(
        default=None,
        description="Store copies of original and quantized vectors within the HNSW index file. Default: false. Enabling this option will trade the search speed for disk usage by reducing amount of random seeks during the search. Requires quantized vectors to be enabled. Multi-vectors are not supported.",
    )
HnswConfigDiff
python
google__pytype
pytype/pyc/opcodes.py
{ "start": 13206, "end": 13324 }
class ____(OpcodeWithArg):  # Loads local variable number
    # CPython LOAD_FAST opcode: the argument indexes the function's
    # local-variable table, hence HAS_LOCAL | HAS_ARGUMENT.
    _FLAGS = HAS_LOCAL | HAS_ARGUMENT
    __slots__ = ()  # no per-instance attributes beyond the base class
LOAD_FAST
python
pytorch__pytorch
torch/optim/adamw.py
{ "start": 307, "end": 7477 }
class ____(Adam):
    # AdamW is Adam with *decoupled* weight decay: all actual optimizer work
    # is delegated to the Adam base class with decoupled_weight_decay=True.
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-3,
        betas: tuple[Union[float, Tensor], Union[float, Tensor]] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 1e-2,
        amsgrad: bool = False,
        *,
        maximize: bool = False,
        foreach: Optional[bool] = None,
        capturable: bool = False,
        differentiable: bool = False,
        fused: Optional[bool] = None,
    ) -> None:
        super().__init__(
            params,
            lr,
            betas,
            eps,
            weight_decay,
            amsgrad,
            foreach=foreach,
            maximize=maximize,
            capturable=capturable,
            differentiable=differentiable,
            fused=fused,
            decoupled_weight_decay=True,  # the one thing that makes this AdamW
        )

    # Preserve decoupled_weight_decay from AdamW for backwards compatibility. The following
    # guarantees that decoupled_weight_decay will always be True for loading any state into
    # AdamW
    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group["decoupled_weight_decay"] = True


AdamW.__doc__ = (
    r"""Implements AdamW algorithm, where weight decay does not accumulate in the momentum nor variance.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt} \\
            &\textbf{input} : \gamma \text{(lr)}, \: \beta_1, \beta_2 \text{(betas)},
                \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},
                \: \epsilon \text{ (epsilon)} \\
            &\hspace{13mm} \lambda \text{(weight decay)}, \: \textit{amsgrad},
                \: \textit{maximize} \\
            &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)},
                v_0 \leftarrow 0 \text{ ( second moment)}, \: v_0^{max}\leftarrow 0 \\[-1.ex]
            &\rule{110mm}{0.4pt} \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
            &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\
            &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\
            &\hspace{5mm}\textbf{else} \\
            &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
            &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\
            &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
            &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
            &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
            &\hspace{5mm}\textbf{if} \: amsgrad \\
            &\hspace{10mm} v_t^{max} \leftarrow \mathrm{max}(v_{t-1}^{max},v_t) \\
            &\hspace{10mm}\widehat{v_t} \leftarrow v_t^{max}/\big(1-\beta_2^t \big) \\
            &\hspace{5mm}\textbf{else} \\
            &\hspace{10mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\
            &\hspace{5mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\
            &\rule{110mm}{0.4pt} \\[-1.ex]
            &\bf{return} \: \theta_t \\[-1.ex]
            &\rule{110mm}{0.4pt} \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_.
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
            is not yet supported for all our implementations. Please use a float
            LR if you are not also specifying fused=True or capturable=True.
        betas (tuple[Union[float, Tensor], Union[float, Tensor]], optional): coefficients used
            for computing running averages of gradient and its square.
            If a tensor is provided, must be 1-element. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
        {_maximize_doc}
        {_foreach_doc}
        {_capturable_doc}
        {_differentiable_doc}
        {_fused_doc}
    .. Note::
        A prototype implementation of Adam and AdamW for MPS supports `torch.float32` and `torch.float16`.
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ

    """
)


# @_disable_dynamo_if_unsupported logic occurs in the decorator that's applied to F.adam
def adamw(
    params: list[Tensor],
    grads: list[Tensor],
    exp_avgs: list[Tensor],
    exp_avg_sqs: list[Tensor],
    max_exp_avg_sqs: list[Tensor],
    state_steps: list[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    foreach: Optional[bool] = None,
    capturable: bool = False,
    differentiable: bool = False,
    fused: Optional[bool] = None,
    grad_scale: Optional[Tensor] = None,
    found_inf: Optional[Tensor] = None,
    has_complex: bool = False,
    *,
    amsgrad: bool,
    beta1: Union[float, Tensor],
    beta2: Union[float, Tensor],
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
) -> None:
    r"""Functional API that performs AdamW algorithm computation.

    See :class:`~torch.optim.AdamW` for details.
    """
    # Delegate to the shared functional Adam kernel; decoupled_weight_decay=True
    # is what distinguishes AdamW from plain Adam here.
    adam(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
        foreach=foreach,
        capturable=capturable,
        differentiable=differentiable,
        fused=fused,
        grad_scale=grad_scale,
        found_inf=found_inf,
        has_complex=has_complex,
        amsgrad=amsgrad,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
        decoupled_weight_decay=True,
    )
AdamW
python
gevent__gevent
src/gevent/_fileobjectcommon.py
{ "start": 759, "end": 1931 }
class ____(io.TextIOWrapper): """ Uses TextWrapper to decode universal newlines, but returns the results as bytes. This is for Python 2 where the 'rU' mode did that. """ mode = None def __init__(self, fobj, line_buffering): # latin-1 has the ability to round-trip arbitrary bytes. io.TextIOWrapper.__init__(self, fobj, encoding='latin-1', newline=None, line_buffering=line_buffering) def read(self, *args, **kwargs): result = io.TextIOWrapper.read(self, *args, **kwargs) return result.encode('latin-1') def readline(self, limit=-1): result = io.TextIOWrapper.readline(self, limit) return result.encode('latin-1') def __iter__(self): # readlines() is implemented in terms of __iter__ # and TextIOWrapper.__iter__ checks that readline returns # a unicode object, which we don't, so we override return self def __next__(self): line = self.readline() if not line: raise StopIteration return line next = __next__
UniversalNewlineBytesWrapper
python
doocs__leetcode
solution/1600-1699/1628.Design an Expression Tree With Evaluate Function/Solution.py
{ "start": 972, "end": 1443 }
class ____(object):
    """Builds a binary expression tree from a postfix token sequence."""

    def buildTree(self, postfix: List[str]) -> 'Node':
        # Operands become leaves; an operator pops its two operands
        # (right first, since it was pushed last) and becomes their parent.
        stack = []
        for token in postfix:
            node = MyNode(token)
            if not token.isdigit():
                node.right, node.left = stack.pop(), stack.pop()
            stack.append(node)
        return stack[-1]


"""
Your TreeBuilder object will be instantiated and called as such:
obj = TreeBuilder();
expTree = obj.buildTree(postfix);
ans = expTree.evaluate();
"""
TreeBuilder
python
encode__django-rest-framework
tests/test_serializer_lists.py
{ "start": 287, "end": 761 }
class ____: """ A mock object for testing serializer save behavior. """ def __init__(self, **kwargs): self._data = kwargs for key, value in kwargs.items(): setattr(self, key, value) def __eq__(self, other): if self._data.keys() != other._data.keys(): return False for key in self._data: if self._data[key] != other._data[key]: return False return True
BasicObject
python
chroma-core__chroma
chromadb/segment/__init__.py
{ "start": 1796, "end": 2358 }
class ____(SegmentImplementation):
    """Embedding Metadata segment interface"""

    @abstractmethod
    def get_metadata(
        self,
        request_version_context: RequestVersionContext,
        where: Optional[Where] = None,
        where_document: Optional[WhereDocument] = None,
        ids: Optional[Sequence[str]] = None,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
        include_metadata: bool = True,
    ) -> Sequence[MetadataEmbeddingRecord]:
        """Query for embedding metadata.

        Args:
            request_version_context: Version context for the request.
            where: Optional metadata filter.
            where_document: Optional document-content filter.
            ids: Optional record ids to restrict the query to.
            limit: Maximum number of records to return (None = unbounded).
            offset: Number of matching records to skip before returning.
            include_metadata: If False, implementations may omit the metadata
                payload from the returned records.
        """
        pass
MetadataReader
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/sensors/glue.py
{ "start": 6156, "end": 11590 }
class ____(AwsBaseSensor[GlueDataQualityHook]):
    """
    Waits for an AWS Glue data quality ruleset evaluation run to reach any of the status below.

    'FAILED', 'STOPPED', 'STOPPING', 'TIMEOUT', 'SUCCEEDED'

    .. seealso::
        For more information on how to use this sensor, take a look at the guide:
        :ref:`howto/sensor:GlueDataQualityRuleSetEvaluationRunSensor`

    :param evaluation_run_id: The AWS Glue data quality ruleset evaluation run identifier.
    :param verify_result_status: Validate all the ruleset rules evaluation run results,
        If any of the rule status is Fail or Error then an exception is thrown. (default: True)
    :param show_results: Displays all the ruleset rules evaluation run results. (default: True)
    :param deferrable: If True, the sensor will operate in deferrable mode. This mode requires
        aiobotocore module to be installed.
        (default: False, but can be overridden in config file by setting default_deferrable to True)
    :param poke_interval: Polling period in seconds to check for the status of the job. (default: 120)
    :param max_retries: Number of times before returning the current state. (default: 60)
    :param aws_conn_id: The Airflow connection used for AWS credentials.
        If this is ``None`` or empty then the default boto3 behaviour is used. If
        running Airflow in a distributed manner and aws_conn_id is None or
        empty, then default boto3 configuration would be used (and must be
        maintained on each worker node).
    :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
    :param verify: Whether to verify SSL certificates. See:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
    :param botocore_config: Configuration dictionary (key-values) for botocore client.
        See: https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
    """

    # Terminal states of a ruleset evaluation run.
    SUCCESS_STATES = ("SUCCEEDED",)
    FAILURE_STATES = ("FAILED", "STOPPED", "STOPPING", "TIMEOUT")

    aws_hook_class = GlueDataQualityHook
    template_fields: Sequence[str] = aws_template_fields("evaluation_run_id")

    def __init__(
        self,
        *,
        evaluation_run_id: str,
        show_results: bool = True,
        verify_result_status: bool = True,
        deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
        poke_interval: int = 120,
        max_retries: int = 60,
        aws_conn_id: str | None = "aws_default",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.evaluation_run_id = evaluation_run_id
        self.show_results = show_results
        self.verify_result_status = verify_result_status
        self.aws_conn_id = aws_conn_id
        self.max_retries = max_retries
        self.poke_interval = poke_interval
        self.deferrable = deferrable

    def execute(self, context: Context) -> Any:
        if self.deferrable:
            # Deferrable mode: hand the wait off to the triggerer instead of
            # occupying a worker slot while polling.
            self.defer(
                trigger=GlueDataQualityRuleSetEvaluationRunCompleteTrigger(
                    evaluation_run_id=self.evaluation_run_id,
                    waiter_delay=int(self.poke_interval),
                    waiter_max_attempts=self.max_retries,
                    aws_conn_id=self.aws_conn_id,
                ),
                method_name="execute_complete",
            )
        else:
            # Synchronous mode: fall back to the base sensor's poke loop.
            super().execute(context=context)

    def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
        # Callback invoked when the trigger fires in deferrable mode.
        validated_event = validate_execute_complete_event(event)

        if validated_event["status"] != "success":
            message = f"Error: AWS Glue data quality ruleset evaluation run: {validated_event}"
            raise AirflowException(message)

        self.hook.validate_evaluation_run_results(
            evaluation_run_id=validated_event["evaluation_run_id"],
            show_results=self.show_results,
            verify_result_status=self.verify_result_status,
        )

        self.log.info("AWS Glue data quality ruleset evaluation run completed.")

    def poke(self, context: Context):
        self.log.info(
            "Poking for AWS Glue data quality ruleset evaluation run RunId: %s", self.evaluation_run_id
        )

        response = self.hook.conn.get_data_quality_ruleset_evaluation_run(RunId=self.evaluation_run_id)

        status = response.get("Status")

        if status in self.SUCCESS_STATES:
            # Run finished: optionally assert that every rule passed before
            # reporting success to the scheduler.
            self.hook.validate_evaluation_run_results(
                evaluation_run_id=self.evaluation_run_id,
                show_results=self.show_results,
                verify_result_status=self.verify_result_status,
            )

            self.log.info(
                "AWS Glue data quality ruleset evaluation run completed RunId: %s Run State: %s",
                self.evaluation_run_id,
                response["Status"],
            )

            return True

        if status in self.FAILURE_STATES:
            job_error_message = (
                f"Error: AWS Glue data quality ruleset evaluation run RunId: {self.evaluation_run_id} Run "
                f"Status: {status}"
                f": {response.get('ErrorString')}"
            )
            self.log.info(job_error_message)
            raise AirflowException(job_error_message)

        # Still running: keep poking.
        return False
GlueDataQualityRuleSetEvaluationRunSensor
python
cython__cython
Cython/Coverage.py
{ "start": 16907, "end": 18783 }
class ____(FileReporter):
    """
    Provide detailed trace information for one source file to coverage.py.
    """
    def __init__(self, c_file, source_file, rel_file_path, code, excluded_lines):
        super().__init__(source_file)
        self.name = rel_file_path
        self.c_file = c_file
        self._code = code  # mapping: line number -> source line text
        self._excluded_lines = excluded_lines

    def lines(self):
        """
        Return set of line numbers that are possibly executable.
        """
        return set(self._code)

    def excluded_lines(self):
        """
        Return set of line numbers that are excluded from coverage.
        """
        return self._excluded_lines

    def _iter_source_tokens(self):
        # Yield one token list per source line, inserting empty lists for the
        # line numbers missing from self._code so line numbering stays aligned.
        current_line = 1
        for line_no, code_line in sorted(self._code.items()):
            while line_no > current_line:
                yield []
                current_line += 1
            yield [('txt', code_line)]
            current_line += 1

    def source(self):
        """
        Return the source code of the file as a string.
        """
        if os.path.exists(self.filename):
            with open_source_file(self.filename) as f:
                return f.read()
        else:
            return '\n'.join(
                (tokens[0][1] if tokens else '')
                for tokens in self._iter_source_tokens())

    def source_token_lines(self):
        """
        Iterate over the source code tokens.
        """
        if os.path.exists(self.filename):
            with open_source_file(self.filename) as f:
                for line in f:
                    yield [('txt', line.rstrip('\n'))]
        else:
            # BUG FIX: _iter_source_tokens() already yields token *lists* of
            # the form [('txt', text)]. The previous code wrapped each list
            # again as [('txt', line)], producing [('txt', [(...)])] instead
            # of the [(kind, text), ...] shape coverage.py expects (compare
            # source() above, which indexes tokens[0][1]).
            for tokens in self._iter_source_tokens():
                yield tokens


def coverage_init(reg, options):
    # coverage.py plugin entry point: register the Cython plugin both as a
    # configurer and as a file tracer.
    plugin = Plugin()
    reg.add_configurer(plugin)
    reg.add_file_tracer(plugin)
CythonModuleReporter
python
jazzband__django-waffle
waffle/tests/test_testutils.py
{ "start": 11046, "end": 11253 }
class ____(OverrideSampleOnClassTestsMixin, TestCase): """ Run tests with Django TestCase """ @override_sample('foo', active=False)
OverrideSampleOnClassTestCase
python
doocs__leetcode
solution/1900-1999/1931.Painting a Grid With Three Different Colors/Solution.py
{ "start": 0, "end": 992 }
class ____: def colorTheGrid(self, m: int, n: int) -> int: def f1(x: int) -> bool: last = -1 for _ in range(m): if x % 3 == last: return False last = x % 3 x //= 3 return True def f2(x: int, y: int) -> bool: for _ in range(m): if x % 3 == y % 3: return False x, y = x // 3, y // 3 return True mod = 10**9 + 7 mx = 3**m valid = {i for i in range(mx) if f1(i)} d = defaultdict(list) for x in valid: for y in valid: if f2(x, y): d[x].append(y) f = [int(i in valid) for i in range(mx)] for _ in range(n - 1): g = [0] * mx for i in valid: for j in d[i]: g[i] = (g[i] + f[j]) % mod f = g return sum(f) % mod
Solution
python
getsentry__sentry
tests/flagpole/test_flagpole_eval.py
{ "start": 319, "end": 2876 }
class ____:
    """Test get_arguments() function for parsing command line arguments and context."""

    @mock.patch("flagpole.flagpole_eval.sys.argv", ["script.py", "--flag-name", "test-flag"])
    def test_get_arguments_with_flag_name_only(self):
        """Test get_arguments returns correct dict with only flag name."""
        result = get_arguments()
        assert result["flag_name"] == "test-flag"
        assert result["context"] == {}
        # A flagpole file path is always present, even when not supplied.
        assert "flagpole_file" in result

    @mock.patch(
        "flagpole.flagpole_eval.sys.argv",
        ["script.py", "--flag-name", "test-flag", "--context", '{"user_id": 123}'],
    )
    def test_get_arguments_with_context_flag(self):
        """Test get_arguments parses context from --context flag."""
        result = get_arguments()
        assert result["flag_name"] == "test-flag"
        assert result["context"] == {"user_id": 123}

    @mock.patch(
        "flagpole.flagpole_eval.sys.argv",
        ["script.py", "--flag-name", "test-flag", '{"org_id": 456, "user_id": 789}'],
    )
    def test_get_arguments_with_positional_context(self):
        """Test get_arguments parses context from positional argument."""
        result = get_arguments()
        assert result["flag_name"] == "test-flag"
        assert result["context"] == {"org_id": 456, "user_id": 789}

    @mock.patch(
        "flagpole.flagpole_eval.sys.argv",
        [
            "script.py",
            "--flag-name",
            "test-flag",
            "--context",
            '{"user_id": 123}',
            '{"org_id": 456}',
        ],
    )
    def test_get_arguments_context_flag_takes_precedence(self):
        """Test that --context flag takes precedence over positional context."""
        # Both forms supplied: the explicit --context value must win.
        result = get_arguments()
        assert result["context"] == {"user_id": 123}

    @mock.patch(
        "flagpole.flagpole_eval.sys.argv", ["script.py", "--flag-name", "test-flag", "invalid-json"]
    )
    def test_get_arguments_invalid_json_falls_back_to_empty(self):
        """Test that invalid JSON in positional context falls back to empty dict."""
        result = get_arguments()
        assert result["context"] == {}

    @mock.patch(
        "flagpole.flagpole_eval.sys.argv",
        ["script.py", "--flag-name", "test-flag", "--flagpole-file", "/custom/path.yaml"],
    )
    def test_get_arguments_custom_flagpole_file(self):
        """Test get_arguments with custom flagpole file path."""
        result = get_arguments()
        assert result["flagpole_file"] == "/custom/path.yaml"
TestGetArguments
python
astropy__astropy
astropy/time/__init__.py
{ "start": 104, "end": 1710 }
class ____(_config.ConfigNamespace):
    """
    Configuration parameters for `astropy.time`.
    """

    use_fast_parser = _config.ConfigItem(
        ["True", "False", "force"],
        "Use fast C parser for supported time strings formats, including ISO, "
        "ISOT, and YearDayTime. Allowed values are the 'False' (use Python parser),"
        "'True' (use C parser and fall through to Python parser if fails), and "
        "'force' (use C parser and raise exception if it fails). Note that the"
        "options are all strings.",
    )

    masked_array_type = _config.ConfigItem(
        ["astropy", "numpy"],
        'The type of masked array used for masked output data. Can be "astropy" '
        'for `astropy.utils.masked.Masked` or "numpy" to use `numpy.ma.MaskedArray`. '
        "Note that if `astropy.units.Quantity` is produced, the output always "
        "uses `astropy.utils.masked.Masked`, since `numpy.ma.MaskedArray` does not "
        "work with quantities.",
    )

    # Create a dict of available masked classes for speed.
    # Use local imports so we do not pollute the module namespace.
    from numpy.ma import MaskedArray

    from astropy.utils.masked import Masked

    _MASKED_CLASSES = {"astropy": Masked, "numpy": MaskedArray}

    @property
    def _masked_cls(self):
        """The masked class set by ``masked_array_type``.

        This is |Masked| for "astropy", `numpy.ma.MaskedArray` for "numpy".
        """
        return self._MASKED_CLASSES[self.masked_array_type]


# Module-level singleton read by the rest of astropy.time.
conf = Conf()


# isort: off
from .formats import *
from .core import *

# isort: on
Conf
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py
{ "start": 7222, "end": 7583 }
class ____(EnumMeta):
    # Lint fixture (ruff PYI034): a metaclass subclassing EnumMeta, where a
    # `Self` return type would be wrong, so each method spells out the class
    # name explicitly and should NOT be flagged.
    def __new__(cls) -> MetaclassInWhichSelfCannotBeUsed2: ...
    def __enter__(self) -> MetaclassInWhichSelfCannotBeUsed2: ...
    async def __aenter__(self) -> MetaclassInWhichSelfCannotBeUsed2: ...
    def __isub__(self, other: MetaclassInWhichSelfCannotBeUsed2) -> MetaclassInWhichSelfCannotBeUsed2: ...
MetaclassInWhichSelfCannotBeUsed2
python
scrapy__scrapy
tests/test_webclient.py
{ "start": 6747, "end": 6932 }
class ____(resource.Resource):
    # Twisted web resource returning a fixed body without setting any headers
    # itself (used to exercise responses lacking an explicit length —
    # presumably; confirm against the tests that use it).
    def render(self, request):
        return b"nolength"


# Applies to the following test definition, which is outside this excerpt.
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
NoLengthResource
python
wandb__wandb
wandb/sdk/lib/service/service_connection.py
{ "start": 2198, "end": 10169 }
class ____: """A connection to the W&B internal service process. None of the synchronous methods may be called in an asyncio context. """ def __init__( self, asyncer: asyncio_manager.AsyncioManager, client: ServiceClient, proc: service_process.ServiceProcess | None, cleanup: Callable[[], None] | None = None, ): """Returns a new ServiceConnection. Args: asyncer: An asyncio runner. client: A client for communicating with the service over a socket. proc: The service process if we own it, or None otherwise. cleanup: A callback to run on teardown before doing anything. """ self._asyncer = asyncer self._client = client self._proc = proc self._torn_down = False self._cleanup = cleanup def make_interface(self, stream_id: str) -> InterfaceBase: """Returns an interface for communicating with the service.""" return InterfaceSock( self._asyncer, self._client, stream_id=stream_id, ) async def init_sync( self, paths: set[pathlib.Path], settings: wandb_settings.Settings, ) -> MailboxHandle[wandb_sync_pb2.ServerInitSyncResponse]: """Send a ServerInitSyncRequest.""" init_sync = wandb_sync_pb2.ServerInitSyncRequest( path=(str(path) for path in paths), settings=settings.to_proto(), ) request = spb.ServerRequest(init_sync=init_sync) handle = await self._client.deliver(request) return handle.map(lambda r: r.init_sync_response) async def sync( self, id: str, *, parallelism: int, ) -> MailboxHandle[wandb_sync_pb2.ServerSyncResponse]: """Send a ServerSyncRequest.""" sync = wandb_sync_pb2.ServerSyncRequest(id=id, parallelism=parallelism) request = spb.ServerRequest(sync=sync) handle = await self._client.deliver(request) return handle.map(lambda r: r.sync_response) def api_init_request( self, settings: wandb_settings_pb2.Settings, ) -> None: """Tells wandb-core to initialize resources for handling API requests.""" api_init_request = wandb_api_pb2.ServerApiInitRequest(settings=settings) request = spb.ServerRequest(api_init_request=api_init_request) handle = self._asyncer.run(lambda: 
self._client.deliver(request)) try: response = handle.wait_or(timeout=10) except (MailboxClosedError, HandleAbandonedError): raise WandbApiFailedError( "Failed to initialize API resources:" + " the service process is not running.", ) from None except TimeoutError: raise WandbApiFailedError( "Failed to initialize API resources:" + " the service process is busy and did not respond in time.", ) from None api_init_response = response.api_init_response if api_init_response.error_message: raise WandbApiFailedError(api_init_response.error_message) async def sync_status( self, id: str, ) -> MailboxHandle[wandb_sync_pb2.ServerSyncStatusResponse]: """Send a ServerSyncStatusRequest.""" sync_status = wandb_sync_pb2.ServerSyncStatusRequest(id=id) request = spb.ServerRequest(sync_status=sync_status) handle = await self._client.deliver(request) return handle.map(lambda r: r.sync_status_response) def api_request( self, api_request: wandb_api_pb2.ApiRequest, ) -> wandb_api_pb2.ApiResponse: """Send an ApiRequest and wait for a response.""" request = spb.ServerRequest() request.api_request.CopyFrom(api_request) handle = self._asyncer.run(lambda: self._client.deliver(request)) try: response = handle.wait_or(timeout=10) except (MailboxClosedError, HandleAbandonedError): raise WandbApiFailedError( "Failed to initialize API resources:" + " the service process is not running.", ) from None except TimeoutError: raise WandbApiFailedError( "Failed to initialize API resources:" + " the service process is busy and did not respond in time.", ) from None api_response = response.api_response if api_response.HasField("api_error_response"): raise Exception(api_response.api_error_response.message) return api_response def api_publish(self, api_request: wandb_api_pb2.ApiRequest) -> None: """Publish an ApiRequest without waiting for a response.""" request = spb.ServerRequest() request.api_request.CopyFrom(api_request) self._asyncer.run(lambda: self._client.publish(request)) def inform_init( self, 
settings: wandb_settings_pb2.Settings, run_id: str, ) -> None: """Send an init request to the service.""" request = spb.ServerInformInitRequest() request.settings.CopyFrom(settings) request._info.stream_id = run_id self._asyncer.run( lambda: self._client.publish(spb.ServerRequest(inform_init=request)) ) def inform_finish(self, run_id: str) -> None: """Send an finish request to the service.""" request = spb.ServerInformFinishRequest() request._info.stream_id = run_id self._asyncer.run( lambda: self._client.publish(spb.ServerRequest(inform_finish=request)) ) def inform_attach( self, attach_id: str, ) -> wandb_settings_pb2.Settings: """Send an attach request to the service. Raises a WandbAttachFailedError if attaching is not possible. """ request = spb.ServerRequest() request.inform_attach._info.stream_id = attach_id try: handle = self._asyncer.run(lambda: self._client.deliver(request)) response = handle.wait_or(timeout=10) except (MailboxClosedError, HandleAbandonedError): raise WandbAttachFailedError( "Failed to attach: the service process is not running.", ) from None except TimeoutError: raise WandbAttachFailedError( "Failed to attach because the run does not belong to" + " the current service process, or because the service" + " process is busy (unlikely)." ) from None else: return response.inform_attach_response.settings def teardown(self, exit_code: int) -> int | None: """Close the connection. Stop reading responses on the connection, and if this connection owns the service process, send a teardown message and wait for it to shut down. This may only be called once. Returns: The exit code of the service process, or None if the process was not owned by this connection. """ if self._torn_down: raise AssertionError("Already torn down.") self._torn_down = True if self._cleanup: self._cleanup() if not self._proc: return None # Clear the service token to prevent new connections to the process. 
service_token.clear_service_in_env() async def publish_teardown_and_close() -> None: await self._client.publish( spb.ServerRequest( inform_teardown=spb.ServerInformTeardownRequest( exit_code=exit_code, ) ), ) await self._client.close() self._asyncer.run(publish_teardown_and_close) return self._proc.join()
ServiceConnection
python
arrow-py__arrow
tests/test_locales.py
{ "start": 72272, "end": 75041 }
class ____: def test_year_full(self): assert self.locale.year_full(2015) == "2558" def test_year_abbreviation(self): assert self.locale.year_abbreviation(2015) == "58" def test_format_relative_now(self): result = self.locale._format_relative("ขณะนี้", "now", 0) assert result == "ขณะนี้" def test_format_relative_past(self): result = self.locale._format_relative("1 ชั่วโมง", "hour", 1) assert result == "ในอีก 1 ชั่วโมง" result = self.locale._format_relative("{0} ชั่วโมง", "hours", 2) assert result == "ในอีก {0} ชั่วโมง" result = self.locale._format_relative("ไม่กี่วินาที", "seconds", 42) assert result == "ในอีกไม่กี่วินาที" def test_format_relative_future(self): result = self.locale._format_relative("1 ชั่วโมง", "hour", -1) assert result == "1 ชั่วโมง ที่ผ่านมา" def test_format_timeframe(self): # Now assert self.locale._format_timeframe("now", 0) == "ขณะนี้" # Second(s) assert self.locale._format_timeframe("second", 1) == "วินาที" assert self.locale._format_timeframe("seconds", 2) == "2 วินาที" # Minute(s) assert self.locale._format_timeframe("minute", 1) == "นาที" assert self.locale._format_timeframe("minutes", 5) == "5 นาที" # Hour(s) assert self.locale._format_timeframe("hour", 1) == "ชั่วโมง" assert self.locale._format_timeframe("hours", 3) == "3 ชั่วโมง" # Day(s) assert self.locale._format_timeframe("day", 1) == "วัน" assert self.locale._format_timeframe("days", 7) == "7 วัน" # Week(s) assert self.locale._format_timeframe("week", 1) == "สัปดาห์" assert self.locale._format_timeframe("weeks", 2) == "2 สัปดาห์" # Month(s) assert self.locale._format_timeframe("month", 1) == "เดือน" assert self.locale._format_timeframe("months", 4) == "4 เดือน" # Year(s) assert self.locale._format_timeframe("year", 1) == "ปี" assert self.locale._format_timeframe("years", 10) == "10 ปี" def test_weekday(self): dt = arrow.Arrow(2015, 4, 11, 17, 30, 0) # These values depend on the actual Thai locale implementation # Replace with correct Thai names if available assert 
self.locale.day_name(dt.isoweekday()) == "วันเสาร์" assert self.locale.day_abbreviation(dt.isoweekday()) == "ส." def test_ordinal_number(self): # Thai ordinal numbers are not commonly used, but test for fallback assert self.locale.ordinal_number(1) == "1" assert self.locale.ordinal_number(10) == "10" assert self.locale.ordinal_number(0) == "0" @pytest.mark.usefixtures("lang_locale")
TestThaiLocale
python
FactoryBoy__factory_boy
tests/alchemyapp/models.py
{ "start": 382, "end": 528 }
class ____(Base): __tablename__ = 'StandardModelTable' id = Column(Integer(), primary_key=True) foo = Column(Unicode(20))
StandardModel
python
getsentry__sentry
src/sentry/sentry_apps/services/app/model.py
{ "start": 4519, "end": 4753 }
class ____(RpcModel): success: bool message: str error_type: SentryAppErrorType | None webhook_context: dict[str, Any] | None public_context: dict[str, Any] | None status_code: int | None
RpcAlertRuleActionResult
python
boto__boto3
boto3/docs/collection.py
{ "start": 985, "end": 11296 }
class ____(NestedDocumenter): def document_collections(self, section): collections = self._resource.meta.resource_model.collections collections_list = [] add_resource_type_overview( section=section, resource_type='Collections', description=( 'Collections provide an interface to iterate over and ' 'manipulate groups of resources. ' ), intro_link='guide_collections', ) self.member_map['collections'] = collections_list for collection in collections: collections_list.append(collection.name) # Create a new DocumentStructure for each collection and add contents. collection_doc = DocumentStructure(collection.name, target='html') breadcrumb_section = collection_doc.add_new_section('breadcrumb') breadcrumb_section.style.ref(self._resource_class_name, 'index') breadcrumb_section.write(f' / Collection / {collection.name}') collection_doc.add_title_section(collection.name) collection_section = collection_doc.add_new_section( collection.name, context={'qualifier': f'{self.class_name}.'}, ) self._document_collection(collection_section, collection) # Write collections in individual/nested files. 
# Path: <root>/reference/services/<service>/<resource_name>/<collection_name>.rst collections_dir_path = os.path.join( self._root_docs_path, f'{self._service_name}', f'{self._resource_sub_path}', ) collection_doc.write_to_file(collections_dir_path, collection.name) def _document_collection(self, section, collection): methods = get_instance_public_methods( getattr(self._resource, collection.name) ) document_collection_object(section, collection) batch_actions = {} for batch_action in collection.batch_actions: batch_actions[batch_action.name] = batch_action for method in sorted(methods): method_section = section.add_new_section(method) if method in batch_actions: document_batch_action( section=method_section, resource_name=self._resource_name, event_emitter=self._resource.meta.client.meta.events, batch_action_model=batch_actions[method], collection_model=collection, service_model=self._resource.meta.client.meta.service_model, ) else: document_collection_method( section=method_section, resource_name=self._resource_name, action_name=method, event_emitter=self._resource.meta.client.meta.events, collection_model=collection, service_model=self._resource.meta.client.meta.service_model, ) def document_collection_object( section, collection_model, include_signature=True, ): """Documents a collection resource object :param section: The section to write to :param collection_model: The model of the collection :param include_signature: Whether or not to include the signature. It is useful for generating docstrings. """ if include_signature: full_collection_name = ( f"{section.context.get('qualifier', '')}{collection_model.name}" ) section.style.start_sphinx_py_attr(full_collection_name) section.include_doc_string( f'A collection of {collection_model.resource.type} resources.' ) section.include_doc_string( f'A {collection_model.resource.type} Collection will include all ' f'resources by default, and extreme caution should be taken when ' f'performing actions on all resources.' 
) def document_batch_action( section, resource_name, event_emitter, batch_action_model, service_model, collection_model, include_signature=True, ): """Documents a collection's batch action :param section: The section to write to :param resource_name: The name of the resource :param action_name: The name of collection action. Currently only can be all, filter, limit, or page_size :param event_emitter: The event emitter to use to emit events :param batch_action_model: The model of the batch action :param collection_model: The model of the collection :param service_model: The model of the service :param include_signature: Whether or not to include the signature. It is useful for generating docstrings. """ operation_model = service_model.operation_model( batch_action_model.request.operation ) ignore_params = get_resource_ignore_params( batch_action_model.request.params ) example_return_value = 'response' if batch_action_model.resource: example_return_value = xform_name(batch_action_model.resource.type) example_resource_name = xform_name(resource_name) if service_model.service_name == resource_name: example_resource_name = resource_name example_prefix = f'{example_return_value} = {example_resource_name}.{collection_model.name}.{batch_action_model.name}' document_model_driven_resource_method( section=section, method_name=batch_action_model.name, operation_model=operation_model, event_emitter=event_emitter, method_description=operation_model.documentation, example_prefix=example_prefix, exclude_input=ignore_params, resource_action_model=batch_action_model, include_signature=include_signature, ) def document_collection_method( section, resource_name, action_name, event_emitter, collection_model, service_model, include_signature=True, ): """Documents a collection method :param section: The section to write to :param resource_name: The name of the resource :param action_name: The name of collection action. 
Currently only can be all, filter, limit, or page_size :param event_emitter: The event emitter to use to emit events :param collection_model: The model of the collection :param service_model: The model of the service :param include_signature: Whether or not to include the signature. It is useful for generating docstrings. """ operation_model = service_model.operation_model( collection_model.request.operation ) underlying_operation_members = [] if operation_model.input_shape: underlying_operation_members = operation_model.input_shape.members example_resource_name = xform_name(resource_name) if service_model.service_name == resource_name: example_resource_name = resource_name custom_action_info_dict = { 'all': { 'method_description': ( f'Creates an iterable of all {collection_model.resource.type} ' f'resources in the collection.' ), 'example_prefix': f'{xform_name(collection_model.resource.type)}_iterator = {example_resource_name}.{collection_model.name}.all', 'exclude_input': underlying_operation_members, }, 'filter': { 'method_description': ( f'Creates an iterable of all {collection_model.resource.type} ' f'resources in the collection filtered by kwargs passed to ' f'method. A {collection_model.resource.type} collection will ' f'include all resources by default if no filters are provided, ' f'and extreme caution should be taken when performing actions ' f'on all resources.' ), 'example_prefix': f'{xform_name(collection_model.resource.type)}_iterator = {example_resource_name}.{collection_model.name}.filter', 'exclude_input': get_resource_ignore_params( collection_model.request.params ), }, 'limit': { 'method_description': ( f'Creates an iterable up to a specified amount of ' f'{collection_model.resource.type} resources in the collection.' 
), 'example_prefix': f'{xform_name(collection_model.resource.type)}_iterator = {example_resource_name}.{collection_model.name}.limit', 'include_input': [ DocumentedShape( name='count', type_name='integer', documentation=( 'The limit to the number of resources in the iterable.' ), ) ], 'exclude_input': underlying_operation_members, }, 'page_size': { 'method_description': ( f'Creates an iterable of all {collection_model.resource.type} ' f'resources in the collection, but limits the number of ' f'items returned by each service call by the specified amount.' ), 'example_prefix': f'{xform_name(collection_model.resource.type)}_iterator = {example_resource_name}.{collection_model.name}.page_size', 'include_input': [ DocumentedShape( name='count', type_name='integer', documentation=( 'The number of items returned by each service call' ), ) ], 'exclude_input': underlying_operation_members, }, } if action_name in custom_action_info_dict: action_info = custom_action_info_dict[action_name] document_model_driven_resource_method( section=section, method_name=action_name, operation_model=operation_model, event_emitter=event_emitter, resource_action_model=collection_model, include_signature=include_signature, **action_info, )
CollectionDocumenter
python
django__django
tests/select_related/models.py
{ "start": 855, "end": 982 }
class ____(models.Model): name = models.CharField(max_length=50) phylum = models.ForeignKey(Phylum, models.CASCADE)
Klass
python
charliermarsh__ruff
crates/ruff_python_formatter/resources/test/fixtures/ruff/fmt_skip/decorators.py
{ "start": 601, "end": 761 }
class ____: # fmt: skip pass # Regression test for https://github.com/astral-sh/ruff/issues/7735 @decorator1 @decorator2 def foo(): # fmt: skip pass
Foo
python
zarr-developers__zarr-python
src/zarr/core/dtype/wrapper.py
{ "start": 2311, "end": 9725 }
class ____(ABC, Generic[TDType_co, TScalar_co]): """ Abstract base class for wrapping native array data types, e.g. numpy dtypes Attributes ---------- dtype_cls : ClassVar[type[TDType]] The wrapped dtype class. This is a class variable. _zarr_v3_name : ClassVar[str] The name given to the data type by a Zarr v3 data type specification. This is a class variable, and it should generally be unique across different data types. """ # this class will create a native data type dtype_cls: ClassVar[type[TDType_co]] _zarr_v3_name: ClassVar[str] @classmethod def _check_native_dtype(cls: type[Self], dtype: TBaseDType) -> TypeGuard[TDType_co]: """ Check that a native data type matches the dtype_cls class attribute. Used as a type guard. Parameters ---------- dtype : TDType The dtype to check. Returns ------- Bool True if the dtype matches, False otherwise. """ return type(dtype) is cls.dtype_cls @classmethod @abstractmethod def from_native_dtype(cls: type[Self], dtype: TBaseDType) -> Self: """ Create a ZDType instance from a native data type. This method is used when taking a user-provided native data type, like a NumPy data type, and creating the corresponding ZDType instance from them. Parameters ---------- dtype : TDType The native data type object to wrap. Returns ------- Self The ZDType that wraps the native data type. Raises ------ TypeError If the native data type is not consistent with the wrapped data type. """ raise NotImplementedError # pragma: no cover @abstractmethod def to_native_dtype(self: Self) -> TDType_co: """ Return an instance of the wrapped data type. This operation inverts ``from_native_dtype``. Returns ------- TDType The native data type wrapped by this ZDType. 
""" raise NotImplementedError # pragma: no cover @classmethod @abstractmethod def _from_json_v2(cls: type[Self], data: DTypeJSON) -> Self: raise NotImplementedError # pragma: no cover @classmethod @abstractmethod def _from_json_v3(cls: type[Self], data: DTypeJSON) -> Self: raise NotImplementedError # pragma: no cover @classmethod def from_json(cls: type[Self], data: DTypeJSON, *, zarr_format: ZarrFormat) -> Self: """ Create an instance of this ZDType from JSON data. Parameters ---------- data : DTypeJSON The JSON representation of the data type. zarr_format : ZarrFormat The zarr format version. Returns ------- Self An instance of this data type. """ if zarr_format == 2: return cls._from_json_v2(data) if zarr_format == 3: return cls._from_json_v3(data) raise ValueError(f"zarr_format must be 2 or 3, got {zarr_format}") # pragma: no cover @overload def to_json(self, zarr_format: Literal[2]) -> DTypeSpec_V2: ... @overload def to_json(self, zarr_format: Literal[3]) -> DTypeSpec_V3: ... @abstractmethod def to_json(self, zarr_format: ZarrFormat) -> DTypeSpec_V2 | DTypeSpec_V3: """ Serialize this ZDType to JSON. Parameters ---------- zarr_format : ZarrFormat The zarr format version. Returns ------- DTypeJSON_V2 | DTypeJSON_V3 The JSON-serializable representation of the wrapped data type """ raise NotImplementedError # pragma: no cover @abstractmethod def _check_scalar(self, data: object) -> bool: """ Check that an python object is a valid scalar value for the wrapped data type. Parameters ---------- data : object A value to check. Returns ------- Bool True if the object is valid, False otherwise. """ raise NotImplementedError # pragma: no cover @abstractmethod def cast_scalar(self, data: object) -> TScalar_co: """ Cast a python object to the wrapped scalar type. The type of the provided scalar is first checked for compatibility. If it's incompatible with the associated scalar type, a ``TypeError`` will be raised. 
Parameters ---------- data : object The python object to cast. Returns ------- TScalar The cast value. """ raise NotImplementedError # pragma: no cover @abstractmethod def default_scalar(self) -> TScalar_co: """ Get the default scalar value for the wrapped data type. This is a method, rather than an attribute, because the default value for some data types depends on parameters that are not known until a concrete data type is wrapped. For example, data types parametrized by a length like fixed-length strings or bytes will generate scalars consistent with that length. Returns ------- TScalar The default value for this data type. """ raise NotImplementedError # pragma: no cover @abstractmethod def from_json_scalar(self: Self, data: JSON, *, zarr_format: ZarrFormat) -> TScalar_co: """ Read a JSON-serializable value as a scalar. Parameters ---------- data : JSON A JSON representation of a scalar value. zarr_format : ZarrFormat The zarr format version. This is specified because the JSON serialization of scalars differs between Zarr V2 and Zarr V3. Returns ------- TScalar The deserialized scalar value. """ raise NotImplementedError # pragma: no cover @abstractmethod def to_json_scalar(self, data: object, *, zarr_format: ZarrFormat) -> JSON: """ Serialize a python object to the JSON representation of a scalar. The value will first be cast to the scalar type associated with this ZDType, then serialized to JSON. Parameters ---------- data : object The value to convert. zarr_format : ZarrFormat The zarr format version. This is specified because the JSON serialization of scalars differs between Zarr V2 and Zarr V3. Returns ------- JSON The JSON-serialized scalar. """ raise NotImplementedError # pragma: no cover def scalar_failed_type_check_msg( cls_instance: ZDType[TBaseDType, TBaseScalar], bad_scalar: object ) -> str: """ Generate an error message reporting that a particular value failed a type check when attempting to cast that value to a scalar. 
""" return ( f"The value {bad_scalar!r} failed a type check. " f"It cannot be safely cast to a scalar compatible with {cls_instance}. " f"Consult the documentation for {cls_instance} to determine the possible values that can " "be cast to scalars of the wrapped data type." )
ZDType
python
dagster-io__dagster
python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/core/workspace.py
{ "start": 629, "end": 1210 }
class ____: # pex_tag is a string like 'deps-234y4384.pex:source-39y3474.pex' that idenfies # the pex files to execute pex_tag: str # python_version determines which pex base docker image to use # only one of PexMetadata.python_version or CodeLocationDeployData.image should be specified python_version: Optional[str] = None # History of CodeLocationDeployData # 1. Removal of `enable_metrics` field # 2. Renamed from `CodeDeploymentMetadata` to `CodeLocationDeployData`` @whitelist_for_serdes(storage_name="CodeDeploymentMetadata") @record_custom
PexMetadata
python
ray-project__ray
python/ray/serve/schema.py
{ "start": 27900, "end": 32187 }
class ____(BaseModel): """ Multi-application config for deploying a list of Serve applications to the Ray cluster. This is the request JSON schema for the v2 REST API `PUT "/api/serve/applications/"`. NOTE: This config allows extra parameters to make it forward-compatible (ie older versions of Serve are able to accept configs from a newer versions, simply ignoring new parameters) """ proxy_location: ProxyLocation = Field( default=ProxyLocation.EveryNode, description=( "Config for where to run proxies for ingress traffic to the cluster." ), ) http_options: HTTPOptionsSchema = Field( default=HTTPOptionsSchema(), description="Options to start the HTTP Proxy with." ) grpc_options: gRPCOptionsSchema = Field( default=gRPCOptionsSchema(), description="Options to start the gRPC Proxy with." ) logging_config: LoggingConfig = Field( default=None, description="Logging config for configuring serve components logs.", ) applications: List[ServeApplicationSchema] = Field( ..., description="The set of applications to run on the Ray cluster." ) target_capacity: Optional[float] = TARGET_CAPACITY_FIELD @validator("applications") def application_names_unique(cls, v): # Ensure there are no duplicate applications listed names = [app.name for app in v] duplicates = {f'"{name}"' for name in names if names.count(name) > 1} if len(duplicates): apps_str = ("application " if len(duplicates) == 1 else "applications ") + ( ", ".join(duplicates) ) raise ValueError( f"Found multiple configs for {apps_str}. Please remove all duplicates." 
) return v @validator("applications") def application_routes_unique(cls, v): # Ensure each application with a non-null route prefix has unique route prefixes routes = [app.route_prefix for app in v if app.route_prefix is not None] duplicates = {f'"{route}"' for route in routes if routes.count(route) > 1} if len(duplicates): routes_str = ( "route prefix " if len(duplicates) == 1 else "route prefixes " ) + (", ".join(duplicates)) raise ValueError( f"Found duplicate applications for {routes_str}. Please ensure each " "application's route_prefix is unique." ) return v @validator("applications") def application_names_nonempty(cls, v): for app in v: if len(app.name) == 0: raise ValueError("Application names must be nonempty.") return v @root_validator def nested_host_and_port(cls, values): # TODO (zcin): ServeApplicationSchema still needs to have host and port # fields to support single-app mode, but in multi-app mode the host and port # fields at the top-level deploy config is used instead. Eventually, after # migration, we should remove these fields from ServeApplicationSchema. for app_config in values.get("applications"): if "host" in app_config.dict(exclude_unset=True): raise ValueError( f'Host "{app_config.host}" is set in the config for application ' f"`{app_config.name}`. Please remove it and set host in the top " "level deploy config only." ) if "port" in app_config.dict(exclude_unset=True): raise ValueError( f"Port {app_config.port} is set in the config for application " f"`{app_config.name}`. Please remove it and set port in the top " "level deploy config only." ) return values @staticmethod def get_empty_schema_dict() -> Dict: """Returns an empty deploy schema dictionary. Schema can be used as a representation of an empty Serve deploy config. """ return {"applications": []} # Keep in sync with ServeSystemActorStatus in # python/ray/dashboard/client/src/type/serve.ts @PublicAPI(stability="stable")
ServeDeploySchema
python
apache__airflow
airflow-core/src/airflow/api_fastapi/core_api/datamodels/ui/dashboard.py
{ "start": 1169, "end": 1512 }
class ____(BaseModel): """TaskInstance serializer for responses.""" no_status: int removed: int scheduled: int queued: int running: int success: int restarting: int failed: int up_for_retry: int up_for_reschedule: int upstream_failed: int skipped: int deferred: int
TaskInstanceStateCount
python
python-openxml__python-docx
src/docx/oxml/simpletypes.py
{ "start": 12785, "end": 13019 }
class ____(XsdStringEnumeration): """Valid values for `w:vertAlign/@val`.""" BASELINE = "baseline" SUPERSCRIPT = "superscript" SUBSCRIPT = "subscript" _members = (BASELINE, SUPERSCRIPT, SUBSCRIPT)
ST_VerticalAlignRun
python
getsentry__sentry
src/sentry/spans/grouping/api.py
{ "start": 147, "end": 622 }
class ____(LookupError): pass def load_span_grouping_config(config: Any | None = None) -> SpanGroupingConfig: if config is None: config_id = DEFAULT_CONFIG_ID else: if "id" not in config: raise ValueError("Malformed configuration: missing 'id'") config_id = config["id"] if config_id not in CONFIGURATIONS: raise SpanGroupingConfigNotFound(config_id) return CONFIGURATIONS[config_id]
SpanGroupingConfigNotFound
python
plotly__plotly.py
plotly/graph_objs/volume/_caps.py
{ "start": 233, "end": 3771 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "volume" _path_str = "volume.caps" _valid_props = {"x", "y", "z"} @property def x(self): """ The 'x' property is an instance of X that may be specified as: - An instance of :class:`plotly.graph_objs.volume.caps.X` - A dict of string/value properties that will be passed to the X constructor Returns ------- plotly.graph_objs.volume.caps.X """ return self["x"] @x.setter def x(self, val): self["x"] = val @property def y(self): """ The 'y' property is an instance of Y that may be specified as: - An instance of :class:`plotly.graph_objs.volume.caps.Y` - A dict of string/value properties that will be passed to the Y constructor Returns ------- plotly.graph_objs.volume.caps.Y """ return self["y"] @y.setter def y(self, val): self["y"] = val @property def z(self): """ The 'z' property is an instance of Z that may be specified as: - An instance of :class:`plotly.graph_objs.volume.caps.Z` - A dict of string/value properties that will be passed to the Z constructor Returns ------- plotly.graph_objs.volume.caps.Z """ return self["z"] @z.setter def z(self, val): self["z"] = val @property def _prop_descriptions(self): return """\ x :class:`plotly.graph_objects.volume.caps.X` instance or dict with compatible properties y :class:`plotly.graph_objects.volume.caps.Y` instance or dict with compatible properties z :class:`plotly.graph_objects.volume.caps.Z` instance or dict with compatible properties """ def __init__(self, arg=None, x=None, y=None, z=None, **kwargs): """ Construct a new Caps object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.volume.Caps` x :class:`plotly.graph_objects.volume.caps.X` instance or dict with compatible properties y :class:`plotly.graph_objects.volume.caps.Y` instance or dict with compatible properties z :class:`plotly.graph_objects.volume.caps.Z` instance or dict with compatible properties Returns ------- Caps """ 
super().__init__("caps") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.volume.Caps constructor must be a dict or an instance of :class:`plotly.graph_objs.volume.Caps`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("x", arg, x) self._set_property("y", arg, y) self._set_property("z", arg, z) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Caps
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1187524, "end": 1188169 }
class ____(sgqlc.types.Type, Node): """Represents a 'head_ref_restored' event on a given pull request.""" __schema__ = github_schema __field_names__ = ("actor", "created_at", "pull_request") actor = sgqlc.types.Field(Actor, graphql_name="actor") """Identifies the actor who performed the event.""" created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt") """Identifies the date and time when the object was created.""" pull_request = sgqlc.types.Field(sgqlc.types.non_null("PullRequest"), graphql_name="pullRequest") """PullRequest referenced by event."""
HeadRefRestoredEvent
python
xlwings__xlwings
xlwings/constants.py
{ "start": 85728, "end": 86013 }
class ____: xlPhoneticAlignCenter = 2 # from enum XlPhoneticAlignment xlPhoneticAlignDistributed = 3 # from enum XlPhoneticAlignment xlPhoneticAlignLeft = 1 # from enum XlPhoneticAlignment xlPhoneticAlignNoControl = 0 # from enum XlPhoneticAlignment
PhoneticAlignment
python
sanic-org__sanic
sanic/models/asgi.py
{ "start": 469, "end": 1423 }
class ____: # no cov def __init__(self, transport: "MockTransport", loop): self.transport = transport self._not_paused = asyncio.Event() self._not_paused.set() self._complete = asyncio.Event() def pause_writing(self) -> None: self._not_paused.clear() def resume_writing(self) -> None: self._not_paused.set() async def complete(self) -> None: self._not_paused.set() await self.transport.send( {"type": "http.response.body", "body": b"", "more_body": False} ) @property def is_complete(self) -> bool: return self._complete.is_set() async def push_data(self, data: bytes) -> None: if not self.is_complete: await self.transport.send( {"type": "http.response.body", "body": data, "more_body": True} ) async def drain(self) -> None: await self._not_paused.wait()
MockProtocol
python
doocs__leetcode
solution/0600-0699/0611.Valid Triangle Number/Solution.py
{ "start": 0, "end": 312 }
class ____: def triangleNumber(self, nums: List[int]) -> int: nums.sort() ans, n = 0, len(nums) for i in range(n - 2): for j in range(i + 1, n - 1): k = bisect_left(nums, nums[i] + nums[j], lo=j + 1) - 1 ans += k - j return ans
Solution
python
eventlet__eventlet
tests/greenthread_test.py
{ "start": 3533, "end": 4506 }
class ____(LimitedTestCase, Asserts): def setUp(self): super().setUp() self.lst = [1] def test_timer_fired(self): def func(): greenthread.spawn_after_local(0.1, self.lst.pop) greenthread.sleep(0.2) greenthread.spawn(func) assert self.lst == [1], self.lst greenthread.sleep(0.3) assert self.lst == [], self.lst def test_timer_cancelled_upon_greenlet_exit(self): def func(): greenthread.spawn_after_local(0.1, self.lst.pop) greenthread.spawn(func) assert self.lst == [1], self.lst greenthread.sleep(0.2) assert self.lst == [1], self.lst def test_spawn_is_not_cancelled(self): def func(): greenthread.spawn(self.lst.pop) # exiting immediatelly, but self.lst.pop must be called greenthread.spawn(func) greenthread.sleep(0.1) assert self.lst == [], self.lst
SpawnAfterLocal
python
realpython__materials
python-protocol/person.py
{ "start": 0, "end": 258 }
class ____: def __init__(self, name): self.name = name def eat(self): print(f"{self.name} is eating.") def drink(self): print(f"{self.name} is drinking.") def talk(self): print(f"{self.name} is talking.")
Person
python
kamyu104__LeetCode-Solutions
Python/cinema-seat-allocation.py
{ "start": 50, "end": 819 }
class ____(object): def maxNumberOfFamilies(self, n, reservedSeats): """ :type n: int :type reservedSeats: List[List[int]] :rtype: int """ lookup = collections.defaultdict(lambda: [False]*3) for r, c in reservedSeats: if 2 <= c <= 5: lookup[r][0] = True if 4 <= c <= 7: lookup[r][1] = True if 6 <= c <= 9: lookup[r][2] = True result = 2*n for a, b, c in lookup.itervalues(): if not a and not c: continue if not a or not b or not c: result -= 1 continue result -= 2 return result # Time: O(nlogn) # Space: O(1)
Solution
python
airbytehq__airbyte
airbyte-ci/connectors/pipelines/pipelines/models/secrets.py
{ "start": 4023, "end": 4507 }
class ____(SecretStore): def __init__(self) -> None: self._store: Dict[str, str] = {} def add_secret(self, name: str, value: str) -> Secret: self._store[name] = value return Secret(name, self) def _fetch_secret(self, name: str) -> str: try: return self._store[name] except KeyError: raise SecretNotFoundError(f"Secret named {name} can't be found in the in memory secret store") @dataclass
InMemorySecretStore
python
PrefectHQ__prefect
tests/server/schemas/test_schedules.py
{ "start": 12622, "end": 15993 }
class ____: every_day = "0 0 * * *" every_hour = "0 * * * *" async def test_every_day(self): clock = CronSchedule(cron=self.every_day) dates = await clock.get_dates( n=5, start=datetime(2021, 1, 1, tzinfo=ZoneInfo("UTC")) ) assert dates == [ datetime(2021, 1, 1, tzinfo=ZoneInfo("UTC")) + timedelta(days=i) for i in range(5) ] assert all(d.tzname() == "UTC" for d in dates) async def test_every_hour(self): clock = CronSchedule(cron=self.every_hour) dates = await clock.get_dates( n=5, start=datetime(2021, 1, 1, tzinfo=ZoneInfo("UTC")) ) assert dates == [ datetime(2021, 1, 1, tzinfo=ZoneInfo("UTC")) + timedelta(hours=i) for i in range(5) ] assert all(d.tzname() == "UTC" for d in dates) async def test_every_day_with_timezone(self): clock = CronSchedule(cron=self.every_hour, timezone="America/New_York") dates = await clock.get_dates( n=5, start=datetime(2021, 1, 1, tzinfo=ZoneInfo("UTC")) ) assert dates == [ datetime(2021, 1, 1, tzinfo=ZoneInfo("UTC")) + timedelta(hours=i) for i in range(5) ] assert all(d.tzinfo.key == "America/New_York" for d in dates) async def test_every_day_with_timezone_start(self): clock = CronSchedule(cron=self.every_hour) dates = await clock.get_dates( n=5, start=ZonedDateTime(2021, 1, 1, tz="UTC") .to_tz("America/New_York") .py_datetime(), ) assert dates == [ datetime(2021, 1, 1, tzinfo=ZoneInfo("UTC")) + timedelta(hours=i) for i in range(5) ] assert all(d.tzname() == "UTC" for d in dates) async def test_n(self): clock = CronSchedule(cron=self.every_day) dates = await clock.get_dates( n=10, start=datetime(2021, 1, 1, tzinfo=ZoneInfo("UTC")) ) assert dates == [ datetime(2021, 1, 1, tzinfo=ZoneInfo("UTC")) + timedelta(days=i) for i in range(10) ] async def test_start_date(self): start_date = datetime(2025, 5, 5, tzinfo=ZoneInfo("UTC")) clock = CronSchedule(cron=self.every_day) dates = await clock.get_dates(n=10, start=start_date) assert dates == [start_date + timedelta(days=i) for i in range(10)] @pytest.mark.parametrize( "end_date", [ 
datetime(2018, 1, 1, tzinfo=ZoneInfo("UTC")), datetime(2021, 2, 2, tzinfo=ZoneInfo("UTC")), datetime(2022, 3, 3, tzinfo=ZoneInfo("UTC")), ], ) async def test_get_dates_until_end_date(self, end_date): clock = CronSchedule(cron=self.every_day) dates = await clock.get_dates( start=datetime(2018, 1, 1, tzinfo=ZoneInfo("UTC")), end=end_date ) assert len(dates) == min( MAX_ITERATIONS, (end_date - datetime(2018, 1, 1, tzinfo=ZoneInfo("UTC"))).days + 1, ) async def test_default_n_is_one_without_end_date(self): clock = CronSchedule(cron=self.every_day) dates = await clock.get_dates(start=datetime(2018, 1, 1, 6)) assert dates == [datetime(2018, 1, 2, tzinfo=ZoneInfo("UTC"))]
TestCronSchedule
python
Textualize__textual
tests/command_palette/test_command_source_environment.py
{ "start": 577, "end": 1247 }
class ____(App[None]): COMMANDS = {SimpleSource} def compose(self) -> ComposeResult: yield Input() def on_mount(self) -> None: self.action_command_palette() async def test_command_source_environment() -> None: """The command source should see the app and default screen.""" async with CommandPaletteApp().run_test() as pilot: base_screen, *_ = pilot.app.children assert base_screen is not None await pilot.press(*"test") assert len(SimpleSource.environment) == 1 assert SimpleSource.environment == { (pilot.app, base_screen, base_screen.query_one(Input)) }
CommandPaletteApp
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 178336, "end": 179056 }
class ____(sgqlc.types.Input): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ( "draft_issue_id", "title", "body", "assignee_ids", "client_mutation_id", ) draft_issue_id = sgqlc.types.Field( sgqlc.types.non_null(ID), graphql_name="draftIssueId" ) title = sgqlc.types.Field(String, graphql_name="title") body = sgqlc.types.Field(String, graphql_name="body") assignee_ids = sgqlc.types.Field( sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="assigneeIds" ) client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
UpdateProjectDraftIssueInput
python
wandb__wandb
wandb/vendor/pygments/lexers/igor.py
{ "start": 372, "end": 19994 }
class ____(RegexLexer): """ Pygments Lexer for Igor Pro procedure files (.ipf). See http://www.wavemetrics.com/ and http://www.igorexchange.com/. .. versionadded:: 2.0 """ name = 'Igor' aliases = ['igor', 'igorpro'] filenames = ['*.ipf'] mimetypes = ['text/ipf'] flags = re.IGNORECASE | re.MULTILINE flowControl = ( 'if', 'else', 'elseif', 'endif', 'for', 'endfor', 'strswitch', 'switch', 'case', 'default', 'endswitch', 'do', 'while', 'try', 'catch', 'endtry', 'break', 'continue', 'return', 'AbortOnRTE', 'AbortOnValue' ) types = ( 'variable', 'string', 'constant', 'strconstant', 'NVAR', 'SVAR', 'WAVE', 'STRUCT', 'dfref', 'funcref', 'char', 'uchar', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64', 'float', 'double' ) keywords = ( 'override', 'ThreadSafe', 'MultiThread', 'static', 'Proc', 'Picture', 'Prompt', 'DoPrompt', 'macro', 'window', 'function', 'end', 'Structure', 'EndStructure', 'EndMacro', 'Menu', 'SubMenu' ) operations = ( 'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio', 'AddMovieFrame', 'AdoptFiles', 'APMath', 'Append', 'AppendImage', 'AppendLayoutObject', 'AppendMatrixContour', 'AppendText', 'AppendToGizmo', 'AppendToGraph', 'AppendToLayout', 'AppendToTable', 'AppendXYZContour', 'AutoPositionWindow', 'BackgroundInfo', 'Beep', 'BoundingBall', 'BoxSmooth', 'BrowseURL', 'BuildMenu', 'Button', 'cd', 'Chart', 'CheckBox', 'CheckDisplayed', 'ChooseColor', 'Close', 'CloseHelp', 'CloseMovie', 'CloseProc', 'ColorScale', 'ColorTab2Wave', 'Concatenate', 'ControlBar', 'ControlInfo', 'ControlUpdate', 'ConvertGlobalStringTextEncoding', 'ConvexHull', 'Convolve', 'CopyFile', 'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut', 'CreateBrowser', 'Cross', 'CtrlBackground', 'CtrlFIFO', 'CtrlNamedBackground', 'Cursor', 'CurveFit', 'CustomControl', 'CWT', 'Debugger', 'DebuggerOptions', 'DefaultFont', 'DefaultGuiControls', 'DefaultGuiFont', 'DefaultTextEncoding', 'DefineGuide', 'DelayUpdate', 'DeleteAnnotations', 'DeleteFile', 'DeleteFolder', 
'DeletePoints', 'Differentiate', 'dir', 'Display', 'DisplayHelpTopic', 'DisplayProcedure', 'DoAlert', 'DoIgorMenu', 'DoUpdate', 'DoWindow', 'DoXOPIdle', 'DPSS', 'DrawAction', 'DrawArc', 'DrawBezier', 'DrawLine', 'DrawOval', 'DrawPICT', 'DrawPoly', 'DrawRect', 'DrawRRect', 'DrawText', 'DrawUserShape', 'DSPDetrend', 'DSPPeriodogram', 'Duplicate', 'DuplicateDataFolder', 'DWT', 'EdgeStats', 'Edit', 'ErrorBars', 'EstimatePeakSizes', 'Execute', 'ExecuteScriptText', 'ExperimentModified', 'ExportGizmo', 'Extract', 'FastGaussTransform', 'FastOp', 'FBinRead', 'FBinWrite', 'FFT', 'FIFOStatus', 'FIFO2Wave', 'FilterFIR', 'FilterIIR', 'FindAPeak', 'FindContour', 'FindDuplicates', 'FindLevel', 'FindLevels', 'FindPeak', 'FindPointsInPoly', 'FindRoots', 'FindSequence', 'FindValue', 'FPClustering', 'fprintf', 'FReadLine', 'FSetPos', 'FStatus', 'FTPCreateDirectory', 'FTPDelete', 'FTPDownload', 'FTPUpload', 'FuncFit', 'FuncFitMD', 'GBLoadWave', 'GetAxis', 'GetCamera', 'GetFileFolderInfo', 'GetGizmo', 'GetLastUserMenuInfo', 'GetMarquee', 'GetMouse', 'GetSelection', 'GetWindow', 'GPIBReadBinaryWave2', 'GPIBReadBinary2', 'GPIBReadWave2', 'GPIBRead2', 'GPIBWriteBinaryWave2', 'GPIBWriteBinary2', 'GPIBWriteWave2', 'GPIBWrite2', 'GPIB2', 'GraphNormal', 'GraphWaveDraw', 'GraphWaveEdit', 'Grep', 'GroupBox', 'Hanning', 'HDF5CloseFile', 'HDF5CloseGroup', 'HDF5ConvertColors', 'HDF5CreateFile', 'HDF5CreateGroup', 'HDF5CreateLink', 'HDF5Dump', 'HDF5DumpErrors', 'HDF5DumpState', 'HDF5ListAttributes', 'HDF5ListGroup', 'HDF5LoadData', 'HDF5LoadGroup', 'HDF5LoadImage', 'HDF5OpenFile', 'HDF5OpenGroup', 'HDF5SaveData', 'HDF5SaveGroup', 'HDF5SaveImage', 'HDF5TestOperation', 'HDF5UnlinkObject', 'HideIgorMenus', 'HideInfo', 'HideProcedures', 'HideTools', 'HilbertTransform', 'Histogram', 'ICA', 'IFFT', 'ImageAnalyzeParticles', 'ImageBlend', 'ImageBoundaryToMask', 'ImageEdgeDetection', 'ImageFileInfo', 'ImageFilter', 'ImageFocus', 'ImageFromXYZ', 'ImageGenerateROIMask', 'ImageGLCM', 'ImageHistModification', 
'ImageHistogram', 'ImageInterpolate', 'ImageLineProfile', 'ImageLoad', 'ImageMorphology', 'ImageRegistration', 'ImageRemoveBackground', 'ImageRestore', 'ImageRotate', 'ImageSave', 'ImageSeedFill', 'ImageSkeleton3d', 'ImageSnake', 'ImageStats', 'ImageThreshold', 'ImageTransform', 'ImageUnwrapPhase', 'ImageWindow', 'IndexSort', 'InsertPoints', 'Integrate', 'IntegrateODE', 'Integrate2D', 'Interpolate2', 'Interpolate3D', 'Interp3DPath', 'JCAMPLoadWave', 'JointHistogram', 'KillBackground', 'KillControl', 'KillDataFolder', 'KillFIFO', 'KillFreeAxis', 'KillPath', 'KillPICTs', 'KillStrings', 'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label', 'Layout', 'LayoutPageAction', 'LayoutSlideShow', 'Legend', 'LinearFeedbackShiftRegister', 'ListBox', 'LoadData', 'LoadPackagePreferences', 'LoadPICT', 'LoadWave', 'Loess', 'LombPeriodogram', 'Make', 'MakeIndex', 'MarkPerfTestTime', 'MatrixConvolve', 'MatrixCorr', 'MatrixEigenV', 'MatrixFilter', 'MatrixGaussJ', 'MatrixGLM', 'MatrixInverse', 'MatrixLinearSolve', 'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD', 'MatrixLUDTD', 'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve', 'MatrixSVBkSub', 'MatrixSVD', 'MatrixTranspose', 'MeasureStyledText', 'MLLoadWave', 'Modify', 'ModifyBrowser', 'ModifyCamera', 'ModifyContour', 'ModifyControl', 'ModifyControlList', 'ModifyFreeAxis', 'ModifyGizmo', 'ModifyGraph', 'ModifyImage', 'ModifyLayout', 'ModifyPanel', 'ModifyTable', 'ModifyWaterfall', 'MoveDataFolder', 'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow', 'MoveVariable', 'MoveWave', 'MoveWindow', 'MultiTaperPSD', 'MultiThreadingControl', 'NeuralNetworkRun', 'NeuralNetworkTrain', 'NewCamera', 'NewDataFolder', 'NewFIFO', 'NewFIFOChan', 'NewFreeAxis', 'NewGizmo', 'NewImage', 'NewLayout', 'NewMovie', 'NewNotebook', 'NewPanel', 'NewPath', 'NewWaterfall', 'NI4882', 'Note', 'Notebook', 'NotebookAction', 'Open', 'OpenHelp', 'OpenNotebook', 'Optimize', 'ParseOperationTemplate', 'PathInfo', 'PauseForUser', 
'PauseUpdate', 'PCA', 'PlayMovie', 'PlayMovieAction', 'PlaySound', 'PopupContextualMenu', 'PopupMenu', 'Preferences', 'PrimeFactors', 'Print', 'printf', 'PrintGraphs', 'PrintLayout', 'PrintNotebook', 'PrintSettings', 'PrintTable', 'Project', 'PulseStats', 'PutScrapText', 'pwd', 'Quit', 'RatioFromNumber', 'Redimension', 'Remove', 'RemoveContour', 'RemoveFromGizmo', 'RemoveFromGraph', 'RemoveFromLayout', 'RemoveFromTable', 'RemoveImage', 'RemoveLayoutObjects', 'RemovePath', 'Rename', 'RenameDataFolder', 'RenamePath', 'RenamePICT', 'RenameWindow', 'ReorderImages', 'ReorderTraces', 'ReplaceText', 'ReplaceWave', 'Resample', 'ResumeUpdate', 'Reverse', 'Rotate', 'Save', 'SaveData', 'SaveExperiment', 'SaveGraphCopy', 'SaveNotebook', 'SavePackagePreferences', 'SavePICT', 'SaveTableCopy', 'SetActiveSubwindow', 'SetAxis', 'SetBackground', 'SetDashPattern', 'SetDataFolder', 'SetDimLabel', 'SetDrawEnv', 'SetDrawLayer', 'SetFileFolderInfo', 'SetFormula', 'SetIgorHook', 'SetIgorMenuMode', 'SetIgorOption', 'SetMarquee', 'SetProcessSleep', 'SetRandomSeed', 'SetScale', 'SetVariable', 'SetWaveLock', 'SetWaveTextEncoding', 'SetWindow', 'ShowIgorMenus', 'ShowInfo', 'ShowTools', 'Silent', 'Sleep', 'Slider', 'Smooth', 'SmoothCustom', 'Sort', 'SortColumns', 'SoundInRecord', 'SoundInSet', 'SoundInStartChart', 'SoundInStatus', 'SoundInStopChart', 'SoundLoadWave', 'SoundSaveWave', 'SphericalInterpolate', 'SphericalTriangulate', 'SplitString', 'SplitWave', 'sprintf', 'sscanf', 'Stack', 'StackWindows', 'StatsAngularDistanceTest', 'StatsANOVA1Test', 'StatsANOVA2NRTest', 'StatsANOVA2RMTest', 'StatsANOVA2Test', 'StatsChiTest', 'StatsCircularCorrelationTest', 'StatsCircularMeans', 'StatsCircularMoments', 'StatsCircularTwoSampleTest', 'StatsCochranTest', 'StatsContingencyTable', 'StatsDIPTest', 'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest', 'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKDE', 'StatsKendallTauTest', 'StatsKSTest', 'StatsKWTest', 'StatsLinearCorrelationTest', 
'StatsLinearRegression', 'StatsMultiCorrelationTest', 'StatsNPMCTest', 'StatsNPNominalSRTest', 'StatsQuantiles', 'StatsRankCorrelationTest', 'StatsResample', 'StatsSample', 'StatsScheffeTest', 'StatsShapiroWilkTest', 'StatsSignTest', 'StatsSRTest', 'StatsTTest', 'StatsTukeyTest', 'StatsVariancesTest', 'StatsWatsonUSquaredTest', 'StatsWatsonWilliamsTest', 'StatsWheelerWatsonTest', 'StatsWilcoxonRankTest', 'StatsWRCorrelationTest', 'String', 'StructGet', 'StructPut', 'SumDimension', 'SumSeries', 'TabControl', 'Tag', 'TextBox', 'ThreadGroupPutDF', 'ThreadStart', 'Tile', 'TileWindows', 'TitleBox', 'ToCommandLine', 'ToolsGrid', 'Triangulate3d', 'Unwrap', 'URLRequest', 'ValDisplay', 'Variable', 'VDTClosePort2', 'VDTGetPortList2', 'VDTGetStatus2', 'VDTOpenPort2', 'VDTOperationsPort2', 'VDTReadBinaryWave2', 'VDTReadBinary2', 'VDTReadHexWave2', 'VDTReadHex2', 'VDTReadWave2', 'VDTRead2', 'VDTTerminalPort2', 'VDTWriteBinaryWave2', 'VDTWriteBinary2', 'VDTWriteHexWave2', 'VDTWriteHex2', 'VDTWriteWave2', 'VDTWrite2', 'VDT2', 'WaveMeanStdv', 'WaveStats', 'WaveTransform', 'wfprintf', 'WignerTransform', 'WindowFunction', 'XLLoadWave' ) functions = ( 'abs', 'acos', 'acosh', 'AddListItem', 'AiryA', 'AiryAD', 'AiryB', 'AiryBD', 'alog', 'AnnotationInfo', 'AnnotationList', 'area', 'areaXY', 'asin', 'asinh', 'atan', 'atanh', 'atan2', 'AxisInfo', 'AxisList', 'AxisValFromPixel', 'Besseli', 'Besselj', 'Besselk', 'Bessely', 'beta', 'betai', 'BinarySearch', 'BinarySearchInterp', 'binomial', 'binomialln', 'binomialNoise', 'cabs', 'CaptureHistory', 'CaptureHistoryStart', 'ceil', 'cequal', 'char2num', 'chebyshev', 'chebyshevU', 'CheckName', 'ChildWindowList', 'CleanupName', 'cmplx', 'cmpstr', 'conj', 'ContourInfo', 'ContourNameList', 'ContourNameToWaveRef', 'ContourZ', 'ControlNameList', 'ConvertTextEncoding', 'cos', 'cosh', 'cosIntegral', 'cot', 'coth', 'CountObjects', 'CountObjectsDFR', 'cpowi', 'CreationDate', 'csc', 'csch', 'CsrInfo', 'CsrWave', 'CsrWaveRef', 'CsrXWave', 'CsrXWaveRef', 
'CTabList', 'DataFolderDir', 'DataFolderExists', 'DataFolderRefsEqual', 'DataFolderRefStatus', 'date', 'datetime', 'DateToJulian', 'date2secs', 'Dawson', 'DDERequestString', 'defined', 'deltax', 'digamma', 'dilogarithm', 'DimDelta', 'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves', 'erf', 'erfc', 'erfcw', 'exists', 'exp', 'ExpConvExp', 'ExpConvExpFit', 'ExpConvExpFitBL', 'ExpConvExpFit1Shape', 'ExpConvExpFit1ShapeBL', 'ExpGauss', 'ExpGaussFit', 'ExpGaussFitBL', 'ExpGaussFit1Shape', 'ExpGaussFit1ShapeBL', 'expInt', 'expIntegralE1', 'expNoise', 'factorial', 'fakedata', 'faverage', 'faverageXY', 'FetchURL', 'FindDimLabel', 'FindListItem', 'floor', 'FontList', 'FontSizeHeight', 'FontSizeStringWidth', 'FresnelCos', 'FresnelSin', 'FuncRefInfo', 'FunctionInfo', 'FunctionList', 'FunctionPath', 'gamma', 'gammaEuler', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss', 'GaussFit', 'GaussFitBL', 'GaussFit1Width', 'GaussFit1WidthBL', 'Gauss1D', 'Gauss2D', 'gcd', 'GetBrowserLine', 'GetBrowserSelection', 'GetDataFolder', 'GetDataFolderDFR', 'GetDefaultFont', 'GetDefaultFontSize', 'GetDefaultFontStyle', 'GetDimLabel', 'GetEnvironmentVariable', 'GetErrMessage', 'GetFormula', 'GetIndependentModuleName', 'GetIndexedObjName', 'GetIndexedObjNameDFR', 'GetKeyState', 'GetRTErrMessage', 'GetRTError', 'GetRTLocation', 'GetRTLocInfo', 'GetRTStackInfo', 'GetScrapText', 'GetUserData', 'GetWavesDataFolder', 'GetWavesDataFolderDFR', 'GizmoInfo', 'GizmoScale', 'gnoise', 'GrepList', 'GrepString', 'GuideInfo', 'GuideNameList', 'Hash', 'hcsr', 'HDF5AttributeInfo', 'HDF5DatasetInfo', 'HDF5LibraryInfo', 'HDF5TypeInfo', 'hermite', 'hermiteGauss', 'HyperGNoise', 'HyperGPFQ', 'HyperG0F1', 'HyperG1F1', 'HyperG2F1', 'IgorInfo', 'IgorVersion', 'imag', 'ImageInfo', 'ImageNameList', 'ImageNameToWaveRef', 'IndependentModuleList', 'IndexedDir', 'IndexedFile', 'Inf', 'Integrate1D', 'interp', 'Interp2D', 'Interp3D', 'inverseERF', 'inverseERFC', 'ItemsInList', 'JacobiCn', 'JacobiSn', 
'JulianToDate', 'Laguerre', 'LaguerreA', 'LaguerreGauss', 'LambertW', 'LayoutInfo', 'leftx', 'LegendreA', 'limit', 'ListMatch', 'ListToTextWave', 'ListToWaveRefWave', 'ln', 'log', 'logNormalNoise', 'LorentzianFit', 'LorentzianFitBL', 'LorentzianFit1Width', 'LorentzianFit1WidthBL', 'lorentzianNoise', 'LowerStr', 'MacroList', 'magsqr', 'MandelbrotPoint', 'MarcumQ', 'MatrixCondition', 'MatrixDet', 'MatrixDot', 'MatrixRank', 'MatrixTrace', 'max', 'mean', 'median', 'min', 'mod', 'ModDate', 'MPFXEMGPeak', 'MPFXExpConvExpPeak', 'MPFXGaussPeak', 'MPFXLorenzianPeak', 'MPFXVoigtPeak', 'NameOfWave', 'NaN', 'NewFreeDataFolder', 'NewFreeWave', 'norm', 'NormalizeUnicode', 'note', 'NumberByKey', 'numpnts', 'numtype', 'NumVarOrDefault', 'num2char', 'num2istr', 'num2str', 'NVAR_Exists', 'OperationList', 'PadString', 'PanelResolution', 'ParamIsDefault', 'ParseFilePath', 'PathList', 'pcsr', 'Pi', 'PICTInfo', 'PICTList', 'PixelFromAxisVal', 'pnt2x', 'poissonNoise', 'poly', 'PolygonArea', 'poly2D', 'PossiblyQuoteName', 'ProcedureText', 'p2rect', 'qcsr', 'real', 'RemoveByKey', 'RemoveEnding', 'RemoveFromList', 'RemoveListItem', 'ReplaceNumberByKey', 'ReplaceString', 'ReplaceStringByKey', 'rightx', 'round', 'r2polar', 'sawtooth', 'scaleToIndex', 'ScreenResolution', 'sec', 'sech', 'Secs2Date', 'Secs2Time', 'SelectNumber', 'SelectString', 'SetEnvironmentVariable', 'sign', 'sin', 'sinc', 'sinh', 'sinIntegral', 'SortList', 'SpecialCharacterInfo', 'SpecialCharacterList', 'SpecialDirPath', 'SphericalBessJ', 'SphericalBessJD', 'SphericalBessY', 'SphericalBessYD', 'SphericalHarmonics', 'sqrt', 'StartMSTimer', 'StatsBetaCDF', 'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF', 'StatsCauchyCDF', 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF', 'StatsCMSSDCDF', 'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF', 'StatsErlangCDF', 'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF', 'StatsEValuePDF', 'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF', 'StatsFPDF', 'StatsFriedmanCDF', 'StatsGammaCDF', 
'StatsGammaPDF', 'StatsGeometricCDF', 'StatsGeometricPDF', 'StatsGEVCDF', 'StatsGEVPDF', 'StatsHyperGCDF', 'StatsHyperGPDF', 'StatsInvBetaCDF', 'StatsInvBinomialCDF', 'StatsInvCauchyCDF', 'StatsInvChiCDF', 'StatsInvCMSSDCDF', 'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF', 'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF', 'StatsInvGeometricCDF', 'StatsInvKuiperCDF', 'StatsInvLogisticCDF', 'StatsInvLogNormalCDF', 'StatsInvMaxwellCDF', 'StatsInvMooreCDF', 'StatsInvNBinomialCDF', 'StatsInvNCChiCDF', 'StatsInvNCFCDF', 'StatsInvNormalCDF', 'StatsInvParetoCDF', 'StatsInvPoissonCDF', 'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF', 'StatsInvRayleighCDF', 'StatsInvRectangularCDF', 'StatsInvSpearmanCDF', 'StatsInvStudentCDF', 'StatsInvTopDownCDF', 'StatsInvTriangularCDF', 'StatsInvUsquaredCDF', 'StatsInvVonMisesCDF', 'StatsInvWeibullCDF', 'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF', 'StatsLogNormalCDF', 'StatsLogNormalPDF', 'StatsMaxwellCDF', 'StatsMaxwellPDF', 'StatsMedian', 'StatsMooreCDF', 'StatsNBinomialCDF', 'StatsNBinomialPDF', 'StatsNCChiCDF', 'StatsNCChiPDF', 'StatsNCFCDF', 'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF', 'StatsNormalCDF', 'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF', 'StatsPermute', 'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF', 'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF', 'StatsRayleighCDF', 'StatsRayleighPDF', 'StatsRectangularCDF', 'StatsRectangularPDF', 'StatsRunsCDF', 'StatsSpearmanRhoCDF', 'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF', 'StatsTriangularCDF', 'StatsTriangularPDF', 'StatsTrimmedMean', 'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise', 'StatsVonMisesPDF', 'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF', 'StatsWeibullPDF', 'StopMSTimer', 'StringByKey', 'stringCRC', 'StringFromList', 'StringList', 'stringmatch', 'strlen', 'strsearch', 'StrVarOrDefault', 'str2num', 'StudentA', 'StudentT', 'sum', 'SVAR_Exists', 'TableInfo', 'TagVal', 
'TagWaveRef', 'tan', 'tanh', 'TextEncodingCode', 'TextEncodingName', 'TextFile', 'ThreadGroupCreate', 'ThreadGroupGetDF', 'ThreadGroupGetDFR', 'ThreadGroupRelease', 'ThreadGroupWait', 'ThreadProcessorCount', 'ThreadReturnValue', 'ticks', 'time', 'TraceFromPixel', 'TraceInfo', 'TraceNameList', 'TraceNameToWaveRef', 'trunc', 'UniqueName', 'UnPadString', 'UnsetEnvironmentVariable', 'UpperStr', 'URLDecode', 'URLEncode', 'VariableList', 'Variance', 'vcsr', 'Voigt', 'VoigtFit', 'VoigtFitBL', 'VoigtFit1Shape', 'VoigtFit1ShapeBL', 'VoigtFit1Shape1Width', 'VoigtFit1Shape1WidthBL', 'VoigtFunc', 'WaveCRC', 'WaveDims', 'WaveExists', 'WaveInfo', 'WaveList', 'WaveMax', 'WaveMin', 'WaveName', 'WaveRefIndexed', 'WaveRefIndexedDFR', 'WaveRefsEqual', 'WaveRefWaveToList', 'WaveTextEncoding', 'WaveType', 'WaveUnits', 'WhichListItem', 'WinList', 'WinName', 'WinRecreation', 'WinType', 'WMFindWholeWord', 'WNoise', 'xcsr', 'XWaveName', 'XWaveRefFromTrace', 'x2pnt', 'zcsr', 'ZernikeR', 'zeta' ) tokens = { 'root': [ (r'//.*$', Comment.Single), (r'"([^"\\]|\\.)*"', String), # Flow Control. (words(flowControl, prefix=r'\b', suffix=r'\b'), Keyword), # Types. (words(types, prefix=r'\b', suffix=r'\b'), Keyword.Type), # Keywords. (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword.Reserved), # Built-in operations. (words(operations, prefix=r'\b', suffix=r'\b'), Name.Class), # Built-in functions. (words(functions, prefix=r'\b', suffix=r'\b'), Name.Function), # Compiler directives. (r'^#(include|pragma|define|undef|ifdef|ifndef|if|elif|else|endif)', Name.Decorator), (r'[^a-z"/]+$', Text), (r'.', Text), ], }
IgorLexer
python
jd__tenacity
tenacity/wait.py
{ "start": 779, "end": 1458 }
class ____(abc.ABC): """Abstract base class for wait strategies.""" @abc.abstractmethod def __call__(self, retry_state: "RetryCallState") -> float: pass def __add__(self, other: "wait_base") -> "wait_combine": return wait_combine(self, other) def __radd__(self, other: "wait_base") -> typing.Union["wait_combine", "wait_base"]: # make it possible to use multiple waits with the built-in sum function if other == 0: # type: ignore[comparison-overlap] return self return self.__add__(other) WaitBaseT = typing.Union[ wait_base, typing.Callable[["RetryCallState"], typing.Union[float, int]] ]
wait_base
python
lepture__authlib
tests/flask/test_oauth2/test_jwt_bearer_grant.py
{ "start": 257, "end": 4360 }
class ____(_JWTBearerGrant): def resolve_issuer_client(self, issuer): return Client.query.filter_by(client_id=issuer).first() def resolve_client_key(self, client, headers, payload): keys = {"1": "foo", "2": "bar"} return keys[headers["kid"]] def authenticate_user(self, subject): return None def has_granted_permission(self, client, user): return True @pytest.fixture(autouse=True) def server(server): server.register_grant(JWTBearerGrant) return server @pytest.fixture(autouse=True) def client(client, db): client.set_client_metadata( { "scope": "profile", "redirect_uris": ["https://client.test/authorized"], "grant_types": [JWTBearerGrant.GRANT_TYPE], } ) db.session.add(client) db.session.commit() return client def test_missing_assertion(test_client): rv = test_client.post( "/oauth/token", data={"grant_type": JWTBearerGrant.GRANT_TYPE} ) resp = json.loads(rv.data) assert resp["error"] == "invalid_request" assert "assertion" in resp["error_description"] def test_invalid_assertion(test_client): assertion = JWTBearerGrant.sign( "foo", issuer="client-id", audience="https://provider.test/token", subject="none", header={"alg": "HS256", "kid": "1"}, ) rv = test_client.post( "/oauth/token", data={"grant_type": JWTBearerGrant.GRANT_TYPE, "assertion": assertion}, ) resp = json.loads(rv.data) assert resp["error"] == "invalid_grant" def test_authorize_token(test_client): assertion = JWTBearerGrant.sign( "foo", issuer="client-id", audience="https://provider.test/token", subject=None, header={"alg": "HS256", "kid": "1"}, ) rv = test_client.post( "/oauth/token", data={"grant_type": JWTBearerGrant.GRANT_TYPE, "assertion": assertion}, ) resp = json.loads(rv.data) assert "access_token" in resp def test_unauthorized_client(test_client, client): client.set_client_metadata( { "scope": "profile", "redirect_uris": ["https://client.test/authorized"], "grant_types": ["password"], } ) db.session.add(client) db.session.commit() assertion = JWTBearerGrant.sign( "bar", issuer="client-id", 
audience="https://provider.test/token", subject=None, header={"alg": "HS256", "kid": "2"}, ) rv = test_client.post( "/oauth/token", data={"grant_type": JWTBearerGrant.GRANT_TYPE, "assertion": assertion}, ) resp = json.loads(rv.data) assert resp["error"] == "unauthorized_client" def test_token_generator(test_client, app, server): m = "tests.flask.test_oauth2.oauth2_server:token_generator" app.config.update({"OAUTH2_ACCESS_TOKEN_GENERATOR": m}) server.load_config(app.config) assertion = JWTBearerGrant.sign( "foo", issuer="client-id", audience="https://provider.test/token", subject=None, header={"alg": "HS256", "kid": "1"}, ) rv = test_client.post( "/oauth/token", data={"grant_type": JWTBearerGrant.GRANT_TYPE, "assertion": assertion}, ) resp = json.loads(rv.data) assert "access_token" in resp assert "c-" in resp["access_token"] def test_jwt_bearer_token_generator(test_client, server): private_key = read_file_path("jwks_private.json") server.register_token_generator( JWTBearerGrant.GRANT_TYPE, JWTBearerTokenGenerator(private_key) ) assertion = JWTBearerGrant.sign( "foo", issuer="client-id", audience="https://provider.test/token", subject=None, header={"alg": "HS256", "kid": "1"}, ) rv = test_client.post( "/oauth/token", data={"grant_type": JWTBearerGrant.GRANT_TYPE, "assertion": assertion}, ) resp = json.loads(rv.data) assert "access_token" in resp assert resp["access_token"].count(".") == 2
JWTBearerGrant
python
psf__black
tests/data/cases/stub.py
{ "start": 1868, "end": 2281 }
class ____: def f(self): ... if sys.version_info >= (3, 8): def g(self): ... else: def g(self): ... def h(self): ... def i(self): ... if sys.version_info >= (3, 8): def j(self): ... def k(self): ... if sys.version_info >= (3, 8): class A: ... class B: ... class C: def l(self): ... def m(self): ...
Conditional
python
google__pytype
pytype/rewrite/overlays/special_builtins_test.py
{ "start": 108, "end": 362 }
class ____(test_utils.ContextfulTestBase): def load_builtin_function(self, name: str) -> abstract.PytdFunction: func = self.ctx.abstract_loader.load_builtin(name) assert isinstance(func, abstract.PytdFunction) return func
SpecialBuiltinsTest
python
Pylons__pyramid
tests/test_integration.py
{ "start": 16879, "end": 17654 }
class ____(IntegrationBase, unittest.TestCase): package = 'tests.pkgs.notfoundview' def test_it(self): res = self.testapp.get('/wontbefound', status=200) self.assertTrue(b'generic_notfound' in res.body) res = self.testapp.get('/bar', status=307) self.assertEqual(res.location, 'http://localhost/bar/') res = self.testapp.get('/bar/', status=200) self.assertTrue(b'OK bar' in res.body) res = self.testapp.get('/foo', status=307) self.assertEqual(res.location, 'http://localhost/foo/') res = self.testapp.get('/foo/', status=200) self.assertTrue(b'OK foo2' in res.body) res = self.testapp.get('/baz', status=200) self.assertTrue(b'baz_notfound' in res.body)
TestNotFoundView
python
getsentry__sentry
tests/sentry/api/serializers/test_group.py
{ "start": 18530, "end": 19718 }
class ____(TestCase): def test_simple_group_serializer(self) -> None: group = self.create_group() serialized = serialize(group, self.user, SimpleGroupSerializer()) assert serialized["id"] == str(group.id) assert serialized["title"] == group.title assert serialized["culprit"] == group.culprit assert serialized["level"] == "error" assert serialized["project"] == { "id": str(group.project.id), "name": group.project.name, "slug": group.project.slug, "platform": group.project.platform, } assert serialized["shortId"] == group.qualified_short_id assert serialized["status"] == "unresolved" assert serialized["substatus"] == "new" assert serialized["type"] == "default" assert serialized["issueType"] == "error" assert serialized["issueCategory"] == "error" assert serialized["metadata"] == group.get_event_metadata() assert serialized["numComments"] == group.num_comments assert serialized["firstSeen"] == group.first_seen assert serialized["lastSeen"] == group.last_seen
SimpleGroupSerializerTest
python
great-expectations__great_expectations
great_expectations/expectations/core/expect_column_values_to_be_between.py
{ "start": 2671, "end": 17965 }
class ____(ColumnMapExpectation): __doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION} ExpectColumnValuesToBeBetween is a \ Column Map Expectation Column Map Expectations are one of the most common types of Expectation. They are evaluated for a single column and ask a yes/no question for every row in that column. Based on the result, they then calculate the percentage of rows that gave a positive answer. If the percentage is high enough, the Expectation considers that data valid. Args: column (str): \ {COLUMN_DESCRIPTION} min_value (comparable type or None): \ {MIN_VALUE_DESCRIPTION} max_value (comparable type or None): \ {MAX_VALUE_DESCRIPTION} strict_min (boolean): \ {STRICT_MIN_DESCRIPTION} Default=False. strict_max (boolean): \ {STRICT_MAX_DESCRIPTION} Default=False. Other Parameters: mostly (None or a float between 0 and 1): \ {MOSTLY_DESCRIPTION} \ For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly). result_format (str or None): \ Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \ For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format). catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions). meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without \ modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta). severity (str or None): \ {FAILURE_SEVERITY_DESCRIPTION} \ For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity). 
Returns: An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result) Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta. Notes: * min_value and max_value are both inclusive unless strict_min or strict_max are set to True. * If min_value is None, then max_value is treated as an upper bound, and there is no minimum value checked. * If max_value is None, then min_value is treated as a lower bound, and there is no maximum value checked. See Also: [ExpectColumnValueLengthsToBeBetween](https://greatexpectations.io/expectations/expect_column_value_lengths_to_be_between) Supported Data Sources: [{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/) 
[{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/) Data Quality Issues: {DATA_QUALITY_ISSUES[0]} Example Data: test test2 0 1 1 1 1.3 7 2 .8 2.5 3 2 3 Code Examples: Passing Case: Input: ExpectColumnValuesToBeBetween( column="test", min_value=.5, max_value=2 ) Output: {{ "exception_info": {{ "raised_exception": false, "exception_traceback": null, "exception_message": null }}, "result": {{ "element_count": 4, "unexpected_count": 0, "unexpected_percent": 0.0, "partial_unexpected_list": [], "missing_count": 0, "missing_percent": 0.0, "unexpected_percent_total": 0.0, "unexpected_percent_nonmissing": 0.0 }}, "meta": {{}}, "success": true }} Failing Case: Input: ExpectColumnValuesToBeBetween( column="test2", min_value=1, max_value=7, strict_min=False, strict_max=True ) Output: {{ "exception_info": {{ "raised_exception": false, "exception_traceback": null, "exception_message": null }}, "result": {{ "element_count": 4, "unexpected_count": 1, "unexpected_percent": 25.0, "partial_unexpected_list": [ 7.0 ], "missing_count": 0, "missing_percent": 0.0, "unexpected_percent_total": 25.0, "unexpected_percent_nonmissing": 25.0 }}, "meta": {{}}, "success": false }} """ # noqa: E501 # FIXME CoP min_value: Optional[Comparable] = pydantic.Field( default=None, description=MIN_VALUE_DESCRIPTION ) max_value: Optional[Comparable] = pydantic.Field( default=None, description=MAX_VALUE_DESCRIPTION ) strict_min: Union[bool, SuiteParameterDict] = pydantic.Field( default=False, description=STRICT_MIN_DESCRIPTION ) strict_max: Union[bool, SuiteParameterDict] = pydantic.Field( default=False, description=MAX_VALUE_DESCRIPTION ) @root_validator def check_min_val_or_max_val(cls, values: dict) -> dict: min_value = values.get("min_value") max_value = values.get("max_value") if min_value is None and max_value is None: raise ValueError("min_value and max_value cannot both be None") # noqa: TRY003 # FIXME CoP # Check for empty dicts since Pydantic 
coerces empty strings # to empty dicts (SuiteParameterDict) during validation of Comparable field if min_value == {} or max_value == {}: raise ValueError("values cannot be empty strings") # noqa: TRY003 # FIXME CoP return values # This dictionary contains metadata for display in the public gallery library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = { "maturity": "production", "tags": ["core expectation", "column map expectation"], "contributors": ["@great_expectations"], "requirements": [], "has_full_test_suite": True, "manually_reviewed_code": True, } _library_metadata = library_metadata map_metric = "column_values.between" success_keys = ( "min_value", "max_value", "strict_min", "strict_max", "mostly", ) args_keys = ( "column", "min_value", "max_value", "strict_min", "strict_max", ) class Config: title = "Expect column values to be between" @staticmethod def schema_extra( schema: Dict[str, Any], model: Type[ExpectColumnValuesToBeBetween] ) -> None: ColumnMapExpectation.Config.schema_extra(schema, model) schema["properties"]["metadata"]["properties"].update( { "data_quality_issues": { "title": "Data Quality Issues", "type": "array", "const": DATA_QUALITY_ISSUES, }, "library_metadata": { "title": "Library Metadata", "type": "object", "const": model._library_metadata, }, "short_description": { "title": "Short Description", "type": "string", "const": EXPECTATION_SHORT_DESCRIPTION, }, "supported_data_sources": { "title": "Supported Data Sources", "type": "array", "const": SUPPORTED_DATA_SOURCES, }, } ) @classmethod @override def _prescriptive_template( # noqa: C901 # too complex cls, renderer_configuration: RendererConfiguration, ): add_param_args: AddParamArgs = ( ("column", RendererValueType.STRING), ("min_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]), ("max_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]), ("mostly", RendererValueType.NUMBER), ("strict_min", RendererValueType.BOOLEAN), ("strict_max", 
RendererValueType.BOOLEAN), ) for name, param_type in add_param_args: renderer_configuration.add_param(name=name, param_type=param_type) params = renderer_configuration.params template_str = "" at_least_str = "" at_most_str = "" if not params.min_value and not params.max_value: template_str += "may have any numerical value." else: at_least_str = "greater than or equal to" if params.strict_min: at_least_str = cls._get_strict_min_string( renderer_configuration=renderer_configuration ) at_most_str = "less than or equal to" if params.strict_max: at_most_str = cls._get_strict_max_string( renderer_configuration=renderer_configuration ) if params.min_value and params.max_value: template_str += ( f"values must be {at_least_str} $min_value and {at_most_str} $max_value" ) elif not params.min_value: template_str += f"values must be {at_most_str} $max_value" else: template_str += f"values must be {at_least_str} $min_value" if params.mostly and params.mostly.value < 1.0: renderer_configuration = cls._add_mostly_pct_param( renderer_configuration=renderer_configuration ) template_str += ", at least $mostly_pct % of the time." else: template_str += "." if renderer_configuration.include_column_name: template_str = f"$column {template_str}" renderer_configuration.template_str = template_str return renderer_configuration # NOTE: This method is a pretty good example of good usage of `params`. 
@classmethod @override @renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE) @render_suite_parameter_string def _prescriptive_renderer( # noqa: C901 cls, configuration: Optional[ExpectationConfiguration] = None, result: Optional[ExpectationValidationResult] = None, runtime_configuration: Optional[dict] = None, **kwargs, ): runtime_configuration = runtime_configuration or {} include_column_name = runtime_configuration.get("include_column_name") is not False styling = runtime_configuration.get("styling") params = substitute_none_for_missing( configuration.kwargs if configuration else {}, [ "column", "min_value", "max_value", "mostly", "row_condition", "condition_parser", "strict_min", "strict_max", ], ) template_str = "" if (params["min_value"] is None) and (params["max_value"] is None): template_str += "may have any numerical value." else: at_least_str, at_most_str = handle_strict_min_max(params) mostly_str = "" if params["mostly"] is not None: if isinstance(params["mostly"], (int, float)) and params["mostly"] < 1.0: params["mostly_pct"] = num_to_str(params["mostly"] * 100, no_scientific=True) # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".") # noqa: E501 # FIXME CoP mostly_str = ", at least $mostly_pct % of the time" if params["min_value"] is not None and params["max_value"] is not None: template_str += f"values must be {at_least_str} $min_value and {at_most_str} $max_value{mostly_str}." # noqa: E501 # FIXME CoP elif params["min_value"] is None: template_str += f"values must be {at_most_str} $max_value{mostly_str}." elif params["max_value"] is None: template_str += f"values must be {at_least_str} $min_value{mostly_str}." 
if include_column_name: template_str = f"$column {template_str}" if params["row_condition"] is not None: conditional_template_str = parse_row_condition_string(params["row_condition"]) template_str, styling = _style_row_condition( conditional_template_str, template_str, params, styling, ) return [ RenderedStringTemplateContent( content_block_type="string_template", string_template={ "template": template_str, "params": params, "styling": styling, }, ) ]
ExpectColumnValuesToBeBetween
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 553833, "end": 554310 }
class ____(sgqlc.types.Type): """Autogenerated return type of DeleteDiscussion""" __schema__ = github_schema __field_names__ = ("client_mutation_id", "discussion") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation.""" discussion = sgqlc.types.Field("Discussion", graphql_name="discussion") """The discussion that was just deleted."""
DeleteDiscussionPayload
python
numpy__numpy
numpy/_core/tests/test_umath.py
{ "start": 166900, "end": 168350 }
class ____: def test_object_direct(self): """ test direct implementation of these magic methods """ class C: def __floor__(self): return 1 def __ceil__(self): return 2 def __trunc__(self): return 3 arr = np.array([C(), C()]) assert_equal(np.floor(arr), [1, 1]) assert_equal(np.ceil(arr), [2, 2]) assert_equal(np.trunc(arr), [3, 3]) def test_object_indirect(self): """ test implementations via __float__ """ class C: def __float__(self): return -2.5 arr = np.array([C(), C()]) assert_equal(np.floor(arr), [-3, -3]) assert_equal(np.ceil(arr), [-2, -2]) with pytest.raises(TypeError): np.trunc(arr) # consistent with math.trunc def test_fraction(self): f = Fraction(-4, 3) assert_equal(np.floor(f), -2) assert_equal(np.ceil(f), -1) assert_equal(np.trunc(f), -1) @pytest.mark.parametrize('func', [np.floor, np.ceil, np.trunc]) @pytest.mark.parametrize('dtype', [np.bool, np.float64, np.float32, np.int64, np.uint32]) def test_output_dtype(self, func, dtype): arr = np.array([-2, 0, 4, 8]).astype(dtype) result = func(arr) assert_equal(arr, result) assert result.dtype == dtype
TestRoundingFunctions
python
pytest-dev__pytest
testing/test_config.py
{ "start": 85582, "end": 86351 }
class ____: def test_pytest_setup_cfg_unsupported(self, pytester: Pytester) -> None: pytester.makefile( ".cfg", setup=""" [pytest] addopts = --verbose """, ) with pytest.raises(pytest.fail.Exception): pytester.runpytest() def test_pytest_custom_cfg_unsupported(self, pytester: Pytester) -> None: pytester.makefile( ".cfg", custom=""" [pytest] addopts = --verbose """, ) with pytest.raises(pytest.fail.Exception): pytester.runpytest("-c", "custom.cfg") with pytest.raises(pytest.fail.Exception): pytester.runpytest("--config-file", "custom.cfg")
TestSetupCfg
python
huggingface__transformers
src/transformers/models/pix2struct/modeling_pix2struct.py
{ "start": 1848, "end": 3350 }
class ____(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the T5 style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://huggingface.co/papers/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization import FusedRMSNorm Pix2StructLayerNorm = FusedRMSNorm logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of Pix2StructLayerNorm") except ImportError: # using the normal Pix2StructLayerNorm pass except Exception: logger.warning("Discovered apex but it failed to load, falling back to Pix2StructLayerNorm")
Pix2StructLayerNorm
python
facelessuser__soupsieve
soupsieve/css_types.py
{ "start": 4252, "end": 4942 }
class ____(ImmutableDict): """Custom selectors.""" def __init__(self, arg: dict[str, str] | Iterable[tuple[str, str]]) -> None: """Initialize.""" super().__init__(arg) def _validate(self, arg: dict[str, str] | Iterable[tuple[str, str]]) -> None: """Validate arguments.""" if isinstance(arg, dict): if not all(isinstance(v, str) for v in arg.values()): raise TypeError(f'{self.__class__.__name__} values must be hashable') elif not all(isinstance(k, str) and isinstance(v, str) for k, v in arg): raise TypeError(f'{self.__class__.__name__} keys and values must be Unicode strings')
CustomSelectors
python
django__django
tests/admin_inlines/models.py
{ "start": 5642, "end": 5845 }
class ____(models.Model): text = models.CharField(max_length=40) poll = models.ForeignKey(Poll, models.CASCADE) def clean(self): raise ValidationError("Always invalid model.")
Question
python
nedbat__coveragepy
tests/test_misc.py
{ "start": 3738, "end": 6019 }
class ____(CoverageTest): """Test import_third_party.""" run_in_temp_dir = False def test_success(self) -> None: # Make sure we don't have pytest in sys.modules before we start. del sys.modules["pytest"] # Import pytest mod, has = import_third_party("pytest") assert has # Yes, it's really pytest: assert mod.__name__ == "pytest" print(dir(mod)) assert all(hasattr(mod, name) for name in ["skip", "mark", "raises", "warns"]) # But it's not in sys.modules: assert "pytest" not in sys.modules def test_failure(self) -> None: _, has = import_third_party("xyzzy") assert not has assert "xyzzy" not in sys.modules HUMAN_DATA = [ ("z1 a2z a01 a2a a3 a1", "a01 a1 a2a a2z a3 z1"), ("a10 a9 a100 a1", "a1 a9 a10 a100"), ("4.0 3.10-win 3.10-mac 3.9-mac 3.9-win", "3.9-mac 3.9-win 3.10-mac 3.10-win 4.0"), ] @pytest.mark.parametrize("words, ordered", HUMAN_DATA) def test_human_sorted(words: str, ordered: str) -> None: assert " ".join(human_sorted(words.split())) == ordered @pytest.mark.parametrize("words, ordered", HUMAN_DATA) def test_human_sorted_items(words: str, ordered: str) -> None: keys = words.split() # Check that we never try to compare the values in the items human_sorted_items([(k, object()) for k in keys]) items = [(k, 1) for k in keys] + [(k, 2) for k in keys] okeys = ordered.split() oitems = [(k, v) for k in okeys for v in [1, 2]] assert human_sorted_items(items) == oitems assert human_sorted_items(items, reverse=True) == oitems[::-1] def test_stdout_link_tty() -> None: with mock.patch.object(sys.stdout, "isatty", lambda: True): link = stdout_link("some text", "some url") assert link == "\033]8;;some url\asome text\033]8;;\a" def test_stdout_link_not_tty() -> None: # Without mocking isatty, it reports False in a pytest suite. assert stdout_link("some text", "some url") == "some text" def test_stdout_link_with_fake_stdout() -> None: # If stdout is another object, we should still be ok. 
with mock.patch.object(sys, "stdout", object()): link = stdout_link("some text", "some url") assert link == "some text"
ImportThirdPartyTest
python
plotly__plotly.py
plotly/graph_objs/layout/scene/yaxis/title/_font.py
{ "start": 235, "end": 9921 }
class ____(_BaseLayoutHierarchyType): _parent_path_str = "layout.scene.yaxis.title" _path_str = "layout.scene.yaxis.title.font" _valid_props = { "color", "family", "lineposition", "shadow", "size", "style", "textcase", "variant", "weight", } @property def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. The 'family' property is a string and must be specified as: - A non-empty string Returns ------- str """ return self["family"] @family.setter def family(self, val): self["family"] = val @property def lineposition(self): """ Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. The 'lineposition' property is a flaglist and may be specified as a string containing: - Any combination of ['under', 'over', 'through'] joined with '+' characters (e.g. 'under+over') OR exactly one of ['none'] (e.g. 'none') Returns ------- Any """ return self["lineposition"] @lineposition.setter def lineposition(self, val): self["lineposition"] = val @property def shadow(self): """ Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow for additional options. 
The 'shadow' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["shadow"] @shadow.setter def shadow(self, val): self["shadow"] = val @property def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] Returns ------- int|float """ return self["size"] @size.setter def size(self, val): self["size"] = val @property def style(self): """ Sets whether a font should be styled with a normal or italic face from its family. The 'style' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'italic'] Returns ------- Any """ return self["style"] @style.setter def style(self, val): self["style"] = val @property def textcase(self): """ Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. The 'textcase' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'word caps', 'upper', 'lower'] Returns ------- Any """ return self["textcase"] @textcase.setter def textcase(self, val): self["textcase"] = val @property def variant(self): """ Sets the variant of the font. The 'variant' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'small-caps', 'all-small-caps', 'all-petite-caps', 'petite-caps', 'unicase'] Returns ------- Any """ return self["variant"] @variant.setter def variant(self, val): self["variant"] = val @property def weight(self): """ Sets the weight (or boldness) of the font. The 'weight' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [1, 1000] OR exactly one of ['normal', 'bold'] (e.g. 
'bold') Returns ------- int """ return self["weight"] @weight.setter def weight(self, val): self["weight"] = val @property def _prop_descriptions(self): return """\ color family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. size style Sets whether a font should be styled with a normal or italic face from its family. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. variant Sets the variant of the font. weight Sets the weight (or boldness) of the font. """ def __init__( self, arg=None, color=None, family=None, lineposition=None, shadow=None, size=None, style=None, textcase=None, variant=None, weight=None, **kwargs, ): """ Construct a new Font object Sets this axis' title font. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.layout.scene.y axis.title.Font` color family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. 
shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. size style Sets whether a font should be styled with a normal or italic face from its family. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. variant Sets the variant of the font. weight Sets the weight (or boldness) of the font. Returns ------- Font """ super().__init__("font") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.layout.scene.yaxis.title.Font constructor must be a dict or an instance of :class:`plotly.graph_objs.layout.scene.yaxis.title.Font`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("family", arg, family) self._set_property("lineposition", arg, lineposition) self._set_property("shadow", arg, shadow) self._set_property("size", arg, size) self._set_property("style", arg, style) self._set_property("textcase", arg, textcase) self._set_property("variant", arg, variant) self._set_property("weight", arg, weight) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Font
python
explosion__spaCy
spacy/scorer.py
{ "start": 1892, "end": 2818 }
class ____: """An AUC ROC score. This is only defined for binary classification. Use the method is_binary before calculating the score, otherwise it may throw an error.""" def __init__(self) -> None: self.golds: List[Any] = [] self.cands: List[Any] = [] self.saved_score = 0.0 self.saved_score_at_len = 0 def score_set(self, cand, gold) -> None: self.cands.append(cand) self.golds.append(gold) def is_binary(self): return len(np.unique(self.golds)) == 2 @property def score(self): if not self.is_binary(): raise ValueError(Errors.E165.format(label=set(self.golds))) if len(self.golds) == self.saved_score_at_len: return self.saved_score self.saved_score = _roc_auc_score(self.golds, self.cands) self.saved_score_at_len = len(self.golds) return self.saved_score
ROCAUCScore
python
apache__airflow
providers/opensearch/tests/unit/opensearch/operators/test_opensearch.py
{ "start": 2576, "end": 3203 }
class ____: # This test does not test execute logic because there is only a redirect to the OpenSearch # client. def setup_method(self, dag_setup): self.dag = dag_setup self.open_search = OpenSearchCreateIndexOperator( task_id="test_opensearch_query_operator", index_name="test_index", index_body={"test": 1} ) def test_init(self): assert self.open_search.task_id == "test_opensearch_query_operator" assert self.open_search.opensearch_conn_id == "opensearch_default" assert self.open_search.index_name == "test_index"
TestOpenSearchCreateIndexOperator
python
django__django
tests/admin_widgets/tests.py
{ "start": 34476, "end": 39790 }
class ____(SimpleTestCase): def test_no_can_add_related(self): rel = Individual._meta.get_field("parent").remote_field w = widgets.AdminRadioSelect() # Used to fail with a name error. w = widgets.RelatedFieldWidgetWrapper(w, rel, widget_admin_site) self.assertFalse(w.can_add_related) def test_select_multiple_widget_cant_change_delete_related(self): rel = Individual._meta.get_field("parent").remote_field widget = forms.SelectMultiple() wrapper = widgets.RelatedFieldWidgetWrapper( widget, rel, widget_admin_site, can_add_related=True, can_change_related=True, can_delete_related=True, ) self.assertTrue(wrapper.can_add_related) self.assertFalse(wrapper.can_change_related) self.assertFalse(wrapper.can_delete_related) def test_on_delete_cascade_rel_cant_delete_related(self): rel = Individual._meta.get_field("soulmate").remote_field widget = forms.Select() wrapper = widgets.RelatedFieldWidgetWrapper( widget, rel, widget_admin_site, can_add_related=True, can_change_related=True, can_delete_related=True, ) self.assertTrue(wrapper.can_add_related) self.assertTrue(wrapper.can_change_related) self.assertFalse(wrapper.can_delete_related) def test_custom_widget_render(self): class CustomWidget(forms.Select): def render(self, *args, **kwargs): return "custom render output" rel = Album._meta.get_field("band").remote_field widget = CustomWidget() wrapper = widgets.RelatedFieldWidgetWrapper( widget, rel, widget_admin_site, can_add_related=True, can_change_related=True, can_delete_related=True, ) output = wrapper.render("name", "value") self.assertIn("custom render output", output) def test_widget_delegates_value_omitted_from_data(self): class CustomWidget(forms.Select): def value_omitted_from_data(self, data, files, name): return False rel = Album._meta.get_field("band").remote_field widget = CustomWidget() wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site) self.assertIs(wrapper.value_omitted_from_data({}, {}, "band"), False) def test_widget_is_hidden(self): 
rel = Album._meta.get_field("band").remote_field widget = forms.HiddenInput() widget.choices = () wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site) self.assertIs(wrapper.is_hidden, True) context = wrapper.get_context("band", None, {}) self.assertIs(context["is_hidden"], True) output = wrapper.render("name", "value") # Related item links are hidden. self.assertNotIn("<a ", output) def test_widget_is_not_hidden(self): rel = Album._meta.get_field("band").remote_field widget = forms.Select() wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site) self.assertIs(wrapper.is_hidden, False) context = wrapper.get_context("band", None, {}) self.assertIs(context["is_hidden"], False) output = wrapper.render("name", "value") # Related item links are present. self.assertIn("<a ", output) def test_data_model_ref_when_model_name_is_camel_case(self): rel = VideoStream._meta.get_field("release_event").remote_field widget = forms.Select() wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site) self.assertIs(wrapper.is_hidden, False) context = wrapper.get_context("release_event", None, {}) self.assertEqual(context["model"], "release event") self.assertEqual(context["model_name"], "releaseevent") output = wrapper.render("stream", "value") expected = """ <div class="related-widget-wrapper" data-model-ref="releaseevent"> <select name="stream" data-context="available-source"> </select> <a class="related-widget-wrapper-link add-related" id="add_id_stream" data-popup="yes" title="Add another release event" href="/admin_widgets/releaseevent/add/?_to_field=album&amp;_popup=1"> <img src="/static/admin/img/icon-addlink.svg" alt="" width="24" height="24"> </a> </div> """ self.assertHTMLEqual(output, expected) def test_non_select_widget_cant_change_delete_related(self): main_band = Event._meta.get_field("main_band") widget = widgets.AdminRadioSelect() wrapper = widgets.RelatedFieldWidgetWrapper( widget, main_band, widget_admin_site, 
can_add_related=True, can_change_related=True, can_delete_related=True, ) self.assertTrue(wrapper.can_add_related) self.assertFalse(wrapper.can_change_related) self.assertFalse(wrapper.can_delete_related) @override_settings(ROOT_URLCONF="admin_widgets.urls")
RelatedFieldWidgetWrapperTests
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 638763, "end": 639228 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("cursor", "node", "starred_at") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") node = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="node") starred_at = sgqlc.types.Field( sgqlc.types.non_null(DateTime), graphql_name="starredAt" )
StarredRepositoryEdge
python
apache__airflow
task-sdk/src/airflow/sdk/exceptions.py
{ "start": 3185, "end": 4281 }
class ____(AirflowFailException): main_message: str def __init__(self, inactive_asset_keys: Collection[AssetUniqueKey | AssetNameRef | AssetUriRef]) -> None: self.inactive_asset_keys = inactive_asset_keys @staticmethod def _render_asset_key(key: AssetUniqueKey | AssetNameRef | AssetUriRef) -> str: from airflow.sdk.definitions.asset import AssetNameRef, AssetUniqueKey, AssetUriRef if isinstance(key, AssetUniqueKey): return f"Asset(name={key.name!r}, uri={key.uri!r})" if isinstance(key, AssetNameRef): return f"Asset.ref(name={key.name!r})" if isinstance(key, AssetUriRef): return f"Asset.ref(uri={key.uri!r})" return repr(key) # Should not happen, but let's fails more gracefully in an exception. def __str__(self) -> str: return f"{self.main_message}: {self.inactive_assets_message}" @property def inactive_assets_message(self) -> str: return ", ".join(self._render_asset_key(key) for key in self.inactive_asset_keys)
_AirflowExecuteWithInactiveAssetExecption
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/methodOverride2.py
{ "start": 1919, "end": 2084 }
class ____(Generic[P, R]): def method1(self, *args: P.args, **kwargs: P.kwargs) -> R: ... def method2(self, *args: P.args, **kwargs: P.kwargs) -> R: ...
Base2
python
google__pytype
pytype/tools/merge_pyi/merge_pyi.py
{ "start": 384, "end": 816 }
class ____(Exception): """Wrap exceptions thrown while merging files.""" def _merge_csts(*, py_tree, pyi_tree): context = codemod.CodemodContext() vis = visitors.ApplyTypeAnnotationsVisitor vis.store_stub_in_context(context, pyi_tree) return vis( context, overwrite_existing_annotations=False, strict_posargs_matching=False, strict_annotation_matching=True, ).transform_module(py_tree)
MergeError
python
getsentry__sentry
src/sentry/users/models/useremail.py
{ "start": 2096, "end": 6635 }
class ____(ControlOutboxProducingModel): __relocation_scope__ = RelocationScope.User __relocation_dependencies__ = {"sentry.Email"} __relocation_custom_ordinal__ = ["user", "email"] user = FlexibleForeignKey(settings.AUTH_USER_MODEL, related_name="emails") email = models.EmailField(_("email address"), max_length=200) validation_hash = models.CharField(max_length=32, default=get_secure_token) date_hash_added = models.DateTimeField(default=timezone.now) is_verified = models.BooleanField( _("verified"), default=False, help_text=_("Designates whether this user has confirmed their email."), ) objects: ClassVar[UserEmailManager] = UserEmailManager() class Meta: app_label = "sentry" db_table = "sentry_useremail" unique_together = (("user", "email"),) __repr__ = sane_repr("user_id", "email") def outboxes_for_update(self, shard_identifier: int | None = None) -> list[ControlOutboxBase]: regions = find_regions_for_user(self.user_id) return [ outbox for outbox in OutboxCategory.USER_UPDATE.as_control_outboxes( region_names=regions, shard_identifier=self.user_id, object_identifier=self.user_id, ) ] def set_hash(self) -> None: self.date_hash_added = timezone.now() self.validation_hash = get_secure_token() def hash_is_valid(self) -> bool: return bool( self.validation_hash and self.date_hash_added > timezone.now() - timedelta(hours=48) ) def is_primary(self) -> bool: return self.user.email == self.email @classmethod def get_primary_email(cls, user: User) -> UserEmail: """@deprecated""" return cls.objects.get_primary_email(user) def normalize_before_relocation_import( self, pk_map: PrimaryKeyMap, scope: ImportScope, flags: ImportFlags ) -> int | None: from sentry.users.models.user import User old_user_id = self.user_id old_pk = super().normalize_before_relocation_import(pk_map, scope, flags) if old_pk is None: return None # If we are merging users, ignore the imported email and use the existing user's email # instead. 
if pk_map.get_kind(get_model_name(User), old_user_id) == ImportKind.Existing: return None # Only preserve validation hashes in the backup/restore scope - in all others, have the user # verify their email again. if scope != ImportScope.Global: self.is_verified = False self.validation_hash = get_secure_token() self.date_hash_added = timezone.now() return old_pk def write_relocation_import( self, _s: ImportScope, _f: ImportFlags ) -> tuple[int, ImportKind] | None: # The `UserEmail` was automatically generated `post_save()`, but only if it was the user's # primary email. We just need to update it with the data being imported. Note that if we've # reached this point, we cannot be merging into an existing user, and are instead modifying # the just-created `UserEmail` for a new one. try: useremail = self.__class__.objects.get(user=self.user, email=self.email) for f in self._meta.fields: if f.name not in ["id", "pk"]: setattr(useremail, f.name, getattr(self, f.name)) except self.__class__.DoesNotExist: # This is a non-primary email, so was not auto-created - go ahead and add it in. useremail = self useremail.save() # If we've entered this method at all, we can be sure that the `UserEmail` was created as # part of the import, since this is a new `User` (the "existing" `User` due to # `--merge_users=true` case is handled in the `normalize_before_relocation_import()` method # above). return (useremail.pk, ImportKind.Inserted) @classmethod def sanitize_relocation_json( cls, json: Any, sanitizer: Sanitizer, model_name: NormalizedModelName | None = None ) -> None: model_name = get_model_name(cls) if model_name is None else model_name super().sanitize_relocation_json(json, sanitizer, model_name) validation_hash = get_secure_token() sanitizer.set_string( json, SanitizableField(model_name, "validation_hash"), lambda _: validation_hash )
UserEmail
python
PyCQA__pylint
tests/functional/m/method_hidden.py
{ "start": 2204, "end": 2271 }
class ____(ParentTwo): def __private(self): pass
ChildTwo
python
spack__spack
lib/spack/spack/detection/common.py
{ "start": 9406, "end": 10892 }
class ____: @staticmethod def find_windows_compiler_root_paths() -> List[str]: """Helper for Windows compiler installation root discovery At the moment simply returns location of VS install paths from VSWhere But should be extended to include more information as relevant""" return list(winOs.WindowsOs().vs_install_paths) @staticmethod def find_windows_compiler_cmake_paths() -> List[str]: """Semi hard-coded search path for cmake bundled with MSVC""" return [ os.path.join( path, "Common7", "IDE", "CommonExtensions", "Microsoft", "CMake", "CMake", "bin" ) for path in WindowsCompilerExternalPaths.find_windows_compiler_root_paths() ] @staticmethod def find_windows_compiler_ninja_paths() -> List[str]: """Semi hard-coded search heuristic for locating ninja bundled with MSVC""" return [ os.path.join(path, "Common7", "IDE", "CommonExtensions", "Microsoft", "CMake", "Ninja") for path in WindowsCompilerExternalPaths.find_windows_compiler_root_paths() ] @staticmethod def find_windows_compiler_bundled_packages() -> List[str]: """Return all MSVC compiler bundled packages""" return ( WindowsCompilerExternalPaths.find_windows_compiler_cmake_paths() + WindowsCompilerExternalPaths.find_windows_compiler_ninja_paths() )
WindowsCompilerExternalPaths
python
huggingface__transformers
src/transformers/tokenization_utils_base.py
{ "start": 5322, "end": 5602 }
class ____(NamedTuple): """ Token span in an encoded string (list of tokens). Args: start (`int`): Index of the first token in the span. end (`int`): Index of the token following the last token in the span. """ start: int end: int
TokenSpan