language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
sympy__sympy
sympy/stats/crv_types.py
{ "start": 75011, "end": 76825 }
class LomaxDistribution(SingleContinuousDistribution):
    _argnames = ('alpha', 'lamda',)

    # Support of the distribution: the non-negative reals.
    set = Interval(0, oo)

    @staticmethod
    def check(alpha, lamda):
        # Both parameters must be positive reals for the density to integrate to 1.
        _value_check(alpha.is_real, "Shape parameter should be real.")
        _value_check(lamda.is_real, "Scale parameter should be real.")
        _value_check(alpha.is_positive, "Shape parameter should be positive.")
        _value_check(lamda.is_positive, "Scale parameter should be positive.")

    def pdf(self, x):
        # f(x) = (alpha/lamda) * (1 + x/lamda)**(-(alpha + 1))
        shape, scale = self.alpha, self.lamda
        return (shape / scale) * (S.One + x / scale) ** (-shape - 1)

def Lomax(name, alpha, lamda):
    r"""
    Create a continuous random variable with a Lomax distribution.

    Explanation
    ===========

    The density of the Lomax distribution is given by

    .. math::
        f(x) := \frac{\alpha}{\lambda}\left[1+\frac{x}{\lambda}\right]^{-(\alpha+1)}

    Parameters
    ==========

    alpha : Real Number, `\alpha > 0`
        Shape parameter
    lamda : Real Number, `\lambda > 0`
        Scale parameter

    Examples
    ========

    >>> from sympy.stats import Lomax, density, cdf, E
    >>> from sympy import symbols
    >>> a, l = symbols('a, l', positive=True)
    >>> X = Lomax('X', a, l)
    >>> x = symbols('x')
    >>> density(X)(x)
    a*(1 + x/l)**(-a - 1)/l
    >>> cdf(X)(x)
    Piecewise((1 - 1/(1 + x/l)**a, x >= 0), (0, True))
    >>> a = 2
    >>> X = Lomax('X', a, l)
    >>> E(X)
    l

    Returns
    =======

    RandomSymbol

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Lomax_distribution

    """
    return rv(name, LomaxDistribution, (alpha, lamda))

#-------------------------------------------------------------------------------
# Maxwell distribution ---------------------------------------------------------
LomaxDistribution
python
python__mypy
mypyc/analysis/dataflow.py
{ "start": 4773, "end": 8577 }
class BaseAnalysisVisitor(OpVisitor[GenAndKill[T]]):
    """Base visitor for computing per-op gen/kill sets in dataflow analyses.

    Subclasses implement the four abstract hooks below; every other op kind
    is forwarded to ``visit_register_op`` so a concrete analysis only needs
    to special-case assignments and memory writes.
    """

    def visit_goto(self, op: Goto) -> GenAndKill[T]:
        # An unconditional jump neither generates nor kills anything.
        return set(), set()

    @abstractmethod
    def visit_register_op(self, op: RegisterOp) -> GenAndKill[T]:
        raise NotImplementedError

    @abstractmethod
    def visit_assign(self, op: Assign) -> GenAndKill[T]:
        raise NotImplementedError

    @abstractmethod
    def visit_assign_multi(self, op: AssignMulti) -> GenAndKill[T]:
        raise NotImplementedError

    @abstractmethod
    def visit_set_mem(self, op: SetMem) -> GenAndKill[T]:
        raise NotImplementedError

    # All remaining op kinds are plain register ops and share the same
    # gen/kill treatment via visit_register_op.

    def visit_call(self, op: Call) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_method_call(self, op: MethodCall) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_load_error_value(self, op: LoadErrorValue) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_load_literal(self, op: LoadLiteral) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_get_attr(self, op: GetAttr) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_set_attr(self, op: SetAttr) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_load_static(self, op: LoadStatic) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_init_static(self, op: InitStatic) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_tuple_get(self, op: TupleGet) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_tuple_set(self, op: TupleSet) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_box(self, op: Box) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_unbox(self, op: Unbox) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_cast(self, op: Cast) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_raise_standard_error(self, op: RaiseStandardError) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_call_c(self, op: CallC) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_primitive_op(self, op: PrimitiveOp) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_truncate(self, op: Truncate) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_extend(self, op: Extend) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_load_global(self, op: LoadGlobal) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_int_op(self, op: IntOp) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_float_op(self, op: FloatOp) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_float_neg(self, op: FloatNeg) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_comparison_op(self, op: ComparisonOp) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_float_comparison_op(self, op: FloatComparisonOp) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_load_mem(self, op: LoadMem) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_get_element_ptr(self, op: GetElementPtr) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_set_element(self, op: SetElement) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_load_address(self, op: LoadAddress) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_keep_alive(self, op: KeepAlive) -> GenAndKill[T]:
        return self.visit_register_op(op)

    def visit_unborrow(self, op: Unborrow) -> GenAndKill[T]:
        return self.visit_register_op(op)
BaseAnalysisVisitor
python
anthropics__anthropic-sdk-python
src/anthropic/_models.py
{ "start": 23941, "end": 30891 }
class DiscriminatorDetails:
    """Metadata used to pick the correct variant of a discriminated union."""

    field_name: str
    """The name of the discriminator field in the variant class, e.g.

    ```py
    class Foo(BaseModel):
        type: Literal['foo']
    ```

    Will result in field_name='type'
    """

    field_alias_from: str | None
    """The name of the discriminator field in the API response, e.g.

    ```py
    class Foo(BaseModel):
        type: Literal['foo'] = Field(alias='type_from_api')
    ```

    Will result in field_alias_from='type_from_api'
    """

    mapping: dict[str, type]
    """Mapping of discriminator value to variant type, e.g.

    {'foo': FooVariant, 'bar': BarVariant}
    """

    def __init__(
        self,
        *,
        mapping: dict[str, type],
        discriminator_field: str,
        discriminator_alias: str | None,
    ) -> None:
        self.mapping = mapping
        self.field_name = discriminator_field
        self.field_alias_from = discriminator_alias


def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, ...]) -> DiscriminatorDetails | None:
    """Inspect a union's annotations and variants to build DiscriminatorDetails.

    Returns None when no discriminator is declared or no variant contributes
    a literal discriminator value. Results are memoized per union type.
    """
    cached = DISCRIMINATOR_CACHE.get(union)
    if cached is not None:
        return cached

    # The discriminator field name comes from a PropertyInfo annotation on the union.
    discriminator_field_name: str | None = None

    for annotation in meta_annotations:
        if isinstance(annotation, PropertyInfo) and annotation.discriminator is not None:
            discriminator_field_name = annotation.discriminator
            break

    if not discriminator_field_name:
        return None

    mapping: dict[str, type] = {}
    discriminator_alias: str | None = None

    for variant in get_args(union):
        variant = strip_annotated_type(variant)
        if is_basemodel_type(variant):
            if PYDANTIC_V1:
                # Pydantic v1: read the field's Literal values from __fields__.
                field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name)  # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
                if not field_info:
                    continue

                # Note: if one variant defines an alias then they all should
                discriminator_alias = field_info.alias

                if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation):
                    for entry in get_args(annotation):
                        if isinstance(entry, str):
                            mapping[entry] = variant
            else:
                # Pydantic v2: walk the core schema to find the field's literal values.
                field = _extract_field_schema_pv2(variant, discriminator_field_name)
                if not field:
                    continue

                # Note: if one variant defines an alias then they all should
                discriminator_alias = field.get("serialization_alias")

                field_schema = field["schema"]

                if field_schema["type"] == "literal":
                    for entry in cast("LiteralSchema", field_schema)["expected"]:
                        if isinstance(entry, str):
                            mapping[entry] = variant

    if not mapping:
        return None

    details = DiscriminatorDetails(
        mapping=mapping,
        discriminator_field=discriminator_field_name,
        discriminator_alias=discriminator_alias,
    )
    # setdefault so a concurrent builder's entry is not clobbered.
    DISCRIMINATOR_CACHE.setdefault(union, details)
    return details


def _extract_field_schema_pv2(model: type[BaseModel], field_name: str) -> ModelField | None:
    """Return the pydantic-v2 core-schema entry for `field_name`, or None."""
    schema = model.__pydantic_core_schema__
    if schema["type"] == "definitions":
        # Unwrap the definitions wrapper to reach the model schema.
        schema = schema["schema"]

    if schema["type"] != "model":
        return None

    schema = cast("ModelSchema", schema)
    fields_schema = schema["schema"]
    if fields_schema["type"] != "model-fields":
        return None

    fields_schema = cast("ModelFieldsSchema", fields_schema)
    field = fields_schema["fields"].get(field_name)
    if not field:
        return None

    return cast("ModelField", field)  # pyright: ignore[reportUnnecessaryCast]


def validate_type(*, type_: type[_T], value: object) -> _T:
    """Strict validation that the given value matches the expected type"""
    if inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel):
        return cast(_T, parse_obj(type_, value))

    return cast(_T, _validate_non_model_type(type_=type_, value=value))


def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None:
    """Add a pydantic config for the given type.

    Note: this is a no-op on Pydantic v1.
    """
    setattr(typ, "__pydantic_config__", config)  # noqa: B010


def add_request_id(obj: BaseModel, request_id: str | None) -> None:
    # Attach the request id as private state on the model instance.
    obj._request_id = request_id

    # in Pydantic v1, using setattr like we do above causes the attribute
    # to be included when serializing the model which we don't want in this
    # case so we need to explicitly exclude it
    if PYDANTIC_V1:
        try:
            exclude_fields = obj.__exclude_fields__  # type: ignore
        except AttributeError:
            cast(Any, obj).__exclude_fields__ = {"_request_id", "__exclude_fields__"}
        else:
            cast(Any, obj).__exclude_fields__ = {*(exclude_fields or {}), "_request_id", "__exclude_fields__"}


# our use of subclassing here causes weirdness for type checkers,
# so we just pretend that we don't subclass
if TYPE_CHECKING:
    GenericModel = BaseModel
else:

    class GenericModel(BaseGenericModel, BaseModel):
        pass


if not PYDANTIC_V1:
    from pydantic import TypeAdapter as _TypeAdapter, computed_field as computed_field

    # Cache TypeAdapter construction: building one per call is expensive.
    _CachedTypeAdapter = cast("TypeAdapter[object]", lru_cache(maxsize=None)(_TypeAdapter))

    if TYPE_CHECKING:
        from pydantic import TypeAdapter
    else:
        TypeAdapter = _CachedTypeAdapter

    def _validate_non_model_type(*, type_: type[_T], value: object) -> _T:
        return TypeAdapter(type_).validate_python(value)

elif not TYPE_CHECKING:  # TODO: condition is weird

    class RootModel(GenericModel, Generic[_T]):
        """Used as a placeholder to easily convert runtime types to a Pydantic format
        to provide validation.

        For example:
        ```py
        validated = RootModel[int](__root__="5").__root__
        # validated: 5
        ```
        """

        __root__: _T

    def _validate_non_model_type(*, type_: type[_T], value: object) -> _T:
        model = _create_pydantic_model(type_).validate(value)
        return cast(_T, model.__root__)

    def _create_pydantic_model(type_: _T) -> Type[RootModel[_T]]:
        return RootModel[type_]  # type: ignore

    def TypeAdapter(*_args: Any, **_kwargs: Any) -> Any:
        # Pydantic v1 has no TypeAdapter; fail loudly if one is requested.
        raise RuntimeError("attempted to use TypeAdapter in pydantic v1")

    def computed_field(func: Any | None = None, /, **__: Any) -> Any:
        # Stand-in decorator: accepts the v2 call shapes but raises when invoked.
        def _exc_func(*_: Any, **__: Any) -> Any:
            raise RuntimeError("attempted to use computed_field in pydantic v1")

        def _dec(*_: Any, **__: Any) -> Any:
            return _exc_func

        if func is not None:
            return _dec(func)
        else:
            return _dec
DiscriminatorDetails
python
doocs__leetcode
solution/2600-2699/2606.Find the Substring With Maximum Cost/Solution.py
{ "start": 0, "end": 346 }
class Solution:
    def maximumCostSubstring(self, s: str, chars: str, vals: List[int]) -> int:
        """Return the maximum total cost over all substrings of `s`.

        A character listed in `chars` costs the corresponding entry of
        `vals`; every other letter costs its 1-based alphabet position.
        The empty substring counts, so the answer is never negative.
        """
        cost = dict(zip(chars, vals))
        base = ord("a") - 1
        best = running = 0
        for ch in s:
            # Kadane: extend the current run or restart from empty.
            running = max(0, running + cost.get(ch, ord(ch) - base))
            best = max(best, running)
        return best
Solution
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/genericType28.py
{ "start": 3342, "end": 3425 }
class ____(Variadic_TA[T]): ... # This should generate an error.
VariadicChild_WithTA
python
realpython__materials
python-iterators-iterables/inf_fib.py
{ "start": 0, "end": 356 }
class FibonacciInfIterator:
    """An endless iterator yielding the Fibonacci numbers 0, 1, 1, 2, 3, ..."""

    def __init__(self):
        # _index counts how many values have been produced so far.
        self._index = 0
        self._current = 0
        self._next = 1

    def __iter__(self):
        # An iterator is its own iterable.
        return self

    def __next__(self):
        self._index += 1
        value = self._current
        upcoming = self._current + self._next
        self._current = self._next
        self._next = upcoming
        return value
FibonacciInfIterator
python
django__django
tests/sitemaps_tests/urls/http.py
{ "start": 1007, "end": 1080 }
class ____(AlternatesI18nSitemap): x_default = True
XDefaultI18nSitemap
python
dask__dask
dask/dataframe/dask_expr/_dummies.py
{ "start": 5061, "end": 5699 }
class GetDummies(Blockwise):
    """Blockwise expression applying ``get_dummies`` (one-hot encoding) to each partition."""

    # Positional signature mirrored from get_dummies.
    _parameters = [
        "frame",
        "prefix",
        "prefix_sep",
        "dummy_na",
        "columns",
        "sparse",
        "drop_first",
        "dtype",
    ]
    _defaults = {
        "prefix": None,
        "prefix_sep": "_",
        "dummy_na": False,
        "columns": None,
        "sparse": False,
        "drop_first": False,
        "dtype": bool,
    }
    # cudf has extra kwargs after `columns`
    _keyword_only = ["sparse", "drop_first", "dtype"]

    @staticmethod
    def operation(df, *args, **kwargs):
        # Dispatch to the get_dummies of whichever library backs `df`.
        return get_meta_library(df).get_dummies(df, *args, **kwargs)
GetDummies
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_lookup.py
{ "start": 9486, "end": 9539 }
class Foo:
    """Minimal test fixture: a class whose constructor requires one positional argument."""

    def __init__(self, x):
        # The argument is accepted but intentionally unused.
        pass
Foo
python
getsentry__sentry
src/sentry/notifications/api/endpoints/notification_actions_details.py
{ "start": 1396, "end": 7630 }
class NotificationActionsDetailsEndpoint(OrganizationEndpoint):
    owner = ApiOwner.ECOSYSTEM
    publish_status = {
        "DELETE": ApiPublishStatus.PUBLIC,
        "GET": ApiPublishStatus.PUBLIC,
        "PUT": ApiPublishStatus.PUBLIC,
    }
    """
    Manages a single NotificationAction via the action_id passed in the path.
    GET: Returns the serialized NotificationAction
    PUT: Update the entire NotificationAction, overwriting previous values
    DELETE: Delete the NotificationAction
    """
    permission_classes = (NotificationActionsPermission,)

    def convert_args(self, request: Request, action_id: int, *args, **kwargs):
        # Resolve action_id into a NotificationAction and enforce per-project
        # access before the handler runs; raises 404/403 on failure.
        parsed_args, parsed_kwargs = super().convert_args(request, *args, **kwargs)
        organization = parsed_kwargs["organization"]
        # Get the relevant action associated with the organization and request
        try:
            action = NotificationAction.objects.get(id=action_id, organization_id=organization.id)
        except NotificationAction.DoesNotExist:
            raise ResourceDoesNotExist
        parsed_kwargs["action"] = action
        action_projects = action.projects.all()
        # If the action has no projects, skip the project access check
        if not action_projects:
            return (parsed_args, parsed_kwargs)
        if request.method == "GET":
            # If we're reading the action, the user must have access to one of the associated projects
            if not any(
                request.access.has_project_scope(project, "project:read")
                for project in action_projects
            ):
                raise PermissionDenied
        else:
            # If we're modifying the action, the user must have access to all associated projects
            if not all(
                request.access.has_project_scope(project, "project:write")
                for project in action_projects
            ):
                raise PermissionDenied(
                    detail="You don't have sufficient permissions to all the projects associated with this action."
                )
        return (parsed_args, parsed_kwargs)

    @extend_schema(
        operation_id="Retrieve a Spike Protection Notification Action",
        parameters=[
            GlobalParams.ORG_ID_OR_SLUG,
            NotificationParams.ACTION_ID,
        ],
        responses={200: OutgoingNotificationActionSerializer},
        examples=NotificationActionExamples.GET_NOTIFICATION_ACTION,
    )
    def get(
        self, request: Request, organization: Organization, action: NotificationAction
    ) -> Response:
        """
        Returns a serialized Spike Protection Notification Action object.

        Notification Actions notify a set of members when an action has been triggered through a notification service such as Slack or Sentry.
        For example, organization owners and managers can receive an email when a spike occurs.
        """
        logger.info(
            "notification_action.get_one",
            extra={"organization_id": organization.id, "action_id": action.id},
        )
        return Response(serialize(action, request.user))

    @extend_schema(
        operation_id="Update a Spike Protection Notification Action",
        parameters=[
            GlobalParams.ORG_ID_OR_SLUG,
            NotificationParams.ACTION_ID,
        ],
        request=NotificationActionSerializer,
        responses={
            202: OutgoingNotificationActionSerializer,
            400: RESPONSE_BAD_REQUEST,
        },
        examples=NotificationActionExamples.UPDATE_NOTIFICATION_ACTION,
    )
    def put(
        self, request: Request, organization: Organization, action: NotificationAction
    ) -> Response:
        """
        Updates a Spike Protection Notification Action.

        Notification Actions notify a set of members when an action has been triggered through a notification service such as Slack or Sentry.
        For example, organization owners and managers can receive an email when a spike occurs.
        """
        serializer = NotificationActionSerializer(
            instance=action,
            context={
                "access": request.access,
                "organization": organization,
                "user": request.user,
            },
            data=request.data,
        )
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

        action = serializer.save()
        logger.info(
            "notification_action.update",
            extra={"organization_id": organization.id, "action_id": action.id},
        )
        # Record the edit in the organization's audit log.
        self.create_audit_entry(
            request=request,
            organization=organization,
            target_object=action.id,
            event=audit_log.get_event_id("NOTIFICATION_ACTION_EDIT"),
            data=action.get_audit_log_data(),
        )
        return Response(serialize(action, user=request.user), status=status.HTTP_202_ACCEPTED)

    @extend_schema(
        operation_id="Delete a Spike Protection Notification Action",
        parameters=[
            GlobalParams.ORG_ID_OR_SLUG,
            NotificationParams.ACTION_ID,
        ],
        responses={
            204: RESPONSE_NO_CONTENT,
        },
    )
    def delete(
        self, request: Request, organization: Organization, action: NotificationAction
    ) -> Response:
        """
        Deletes a Spike Protection Notification Action.

        Notification Actions notify a set of members when an action has been triggered through a notification service such as Slack or Sentry.
        For example, organization owners and managers can receive an email when a spike occurs.
        """
        logger.info(
            "notification_action.delete",
            extra={"organization_id": organization.id, "action_data": serialize(action)},
        )
        # Audit-log the removal before deleting the row.
        self.create_audit_entry(
            request=request,
            organization=organization,
            target_object=action.id,
            event=audit_log.get_event_id("NOTIFICATION_ACTION_REMOVE"),
            data=action.get_audit_log_data(),
        )
        action.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
NotificationActionsDetailsEndpoint
python
django__django
django/db/models/functions/math.py
{ "start": 3724, "end": 3980 }
class Pi(NumericOutputFieldMixin, Func):
    """Database function returning the constant pi (SQL ``PI()``)."""

    function = "PI"
    arity = 0

    def as_oracle(self, compiler, connection, **extra_context):
        # Oracle lacks a PI() function, so emit the literal value of math.pi
        # as the SQL template instead.
        return super().as_sql(
            compiler, connection, template=str(math.pi), **extra_context
        )
Pi
python
tensorflow__tensorflow
tensorflow/python/data/kernel_tests/interleave_test.py
{ "start": 3738, "end": 17134 }
class InterleaveTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests for `tf.data.Dataset.interleave`."""

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              input_values=[[4, 5, 6]],
              cycle_length=1,
              block_length=1,
              expected_elements=[[
                  4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 5,
                  5, 5, 5, 5, 6, 6, 6, 6, 6, 6
              ]]) + combinations.combine(
                  input_values=[[4, 5, 6]],
                  cycle_length=2,
                  block_length=1,
                  expected_elements=[[
                      4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6,
                      5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 6
                  ]]) + combinations.combine(
                      input_values=[[4, 5, 6]],
                      cycle_length=2,
                      block_length=3,
                      expected_elements=[[
                          4, 4, 4, 5, 5, 5, 4, 5, 5, 6, 6, 6, 4, 4, 4, 6, 6, 6,
                          4, 5, 5, 5, 6, 6, 6, 5, 5, 6, 6, 6
                      ]]) + combinations.combine(
                          input_values=[[4, 5, 6]],
                          cycle_length=7,
                          block_length=2,
                          expected_elements=[[
                              4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4, 5, 5,
                              6, 6, 4, 4, 5, 5, 6, 6, 5, 6, 6, 5, 6, 6
                          ]]) + combinations.combine(
                              input_values=[[4, 0, 6]],
                              cycle_length=2,
                              block_length=1,
                              expected_elements=[[
                                  4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6,
                                  6, 6, 6, 6, 6
                              ]])))
  def testPythonImplementation(self, input_values, cycle_length, block_length,
                               expected_elements):
    # Check the pure-Python reference `_interleave` against hand-computed output.
    input_lists = _repeat(input_values, 2)
    for expected, produced in zip(
        expected_elements, _interleave(input_lists, cycle_length,
                                       block_length)):
      self.assertEqual(expected, produced)

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              input_values=[np.int64([4, 5, 6])],
              cycle_length=1,
              block_length=3,
              num_parallel_calls=[None, 1]) + combinations.combine(
                  input_values=[np.int64([4, 5, 6])],
                  cycle_length=2,
                  block_length=[1, 3],
                  num_parallel_calls=[None, 1, 2]) + combinations.combine(
                      input_values=[np.int64([4, 5, 6])],
                      cycle_length=7,
                      block_length=2,
                      num_parallel_calls=[None, 1, 3, 5, 7]) +
          combinations.combine(
              input_values=[np.int64([4, 5, 6, 7])],
              cycle_length=None,
              block_length=3,
              num_parallel_calls=[None, 1]) + combinations.combine(
                  input_values=[np.int64([]), np.int64([0, 0, 0])],
                  cycle_length=2,
                  block_length=3,
                  num_parallel_calls=[None]) + combinations.combine(
                      input_values=[np.int64([4, 0, 6])],
                      cycle_length=2,
                      block_length=3,
                      num_parallel_calls=[None, 1, 2])))
  def testInterleaveDataset(self, input_values, cycle_length, block_length,
                            num_parallel_calls):
    # The dataset output must match the pure-Python reference implementation.
    count = 2
    dataset = dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
        count).interleave(
            lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x),
            cycle_length, block_length, num_parallel_calls)
    expected_output = [
        element for element in _interleave(
            _repeat(input_values, count), cycle_length, block_length,
            num_parallel_calls)
    ]
    self.assertDatasetProduces(dataset, expected_output)

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              input_values=[np.float32([1., np.nan, 2., np.nan, 3.])],
              cycle_length=1,
              block_length=3,
              num_parallel_calls=[None, 1]) + combinations.combine(
                  input_values=[np.float32([1., np.nan, 2., np.nan, 3.])],
                  cycle_length=2,
                  block_length=[1, 3],
                  num_parallel_calls=[None, 1, 2]) + combinations.combine(
                      input_values=[np.float32([1., np.nan, 2., np.nan, 3.])],
                      cycle_length=7,
                      block_length=2,
                      num_parallel_calls=[None, 1, 3, 5, 7])))
  def testInterleaveDatasetError(self, input_values, cycle_length,
                                 block_length, num_parallel_calls):
    # NaN inputs trip check_numerics; non-NaN elements still come through.
    dataset = dataset_ops.Dataset.from_tensor_slices(input_values).map(
        lambda x: array_ops.check_numerics(x, "message")).interleave(
            dataset_ops.Dataset.from_tensors, cycle_length, block_length,
            num_parallel_calls)
    get_next = self.getNext(dataset)
    for value in input_values:
      if np.isnan(value):
        with self.assertRaises(errors.InvalidArgumentError):
          self.evaluate(get_next())
      else:
        self.assertEqual(value, self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())

  @combinations.generate(test_base.default_test_combinations())
  def testInterleaveSparse(self):
    # Interleave over sparse tensors densified inside the map function.

    def _map_fn(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])

    def _interleave_fn(x):
      return dataset_ops.Dataset.from_tensor_slices(
          sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))

    dataset = dataset_ops.Dataset.range(10).map(_map_fn).interleave(
        _interleave_fn, cycle_length=1)
    get_next = self.getNext(dataset)
    for i in range(10):
      for j in range(2):
        expected = [i, 0] if j % 2 == 0 else [0, -i]
        self.assertAllEqual(expected, self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              input_values=[np.int64([4, 5, 6])],
              cycle_length=1,
              block_length=3,
              num_parallel_calls=1) + combinations.combine(
                  input_values=[np.int64([4, 5, 6])],
                  cycle_length=2,
                  block_length=[1, 3],
                  num_parallel_calls=[1, 2]) + combinations.combine(
                      input_values=[np.int64([4, 5, 6])],
                      cycle_length=7,
                      block_length=2,
                      num_parallel_calls=[1, 3, 5, 7]) + combinations.combine(
                          input_values=[np.int64([4, 5, 6, 7])],
                          cycle_length=None,
                          block_length=3,
                          num_parallel_calls=1) + combinations.combine(
                              input_values=[np.int64([4, 0, 6])],
                              cycle_length=2,
                              block_length=3,
                              num_parallel_calls=[1, 2])))
  def testSloppyInterleaveDataset(self, input_values, cycle_length,
                                  block_length, num_parallel_calls):
    # With determinism disabled the element order may differ, so compare
    # outputs as multisets (sorted copies).
    count = 2
    dataset = dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
        count).interleave(
            lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x),
            cycle_length, block_length, num_parallel_calls)
    options = options_lib.Options()
    options.deterministic = False
    dataset = dataset.with_options(options)
    expected_output = [
        element for element in _interleave(
            _repeat(input_values, count), cycle_length, block_length,
            num_parallel_calls)
    ]
    get_next = self.getNext(dataset)
    actual_output = []
    for _ in range(len(expected_output)):
      actual_output.append(self.evaluate(get_next()))
    # BUG FIX: `list.sort()` returns None, so the original
    # `assertAllEqual(expected_output.sort(), actual_output.sort())` compared
    # None with None and always passed. Compare sorted copies instead.
    self.assertAllEqual(sorted(expected_output), sorted(actual_output))

  @combinations.generate(test_base.default_test_combinations())
  def testInterleaveMap(self):
    # Nested interleave/map composition: each element ends up doubled twice.
    dataset = dataset_ops.Dataset.range(100)

    def interleave_fn(x):
      dataset = dataset_ops.Dataset.from_tensors(x)
      return dataset.map(lambda x: x + x)

    dataset = dataset.interleave(interleave_fn, cycle_length=5)
    dataset = dataset.interleave(interleave_fn, cycle_length=5)
    self.assertDatasetProduces(dataset, [4 * x for x in range(100)])

  @combinations.generate(test_base.default_test_combinations())
  def testParallelInterleaveCached(self):
    # Parallel interleave over a cached input dataset.
    dataset = dataset_ops.Dataset.range(5)
    dataset = dataset.cache(os.path.join(self.get_temp_dir(), "cache_dir"))

    def interleave_fn(x):
      return dataset_ops.Dataset.from_tensors(x)

    dataset = dataset.interleave(
        interleave_fn, cycle_length=2, num_parallel_calls=2)
    self.assertDatasetProduces(dataset, list(range(5)))

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              local_determinism=[None, True, False],
              global_determinism=[True, False])))
  def testDeterminismConfiguration(self, local_determinism, global_determinism):
    # The per-op `deterministic` argument overrides the global option.
    expect_determinism = local_determinism or (local_determinism is None and
                                               global_determinism)
    elements = list(range(1000))

    def dataset_fn(delay_ms):

      def interleave_fn(x):
        ds = dataset_ops.Dataset.from_tensors(x)
        if math_ops.equal(x, 0):
          ds = ds.apply(testing.sleep(delay_ms * 1000))
        else:
          ds = ds.apply(testing.sleep(0))
        return ds

      dataset = dataset_ops.Dataset.from_tensor_slices(elements)
      dataset = dataset.interleave(
          interleave_fn,
          cycle_length=10,
          num_parallel_calls=10,
          deterministic=local_determinism)
      opts = options_lib.Options()
      opts.deterministic = global_determinism
      dataset = dataset.with_options(opts)
      return dataset

    self.checkDeterminism(dataset_fn, expect_determinism, elements)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(num_parallel_calls=[None, 1])))
  def testName(self, num_parallel_calls):
    # The `name` argument must be accepted without changing the output.

    def fn(x):
      return dataset_ops.Dataset.from_tensors(x)

    dataset = dataset_ops.Dataset.from_tensors(42).interleave(
        fn, num_parallel_calls=num_parallel_calls, name="interleave")
    self.assertDatasetProduces(dataset, [42])

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(num_parallel_calls=[None, 1])))
  def testMapFuncMustReturnDataset(self, num_parallel_calls):
    # A map_func returning a plain value (not a Dataset) is a TypeError.

    def map_fn(x):
      return [x]

    with self.assertRaisesRegex(
        TypeError, "The `map_func` argument must return a `Dataset` object."):
      dataset_ops.Dataset.from_tensors(42).interleave(
          map_fn, num_parallel_calls=num_parallel_calls)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(num_parallel_calls=[None, 1])))
  def testMapFuncFailWithErrorContext(self, num_parallel_calls):
    # Errors raised inside map_func should carry the iterator error context.

    def fn(x):
      return dataset_ops.Dataset.from_tensors(x // 0)

    dataset = dataset_ops.Dataset.from_tensors(42).interleave(
        fn, num_parallel_calls=num_parallel_calls, name="interleave")
    get_next = self.getNext(dataset)
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        r".*Error in user-defined function passed to .* transformation with "
        r"iterator: Iterator::Root::.*"):
      self.evaluate(get_next())

  @combinations.generate(test_base.v2_eager_only_combinations())
  def testSymbolicCheckpointSize(self):
    if sys.platform == "darwin":
      self.skipTest(
          "MacOS does not support symbolic checkpointing."
      )  # b/284304023
    dataset = dataset_ops.Dataset.range(10)
    # Each input element to `.interleave` is > 1MB
    dataset = dataset.map(
        # Create a huge input element
        lambda x: stateless_random_ops.stateless_random_uniform(
            [1_000_000], seed=(42, 42)
        )
    )
    dataset = dataset.interleave(
        lambda x: dataset_ops.Dataset.range(200),
        cycle_length=5,
        num_parallel_calls=None,
    )
    options = options_lib.Options()
    options.experimental_symbolic_checkpoint = True
    dataset = dataset.with_options(options)
    it = dataset.as_numpy_iterator()
    for _ in range(5):
      next(it)
    checkpoint = it.save().numpy()
    # A symbolic checkpoint should not embed the huge input elements.
    self.assertLess(
        len(checkpoint),
        5_000,
        f"The checkpoint should be small enough. Got {len(checkpoint)} bytes",
    )
InterleaveTest
python
ray-project__ray
python/ray/data/_internal/planner/plan_expression/expression_evaluator.py
{ "start": 6166, "end": 13996 }
class ____(ast.NodeVisitor): # TODO: Deprecate this visitor after we remove string support in filter API. def visit_Compare(self, node: ast.Compare) -> ds.Expression: """Handle comparison operations (e.g., a == b, a < b, a in b). Args: node: The AST node representing a comparison operation. Returns: An expression representing the comparison. """ # Handle left operand # TODO Validate columns if isinstance(node.left, ast.Attribute): # Visit and handle attributes left_expr = self.visit(node.left) elif isinstance(node.left, ast.Name): # Treat as a simple field left_expr = self.visit(node.left) elif isinstance(node.left, ast.Constant): # Constant values are used directly left_expr = node.left.value else: raise ValueError(f"Unsupported left operand type: {type(node.left)}") comparators = [self.visit(comp) for comp in node.comparators] op = node.ops[0] if isinstance(op, ast.In): return pc.is_in(left_expr, comparators[0]) elif isinstance(op, ast.NotIn): return ~pc.is_in(left_expr, comparators[0]) elif isinstance(op, ast.Eq): return left_expr == comparators[0] elif isinstance(op, ast.NotEq): return left_expr != comparators[0] elif isinstance(op, ast.Lt): return left_expr < comparators[0] elif isinstance(op, ast.LtE): return left_expr <= comparators[0] elif isinstance(op, ast.Gt): return left_expr > comparators[0] elif isinstance(op, ast.GtE): return left_expr >= comparators[0] else: raise ValueError(f"Unsupported operator type: {op}") def visit_BoolOp(self, node: ast.BoolOp) -> ds.Expression: """Handle logical operations (e.g., a and b, a or b). Args: node: The AST node representing a boolean operation. Returns: An expression representing the logical operation. 
""" conditions = [self.visit(value) for value in node.values] combined_expr = conditions[0] for condition in conditions[1:]: if isinstance(node.op, ast.And): # Combine conditions with logical AND combined_expr &= condition elif isinstance(node.op, ast.Or): # Combine conditions with logical OR combined_expr |= condition else: raise ValueError( f"Unsupported logical operator: {type(node.op).__name__}" ) return combined_expr def visit_Name(self, node: ast.Name) -> ds.Expression: """Handle variable (name) nodes and return them as pa.dataset.Expression. Even if the name contains periods, it's treated as a single string. Args: node: The AST node representing a variable. Returns: The variable wrapped as a pa.dataset.Expression. """ # Directly use the field name as a string (even if it contains periods) field_name = node.id return pc.field(field_name) def visit_Attribute(self, node: ast.Attribute) -> object: """Handle attribute access (e.g., np.nan). Args: node: The AST node representing an attribute access. Returns: object: The attribute value. Raises: ValueError: If the attribute is unsupported. """ # Recursively visit the left side (base object or previous attribute) if isinstance(node.value, ast.Attribute): # If the value is an attribute, recursively resolve it left_expr = self.visit(node.value) return pc.field(f"{left_expr}.{node.attr}") elif isinstance(node.value, ast.Name): # If the value is a name (e.g., "foo"), we can directly return the field left_name = node.value.id # The base name, e.g., "foo" return pc.field(f"{left_name}.{node.attr}") raise ValueError(f"Unsupported attribute: {node.attr}") def visit_List(self, node: ast.List) -> ds.Expression: """Handle list literals. Args: node: The AST node representing a list. Returns: The list of elements wrapped as a pa.dataset.Expression. 
""" elements = [self.visit(elt) for elt in node.elts] return pa.array(elements) def visit_UnaryOp(self, node: ast.UnaryOp) -> ds.Expression: """Handle case where comparator is UnaryOP (e.g., a == -1). AST for this expression will be Compare(left=Name(id='a'), ops=[Eq()], comparators=[UnaryOp(op=USub(), operand=Constant(value=1))]) Args: node: The constant value.""" op = node.op if isinstance(op, ast.USub): return pc.scalar(-node.operand.value) else: raise ValueError(f"Unsupported unary operator: {op}") # TODO (srinathk) Note that visit_Constant does not return pa.dataset.Expression # because to support function in() which takes in a List, the elements in the List # needs to values instead of pa.dataset.Expression per pyarrow.dataset.Expression # specification. May be down the road, we can update it as Arrow relaxes this # constraint. def visit_Constant(self, node: ast.Constant) -> object: """Handle constant values (e.g., numbers, strings). Args: node: The AST node representing a constant value. Returns: object: The constant value itself (e.g., number, string, or boolean). """ return node.value # Return the constant value directly. def visit_Call(self, node: ast.Call) -> ds.Expression: """Handle function calls (e.g., is_nan(a), is_valid(b)). Args: node: The AST node representing a function call. Returns: The corresponding expression based on the function called. Raises: ValueError: If the function is unsupported or has incorrect arguments. 
""" func_name = node.func.id function_map = { "is_nan": lambda arg: arg.is_nan(), "is_null": lambda arg, nan_is_null=False: arg.is_null( nan_is_null=nan_is_null ), "is_valid": lambda arg: arg.is_valid(), "is_in": lambda arg1, arg2: pc.is_in(arg1, arg2), } if func_name in function_map: # Visit all arguments of the function call args = [self.visit(arg) for arg in node.args] # Handle the "is_null" function with one or two arguments if func_name == "is_null": if len(args) == 1: return function_map[func_name](args[0]) elif len(args) == 2: return function_map[func_name](args[0], args[1]) else: raise ValueError("is_null function requires one or two arguments.") # Handle the "is_in" function with exactly two arguments elif func_name == "is_in" and len(args) != 2: raise ValueError("is_in function requires two arguments.") # Ensure the function has one argument (for functions like is_valid) elif func_name != "is_in" and len(args) != 1: raise ValueError(f"{func_name} function requires exactly one argument.") # Call the corresponding function with the arguments return function_map[func_name](*args) else: raise ValueError(f"Unsupported function: {func_name}")
_ConvertToArrowExpressionVisitor
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 259382, "end": 259900 }
class ____(sgqlc.types.Input): """Ways in which lists of package versions can be ordered upon return. """ __schema__ = github_schema __field_names__ = ("field", "direction") field = sgqlc.types.Field(PackageVersionOrderField, graphql_name="field") """The field in which to order package versions by.""" direction = sgqlc.types.Field(OrderDirection, graphql_name="direction") """The direction in which to order package versions by the specified field. """
PackageVersionOrder
python
lepture__mistune
src/mistune/directives/_rst.py
{ "start": 1025, "end": 2282 }
class ____(BaseDirective): """A RST style of directive syntax is inspired by reStructuredText. The syntax is very powerful that you can define a lot of custom features on your own. The syntax looks like: .. code-block:: text .. directive-type:: directive value :option-key: option value :option-key: option value content text here To use ``RSTDirective``, developers can add it into plugin list in the :class:`Markdown` instance: .. code-block:: python import mistune from mistune.directives import RSTDirective, Admonition md = mistune.create_markdown(plugins=[ # ... RSTDirective([Admonition()]), ]) """ parser = RSTParser directive_pattern = r"^\.\. +[a-zA-Z0-9_-]+\:\:" def parse_directive(self, block: "BlockParser", m: Match[str], state: "BlockState") -> Optional[int]: m2 = _directive_re.match(state.src, state.cursor) if not m2: return None self.parse_method(block, m2, state) return m2.end() def __call__(self, markdown: "Markdown") -> None: super(RSTDirective, self).__call__(markdown) self.register_block_parser(markdown)
RSTDirective
python
Lightning-AI__lightning
tests/tests_pytorch/utilities/test_dtype_device_mixin.py
{ "start": 1019, "end": 1176 }
class ____(BoringModel): def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self.module = SubModule()
TopModule
python
Textualize__textual
src/textual/widgets/_rule.py
{ "start": 1448, "end": 1544 }
class ____(Exception): """Exception raised for an invalid rule line style."""
InvalidLineStyle
python
coleifer__peewee
peewee.py
{ "start": 41145, "end": 41849 }
class ____(ColumnBase): def __init__(self, value, converter=None, unpack=True): self.value = value self.converter = converter self.multi = unpack and isinstance(self.value, multi_types) if self.multi: self.values = [] for item in self.value: if isinstance(item, Node): self.values.append(item) else: self.values.append(Value(item, self.converter)) def __sql__(self, ctx): if self.multi: # For multi-part values (e.g. lists of IDs). return ctx.sql(EnclosedNodeList(self.values)) return ctx.value(self.value, self.converter)
Value
python
dagster-io__dagster
python_modules/libraries/dagster-databricks/dagster_databricks/pipes.py
{ "start": 5322, "end": 15099 }
class ____(BasePipesDatabricksClient, TreatAsResourceParam): """Pipes client for databricks. Args: client (WorkspaceClient): A databricks `WorkspaceClient` object. env (Optional[Mapping[str,str]]: An optional dict of environment variables to pass to the databricks job. context_injector (Optional[PipesContextInjector]): A context injector to use to inject context into the k8s container process. Defaults to :py:class:`PipesDbfsContextInjector`. message_reader (Optional[PipesMessageReader]): A message reader to use to read messages from the databricks job. Defaults to :py:class:`PipesDbfsMessageReader`. poll_interval_seconds (float): How long to sleep between checking the status of the job run. Defaults to 5. forward_termination (bool): Whether to cancel the Databricks job if the orchestration process is interrupted or canceled. Defaults to True. """ env: Optional[Mapping[str, str]] = Field( default=None, description="An optional dict of environment variables to pass to the subprocess.", ) def __init__( self, client: WorkspaceClient, env: Optional[Mapping[str, str]] = None, context_injector: Optional[PipesContextInjector] = None, message_reader: Optional[PipesMessageReader] = None, poll_interval_seconds: float = 5, forward_termination: bool = True, ): self.env = env super().__init__( client=client, context_injector=context_injector, message_reader=message_reader, poll_interval_seconds=poll_interval_seconds, forward_termination=forward_termination, ) @classmethod def _is_dagster_maintained(cls) -> bool: return True def get_default_message_reader(self, task: jobs.SubmitTask) -> "PipesDbfsMessageReader": # include log readers if the user is writing their logs to DBFS new_cluster_logging_configured = ( task.as_dict().get("new_cluster", {}).get("cluster_log_conf", {}).get("dbfs", None) ) existing_cluster_has_logging_configured = False if task.existing_cluster_id is not None: cluster = self.client.clusters.get(cluster_id=task.existing_cluster_id).as_dict() if 
cluster.get("cluster_log_conf", {}).get("dbfs", None): existing_cluster_has_logging_configured = True if ( isinstance(self.message_reader, PipesDbfsMessageReader) and self.message_reader.include_stdio_in_messages ): # logs will be coming from Pipes messages, we don't need to create log readers create_log_readers = False elif new_cluster_logging_configured or existing_cluster_has_logging_configured: create_log_readers = True else: create_log_readers = False if create_log_readers: log_readers = [ PipesDbfsLogReader( client=self.client, remote_log_name="stdout", target_stream=sys.stdout, debug_info="reader for stdout", ), PipesDbfsLogReader( client=self.client, remote_log_name="stderr", target_stream=sys.stderr, debug_info="reader fo stderr", ), ] else: log_readers = None return PipesDbfsMessageReader( client=self.client, log_readers=log_readers, ) def run( # pyright: ignore[reportIncompatibleMethodOverride] self, *, context: Union[OpExecutionContext, AssetExecutionContext], extras: Optional[PipesExtras] = None, task: jobs.SubmitTask, submit_args: Optional[Mapping[str, Any]] = None, ) -> PipesClientCompletedInvocation: """Synchronously execute a Databricks job with the pipes protocol. Args: task (databricks.sdk.service.jobs.SubmitTask): Specification of the databricks task to run. If `existing_cluster_id` key is set, Pipes bootstrap parameters will be passed via task parameters, which are exposed as CLI arguments for Python scripts. They are going to be merged with any existing parameters in the task. See `Databricks documentation <https://docs.databricks.com/en/jobs/create-run-jobs.html#pass-parameters-to-a-databricks-job-task>`_ for more information. In order to initialize Pipes in the task, the task code must have py:class:`dagster_pipes.PipesCliArgsParamsLoader` explicitly passed to py:function:`dagster_pipes.open_pipes_session. 
If `existing_cluster_id` key is not set, a new cluster will be created, and Pipes bootstrap parameters will be passed via environment variables in `spark_env_vars` (if there is an existing dictionary here, the Pipes environment variables will be merged in). This doesn't require any special setup in the task code. All other fields will be passed unaltered under the `tasks` arg to `WorkspaceClient.jobs.submit`. context (Union[OpExecutionContext, AssetExecutionContext]): The context from the executing op or asset. extras (Optional[PipesExtras]): An optional dict of extra parameters to pass to the subprocess. submit_args (Optional[Mapping[str, Any]]): Additional keyword arguments that will be forwarded as-is to `WorkspaceClient.jobs.submit`. Returns: PipesClientCompletedInvocation: Wrapper containing results reported by the external process. """ context_injector = self.context_injector or PipesDbfsContextInjector(client=self.client) message_reader = self.message_reader or self.get_default_message_reader(task) with open_pipes_session( context=context, extras=extras, context_injector=context_injector, message_reader=message_reader, ) as pipes_session: submit_task_dict = task.as_dict() submit_task_dict = self._enrich_submit_task_dict( context=context, session=pipes_session, submit_task_dict=submit_task_dict ) task = jobs.SubmitTask.from_dict(submit_task_dict) run_id = self.client.jobs.submit( tasks=[task], **(submit_args or {}), ).bind()["run_id"] try: self._poll_til_success(context, run_id) except DagsterExecutionInterruptedError: if self.forward_termination: context.log.info("[pipes] execution interrupted, canceling Databricks job.") self.client.jobs.cancel_run(run_id) self._poll_til_terminating(run_id) return PipesClientCompletedInvocation( pipes_session, metadata=self._extract_dagster_metadata(run_id) ) def _enrich_submit_task_dict( self, context: Union[OpExecutionContext, AssetExecutionContext], session: PipesSession, submit_task_dict: dict[str, Any], ) -> dict[str, 
Any]: if "existing_cluster_id" in submit_task_dict: # we can't set env vars on an existing cluster # so we must use CLI to pass Pipes params cli_args = session.get_bootstrap_cli_arguments() # this is a mapping for task_type in self.get_task_fields_which_support_cli_parameters(): if task_type in submit_task_dict: existing_params = submit_task_dict[task_type].get("parameters", []) # merge the existing parameters with the CLI arguments for key, value in cli_args.items(): existing_params.extend([key, value]) submit_task_dict[task_type]["parameters"] = existing_params context.log.debug( f'Passing Pipes bootstrap parameters via Databricks parameters as "{key}.parameters". Make sure to use the PipesCliArgsParamsLoader in the task.' # pyright: ignore[reportPossiblyUnboundVariable] ) break # use env vars to pass pipes context in case of notebook_task if submit_task_dict.get("notebook_task"): existing_params = submit_task_dict["notebook_task"].get("base_parameters", {}) # merge the existing parameters with the CLI arguments existing_params = {**existing_params, **session.get_bootstrap_env_vars()} submit_task_dict["notebook_task"]["base_parameters"] = existing_params else: pipes_env_vars = session.get_bootstrap_env_vars() submit_task_dict["new_cluster"]["spark_env_vars"] = { **submit_task_dict["new_cluster"].get("spark_env_vars", {}), **(self.env or {}), **pipes_env_vars, } submit_task_dict["tags"] = { **submit_task_dict.get("tags", {}), **session.default_remote_invocation_info, } return submit_task_dict @contextmanager def dbfs_tempdir(dbfs_client: files.DbfsAPI) -> Iterator[str]: dirname = "".join(random.choices(string.ascii_letters, k=30)) tempdir = f"/tmp/{dirname}" dbfs_client.mkdirs(tempdir) try: yield tempdir finally: dbfs_client.delete(tempdir, recursive=True)
PipesDatabricksClient
python
getsentry__sentry
tests/sentry/incidents/models/test_alert_rule.py
{ "start": 5687, "end": 7991 }
class ____(TestCase): def test_empty(self) -> None: alert_rule = AlertRule.objects.fetch_for_organization(self.organization) assert [] == list(alert_rule) def test_simple(self) -> None: alert_rule = self.create_alert_rule() assert [alert_rule] == list(AlertRule.objects.fetch_for_organization(self.organization)) def test_with_projects(self) -> None: project = self.create_project() alert_rule = self.create_alert_rule(projects=[project]) assert [] == list( AlertRule.objects.fetch_for_organization(self.organization, [self.project]) ) assert [alert_rule] == list( AlertRule.objects.fetch_for_organization(self.organization, [project]) ) def test_multi_project(self) -> None: project = self.create_project() alert_rule1 = self.create_alert_rule(projects=[project, self.project]) alert_rule2 = self.create_alert_rule(projects=[project]) assert [alert_rule1] == list( AlertRule.objects.fetch_for_organization(self.organization, [self.project]) ) assert {alert_rule1, alert_rule2} == set( AlertRule.objects.fetch_for_organization(self.organization, [project]) ) def test_project_on_alert(self) -> None: project = self.create_project() alert_rule = self.create_alert_rule() alert_rule.projects.add(project) assert [alert_rule] == list(AlertRule.objects.fetch_for_organization(self.organization)) def test_project_on_alert_and_snuba(self) -> None: project1 = self.create_project() alert_rule1 = self.create_alert_rule(projects=[project1]) alert_rule1.projects.add(project1) # will fetch if there's 1 project in snuba assert [alert_rule1] == list(AlertRule.objects.fetch_for_organization(self.organization)) project2 = self.create_project() alert_rule2 = self.create_alert_rule(projects=[project2, self.project]) alert_rule2.projects.add(project1) # Will fetch if there's 1 project in snuba and 1 in alert rule assert {alert_rule1, alert_rule2} == set( AlertRule.objects.fetch_for_organization(self.organization, [project1]) )
AlertRuleFetchForOrganizationTest
python
plotly__plotly.py
plotly/graph_objs/histogram/_cumulative.py
{ "start": 233, "end": 6560 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "histogram" _path_str = "histogram.cumulative" _valid_props = {"currentbin", "direction", "enabled"} @property def currentbin(self): """ Only applies if cumulative is enabled. Sets whether the current bin is included, excluded, or has half of its value included in the current cumulative value. "include" is the default for compatibility with various other tools, however it introduces a half-bin bias to the results. "exclude" makes the opposite half-bin bias, and "half" removes it. The 'currentbin' property is an enumeration that may be specified as: - One of the following enumeration values: ['include', 'exclude', 'half'] Returns ------- Any """ return self["currentbin"] @currentbin.setter def currentbin(self, val): self["currentbin"] = val @property def direction(self): """ Only applies if cumulative is enabled. If "increasing" (default) we sum all prior bins, so the result increases from left to right. If "decreasing" we sum later bins so the result decreases from left to right. The 'direction' property is an enumeration that may be specified as: - One of the following enumeration values: ['increasing', 'decreasing'] Returns ------- Any """ return self["direction"] @direction.setter def direction(self, val): self["direction"] = val @property def enabled(self): """ If true, display the cumulative distribution by summing the binned values. Use the `direction` and `centralbin` attributes to tune the accumulation method. Note: in this mode, the "density" `histnorm` settings behave the same as their equivalents without "density": "" and "density" both rise to the number of data points, and "probability" and *probability density* both rise to the number of sample points. 
The 'enabled' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["enabled"] @enabled.setter def enabled(self, val): self["enabled"] = val @property def _prop_descriptions(self): return """\ currentbin Only applies if cumulative is enabled. Sets whether the current bin is included, excluded, or has half of its value included in the current cumulative value. "include" is the default for compatibility with various other tools, however it introduces a half-bin bias to the results. "exclude" makes the opposite half-bin bias, and "half" removes it. direction Only applies if cumulative is enabled. If "increasing" (default) we sum all prior bins, so the result increases from left to right. If "decreasing" we sum later bins so the result decreases from left to right. enabled If true, display the cumulative distribution by summing the binned values. Use the `direction` and `centralbin` attributes to tune the accumulation method. Note: in this mode, the "density" `histnorm` settings behave the same as their equivalents without "density": "" and "density" both rise to the number of data points, and "probability" and *probability density* both rise to the number of sample points. """ def __init__( self, arg=None, currentbin=None, direction=None, enabled=None, **kwargs ): """ Construct a new Cumulative object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.histogram.Cumulative` currentbin Only applies if cumulative is enabled. Sets whether the current bin is included, excluded, or has half of its value included in the current cumulative value. "include" is the default for compatibility with various other tools, however it introduces a half-bin bias to the results. "exclude" makes the opposite half-bin bias, and "half" removes it. direction Only applies if cumulative is enabled. 
If "increasing" (default) we sum all prior bins, so the result increases from left to right. If "decreasing" we sum later bins so the result decreases from left to right. enabled If true, display the cumulative distribution by summing the binned values. Use the `direction` and `centralbin` attributes to tune the accumulation method. Note: in this mode, the "density" `histnorm` settings behave the same as their equivalents without "density": "" and "density" both rise to the number of data points, and "probability" and *probability density* both rise to the number of sample points. Returns ------- Cumulative """ super().__init__("cumulative") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.histogram.Cumulative constructor must be a dict or an instance of :class:`plotly.graph_objs.histogram.Cumulative`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("currentbin", arg, currentbin) self._set_property("direction", arg, direction) self._set_property("enabled", arg, enabled) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Cumulative
python
huggingface__transformers
tests/test_image_transforms.py
{ "start": 1563, "end": 25416 }
class ____(unittest.TestCase): @parameterized.expand( [ ("numpy_float_channels_first", (3, 4, 5), np.float32), ("numpy_float_channels_last", (4, 5, 3), np.float32), ("numpy_float_channels_first", (3, 4, 5), np.float64), ("numpy_float_channels_last", (4, 5, 3), np.float64), ("numpy_int_channels_first", (3, 4, 5), np.int32), ("numpy_uint_channels_first", (3, 4, 5), np.uint8), ] ) @require_vision def test_to_pil_image(self, name, image_shape, dtype): image = np.random.randint(0, 256, image_shape).astype(dtype) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # make sure image is correctly rescaled self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0) @parameterized.expand( [ ("numpy_float_channels_first", (3, 4, 5), np.float32), ("numpy_float_channels_first", (3, 4, 5), np.float64), ("numpy_float_channels_last", (4, 5, 3), np.float32), ("numpy_float_channels_last", (4, 5, 3), np.float64), ] ) @require_vision def test_to_pil_image_from_float(self, name, image_shape, dtype): image = np.random.rand(*image_shape).astype(dtype) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # make sure image is correctly rescaled self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0) # Make sure that an exception is raised if image is not in [0, 1] image = np.random.randn(*image_shape).astype(dtype) with self.assertRaises(ValueError): to_pil_image(image) @require_vision def test_to_pil_image_from_mask(self): # Make sure binary mask remains a binary mask image = np.random.randint(0, 2, (3, 4, 5)).astype(np.uint8) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) np_img = np.asarray(pil_image) self.assertTrue(np_img.min() == 0) self.assertTrue(np_img.max() == 1) image = np.random.randint(0, 2, (3, 4, 5)).astype(np.float32) pil_image = to_pil_image(image) 
self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) np_img = np.asarray(pil_image) self.assertTrue(np_img.min() == 0) self.assertTrue(np_img.max() == 1) @require_torch def test_to_pil_image_from_torch(self): # channels first image = torch.rand((3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # channels last image = torch.rand((4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) def test_to_channel_dimension_format(self): # Test that function doesn't reorder if channel dim matches the input. image = np.random.rand(3, 4, 5) image = to_channel_dimension_format(image, "channels_first") self.assertEqual(image.shape, (3, 4, 5)) image = np.random.rand(4, 5, 3) image = to_channel_dimension_format(image, "channels_last") self.assertEqual(image.shape, (4, 5, 3)) # Test that function reorders if channel dim doesn't match the input. image = np.random.rand(3, 4, 5) image = to_channel_dimension_format(image, "channels_last") self.assertEqual(image.shape, (4, 5, 3)) image = np.random.rand(4, 5, 3) image = to_channel_dimension_format(image, "channels_first") self.assertEqual(image.shape, (3, 4, 5)) # Can pass in input_data_format and works if data format is ambiguous or unknown. image = np.random.rand(4, 5, 6) image = to_channel_dimension_format(image, "channels_first", input_channel_dim="channels_last") self.assertEqual(image.shape, (6, 4, 5)) def test_get_resize_output_image_size(self): image = np.random.randint(0, 256, (3, 224, 224)) # Test the output size defaults to (x, x) if an int is given. 
self.assertEqual(get_resize_output_image_size(image, 10), (10, 10)) self.assertEqual(get_resize_output_image_size(image, [10]), (10, 10)) self.assertEqual(get_resize_output_image_size(image, (10,)), (10, 10)) # Test the output size is the same as the input if a two element tuple/list is given. self.assertEqual(get_resize_output_image_size(image, (10, 20)), (10, 20)) self.assertEqual(get_resize_output_image_size(image, [10, 20]), (10, 20)) self.assertEqual(get_resize_output_image_size(image, (10, 20), default_to_square=True), (10, 20)) # To match pytorch behaviour, max_size is only relevant if size is an int self.assertEqual(get_resize_output_image_size(image, (10, 20), max_size=5), (10, 20)) # Test output size = (int(size * height / width), size) if size is an int and height > width image = np.random.randint(0, 256, (3, 50, 40)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (25, 20)) # Test output size = (size, int(size * width / height)) if size is an int and width <= height image = np.random.randint(0, 256, (3, 40, 50)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (20, 25)) # Test size is resized if longer size > max_size image = np.random.randint(0, 256, (3, 50, 40)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False, max_size=22), (22, 17)) # Test output size = (int(size * height / width), size) if size is an int and height > width and # input has 4 channels image = np.random.randint(0, 256, (4, 50, 40)) self.assertEqual( get_resize_output_image_size(image, 20, default_to_square=False, input_data_format="channels_first"), (25, 20), ) # Test correct channel dimension is returned if output size if height == 3 # Defaults to input format - channels first image = np.random.randint(0, 256, (3, 18, 97)) resized_image = resize(image, (3, 20)) self.assertEqual(resized_image.shape, (3, 3, 20)) # Defaults to input format - channels last image = np.random.randint(0, 
256, (18, 97, 3)) resized_image = resize(image, (3, 20)) self.assertEqual(resized_image.shape, (3, 20, 3)) image = np.random.randint(0, 256, (3, 18, 97)) resized_image = resize(image, (3, 20), data_format="channels_last") self.assertEqual(resized_image.shape, (3, 20, 3)) image = np.random.randint(0, 256, (18, 97, 3)) resized_image = resize(image, (3, 20), data_format="channels_first") self.assertEqual(resized_image.shape, (3, 3, 20)) def test_resize(self): image = np.random.randint(0, 256, (3, 224, 224)) # Check the channel order is the same by default resized_image = resize(image, (30, 40)) self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (3, 30, 40)) # Check channel order is changed if specified resized_image = resize(image, (30, 40), data_format="channels_last") self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (30, 40, 3)) # Check PIL.Image.Image is returned if return_numpy=False resized_image = resize(image, (30, 40), return_numpy=False) self.assertIsInstance(resized_image, PIL.Image.Image) # PIL size is in (width, height) order self.assertEqual(resized_image.size, (40, 30)) # Check an image with float values between 0-1 is returned with values in this range image = np.random.rand(3, 224, 224) resized_image = resize(image, (30, 40)) self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (3, 30, 40)) self.assertTrue(np.all(resized_image >= 0)) self.assertTrue(np.all(resized_image <= 1)) # Check that an image with 4 channels is resized correctly image = np.random.randint(0, 256, (4, 224, 224)) resized_image = resize(image, (30, 40), input_data_format="channels_first") self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (4, 30, 40)) def test_normalize(self): image = np.random.randint(0, 256, (224, 224, 3)) / 255 # Test that exception is raised if inputs are incorrect # Not a numpy array image with 
self.assertRaises(TypeError): normalize(5, 5, 5) # Number of mean values != number of channels with self.assertRaises(ValueError): normalize(image, mean=(0.5, 0.6), std=1) # Number of std values != number of channels with self.assertRaises(ValueError): normalize(image, mean=1, std=(0.5, 0.6)) # Test result is correct - output data format is channels_first and normalization # correctly computed mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = ((image - mean) / std).transpose((2, 0, 1)) normalized_image = normalize(image, mean=mean, std=std, data_format="channels_first") self.assertIsInstance(normalized_image, np.ndarray) self.assertEqual(normalized_image.shape, (3, 224, 224)) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test image with 4 channels is normalized correctly image = np.random.randint(0, 256, (224, 224, 4)) / 255 mean = (0.5, 0.6, 0.7, 0.8) std = (0.1, 0.2, 0.3, 0.4) expected_image = (image - mean) / std self.assertTrue( np.allclose( normalize(image, mean=mean, std=std, input_data_format="channels_last"), expected_image, atol=1e-6 ) ) # Test float32 image input keeps float32 dtype image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32) / 255 mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = ((image - mean) / std).astype(np.float32) normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float32) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test float16 image input keeps float16 dtype image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float16) / 255 mean = np.array((0.5, 0.6, 0.7)) std = np.array((0.1, 0.2, 0.3)) # The mean and std are cast to match the dtype of the input image cast_mean = np.array(mean, dtype=np.float16) cast_std = np.array(std, dtype=np.float16) expected_image = (image - cast_mean) / cast_std normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, 
np.float16) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test int image input is converted to float32 image = np.random.randint(0, 2, (224, 224, 3), dtype=np.uint8) mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = (image.astype(np.float32) - mean) / std normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float32) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) def test_center_crop(self): image = np.random.randint(0, 256, (3, 224, 224)) # Test that exception is raised if inputs are incorrect with self.assertRaises(ValueError): center_crop(image, 10) # Test result is correct - output data format is channels_first and center crop # correctly computed expected_image = image[:, 52:172, 82:142].transpose(1, 2, 0) cropped_image = center_crop(image, (120, 60), data_format="channels_last") self.assertIsInstance(cropped_image, np.ndarray) self.assertEqual(cropped_image.shape, (120, 60, 3)) self.assertTrue(np.allclose(cropped_image, expected_image)) # Test that image is padded with zeros if crop size is larger than image size expected_image = np.zeros((300, 260, 3)) expected_image[38:262, 18:242, :] = image.transpose((1, 2, 0)) cropped_image = center_crop(image, (300, 260), data_format="channels_last") self.assertIsInstance(cropped_image, np.ndarray) self.assertEqual(cropped_image.shape, (300, 260, 3)) self.assertTrue(np.allclose(cropped_image, expected_image)) # Test that odd numbered padding requirement still leads to correct output dimensions cropped_image = center_crop(image, (300, 259), data_format="channels_last") self.assertEqual(cropped_image.shape, (300, 259, 3)) # Test image with 4 channels is cropped correctly image = np.random.randint(0, 256, (224, 224, 4)) expected_image = image[52:172, 82:142, :] self.assertTrue(np.allclose(center_crop(image, (120, 60), input_data_format="channels_last"), expected_image)) def test_center_to_corners_format(self): 
bbox_center = np.array([[10, 20, 4, 8], [15, 16, 3, 4]]) expected = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]]) self.assertTrue(np.allclose(center_to_corners_format(bbox_center), expected)) # Check that the function and inverse function are inverse of each other self.assertTrue(np.allclose(corners_to_center_format(center_to_corners_format(bbox_center)), bbox_center)) def test_corners_to_center_format(self): bbox_corners = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]]) expected = np.array([[10, 20, 4, 8], [15, 16, 3, 4]]) self.assertTrue(np.allclose(corners_to_center_format(bbox_corners), expected)) # Check that the function and inverse function are inverse of each other self.assertTrue(np.allclose(center_to_corners_format(corners_to_center_format(bbox_corners)), bbox_corners)) def test_rgb_to_id(self): # test list input rgb = [125, 4, 255] self.assertEqual(rgb_to_id(rgb), 16712829) # test numpy array input color = np.array( [ [ [213, 54, 165], [88, 207, 39], [156, 108, 128], ], [ [183, 194, 46], [137, 58, 88], [114, 131, 233], ], ] ) expected = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]]) self.assertTrue(np.allclose(rgb_to_id(color), expected)) def test_id_to_rgb(self): # test int input self.assertEqual(id_to_rgb(16712829), [125, 4, 255]) # test array input id_array = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]]) color = np.array( [ [ [213, 54, 165], [88, 207, 39], [156, 108, 128], ], [ [183, 194, 46], [137, 58, 88], [114, 131, 233], ], ] ) self.assertTrue(np.allclose(id_to_rgb(id_array), color)) def test_pad(self): # fmt: off image = np.array([[ [0, 1], [2, 3], ]]) # fmt: on # Test that exception is raised if unknown padding mode is specified with self.assertRaises(ValueError): pad(image, 10, mode="unknown") # Test that exception is raised if invalid padding is specified with self.assertRaises(ValueError): # Cannot pad on channel dimension pad(image, (5, 10, 10)) # Test image is padded equally on all 
sides is padding is an int # fmt: off expected_image = np.array([ [[0, 0, 0, 0], [0, 0, 1, 0], [0, 2, 3, 0], [0, 0, 0, 0]], ]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, 1))) # Test the left and right of each axis is padded (pad_left, pad_right) # fmt: off expected_image = np.array( [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 2, 3, 0], [0, 0, 0, 0, 0]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, (2, 1)))) # Test only one axis is padded (pad_left, pad_right) # fmt: off expected_image = np.array([[ [9, 9], [9, 9], [0, 1], [2, 3], [9, 9] ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((2, 1), (0, 0)), constant_values=9))) # Test padding with a constant value # fmt: off expected_image = np.array([[ [8, 8, 0, 1, 9], [8, 8, 2, 3, 9], [8, 8, 7, 7, 9], [8, 8, 7, 7, 9] ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), constant_values=((6, 7), (8, 9))))) # fmt: off image = np.array([[ [0, 1, 2], [3, 4, 5], [6, 7, 8], ]]) # fmt: on # Test padding with PaddingMode.REFLECT # fmt: off expected_image = np.array([[ [2, 1, 0, 1, 2, 1], [5, 4, 3, 4, 5, 4], [8, 7, 6, 7, 8, 7], [5, 4, 3, 4, 5, 4], [2, 1, 0, 1, 2, 1], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect"))) # Test padding with PaddingMode.REPLICATE # fmt: off expected_image = np.array([[ [0, 0, 0, 1, 2, 2], [3, 3, 3, 4, 5, 5], [6, 6, 6, 7, 8, 8], [6, 6, 6, 7, 8, 8], [6, 6, 6, 7, 8, 8], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="replicate"))) # Test padding with PaddingMode.SYMMETRIC # fmt: off expected_image = np.array([[ [1, 0, 0, 1, 2, 2], [4, 3, 3, 4, 5, 5], [7, 6, 6, 7, 8, 8], [7, 6, 6, 7, 8, 8], [4, 3, 3, 4, 5, 5], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="symmetric"))) # Test we can specify the output data format # Test padding with 
PaddingMode.REFLECT # fmt: off image = np.array([[ [0, 1], [2, 3], ]]) expected_image = np.array([ [[0], [1], [0], [1], [0]], [[2], [3], [2], [3], [2]], [[0], [1], [0], [1], [0]], [[2], [3], [2], [3], [2]] ]) # fmt: on self.assertTrue( np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect", data_format="channels_last")) ) # Test we can pad on an image with 2 channels # fmt: off image = np.array([ [[0, 1], [2, 3]], ]) expected_image = np.array([ [[0, 0], [0, 1], [2, 3]], [[0, 0], [0, 0], [0, 0]], ]) # fmt: on self.assertTrue( np.allclose( expected_image, pad(image, ((0, 1), (1, 0)), mode="constant", input_data_format="channels_last") ) ) # Test that padding works on batched images image = np.array( [ [[0, 1], [2, 3]], ] )[None, ...] expected_image = np.array( [ [[0, 0], [0, 1], [2, 3]], [[0, 0], [0, 0], [0, 0]], ] )[None, ...] # fmt: on self.assertTrue( np.allclose( expected_image, pad(image, ((0, 1), (1, 0)), mode="constant", input_data_format="channels_last") ) ) @require_vision def test_convert_to_rgb(self): # Test that an RGBA image is converted to RGB image = np.array([[[1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.uint8) pil_image = PIL.Image.fromarray(image) self.assertEqual(pil_image.mode, "RGBA") self.assertEqual(pil_image.size, (2, 1)) # For the moment, numpy images are returned as is rgb_image = convert_to_rgb(image) self.assertEqual(rgb_image.shape, (1, 2, 4)) self.assertTrue(np.allclose(rgb_image, image)) # And PIL images are converted rgb_image = convert_to_rgb(pil_image) self.assertEqual(rgb_image.mode, "RGB") self.assertEqual(rgb_image.size, (2, 1)) self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[1, 2, 3], [5, 6, 7]]], dtype=np.uint8))) # Test that a grayscale image is converted to RGB image = np.array([[0, 255]], dtype=np.uint8) pil_image = PIL.Image.fromarray(image) self.assertEqual(pil_image.mode, "L") self.assertEqual(pil_image.size, (2, 1)) rgb_image = convert_to_rgb(pil_image) self.assertEqual(rgb_image.mode, "RGB") 
self.assertEqual(rgb_image.size, (2, 1)) self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[0, 0, 0], [255, 255, 255]]], dtype=np.uint8))) def test_flip_channel_order(self): # fmt: off img_channels_first = np.array([ [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], [[16, 17, 18, 19], [20, 21, 22, 23]], ]) # fmt: on img_channels_last = np.moveaxis(img_channels_first, 0, -1) # fmt: off flipped_img_channels_first = np.array([ [[16, 17, 18, 19], [20, 21, 22, 23]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], ]) # fmt: on flipped_img_channels_last = np.moveaxis(flipped_img_channels_first, 0, -1) self.assertTrue(np.allclose(flip_channel_order(img_channels_first), flipped_img_channels_first)) self.assertTrue( np.allclose(flip_channel_order(img_channels_first, "channels_last"), flipped_img_channels_last) ) self.assertTrue(np.allclose(flip_channel_order(img_channels_last), flipped_img_channels_last)) self.assertTrue( np.allclose(flip_channel_order(img_channels_last, "channels_first"), flipped_img_channels_first) ) # Can flip when the image has 2 channels # fmt: off img_channels_first = np.array([ [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], ]) # fmt: on flipped_img_channels_first = img_channels_first[::-1, :, :] self.assertTrue( np.allclose( flip_channel_order(img_channels_first, input_data_format="channels_first"), flipped_img_channels_first ) )
ImageTransformsTester
python
Textualize__textual
src/textual/notifications.py
{ "start": 465, "end": 589 }
class ____(Message, bubble=False): """Message to show a notification.""" notification: Notification @dataclass
Notify
python
scrapy__scrapy
scrapy/robotstxt.py
{ "start": 1249, "end": 2139 }
class ____(metaclass=ABCMeta): @classmethod @abstractmethod def from_crawler(cls, crawler: Crawler, robotstxt_body: bytes) -> Self: """Parse the content of a robots.txt_ file as bytes. This must be a class method. It must return a new instance of the parser backend. :param crawler: crawler which made the request :type crawler: :class:`~scrapy.crawler.Crawler` instance :param robotstxt_body: content of a robots.txt_ file. :type robotstxt_body: bytes """ @abstractmethod def allowed(self, url: str | bytes, user_agent: str | bytes) -> bool: """Return ``True`` if ``user_agent`` is allowed to crawl ``url``, otherwise return ``False``. :param url: Absolute URL :type url: str or bytes :param user_agent: User agent :type user_agent: str or bytes """
RobotParser
python
getsentry__sentry
src/sentry/utils/warnings.py
{ "start": 1257, "end": 2286 }
class ____: """ Transforms warnings into a standard form and invokes handlers. """ def __init__( self, handlers: tuple[_WarningHandler, ...], default_category: type[Warning] = Warning ) -> None: self.__handlers = handlers self.__default_category = default_category def warn( self, message: str | Warning, category: type[Warning] | None = None, stacklevel: int | None = None, ) -> None: if isinstance(message, Warning): # Maybe log if `category` was passed and isn't a subclass of # `type(message)`? warning = message else: if category is None: category = self.__default_category assert issubclass(category, Warning) warning = category(message) kwargs = {} if stacklevel is not None: kwargs["stacklevel"] = stacklevel for handler in self.__handlers: handler(warning, **kwargs)
WarningManager
python
getsentry__sentry
tests/sentry/rules/processing/test_delayed_processing.py
{ "start": 25499, "end": 26494 }
class ____(TestCase): def test_empty_input(self) -> None: result = get_rules_to_groups({}) assert result == defaultdict(set) def test_single_rule_group(self) -> None: input_data = {"1:100": "event_data"} expected = defaultdict(set, {1: {100}}) result = get_rules_to_groups(input_data) assert result == expected def test_multiple_rule_groups(self) -> None: input_data = { "1:100": "event_data1", "1:101": "event_data2", "2:200": "event_data3", "3:300": "event_data4", "3:301": "event_data5", } expected = defaultdict(set, {1: {100, 101}, 2: {200}, 3: {300, 301}}) result = get_rules_to_groups(input_data) assert result == expected def test_invalid_input_format(self) -> None: input_data = {"invalid_key": "event_data"} with pytest.raises(ValueError): get_rules_to_groups(input_data)
GetRulesToGroupsTest
python
sphinx-doc__sphinx
sphinx/domains/cpp/_ast.py
{ "start": 16307, "end": 17745 }
class ____(ASTLiteral): def __init__(self, prefix: str, data: str) -> None: self.prefix = prefix # may be None when no prefix self.data = data assert prefix in _id_char_from_prefix self.type = _id_char_from_prefix[prefix] decoded = data.encode().decode('unicode-escape') if len(decoded) == 1: self.value = ord(decoded) else: raise UnsupportedMultiCharacterCharLiteral(decoded) def __eq__(self, other: object) -> bool: if not isinstance(other, ASTCharLiteral): return NotImplemented return self.prefix == other.prefix and self.value == other.value def __hash__(self) -> int: return hash((self.prefix, self.value)) def _stringify(self, transform: StringifyTransform) -> str: if self.prefix is None: return "'" + self.data + "'" else: return self.prefix + "'" + self.data + "'" def get_id(self, version: int) -> str: # TODO: the ID should be have L E around it return self.type + str(self.value) def describe_signature( self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol ) -> None: if self.prefix is not None: signode += addnodes.desc_sig_keyword(self.prefix, self.prefix) txt = "'" + self.data + "'" signode += addnodes.desc_sig_literal_char(txt, txt)
ASTCharLiteral
python
huggingface__transformers
src/transformers/models/cohere2_vision/modeling_cohere2_vision.py
{ "start": 11727, "end": 17488 }
class ____(Cohere2VisionPreTrainedModel, GenerationMixin): _checkpoint_conversion_mapping = {} _tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"} def __init__(self, config: Cohere2VisionConfig): super().__init__(config) self.model = Cohere2VisionModel(config) self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, value): self.model.set_input_embeddings(value) def get_output_embeddings(self) -> nn.Module: return self.lm_head def get_image_features(self, pixel_values: torch.FloatTensor): return self.model.get_image_features(pixel_values=pixel_values) @check_model_inputs() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, image_sizes: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, Cohere2VisionCausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Example: ```python >>> from transformers import AutoProcessor, Cohere2VisionForConditionalGeneration >>> import torch >>> processor = AutoProcessor.from_pretrained("CohereLabs/command-a-vision-07-2025", use_fast=True) >>> model = Cohere2VisionForConditionalGeneration.from_pretrained("CohereLabs/command-a-vision-07-2025", device_map="auto") >>> messages = [ ... { ... "role": "user", ... "content": [ ... { ... "type": "image", ... "url": "https://images.pexels.com/photos/1108099/pexels-photo-1108099.jpeg", ... }, ... {"type": "text", "text": "what is in this image?"}, ... ], ... }, ... ] >>> inputs = processor.apply_chat_template( ... messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", ... ).to(model.device) >>> gen_tokens = model.generate(**inputs, max_new_tokens=300, do_sample=True, temperature=0.3) >>> processor.tokenizer.decode(gen_tokens[0][inputs.input_ids.shape[1]:], skip_special_tokens=True) ```""" outputs = self.model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, image_sizes=image_sizes, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs ) return Cohere2VisionCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, 
inputs_embeds=None, pixel_values=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs, ): # Overwritten -- in specific circumstances we don't want to forward image inputs to the model model_inputs = super().prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs, ) if cache_position[0] == 0: # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore # Otherwise we need pixel values to be passed to model model_inputs["pixel_values"] = pixel_values return model_inputs __all__ = ["Cohere2VisionForConditionalGeneration", "Cohere2VisionPreTrainedModel", "Cohere2VisionModel"]
Cohere2VisionForConditionalGeneration
python
gabrielfalcao__HTTPretty
tests/bugfixes/pytest/test_426_mypy_segfault.py
{ "start": 85, "end": 1059 }
class ____(type): def __init__(cls, name, bases, attrs): if name in ('GenerateTestMeta',): return count = getattr(cls, '__generate_count__', attrs.get('__generate_count__')) if not isinstance(count, int): raise SyntaxError(f'Metaclass requires def `__generate_count__ = NUMBER_OF_TESTS` to be set to an integer') generate_method = getattr(cls, '__generate_method__', attrs.get('__generate_method__')) if not callable(generate_method): raise SyntaxError(f'Metaclass requires def `__generate_method__(test_name):` to be implemented') for x in range(count): test_name = "test_{}".format(x) def test_func(self, *args, **kwargs): run_test = generate_method(test_name) run_test(self, *args, **kwargs) test_func.__name__ = test_name attrs[test_name] = test_func setattr(cls, test_name, test_func)
GenerateTests
python
mlflow__mlflow
mlflow/entities/run_info.py
{ "start": 757, "end": 909 }
class ____(property): # Wrapper class over property to designate some of the properties as orderable # run attributes pass
orderable_attribute
python
walkccc__LeetCode
solutions/509. Fibonacci Number/509.py
{ "start": 0, "end": 214 }
class ____: def fib(self, n: int) -> int: if n < 2: return n dp = [0, 0, 1] for i in range(2, n + 1): dp[0] = dp[1] dp[1] = dp[2] dp[2] = dp[0] + dp[1] return dp[2]
Solution
python
astropy__astropy
astropy/cosmology/_src/tests/flrw/test_w0wzcdm.py
{ "start": 6969, "end": 11093 }
class ____(FlatFLRWMixinTest, Testw0wzCDM): """Test :class:`astropy.cosmology.Flatw0wzCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = Flatw0wzCDM def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) assert repr(cosmo) == ( "Flatw0wzCDM(name='ABCMeta', H0=<Quantity 70. km / (Mpc s)>, Om0=0.27, " "Tcmb0=<Quantity 3. K>, Neff=3.04, m_nu=<Quantity [0., 0., 0.] eV>, " "Ob0=0.03, w0=-1.0, wz=0.5)" ) # --------------------------------------------------------------- @pytest.mark.parametrize("z", valid_zs) def test_Otot(self, cosmo, z): """Test :meth:`astropy.cosmology.Flatw0wzCDM.Otot`. This is tested in the base class, but we need to override it here because this class is quite unstable. """ super().test_Otot(cosmo, z) def test_Otot_overflow(self, cosmo): """Test :meth:`astropy.cosmology.Flatw0wzCDM.Otot` for NOT overflowing.""" cosmo.Otot(1e5) # --------------------------------------------------------------- @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize( ("args", "kwargs", "expected"), [ ( # no relativistic species (75.0, 0.3), {}, [3004.55645039, 4694.15295565, 5760.90038238, 6504.07869144] * u.Mpc, ), ( # massless neutrinos (75.0, 0.25), {"Tcmb0": 3.0, "Neff": 3, "m_nu": 0 * u.eV}, [3086.14574034, 4885.09170925, 6035.4563298, 6840.89215656] * u.Mpc, ), ( # massive neutrinos (75.0, 0.25), {"Tcmb0": 3.0, "Neff": 4, "m_nu": 5 * u.eV}, [2510.44035219, 3683.87910326, 4389.97760294, 4873.33577288] * u.Mpc, ), ], ) def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected): """Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`. These do not come from external codes -- they are just internal checks to make sure nothing changes if we muck with the distance calculators. 
""" super().test_comoving_distance_example( cosmo_cls, args, {**COMOVING_DISTANCE_EXAMPLE_KWARGS, **kwargs}, expected ) @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") def test_comoving_distance_mathematica(self, cosmo_cls): """Test with Mathematica example. This test should be updated as the code changes. :: In[1]:= {Om0, w0, wz, H0, c}={0.3,-0.9, 0.2, 70, 299792.458}; c/H0 NIntegrate[1/Sqrt[Om0*(1+z)^3+(1-Om0)(1+z)^(3(1+w0-wz)) Exp[3 *wz*z]],{z, 0, 0.5}] Out[1]= 1849.75 """ assert u.allclose( cosmo_cls(H0=70, Om0=0.3, w0=-0.9, wz=0.2).comoving_distance(0.5), 1849.75 * u.Mpc, rtol=1e-4, ) ############################################################################## # Miscellaneous # TODO: these should be better integrated into the new test framework @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_de_densityscale(): cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5) z = np.array([0.1, 0.2, 0.5, 1.5, 2.5]) assert u.allclose( cosmo.de_density_scale(z), [1.00705953, 1.02687239, 1.15234885, 2.40022841, 6.49384982], rtol=1e-4, ) assert u.allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7) assert u.allclose( cosmo.de_density_scale([1, 2, 3]), cosmo.de_density_scale([1.0, 2.0, 3.0]), rtol=1e-7, ) # Flat tests cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-1, wz=0.5) flatcosmo = Flatw0wzCDM(H0=70, Om0=0.3, w0=-1, wz=0.5) assert u.allclose( cosmo.de_density_scale(z), flatcosmo.de_density_scale(z), rtol=1e-4 )
TestFlatw0wzCDM
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 356594, "end": 357449 }
class ____(sgqlc.types.Input): """Autogenerated input type of UpdateOrganizationWebCommitSignoffSetting """ __schema__ = github_schema __field_names__ = ("organization_id", "web_commit_signoff_required", "client_mutation_id") organization_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="organizationId") """The ID of the organization on which to set the web commit signoff setting. """ web_commit_signoff_required = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="webCommitSignoffRequired") """Enable signoff on web-based commits for repositories in the organization? """ client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation."""
UpdateOrganizationWebCommitSignoffSettingInput
python
pytorch__pytorch
torch/_inductor/ir.py
{ "start": 262020, "end": 263426 }
class ____(ExternKernel): """ This needs to be a custom class to handle mutation and indices properly """ def codegen(self, wrapper: PythonWrapperCodegen) -> None: wrapper.generate_index_put_fallback(self) def should_allocate(self) -> bool: return False def get_mutation_names(self) -> Sequence[str]: return [self.input_name(0)] def get_unbacked_symbol_defs(self) -> OrderedSet[sympy.Symbol]: return OrderedSet() def __init__( self, op_overload: torch._ops.OpOverload, x: IRNode, indices: list[Any], values: Sequence[Any], accumulate: Any, ) -> None: self.indices = indices valid_indices = [i for i in indices if i is not None] # pyrefly: ignore [bad-argument-type] tensors = [self.realize_input(x) for x in [x, values, *valid_indices]] cpp_kernel_name = "aoti_torch_index_put_out" super().__init__( None, NoneLayout(device=x.get_device()), self.unwrap_storage(tensors), (accumulate,), python_kernel_name="aten.index_put_", cpp_kernel_name=cpp_kernel_name, op_overload=op_overload, ) V.graph.mark_buffer_mutated(self.input_name(0)) self.name = V.graph.register_buffer(self) V.graph.register_operation(self)
IndexPutFallback
python
pallets__jinja
src/jinja2/compiler.py
{ "start": 4664, "end": 7556 }
class ____: """Holds compile time information for us.""" def __init__( self, eval_ctx: EvalContext, parent: t.Optional["Frame"] = None, level: int | None = None, ) -> None: self.eval_ctx = eval_ctx # the parent of this frame self.parent = parent if parent is None: self.symbols = Symbols(level=level) # in some dynamic inheritance situations the compiler needs to add # write tests around output statements. self.require_output_check = False # inside some tags we are using a buffer rather than yield statements. # this for example affects {% filter %} or {% macro %}. If a frame # is buffered this variable points to the name of the list used as # buffer. self.buffer: str | None = None # the name of the block we're in, otherwise None. self.block: str | None = None else: self.symbols = Symbols(parent.symbols, level=level) self.require_output_check = parent.require_output_check self.buffer = parent.buffer self.block = parent.block # a toplevel frame is the root + soft frames such as if conditions. self.toplevel = False # the root frame is basically just the outermost frame, so no if # conditions. This information is used to optimize inheritance # situations. self.rootlevel = False # variables set inside of loops and blocks should not affect outer frames, # but they still needs to be kept track of as part of the active context. self.loop_frame = False self.block_frame = False # track whether the frame is being used in an if-statement or conditional # expression as it determines which errors should be raised during runtime # or compile time. 
self.soft_frame = False def copy(self) -> "te.Self": """Create a copy of the current one.""" rv = object.__new__(self.__class__) rv.__dict__.update(self.__dict__) rv.symbols = self.symbols.copy() return rv def inner(self, isolated: bool = False) -> "Frame": """Return an inner frame.""" if isolated: return Frame(self.eval_ctx, level=self.symbols.level + 1) return Frame(self.eval_ctx, self) def soft(self) -> "te.Self": """Return a soft frame. A soft frame may not be modified as standalone thing as it shares the resources with the frame it was created of, but it's not a rootlevel frame any longer. This is only used to implement if-statements and conditional expressions. """ rv = self.copy() rv.rootlevel = False rv.soft_frame = True return rv __copy__ = copy
Frame
python
google__pytype
pytype/tools/analyze_project/pytype_runner_test.py
{ "start": 17151, "end": 18174 }
class ____(TestBase): """Tests for PytypeRunner.write_ninja_preamble.""" def test_write(self): conf = self.parser.config_from_defaults() with test_utils.Tempdir() as d: conf.output = d.path runner = make_runner([], [], conf) runner.write_ninja_preamble() with open(runner.ninja_file) as f: preamble = f.read().splitlines() self.assertEqual(len(preamble), _PREAMBLE_LENGTH) # The preamble consists of triples of lines of the format: # rule {name} # command = pytype-single {args} $in # description = {name} $module # Check that the lines cycle through these patterns. for i, line in enumerate(preamble): if not i % 3: self.assertRegex(line, r'rule \w*') elif i % 3 == 1: expected = r' command = {} .* \$in'.format( re.escape(' '.join(pytype_runner.PYTYPE_SINGLE)) ) self.assertRegex(line, expected) else: self.assertRegex(line, r' description = \w* \$module')
TestNinjaPreamble
python
pypa__setuptools
setuptools/_scripts.py
{ "start": 8796, "end": 11247 }
class ____(WindowsScriptWriter): @classmethod def _get_script_args(cls, type_, name, header, script_text): """ For Windows, add a .py extension and an .exe launcher """ if type_ == 'gui': launcher_type = 'gui' ext = '-script.pyw' old = ['.pyw'] else: launcher_type = 'cli' ext = '-script.py' old = ['.py', '.pyc', '.pyo'] hdr = cls._adjust_header(type_, header) blockers = [name + x for x in old] yield (name + ext, hdr + script_text, 't', blockers) yield ( name + '.exe', get_win_launcher(launcher_type), 'b', # write in binary mode ) if not is_64bit(): # install a manifest for the launcher to prevent Windows # from detecting it as an installer (which it will for # launchers like easy_install.exe). Consider only # adding a manifest for launchers detected as installers. # See Distribute #143 for details. m_name = name + '.exe.manifest' yield (m_name, load_launcher_manifest(name), 't') def get_win_launcher(type): """ Load the Windows launcher (executable) suitable for launching a script. `type` should be either 'cli' or 'gui' Returns the executable as a byte string. """ launcher_fn = f'{type}.exe' if is_64bit(): if get_platform() == "win-arm64": launcher_fn = launcher_fn.replace(".", "-arm64.") else: launcher_fn = launcher_fn.replace(".", "-64.") else: launcher_fn = launcher_fn.replace(".", "-32.") return resources.files('setuptools').joinpath(launcher_fn).read_bytes() def load_launcher_manifest(name): res = resources.files(__name__).joinpath('launcher manifest.xml') return res.read_text(encoding='utf-8') % vars() def _first_line_re(): """ Return a regular expression based on first_line_re suitable for matching strings. """ if isinstance(first_line_re.pattern, str): return first_line_re # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern. return re.compile(first_line_re.pattern.decode()) def is_64bit(): return struct.calcsize("P") == 8 def isascii(s): try: s.encode('ascii') except UnicodeError: return False return True
WindowsExecutableLauncherWriter
python
numba__numba
numba/tests/gdb/test_conditional_breakpoint.py
{ "start": 238, "end": 1224 }
class ____(TestCase): def test(self): @njit(debug=True) def foo(x, y): c = x + y # break-here return c @njit(debug=True) def call_foo(a): acc = 0 for i in range(10): acc += foo(i, a) return acc call_foo(10) driver = GdbMIDriver(__file__) driver.set_breakpoint(line=15, condition='x == 4') driver.run() driver.check_hit_breakpoint(1) driver.stack_list_arguments(1) expect = ('[frame={level="0",args=[{name="x",value="4"},' '{name="y",value="10"}]}]') driver.assert_output(expect) driver.set_breakpoint(line=22, condition='i == 8') driver.cont() driver.check_hit_breakpoint(2) driver.stack_list_variables(1) # i should be 8 driver.assert_output('{name="i",value="8"}') driver.quit() if __name__ == '__main__': unittest.main()
Test
python
walkccc__LeetCode
solutions/1557. Minimum Number of Vertices to Reach All Nodes/1557.py
{ "start": 0, "end": 254 }
class ____: def findSmallestSetOfVertices( self, n: int, edges: list[list[int]], ) -> list[int]: inDegrees = [0] * n for _, v in edges: inDegrees[v] += 1 return [i for i, d in enumerate(inDegrees) if d == 0]
Solution
python
huggingface__transformers
tests/quantization/quanto_integration/test_quanto.py
{ "start": 14558, "end": 15791 }
class ____(QuantoQuantizationTest): """ Perform the same tests as in QuantoQuantizationTest but with a serialized model. """ def setUp(self): """ Setup quantized model """ quantization_config = QuantoConfig( weights=self.weights, activations=self.activations, ) quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=self.device_map, quantization_config=quantization_config, dtype=torch.float32, ) with tempfile.TemporaryDirectory() as tmpdirname: quantized_model.save_pretrained(tmpdirname, safe_serialization=False) self.quantized_model = AutoModelForCausalLM.from_pretrained( tmpdirname, dtype=torch.float32, device_map=self.device_map ) self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) self.have_accelerate_hooks = ( getattr(self.quantized_model, "hf_device_map", False) and len(self.quantized_model.hf_device_map) > 1 ) @unittest.skip(reason="Skipping test class because serialization is not supported yet")
QuantoQuantizationSerializationTest
python
getsentry__sentry
tests/snuba/api/endpoints/test_organization_events_profile_functions.py
{ "start": 127, "end": 1952 }
class ____(OrganizationEventsEndpointTestBase): dataset = "profile_functions" def test_simple(self) -> None: profile_functions = [ self.create_profile_function(attributes={"name": "foo", "self_time_ns": 1}), self.create_profile_function(attributes={"name": "bar", "self_time_ns": 2}), ] self.store_profile_functions(profile_functions) response = self.do_request( { "field": ["function", "function.self_time"], "orderby": "function.self_time", "query": "function:foo", "dataset": self.dataset, } ) assert response.status_code == 200, response.content assert response.data["data"] == [ { "id": mock.ANY, "project.name": self.project.slug, "function": "foo", "function.self_time": 1, }, ] def test_simple_aggregation(self) -> None: profile_functions = [ self.create_profile_function(attributes={"name": "foo", "self_time_ns": 1}), self.create_profile_function(attributes={"name": "bar", "self_time_ns": 2}), ] self.store_profile_functions(profile_functions) response = self.do_request( { "field": ["function", "sum(function.self_time)"], "query": "function:foo", "orderby": "sum(function.self_time)", "dataset": self.dataset, } ) assert response.status_code == 200, response.content assert response.data["data"] == [ { "function": "foo", "sum(function.self_time)": 1, }, ]
OrganizationEventsProfileFunctionsEndpointTest
python
gevent__gevent
src/greentest/3.9/test_asyncore.py
{ "start": 14702, "end": 25058 }
class ____: def tearDown(self): asyncore.close_all(ignore_all=True) def loop_waiting_for_flag(self, instance, timeout=5): timeout = float(timeout) / 100 count = 100 while asyncore.socket_map and count > 0: asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll) if instance.flag: return count -= 1 time.sleep(timeout) self.fail("flag not set") def test_handle_connect(self): # make sure handle_connect is called on connect() class TestClient(BaseClient): def handle_connect(self): self.flag = True server = BaseServer(self.family, self.addr) client = TestClient(self.family, server.address) self.loop_waiting_for_flag(client) def test_handle_accept(self): # make sure handle_accept() is called when a client connects class TestListener(BaseTestHandler): def __init__(self, family, addr): BaseTestHandler.__init__(self) self.create_socket(family) bind_af_aware(self.socket, addr) self.listen(5) self.address = self.socket.getsockname() def handle_accept(self): self.flag = True server = TestListener(self.family, self.addr) client = BaseClient(self.family, server.address) self.loop_waiting_for_flag(server) def test_handle_accepted(self): # make sure handle_accepted() is called when a client connects class TestListener(BaseTestHandler): def __init__(self, family, addr): BaseTestHandler.__init__(self) self.create_socket(family) bind_af_aware(self.socket, addr) self.listen(5) self.address = self.socket.getsockname() def handle_accept(self): asyncore.dispatcher.handle_accept(self) def handle_accepted(self, sock, addr): sock.close() self.flag = True server = TestListener(self.family, self.addr) client = BaseClient(self.family, server.address) self.loop_waiting_for_flag(server) def test_handle_read(self): # make sure handle_read is called on data received class TestClient(BaseClient): def handle_read(self): self.flag = True class TestHandler(BaseTestHandler): def __init__(self, conn): BaseTestHandler.__init__(self, conn) self.send(b'x' * 1024) server = BaseServer(self.family, 
self.addr, TestHandler) client = TestClient(self.family, server.address) self.loop_waiting_for_flag(client) def test_handle_write(self): # make sure handle_write is called class TestClient(BaseClient): def handle_write(self): self.flag = True server = BaseServer(self.family, self.addr) client = TestClient(self.family, server.address) self.loop_waiting_for_flag(client) def test_handle_close(self): # make sure handle_close is called when the other end closes # the connection class TestClient(BaseClient): def handle_read(self): # in order to make handle_close be called we are supposed # to make at least one recv() call self.recv(1024) def handle_close(self): self.flag = True self.close() class TestHandler(BaseTestHandler): def __init__(self, conn): BaseTestHandler.__init__(self, conn) self.close() server = BaseServer(self.family, self.addr, TestHandler) client = TestClient(self.family, server.address) self.loop_waiting_for_flag(client) def test_handle_close_after_conn_broken(self): # Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and # #11265). data = b'\0' * 128 class TestClient(BaseClient): def handle_write(self): self.send(data) def handle_close(self): self.flag = True self.close() def handle_expt(self): self.flag = True self.close() class TestHandler(BaseTestHandler): def handle_read(self): self.recv(len(data)) self.close() def writable(self): return False server = BaseServer(self.family, self.addr, TestHandler) client = TestClient(self.family, server.address) self.loop_waiting_for_flag(client) @unittest.skipIf(sys.platform.startswith("sunos"), "OOB support is broken on Solaris") def test_handle_expt(self): # Make sure handle_expt is called on OOB data received. # Note: this might fail on some platforms as OOB data is # tenuously supported and rarely used. 
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX: self.skipTest("Not applicable to AF_UNIX sockets.") if sys.platform == "darwin" and self.use_poll: self.skipTest("poll may fail on macOS; see issue #28087") class TestClient(BaseClient): def handle_expt(self): self.socket.recv(1024, socket.MSG_OOB) self.flag = True class TestHandler(BaseTestHandler): def __init__(self, conn): BaseTestHandler.__init__(self, conn) self.socket.send(bytes(chr(244), 'latin-1'), socket.MSG_OOB) server = BaseServer(self.family, self.addr, TestHandler) client = TestClient(self.family, server.address) self.loop_waiting_for_flag(client) def test_handle_error(self): class TestClient(BaseClient): def handle_write(self): 1.0 / 0 def handle_error(self): self.flag = True try: raise except ZeroDivisionError: pass else: raise Exception("exception not raised") server = BaseServer(self.family, self.addr) client = TestClient(self.family, server.address) self.loop_waiting_for_flag(client) def test_connection_attributes(self): server = BaseServer(self.family, self.addr) client = BaseClient(self.family, server.address) # we start disconnected self.assertFalse(server.connected) self.assertTrue(server.accepting) # this can't be taken for granted across all platforms #self.assertFalse(client.connected) self.assertFalse(client.accepting) # execute some loops so that client connects to server asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100) self.assertFalse(server.connected) self.assertTrue(server.accepting) self.assertTrue(client.connected) self.assertFalse(client.accepting) # disconnect the client client.close() self.assertFalse(server.connected) self.assertTrue(server.accepting) self.assertFalse(client.connected) self.assertFalse(client.accepting) # stop serving server.close() self.assertFalse(server.connected) self.assertFalse(server.accepting) def test_create_socket(self): s = asyncore.dispatcher() s.create_socket(self.family) self.assertEqual(s.socket.type, socket.SOCK_STREAM) 
self.assertEqual(s.socket.family, self.family) self.assertEqual(s.socket.gettimeout(), 0) self.assertFalse(s.socket.get_inheritable()) def test_bind(self): if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX: self.skipTest("Not applicable to AF_UNIX sockets.") s1 = asyncore.dispatcher() s1.create_socket(self.family) s1.bind(self.addr) s1.listen(5) port = s1.socket.getsockname()[1] s2 = asyncore.dispatcher() s2.create_socket(self.family) # EADDRINUSE indicates the socket was correctly bound self.assertRaises(OSError, s2.bind, (self.addr[0], port)) def test_set_reuse_addr(self): if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX: self.skipTest("Not applicable to AF_UNIX sockets.") with socket.socket(self.family) as sock: try: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) except OSError: unittest.skip("SO_REUSEADDR not supported on this platform") else: # if SO_REUSEADDR succeeded for sock we expect asyncore # to do the same s = asyncore.dispatcher(socket.socket(self.family)) self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)) s.socket.close() s.create_socket(self.family) s.set_reuse_addr() self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)) @support.reap_threads def test_quick_connect(self): # see: http://bugs.python.org/issue10340 if self.family not in (socket.AF_INET, getattr(socket, "AF_INET6", object())): self.skipTest("test specific to AF_INET and AF_INET6") server = BaseServer(self.family, self.addr) # run the thread 500 ms: the socket should be connected in 200 ms t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1, count=5)) t.start() try: with socket.socket(self.family, socket.SOCK_STREAM) as s: s.settimeout(.2) s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0)) try: s.connect(server.address) except OSError: pass finally: support.join_thread(t)
BaseTestAPI
python
getsentry__sentry
src/sentry/api/serializers/models/project_key.py
{ "start": 970, "end": 1447 }
class ____(TypedDict): """ This represents a Sentry Project Client Key. """ id: str name: str label: str public: str | None secret: str | None projectId: int isActive: bool rateLimit: RateLimit | None dsn: DSN browserSdkVersion: str browserSdk: BrowserSDK dateCreated: datetime | None dynamicSdkLoaderOptions: DynamicSDKLoaderOptions useCase: NotRequired[str] @register(ProjectKey)
ProjectKeySerializerResponse
python
pypa__warehouse
warehouse/authnz/_permissions.py
{ "start": 66, "end": 4355 }
class ____(StrEnum): """ Permissions can be specified in an ACL (`__acl__`) or `@view_config(permission=...)` instead of using a string literal, minimizing the chance of typos. They are also disconnected from Principals (users, groups, etc.), so they can be used in a more generic way. For example, a permission could be used to allow a user to edit their own profile, or to allow a group to edit any profile. Naming should follow the format: <optional scope>:<resource>:<action> Where: scope: The scope of the permission, such as "admin", "manage", "api" resource: The resource being accessed action: The action being performed on the resource Keep the list alphabetized. Add spacing between logical groupings. """ # Admin Permissions AdminBannerRead = "admin:banner:read" AdminBannerWrite = "admin:banner:write" AdminDashboardRead = "admin:dashboard:read" # TODO: This is broad, and could be replaced in the base template with more # specific permissions per section. Other `__acl__`s need to be updated. 
AdminDashboardSidebarRead = "admin:dashboard-sidebar:read" AdminEmailsRead = "admin:emails:read" AdminEmailsWrite = "admin:emails:write" AdminFlagsRead = "admin:flags:read" AdminFlagsWrite = "admin:flags:write" AdminIpAddressesRead = "admin:ip-addresses:read" AdminJournalRead = "admin:journal:read" AdminMacaroonsRead = "admin:macaroons:read" AdminMacaroonsWrite = "admin:macaroons:write" AdminObservationsRead = "admin:observations:read" AdminObservationsWrite = "admin:observations:write" AdminOrganizationsRead = "admin:organizations:read" AdminOrganizationsSetLimit = "admin:organizations:set-limit" AdminOrganizationsWrite = "admin:organizations:write" AdminOrganizationsNameWrite = "admin:organizations:name:write" AdminProhibitedEmailDomainsRead = "admin:prohibited-email-domains:read" AdminProhibitedEmailDomainsWrite = "admin:prohibited-email-domains:write" AdminProhibitedProjectsRead = "admin:prohibited-projects:read" AdminProhibitedProjectsWrite = "admin:prohibited-projects:write" AdminProhibitedProjectsRelease = "admin:prohibited-projects:release" AdminProhibitedUsernameRead = "admin:prohibited-username:read" AdminProhibitedUsernameWrite = "admin:prohibited-username:write" AdminProjectsDelete = "admin:projects:delete" AdminProjectsRead = "admin:projects:read" AdminProjectsSetLimit = "admin:projects:set-limit" AdminProjectsWrite = "admin:projects:write" AdminRoleAdd = "admin:role:add" AdminRoleDelete = "admin:role:delete" AdminRoleUpdate = "admin:role:update" AdminSponsorsRead = "admin:sponsors:read" AdminSponsorsWrite = "admin:sponsors:write" AdminUsersRead = "admin:users:read" AdminUsersWrite = "admin:users:write" AdminUsersEmailWrite = "admin:users:email:write" AdminUsersAccountRecoveryWrite = "admin:users:account-recovery:write" # API Permissions APIEcho = "api:echo" APIObservationsAdd = "api:observations:add" # User Permissions Account2FA = "account:2fa" AccountAPITokens = "account:api-tokens" AccountManage = "account:manage" AccountManagePublishing = 
"account:manage-publishing" AccountVerifyEmail = "account:verify-email" AccountVerifyOrgRole = "account:verify-org-role" AccountVerifyProjectRole = "account:verify-project-role" # Projects Permissions ProjectsRead = "projects:read" ProjectsUpload = "projects:upload" ProjectsWrite = "projects:write" # TODO: Worth splitting out ProjectDelete? # Organization Permissions OrganizationApplicationsManage = "organizations:applications:manage" OrganizationsManage = "organizations:manage" OrganizationsBillingManage = "organizations:billing:manage" OrganizationsRead = "organizations:read" OrganizationProjectsAdd = "organizations:projects:add" OrganizationProjectsRemove = "organizations:projects:remove" # TODO: unused? OrganizationTeamsManage = "organizations:teams:manage" OrganizationTeamsRead = "organizations:teams:read" # Observer Permissions SubmitMalwareObservation = "observer:submit-malware-observation"
Permissions
python
encode__django-rest-framework
tests/test_serializer_nested.py
{ "start": 11130, "end": 12826 }
class ____: """ Test that raise_errors_on_nested_writes does not raise `AssertionError` when the model field is not a relation. """ def test_nested_serializer_create_and_update(self): class NonRelationalPersonDataSerializer(serializers.Serializer): occupation = serializers.CharField() class NonRelationalPersonSerializer(serializers.ModelSerializer): data = NonRelationalPersonDataSerializer() class Meta: model = NonRelationalPersonModel fields = ['data'] serializer = NonRelationalPersonSerializer(data={'data': {'occupation': 'developer'}}) assert serializer.is_valid() assert serializer.validated_data == {'data': {'occupation': 'developer'}} raise_errors_on_nested_writes('create', serializer, serializer.validated_data) raise_errors_on_nested_writes('update', serializer, serializer.validated_data) def test_dotted_source_field_create_and_update(self): class DottedNonRelationalPersonSerializer(serializers.ModelSerializer): occupation = serializers.CharField(source='data.occupation') class Meta: model = NonRelationalPersonModel fields = ['occupation'] serializer = DottedNonRelationalPersonSerializer(data={'occupation': 'developer'}) assert serializer.is_valid() assert serializer.validated_data == {'data': {'occupation': 'developer'}} raise_errors_on_nested_writes('create', serializer, serializer.validated_data) raise_errors_on_nested_writes('update', serializer, serializer.validated_data)
TestNestedNonRelationalFieldWrite
python
encode__django-rest-framework
rest_framework/fields.py
{ "start": 9202, "end": 23330 }
class ____: _creation_counter = 0 default_error_messages = { 'required': _('This field is required.'), 'null': _('This field may not be null.') } default_validators = [] default_empty_html = empty initial = None def __init__(self, *, read_only=False, write_only=False, required=None, default=empty, initial=empty, source=None, label=None, help_text=None, style=None, error_messages=None, validators=None, allow_null=False): self._creation_counter = Field._creation_counter Field._creation_counter += 1 # If `required` is unset, then use `True` unless a default is provided. if required is None: required = default is empty and not read_only # Some combinations of keyword arguments do not make sense. assert not (read_only and write_only), NOT_READ_ONLY_WRITE_ONLY assert not (read_only and required), NOT_READ_ONLY_REQUIRED assert not (required and default is not empty), NOT_REQUIRED_DEFAULT assert not (read_only and self.__class__ == Field), USE_READONLYFIELD self.read_only = read_only self.write_only = write_only self.required = required self.default = default self.source = source self.initial = self.initial if (initial is empty) else initial self.label = label self.help_text = help_text self.style = {} if style is None else style self.allow_null = allow_null if self.default_empty_html is not empty: if default is not empty: self.default_empty_html = default if validators is not None: self.validators = list(validators) # These are set up by `.bind()` when the field is added to a serializer. self.field_name = None self.parent = None # Collect default error message from self and parent classes messages = {} for cls in reversed(self.__class__.__mro__): messages.update(getattr(cls, 'default_error_messages', {})) messages.update(error_messages or {}) self.error_messages = messages # Allow generic typing checking for fields. def __class_getitem__(cls, *args, **kwargs): return cls def bind(self, field_name, parent): """ Initializes the field name and parent for the field instance. 
Called when a field is added to the parent serializer instance. """ # In order to enforce a consistent style, we error if a redundant # 'source' argument has been used. For example: # my_field = serializer.CharField(source='my_field') assert self.source != field_name, ( "It is redundant to specify `source='%s'` on field '%s' in " "serializer '%s', because it is the same as the field name. " "Remove the `source` keyword argument." % (field_name, self.__class__.__name__, parent.__class__.__name__) ) self.field_name = field_name self.parent = parent # `self.label` should default to being based on the field name. if self.label is None: self.label = field_name.replace('_', ' ').capitalize() # self.source should default to being the same as the field name. if self.source is None: self.source = field_name # self.source_attrs is a list of attributes that need to be looked up # when serializing the instance, or populating the validated data. if self.source == '*': self.source_attrs = [] else: self.source_attrs = self.source.split('.') # .validators is a lazily loaded property, that gets its default # value from `get_validators`. @property def validators(self): if not hasattr(self, '_validators'): self._validators = self.get_validators() return self._validators @validators.setter def validators(self, validators): self._validators = validators def get_validators(self): return list(self.default_validators) def get_initial(self): """ Return a value to use when the field is being returned as a primitive value, without any object instance. """ if callable(self.initial): return self.initial() return self.initial def get_value(self, dictionary): """ Given the *incoming* primitive data, return the value for this field that should be validated and transformed to a native value. """ if html.is_html_input(dictionary): # HTML forms will represent empty fields as '', and cannot # represent None or False values directly. 
if self.field_name not in dictionary: if getattr(self.root, 'partial', False): return empty return self.default_empty_html ret = dictionary[self.field_name] if ret == '' and self.allow_null: # If the field is blank, and null is a valid value then # determine if we should use null instead. return '' if getattr(self, 'allow_blank', False) else None elif ret == '' and not self.required: # If the field is blank, and emptiness is valid then # determine if we should use emptiness instead. return '' if getattr(self, 'allow_blank', False) else empty return ret return dictionary.get(self.field_name, empty) def get_attribute(self, instance): """ Given the *outgoing* object instance, return the primitive value that should be used for this field. """ try: return get_attribute(instance, self.source_attrs) except BuiltinSignatureError as exc: msg = ( 'Field source for `{serializer}.{field}` maps to a built-in ' 'function type and is invalid. Define a property or method on ' 'the `{instance}` instance that wraps the call to the built-in ' 'function.'.format( serializer=self.parent.__class__.__name__, field=self.field_name, instance=instance.__class__.__name__, ) ) raise type(exc)(msg) except (KeyError, AttributeError) as exc: if self.default is not empty: return self.get_default() if self.allow_null: return None if not self.required: raise SkipField() msg = ( 'Got {exc_type} when attempting to get a value for field ' '`{field}` on serializer `{serializer}`.\nThe serializer ' 'field might be named incorrectly and not match ' 'any attribute or key on the `{instance}` instance.\n' 'Original exception text was: {exc}.'.format( exc_type=type(exc).__name__, field=self.field_name, serializer=self.parent.__class__.__name__, instance=instance.__class__.__name__, exc=exc ) ) raise type(exc)(msg) def get_default(self): """ Return the default value to use when validating data if no input is provided for this field. 
If a default has not been set for this field then this will simply raise `SkipField`, indicating that no value should be set in the validated data for this field. """ if self.default is empty or getattr(self.root, 'partial', False): # No default, or this is a partial update. raise SkipField() if callable(self.default): if getattr(self.default, 'requires_context', False): return self.default(self) else: return self.default() return self.default def validate_empty_values(self, data): """ Validate empty values, and either: * Raise `ValidationError`, indicating invalid data. * Raise `SkipField`, indicating that the field should be ignored. * Return (True, data), indicating an empty value that should be returned without any further validation being applied. * Return (False, data), indicating a non-empty value, that should have validation applied as normal. """ if self.read_only: return (True, self.get_default()) if data is empty: if getattr(self.root, 'partial', False): raise SkipField() if self.required: self.fail('required') return (True, self.get_default()) if data is None: if not self.allow_null: self.fail('null') # Nullable `source='*'` fields should not be skipped when its named # field is given a null value. This is because `source='*'` means # the field is passed the entire object, which is not null. elif self.source == '*': return (False, None) return (True, None) return (False, data) def run_validation(self, data=empty): """ Validate a simple representation and return the internal value. The provided data may be `empty` if no representation was included in the input. May raise `SkipField` if the field should not be included in the validated data. 
""" (is_empty_value, data) = self.validate_empty_values(data) if is_empty_value: return data value = self.to_internal_value(data) self.run_validators(value) return value def run_validators(self, value): """ Test the given value against all the validators on the field, and either raise a `ValidationError` or simply return. """ errors = [] for validator in self.validators: try: if getattr(validator, 'requires_context', False): validator(value, self) else: validator(value) except ValidationError as exc: # If the validation error contains a mapping of fields to # errors then simply raise it immediately rather than # attempting to accumulate a list of errors. if isinstance(exc.detail, dict): raise errors.extend(exc.detail) except DjangoValidationError as exc: errors.extend(get_error_detail(exc)) if errors: raise ValidationError(errors) def to_internal_value(self, data): """ Transform the *incoming* primitive data into a native value. """ raise NotImplementedError( '{cls}.to_internal_value() must be implemented for field ' '{field_name}. If you do not need to support write operations ' 'you probably want to subclass `ReadOnlyField` instead.'.format( cls=self.__class__.__name__, field_name=self.field_name, ) ) def to_representation(self, value): """ Transform the *outgoing* native value into primitive data. """ raise NotImplementedError( '{cls}.to_representation() must be implemented for field {field_name}.'.format( cls=self.__class__.__name__, field_name=self.field_name, ) ) def fail(self, key, **kwargs): """ A helper method that simply raises a validation error. """ try: msg = self.error_messages[key] except KeyError: class_name = self.__class__.__name__ msg = MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key) raise AssertionError(msg) message_string = msg.format(**kwargs) raise ValidationError(message_string, code=key) @property def root(self): """ Returns the top-level serializer for this field. 
""" root = self while root.parent is not None: root = root.parent return root @property def context(self): """ Returns the context as passed to the root serializer on initialization. """ return getattr(self.root, '_context', {}) def __new__(cls, *args, **kwargs): """ When a field is instantiated, we store the arguments that were used, so that we can present a helpful representation of the object. """ instance = super().__new__(cls) instance._args = args instance._kwargs = kwargs return instance def __deepcopy__(self, memo): """ When cloning fields we instantiate using the arguments it was originally created with, rather than copying the complete state. """ # Treat regexes and validators as immutable. # See https://github.com/encode/django-rest-framework/issues/1954 # and https://github.com/encode/django-rest-framework/pull/4489 args = [ copy.deepcopy(item) if not isinstance(item, REGEX_TYPE) else item for item in self._args ] kwargs = { key: (copy.deepcopy(value, memo) if (key not in ('validators', 'regex')) else value) for key, value in self._kwargs.items() } return self.__class__(*args, **kwargs) def __repr__(self): """ Fields are represented using their initial calling arguments. This allows us to create descriptive representations for serializer instances that show all the declared fields on the serializer. """ return representation.field_repr(self) # Boolean types...
Field
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/roots/assets.py
{ "start": 808, "end": 983 }
class ____(graphene.Union): class Meta: types = (GrapheneAssetRecordConnection, GraphenePythonError) name = "AssetRecordsOrError"
GrapheneAssetRecordsOrError
python
spack__spack
lib/spack/spack/binary_distribution.py
{ "start": 109053, "end": 109382 }
class ____(spack.error.SpackError): """ Raised when multiple keys can be used to sign. """ def __init__(self, keys): err_msg = "Multiple keys available for signing\n%s\n" % keys err_msg += "Use spack buildcache create -k <key hash> to pick a key." super().__init__(err_msg)
PickKeyException
python
weaviate__weaviate-python-client
weaviate/collections/classes/internal.py
{ "start": 15553, "end": 17271 }
class ____(Generic[Properties, IReferences]): def __init__( self, objects: Optional[List[Object[Properties, IReferences]]], ): self.__objects = objects @classmethod def _from( cls, objects: List[Object[Properties, IReferences]] ) -> "_CrossReference[Properties, IReferences]": return cls(objects) @property def objects(self) -> List[Object[Properties, IReferences]]: """Returns the objects of the cross reference.""" return self.__objects or [] CrossReference: TypeAlias = _CrossReference[Properties, IReferences] """Use this TypeAlias when you want to type hint a cross reference within a generic data model. If you want to define a reference property when creating your collection, use `ReferenceProperty` or `ReferencePropertyMultiTarget` instead. If you want to create a reference when inserting an object, supply the UUIDs directly or use `Reference.to_multi()` instead. Example: >>> import typing >>> import weaviate.classes as wvc >>> >>> class One(typing.TypedDict): ... prop: str >>> >>> class Two(typing.TypedDict): ... one: wvc.CrossReference[One] """ CrossReferences = Mapping[str, _CrossReference[WeaviateProperties, "CrossReferences"]] SingleReferenceInput = Union[UUID, ReferenceToMulti] ReferenceInput: TypeAlias = Union[UUID, Sequence[UUID], ReferenceToMulti] """This type alias is used when providing references as inputs within the `.data` namespace of a collection.""" ReferenceInputs: TypeAlias = Mapping[str, ReferenceInput] """This type alias is used when providing references as inputs within the `.data` namespace of a collection.""" @dataclass
_CrossReference
python
google__pytype
pytype/pytd/visitors_test.py
{ "start": 34881, "end": 39169 }
class ____(parser_test_base.ParserTest): """Tests for RemoveNamePrefix.""" def test_remove_name_prefix(self): src = textwrap.dedent(""" from typing import TypeVar def f(a: T) -> T: ... T = TypeVar("T") class X(Generic[T]): pass """) expected = textwrap.dedent(""" from typing import TypeVar T = TypeVar('T') class X(Generic[T]): ... def f(a: T) -> T: ... """).strip() tree = self.Parse(src) # type parameters t = tree.Lookup("T").Replace(scope="foo") # classes x = tree.Lookup("X") x_template = x.template[0] x_type_param = x_template.type_param.Replace(scope="foo.X") x_template = x_template.Replace(type_param=x_type_param) x = x.Replace(name="foo.X", template=(x_template,)) # functions f = tree.Lookup("f") f_sig = f.signatures[0] f_param = f_sig.params[0] f_type_param = f_param.type.Replace(scope="foo.f") f_param = f_param.Replace(type=f_type_param) f_template = f_sig.template[0].Replace(type_param=f_type_param) f_sig = f_sig.Replace( params=(f_param,), return_type=f_type_param, template=(f_template,) ) f = f.Replace(name="foo.f", signatures=(f_sig,)) tree = tree.Replace( classes=(x,), functions=(f,), type_params=(t,), name="foo" ) tree = tree.Visit(visitors.RemoveNamePrefix()) self.assertMultiLineEqual(expected, pytd_utils.Print(tree)) def test_remove_name_prefix_twice(self): src = textwrap.dedent(""" from typing import Any, TypeVar x = ... # type: Any T = TypeVar("T") class X(Generic[T]): ... """) expected_one = textwrap.dedent(""" from typing import Any, TypeVar T = TypeVar('T') foo.x: Any class foo.X(Generic[T]): ... """).strip() expected_two = textwrap.dedent(""" from typing import Any, TypeVar T = TypeVar('T') x: Any class X(Generic[T]): ... 
""").strip() tree = self.Parse(src) # constants x = tree.Lookup("x").Replace(name="foo.foo.x") # type parameters t = tree.Lookup("T").Replace(scope="foo.foo") # classes x_cls = tree.Lookup("X") x_template = x_cls.template[0] x_type_param = x_template.type_param.Replace(scope="foo.foo.X") x_template = x_template.Replace(type_param=x_type_param) x_cls = x_cls.Replace(name="foo.foo.X", template=(x_template,)) tree = tree.Replace( classes=(x_cls,), constants=(x,), type_params=(t,), name="foo" ) tree = tree.Visit(visitors.RemoveNamePrefix()) self.assertMultiLineEqual(expected_one, pytd_utils.Print(tree)) tree = tree.Visit(visitors.RemoveNamePrefix()) self.assertMultiLineEqual(expected_two, pytd_utils.Print(tree)) def test_remove_name_prefix_on_class_type(self): src = textwrap.dedent(""" x = ... # type: y class Y: ... """) expected = textwrap.dedent(""" x: Y class Y: ... """).strip() tree = self.Parse(src) # constants x = tree.Lookup("x").Replace(name="foo.x", type=pytd.ClassType("foo.Y")) # classes y = tree.Lookup("Y").Replace(name="foo.Y") tree = tree.Replace(classes=(y,), constants=(x,), name="foo") tree = tree.Visit(visitors.RemoveNamePrefix()) self.assertMultiLineEqual(expected, pytd_utils.Print(tree)) def test_remove_name_prefix_on_nested_class(self): src = textwrap.dedent(""" class A: class B: class C: ... D = A.B.C """) expected = textwrap.dedent(""" class A: class B: class C: ... D: type[A.B.C] """).strip() tree = self.Parse(src) # classes a = tree.Lookup("A") b = a.Lookup("B") c = b.Lookup("C").Replace(name="foo.A.B.C") d = b.Lookup("D") d_type = d.type d_generic = d.type.parameters[0].Replace(name="foo.A.B.C") d_type = d_type.Replace(parameters=(d_generic,)) d = d.Replace(type=d_type) b = b.Replace(classes=(c,), constants=(d,), name="foo.A.B") a = a.Replace(classes=(b,), name="foo.A") tree = tree.Replace(classes=(a,), name="foo") tree = tree.Visit(visitors.RemoveNamePrefix()) self.assertMultiLineEqual(expected, pytd_utils.Print(tree))
RemoveNamePrefixTest
python
Lightning-AI__lightning
src/lightning/pytorch/loggers/csv_logs.py
{ "start": 1338, "end": 2156 }
class ____(_FabricExperimentWriter): r"""Experiment writer for CSVLogger. Currently, supports to log hyperparameters and metrics in YAML and CSV format, respectively. This logger supports logging to remote filesystems via ``fsspec``. Make sure you have it installed. Args: log_dir: Directory for the experiment logs """ NAME_HPARAMS_FILE = "hparams.yaml" def __init__(self, log_dir: str) -> None: super().__init__(log_dir=log_dir) self.hparams: dict[str, Any] = {} def log_hparams(self, params: dict[str, Any]) -> None: """Record hparams and save into files.""" self.hparams.update(params) hparams_file = os.path.join(self.log_dir, self.NAME_HPARAMS_FILE) save_hparams_to_yaml(hparams_file, self.hparams)
ExperimentWriter
python
bokeh__bokeh
src/bokeh/models/tools.py
{ "start": 9304, "end": 9780 }
class ____(GestureTool): ''' A base class for tools that perform "selections", e.g. ``BoxSelectTool``. ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) renderers = Either(Auto, List(Instance(DataRenderer)), default="auto", help=""" A list of renderers to hit test against. If unset, defaults to all renderers on a plot. """) @abstract
SelectTool
python
django__django
tests/m2m_through/tests.py
{ "start": 21536, "end": 22898 }
class ____(TestCase): @classmethod def setUpTestData(cls): cls.pea = Ingredient.objects.create(iname="pea") cls.potato = Ingredient.objects.create(iname="potato") cls.tomato = Ingredient.objects.create(iname="tomato") cls.curry = Recipe.objects.create(rname="curry") RecipeIngredient.objects.create(recipe=cls.curry, ingredient=cls.potato) RecipeIngredient.objects.create(recipe=cls.curry, ingredient=cls.pea) RecipeIngredient.objects.create(recipe=cls.curry, ingredient=cls.tomato) def test_retrieval(self): # Forward retrieval self.assertSequenceEqual( self.curry.ingredients.all(), [self.pea, self.potato, self.tomato] ) # Backward retrieval self.assertEqual(self.tomato.recipes.get(), self.curry) def test_choices(self): field = Recipe._meta.get_field("ingredients") self.assertEqual( [choice[0] for choice in field.get_choices(include_blank=False)], ["pea", "potato", "tomato"], ) def test_count(self): self.assertEqual(self.curry.ingredients.count(), 3) self.assertEqual(self.tomato.recipes.count(), 1) def test_exists(self): self.assertTrue(self.curry.ingredients.exists()) self.assertTrue(self.tomato.recipes.exists())
M2mThroughToFieldsTests
python
lxml__lxml
src/lxml/html/__init__.py
{ "start": 23266, "end": 24971 }
class ____: """ An object that represents a method on an element as a function; the function takes either an element or an HTML string. It returns whatever the function normally returns, or if the function works in-place (and so returns None) it returns a serialized form of the resulting document. """ def __init__(self, name, copy=False, source_class=HtmlMixin): self.name = name self.copy = copy self.__doc__ = getattr(source_class, self.name).__doc__ def __call__(self, doc, *args, **kw): result_type = type(doc) if isinstance(doc, (str, bytes)): if 'copy' in kw: raise TypeError( "The keyword 'copy' can only be used with element inputs to %s, not a string input" % self.name) doc = fromstring(doc, **kw) else: if 'copy' in kw: make_a_copy = kw.pop('copy') else: make_a_copy = self.copy if make_a_copy: doc = copy.deepcopy(doc) meth = getattr(doc, self.name) result = meth(*args, **kw) # FIXME: this None test is a bit sloppy if result is None: # Then return what we got in return _transform_result(result_type, doc) else: return result find_rel_links = _MethodFunc('find_rel_links', copy=False) find_class = _MethodFunc('find_class', copy=False) make_links_absolute = _MethodFunc('make_links_absolute', copy=True) resolve_base_href = _MethodFunc('resolve_base_href', copy=True) iterlinks = _MethodFunc('iterlinks', copy=False) rewrite_links = _MethodFunc('rewrite_links', copy=True)
_MethodFunc
python
tiangolo__fastapi
fastapi/routing.py
{ "start": 18916, "end": 27202 }
class ____(routing.Route): def __init__( self, path: str, endpoint: Callable[..., Any], *, response_model: Any = Default(None), status_code: Optional[int] = None, tags: Optional[List[Union[str, Enum]]] = None, dependencies: Optional[Sequence[params.Depends]] = None, summary: Optional[str] = None, description: Optional[str] = None, response_description: str = "Successful Response", responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, deprecated: Optional[bool] = None, name: Optional[str] = None, methods: Optional[Union[Set[str], List[str]]] = None, operation_id: Optional[str] = None, response_model_include: Optional[IncEx] = None, response_model_exclude: Optional[IncEx] = None, response_model_by_alias: bool = True, response_model_exclude_unset: bool = False, response_model_exclude_defaults: bool = False, response_model_exclude_none: bool = False, include_in_schema: bool = True, response_class: Union[Type[Response], DefaultPlaceholder] = Default( JSONResponse ), dependency_overrides_provider: Optional[Any] = None, callbacks: Optional[List[BaseRoute]] = None, openapi_extra: Optional[Dict[str, Any]] = None, generate_unique_id_function: Union[ Callable[["APIRoute"], str], DefaultPlaceholder ] = Default(generate_unique_id), ) -> None: self.path = path self.endpoint = endpoint if isinstance(response_model, DefaultPlaceholder): return_annotation = get_typed_return_annotation(endpoint) if lenient_issubclass(return_annotation, Response): response_model = None else: response_model = return_annotation self.response_model = response_model self.summary = summary self.response_description = response_description self.deprecated = deprecated self.operation_id = operation_id self.response_model_include = response_model_include self.response_model_exclude = response_model_exclude self.response_model_by_alias = response_model_by_alias self.response_model_exclude_unset = response_model_exclude_unset self.response_model_exclude_defaults = response_model_exclude_defaults 
self.response_model_exclude_none = response_model_exclude_none self.include_in_schema = include_in_schema self.response_class = response_class self.dependency_overrides_provider = dependency_overrides_provider self.callbacks = callbacks self.openapi_extra = openapi_extra self.generate_unique_id_function = generate_unique_id_function self.tags = tags or [] self.responses = responses or {} self.name = get_name(endpoint) if name is None else name self.path_regex, self.path_format, self.param_convertors = compile_path(path) if methods is None: methods = ["GET"] self.methods: Set[str] = {method.upper() for method in methods} if isinstance(generate_unique_id_function, DefaultPlaceholder): current_generate_unique_id: Callable[[APIRoute], str] = ( generate_unique_id_function.value ) else: current_generate_unique_id = generate_unique_id_function self.unique_id = self.operation_id or current_generate_unique_id(self) # normalize enums e.g. http.HTTPStatus if isinstance(status_code, IntEnum): status_code = int(status_code) self.status_code = status_code if self.response_model: assert is_body_allowed_for_status_code(status_code), ( f"Status code {status_code} must not have a response body" ) response_name = "Response_" + self.unique_id self.response_field = create_model_field( name=response_name, type_=self.response_model, mode="serialization", ) # Create a clone of the field, so that a Pydantic submodel is not returned # as is just because it's an instance of a subclass of a more limited class # e.g. UserInDB (containing hashed_password) could be a subclass of User # that doesn't have the hashed_password. But because it's a subclass, it # would pass the validation and be returned as is. # By being a new field, no inheritance will be passed as is. A new model # will always be created. 
# TODO: remove when deprecating Pydantic v1 self.secure_cloned_response_field: Optional[ModelField] = ( create_cloned_field(self.response_field) ) else: self.response_field = None # type: ignore self.secure_cloned_response_field = None self.dependencies = list(dependencies or []) self.description = description or inspect.cleandoc(self.endpoint.__doc__ or "") # if a "form feed" character (page break) is found in the description text, # truncate description text to the content preceding the first "form feed" self.description = self.description.split("\f")[0].strip() response_fields = {} for additional_status_code, response in self.responses.items(): assert isinstance(response, dict), "An additional response must be a dict" model = response.get("model") if model: assert is_body_allowed_for_status_code(additional_status_code), ( f"Status code {additional_status_code} must not have a response body" ) response_name = f"Response_{additional_status_code}_{self.unique_id}" response_field = create_model_field( name=response_name, type_=model, mode="serialization" ) response_fields[additional_status_code] = response_field if response_fields: self.response_fields: Dict[Union[int, str], ModelField] = response_fields else: self.response_fields = {} assert callable(endpoint), "An endpoint must be a callable" self.dependant = get_dependant( path=self.path_format, call=self.endpoint, scope="function" ) for depends in self.dependencies[::-1]: self.dependant.dependencies.insert( 0, get_parameterless_sub_dependant(depends=depends, path=self.path_format), ) self._flat_dependant = get_flat_dependant(self.dependant) self._embed_body_fields = _should_embed_body_fields( self._flat_dependant.body_params ) self.body_field = get_body_field( flat_dependant=self._flat_dependant, name=self.unique_id, embed_body_fields=self._embed_body_fields, ) self.app = request_response(self.get_route_handler()) def get_route_handler(self) -> Callable[[Request], Coroutine[Any, Any, Response]]: return 
get_request_handler( dependant=self.dependant, body_field=self.body_field, status_code=self.status_code, response_class=self.response_class, response_field=self.secure_cloned_response_field, response_model_include=self.response_model_include, response_model_exclude=self.response_model_exclude, response_model_by_alias=self.response_model_by_alias, response_model_exclude_unset=self.response_model_exclude_unset, response_model_exclude_defaults=self.response_model_exclude_defaults, response_model_exclude_none=self.response_model_exclude_none, dependency_overrides_provider=self.dependency_overrides_provider, embed_body_fields=self._embed_body_fields, ) def matches(self, scope: Scope) -> Tuple[Match, Scope]: match, child_scope = super().matches(scope) if match != Match.NONE: child_scope["route"] = self return match, child_scope
APIRoute
python
langchain-ai__langchain
libs/partners/anthropic/tests/unit_tests/test_chat_models.py
{ "start": 44066, "end": 52792 }
class ____(BaseTracer): """Fake tracer to capture inputs to `chat_model_start`.""" def __init__(self) -> None: super().__init__() self.chat_model_start_inputs: list = [] def _persist_run(self, run: Run) -> None: """Persist a run.""" def on_chat_model_start(self, *args: Any, **kwargs: Any) -> Run: self.chat_model_start_inputs.append({"args": args, "kwargs": kwargs}) return super().on_chat_model_start(*args, **kwargs) def test_mcp_tracing() -> None: # Test we exclude sensitive information from traces mcp_servers = [ { "type": "url", "url": "https://mcp.deepwiki.com/mcp", "name": "deepwiki", "authorization_token": "PLACEHOLDER", }, ] llm = ChatAnthropic( model="claude-sonnet-4-5-20250929", betas=["mcp-client-2025-04-04"], mcp_servers=mcp_servers, ) tracer = FakeTracer() mock_client = MagicMock() def mock_create(*args: Any, **kwargs: Any) -> Message: return Message( id="foo", content=[TextBlock(type="text", text="bar")], model="baz", role="assistant", stop_reason=None, stop_sequence=None, usage=Usage(input_tokens=2, output_tokens=1), type="message", ) mock_client.messages.create = mock_create input_message = HumanMessage("Test query") with patch.object(llm, "_client", mock_client): _ = llm.invoke([input_message], config={"callbacks": [tracer]}) # Test headers are not traced assert len(tracer.chat_model_start_inputs) == 1 assert "PLACEHOLDER" not in str(tracer.chat_model_start_inputs) # Test headers are correctly propagated to request payload = llm._get_request_payload([input_message]) assert payload["mcp_servers"][0]["authorization_token"] == "PLACEHOLDER" # noqa: S105 def test_cache_control_kwarg() -> None: llm = ChatAnthropic(model=MODEL_NAME) messages = [HumanMessage("foo"), AIMessage("bar"), HumanMessage("baz")] payload = llm._get_request_payload(messages) assert payload["messages"] == [ {"role": "user", "content": "foo"}, {"role": "assistant", "content": "bar"}, {"role": "user", "content": "baz"}, ] payload = llm._get_request_payload(messages, 
cache_control={"type": "ephemeral"}) assert payload["messages"] == [ {"role": "user", "content": "foo"}, {"role": "assistant", "content": "bar"}, { "role": "user", "content": [ {"type": "text", "text": "baz", "cache_control": {"type": "ephemeral"}} ], }, ] messages = [ HumanMessage("foo"), AIMessage("bar"), HumanMessage( content=[ {"type": "text", "text": "baz"}, {"type": "text", "text": "qux"}, ] ), ] payload = llm._get_request_payload(messages, cache_control={"type": "ephemeral"}) assert payload["messages"] == [ {"role": "user", "content": "foo"}, {"role": "assistant", "content": "bar"}, { "role": "user", "content": [ {"type": "text", "text": "baz"}, {"type": "text", "text": "qux", "cache_control": {"type": "ephemeral"}}, ], }, ] def test_context_management_in_payload() -> None: llm = ChatAnthropic( model=MODEL_NAME, # type: ignore[call-arg] betas=["context-management-2025-06-27"], context_management={"edits": [{"type": "clear_tool_uses_20250919"}]}, ) llm_with_tools = llm.bind_tools( [{"type": "web_search_20250305", "name": "web_search"}] ) input_message = HumanMessage("Search for recent developments in AI") payload = llm_with_tools._get_request_payload([input_message]) # type: ignore[attr-defined] assert payload["context_management"] == { "edits": [{"type": "clear_tool_uses_20250919"}] } def test_anthropic_model_params() -> None: llm = ChatAnthropic(model=MODEL_NAME) ls_params = llm._get_ls_params() assert ls_params == { "ls_provider": "anthropic", "ls_model_type": "chat", "ls_model_name": MODEL_NAME, "ls_max_tokens": 64000, "ls_temperature": None, } ls_params = llm._get_ls_params(model=MODEL_NAME) assert ls_params.get("ls_model_name") == MODEL_NAME def test_streaming_cache_token_reporting() -> None: """Test that cache tokens are properly reported in streaming events.""" from unittest.mock import MagicMock from anthropic.types import MessageDeltaUsage from langchain_anthropic.chat_models import _make_message_chunk_from_anthropic_event # Create a mock 
message_start event mock_message = MagicMock() mock_message.model = MODEL_NAME mock_message.usage.input_tokens = 100 mock_message.usage.output_tokens = 0 mock_message.usage.cache_read_input_tokens = 25 mock_message.usage.cache_creation_input_tokens = 10 message_start_event = MagicMock() message_start_event.type = "message_start" message_start_event.message = mock_message # Create a mock message_delta event with complete usage info mock_delta_usage = MessageDeltaUsage( output_tokens=50, input_tokens=100, cache_read_input_tokens=25, cache_creation_input_tokens=10, ) mock_delta = MagicMock() mock_delta.stop_reason = "end_turn" mock_delta.stop_sequence = None message_delta_event = MagicMock() message_delta_event.type = "message_delta" message_delta_event.usage = mock_delta_usage message_delta_event.delta = mock_delta # Test message_start event start_chunk, _ = _make_message_chunk_from_anthropic_event( message_start_event, stream_usage=True, coerce_content_to_string=True, block_start_event=None, ) # Test message_delta event - should contain complete usage metadata (w/ cache) delta_chunk, _ = _make_message_chunk_from_anthropic_event( message_delta_event, stream_usage=True, coerce_content_to_string=True, block_start_event=None, ) # Verify message_delta has complete usage_metadata including cache tokens assert start_chunk is not None, "message_start should produce a chunk" assert getattr(start_chunk, "usage_metadata", None) is None, ( "message_start should not have usage_metadata" ) assert delta_chunk is not None, "message_delta should produce a chunk" assert delta_chunk.usage_metadata is not None, ( "message_delta should have usage_metadata" ) assert "input_token_details" in delta_chunk.usage_metadata input_details = delta_chunk.usage_metadata["input_token_details"] assert input_details.get("cache_read") == 25 assert input_details.get("cache_creation") == 10 # Verify totals are correct: 100 base + 25 cache_read + 10 cache_creation = 135 assert 
delta_chunk.usage_metadata["input_tokens"] == 135 assert delta_chunk.usage_metadata["output_tokens"] == 50 assert delta_chunk.usage_metadata["total_tokens"] == 185 def test_strict_tool_use() -> None: model = ChatAnthropic( model="claude-sonnet-4-5", # type: ignore[call-arg] betas=["structured-outputs-2025-11-13"], ) def get_weather(location: str, unit: Literal["C", "F"]) -> str: """Get the weather at a location.""" return "75 degrees Fahrenheit." model_with_tools = model.bind_tools([get_weather], strict=True) tool_definition = model_with_tools.kwargs["tools"][0] # type: ignore[attr-defined] assert tool_definition["strict"] is True def test_profile() -> None: model = ChatAnthropic(model="claude-sonnet-4-20250514") assert model.profile assert not model.profile["structured_output"] model = ChatAnthropic(model="claude-sonnet-4-5") assert model.profile assert model.profile["structured_output"] assert model.profile["tool_calling"] # Test overwriting a field model.profile["tool_calling"] = False assert not model.profile["tool_calling"] # Test we didn't mutate model = ChatAnthropic(model="claude-sonnet-4-5") assert model.profile assert model.profile["tool_calling"] # Test passing in profile model = ChatAnthropic(model="claude-sonnet-4-5", profile={"tool_calling": False}) assert model.profile == {"tool_calling": False} async def test_model_profile_not_blocking() -> None: with blockbuster_ctx(): model = ChatAnthropic(model="claude-sonnet-4-5") _ = model.profile
FakeTracer
python
spack__spack
lib/spack/spack/test/variant.py
{ "start": 11771, "end": 13884 }
class ____: def test_validation(self): a = Variant( "foo", default="", description="", values=("bar", "baz", "foobar"), multi=False ) # Valid vspec, shouldn't raise vspec = a.make_variant("bar") a.validate_or_raise(vspec, "test-package") # Multiple values are not allowed with pytest.raises(MultipleValuesInExclusiveVariantError): vspec.set("bar", "baz") # Inconsistent vspec vspec.name = "FOO" with pytest.raises(InconsistentValidationError): a.validate_or_raise(vspec, "test-package") # Valid multi-value vspec a.multi = True vspec = a.make_variant("bar", "baz") a.validate_or_raise(vspec, "test-package") # Add an invalid value vspec.set("bar", "baz", "barbaz") with pytest.raises(InvalidVariantValueError): a.validate_or_raise(vspec, "test-package") def test_callable_validator(self): def validator(x): try: return isinstance(int(x), numbers.Integral) except ValueError: return False a = Variant("foo", default="1024", description="", values=validator, multi=False) vspec = a.make_default() a.validate_or_raise(vspec, "test-package") vspec.set("2056") a.validate_or_raise(vspec, "test-package") vspec.set("foo") with pytest.raises(InvalidVariantValueError): a.validate_or_raise(vspec, "test-package") def test_representation(self): a = Variant( "foo", default="", description="", values=("bar", "baz", "foobar"), multi=False ) assert a.allowed_values == "bar, baz, foobar" def test_str(self): string = str( Variant( "foo", default="", description="", values=("bar", "baz", "foobar"), multi=False ) ) assert "'foo'" in string assert "default=''" in string assert "description=''" in string assert "values=('foo', 'bar', 'baz') in string"
TestVariant
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 483479, "end": 484185 }
class ____(sgqlc.types.relay.Connection): """The connection type for CWE.""" __schema__ = github_schema __field_names__ = ("edges", "nodes", "page_info", "total_count") edges = sgqlc.types.Field(sgqlc.types.list_of("CWEEdge"), graphql_name="edges") """A list of edges.""" nodes = sgqlc.types.Field(sgqlc.types.list_of("CWE"), graphql_name="nodes") """A list of nodes.""" page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo") """Information to aid in pagination.""" total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount") """Identifies the total count of items in the connection."""
CWEConnection
python
getsentry__sentry
src/sentry/integrations/jira/views/extension_configuration.py
{ "start": 406, "end": 1162 }
class ____(IntegrationExtensionConfigurationView): """ Handle the UI for adding the Jira integration to a Sentry org. """ provider = IntegrationProviderSlug.JIRA.value external_provider_key = IntegrationProviderSlug.JIRA.value def map_params_to_state(self, original_params): # decode the signed params and add them to whatever params we have params = original_params.copy() signed_params = params.pop("signed_params", {}) params.update( unsign( signed_params, max_age=INSTALL_EXPIRATION_TIME, salt=SALT, ) ) params["metadata"] = orjson.loads(params["metadata"]) return params
JiraExtensionConfigurationView
python
lazyprogrammer__machine_learning_examples
hmm_class/hmmd_tf.py
{ "start": 517, "end": 4802 }
class ____: def __init__(self, M): self.M = M # number of hidden states def set_session(self, session): self.session = session def fit(self, X, max_iter=10, print_period=1): # train the HMM model using stochastic gradient descent N = len(X) print("number of train samples:", N) costs = [] for it in range(max_iter): if it % print_period == 0: print("it:", it) for n in range(N): # this would of course be much faster if we didn't do this on # every iteration of the loop c = self.get_cost_multi(X).sum() costs.append(c) self.session.run(self.train_op, feed_dict={self.tfx: X[n]}) plt.plot(costs) plt.show() def get_cost(self, x): # returns log P(x | model) # using the forward part of the forward-backward algorithm # print "getting cost for:", x return self.session.run(self.cost, feed_dict={self.tfx: x}) def log_likelihood(self, x): return -self.session.run(self.cost, feed_dict={self.tfx: x}) def get_cost_multi(self, X): return np.array([self.get_cost(x) for x in X]) def build(self, preSoftmaxPi, preSoftmaxA, preSoftmaxB): M, V = preSoftmaxB.shape self.preSoftmaxPi = tf.Variable(preSoftmaxPi) self.preSoftmaxA = tf.Variable(preSoftmaxA) self.preSoftmaxB = tf.Variable(preSoftmaxB) pi = tf.nn.softmax(self.preSoftmaxPi) A = tf.nn.softmax(self.preSoftmaxA) B = tf.nn.softmax(self.preSoftmaxB) # define cost self.tfx = tf.placeholder(tf.int32, shape=(None,), name='x') def recurrence(old_a_old_s, x_t): old_a = tf.reshape(old_a_old_s[0], (1, M)) a = tf.matmul(old_a, A) * B[:, x_t] a = tf.reshape(a, (M,)) s = tf.reduce_sum(a) return (a / s), s # remember, tensorflow scan is going to loop through # all the values! 
# we treat the first value differently than the rest # so we only want to loop through tfx[1:] # the first scale being 1 doesn't affect the log-likelihood # because log(1) = 0 alpha, scale = tf.scan( fn=recurrence, elems=self.tfx[1:], initializer=(pi*B[:,self.tfx[0]], np.float32(1.0)), ) self.cost = -tf.reduce_sum(tf.log(scale)) self.train_op = tf.train.AdamOptimizer(1e-2).minimize(self.cost) def init_random(self, V): preSoftmaxPi0 = np.zeros(self.M).astype(np.float32) # initial state distribution preSoftmaxA0 = np.random.randn(self.M, self.M).astype(np.float32) # state transition matrix preSoftmaxB0 = np.random.randn(self.M, V).astype(np.float32) # output distribution self.build(preSoftmaxPi0, preSoftmaxA0, preSoftmaxB0) def set(self, preSoftmaxPi, preSoftmaxA, preSoftmaxB): op1 = self.preSoftmaxPi.assign(preSoftmaxPi) op2 = self.preSoftmaxA.assign(preSoftmaxA) op3 = self.preSoftmaxB.assign(preSoftmaxB) self.session.run([op1, op2, op3]) def fit_coin(): X = [] for line in open('coin_data.txt'): # 1 for H, 0 for T x = [1 if e == 'H' else 0 for e in line.rstrip()] X.append(x) # X = np.array(X).astype(np.int32) hmm = HMM(2) # the entire graph (including optimizer's variables) must be built # before calling global variables initializer! hmm.init_random(2) init = tf.global_variables_initializer() with tf.Session() as session: session.run(init) hmm.set_session(session) hmm.fit(X, max_iter=5) L = hmm.get_cost_multi(X).sum() print("LL with fitted params:", L) # try true values # remember these must be in their "pre-softmax" forms pi = np.log( np.array([0.5, 0.5]) ).astype(np.float32) A = np.log( np.array([[0.1, 0.9], [0.8, 0.2]]) ).astype(np.float32) B = np.log( np.array([[0.6, 0.4], [0.3, 0.7]]) ).astype(np.float32) hmm.set(pi, A, B) L = hmm.get_cost_multi(X).sum() print("LL with true params:", L) if __name__ == '__main__': fit_coin()
HMM
python
coleifer__peewee
tests/sql.py
{ "start": 74554, "end": 76569 }
class ____(BaseTestCase): def test_case_function(self): NameNum = Table('nn', ('name', 'number')) query = (NameNum .select(NameNum.name, Case(NameNum.number, ( (1, 'one'), (2, 'two')), '?').alias('num_str'))) self.assertSQL(query, ( 'SELECT "t1"."name", CASE "t1"."number" ' 'WHEN ? THEN ? ' 'WHEN ? THEN ? ' 'ELSE ? END AS "num_str" ' 'FROM "nn" AS "t1"'), [1, 'one', 2, 'two', '?']) query = (NameNum .select(NameNum.name, Case(None, ( (NameNum.number == 1, 'one'), (NameNum.number == 2, 'two')), '?'))) self.assertSQL(query, ( 'SELECT "t1"."name", CASE ' 'WHEN ("t1"."number" = ?) THEN ? ' 'WHEN ("t1"."number" = ?) THEN ? ' 'ELSE ? END ' 'FROM "nn" AS "t1"'), [1, 'one', 2, 'two', '?']) def test_case_subquery(self): Name = Table('n', ('id', 'name',)) case = Case(None, [(Name.id.in_(Name.select(Name.id)), 1)], 0) q = Name.select(fn.SUM(case)) self.assertSQL(q, ( 'SELECT SUM(' 'CASE WHEN ("t1"."id" IN (SELECT "t1"."id" FROM "n" AS "t1")) ' 'THEN ? ELSE ? END) FROM "n" AS "t1"'), [1, 0]) case = Case(None, [ (Name.id < 5, Name.select(fn.SUM(Name.id))), (Name.id > 5, Name.select(fn.COUNT(Name.name)).distinct())], Name.select(fn.MAX(Name.id))) q = Name.select(Name.name, case.alias('magic')) self.assertSQL(q, ( 'SELECT "t1"."name", CASE ' 'WHEN ("t1"."id" < ?) ' 'THEN (SELECT SUM("t1"."id") FROM "n" AS "t1") ' 'WHEN ("t1"."id" > ?) ' 'THEN (SELECT DISTINCT COUNT("t1"."name") FROM "n" AS "t1") ' 'ELSE (SELECT MAX("t1"."id") FROM "n" AS "t1") END AS "magic" ' 'FROM "n" AS "t1"'), [5, 5])
TestCaseFunction
python
allegroai__clearml
clearml/backend_api/services/v2_20/tasks.py
{ "start": 337411, "end": 338636 }
class ____(Response): """ Response of tasks.get_types endpoint. :param types: Unique list of the task types used in the requested projects :type types: Sequence[str] """ _service = "tasks" _action = "get_types" _version = "2.20" _schema = { "definitions": {}, "properties": { "types": { "description": "Unique list of the task types used in the requested projects", "items": {"type": "string"}, "type": ["array", "null"], } }, "type": "object", } def __init__(self, types: Optional[List[str]] = None, **kwargs: Any) -> None: super(GetTypesResponse, self).__init__(**kwargs) self.types = types @schema_property("types") def types(self) -> Optional[List[str]]: return self._property_types @types.setter def types(self, value: Optional[List[str]]) -> None: if value is None: self._property_types = None return self.assert_isinstance(value, "types", (list, tuple)) self.assert_isinstance(value, "types", six.string_types, is_array=True) self._property_types = value
GetTypesResponse
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/sensors/cloud_formation.py
{ "start": 3030, "end": 5019 }
class ____(AwsBaseSensor[CloudFormationHook]): """ Waits for a stack to be deleted successfully on AWS CloudFormation. .. seealso:: For more information on how to use this sensor, take a look at the guide: :ref:`howto/sensor:CloudFormationDeleteStackSensor` :param stack_name: The name of the stack to wait for (templated) :param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node). :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. :param verify: Whether or not to verify SSL certificates. See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html :param botocore_config: Configuration dictionary (key-values) for botocore client. See: https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html """ aws_hook_class = CloudFormationHook template_fields: Sequence[str] = aws_template_fields("stack_name") ui_color = "#C5CAE9" def __init__( self, *, stack_name: str, aws_conn_id: str | None = "aws_default", region_name: str | None = None, **kwargs, ): super().__init__(**kwargs) self.aws_conn_id = aws_conn_id self.region_name = region_name self.stack_name = stack_name def poke(self, context: Context): stack_status = self.hook.get_stack_status(self.stack_name) if stack_status in ("DELETE_COMPLETE", None): return True if stack_status == "DELETE_IN_PROGRESS": return False raise ValueError(f"Stack {self.stack_name} in bad state: {stack_status}")
CloudFormationDeleteStackSensor
python
airbytehq__airbyte
airbyte-integrations/connectors/source-monday/components.py
{ "start": 21660, "end": 22200 }
class ____(RecordTransformation): def transform(self, record: MutableMapping[str, Any], config: Optional[Config] = None, **kwargs) -> MutableMapping[str, Any]: # Oncall issue: https://github.com/airbytehq/oncall/issues/4337 column_values = record.get("column_values", []) for values in column_values: display_value, text = values.get("display_value"), values.get("text") if display_value and not text: values["text"] = display_value return record
MondayTransformation
python
django-haystack__django-haystack
haystack/templatetags/more_like_this.py
{ "start": 178, "end": 3465 }
class ____(template.Node): def __init__(self, model, varname, for_types=None, limit=None): self.model = template.Variable(model) self.varname = varname self.for_types = for_types self.limit = limit if self.limit is not None: self.limit = int(self.limit) def render(self, context): try: model_instance = self.model.resolve(context) sqs = SearchQuerySet() if self.for_types is not None: intermediate = template.Variable(self.for_types) for_types = intermediate.resolve(context).split(",") search_models = [] for model in for_types: model_class = haystack_get_model(*model.split(".")) if model_class: search_models.append(model_class) sqs = sqs.models(*search_models) sqs = sqs.more_like_this(model_instance) if self.limit is not None: sqs = sqs[: self.limit] context[self.varname] = sqs except Exception: logging.exception( "Unhandled exception rendering %r", self, level=logging.WARNING, ) return "" @register.tag def more_like_this(parser, token): """ Fetches similar items from the search index to find content that is similar to the provided model's content. Syntax:: {% more_like_this model_instance as varname [for app_label.model_name,app_label.model_name,...] [limit n] %} Example:: # Pull a full SearchQuerySet (lazy loaded) of similar content. {% more_like_this entry as related_content %} # Pull just the top 5 similar pieces of content. {% more_like_this entry as related_content limit 5 %} # Pull just the top 5 similar entries or comments. {% more_like_this entry as related_content for "blog.entry,comments.comment" limit 5 %} """ bits = token.split_contents() if len(bits) not in (4, 6, 8): raise template.TemplateSyntaxError( "'%s' tag requires either 3, 5 or 7 arguments." % bits[0] ) model = bits[1] if bits[2] != "as": raise template.TemplateSyntaxError( "'%s' tag's second argument should be 'as'." 
% bits[0] ) varname = bits[3] limit = None for_types = None if len(bits) == 6: if bits[4] != "limit" and bits[4] != "for": raise template.TemplateSyntaxError( "'%s' tag's fourth argument should be either 'limit' or 'for'." % bits[0] ) if bits[4] == "limit": limit = bits[5] else: for_types = bits[5] if len(bits) == 8: if bits[4] != "for": raise template.TemplateSyntaxError( "'%s' tag's fourth argument should be 'for'." % bits[0] ) for_types = bits[5] if bits[6] != "limit": raise template.TemplateSyntaxError( "'%s' tag's sixth argument should be 'limit'." % bits[0] ) limit = bits[7] return MoreLikeThisNode(model, varname, for_types, limit)
MoreLikeThisNode
python
openai__openai-python
src/openai/resources/beta/realtime/transcription_sessions.py
{ "start": 13512, "end": 13811 }
class ____: def __init__(self, transcription_sessions: TranscriptionSessions) -> None: self._transcription_sessions = transcription_sessions self.create = to_streamed_response_wrapper( transcription_sessions.create, )
TranscriptionSessionsWithStreamingResponse
python
dagster-io__dagster
python_modules/dagster/dagster/_core/test_utils.py
{ "start": 14139, "end": 19232 }
class ____(SecretsLoader, ConfigurableClass): def __init__(self, inst_data: Optional[ConfigurableClassData], env_vars: dict[str, str]): self._inst_data = inst_data self.env_vars = env_vars def get_secrets_for_environment(self, location_name: str) -> dict[str, str]: # pyright: ignore[reportIncompatibleMethodOverride] return self.env_vars.copy() @property def inst_data(self) -> Optional[ConfigurableClassData]: return self._inst_data @classmethod def config_type(cls) -> Mapping[str, Any]: return {"env_vars": Field(Permissive())} @classmethod def from_config_value( cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any] ) -> Self: return cls(inst_data=inst_data, **config_value) def get_crash_signals() -> Sequence[Signals]: return [get_terminate_signal()] @contextmanager def in_process_test_workspace( instance: DagsterInstance, loadable_target_origin: LoadableTargetOrigin, container_image: Optional[str] = None, ) -> Iterator[WorkspaceRequestContext]: with WorkspaceProcessContext( instance, InProcessTestWorkspaceLoadTarget( InProcessCodeLocationOrigin( loadable_target_origin, container_image=container_image, ), ), ) as workspace_process_context: yield workspace_process_context.create_request_context() @contextmanager def create_test_daemon_workspace_context( workspace_load_target: WorkspaceLoadTarget, instance: DagsterInstance, ) -> Iterator[WorkspaceProcessContext]: """Creates a DynamicWorkspace suitable for passing into a DagsterDaemon loop when running tests.""" from dagster._daemon.controller import create_daemon_grpc_server_registry configure_loggers() with create_daemon_grpc_server_registry(instance) as grpc_server_registry: with WorkspaceProcessContext( instance, workspace_load_target, grpc_server_registry=grpc_server_registry, ) as workspace_process_context: yield workspace_process_context def load_remote_repo( workspace_context: WorkspaceProcessContext, repo_name: str ) -> RemoteRepository: code_location_entry = next( 
iter(workspace_context.create_request_context().get_code_location_entries().values()) ) assert code_location_entry.code_location, code_location_entry.load_error return code_location_entry.code_location.get_repository(repo_name) default_resources_for_test = {"io_manager": fs_io_manager} def strip_ansi(input_str: str) -> str: ansi_escape = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]") return ansi_escape.sub("", input_str) def get_logger_output_from_capfd(capfd: Any, logger_name: str) -> str: return "\n".join( [ line for line in strip_ansi(capfd.readouterr().out.replace("\r\n", "\n")).split("\n") if logger_name in line ] ) def _step_events(instance: DagsterInstance, run: DagsterRun) -> Mapping[str, AbstractSet[str]]: events_by_step = defaultdict(set) logs = instance.all_logs(run.run_id) for record in logs: if not record.is_dagster_event or not record.step_key: continue events_by_step[record.step_key].add(record.get_dagster_event().event_type_value) return events_by_step def step_did_not_run(instance: DagsterInstance, run: DagsterRun, step_name: str) -> bool: step_events = _step_events(instance, run)[step_name] return len(step_events) == 0 def step_succeeded(instance: DagsterInstance, run: DagsterRun, step_name: str) -> bool: step_events = _step_events(instance, run)[step_name] return "STEP_SUCCESS" in step_events def step_failed(instance: DagsterInstance, run: DagsterRun, step_name: str) -> bool: step_events = _step_events(instance, run)[step_name] return "STEP_FAILURE" in step_events def test_counter(): @traced async def foo(): pass @traced async def bar(): pass async def call_foo(num): await asyncio.gather(*[foo() for _ in range(num)]) async def call_bar(num): await asyncio.gather(*[bar() for _ in range(num)]) async def run(): await call_foo(10) await call_foo(10) await call_bar(10) traced_counter.set(Counter()) asyncio.run(run()) counter = traced_counter.get() assert isinstance(counter, Counter) counts = counter.counts() assert counts["foo"] == 20 assert 
counts["bar"] == 10 def wait_for_futures(futures: dict[str, Future], timeout: Optional[float] = None): start_time = time.time() results = {} for target_id, future in futures.copy().items(): if timeout is not None: future_timeout = max(0, timeout - (time.time() - start_time)) else: future_timeout = None if not future.done(): results[target_id] = future.result(timeout=future_timeout) del futures[target_id] return results
TestSecretsLoader
python
huggingface__transformers
tests/repo_utils/test_check_copies.py
{ "start": 2944, "end": 3488 }
class ____: attr_1 = 1 attr_2 = 2 def __init__(self, a=1, b=2): self.a = a self.b = b # Copied from transformers.models.dummy_gpt2.modeling_dummy_gpt2.GPT2DummyModel.forward def forward(self, c): return 1 def existing_common(self, c): return 4 def existing_diff_to_be_ignored(self, c): return 9 """ MOCK_DUMMY_ROBERTA_CODE_MATCH = """ # Copied from transformers.models.dummy_bert_match.modeling_dummy_bert_match.BertDummyModel with BertDummy->RobertaBertDummy
BertDummyModel
python
bokeh__bokeh
src/bokeh/models/mappers.py
{ "start": 2692, "end": 3348 }
class ____(Mapper): ''' Base class for color mapper types. ''' def __init__(self, *args, **kwargs) -> None: if len(args) == 1: kwargs['palette'] = args[0] super().__init__(**kwargs) palette = Seq(Color, help=""" A sequence of colors to use as the target palette for mapping. This property can also be set as a ``String``, to the name of any of the palettes shown in :ref:`bokeh.palettes`. """).accepts(Enum(Palette), lambda pal: getattr(palettes, pal)) nan_color = Color(default="gray", help=""" Color to be used if data is NaN or otherwise not mappable. """) @abstract
ColorMapper
python
encode__django-rest-framework
rest_framework/versioning.py
{ "start": 3321, "end": 5099 }
class ____(BaseVersioning): """ To the client this is the same style as `URLPathVersioning`. The difference is in the backend - this implementation uses Django's URL namespaces to determine the version. An example URL conf that is namespaced into two separate versions # users/urls.py urlpatterns = [ path('/users/', users_list, name='users-list'), path('/users/<int:pk>/', users_detail, name='users-detail') ] # urls.py urlpatterns = [ path('v1/', include('users.urls', namespace='v1')), path('v2/', include('users.urls', namespace='v2')) ] GET /1.0/something/ HTTP/1.1 Host: example.com Accept: application/json """ invalid_version_message = _('Invalid version in URL path. Does not match any version namespace.') def determine_version(self, request, *args, **kwargs): resolver_match = getattr(request, 'resolver_match', None) if resolver_match is None or not resolver_match.namespace: return self.default_version # Allow for possibly nested namespaces. possible_versions = resolver_match.namespace.split(':') for version in possible_versions: if self.is_allowed_version(version): return version raise exceptions.NotFound(self.invalid_version_message) def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra): if request.version is not None: viewname = self.get_versioned_viewname(viewname, request) return super().reverse( viewname, args, kwargs, request, format, **extra ) def get_versioned_viewname(self, viewname, request): return request.version + ':' + viewname
NamespaceVersioning
python
spyder-ide__spyder
spyder/utils/qthelpers.py
{ "start": 24965, "end": 31206 }
class ____(QApplication, SpyderConfigurationAccessor, SpyderFontsMixin): """Subclass with several adjustments for Spyder.""" sig_open_external_file = Signal(str) def __init__(self, *args): QApplication.__init__(self, *args) self._never_shown = True self._has_started = False self._pending_file_open = [] self._original_handlers = {} # This is filled at startup in spyder.app.utils.create_window self._main_window: QMainWindow = None # ---- Qt methods # ------------------------------------------------------------------------- def event(self, event): if sys.platform == 'darwin' and event.type() == QEvent.FileOpen: fname = str(event.file()) if sys.argv and sys.argv[0] == fname: # Ignore requests to open own script # Later, mainwindow.initialize() will set sys.argv[0] to '' pass elif self._has_started: self.sig_open_external_file.emit(fname) else: self._pending_file_open.append(fname) return QApplication.event(self, event) # ---- Public API # ------------------------------------------------------------------------- def set_font(self): """Set font for the entire application.""" # This selects the system font by default if self.get_conf('use_system_font', section='appearance'): family = self.font().family() size = self.font().pointSize() self.set_conf('app_font/family', family, section='appearance') self.set_conf('app_font/size', size, section='appearance') else: family = self.get_conf('app_font/family', section='appearance') size = self.get_conf('app_font/size', section='appearance') app_font = self.font() app_font.setFamily(family) if size > 0: app_font.setPointSize(size) self._set_monospace_interface_font(app_font) self.setFont(app_font) def get_mainwindow_position(self) -> QPoint: """Get main window position.""" return self._main_window.pos() def get_mainwindow_width(self) -> int: """Get main window width.""" return self._main_window.width() def get_mainwindow_height(self) -> int: """Get main window height.""" return self._main_window.height() # ---- Private API # 
------------------------------------------------------------------------- def _set_monospace_interface_font(self, app_font): """ Set monospace interface font in our config system according to the app one. """ x_height = QFontMetrics(app_font).xHeight() size = app_font.pointSize() plain_font = self.get_font(SpyderFontType.Monospace) plain_font.setPointSize(size) # Select a size that matches the app font one, so that the UI looks # consistent. attempts = 0 monospace_size = size while ( # Keep going until the xHeight's of both fonts match QFontMetrics(plain_font).xHeight() != x_height # We only check three point sizes above and below the app font to # avoid getting stuck in an infinite loop. and ((size - 4) < monospace_size < (size + 4)) # Do this up to six times to not get stuck in an infinite loop. # Fixes spyder-ide/spyder#22661 and attempts < 6 ): if QFontMetrics(plain_font).xHeight() > x_height: monospace_size -= 1 else: monospace_size += 1 plain_font.setPointSize(monospace_size) attempts += 1 # There are some fonts (e.g. MS Serif) for which it seems that Qt # can't detect their xHeight's as expected. So, we check below # if the monospace font size ends up being too big or too small after # the above xHeight comparison and set it to the interface size if # that's the case. 
if not ((size - 4) < monospace_size < (size + 4)): monospace_size = size self.set_conf('monospace_app_font/family', plain_font.family(), section='appearance') self.set_conf('monospace_app_font/size', monospace_size, section='appearance') def restore_launchservices(): """Restore LaunchServices to the previous state""" app = QApplication.instance() for key, handler in app._original_handlers.items(): UTI, role = key als.set_UTI_handler(UTI, role, handler) def register_app_launchservices( uniform_type_identifier="public.python-script", role='editor'): """ Register app to the Apple launch services so it can open Python files """ app = QApplication.instance() old_handler = als.get_UTI_handler(uniform_type_identifier, role) app._original_handlers[(uniform_type_identifier, role)] = old_handler # Restore previous handle when quitting app.aboutToQuit.connect(restore_launchservices) if not app._never_shown: bundle_identifier = als.get_bundle_identifier() als.set_UTI_handler( uniform_type_identifier, role, bundle_identifier) return # Wait to be visible to set ourselves as the UTI handler def handle_applicationStateChanged(state): if state == Qt.ApplicationActive and app._never_shown: app._never_shown = False bundle_identifier = als.get_bundle_identifier() als.set_UTI_handler( uniform_type_identifier, role, bundle_identifier) app.applicationStateChanged.connect(handle_applicationStateChanged) def safe_disconnect(signal): """Disconnect a Qt signal, ignoring TypeError.""" try: signal.disconnect() except TypeError: # Raised when no slots are connected to the signal pass def qbytearray_to_str(qba): """Convert QByteArray object to str in a way compatible with Python 3""" return str(bytes(qba.toHex().data()).decode()) if __name__ == "__main__": show_std_icons()
SpyderApplication
python
kamyu104__LeetCode-Solutions
Python/find-two-non-overlapping-sub-arrays-each-with-target-sum.py
{ "start": 29, "end": 761 }
class ____(object): def minSumOfLengths(self, arr, target): """ :type arr: List[int] :type target: int :rtype: int """ prefix, dp = {0: -1}, [0]*len(arr) # dp[i], min len of target subarray until i result = min_len = float("inf") accu = 0 for right in xrange(len(arr)): accu += arr[right] prefix[accu] = right if accu-target in prefix: left = prefix[accu-target] min_len = min(min_len, right-left) if left != -1: result = min(result, dp[left] + (right-left)) dp[right] = min_len return result if result != float("inf") else -1
Solution
python
getsentry__sentry
src/sentry/workflow_engine/migrations/0101_remove_is_single_written_field.py
{ "start": 239, "end": 1562 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = False dependencies = [ ("workflow_engine", "0100_move_is_single_written_to_pending"), ] operations = [ SafeRemoveField( model_name="workflowfirehistory", name="is_single_written", deletion_action=DeletionAction.DELETE, ), ]
Migration
python
sympy__sympy
sympy/sets/ordinals.py
{ "start": 7163, "end": 7612 }
class ____(Ordinal): """The ordinal omega which forms the base of all ordinals in cantor normal form. OrdinalOmega can be imported as ``omega``. Examples ======== >>> from sympy.sets.ordinals import omega >>> omega + omega w*2 """ def __new__(cls): return Ordinal.__new__(cls) @property def terms(self): return (OmegaPower(1, 1),) ord0 = OrdinalZero() omega = OrdinalOmega()
OrdinalOmega
python
google__pytype
pytype/tests/test_stdlib2.py
{ "start": 116, "end": 3809 }
class ____(test_base.BaseTest, test_utils.TestCollectionsMixin): """Tests for files in typeshed/stdlib.""" def test_collections_deque(self): # This method is different from the preceding ones because we model # collections.deque as a subclass, rather than an alias, of typing.Deque. errors = self.CheckWithErrors(""" from typing import Deque import collections def f1(x: Deque): ... def f2(x: int): ... f1(collections.deque()) f2(collections.deque()) # wrong-arg-types[e] """) self.assertErrorRegexes(errors, {"e": r"int.*deque"}) def test_collections_deque_init(self): ty = self.Infer(""" import collections x = collections.deque([1, 2, 3], maxlen=10) """) self.assertTypesMatchPytd( ty, """ import collections x = ... # type: collections.deque[int] """, ) def test_partial(self): self.Check(""" import functools from typing import TypeVar T = TypeVar('T', float, str) def identity(x: T) -> T: return x functools.partial(identity) """) def test_collections_container(self): self._testCollectionsObject("Container", "[]", "42", r"Container.*int") def test_collections_hashable(self): self._testCollectionsObject("Hashable", "42", "[]", r"Hashable.*list") def test_collections_iterable(self): self._testCollectionsObject("Iterable", "[]", "42", r"Iterable.*int") def test_collections_iterator(self): self._testCollectionsObject("Iterator", "iter([])", "42", r"Iterator.*int") def test_collections_sized(self): self._testCollectionsObject("Sized", "[]", "42", r"Sized.*int") def test_collections_callable(self): self._testCollectionsObject("Callable", "list", "42", r"Callable.*int") def test_collections_sequence(self): self._testCollectionsObject("Sequence", "[]", "42", r"Sequence.*int") def test_collections_mutable_sequence(self): self._testCollectionsObject( "MutableSequence", "[]", "42", r"MutableSequence.*int" ) def test_collections_set(self): self._testCollectionsObject("Set", "set()", "42", r"set.*int") def test_collections_mutable_set(self): self._testCollectionsObject("MutableSet", 
"set()", "42", r"MutableSet.*int") def test_collections_mapping(self): self._testCollectionsObject("Mapping", "{}", "42", r"Mapping.*int") def test_collections_mutable_mapping(self): self._testCollectionsObject( "MutableMapping", "{}", "42", r"MutableMapping.*int" ) def test_tempdir_name(self): self.Check(""" import tempfile def f() -> str: return tempfile.TemporaryDirectory().name """) def test_fraction_subclass(self): ty = self.Infer(""" import fractions class MyClass(fractions.Fraction): pass def foo() -> MyClass: return MyClass(1, 2) """) self.assertTypesMatchPytd( ty, """ import fractions class MyClass(fractions.Fraction): ... def foo() -> MyClass: ... """, ) def test_codetype(self): self.Check(""" import types class Foo: x: types.CodeType def set_x(self): self.x = compile('', '', '') """) def test_os_path_basename(self): self.options.tweak(strict_parameter_checks=False) self.Check(""" import os from typing import Optional x: Optional[str] assert_type(os.path.basename(x), str) """) def test_decimal_round(self): self.Check(""" import decimal x = decimal.Decimal('5.02') assert_type(round(x), int) assert_type(round(x, 1), decimal.Decimal) """)
StdLibTestsBasic
python
kubernetes-client__python
kubernetes/client/api/admissionregistration_api.py
{ "start": 543, "end": 5215 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def get_api_group(self, **kwargs): # noqa: E501 """get_api_group # noqa: E501 get information of a group # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_group(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: V1APIGroup If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.get_api_group_with_http_info(**kwargs) # noqa: E501 def get_api_group_with_http_info(self, **kwargs): # noqa: E501 """get_api_group # noqa: E501 get information of a group # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_group_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. 
:return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method get_api_group" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501 # Authentication setting auth_settings = ['BearerToken'] # noqa: E501 return self.api_client.call_api( '/apis/admissionregistration.k8s.io/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIGroup', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
AdmissionregistrationApi
python
coleifer__peewee
tests/sqlite_udf.py
{ "start": 1622, "end": 1809 }
class ____(ModelTestCase): database = database def sql1(self, sql, *params): cursor = self.database.execute_sql(sql, params) return cursor.fetchone()[0]
BaseTestUDF
python
huggingface__transformers
tests/models/paligemma/test_modeling_paligemma.py
{ "start": 1332, "end": 6074 }
class ____: def __init__( self, parent, ignore_index=-100, image_token_index=0, projector_hidden_act="gelu", seq_length=25, vision_feature_select_strategy="default", vision_feature_layer=-1, projection_dim=32, text_config={ "model_type": "gemma", "seq_length": 128, "is_training": True, # "use_input_mask": True, "use_token_type_ids": False, "use_labels": True, "vocab_size": 99, "hidden_size": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "num_key_value_heads": 1, "head_dim": 8, "intermediate_size": 37, "hidden_activation": "gelu_pytorch_tanh", "hidden_dropout_prob": 0.1, "attention_probs_dropout_prob": 0.1, "max_position_embeddings": 512, "type_vocab_size": 16, "type_sequence_label_size": 2, "initializer_range": 0.02, "num_labels": 3, "num_choices": 4, "pad_token_id": 1, }, is_training=True, vision_config={ "use_labels": True, "image_size": 20, "patch_size": 5, "num_image_tokens": 4, "num_channels": 3, "is_training": True, "hidden_size": 32, "projection_dim": 32, "num_key_value_heads": 1, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "dropout": 0.1, "attention_dropout": 0.1, "initializer_range": 0.02, }, use_cache=False, ): self.parent = parent self.ignore_index = ignore_index # `image_token_index` is set to 0 to pass "resize_embeddings" test, do not modify self.image_token_index = image_token_index self.projector_hidden_act = projector_hidden_act self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer self.text_config = text_config self.vision_config = vision_config self.seq_length = seq_length self.projection_dim = projection_dim self.pad_token_id = text_config["pad_token_id"] self.num_hidden_layers = text_config["num_hidden_layers"] self.vocab_size = text_config["vocab_size"] self.hidden_size = text_config["hidden_size"] self.num_attention_heads = text_config["num_attention_heads"] self.is_training = is_training self.batch_size = 3 self.num_channels = 
vision_config["num_channels"] self.image_size = vision_config["image_size"] self.encoder_seq_length = seq_length self.use_cache = use_cache def get_config(self): return PaliGemmaConfig( text_config=self.text_config, vision_config=self.vision_config, ignore_index=self.ignore_index, image_token_index=self.image_token_index, projector_hidden_act=self.projector_hidden_act, projection_dim=self.projection_dim, vision_feature_select_strategy=self.vision_feature_select_strategy, vision_feature_layer=self.vision_feature_layer, ) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [ self.batch_size, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) config = self.get_config() return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1 attention_mask = input_ids.ne(self.pad_token_id).to(torch_device) # set the 16 first tokens to be image, and ensure that no other tokens are image tokens # do not change this unless you modified image size or patch size input_ids[input_ids == config.image_token_index] = self.pad_token_id input_ids[:, :16] = config.image_token_index inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "labels": input_ids, "token_type_ids": torch.zeros_like(input_ids), } return config, inputs_dict @require_torch
PaliGemmaVisionText2TextModelTester
python
huggingface__transformers
src/transformers/models/sam2/configuration_sam2.py
{ "start": 11236, "end": 13323 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Sam2PromptEncoder`]. The [`Sam2PromptEncoder`] module is used to encode the input 2D points and bounding boxes. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the hidden states. image_size (`int`, *optional*, defaults to 1024): The expected output resolution of the image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. mask_input_channels (`int`, *optional*, defaults to 16): The number of channels to be fed to the `MaskDecoder` module. num_point_embeddings (`int`, *optional*, defaults to 4): The number of point embeddings to be used. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the encoder and pooler. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. scale (`float`, *optional*, defaults to 1): The scale factor for the prompt encoder. """ base_config_key = "prompt_encoder_config" def __init__( self, hidden_size=256, image_size=1024, patch_size=16, mask_input_channels=16, num_point_embeddings=4, hidden_act="gelu", layer_norm_eps=1e-6, scale=1, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.image_size = image_size self.patch_size = patch_size self.mask_input_channels = mask_input_channels self.num_point_embeddings = num_point_embeddings self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.scale = scale
Sam2PromptEncoderConfig
python
PrefectHQ__prefect
src/prefect/server/database/orm_models.py
{ "start": 32840, "end": 33292 }
class ____(Base): active: Mapped[bool] = mapped_column(default=True) name: Mapped[str] limit: Mapped[int] active_slots: Mapped[int] = mapped_column(default=0) denied_slots: Mapped[int] = mapped_column(default=0) slot_decay_per_second: Mapped[float] = mapped_column(default=0.0) avg_slot_occupancy_seconds: Mapped[float] = mapped_column(default=2.0) __table_args__: Any = (sa.UniqueConstraint("name"),)
ConcurrencyLimitV2
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/roles.py
{ "start": 5227, "end": 5452 }
class ____(FromClauseRole): __slots__ = () if TYPE_CHECKING: def _anonymous_fromclause( self, *, name: Optional[str] = None, flat: bool = False ) -> FromClause: ...
AnonymizedFromClauseRole
python
huggingface__transformers
tests/models/owlv2/test_modeling_owlv2.py
{ "start": 4720, "end": 7647 }
class ____(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as OWLV2 does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (Owlv2VisionModel,) if is_torch_available() else () test_resize_embeddings = False def setUp(self): self.model_tester = Owlv2VisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Owlv2VisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="OWLV2 does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="OwlV2 does not support training yet") def test_training(self): pass @unittest.skip(reason="OwlV2 does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass 
@unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): model_name = "google/owlv2-base-patch16-ensemble" model = Owlv2VisionModel.from_pretrained(model_name) self.assertIsNotNone(model) # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTTextModelTester with OwlViT->Owlv2
Owlv2VisionModelTest
python
urllib3__urllib3
src/urllib3/contrib/emscripten/request.py
{ "start": 134, "end": 566 }
class ____: method: str url: str params: dict[str, str] | None = None body: _TYPE_BODY | None = None headers: dict[str, str] = field(default_factory=dict) timeout: float = 0 decode_content: bool = True def set_header(self, name: str, value: str) -> None: self.headers[name.capitalize()] = value def set_body(self, body: _TYPE_BODY | None) -> None: self.body = body
EmscriptenRequest
python
run-llama__llama_index
llama-index-integrations/storage/index_store/llama-index-storage-index-store-redis/llama_index/storage/index_store/redis/base.py
{ "start": 216, "end": 1740 }
class ____(KVIndexStore): """ Redis Index store. Args: redis_kvstore (RedisKVStore): Redis key-value store namespace (str): namespace for the index store """ def __init__( self, redis_kvstore: RedisKVStore, namespace: Optional[str] = None, collection_suffix: Optional[str] = None, ) -> None: """Init a RedisIndexStore.""" super().__init__( redis_kvstore, namespace=namespace, collection_suffix=collection_suffix ) # avoid conflicts with redis docstore if self._collection.endswith(DEFAULT_COLLECTION_SUFFIX): self._collection = f"{self._namespace}/index" @classmethod def from_redis_client( cls, redis_client: Any, namespace: Optional[str] = None, collection_suffix: Optional[str] = None, ) -> "RedisIndexStore": """Load a RedisIndexStore from a Redis Client.""" redis_kvstore = RedisKVStore.from_redis_client(redis_client=redis_client) return cls(redis_kvstore, namespace, collection_suffix) @classmethod def from_host_and_port( cls, host: str, port: int, namespace: Optional[str] = None, collection_suffix: Optional[str] = None, ) -> "RedisIndexStore": """Load a RedisIndexStore from a Redis host and port.""" redis_kvstore = RedisKVStore.from_host_and_port(host, port) return cls(redis_kvstore, namespace, collection_suffix)
RedisIndexStore
python
numba__numba
numba/core/errors.py
{ "start": 17949, "end": 18087 }
class ____(IRError): """ An error occurred during interpretation of IR due to variable redefinition. """ pass
RedefinedError
python
kamyu104__LeetCode-Solutions
Python/index-pairs-of-a-string.py
{ "start": 2247, "end": 2727 }
class ____(object): def indexPairs(self, text, words): """ :type text: str :type words: List[str] :rtype: List[List[int]] """ result = [] reversed_words = [w[::-1] for w in words] trie = AhoTrie(reversed_words) for i in reversed(xrange(len(text))): for j in trie.step(text[i]): result.append([i, i+len(reversed_words[j])-1]) result.reverse() return result
Solution
python
wandb__wandb
wandb/apis/importers/wandb.py
{ "start": 2738, "end": 10533 }
class ____: def __init__( self, run: Run, *, src_base_url: str, src_api_key: str, dst_base_url: str, dst_api_key: str, ) -> None: self.run = run self.api = wandb.Api( api_key=src_api_key, overrides={"base_url": src_base_url}, ) self.dst_api = wandb.Api( api_key=dst_api_key, overrides={"base_url": dst_base_url}, ) # For caching self._files: Optional[Iterable[Tuple[str, str]]] = None self._artifacts: Optional[Iterable[Artifact]] = None self._used_artifacts: Optional[Iterable[Artifact]] = None self._parquet_history_paths: Optional[Iterable[str]] = None def __repr__(self) -> str: s = os.path.join(self.entity(), self.project(), self.run_id()) return f"WandbRun({s})" def run_id(self) -> str: return self.run.id def entity(self) -> str: return self.run.entity def project(self) -> str: return self.run.project def config(self) -> Dict[str, Any]: return self.run.config def summary(self) -> Dict[str, float]: s = self.run.summary return s def metrics(self) -> Iterable[Dict[str, float]]: if self._parquet_history_paths is None: self._parquet_history_paths = list(self._get_parquet_history_paths()) if self._parquet_history_paths: rows = self._get_rows_from_parquet_history_paths() else: logger.warning( "No parquet files detected; using scan history (this may not be reliable)" ) rows = self.run.scan_history() for row in rows: row = remove_keys_with_none_values(row) yield row def run_group(self) -> Optional[str]: return self.run.group def job_type(self) -> Optional[str]: return self.run.job_type def display_name(self) -> str: return self.run.display_name def notes(self) -> Optional[str]: # Notes includes the previous notes and serves as a catch-all for things we missed or can't add back previous_link = f"Imported from: {self.run.url}" previous_author = f"Author: {self.run.user.username}" header = [previous_link, previous_author] previous_notes = self.run.notes or "" return "\n".join(header) + "\n---\n" + previous_notes def tags(self) -> Optional[List[str]]: return self.run.tags def 
artifacts(self) -> Optional[Iterable[Artifact]]: if self._artifacts is None: _artifacts = [] for art in self.run.logged_artifacts(): a = _clone_art(art) _artifacts.append(a) self._artifacts = _artifacts yield from self._artifacts def used_artifacts(self) -> Optional[Iterable[Artifact]]: if self._used_artifacts is None: _used_artifacts = [] for art in self.run.used_artifacts(): a = _clone_art(art) _used_artifacts.append(a) self._used_artifacts = _used_artifacts yield from self._used_artifacts def os_version(self) -> Optional[str]: ... # pragma: no cover def python_version(self) -> Optional[str]: return self._metadata_file().get("python") def cuda_version(self) -> Optional[str]: ... # pragma: no cover def program(self) -> Optional[str]: ... # pragma: no cover def host(self) -> Optional[str]: return self._metadata_file().get("host") def username(self) -> Optional[str]: ... # pragma: no cover def executable(self) -> Optional[str]: ... # pragma: no cover def gpus_used(self) -> Optional[str]: ... # pragma: no cover def cpus_used(self) -> Optional[int]: # can we get the model? ... # pragma: no cover def memory_used(self) -> Optional[int]: ... 
# pragma: no cover def runtime(self) -> Optional[int]: wandb_runtime = self.run.summary.get("_wandb", {}).get("runtime") base_runtime = self.run.summary.get("_runtime") if (t := coalesce(wandb_runtime, base_runtime)) is None: return t return int(t) def start_time(self) -> Optional[int]: t = dt.fromisoformat(self.run.created_at).timestamp() * 1000 return int(t) def code_path(self) -> Optional[str]: path = self._metadata_file().get("codePath", "") return f"code/{path}" def cli_version(self) -> Optional[str]: return self._config_file().get("_wandb", {}).get("value", {}).get("cli_version") def files(self) -> Optional[Iterable[Tuple[PathStr, Policy]]]: if self._files is None: files_dir = f"{internal.ROOT_DIR}/{self.run_id()}/files" _files = [] for f in self.run.files(): f: File # These optimizations are intended to avoid rate limiting when importing many runs in parallel # Don't carry over empty files if f.size == 0: continue # Skip deadlist to avoid overloading S3 if "wandb_manifest.json.deadlist" in f.name: continue result = f.download(files_dir, exist_ok=True, api=self.api) file_and_policy = (result.name, "end") _files.append(file_and_policy) self._files = _files yield from self._files def logs(self) -> Optional[Iterable[str]]: log_files = self._find_all_in_files_regex(r"^.*output\.log$") for path in log_files: with open(path) as f: yield from f.readlines() def _metadata_file(self) -> Dict[str, Any]: if (fname := self._find_in_files("wandb-metadata.json")) is None: return {} with open(fname) as f: return json.loads(f.read()) def _config_file(self) -> Dict[str, Any]: if (fname := self._find_in_files("config.yaml")) is None: return {} with open(fname) as f: return yaml.safe_load(f) or {} def _get_rows_from_parquet_history_paths(self) -> Iterable[Dict[str, Any]]: # Unfortunately, it's not feasible to validate non-parquet history if not (paths := self._get_parquet_history_paths()): yield {} return # Collect and merge parquet history dfs = [ pl.read_parquet(p) for path in 
paths for p in Path(path).glob("*.parquet") ] if "_step" in (df := _merge_dfs(dfs)): df = df.with_columns(pl.col("_step").cast(pl.Int64)) yield from df.iter_rows(named=True) def _get_parquet_history_paths(self) -> Iterable[str]: if self._parquet_history_paths is None: paths = [] # self.artifacts() returns a copy of the artifacts; use this to get raw for art in self.run.logged_artifacts(): if art.type != "wandb-history": continue if ( path := _download_art(art, root=f"{SRC_ART_PATH}/{art.name}") ) is None: continue paths.append(path) self._parquet_history_paths = paths yield from self._parquet_history_paths def _find_in_files(self, name: str) -> Optional[str]: if files := self.files(): for path, _ in files: if name in path: return path return None def _find_all_in_files_regex(self, regex: str) -> Iterable[str]: if files := self.files(): for path, _ in files: if re.match(regex, path): yield path
WandbRun
python
jmcnamara__XlsxWriter
xlsxwriter/test/sharedstrings/test_write_si.py
{ "start": 309, "end": 794 }
class ____(unittest.TestCase): """ Test the SharedStrings _write_si() method. """ def setUp(self): self.fh = StringIO() self.sharedstrings = SharedStrings() self.sharedstrings._set_filehandle(self.fh) def test_write_si(self): """Test the _write_si() method""" self.sharedstrings._write_si("neptune") exp = """<si><t>neptune</t></si>""" got = self.fh.getvalue() self.assertEqual(exp, got)
TestWriteSi
python
apache__airflow
helm-tests/tests/helm_tests/other/test_git_sync_triggerer.py
{ "start": 900, "end": 4948 }
class ____: """Test git sync triggerer.""" def test_validate_sshkeysecret_not_added_when_persistence_is_enabled(self): docs = render_chart( values={ "dags": { "gitSync": { "enabled": True, "containerName": "git-sync-test", "sshKeySecret": "ssh-secret", "knownHosts": None, "branch": "test-branch", }, "persistence": {"enabled": True}, } }, show_only=["templates/triggerer/triggerer-deployment.yaml"], ) assert "git-sync-ssh-key" not in jmespath.search("spec.template.spec.volumes[].name", docs[0]) def test_validate_if_ssh_params_are_added_with_git_ssh_key(self): docs = render_chart( values={ "dags": { "gitSync": { "enabled": True, "sshKey": "dummy-ssh-key", } } }, show_only=["templates/triggerer/triggerer-deployment.yaml"], ) assert {"name": "GIT_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert {"name": "GITSYNC_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert {"name": "GIT_SYNC_SSH", "value": "true"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert {"name": "GITSYNC_SSH", "value": "true"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert {"name": "GIT_KNOWN_HOSTS", "value": "false"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert {"name": "GITSYNC_SSH_KNOWN_HOSTS", "value": "false"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert { "name": "git-sync-ssh-key", "secret": {"secretName": "release-name-ssh-secret", "defaultMode": 288}, } in jmespath.search("spec.template.spec.volumes", docs[0]) def test_liveliness_and_readiness_probes_are_configurable(self): livenessProbe = { "failureThreshold": 10, "exec": {"command": ["/bin/true"]}, "initialDelaySeconds": 0, "periodSeconds": 1, "successThreshold": 1, "timeoutSeconds": 5, } readinessProbe = { "failureThreshold": 10, "exec": {"command": ["/bin/true"]}, 
"initialDelaySeconds": 0, "periodSeconds": 1, "successThreshold": 1, "timeoutSeconds": 5, } docs = render_chart( values={ "dags": { "gitSync": { "enabled": True, "livenessProbe": livenessProbe, "readinessProbe": readinessProbe, }, } }, show_only=["templates/triggerer/triggerer-deployment.yaml"], ) container_search_result = jmespath.search( "spec.template.spec.containers[?name == 'git-sync']", docs[0] ) init_container_search_result = jmespath.search( "spec.template.spec.initContainers[?name == 'git-sync-init']", docs[0] ) assert "livenessProbe" in container_search_result[0] assert "readinessProbe" in container_search_result[0] assert "readinessProbe" not in init_container_search_result[0] assert "readinessProbe" not in init_container_search_result[0] assert livenessProbe == container_search_result[0]["livenessProbe"] assert readinessProbe == container_search_result[0]["readinessProbe"]
TestGitSyncTriggerer
python
getsentry__sentry
src/sentry/models/releasefile.py
{ "start": 7739, "end": 13981 }
class ____: """Ensures atomic write operations to the artifact index""" def __init__(self, release: Release, dist: Distribution | None, **filter_args): self._release = release self._dist = dist self._ident = ReleaseFile.get_ident(ARTIFACT_INDEX_FILENAME, dist and dist.name) self._filter_args = filter_args # Extra constraints on artifact index release file def readable_data(self) -> dict | None: """Simple read, no synchronization necessary""" try: releasefile = self._releasefile_qs()[0] except IndexError: return None else: fp = releasefile.file.getfile() with fp: return json.load(fp) @contextmanager def writable_data(self, create: bool, initial_artifact_count=None): """Context manager for editable artifact index""" with atomic_transaction( using=( router.db_for_write(ReleaseFile), router.db_for_write(File), ) ): created = False if create: releasefile, created = self._get_or_create_releasefile(initial_artifact_count) else: # Lock the row for editing: # NOTE: Do not select_related('file') here, because we do not # want to lock the File table qs = self._releasefile_qs().select_for_update() try: releasefile = qs[0] except IndexError: releasefile = None if releasefile is None: index_data = None else: if created: index_data = _ArtifactIndexData({}, fresh=True) else: source_file = releasefile.file if source_file.type != ARTIFACT_INDEX_TYPE: raise RuntimeError("Unexpected file type for artifact index") raw_data = json.load(source_file.getfile()) index_data = _ArtifactIndexData(raw_data) yield index_data # editable reference to index if index_data is not None and index_data.changed: if created: target_file = releasefile.file else: target_file = File.objects.create( name=ARTIFACT_INDEX_FILENAME, type=ARTIFACT_INDEX_TYPE ) target_file.putfile(BytesIO(json.dumps(index_data.data).encode())) artifact_count = index_data.num_files if not created: # Update and clean existing old_file = releasefile.file releasefile.update(file=target_file, artifact_count=artifact_count) 
old_file.delete() def _get_or_create_releasefile(self, initial_artifact_count): """Make sure that the release file exists""" return ReleaseFile.objects.select_for_update().get_or_create( **self._key_fields(), defaults={ "artifact_count": initial_artifact_count, "file": lambda: File.objects.create( name=ARTIFACT_INDEX_FILENAME, type=ARTIFACT_INDEX_TYPE, ), }, ) def _releasefile_qs(self): """QuerySet for selecting artifact index""" return ReleaseFile.objects.filter(**self._key_fields(), **self._filter_args) def _key_fields(self): """Columns needed to identify the artifact index in the db""" return dict( organization_id=self._release.organization_id, release_id=self._release.id, dist_id=self._dist.id if self._dist else self._dist, name=ARTIFACT_INDEX_FILENAME, ident=self._ident, ) @sentry_sdk.tracing.trace def read_artifact_index(release: Release, dist: Distribution | None, **filter_args) -> dict | None: """Get index data""" guard = _ArtifactIndexGuard(release, dist, **filter_args) return guard.readable_data() def _compute_sha1(archive: ReleaseArchive, url: str) -> str: data = archive.read(url) return sha1(data).hexdigest() @sentry_sdk.tracing.trace def update_artifact_index( release: Release, dist: Distribution | None, archive_file: File, temp_file: IO | None = None, ): """Add information from release archive to artifact index :returns: The created ReleaseFile instance """ releasefile = ReleaseFile.objects.create( name=archive_file.name, release_id=release.id, organization_id=release.organization_id, dist_id=dist.id if dist is not None else dist, file=archive_file, artifact_count=0, # Artifacts will be counted with artifact index ) files_out = {} with ReleaseArchive(temp_file or archive_file.getfile()) as archive: manifest = archive.manifest files = manifest.get("files", {}) if not files: return for filename, info in files.items(): info = info.copy() url = info.pop("url") info["filename"] = filename info["archive_ident"] = releasefile.ident info["date_created"] = 
archive_file.timestamp info["sha1"] = _compute_sha1(archive, filename) info["size"] = archive.info(filename).file_size files_out[url] = info guard = _ArtifactIndexGuard(release, dist) with guard.writable_data(create=True, initial_artifact_count=len(files_out)) as index_data: index_data.update_files(files_out) return releasefile @sentry_sdk.tracing.trace def delete_from_artifact_index(release: Release, dist: Distribution | None, url: str) -> bool: """Delete the file with the given url from the manifest. Does *not* delete the file from the zip archive. :returns: True if deleted """ guard = _ArtifactIndexGuard(release, dist) with guard.writable_data(create=False) as index_data: if index_data is not None: return index_data.delete(url) return False
_ArtifactIndexGuard
python
huggingface__transformers
tests/models/dbrx/test_modeling_dbrx.py
{ "start": 3560, "end": 4366 }
class ____(unittest.TestCase): @slow def test_tiny_model_logits(self): model = DbrxForCausalLM.from_pretrained("Rocketknight1/dbrx-tiny-random") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] vocab_size = model.vocab_size expected_shape = torch.Size((1, 6, vocab_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [ [ [-1.6300e-04, 5.0118e-04, 2.5437e-04], [2.0422e-05, 2.7210e-04, -1.5125e-04], [-1.5105e-04, 4.6879e-04, 3.3309e-04], ] ] ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
DbrxModelIntegrationTest
python
kubernetes-client__python
kubernetes/client/models/v1_namespace_spec.py
{ "start": 383, "end": 3826 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'finalizers': 'list[str]' } attribute_map = { 'finalizers': 'finalizers' } def __init__(self, finalizers=None, local_vars_configuration=None): # noqa: E501 """V1NamespaceSpec - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._finalizers = None self.discriminator = None if finalizers is not None: self.finalizers = finalizers @property def finalizers(self): """Gets the finalizers of this V1NamespaceSpec. # noqa: E501 Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ # noqa: E501 :return: The finalizers of this V1NamespaceSpec. # noqa: E501 :rtype: list[str] """ return self._finalizers @finalizers.setter def finalizers(self, finalizers): """Sets the finalizers of this V1NamespaceSpec. Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ # noqa: E501 :param finalizers: The finalizers of this V1NamespaceSpec. 
# noqa: E501 :type: list[str] """ self._finalizers = finalizers def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1NamespaceSpec): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1NamespaceSpec): return True return self.to_dict() != other.to_dict()
V1NamespaceSpec
python
kamyu104__LeetCode-Solutions
Python/best-sightseeing-pair.py
{ "start": 29, "end": 311 }
class ____(object): def maxScoreSightseeingPair(self, A): """ :type A: List[int] :rtype: int """ result, curr = 0, 0 for x in A: result = max(result, curr+x) curr = max(curr, x)-1 return result
Solution
python
great-expectations__great_expectations
great_expectations/data_context/data_context_variables.py
{ "start": 2087, "end": 8055 }
class ____(ABC): """ Wrapper object around data context variables set in the `great_expectations.yml` config file. Child classes should instantiate their own stores to ensure that changes made to this object are persisted for future usage (i.e. filesystem I/O or HTTP request to a Cloud endpoint). Should maintain parity with the `DataContextConfig`. Args: config: A reference to the DataContextConfig to perform CRUD on. config_provider: Responsible for determining config values and substituting them in GET calls. _store: An instance of a DataContextStore with the appropriate backend to persist config changes. """ # noqa: E501 # FIXME CoP config: DataContextConfig config_provider: _ConfigurationProvider _store: Optional[DataContextStore] = None @override def __str__(self) -> str: return str(self.config) @override def __repr__(self) -> str: return repr(self.config) @property def store(self) -> DataContextStore: if self._store is None: self._store = self._init_store() return self._store @abstractmethod def _init_store(self) -> DataContextStore: raise NotImplementedError def get_key(self) -> DataContextKey: """ Generates the appropriate Store key to retrieve/store configs. """ key = ConfigurationIdentifier(configuration_key=DataContextVariableSchema.ALL_VARIABLES) return key def _set(self, attr: DataContextVariableSchema, value: Any) -> None: key: str = attr.value self.config[key] = value def _get(self, attr: DataContextVariableSchema) -> Any: key: str = attr.value val: Any = self.config[key] substituted_val: Any = self.config_provider.substitute_config(val) return substituted_val @public_api def save(self) -> Any: """ Persist any changes made to variables utilizing the configured Store. 
""" key: ConfigurationIdentifier = self.get_key() # type: ignore[assignment] # FIXME CoP return self.store.set(key=key, value=self.config) @property def config_version(self) -> Optional[float]: return self._get(DataContextVariableSchema.CONFIG_VERSION) @config_version.setter def config_version(self, config_version: float) -> None: self._set(DataContextVariableSchema.CONFIG_VERSION, config_version) @property def config_variables_file_path(self) -> Optional[str]: return self._get(DataContextVariableSchema.CONFIG_VARIABLES_FILE_PATH) @config_variables_file_path.setter def config_variables_file_path(self, config_variables_file_path: str) -> None: self._set( DataContextVariableSchema.CONFIG_VARIABLES_FILE_PATH, config_variables_file_path, ) @property def plugins_directory(self) -> Optional[str]: return self._get(DataContextVariableSchema.PLUGINS_DIRECTORY) @plugins_directory.setter def plugins_directory(self, plugins_directory: str) -> None: self._set(DataContextVariableSchema.PLUGINS_DIRECTORY, plugins_directory) @property def expectations_store_name(self) -> Optional[str]: return self._get(DataContextVariableSchema.EXPECTATIONS_STORE_NAME) @expectations_store_name.setter def expectations_store_name(self, expectations_store_name: str) -> None: self._set(DataContextVariableSchema.EXPECTATIONS_STORE_NAME, expectations_store_name) @property def validation_results_store_name(self) -> Optional[str]: return self._get(DataContextVariableSchema.VALIDATIONS_STORE_NAME) @validation_results_store_name.setter def validation_results_store_name(self, validation_results_store_name: str) -> None: self._set(DataContextVariableSchema.VALIDATIONS_STORE_NAME, validation_results_store_name) @property def checkpoint_store_name(self) -> Optional[str]: return self._get(DataContextVariableSchema.CHECKPOINT_STORE_NAME) @checkpoint_store_name.setter def checkpoint_store_name(self, checkpoint_store_name: str) -> None: self._set( DataContextVariableSchema.CHECKPOINT_STORE_NAME, 
checkpoint_store_name, ) @property def stores(self) -> Optional[dict]: return self._get(DataContextVariableSchema.STORES) @stores.setter def stores(self, stores: dict) -> None: self._set(DataContextVariableSchema.STORES, stores) @property def data_docs_sites(self) -> Optional[dict]: return self._get(DataContextVariableSchema.DATA_DOCS_SITES) @data_docs_sites.setter def data_docs_sites(self, data_docs_sites: dict) -> None: self._set(DataContextVariableSchema.DATA_DOCS_SITES, data_docs_sites) @property def analytics_enabled( self, ) -> Optional[bool]: return self._get(DataContextVariableSchema.ANALYTICS_ENABLED) @analytics_enabled.setter def analytics_enabled(self, analytics_enabled: bool) -> None: self._set( DataContextVariableSchema.ANALYTICS_ENABLED, analytics_enabled, ) @property def data_context_id( self, ) -> Optional[uuid.UUID]: return self._get(DataContextVariableSchema.DATA_CONTEXT_ID) @data_context_id.setter def data_context_id(self, data_context_id: uuid.UUID) -> None: self._set( DataContextVariableSchema.DATA_CONTEXT_ID, data_context_id, ) @property def progress_bars(self) -> Optional[ProgressBarsConfig]: return self._get(DataContextVariableSchema.PROGRESS_BARS) @progress_bars.setter def progress_bars(self, progress_bars: ProgressBarsConfig) -> None: self._set( DataContextVariableSchema.PROGRESS_BARS, progress_bars, ) @dataclass(repr=False)
DataContextVariables
python
ray-project__ray
release/ray_release/exception.py
{ "start": 3332, "end": 3417 }
class ____(CommandTimeout): exit_code = ExitCode.COMMAND_TIMEOUT
TestCommandTimeout
python
viewflow__viewflow
viewflow/workflow/context.py
{ "start": 56, "end": 1540 }
class ____(object): """Thread-local activation context, dynamically scoped. :keyword propagate_exception: If True, on activation failure exception will be propagated to previous activation. If False, current task activation will be marked as failed. Usage :: with Context(propagate_exception=False): print(context.propagate_exception) # prints 'False' print(context.propagate_exception) # prints default 'True' """ def __init__(self, default=None, **kwargs): # noqa D102 self.default = default self.current_context_data = kwargs def __getattr__(self, name): stack = [] if hasattr(_context_stack, "data"): stack = _context_stack.data for scope in reversed(stack): if name in scope: return scope[name] if name in self.default: return self.default[name] raise AttributeError(name) def __enter__(self): if not hasattr(_context_stack, "data"): _context_stack.data = [] _context_stack.data.append(self.current_context_data) def __exit__(self, t, v, tb): _context_stack.data.pop() @staticmethod def create(**kwargs): # noqa D102 return Context(default=kwargs) context = Context.create(propagate_exception=True)
Context
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/waiters/test_batch.py
{ "start": 998, "end": 3007 }
class ____: JOB_ID = "test_job_id" @pytest.fixture(autouse=True) def _setup_test_cases(self, monkeypatch): self.client = boto3.client("batch", region_name="eu-west-3") monkeypatch.setattr(BatchClientHook, "conn", self.client) @pytest.fixture def mock_describe_jobs(self): """Mock ``BatchClientHook.Client.describe_jobs`` method.""" with mock.patch.object(self.client, "describe_jobs") as m: yield m def test_service_waiters(self): hook_waiters = BatchClientHook(aws_conn_id=None).list_waiters() assert "batch_job_complete" in hook_waiters @staticmethod def describe_jobs(status: str): """ Helper function for generate minimal DescribeJobs response for a single job. https://docs.aws.amazon.com/batch/latest/APIReference/API_DescribeJobs.html """ return { "jobs": [ { "status": status, }, ], } def test_job_succeeded(self, mock_describe_jobs): """Test job succeeded""" mock_describe_jobs.side_effect = [ self.describe_jobs(BatchClientHook.RUNNING_STATE), self.describe_jobs(BatchClientHook.SUCCESS_STATE), ] waiter = BatchClientHook(aws_conn_id=None).get_waiter("batch_job_complete") waiter.wait(jobs=[self.JOB_ID], WaiterConfig={"Delay": 0.01, "MaxAttempts": 2}) def test_job_failed(self, mock_describe_jobs): """Test job failed""" mock_describe_jobs.side_effect = [ self.describe_jobs(BatchClientHook.RUNNING_STATE), self.describe_jobs(BatchClientHook.FAILURE_STATE), ] waiter = BatchClientHook(aws_conn_id=None).get_waiter("batch_job_complete") with pytest.raises(WaiterError, match="Waiter encountered a terminal failure state"): waiter.wait(jobs=[self.JOB_ID], WaiterConfig={"Delay": 0.01, "MaxAttempts": 2})
TestCustomBatchServiceWaiters
python
davidhalter__jedi
test/test_inference/test_signature.py
{ "start": 1371, "end": 1726 }
class ____: @classmethod def x(cls, a, b): pass @staticmethod def static(a, b): pass ''' partial_code = ''' import functools def func(a, b, c): pass a = functools.partial(func) b = functools.partial(func, 1) c = functools.partial(func, 1, c=2) d = functools.partial() ''' partialmethod_code = ''' import functools
X
python
protocolbuffers__protobuf
python/google/protobuf/internal/well_known_types_test.py
{ "start": 37362, "end": 41626 }
class ____(unittest.TestCase): def testAnyMessage(self): # Creates and sets message. msg = well_known_types_test_pb2.TestAny() msg_descriptor = msg.DESCRIPTOR all_types = unittest_pb2.TestAllTypes() all_descriptor = all_types.DESCRIPTOR all_types.repeated_string.append('\u00fc\ua71f') # Packs to Any. msg.value.Pack(all_types) self.assertEqual( msg.value.type_url, 'type.googleapis.com/%s' % all_descriptor.full_name ) self.assertEqual(msg.value.value, all_types.SerializeToString()) # Tests Is() method. self.assertTrue(msg.value.Is(all_descriptor)) self.assertFalse(msg.value.Is(msg_descriptor)) # Unpacks Any. unpacked_message = unittest_pb2.TestAllTypes() self.assertTrue(msg.value.Unpack(unpacked_message)) self.assertEqual(all_types, unpacked_message) # Unpacks to different type. self.assertFalse(msg.value.Unpack(msg)) # Only Any messages have Pack method. try: msg.Pack(all_types) except AttributeError: pass else: raise AttributeError( '%s should not have Pack method.' % msg_descriptor.full_name ) def testUnpackWithNoSlashInTypeUrl(self): msg = well_known_types_test_pb2.TestAny() all_types = unittest_pb2.TestAllTypes() all_descriptor = all_types.DESCRIPTOR msg.value.Pack(all_types) # Reset type_url to part of type_url after '/' msg.value.type_url = msg.value.TypeName() self.assertFalse(msg.value.Is(all_descriptor)) unpacked_message = unittest_pb2.TestAllTypes() self.assertFalse(msg.value.Unpack(unpacked_message)) def testMessageName(self): # Creates and sets message. submessage = well_known_types_test_pb2.TestAny() submessage.int_value = 12345 msg = any_pb2.Any() msg.Pack(submessage) self.assertEqual(msg.TypeName(), 'google.protobuf.internal.TestAny') def testPackWithCustomTypeUrl(self): submessage = well_known_types_test_pb2.TestAny() submessage.int_value = 12345 msg = any_pb2.Any() # Pack with a custom type URL prefix. 
msg.Pack(submessage, 'type.myservice.com') self.assertEqual( msg.type_url, 'type.myservice.com/%s' % submessage.DESCRIPTOR.full_name ) # Pack with a custom type URL prefix ending with '/'. msg.Pack(submessage, 'type.myservice.com/') self.assertEqual( msg.type_url, 'type.myservice.com/%s' % submessage.DESCRIPTOR.full_name ) # Pack with an empty type URL prefix. msg.Pack(submessage, '') self.assertEqual(msg.type_url, '/%s' % submessage.DESCRIPTOR.full_name) # Test unpacking the type. unpacked_message = well_known_types_test_pb2.TestAny() self.assertTrue(msg.Unpack(unpacked_message)) self.assertEqual(submessage, unpacked_message) def testPackDeterministic(self): submessage = well_known_types_test_pb2.TestAny() for i in range(10): submessage.map_value[str(i)] = i * 2 msg = any_pb2.Any() msg.Pack(submessage, deterministic=True) serialized = msg.SerializeToString(deterministic=True) golden = ( b'\n4type.googleapis.com/google.protobuf.internal.TestAny\x12F' b'\x1a\x05\n\x010\x10\x00\x1a\x05\n\x011\x10\x02\x1a\x05\n\x01' b'2\x10\x04\x1a\x05\n\x013\x10\x06\x1a\x05\n\x014\x10\x08\x1a' b'\x05\n\x015\x10\n\x1a\x05\n\x016\x10\x0c\x1a\x05\n\x017\x10' b'\x0e\x1a\x05\n\x018\x10\x10\x1a\x05\n\x019\x10\x12' ) self.assertEqual(golden, serialized) def testJsonStruct(self): value = struct_pb2.Value(struct_value=struct_pb2.Struct()) value_dict = json_format.MessageToDict( value, always_print_fields_with_no_presence=True, preserving_proto_field_name=True, use_integers_for_enums=True, ) self.assertDictEqual(value_dict, {}) s = struct_pb2.Struct( fields={ 'a': struct_pb2.Value(struct_value=struct_pb2.Struct()), }, ) sdict = json_format.MessageToDict( s, always_print_fields_with_no_presence=True, preserving_proto_field_name=True, use_integers_for_enums=True, ) self.assertDictEqual( sdict, { 'a': {}, }, ) if __name__ == '__main__': unittest.main()
AnyTest
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_assets.py
{ "start": 154352, "end": 168670 }
class ____(ExecutingGraphQLContextTestMatrix): def test_asset_event_history_no_observation_events( self, graphql_context: WorkspaceRequestContext ): """Documents current behavior of the asset event history query for OSS. It currently does not include asset failed to materialize events. """ asset_key = AssetKey("asset_1") num_events = 5 for i in range(num_events): run_id_1 = make_new_run_id() failure_event = EventLogEntry( error_info=None, level="debug", user_message="", run_id=run_id_1, timestamp=get_current_timestamp(), dagster_event=DagsterEvent.build_asset_failed_to_materialize_event( job_name="the_job", step_key="the_step", asset_materialization_failure=AssetMaterializationFailure( asset_key=asset_key, partition=None, failure_type=AssetMaterializationFailureType.FAILED, reason=AssetMaterializationFailureReason.FAILED_TO_MATERIALIZE, ), ), ) graphql_context.instance.store_event(failure_event) run_id_2 = make_new_run_id() materialize_event = EventLogEntry( error_info=None, level="debug", user_message="", run_id=run_id_2, timestamp=get_current_timestamp(), dagster_event=DagsterEvent( DagsterEventType.ASSET_MATERIALIZATION.value, "the_job", event_specific_data=StepMaterializationData( AssetMaterialization( asset_key=asset_key, partition=None, ) ), ), ) graphql_context.instance.store_event(materialize_event) result = execute_dagster_graphql( graphql_context, GET_ASSET_EVENT_HISTORY, variables={ "assetKey": {"path": ["asset_1"]}, "eventTypeSelectors": ["MATERIALIZATION", "FAILED_TO_MATERIALIZE"], "limit": 100, }, ) assert result.data assert result.data["assetOrError"] assert len(result.data["assetOrError"]["assetEventHistory"]["results"]) == 5 min_timestamp_seen = None for event in result.data["assetOrError"]["assetEventHistory"]["results"]: assert event["__typename"] == "MaterializationEvent" assert event["assetKey"]["path"] == ["asset_1"] # events should be sorted by storage id with the newest event first. 
Use timestamp # as a proxy if min_timestamp_seen: assert int(event["timestamp"]) <= min_timestamp_seen min_timestamp_seen = int(event["timestamp"]) # test cursoring result = execute_dagster_graphql( graphql_context, GET_ASSET_EVENT_HISTORY, variables={ "assetKey": {"path": ["asset_1"]}, "eventTypeSelector": "ALL", "limit": 2, "eventTypeSelectors": ["MATERIALIZATION", "FAILED_TO_MATERIALIZE"], }, ) assert result.data assert result.data["assetOrError"] assert len(result.data["assetOrError"]["assetEventHistory"]["results"]) == 2 assert result.data["assetOrError"]["assetEventHistory"]["cursor"] is not None cursor = result.data["assetOrError"]["assetEventHistory"]["cursor"] result = execute_dagster_graphql( graphql_context, GET_ASSET_EVENT_HISTORY, variables={ "assetKey": {"path": ["asset_1"]}, "eventTypeSelector": "ALL", "cursor": cursor, "eventTypeSelectors": ["MATERIALIZATION", "FAILED_TO_MATERIALIZE"], "limit": 100, }, ) assert result.data assert result.data["assetOrError"] assert len(result.data["assetOrError"]["assetEventHistory"]["results"]) == 3 assert result.data["assetOrError"]["assetEventHistory"]["cursor"] != cursor def test_asset_event_history_with_observation_events( self, graphql_context: WorkspaceRequestContext ): """Documents current behavior of the asset event history query for OSS. It currently does not include asset failed to materialize events. 
""" asset_key = AssetKey("asset_1") num_events = 5 for i in range(num_events): run_id_1 = make_new_run_id() failure_event = EventLogEntry( error_info=None, level="debug", user_message="", run_id=run_id_1, timestamp=get_current_timestamp(), dagster_event=DagsterEvent.build_asset_failed_to_materialize_event( job_name="the_job", step_key="the_step", asset_materialization_failure=AssetMaterializationFailure( asset_key=asset_key, partition=None, failure_type=AssetMaterializationFailureType.FAILED, reason=AssetMaterializationFailureReason.FAILED_TO_MATERIALIZE, ), ), ) graphql_context.instance.store_event(failure_event) run_id_2 = make_new_run_id() materialize_event = EventLogEntry( error_info=None, level="debug", user_message="", run_id=run_id_2, timestamp=get_current_timestamp(), dagster_event=DagsterEvent( DagsterEventType.ASSET_MATERIALIZATION.value, "the_job", event_specific_data=StepMaterializationData( AssetMaterialization( asset_key=asset_key, partition=None, ) ), ), ) graphql_context.instance.store_event(materialize_event) run_id_3 = make_new_run_id() observation_event = EventLogEntry( error_info=None, level="debug", user_message="", run_id=run_id_3, timestamp=get_current_timestamp(), dagster_event=DagsterEvent( DagsterEventType.ASSET_OBSERVATION.value, "the_job", event_specific_data=AssetObservationData( AssetObservation( asset_key=asset_key, ) ), ), ) graphql_context.instance.store_event(observation_event) result = execute_dagster_graphql( graphql_context, GET_ASSET_EVENT_HISTORY, variables={ "assetKey": {"path": ["asset_1"]}, "eventTypeSelectors": ["MATERIALIZATION", "FAILED_TO_MATERIALIZE", "OBSERVATION"], "limit": 100, }, ) assert result.data assert result.data["assetOrError"] assert len(result.data["assetOrError"]["assetEventHistory"]["results"]) == 10 min_timestamp_seen = None for event in result.data["assetOrError"]["assetEventHistory"]["results"]: assert ( event["__typename"] == "MaterializationEvent" or event["__typename"] == "ObservationEvent" ) 
assert event["assetKey"]["path"] == ["asset_1"] # events should be sorted by storage id with the newest event first. Use timestamp # as a proxy if min_timestamp_seen: assert int(event["timestamp"]) <= min_timestamp_seen min_timestamp_seen = int(event["timestamp"]) # test cursoring result = execute_dagster_graphql( graphql_context, GET_ASSET_EVENT_HISTORY, variables={ "assetKey": {"path": ["asset_1"]}, "eventTypeSelector": "ALL", "limit": 2, "eventTypeSelectors": ["MATERIALIZATION", "FAILED_TO_MATERIALIZE", "OBSERVATION"], }, ) assert result.data assert result.data["assetOrError"] assert len(result.data["assetOrError"]["assetEventHistory"]["results"]) == 2 assert result.data["assetOrError"]["assetEventHistory"]["cursor"] is not None cursor = result.data["assetOrError"]["assetEventHistory"]["cursor"] result = execute_dagster_graphql( graphql_context, GET_ASSET_EVENT_HISTORY, variables={ "assetKey": {"path": ["asset_1"]}, "eventTypeSelector": "ALL", "cursor": cursor, "eventTypeSelectors": ["MATERIALIZATION", "FAILED_TO_MATERIALIZE", "OBSERVATION"], "limit": 100, }, ) assert result.data assert result.data["assetOrError"] assert len(result.data["assetOrError"]["assetEventHistory"]["results"]) == 8 assert result.data["assetOrError"]["assetEventHistory"]["cursor"] != cursor def test_asset_event_history_filtering(self, graphql_context: WorkspaceRequestContext): """Documents current behavior of the asset event history query for OSS. It currently does not include asset failed to materialize events. 
""" asset_key = AssetKey("asset_1") num_events = 5 for i in range(num_events): run_id_1 = make_new_run_id() failure_event = EventLogEntry( error_info=None, level="debug", user_message="", run_id=run_id_1, timestamp=get_current_timestamp(), dagster_event=DagsterEvent.build_asset_failed_to_materialize_event( job_name="the_job", step_key="the_step", asset_materialization_failure=AssetMaterializationFailure( asset_key=asset_key, partition=None, failure_type=AssetMaterializationFailureType.FAILED, reason=AssetMaterializationFailureReason.FAILED_TO_MATERIALIZE, ), ), ) graphql_context.instance.store_event(failure_event) run_id_2 = make_new_run_id() materialize_event = EventLogEntry( error_info=None, level="debug", user_message="", run_id=run_id_2, timestamp=get_current_timestamp(), dagster_event=DagsterEvent( DagsterEventType.ASSET_MATERIALIZATION.value, "the_job", event_specific_data=StepMaterializationData( AssetMaterialization( asset_key=asset_key, partition=None, ) ), ), ) graphql_context.instance.store_event(materialize_event) run_id_3 = make_new_run_id() observation_event = EventLogEntry( error_info=None, level="debug", user_message="", run_id=run_id_3, timestamp=get_current_timestamp(), dagster_event=DagsterEvent( DagsterEventType.ASSET_OBSERVATION.value, "the_job", event_specific_data=AssetObservationData( AssetObservation( asset_key=asset_key, ) ), ), ) graphql_context.instance.store_event(observation_event) result = execute_dagster_graphql( graphql_context, GET_ASSET_EVENT_HISTORY, variables={ "assetKey": {"path": ["asset_1"]}, "eventTypeSelectors": ["MATERIALIZATION"], "limit": 100, }, ) assert result.data assert result.data["assetOrError"] assert len(result.data["assetOrError"]["assetEventHistory"]["results"]) == 5 min_timestamp_seen = None for event in result.data["assetOrError"]["assetEventHistory"]["results"]: assert event["__typename"] == "MaterializationEvent" assert event["assetKey"]["path"] == ["asset_1"] # events should be sorted by storage id with 
the newest event first. Use timestamp # as a proxy if min_timestamp_seen: assert int(event["timestamp"]) <= min_timestamp_seen min_timestamp_seen = int(event["timestamp"]) result = execute_dagster_graphql( graphql_context, GET_ASSET_EVENT_HISTORY, variables={ "assetKey": {"path": ["asset_1"]}, "eventTypeSelectors": ["OBSERVATION"], "limit": 100, }, ) assert result.data assert result.data["assetOrError"] assert len(result.data["assetOrError"]["assetEventHistory"]["results"]) == 5 min_timestamp_seen = None for event in result.data["assetOrError"]["assetEventHistory"]["results"]: assert event["__typename"] == "ObservationEvent" assert event["assetKey"]["path"] == ["asset_1"] # events should be sorted by storage id with the newest event first. Use timestamp # as a proxy if min_timestamp_seen: assert int(event["timestamp"]) <= min_timestamp_seen min_timestamp_seen = int(event["timestamp"])
TestAssetEventHistory