Dataset columns:
  language    stringclasses (1 value)
  repo        stringclasses (346 values)
  path        stringlengths (6 to 201)
  class_span  dict
  source      stringlengths (21 to 2.38M)
  target      stringlengths (1 to 96)
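Each row below pairs a class definition (with its name masked as ____) against the masked name as the prediction target. A minimal sketch of iterating rows with this schema via the Hugging Face datasets library — the dataset id here is a hypothetical placeholder, not a real published name:

from datasets import load_dataset

# "org/masked-class-names" is a hypothetical placeholder id.
ds = load_dataset("org/masked-class-names", split="train")
for row in ds.select(range(3)):
    span = row["class_span"]          # {"start": ..., "end": ...} character offsets into the file
    print(row["repo"], row["path"], span["start"], span["end"])
    print(row["source"][:80])         # class body with the name replaced by ____
    print("target:", row["target"])   # the masked class name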
python
tensorflow__tensorflow
tensorflow/python/autograph/pyct/qual_names.py
{ "start": 6529, "end": 8127 }
class ____(gast.NodeTransformer): """Annotates nodes with QN information. Note: Not using NodeAnnos to avoid circular dependencies. """ def visit_Name(self, node): node = self.generic_visit(node) anno.setanno(node, anno.Basic.QN, QN(node.id)) return node def visit_Attribute(self, node): node = self.generic_visit(node) if anno.hasanno(node.value, anno.Basic.QN): anno.setanno(node, anno.Basic.QN, QN(anno.getanno(node.value, anno.Basic.QN), attr=node.attr)) return node def visit_Subscript(self, node): # TODO(mdan): This may no longer apply if we overload getitem. node = self.generic_visit(node) s = node.slice if isinstance(s, (gast.Tuple, gast.Slice)): # TODO(mdan): Support range and multi-dimensional indices. # Continuing silently because some demos use these. return node if isinstance(s, gast.Constant) and s.value != Ellipsis: subscript = QN(Literal(s.value)) else: # The index may be an expression, in which case a name doesn't make sense. if anno.hasanno(s, anno.Basic.QN): subscript = anno.getanno(s, anno.Basic.QN) else: return node if anno.hasanno(node.value, anno.Basic.QN): anno.setanno(node, anno.Basic.QN, QN(anno.getanno(node.value, anno.Basic.QN), subscript=subscript)) return node def resolve(node): return QnResolver().visit(node) def from_str(qn_str): node = parser.parse_expression(qn_str) node = resolve(node) return anno.getanno(node, anno.Basic.QN)
QnResolver
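The from_str helper at the end of this record parses an expression and reads the QN annotation back off the tree. A minimal sketch of exercising it, assuming a TensorFlow install; qual_names is an internal AutoGraph module, so the import path may shift between releases:

from tensorflow.python.autograph.pyct import qual_names

qn = qual_names.from_str('a.b.c')  # visit_Name tags 'a', visit_Attribute nests '.b' and '.c'
print(str(qn))                     # 'a.b.c'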
python
rapidsai__cudf
python/cudf/cudf/pandas/fast_slow_proxy.py
{ "start": 33528, "end": 33643 }
class ____(FallbackError): """Raised when cuDF produces a NotImplementedError""" pass
NotImplementedFallbackError
python
automl__auto-sklearn
test/test_pipeline/components/classification/test_decision_tree.py
{ "start": 165, "end": 790 }
class ____(BaseClassificationComponentTest): __test__ = True res = dict() res["default_iris"] = 0.62 res["default_iris_iterative"] = -1 res["default_iris_proba"] = 0.51333963481747835 res["default_iris_sparse"] = 0.41999999999999998 res["default_digits"] = 0.15057680631451123 res["default_digits_iterative"] = -1 res["default_digits_binary"] = 0.92167577413479052 res["default_digits_multilabel"] = 0.076521739130434779 res["default_digits_multilabel_proba"] = 0.80426747311827962 sk_mod = sklearn.tree.DecisionTreeClassifier module = DecisionTree
DecisionTreeComponentTest
python
python__mypy
mypy/semanal_newtype.py
{ "start": 974, "end": 10576 }
class ____: def __init__( self, options: Options, api: SemanticAnalyzerInterface, msg: MessageBuilder ) -> None: self.options = options self.api = api self.msg = msg def process_newtype_declaration(self, s: AssignmentStmt) -> bool: """Check if s declares a NewType; if yes, store it in symbol table. Return True if it's a NewType declaration. The current target may be deferred as a side effect if the base type is not ready, even if the return value is True. The logic in this function mostly copies the logic for visit_class_def() with a single (non-Generic) base. """ var_name, call = self.analyze_newtype_declaration(s) if var_name is None or call is None: return False name = var_name # OK, now we know this is a NewType. But the base type may be not ready yet, # add placeholder as we do for ClassDef. if self.api.is_func_scope(): name += "@" + str(s.line) fullname = self.api.qualified_name(name) if not call.analyzed or isinstance(call.analyzed, NewTypeExpr) and not call.analyzed.info: # Start from labeling this as a future class, as we do for normal ClassDefs. placeholder = PlaceholderNode(fullname, s, s.line, becomes_typeinfo=True) self.api.add_symbol(var_name, placeholder, s, can_defer=False) old_type, should_defer = self.check_newtype_args(var_name, call, s) old_type = get_proper_type(old_type) if not isinstance(call.analyzed, NewTypeExpr): call.analyzed = NewTypeExpr(var_name, old_type, line=call.line, column=call.column) else: call.analyzed.old_type = old_type if old_type is None: if should_defer: # Base type is not ready. self.api.defer() return True # Create the corresponding class definition if the aliased type is subtypeable assert isinstance(call.analyzed, NewTypeExpr) if isinstance(old_type, TupleType): newtype_class_info = self.build_newtype_typeinfo( name, old_type, old_type.partial_fallback, s.line, call.analyzed.info ) newtype_class_info.update_tuple_type(old_type) elif isinstance(old_type, Instance): if old_type.type.is_protocol: self.fail("NewType cannot be used with protocol classes", s) newtype_class_info = self.build_newtype_typeinfo( name, old_type, old_type, s.line, call.analyzed.info ) else: if old_type is not None: message = "Argument 2 to NewType(...) must be subclassable (got {})" self.fail( message.format(format_type(old_type, self.options)), s, code=codes.VALID_NEWTYPE, ) # Otherwise the error was already reported. old_type = AnyType(TypeOfAny.from_error) object_type = self.api.named_type("builtins.object") newtype_class_info = self.build_newtype_typeinfo( name, old_type, object_type, s.line, call.analyzed.info ) newtype_class_info.fallback_to_any = True check_for_explicit_any( old_type, self.options, self.api.is_typeshed_stub_file, self.msg, context=s ) if self.options.disallow_any_unimported and has_any_from_unimported_type(old_type): self.msg.unimported_type_becomes_any("Argument 2 to NewType(...)", old_type, s) # If so, add it to the symbol table. assert isinstance(call.analyzed, NewTypeExpr) # As we do for normal classes, create the TypeInfo only once, then just # update base classes on next iterations (to get rid of placeholders there). 
if not call.analyzed.info: call.analyzed.info = newtype_class_info else: call.analyzed.info.bases = newtype_class_info.bases self.api.add_symbol(var_name, call.analyzed.info, s) if self.api.is_func_scope(): self.api.add_symbol_skip_local(name, call.analyzed.info) newtype_class_info.line = s.line return True def analyze_newtype_declaration(self, s: AssignmentStmt) -> tuple[str | None, CallExpr | None]: """Return the NewType call expression if `s` is a newtype declaration or None otherwise.""" name, call = None, None if ( len(s.lvalues) == 1 and isinstance(s.lvalues[0], NameExpr) and isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.callee, RefExpr) and (s.rvalue.callee.fullname in ("typing.NewType", "typing_extensions.NewType")) ): name = s.lvalues[0].name if s.type: self.fail("Cannot declare the type of a NewType declaration", s) names = self.api.current_symbol_table() existing = names.get(name) # Give a better error message than generic "Name already defined". if ( existing and not isinstance(existing.node, PlaceholderNode) and not s.rvalue.analyzed ): self.fail(f'Cannot redefine "{name}" as a NewType', s) # This dummy NewTypeExpr marks the call as sufficiently analyzed; it will be # overwritten later with a fully complete NewTypeExpr if there are no other # errors with the NewType() call. call = s.rvalue return name, call def check_newtype_args( self, name: str, call: CallExpr, context: Context ) -> tuple[Type | None, bool]: """Analyze base type in NewType call. Return a tuple (type, should defer). """ has_failed = False args, arg_kinds = call.args, call.arg_kinds if len(args) != 2 or arg_kinds[0] != ARG_POS or arg_kinds[1] != ARG_POS: self.fail("NewType(...) expects exactly two positional arguments", context) return None, False # Check first argument if not isinstance(args[0], StrExpr): self.fail("Argument 1 to NewType(...) must be a string literal", context) has_failed = True elif args[0].value != name: msg = 'String argument 1 "{}" to NewType(...) does not match variable name "{}"' self.fail(msg.format(args[0].value, name), context) has_failed = True # Check second argument msg = "Argument 2 to NewType(...) must be a valid type" try: unanalyzed_type = expr_to_unanalyzed_type(args[1], self.options, self.api.is_stub_file) except TypeTranslationError: self.fail(msg, context) return None, False # We want to use our custom error message (see above), so we suppress # the default error message for invalid types here. old_type = get_proper_type( self.api.anal_type( unanalyzed_type, report_invalid_types=False, allow_placeholder=not self.api.is_func_scope(), ) ) should_defer = False if isinstance(old_type, PlaceholderType): old_type = None if old_type is None: should_defer = True # The caller of this function assumes that if we return a Type, it's always # a valid one. So, we translate AnyTypes created from errors into None. if isinstance(old_type, AnyType) and old_type.is_from_error: self.fail(msg, context) return None, False return None if has_failed else old_type, should_defer def build_newtype_typeinfo( self, name: str, old_type: Type, base_type: Instance, line: int, existing_info: TypeInfo | None, ) -> TypeInfo: info = existing_info or self.api.basic_new_typeinfo(name, base_type, line) info.bases = [base_type] # Update in case there were nested placeholders. 
info.is_newtype = True # Add __init__ method args = [ Argument(Var("self"), NoneType(), None, ARG_POS), self.make_argument("item", old_type), ] signature = CallableType( arg_types=[Instance(info, []), old_type], arg_kinds=[arg.kind for arg in args], arg_names=["self", "item"], ret_type=NoneType(), fallback=self.api.named_type("builtins.function"), name=name, ) init_func = FuncDef("__init__", args, Block([]), typ=signature) init_func.info = info init_func._fullname = info.fullname + ".__init__" if not existing_info: updated = True else: previous_sym = info.names["__init__"].node assert isinstance(previous_sym, FuncDef) updated = old_type != previous_sym.arguments[1].variable.type info.names["__init__"] = SymbolTableNode(MDEF, init_func) if has_placeholder(old_type): self.api.process_placeholder(None, "NewType base", info, force_progress=updated) return info # Helpers def make_argument(self, name: str, type: Type) -> Argument: return Argument(Var(name), type, None, ARG_POS) def fail(self, msg: str, ctx: Context, *, code: ErrorCode | None = None) -> None: self.api.fail(msg, ctx, code=code)
NewTypeAnalyzer
python
coleifer__peewee
tests/regressions.py
{ "start": 54126, "end": 54223 }
class ____(TestModel): key = CharField(primary_key=True) date = DateTimeField(null=True)
NDF
python
getsentry__sentry
src/sentry/workflow_engine/migrations/0073_safe_pending_delete_actiongroupstatus.py
{ "start": 408, "end": 2665 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = True dependencies = [ ("sentry", "0932_update_grouptombstone_with_auto_now_add"), ("workflow_engine", "0072_add_detector_to_workflowfirehistory"), ] operations = [ migrations.AlterField( model_name="actiongroupstatus", name="action", field=sentry.db.models.fields.foreignkey.FlexibleForeignKey( db_constraint=False, on_delete=django.db.models.deletion.CASCADE, to="workflow_engine.action", ), ), migrations.AlterField( model_name="actiongroupstatus", name="group", field=sentry.db.models.fields.foreignkey.FlexibleForeignKey( db_constraint=False, on_delete=django.db.models.deletion.CASCADE, to="sentry.group" ), ), SafeRunSQL( sql="ALTER TABLE workflow_engine_actiongroupstatus DROP CONSTRAINT IF EXISTS workflow_engine_acti_action_id_e5b33d82_fk_workflow_;", reverse_sql=migrations.RunSQL.noop, hints={"tables": ["workflow_engine_actiongroupstatus"]}, ), SafeDeleteModel(name="ActionGroupStatus", deletion_action=DeletionAction.MOVE_TO_PENDING), ]
Migration
python
airbytehq__airbyte
airbyte-integrations/connectors/source-appsflyer/source_appsflyer/source.py
{ "start": 9698, "end": 10053 }
class ____(RawDataMixin, IncrementalAppsflyerStream): intervals = 31 cursor_field = "event_time" def path( self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None ) -> str: return f"raw-data/export/app/{self.app_id}/in_app_events_report/v5"
InAppEvents
python
spyder-ide__spyder
spyder/api/widgets/auxiliary_widgets.py
{ "start": 1947, "end": 5166 }
class ____(QToolBar): """ Corner widget to hold options menu, spinner and additional options. """ def __init__(self, parent, name): super().__init__(parent) self._icon_size = QSize(16, 16) self.setIconSize(self._icon_size) self._widgets = {} self._actions = [] self.setObjectName(name) # We add a strut widget here so that there is a spacing # between the first item of the corner widget and the last # item of the MainWidgetToolbar. self._strut = QWidget() self._strut.setFixedWidth(0) self._strut.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) self.addWidget(self._strut) def add_widget(self, widget, before=None): """ Add a widget to the left of the last widget added to the corner. """ if not hasattr(widget, "name") or ( before is not None and not hasattr(before, "name") ): raise SpyderAPIError( f"Widget {widget} or {before} doesn't have a name, which must " f"be provided by the attribute `name`" ) if widget.name in self._widgets: raise SpyderAPIError( 'Widget with name "{}" already added. Current names are: {}' ''.format(widget.name, list(self._widgets.keys())) ) if before is not None and before.name not in self._widgets: raise SpyderAPIError( f"Widget with name '{before.name}' not in this corner widget" ) if ( not self._widgets and widget.name != PluginMainWidgetWidgets.OptionsToolButton ): raise SpyderAPIError( "The options button must be the first one to be added to the " "corner widget of dockable plugins." ) if widget.name == PluginMainWidgetWidgets.OptionsToolButton: # This is only necessary for the options button because it's the # first one to be added action = self.addWidget(widget) else: if before is not None: before_action = self.get_action(before.name) else: # By default other buttons are added to the left of the last # one before_action = self._actions[-1] # Allow to add either widgets or actions if isinstance(widget, QWidget): action = self.insertWidget(before_action, widget) else: action = widget self.insertAction(before_action, action) widget = self.widgetForAction(action) widget.name = action.name self._widgets[widget.name] = (widget, action) self._actions.append(action) def get_widget(self, widget_id): """Return a widget by unique id.""" if widget_id in self._widgets: return self._widgets[widget_id][0] def get_action(self, widget_id): """Return action corresponding to `widget_id`.""" if widget_id in self._widgets: return self._widgets[widget_id][1]
MainCornerWidget
python
getsentry__sentry
tests/sentry/sentry_apps/api/parsers/test_alert_rule_action.py
{ "start": 201, "end": 1074 }
class ____(unittest.TestCase): def setUp(self) -> None: self.schema: dict[str, Any] = { "type": "alert-rule-action", "title": "Create Task", "settings": { "type": "alert-rule-settings", "description": "This integration allows you to create a task.", "uri": "/sentry/alert-rule", "required_fields": [{"type": "text", "name": "channel", "label": "Channel"}], "optional_fields": [{"type": "text", "name": "prefix", "label": "Prefix"}], }, } def test_valid_schema(self) -> None: validate_component(self.schema) @invalid_schema def test_missing_required_fields_fails(self) -> None: del self.schema["settings"]["required_fields"] validate_component(self.schema)
TestAlertRuleActionSchemaValidation
python
tensorflow__tensorflow
tensorflow/python/module/module_test.py
{ "start": 14244, "end": 14552 }
class ____(module.Module): @def_function.function(autograph=False) @module.Module.with_name_scope def forward(self): return get_name_scope() @def_function.function(autograph=True) @module.Module.with_name_scope def forward_ag(self): return get_name_scope()
ModuleWithFunctionAnnotatedCall
python
Pylons__pyramid
src/pyramid/events.py
{ "start": 224, "end": 3671 }
class ____: """Decorator activated via a :term:`scan` which treats the function being decorated as an event subscriber for the set of interfaces passed as ``*ifaces`` and the set of predicate terms passed as ``**predicates`` to the decorator constructor. For example: .. code-block:: python from pyramid.events import NewRequest from pyramid.events import subscriber @subscriber(NewRequest) def mysubscriber(event): event.request.foo = 1 More than one event type can be passed as a constructor argument. The decorated subscriber will be called for each event type. .. code-block:: python from pyramid.events import NewRequest, NewResponse from pyramid.events import subscriber @subscriber(NewRequest, NewResponse) def mysubscriber(event): print(event) When the ``subscriber`` decorator is used without passing an arguments, the function it decorates is called for every event sent: .. code-block:: python from pyramid.events import subscriber @subscriber() def mysubscriber(event): print(event) This method will have no effect until a :term:`scan` is performed against the package or module which contains it, ala: .. code-block:: python from pyramid.config import Configurator config = Configurator() config.scan('somepackage_containing_subscribers') Any ``**predicate`` arguments will be passed along to :meth:`pyramid.config.Configurator.add_subscriber`. See :ref:`subscriber_predicates` for a description of how predicates can narrow the set of circumstances in which a subscriber will be called. Two additional keyword arguments which will be passed to the :term:`venusian` ``attach`` function are ``_depth`` and ``_category``. ``_depth`` is provided for people who wish to reuse this class from another decorator. The default value is ``0`` and should be specified relative to the ``subscriber`` invocation. It will be passed in to the :term:`venusian` ``attach`` function as the depth of the callstack when Venusian checks if the decorator is being used in a class or module context. It's not often used, but it can be useful in this circumstance. ``_category`` sets the decorator category name. It can be useful in combination with the ``category`` argument of ``scan`` to control which views should be processed. See the :py:func:`venusian.attach` function in Venusian for more information about the ``_depth`` and ``_category`` arguments. .. versionchanged:: 1.9.1 Added the ``_depth`` and ``_category`` arguments. """ venusian = venusian # for unit testing def __init__(self, *ifaces, **predicates): self.ifaces = ifaces self.predicates = predicates self.depth = predicates.pop('_depth', 0) self.category = predicates.pop('_category', 'pyramid') def register(self, scanner, name, wrapped): config = scanner.config for iface in self.ifaces or (Interface,): config.add_subscriber(wrapped, iface, **self.predicates) def __call__(self, wrapped): self.venusian.attach( wrapped, self.register, category=self.category, depth=self.depth + 1, ) return wrapped @implementer(INewRequest)
subscriber
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/links/pubsub.py
{ "start": 1174, "end": 1363 }
class ____(BaseGoogleLink): """Helper class for constructing Pub/Sub Topic Link.""" name = "Pub/Sub Topic" key = "pubsub_topic" format_str = PUBSUB_TOPIC_LINK
PubSubTopicLink
python
pytorch__pytorch
test/test_custom_ops.py
{ "start": 168114, "end": 169603 }
class ____(TestCase): """In infer_schema(), we try to suggest a correct type when the type annotation is wrong.""" def setUp(self): self.supported_base_types = [ int, float, bool, str, torch.device, torch.Tensor, torch.dtype, torch.types.Number, ] def test_simple_tuple(self): self.assertEqual(list, tuple_to_list(Tuple)) def test_supported_types(self): for t in self.supported_base_types: result_type = tuple_to_list(Tuple[t, t, t]) self.assertEqual(result_type, list[t]) result_type = tuple_to_list(Tuple[t]) self.assertEqual(result_type, list[t]) def test_optional(self): for t in self.supported_base_types: result_type = tuple_to_list(Tuple[t, Optional[t]]) self.assertEqual(result_type, list[Optional[t]]) result_type = tuple_to_list(Tuple[t, t, Optional[t]]) self.assertEqual(result_type, list[Optional[t]]) result_type = tuple_to_list(Tuple[t, ...]) self.assertEqual(result_type, list[t]) def test_mixed_types(self): result_type = tuple_to_list(Tuple[int, float]) self.assertEqual(result_type, list[typing.Union[int, float]]) result_type = tuple_to_list(Tuple[int, float, str]) self.assertEqual(result_type, list[typing.Union[int, float, str]])
TestTypeConversion
python
kamyu104__LeetCode-Solutions
Python/maximum-number-of-accepted-invitations.py
{ "start": 4367, "end": 4901 }
class ____(object): def maximumInvitations(self, grid): """ :type grid: List[List[int]] :rtype: int """ adj = collections.defaultdict(list) for i in xrange(len(grid)): for j in xrange(len(grid[0])): if not grid[i][j]: continue adj[j].append(i) return len(bipartiteMatch(adj)[0]) # Time: O(|V| * |E|) = O(min(m, n) * (m * n)) # Space: O(|V|) = O(min(m, n)) # Hungarian bipartite matching with less space
Solution
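The bipartiteMatch helper this Solution delegates to is not part of the record. The stand-in below is Kuhn's augmenting-path matching, which meets the O(|V| * |E|) bound stated in the trailing comment; the kuhn_match name and return shape are illustrative, not the original helper's.

import collections

def kuhn_match(adj):
    # adj maps each left vertex to the right vertices it can pair with;
    # returns {right: left} for one maximum matching.
    matched = {}
    def try_augment(v, seen):
        for u in adj[v]:
            if u in seen:
                continue
            seen.add(u)
            # u is free, or its current partner can be re-matched elsewhere
            if u not in matched or try_augment(matched[u], seen):
                matched[u] = v
                return True
        return False
    for v in list(adj):
        try_augment(v, set())
    return matched

grid = [[1, 1], [1, 0]]
adj = collections.defaultdict(list)
for i in range(len(grid)):
    for j in range(len(grid[0])):
        if grid[i][j]:
            adj[j].append(i)   # same orientation as the Solution above
print(len(kuhn_match(adj)))    # 2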
python
huggingface__transformers
src/transformers/activations.py
{ "start": 6811, "end": 7020 }
class ____(nn.Module): """ Applies the linear activation function, i.e. forwarding input directly to output. """ def forward(self, input: Tensor) -> Tensor: return input
LinearActivation
python
tensorflow__tensorflow
tensorflow/lite/python/test_util_test.py
{ "start": 981, "end": 1557 }
class ____(test_util.TensorFlowTestCase): def testBuiltinOp(self): model_path = resource_loader.get_path_to_datafile('../testdata/add.bin') op_set = tflite_test_util.get_ops_list(gfile.GFile(model_path, 'rb').read()) self.assertCountEqual(op_set, ['ADD']) def testFlexOp(self): model_path = resource_loader.get_path_to_datafile( '../testdata/softplus_flex.bin') op_set = tflite_test_util.get_ops_list(gfile.GFile(model_path, 'rb').read()) self.assertCountEqual(op_set, ['FlexSoftplus']) if __name__ == '__main__': test.main()
TestUtilTest
python
pandas-dev__pandas
asv_bench/benchmarks/index_object.py
{ "start": 2804, "end": 3193 }
class ____: def setup(self): idx_large_fast = RangeIndex(100_000) idx_small_slow = date_range(start="1/1/2012", periods=1) self.mi_large_slow = MultiIndex.from_product([idx_large_fast, idx_small_slow]) self.idx_non_object = RangeIndex(1) def time_non_object_equals_multiindex(self): self.idx_non_object.equals(self.mi_large_slow)
IndexEquals
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/orm/interfaces.py
{ "start": 4034, "end": 4199 }
class ____(roles.StatementRole): __slots__ = () _role_name = ( "Executable SQL or text() construct, including ORM aware objects" )
ORMStatementRole
python
django__django
tests/postgres_tests/test_ranges.py
{ "start": 6564, "end": 9899 }
class ____(PostgreSQLTestCase): @classmethod def setUpTestData(cls): cls.timestamps = [ datetime.datetime(year=2016, month=1, day=1), datetime.datetime(year=2016, month=1, day=2, hour=1), datetime.datetime(year=2016, month=1, day=2, hour=12), datetime.datetime(year=2016, month=1, day=3), datetime.datetime(year=2016, month=1, day=3, hour=1), datetime.datetime(year=2016, month=2, day=2), ] cls.aware_timestamps = [ timezone.make_aware(timestamp) for timestamp in cls.timestamps ] cls.dates = [ datetime.date(year=2016, month=1, day=1), datetime.date(year=2016, month=1, day=2), datetime.date(year=2016, month=1, day=3), datetime.date(year=2016, month=1, day=4), datetime.date(year=2016, month=2, day=2), datetime.date(year=2016, month=2, day=3), ] cls.obj = RangesModel.objects.create( dates=(cls.dates[0], cls.dates[3]), dates_inner=(cls.dates[1], cls.dates[2]), timestamps=(cls.timestamps[0], cls.timestamps[3]), timestamps_inner=(cls.timestamps[1], cls.timestamps[2]), ) cls.aware_obj = RangesModel.objects.create( dates=(cls.dates[0], cls.dates[3]), dates_inner=(cls.dates[1], cls.dates[2]), timestamps=(cls.aware_timestamps[0], cls.aware_timestamps[3]), timestamps_inner=(cls.timestamps[1], cls.timestamps[2]), ) # Objects that don't match any queries. for i in range(3, 4): RangesModel.objects.create( dates=(cls.dates[i], cls.dates[i + 1]), timestamps=(cls.timestamps[i], cls.timestamps[i + 1]), ) RangesModel.objects.create( dates=(cls.dates[i], cls.dates[i + 1]), timestamps=(cls.aware_timestamps[i], cls.aware_timestamps[i + 1]), ) def test_datetime_range_contains(self): filter_args = ( self.timestamps[1], self.aware_timestamps[1], (self.timestamps[1], self.timestamps[2]), (self.aware_timestamps[1], self.aware_timestamps[2]), Value(self.dates[0]), Func(F("dates"), function="lower", output_field=DateTimeField()), F("timestamps_inner"), ) for filter_arg in filter_args: with self.subTest(filter_arg=filter_arg): self.assertCountEqual( RangesModel.objects.filter(**{"timestamps__contains": filter_arg}), [self.obj, self.aware_obj], ) def test_date_range_contains(self): filter_args = ( self.timestamps[1], (self.dates[1], self.dates[2]), Value(self.dates[0], output_field=DateField()), Func(F("timestamps"), function="lower", output_field=DateField()), F("dates_inner"), ) for filter_arg in filter_args: with self.subTest(filter_arg=filter_arg): self.assertCountEqual( RangesModel.objects.filter(**{"dates__contains": filter_arg}), [self.obj, self.aware_obj], )
TestRangeContainsLookup
python
gevent__gevent
src/greentest/3.10/test_socket.py
{ "start": 110464, "end": 112928 }
class ____(SendmsgTests): # Tests for sendmsg() which require a stream socket and do not # involve recvmsg() or recvmsg_into(). def testSendmsgExplicitNoneAddr(self): # Check that peer address can be specified as None. self.assertEqual(self.serv_sock.recv(len(MSG)), MSG) def _testSendmsgExplicitNoneAddr(self): self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG)) def testSendmsgTimeout(self): # Check that timeout works with sendmsg(). self.assertEqual(self.serv_sock.recv(512), b"a"*512) self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) def _testSendmsgTimeout(self): try: self.cli_sock.settimeout(0.03) try: while True: self.sendmsgToServer([b"a"*512]) except TimeoutError: pass except OSError as exc: if exc.errno != errno.ENOMEM: raise # bpo-33937 the test randomly fails on Travis CI with # "OSError: [Errno 12] Cannot allocate memory" else: self.fail("TimeoutError not raised") finally: self.misc_event.set() # XXX: would be nice to have more tests for sendmsg flags argument. # Linux supports MSG_DONTWAIT when sending, but in general, it # only works when receiving. Could add other platforms if they # support it too. @skipWithClientIf(sys.platform not in {"linux"}, "MSG_DONTWAIT not known to work on this platform when " "sending") def testSendmsgDontWait(self): # Check that MSG_DONTWAIT in flags causes non-blocking behaviour. self.assertEqual(self.serv_sock.recv(512), b"a"*512) self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) @testSendmsgDontWait.client_skip def _testSendmsgDontWait(self): try: with self.assertRaises(OSError) as cm: while True: self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT) # bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI # with "OSError: [Errno 12] Cannot allocate memory" self.assertIn(cm.exception.errno, (errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM)) finally: self.misc_event.set()
SendmsgStreamTests
python
fluentpython__example-code-2e
21-async/mojifinder/bottle.py
{ "start": 68791, "end": 69240 }
class ____(BaseRequest): ''' A thread-local subclass of :class:`BaseRequest` with a different set of attributes for each thread. There is usually only one global instance of this class (:data:`request`). If accessed during a request/response cycle, this instance always refers to the *current* request (even on a multithreaded server). ''' bind = BaseRequest.__init__ environ = local_property()
LocalRequest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 187374, "end": 188646 }
class ____(sgqlc.types.Input): """Autogenerated input type of CreateMigrationSource""" __schema__ = github_schema __field_names__ = ("name", "url", "access_token", "type", "owner_id", "github_pat", "client_mutation_id") name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name") """The migration source name.""" url = sgqlc.types.Field(String, graphql_name="url") """The migration source URL, for example `https://github.com` or `https://monalisa.ghe.com`. """ access_token = sgqlc.types.Field(String, graphql_name="accessToken") """The migration source access token.""" type = sgqlc.types.Field(sgqlc.types.non_null(MigrationSourceType), graphql_name="type") """The migration source type.""" owner_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="ownerId") """The ID of the organization that will own the migration source.""" github_pat = sgqlc.types.Field(String, graphql_name="githubPat") """The GitHub personal access token of the user importing to the target repository. """ client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation."""
CreateMigrationSourceInput
python
walkccc__LeetCode
solutions/238. Product of Array Except Self/238-2.py
{ "start": 0, "end": 367 }
class ____: def productExceptSelf(self, nums: list[int]) -> list[int]: n = len(nums) ans = [1] * n # Use ans as the prefix product array. for i in range(1, n): ans[i] = ans[i - 1] * nums[i - 1] suffix = 1 # suffix product for i, num in reversed(list(enumerate(nums))): ans[i] *= suffix suffix *= num return ans
Solution
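This record builds prefix products in ans on the forward pass, then folds suffix products in on a reverse pass. A quick worked check of that two-pass logic:

s = Solution()
print(s.productExceptSelf([1, 2, 3, 4]))   # [24, 12, 8, 6]
# pass 1 (prefix): ans = [1, 1, 2, 6]
# pass 2 (suffix): ans = [1*24, 1*12, 2*4, 6*1] = [24, 12, 8, 6]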
python
dagster-io__dagster
python_modules/libraries/dagster-fivetran/dagster_fivetran/components/workspace_component/component.py
{ "start": 9952, "end": 10535 }
class ____( create_component_translator_cls(FivetranAccountComponent, DagsterFivetranTranslator), ComponentTranslator[FivetranAccountComponent], ): def __init__(self, component: "FivetranAccountComponent"): self._component = component def get_asset_spec(self, props: FivetranConnectorTableProps) -> dg.AssetSpec: base_asset_spec = super().get_asset_spec(props) if self.component.translation is None: return base_asset_spec else: return self.component.translation(base_asset_spec, props)
FivetranComponentTranslator
python
jazzband__django-polymorphic
src/polymorphic/query.py
{ "start": 2701, "end": 22420 }
class ____(QuerySet): """ QuerySet for PolymorphicModel Contains the core functionality for PolymorphicModel Usually not explicitly needed, except if a custom queryset class is to be used. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._iterable_class = PolymorphicModelIterable self.polymorphic_disabled = False # A parallel structure to django.db.models.query.Query.deferred_loading, # which we maintain with the untranslated field names passed to # .defer() and .only() in order to be able to retranslate them when # retrieving the real instance (so that the deferred fields apply # to that queryset as well). self.polymorphic_deferred_loading = (set(), True) def _clone(self, *args, **kwargs): # Django's _clone only copies its own variables, so we need to copy ours here new = super()._clone(*args, **kwargs) new.polymorphic_disabled = self.polymorphic_disabled new.polymorphic_deferred_loading = ( copy.copy(self.polymorphic_deferred_loading[0]), self.polymorphic_deferred_loading[1], ) return new def as_manager(cls): from .managers import PolymorphicManager manager = PolymorphicManager.from_queryset(cls)() manager._built_with_as_manager = True return manager as_manager.queryset_only = True as_manager = classmethod(as_manager) def bulk_create(self, objs, batch_size=None, ignore_conflicts=False): objs = list(objs) for obj in objs: obj.pre_save_polymorphic() return super().bulk_create(objs, batch_size, ignore_conflicts=ignore_conflicts) def non_polymorphic(self): """switch off polymorphic behaviour for this query. When the queryset is evaluated, only objects of the type of the base class used for this query are returned.""" qs = self._clone() qs.polymorphic_disabled = True if issubclass(qs._iterable_class, PolymorphicModelIterable): qs._iterable_class = ModelIterable return qs def instance_of(self, *args): """Filter the queryset to only include the classes in args (and their subclasses).""" # Implementation in _translate_polymorphic_filter_defnition. return self.filter(instance_of=args) def not_instance_of(self, *args): """Filter the queryset to exclude the classes in args (and their subclasses).""" # Implementation in _translate_polymorphic_filter_defnition.""" return self.filter(not_instance_of=args) def _filter_or_exclude(self, negate, args, kwargs): # We override this internal Django function as it is used for all filter member functions. q_objects = translate_polymorphic_filter_definitions_in_args( queryset_model=self.model, args=args, using=self.db ) # filter_field='data' additional_args = translate_polymorphic_filter_definitions_in_kwargs( queryset_model=self.model, kwargs=kwargs, using=self.db ) args = list(q_objects) + additional_args return super()._filter_or_exclude(negate=negate, args=args, kwargs=kwargs) def order_by(self, *field_names): """translate the field paths in the args, then call vanilla order_by.""" field_names = [ translate_polymorphic_field_path(self.model, a) if isinstance(a, str) else a # allow expressions to pass unchanged for a in field_names ] return super().order_by(*field_names) def defer(self, *fields): """ Translate the field paths in the args, then call vanilla defer. Also retain a copy of the original fields passed, which we'll need when we're retrieving the real instance (since we'll need to translate them again, as the model will have changed). 
""" new_fields = [translate_polymorphic_field_path(self.model, a) for a in fields] clone = super().defer(*new_fields) clone._polymorphic_add_deferred_loading(fields) return clone def only(self, *fields): """ Translate the field paths in the args, then call vanilla only. Also retain a copy of the original fields passed, which we'll need when we're retrieving the real instance (since we'll need to translate them again, as the model will have changed). """ new_fields = [translate_polymorphic_field_path(self.model, a) for a in fields] clone = super().only(*new_fields) clone._polymorphic_add_immediate_loading(fields) return clone def _polymorphic_add_deferred_loading(self, field_names): """ Follows the logic of django.db.models.query.Query.add_deferred_loading(), but for the non-translated field names that were passed to self.defer(). """ existing, defer = self.polymorphic_deferred_loading if defer: # Add to existing deferred names. self.polymorphic_deferred_loading = existing.union(field_names), True else: # Remove names from the set of any existing "immediate load" names. self.polymorphic_deferred_loading = existing.difference(field_names), False def _polymorphic_add_immediate_loading(self, field_names): """ Follows the logic of django.db.models.query.Query.add_immediate_loading(), but for the non-translated field names that were passed to self.only() """ existing, defer = self.polymorphic_deferred_loading field_names = set(field_names) if "pk" in field_names: field_names.remove("pk") field_names.add(self.model._meta.pk.name) if defer: # Remove any existing deferred names from the current set before # setting the new names. self.polymorphic_deferred_loading = field_names.difference(existing), False else: # Replace any existing "immediate load" field names. self.polymorphic_deferred_loading = field_names, False def _process_aggregate_args(self, args, kwargs): """for aggregate and annotate kwargs: allow ModelX___field syntax for kwargs, forbid it for args. Modifies kwargs if needed (these are Aggregate objects, we translate the lookup member variable) """ ___lookup_assert_msg = "PolymorphicModel: annotate()/aggregate(): ___ model lookup supported for keyword arguments only" def patch_lookup(a): # The field on which the aggregate operates is # stored inside a complex query expression. 
if isinstance(a, Q): translate_polymorphic_Q_object(self.model, a) elif isinstance(a, FilteredRelation): patch_lookup(a.condition) elif hasattr(a, "get_source_expressions"): for source_expression in a.get_source_expressions(): if source_expression is not None: patch_lookup(source_expression) else: a.name = translate_polymorphic_field_path(self.model, a.name) def test___lookup(a): """*args might be complex expressions too in django 1.8 so the testing for a '___' is rather complex on this one""" if isinstance(a, Q): def tree_node_test___lookup(my_model, node): "process all children of this Q node" for i in range(len(node.children)): child = node.children[i] if type(child) is tuple: # this Q object child is a tuple => a kwarg like Q( instance_of=ModelB ) assert "___" not in child[0], ___lookup_assert_msg else: # this Q object child is another Q object, recursively process this as well tree_node_test___lookup(my_model, child) tree_node_test___lookup(self.model, a) elif hasattr(a, "get_source_expressions"): for source_expression in a.get_source_expressions(): if source_expression is not None: test___lookup(source_expression) else: assert "___" not in a.name, ___lookup_assert_msg for a in args: test___lookup(a) for a in kwargs.values(): patch_lookup(a) def annotate(self, *args, **kwargs): """translate the polymorphic field paths in the kwargs, then call vanilla annotate. _get_real_instances will do the rest of the job after executing the query.""" self._process_aggregate_args(args, kwargs) return super().annotate(*args, **kwargs) def aggregate(self, *args, **kwargs): """translate the polymorphic field paths in the kwargs, then call vanilla aggregate. We need no polymorphic object retrieval for aggregate => switch it off.""" self._process_aggregate_args(args, kwargs) qs = self.non_polymorphic() return super(PolymorphicQuerySet, qs).aggregate(*args, **kwargs) # Starting with Django 1.9, the copy returned by 'qs.values(...)' has the # same class as 'qs', so our polymorphic modifications would apply. # We want to leave values queries untouched, so we set 'polymorphic_disabled'. def _values(self, *args, **kwargs): clone = super()._values(*args, **kwargs) clone.polymorphic_disabled = True return clone # Since django_polymorphic 'V1.0 beta2', extra() always returns polymorphic results. # The resulting objects are required to have a unique primary key within the result set # (otherwise an error is thrown). # The "polymorphic" keyword argument is not supported anymore. # def extra(self, *args, **kwargs): def _get_real_instances(self, base_result_objects): """ Polymorphic object loader Does the same as: return [ o.get_real_instance() for o in base_result_objects ] but more efficiently. The list base_result_objects contains the objects from the executed base class query. The class of all of them is self.model (our base model). Some, many or all of these objects were not created and stored as class self.model, but as a class derived from self.model. We want to re-fetch these objects from the db as their original class so we can return them just as they were created/saved. We identify these objects by looking at o.polymorphic_ctype, which specifies the real class of these objects (the class at the time they were saved). First, we sort the result objects in base_result_objects for their subclass (from o.polymorphic_ctype), and then we execute one db query per subclass of objects. Here, we handle any annotations from annotate(). 
Finally we re-sort the resulting objects into the correct order and return them as a list. """ resultlist = [] # polymorphic list of result-objects # dict contains one entry per unique model type occurring in result, # in the format idlist_per_model[modelclass]=[list-of-object-ids] idlist_per_model = defaultdict(list) indexlist_per_model = defaultdict(list) # django's automatic ".pk" field does not always work correctly for # custom fields in derived objects (unclear yet who to put the blame on). # We get different type(o.pk) in this case. # We work around this by using the real name of the field directly # for accessing the primary key of the the derived objects. # We might assume that self.model._meta.pk.name gives us the name of the primary key field, # but it doesn't. Therefore we use polymorphic_primary_key_name, which we set up in base.py. pk_name = self.model.polymorphic_primary_key_name # - sort base_result_object ids into idlist_per_model lists, depending on their real class; # - store objects that already have the correct class into "results" content_type_manager = ContentType.objects.db_manager(self.db) self_model_class_id = content_type_manager.get_for_model( self.model, for_concrete_model=False ).pk self_concrete_model_class_id = content_type_manager.get_for_model( self.model, for_concrete_model=True ).pk for i, base_object in enumerate(base_result_objects): if base_object.polymorphic_ctype_id == self_model_class_id: # Real class is exactly the same as base class, go straight to results resultlist.append(base_object) else: real_concrete_class = base_object.get_real_instance_class() real_concrete_class_id = base_object.get_real_concrete_instance_class_id() if real_concrete_class_id is None: # Dealing with a stale content type continue elif real_concrete_class_id == self_concrete_model_class_id: # Real and base classes share the same concrete ancestor, # upcast it and put it in the results resultlist.append(transmogrify(real_concrete_class, base_object)) else: # This model has a concrete derived class, track it for bulk retrieval. real_concrete_class = content_type_manager.get_for_id( real_concrete_class_id ).model_class() idlist_per_model[real_concrete_class].append(getattr(base_object, pk_name)) indexlist_per_model[real_concrete_class].append((i, len(resultlist))) resultlist.append(None) # For each model in "idlist_per_model" request its objects (the real model) # from the db and store them in results[]. # Then we copy the annotate fields from the base objects to the real objects. # Then we copy the extra() select fields from the base objects to the real objects. # TODO: defer(), only(): support for these would be around here for real_concrete_class, idlist in idlist_per_model.items(): indices = indexlist_per_model[real_concrete_class] real_objects = real_concrete_class._base_objects.db_manager(self.db).filter( **{(f"{pk_name}__in"): idlist} ) # copy select related configuration to new qs real_objects.query.select_related = self.query.select_related # Copy deferred fields configuration to the new queryset deferred_loading_fields = [] existing_fields = self.polymorphic_deferred_loading[0] for field in existing_fields: try: translated_field_name = translate_polymorphic_field_path( real_concrete_class, field ) except AssertionError: if "___" in field: # The originally passed argument to .defer() or .only() # was in the form Model2B___field2, where Model2B is # now a superclass of real_concrete_class. Thus it's # sufficient to just use the field name. 
translated_field_name = field.rpartition("___")[-1] # Check if the field does exist. # Ignore deferred fields that don't exist in this subclass type. try: real_concrete_class._meta.get_field(translated_field_name) except FieldDoesNotExist: continue else: raise deferred_loading_fields.append(translated_field_name) real_objects.query.deferred_loading = ( set(deferred_loading_fields), self.query.deferred_loading[1], ) real_objects_dict = { getattr(real_object, pk_name): real_object for real_object in real_objects } for i, j in indices: base_object = base_result_objects[i] o_pk = getattr(base_object, pk_name) real_object = real_objects_dict.get(o_pk) if real_object is None: continue # need shallow copy to avoid duplication in caches (see PR #353) real_object = copy.copy(real_object) real_class = real_object.get_real_instance_class() # If the real class is a proxy, upcast it if real_class != real_concrete_class: real_object = transmogrify(real_class, real_object) if self.query.annotations: for anno_field_name in self.query.annotations.keys(): attr = getattr(base_object, anno_field_name) setattr(real_object, anno_field_name, attr) if self.query.extra_select: for select_field_name in self.query.extra_select.keys(): attr = getattr(base_object, select_field_name) setattr(real_object, select_field_name, attr) resultlist[j] = real_object resultlist = [i for i in resultlist if i] # set polymorphic_annotate_names in all objects (currently just used for debugging/printing) if self.query.annotations: # get annotate field list annotate_names = list(self.query.annotations.keys()) for real_object in resultlist: real_object.polymorphic_annotate_names = annotate_names # set polymorphic_extra_select_names in all objects (currently just used for debugging/printing) if self.query.extra_select: # get extra select field list extra_select_names = list(self.query.extra_select.keys()) for real_object in resultlist: real_object.polymorphic_extra_select_names = extra_select_names return resultlist def __repr__(self, *args, **kwargs): if self.model.polymorphic_query_multiline_output: result = ",\n ".join(repr(o) for o in self.all()) return f"[ {result} ]" else: return super().__repr__(*args, **kwargs) class _p_list_class(list): def __repr__(self, *args, **kwargs): result = ",\n ".join(repr(o) for o in self) return f"[ {result} ]" def get_real_instances(self, base_result_objects=None): """ Cast a list of objects to their actual classes. This does roughly the same as:: return [ o.get_real_instance() for o in base_result_objects ] but more efficiently. :rtype: PolymorphicQuerySet """ "same as _get_real_instances, but make sure that __repr__ for ShowField... creates correct output" if base_result_objects is None: base_result_objects = self olist = self._get_real_instances(base_result_objects) if not self.model.polymorphic_query_multiline_output: return olist clist = PolymorphicQuerySet._p_list_class(olist) return clist
PolymorphicQuerySet
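Grounded in the methods shown in this record (instance_of, not_instance_of, non_polymorphic), a short usage sketch — the Project and ArtProject model names are hypothetical PolymorphicModel subclasses, not part of the library:

from myapp.models import ArtProject, Project  # hypothetical app

Project.objects.instance_of(ArtProject)      # only ArtProject rows (and its subclasses)
Project.objects.not_instance_of(ArtProject)  # everything else
Project.objects.non_polymorphic()            # plain Project instances, no upcasting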
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/exc.py
{ "start": 16639, "end": 24183 }
class ____(StatementError): """Raised when the execution of a database operation fails. Wraps exceptions raised by the DB-API underlying the database operation. Driver-specific implementations of the standard DB-API exception types are wrapped by matching sub-types of SQLAlchemy's :class:`DBAPIError` when possible. DB-API's ``Error`` type maps to :class:`DBAPIError` in SQLAlchemy, otherwise the names are identical. Note that there is no guarantee that different DB-API implementations will raise the same exception type for any given error condition. :class:`DBAPIError` features :attr:`~.StatementError.statement` and :attr:`~.StatementError.params` attributes which supply context regarding the specifics of the statement which had an issue, for the typical case when the error was raised within the context of emitting a SQL statement. The wrapped exception object is available in the :attr:`~.StatementError.orig` attribute. Its type and properties are DB-API implementation specific. """ code = "dbapi" orig: Optional[Exception] @overload @classmethod def instance( cls, statement: Optional[str], params: Optional[_AnyExecuteParams], orig: Exception, dbapi_base_err: Type[Exception], hide_parameters: bool = False, connection_invalidated: bool = False, dialect: Optional[Dialect] = None, ismulti: Optional[bool] = None, ) -> StatementError: ... @overload @classmethod def instance( cls, statement: Optional[str], params: Optional[_AnyExecuteParams], orig: DontWrapMixin, dbapi_base_err: Type[Exception], hide_parameters: bool = False, connection_invalidated: bool = False, dialect: Optional[Dialect] = None, ismulti: Optional[bool] = None, ) -> DontWrapMixin: ... @overload @classmethod def instance( cls, statement: Optional[str], params: Optional[_AnyExecuteParams], orig: BaseException, dbapi_base_err: Type[Exception], hide_parameters: bool = False, connection_invalidated: bool = False, dialect: Optional[Dialect] = None, ismulti: Optional[bool] = None, ) -> BaseException: ... @classmethod def instance( cls, statement: Optional[str], params: Optional[_AnyExecuteParams], orig: Union[BaseException, DontWrapMixin], dbapi_base_err: Type[Exception], hide_parameters: bool = False, connection_invalidated: bool = False, dialect: Optional[Dialect] = None, ismulti: Optional[bool] = None, ) -> Union[BaseException, DontWrapMixin]: # Don't ever wrap these, just return them directly as if # DBAPIError didn't exist. if ( isinstance(orig, BaseException) and not isinstance(orig, Exception) ) or isinstance(orig, DontWrapMixin): return orig if orig is not None: # not a DBAPI error, statement is present. 
# raise a StatementError if isinstance(orig, SQLAlchemyError) and statement: return StatementError( "(%s.%s) %s" % ( orig.__class__.__module__, orig.__class__.__name__, orig.args[0], ), statement, params, orig, hide_parameters=hide_parameters, code=orig.code, ismulti=ismulti, ) elif not isinstance(orig, dbapi_base_err) and statement: return StatementError( "(%s.%s) %s" % ( orig.__class__.__module__, orig.__class__.__name__, orig, ), statement, params, orig, hide_parameters=hide_parameters, ismulti=ismulti, ) glob = globals() for super_ in orig.__class__.__mro__: name = super_.__name__ if dialect: name = dialect.dbapi_exception_translation_map.get( name, name ) if name in glob and issubclass(glob[name], DBAPIError): cls = glob[name] break return cls( statement, params, orig, connection_invalidated=connection_invalidated, hide_parameters=hide_parameters, code=cls.code, ismulti=ismulti, ) def __reduce__(self) -> Union[str, Tuple[Any, ...]]: return ( self.__class__, ( self.statement, self.params, self.orig, self.hide_parameters, self.connection_invalidated, self.__dict__.get("code"), self.ismulti, ), {"detail": self.detail}, ) def __init__( self, statement: Optional[str], params: Optional[_AnyExecuteParams], orig: BaseException, hide_parameters: bool = False, connection_invalidated: bool = False, code: Optional[str] = None, ismulti: Optional[bool] = None, ): try: text = str(orig) except Exception as e: text = "Error in str() of DB-API-generated exception: " + str(e) StatementError.__init__( self, "(%s.%s) %s" % (orig.__class__.__module__, orig.__class__.__name__, text), statement, params, orig, hide_parameters, code=code, ismulti=ismulti, ) self.connection_invalidated = connection_invalidated @property def driver_exception(self) -> Exception: """The exception object originating from the driver (DBAPI) outside of SQLAlchemy. In the case of some asyncio dialects, special steps are taken to resolve the exception to what the third party driver has raised, even for SQLAlchemy dialects that include an "emulated" DBAPI exception hierarchy. For non-asyncio dialects, this attribute will be the same attribute as the :attr:`.StatementError.orig` attribute. For an asyncio dialect provided by SQLAlchemy, depending on if the dialect provides an "emulated" exception hierarchy or if the underlying DBAPI raises DBAPI-style exceptions, it will refer to either the :attr:`.EmulatedDBAPIException.driver_exception` attribute on the :class:`.EmulatedDBAPIException` that's thrown (such as when using asyncpg), or to the actual exception object thrown by the third party driver. .. versionadded:: 2.1 """ if self.orig is None: raise ValueError( "No original exception is present. Was this " "DBAPIError constructed without a driver error?" ) if isinstance(self.orig, EmulatedDBAPIException): return self.orig.driver_exception else: return self.orig
DBAPIError
python
pyqtgraph__pyqtgraph
pyqtgraph/parametertree/parameterTypes/basetypes.py
{ "start": 10482, "end": 14497 }
class ____(ParameterItem): """ Group parameters are used mainly as a generic parent item that holds (and groups!) a set of child parameters. It also provides a simple mechanism for displaying a button or combo that can be used to add new parameters to the group. """ def __init__(self, param, depth): ParameterItem.__init__(self, param, depth) self._initialFontPointSize = self.font(0).pointSize() self.updateDepth(depth) self.addItem = None if 'addText' in param.opts: addText = param.opts['addText'] if 'addList' in param.opts: self.addWidget = QtWidgets.QComboBox() self.addWidget.setSizeAdjustPolicy(QtWidgets.QComboBox.SizeAdjustPolicy.AdjustToContents) self.updateAddList() self.addWidget.currentIndexChanged.connect(self.addChanged) else: self.addWidget = QtWidgets.QPushButton(addText) self.addWidget.clicked.connect(self.addClicked) w = QtWidgets.QWidget() l = QtWidgets.QHBoxLayout() l.setContentsMargins(0, 0, 0, 0) w.setLayout(l) l.addWidget(self.addWidget) l.addStretch() self.addWidgetBox = w self.addItem = QtWidgets.QTreeWidgetItem([]) self.addItem.setFlags(QtCore.Qt.ItemFlag.ItemIsEnabled) self.addItem.depth = self.depth + 1 ParameterItem.addChild(self, self.addItem) self.addItem.setSizeHint(0, self.addWidgetBox.sizeHint()) self.optsChanged(self.param, self.param.opts) def pointSize(self): return self._initialFontPointSize def updateDepth(self, depth): """ Change set the item font to bold and increase the font size on outermost groups. """ for c in [0, 1]: font = self.font(c) font.setBold(True) if depth == 0: font.setPointSize(self.pointSize() + 1) self.setFont(c, font) self.titleChanged() # sets the size hint for column 0 which is based on the new font def addClicked(self): """Called when "add new" button is clicked The parameter MUST have an 'addNew' method defined. """ self.param.addNew() def addChanged(self): """Called when "add new" combo is changed The parameter MUST have an 'addNew' method defined. """ if self.addWidget.currentIndex() == 0: return typ = self.addWidget.currentText() self.param.addNew(typ) self.addWidget.setCurrentIndex(0) def treeWidgetChanged(self): ParameterItem.treeWidgetChanged(self) tw = self.treeWidget() if tw is None: return self.setFirstColumnSpanned(True) if self.addItem is not None: tw.setItemWidget(self.addItem, 0, self.addWidgetBox) self.addItem.setFirstColumnSpanned(True) def addChild(self, child): ## make sure added childs are actually inserted before add btn if self.addItem is not None: ParameterItem.insertChild(self, self.childCount() - 1, child) else: ParameterItem.addChild(self, child) def optsChanged(self, param, opts): ParameterItem.optsChanged(self, param, opts) if 'addList' in opts: self.updateAddList() if hasattr(self, 'addWidget'): if 'enabled' in opts: self.addWidget.setEnabled(opts['enabled']) if 'tip' in opts: self.addWidget.setToolTip(opts['tip']) def updateAddList(self): self.addWidget.blockSignals(True) try: self.addWidget.clear() self.addWidget.addItem(self.param.opts['addText']) for t in self.param.opts['addList']: self.addWidget.addItem(t) finally: self.addWidget.blockSignals(False)
GroupParameterItem
python
apache__airflow
providers/google/tests/unit/google/cloud/hooks/test_dataproc_metastore.py
{ "start": 14135, "end": 24676 }
class ____: def setup_method(self): with mock.patch( BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_no_default_project_id ): self.hook = DataprocMetastoreHook(gcp_conn_id=TEST_GCP_CONN_ID) @mock.patch(DATAPROC_METASTORE_STRING.format("DataprocMetastoreHook.get_dataproc_metastore_client")) def test_create_backup(self, mock_client) -> None: self.hook.create_backup( project_id=TEST_PROJECT_ID, region=TEST_REGION, service_id=TEST_SERVICE_ID, backup=TEST_BACKUP, backup_id=TEST_BACKUP_ID, ) mock_client.assert_called_once() mock_client.return_value.create_backup.assert_called_once_with( request=dict( parent=TEST_PARENT_SERVICES.format(TEST_PROJECT_ID, TEST_REGION, TEST_SERVICE_ID), backup=TEST_BACKUP, backup_id=TEST_BACKUP_ID, request_id=None, ), metadata=(), retry=DEFAULT, timeout=None, ) @mock.patch(DATAPROC_METASTORE_STRING.format("DataprocMetastoreHook.get_dataproc_metastore_client")) def test_create_metadata_import(self, mock_client) -> None: self.hook.create_metadata_import( project_id=TEST_PROJECT_ID, region=TEST_REGION, service_id=TEST_SERVICE_ID, metadata_import=TEST_METADATA_IMPORT, metadata_import_id=TEST_METADATA_IMPORT_ID, ) mock_client.assert_called_once() mock_client.return_value.create_metadata_import.assert_called_once_with( request=dict( parent=TEST_PARENT_SERVICES.format(TEST_PROJECT_ID, TEST_REGION, TEST_SERVICE_ID), metadata_import=TEST_METADATA_IMPORT, metadata_import_id=TEST_METADATA_IMPORT_ID, request_id=None, ), metadata=(), retry=DEFAULT, timeout=None, ) @mock.patch(DATAPROC_METASTORE_STRING.format("DataprocMetastoreHook.get_dataproc_metastore_client")) def test_create_service(self, mock_client) -> None: self.hook.create_service( region=TEST_REGION, project_id=TEST_PROJECT_ID, service=TEST_SERVICE, service_id=TEST_SERVICE_ID, ) mock_client.assert_called_once() mock_client.return_value.create_service.assert_called_once_with( request=dict( parent=TEST_PARENT.format(TEST_PROJECT_ID, TEST_REGION), service_id=TEST_SERVICE_ID, service=TEST_SERVICE, request_id=None, ), metadata=(), retry=DEFAULT, timeout=None, ) @mock.patch(DATAPROC_METASTORE_STRING.format("DataprocMetastoreHook.get_dataproc_metastore_client")) def test_delete_backup(self, mock_client) -> None: self.hook.delete_backup( project_id=TEST_PROJECT_ID, region=TEST_REGION, service_id=TEST_SERVICE_ID, backup_id=TEST_BACKUP_ID, ) mock_client.assert_called_once() mock_client.return_value.delete_backup.assert_called_once_with( request=dict( name=TEST_NAME_BACKUPS.format(TEST_PROJECT_ID, TEST_REGION, TEST_SERVICE_ID, TEST_BACKUP_ID), request_id=None, ), metadata=(), retry=DEFAULT, timeout=None, ) @mock.patch(DATAPROC_METASTORE_STRING.format("DataprocMetastoreHook.get_dataproc_metastore_client")) def test_delete_service(self, mock_client) -> None: self.hook.delete_service( project_id=TEST_PROJECT_ID, region=TEST_REGION, service_id=TEST_SERVICE_ID, ) mock_client.assert_called_once() mock_client.return_value.delete_service.assert_called_once_with( request=dict( name=TEST_PARENT_SERVICES.format(TEST_PROJECT_ID, TEST_REGION, TEST_SERVICE_ID), request_id=None, ), retry=DEFAULT, timeout=None, metadata=(), ) @mock.patch(DATAPROC_METASTORE_STRING.format("DataprocMetastoreHook.get_dataproc_metastore_client")) def test_export_metadata(self, mock_client) -> None: self.hook.export_metadata( destination_gcs_folder=TEST_DESTINATION_GCS_FOLDER, project_id=TEST_PROJECT_ID, region=TEST_REGION, service_id=TEST_SERVICE_ID, ) mock_client.assert_called_once() 
mock_client.return_value.export_metadata.assert_called_once_with( request=dict( destination_gcs_folder=TEST_DESTINATION_GCS_FOLDER, service=TEST_PARENT_SERVICES.format(TEST_PROJECT_ID, TEST_REGION, TEST_SERVICE_ID), request_id=None, database_dump_type=None, ), retry=DEFAULT, timeout=None, metadata=(), ) @mock.patch(DATAPROC_METASTORE_STRING.format("DataprocMetastoreHook.get_dataproc_metastore_client")) def test_get_service(self, mock_client) -> None: self.hook.get_service( project_id=TEST_PROJECT_ID, region=TEST_REGION, service_id=TEST_SERVICE_ID, ) mock_client.assert_called_once() mock_client.return_value.get_service.assert_called_once_with( request=dict( name=TEST_PARENT_SERVICES.format(TEST_PROJECT_ID, TEST_REGION, TEST_SERVICE_ID), ), metadata=(), retry=DEFAULT, timeout=None, ) @mock.patch(DATAPROC_METASTORE_STRING.format("DataprocMetastoreHook.get_dataproc_metastore_client")) def test_list_backups(self, mock_client) -> None: self.hook.list_backups( project_id=TEST_PROJECT_ID, region=TEST_REGION, service_id=TEST_SERVICE_ID, ) mock_client.assert_called_once() mock_client.return_value.list_backups.assert_called_once_with( request=dict( parent=TEST_PARENT_BACKUPS.format(TEST_PROJECT_ID, TEST_REGION, TEST_SERVICE_ID), page_size=None, page_token=None, filter=None, order_by=None, ), metadata=(), retry=DEFAULT, timeout=None, ) @mock.patch(DATAPROC_METASTORE_STRING.format("DataprocMetastoreHook.get_dataproc_metastore_client")) def test_restore_service(self, mock_client) -> None: self.hook.restore_service( project_id=TEST_PROJECT_ID, region=TEST_REGION, service_id=TEST_SERVICE_ID, backup_project_id=TEST_PROJECT_ID, backup_region=TEST_REGION, backup_service_id=TEST_SERVICE_ID, backup_id=TEST_BACKUP_ID, ) mock_client.assert_called_once() mock_client.return_value.restore_service.assert_called_once_with( request=dict( service=TEST_PARENT_SERVICES.format(TEST_PROJECT_ID, TEST_REGION, TEST_SERVICE_ID), backup=TEST_NAME_BACKUPS.format( TEST_PROJECT_ID, TEST_REGION, TEST_SERVICE_ID, TEST_BACKUP_ID ), restore_type=None, request_id=None, ), metadata=(), retry=DEFAULT, timeout=None, ) @mock.patch(DATAPROC_METASTORE_STRING.format("DataprocMetastoreHook.get_dataproc_metastore_client")) def test_update_service(self, mock_client) -> None: self.hook.update_service( project_id=TEST_PROJECT_ID, region=TEST_REGION, service_id=TEST_SERVICE_ID, service=TEST_SERVICE_TO_UPDATE, update_mask=TEST_UPDATE_MASK, ) mock_client.assert_called_once() mock_client.return_value.update_service.assert_called_once_with( request=dict( service=TEST_SERVICE_TO_UPDATE, update_mask=TEST_UPDATE_MASK, request_id=None, ), retry=DEFAULT, timeout=None, metadata=(), ) @pytest.mark.parametrize( ("partitions_input", "partitions"), [ ([TEST_PARTITION_NAME], f"'{TEST_PARTITION_NAME}'"), ([TEST_SUBPARTITION_NAME], f"'{TEST_SUBPARTITION_NAME}'"), ( [TEST_PARTITION_NAME, TEST_SUBPARTITION_NAME], f"'{TEST_PARTITION_NAME}', '{TEST_SUBPARTITION_NAME}'", ), ([TEST_PARTITION_NAME, TEST_PARTITION_NAME], f"'{TEST_PARTITION_NAME}'"), ], ) @mock.patch( DATAPROC_METASTORE_STRING.format("DataprocMetastoreHook.get_dataproc_metastore_client_v1beta") ) def test_list_hive_partitions(self, mock_client, partitions_input, partitions) -> None: self.hook.list_hive_partitions( project_id=TEST_PROJECT_ID, service_id=TEST_SERVICE_ID, region=TEST_REGION, table=TEST_TABLE_ID, partition_names=partitions_input, ) mock_client.assert_called_once() mock_client.return_value.query_metadata.assert_called_once_with( request=dict( service=TEST_PARENT_SERVICES.format(TEST_PROJECT_ID, 
TEST_REGION, TEST_SERVICE_ID), query=TEST_PARTITIONS_QUERY.format(TEST_TABLE_ID, partitions), ), ) @pytest.mark.parametrize("partitions", [[], None]) @mock.patch( DATAPROC_METASTORE_STRING.format("DataprocMetastoreHook.get_dataproc_metastore_client_v1beta") ) def test_list_hive_partitions_empty_list(self, mock_client, partitions) -> None: self.hook.list_hive_partitions( project_id=TEST_PROJECT_ID, service_id=TEST_SERVICE_ID, region=TEST_REGION, table=TEST_TABLE_ID, partition_names=partitions, ) mock_client.assert_called_once() mock_client.return_value.query_metadata.assert_called_once_with( request=dict( service=TEST_PARENT_SERVICES.format(TEST_PROJECT_ID, TEST_REGION, TEST_SERVICE_ID), query=TEST_PARTITIONS_QUERY_ALL.format(TEST_TABLE_ID), ), )
TestDataprocMetastoreWithoutDefaultProjectIdHook
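The tests above all follow one pattern: patch the hook's client factory, call the hook method, then assert the exact request payload handed to the client. A minimal stdlib-only sketch of that pattern, with a hypothetical Hook class standing in for DataprocMetastoreHook:

from unittest import mock

class Hook:  # hypothetical stand-in for the real hook
    def get_client(self):
        raise RuntimeError("would reach the network")

    def get_service(self, name):
        client = self.get_client()
        return client.get_service(request=dict(name=name))

with mock.patch.object(Hook, "get_client") as mock_client:
    Hook().get_service(name="projects/p/locations/r/services/s")
    # the factory was called once, and the client received the expected payload
    mock_client.assert_called_once()
    mock_client.return_value.get_service.assert_called_once_with(
        request=dict(name="projects/p/locations/r/services/s")
    )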
python
pytorch__pytorch
torch/distributed/elastic/multiprocessing/api.py
{ "start": 32913, "end": 38495 }
class ____(PContext): """``PContext`` holding worker processes invoked as a binary.""" def __init__( self, name: str, entrypoint: str, args: dict[int, tuple], envs: dict[int, dict[str, str]], logs_specs: LogsSpecs, log_line_prefixes: dict[int, str] | None = None, numa_options: NumaOptions | None = None, duplicate_stdout_filters: list[str] | None = None, duplicate_stderr_filters: list[str] | None = None, ): super().__init__( name, entrypoint, args, envs, logs_specs, log_line_prefixes, duplicate_stdout_filters, duplicate_stderr_filters, ) # state vector; tracks which local ranks have not yet finished self._running_local_ranks: set[int] = set(range(self.nprocs)) self._failures: dict[int, ProcessFailure] = {} self.subprocess_handlers: dict[int, SubprocessHandler] = {} self._numa_options: NumaOptions | None = numa_options def _start(self): if self.subprocess_handlers: raise ValueError( "The subprocess handlers are already initialized. Most likely the start method was called twice." ) self.subprocess_handlers = { local_rank: get_subprocess_handler( entrypoint=self.entrypoint, # type: ignore[arg-type] # entrypoint is always a str args=self.args[local_rank], env=self.envs[local_rank], stdout=self.stdouts[local_rank], stderr=self.stderrs[local_rank], local_rank_id=local_rank, numa_options=self._numa_options, ) for local_rank in range(self.nprocs) } def _capture_process_failures(self, done_local_ranks: set[int]): for local_rank in self._running_local_ranks: handler = self.subprocess_handlers[local_rank] exitcode = handler.proc.poll() if exitcode is not None: done_local_ranks.add(local_rank) if exitcode != 0: # failed or signaled self._failures[local_rank] = ProcessFailure( local_rank=local_rank, pid=handler.proc.pid, exitcode=exitcode, error_file=self.error_files[local_rank], ) # else: --> succeeded; nothing to do def _poll(self) -> RunProcsResult | None: done_local_ranks: set[int] = set() self._capture_process_failures(done_local_ranks) self._running_local_ranks.difference_update(done_local_ranks) # if ALL procs are finished or ANY have failed if not self._running_local_ranks or self._failures: self.close() # terminate all running procs self._capture_process_failures( done_local_ranks ) # log sigterms and sigkill exit codes in the self._failures for bookkeeping purposes result = RunProcsResult( failures=self._failures, stdouts=self.stdouts, stderrs=self.stderrs, ) if result.is_failed(): first_failure = min(result.failures.values(), key=lambda f: f.timestamp) logger.error( "failed (exitcode: %s) local_rank: %s (pid: %s) of binary: %s", first_failure.exitcode, first_failure.local_rank, first_failure.pid, self.entrypoint, ) else: # Populate return with dummy values. 
This provides consistency with MultiprocessingHandler result.return_values = dict.fromkeys(range(self.nprocs)) return result else: # there are no failures and procs still running return None def pids(self) -> dict[int, int]: return { local_rank: sh.proc.pid for local_rank, sh in self.subprocess_handlers.items() } def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None: if not self.subprocess_handlers: return for handler in self.subprocess_handlers.values(): if handler.proc.poll() is None: logger.warning( "Sending process %s closing signal %s", handler.proc.pid, death_sig.name, ) handler.close(death_sig=death_sig) end = time.monotonic() + timeout for handler in self.subprocess_handlers.values(): time_to_wait = end - time.monotonic() if time_to_wait <= 0: break try: handler.proc.wait(time_to_wait) except subprocess.TimeoutExpired: # Ignore the timeout expired exception, since # the child process will be forcefully terminated via SIGKILL pass for handler in self.subprocess_handlers.values(): if handler.proc.poll() is None: logger.warning( "Unable to shutdown process %s via %s, forcefully exiting via %s", handler.proc.pid, death_sig, _get_kill_signal(), ) handler.close(death_sig=_get_kill_signal()) handler.proc.wait()
SubprocessContext
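The _poll/_capture_process_failures machinery above boils down to repeatedly calling Popen.poll() on every still-running child and recording nonzero exit codes. A toy, torch-free sketch of that loop (not the torch API):

import subprocess
import sys
import time

procs = {
    rank: subprocess.Popen([sys.executable, "-c", f"import sys; sys.exit({rank % 2})"])
    for rank in range(4)
}
running, failures = set(procs), {}
while running:
    for rank in list(running):
        code = procs[rank].poll()  # None while still running, else the exit code
        if code is not None:
            running.discard(rank)
            if code != 0:  # failed or signaled
                failures[rank] = code
    time.sleep(0.05)
print(failures)  # e.g. {1: 1, 3: 1}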
python
huggingface__transformers
tests/models/dinov3_convnext/test_modeling_dinov3_convnext.py
{ "start": 1394, "end": 5456 }
class ____: def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=False, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_labels = num_labels self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return DINOv3ConvNextConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, labels): model = DINOv3ConvNextModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # expected last hidden states: (batch_size, 1 + (H // 32) * (W // 32), hidden_size) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, 1 + self.image_size // 32 * self.image_size // 32, self.hidden_sizes[-1], ), ) def create_and_check_backbone(self, config, pixel_values, labels): model = DINOv3ConvNextBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify hidden states self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) expected_size = self.image_size // (4 * (2 ** (len(config.depths) - 1))) self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], expected_size, expected_size] ) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) # verify backbone works with out_features=None config.out_features = None model = DINOv3ConvNextBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], expected_size, expected_size] ) # verify channels self.parent.assertEqual(len(model.channels), 1) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch
DINOv3ConvNextModelTester
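The expected_size expression in create_and_check_backbone encodes the usual ConvNeXt-style downsampling, as I read it: a stem with stride 4 followed by one stride-2 reduction per additional stage, so with four stages the deepest feature map is image_size // 32. A quick arithmetic check of that formula with the tester's defaults:

image_size, depths = 32, [2, 2, 3, 2]
# stem: /4, then len(depths) - 1 further stride-2 stages: /4 * 2**3 == /32
expected_size = image_size // (4 * 2 ** (len(depths) - 1))
assert expected_size == 1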
python
pypa__pip
src/pip/_vendor/pygments/filters/__init__.py
{ "start": 38098, "end": 39322 }
class ____(Filter): """Gobbles source code lines (eats initial characters). This filter drops the first ``n`` characters off every line of code. This may be useful when the source code fed to the lexer is indented by a fixed amount of space that isn't desired in the output. Options accepted: `n` : int The number of characters to gobble. .. versionadded:: 1.2 """ def __init__(self, **options): Filter.__init__(self, **options) self.n = get_int_opt(options, 'n', 0) def gobble(self, value, left): if left < len(value): return value[left:], 0 else: return '', left - len(value) def filter(self, lexer, stream): n = self.n left = n # How many characters left to gobble. for ttype, value in stream: # Remove ``left`` tokens from first line, ``n`` from all others. parts = value.split('\n') (parts[0], left) = self.gobble(parts[0], left) for i in range(1, len(parts)): (parts[i], left) = self.gobble(parts[i], n) value = '\n'.join(parts) if value != '': yield ttype, value
GobbleFilter
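A usage sketch of the filter above, assuming the standalone pygments package is installed (the copy vendored in pip is not a public API). GobbleFilter is registered under the entry-point name 'gobble', so it can be attached by name to strip a fixed indent from every line:

from pygments.lexers import PythonLexer

lexer = PythonLexer()
lexer.add_filter("gobble", n=4)  # eat four leading characters per line
source = "    x = 1\n    y = 2\n"
print("".join(value for _, value in lexer.get_tokens(source)))
# x = 1
# y = 2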
python
readthedocs__readthedocs.org
readthedocs/api/v3/serializers.py
{ "start": 1976, "end": 2238 }
class ____(serializers.Serializer): def _absolute_url(self, path): scheme = "http" if settings.DEBUG else "https" domain = settings.PRODUCTION_DOMAIN return urllib.parse.urlunparse((scheme, domain, path, "", "", ""))
BaseLinksSerializer
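The helper above is plain urllib.parse.urlunparse with Django settings plugged into the scheme and netloc slots. The same construction with stdlib pieces only, where DOMAIN is a made-up value standing in for settings.PRODUCTION_DOMAIN:

import urllib.parse

DEBUG = False
DOMAIN = "readthedocs.example.org"  # illustrative, not the real setting

def absolute_url(path: str) -> str:
    scheme = "http" if DEBUG else "https"
    # (scheme, netloc, path, params, query, fragment)
    return urllib.parse.urlunparse((scheme, DOMAIN, path, "", "", ""))

assert absolute_url("/api/v3/projects/") == "https://readthedocs.example.org/api/v3/projects/"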
python
bokeh__bokeh
src/bokeh/client/states.py
{ "start": 2518, "end": 2769 }
class ____(State): ''' The ``ClientConnection`` connected to a Bokeh server, and has received an ACK from it. ''' async def run(self, connection: ClientConnection) -> None: await connection._handle_messages()
CONNECTED_AFTER_ACK
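This is the state-object pattern: each connection state is a class whose async run() drives the connection until a transition. A stripped-down sketch with illustrative names (not Bokeh's API):

import asyncio

class Connected:
    async def run(self, connection):
        # in the real client this loops on the websocket until disconnect
        await connection.handle_messages()

class FakeConnection:
    async def handle_messages(self):
        print("handling messages")

asyncio.run(Connected().run(FakeConnection()))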
python
bokeh__bokeh
src/bokeh/core/serialization.py
{ "start": 5178, "end": 5437 }
class ____: """ A mixin for making a type serializable. """ def to_serializable(self, serializer: Serializer) -> AnyRep: """ Converts this object to a serializable representation. """ raise NotImplementedError() ObjID = int
Serializable
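A minimal sketch of the mixin contract above: the base class raises, subclasses override to_serializable, and a serializer dispatches through the shared method. Names other than the mixin itself are illustrative:

from typing import Any

class Serializable:
    def to_serializable(self, serializer: "Serializer") -> Any:
        raise NotImplementedError()

class Serializer:
    def encode(self, obj: Serializable) -> Any:
        return obj.to_serializable(self)

class Point(Serializable):
    def __init__(self, x: float, y: float) -> None:
        self.x, self.y = x, y

    def to_serializable(self, serializer: "Serializer") -> Any:
        return {"type": "Point", "x": self.x, "y": self.y}

print(Serializer().encode(Point(1.0, 2.0)))  # {'type': 'Point', 'x': 1.0, 'y': 2.0}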
python
HypothesisWorks__hypothesis
hypothesis-python/tests/nocover/test_stateful.py
{ "start": 2654, "end": 3009 }
class ____(RuleBasedStateMachine): nodes = Bundle("nodes") @rule(target=nodes, source=st.lists(nodes)) def bunch(self, source): return source @rule(source=nodes) def shallow(self, source): def depth(ls): return 0 if not ls else 1 + max(map(depth, ls)) assert depth(source) <= 5
RoseTreeStateMachine
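The Bundle mechanism at work here: values returned by a @rule(target=...) are fed back as inputs to later rules, which is how the nested-list "rose trees" are grown step by step. A runnable variant of the same pattern (requires the hypothesis package):

import hypothesis.strategies as st
from hypothesis.stateful import Bundle, RuleBasedStateMachine, rule

class Trees(RuleBasedStateMachine):
    nodes = Bundle("nodes")

    @rule(target=nodes, source=st.lists(nodes))
    def bunch(self, source):
        # each step may nest previously produced values one level deeper
        return source

    @rule(source=nodes)
    def observe(self, source):
        assert isinstance(source, list)

TestTrees = Trees.TestCase  # collected by unittest/pytest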
python
numba__numba
numba/tests/test_array_reductions.py
{ "start": 36480, "end": 37977 }
class ____(MemoryLeakMixin, TestCase): # int64, size 0 zero_size = np.arange(0) def check_exception(self, pyfunc, msg): cfunc = jit(nopython=True)(pyfunc) # make sure NumPy raises consistently/no behaviour change with self.assertRaises(BaseException): pyfunc(self.zero_size) # check numba impl raises expected with self.assertRaises(ValueError) as e: cfunc(self.zero_size) self.assertIn(msg, str(e.exception)) @classmethod def install(cls): fn_to_msg = dict() empty_seq = "attempt to get {0} of an empty sequence" op_no_ident = ("zero-size array to reduction operation " "{0}") for x in [array_argmax, array_argmax_global, array_argmin, array_argmin_global]: fn_to_msg[x] = empty_seq for x in [array_max, array_max_global, array_min, array_min_global]: fn_to_msg[x] = op_no_ident name_template = "test_zero_size_array_{0}" for fn, msg in fn_to_msg.items(): test_name = name_template.format(fn.__name__) lmsg = msg.format(fn.__name__) lmsg = lmsg.replace('array_','').replace('_global','') def test_fn(self, func=fn, message=lmsg): self.check_exception(func, message) setattr(cls, test_name, test_fn) TestArrayReductionsExceptions.install() if __name__ == '__main__': unittest.main()
TestArrayReductionsExceptions
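The install() classmethod above hinges on binding loop variables through default arguments (func=fn, message=lmsg); without that, every generated test would close over the final loop values. A stdlib-only sketch of the same dynamic-test-generation trick:

import unittest

class TestSquares(unittest.TestCase):
    pass

for n, expected in [(2, 4), (3, 9)]:
    def test(self, n=n, expected=expected):  # defaults freeze the loop values
        self.assertEqual(n * n, expected)
    setattr(TestSquares, f"test_square_{n}", test)

if __name__ == "__main__":
    unittest.main()  # runs test_square_2 and test_square_3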
python
numba__numba
numba/tests/test_parallel_backend.py
{ "start": 23921, "end": 31573 }
class ____(ThreadLayerTestHelper): """ Checks Numba's behaviour in various situations involving GNU OpenMP and fork """ _DEBUG = False def test_check_threading_layer_is_gnu(self): runme = """if 1: from numba.np.ufunc import omppool assert omppool.openmp_vendor == 'GNU' """ cmdline = [sys.executable, '-c', runme] out, err = self.run_cmd(cmdline) def test_par_parent_os_fork_par_child(self): """ Whilst normally valid, this actually isn't for Numba invariant of OpenMP Checks SIGABRT is received. """ body = """if 1: X = np.arange(1000000.) Y = np.arange(1000000.) Z = busy_func(X, Y) pid = os.fork() if pid == 0: Z = busy_func(X, Y) else: os.wait() """ runme = self.template % body cmdline = [sys.executable, '-c', runme] try: out, err = self.run_cmd(cmdline) except AssertionError as e: self.assertIn("failed with code -6", str(e)) def test_par_parent_implicit_mp_fork_par_child(self): """ Implicit use of multiprocessing fork context. Does this: 1. Start with OpenMP 2. Fork to processes using OpenMP (this is invalid) 3. Joins fork 4. Check the exception pushed onto the queue that is a result of catching SIGTERM coming from the C++ aborting on illegal fork pattern for GNU OpenMP """ body = """if 1: mp = multiprocessing.get_context('fork') X = np.arange(1000000.) Y = np.arange(1000000.) q = mp.Queue() # Start OpenMP runtime on parent via parallel function Z = busy_func(X, Y, q) # fork() underneath with no exec, will abort proc = mp.Process(target = busy_func, args=(X, Y, q)) proc.start() err = q.get() assert "Caught SIGTERM" in str(err) """ runme = self.template % body cmdline = [sys.executable, '-c', runme] out, err = self.run_cmd(cmdline) if self._DEBUG: print(out, err) @linux_only def test_par_parent_explicit_mp_fork_par_child(self): """ Explicit use of multiprocessing fork context. Does this: 1. Start with OpenMP 2. Fork to processes using OpenMP (this is invalid) 3. Joins fork 4. Check the exception pushed onto the queue that is a result of catching SIGTERM coming from the C++ aborting on illegal fork pattern for GNU OpenMP """ body = """if 1: X = np.arange(1000000.) Y = np.arange(1000000.) ctx = multiprocessing.get_context('fork') q = ctx.Queue() # Start OpenMP runtime on parent via parallel function Z = busy_func(X, Y, q) # fork() underneath with no exec, will abort proc = ctx.Process(target = busy_func, args=(X, Y, q)) proc.start() proc.join() err = q.get() assert "Caught SIGTERM" in str(err) """ runme = self.template % body cmdline = [sys.executable, '-c', runme] out, err = self.run_cmd(cmdline) if self._DEBUG: print(out, err) def test_par_parent_mp_spawn_par_child_par_parent(self): """ Explicit use of multiprocessing spawn, this is safe. Does this: 1. Start with OpenMP 2. Spawn to processes using OpenMP 3. Join spawns 4. Run some more OpenMP """ body = """if 1: X = np.arange(1000000.) Y = np.arange(1000000.) 
ctx = multiprocessing.get_context('spawn') q = ctx.Queue() # Start OpenMP runtime and run on parent via parallel function Z = busy_func(X, Y, q) procs = [] for x in range(20): # start a lot to try and get overlap ## fork() + exec() to run some OpenMP on children proc = ctx.Process(target = busy_func, args=(X, Y, q)) procs.append(proc) sys.stdout.flush() sys.stderr.flush() proc.start() [p.join() for p in procs] try: q.get(False) except multiprocessing.queues.Empty: pass else: raise RuntimeError("Queue was not empty") # Run some more OpenMP on parent Z = busy_func(X, Y, q) """ runme = self.template % body cmdline = [sys.executable, '-c', runme] out, err = self.run_cmd(cmdline) if self._DEBUG: print(out, err) def test_serial_parent_implicit_mp_fork_par_child_then_par_parent(self): """ Implicit use of multiprocessing (will be fork, but cannot declare that in Py2.7 as there's no process launch context). Does this: 1. Start with no OpenMP 2. Fork to processes using OpenMP 3. Join forks 4. Run some OpenMP """ body = """if 1: X = np.arange(1000000.) Y = np.arange(1000000.) q = multiprocessing.Queue() # this is ok procs = [] for x in range(10): # fork() underneath with but no OpenMP in parent, this is ok proc = multiprocessing.Process(target = busy_func, args=(X, Y, q)) procs.append(proc) proc.start() [p.join() for p in procs] # and this is still ok as the OpenMP happened in forks Z = busy_func(X, Y, q) try: q.get(False) except multiprocessing.queues.Empty: pass else: raise RuntimeError("Queue was not empty") """ runme = self.template % body cmdline = [sys.executable, '-c', runme] out, err = self.run_cmd(cmdline) if self._DEBUG: print(out, err) @linux_only def test_serial_parent_explicit_mp_fork_par_child_then_par_parent(self): """ Explicit use of multiprocessing 'fork'. Does this: 1. Start with no OpenMP 2. Fork to processes using OpenMP 3. Join forks 4. Run some OpenMP """ body = """if 1: X = np.arange(1000000.) Y = np.arange(1000000.) ctx = multiprocessing.get_context('fork') q = ctx.Queue() # this is ok procs = [] for x in range(10): # fork() underneath with but no OpenMP in parent, this is ok proc = ctx.Process(target = busy_func, args=(X, Y, q)) procs.append(proc) proc.start() [p.join() for p in procs] # and this is still ok as the OpenMP happened in forks Z = busy_func(X, Y, q) try: q.get(False) except multiprocessing.queues.Empty: pass else: raise RuntimeError("Queue was not empty") """ runme = self.template % body cmdline = [sys.executable, '-c', runme] out, err = self.run_cmd(cmdline) if self._DEBUG: print(out, err) @skip_parfors_unsupported @skip_no_tbb
TestForkSafetyIssues
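The safe pattern these tests keep returning to: use the 'spawn' start method so children re-execute the interpreter instead of fork()-ing a parent that already started an OpenMP (or any threaded) runtime. A minimal stdlib sketch of that usage:

import multiprocessing

def work(n: int) -> int:
    return n * n

if __name__ == "__main__":
    # 'spawn' starts a fresh interpreter per worker - no inherited thread state
    ctx = multiprocessing.get_context("spawn")
    with ctx.Pool(2) as pool:
        print(pool.map(work, range(4)))  # [0, 1, 4, 9]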
python
squidfunk__mkdocs-material
material/plugins/social/plugin.py
{ "start": 2687, "end": 46662 }
class ____(BasePlugin[SocialConfig]): supports_multiple_instances = True # Manifest manifest: dict[str, str] = {} # Initialize plugin def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Initialize incremental builds self.is_serve = False # Determine whether we're serving the site, and thus doing an incremental # build, and initialize two thread pools for card generation, because it's # split into two stages: rendering of layers and composition. We use two # thread pools, one for each stage, as we need to make sure that all layers # of a card are rendered before we compose the card itself. At the same time # we want to off-load as much as possible onto worker threads, as card # generation is a problem that can be perfectly solved in parallel. Thus, # we leverage the file system to cache the generated images, so we don't # re-generate the exact same images again and again, making successive # builds of large sites much faster. def on_startup(self, *, command, dirty): self.is_serve = command == "serve" # Initialize thread pool for cards self.card_pool = ThreadPoolExecutor(self.config.concurrency) self.card_pool_jobs: dict[str, Future] = {} # Initialize thread pool for card layers self.card_layer_pool = ThreadPoolExecutor(self.config.concurrency) self.card_layer_pool_jobs: dict[str, Future] = {} # Resolve and load manifest and initialize environment def on_config(self, config): if not self.config.enabled: return # Resolve cache directory (once) - this is necessary, so the cache is # always relative to the configuration file, and thus project, and not # relative to the current working directory, or it would not work with # the projects plugin. path = os.path.abspath(self.config.cache_dir) if path != self.config.cache_dir: self.config.cache_dir = os.path.join( os.path.dirname(config.config_file_path), os.path.normpath(self.config.cache_dir) ) # Ensure cache directory exists os.makedirs(self.config.cache_dir, exist_ok = True) # Initialize manifest self.manifest_file = os.path.join( self.config.cache_dir, "manifest.json" ) # Load manifest if it exists and the cache should be used if os.path.isfile(self.manifest_file) and self.config.cache: try: with open(self.manifest_file) as f: self.manifest = json.load(f) except: pass # Initialize lock for synchronizing downloading of fonts self.lock = Lock() # Initialize card layouts and variables self.card_layouts: dict[str, Layout] = {} self.card_variables: dict[str, list[list[str]]] = {} # Initialize card environment self.card_env = Environment() self.card_env.filters["x"] = x_filter # Always print a warning when debug mode is active if self.config.debug: log.warning("Debug mode is enabled for \"social\" plugin.") # By default, debug mode is disabled when the documentation is # built, but not when it is served, for a better user experience if not self.is_serve and not self.config.debug_on_build: self.config.debug = False # Check if site URL is defined if not config.site_url: log.warning( "The \"site_url\" option is not set. The cards are generated, " "but not linked, so they won't be visible on social media." 
) # Ensure card layouts are not copied to the site directory def on_files(self, files, *, config): if not self.config.enabled: return # We must exclude all files related to layouts from here on, so MkDocs # doesn't copy them to the site directory when the project is built for file in files: # As of MkDocs 1.6, abs_src_path is optional for generated files, # so we need to exclude them - see https://t.ly/zRYj7 if not file.abs_src_path: continue # Exclude files from layout directory if file.abs_src_path.startswith(_templates_dirpath()): file.inclusion = InclusionLevel.EXCLUDED # Generate card as soon as metadata is available (run latest) - run this # after all other plugins, so they can alter the card configuration @event_priority(-100) def on_page_markdown(self, markdown, *, page, config, files): if not self.config.enabled: return # Skip if cards should not be generated if self._is_excluded(page): return # Resolve card layout - we also preload the layout here, so we're not # triggering multiple concurrent loads in the worker threads name = self._config("cards_layout", page) self._resolve_layout(name, config) # Spawn concurrent job to generate card for page and add future to # job dictionary, as it returns the file we need to copy later self.card_pool_jobs[page.file.src_uri] = self.card_pool.submit( self._generate, name, page, config ) # Generate card metadata (run earlier) - don't run this too late, as we # want plugins like the minify plugin to pick up the HTML we inject @event_priority(50) def on_post_page(self, output, *, page, config): if not self.config.enabled: return # Skip if cards should not be generated if self._is_excluded(page): return # Reconcile concurrent jobs - we need to wait for the card job to finish # before we can copy the generated files to the output directory. If an # exception occurred in one of the jobs, we either log it as configured # by the user, or raise it, so the build fails. 
future = self.card_pool_jobs[page.file.src_uri] if future.exception(): e = future.exception() if self.config.log and isinstance(e, PluginError): log.log(self.config.log_level, e) return # Otherwise throw error raise e else: file: File = future.result() file.copy_file() # Resolve card layout name = self._config("cards_layout", page) layout, _ = self._resolve_layout(name, config) # Stop if no tags are present or site URL is not set if not layout.tags or not config.site_url: return # Resolve image dimensions and curate image metadata width, height = get_size(layout) image = { "url": posixpath.join(config.site_url, file.url), "type": "image/png", "width": width, "height": height } # Find offset of closing head tag, so we can insert meta tags before # it - a bit hacky, but much faster than regular expressions at = output.find("</head>") return "\n".join([ output[:at], "\n".join([ f"<meta property=\"{property}\" content=\"{content}\" />" for property, content in _replace( layout.tags, self.card_env, config, page = page, image = image, layout = self._config("cards_layout_options", page), ).items() if content ]), output[at:] ]) # Save manifest after build def on_post_build(self, *, config): if not self.config.enabled: return # Save manifest if cache should be used if self.config.cache: with open(self.manifest_file, "w") as f: f.write(json.dumps(self.manifest, indent = 2, sort_keys = True)) # Add custom layout directory to watched files def on_serve(self, server, *, config, builder): path = os.path.abspath(self.config.cards_layout_dir) if os.path.isdir(path): server.watch(path, recursive = True) # Reconcile jobs (run latest) - all other plugins do not depend on the # generated cards, so we can run this after all of them @event_priority(-100) def on_shutdown(self): if not self.config.enabled: return # Shutdown thread pools - if we're on Python 3.9 and above, cancel all # pending futures that have not yet been scheduled for pool in [self.card_layer_pool, self.card_pool]: if sys.version_info >= (3, 9): pool.shutdown(cancel_futures = True) else: pool.shutdown() # Save manifest if cache should be used if self.manifest and self.config.cache: with open(self.manifest_file, "w") as f: f.write(json.dumps(self.manifest, indent = 2, sort_keys = True)) # ------------------------------------------------------------------------- # Check if the given page is excluded - giving the author the option to # include and exclude specific pages is important, as it allows to control # which pages should generate social cards, and which shouldn't. Different # cards can be built by using multiple instances of the plugin. def _is_excluded(self, page: Page): path = page.file.src_path # Check if card generation is disabled for the given page if not self._config("cards", page): return True # Check if page matches one of the inclusion patterns if self.config.cards_include: for pattern in self.config.cards_include: if fnmatch(page.file.src_uri, pattern): return False # Page is not included log.debug(f"Excluding page '{path}' due to inclusion patterns") return True # Check if page matches one of the exclusion patterns for pattern in self.config.cards_exclude: if fnmatch(page.file.src_uri, pattern): log.debug(f"Excluding page '{path}' due to exclusion patterns") return True # Page is not excluded return False # ------------------------------------------------------------------------- # Generate card for the given page - generation of cards does not depend on # anything else than the page content (incl. 
metadata) and configuration, # which is why it is an embarrassingly parallel problem and can be solved # by delegating the generation of each card to a thread pool def _generate(self, name: str, page: Page, config: MkDocsConfig): layout, variables = self._resolve_layout(name, config) # Each card can consist of multiple layers, many of which are likely # the same across cards (like background or logo layers). Some of the # input values to generate a card may be dependent on author-provided # data, e.g., the site description or card title that is sourced from # front matter. Additionally, layouts may allow to define arbitrary # text boxes with author-provided metadata like tags or categories. # Thus, we generate a hash for each card, which is based on the layers # and the values of all variables that are used to generate the card. layers: dict[str, Layer] = {} for layer, templates in zip(layout.layers, variables): fingerprints = [self.config, layer] # Compute fingerprints for each layer for template in templates: template = _compile(template, self.card_env) fingerprints.append(template.render( config = config, page = page, layout = self._config("cards_layout_options", page) )) # Compute digest of fingerprints layers[_digest(fingerprints)] = layer # Compute digest of all fingerprints - we use this value to check if # the exact same card was already generated and cached hash = _digest([layout, *list(layers)]) # Determine part of path we need to replace - this depends on whether # we're using directory URLs and if the page is an index page or not suffix = ".html" if config.use_directory_urls and not page.is_index: suffix = "/index.html" # Compute path to card, which is sourced from the cache directory, and # generate file to register it with MkDocs as soon as it was generated path = page.file.dest_uri.replace(suffix, ".png") file = self._path_to_file(path, config) # Check if file hash changed, so we need to re-generate the card - if # the hash didn't change, we can just return the existing file prev = self.manifest.get(file.url, "") if hash == prev and os.path.isfile(file.abs_src_path): return file # Check if the required dependencies for rendering are available, which # is, at the absolute minimum, the 'pillow' package, and raise an error # to the caller, so he can decide what to do with the error. The caller # can treat this as a warning or an error to abort the build. if import_errors: # docs = os.path.relpath(config.docs_dir) # path = os.path.relpath(page.file.abs_src_path, docs) # raise PluginError( # f"Couldn't render card for '{path}' in '{docs}': install " # f"required dependencies – pip install 'mkdocs-material[imaging]'" # ) # @todo improve formatting of error handling raise PluginError( "Required dependencies of \"social\" plugin not found:\n" + str("\n".join(map(lambda x: "- " + x, import_errors))) + "\n\n" + "--> Install with: pip install \"mkdocs-material[imaging]\"" ) if cairosvg_error: # @todo improve formatting of error handling raise PluginError( "\"cairosvg\" Python module is installed, but it crashed with:\n" + cairosvg_error + "\n\n" + "--> Check out the troubleshooting guide: https://t.ly/MfX6u" ) # Spawn concurrent jobs to render layers - we only need to render layers # that we haven't already dispatched, reducing work by deduplication for h, layer in layers.items(): sentinel = Future() # We need to use a hack here to avoid locking the thread pool while # we check if the layer was already dispatched. If we don't do this, # layers might be dispatched multiple times. 
The trick is to use a # sentinel value to check if the layer was already dispatched. if sentinel == self.card_layer_pool_jobs.setdefault(h, sentinel): self.card_layer_pool_jobs[h] = self.card_layer_pool.submit( self._render, layer, page, config ) # Reconcile concurrent jobs to render layers and compose card - since # layers are rendered in parallel, we can compose the card as soon as # all layers have been rendered. For this, we await each future to # resolve with the image of the rendered layer. image = Image.new(mode = "RGBA", size = get_size(layout)) for h, layer in layers.items(): image.alpha_composite( self.card_layer_pool_jobs[h].result(), get_offset(layer, image) ) # If debug mode is enabled, render overlay if self.config.debug: image = self._render_overlay(layout, image) # Save composed image to cache - the caller must copy the image from # the cache, so we don't need to worry about concurrent access os.makedirs(os.path.dirname(file.abs_src_path), exist_ok = True) image.save(file.abs_src_path) # Update manifest by associating file with hash self.manifest[file.url] = hash # Return file for generated card return file # Render layer - this is the core of the plugin, which renders a single # layer of a card. Order is: background, icon, and typography. def _render(self, layer: Layer, page: Page, config: MkDocsConfig): image = Image.new(mode = "RGBA", size = get_size(layer)) layer = _replace( layer, self.card_env, config, page = page, layout = self._config("cards_layout_options", page) ) # Render background, icon, and typography image = self._render_background(layer, image) image = self._render_icon(layer, image, config) image = self._render_typography(layer, image) # Return image with layer return image # Render layer background def _render_background(self, layer: Layer, input: _Image): background = layer.background # If given, load background image and resize it proportionally to cover # the entire area while retaining the aspect ratio of the input image if background.image: if not os.path.isfile(background.image): raise PluginError(f"Couldn't find image '{background.image}'") # Open file and convert SVGs to PNGs with open(background.image, "rb") as f: data = f.read() if background.image.endswith(".svg"): data = svg2png(data, output_width = input.width) # Resize image to cover entire area image = Image.open(BytesIO(data)).convert("RGBA") input.alpha_composite(_resize_cover(image, input)) # If given, fill background color - this is done after the image is # loaded to allow for transparent tints. How awesome is that? if background.color: color = background.color if color == "transparent": return input # Create image filled with background color image = Image.new(mode = "RGBA", size = input.size, color = color) input.alpha_composite(image) # Return image with background return input # Render layer icon def _render_icon(self, layer: Layer, input: _Image, config: MkDocsConfig): icon = layer.icon if not icon.value: return input # Resolve icon by searching all configured theme directories and apply # the fill color before rendering, if given. Note that the fill color # must be converted to rgba() function syntax, or opacity will not work # correctly. This way, we don't need to use the fill-opacity property. 
data = self._resolve_icon(icon.value, config) if icon.color: (r, g, b, *a) = ImageColor.getrgb(icon.color) opacity = a[0] / 255 if a else 1 # Compute and replace fill color fill = f"rgba({r}, {g}, {b}, {opacity})" data = data.replace("<svg", f"<svg fill=\"{fill}\"") # Rasterize vector image given by icon to match the size of the # input image, resize it and render it on top of the input image image = Image.open(BytesIO( svg2png(data.encode("utf-8"), output_width = input.width) )) input.alpha_composite(_resize_contain(image.convert("RGBA"), input)) # Return image with icon return input # Render layer typography def _render_typography(self, layer: Layer, input: _Image): typography = layer.typography if not typography.content: return input # Retrieve font family and font style family = typography.font.family variant = typography.font.variant style = typography.font.style # Resolve and load font and compute metrics path = self._resolve_font(family, style, variant) current, spacing = _metrics(path, typography.line, input) font = ImageFont.truetype(path, current) # Create image and initialize drawing context image = Image.new(mode = "RGBA", size = input.size) context = ImageDraw.Draw(image) # Compute length of whitespace and ellipsis - in the next step, we will # distribute the words across the lines we have available, which means # we need to compute the length of each word and intersperse it with # whitespace. Note that lengths of words are perfectly additive, so we # can compute the length of a line by adding the lengths of all words # and the whitespace between them. space = context.textlength(" ", font = font) ellipsis = context.textlength("...", font = font) # Initialize lists to hold the lengths of words and indexes of lines. # Tracking line indexes allows us to improve splitting using heuristics. lengths: list[int] = [] indexes, current = [0], 0 # Split words at whitespace, and successively add words to the current # line. For every other than the first word, account for the whitespace # between words. If the next word would exceed the width of the input # image, and thus overflow the line, start a new one. words = re.split(r"\s+", unescape(typography.content)) for word in words: length = context.textlength(word, font = font) lengths.append(length) # Start new line if current line overflows whitespace = space if current else 0 if current + whitespace + length > input.width: indexes.append(len(lengths) - 1) current = length # Add word to current line else: current += whitespace + length # Add terminating index, if not already present if len(lengths) != indexes[-1]: indexes.append(len(lengths)) # If the number of lines exceeds the maximum amount we are able to # render, either shrink or truncate the text and add an ellipsis amount = typography.line.amount if amount < len(indexes) - 1: # If overflow mode is set to 'shrink', decrease the font size and # try to render the typography again to see if it fits overflow = typography.overflow if overflow == "shrink": typography.line.amount += 1 # Render layer with new typography metrics by calling this # function recursively and returning immediately from it return self._render_typography(layer, input) # Determine last and penultimate line indexes indexes = indexes[:amount + 1] p, q = indexes[-2:] # Compute the length of the last line, and check whether we can add # the ellipsis after the last word. If not, replace the last word. 
current = sum(lengths[p:q]) + (q - p) * space if current + ellipsis < input.width: q += 1 # Update line indexes and replace word with ellipsis indexes[-1] = q words[q - 1] = "..." # If there are exactly two lines, check if we can improve splitting by # moving the last word of the first line to the last line elif len(indexes) == 3: p, q, r = indexes[-3:] # Create two configurations of lines, one with the last word of the # first line moved to the last line, and one without the change a = [len(" ".join(l)) for l in [words[p:q], words[q:r]]] b = [len(" ".join(l)) for l in [words[p:q - 1], words[q - 1:r]]] # Compute standard deviation of line lengths before and after the # change, and if the standard deviation decreases, move the word if stdev(b) < stdev(a): indexes[-2] -= 1 # Compute anchor and deduce alignment, as well as offset. The anchor # is computed as a string of two characters, where the first character # denotes the horizontal alignment and the second character denotes # the vertical alignment. anchor = _anchor(typography.align) # Compute horizontal alignment if anchor[0] == "l": align, x = "left", 0 elif anchor[0] == "m": align, x = "center", input.width >> 1 else: align, x = "right", input.width >> 0 # Compute vertical alignment if anchor[1] == "a": y = 0 elif anchor[1] == "m": y = input.height >> 1 else: y = input.height >> 0 # Join words with whitespace and lines with line breaks text = "\n".join([ " ".join(words[p:q]) for p, q in zip(indexes, indexes[1:]) ]) # Draw text onto image context.text( (x, y), text, font = font, anchor = anchor, spacing = spacing, fill = typography.color, align = align ) # Return image with typography input.alpha_composite(image) return input # Render overlay for debugging def _render_overlay(self, layout: Layout, input: _Image): path = self._resolve_font("Roboto", "Regular") font = ImageFont.truetype(path, 12) # Create image and initialize drawing context image = Image.new(mode = "RGBA", size = input.size) context = ImageDraw.Draw(image) # Draw overlay grid fill = self.config.debug_color if self.config.debug_grid: step = self.config.debug_grid_step for i in range(0, input.width, step): for j in range(0, input.height, step): context.ellipse( ((i - 1, j - 1), (i + 1, j + 1)), fill = fill ) # Compute luminosity of debug color and use it to determine the color # of the text that will be drawn on top of the debug color (r, g, b, *_) = ImageColor.getrgb(fill) color = "black" if r * 0.299 + g * 0.587 + b * 0.114 > 150 else "white" # Draw overlay outline for each layer for i, layer in enumerate(layout.layers): x, y = get_offset(layer, image) w, h = get_size(layer) # Draw overlay outline context.rectangle(outline = fill, xy = (x, y, min(x + w, input.width - 1), min(y + h, input.height - 1) )) # Assemble text and compute its width and height - we only use the # coordinates denoting the width and height of the text, as we need # to compute the coordinates of the text box manually in order to # have the rectangle align perfectly with the outline text = f"{i} – {x}, {y}" (_, _, x1, y1) = context.textbbox((x, y), text, font = font) # Draw text on a small rectangle in the top left corner of the # layer denoting the number of the layer and its offset context.rectangle(fill = fill, xy = (x, y, x1 + 8, y1 + 4)) context.text((x + 4, y + 2), text, font = font, fill = color) # Return image with overlay input.alpha_composite(image) return input # ------------------------------------------------------------------------- # Resolve layout - authors can specify a custom 
directory for layouts in # the configuration, which is checked prior to the layout directory shipped # with this plugin. If the layout cannot be resolved in any of the known # directories, the plugin must abort with an error. def _resolve_layout(self, name: str, config: MkDocsConfig): name, _ = os.path.splitext(name) if name in self.card_layouts: return self.card_layouts[name], self.card_variables[name] # If the author specified a custom directory, try to resolve the layout # from this directory first, otherwise fall back to the default for base in [ os.path.relpath(self.config.cards_layout_dir), _templates_dirpath() ]: path = os.path.join(base, f"{name}.yml") path = os.path.normpath(path) # Skip if layout does not exist and try next directory if not os.path.isfile(path): continue # Open file and parse as YAML with open(path, encoding = "utf-8-sig") as f: layout: Layout = Layout(config_file_path = path) try: layout.load_dict(yaml.load(f, SafeLoader) or {}) # The layout could not be loaded because of a syntax error, # which we display to the author with a nice error message except Exception as e: path = os.path.relpath(path, base) raise PluginError( f"Error reading layout file '{path}' in '{base}':\n" f"{e}" ) # Validate layout and abort if errors occurred errors, warnings = layout.validate() for _, w in warnings: log.warning(w) for _, e in errors: path = os.path.relpath(path, base) raise PluginError( f"Error reading layout file '{path}' in '{base}':\n" f"{e}" ) # Store layout and variables self.card_layouts[name] = layout self.card_variables[name] = [] # Extract variables for each layer from layout for layer in layout.layers: variables = _extract(layer, self.card_env, config) self.card_variables[name].append(variables) # Set default values for layer size, if not given for key, value in layer.size.items(): if value == 0: layer.size[key] = layout.size[key] # Abort, since we're done break # Abort if the layout could not be resolved if name not in self.card_layouts: raise PluginError(f"Couldn't find layout '{name}'") # Return layout and variables return self.card_layouts[name], self.card_variables[name] # Resolve icon with given name - this function searches for the icon in all # known theme directories, including custom directories specified by the # author, which allows for using custom icons in cards. If the icon cannot # be resolved, the plugin must abort with an error. def _resolve_icon(self, name: str, config: MkDocsConfig): for base in config.theme.dirs: path = os.path.join(base, ".icons", f"{name}.svg") path = os.path.normpath(path) # Skip if icon does not exist and try next directory if not os.path.isfile(path): continue # Open and return icon with open(path, encoding = "utf-8") as f: return f.read() # Abort if the icon could not be resolved raise PluginError(f"Couldn't find icon '{name}'") # Resolve font family with specific style - if we haven't already done it, # the font family is first downloaded from Google Fonts and the styles are # saved to the cache directory. If the font cannot be resolved, the plugin # must abort with an error. def _resolve_font(self, family: str, style: str, variant = ""): path = os.path.join(self.config.cache_dir, "fonts", family) # Fetch font family, if it hasn't been fetched yet - we use a lock to # synchronize access, so the font is not downloaded multiple times, but # all other threads wait for the font being available. 
This is also why # we need the double path check, which makes sure that we only use the # lock when we actually need to download a font that doesn't exist. If # we already downloaded it, we don't want to block at all. if not os.path.isdir(path): with self.lock: if not os.path.isdir(path): self._fetch_font_from_google_fonts(family) # Assemble fully qualified style - see https://t.ly/soDF0 if variant: style = f"{variant} {style}" # Check for availability of font style list = sorted(os.listdir(path)) for file in list: name, _ = os.path.splitext(file) if name == style: return os.path.join(path, file) # Find regular variant of font family - we cannot rely on the fact that # fonts always have a single regular variant - some of them have several # of them, potentially prefixed with "Condensed" etc. For this reason we # use the first font we find if we find no regular one. fallback = "" for file in list: name, _ = os.path.splitext(file) # 1. Fallback: use first font if not fallback: fallback = name # 2. Fallback: use regular font - use the shortest one, i.e., prefer # "10pt Regular" over "10pt Condensed Regular". This is a heuristic. if "Regular" in name: if not fallback or len(name) < len(fallback): fallback = name # Fall back to regular font (guess if there are multiple) return self._resolve_font(family, fallback) # ------------------------------------------------------------------------- # Fetch font family from Google Fonts def _fetch_font_from_google_fonts(self, family: str): path = os.path.join(self.config.cache_dir, "fonts") # Download manifest from Google Fonts - Google returns JSON with syntax # errors, so we just treat the response as plain text and parse out all # URLs to font files, as we're going to rename them anyway. This should # be more resilient than trying to correct the JSON syntax. 
url = f"https://fonts.google.com/download/list?family={family}" res = requests.get(url) # Ensure that the download succeeded if res.status_code != 200: raise PluginError( f"Couldn't find font family '{family}' on Google Fonts " f"({res.status_code}: {res.reason})" ) # Extract font URLs from manifest for match in re.findall( r"\"(https:(?:.*?)\.[ot]tf)\"", str(res.content) ): with requests.get(match) as res: res.raise_for_status() # Construct image font for analysis by directly reading the # contents from the response without priorily writing to a # temporary file (like we did before), as this might lead to # problems on Windows machines, see https://t.ly/LiF_k with BytesIO(res.content) as f: font = ImageFont.truetype(f) # Extract font family name and style name, style = font.getname() name = " ".join([name.replace(family, ""), style]).strip() # Write file to cache directory target = os.path.join(path, family, f"{name}.ttf") write_file(res.content, target) # ------------------------------------------------------------------------- # Retrieve configuration value - each page can override certain parts of # the site configuration, depending on the type and structure of the value def _config(self, name: str, page: Page): meta = page.meta.get("social", {}) # Primitive values: choose page- over site-level configuration if isinstance(self.config[name], (bool, str, int, float)): return meta.get(name, self.config[name]) # Dictionary values: merge site- with page-level configuration if isinstance(self.config[name], (dict)): return { **self.config[name], **meta.get(name, {}) } # Create a file for the given path def _path_to_file(self, path: str, config: MkDocsConfig): assert path.endswith(".png") return File( posixpath.join(self.config.cards_dir, path), self.config.cache_dir, config.site_dir, False ) # ----------------------------------------------------------------------------- # Helper functions # ----------------------------------------------------------------------------- # Compute a stable hash from an object - since we're doing compositing, we can # leverage caching to omit re-generating layers when their parameters stay the # same. Additionally, we can identify identical layers between images, e.g., # background, logos, or avatars, but also unchanged text. Note that we need to # convert the data to a string prior to hashing, because configuration objects # are inherently unstable, always resulting in new hashes. 
def _digest(data: object): return sha1(pickle.dumps(str(data))).hexdigest() # ----------------------------------------------------------------------------- # Extract all variables recursively def _extract(data: any, env: Environment, config: MkDocsConfig): # Traverse configuration or dictionary if isinstance(data, (Config, dict)): return [ variable for value in data.values() for variable in _extract(value, env, config) ] # Traverse list elif isinstance(data, list): return [ variable for value in data for variable in _extract(value, env, config) ] # Retrieve variables from string elif isinstance(data, str): if find_undeclared_variables(env.parse(data)): return [data] # Return nothing return [] # Replace all variables recursively and return a copy of the given data def _replace(data: any, env: Environment, config: MkDocsConfig, **kwargs): # Traverse configuration or dictionary if isinstance(data, (Config, dict)): data = copy(data) for key, value in data.items(): data[key] = _replace(value, env, config, **kwargs) # Traverse list elif isinstance(data, list): return [ _replace(value, env, config, **kwargs) for value in data ] # Retrieve variables from string elif isinstance(data, str): return _compile(data, env).render( config = config, **kwargs ) or None # Return data return data # Compile template and cache it indefinitely @functools.lru_cache(maxsize = None) def _compile(data: str, env: Environment): return env.from_string(html.unescape(data)) # Compute absolute path to internal templates directory, # we need to do it this way to assure compatibility with Python 3.8, # and also to allow users to install their Python site-packages # to a different mount root than their documentation - see https://t.ly/GMeYP def _templates_dirpath(): return os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates") # ----------------------------------------------------------------------------- # Resize image to match the size of the reference image and align it to the # center of the reference image so that it is fully covered def _resize_cover(image: _Image, ref: _Image): ratio = max( ref.width / image.width, ref.height / image.height ) # Compute aspect ratios of both images and choose the larger one, then # resize the image so that it covers the entire reference image image = image.resize(( int(image.width * ratio), int(image.height * ratio) )) # Align image to the center of the reference image - we also need to crop # the image if it's larger than the given reference image return image.crop(( image.width - ref.width >> 1, image.height - ref.height >> 1, image.width + ref.width >> 1, image.height + ref.height >> 1 )) # Resize image to match the size of the reference image and align it to the # center of the reference image so that it is fully contained def _resize_contain(image: _Image, ref: _Image): ratio = min( ref.width / image.width, ref.height / image.height ) # Resize image according to minimum ratio image = image.resize(( int(image.width * ratio), int(image.height * ratio) )) # Create a blank image and paste the resized image into it blank = Image.new(mode = "RGBA", size = ref.size) blank.paste(image, ( ref.width - image.width >> 1, ref.height - image.height >> 1 )) # Return resized image return blank # ----------------------------------------------------------------------------- # Resolve font metrics for given truetype font - this function computes the # font size and spacing between lines based on the number of lines and height. 
# In order to omit rounding errors, we compute the ascender and descender based # on a font size of 1,000. def _metrics(path: str, line: Line, ref: _Image): font = ImageFont.truetype(path, 1000) ascender, descender = font.getmetrics() # It would be too complex to let the author define the font size, since this # would involve a lot of fiddling to find the right value. Instead, we let # the author define the number of lines and the line height, and we compute # the font size from that. This is much more intuitive. As a basis, we use # the ascender as the actual line height and also add the descender to # account for the last line. It's no secret that correctly handling font # metrics is super tricky - see https://bit.ly/31u9bh6 extent = line.amount * ascender + 1 * descender # Now, we still need to account for spacing between lines, which is why we # take the number of lines - 1, and multiply that with the line height we # computed from the ascender. We add this to the extent we computed before, # which we use as a basis for the final font size. extent += (line.amount - 1) * (line.height - 1) * ascender size = (1000 * ref.height) / extent # From this, we can compute the spacing between lines, and we're done. We # then return both, the font size and spacing between lines. spacing = (line.height - 1) * ascender * size / 1000 return int(size), spacing # Compute anchor, determining the alignment of text relative to the given # coordinates, with the default being "top left" - see https://bit.ly/3NEfr07 def _anchor(data: str): axis = re.split(r"\s+", data) # Determine anchor on x-axis if "start" in axis: anchor = "l" elif "end" in axis: anchor = "r" elif "center" in axis: anchor = "m" else: anchor = "l" # Determine anchor on y-axis if "top" in axis: anchor += "a" elif "bottom" in axis: anchor += "d" elif "center" in axis: anchor += "m" else: anchor += "a" # Return anchor return anchor # ----------------------------------------------------------------------------- # Data # ----------------------------------------------------------------------------- # Set up logging log = logging.getLogger("mkdocs.material.social")
SocialPlugin
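A minimal sketch of the "cover" resize arithmetic from _resize_cover above, rewritten against Pillow directly; the reference and input sizes below are made-up test values, and this assumes Pillow is installed.

from PIL import Image

def resize_cover(image, ref):
    # Scale by the larger axis ratio so the result fully covers `ref`.
    ratio = max(ref.width / image.width, ref.height / image.height)
    image = image.resize((int(image.width * ratio), int(image.height * ratio)))
    # Center-crop to the reference size; `// 2` matches the `>> 1` shifts above.
    left = (image.width - ref.width) // 2
    top = (image.height - ref.height) // 2
    return image.crop((left, top, left + ref.width, top + ref.height))

ref = Image.new("RGB", (1200, 630))
out = resize_cover(Image.new("RGB", (800, 800)), ref)
assert out.size == ref.size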
python
django__django
tests/migrations/test_migrations_manual_porting/0001_initial.py
{ "start": 43, "end": 344 }
class ____(migrations.Migration): initial = True operations = [ migrations.CreateModel( "SomeModel", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=255)), ], ), ]
Migration
python
PrefectHQ__prefect
src/prefect/server/schemas/actions.py
{ "start": 34848, "end": 35516 }
class ____(ActionBaseModel): """Data used by the Prefect REST API to update a work queue.""" name: Optional[str] = Field(None) description: Optional[str] = Field(None) is_paused: bool = Field( default=False, description="Whether or not the work queue is paused." ) concurrency_limit: Optional[NonNegativeInteger] = Field(None) priority: Optional[PositiveInteger] = Field(None) last_polled: Optional[DateTime] = Field(None) # DEPRECATED filter: Optional[schemas.core.QueueFilter] = Field( None, description="DEPRECATED: Filter criteria for the work queue.", deprecated=True, )
WorkQueueUpdate
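A generic sketch of the same "all-optional update action" pattern with plain pydantic v2; the QueueUpdate model and its fields are hypothetical stand-ins, not Prefect's API.

from typing import Optional
from pydantic import BaseModel, Field

class QueueUpdate(BaseModel):
    # Every field defaults to None so callers can patch any subset.
    name: Optional[str] = Field(default=None)
    is_paused: Optional[bool] = Field(default=None)
    concurrency_limit: Optional[int] = Field(default=None, ge=0)

patch = QueueUpdate(is_paused=True)
# exclude_unset keeps only the fields the caller actually supplied,
# which is exactly what a PATCH-style handler wants to apply.
assert patch.model_dump(exclude_unset=True) == {"is_paused": True}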
python
python-poetry__poetry
tests/conftest.py
{ "start": 3871, "end": 4537 }
class ____(BaseConfig): _config_source: DictConfigSource _auth_config_source: DictConfigSource def get(self, setting_name: str, default: Any = None) -> Any: self.merge(self._config_source.config) self.merge(self._auth_config_source.config) return super().get(setting_name, default=default) def raw(self) -> dict[str, Any]: self.merge(self._config_source.config) self.merge(self._auth_config_source.config) return super().raw() def all(self) -> dict[str, Any]: self.merge(self._config_source.config) self.merge(self._auth_config_source.config) return super().all()
Config
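The test double above merges its dict-backed sources into the base config before every read. A self-contained sketch of that merge-on-read idea, with hypothetical names:

class LayeredConfig:
    def __init__(self, *sources):
        # Later sources win, mirroring how the auth source overlays the main one.
        self._sources = sources

    def get(self, key, default=None):
        merged = {}
        for source in self._sources:
            merged.update(source)
        return merged.get(key, default)

cfg = LayeredConfig({"cache-dir": "/tmp"}, {"cache-dir": "/var/cache"})
assert cfg.get("cache-dir") == "/var/cache"
assert cfg.get("missing", "fallback") == "fallback"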
python
sqlalchemy__sqlalchemy
test/sql/test_insert.py
{ "start": 38526, "end": 42889 }
class ____( _InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL ): __dialect__ = "default_enhanced" def test_from_bound_col_value(self): mytable = self.tables.mytable # from_dml_column() refers to another column in SET, then the # same parameter is rendered stmt = mytable.insert().values( name="some name", description=from_dml_column(mytable.c.name) ) self.assert_compile( stmt, "INSERT INTO mytable (name, description) VALUES (:name, :name)", checkparams={"name": "some name"}, ) self.assert_compile( stmt, "INSERT INTO mytable (name, description) VALUES (?, ?)", checkpositional=("some name", "some name"), dialect="sqlite", ) def test_from_static_col_value(self): mytable = self.tables.mytable # from_dml_column() refers to a column not in SET, then it # raises for INSERT stmt = mytable.insert().values( description=from_dml_column(mytable.c.name) ) with expect_raises_message( exc.CompileError, "Can't resolve referenced column name in INSERT statement: 'name'", ): stmt.compile() def test_from_sql_default(self): """test combinations with a column that has a SQL default""" mytable = self.tables.mytable_w_sql_default stmt = mytable.insert().values( description=from_dml_column(mytable.c.created_at) ) self.assert_compile( stmt, "INSERT INTO mytable_w_sql_default (description, created_at) " "VALUES (now(), now())", ) stmt = mytable.insert().values( description=cast(from_dml_column(mytable.c.created_at), String) + " o clock" ) self.assert_compile( stmt, "INSERT INTO mytable_w_sql_default (description, created_at) " "VALUES ((CAST(now() AS VARCHAR) || :param_1), now())", ) stmt = mytable.insert().values( name="some name", description=cast(from_dml_column(mytable.c.created_at), String) + " " + from_dml_column(mytable.c.name), ) self.assert_compile( stmt, "INSERT INTO mytable_w_sql_default " "(name, description, created_at) VALUES " "(:name, (CAST(now() AS VARCHAR) || :param_1 || :name), now())", checkparams={"name": "some name", "param_1": " "}, ) self.assert_compile( stmt, "INSERT INTO mytable_w_sql_default " "(name, description, created_at) VALUES " "(?, (CAST(CURRENT_TIMESTAMP AS VARCHAR) || ? || ?), " "CURRENT_TIMESTAMP)", checkpositional=("some name", " ", "some name"), dialect="sqlite", ) def test_from_sql_expr(self): mytable = self.tables.mytable stmt = mytable.insert().values( name=mytable.c.name + "lala", description=from_dml_column(mytable.c.name), ) self.assert_compile( stmt, "INSERT INTO mytable (name, description) VALUES " "((mytable.name || :name_1), (mytable.name || :name_1))", checkparams={"name_1": "lala"}, ) self.assert_compile( stmt, "INSERT INTO mytable (name, description) VALUES " "((mytable.name || ?), (mytable.name || ?))", checkpositional=("lala", "lala"), dialect="sqlite", ) def test_from_sql_expr_multiple_dmlcol(self): mytable = self.tables.mytable stmt = mytable.insert().values( myid=5, name=mytable.c.name + "lala", description=from_dml_column(mytable.c.name) + " " + cast(from_dml_column(mytable.c.myid), String), ) self.assert_compile( stmt, "INSERT INTO mytable (myid, name, description) VALUES " "(:myid, (mytable.name || :name_1), " "((mytable.name || :name_1) || :param_1 || " "CAST(:myid AS VARCHAR)))", checkparams={"myid": 5, "name_1": "lala", "param_1": " "}, )
FromDMLInsertTest
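The assertions above rely on compiling a statement and inspecting its SQL and parameters; here is the underlying pattern with stock SQLAlchemy (from_dml_column itself is specific to the suite under test, so this sketch sticks to a plain INSERT):

from sqlalchemy import Column, Integer, MetaData, String, Table, insert

metadata = MetaData()
mytable = Table(
    "mytable",
    metadata,
    Column("myid", Integer, primary_key=True),
    Column("name", String(30)),
)

stmt = insert(mytable).values(name="some name")
compiled = stmt.compile()
print(str(compiled))  # INSERT INTO mytable (name) VALUES (:name)
assert compiled.params == {"name": "some name"}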
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/internal/filtering.py
{ "start": 1706, "end": 14637 }
class ____(NamedTuple): """Return constraints to the appropriate strategy, and the predicate if needed. For example:: integers().filter(lambda x: x >= 0) -> {"min_value": 0"}, None integers().filter(lambda x: x >= 0 and x % 7) -> {"min_value": 0}, lambda x: x % 7 At least in principle - for now we usually return the predicate unchanged if needed. We have a separate get-predicate frontend for each "group" of strategies; e.g. for each numeric type, for strings, for bytes, for collection sizes, etc. """ constraints: dict[str, Any] predicate: Predicate | None @classmethod def unchanged(cls, predicate: Predicate) -> "ConstructivePredicate": return cls({}, predicate) def __repr__(self) -> str: fn = get_pretty_function_description(self.predicate) return f"{self.__class__.__name__}(constraints={self.constraints!r}, predicate={fn})" ARG = object() def convert(node: ast.AST, argname: str) -> object: if isinstance(node, ast.Name): if node.id != argname: raise ValueError("Non-local variable") return ARG if isinstance(node, ast.Call): if ( isinstance(node.func, ast.Name) and node.func.id == "len" and len(node.args) == 1 ): # error unless comparison is to the len *of the lambda arg* return convert(node.args[0], argname) return ast.literal_eval(node) def comp_to_constraints(x: ast.AST, op: ast.AST, y: ast.AST, *, argname: str) -> dict: a = convert(x, argname) b = convert(y, argname) num = (int, float) if not (a is ARG and isinstance(b, num)) and not (isinstance(a, num) and b is ARG): # It would be possible to work out if comparisons between two literals # are always true or false, but it's too rare to be worth the complexity. # (and we can't even do `arg == arg`, because what if it's NaN?) raise ValueError("Can't analyse this comparison") of_len = {"len": True} if isinstance(x, ast.Call) or isinstance(y, ast.Call) else {} if isinstance(op, ast.Lt): if a is ARG: return {"max_value": b, "exclude_max": True, **of_len} return {"min_value": a, "exclude_min": True, **of_len} elif isinstance(op, ast.LtE): if a is ARG: return {"max_value": b, **of_len} return {"min_value": a, **of_len} elif isinstance(op, ast.Eq): if a is ARG: return {"min_value": b, "max_value": b, **of_len} return {"min_value": a, "max_value": a, **of_len} elif isinstance(op, ast.GtE): if a is ARG: return {"min_value": b, **of_len} return {"max_value": a, **of_len} elif isinstance(op, ast.Gt): if a is ARG: return {"min_value": b, "exclude_min": True, **of_len} return {"max_value": a, "exclude_max": True, **of_len} raise ValueError("Unhandled comparison operator") # e.g. ast.Ne def merge_preds(*con_predicates: ConstructivePredicate) -> ConstructivePredicate: # This function is just kinda messy. Unfortunately the neatest way # to do this is just to roll out each case and handle them in turn. 
base = { "min_value": -math.inf, "max_value": math.inf, "exclude_min": False, "exclude_max": False, } predicate = None for kw, p in con_predicates: assert ( not p or not predicate or p is predicate ), "Can't merge two partially-constructive preds" predicate = p or predicate if "min_value" in kw: if kw["min_value"] > base["min_value"]: base["exclude_min"] = kw.get("exclude_min", False) base["min_value"] = kw["min_value"] elif kw["min_value"] == base["min_value"]: base["exclude_min"] |= kw.get("exclude_min", False) if "max_value" in kw: if kw["max_value"] < base["max_value"]: base["exclude_max"] = kw.get("exclude_max", False) base["max_value"] = kw["max_value"] elif kw["max_value"] == base["max_value"]: base["exclude_max"] |= kw.get("exclude_max", False) has_len = {"len" in kw for kw, _ in con_predicates if kw} assert len(has_len) <= 1, "can't mix numeric with length constraints" if has_len == {True}: base["len"] = True if not base["exclude_min"]: del base["exclude_min"] if base["min_value"] == -math.inf: del base["min_value"] if not base["exclude_max"]: del base["exclude_max"] if base["max_value"] == math.inf: del base["max_value"] return ConstructivePredicate(base, predicate) def numeric_bounds_from_ast( tree: ast.AST, argname: str, fallback: ConstructivePredicate ) -> ConstructivePredicate: """Take an AST; return a ConstructivePredicate. >>> lambda x: x >= 0 {"min_value": 0}, None >>> lambda x: x < 10 {"max_value": 10, "exclude_max": True}, None >>> lambda x: len(x) >= 5 {"min_value": 5, "len": True}, None >>> lambda x: x >= y {}, lambda x: x >= y See also https://greentreesnakes.readthedocs.io/en/latest/ """ if isinstance(tree, ast.Compare): ops = tree.ops vals = tree.comparators comparisons = [(tree.left, ops[0], vals[0])] for i, (op, val) in enumerate(zip(ops[1:], vals[1:], strict=True), start=1): comparisons.append((vals[i - 1], op, val)) bounds = [] for comp in comparisons: try: constraints = comp_to_constraints(*comp, argname=argname) # Because `len` could be redefined in the enclosing scope, we *always* # have to apply the condition as a filter, in addition to rewriting. pred = fallback.predicate if "len" in constraints else None bounds.append(ConstructivePredicate(constraints, pred)) except ValueError: bounds.append(fallback) return merge_preds(*bounds) if isinstance(tree, ast.BoolOp) and isinstance(tree.op, ast.And): return merge_preds( *(numeric_bounds_from_ast(node, argname, fallback) for node in tree.values) ) return fallback def get_numeric_predicate_bounds(predicate: Predicate) -> ConstructivePredicate: """Shared logic for understanding numeric bounds. We then specialise this in the other functions below, to ensure that e.g. all the values are representable in the types that we're planning to generate so that the strategy validation doesn't complain. 
""" unchanged = ConstructivePredicate.unchanged(predicate) if ( isinstance(predicate, partial) and not predicate.keywords and ( len(predicate.args) == 1 or (predicate.args[0] is Placeholder and len(predicate.args) == 2) ) ): if len(predicate.args) == 1: arg = predicate.args[0] func = predicate.func else: # pragma: no cover # Python 3.14+ only assert predicate.args[0] is Placeholder arg = predicate.args[1] func = { # reverses the table below; eq is unchanged operator.lt: operator.gt, operator.le: operator.ge, operator.ge: operator.le, operator.gt: operator.lt, }.get(predicate.func, predicate.func) assert func not in (min_len, max_len) # sanity-check; these are private if ( (isinstance(arg, Decimal) and Decimal.is_snan(arg)) or not isinstance(arg, (int, float, Fraction, Decimal)) or math.isnan(arg) ): return unchanged options = { # We're talking about op(arg, x) - the reverse of our usual intuition! operator.lt: {"min_value": arg, "exclude_min": True}, # lambda x: arg < x operator.le: {"min_value": arg}, # lambda x: arg <= x operator.eq: {"min_value": arg, "max_value": arg}, # lambda x: arg == x operator.ge: {"max_value": arg}, # lambda x: arg >= x operator.gt: {"max_value": arg, "exclude_max": True}, # lambda x: arg > x # Special-case our default predicates for length bounds min_len: {"min_value": arg, "len": True}, max_len: {"max_value": arg, "len": True}, } if func in options: return ConstructivePredicate(options[func], None) # This section is a little complicated, but stepping through with comments should # help to clarify it. We start by finding the source code for our predicate and # parsing it to an abstract syntax tree; if this fails for any reason we bail out # and fall back to standard rejection sampling (a running theme). try: if predicate.__name__ == "<lambda>": source = lambda_description(predicate) else: source = inspect.getsource(predicate) tree: ast.AST = ast.parse(source) except Exception: return unchanged # Dig down to the relevant subtree - our tree is probably a Module containing # either a FunctionDef, or an Expr which in turn contains a lambda definition. while isinstance(tree, ast.Module) and len(tree.body) == 1: tree = tree.body[0] while isinstance(tree, ast.Expr): tree = tree.value if isinstance(tree, ast.Lambda) and len(tree.args.args) == 1: return numeric_bounds_from_ast(tree.body, tree.args.args[0].arg, unchanged) elif isinstance(tree, ast.FunctionDef) and len(tree.args.args) == 1: if len(tree.body) != 1 or not isinstance(tree.body[0], ast.Return): # If the body of the function is anything but `return <expr>`, # i.e. as simple as a lambda, we can't process it (yet). 
return unchanged argname = tree.args.args[0].arg body = tree.body[0].value assert isinstance(body, ast.AST) return numeric_bounds_from_ast(body, argname, unchanged) return unchanged def get_integer_predicate_bounds(predicate: Predicate) -> ConstructivePredicate: constraints, predicate = get_numeric_predicate_bounds(predicate) if "min_value" in constraints: if constraints["min_value"] == -math.inf: del constraints["min_value"] elif math.isinf(constraints["min_value"]): return ConstructivePredicate({"min_value": 1, "max_value": -1}, None) elif constraints["min_value"] != int(constraints["min_value"]): constraints["min_value"] = ceil(constraints["min_value"]) elif constraints.get("exclude_min", False): constraints["min_value"] = int(constraints["min_value"]) + 1 if "max_value" in constraints: if constraints["max_value"] == math.inf: del constraints["max_value"] elif math.isinf(constraints["max_value"]): return ConstructivePredicate({"min_value": 1, "max_value": -1}, None) elif constraints["max_value"] != int(constraints["max_value"]): constraints["max_value"] = floor(constraints["max_value"]) elif constraints.get("exclude_max", False): constraints["max_value"] = int(constraints["max_value"]) - 1 kw_categories = {"min_value", "max_value", "len"} constraints = {k: v for k, v in constraints.items() if k in kw_categories} return ConstructivePredicate(constraints, predicate) def get_float_predicate_bounds(predicate: Predicate) -> ConstructivePredicate: constraints, predicate = get_numeric_predicate_bounds(predicate) if "min_value" in constraints: min_value = constraints["min_value"] constraints["min_value"] = float(constraints["min_value"]) if min_value < constraints["min_value"] or ( min_value == constraints["min_value"] and constraints.get("exclude_min", False) ): constraints["min_value"] = next_up(constraints["min_value"]) if "max_value" in constraints: max_value = constraints["max_value"] constraints["max_value"] = float(constraints["max_value"]) if max_value > constraints["max_value"] or ( max_value == constraints["max_value"] and constraints.get("exclude_max", False) ): constraints["max_value"] = next_down(constraints["max_value"]) constraints = { k: v for k, v in constraints.items() if k in {"min_value", "max_value"} } return ConstructivePredicate(constraints, predicate) def max_len(size: int, element: Collection[object]) -> bool: return len(element) <= size def min_len(size: int, element: Collection[object]) -> bool: return size <= len(element)
ConstructivePredicate
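A stripped-down version of the source-to-AST step: recover a lower bound from a `lambda x: x >= n` predicate using only the standard library. The real code above also handles functools.partial, len(), chained comparisons, and `and`; this sketch handles just the one shape.

import ast
import inspect
import textwrap

def min_bound(fn):
    """Best-effort: return n if fn looks like `lambda x: x >= n`, else None."""
    try:
        tree = ast.parse(textwrap.dedent(inspect.getsource(fn)))
    except (OSError, SyntaxError, TypeError):
        return None
    for node in ast.walk(tree):
        if isinstance(node, ast.Lambda) and isinstance(node.body, ast.Compare):
            comp = node.body
            if isinstance(comp.ops[0], ast.GtE) and isinstance(
                comp.comparators[0], ast.Constant
            ):
                return comp.comparators[0].value
    return None

pred = lambda x: x >= 5
assert min_bound(pred) == 5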
python
pytorch__pytorch
torch/distributed/pipelining/_IR.py
{ "start": 19548, "end": 46090 }
class ____(torch.nn.Module): def __init__( self, split_gm: fx.GraphModule, num_stages: int, has_loss_and_backward: bool, loss_spec, ): # TODO: is there a way not to hard wire init? torch.nn.Module.__init__(self) self.split_gm: fx.GraphModule = split_gm self.executor: DetachExecutor = DetachExecutor(self.split_gm) self.num_stages: int = num_stages self.has_loss_and_backward = has_loss_and_backward self.loss_spec = loss_spec for node in split_gm.graph.nodes: assert ( node.op in {"call_module", "placeholder", "output"} or (node.op, node.target) == ("call_function", operator.getitem) or (node.op, node.target) == ("call_method", "backward") or (node.op, node.target) == ("call_function", stage_backward) or (node.op, node.target) == ("call_function", _null_coalesce_accumulate) ), node # Detect replicated parameters so we know that we have to do an additional allreduce # before applying the optimizer # # Note that this also handles the case where there were multiple calls to a single # module from different stages, regardless of whether that module invocation # was handled by the logic above. # Map parameter value to a dictionary that maps the user pipeline module # to the local qualname within that module params_to_users: dict[torch.nn.Parameter, dict[str, str]] = {} for m_qualname, mod in self.split_gm.named_children(): for p_qualname, param in mod.named_parameters(): params_to_users.setdefault(param, {}) params_to_users[param][m_qualname] = p_qualname self.replicated_params: list[dict[str, str]] = [ use_mapping for _, use_mapping in params_to_users.items() if len(use_mapping) > 1 ] # We must break the aliasing relationship between the replicated parameters for correct # numerics in reference runs. If we do not do this, the autograd tape in separate stages # will have a reference to the same tensor value and will erroneously apply gradient # updates multiple times. Therefore, for each replicated parameter set, we deepcopy the # values so that we have separate instances. 
for param_mapping in self.replicated_params: for submod_name, param_qualname in param_mapping.items(): submod = getattr(self.split_gm, submod_name) atoms = param_qualname.split(".") for atom in atoms[:-1]: submod = getattr(submod, atom) setattr(submod, atoms[-1], copy.deepcopy(getattr(submod, atoms[-1]))) def throw(self, *args, **kwargs): raise RuntimeError( "To run pipeline locally, invoke the Pipe object directly, not `split_gm`" ) self.split_gm.forward = throw # Make submodules use custom direct-serialized GraphModule i = 0 while True: try: name = get_submod_name(i) submod = getattr(self.split_gm, name) submod.__class__.__reduce__ = _direct_serialization_reduce i += 1 except AttributeError: break def forward(self, *args, **kwargs): executor_args = args if len(kwargs) > 0: parameters = [] for node in self.split_gm.graph.nodes: if node.op == "placeholder": if node.args and len(node.args) > 0: parameters.append( Parameter( node.target, Parameter.POSITIONAL_OR_KEYWORD, default=node.args[0], ) ) else: parameter_kind = Parameter.POSITIONAL_OR_KEYWORD param_name = node.target if node.target.startswith("**"): parameter_kind = Parameter.VAR_KEYWORD # type: ignore[assignment] param_name = param_name[2:] elif node.target.startswith("*"): parameter_kind = Parameter.VAR_POSITIONAL # type: ignore[assignment] param_name = param_name[1:] parameters.append(Parameter(param_name, parameter_kind)) signature = Signature(parameters) ba = signature.bind(*args, **kwargs) ba.apply_defaults() executor_args = ba.arguments.values() # type: ignore[assignment] res = self.executor.run(*executor_args) return res def get_stage_module(self, stage_idx: int) -> torch.nn.Module: """ Return a stage module corresponding to `stage_idx` of the `pipe`. """ if stage_idx < 0 or stage_idx >= self.num_stages: raise ValueError(f"Invalid stage index {stage_idx}!") submod_name = get_submod_name(stage_idx) return getattr(self.split_gm, submod_name) @staticmethod def _number_and_count_forward_stages(gm: fx.GraphModule): num_stages = 0 found_idxs: dict[int, None] = {} for node in gm.graph.nodes: if node.op == "call_module" and node.target.startswith(PP_SUBMOD_PREFIX): node.meta["stage_idx"] = int(node.target[len(PP_SUBMOD_PREFIX) + 1 :]) found_idxs.setdefault(node.meta["stage_idx"]) num_stages += 1 # this assert will fail if a split point is inserted before the first layer, which creates empty first submodule # Update: the following assert may fail against some torch versions >= # 2.2.0, as: # submod_0, submod_1, submod_2, ... # may be named as # submod_0, submod_2, submod_4, ... # TODO: investigate # assert all(i in found_idxs for i in range(num_stages)) return num_stages @staticmethod def _from_traced( mod: torch.nn.Module, exported_program: ExportedProgram, multi_use_param_spec: Optional[MultiUseParamSpec] = None, output_loss_value_spec=None, split_policy: Optional[ Callable[[torch.fx.GraphModule], torch.fx.GraphModule] ] = None, ): """ Additionally, the ``output_loss_value_spec`` value can be specified to disambiguate which value in the output of `forward` is the loss value on which PiPPy should apply backpropagation. For example, if your ``forward`` returns a tuple ``(loss, model_out)``, you can specify ``output_loss_value_spec=(True, False)``. 
Or, if your ``forward`` returns a dict ``{'loss': loss_value, 'model_out': model_out}``, you can specify ``output_loss_value_spec={'loss': True, 'model_out': False}`` """ traced = exported_program.module(check_guards=False) if split_policy is not None: logger.info("Auto-splitting model") traced = split_policy(traced) # type: ignore[arg-type] logger.debug(traced.print_readable(print_output=False)) # type: ignore[operator] # Deduplicate `get_attr` nodes that refer to the same parameter . Downstream code for moving # parameters relies on the invariant that parameter accesses happen once. This is not necessarily # the case (especially with custom tracers), so fix that up here. get_attr_nodes: dict[str, fx.Node] = {} for node in traced.graph.nodes: # type: ignore[union-attr] if node.op == "get_attr": get_attr_nodes.setdefault(node.target, node) if get_attr_nodes[node.target] != node: node.replace_all_uses_with(get_attr_nodes[node.target]) traced.graph.erase_node(node) # type: ignore[operator, union-attr] # avoid looking at next node by keeping track of previous pipe_split prev_pipe_split_idx = -1 pipe_split_nodes_to_erase = set() for i, node in enumerate(traced.graph.nodes): # type: ignore[arg-type, union-attr] if (node.op, node.target) == ("call_function", pipe_split): if prev_pipe_split_idx == i - 1: pipe_split_nodes_to_erase.add(node) prev_pipe_split_idx = i for node in pipe_split_nodes_to_erase: traced.graph.erase_node(node) # type: ignore[operator, union-attr] traced.recompile() # type: ignore[operator] part_idx = 0 def split_callback(n: fx.Node): nonlocal part_idx if (n.op, n.target) == ( "call_function", aten_pipe_split_alias, ): logger.debug(f"Found pipe_split {part_idx}") # noqa: G004 part_idx += 1 return part_idx # TODO: what does split do with module invocations? does it move the modules # into the submodules? split = split_module(traced, mod, split_callback, partition_affix="pp") # type: ignore[arg-type] # a (custom) tracer can produce dead code like orphan get_attr nodes split.graph.eliminate_dead_code() # peephole to remove pipe_split for submodule in split.modules(): if isinstance(submodule, fx.GraphModule): for node in submodule.graph.nodes: if (node.op, node.target) == ( "call_function", aten_pipe_split_alias, ): submodule.graph.erase_node(node) submodule.recompile() for name, submodule in split.named_children(): if isinstance(submodule, fx.GraphModule): new_submod = _outline_submodules(submodule.graph) # Replace old submod split.register_module(name, new_submod) # TODO: backport this into split_module def delete_user_reference(node, user): """ Delete reference of `node` from `user`'s arg list. Args: - node: a `get_attr` node at root. - user: a submodule node that uses `node`. """ assert len(user.kwargs) == 0 use_idxs = [i for i, arg in enumerate(user.args) if arg == node] assert len(use_idxs) == 1 args_copy = list(user.args) args_copy.pop(use_idxs[0]) user.args = tuple(args_copy) logger.debug( f"Deleted {node} from user {user}, arg index = {use_idxs[0]}" # noqa: G004 ) # A list of param referrals for deferred deletion. # To be accumulated in `move_param_to_callee`. 
to_delete = [] def _recursive_getattr_with_parent(mod, fqn): # Returns getattr call given a nested FQN, and the last parent atoms = fqn.split(".") for atom in atoms[:-1]: if not hasattr(mod, atom): return None, None mod = getattr(mod, atom) if not hasattr(mod, atoms[-1]): return mod, None attr = getattr(mod, atoms[-1]) return mod, attr def move_param_to_callee( root, callee_name, param_fqn, ): """ Move a parameter from the root module to a submodule. Args: root: The root module. callee_name: The name of the submodule to move the parameter to. param_fqn: The fully qualified name of the parameter to move. """ # `atoms` is a list of strings representing the path to the # parameter in the original model atoms = param_fqn.split(".") mod_itr, param_val = _recursive_getattr_with_parent(split, param_fqn) # Check whether the parameter is a buffer or a parameter is_buffer = atoms[-1] in mod_itr._buffers # Check whether the parameter is a tensor assert isinstance(param_val, torch.Tensor), ( f"Expected '{param_fqn}' to be {torch.Tensor} but got {type(param_val)}." + ( f" It might happen if module '{param_fqn}' was passed to some 'leaf function'" f"(see https://pytorch.org/docs/stable/fx.html#fx.wrap). Please inspect " f"usages of '{param_fqn}' in the traced graph." if isinstance(param_val, torch.nn.Module) else "" ) ) # Get submodule callee = root.get_submodule(callee_name) assert not hasattr(callee, param_fqn), ( f"Module {callee_name} already has a parameter named {param_fqn}" ) # Assign the parameter to the submodule if is_buffer: _assign_attr( param_val, callee, param_fqn, attr_kind=_AttrKind.BUFFER, persistent=True, # TODO: handle non-persistent buffer ) else: _assign_attr( param_val, callee, param_fqn, attr_kind=_AttrKind.PARAMETER, ) logger.debug(f"Moved parameter {param_fqn} to {callee_name}") # noqa: G004 # Next step is to replace placeholder of submodule with a get_attr. # Those placeholders are created by `split_module` inside each # submodule. # Update: this step is now moved to `_sink_params` because # `_sink_params` can do it recursively (i.e. for modules inside # submodule) to_delete.append((mod_itr, atoms[-1])) # Get the list of all parameters in the root module attr_nodes = list(filter(lambda n: n.op == "get_attr", split.graph.nodes)) for node in attr_nodes: # Check whether the parameter is used in only one submodule if len(node.users) > 1: logger.info( f"Parameter {node.target} used in multiple stages: {node.users}." # noqa: G004 ) for user in node.users: assert user.op == "call_module" # Move parameter into submodule move_param_to_callee( split, user.target, node.target, ) # [aliasing] store tensor id -> list of FQNs, built from state dict # Also assign non-persistent buffers id_to_fqns: dict[int, set[str]] = defaultdict(set) for fqn, tensor in mod.state_dict(keep_vars=True).items(): id_to_fqns[id(tensor)].add(fqn) for fqn, tensor in mod.named_buffers(): id_to_fqns[id(tensor)].add(fqn) # After moving the params to their corresponding hierarchies, we also # need to move the `get_attr` nodes from the root of the graph to those # hierarchies. 
# [aliasing] use id -> fqn mapping to list out all valid FQNs inputs_to_state: dict[str, list[str]] = {} for attr in attr_nodes: _, tensor = _recursive_getattr_with_parent(mod, attr.target) fqns = list(id_to_fqns[id(tensor)]) if fqns: inputs_to_state[attr.name] = fqns elif attr.target in exported_program.constants: # lifted constants inputs_to_state[attr.name] = [attr.target] # [aliasing] for each submodule split, assign attributes on FQNs that may be used. # We determine this based on whether or not the FQN attribute parent exists. # i.e. if the last submodule exists, assign the attribute. added_attributes: dict[str, list[str]] = defaultdict(list) for fqn, tensor in mod.state_dict(keep_vars=True).items(): for name, submod in split.named_children(): if isinstance(submod, fx.GraphModule): parent, child = _recursive_getattr_with_parent(submod, fqn) if ( parent and child is None ): # parent exists, attribute doesn't -> assign added_attributes[name].append(fqn) setattr(parent, fqn.split(".")[-1], tensor) # Deferral deletion: Remove the original attributes (to params) from the # root GraphModule for mod_itr, last_atom in to_delete: try: delattr(mod_itr, last_atom) except AttributeError: # This is expected if the parameter is used in multiple stages pass # This is done by (1) `_sink_params` at each submodule; for submod in split.children(): if isinstance(submod, fx.GraphModule): _sink_params(submod, inputs_to_state, []) submod.graph.lint() submod.recompile() # [aliasing] This step is not super necessary, but helps reduce parameter usage/memory. # After _sink_params() routine has run, clean up unused attributes that we previously added. # Determine this based on the get_attr nodes - if not used, remove it. for name, attributes in added_attributes.items(): submod = getattr(split, name) unused_attributes = set(attributes) # track used attributes in the submodule, running DFS on subgraph hierarchy stack = [("", submod)] # (scope, submodule) while stack: scope, _mod = stack.pop() if isinstance(_mod, (fx.GraphModule, InterpreterModule)): for node in _mod.graph.nodes: if node.op == "get_attr": # get_attr might get access deeper level attribute fqn = scope + "." + node.target if scope else node.target unused_attributes.discard(fqn) for _name, _submod in _mod.named_children(): stack.append((scope + "." + _name if scope else _name, _submod)) # delete unused attributes for attr in unused_attributes: mod_itr, atoms = submod, attr.split(".") for atom in atoms[:-1]: mod_itr = getattr(mod_itr, atom) delattr(mod_itr, atoms[-1]) for node in attr_nodes: # And (2): remove `get_attr` node from submod's arg list for user in copy.copy(node.users): assert user.op == "call_module" delete_user_reference(node, user) # And (3): remove the `get_attr` node from the root graph. 
split.graph.erase_node(node) split.delete_all_unused_submodules() split.graph.lint() split.recompile() num_stages = Pipe._number_and_count_forward_stages(split) has_loss_and_backward = False generated_loss_spec = output_loss_value_spec if output_loss_value_spec is not None: loss_node, output_node, generated_loss_spec = _find_loss_output( mod, split.graph, output_loss_value_spec ) if loss_node is not None: _insert_stage_symbolic_backward( split.graph, loss_node, output_node, ) split.recompile() has_loss_and_backward = True logger.debug("Pipeline is in training mode, backward pass generated") else: raise RuntimeError( f"Did not find any loss value according to {output_loss_value_spec=}" ) else: logger.debug("Pipeline is in inference mode, backward pass not generated") logger.debug(f"Full pipe model:\n{split}") # noqa: G004 return Pipe( split, num_stages, has_loss_and_backward, generated_loss_spec, ) def print_readable(self): """ Print the pipe in a human-readable format. This will print both the root pipe and each stage module. """ self.split_gm.print_readable() @staticmethod def _trace_with_export( mod: torch.nn.Module, example_args: tuple[Any, ...], example_kwargs: Optional[dict[str, Any]] = None, ) -> ExportedProgram: logger.info("Tracing model ...") try: ep = torch.export.export(mod, example_args, example_kwargs) except Exception as e: raise RuntimeError( "It seems that we cannot capture your model as a full graph. " "Typical reasons include graph breaks, data/shape-dependent " "control flow, or missing meta kernels for custom operators. " "You can use our manual pipeline interfaces, or try to fix the " "graph breaks, see https://pytorch.org/docs/stable/export.html" ) from e return ep @staticmethod def from_tracing( mod: torch.nn.Module, example_args: tuple[Any, ...], example_kwargs: Optional[dict[str, Any]] = None, split_policy: Optional[Callable[[fx.GraphModule], fx.GraphModule]] = None, ): # If a param will be used in multiple pipeline stages, we default the strategy to REPLICATE'ing the param across # stages instead of TRANSMIT'ting it multi_use_param_spec = MultiUseParameterConfig.REPLICATE # Figure out which output is loss from output_chunk_spec output_loss_value_spec: Any = None # Deprecated """ if output_chunk_spec is not None: output_loss_value_spec = map_aggregate( output_chunk_spec, lambda v: isinstance(v, _LossReducer) ) """ # Trace with export exported_program = Pipe._trace_with_export( mod, example_args, example_kwargs, ) pipe = Pipe._from_traced( mod, exported_program, multi_use_param_spec, output_loss_value_spec=output_loss_value_spec, split_policy=split_policy, ) # Users want the first pipeline stage to accept kwargs if the original # program does. This is controlled by the `_codegen` field of the graph, # so we make a copy here. Note: we only want the input spec and not the # output spec, because the output spec is for the last stage. Maybe a # TODO? Not sure yet. split = pipe.split_gm traced = exported_program.module() submod0 = next(iter(split.children())) submod0_sign = signature(submod0.forward) model_sign = signature(traced.forward) if len(model_sign.parameters) != len(submod0_sign.parameters): # We don't change the signature of the first stage if it takes # different number of args than original model logger.info( f"Original model takes {len(model_sign.parameters)} args but the " # noqa: G004 f"first pipeline stage takes {len(submod0_sign.parameters)}. " "Please provide args to respective pipeline stages." 
) else: # Support kwargs for the first stage submod0.graph._codegen = copy.deepcopy(traced.graph._codegen) # type: ignore[union-attr] # `_replace` is actually not "private" or internal. based on this doc: # To prevent conflicts with field names, the method and attribute names # start with an underscore submod0.graph._codegen.pytree_info = ( # type: ignore[union-attr] submod0.graph._codegen.pytree_info._replace(out_spec=None) # type: ignore[operator, union-attr] ) submod0.recompile() return pipe def __str__(self): return self.split_gm.__str__() def __repr__(self): return self.split_gm.__repr__() def info(self) -> PipeInfo: """ Get information about the pipe. Returns ------- PipeInfo A dataclass containing information about the pipe. """ return PipeInfo( graph=self.split_gm.graph, num_stages=self.num_stages, has_loss_and_backward=self.has_loss_and_backward, ) def build_stage( self, stage_index: int, device: torch.device, group: Optional[ProcessGroup] = None, ) -> _PipelineStage: """ Create a `PipelineStage` given a stage index and distributed group. The `PipelineStage` can run with `PipelineSchedule`s. """ # Find stage module stage_module = self.get_stage_module(stage_index) # Move ops argument to device # Today PT2 tracer does not treat `x.device` as a symbolic device; # instead, the device of tracing time got burned into the generated # code. Here we provide a workaround for users to manually modify the # "device" kwarg of operations. Such operation may include: # `torch.ones`, `torch.zeros`, `torch.rand`, etc. if isinstance(stage_module, torch.fx.GraphModule): _modify_graph_op_device(stage_module, device) else: logger.warning( f"Expected a `torch.fx.GraphModule` but got {type(stage_module)}" # noqa: G004 ) # Detach pipe info # Note: be careful what's included in `pipe_info`. We don't want to keep # a reference to `Pipe` or `Pipe.split_gm` which stops python from # recycling them. When python recycles them, other stage modules (which # are irrelevant to current rank) can be automatically freed. pipe_info = self.info() return _PipelineStage(stage_module, stage_index, pipe_info, device, group)
Pipe
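Pipe splits an exported fx graph at pipe_split markers and numbers the resulting submodules. A much smaller sketch of the same stage-splitting idea on a flat module list (the helper name is made up):

import torch.nn as nn

def split_stages(layers, split_points):
    # Cut the flat layer list at each split index, one Sequential per stage.
    stages, start = [], 0
    for point in sorted(split_points):
        stages.append(nn.Sequential(*layers[start:point]))
        start = point
    stages.append(nn.Sequential(*layers[start:]))
    return stages

layers = [nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 8), nn.ReLU()]
stages = split_stages(layers, [2])
assert len(stages) == 2 and len(stages[0]) == 2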
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1288163, "end": 1289925 }
class ____(sgqlc.types.Type, Node): """A GitHub Enterprise Importer (GEI) organization migration.""" __schema__ = github_schema __field_names__ = ( "created_at", "database_id", "failure_reason", "remaining_repositories_count", "source_org_name", "source_org_url", "state", "target_org_name", "total_repositories_count", ) created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt") """Identifies the date and time when the object was created.""" database_id = sgqlc.types.Field(String, graphql_name="databaseId") """Identifies the primary key from the database.""" failure_reason = sgqlc.types.Field(String, graphql_name="failureReason") """The reason the organization migration failed.""" remaining_repositories_count = sgqlc.types.Field(Int, graphql_name="remainingRepositoriesCount") """The remaining amount of repos to be migrated.""" source_org_name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="sourceOrgName") """The name of the source organization to be migrated.""" source_org_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="sourceOrgUrl") """The URL of the source organization to migrate.""" state = sgqlc.types.Field(sgqlc.types.non_null(OrganizationMigrationState), graphql_name="state") """The migration state.""" target_org_name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="targetOrgName") """The name of the target organization.""" total_repositories_count = sgqlc.types.Field(Int, graphql_name="totalRepositoriesCount") """The total amount of repositories to be migrated."""
OrganizationMigration
python
django__django
tests/constraints/models.py
{ "start": 1665, "end": 2352 }
class ____(models.Model): name = models.CharField(max_length=255, null=True) price = models.IntegerField(null=True) discounted_price = models.IntegerField(null=True) rebate = models.GeneratedField( expression=Coalesce("price", 0) - Coalesce("discounted_price", Coalesce("price", 0)), output_field=models.IntegerField(), db_persist=False, ) lower_name = models.GeneratedField( expression=Lower(models.F("name")), output_field=models.CharField(max_length=255, null=True), db_persist=False, ) class Meta: required_db_features = {"supports_virtual_generated_columns"}
GeneratedFieldVirtualProduct
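The rebate expression above leans on Coalesce for NULL handling; the equivalent logic in plain Python makes the three cases easy to check (a pure sketch, no database involved):

def rebate(price, discounted_price):
    # Coalesce("price", 0) - Coalesce("discounted_price", Coalesce("price", 0))
    p = price if price is not None else 0
    d = discounted_price if discounted_price is not None else p
    return p - d

assert rebate(100, 80) == 20    # normal discount
assert rebate(100, None) == 0   # no discount recorded -> no rebate
assert rebate(None, None) == 0  # no price at all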
python
tensorflow__tensorflow
tensorflow/compiler/tests/jit_test.py
{ "start": 9811, "end": 17180 }
class ____(test.TestCase): """Tests for auto-compilation on CPU/GPU devices.""" def testReshape(self): """Tests an operator with compile-time constant and non-constant inputs.""" with self.session(config=NoRewriteSessionConfig()) as sess: x = array_ops.placeholder(dtypes.float32) y = array_ops.placeholder(dtypes.int32) with jit_scope(): # Reshape's first argument is non-constant in the JIT, but its second # (shape) argument will be treated as a compile-time constant for # each JIT compilation. # We do not use a tf.const() argument since we want to ensure the # shape is still a run-time argument to the JIT, and not # statically known as part of the JIT compilation's input graph. z = array_ops.reshape(x, y) run_metadata = config_pb2.RunMetadata() out = test_utils.RunWithWarmup( sess, z, { x: np.array([1, 2, 3, 4, 5, 6], np.float32), y: [-1, 3] }, run_metadata=run_metadata, options=config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE)) self.assertTrue(MetadataHasXlaRunOp(run_metadata)) self.assertAllClose(np.array([[1, 2, 3], [4, 5, 6]], np.float32), out) def testIgnoredArguments(self): """Tests that JIT computations can ignore formal parameters.""" with self.session(config=NoRewriteSessionConfig()) as sess: x = array_ops.placeholder(dtypes.int32) y = array_ops.placeholder(dtypes.int32) with jit_scope(): z = math_ops.add(x, x) w = math_ops.add(y, y) # Pulls 'w' into the same compilation via control dependencies. with ops.control_dependencies([w]): n = control_flow_ops.no_op() with ops.control_dependencies([n]): t = math_ops.add(z, z) run_metadata = config_pb2.RunMetadata() out = test_utils.RunWithWarmup( sess, t, { x: np.int32(7), y: np.int32(404) }, run_metadata=run_metadata, options=config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE)) self.assertTrue(MetadataHasXlaRunOp(run_metadata)) self.assertAllClose(28, out) def testLoops(self): """Tests that compilation accepts computations containing loops.""" with self.session(config=NoRewriteSessionConfig()) as session: x = array_ops.placeholder(dtypes.float32) with jit_scope(): c = lambda i, _: math_ops.less(i, 5) b = lambda i, x: (i + 1, x * 2.0 + 1.0) _, y = while_loop.while_loop(c, b, (constant_op.constant(0), x)) run_metadata = config_pb2.RunMetadata() result = session.run(y, {x: np.float32(2)}, run_metadata=run_metadata, options=config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE)) self.assertTrue(MetadataHasXlaRunOp(run_metadata)) self.assertAllClose(result, np.float32(95), rtol=1e-1) def testCond(self): """Tests that compilation handles switch operators.""" with self.session(config=NoRewriteSessionConfig()) as session: x = array_ops.placeholder(dtypes.float32) y = array_ops.placeholder(dtypes.float32) c = array_ops.placeholder(dtypes.bool) with jit_scope(): z = x + 1.0 w = cond.cond(c, lambda: z, lambda: y) t = math_ops.add(z, w) # If JIT compilation chooses to cluster z and t, then execution will # deadlock. 
run_metadata = config_pb2.RunMetadata() result = test_utils.RunWithWarmup( session, t, { x: np.float32(2), y: np.float32(4), c: True }, run_metadata=run_metadata, options=config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE)) self.assertTrue(MetadataHasXlaRunOp(run_metadata)) self.assertAllClose(result, np.float32(6), rtol=1e-1) def testNestedFunction(self): g = ops.Graph() with g.as_default(): @function.Defun(compiled=True) def Bar(x, y): return x + 2 * y @function.Defun(compiled=True) def Foo(x): return Bar(x * x, x * x * x) @function.Defun() def Entry(x): return Foo(x) inp = array_ops.placeholder(dtypes.float32) out = Entry(inp) with self.session( config=NoRewriteSessionConfig(), graph=g, use_gpu=True) as sess: run_metadata = config_pb2.RunMetadata() val = sess.run(out, feed_dict={inp: [2., 10.]}, run_metadata=run_metadata, options=config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE)) self.assertAllClose(val, [20., 2100.]) def testLoopDeadlock(self): """Regression test for bug that caused deadlocks in graphs with loops.""" with self.session(config=NoRewriteSessionConfig()) as session: x = array_ops.placeholder(dtypes.float32) with jit_scope(): y = x + 1.0 c = lambda i, _x, _y: math_ops.less(i, 5) b = lambda i, x, _y: (i + 1, x * 2.0 + 1.0, x - 3.0) _, _, w = while_loop.while_loop(c, b, (constant_op.constant(0), y, x)) u = w + y result = session.run(u, {x: np.float32(2)}) self.assertAllClose(result, np.float32(63), rtol=1e-1) def testGradient(self): """Tests that the backprop function is properly compiled.""" def _Run(compiled): @function.Defun(compiled=compiled) def Forward(x): return math_ops.log(x) g = ops.Graph() with g.as_default(): x = array_ops.placeholder(dtypes.float32) y = Forward(x) dx, = gradients_impl.gradients(y, [x], 1.0) cfg = NoRewriteSessionConfig() cfg.graph_options.optimizer_options.opt_level = ( config_pb2.OptimizerOptions.L1) cfg.graph_options.optimizer_options.do_function_inlining = True with session_lib.Session(graph=g, config=cfg) as sess: run_metadata = config_pb2.RunMetadata() dx_val = test_utils.RunWithWarmup( sess, dx, feed_dict={x: 100.}, run_metadata=run_metadata, options=config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE)) self.assertAllClose(dx_val, 0.01) return RunMetadataLabels(run_metadata) # SymGrad[f=log(x)](x, dy) = 1/x * dy # # Note: we don't need to compute log(x) for dx due to graph pruning. # Do not compile the backprop. We should see one Reciprocal and one Mul. labels = _Run(compiled=False) self.assertFalse(InLabels(labels, "Log")) self.assertTrue(InLabels(labels, "Reciprocal")) self.assertTrue(InLabels(labels, "Mul")) self.assertFalse(InLabels(labels, "XlaCompile")) self.assertFalse(InLabels(labels, "XlaRun")) # Compile the backprop. One XlaCompile/XlaRun pair. labels = _Run(compiled=True) self.assertFalse(InLabels(labels, "Log")) self.assertFalse(InLabels(labels, "Reciprocal")) self.assertFalse(InLabels(labels, "Mul")) self.assertTrue(InLabels(labels, "XlaCompile")) self.assertTrue(InLabels(labels, "XlaRun"))
XlaCompilationTest
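These tests drive XLA through TF1 sessions and jit_scope; in current TensorFlow the same clustering is usually requested per function, which is enough to exercise the compiled path (a sketch assuming TensorFlow 2.x is installed):

import tensorflow as tf

@tf.function(jit_compile=True)  # ask XLA to compile this function
def scale_shift(x):
    return x * 2.0 + 1.0

print(scale_shift(tf.constant([1.0, 2.0])).numpy())  # [3. 5.]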
python
huggingface__transformers
tests/models/vipllava/test_modeling_vipllava.py
{ "start": 5627, "end": 10928 }
class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `VipLlavaForConditionalGeneration`. """ all_model_classes = ( ( VipLlavaModel, VipLlavaForConditionalGeneration, ) if is_torch_available() else () ) pipeline_model_mapping = {"image-text-to-text": VipLlavaForConditionalGeneration} if is_torch_available() else {} test_resize_embeddings = True _is_composite = True def setUp(self): self.model_tester = VipLlavaVisionText2TextModelTester(self) common_properties = ["image_token_index", "vision_feature_layers", "image_seq_length"] self.config_tester = ConfigTester( self, config_class=VipLlavaConfig, has_text_modality=False, common_properties=common_properties ) def test_config(self): self.config_tester.run_common_tests() # Copied from tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_mismatching_num_image_tokens def test_mismatching_num_image_tokens(self): """ Tests that VLMs through an error with explicit message saying what is wrong when number of images doesn't match number of image tokens in the text. Also we need to test multi-image cases when one prompr has multiple image tokens. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) model.eval() curr_input_dict = copy.deepcopy(input_dict) # in=place modifications further _ = model(**curr_input_dict) # successful forward with no modifications # remove one image but leave the image token in text curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...] with self.assertRaises(ValueError): _ = model(**curr_input_dict) # simulate multi-image case by concatenating inputs where each has exactly one image/image-token input_ids = curr_input_dict["input_ids"][:1] pixel_values = curr_input_dict["pixel_values"][:1] input_ids = torch.cat([input_ids, input_ids], dim=0) # one image and two image tokens raise an error with self.assertRaises(ValueError): _ = model(input_ids=input_ids, pixel_values=pixel_values) # two images and two image tokens don't raise an error pixel_values = torch.cat([pixel_values, pixel_values], dim=0) _ = model(input_ids=input_ids, pixel_values=pixel_values) @parameterized.expand( [ (-1,), ([-1],), ([-1, -2],), ], ) def test_vision_feature_layers(self, vision_feature_layers): """ Test that we can use either one vision feature layer, or a list of vision feature layers. """ # NOTE: vipllava uses vision_feature_layers instead of vision_feature_layer as the # config key. The reason is that other llava classes supported one vision feature layer # and added support for a list of layers with granite vision support, while vipllava # originally supported multiple feature layers, and added support for a single layer for # for compatibility reasons. 
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.vision_feature_layers = vision_feature_layers num_feature_layers = 1 if isinstance(vision_feature_layers, int) else len(vision_feature_layers) hidden_size = config.vision_config.hidden_size expected_features = hidden_size * num_feature_layers for model_class in self.all_model_classes: model = model_class(config).to(torch_device) # We should have the right number of input features, # and should be able to run a forward pass without exploding base_model = getattr(model, "model", model) assert base_model.multi_modal_projector.linear_1.in_features == expected_features model(**input_dict) @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip( "VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. Can be tested as part of LLM test" ) def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self): pass @require_torch
VipLlavaForConditionalGenerationModelTest
python
spack__spack
lib/spack/spack/vendor/ruamel/yaml/compat.py
{ "start": 3083, "end": 5320 }
class ____: def __init__(self, file_name=None): # type: (Any) -> None self._max_print = None # type: Any self._count = None # type: Any self._file_name = file_name def __call__(self, *args, **kw): # type: (Any, Any) -> None if not bool(_debug): return out = sys.stdout if self._file_name is None else open(self._file_name, 'a') dbgprint = print # to fool checking for print statements by dv utility kw1 = kw.copy() kw1['file'] = out dbgprint(*args, **kw1) out.flush() if self._max_print is not None: if self._count is None: self._count = self._max_print self._count -= 1 if self._count == 0: dbgprint('forced exit\n') traceback.print_stack() out.flush() sys.exit(0) if self._file_name: out.close() def set_max_print(self, i): # type: (int) -> None self._max_print = i self._count = None def fp(self, mode='a'): # type: (str) -> Any out = sys.stdout if self._file_name is None else open(self._file_name, mode) return out nprint = Nprint() nprintf = Nprint('/var/tmp/spack.vendor.ruamel.yaml.log') # char checkers following production rules def check_namespace_char(ch): # type: (Any) -> bool if '\x21' <= ch <= '\x7E': # ! to ~ return True if '\xA0' <= ch <= '\uD7FF': return True if ('\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF': # excl. byte order mark return True if '\U00010000' <= ch <= '\U0010FFFF': return True return False def check_anchorname_char(ch): # type: (Any) -> bool if ch in ',[]{}': return False return check_namespace_char(ch) def version_tnf(t1, t2=None): # type: (Any, Any) -> Any """ return True if spack.vendor.ruamel.yaml version_info < t1, None if t2 is specified and bigger else False """ from spack.vendor.ruamel.yaml import version_info # NOQA if version_info < t1: return True if t2 is not None and version_info < t2: return None return False
Nprint
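Nprint's max-print counter force-exits the process once the budget is spent; a gentler self-contained variant simply goes quiet instead (names here are hypothetical):

import sys

class CappedPrinter:
    """Print at most `limit` messages, then stay silent."""

    def __init__(self, limit):
        self._remaining = limit

    def __call__(self, *args):
        if self._remaining > 0:
            print(*args, file=sys.stderr)
            self._remaining -= 1

debug = CappedPrinter(limit=2)
for i in range(5):
    debug("step", i)  # only "step 0" and "step 1" are emitted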
python
sqlalchemy__sqlalchemy
test/dialect/mssql/test_compiler.py
{ "start": 63571, "end": 71429 }
class ____(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = mssql.dialect() def assert_compile_with_warning(self, *args, **kwargs): with testing.expect_deprecated( "The dialect options 'mssql_identity_start' and " "'mssql_identity_increment' are deprecated. " "Use the 'Identity' object instead." ): return self.assert_compile(*args, **kwargs) def test_primary_key_no_identity(self): metadata = MetaData() tbl = Table( "test", metadata, Column("id", Integer, autoincrement=False, primary_key=True), ) self.assert_compile( schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL, PRIMARY KEY (id))", ) def test_primary_key_defaults_to_identity(self): metadata = MetaData() tbl = Table("test", metadata, Column("id", Integer, primary_key=True)) self.assert_compile( schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL IDENTITY, " "PRIMARY KEY (id))", ) def test_primary_key_with_identity_object(self): metadata = MetaData() tbl = Table( "test", metadata, Column( "id", Integer, Identity(start=3, increment=42), primary_key=True, ), ) self.assert_compile( schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL IDENTITY(3,42), " "PRIMARY KEY (id))", ) def test_identity_no_primary_key(self): metadata = MetaData() tbl = Table( "test", metadata, Column("id", Integer, autoincrement=True) ) self.assert_compile( schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL IDENTITY)", ) def test_identity_object_no_primary_key(self): metadata = MetaData() tbl = Table( "test", metadata, Column("id", Integer, Identity(increment=42)), ) self.assert_compile( schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,42))", ) def test_identity_object_1_1(self): metadata = MetaData() tbl = Table( "test", metadata, Column("id", Integer, Identity(start=1, increment=1)), ) self.assert_compile( schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,1))", ) def test_identity_object_no_primary_key_non_nullable(self): metadata = MetaData() tbl = Table( "test", metadata, Column( "id", Integer, Identity(start=3), nullable=False, ), ) self.assert_compile( schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL IDENTITY(3,1))", ) def test_identity_separate_from_primary_key(self): metadata = MetaData() tbl = Table( "test", metadata, Column("id", Integer, autoincrement=False, primary_key=True), Column("x", Integer, autoincrement=True), ) self.assert_compile( schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL, " "x INTEGER NOT NULL IDENTITY, " "PRIMARY KEY (id))", ) def test_identity_object_separate_from_primary_key(self): metadata = MetaData() tbl = Table( "test", metadata, Column("id", Integer, autoincrement=False, primary_key=True), Column( "x", Integer, Identity(start=3, increment=42), ), ) self.assert_compile( schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL, " "x INTEGER NOT NULL IDENTITY(3,42), " "PRIMARY KEY (id))", ) def test_identity_illegal_two_autoincrements(self): metadata = MetaData() tbl = Table( "test", metadata, Column("id", Integer, autoincrement=True), Column("id2", Integer, autoincrement=True), ) # this will be rejected by the database, just asserting this is what # the two autoincrements will do right now self.assert_compile( schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL IDENTITY, " "id2 INTEGER NOT NULL IDENTITY)", ) def test_identity_object_illegal_two_autoincrements(self): metadata = MetaData() tbl = Table( "test", metadata, Column( "id", Integer, Identity(start=3, increment=42), 
autoincrement=True, ), Column( "id2", Integer, Identity(start=7, increment=2), ), ) # this will be rejected by the database, just asserting this is what # the two autoincrements will do right now self.assert_compile( schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL IDENTITY(3,42), " "id2 INTEGER NOT NULL IDENTITY(7,2))", ) def test_identity_start_0(self): metadata = MetaData() tbl = Table( "test", metadata, Column("id", Integer, mssql_identity_start=0, primary_key=True), ) self.assert_compile_with_warning( schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL IDENTITY(0,1), " "PRIMARY KEY (id))", ) def test_identity_increment_5(self): metadata = MetaData() tbl = Table( "test", metadata, Column( "id", Integer, mssql_identity_increment=5, primary_key=True ), ) self.assert_compile_with_warning( schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,5), " "PRIMARY KEY (id))", ) @testing.combinations( schema.CreateTable( Table( "test", MetaData(), Column( "id", Integer, Identity(start=2, increment=2), mssql_identity_start=0, ), ) ), schema.CreateTable( Table( "test1", MetaData(), Column( "id2", Integer, Identity(start=3, increment=3), mssql_identity_increment=5, ), ) ), ) def test_identity_options_ignored_with_identity_object(self, create_table): assert_raises_message( exc.CompileError, "Cannot specify options 'mssql_identity_start' and/or " "'mssql_identity_increment' while also using the " "'Identity' construct.", create_table.compile, dialect=self.__dialect__, ) def test_identity_object_no_options(self): metadata = MetaData() tbl = Table( "test", metadata, Column("id", Integer, Identity()), ) self.assert_compile( schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL IDENTITY)", )
CompileIdentityTest
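Outside the test harness, the same DDL can be produced by compiling a CreateTable against the mssql dialect; a standalone check of the IDENTITY(start, increment) rendering (assumes SQLAlchemy 1.4+ for the Identity construct):

from sqlalchemy import Column, Identity, Integer, MetaData, Table
from sqlalchemy.dialects import mssql
from sqlalchemy.schema import CreateTable

metadata = MetaData()
tbl = Table(
    "test",
    metadata,
    Column("id", Integer, Identity(start=3, increment=42), primary_key=True),
)

ddl = str(CreateTable(tbl).compile(dialect=mssql.dialect()))
assert "IDENTITY(3,42)" in ddl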
python
mlflow__mlflow
mlflow/langchain/output_parsers.py
{ "start": 3197, "end": 3799 }
class ____(BaseTransformOutputParser[dict[str, Any]]):
    """
    OutputParser that wraps the string output into a dictionary representation
    of a :py:class:`StringResponse`
    """

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this class is serializable."""
        return True

    @property
    def _type(self) -> str:
        """Return the output parser type for serialization."""
        return "mlflow_simplified_str_object"

    def parse(self, text: str) -> dict[str, Any]:
        return asdict(StringResponse(content=text))
StringResponseOutputParser
python
django-extensions__django-extensions
tests/testapp/models.py
{ "start": 12499, "end": 12603 }
class ____(TimeStampedModel): class Meta: app_label = "django_extensions"
TimestampedTestModel
python
rapidsai__cudf
python/cudf_polars/cudf_polars/utils/config.py
{ "start": 5168, "end": 5580 }
class ____(str, enum.Enum): """ **Deprecated**: Use :class:`Cluster` instead. The scheduler to use for the task-based streaming executor. * ``Scheduler.SYNCHRONOUS`` : Single-GPU execution (use ``Cluster.SINGLE`` instead) * ``Scheduler.DISTRIBUTED`` : Multi-GPU execution (use ``Cluster.DISTRIBUTED`` instead) """ SYNCHRONOUS = "synchronous" DISTRIBUTED = "distributed"
Scheduler
python
mlflow__mlflow
dev/clint/src/clint/rules/use_sys_executable.py
{ "start": 84, "end": 1105 }
class ____(Rule): def _message(self) -> str: return ( "Use `[sys.executable, '-m', 'mlflow', ...]` when running mlflow CLI in a subprocess." ) @staticmethod def check(node: ast.Call, resolver: Resolver) -> bool: """ Returns True if `node` looks like `subprocess.Popen(["mlflow", ...])`. """ resolved = resolver.resolve(node) if ( resolved and len(resolved) == 2 and resolved[0] == "subprocess" and resolved[1] in ["Popen", "run", "check_output", "check_call"] and node.args ): first_arg = node.args[0] if isinstance(first_arg, ast.List) and first_arg.elts: first_elem = first_arg.elts[0] return ( isinstance(first_elem, ast.Constant) and isinstance(first_elem.value, str) and first_elem.value == "mlflow" ) return False
UseSysExecutable
python
realpython__materials
python-tic-tac-toe-game-tkinter/source_code_step_3/tic_tac_toe.py
{ "start": 407, "end": 2787 }
class ____:
    def __init__(self, players=DEFAULT_PLAYERS, board_size=BOARD_SIZE):
        self._players = cycle(players)
        self.board_size = board_size
        self.current_player = next(self._players)
        self.winner_combo = []
        self._current_moves = []
        self._has_winner = False
        self._winning_combos = []
        self._setup_board()

    def _setup_board(self):
        self._current_moves = [
            [Move(row, col) for col in range(self.board_size)]
            for row in range(self.board_size)
        ]
        self._winning_combos = self._get_winning_combos()

    def _get_winning_combos(self):
        rows = [
            [(move.row, move.col) for move in row]
            for row in self._current_moves
        ]
        columns = [list(col) for col in zip(*rows, strict=False)]
        first_diagonal = [row[i] for i, row in enumerate(rows)]
        second_diagonal = [col[j] for j, col in enumerate(reversed(columns))]
        return rows + columns + [first_diagonal, second_diagonal]

    def is_valid_move(self, move):
        """Return True if move is valid, and False otherwise."""
        row, col = move.row, move.col
        move_was_not_played = self._current_moves[row][col].label == ""
        no_winner = not self._has_winner
        return no_winner and move_was_not_played

    def process_move(self, move):
        """Process the current move and check if it's a win."""
        row, col = move.row, move.col
        self._current_moves[row][col] = move
        for combo in self._winning_combos:
            results = set(self._current_moves[n][m].label for n, m in combo)
            is_win = (len(results) == 1) and ("" not in results)
            if is_win:
                self._has_winner = True
                self.winner_combo = combo
                break

    def has_winner(self):
        """Return True if the game has a winner, and False otherwise."""
        return self._has_winner

    def is_tied(self):
        """Return True if the game is tied, and False otherwise."""
        no_winner = not self._has_winner
        played_moves = (
            move.label for row in self._current_moves for move in row
        )
        return no_winner and all(played_moves)

    def toggle_player(self):
        """Toggle the current player to the next one in the cycle."""
        self.current_player = next(self._players)
TicTacToeGame
python
tensorflow__tensorflow
tensorflow/python/keras/regularizers.py
{ "start": 8995, "end": 9876 }
class ____(Regularizer): """A regularizer that applies a L1 regularization penalty. The L1 regularization penalty is computed as: `loss = l1 * reduce_sum(abs(x))` L1 may be passed to a layer as a string identifier: >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1') In this case, the default value used is `l1=0.01`. Attributes: l1: Float; L1 regularization factor. """ def __init__(self, l1=0.01, **kwargs): # pylint: disable=redefined-outer-name l1 = kwargs.pop('l', l1) # Backwards compatibility if kwargs: raise TypeError('Argument(s) not recognized: %s' % (kwargs,)) l1 = 0.01 if l1 is None else l1 _check_penalty_number(l1) self.l1 = backend.cast_to_floatx(l1) def __call__(self, x): return self.l1 * math_ops.reduce_sum(math_ops.abs(x)) def get_config(self): return {'l1': float(self.l1)}
L1
python
doocs__leetcode
solution/0200-0299/0223.Rectangle Area/Solution.py
{ "start": 0, "end": 432 }
class ____: def computeArea( self, ax1: int, ay1: int, ax2: int, ay2: int, bx1: int, by1: int, bx2: int, by2: int, ) -> int: a = (ax2 - ax1) * (ay2 - ay1) b = (bx2 - bx1) * (by2 - by1) width = min(ax2, bx2) - max(ax1, bx1) height = min(ay2, by2) - max(ay1, by1) return a + b - max(height, 0) * max(width, 0)
Solution
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/operators/s3.py
{ "start": 1519, "end": 3287 }
class ____(AwsBaseOperator[S3Hook]): """ This operator creates an S3 bucket. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:S3CreateBucketOperator` :param bucket_name: This is bucket name you want to create :param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node). :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. :param verify: Whether or not to verify SSL certificates. See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html :param botocore_config: Configuration dictionary (key-values) for botocore client. See: https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html """ template_fields: Sequence[str] = aws_template_fields("bucket_name") aws_hook_class = S3Hook def __init__( self, *, bucket_name: str, **kwargs, ) -> None: super().__init__(**kwargs) self.bucket_name = bucket_name def execute(self, context: Context): if not self.hook.check_for_bucket(self.bucket_name): self.hook.create_bucket(bucket_name=self.bucket_name, region_name=self.region_name) self.log.info("Created bucket with name: %s", self.bucket_name) else: self.log.info("Bucket with name: %s already exists", self.bucket_name)
S3CreateBucketOperator
python
PrefectHQ__prefect
src/prefect/exceptions.py
{ "start": 5292, "end": 5804 }
class ____(PrefectException, TypeError): """Raised when parameters passed to a function do not match its signature.""" def __init__(self, msg: str): super().__init__(msg) @classmethod def from_bad_params( cls, expected_params: list[str], provided_params: list[str] ) -> Self: msg = ( f"Function expects parameters {expected_params} but was provided with" f" parameters {provided_params}" ) return cls(msg)
SignatureMismatchError
python
kennethreitz__tablib
src/tablib/formats/_html.py
{ "start": 107, "end": 1601 }
class ____: BOOK_ENDINGS = 'h3' title = 'html' extensions = ('html', ) @classmethod def export_set(cls, dataset): """HTML representation of a Dataset.""" stream = BytesIO() page = markup.page() page.table.open() if dataset.headers is not None: new_header = [item if item is not None else '' for item in dataset.headers] page.thead.open() headers = markup.oneliner.th(new_header) page.tr(headers) page.thead.close() for row in dataset: new_row = [item if item is not None else '' for item in row] html_row = markup.oneliner.td(new_row) page.tr(html_row) page.table.close() # Allow unicode characters in output wrapper = codecs.getwriter("utf8")(stream) wrapper.writelines(str(page)) return stream.getvalue().decode('utf-8') @classmethod def export_book(cls, databook): """HTML representation of a Databook.""" stream = BytesIO() # Allow unicode characters in output wrapper = codecs.getwriter("utf8")(stream) for i, dset in enumerate(databook._datasets): title = (dset.title if dset.title else 'Set %s' % (i)) wrapper.write(f'<{cls.BOOK_ENDINGS}>{title}</{cls.BOOK_ENDINGS}>\n') wrapper.write(dset.html) wrapper.write('\n') return stream.getvalue().decode('utf-8')
HTMLFormat
python
getsentry__sentry
src/sentry/utils/email/signer.py
{ "start": 213, "end": 1695 }
class ____(Signer):
    """
    Generate a signature composed of only lowercase letters.

    WARNING: Do not use this for anything that needs to be cryptographically
    secure! This loses entropy and has a much higher chance of collision
    because it drops to lowercase letters. For our purposes, this lack of
    entropy is ok and doesn't pose a risk.

    NOTE: This is needed strictly for signatures used in email addresses. Some
    clients (Airmail) treat email addresses as case-insensitive and send the
    value as all lowercase.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        kwargs.setdefault("algorithm", "sha1")
        super().__init__(*args, **kwargs)

    def signature(self, value: str | bytes, key: str | bytes | None = None) -> str:
        return super().signature(value, key=key).lower()

    def unsign(self, signed_value: str) -> str:
        # This `unsign` is identical to subclass except for the lower-casing
        # See: https://github.com/django/django/blob/1.6.11/django/core/signing.py#L165-L172
        signed_value = force_str(signed_value)
        if self.sep not in signed_value:
            raise BadSignature(f'No "{self.sep}" found in value')
        value, sig = signed_value.rsplit(self.sep, 1)
        if not constant_time_compare(sig.lower(), self.signature(value)):
            raise BadSignature(f'Signature "{sig}" does not match')
        return force_str(value)
_CaseInsensitiveSigner
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_isbn10.py
{ "start": 1566, "end": 3819 }
class ____(ColumnMapExpectation): """Expect column values to be valid ISBN10 format.""" # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "well_formed_isbn10": [ "0-521-22151-X", "0-521-29366-9", "052122151X", "0521221 51X", ], "malformed_isbn10": [ "", "0-521-22151", "978-3-16-148410-0", "This is not a valid ISBN10", ], }, "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "well_formed_isbn10"}, "out": {"success": True}, }, { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "malformed_isbn10"}, "out": {"success": False}, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.valid_isbn10" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly",) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "maturity": "experimental", "tags": ["experimental", "hackathon", "typed-entities"], "contributors": [ "@voidforall", ], "requirements": ["isbnlib"], } if __name__ == "__main__": ExpectColumnValuesToBeValidIsbn10().print_diagnostic_checklist()
ExpectColumnValuesToBeValidIsbn10
python
huggingface__transformers
tests/models/time_series_transformer/test_modeling_time_series_transformer.py
{ "start": 19689, "end": 23247 }
class ____(unittest.TestCase): def test_inference_no_head(self): model = TimeSeriesTransformerModel.from_pretrained("huggingface/time-series-transformer-tourism-monthly").to( torch_device ) batch = prepare_batch() with torch.no_grad(): output = model( past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], static_real_features=batch["static_real_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], ).last_hidden_state expected_shape = torch.Size((64, model.config.context_length, model.config.d_model)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.8196, -1.5131, 1.4620], [1.1268, -1.3238, 1.5997], [1.5098, -1.0715, 1.7359]], device=torch_device ) torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) def test_inference_head(self): model = TimeSeriesTransformerForPrediction.from_pretrained( "huggingface/time-series-transformer-tourism-monthly" ).to(torch_device) batch = prepare_batch("val-batch.pt") with torch.no_grad(): output = model( past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], static_real_features=batch["static_real_features"], future_time_features=batch["future_time_features"], ).encoder_last_hidden_state expected_shape = torch.Size((64, model.config.context_length, model.config.d_model)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[-1.2957, -1.0280, -0.6045], [-0.7017, -0.8193, -0.3717], [-1.0449, -0.8149, 0.1405]], device=torch_device ) torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) def test_seq_to_seq_generation(self): model = TimeSeriesTransformerForPrediction.from_pretrained( "huggingface/time-series-transformer-tourism-monthly" ).to(torch_device) batch = prepare_batch("val-batch.pt") with torch.no_grad(): outputs = model.generate( static_categorical_features=batch["static_categorical_features"], static_real_features=batch["static_real_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], ) expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length)) self.assertEqual(outputs.sequences.shape, expected_shape) expected_slice = torch.tensor([2825.2749, 3584.9207, 6763.9951], device=torch_device) mean_prediction = outputs.sequences.mean(dim=1) torch.testing.assert_close(mean_prediction[0, -3:], expected_slice, rtol=1e-1, atol=1e-1)
TimeSeriesTransformerModelIntegrationTests
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_ean.py
{ "start": 1834, "end": 4379 }
class ____(ColumnMapExpectation): """Expect column values to be valid EAN (International Article Number).""" # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "all_valid": [ "73513537", "978-0-471-11709-4", "5901234123457", "4070071967072", ], "some_other": [ "73513537", "978-0-471-11709-4", "5901234123457", "abcd", ], }, "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "all_valid"}, "out": { "success": True, }, }, { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "some_other", "mostly": 1}, "out": { "success": False, }, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.to_be_valid_ean" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly",) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "maturity": "experimental", "tags": [ "hackathon-22", "experimental", "typed-entities", ], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@szecsip", # Don't forget to add your github handle here! ], "requirements": ["python-stdnum"], } if __name__ == "__main__": ExpectColumnValuesToBeValidEan().print_diagnostic_checklist()
ExpectColumnValuesToBeValidEan
python
spyder-ide__spyder
external-deps/python-lsp-server/test/plugins/test_type_definition.py
{ "start": 276, "end": 1471 }
class ____: a: int b: int def main() -> None: l0 = list(1, 2) my_pair = IntPair(a=10, b=20) print(f"Original pair: {my_pair}") """ def test_type_definitions(config, workspace) -> None: # Over 'IntPair' in 'main' cursor_pos = {"line": 10, "character": 14} # The definition of 'IntPair' def_range = { "start": {"line": 3, "character": 6}, "end": {"line": 3, "character": 13}, } doc = Document(DOC_URI, workspace, DOC) assert [{"uri": DOC_URI, "range": def_range}] == pylsp_type_definition( config, doc, cursor_pos ) def test_builtin_definition(config, workspace) -> None: # Over 'list' in main cursor_pos = {"line": 8, "character": 9} doc = Document(DOC_URI, workspace, DOC) defns = pylsp_type_definition(config, doc, cursor_pos) assert len(defns) == 1 assert defns[0]["uri"].endswith("builtins.pyi") def test_mutli_file_type_definitions(config, workspace, tmpdir) -> None: # Create a dummy module out of the workspace's root_path and try to get # a definition on it in another file placed next to it. module_content = """\ from dataclasses import dataclass @dataclass
IntPair
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/dataclass4.py
{ "start": 836, "end": 1015 }
class ____(DC3): # This should not generate an error because # aa replaces aa in DC3, and it's ordered # before the params with default values. aa: C2 @dataclass
DC5
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/partitions/utils/time_window.py
{ "start": 1583, "end": 2189 }
class ____: def __init__(self, time_window: TimeWindow, status: PartitionRangeStatus): self.time_window = time_window self.status = status def __repr__(self): return f"({self.time_window.start} - {self.time_window.end}): {self.status.value}" def __eq__(self, other): return ( isinstance(other, PartitionTimeWindowStatus) and self.time_window == other.time_window and self.status == other.status ) @whitelist_for_serdes( storage_name="TimeWindow", # For back-compat with existing serdes )
PartitionTimeWindowStatus
python
pyqtgraph__pyqtgraph
pyqtgraph/flowchart/library/Operators.py
{ "start": 3036, "end": 3247 }
class ____(BinOpNode): """Returns A // B. Does not check input types.""" nodeName = 'FloorDivide' def __init__(self, name): BinOpNode.__init__(self, name, '__floordiv__')
FloorDivideNode
python
Lightning-AI__lightning
src/lightning/pytorch/cli.py
{ "start": 3465, "end": 9393 }
class ____(ArgumentParser): """Extension of jsonargparse's ArgumentParser for pytorch-lightning.""" def __init__( self, *args: Any, description: str = "Lightning Trainer command line tool", env_prefix: str = "PL", default_env: bool = False, **kwargs: Any, ) -> None: """Initialize argument parser that supports configuration file input. For full details of accepted arguments see `ArgumentParser.__init__ <https://jsonargparse.readthedocs.io/en/stable/#jsonargparse.ArgumentParser.__init__>`_. Args: description: Description of the tool shown when running ``--help``. env_prefix: Prefix for environment variables. Set ``default_env=True`` to enable env parsing. default_env: Whether to parse environment variables. """ if not _JSONARGPARSE_SIGNATURES_AVAILABLE: raise ModuleNotFoundError(f"{_JSONARGPARSE_SIGNATURES_AVAILABLE}") super().__init__(*args, description=description, env_prefix=env_prefix, default_env=default_env, **kwargs) self.callback_keys: list[str] = [] # separate optimizers and lr schedulers to know which were added self._optimizers: dict[str, tuple[Union[type, tuple[type, ...]], str]] = {} self._lr_schedulers: dict[str, tuple[Union[type, tuple[type, ...]], str]] = {} def add_lightning_class_args( self, lightning_class: Union[ Callable[..., Union[Trainer, LightningModule, LightningDataModule, Callback]], type[Trainer], type[LightningModule], type[LightningDataModule], type[Callback], ], nested_key: str, subclass_mode: bool = False, required: bool = True, ) -> list[str]: """Adds arguments from a lightning class to a nested key of the parser. Args: lightning_class: A callable or any subclass of {Trainer, LightningModule, LightningDataModule, Callback}. nested_key: Name of the nested namespace to store arguments. subclass_mode: Whether allow any subclass of the given class. required: Whether the argument group is required. Returns: A list with the names of the class arguments added. """ if callable(lightning_class) and not isinstance(lightning_class, type): lightning_class = class_from_function(lightning_class) if isinstance(lightning_class, type) and issubclass( lightning_class, (Trainer, LightningModule, LightningDataModule, Callback) ): if issubclass(lightning_class, Callback): self.callback_keys.append(nested_key) if subclass_mode: return self.add_subclass_arguments(lightning_class, nested_key, fail_untyped=False, required=required) return self.add_class_arguments( lightning_class, nested_key, fail_untyped=False, instantiate=not issubclass(lightning_class, Trainer), sub_configs=True, ) raise MisconfigurationException( f"Cannot add arguments from: {lightning_class}. You should provide either a callable or a subclass of: " "Trainer, LightningModule, LightningDataModule, or Callback." ) def add_optimizer_args( self, optimizer_class: Union[type[Optimizer], tuple[type[Optimizer], ...]] = (Optimizer,), nested_key: str = "optimizer", link_to: str = "AUTOMATIC", ) -> None: """Adds arguments from an optimizer class to a nested key of the parser. Args: optimizer_class: Any subclass of :class:`torch.optim.Optimizer`. Use tuple to allow subclasses. nested_key: Name of the nested namespace to store arguments. link_to: Dot notation of a parser key to set arguments or AUTOMATIC. 
""" if isinstance(optimizer_class, tuple): assert all(issubclass(o, Optimizer) for o in optimizer_class) else: assert issubclass(optimizer_class, Optimizer) kwargs: dict[str, Any] = {"instantiate": False, "fail_untyped": False, "skip": {"params"}} if isinstance(optimizer_class, tuple): self.add_subclass_arguments(optimizer_class, nested_key, **kwargs) else: self.add_class_arguments(optimizer_class, nested_key, sub_configs=True, **kwargs) self._optimizers[nested_key] = (optimizer_class, link_to) def add_lr_scheduler_args( self, lr_scheduler_class: Union[LRSchedulerType, tuple[LRSchedulerType, ...]] = LRSchedulerTypeTuple, nested_key: str = "lr_scheduler", link_to: str = "AUTOMATIC", ) -> None: """Adds arguments from a learning rate scheduler class to a nested key of the parser. Args: lr_scheduler_class: Any subclass of ``torch.optim.lr_scheduler.{_LRScheduler, ReduceLROnPlateau}``. Use tuple to allow subclasses. nested_key: Name of the nested namespace to store arguments. link_to: Dot notation of a parser key to set arguments or AUTOMATIC. """ if isinstance(lr_scheduler_class, tuple): assert all(issubclass(o, LRSchedulerTypeTuple) for o in lr_scheduler_class) else: assert issubclass(lr_scheduler_class, LRSchedulerTypeTuple) kwargs: dict[str, Any] = {"instantiate": False, "fail_untyped": False, "skip": {"optimizer"}} if isinstance(lr_scheduler_class, tuple): self.add_subclass_arguments(lr_scheduler_class, nested_key, **kwargs) else: self.add_class_arguments(lr_scheduler_class, nested_key, sub_configs=True, **kwargs) self._lr_schedulers[nested_key] = (lr_scheduler_class, link_to)
LightningArgumentParser
python
jmcnamara__XlsxWriter
xlsxwriter/test/worksheet/test_write_page_margins.py
{ "start": 301, "end": 3572 }
class ____(unittest.TestCase): """ Test the Worksheet _write_page_margins() method. """ def setUp(self): self.fh = StringIO() self.worksheet = Worksheet() self.worksheet._set_filehandle(self.fh) def test_write_page_margins(self): """Test the _write_page_margins() method""" self.worksheet._write_page_margins() exp = """<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>""" got = self.fh.getvalue() self.assertEqual(exp, got) def test_write_page_margins_default(self): """Test the _write_page_margins() method with default margins""" self.worksheet.set_margins() self.worksheet._write_page_margins() exp = """<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>""" got = self.fh.getvalue() self.assertEqual(exp, got) def test_write_page_margins_left(self): """Test the _write_page_margins() method with left margin""" self.worksheet.set_margins(left=0.5) self.worksheet._write_page_margins() exp = """<pageMargins left="0.5" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>""" got = self.fh.getvalue() self.assertEqual(exp, got) def test_write_page_margins_right(self): """Test the _write_page_margins() method with right margin""" self.worksheet.set_margins(right=0.5) self.worksheet._write_page_margins() exp = """<pageMargins left="0.7" right="0.5" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>""" got = self.fh.getvalue() self.assertEqual(exp, got) def test_write_page_margins_top(self): """Test the _write_page_margins() method with top margin""" self.worksheet.set_margins(top=0.5) self.worksheet._write_page_margins() exp = """<pageMargins left="0.7" right="0.7" top="0.5" bottom="0.75" header="0.3" footer="0.3"/>""" got = self.fh.getvalue() self.assertEqual(exp, got) def test_write_page_margins_bottom(self): """Test the _write_page_margins() method with bottom margin""" self.worksheet.set_margins(bottom=0.5) self.worksheet._write_page_margins() exp = """<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.5" header="0.3" footer="0.3"/>""" got = self.fh.getvalue() self.assertEqual(exp, got) def test_write_page_margins_header(self): """Test the _write_page_margins() method with header margin""" self.worksheet.set_header(margin=0.5) self.worksheet._write_page_margins() exp = """<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.5" footer="0.3"/>""" got = self.fh.getvalue() self.assertEqual(exp, got) def test_write_page_margins_footer(self): """Test the _write_page_margins() method with footer margin""" self.worksheet.set_footer(margin=0.5) self.worksheet._write_page_margins() exp = """<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.5"/>""" got = self.fh.getvalue() self.assertEqual(exp, got)
TestWritePageMargins
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_axis33.py
{ "start": 315, "end": 1570 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_axis33.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "line"}) chart.axis_ids = [143682944, 143946496] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart.add_series({"values": "=Sheet1!$B$1:$B$5"}) chart.add_series({"values": "=Sheet1!$C$1:$C$5"}) chart.set_x_axis( {"name": "XXX", "name_font": {"rotation": -45, "baseline": -1}} ) chart.set_y_axis( {"name": "YYY", "name_font": {"rotation": -45, "baseline": -1}} ) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
tiangolo__fastapi
docs_src/dependencies/tutorial013_an_py310.py
{ "start": 284, "end": 937 }
class ____(SQLModel, table=True): id: int | None = Field(default=None, primary_key=True) name: str app = FastAPI() def get_session(): with Session(engine) as session: yield session def get_user(user_id: int, session: Annotated[Session, Depends(get_session)]): user = session.get(User, user_id) if not user: raise HTTPException(status_code=403, detail="Not authorized") def generate_stream(query: str): for ch in query: yield ch time.sleep(0.1) @app.get("/generate", dependencies=[Depends(get_user)]) def generate(query: str): return StreamingResponse(content=generate_stream(query))
User
python
scipy__scipy
scipy/special/tests/test_orthogonal.py
{ "start": 2722, "end": 4473 }
class ____: def test_gegenbauer(self): a = 5*np.random.random() - 0.5 if np.any(a == 0): a = -0.2 Ca0 = orth.gegenbauer(0,a) Ca1 = orth.gegenbauer(1,a) Ca2 = orth.gegenbauer(2,a) Ca3 = orth.gegenbauer(3,a) Ca4 = orth.gegenbauer(4,a) Ca5 = orth.gegenbauer(5,a) assert_allclose(Ca0.c, array([1]), atol=1.5e-13, rtol=0) assert_allclose(Ca1.c, array([2*a, 0]), atol=1.5e-13, rtol=0) assert_allclose(Ca2.c, array([2*a*(a + 1), 0, -a]), atol=1.5e-13, rtol=0) assert_allclose(Ca3.c, array([4*sc.poch(a, 3), 0,-6*a*(a + 1), 0])/3.0, atol=1.5e-11, rtol=0) assert_allclose(Ca4.c, array([4*sc.poch(a, 4), 0, -12*sc.poch(a, 3), 0, 3*a*(a + 1)])/6.0, atol=1.5e-11, rtol=0) assert_allclose(Ca5.c, array([4*sc.poch(a, 5), 0, -20*sc.poch(a, 4), 0, 15*sc.poch(a, 3), 0])/15.0, atol=1.5e-11, rtol=0) @pytest.mark.parametrize('a', [0, 1]) def test_n_zero_gh8888(self, a): # gh-8888 reported that gegenbauer(0, 0) returns NaN polynomial Cn0 = orth.gegenbauer(0, a) assert_equal(Cn0.c, np.asarray([1.])) def test_valid_alpha(self): # Check input validation of `alpha` message = '`alpha` must be a finite number greater...' with pytest.raises(ValueError, match=message): orth.gegenbauer(0, np.nan) with pytest.raises(ValueError, match=message): orth.gegenbauer(1, -0.5) with pytest.raises(ValueError, match=message): orth.gegenbauer(2, -np.inf)
TestGegenbauer
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/links/translate.py
{ "start": 3097, "end": 3411 }
class ____(BaseGoogleLink): """ Helper class for constructing Translation Legacy Model link. Legacy Models are created and managed by AutoML API. """ name = "Translation Legacy Model" key = "translation_legacy_model" format_str = TRANSLATION_LEGACY_MODEL_LINK
TranslationLegacyModelLink
python
huggingface__transformers
src/transformers/models/gemma3n/modeling_gemma3n.py
{ "start": 5519, "end": 6409 }
class ____(nn.Module): def __init__(self, dim: int, eps: float = 1e-6, with_scale: bool = True): super().__init__() self.eps = eps self.with_scale = with_scale if self.with_scale: self.weight = nn.Parameter(torch.ones(dim)) else: self.register_buffer("weight", torch.tensor(1.0), persistent=False) def _norm(self, x): return x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) def forward(self, x: torch.Tensor) -> torch.Tensor: # Llama does x.to(float16) * w whilst Gemma2 is (x * w).to(float16) # See https://github.com/huggingface/transformers/pull/29402 output = self._norm(x.float()) * self.weight.float() return output.type_as(x) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.eps}" # ==== Audio Encoder ====
Gemma3nRMSNorm
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/function3.py
{ "start": 141, "end": 286 }
class ____: def method(self) -> None: pass # This should generate an error. func1: Callable[[float], None] = TestClass.method
TestClass
python
bokeh__bokeh
src/bokeh/models/ui/icons.py
{ "start": 2060, "end": 2518 }
class ____(UIElement): """ An abstract base class for icon elements. """ # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) size = Either(Int, FontSize, default="1em", help=""" The size of the icon. This can be either a number of pixels, or a CSS length string (see https://developer.mozilla.org/en-US/docs/Web/CSS/length). """)
Icon
python
spyder-ide__spyder
spyder/plugins/pylint/main_widget.py
{ "start": 2299, "end": 2366 }
class ____: Main = "main_section"
PylintWidgetMainToolbarSections
python
pytorch__pytorch
torch/distributed/elastic/metrics/api.py
{ "start": 749, "end": 952 }
class ____: __slots__ = ["params"] def __init__(self, params: dict[str, str] | None = None): self.params = params if self.params is None: self.params = {}
MetricsConfig
python
google__pytype
pytype/pyi/parser_test.py
{ "start": 76039, "end": 76621 }
class ____(parser_test_base.ParserTestBase): def test_basic(self): self.check( """ from typing import NewType X = NewType('X', int) """, """ X = newtype_X_0 class newtype_X_0(int): def __init__(self, val: int) -> None: ... """, ) def test_fullname(self): self.check( """ import typing X = typing.NewType('X', int) """, """ import typing X = newtype_X_0 class newtype_X_0(int): def __init__(self, val: int) -> None: ... """, )
NewTypeTest
python
google__flatbuffers
python/flatbuffers/number_types.py
{ "start": 1406, "end": 1553 }
class ____(object): bytewidth = 4 min_val = 0 max_val = (2**32) - 1 py_type = int name = "uint32" packer_type = packer.uint32
Uint32Flags
python
huggingface__transformers
tests/quantization/fp_quant_integration/test_fp_quant.py
{ "start": 986, "end": 2025 }
class ____(unittest.TestCase):
    def test_to_dict(self):
        """
        Simple test that checks that a config converted to a dict is the same as the config
        object
        """
        quantization_config = FPQuantConfig()
        config_to_dict = quantization_config.to_dict()

        for key in config_to_dict:
            self.assertEqual(getattr(quantization_config, key), config_to_dict[key])

    def test_from_dict(self):
        """
        Simple test that checks that a dict converted to a config object is the same as the
        dict
        """
        dict = {"modules_to_not_convert": ["embed_tokens", "lm_head"], "quant_method": "fp_quant"}
        quantization_config = FPQuantConfig.from_dict(dict)

        self.assertEqual(dict["modules_to_not_convert"], quantization_config.modules_to_not_convert)
        self.assertEqual(dict["quant_method"], quantization_config.quant_method)


@slow
@require_torch_accelerator
@require_fp_quant
@require_accelerate
FPQuantConfigTest
python
walkccc__LeetCode
solutions/2595. Number of Even and Odd Bits/2595.py
{ "start": 0, "end": 195 }
class ____: def evenOddBit(self, n: int) -> list[int]: ans = [0] * 2 i = 0 # 0 := even, 1 := odd while n > 0: ans[i] += n & 1 n >>= 1 i ^= 1 return ans
Solution
python
getsentry__sentry
src/sentry/models/apitoken.py
{ "start": 1893, "end": 4008 }
class ____(ControlOutboxProducingManager["ApiToken"]):
    def create(self, *args, **kwargs):
        token_type: AuthTokenType | None = kwargs.get("token_type", None)

        # Typically the .create() method is called with `refresh_token=None` as an
        # argument when we specifically do not want a refresh_token.
        #
        # But if it is not None or not specified, we should generate a token since
        # that is the expected behavior... the refresh_token field on ApiToken has
        # a default of generate_token()
        #
        # TODO(mdtro): All of these if/else statements will be cleaned up at a later time
        # to use a match statement on the AuthTokenType. Move each of the various token type
        # create calls one at a time.
        if "refresh_token" in kwargs:
            plaintext_refresh_token = kwargs["refresh_token"]
        else:
            plaintext_refresh_token = generate_token()

        if token_type == AuthTokenType.USER:
            plaintext_token = generate_token(token_type=AuthTokenType.USER)
            plaintext_refresh_token = None  # user auth tokens do not have refresh tokens
        else:
            # to maintain compatibility with current
            # code that currently calls create with token= specified
            if "token" in kwargs:
                plaintext_token = kwargs["token"]
            else:
                plaintext_token = generate_token()

        kwargs["hashed_token"] = hashlib.sha256(plaintext_token.encode()).hexdigest()

        if plaintext_refresh_token:
            kwargs["hashed_refresh_token"] = hashlib.sha256(
                plaintext_refresh_token.encode()
            ).hexdigest()

        kwargs["token"] = plaintext_token
        kwargs["refresh_token"] = plaintext_refresh_token

        api_token = super().create(*args, **kwargs)

        # Store the plaintext tokens for one-time retrieval
        api_token._set_plaintext_token(token=plaintext_token)
        api_token._set_plaintext_refresh_token(token=plaintext_refresh_token)

        return api_token


@control_silo_model
ApiTokenManager
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/sqltypes.py
{ "start": 69085, "end": 72382 }
class ____(TypeDecorator[object]):
    """Holds Python objects, which are serialized using pickle.

    PickleType builds upon the Binary type to apply Python's
    ``pickle.dumps()`` to incoming objects, and ``pickle.loads()`` on
    the way out, allowing any pickleable Python object to be stored as
    a serialized binary field.

    To allow ORM change events to propagate for elements associated
    with :class:`.PickleType`, see :ref:`mutable_toplevel`.

    """

    impl = LargeBinary
    cache_ok = True

    def __init__(
        self,
        protocol: int = pickle.HIGHEST_PROTOCOL,
        pickler: Any = None,
        comparator: Optional[Callable[[Any, Any], bool]] = None,
        impl: Optional[_TypeEngineArgument[Any]] = None,
    ):
        """
        Construct a PickleType.

        :param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``.

        :param pickler: defaults to pickle. May be any object with
          pickle-compatible ``dumps`` and ``loads`` methods.

        :param comparator: a 2-arg callable predicate used
          to compare values of this type. If left as ``None``,
          the Python "equals" operator is used to compare values.

        :param impl: A binary-storing :class:`_types.TypeEngine` class or
          instance to use in place of the default :class:`_types.LargeBinary`.
          For example the :class:`_mysql.LONGBLOB` class may be more effective
          when using MySQL.

          .. versionadded:: 1.4.20

        """
        self.protocol = protocol
        self.pickler = pickler or pickle
        self.comparator = comparator
        super().__init__()

        if impl:
            # custom impl is not necessarily a LargeBinary subclass.
            # make an exception to typing for this
            self.impl = to_instance(impl)  # type: ignore

    def __reduce__(self):
        return PickleType, (self.protocol, None, self.comparator)

    def bind_processor(self, dialect):
        impl_processor = self.impl_instance.bind_processor(dialect)
        dumps = self.pickler.dumps
        protocol = self.protocol
        if impl_processor:
            fixed_impl_processor = impl_processor

            def process(value):
                if value is not None:
                    value = dumps(value, protocol)
                return fixed_impl_processor(value)

        else:

            def process(value):
                if value is not None:
                    value = dumps(value, protocol)
                return value

        return process

    def result_processor(self, dialect, coltype):
        impl_processor = self.impl_instance.result_processor(dialect, coltype)
        loads = self.pickler.loads
        if impl_processor:
            fixed_impl_processor = impl_processor

            def process(value):
                value = fixed_impl_processor(value)
                if value is None:
                    return None
                return loads(value)

        else:

            def process(value):
                if value is None:
                    return None
                return loads(value)

        return process

    def compare_values(self, x, y):
        if self.comparator:
            return self.comparator(x, y)
        else:
            return x == y
PickleType
python
huggingface__transformers
src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
{ "start": 27883, "end": 28782 }
class ____(nn.Module): def __init__(self, config: OmDetTurboConfig): super().__init__() self.layers = nn.ModuleList([OmDetTurboEncoderLayer(config) for _ in range(config.encoder_layers)]) def forward( self, src, src_mask=None, pos_embed=None, output_attentions: bool = False ) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]]]: hidden_states = src attention = () if output_attentions else None for layer in self.layers: hidden_states = layer( hidden_states, attention_mask=src_mask, position_embeddings=pos_embed, output_attentions=output_attentions, ) if output_attentions: attention = attention + (hidden_states[1],) hidden_states = hidden_states[0] return hidden_states, attention
OmDetTurboEncoder
python
ray-project__ray
python/ray/tests/conftest_docker.py
{ "start": 838, "end": 7631 }
class ____(wrappers.Container): def ready(self): self._container.reload() if self.status == "exited": from pytest_docker_tools.exceptions import ContainerFailed raise ContainerFailed( self, f"Container {self.name} has already exited before " "we noticed it was ready", ) if self.status != "running": return False networks = self._container.attrs["NetworkSettings"]["Networks"] for (_, n) in networks.items(): if not n["IPAddress"]: return False if "Ray runtime started" in super().logs(): return True return False def client(self): from http.client import HTTPConnection port = self.ports["8000/tcp"][0] return HTTPConnection(f"localhost:{port}") def print_logs(self): for (name, content) in self.get_files("/tmp"): print(f"===== log start: {name} ====") print(content.decode()) # This allows us to assign static ips to docker containers ipam_config = docker.types.IPAMConfig( pool_configs=[ docker.types.IPAMPool(subnet="192.168.52.0/24", gateway="192.168.52.254") ] ) gcs_network = network(driver="bridge", ipam=ipam_config) redis_image = fetch(repository="redis:latest") redis = container( image="{redis_image.id}", network="{gcs_network.name}", command=("redis-server --save 60 1 --loglevel warning"), ) head_node_vol = volume() worker_node_vol = volume() head_node_container_name = "gcs" + str(int(time.time())) def gen_head_node(envs): return container( image="rayproject/ray:ha_integration", name=head_node_container_name, network="{gcs_network.name}", command=[ "ray", "start", "--head", "--block", "--num-cpus", "0", # Fix the port of raylet to make sure raylet restarts at the same # ip:port is treated as a different raylet. "--node-manager-port", "9379", "--dashboard-host", "0.0.0.0", ], volumes={"{head_node_vol.name}": {"bind": "/tmp", "mode": "rw"}}, environment=envs, wrapper_class=Container, ports={ "8000/tcp": None, }, # volumes={ # "/tmp/ray/": {"bind": "/tmp/ray/", "mode": "rw"} # }, ) def gen_worker_node(envs, num_cpus): return container( image="rayproject/ray:ha_integration", network="{gcs_network.name}", command=[ "ray", "start", "--address", build_address(head_node_container_name, 6379), "--block", # Fix the port of raylet to make sure raylet restarts at the same # ip:port is treated as a different raylet. "--node-manager-port", "9379", "--num-cpus", f"{num_cpus}", ], volumes={"{worker_node_vol.name}": {"bind": "/tmp", "mode": "rw"}}, environment=envs, wrapper_class=Container, ports={ "8000/tcp": None, }, # volumes={ # "/tmp/ray/": {"bind": "/tmp/ray/", "mode": "rw"} # }, ) head_node = gen_head_node( { "RAY_REDIS_ADDRESS": "{redis.ips.primary}:6379", "RAY_raylet_client_num_connect_attempts": "10", "RAY_raylet_client_connect_timeout_milliseconds": "100", } ) worker_node = gen_worker_node( envs={ "RAY_REDIS_ADDRESS": "{redis.ips.primary}:6379", "RAY_raylet_client_num_connect_attempts": "10", "RAY_raylet_client_connect_timeout_milliseconds": "100", }, num_cpus=8, ) @pytest.fixture def docker_cluster(head_node, worker_node): yield (head_node, worker_node) def run_in_container(cmds: List[List[str]], container_id: str): """Run a list of commands in the specified container. Checks that each docker command executed without error. Returns the output from each command as a list. 
""" outputs = [] for cmd in cmds: docker_cmd = ["docker", "exec", container_id] + cmd print(f"Executing command: {docker_cmd}", time.time()) try: resp = subprocess.check_output(docker_cmd, stderr=subprocess.STDOUT) output = resp.decode("utf-8").strip() print(f"Output: {output}") outputs.append(output) except subprocess.CalledProcessError as e: error_output = e.output.decode("utf-8") if e.output else "No output" print(f"Command failed with return code {e.returncode}") print(f"Full error output:\n{error_output}") raise return outputs IMAGE_NAME = "rayproject/ray:runtime_env_container" NESTED_IMAGE_NAME = "rayproject/ray:runtime_env_container_nested" @pytest.fixture(scope="session") def podman_docker_cluster(): start_container_command = [ "docker", "run", "-d", "--privileged", "-v", "/var/run/docker.sock:/var/run/docker.sock", "-v", "/var/lib/containers:/var/lib/containers", # For testing environment variables "--env", "RAY_TEST_ABC=1", "--env", "TEST_ABC=1", IMAGE_NAME, "tail", "-f", "/dev/null", ] try: container_id = subprocess.check_output( start_container_command, stderr=subprocess.STDOUT ).decode("utf-8") except subprocess.CalledProcessError as e: error_output = e.output.decode("utf-8") if e.output else "No output" print(f"Command failed with return code {e.returncode}") print(f"Full error output:\n{error_output}") raise container_id = container_id.strip() # Get group id that owns the docker socket file. Add user `ray` to # group to get necessary permissions for pulling an image from # docker's local storage into podman docker_group_id = run_in_container( [["stat", "-c", "%g", "/var/run/docker.sock"]], container_id )[0] run_in_container( [ ["id"], ["sudo", "groupadd", "-g", docker_group_id, "docker"], ["sudo", "usermod", "-aG", "docker", "ray"], ["podman", "pull", f"docker-daemon:{IMAGE_NAME}"], ], container_id, ) # Add custom file to new image tagged `runtime_env_container_nested`, # which can be read by Ray actors / Serve deployments to verify the # container runtime env plugin. Also add serve application that will # be imported by the telemetry test. serve_app = """ from ray import serve @serve.deployment
Container
python
Textualize__textual
src/textual/reactive.py
{ "start": 3304, "end": 14780 }
class ____(Generic[ReactiveType]): """Reactive descriptor. Args: default: A default value or callable that returns a default. layout: Perform a layout on change. repaint: Perform a repaint on change. init: Call watchers on initialize (post mount). always_update: Call watchers even when the new value equals the old value. compute: Run compute methods when attribute is changed. recompose: Compose the widget again when the attribute changes. bindings: Refresh bindings when the reactive changes. toggle_class: An optional TCSS classname(s) to toggle based on the truthiness of the value. """ _reactives: ClassVar[dict[str, object]] = {} def __init__( self, default: ReactiveType | Callable[[], ReactiveType] | Initialize[ReactiveType], *, layout: bool = False, repaint: bool = True, init: bool = False, always_update: bool = False, compute: bool = True, recompose: bool = False, bindings: bool = False, toggle_class: str | None = None, ) -> None: self._default = default self._layout = layout self._repaint = repaint self._init = init self._always_update = always_update self._run_compute = compute self._recompose = recompose self._bindings = bindings self._toggle_class = toggle_class self._owner: Type[MessageTarget] | None = None self.name: str def __rich_repr__(self) -> rich.repr.Result: yield None, self._default yield "layout", self._layout, False yield "repaint", self._repaint, True yield "init", self._init, False yield "always_update", self._always_update, False yield "compute", self._run_compute, True yield "recompose", self._recompose, False yield "bindings", self._bindings, False yield "name", getattr(self, "name", None), None @classmethod def _clear_watchers(cls, obj: Reactable) -> None: """Clear any watchers on a given object. Args: obj: A reactive object. """ try: getattr(obj, "__watchers").clear() except AttributeError: pass @property def owner(self) -> Type[MessageTarget]: """The owner (class) where the reactive was declared.""" assert self._owner is not None return self._owner def _initialize_reactive(self, obj: Reactable, name: str) -> None: """Initialized a reactive attribute on an object. Args: obj: An object with reactive attributes. name: Name of attribute. """ _rich_traceback_omit = True internal_name = f"_reactive_{name}" if hasattr(obj, internal_name): # Attribute already has a value return compute_method = getattr(obj, self.compute_name, None) if compute_method is not None and self._init: default = compute_method() else: default_or_callable = self._default default = ( ( default_or_callable(obj) if isinstance(default_or_callable, Initialize) else default_or_callable() ) if callable(default_or_callable) else default_or_callable ) setattr(obj, internal_name, default) if (toggle_class := self._toggle_class) is not None: obj.set_class(bool(default), *toggle_class.split()) if self._init: self._check_watchers(obj, name, default) @classmethod def _initialize_object(cls, obj: Reactable) -> None: """Set defaults and call any watchers / computes for the first time. Args: obj: An object with Reactive descriptors """ _rich_traceback_omit = True for name, reactive in obj._reactives.items(): reactive._initialize_reactive(obj, name) @classmethod def _reset_object(cls, obj: object) -> None: """Reset reactive structures on object (to avoid reference cycles). Args: obj: A reactive object. 
""" getattr(obj, "__watchers", {}).clear() getattr(obj, "__computes", []).clear() def __set_name__(self, owner: Type[MessageTarget], name: str) -> None: # Check for compute method self._owner = owner public_compute = f"compute_{name}" private_compute = f"_compute_{name}" compute_name = ( private_compute if hasattr(owner, private_compute) else public_compute ) if hasattr(owner, compute_name): # Compute methods are stored in a list called `__computes` try: computes = getattr(owner, "__computes") except AttributeError: computes = [] setattr(owner, "__computes", computes) computes.append(name) # The name of the attribute self.name = name # The internal name where the attribute's value is stored self.internal_name = f"_reactive_{name}" self.compute_name = compute_name default = self._default setattr(owner, f"_default_{name}", default) if TYPE_CHECKING: @overload def __get__( self: Reactive[ReactiveType], obj: ReactableType, obj_type: type[ReactableType], ) -> ReactiveType: ... @overload def __get__( self: Reactive[ReactiveType], obj: None, obj_type: type[ReactableType] ) -> Reactive[ReactiveType]: ... def __get__( self: Reactive[ReactiveType], obj: Reactable | None, obj_type: type[ReactableType], ) -> Reactive[ReactiveType] | ReactiveType: _rich_traceback_omit = True if obj is None: # obj is None means we are invoking the descriptor via the class, and not the instance return self if not hasattr(obj, "id"): raise ReactiveError( f"Node is missing data; Check you are calling super().__init__(...) in the {obj.__class__.__name__}() constructor, before getting reactives." ) if not hasattr(obj, internal_name := self.internal_name): self._initialize_reactive(obj, self.name) if hasattr(obj, self.compute_name): value: ReactiveType old_value = getattr(obj, internal_name) value = getattr(obj, self.compute_name)() setattr(obj, internal_name, value) self._check_watchers(obj, self.name, old_value) return value else: return getattr(obj, internal_name) def _set(self, obj: Reactable, value: ReactiveType, always: bool = False) -> None: _rich_traceback_omit = True if not hasattr(obj, "_id"): raise ReactiveError( f"Node is missing data; Check you are calling super().__init__(...) in the {obj.__class__.__name__}() constructor, before setting reactives." ) if isinstance(value, _Mutated): value = value.value always = True self._initialize_reactive(obj, self.name) if hasattr(obj, self.compute_name): raise AttributeError( f"Can't set {obj}.{self.name!r}; reactive attributes with a compute method are read-only" ) name = self.name current_value = getattr(obj, name) # Check for private and public validate functions. 
private_validate_function = getattr(obj, f"_validate_{name}", None) if callable(private_validate_function): value = private_validate_function(value) public_validate_function = getattr(obj, f"validate_{name}", None) if callable(public_validate_function): value = public_validate_function(value) # Toggle the classes using the value's truthiness if (toggle_class := self._toggle_class) is not None: obj.set_class(bool(value), *toggle_class.split()) # If the value has changed, or this is the first time setting the value if always or self._always_update or current_value != value: # Store the internal value setattr(obj, self.internal_name, value) # Check all watchers self._check_watchers(obj, name, current_value) if self._run_compute: self._compute(obj) if self._bindings: obj.refresh_bindings() # Refresh according to descriptor flags if self._layout or self._repaint or self._recompose: obj.refresh( repaint=self._repaint, layout=self._layout, recompose=self._recompose, ) def __set__(self, obj: Reactable, value: ReactiveType) -> None: _rich_traceback_omit = True self._set(obj, value) @classmethod def _check_watchers(cls, obj: Reactable, name: str, old_value: Any) -> None: """Check watchers, and call watch methods / computes Args: obj: The reactable object. name: Attribute name. old_value: The old (previous) value of the attribute. """ _rich_traceback_omit = True # Get the current value. internal_name = f"_reactive_{name}" value = getattr(obj, internal_name) private_watch_function = getattr(obj, f"_watch_{name}", None) if callable(private_watch_function): invoke_watcher(obj, private_watch_function, old_value, value) public_watch_function = getattr(obj, f"watch_{name}", None) if callable(public_watch_function): invoke_watcher(obj, public_watch_function, old_value, value) # Process "global" watchers watchers: list[tuple[Reactable, WatchCallbackType]] watchers = getattr(obj, "__watchers", {}).get(name, []) # Remove any watchers for reactables that have since closed if watchers: watchers[:] = [ (reactable, callback) for reactable, callback in watchers if not reactable._closing ] for reactable, callback in watchers: with reactable.prevent(*obj._prevent_message_types_stack[-1]): invoke_watcher(reactable, callback, old_value, value) @classmethod def _compute(cls, obj: Reactable) -> None: """Invoke all computes. Args: obj: Reactable object. """ _rich_traceback_guard = True for compute in obj._reactives.keys() & obj._computes: try: compute_method = getattr(obj, f"compute_{compute}") except AttributeError: try: compute_method = getattr(obj, f"_compute_{compute}") except AttributeError: continue current_value = getattr( obj, f"_reactive_{compute}", getattr(obj, f"_default_{compute}", None) ) value = compute_method() setattr(obj, f"_reactive_{compute}", value) if value != current_value: cls._check_watchers(obj, compute, current_value)
Reactive
python
kubernetes-client__python
kubernetes/client/models/v1_container_restart_rule_on_exit_codes.py
{ "start": 383, "end": 5305 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'operator': 'str', 'values': 'list[int]' } attribute_map = { 'operator': 'operator', 'values': 'values' } def __init__(self, operator=None, values=None, local_vars_configuration=None): # noqa: E501 """V1ContainerRestartRuleOnExitCodes - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._operator = None self._values = None self.discriminator = None self.operator = operator if values is not None: self.values = values @property def operator(self): """Gets the operator of this V1ContainerRestartRuleOnExitCodes. # noqa: E501 Represents the relationship between the container exit code(s) and the specified values. Possible values are: - In: the requirement is satisfied if the container exit code is in the set of specified values. - NotIn: the requirement is satisfied if the container exit code is not in the set of specified values. # noqa: E501 :return: The operator of this V1ContainerRestartRuleOnExitCodes. # noqa: E501 :rtype: str """ return self._operator @operator.setter def operator(self, operator): """Sets the operator of this V1ContainerRestartRuleOnExitCodes. Represents the relationship between the container exit code(s) and the specified values. Possible values are: - In: the requirement is satisfied if the container exit code is in the set of specified values. - NotIn: the requirement is satisfied if the container exit code is not in the set of specified values. # noqa: E501 :param operator: The operator of this V1ContainerRestartRuleOnExitCodes. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and operator is None: # noqa: E501 raise ValueError("Invalid value for `operator`, must not be `None`") # noqa: E501 self._operator = operator @property def values(self): """Gets the values of this V1ContainerRestartRuleOnExitCodes. # noqa: E501 Specifies the set of values to check for container exit codes. At most 255 elements are allowed. # noqa: E501 :return: The values of this V1ContainerRestartRuleOnExitCodes. # noqa: E501 :rtype: list[int] """ return self._values @values.setter def values(self, values): """Sets the values of this V1ContainerRestartRuleOnExitCodes. Specifies the set of values to check for container exit codes. At most 255 elements are allowed. # noqa: E501 :param values: The values of this V1ContainerRestartRuleOnExitCodes. 
# noqa: E501 :type: list[int] """ self._values = values def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1ContainerRestartRuleOnExitCodes): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1ContainerRestartRuleOnExitCodes): return True return self.to_dict() != other.to_dict()
V1ContainerRestartRuleOnExitCodes
python
dagster-io__dagster
examples/project_analytics/dagster_pypi/resources.py
{ "start": 1321, "end": 2151 }
class ____(PyPiResource):
    table: str = Field(description="BigQuery public table to query")

    def get_pypi_download_counts(self, date) -> pd.DataFrame:
        print("Fetching from bigquery for a given date: ", date)
        client = bigquery.Client()

        query = f"""
            SELECT
                date_trunc(file_downloads.timestamp, DAY) AS download_date,
                file_downloads.file.project AS project_name,
                file_downloads.file.version as project_version,
                COUNT(*) AS file_downloads_count
            FROM `{self.table}` AS file_downloads
            WHERE (file_downloads.file.project LIKE '%dagster%')
                AND date_trunc(file_downloads.timestamp, DAY) = '{date}'
            GROUP BY 1,2,3
            ORDER BY 1,2,3
        """

        return client.query(query).result().to_dataframe()
PyPiBigQueryResource
python
PyCQA__pylint
doc/data/messages/s/super-without-brackets/bad.py
{ "start": 0, "end": 78 }
class ____:
    @staticmethod
    def temp():
        print("Soup is hot!")
Soup
python
PrefectHQ__prefect
src/prefect/server/orchestration/rules.py
{ "start": 44390, "end": 44504 }
class ____(
    BaseUniversalTransform[orm_models.TaskRun, core.TaskRunPolicy]
):
    pass
TaskRunUniversalTransform
python
ansible__ansible
test/units/module_utils/urls/test_fetch_url.py
{ "start": 574, "end": 620 }
class ____(AnsibleModuleExit):
    pass
ExitJson
python
huggingface__transformers
src/transformers/models/convbert/modeling_convbert.py
{ "start": 20804, "end": 21600 }
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


# Copied from transformers.models.xlm.modeling_xlm.XLMSequenceSummary with XLM->ConvBert
ConvBertPredictionHeadTransform
python
tensorflow__tensorflow
tensorflow/python/eager/monitoring.py
{ "start": 5904, "end": 6488 }
class ____(object): """CounterCell stores each value of a Counter.""" __slots__ = ["_cell"] def __init__(self, cell): """Creates a new CounterCell. Args: cell: A c pointer of TFE_MonitoringCounterCell. """ self._cell = cell def increase_by(self, value): """Atomically increments the value. Args: value: non-negative value. """ pywrap_tfe.TFE_MonitoringCounterCellIncrementBy(self._cell, value) def value(self): """Retrieves the current value.""" return pywrap_tfe.TFE_MonitoringCounterCellValue(self._cell)
CounterCell
python
apache__airflow
providers/openlineage/src/airflow/providers/openlineage/plugins/listener.py
{ "start": 3032, "end": 33191 }
class ____: """OpenLineage listener sends events on task instance and dag run starts, completes and failures.""" def __init__(self): self._executor = None self.log = logging.getLogger(__name__) self.extractor_manager = ExtractorManager() self.adapter = OpenLineageAdapter() if AIRFLOW_V_3_0_PLUS: @hookimpl def on_task_instance_running( self, previous_state: TaskInstanceState, task_instance: RuntimeTaskInstance, ): self.log.debug("OpenLineage listener got notification about task instance start") context = task_instance.get_template_context() task = context["task"] if TYPE_CHECKING: assert task dagrun = context["dag_run"] dag = context["dag"] start_date = task_instance.start_date self._on_task_instance_running(task_instance, dag, dagrun, task, start_date) else: @hookimpl def on_task_instance_running( # type: ignore[misc] self, previous_state: TaskInstanceState, task_instance: TaskInstance, session: Session, ) -> None: from airflow.providers.openlineage.utils.utils import is_ti_rescheduled_already if not getattr(task_instance, "task", None) is not None: self.log.warning( "No task set for TI object task_id: %s - dag_id: %s - run_id %s", task_instance.task_id, task_instance.dag_id, task_instance.run_id, ) return self.log.debug("OpenLineage listener got notification about task instance start") task = task_instance.task if TYPE_CHECKING: assert task start_date = task_instance.start_date if task_instance.start_date else timezone.utcnow() if is_ti_rescheduled_already(task_instance): self.log.debug("Skipping this instance of rescheduled task - START event was emitted already") return self._on_task_instance_running(task_instance, task.dag, task_instance.dag_run, task, start_date) def _on_task_instance_running( self, task_instance: RuntimeTaskInstance | TaskInstance, dag, dagrun, task, start_date: datetime ): if is_operator_disabled(task): self.log.debug( "Skipping OpenLineage event emission for operator `%s` " "due to its presence in [openlineage] disabled_for_operators.", task.task_type, ) return if not is_selective_lineage_enabled(task): self.log.debug( "Skipping OpenLineage event emission for task `%s` " "due to lack of explicit lineage enablement for task or DAG while " "[openlineage] selective_enable is on.", task_instance.task_id, ) return # Needs to be calculated outside of inner method so that it gets cached for usage in fork processes debug_facet = get_airflow_debug_facet() @print_warning(self.log) def on_running(): context = task_instance.get_template_context() if hasattr(context, "task_reschedule_count") and context["task_reschedule_count"] > 0: self.log.debug("Skipping this instance of rescheduled task - START event was emitted already") return date = dagrun.logical_date if AIRFLOW_V_3_0_PLUS and date is None: date = dagrun.run_after clear_number = 0 if hasattr(dagrun, "clear_number"): clear_number = dagrun.clear_number parent_run_id = self.adapter.build_dag_run_id( dag_id=task_instance.dag_id, logical_date=date, clear_number=clear_number, ) task_uuid = self.adapter.build_task_instance_run_id( dag_id=task_instance.dag_id, task_id=task_instance.task_id, try_number=task_instance.try_number, logical_date=date, map_index=task_instance.map_index, ) event_type = RunState.RUNNING.value.lower() operator_name = task.task_type.lower() data_interval_start = dagrun.data_interval_start if isinstance(data_interval_start, datetime): data_interval_start = data_interval_start.isoformat() data_interval_end = dagrun.data_interval_end if isinstance(data_interval_end, datetime): data_interval_end = 
data_interval_end.isoformat() doc, doc_type = get_task_documentation(task) if not doc: doc, doc_type = get_dag_documentation(dag) with Stats.timer(f"ol.extract.{event_type}.{operator_name}"): task_metadata = self.extractor_manager.extract_metadata( dagrun=dagrun, task=task, task_instance_state=TaskInstanceState.RUNNING ) redacted_event = self.adapter.start_task( run_id=task_uuid, job_name=get_job_name(task_instance), job_description=doc, job_description_type=doc_type, event_time=start_date.isoformat(), nominal_start_time=data_interval_start, nominal_end_time=data_interval_end, # If task owner is default ("airflow"), use DAG owner instead that may have more details owners=[x.strip() for x in (task if task.owner != "airflow" else dag).owner.split(",")], tags=dag.tags, task=task_metadata, run_facets={ **get_user_provided_run_facets(task_instance, TaskInstanceState.RUNNING), **get_task_parent_run_facet( parent_run_id=parent_run_id, parent_job_name=dag.dag_id, dr_conf=getattr(dagrun, "conf", {}), ), **get_airflow_mapped_task_facet(task_instance), **get_airflow_run_facet(dagrun, dag, task_instance, task, task_uuid), **debug_facet, }, ) Stats.gauge( f"ol.event.size.{event_type}.{operator_name}", len(Serde.to_json(redacted_event).encode("utf-8")), ) self._execute(on_running, "on_running", use_fork=True) if AIRFLOW_V_3_0_PLUS: @hookimpl def on_task_instance_success( self, previous_state: TaskInstanceState, task_instance: RuntimeTaskInstance | TaskInstance ) -> None: self.log.debug("OpenLineage listener got notification about task instance success") if isinstance(task_instance, TaskInstance): self._on_task_instance_manual_state_change( ti=task_instance, dagrun=task_instance.dag_run, ti_state=TaskInstanceState.SUCCESS, ) return context = task_instance.get_template_context() task = context["task"] if TYPE_CHECKING: assert task dagrun = context["dag_run"] dag = context["dag"] self._on_task_instance_success(task_instance, dag, dagrun, task) else: @hookimpl def on_task_instance_success( # type: ignore[misc] self, previous_state: TaskInstanceState, task_instance: TaskInstance, session: Session, ) -> None: self.log.debug("OpenLineage listener got notification about task instance success") task = task_instance.task if TYPE_CHECKING: assert task self._on_task_instance_success(task_instance, task.dag, task_instance.dag_run, task) def _on_task_instance_success(self, task_instance: RuntimeTaskInstance, dag, dagrun, task): end_date = timezone.utcnow() if is_operator_disabled(task): self.log.debug( "Skipping OpenLineage event emission for operator `%s` " "due to its presence in [openlineage] disabled_for_operators.", task.task_type, ) return if not is_selective_lineage_enabled(task): self.log.debug( "Skipping OpenLineage event emission for task `%s` " "due to lack of explicit lineage enablement for task or DAG while " "[openlineage] selective_enable is on.", task_instance.task_id, ) return @print_warning(self.log) def on_success(): date = dagrun.logical_date if AIRFLOW_V_3_0_PLUS and date is None: date = dagrun.run_after parent_run_id = self.adapter.build_dag_run_id( dag_id=task_instance.dag_id, logical_date=date, clear_number=dagrun.clear_number, ) task_uuid = self.adapter.build_task_instance_run_id( dag_id=task_instance.dag_id, task_id=task_instance.task_id, try_number=task_instance.try_number, logical_date=date, map_index=task_instance.map_index, ) event_type = RunState.COMPLETE.value.lower() operator_name = task.task_type.lower() data_interval_start = dagrun.data_interval_start if 
isinstance(data_interval_start, datetime): data_interval_start = data_interval_start.isoformat() data_interval_end = dagrun.data_interval_end if isinstance(data_interval_end, datetime): data_interval_end = data_interval_end.isoformat() doc, doc_type = get_task_documentation(task) if not doc: doc, doc_type = get_dag_documentation(dag) with Stats.timer(f"ol.extract.{event_type}.{operator_name}"): task_metadata = self.extractor_manager.extract_metadata( dagrun=dagrun, task=task, task_instance_state=TaskInstanceState.SUCCESS, task_instance=task_instance, ) redacted_event = self.adapter.complete_task( run_id=task_uuid, job_name=get_job_name(task_instance), end_time=end_date.isoformat(), task=task_metadata, # If task owner is default ("airflow"), use DAG owner instead that may have more details owners=[x.strip() for x in (task if task.owner != "airflow" else dag).owner.split(",")], tags=dag.tags, job_description=doc, job_description_type=doc_type, nominal_start_time=data_interval_start, nominal_end_time=data_interval_end, run_facets={ **get_user_provided_run_facets(task_instance, TaskInstanceState.SUCCESS), **get_task_parent_run_facet( parent_run_id=parent_run_id, parent_job_name=dag.dag_id, dr_conf=getattr(dagrun, "conf", {}), ), **get_airflow_run_facet(dagrun, dag, task_instance, task, task_uuid), **get_airflow_debug_facet(), }, ) Stats.gauge( f"ol.event.size.{event_type}.{operator_name}", len(Serde.to_json(redacted_event).encode("utf-8")), ) self._execute(on_success, "on_success", use_fork=True) if AIRFLOW_V_3_0_PLUS: @hookimpl def on_task_instance_failed( self, previous_state: TaskInstanceState, task_instance: RuntimeTaskInstance | TaskInstance, error: None | str | BaseException, ) -> None: self.log.debug("OpenLineage listener got notification about task instance failure") if isinstance(task_instance, TaskInstance): self._on_task_instance_manual_state_change( ti=task_instance, dagrun=task_instance.dag_run, ti_state=TaskInstanceState.FAILED, error=error, ) return context = task_instance.get_template_context() task = context["task"] if TYPE_CHECKING: assert task dagrun = context["dag_run"] dag = context["dag"] self._on_task_instance_failed(task_instance, dag, dagrun, task, error) else: @hookimpl def on_task_instance_failed( # type: ignore[misc] self, previous_state: TaskInstanceState, task_instance: TaskInstance, error: None | str | BaseException, session: Session, ) -> None: self.log.debug("OpenLineage listener got notification about task instance failure") task = task_instance.task if TYPE_CHECKING: assert task self._on_task_instance_failed(task_instance, task.dag, task_instance.dag_run, task, error) def _on_task_instance_failed( self, task_instance: TaskInstance | RuntimeTaskInstance, dag, dagrun, task, error: None | str | BaseException = None, ) -> None: end_date = timezone.utcnow() if is_operator_disabled(task): self.log.debug( "Skipping OpenLineage event emission for operator `%s` " "due to its presence in [openlineage] disabled_for_operators.", task.task_type, ) return if not is_selective_lineage_enabled(task): self.log.debug( "Skipping OpenLineage event emission for task `%s` " "due to lack of explicit lineage enablement for task or DAG while " "[openlineage] selective_enable is on.", task_instance.task_id, ) return @print_warning(self.log) def on_failure(): date = dagrun.logical_date if AIRFLOW_V_3_0_PLUS and date is None: date = dagrun.run_after parent_run_id = self.adapter.build_dag_run_id( dag_id=task_instance.dag_id, logical_date=date, clear_number=dagrun.clear_number, ) task_uuid 
= self.adapter.build_task_instance_run_id( dag_id=task_instance.dag_id, task_id=task_instance.task_id, try_number=task_instance.try_number, logical_date=date, map_index=task_instance.map_index, ) event_type = RunState.FAIL.value.lower() operator_name = task.task_type.lower() data_interval_start = dagrun.data_interval_start if isinstance(data_interval_start, datetime): data_interval_start = data_interval_start.isoformat() data_interval_end = dagrun.data_interval_end if isinstance(data_interval_end, datetime): data_interval_end = data_interval_end.isoformat() doc, doc_type = get_task_documentation(task) if not doc: doc, doc_type = get_dag_documentation(dag) with Stats.timer(f"ol.extract.{event_type}.{operator_name}"): task_metadata = self.extractor_manager.extract_metadata( dagrun=dagrun, task=task, task_instance_state=TaskInstanceState.FAILED, task_instance=task_instance, ) redacted_event = self.adapter.fail_task( run_id=task_uuid, job_name=get_job_name(task_instance), end_time=end_date.isoformat(), task=task_metadata, error=error, nominal_start_time=data_interval_start, nominal_end_time=data_interval_end, tags=dag.tags, # If task owner is default ("airflow"), use DAG owner instead that may have more details owners=[x.strip() for x in (task if task.owner != "airflow" else dag).owner.split(",")], job_description=doc, job_description_type=doc_type, run_facets={ **get_user_provided_run_facets(task_instance, TaskInstanceState.FAILED), **get_task_parent_run_facet( parent_run_id=parent_run_id, parent_job_name=dag.dag_id, dr_conf=getattr(dagrun, "conf", {}), ), **get_airflow_run_facet(dagrun, dag, task_instance, task, task_uuid), **get_airflow_debug_facet(), }, ) Stats.gauge( f"ol.event.size.{event_type}.{operator_name}", len(Serde.to_json(redacted_event).encode("utf-8")), ) self._execute(on_failure, "on_failure", use_fork=True) def _on_task_instance_manual_state_change( self, ti: TaskInstance, dagrun: DagRun, ti_state: TaskInstanceState, error: None | str | BaseException = None, ) -> None: self.log.debug("`_on_task_instance_manual_state_change` was called with state: `%s`.", ti_state) end_date = timezone.utcnow() @print_warning(self.log) def on_state_change(): date = dagrun.logical_date or dagrun.run_after parent_run_id = self.adapter.build_dag_run_id( dag_id=ti.dag_id, logical_date=date, clear_number=dagrun.clear_number, ) task_uuid = self.adapter.build_task_instance_run_id( dag_id=ti.dag_id, task_id=ti.task_id, try_number=ti.try_number, logical_date=date, map_index=ti.map_index, ) adapter_kwargs = { "run_id": task_uuid, "job_name": get_job_name(ti), "end_time": end_date.isoformat(), "task": OperatorLineage(), "nominal_start_time": None, "nominal_end_time": None, "tags": None, "owners": None, "job_description": None, "job_description_type": None, "run_facets": { **get_task_parent_run_facet( parent_run_id=parent_run_id, parent_job_name=ti.dag_id, dr_conf=getattr(dagrun, "conf", {}), ), **get_airflow_debug_facet(), }, } if ti_state == TaskInstanceState.FAILED: event_type = RunState.FAIL.value.lower() redacted_event = self.adapter.fail_task(**adapter_kwargs, error=error) elif ti_state == TaskInstanceState.SUCCESS: event_type = RunState.COMPLETE.value.lower() redacted_event = self.adapter.complete_task(**adapter_kwargs) else: raise ValueError(f"Unsupported ti_state: `{ti_state}`.") operator_name = ti.operator.lower() Stats.gauge( f"ol.event.size.{event_type}.{operator_name}", len(Serde.to_json(redacted_event).encode("utf-8")), ) self._execute(on_state_change, "on_state_change", use_fork=True) def 
_execute(self, callable, callable_name: str, use_fork: bool = False): if use_fork: self._fork_execute(callable, callable_name) else: callable() def _terminate_with_wait(self, process: psutil.Process): process.terminate() try: # Waiting for max 3 seconds to make sure process can clean up before being killed. process.wait(timeout=3) except psutil.TimeoutExpired: # If it's not dead by then, then force kill. process.kill() def _fork_execute(self, callable, callable_name: str): self.log.debug("Will fork to execute OpenLineage process.") pid = os.fork() if pid: process = psutil.Process(pid) try: self.log.debug("Waiting for process %s", pid) process.wait(conf.execution_timeout()) except psutil.TimeoutExpired: self.log.warning( "OpenLineage process with pid `%s` expired and will be terminated by listener. " "This has no impact on actual task execution status.", pid, ) self._terminate_with_wait(process) except BaseException: # Kill the process directly. self._terminate_with_wait(process) self.log.debug("Process with pid %s finished - parent", pid) else: setproctitle(getproctitle() + " - OpenLineage - " + callable_name) if not AIRFLOW_V_3_0_PLUS: configure_orm(disable_connection_pool=True) self.log.debug("Executing OpenLineage process - %s - pid %s", callable_name, os.getpid()) callable() self.log.debug("Process with current pid finishes after %s", callable_name) os._exit(0) @property def executor(self) -> ProcessPoolExecutor: if not self._executor: self._executor = ProcessPoolExecutor( max_workers=conf.dag_state_change_process_pool_size(), initializer=_executor_initializer, ) return self._executor @hookimpl def on_starting(self, component) -> None: self.log.debug("on_starting: %s", component.__class__.__name__) @hookimpl def before_stopping(self, component) -> None: self.log.debug("before_stopping: %s", component.__class__.__name__) with timeout(30): self.executor.shutdown(wait=True) @hookimpl def on_dag_run_running(self, dag_run: DagRun, msg: str) -> None: try: if dag_run.dag and not is_selective_lineage_enabled(dag_run.dag): self.log.debug( "Skipping OpenLineage event emission for DAG `%s` " "due to lack of explicit lineage enablement for DAG while " "[openlineage] selective_enable is on.", dag_run.dag_id, ) return if not self.executor: self.log.debug("Executor have not started before `on_dag_run_running`") return data_interval_start = ( dag_run.data_interval_start.isoformat() if dag_run.data_interval_start else None ) data_interval_end = dag_run.data_interval_end.isoformat() if dag_run.data_interval_end else None date = dag_run.logical_date if AIRFLOW_V_3_0_PLUS and date is None: date = dag_run.run_after doc, doc_type = get_dag_documentation(dag_run.dag) self.submit_callable( self.adapter.dag_started, dag_id=dag_run.dag_id, logical_date=date, start_date=dag_run.start_date, nominal_start_time=data_interval_start, nominal_end_time=data_interval_end, clear_number=dag_run.clear_number, owners=[x.strip() for x in dag_run.dag.owner.split(",")] if dag_run.dag else None, job_description=doc, job_description_type=doc_type, tags=dag_run.dag.tags if dag_run.dag else [], # AirflowJobFacet should be created outside ProcessPoolExecutor that pickles objects, # as it causes lack of some TaskGroup attributes and crashes event emission. 
job_facets=get_airflow_job_facet(dag_run=dag_run), run_facets={ **get_airflow_dag_run_facet(dag_run), **get_dag_parent_run_facet(getattr(dag_run, "conf", {})), }, ) except BaseException as e: self.log.warning("OpenLineage received exception in method on_dag_run_running", exc_info=e) @hookimpl def on_dag_run_success(self, dag_run: DagRun, msg: str) -> None: try: if dag_run.dag and not is_selective_lineage_enabled(dag_run.dag): self.log.debug( "Skipping OpenLineage event emission for DAG `%s` " "due to lack of explicit lineage enablement for DAG while " "[openlineage] selective_enable is on.", dag_run.dag_id, ) return if not self.executor: self.log.debug("Executor have not started before `on_dag_run_success`") return task_ids = DagRun._get_partial_task_ids(dag_run.dag) date = dag_run.logical_date if AIRFLOW_V_3_0_PLUS and date is None: date = dag_run.run_after data_interval_start = ( dag_run.data_interval_start.isoformat() if dag_run.data_interval_start else None ) data_interval_end = dag_run.data_interval_end.isoformat() if dag_run.data_interval_end else None doc, doc_type = get_dag_documentation(dag_run.dag) self.submit_callable( self.adapter.dag_success, dag_id=dag_run.dag_id, run_id=dag_run.run_id, end_date=dag_run.end_date, nominal_start_time=data_interval_start, nominal_end_time=data_interval_end, logical_date=date, clear_number=dag_run.clear_number, owners=[x.strip() for x in dag_run.dag.owner.split(",")] if dag_run.dag else None, tags=dag_run.dag.tags if dag_run.dag else [], job_description=doc, job_description_type=doc_type, task_ids=task_ids, dag_run_state=dag_run.get_state(), run_facets={ **get_airflow_dag_run_facet(dag_run), **get_dag_parent_run_facet(getattr(dag_run, "conf", {})), }, ) except BaseException as e: self.log.warning("OpenLineage received exception in method on_dag_run_success", exc_info=e) @hookimpl def on_dag_run_failed(self, dag_run: DagRun, msg: str) -> None: try: if dag_run.dag and not is_selective_lineage_enabled(dag_run.dag): self.log.debug( "Skipping OpenLineage event emission for DAG `%s` " "due to lack of explicit lineage enablement for DAG while " "[openlineage] selective_enable is on.", dag_run.dag_id, ) return if not self.executor: self.log.debug("Executor have not started before `on_dag_run_failed`") return task_ids = DagRun._get_partial_task_ids(dag_run.dag) date = dag_run.logical_date if AIRFLOW_V_3_0_PLUS and date is None: date = dag_run.run_after data_interval_start = ( dag_run.data_interval_start.isoformat() if dag_run.data_interval_start else None ) data_interval_end = dag_run.data_interval_end.isoformat() if dag_run.data_interval_end else None doc, doc_type = get_dag_documentation(dag_run.dag) self.submit_callable( self.adapter.dag_failed, dag_id=dag_run.dag_id, run_id=dag_run.run_id, end_date=dag_run.end_date, nominal_start_time=data_interval_start, nominal_end_time=data_interval_end, logical_date=date, clear_number=dag_run.clear_number, owners=[x.strip() for x in dag_run.dag.owner.split(",")] if dag_run.dag else None, tags=dag_run.dag.tags if dag_run.dag else [], job_description=doc, job_description_type=doc_type, dag_run_state=dag_run.get_state(), task_ids=task_ids, msg=msg, run_facets={ **get_airflow_dag_run_facet(dag_run), **get_dag_parent_run_facet(getattr(dag_run, "conf", {})), }, ) except BaseException as e: self.log.warning("OpenLineage received exception in method on_dag_run_failed", exc_info=e) def submit_callable(self, callable, *args, **kwargs): fut = self.executor.submit(callable, *args, **kwargs) 
fut.add_done_callback(self.log_submit_error) return fut def log_submit_error(self, fut): if fut.exception(): self.log.warning("Failed to submit method to executor", exc_info=fut.exception()) else: self.log.debug("Successfully submitted method to executor") def get_openlineage_listener() -> OpenLineageListener: """Get singleton listener manager.""" global _openlineage_listener if not _openlineage_listener: _openlineage_listener = OpenLineageListener() return _openlineage_listener
OpenLineageListener
python
realpython__materials
python-oop/starfleet_objects.py
{ "start": 35, "end": 410 }
class ____:
    def __init__(self, name, age, position, year_started):
        self.name = name
        self.age = age
        self.position = position
        self.year_started = year_started


kirk = Employee("James Kirk", 34, "Captain", 2265)
spock = Employee("Spock", 35, "Science Officer", 2254)
mccoy = Employee("Leonard McCoy", 137, "Chief Medical Officer", 2266)
Employee
python
huggingface__transformers
src/transformers/models/glm4/modeling_glm4.py
{ "start": 16383, "end": 19504 }
class ____(Glm4PreTrainedModel):
    def __init__(self, config: Glm4Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Glm4DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Glm4RotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @check_model_inputs()
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position: torch.Tensor = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_embeddings=position_embeddings,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


@auto_docstring
Glm4Model
python
scikit-learn__scikit-learn
sklearn/externals/array_api_compat/common/_typing.py
{ "start": 3439, "end": 3568 }
class ____(TypedDict):
    complex64: DType
    complex128: DType


# `__array_namespace_info__.dtypes(kind="numeric")`
DTypesComplex
python
conda__conda
conda/auxlib/exceptions.py
{ "start": 149, "end": 245 }
class ____: """Mixin to identify exceptions associated with the auxlib package."""
AuxlibError