Columns:
language: stringclasses (1 value)
repo: stringclasses (346 values)
path: stringlengths (6 to 201)
class_span: dict
source: stringlengths (21 to 2.38M)
target: stringlengths (1 to 96)
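Each row below pairs a class definition whose name has been masked with `____` (the `source` column) with the original class name (the `target` column); `class_span` gives the character offsets of the class body in the file at `path`. Below is a minimal sketch of consuming one row, assuming rows are plain dicts with exactly the fields listed above (the helper name and the literal example row are illustrative, not part of the dataset itself):

```python
# Minimal sketch: rebuild the original class definition from one row.
# Assumption: each row is a dict with the columns listed above, and the
# "____" placeholder in `source` stands for the class name in `target`.
def reconstruct_class(row: dict) -> str:
    """Fill the masked class name back into the source snippet."""
    return row["source"].replace("____", row["target"], 1)

# Example based on the first row below (source truncated for brevity).
example_row = {
    "language": "python",
    "repo": "encode__django-rest-framework",
    "path": "tests/models.py",
    "class_span": {"start": 1011, "end": 1411},
    "source": "class ____(RESTFrameworkModel): name = models.CharField(max_length=100) ...",
    "target": "ForeignKeyTarget",
}
print(reconstruct_class(example_row))
# -> "class ForeignKeyTarget(RESTFrameworkModel): name = models.CharField(max_length=100) ..."
```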
python
encode__django-rest-framework
tests/models.py
{ "start": 1011, "end": 1411 }
class ____(RESTFrameworkModel): name = models.CharField(max_length=100) def get_first_source(self): """Used for testing related field against a callable.""" return self.sources.all().order_by('pk')[0] @property def first_source(self): """Used for testing related field against a property.""" return self.sources.all().order_by('pk')[0]
ForeignKeyTarget
python
spack__spack
lib/spack/spack/spec.py
{ "start": 223154, "end": 223263 }
class ____(spack.error.SpecError): """Called for errors in Spec path-format strings."""
SpecFormatPathError
python
allegroai__clearml
clearml/backend_api/services/v2_9/events.py
{ "start": 42865, "end": 44571 }
class ____(Request): """ Delete all task event. *This cannot be undone!* :param task: Task ID :type task: str :param allow_locked: Allow deleting events even if the task is locked :type allow_locked: bool """ _service = "events" _action = "delete_for_task" _version = "2.9" _schema = { "definitions": {}, "properties": { "allow_locked": { "default": False, "description": "Allow deleting events even if the task is locked", "type": "boolean", }, "task": {"description": "Task ID", "type": "string"}, }, "required": ["task"], "type": "object", } def __init__(self, task: str, allow_locked: Optional[bool] = False, **kwargs: Any) -> None: super(DeleteForTaskRequest, self).__init__(**kwargs) self.task = task self.allow_locked = allow_locked @schema_property("task") def task(self) -> str: return self._property_task @task.setter def task(self, value: str) -> None: if value is None: self._property_task = None return self.assert_isinstance(value, "task", six.string_types) self._property_task = value @schema_property("allow_locked") def allow_locked(self) -> Optional[bool]: return self._property_allow_locked @allow_locked.setter def allow_locked(self, value: Optional[bool]) -> None: if value is None: self._property_allow_locked = None return self.assert_isinstance(value, "allow_locked", (bool,)) self._property_allow_locked = value
DeleteForTaskRequest
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_us_state.py
{ "start": 1787, "end": 4619 }
class ____(ColumnMapExpectation): """Expect values in this column to be valid state abbreviations. See https://pypi.org/project/us/ for more information. \ DC statehood is a perennial issue in data science, and the owners of the us repo addressed it differently than we have: https://github.com/unitedstates/python-us/issues/50. \ dc_statehood defaults to True, though can be overriden by end users """ # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "valid_states": [ "Kansas", "Minnesota", "Alabama", "Nebraska", "North Dakota", ], "invalid_states": [ "", "1234", "Weet Virginia", "Kansass", "123 Hawaii", ], }, "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "valid_states"}, "out": {"success": True}, }, { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "invalid_states"}, "out": {"success": False}, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.valid_us_state" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly",) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "maturity": "experimental", # "experimental", "beta", or "production" "tags": [ "hackathon", "typed-entities", ], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@luismdiaz01", "@derekma73", # Don't forget to add your github handle here! ], "requirements": ["us"], } if __name__ == "__main__": ExpectColumnValuesToBeValidUSState().print_diagnostic_checklist()
ExpectColumnValuesToBeValidUSState
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py
{ "start": 17340, "end": 18473 }
class ____(Benchmark): r""" Mishra 10 objective function. This class defines the Mishra 10 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: TODO - int(x) should be used instead of floor(x)!!!!! f_{\text{Mishra10}}({x}) = \left[ \lfloor x_1 \perp x_2 \rfloor - \lfloor x_1 \rfloor - \lfloor x_2 \rfloor \right]^2 with :math:`x_i \in [-10, 10]` for :math:`i =1, 2`. *Global optimum*: :math:`f(x) = 0` for :math:`x = [2, 2]` .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005 TODO line 1115 """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N)) self.global_optimum = [[2.0, 2.0]] self.fglob = 0.0 def fun(self, x, *args): self.nfev += 1 x1, x2 = int(x[0]), int(x[1]) f1 = x1 + x2 f2 = x1 * x2 return (f1 - f2) ** 2.0
Mishra10
python
euske__pdfminer
pdfminer/cmapdb.py
{ "start": 6490, "end": 11640 }
class ____(PSStackParser): def __init__(self, cmap, fp): PSStackParser.__init__(self, fp) self.cmap = cmap # some ToUnicode maps don't have "begincmap" keyword. self._in_cmap = True return def run(self): try: self.nextobject() except PSEOF: pass return KEYWORD_BEGINCMAP = KWD(b'begincmap') KEYWORD_ENDCMAP = KWD(b'endcmap') KEYWORD_USECMAP = KWD(b'usecmap') KEYWORD_DEF = KWD(b'def') KEYWORD_BEGINCODESPACERANGE = KWD(b'begincodespacerange') KEYWORD_ENDCODESPACERANGE = KWD(b'endcodespacerange') KEYWORD_BEGINCIDRANGE = KWD(b'begincidrange') KEYWORD_ENDCIDRANGE = KWD(b'endcidrange') KEYWORD_BEGINCIDCHAR = KWD(b'begincidchar') KEYWORD_ENDCIDCHAR = KWD(b'endcidchar') KEYWORD_BEGINBFRANGE = KWD(b'beginbfrange') KEYWORD_ENDBFRANGE = KWD(b'endbfrange') KEYWORD_BEGINBFCHAR = KWD(b'beginbfchar') KEYWORD_ENDBFCHAR = KWD(b'endbfchar') KEYWORD_BEGINNOTDEFRANGE = KWD(b'beginnotdefrange') KEYWORD_ENDNOTDEFRANGE = KWD(b'endnotdefrange') def do_keyword(self, pos, token): if token is self.KEYWORD_BEGINCMAP: self._in_cmap = True self.popall() return elif token is self.KEYWORD_ENDCMAP: self._in_cmap = False return if not self._in_cmap: return # if token is self.KEYWORD_DEF: try: ((_, k), (_, v)) = self.pop(2) self.cmap.set_attr(literal_name(k), v) except PSSyntaxError: pass return if token is self.KEYWORD_USECMAP: try: ((_, cmapname),) = self.pop(1) self.cmap.use_cmap(CMapDB.get_cmap(literal_name(cmapname))) except PSSyntaxError: pass except CMapDB.CMapNotFound: pass return if token is self.KEYWORD_BEGINCODESPACERANGE: self.popall() return if token is self.KEYWORD_ENDCODESPACERANGE: self.popall() return if token is self.KEYWORD_BEGINCIDRANGE: self.popall() return if token is self.KEYWORD_ENDCIDRANGE: objs = [obj for (__, obj) in self.popall()] for (s, e, cid) in choplist(3, objs): if (not isinstance(s, bytes) or not isinstance(e, bytes) or not isinstance(cid, int) or len(s) != len(e)): continue sprefix = s[:-4] eprefix = e[:-4] if sprefix != eprefix: continue svar = s[-4:] evar = e[-4:] s1 = nunpack(svar) e1 = nunpack(evar) vlen = len(svar) #assert s1 <= e1 for i in range(e1-s1+1): x = sprefix+struct.pack('>L', s1+i)[-vlen:] self.cmap.add_code2cid(x, cid+i) return if token is self.KEYWORD_BEGINCIDCHAR: self.popall() return if token is self.KEYWORD_ENDCIDCHAR: objs = [obj for (__, obj) in self.popall()] for (cid, code) in choplist(2, objs): if isinstance(code, bytes) and isinstance(cid, bytes): self.cmap.add_code2cid(code, nunpack(cid)) return if token is self.KEYWORD_BEGINBFRANGE: self.popall() return if token is self.KEYWORD_ENDBFRANGE: objs = [obj for (__, obj) in self.popall()] for (s, e, code) in choplist(3, objs): if (not isinstance(s, bytes) or not isinstance(e, bytes) or len(s) != len(e)): continue s1 = nunpack(s) e1 = nunpack(e) #assert s1 <= e1 if isinstance(code, list): for i in range(e1-s1+1): self.cmap.add_cid2unichr(s1+i, code[i]) else: var = code[-4:] base = nunpack(var) prefix = code[:-4] vlen = len(var) for i in range(e1-s1+1): x = prefix+struct.pack('>L', base+i)[-vlen:] self.cmap.add_cid2unichr(s1+i, x) return if token is self.KEYWORD_BEGINBFCHAR: self.popall() return if token is self.KEYWORD_ENDBFCHAR: objs = [obj for (__, obj) in self.popall()] for (cid, code) in choplist(2, objs): if isinstance(cid, bytes) and isinstance(code, bytes): self.cmap.add_cid2unichr(nunpack(cid), code) return if token is self.KEYWORD_BEGINNOTDEFRANGE: self.popall() return if token is self.KEYWORD_ENDNOTDEFRANGE: self.popall() return self.push((pos, token)) return ## CMapConverter ##
CMapParser
python
PyCQA__pylint
tests/functional/ext/redefined_variable_type/redefined_variable_type.py
{ "start": 2130, "end": 2296 }
class ____: async def funtion1(self): potato = 1 print(potato) async def funtion2(self): potato = {} print(potato)
AsyncFunctions
python
python-attrs__attrs
src/attr/validators.py
{ "start": 17871, "end": 20247 }
class ____: validator = attrib() msg = attrib( converter=default_if_none( "not_ validator child '{validator!r}' " "did not raise a captured error" ) ) exc_types = attrib( validator=deep_iterable( member_validator=_subclass_of(Exception), iterable_validator=instance_of(tuple), ), ) def __call__(self, inst, attr, value): try: self.validator(inst, attr, value) except self.exc_types: pass # suppress error to invert validity else: raise ValueError( self.msg.format( validator=self.validator, exc_types=self.exc_types, ), attr, self.validator, value, self.exc_types, ) def __repr__(self): return f"<not_ validator wrapping {self.validator!r}, capturing {self.exc_types!r}>" def not_(validator, *, msg=None, exc_types=(ValueError, TypeError)): """ A validator that wraps and logically 'inverts' the validator passed to it. It will raise a `ValueError` if the provided validator *doesn't* raise a `ValueError` or `TypeError` (by default), and will suppress the exception if the provided validator *does*. Intended to be used with existing validators to compose logic without needing to create inverted variants, for example, ``not_(in_(...))``. Args: validator: A validator to be logically inverted. msg (str): Message to raise if validator fails. Formatted with keys ``exc_types`` and ``validator``. exc_types (tuple[type, ...]): Exception type(s) to capture. Other types raised by child validators will not be intercepted and pass through. Raises: ValueError: With a human readable error message, the attribute (of type `attrs.Attribute`), the validator that failed to raise an exception, the value it got, and the expected exception types. .. versionadded:: 22.2.0 """ try: exc_types = tuple(exc_types) except TypeError: exc_types = (exc_types,) return _NotValidator(validator, msg, exc_types) @attrs(repr=False, slots=True, unsafe_hash=True)
_NotValidator
python
plotly__plotly.py
plotly/graph_objs/candlestick/increasing/_line.py
{ "start": 233, "end": 3039 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "candlestick.increasing" _path_str = "candlestick.increasing.line" _valid_props = {"color", "width"} @property def color(self): """ Sets the color of line bounding the box(es). The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def width(self): """ Sets the width (in px) of line bounding the box(es). The 'width' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["width"] @width.setter def width(self, val): self["width"] = val @property def _prop_descriptions(self): return """\ color Sets the color of line bounding the box(es). width Sets the width (in px) of line bounding the box(es). """ def __init__(self, arg=None, color=None, width=None, **kwargs): """ Construct a new Line object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.candlestick.increasing.Line` color Sets the color of line bounding the box(es). width Sets the width (in px) of line bounding the box(es). Returns ------- Line """ super().__init__("line") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.candlestick.increasing.Line constructor must be a dict or an instance of :class:`plotly.graph_objs.candlestick.increasing.Line`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("width", arg, width) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Line
python
apache__airflow
task-sdk/src/airflow/sdk/execution_time/callback_runner.py
{ "start": 1337, "end": 4122 }
class ____(Protocol): def __call__( self, func: Callable[P, R], outlet_events: OutletEventAccessorsProtocol, *, logger: logging.Logger | Logger, ) -> _ExecutionCallableRunner[P, R]: ... def create_executable_runner( func: Callable[P, R], outlet_events: OutletEventAccessorsProtocol, *, logger: logging.Logger | Logger, ) -> _ExecutionCallableRunner[P, R]: """ Run an execution callable against a task context and given arguments. If the callable is a simple function, this simply calls it with the supplied arguments (including the context). If the callable is a generator function, the generator is exhausted here, with the yielded values getting fed back into the task context automatically for execution. This convoluted implementation of inner class with closure is so *all* arguments passed to ``run()`` can be forwarded to the wrapped function. This is particularly important for the argument "self", which some use cases need to receive. This is not possible if this is implemented as a normal class, where "self" needs to point to the runner object, not the object bounded to the inner callable. :meta private: """ class _ExecutionCallableRunnerImpl(_ExecutionCallableRunner): @staticmethod def run(*args: P.args, **kwargs: P.kwargs) -> R: from airflow.sdk.definitions.asset.metadata import Metadata if not inspect.isgeneratorfunction(func): return func(*args, **kwargs) result: R if isinstance(logger, logging.Logger): def _warn_unknown(metadata): logger.warning("Ignoring unknown data of %r received from task", type(metadata)) logger.debug("Full yielded value: %r", metadata) else: def _warn_unknown(metadata): logger.warning("Ignoring unknown type received from task", type=type(metadata)) logger.debug("Full yielded value", metadata=metadata) def _run(): nonlocal result result = yield from func(*args, **kwargs) for metadata in _run(): if isinstance(metadata, Metadata): outlet_events[metadata.asset].extra.update(metadata.extra) if metadata.alias: outlet_events[metadata.alias].add(metadata.asset, extra=metadata.extra) else: _warn_unknown(metadata) return result # noqa: F821 # Ruff is not smart enough to know this is always set in _run(). return cast("_ExecutionCallableRunner[P, R]", _ExecutionCallableRunnerImpl)
ExecutionCallableRunner
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/metadata.py
{ "start": 4913, "end": 5084 }
class ____(graphene.Union): class Meta: types = (GrapheneLocalFileCodeReference, GrapheneUrlCodeReference) name = "SourceLocation"
GrapheneSourceLocation
python
more-itertools__more-itertools
more_itertools/more.py
{ "start": 166576, "end": 167301 }
class ____: __slots__ = ('iterator', 'link', 'lock') def __init__(self, iterable): it = iter(iterable) if isinstance(it, _concurrent_tee): self.iterator = it.iterator self.link = it.link self.lock = it.lock else: self.iterator = it self.link = [None, None] self.lock = Lock() def __iter__(self): return self def __next__(self): link = self.link if link[1] is None: with self.lock: if link[1] is None: link[0] = next(self.iterator) link[1] = [None, None] value, self.link = link return value
_concurrent_tee
python
joke2k__faker
tests/providers/test_address.py
{ "start": 58736, "end": 60821 }
class ____: """Test zh_CN address provider methods""" def test_postcode(self, faker, num_samples): for _ in range(num_samples): postcode = faker.postcode() assert isinstance(postcode, str) assert re.fullmatch(r"[1-9]\d{5}", postcode) def test_city_name(self, faker, num_samples): for _ in range(num_samples): city_name = faker.city_name() assert isinstance(city_name, str) assert city_name in ZhCnAddressProvider.cities def test_city_suffix(self, faker, num_samples): for _ in range(num_samples): city_suffix = faker.city_suffix() assert isinstance(city_suffix, str) assert city_suffix in ZhCnAddressProvider.city_suffixes def test_city(self, faker, num_samples): city_pattern: Pattern = re.compile(r".*?[市县]") for _ in range(num_samples): city = faker.city() assert isinstance(city, str) assert city_pattern.fullmatch(city) def test_province(self, faker, num_samples): for _ in range(num_samples): province = faker.province() assert isinstance(province, str) assert province in ZhCnAddressProvider.provinces def test_district(self, faker, num_samples): for _ in range(num_samples): district = faker.district() assert isinstance(district, str) assert district in ZhCnAddressProvider.districts def test_country(self, faker, num_samples): for _ in range(num_samples): country = faker.country() assert isinstance(country, str) assert country in ZhCnAddressProvider.countries def test_street_name(self, faker, num_samples): for _ in range(num_samples): street_name = faker.street_name() assert isinstance(street_name, str) def test_address(self, faker, num_samples): for _ in range(num_samples): address = faker.address() assert isinstance(address, str)
TestZhCn
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/asset_checks/asset_check_spec.py
{ "start": 1614, "end": 5522 }
class ____(IHaveNew, LegacyNamedTupleMixin): name: PublicAttr[str] asset_key: PublicAttr[AssetKey] description: PublicAttr[Optional[str]] additional_deps: PublicAttr[Iterable[LazyAssetDep]] blocking: PublicAttr[bool] metadata: PublicAttr[Mapping[str, Any]] automation_condition: PublicAttr[Optional[LazyAutomationCondition]] """Defines information about an asset check, except how to execute it. AssetCheckSpec is often used as an argument to decorators that decorator a function that can execute multiple checks - e.g. `@asset`, and `@multi_asset`. It defines one of the checks that will be executed inside that function. Args: name (str): Name of the check. asset (Union[AssetKey, Sequence[str], str, AssetsDefinition, SourceAsset]): The asset that the check applies to. description (Optional[str]): Description for the check. additional_deps (Optional[Iterable[AssetDep]]): Additional dependencies for the check. The check relies on these assets in some way, but the result of the check only applies to the asset specified by `asset`. For example, the check may test that `asset` has matching data with an asset in `additional_deps`. This field holds both `additional_deps` and `additional_ins` passed to @asset_check. blocking (bool): When enabled, if the check fails with severity `AssetCheckSeverity.ERROR`, then downstream assets won't execute. If this AssetCheckSpec is used in a multi-asset, that multi-asset is responsible for enforcing that downstream assets within the same step do not execute after a blocking asset check fails. metadata (Optional[Mapping[str, Any]]): A dict of static metadata for this asset check. """ def __new__( cls, name: str, *, asset: Union[CoercibleToAssetKey, "AssetsDefinition", "SourceAsset"], description: Optional[str] = None, additional_deps: Optional[Iterable["CoercibleToAssetDep"]] = None, blocking: bool = False, metadata: Optional[Mapping[str, Any]] = None, automation_condition: Optional["AutomationCondition[AssetCheckKey]"] = None, ): from dagster._core.definitions.assets.definition.asset_dep import ( coerce_to_deps_and_check_duplicates, ) asset_key = AssetKey.from_coercible_or_definition(asset) additional_asset_deps = coerce_to_deps_and_check_duplicates( additional_deps, AssetCheckKey(asset_key, name) ) for dep in additional_asset_deps: if dep.asset_key == asset_key: raise ValueError( f"Asset check {name} for asset {asset_key.to_string()} cannot have an additional " f"dependency on asset {asset_key.to_string()}." ) return super().__new__( cls, name=name, asset_key=asset_key, description=description, additional_deps=additional_asset_deps, blocking=blocking, metadata=metadata or {}, automation_condition=automation_condition, ) def get_python_identifier(self) -> str: """Returns a string uniquely identifying the asset check, that uses only the characters allowed in a Python identifier. """ return f"{self.asset_key.to_python_identifier()}_{self.name}".replace(".", "_") @property def key(self) -> AssetCheckKey: return AssetCheckKey(self.asset_key, self.name) def replace_key(self, key: AssetCheckKey) -> "AssetCheckSpec": return replace(self, asset_key=key.asset_key, name=key.name) def with_metadata(self, metadata: Mapping[str, Any]) -> "AssetCheckSpec": return replace(self, metadata=metadata)
AssetCheckSpec
python
aimacode__aima-python
nlp4e.py
{ "start": 3587, "end": 10798 }
class ____: def __init__(self, name, rules, lexicon): """A grammar has a set of rules and a lexicon. Each rule has a probability.""" self.name = name self.rules = rules self.lexicon = lexicon self.categories = defaultdict(list) for lhs in lexicon: for word, prob in lexicon[lhs]: self.categories[word].append((lhs, prob)) def rewrites_for(self, cat): """Return a sequence of possible rhs's that cat can be rewritten as.""" return self.rules.get(cat, ()) def isa(self, word, cat): """Return True iff word is of category cat""" return cat in [c for c, _ in self.categories[word]] def cnf_rules(self): """Returns the tuple (X, Y, Z, p) for rules in the form: X -> Y Z [p]""" cnf = [] for X, rules in self.rules.items(): for (Y, Z), p in rules: cnf.append((X, Y, Z, p)) return cnf def generate_random(self, S='S'): """Replace each token in S by a random entry in grammar (recursively). Returns a tuple of (sentence, probability).""" def rewrite(tokens, into): for token in tokens: if token in self.rules: non_terminal, prob = weighted_choice(self.rules[token]) into[1] *= prob rewrite(non_terminal, into) elif token in self.lexicon: terminal, prob = weighted_choice(self.lexicon[token]) into[0].append(terminal) into[1] *= prob else: into[0].append(token) return into rewritten_as, prob = rewrite(S.split(), [[], 1]) return (' '.join(rewritten_as), prob) def __repr__(self): return '<Grammar {}>'.format(self.name) E0 = Grammar('E0', Rules( # Grammar for E_0 [Figure 22.2] S='NP VP | S Conjunction S', NP='Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause', VP='Verb | VP NP | VP Adjective | VP PP | VP Adverb', PP='Preposition NP', RelClause='That VP'), Lexicon( # Lexicon for E_0 [Figure 22.3] Noun="stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east", Verb="is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel", # noqa Adjective="right | left | east | south | back | smelly | dead", Adverb="here | there | nearby | ahead | right | left | east | south | back", Pronoun="me | you | I | it", Name="John | Mary | Boston | Aristotle", Article="the | a | an", Preposition="to | in | on | near", Conjunction="and | or | but", Digit="0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9", That="that" )) E_ = Grammar('E_', # Trivial Grammar and lexicon for testing Rules( S='NP VP', NP='Art N | Pronoun', VP='V NP'), Lexicon( Art='the | a', N='man | woman | table | shoelace | saw', Pronoun='I | you | it', V='saw | liked | feel' )) E_NP_ = Grammar('E_NP_', # Another Trivial Grammar for testing Rules(NP='Adj NP | N'), Lexicon(Adj='happy | handsome | hairy', N='man')) E_Prob = ProbGrammar('E_Prob', # The Probabilistic Grammar from the notebook ProbRules( S="NP VP [0.6] | S Conjunction S [0.4]", NP="Pronoun [0.2] | Name [0.05] | Noun [0.2] | Article Noun [0.15] \ | Article Adjs Noun [0.1] | Digit [0.05] | NP PP [0.15] | NP RelClause [0.1]", VP="Verb [0.3] | VP NP [0.2] | VP Adjective [0.25] | VP PP [0.15] | VP Adverb [0.1]", Adjs="Adjective [0.5] | Adjective Adjs [0.5]", PP="Preposition NP [1]", RelClause="RelPro VP [1]" ), ProbLexicon( Verb="is [0.5] | say [0.3] | are [0.2]", Noun="robot [0.4] | sheep [0.4] | fence [0.2]", Adjective="good [0.5] | new [0.2] | sad [0.3]", Adverb="here [0.6] | lightly [0.1] | now [0.3]", Pronoun="me [0.3] | you [0.4] | he [0.3]", RelPro="that [0.5] | who [0.3] | which [0.2]", Name="john [0.4] | mary [0.4] | peter [0.2]", Article="the [0.5] | a [0.25] | an [0.25]", Preposition="to [0.4] | in [0.3] | at [0.3]", Conjunction="and [0.5] | or [0.2] | but [0.3]", Digit="0 [0.35] | 1 [0.35] | 2 [0.3]" )) E_Chomsky = Grammar('E_Prob_Chomsky', # A Grammar in Chomsky Normal Form Rules( S='NP VP', NP='Article Noun | Adjective Noun', VP='Verb NP | Verb Adjective', ), Lexicon( Article='the | a | an', Noun='robot | sheep | fence', Adjective='good | new | sad', Verb='is | say | are' )) E_Prob_Chomsky = ProbGrammar('E_Prob_Chomsky', # A Probabilistic Grammar in CNF ProbRules( S='NP VP [1]', NP='Article Noun [0.6] | Adjective Noun [0.4]', VP='Verb NP [0.5] | Verb Adjective [0.5]', ), ProbLexicon( Article='the [0.5] | a [0.25] | an [0.25]', Noun='robot [0.4] | sheep [0.4] | fence [0.2]', Adjective='good [0.5] | new [0.2] | sad [0.3]', Verb='is [0.5] | say [0.3] | are [0.2]' )) E_Prob_Chomsky_ = ProbGrammar('E_Prob_Chomsky_', ProbRules( S='NP VP [1]', NP='NP PP [0.4] | Noun Verb [0.6]', PP='Preposition NP [1]', VP='Verb NP [0.7] | VP PP [0.3]', ), ProbLexicon( Noun='astronomers [0.18] | eyes [0.32] | stars [0.32] | telescopes [0.18]', Verb='saw [0.5] | \'\' [0.5]', Preposition='with [1]' )) # ______________________________________________________________________________ # 22.3 Parsing
ProbGrammar
python
pytorch__pytorch
torch/export/graph_signature.py
{ "start": 2808, "end": 3448 }
class ____: kind: OutputKind arg: ArgumentSpec target: Optional[str] def __post_init__(self): assert isinstance( self.arg, ( TensorArgument, SymIntArgument, SymFloatArgument, SymBoolArgument, ConstantArgument, TokenArgument, CustomObjArgument, ), ), self.arg def __str__(self): target = "" if self.target is None else f" target='{self.target}'" return f"{str(self.arg.name)}: {str(self.kind.name)}{target}" @dataclasses.dataclass
OutputSpec
python
dabeaz-course__practical-python
Solutions/4_10/tableformat.py
{ "start": 660, "end": 877 }
class ____(TableFormatter): ''' Output data in CSV format. ''' def headings(self, headers): print(','.join(headers)) def row(self, rowdata): print(','.join(rowdata))
CSVTableFormatter
python
bokeh__bokeh
src/bokeh/server/callbacks.py
{ "start": 3545, "end": 4369 }
class ____(SessionCallback): ''' Represent a callback to execute once on the ``IOLoop`` after a specified time interval passes. ''' _timeout: int def __init__(self, callback: Callback, timeout: int, *, callback_id: ID) -> None: ''' Args: callback (callable) : timeout (int) : id (ID) : ''' super().__init__(callback=callback, callback_id=callback_id) self._timeout = timeout @property def timeout(self) -> int: ''' The timeout (in milliseconds) that the callback should run after. ''' return self._timeout #----------------------------------------------------------------------------- # Dev API #-----------------------------------------------------------------------------
TimeoutCallback
python
coleifer__peewee
peewee.py
{ "start": 31422, "end": 33476 }
class ____(_HashableSource, Source): def __init__(self, name, query, recursive=False, columns=None, materialized=None): self._alias = name self._query = query self._recursive = recursive self._materialized = materialized if columns is not None: columns = [Entity(c) if isinstance(c, basestring) else c for c in columns] self._columns = columns query._cte_list = () super(CTE, self).__init__(alias=name) def select_from(self, *columns): if not columns: raise ValueError('select_from() must specify one or more columns ' 'from the CTE to select.') query = (Select((self,), columns) .with_cte(self) .bind(self._query._database)) try: query = query.objects(self._query.model) except AttributeError: pass return query def _get_hash(self): return hash((self.__class__, self._alias, id(self._query))) def union_all(self, rhs): clone = self._query.clone() return CTE(self._alias, clone + rhs, self._recursive, self._columns) __add__ = union_all def union(self, rhs): clone = self._query.clone() return CTE(self._alias, clone | rhs, self._recursive, self._columns) __or__ = union def __sql__(self, ctx): if ctx.scope != SCOPE_CTE: return ctx.sql(Entity(self._alias)) with ctx.push_alias(): ctx.alias_manager[self] = self._alias ctx.sql(Entity(self._alias)) if self._columns: ctx.literal(' ').sql(EnclosedNodeList(self._columns)) ctx.literal(' AS ') if self._materialized: ctx.literal('MATERIALIZED ') elif self._materialized is False: ctx.literal('NOT MATERIALIZED ') with ctx.scope_normal(parentheses=True): ctx.sql(self._query) return ctx
CTE
python
tensorflow__tensorflow
tensorflow/python/data/ops/readers.py
{ "start": 12148, "end": 14741 }
class ____(dataset_ops.UnaryDataset): """A `Dataset` that maps a function over its input and flattens the result.""" def __init__(self, input_dataset, map_func, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements, name=None): """See `tf.data.experimental.parallel_interleave()` for details.""" self._input_dataset = input_dataset self._map_func = structured_function.StructuredFunctionWrapper( map_func, self._transformation_name(), dataset=input_dataset) if not isinstance(self._map_func.output_structure, dataset_ops.DatasetSpec): raise TypeError( "The `map_func` argument must return a `Dataset` object. Got " f"{_get_type(self._map_func.output_structure)!r}.") self._element_spec = self._map_func.output_structure._element_spec # pylint: disable=protected-access self._cycle_length = ops.convert_to_tensor( cycle_length, dtype=dtypes.int64, name="cycle_length") self._block_length = ops.convert_to_tensor( block_length, dtype=dtypes.int64, name="block_length") self._buffer_output_elements = convert.optional_param_to_tensor( "buffer_output_elements", buffer_output_elements, argument_default=2 * block_length) self._prefetch_input_elements = convert.optional_param_to_tensor( "prefetch_input_elements", prefetch_input_elements, argument_default=2 * cycle_length) if sloppy is None: self._deterministic = "default" elif sloppy: self._deterministic = "false" else: self._deterministic = "true" self._name = name variant_tensor = ged_ops.legacy_parallel_interleave_dataset_v2( self._input_dataset._variant_tensor, # pylint: disable=protected-access self._map_func.function.captured_inputs, self._cycle_length, self._block_length, self._buffer_output_elements, self._prefetch_input_elements, f=self._map_func.function, deterministic=self._deterministic, **self._common_args) super(ParallelInterleaveDataset, self).__init__(input_dataset, variant_tensor) def _functions(self): return [self._map_func] @property def element_spec(self): return self._element_spec def _transformation_name(self): return "tf.data.experimental.parallel_interleave()" @tf_export("data.TFRecordDataset", v1=[])
ParallelInterleaveDataset
python
ApeWorX__ape
src/ape/plugins/project.py
{ "start": 852, "end": 1607 }
class ____(PluginType): """ A plugin for downloading packages and creating :class:`~ape.plugins.project.ProjectPlugin` implementations. """ @hookspec def dependencies(self) -> dict[str, type["DependencyAPI"]]: # type: ignore[empty-body] """ A hook that returns a :class:`~ape.api.projects.DependencyAPI` mapped to its ``ape-config.yaml`` file dependencies special key. For example, when configuring GitHub dependencies, you set the ``github`` key in the ``dependencies:`` block of your ``ape-config.yaml`` file and it will automatically use this ``DependencyAPI`` implementation. Returns: type[:class:`~ape.api.projects.DependencyAPI`] """
DependencyPlugin
python
pyca__cryptography
tests/hazmat/primitives/test_hash_vectors.py
{ "start": 1212, "end": 1560 }
class ____: test_sha256 = generate_hash_test( load_hash_vectors, os.path.join("hashes", "SHA2"), ["SHA256LongMsg.rsp", "SHA256ShortMsg.rsp"], hashes.SHA256(), ) @pytest.mark.supported( only_if=lambda backend: backend.hash_supported(hashes.SHA384()), skip_message="Does not support SHA384", )
TestSHA256
python
getsentry__sentry
src/sentry_plugins/pagerduty/plugin.py
{ "start": 329, "end": 4851 }
class ____(CorePluginMixin, NotificationPlugin): description = "Send alerts to PagerDuty." slug = "pagerduty" title = "PagerDuty" conf_key = slug conf_title = title required_field = "service_key" feature_descriptions = [ FeatureDescription( """ Manage incidents and outages by sending Sentry notifications to PagerDuty. """, IntegrationFeatures.INCIDENT_MANAGEMENT, ), FeatureDescription( """ Configure rule based PagerDuty alerts to automatically be triggered in a specific service - or in multiple services! """, IntegrationFeatures.ALERT_RULE, ), ] def error_message_from_json(self, data): message = data.get("message", "unknown error") errors = data.get("errors", None) if errors: return "{}: {}".format(message, " ".join(errors)) return message def is_configured(self, project) -> bool: return bool(self.get_option("service_key", project)) def get_config(self, project, user=None, initial=None, add_additional_fields: bool = False): service_key = self.get_option("service_key", project) secret_field = get_secret_field_config( service_key, "PagerDuty's Sentry service Integration Key", include_prefix=True ) secret_field.update({"name": "service_key", "label": "Service Key"}) return [ secret_field, { "name": "routes", "label": "Tag routing", "type": "textarea", "placeholder": "environment,production,KEY1\ndevice,Other,KEY2", "required": False, "help": ( "Route each event to a different PagerDuty service key based " "on the event's tags. Each line should contain a tag, " "value, and service key separated by commas. The first " "line that matches a tag's key and value will send to that " "integration key instead of the main service key above." ), }, ] def notify_users(self, group, event, triggering_rules) -> None: if not self.is_configured(group.project): return # TODO: This should eventually just be event.title in line with other plugins. # However, we should notify users first, since PD alert routing may be # based off the message field. We default to the title now because it's # possible for `event.message` to be "" and the description # is a required field for the PD payload. description = (event.message or event.title)[:1024] tags = dict(event.tags) details = { "event_id": event.event_id, "project": group.project.name, "release": event.get_tag("sentry:release"), "platform": event.platform, "culprit": event.culprit, "datetime": event.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"), "tags": tags, "url": group.get_absolute_url(params={"referrer": "pagerduty_plugin"}), } service_key = self.get_option("service_key", group.project) routes = self.get_option("routes", group.project) or "" for route in (r.strip() for r in routes.split("\n")): fields = [f.strip() for f in route.split(",")] if len(fields) != 3: continue tag_key, tag_value, route_service_key = fields if tag_key in tags and tags[tag_key] == tag_value: service_key = route_service_key break client = PagerDutyPluginClient(service_key=service_key) try: response = client.trigger_incident( description=description, event_type="trigger", incident_key=str(group.id), details=details, contexts=[ { "type": "link", "href": absolute_uri( group.get_absolute_url(params={"referrer": "pagerduty_plugin"}) ), "text": "View Sentry Issue Details", } ], ) assert response["status"] == "success" except Exception as e: self.raise_error(e)
PagerDutyPlugin
python
tornadoweb__tornado
tornado/test/asyncio_test.py
{ "start": 5584, "end": 7775 }
class ____(unittest.TestCase): # These tests are only relevant on windows, but they should pass anywhere. def setUp(self): # As a precaution, ensure that we've run an event loop at least once # so if it spins up any singleton threads they're already there. asyncio.run(self.dummy_tornado_coroutine()) self.orig_thread_count = threading.active_count() def assert_no_thread_leak(self): # For some reason we see transient failures here, but I haven't been able # to catch it to identify which thread is causing it. Whatever thread it # is, it appears to quickly clean up on its own, so just retry a few times. # At least some of the time the errant thread was running at the time we # captured self.orig_thread_count, so use inequalities. deadline = time.time() + 1 while time.time() < deadline: threads = list(threading.enumerate()) if len(threads) <= self.orig_thread_count: break time.sleep(0.1) self.assertLessEqual(len(threads), self.orig_thread_count, threads) async def dummy_tornado_coroutine(self): # Just access the IOLoop to initialize the selector thread. IOLoop.current() def test_asyncio_run(self): for i in range(10): # asyncio.run calls shutdown_asyncgens for us. asyncio.run(self.dummy_tornado_coroutine()) self.assert_no_thread_leak() def test_asyncio_manual(self): for i in range(10): loop = asyncio.new_event_loop() loop.run_until_complete(self.dummy_tornado_coroutine()) # Without this step, we'd leak the thread. loop.run_until_complete(loop.shutdown_asyncgens()) loop.close() self.assert_no_thread_leak() def test_tornado(self): for i in range(10): # The IOLoop interfaces are aware of the selector thread and # (synchronously) shut it down. loop = IOLoop(make_current=False) loop.run_sync(self.dummy_tornado_coroutine) loop.close() self.assert_no_thread_leak()
SelectorThreadLeakTest
python
doocs__leetcode
solution/1000-1099/1090.Largest Values From Labels/Solution.py
{ "start": 0, "end": 446 }
class ____: def largestValsFromLabels( self, values: List[int], labels: List[int], numWanted: int, useLimit: int ) -> int: ans = num = 0 cnt = Counter() for v, l in sorted(zip(values, labels), reverse=True): if cnt[l] < useLimit: cnt[l] += 1 num += 1 ans += v if num == numWanted: break return ans
Solution
python
wandb__wandb
wandb/integration/yolov8/yolov8.py
{ "start": 546, "end": 11371 }
class ____: """An internal YOLO model wrapper that tracks metrics, and logs models to Weights & Biases. Usage: ```python from wandb.integration.yolov8.yolov8 import WandbCallback model = YOLO("yolov8n.pt") wandb_logger = WandbCallback( model, ) for event, callback_fn in wandb_logger.callbacks.items(): model.add_callback(event, callback_fn) ``` """ def __init__( self, yolo: YOLO, run_name: Optional[str] = None, project: Optional[str] = None, tags: Optional[List[str]] = None, resume: Optional[str] = None, **kwargs: Optional[Any], ) -> None: """A utility class to manage wandb run and various callbacks for the ultralytics YOLOv8 framework. Args: yolo: A YOLOv8 model that's inherited from `:class:ultralytics.yolo.engine.model.YOLO` run_name, str: The name of the Weights & Biases run, defaults to an auto generated run_name if `trainer.args.name` is not defined. project, str: The name of the Weights & Biases project, defaults to `"YOLOv8"` if `trainer.args.project` is not defined. tags, List[str]: A list of tags to be added to the Weights & Biases run, defaults to `["YOLOv8"]`. resume, str: Whether to resume a previous run on Weights & Biases, defaults to `None`. **kwargs: Additional arguments to be passed to `wandb.init()`. """ self.yolo = yolo self.run_name = run_name self.project = project self.tags = tags self.resume = resume self.kwargs = kwargs def on_pretrain_routine_start(self, trainer: BaseTrainer) -> None: """Starts a new wandb run to track the training process and log to Weights & Biases. Args: trainer: A task trainer that's inherited from `:class:ultralytics.yolo.engine.trainer.BaseTrainer` that contains the model training and optimization routine. """ if wandb.run is None: self.run = wandb.init( name=self.run_name if self.run_name else trainer.args.name, project=self.project if self.project else trainer.args.project or "YOLOv8", tags=self.tags if self.tags else ["YOLOv8"], config=vars(trainer.args), resume=self.resume if self.resume else None, **self.kwargs, ) else: self.run = wandb.run assert self.run is not None self.run.define_metric("epoch", hidden=True) self.run.define_metric( "train/*", step_metric="epoch", step_sync=True, summary="min" ) self.run.define_metric( "val/*", step_metric="epoch", step_sync=True, summary="min" ) self.run.define_metric( "metrics/*", step_metric="epoch", step_sync=True, summary="max" ) self.run.define_metric( "lr/*", step_metric="epoch", step_sync=True, summary="last" ) with telemetry.context(run=wandb.run) as tel: tel.feature.ultralytics_yolov8 = True def on_pretrain_routine_end(self, trainer: BaseTrainer) -> None: assert self.run is not None self.run.summary.update( { "model/parameters": get_num_params(trainer.model), "model/GFLOPs": round(get_flops(trainer.model), 3), } ) def on_train_epoch_start(self, trainer: BaseTrainer) -> None: """On train epoch start we only log epoch number to the Weights & Biases run.""" # We log the epoch number here to commit the previous step, assert self.run is not None self.run.log({"epoch": trainer.epoch + 1}) def on_train_epoch_end(self, trainer: BaseTrainer) -> None: """On train epoch end we log all the metrics to the Weights & Biases run.""" assert self.run is not None self.run.log( { **trainer.metrics, **trainer.label_loss_items(trainer.tloss, prefix="train"), **trainer.lr, }, ) # Currently only the detection and segmentation trainers save images to the save_dir if not isinstance(trainer, ClassificationTrainer): self.run.log( { "train_batch_images": [ wandb.Image(str(image_path), caption=image_path.stem) for 
image_path in trainer.save_dir.glob("train_batch*.jpg") ] } ) def on_fit_epoch_end(self, trainer: BaseTrainer) -> None: """On fit epoch end we log all the best metrics and model detail to Weights & Biases run summary.""" assert self.run is not None if trainer.epoch == 0: speeds = [ trainer.validator.speed.get( key, ) for key in (1, "inference") ] speed = speeds[0] if speeds[0] else speeds[1] if speed: self.run.summary.update( { "model/speed(ms/img)": round(speed, 3), } ) if trainer.best_fitness == trainer.fitness: self.run.summary.update( { "best/epoch": trainer.epoch + 1, **{f"best/{key}": val for key, val in trainer.metrics.items()}, } ) def on_train_end(self, trainer: BaseTrainer) -> None: """On train end we log all the media, including plots, images and best model artifact to Weights & Biases.""" # Currently only the detection and segmentation trainers save images to the save_dir assert self.run is not None if not isinstance(trainer, ClassificationTrainer): assert self.run is not None self.run.log( { "plots": [ wandb.Image(str(image_path), caption=image_path.stem) for image_path in trainer.save_dir.glob("*.png") ], "val_images": [ wandb.Image(str(image_path), caption=image_path.stem) for image_path in trainer.validator.save_dir.glob("val*.jpg") ], }, ) if trainer.best.exists(): assert self.run is not None self.run.log_artifact( str(trainer.best), type="model", name=f"{self.run.name}_{trainer.args.task}.pt", aliases=["best", f"epoch_{trainer.epoch + 1}"], ) def on_model_save(self, trainer: BaseTrainer) -> None: """On model save we log the model as an artifact to Weights & Biases.""" assert self.run is not None self.run.log_artifact( str(trainer.last), type="model", name=f"{self.run.name}_{trainer.args.task}.pt", aliases=["last", f"epoch_{trainer.epoch + 1}"], ) def teardown(self, _trainer: BaseTrainer) -> None: """On teardown, we finish the Weights & Biases run and set it to None.""" assert self.run is not None self.run.finish() self.run = None @property def callbacks( self, ) -> Dict[str, Callable]: """Property contains all the relevant callbacks to add to the YOLO model for the Weights & Biases logging.""" return { "on_pretrain_routine_start": self.on_pretrain_routine_start, "on_pretrain_routine_end": self.on_pretrain_routine_end, "on_train_epoch_start": self.on_train_epoch_start, "on_train_epoch_end": self.on_train_epoch_end, "on_fit_epoch_end": self.on_fit_epoch_end, "on_train_end": self.on_train_end, "on_model_save": self.on_model_save, "teardown": self.teardown, } def add_callbacks( yolo: YOLO, run_name: Optional[str] = None, project: Optional[str] = None, tags: Optional[List[str]] = None, resume: Optional[str] = None, **kwargs: Optional[Any], ) -> YOLO: """A YOLO model wrapper that tracks metrics, and logs models to Weights & Biases. Args: yolo: A YOLOv8 model that's inherited from `:class:ultralytics.yolo.engine.model.YOLO` run_name, str: The name of the Weights & Biases run, defaults to an auto generated name if `trainer.args.name` is not defined. project, str: The name of the Weights & Biases project, defaults to `"YOLOv8"` if `trainer.args.project` is not defined. tags, List[str]: A list of tags to be added to the Weights & Biases run, defaults to `["YOLOv8"]`. resume, str: Whether to resume a previous run on Weights & Biases, defaults to `None`. **kwargs: Additional arguments to be passed to `wandb.init()`. 
Usage: ```python from wandb.integration.yolov8 import add_callbacks as add_wandb_callbacks model = YOLO("yolov8n.pt") add_wandb_callbacks( model, ) model.train( data="coco128.yaml", epochs=3, imgsz=640, ) ``` """ wandb.termwarn( """The wandb callback is currently in beta and is subject to change based on updates to `ultralytics yolov8`. The callback is tested and supported for ultralytics v8.0.43 and above. Please report any issues to https://github.com/wandb/wandb/issues with the tag `yolov8`. """, repeat=False, ) wandb.termwarn( """This wandb callback is no longer functional and would be deprecated in the near future. We recommend you to use the updated callback using `from wandb.integration.ultralytics import add_wandb_callback`. The updated callback is tested and supported for ultralytics 8.0.167 and above. You can refer to https://docs.wandb.ai/guides/integrations/ultralytics for the updated documentation. Please report any issues to https://github.com/wandb/wandb/issues with the tag `yolov8`. """, repeat=False, ) if RANK in [-1, 0]: wandb_logger = WandbCallback( yolo, run_name=run_name, project=project, tags=tags, resume=resume, **kwargs ) for event, callback_fn in wandb_logger.callbacks.items(): yolo.add_callback(event, callback_fn) return yolo else: wandb.termerror( "The RANK of the process to add the callbacks was neither 0 or -1." "No Weights & Biases callbacks were added to this instance of the YOLO model." ) return yolo
WandbCallback
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/solver27.py
{ "start": 632, "end": 791 }
class ____(Generic[T]): pass reveal_type(ClassA[int], expected_text="type[ClassA[int]]") def deco4() -> Callable[[type[T]], type[T]]: ... @deco4()
ClassA
python
Textualize__textual
src/textual/_node_list.py
{ "start": 417, "end": 531 }
class ____(Exception): """Raised when attempting to add a widget with an id that already exists."""
DuplicateIds
python
mlflow__mlflow
mlflow/entities/logged_model_parameter.py
{ "start": 116, "end": 1140 }
class ____(_MlflowObject): """ MLflow entity representing a parameter of a Model. """ def __init__(self, key, value): if "pyspark.ml" in sys.modules: import pyspark.ml.param if isinstance(key, pyspark.ml.param.Param): key = key.name value = str(value) self._key = key self._value = value @property def key(self): """String key corresponding to the parameter name.""" return self._key @property def value(self): """String value of the parameter.""" return self._value def __eq__(self, __o): if isinstance(__o, self.__class__): return self._key == __o._key return False def __hash__(self): return hash(self._key) def to_proto(self): return pb2.LoggedModelParameter(key=self._key, value=self._value) @classmethod def from_proto(cls, proto): return cls(key=proto.key, value=proto.value)
LoggedModelParameter
python
ApeWorX__ape
src/ape/api/projects.py
{ "start": 2869, "end": 4020 }
class ____(ProjectAPI): """ The default ProjectAPI implementation. """ CONFIG_FILE_NAME: str = "ape-config" EXTENSIONS: tuple[str, ...] = (".yaml", ".yml", ".json") @property def is_valid(self) -> bool: return True # If all else fails, treat as a default Ape project. @cached_property def config_file(self) -> Path: if self._using_pyproject_toml: return self._pyproject_toml # else: check for an ape-config file. for ext in self.EXTENSIONS: path = self.path / f"{self.CONFIG_FILE_NAME}{ext}" if path.is_file(): return path # Default: non-existing ape-config.yaml file. return self.path / f"{self.CONFIG_FILE_NAME}.yaml" @property def _pyproject_toml(self) -> Path: return self.path / "pyproject.toml" @property def _using_pyproject_toml(self) -> bool: return self._pyproject_toml.is_file() and "[tool.ape" in self._pyproject_toml.read_text() def extract_config(self, **overrides) -> ApeConfig: return ApeConfig.validate_file(self.config_file, **overrides)
ApeProject
python
Netflix__metaflow
metaflow/runtime.py
{ "start": 83914, "end": 84737 }
class ____(object): def __init__(self, name, maxsize): self.name = name self._maxsize = maxsize self._buffer = BytesIO() self._size = 0 self._eof = False def write(self, bytedata, system_msg=False): if system_msg: self._buffer.write(bytedata) elif not self._eof: if self._size + len(bytedata) < self._maxsize: self._buffer.write(bytedata) self._size += len(bytedata) else: msg = b"[TRUNCATED - MAXIMUM LOG FILE SIZE REACHED]\n" self._buffer.write(mflog_msg(msg)) self._eof = True def get_bytes(self): return self._buffer.getvalue() def get_buffer(self): self._buffer.seek(0) return self._buffer
TruncatedBuffer
python
ray-project__ray
ci/ray_ci/doc/autodoc.py
{ "start": 290, "end": 5284 }
class ____: """ Autodoc class represents the top level sphinx autodoc landing page and finds autodoc APIs that would be generated from sphinx from all sub-pages. """ def __init__(self, head_rst_file: str): """ Args: head_rst_file: The path to the landing page RST file that contains the list of children RSTs of the autodoc APIs. """ self._head_rst_file = head_rst_file self._autodoc_rsts = None self._apis = None def get_apis(self) -> List[API]: self.walk() return self._apis or [] def walk(self) -> None: if self._apis is not None: # already walk return rsts = self._get_autodoc_rsts() self._apis = [] for rst in rsts: self._apis.extend(self._parse_autodoc_rst(rst)) def _get_autodoc_rsts(self) -> Set[str]: """ Recursively parse the head_rst_file to find all the autodoc rsts """ if self._autodoc_rsts is not None: return self._autodoc_rsts self._autodoc_rsts = {self._head_rst_file} visit_current = {self._head_rst_file} while visit_current: visit_next = set() for rst in visit_current: for child_rst in self._get_autodoc_rsts_in_file(rst): if child_rst not in self._autodoc_rsts: self._autodoc_rsts.add(child_rst) visit_next.add(child_rst) visit_current = visit_next return self._autodoc_rsts def _get_autodoc_rsts_in_file(self, rst_file: str) -> Set[str]: """ Parse the list of rst declared in the head_rst_file, for example: .. include:: area_00.rst .. toctree:: :option area_01.rst area_02.rst """ if not os.path.exists(rst_file): return set() rsts = set() dir = os.path.dirname(rst_file) with open(rst_file, "r") as f: line = f.readline() while line: line = line.strip() # look for the include block if line.startswith(_SPHINX_INCLUDE_HEADER): rsts.add( os.path.join( dir, line.removeprefix(_SPHINX_INCLUDE_HEADER).strip() ) ) line = f.readline() continue # look for the toctree block if not line == _SPHINX_TOCTREE_HEADER: line = f.readline() continue # parse the toctree block line = f.readline() while line: if line.strip() and not re.match(r"\s", line): # end of toctree, \s means empty space, this line is checking if # the line is not empty and not starting with empty space break if line.strip().endswith(".rst"): rsts.add(os.path.join(dir, line.strip())) line = f.readline() return rsts def _parse_autodoc_rst(self, rst_file: str) -> List[API]: """ Parse the rst file to find the autodoc APIs. Example content of the rst file .. currentmodule:: mymodule .. autoclass:: myclass .. autosummary:: myclass.myfunc_01 myclass.myfunc_02 """ if not os.path.exists(rst_file): return [] apis = [] module = None with open(rst_file, "r") as f: line = f.readline() while line: # parse currentmodule block if line.startswith(_SPHINX_CURRENTMODULE_HEADER): module = line[len(_SPHINX_CURRENTMODULE_HEADER) :].strip() # parse autoclass block if line.startswith(_SPHINX_AUTOCLASS_HEADER): apis.append(API.from_autoclass(line, module)) # parse autosummary block if line.startswith(_SPHINX_AUTOSUMMARY_HEADER): doc = line line = f.readline() # collect lines until the end of the autosummary block while line: doc += line if line.strip() and not re.match(r"\s", line): # end of autosummary, \s means empty space, this line is # checking if the line is not empty and not starting with # empty space break line = f.readline() apis.extend(API.from_autosummary(doc, module)) continue line = f.readline() return [api for api in apis if api]
Autodoc
python
numba__numba
numba/cuda/compiler.py
{ "start": 2093, "end": 2387 }
class ____(CompileResult): @property def entry_point(self): return id(self) def cuda_compile_result(**entries): entries = sanitize_compile_result_entries(entries) return CUDACompileResult(**entries) @register_pass(mutates_CFG=True, analysis_only=False)
CUDACompileResult
python
pytorch__pytorch
torch/jit/_script.py
{ "start": 8982, "end": 11664 }
class ____(type): def __init__(cls, name, bases, attrs): # noqa: B902 # Aggregate all the ScriptMethods and constants from superclasses cls._methods: dict[str, Any] = {} cls._constants_set = set(getattr(cls, "__constants__", ())) for base in reversed(bases): for k, v in getattr(base, "_methods", {}).items(): cls._methods[k] = v base_constants: set = getattr(base, "_constants_set", set()) cls._constants_set = cls._constants_set.union(base_constants) # find all the script methods of the current class for k, v in sorted(attrs.items()): if isinstance(v, ScriptMethodStub): delattr(cls, k) cls._methods[v.original_method.__name__] = v if getattr(cls, "_disable_script_meta", False): # We leave built-in ScriptModule types alone, since this metaclass # is only for compiling user classes that inherit from # ScriptModule. super().__init__(name, bases, attrs) return original_init = getattr(cls, "__init__", lambda self: None) @functools.wraps(original_init) def init_then_script(self, *args, **kwargs): num_methods = len(cls._methods) original_init(self, *args, **kwargs) added_methods_in_init = len(cls._methods) > num_methods if type(self) is cls: def make_stubs(module): cls = type(module) if hasattr(cls, "_methods"): return [v for k, v in sorted(cls._methods.items())] else: return infer_methods_to_compile(module) self.__dict__["_actual_script_module"] = ( torch.jit._recursive.create_script_module( self, make_stubs, share_types=not added_methods_in_init ) ) # Delete the Python attributes that now shadow the ScriptModule # ones, so that __getattr__ and __setattr__ will properly find # the scripted versions. concrete_type = self._actual_script_module._concrete_type for name in concrete_type.get_attributes(): delattr(self, name) for name, _ in concrete_type.get_modules(): delattr(self, name) for name in ("_parameters", "_buffers", "_modules"): delattr(self, name) cls.__init__ = init_then_script # type: ignore[misc] super().__init__(name, bases, attrs)
ScriptMeta
python
zarr-developers__zarr-python
src/zarr/storage/_logging.py
{ "start": 566, "end": 7399 }
class ____(WrapperStore[T_Store]): """ Store that logs all calls to another wrapped store. Parameters ---------- store : Store Store to wrap log_level : str Log level log_handler : logging.Handler Log handler Attributes ---------- counter : dict Counter of number of times each method has been called """ counter: defaultdict[str, int] def __init__( self, store: T_Store, log_level: str = "DEBUG", log_handler: logging.Handler | None = None, ) -> None: super().__init__(store) self.counter = defaultdict(int) self.log_level = log_level self.log_handler = log_handler self._configure_logger(log_level, log_handler) def _configure_logger( self, log_level: str = "DEBUG", log_handler: logging.Handler | None = None ) -> None: self.log_level = log_level self.logger = logging.getLogger(f"LoggingStore({self._store})") self.logger.setLevel(log_level) if not self.logger.hasHandlers(): if not log_handler: log_handler = self._default_handler() # Add handler to logger self.logger.addHandler(log_handler) def _default_handler(self) -> logging.Handler: """Define a default log handler""" handler = logging.StreamHandler(stream=sys.stdout) handler.setLevel(self.log_level) handler.setFormatter( logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") ) return handler @contextmanager def log(self, hint: Any = "") -> Generator[None, None, None]: """Context manager to log method calls Each call to the wrapped store is logged to the configured logger and added to the counter dict. """ method = inspect.stack()[2].function op = f"{type(self._store).__name__}.{method}" if hint: op = f"{op}({hint})" self.logger.info(" Calling %s", op) start_time = time.time() try: self.counter[method] += 1 yield finally: end_time = time.time() self.logger.info("Finished %s [%.2f s]", op, end_time - start_time) @classmethod async def open(cls: type[Self], store_cls: type[T_Store], *args: Any, **kwargs: Any) -> Self: log_level = kwargs.pop("log_level", "DEBUG") log_handler = kwargs.pop("log_handler", None) store = store_cls(*args, **kwargs) await store._open() return cls(store=store, log_level=log_level, log_handler=log_handler) @property def supports_writes(self) -> bool: with self.log(): return self._store.supports_writes @property def supports_deletes(self) -> bool: with self.log(): return self._store.supports_deletes @property def supports_listing(self) -> bool: with self.log(): return self._store.supports_listing @property def read_only(self) -> bool: with self.log(): return self._store.read_only @property def _is_open(self) -> bool: with self.log(): return self._store._is_open @_is_open.setter def _is_open(self, value: bool) -> None: raise NotImplementedError("LoggingStore must be opened via the `_open` method") async def _open(self) -> None: with self.log(): return await self._store._open() async def _ensure_open(self) -> None: with self.log(): return await self._store._ensure_open() async def is_empty(self, prefix: str = "") -> bool: # docstring inherited with self.log(): return await self._store.is_empty(prefix=prefix) async def clear(self) -> None: # docstring inherited with self.log(): return await self._store.clear() def __str__(self) -> str: return f"logging-{self._store}" def __repr__(self) -> str: return f"LoggingStore({self._store.__class__.__name__}, '{self._store}')" def __eq__(self, other: object) -> bool: with self.log(other): return type(self) is type(other) and self._store.__eq__(other._store) # type: ignore[attr-defined] async def get( self, key: str, prototype: BufferPrototype, byte_range: ByteRequest | None = None, ) -> Buffer | None: # docstring inherited with self.log(key): return await self._store.get(key=key, prototype=prototype, byte_range=byte_range) async def get_partial_values( self, prototype: BufferPrototype, key_ranges: Iterable[tuple[str, ByteRequest | None]], ) -> list[Buffer | None]: # docstring inherited keys = ",".join([k[0] for k in key_ranges]) with self.log(keys): return await self._store.get_partial_values(prototype=prototype, key_ranges=key_ranges) async def exists(self, key: str) -> bool: # docstring inherited with self.log(key): return await self._store.exists(key) async def set(self, key: str, value: Buffer) -> None: # docstring inherited with self.log(key): return await self._store.set(key=key, value=value) async def set_if_not_exists(self, key: str, value: Buffer) -> None: # docstring inherited with self.log(key): return await self._store.set_if_not_exists(key=key, value=value) async def delete(self, key: str) -> None: # docstring inherited with self.log(key): return await self._store.delete(key=key) async def list(self) -> AsyncGenerator[str, None]: # docstring inherited with self.log(): async for key in self._store.list(): yield key async def list_prefix(self, prefix: str) -> AsyncGenerator[str, None]: # docstring inherited with self.log(prefix): async for key in self._store.list_prefix(prefix=prefix): yield key async def list_dir(self, prefix: str) -> AsyncGenerator[str, None]: # docstring inherited with self.log(prefix): async for key in self._store.list_dir(prefix=prefix): yield key async def delete_dir(self, prefix: str) -> None: # docstring inherited with self.log(prefix): await self._store.delete_dir(prefix=prefix) async def getsize(self, key: str) -> int: with self.log(key): return await self._store.getsize(key) async def getsize_prefix(self, prefix: str) -> int: with self.log(prefix): return await self._store.getsize_prefix(prefix)
LoggingStore
python
realpython__materials
python-argparse/custom_action.py
{ "start": 18, "end": 388 }
class ____(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        print(f"Storing {values} in the {option_string} option...")
        setattr(namespace, self.dest, values)


arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-n", "--name", action=VerboseStore)

args = arg_parser.parse_args()
print(args)
VerboseStore
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 171493, "end": 172137 }
class ____(sgqlc.types.Input):
    """
    See source code for more info.
    """

    __schema__ = graphql_schema
    __field_names__ = (
        "environment_id",
        "wait_timer",
        "reviewers",
        "client_mutation_id",
    )
    environment_id = sgqlc.types.Field(
        sgqlc.types.non_null(ID), graphql_name="environmentId"
    )
    wait_timer = sgqlc.types.Field(Int, graphql_name="waitTimer")
    reviewers = sgqlc.types.Field(
        sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="reviewers"
    )
    client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
UpdateEnvironmentInput
python
mlflow__mlflow
tests/genai/judges/test_alignment_optimizer.py
{ "start": 994, "end": 2250 }
class ____(AlignmentOptimizer):
    """Mock AlignmentOptimizer implementation for testing."""

    def align(self, judge: Judge, traces: list[Trace]) -> Judge:
        # Return a new judge with modified name to show it was processed
        return MockJudge(name=f"{judge.name}_optimized")


def test_alignment_optimizer_abstract():
    """Test that AlignmentOptimizer cannot be instantiated directly."""
    with pytest.raises(TypeError, match="Can't instantiate abstract class AlignmentOptimizer"):
        AlignmentOptimizer()


def test_alignment_optimizer_align_method_required():
    """Test that concrete classes must implement align method."""

    class IncompleteOptimizer(AlignmentOptimizer):
        pass

    with pytest.raises(TypeError, match="Can't instantiate abstract class IncompleteOptimizer"):
        IncompleteOptimizer()


def test_concrete_optimizer_implementation():
    """Test that concrete optimizer can be instantiated and used."""
    optimizer = MockOptimizer()
    judge = MockJudge(name="test_judge")
    traces = []  # Empty traces for testing

    # Should not raise any errors
    result = optimizer.align(judge, traces)

    assert isinstance(result, Judge)
    assert result.name == "test_judge_optimized"
MockOptimizer
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 218010, "end": 219299 }
class ____(VegaLiteSchema):
    """
    CompositionConfig schema wrapper.

    Parameters
    ----------
    columns : float
        The number of columns to include in the view composition layout.

        **Default value**: ``undefined`` -- An infinite number of columns (a single row)
        will be assumed. This is equivalent to ``hconcat`` (for ``concat``) and to using
        the ``column`` channel (for ``facet`` and ``repeat``).

        **Note**:

        1) This property is only for:

        * the general (wrappable) ``concat`` operator (not ``hconcat``/``vconcat``)
        * the ``facet`` and ``repeat`` operator with one field/repetition definition
          (without row/column nesting)

        2) Setting the ``columns`` to ``1`` is equivalent to ``vconcat`` (for ``concat``)
        and to using the ``row`` channel (for ``facet`` and ``repeat``).
    spacing : float
        The default spacing in pixels between composed sub-views.

        **Default value**: ``20``
    """

    _schema = {"$ref": "#/definitions/CompositionConfig"}

    def __init__(
        self,
        columns: Optional[float] = Undefined,
        spacing: Optional[float] = Undefined,
        **kwds,
    ):
        super().__init__(columns=columns, spacing=spacing, **kwds)
CompositionConfig
python
sqlalchemy__sqlalchemy
test/orm/test_instrumentation.py
{ "start": 10236, "end": 11583 }
class ____(fixtures.MappedTest):
    def fixture(self):
        return Table(
            "t",
            MetaData(),
            Column("id", Integer, primary_key=True),
            Column("type", Integer),
            Column("x", Integer),
            Column("y", Integer),
        )

    def test_partially_mapped_inheritance(self):
        class A:
            pass

        class B(A):
            pass

        class C(B):
            def __init__(self, x):
                pass

        self.mapper_registry.map_imperatively(A, self.fixture())

        # B is not mapped in the current implementation
        assert_raises(sa.orm.exc.UnmappedClassError, class_mapper, B)

        # C is not mapped in the current implementation
        assert_raises(sa.orm.exc.UnmappedClassError, class_mapper, C)

    def test_del_warning(self):
        class A:
            def __del__(self):
                pass

        assert_warns_message(
            sa.exc.SAWarning,
            r"__del__\(\) method on class "
            r"<class '.*\.A'> will cause "
            r"unreachable cycles and memory leaks, as SQLAlchemy "
            r"instrumentation often creates reference cycles. "
            r"Please remove this method.",
            self.mapper_registry.map_imperatively,
            A,
            self.fixture(),
        )
MapperInitTest
python
google__flatbuffers
tests/MyGame/Example/NestedUnion/Any.py
{ "start": 96, "end": 729 }
class ____(object):
    NONE = 0
    Vec3 = 1
    TestSimpleTableWithEnum = 2


def AnyCreator(unionType, table):
    from flatbuffers.table import Table
    if not isinstance(table, Table):
        return None
    if unionType == Any.Vec3:
        import MyGame.Example.NestedUnion.Vec3
        return MyGame.Example.NestedUnion.Vec3.Vec3T.InitFromBuf(table.Bytes, table.Pos)
    if unionType == Any.TestSimpleTableWithEnum:
        import MyGame.Example.NestedUnion.TestSimpleTableWithEnum
        return MyGame.Example.NestedUnion.TestSimpleTableWithEnum.TestSimpleTableWithEnumT.InitFromBuf(table.Bytes, table.Pos)
    return None
Any
python
pytorch__pytorch
test/onnx/test_utility_funs.py
{ "start": 2967, "end": 71094 }
class ____(_BaseTestCase): opset_version = None def test_is_in_onnx_export(self): test_self = self class MyModule(torch.nn.Module): def forward(self, x): test_self.assertTrue(torch.onnx.is_in_onnx_export()) raise ValueError return x + 1 x = torch.randn(3, 4) f = io.BytesIO() try: torch.onnx.export( MyModule(), x, f, opset_version=self.opset_version, dynamo=False ) except ValueError: self.assertFalse(torch.onnx.is_in_onnx_export()) def test_validate_dynamic_axes_invalid_input_output_name(self): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") utils._validate_dynamic_axes( {"input1": {}, "output": {}, "invalid_name1": {}, "invalid_name2": {}}, None, ["input1", "input2"], ["output"], ) messages = [str(warning.message) for warning in w] self.assertIn( "Provided key invalid_name1 for dynamic axes is not a valid input/output name", messages, ) self.assertIn( "Provided key invalid_name2 for dynamic axes is not a valid input/output name", messages, ) self.assertEqual(len(messages), 2) @skipIfUnsupportedMinOpsetVersion(11) def test_split_to_slice(self): class SplitModule(torch.nn.Module): def forward(self, x, y, t): splits = (x.size(1), y.size(1)) out, out2 = torch.split(t, splits, dim=1) return out, out2 GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX x = torch.randn(2, 3) y = torch.randn(2, 4) t = torch.randn(2, 7) graph, _, _ = self._model_to_graph( SplitModule(), (x, y, t), input_names=["x", "y", "t"], dynamic_axes={"x": [0, 1], "y": [0, 1], "t": [0, 1]}, ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::SplitToSequence") def test_constant_fold_transpose(self): class TransposeModule(torch.nn.Module): def forward(self, x): a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) b = torch.transpose(a, 1, 0) return b + x GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX x = torch.ones(3, 2) graph, _, __ = self._model_to_graph( TransposeModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Transpose") self.assertNotEqual(node.kind(), "onnx::Cast") self.assertEqual(len(list(graph.nodes())), 2) @skipIfUnsupportedMaxOpsetVersion(17) def test_constant_fold_reduceL2(self): class ReduceModule(torch.nn.Module): def forward(self, x): a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) b = torch.norm(a, p=2, dim=-2, keepdim=False) return b + x GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX x = torch.ones(2, 3) graph, _, __ = self._model_to_graph( ReduceModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::ReduceL2") @skipIfUnsupportedMaxOpsetVersion(17) def test_constant_fold_reduceL1(self): class NormModule(torch.nn.Module): def forward(self, x): a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) b = torch.norm(a, p=1, dim=-2) return b + x GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX x = torch.ones(2, 3) graph, _, __ = self._model_to_graph( NormModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::ReduceL1") def test_constant_fold_slice(self): class NarrowModule(torch.nn.Module): def forward(self, x): a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) b = torch.narrow(a, 0, 0, 1) 
return b + x GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX x = torch.ones(1, 3) graph, _, __ = self._model_to_graph( NarrowModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Slice") self.assertNotEqual(node.kind(), "onnx::Cast") self.assertEqual(len(list(graph.nodes())), 2) def test_constant_fold_slice_index_exceeds_dim(self): class SliceIndexExceedsDimModule(torch.nn.Module): def forward(self, x): a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) b = a[1:10] # index exceeds dimension return b + x GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX x = torch.ones(1, 3) graph, _, __ = self._model_to_graph( SliceIndexExceedsDimModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}, ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Slice") self.assertNotEqual(node.kind(), "onnx::Cast") self.assertEqual(len(list(graph.nodes())), 2) def test_constant_fold_slice_negative_index(self): class SliceNegativeIndexModule(torch.nn.Module): def forward(self, x): a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) b = a[0:-1] # index relative to the end c = torch.select(a, dim=-1, index=-2) d = torch.select(a, dim=1, index=0) return b + x, c + d GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX x = torch.ones(1, 3) graph, _, __ = self._model_to_graph( SliceNegativeIndexModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}, ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Slice") self.assertNotEqual(node.kind(), "onnx::Cast") def test_constant_fold_gather(self): class GatherModule(torch.nn.Module): def forward(self, x): a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) b = torch.select(a, dim=1, index=-2) c = torch.index_select(a, dim=-2, index=torch.tensor([0, 1])) return b + 1, c + x GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX x = torch.ones(1, 3) model = GatherModule() model(x) graph, _, __ = self._model_to_graph( GatherModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Gather") def test_constant_fold_unsqueeze(self): class UnsqueezeModule(torch.nn.Module): def forward(self, x): a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) b = torch.unsqueeze(a, -2) return b + x GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX x = torch.ones(1, 2, 3) graph, _, __ = self._model_to_graph( UnsqueezeModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1, 2]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Unsqueeze") self.assertNotEqual(node.kind(), "onnx::Cast") self.assertEqual(len(list(graph.nodes())), 2) def test_constant_fold_unsqueeze_multi_axies(self): class PReluModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.prelu = torch.nn.PReLU() def forward(self, x): a = torch.randn(2, 3, 4, 5, 8, 7) return self.prelu(x) + a GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX x = torch.randn(2, 3, 4, 5, 8, 7) graph, _, __ = self._model_to_graph( PReluModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2, 3, 4, 5]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), 
"onnx::Unsqueeze") self.assertNotEqual(node.kind(), "onnx::Cast") self.assertEqual(len(list(graph.nodes())), 5) def test_constant_fold_squeeze_without_axes(self): class SqueezeModule(torch.nn.Module): def forward(self, x): a = torch.tensor([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]) return torch.squeeze(a) + x + torch.squeeze(a) GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX x = torch.ones(2, 3) graph, _, __ = self._model_to_graph( SqueezeModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Squeeze") self.assertNotEqual(node.kind(), "onnx::Cast") self.assertEqual(len(list(graph.nodes())), 4) def test_constant_fold_squeeze_with_axes(self): class SqueezeAxesModule(torch.nn.Module): def forward(self, x): a = torch.tensor([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]) return torch.squeeze(a, dim=-3) + x GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX x = torch.ones(2, 3) graph, _, __ = self._model_to_graph( SqueezeAxesModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Squeeze") self.assertNotEqual(node.kind(), "onnx::Cast") self.assertEqual(len(list(graph.nodes())), 2) def test_constant_fold_concat(self): class ConcatModule(torch.nn.Module): def forward(self, x): # Why did I insert a Cast here? There appears to be intentional # behavior in ONNX constant folding where constant tensors which # are not attached to any known to be foldable onnx # operations don't get extracted into the initializer graph. So # without these casts, we will actually fail to pull out one of # the constants, thus failing constant folding. I think the # test is wrong but I don't have time to write a more correct # test (I think the right way to go about the test is to setup # a predicate for what invariant graphs should hold after # constant folding, and then verify this predicate holds. # I think the asserts below are an attempt at this predicate, # but it is not right!) 
# # More commentary at # https://github.com/pytorch/pytorch/pull/18698/files#r340107552 a = torch.tensor([[1.0, 2.0, 3.0]]).to(torch.float) b = torch.tensor([[4.0, 5.0, 6.0]]).to(torch.float) c = torch.cat((a, b), 0) d = b + c return x + d GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX x = torch.ones(2, 3) graph, _, __ = self._model_to_graph( ConcatModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Concat") self.assertNotEqual(node.kind(), "onnx::Cast") self.assertEqual(len(list(graph.nodes())), 2) def test_constant_fold_lstm(self): class GruNet(torch.nn.Module): def __init__(self) -> None: super().__init__() self.mygru = torch.nn.GRU(7, 3, 1, bidirectional=False) def forward(self, input, initial_state): return self.mygru(input, initial_state) GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX input = torch.randn(5, 3, 7) h0 = torch.randn(1, 3, 3) graph, _, __ = self._model_to_graph( GruNet(), (input, h0), input_names=["input", "h0"], dynamic_axes={"input": [0, 1, 2], "h0": [0, 1, 2]}, ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Slice") self.assertNotEqual(node.kind(), "onnx::Concat") self.assertNotEqual(node.kind(), "onnx::Unsqueeze") if self.opset_version <= 12: self.assertEqual(len(list(graph.nodes())), 3) else: # Unsqueeze op parameter "axes" as an input instead of as an attribute when opset version >= 13 self.assertEqual(len(list(graph.nodes())), 4) def test_constant_fold_transpose_matmul(self): class MatMulNet(torch.nn.Module): def __init__(self) -> None: super().__init__() self.B = torch.nn.Parameter(torch.ones(5, 3)) def forward(self, A): return torch.matmul(A, torch.transpose(self.B, -1, -2)) GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX A = torch.randn(2, 3) graph, _, __ = self._model_to_graph( MatMulNet(), (A,), input_names=["A"], dynamic_axes={"A": [0, 1]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Transpose") self.assertEqual(len(list(graph.nodes())), 1) def test_constant_fold_reshape(self): class ReshapeModule(torch.nn.Module): def __init__( self, ): super().__init__() self.weight = torch.nn.Buffer(torch.ones(5)) def forward(self, x): b = self.weight.reshape(1, -1, 1, 1) return x * b GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX x = torch.randn(4, 5) graph, _, __ = self._model_to_graph( ReshapeModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Reshape") self.assertEqual(len(list(graph.nodes())), 1) def test_constant_fold_div(self): class Module(torch.nn.Module): def __init__( self, ): super().__init__() self.weight = torch.nn.Buffer(torch.ones(5)) def forward(self, x): div = self.weight.div(torch.tensor([1, 2, 3, 4, 5])) return div * x x = torch.randn(2, 5) GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX graph, _, __ = self._model_to_graph( Module(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Div") self.assertEqual(len(list(graph.nodes())), 1) def test_constant_fold_mul(self): class Module(torch.nn.Module): def __init__( self, ): super().__init__() self.weight = 
torch.nn.Buffer(torch.ones(5)) def forward(self, x): mul = self.weight.mul(torch.tensor([1, 2, 3, 4, 5])) return mul / x x = torch.randn(2, 5) GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX graph, _, __ = self._model_to_graph( Module(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Mul") self.assertEqual(len(list(graph.nodes())), 1) def test_constant_fold_add(self): class Module(torch.nn.Module): def __init__( self, ): super().__init__() self.weight = torch.nn.Buffer(torch.ones(5)) def forward(self, x): add = self.weight + torch.tensor([1, 2, 3, 4, 5]) return add - x x = torch.randn(2, 5) GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX graph, params_dict, __ = self._model_to_graph( Module(), (x,), do_constant_folding=True, operator_export_type=OperatorExportTypes.ONNX, input_names=["x"], dynamic_axes={"x": [0, 1]}, ) for node in graph.nodes(): self.assertTrue(node.kind() != "onnx::Add") self.assertEqual(len(list(graph.nodes())), 1) params = list(params_dict.values()) self.assertEqual(len(params), 1) weight = params[0] self.assertEqual(weight, torch.tensor([2.0, 3.0, 4.0, 5.0, 6.0])) def test_constant_fold_sub(self): class Module(torch.nn.Module): def __init__( self, ): super().__init__() self.weight = torch.nn.Buffer(torch.ones(5)) def forward(self, x): sub = self.weight - torch.tensor([1, 2, 3, 4, 5]) return sub + x x = torch.randn(2, 5) GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX graph, params_dict, __ = self._model_to_graph( Module(), (x,), do_constant_folding=True, operator_export_type=OperatorExportTypes.ONNX, input_names=["x"], dynamic_axes={"x": [0, 1]}, ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Sub") self.assertEqual(len(list(graph.nodes())), 1) params = list(params_dict.values()) self.assertEqual(len(params), 1) weight = params[0] self.assertEqual(weight, torch.tensor([0.0, -1.0, -2.0, -3.0, -4.0])) def test_constant_fold_sqrt(self): class Module(torch.nn.Module): def __init__( self, ): super().__init__() self.weight = torch.nn.Buffer(torch.ones(5)) def forward(self, x): sqrt = torch.sqrt(self.weight) return sqrt / x x = torch.randn(2, 5) GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX graph, _, __ = self._model_to_graph( Module(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Sqrt") self.assertEqual(len(list(graph.nodes())), 1) def test_constant_fold_shape(self): class ShapeModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.weight = torch.nn.Buffer(torch.ones(5)) def forward(self, x): shape = self.weight.shape[0] return x + shape x = torch.randn(2, 5) GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX graph, _, __ = self._model_to_graph( ShapeModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]} ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::Shape") self.assertEqual(len(list(graph.nodes())), 2) def test_constant_fold_upsample_scale_fold_as_constant(self): # upsample scale is a constant, not a model parameter, # therefore should not be added as initializer after constant folding. 
model = torch.nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True) x = torch.randn(1, 32, 224, 224) f = io.BytesIO() torch.onnx.export(model, x, f, dynamo=False) onnx_model = onnx.load(io.BytesIO(f.getvalue())) self.assertEqual(len(onnx_model.graph.initializer), 0) def test_verbose(self): class MyModule(torch.nn.Module): def forward(self, input): return torch.exp(input) x = torch.randn(3, 4) def is_model_stripped(f, verbose=None): if verbose is None: torch.onnx.export( MyModule(), x, f, opset_version=self.opset_version, dynamo=False ) else: torch.onnx.export( MyModule(), x, f, verbose=verbose, opset_version=self.opset_version, dynamo=False, ) model = onnx.load(io.BytesIO(f.getvalue())) model_strip = copy.copy(model) onnx.helper.strip_doc_string(model_strip) return model == model_strip # test verbose=False (default) self.assertTrue(is_model_stripped(io.BytesIO())) # test verbose=True self.assertFalse(is_model_stripped(io.BytesIO(), True)) # NB: remove this test once DataParallel can be correctly handled def test_error_on_data_parallel(self): model = torch.nn.DataParallel(torch.nn.ReflectionPad2d((1, 2, 3, 4))) x = torch.randn(1, 2, 3, 4) f = io.BytesIO() with self.assertRaisesRegex( ValueError, "torch.nn.DataParallel is not supported by ONNX " "exporter, please use 'attribute' module to " "unwrap model from torch.nn.DataParallel. Try ", ): torch.onnx.export( model, x, f, opset_version=self.opset_version, dynamo=False ) @skipIfUnsupportedMinOpsetVersion(11) def test_sequence_dim(self): class Module(torch.nn.Module): def forward(self, x, y): return [x, y] model = Module() # Export with scripting to keep output as Sequence type. # Tracing unpacks the list. script_model = torch.jit.script(model) x = torch.randn(2, 3) # Case 1: dynamic axis f = io.BytesIO() y = torch.randn(2, 3) torch.onnx.export( script_model, (x, y), f, opset_version=self.opset_version, input_names=["x", "y"], dynamic_axes={"y": [1]}, dynamo=False, ) onnx_model = onnx.load(io.BytesIO(f.getvalue())) loop_output_value_info_proto = onnx_model.graph.output[0] ref_value_info_proto = onnx.helper.make_tensor_sequence_value_info( loop_output_value_info_proto.name, 1, [2, None] ) self.assertEqual(loop_output_value_info_proto, ref_value_info_proto) # Case 2: no dynamic axes. 
f = io.BytesIO() y = torch.randn(2, 3) torch.onnx.export( script_model, (x, y), f, opset_version=self.opset_version, dynamo=False ) onnx_model = onnx.load(io.BytesIO(f.getvalue())) loop_output_value_info_proto = onnx_model.graph.output[0] ref_value_info_proto = onnx.helper.make_tensor_sequence_value_info( loop_output_value_info_proto.name, 1, [2, 3] ) self.assertEqual(loop_output_value_info_proto, ref_value_info_proto) def test_export_mode(self): class MyModule(torch.nn.Module): def forward(self, x): y = x + 1 return y model = MyModule() x = torch.randn(10, 3, 128, 128) f = io.BytesIO() # set mode to in inference mode and export in training mode model.eval() old_state = model.training torch.onnx.export( model, (x,), f, opset_version=self.opset_version, training=torch.onnx.TrainingMode.TRAINING, dynamo=False, ) # verify that the model state is preserved self.assertEqual(model.training, old_state) # set mode to training mode and export in inference mode model.train() old_state = model.training torch.onnx.export( model, (x,), f, opset_version=self.opset_version, training=torch.onnx.TrainingMode.EVAL, dynamo=False, ) # verify that the model state is preserved self.assertEqual(model.training, old_state) def test_export_does_not_fail_on_frozen_scripted_module(self): class Inner(torch.nn.Module): def forward(self, x): if x > 0: return x else: return x * x class Outer(torch.nn.Module): def __init__(self) -> None: super().__init__() self.inner = torch.jit.script(Inner()) def forward(self, x): return self.inner(x) x = torch.zeros(1) # Freezing is only implemented in eval mode. So we need to call eval() outer_module = Outer().eval() module = torch.jit.trace_module(outer_module, {"forward": (x)}) # jit.freeze removes the training attribute in the module module = torch.jit.freeze(module) torch.onnx.export( module, (x,), io.BytesIO(), opset_version=self.opset_version, dynamo=False ) @skipIfUnsupportedMinOpsetVersion(15) def test_local_function(self): class N(torch.nn.Module): def __init__(self, prob): super().__init__() self.dropout = torch.nn.Dropout(prob) def forward(self, x): return self.dropout(x) class M(torch.nn.Module): def __init__(self, num_layers): super().__init__() self.num_layers = num_layers self.lns = torch.nn.ModuleList( [torch.nn.LayerNorm(3, eps=i) for i in range(num_layers)] ) self.celu1 = torch.nn.CELU(1.0) self.celu2 = torch.nn.CELU(2.0) self.dropout = N(0.5) def forward(self, x, y, z): res1 = self.celu1(x) res2 = self.celu2(y) for ln in self.lns: z = ln(z) return res1 + res2, self.dropout(z) x = torch.randn(2, 3) y = torch.randn(2, 3) z = torch.randn(2, 3) # Export specified modules. Test against specifying modules that won't # exist in the exported model. # Model export in inference mode will remove dropout node, # thus the dropout module no longer exist in graph. 
f = io.BytesIO() torch.onnx.export( M(3), (x, y, z), f, opset_version=self.opset_version, export_modules_as_functions={ torch.nn.CELU, torch.nn.Dropout, torch.nn.LayerNorm, }, dynamo=False, ) onnx_model = onnx.load(io.BytesIO(f.getvalue())) # Check function definition funcs = onnx_model.functions celu_funcs = [f for f in funcs if f.name == "CELU"] self.assertEqual(len(celu_funcs), 1) self.assertEqual(celu_funcs[0].domain, "torch.nn.modules.activation") self.assertEqual(len(celu_funcs[0].attribute), 3) ln_funcs = [f for f in funcs if f.name == "LayerNorm"] self.assertEqual(len(ln_funcs), 1) self.assertEqual(ln_funcs[0].domain, "torch.nn.modules.normalization") self.assertEqual(len(ln_funcs[0].attribute), 3) # Check local function nodes nodes = onnx_model.graph.node celu_ns = [n for n in nodes if n.op_type == "CELU"] ln_ns = [n for n in nodes if n.op_type == "LayerNorm"] self.assertEqual(len(celu_ns), 2) self.assertEqual(celu_ns[0].domain, "torch.nn.modules.activation") self.assertEqual(len(celu_ns[0].attribute), 3) self.assertEqual(len(ln_ns), 3) self.assertEqual(ln_ns[0].domain, "torch.nn.modules.normalization") self.assertEqual(len(ln_ns[0].attribute), 3) # Export specified modules. f = io.BytesIO() torch.onnx.export( M(3), (x, y, z), f, opset_version=self.opset_version, export_modules_as_functions={torch.nn.CELU}, dynamo=False, ) onnx_model = onnx.load(io.BytesIO(f.getvalue())) funcs = onnx_model.functions self.assertEqual(len(funcs), 1) self.assertEqual(funcs[0].name, "CELU") # Export with empty specified modules. Normal export. f = io.BytesIO() torch.onnx.export( M(3), (x, y, z), f, opset_version=self.opset_version, export_modules_as_functions=set(), dynamo=False, ) onnx_model = onnx.load(io.BytesIO(f.getvalue())) funcs = onnx_model.functions self.assertEqual(len(funcs), 0) # Export all modules. Should contain {M, CELU, LayerNorm}. f = io.BytesIO() torch.onnx.export( M(3), (x, y, z), f, opset_version=self.opset_version, export_modules_as_functions=True, dynamo=False, ) onnx_model = onnx.load(io.BytesIO(f.getvalue())) funcs = onnx_model.functions self.assertEqual(len(funcs), 3) @skipIfUnsupportedMinOpsetVersion(15) def test_local_function_overloads(self): class NWithOverloads(torch.nn.Module): def forward(self, x, y=None, z=None): if y is None: return x + 1 elif z is None: return x + y else: return x + y, x + z class M(torch.nn.Module): def __init__(self, num_layers): super().__init__() self.n = NWithOverloads() def forward(self, x, y, z): return self.n(x), self.n(x, y), self.n(x, y, z) x = torch.randn(2, 3) y = torch.randn(2, 3) z = torch.randn(2, 3) f = io.BytesIO() torch.onnx.export( M(3), (x, y, z), f, opset_version=self.opset_version, export_modules_as_functions={NWithOverloads}, dynamo=False, ) onnx_model = onnx.load(io.BytesIO(f.getvalue())) funcs = onnx_model.functions self.assertEqual(len(funcs), 3) func_names = [f.name for f in funcs] self.assertIn("NWithOverloads", func_names) self.assertIn("NWithOverloads.1", func_names) self.assertIn("NWithOverloads.2", func_names) # Failing after ONNX 1.13.0 @skipIfUnsupportedMaxOpsetVersion(1) def test_local_function_infer_scopes(self): class M(torch.nn.Module): def forward(self, x): # Concatenation of scalars inserts unscoped tensors in IR graph. 
new_tensor_shape = x.size()[:-1] + (1, 1, -1) tensor = x.view(*new_tensor_shape) return tensor x = torch.randn(4, 5) f = io.BytesIO() torch.onnx.export( M(), (x,), f, export_modules_as_functions=True, opset_version=self.opset_version, do_constant_folding=False, dynamo=False, ) onnx_model = onnx.load(io.BytesIO(f.getvalue())) funcs = onnx_model.functions self.assertIn("M", [f.name for f in funcs]) @skipIfUnsupportedMinOpsetVersion(15) def test_local_function_predefined_attributes(self): class M(torch.nn.Module): num_layers: int def __init__(self, num_layers): super().__init__() self.num_layers = num_layers self.lns = torch.nn.ModuleList( [torch.nn.LayerNorm(3, eps=1e-4) for _ in range(num_layers)] ) def forward(self, x): for ln in self.lns: x = ln(x) return x x = torch.randn(2, 3) f = io.BytesIO() model = M(3) torch.onnx.export( model, (x,), f, export_modules_as_functions=True, opset_version=self.opset_version, dynamo=False, ) onnx_model = onnx.load(io.BytesIO(f.getvalue())) funcs = onnx_model.functions m_funcs = [fn for fn in funcs if fn.name == "M"] self.assertEqual(m_funcs[0].attribute, ["num_layers"]) ln_funcs = [fn for fn in funcs if fn.name == "LayerNorm"] self.assertEqual(ln_funcs[0].attribute, ["eps", "elementwise_affine"]) from onnx import helper m_node = [n for n in onnx_model.graph.node if n.op_type == "M"] self.assertEqual( m_node[0].attribute[0], helper.make_attribute("num_layers", model.num_layers), ) ln_nodes = [n for n in m_funcs[0].node if n.op_type == "LayerNorm"] expected_ln_attrs = [ helper.make_attribute( "elementwise_affine", model.lns[0].elementwise_affine ), helper.make_attribute("eps", model.lns[0].eps), ] for ln_node in ln_nodes: self.assertIn(ln_node.attribute[0], expected_ln_attrs) self.assertIn(ln_node.attribute[1], expected_ln_attrs) # This test cases checks the issue where an object does not have an attribute. # When enabling `export_modules_as_functions = True`, the exporter could return an # AttributeError. With this test case, we check that the export passes successfully # without any AttributeError exceptions. # See https://github.com/pytorch/pytorch/pull/109759 for an example. The exception that # this test tries to avoid is `AttributeError: 'Embedding' object has no attribute 'freeze'`. 
@skipIfUnsupportedMinOpsetVersion(15) def test_local_function_subset_of_predefined_attributes(self): class M(torch.nn.Module): num_layers: int def __init__(self, num_layers): super().__init__() self.embed_layer = torch.nn.Embedding.from_pretrained( torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]]) ) self.num_layers = num_layers self.lns = torch.nn.ModuleList( [torch.nn.LayerNorm(3, eps=1e-4) for _ in range(num_layers)] ) def forward(self, x): e = self.embed_layer(torch.LongTensor([1])) for ln in self.lns: x = ln(x) return x, e x = torch.randn(2, 3) f = io.BytesIO() model = M(3) torch.onnx.export( model, (x,), f, export_modules_as_functions=True, opset_version=self.opset_version, verbose=True, # Allows the test case to print `Skipping module attribute 'freeze'` dynamo=False, ) def test_node_scope(self): class N(torch.nn.Module): def __init__(self) -> None: super().__init__() self.relu = torch.nn.ReLU() def forward(self, x): return self.relu(x) class M(torch.nn.Module): def __init__(self, num_layers): super().__init__() self.num_layers = num_layers self.lns = torch.nn.ModuleList( [torch.nn.LayerNorm(3, eps=float(i)) for i in range(num_layers)] ) self.gelu1 = torch.nn.GELU() self.gelu2 = torch.nn.GELU() self.relu = N() def forward(self, x, y, z): res1 = self.gelu1(x) res2 = self.gelu2(y) for ln in self.lns: z = ln(z) return res1 + res2, self.relu(z) x = torch.randn(2, 3) y = torch.randn(2, 3) z = torch.randn(2, 3) model = M(3) expected_scope_names = { "M::/torch.nn.modules.activation.GELU::gelu1", "M::/torch.nn.modules.activation.GELU::gelu2", "M::/torch.nn.modules.normalization.LayerNorm::lns.0", "M::/torch.nn.modules.normalization.LayerNorm::lns.1", "M::/torch.nn.modules.normalization.LayerNorm::lns.2", "M::/N::relu/torch.nn.modules.activation.ReLU::relu", "M::", } graph, _, _ = self._model_to_graph( model, (x, y, z), input_names=[], dynamic_axes={} ) for node in graph.nodes(): self.assertIn( _remove_test_environment_prefix_from_scope_name(node.scopeName()), expected_scope_names, ) graph, _, _ = self._model_to_graph( torch.jit.script(model), (x, y, z), input_names=[], dynamic_axes={} ) for node in graph.nodes(): self.assertIn( _remove_test_environment_prefix_from_scope_name(node.scopeName()), expected_scope_names, ) def test_scope_of_constants_when_combined_by_cse_pass(self): layer_num = 3 class M(torch.nn.Module): def __init__(self, constant): super().__init__() self.constant = constant def forward(self, x): # 'self.constant' is designed to be the same for all layers, # hence it is common sub expression. return x + self.constant class N(torch.nn.Module): def __init__(self, layers: int = layer_num): super().__init__() self.layers = torch.nn.ModuleList( [M(constant=torch.tensor(1.0)) for i in range(layers)] ) def forward(self, x): for layer in self.layers: x = layer(x) return x graph, _, _ = self._model_to_graph( N(), (torch.randn(2, 3)), input_names=[], dynamic_axes={} ) # NOTE: Duplicated constants are populated due to implicit casting in scalar_type_analysis, # so we expect 3 constants with different scopes. The 3 constants are for the 3 layers. # If CSE in exporter is improved later, this test needs to be updated. # It should expect 1 constant, with same scope as root. 
expected_root_scope_name = "N::" expected_layer_scope_name = "M::layers" expected_constant_scope_name = [ f"{expected_root_scope_name}/{expected_layer_scope_name}.{i}" for i in range(layer_num) ] constant_scope_names = [] for node in graph.nodes(): if node.kind() == "onnx::Constant": constant_scope_names.append( _remove_test_environment_prefix_from_scope_name(node.scopeName()) ) self.assertEqual(constant_scope_names, expected_constant_scope_name) def test_scope_of_nodes_when_combined_by_cse_pass(self): layer_num = 3 class M(torch.nn.Module): def __init__(self, constant, bias): super().__init__() self.constant = constant self.bias = bias def forward(self, x): # 'constant' and 'x' is designed to be the same for all layers, # hence `x + self.constant` is common sub expression. # 'bias' is designed to be different for all layers, # hence `* self.bias` is not common sub expression. return (x + self.constant) * self.bias class N(torch.nn.Module): def __init__(self, layers: int = layer_num): super().__init__() self.layers = torch.nn.ModuleList( [ M(constant=torch.tensor([1.0]), bias=torch.randn(1)) for i in range(layers) ] ) def forward(self, x): y = [] for layer in self.layers: y.append(layer(x)) return y[0], y[1], y[2] graph, _, _ = self._model_to_graph( N(), (torch.randn(2, 3)), input_names=[], dynamic_axes={} ) expected_root_scope_name = "N::" expected_layer_scope_name = "M::layers" expected_add_scope_names = [ f"{expected_root_scope_name}/{expected_layer_scope_name}.0" ] expected_mul_scope_names = [ f"{expected_root_scope_name}/{expected_layer_scope_name}.{i}" for i in range(layer_num) ] add_scope_names = [] mul_scope_names = [] for node in graph.nodes(): if node.kind() == "onnx::Add": add_scope_names.append( _remove_test_environment_prefix_from_scope_name(node.scopeName()) ) elif node.kind() == "onnx::Mul": mul_scope_names.append( _remove_test_environment_prefix_from_scope_name(node.scopeName()) ) self.assertEqual(add_scope_names, expected_add_scope_names) self.assertEqual(mul_scope_names, expected_mul_scope_names) def test_aten_fallthrough(self): # Test aten export of op with no symbolic class Module(torch.nn.Module): def forward(self, x): return torch.erfc(x) x = torch.randn(2, 3, 4) GLOBALS.export_onnx_opset_version = self.opset_version graph, _, __ = self._model_to_graph( Module(), (x,), operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}, ) iter = graph.nodes() self.assertEqual(next(iter).kind(), "aten::erfc") def test_custom_op_fallthrough(self): # Test custom op op_source = """ #include <torch/script.h> torch::Tensor custom_add(torch::Tensor self, torch::Tensor other) { return self + other; } static auto registry = torch::RegisterOperators("custom_namespace::custom_op", &custom_add); """ torch.utils.cpp_extension.load_inline( name="custom_add", cpp_sources=op_source, is_python_module=False, verbose=True, ) class FooModel(torch.nn.Module): def forward(self, input, other): # Calling custom op return torch.ops.custom_namespace.custom_op(input, other) x = torch.randn(2, 3, 4, requires_grad=False) y = torch.randn(2, 3, 4, requires_grad=False) model = FooModel() graph, _, __ = self._model_to_graph( model, (x, y), operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH, input_names=["x", "y"], dynamic_axes={"x": [0, 1, 2], "y": [0, 1, 2]}, ) iter = graph.nodes() self.assertEqual(next(iter).kind(), "custom_namespace::custom_op") # gelu is exported as onnx::Gelu for opset >= 20 @skipIfUnsupportedMaxOpsetVersion(19) def 
test_custom_opsets_gelu(self): self.addCleanup(torch.onnx.unregister_custom_op_symbolic, "::gelu", 9) def gelu(g, self, approximate): return g.op("com.microsoft::Gelu", self).setType(self.type()) torch.onnx.register_custom_op_symbolic("::gelu", gelu, 9) model = torch.nn.GELU(approximate="none") x = torch.randn(3, 3) f = io.BytesIO() torch.onnx.export( model, (x,), f, opset_version=self.opset_version, custom_opsets={"com.microsoft": 1}, dynamo=False, ) graph = onnx.load(io.BytesIO(f.getvalue())) self.assertEqual(graph.graph.node[0].op_type, "Gelu") self.assertEqual(graph.opset_import[0].version, self.opset_version) self.assertEqual(graph.opset_import[1].domain, "com.microsoft") self.assertEqual(graph.opset_import[1].version, 1) # gelu is exported as onnx::Gelu for opset >= 20 @skipIfUnsupportedMaxOpsetVersion(19) def test_register_aten_custom_op_symbolic(self): self.addCleanup(torch.onnx.unregister_custom_op_symbolic, "aten::gelu", 9) def gelu(g, self, approximate): return g.op("com.microsoft::Gelu", self).setType(self.type()) torch.onnx.register_custom_op_symbolic("aten::gelu", gelu, 9) model = torch.nn.GELU(approximate="none") x = torch.randn(3, 3) f = io.BytesIO() torch.onnx.export( model, (x,), f, opset_version=self.opset_version, dynamo=False ) graph = onnx.load(io.BytesIO(f.getvalue())) self.assertEqual(graph.graph.node[0].op_type, "Gelu") self.assertEqual(graph.opset_import[1].domain, "com.microsoft") @skipIfNoLapack def test_custom_opsets_inverse(self): self.addCleanup(torch.onnx.unregister_custom_op_symbolic, "::linalg_inv", 9) class CustomInverse(torch.nn.Module): def forward(self, x): return torch.inverse(x) + x def linalg_inv(g, self): return g.op("com.microsoft::Inverse", self).setType(self.type()) torch.onnx.register_custom_op_symbolic("::linalg_inv", linalg_inv, 9) model = CustomInverse() x = torch.randn(2, 3, 3) f = io.BytesIO() torch.onnx.export( model, (x,), f, opset_version=self.opset_version, custom_opsets={"com.microsoft": 1}, dynamo=False, ) graph = onnx.load(io.BytesIO(f.getvalue())) self.assertEqual(graph.graph.node[0].op_type, "Inverse") self.assertEqual(graph.opset_import[0].version, self.opset_version) self.assertEqual(graph.opset_import[1].domain, "com.microsoft") self.assertEqual(graph.opset_import[1].version, 1) def test_onnx_fallthrough(self): # Test aten export of op with symbolic for aten class Module(torch.nn.Module): def forward(self, x): return torch.digamma(x) x = torch.randn(100, 128) graph, _, __ = self._model_to_graph( Module(), (x,), operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH, input_names=["x"], dynamic_axes={"x": [0, 1]}, ) iter = graph.nodes() self.assertEqual(next(iter).kind(), "aten::digamma") # prim::ListConstruct is exported as onnx::SequenceConstruct for opset >= 11 @skipIfUnsupportedMaxOpsetVersion(10) def test_prim_fallthrough(self): # Test prim op class PrimModule(torch.jit.ScriptModule): @torch.jit.script_method def forward(self, x): if isinstance(x, list): y = x else: y = [x] return y x = torch.tensor([2]) model = PrimModule() model.eval() graph, _, __ = self._model_to_graph( model, (x,), operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH, input_names=["x"], dynamic_axes={"x": [0]}, ) iter = graph.nodes() self.assertEqual(next(iter).kind(), "prim::ListConstruct") def test_custom_layer_tuple(self): class CustomFunction(torch.autograd.Function): @staticmethod def symbolic(g, input): return g.op("CustomNamespace::Custom", input, outputs=2) @staticmethod def forward(ctx, input): return input, input class 
Custom(torch.nn.Module): def forward(self, input): return CustomFunction.apply(input) model = Custom() batch = torch.FloatTensor(1, 3) graph, _, _ = self._model_to_graph( model, batch, input_names=["batch"], dynamic_axes={"batch": [0, 1]} ) iter = graph.nodes() self.assertEqual(next(iter).kind(), "CustomNamespace::Custom") def test_autograd_onnx_fallthrough(self): class CustomFunction(torch.autograd.Function): @staticmethod def forward(ctx, input): ctx.save_for_backward(input) return input.clamp(min=0) @staticmethod def backward(ctx, grad_output): (input,) = ctx.saved_tensors grad_input = grad_output.clone() grad_input[input < 0] = 0 return grad_input class Custom(torch.nn.Module): def forward(self, input): return CustomFunction.apply(input) model = Custom() batch = torch.FloatTensor(1, 3) graph, _, _ = self._model_to_graph( model, batch, operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH, input_names=["batch"], dynamic_axes={"batch": [0, 1]}, ) iter = graph.nodes() self.assertEqual(next(iter).kind(), "prim::PythonOp") def test_autograd_module_name(self): class CustomFunction(torch.autograd.Function): @staticmethod def forward(ctx, input): ctx.save_for_backward(input) return input.clamp(min=0) @staticmethod def backward(ctx, grad_output): (input,) = ctx.saved_tensors grad_input = grad_output.clone() grad_input[input < 0] = 0 return grad_input class Custom(torch.nn.Module): def forward(self, input): return CustomFunction.apply(input) + CustomFunction2.apply(input) model = Custom() batch = torch.FloatTensor(1, 3) graph, _, _ = self._model_to_graph( model, batch, operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH, input_names=["batch"], dynamic_axes={"batch": [0, 1]}, ) iter = graph.nodes() autograd1 = next(iter) autograd2 = next(iter) self.assertEqual(autograd1.kind(), "prim::PythonOp") self.assertEqual(autograd2.kind(), "prim::PythonOp") self.assertNotEqual(autograd1.s("module"), autograd2.s("module")) def test_unused_initializers(self): class Model(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv2 = torch.nn.ConvTranspose2d( 16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(1, 1) ) self.k_proj = torch.nn.Linear(5, 5, bias=True) def forward(self, x): x = self.conv2(x) return x x = torch.randn(20, 16, 50, 100) GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX _, params_dict, __ = self._model_to_graph( Model(), (x,), do_constant_folding=False, operator_export_type=OperatorExportTypes.ONNX, input_names=["x"], dynamic_axes={"x": [0, 1, 2, 3]}, ) self.assertEqual(len(params_dict), 2) def test_scripting_param(self): class MyModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d( 3, 16, kernel_size=1, stride=2, padding=3, bias=True ) self.bn = torch.nn.BatchNorm2d(16, affine=True) def forward(self, x): x = self.conv(x) bn = self.bn(x) return bn model = torch.jit.script(MyModule()) x = torch.randn(10, 3, 128, 128) GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX graph, _, __ = self._model_to_graph( model, (x,), do_constant_folding=True, operator_export_type=OperatorExportTypes.ONNX, training=torch.onnx.TrainingMode.TRAINING, input_names=["x"], dynamic_axes={"x": [0, 1, 2, 3]}, ) graph_input_params = [param.debugName() for param in graph.inputs()] for item in dict(model.named_parameters()): self.assertIn( item, graph_input_params, "Graph parameter names does not match model parameters.", 
) def test_fuse_conv_bn(self): class Fuse(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d( 3, 2, kernel_size=1, stride=2, padding=3, bias=True ) self.bn = torch.nn.BatchNorm2d(2) def forward(self, x): out = self.conv(x) return self.bn(out) x = torch.randn(2, 3, 2, 2, requires_grad=True) graph, _, __ = self._model_to_graph( Fuse(), (x,), training=TrainingMode.EVAL, input_names=["x"], dynamic_axes={"x": [0, 1, 2, 3]}, ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::BatchNormalization") self.assertEqual(node.kind(), "onnx::Conv") self.assertEqual(len(list(graph.nodes())), 1) def test_fuse_resnet18(self): model = torchvision.models.resnet18(weights=None) x = torch.randn(2, 3, 224, 224, requires_grad=True) graph, _, __ = self._model_to_graph( model, (x,), training=TrainingMode.EVAL, input_names=["x"], dynamic_axes={"x": [0, 1, 2, 3]}, ) for node in graph.nodes(): self.assertNotEqual(node.kind(), "onnx::BatchNormalization") def test_onnx_function_substitution_pass(self): @torch.jit.script def f(x: torch.Tensor, y: torch.Tensor): z = x - y return x + z class MyModule(torch.nn.Module): def forward(self, x, y): return f(x, y) input_1 = torch.tensor([11]) input_2 = torch.tensor([12]) GLOBALS.export_onnx_opset_version = self.opset_version GLOBALS.operator_export_type = OperatorExportTypes.ONNX graph, _, __ = self._model_to_graph( MyModule(), (input_1, input_2), do_constant_folding=True, operator_export_type=OperatorExportTypes.ONNX, input_names=["input_1", "input_2"], dynamic_axes={"input_1": [0], "input_2": [0]}, ) # Check that the prim::Constant node in the graph for representing the # scripted function `f` is removed and the following prim::CallFunction # is replced by inline graph, with onnx::Sub and onnx::Add nodes. for node in graph.nodes(): self.assertNotEqual(node.kind(), "prim::Constant") self.assertEqual( len(list(graph.nodes())), 2 ) # onnx::Sub and onnx::Add nodes only. 
def test_onnx_value_name(self): class MyModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.in_weight = torch.nn.Parameter(torch.Tensor(3, 3)) self.in_bias = torch.nn.Parameter(torch.Tensor(3)) def forward(self, x): start = 0 end = None weight = self.in_weight bias = self.in_bias weight = weight[start:end, :] if bias is not None: bias = bias[start:end] return torch.nn.functional.linear(x, weight, bias) model = MyModule() x = torch.randn(3, 3) f = io.BytesIO() model.eval() torch.onnx.export( model, (x,), f, opset_version=self.opset_version, keep_initializers_as_inputs=True, dynamo=False, ) graph = onnx.load(io.BytesIO(f.getvalue())) self.assertEqual(graph.graph.input[1].name, "in_weight") self.assertEqual(graph.graph.input[2].name, "in_bias") def test_onnx_node_naming(self): class MainModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self._module_1 = torch.nn.Linear(10, 10) self._module_2 = torch.nn.Linear(10, 10) self._module_3 = torch.nn.Linear(10, 10) self._module_4 = torch.nn.Linear(10, 10) def forward(self, x): y = self._module_1(x) z = self._module_2(y) z = self._module_3(y * z) z = self._module_4(y * z) return z module = MainModule() ref_node_names = [ "/_module_1/Gemm", "/_module_2/Gemm", "/_module_3/Gemm", "/_module_4/Gemm", "/Mul", "/Mul_1", ] f = io.BytesIO() torch.onnx.export( module, torch.ones(1, 10), f, output_names=["y"], dynamo=False ) onnx_model = onnx.load(io.BytesIO(f.getvalue())) for n in onnx_model.graph.node: self.assertIn(n.name, ref_node_names) torch.onnx.export( torch.jit.script(module), torch.ones(1, 10), f, output_names=["y"], dynamo=False, ) onnx_model = onnx.load(io.BytesIO(f.getvalue())) for n in onnx_model.graph.node: self.assertIn(n.name, ref_node_names) def _test_deduplicate_initializers(self, torchscript=False): class MyModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.layer1 = torch.nn.Linear(3, 3) self.layer2 = torch.nn.Linear(3, 3) # Reusing layers. self.layer3 = self.layer1 # Reusing parameters. self.layer2.weight = self.layer1.weight self.layer1.bias = self.layer2.bias # Parameter with different tensors equal in value. self.param1 = torch.nn.Parameter(torch.tensor([1.0, 2.0, 3.0])) self.param2 = torch.nn.Parameter(torch.tensor([1.0, 2.0, 3.0])) def forward(self, x): return ( self.layer3(self.layer2(self.layer1(x))) + self.param1 + self.param2 ) model = torch.jit.script(MyModule()) if torchscript else MyModule() x = torch.randn(3, 3) param_name_set = {k for k, _ in model.named_parameters()} # Test training mode. model.train() f = io.BytesIO() torch.onnx.export( model, (x,), f, training=TrainingMode.TRAINING, opset_version=self.opset_version, dynamo=False, ) graph = onnx.load(io.BytesIO(f.getvalue())) self.assertSetEqual({i.name for i in graph.graph.initializer}, param_name_set) model.train() f = io.BytesIO() torch.onnx.export( model, (x,), f, training=TrainingMode.PRESERVE, opset_version=self.opset_version, dynamo=False, ) graph = onnx.load(io.BytesIO(f.getvalue())) self.assertSetEqual({i.name for i in graph.graph.initializer}, param_name_set) # Test eval mode. 
model.eval() f = io.BytesIO() torch.onnx.export( model, (x,), f, opset_version=self.opset_version, dynamo=False ) graph = onnx.load(io.BytesIO(f.getvalue())) param_name_set.remove("param2") self.assertSetEqual({i.name for i in graph.graph.initializer}, param_name_set) def test_deduplicate_initializers(self): self._test_deduplicate_initializers(torchscript=False) def test_deduplicate_initializers_torchscript(self): self._test_deduplicate_initializers(torchscript=True) @skipIfNoCuda def test_deduplicate_initializers_diff_devices(self): class Model(torch.nn.Module): def __init__(self) -> None: super().__init__() self.w_cpu = torch.nn.Parameter( torch.ones(3, device=torch.device("cpu")) ) self.w_cuda = torch.nn.Parameter( torch.ones(3, device=torch.device("cuda")) ) def forward(self, x, y): return x + self.w_cpu, y + self.w_cuda x = torch.randn(3, 3, device=torch.device("cpu")) y = torch.randn(3, 3, device=torch.device("cuda")) f = io.BytesIO() torch.onnx.export( Model(), (x, y), f, opset_version=self.opset_version, dynamo=False ) graph = onnx.load(io.BytesIO(f.getvalue())) self.assertSetEqual({i.name for i in graph.graph.initializer}, {"w_cpu"}) def test_duplicated_output_node(self): class DuplicatedOutputNet(torch.nn.Module): def __init__(self, input_size, num_classes): super().__init__() self.fc1 = torch.nn.Linear(input_size, num_classes) def forward(self, input0, input1): out1 = self.fc1(input0) out2 = self.fc1(input1) return out1, out1, out2, out1, out2 N, D_in, D_out = 64, 784, 10 pt_model = DuplicatedOutputNet(D_in, D_out) f = io.BytesIO() x = torch.randn(N, D_in) dynamic_axes = { "input0": {0: "input0_dim0", 1: "input0_dim1"}, "input1": {0: "input1_dim0", 1: "input1_dim1"}, "output-0": {0: "output-0_dim0", 1: "output-0_dim1"}, "output-1": {0: "output-1_dim0", 1: "output-1_dim1"}, "output-2": {0: "output-2_dim0", 1: "output-2_dim1"}, "output-3": {0: "output-3_dim0", 1: "output-3_dim1"}, "output-4": {0: "output-4_dim0", 1: "output-4_dim1"}, } torch.onnx.export( pt_model, (x, x), f, input_names=["input0", "input1"], output_names=["output-0", "output-1", "output-2", "output-3", "output-4"], do_constant_folding=False, training=torch.onnx.TrainingMode.TRAINING, dynamic_axes=dynamic_axes, verbose=True, keep_initializers_as_inputs=True, dynamo=False, ) graph = onnx.load(io.BytesIO(f.getvalue())) self.assertEqual(graph.graph.input[0].name, "input0") self.assertEqual(graph.graph.input[1].name, "input1") for i in range(5): self.assertEqual(graph.graph.output[i].name, f"output-{i}") self.assertEqual(graph.graph.node[0].op_type, "Gemm") self.assertEqual(graph.graph.node[1].op_type, "Identity") self.assertEqual(graph.graph.node[2].op_type, "Identity") self.assertEqual(graph.graph.node[3].op_type, "Gemm") self.assertEqual(graph.graph.node[4].op_type, "Identity") def test_deduplicate_ignore_upsample_scale(self): # upsample scale is a constant, not a model parameter, # therefore should be ignored by shared weight deduplication. 
class Model(torch.nn.Module): def __init__(self) -> None: super().__init__() self.upsample_1 = torch.nn.Upsample(scale_factor=2) self.upsample_2 = torch.nn.Upsample(scale_factor=2) def forward(self, x): return self.upsample_1(x), self.upsample_2(x) f = io.BytesIO() x = torch.randn(1, 32, 224, 224) torch.onnx.export(Model(), x, f, dynamo=False) onnx_model = onnx.load(io.BytesIO(f.getvalue())) # aten::upsample converts to onnx::resize resize_nodes = [n for n in onnx_model.graph.node if n.op_type == "Resize"] self.assertEqual(len(resize_nodes), 2) for resize_node in resize_nodes: scale_node = [ n for n in onnx_model.graph.node if n.output[0] == resize_node.input[2] ] self.assertEqual(len(scale_node), 1) self.assertEqual(scale_node[0].op_type, "Constant") def test_bad_symbolic_registration(self): _onnx_opset_version = 9 @parse_args("v") def cat(g, tensor_list, dim): tensors = _unpack_list(tensor_list) return g.op("Concat", *tensors, axis_i=dim) torch.onnx.register_custom_op_symbolic("::cat", cat, _onnx_opset_version) class CatModel(torch.nn.Module): def forward(self, x): return torch.cat((x, x, x), 0) model = CatModel() x = torch.randn(2, 3) f = io.BytesIO() self.assertExpectedRaisesInline( AssertionError, lambda: torch.onnx.export( model, (x,), f, opset_version=_onnx_opset_version, dynamo=False ), ( "A mismatch between the number of arguments (2) and their descriptors (1) was found at symbolic function " "'cat'. If you believe this is not due to custom symbolic implementation within your code or an external " "library, please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml to " "report this bug." ), ) torch.onnx.unregister_custom_op_symbolic("::cat", _onnx_opset_version) if __name__ == "__main__": common_utils.run_tests()
TestUtilityFuns
python
huggingface__transformers
tests/models/instructblip/test_processing_instructblip.py
{ "start": 902, "end": 1579 }
class ____(ProcessorTesterMixin, unittest.TestCase): processor_class = InstructBlipProcessor @classmethod def _setup_tokenizer(cls): tokenizer_class = cls._get_component_class_from_processor("tokenizer") return tokenizer_class.from_pretrained("hf-internal-testing/tiny-random-GPT2Model") @classmethod def _setup_qformer_tokenizer(cls): qformer_tokenizer_class = cls._get_component_class_from_processor("qformer_tokenizer") return qformer_tokenizer_class.from_pretrained("hf-internal-testing/tiny-random-bert") @staticmethod def prepare_processor_dict(): return {"num_query_tokens": 1}
InstructBlipProcessorTest
python
astropy__astropy
astropy/units/tests/test_quantity_non_ufuncs.py
{ "start": 2602, "end": 2883 }
class ____(BasicTestSetup): def check(self, func, *args, **kwargs): o = func(self.q, *args, **kwargs) expected = func(self.q.value, *args, **kwargs) * self.q.unit assert o.shape == expected.shape assert np.all(o == expected)
InvariantUnitTestSetup
python
pyca__cryptography
src/cryptography/x509/certificate_transparency.py
{ "start": 315, "end": 398 }
class ____(utils.Enum): X509_CERTIFICATE = 0 PRE_CERTIFICATE = 1
LogEntryType
python
pydata__xarray
xarray/tests/test_ufuncs.py
{ "start": 5978, "end": 6411 }
class ____(np.ndarray): # Minimal subclassed duck array with its own self-contained namespace, # which implements a few ufuncs def __new__(cls, array): obj = np.asarray(array).view(cls) return obj def __array_namespace__(self, *, api_version=None): return DuckArray @staticmethod def sin(x): return np.sin(x) @staticmethod def add(x, y): return x + y
DuckArray
python
tensorflow__tensorflow
tensorflow/python/tpu/topology_test.py
{ "start": 808, "end": 1535 }
class ____(test.TestCase): def testSerialization(self): """Tests if the class is able to generate serialized strings.""" original_topology = topology.Topology( mesh_shape=[1, 1, 1, 2], device_coordinates=[[[0, 0, 0, 0], [0, 0, 0, 1]]], ) serialized_str = original_topology.serialized() new_topology = topology.Topology(serialized=serialized_str) # Make sure the topology recovered from serialized str is same as the # original topology. self.assertAllEqual( original_topology.mesh_shape, new_topology.mesh_shape) self.assertAllEqual( original_topology.device_coordinates, new_topology.device_coordinates) if __name__ == "__main__": test.main()
TopologyTest
python
sqlalchemy__sqlalchemy
test/sql/test_metadata.py
{ "start": 168068, "end": 182909 }
class ____(fixtures.TestBase): @contextmanager def _fixture(self): from sqlalchemy.engine.default import DefaultDialect class ParticipatingDialect(DefaultDialect): construct_arguments = [ (schema.Index, {"x": 5, "y": False, "z_one": None}), (schema.ForeignKeyConstraint, {"foobar": False}), ] class ParticipatingDialect2(DefaultDialect): construct_arguments = [ (schema.Index, {"x": 9, "y": True, "pp": "default"}), (schema.Table, {"*": None}), ] class NonParticipatingDialect(DefaultDialect): construct_arguments = None def load(dialect_name): if dialect_name == "participating": return ParticipatingDialect elif dialect_name == "participating2": return ParticipatingDialect2 elif dialect_name == "nonparticipating": return NonParticipatingDialect else: raise exc.NoSuchModuleError("no dialect %r" % dialect_name) with mock.patch("sqlalchemy.dialects.registry.load", load): yield def teardown_test(self): Index._kw_registry.clear() def test_participating(self): with self._fixture(): idx = Index("a", "b", "c", participating_y=True) eq_( idx.dialect_options, {"participating": {"x": 5, "y": True, "z_one": None}}, ) eq_(idx.dialect_kwargs, {"participating_y": True}) def test_nonparticipating(self): with self._fixture(): idx = Index( "a", "b", "c", nonparticipating_y=True, nonparticipating_q=5 ) eq_( idx.dialect_kwargs, {"nonparticipating_y": True, "nonparticipating_q": 5}, ) def test_bad_kwarg_raise(self): with self._fixture(): assert_raises_message( TypeError, "Additional arguments should be named " "<dialectname>_<argument>, got 'foobar'", Index, "a", "b", "c", foobar=True, ) def test_unknown_dialect_warning(self): with self._fixture(): with testing.expect_warnings( "Can't validate argument 'unknown_y'; can't locate " "any SQLAlchemy dialect named 'unknown'", ): Index("a", "b", "c", unknown_y=True) def test_participating_bad_kw(self): with self._fixture(): assert_raises_message( exc.ArgumentError, "Argument 'participating_q_p_x' is not accepted by dialect " "'participating' on behalf of " "<class 'sqlalchemy.sql.schema.Index'>", Index, "a", "b", "c", participating_q_p_x=8, ) def test_participating_unknown_schema_item(self): with self._fixture(): # the dialect doesn't include UniqueConstraint in # its registry at all. 
assert_raises_message( exc.ArgumentError, "Argument 'participating_q_p_x' is not accepted by dialect " "'participating' on behalf of " "<class 'sqlalchemy.sql.schema.UniqueConstraint'>", UniqueConstraint, "a", "b", participating_q_p_x=8, ) @testing.emits_warning("Can't validate") def test_unknown_dialect_warning_still_populates(self): with self._fixture(): idx = Index("a", "b", "c", unknown_y=True) eq_(idx.dialect_kwargs, {"unknown_y": True}) # still populates @testing.emits_warning("Can't validate") def test_unknown_dialect_warning_still_populates_multiple(self): with self._fixture(): idx = Index( "a", "b", "c", unknown_y=True, unknown_z=5, otherunknown_foo="bar", participating_y=8, ) eq_( idx.dialect_options, { "unknown": {"y": True, "z": 5, "*": None}, "otherunknown": {"foo": "bar", "*": None}, "participating": {"x": 5, "y": 8, "z_one": None}, }, ) eq_( idx.dialect_kwargs, { "unknown_z": 5, "participating_y": 8, "unknown_y": True, "otherunknown_foo": "bar", }, ) # still populates def test_combined(self): with self._fixture(): idx = Index( "a", "b", "c", participating_x=7, nonparticipating_y=True ) eq_( idx.dialect_options, { "participating": {"y": False, "x": 7, "z_one": None}, "nonparticipating": {"y": True, "*": None}, }, ) eq_( idx.dialect_kwargs, {"participating_x": 7, "nonparticipating_y": True}, ) def test_multiple_participating(self): with self._fixture(): idx = Index( "a", "b", "c", participating_x=7, participating2_x=15, participating2_y="lazy", ) eq_( idx.dialect_options, { "participating": {"x": 7, "y": False, "z_one": None}, "participating2": {"x": 15, "y": "lazy", "pp": "default"}, }, ) eq_( idx.dialect_kwargs, { "participating_x": 7, "participating2_x": 15, "participating2_y": "lazy", }, ) def test_foreign_key_propagate(self): with self._fixture(): m = MetaData() fk = ForeignKey("t2.id", participating_foobar=True) t = Table("t", m, Column("id", Integer, fk)) fkc = [ c for c in t.constraints if isinstance(c, ForeignKeyConstraint) ][0] eq_(fkc.dialect_kwargs, {"participating_foobar": True}) def test_foreign_key_propagate_exceptions_delayed(self): with self._fixture(): m = MetaData() fk = ForeignKey("t2.id", participating_fake=True) c1 = Column("id", Integer, fk) assert_raises_message( exc.ArgumentError, "Argument 'participating_fake' is not accepted by " "dialect 'participating' on behalf of " "<class 'sqlalchemy.sql.schema.ForeignKeyConstraint'>", Table, "t", m, c1, ) def test_wildcard(self): with self._fixture(): m = MetaData() t = Table( "x", m, Column("x", Integer), participating2_xyz="foo", participating2_engine="InnoDB", ) eq_( t.dialect_kwargs, { "participating2_xyz": "foo", "participating2_engine": "InnoDB", }, ) def test_uninit_wildcard(self): with self._fixture(): m = MetaData() t = Table("x", m, Column("x", Integer)) eq_(t.dialect_options["participating2"], {"*": None}) eq_(t.dialect_kwargs, {}) def test_not_contains_wildcard(self): with self._fixture(): m = MetaData() t = Table("x", m, Column("x", Integer)) assert "foobar" not in t.dialect_options["participating2"] def test_contains_wildcard(self): with self._fixture(): m = MetaData() t = Table("x", m, Column("x", Integer), participating2_foobar=5) assert "foobar" in t.dialect_options["participating2"] def test_update(self): with self._fixture(): idx = Index("a", "b", "c", participating_x=20) eq_(idx.dialect_kwargs, {"participating_x": 20}) idx._validate_dialect_kwargs( {"participating_x": 25, "participating_z_one": "default"} ) eq_( idx.dialect_options, {"participating": {"x": 25, "y": False, "z_one": 
"default"}}, ) eq_( idx.dialect_kwargs, {"participating_x": 25, "participating_z_one": "default"}, ) idx._validate_dialect_kwargs( {"participating_x": 25, "participating_z_one": "default"} ) eq_( idx.dialect_options, {"participating": {"x": 25, "y": False, "z_one": "default"}}, ) eq_( idx.dialect_kwargs, {"participating_x": 25, "participating_z_one": "default"}, ) idx._validate_dialect_kwargs( {"participating_y": True, "participating2_y": "p2y"} ) eq_( idx.dialect_options, { "participating": {"x": 25, "y": True, "z_one": "default"}, "participating2": {"y": "p2y", "pp": "default", "x": 9}, }, ) eq_( idx.dialect_kwargs, { "participating_x": 25, "participating_y": True, "participating2_y": "p2y", "participating_z_one": "default", }, ) def test_key_error_kwargs_no_dialect(self): with self._fixture(): idx = Index("a", "b", "c") assert_raises(KeyError, idx.kwargs.__getitem__, "foo_bar") def test_key_error_kwargs_no_underscore(self): with self._fixture(): idx = Index("a", "b", "c") assert_raises(KeyError, idx.kwargs.__getitem__, "foobar") def test_key_error_kwargs_no_argument(self): with self._fixture(): idx = Index("a", "b", "c") assert_raises( KeyError, idx.kwargs.__getitem__, "participating_asdmfq34098" ) assert_raises( KeyError, idx.kwargs.__getitem__, "nonparticipating_asdmfq34098", ) def test_key_error_dialect_options(self): with self._fixture(): idx = Index("a", "b", "c") assert_raises( KeyError, idx.dialect_options["participating"].__getitem__, "asdfaso890", ) assert_raises( KeyError, idx.dialect_options["nonparticipating"].__getitem__, "asdfaso890", ) def test_ad_hoc_participating_via_opt(self): with self._fixture(): idx = Index("a", "b", "c") idx.dialect_options["participating"]["foobar"] = 5 eq_(idx.dialect_options["participating"]["foobar"], 5) eq_(idx.kwargs["participating_foobar"], 5) def test_ad_hoc_nonparticipating_via_opt(self): with self._fixture(): idx = Index("a", "b", "c") idx.dialect_options["nonparticipating"]["foobar"] = 5 eq_(idx.dialect_options["nonparticipating"]["foobar"], 5) eq_(idx.kwargs["nonparticipating_foobar"], 5) def test_ad_hoc_participating_via_kwargs(self): with self._fixture(): idx = Index("a", "b", "c") idx.kwargs["participating_foobar"] = 5 eq_(idx.dialect_options["participating"]["foobar"], 5) eq_(idx.kwargs["participating_foobar"], 5) def test_ad_hoc_nonparticipating_via_kwargs(self): with self._fixture(): idx = Index("a", "b", "c") idx.kwargs["nonparticipating_foobar"] = 5 eq_(idx.dialect_options["nonparticipating"]["foobar"], 5) eq_(idx.kwargs["nonparticipating_foobar"], 5) def test_ad_hoc_via_kwargs_invalid_key(self): with self._fixture(): idx = Index("a", "b", "c") assert_raises_message( exc.ArgumentError, "Keys must be of the form <dialectname>_<argname>", idx.kwargs.__setitem__, "foobar", 5, ) def test_ad_hoc_via_kwargs_invalid_dialect(self): with self._fixture(): idx = Index("a", "b", "c") assert_raises_message( exc.ArgumentError, "no dialect 'nonexistent'", idx.kwargs.__setitem__, "nonexistent_foobar", 5, ) def test_add_new_arguments_participating(self): with self._fixture(): Index.argument_for("participating", "xyzqpr", False) idx = Index("a", "b", "c", participating_xyzqpr=True) eq_(idx.kwargs["participating_xyzqpr"], True) idx = Index("a", "b", "c") eq_(idx.dialect_options["participating"]["xyzqpr"], False) def test_add_new_arguments_participating_no_existing(self): with self._fixture(): PrimaryKeyConstraint.argument_for("participating", "xyzqpr", False) pk = PrimaryKeyConstraint("a", "b", "c", participating_xyzqpr=True) 
eq_(pk.kwargs["participating_xyzqpr"], True) pk = PrimaryKeyConstraint("a", "b", "c") eq_(pk.dialect_options["participating"]["xyzqpr"], False) def test_add_new_arguments_nonparticipating(self): with self._fixture(): assert_raises_message( exc.ArgumentError, "Dialect 'nonparticipating' does have keyword-argument " "validation and defaults enabled configured", Index.argument_for, "nonparticipating", "xyzqpr", False, ) def test_add_new_arguments_invalid_dialect(self): with self._fixture(): assert_raises_message( exc.ArgumentError, "no dialect 'nonexistent'", Index.argument_for, "nonexistent", "foobar", 5, )
DialectKWArgTest
python
encode__django-rest-framework
tests/test_model_serializer.py
{ "start": 39936, "end": 40184 }
class ____(models.Model): text = models.CharField(max_length=100) bar = models.ForeignKey( 'Issue7550BarModel', null=True, blank=True, on_delete=models.SET_NULL, related_name='foos', related_query_name='foo')
Issue7550FooModel
python
walkccc__LeetCode
solutions/288. Unique Word Abbreviation/288.py
{ "start": 0, "end": 556 }
class ____: def __init__(self, dictionary: list[str]): self.dict = set(dictionary) # T := unique, F := not unique self.abbrUnique = {} for word in self.dict: abbr = self._getAbbr(word) self.abbrUnique[abbr] = abbr not in self.abbrUnique def isUnique(self, word: str) -> bool: abbr = self._getAbbr(word) return abbr not in self.abbrUnique or self.abbrUnique[abbr] and word in self.dict def _getAbbr(self, s: str) -> str: n = len(s) if n <= 2: return s return s[0] + str(n - 2) + s[-1]
ValidWordAbbr
python
PyCQA__pylint
tests/functional/s/slots_checks.py
{ "start": 2236, "end": 2298 }
class ____: __slots__ = Good.__slots__
PotentiallyFourthGood
python
PrefectHQ__prefect
tests/server/models/test_block_registration.py
{ "start": 2682, "end": 3876 }
class ____: async def test_register_new_block_type(self, session): read_block_type = await read_block_type_by_slug( session, block_type_slug="secret" ) assert read_block_type is None registered_block_type_id = await register_block_type( session=session, block_type=Secret._to_block_type() ) read_block_type = await read_block_type_by_slug( session, block_type_slug="secret" ) assert registered_block_type_id == read_block_type.id async def test_register_existing_block_type(self, session): first_block_type_id = await register_block_type( session=session, block_type=Secret._to_block_type() ) Secret._description = "I have overwritten this description" second_block_type_id = await register_block_type( session=session, block_type=Secret._to_block_type() ) assert first_block_type_id == second_block_type_id read_block_type = await read_block_type_by_slug( session, block_type_slug="secret" ) assert read_block_type.description == Secret._description
TestRegisterBlockType
python
apache__airflow
providers/sftp/src/airflow/providers/sftp/decorators/sensors/sftp.py
{ "start": 1037, "end": 2882 }
class ____(SFTPSensor): """ Wraps a Python callable and captures args/kwargs when called for execution. :param python_callable: A reference to an object that is callable :param task_id: task Id :param op_args: a list of positional arguments that will get unpacked when calling your callable (templated) :param op_kwargs: a dictionary of keyword arguments that will get unpacked in your function (templated) :param kwargs_to_upstream: For certain operators, we might need to upstream certain arguments that would otherwise be absorbed by the DecoratedOperator (for example python_callable for the PythonOperator). This gives a user the option to upstream kwargs as needed. """ template_fields: Sequence[str] = ("op_args", "op_kwargs", *SFTPSensor.template_fields) custom_operator_name = "@task.sftp_sensor" # since we won't mutate the arguments, we should just do the shallow copy # there are some cases we can't deepcopy the objects (e.g protobuf). shallow_copy_attrs: Sequence[str] = ("python_callable",) def __init__( self, *, task_id: str, **kwargs, ) -> None: kwargs.pop("multiple_outputs") kwargs["task_id"] = get_unique_task_id(task_id, kwargs.get("dag"), kwargs.get("task_group")) super().__init__(**kwargs) def sftp_sensor_task(python_callable: Callable | None = None, **kwargs) -> TaskDecorator: """ Wrap a function into an Airflow operator. Accepts kwargs for operator kwarg. Can be reused in a single DAG. :param python_callable: Function to decorate """ return task_decorator_factory( python_callable=python_callable, multiple_outputs=False, decorated_operator_class=_DecoratedSFTPSensor, **kwargs, )
_DecoratedSFTPSensor
python
numba__numba
numba/tests/test_typeinfer.py
{ "start": 30223, "end": 30871 }
class ____(unittest.TestCase): """ Make sure partial typing stores type errors in compiler state properly """ def test_partial_typing_error(self): # example with type unification error def impl(flag): if flag: a = 1 else: a = str(1) return a typing_errs = get_func_typing_errs(impl, (types.bool_,)) self.assertTrue(isinstance(typing_errs, list) and len(typing_errs) == 1) self.assertTrue(isinstance(typing_errs[0], errors.TypingError) and "Cannot unify" in typing_errs[0].msg)
TestPartialTypingErrors
python
numpy__numpy
numpy/matrixlib/tests/test_matrix_linalg.py
{ "start": 1480, "end": 1538 }
class ____(SVDCases, MatrixTestCase): pass
TestSVDMatrix
python
pyqtgraph__pyqtgraph
pyqtgraph/graphicsItems/ViewBox/ViewBoxMenu.py
{ "start": 200, "end": 9283 }
class ____(QtWidgets.QMenu): def __init__(self, view): QtWidgets.QMenu.__init__(self) self.view = weakref.ref(view) ## keep weakref to view to avoid circular reference (don't know why, but this prevents the ViewBox from being collected) self.valid = False ## tells us whether the ui needs to be updated self.viewMap = weakref.WeakValueDictionary() ## weakrefs to all views listed in the link combos self.setTitle(translate("ViewBox", "ViewBox options")) self.viewAll = QtGui.QAction(translate("ViewBox", "View All"), self) self.viewAll.triggered.connect(self.autoRange) self.addAction(self.viewAll) self.ctrl = [] self.widgetGroups = [] self.dv = QtGui.QDoubleValidator(self) for axis in 'XY': m = self.addMenu(f"{axis} {translate('ViewBox', 'axis')}") w = QtWidgets.QWidget() ui = ui_template.Ui_Form() ui.setupUi(w) a = QtWidgets.QWidgetAction(self) a.setDefaultWidget(w) m.addAction(a) self.ctrl.append(ui) wg = WidgetGroup(w) self.widgetGroups.append(wg) connects = [ (ui.mouseCheck.toggled, 'MouseToggled'), (ui.manualRadio.clicked, 'ManualClicked'), (ui.minText.editingFinished, 'RangeTextChanged'), (ui.maxText.editingFinished, 'RangeTextChanged'), (ui.autoRadio.clicked, 'AutoClicked'), (ui.autoPercentSpin.valueChanged, 'AutoSpinChanged'), (ui.linkCombo.currentIndexChanged, 'LinkComboChanged'), (ui.autoPanCheck.toggled, 'AutoPanToggled'), (ui.visibleOnlyCheck.toggled, 'VisibleOnlyToggled') ] for sig, fn in connects: sig.connect(getattr(self, axis.lower()+fn)) self.ctrl[0].invertCheck.toggled.connect(self.xInvertToggled) self.ctrl[1].invertCheck.toggled.connect(self.yInvertToggled) leftMenu = self.addMenu(translate("ViewBox", "Mouse Mode")) group = QtGui.QActionGroup(self) group.triggered.connect(self.setMouseMode) pan = QtGui.QAction(translate("ViewBox", "3 button"), group) zoom = QtGui.QAction(translate("ViewBox", "1 button"), group) pan.setCheckable(True) zoom.setCheckable(True) leftMenu.addActions(group.actions()) self.mouseModes = [pan, zoom] self.view().sigStateChanged.connect(self.viewStateChanged) self.updateState() @QtCore.Slot() def viewStateChanged(self): self.valid = False if self.ctrl[0].minText.isVisible() or self.ctrl[1].minText.isVisible(): self.updateState() def updateState(self): ## Something about the viewbox has changed; update the menu GUI state = self.view().getState(copy=False) if state['mouseMode'] == ViewBox.PanMode: self.mouseModes[0].setChecked(True) else: self.mouseModes[1].setChecked(True) for i in [0,1]: # x, y tr = state['targetRange'][i] self.ctrl[i].minText.setText("%0.5g" % tr[0]) self.ctrl[i].maxText.setText("%0.5g" % tr[1]) if state['autoRange'][i] is not False: self.ctrl[i].autoRadio.setChecked(True) if state['autoRange'][i] is not True: self.ctrl[i].autoPercentSpin.setValue(int(state['autoRange'][i] * 100)) else: self.ctrl[i].manualRadio.setChecked(True) self.ctrl[i].mouseCheck.setChecked(state['mouseEnabled'][i]) ## Update combo to show currently linked view c = self.ctrl[i].linkCombo c.blockSignals(True) try: view = state['linkedViews'][i] ## will always be string or None if view is None: view = '' ind = c.findText(view) if ind == -1: ind = 0 c.setCurrentIndex(ind) finally: c.blockSignals(False) self.ctrl[i].autoPanCheck.setChecked(state['autoPan'][i]) self.ctrl[i].visibleOnlyCheck.setChecked(state['autoVisibleOnly'][i]) xy = ['x', 'y'][i] self.ctrl[i].invertCheck.setChecked(state.get(xy+'Inverted', False)) self.valid = True def popup(self, *args): if not self.valid: self.updateState() QtWidgets.QMenu.popup(self, *args) @QtCore.Slot() def autoRange(self): 
self.view().autoRange() ## don't let signal call this directly--it'll add an unwanted argument @QtCore.Slot(bool) def xMouseToggled(self, b): self.view().setMouseEnabled(x=b) @QtCore.Slot() def xManualClicked(self): self.view().enableAutoRange(ViewBox.XAxis, False) @QtCore.Slot() def xRangeTextChanged(self): self.ctrl[0].manualRadio.setChecked(True) self.view().setXRange(*self._validateRangeText(0), padding=0) @QtCore.Slot() def xAutoClicked(self): val = self.ctrl[0].autoPercentSpin.value() * 0.01 self.view().enableAutoRange(ViewBox.XAxis, val) @QtCore.Slot(int) def xAutoSpinChanged(self, val): self.ctrl[0].autoRadio.setChecked(True) self.view().enableAutoRange(ViewBox.XAxis, val*0.01) @QtCore.Slot(int) def xLinkComboChanged(self, ind): self.view().setXLink(str(self.ctrl[0].linkCombo.currentText())) @QtCore.Slot(bool) def xAutoPanToggled(self, b): self.view().setAutoPan(x=b) @QtCore.Slot(bool) def xVisibleOnlyToggled(self, b): self.view().setAutoVisible(x=b) @QtCore.Slot(bool) def yMouseToggled(self, b): self.view().setMouseEnabled(y=b) @QtCore.Slot() def yManualClicked(self): self.view().enableAutoRange(ViewBox.YAxis, False) @QtCore.Slot() def yRangeTextChanged(self): self.ctrl[1].manualRadio.setChecked(True) self.view().setYRange(*self._validateRangeText(1), padding=0) @QtCore.Slot() def yAutoClicked(self): val = self.ctrl[1].autoPercentSpin.value() * 0.01 self.view().enableAutoRange(ViewBox.YAxis, val) @QtCore.Slot(int) def yAutoSpinChanged(self, val): self.ctrl[1].autoRadio.setChecked(True) self.view().enableAutoRange(ViewBox.YAxis, val*0.01) @QtCore.Slot(int) def yLinkComboChanged(self, ind): self.view().setYLink(str(self.ctrl[1].linkCombo.currentText())) @QtCore.Slot(bool) def yAutoPanToggled(self, b): self.view().setAutoPan(y=b) @QtCore.Slot(bool) def yVisibleOnlyToggled(self, b): self.view().setAutoVisible(y=b) @QtCore.Slot(bool) def yInvertToggled(self, b): self.view().invertY(b) @QtCore.Slot(bool) def xInvertToggled(self, b): self.view().invertX(b) @QtCore.Slot(QtGui.QAction) def setMouseMode(self, action): mode = None if action == self.mouseModes[0]: mode = 'pan' elif action == self.mouseModes[1]: mode = 'rect' if mode is not None: self.view().setLeftButtonAction(mode) def setViewList(self, views): names = [''] self.viewMap.clear() ## generate list of views to show in the link combo for v in views: name = v.name if name is None: ## unnamed views do not show up in the view list (although they are linkable) continue names.append(name) self.viewMap[name] = v for i in [0,1]: c = self.ctrl[i].linkCombo current = c.currentText() c.blockSignals(True) changed = True try: c.clear() for name in names: c.addItem(name) if name == current: changed = False c.setCurrentIndex(c.count()-1) finally: c.blockSignals(False) if changed: c.setCurrentIndex(0) c.currentIndexChanged.emit(c.currentIndex()) def _validateRangeText(self, axis): """Validate range text inputs. Return current value(s) if invalid.""" inputs = (self.ctrl[axis].minText.text(), self.ctrl[axis].maxText.text()) vals = self.view().viewRange()[axis] for i, text in enumerate(inputs): try: vals[i] = float(text) except ValueError: # could not convert string to float pass return vals from .ViewBox import ViewBox
ViewBoxMenu
python
kamyu104__LeetCode-Solutions
Python/separate-the-digits-in-an-array.py
{ "start": 44, "end": 429 }
class ____(object): def separateDigits(self, nums): """ :type nums: List[int] :rtype: List[int] """ result = [] for x in reversed(nums): while x: result.append(x%10) x //= 10 result.reverse() return result # Time: O(n * logr) # Space: O(logr), r = max(nums) # array
Solution
python
sympy__sympy
sympy/polys/domains/simpledomain.py
{ "start": 150, "end": 377 }
class ____(Domain[Er]): """Base class for simple domains, e.g. ZZ, QQ. """ is_Simple = True def inject(self, *gens): """Inject generators into this domain. """ return self.poly_ring(*gens)
SimpleDomain
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/repository_definition/repository_data.py
{ "start": 1794, "end": 7428 }
class ____(ABC): """Users should usually rely on the :py:func:`@repository <repository>` decorator to create new repositories, which will in turn call the static constructors on this class. However, users may subclass :py:class:`RepositoryData` for fine-grained control over access to and lazy creation of repository members. """ @abstractmethod def get_top_level_resources(self) -> Mapping[str, ResourceDefinition]: """Return all top-level resources in the repository as a list, such as those provided to the Definitions constructor. Returns: List[ResourceDefinition]: All top-level resources in the repository. """ @abstractmethod def get_env_vars_by_top_level_resource(self) -> Mapping[str, AbstractSet[str]]: pass @abstractmethod @public def get_all_jobs(self) -> Sequence[JobDefinition]: """Return all jobs in the repository as a list. Returns: List[JobDefinition]: All jobs in the repository. """ @public def get_job_names(self) -> Sequence[str]: """Get the names of all jobs in the repository. Returns: List[str] """ return [job_def.name for job_def in self.get_all_jobs()] @public def has_job(self, job_name: str) -> bool: """Check if a job with a given name is present in the repository. Args: job_name (str): The name of the job. Returns: bool """ return job_name in self.get_job_names() @public def get_job(self, job_name: str) -> JobDefinition: """Get a job by name. Args: job_name (str): Name of the job to retrieve. Returns: JobDefinition: The job definition corresponding to the given name. """ match = next(job for job in self.get_all_jobs() if job.name == job_name) if match is None: raise DagsterInvariantViolationError(f"Could not find job {job_name} in repository") return match @public def get_schedule_names(self) -> Sequence[str]: """Get the names of all schedules in the repository. Returns: List[str] """ return [schedule.name for schedule in self.get_all_schedules()] @public def get_all_schedules(self) -> Sequence[ScheduleDefinition]: """Return all schedules in the repository as a list. Returns: List[ScheduleDefinition]: All jobs in the repository. """ return [] @public def get_schedule(self, schedule_name: str) -> ScheduleDefinition: """Get a schedule by name. Args: schedule_name (str): name of the schedule to retrieve. Returns: ScheduleDefinition: The schedule definition corresponding to the given name. """ schedules_with_name = [ schedule for schedule in self.get_all_schedules() if schedule.name == schedule_name ] if not schedules_with_name: raise DagsterInvariantViolationError( f"Could not find schedule {schedule_name} in repository" ) return schedules_with_name[0] @public def has_schedule(self, schedule_name: str) -> bool: """Check if a schedule with a given name is present in the repository.""" return schedule_name in self.get_schedule_names() @public def get_all_sensors(self) -> Sequence[SensorDefinition]: """Sequence[SensorDefinition]: Return all sensors in the repository as a list.""" return [] @public def get_sensor_names(self) -> Sequence[str]: """Sequence[str]: Get the names of all sensors in the repository.""" return [sensor.name for sensor in self.get_all_sensors()] @public def get_sensor(self, sensor_name: str) -> SensorDefinition: """Get a sensor by name. Args: sensor_name (str): name of the sensor to retrieve. Returns: SensorDefinition: The sensor definition corresponding to the given name. 
""" sensors_with_name = [ sensor for sensor in self.get_all_sensors() if sensor.name == sensor_name ] if not sensors_with_name: raise DagsterInvariantViolationError( f"Could not find sensor {sensor_name} in repository" ) return sensors_with_name[0] @public def has_sensor(self, sensor_name: str) -> bool: """Check if a sensor with a given name is present in the repository.""" return sensor_name in self.get_sensor_names() @public def get_source_assets_by_key(self) -> Mapping[AssetKey, SourceAsset]: """Mapping[AssetKey, SourceAsset]: Get the source assets for the repository.""" return {} @public def get_assets_defs_by_key(self) -> Mapping[AssetKey, "AssetsDefinition"]: """Mapping[AssetKey, AssetsDefinition]: Get the asset definitions for the repository.""" return {} @public def get_asset_checks_defs_by_key(self) -> Mapping[AssetCheckKey, "AssetChecksDefinition"]: """Mapping[AssetCheckKey, AssetChecksDefinition]: Get the asset checks definitions for the repository.""" return {} def get_component_tree(self) -> Optional["ComponentTree"]: return None def load_all_definitions(self): # force load of all lazy constructed code artifacts self.get_all_jobs() self.get_all_schedules() self.get_all_sensors() self.get_source_assets_by_key()
RepositoryData
python
run-llama__llama_index
llama-index-core/llama_index/core/postprocessor/pii.py
{ "start": 3552, "end": 5323 }
class ____(BaseNodePostprocessor): """ NER PII Node processor. Uses a HF transformers model. """ pii_node_info_key: str = "__pii_node_info__" @classmethod def class_name(cls) -> str: return "NERPIINodePostprocessor" def mask_pii(self, ner: Callable, text: str) -> Tuple[str, Dict]: """Mask PII in text.""" new_text = text response = ner(text) mapping = {} for entry in response: entity_group_tag = f"[{entry['entity_group']}_{entry['start']}]" new_text = new_text.replace(entry["word"], entity_group_tag).strip() mapping[entity_group_tag] = entry["word"] return new_text, mapping def _postprocess_nodes( self, nodes: List[NodeWithScore], query_bundle: Optional[QueryBundle] = None, ) -> List[NodeWithScore]: """Postprocess nodes.""" from transformers import pipeline # pants: no-infer-dep ner = pipeline("ner", grouped_entities=True) # swap out text from nodes, with the original node mappings new_nodes = [] for node_with_score in nodes: node = node_with_score.node new_text, mapping_info = self.mask_pii( ner, node.get_content(metadata_mode=MetadataMode.LLM) ) new_node = deepcopy(node) new_node.excluded_embed_metadata_keys.append(self.pii_node_info_key) new_node.excluded_llm_metadata_keys.append(self.pii_node_info_key) new_node.metadata[self.pii_node_info_key] = mapping_info new_node.set_content(new_text) new_nodes.append(NodeWithScore(node=new_node, score=node_with_score.score)) return new_nodes
NERPIINodePostprocessor
python
numba__numba
numba/core/typing/builtins.py
{ "start": 6006, "end": 6068 }
class ____(BinOp): pass @infer_global(operator.mul)
BinOpSub
python
PyCQA__pylint
tests/test_check_parallel.py
{ "start": 1950, "end": 2700 }
class ____(BaseRawFileChecker): """A checker that does not need to consolidate data across run invocations.""" name = "sequential-checker" test_data = "sequential" msgs = { "R9999": ( "Test", "sequential-test-check", "Some helpful text.", ) } def __init__(self, linter: PyLinter) -> None: super().__init__(linter) self.data: list[str] = [] self.linter = linter def process_module(self, node: nodes.Module) -> None: """Called once per stream/file/astroid object.""" # record the number of invocations with the data object record = self.test_data + str(len(self.data)) self.data.append(record)
SequentialTestChecker
python
django__django
tests/test_utils/tests.py
{ "start": 13087, "end": 15662 }
class ____(TestCase): @classmethod def setUpTestData(cls): cls.person_pk = str(Person.objects.create(name="test").pk) cls.url = f"/test_utils/get_person/{cls.person_pk}/" def test_simple(self): with CaptureQueriesContext(connection) as captured_queries: Person.objects.get(pk=self.person_pk) self.assertEqual(len(captured_queries), 1) self.assertIn(self.person_pk, captured_queries[0]["sql"]) with CaptureQueriesContext(connection) as captured_queries: pass self.assertEqual(0, len(captured_queries)) def test_within(self): with CaptureQueriesContext(connection) as captured_queries: Person.objects.get(pk=self.person_pk) self.assertEqual(len(captured_queries), 1) self.assertIn(self.person_pk, captured_queries[0]["sql"]) def test_nested(self): with CaptureQueriesContext(connection) as captured_queries: Person.objects.count() with CaptureQueriesContext(connection) as nested_captured_queries: Person.objects.count() self.assertEqual(1, len(nested_captured_queries)) self.assertEqual(2, len(captured_queries)) def test_failure(self): with self.assertRaises(TypeError): with CaptureQueriesContext(connection): raise TypeError def test_with_client(self): with CaptureQueriesContext(connection) as captured_queries: self.client.get(self.url) self.assertEqual(len(captured_queries), 1) self.assertIn(self.person_pk, captured_queries[0]["sql"]) with CaptureQueriesContext(connection) as captured_queries: self.client.get(self.url) self.assertEqual(len(captured_queries), 1) self.assertIn(self.person_pk, captured_queries[0]["sql"]) with CaptureQueriesContext(connection) as captured_queries: self.client.get(self.url) self.client.get(self.url) self.assertEqual(len(captured_queries), 2) self.assertIn(self.person_pk, captured_queries[0]["sql"]) self.assertIn(self.person_pk, captured_queries[1]["sql"]) def test_with_client_nested(self): with CaptureQueriesContext(connection) as captured_queries: Person.objects.count() with CaptureQueriesContext(connection): pass self.client.get(self.url) self.assertEqual(2, len(captured_queries)) @override_settings(ROOT_URLCONF="test_utils.urls")
CaptureQueriesContextManagerTests
python
doocs__leetcode
solution/2500-2599/2541.Minimum Operations to Make Array Equal II/Solution.py
{ "start": 0, "end": 425 }
class ____: def minOperations(self, nums1: List[int], nums2: List[int], k: int) -> int: ans = x = 0 for a, b in zip(nums1, nums2): if k == 0: if a != b: return -1 continue if (a - b) % k: return -1 y = (a - b) // k ans += abs(y) x += y return -1 if x else ans // 2
Solution
python
kamyu104__LeetCode-Solutions
Python/number-of-days-between-two-dates.py
{ "start": 819, "end": 1054 }
class ____(object): def daysBetweenDates(self, date1, date2): delta = datetime.datetime.strptime(date1, "%Y-%m-%d") delta -= datetime.datetime.strptime(date2, "%Y-%m-%d") return abs(delta.days)
Solution2
python
pallets__werkzeug
src/werkzeug/middleware/lint.py
{ "start": 3567, "end": 3876 }
class ____: def __init__(self, write: t.Callable[[bytes], object], chunks: list[int]) -> None: self._write = write self._chunks = chunks def __call__(self, s: bytes) -> None: check_type("write()", s, bytes) self._write(s) self._chunks.append(len(s))
GuardedWrite
python
pypa__pip
src/pip/_vendor/packaging/_parser.py
{ "start": 610, "end": 691 }
class ____(Node): def serialize(self) -> str: return str(self)
Variable
python
Lightning-AI__lightning
src/lightning/fabric/utilities/distributed.py
{ "start": 16023, "end": 17311 }
class ____: """A barrier with an infinite timeout. Creates a new process group with the GLOO backend with a very high timeout that makes the barrier effectively wait forever. This is useful in cases where you want to execute a long-running operation on a subset of ranks that should not be subject to the regular collective timeout. """ def __init__(self) -> None: self.group = None self.barrier = lambda: None def __call__(self) -> None: self.barrier() def __enter__(self) -> Self: if _distributed_is_initialized(): # Create a barrier with an 'infinite' timeout (only reliably possible over the GLOO backend) self.group = torch.distributed.new_group(backend="gloo", timeout=timedelta(days=10000)) self.barrier = self.group.monitored_barrier return self def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: self.barrier() if self.group is not None: torch.distributed.destroy_process_group(self.group) def _is_dtensor(tensor: Tensor) -> TypeGuard["DTensor"]: if _TORCH_GREATER_EQUAL_2_4: from torch.distributed._tensor import DTensor return isinstance(tensor, DTensor) return False
_InfiniteBarrier
python
google__jax
tests/buffer_callback_test.py
{ "start": 831, "end": 6529 }
class ____(jtu.JaxTestCase): def setUp(self): super().setUp() if jtu.test_device_matches(["tpu"]): self.skipTest("Not supported on TPU.") @parameterized.parameters(jtu.dtypes.all) @jtu.run_on_devices("cpu") def test_numpy(self, dtype): def callback(ctx, out, arg): with self.assertRaisesRegex( jax.errors.JaxRuntimeError, "XLA FFI GPU context is not available" ): ctx.stream self.assertEqual(ctx.stage, buffer_callback.ExecutionStage.EXECUTE) self.assertEqual(arg.shape, shape) self.assertEqual(arg.dtype, dtype) self.assertEqual(out.shape, shape) self.assertEqual(out.dtype, dtype) self.assertFalse(arg.writeable) self.assertTrue(out.writeable) x = np.asarray(arg) self.assertArraysEqual(x, data) y = np.asarray(out) self.assertEqual(x.dtype, y.dtype) self.assertEqual(x.shape, y.shape) y[...] = x rng = jtu.rand_default(self.rng()) shape = (3, 4) data = rng(shape, dtype) fun = buffer_callback.buffer_callback( callback, jax.ShapeDtypeStruct(data.shape, data.dtype) ) self.assertArraysEqual(fun(data), data) @parameterized.parameters(jtu.dtypes.all) @jtu.run_on_devices("cpu") def test_dlpack(self, dtype): if dtype == jnp.bfloat16: self.skipTest("Numpy's DLPack implementation does not support bfloat16") def callback(ctx, out, arg): del ctx # unused x = np.from_dlpack(arg) self.assertArraysEqual(x, data) y = np.from_dlpack(out) self.assertEqual(x.dtype, y.dtype) self.assertEqual(x.shape, y.shape) rng = jtu.rand_default(self.rng()) shape = (3, 4) data = rng(shape, dtype) fun = buffer_callback.buffer_callback( callback, jax.ShapeDtypeStruct(data.shape, data.dtype) ) # We can't actually test the output because numpy doesn't support writable # DLPack tensors. jax.block_until_ready(fun(data)) @parameterized.product( dtype=jtu.dtypes.all, command_buffer_compatible=[True, False] ) @jtu.run_on_devices("cuda") def test_cuda_array_interface(self, dtype, command_buffer_compatible): if command_buffer_compatible: self.skipTest("Requires jaxlib extension version of at least 337.") def callback(ctx, out, arg): ctx.stream # doesn't crash self.assertEqual(ctx.stage, buffer_callback.ExecutionStage.EXECUTE) self.assertEqual(arg.shape, shape) self.assertEqual(arg.dtype, dtype) self.assertEqual(out.shape, shape) self.assertEqual(out.dtype, dtype) obj = arg.__cuda_array_interface__ self.assertEqual(obj["shape"], data.shape) self.assertEqual(obj["typestr"], data.dtype.str) obj = out.__cuda_array_interface__ self.assertEqual(obj["shape"], data.shape) self.assertEqual(obj["typestr"], data.dtype.str) rng = jtu.rand_default(self.rng()) shape = (3, 4) data = rng(shape, dtype) fun = buffer_callback.buffer_callback( callback, jax.ShapeDtypeStruct(data.shape, data.dtype), command_buffer_compatible=command_buffer_compatible, ) # TODO: There's an XLA:GPU/CUDA bug that causes a segfault when # instantiating an empty CUDA graph. Once that bug is fixed or worked # around, add a test that checks that the Python callback is only executed # once. jax.block_until_ready(fun(data)) @parameterized.parameters([ "sequential", "sequential_unrolled", "expand_dims", "broadcast_all" ]) @jtu.run_on_devices("cpu") def test_batching(self, vmap_method): def callback(ctx, out, *args): del ctx # unused x = np.asarray(args[0]) y = np.asarray(args[1]) z = np.asarray(out) z[...] = x z[...] 
+= y rng = jtu.rand_default(self.rng()) shape = (3, 4) x = rng(shape, jnp.float32) y = rng(shape, jnp.float32) fun = buffer_callback.buffer_callback( callback, jax.ShapeDtypeStruct(x.shape[1:], x.dtype), vmap_method=vmap_method, ) self.assertArraysEqual(jax.vmap(fun)(x, y), x + y) @jtu.run_on_devices("cpu") def test_input_output_aliases(self): def callback(ctx, out, arg): del ctx # unused x = np.asarray(arg) y = np.asarray(out) self.assertEqual(x.ctypes.data, y.ctypes.data) rng = jtu.rand_default(self.rng()) shape = (3, 4) data = rng(shape, jnp.float32) fun = buffer_callback.buffer_callback( callback, jax.ShapeDtypeStruct(data.shape, data.dtype), input_output_aliases={0: 0}, ) jax.block_until_ready(fun(data)) @jtu.run_on_devices("cpu") def test_buffer_callback_multi_mesh(self): def no_op(*args, **kwargs): pass @jax.jit def f(x, y): z = x * y output_shape = jax.ShapeDtypeStruct(x.shape, x.dtype) buffer_call = buffer_callback.buffer_callback( no_op, output_shape, command_buffer_compatible=True) return buffer_call((z,)) mesh1 = jtu.create_mesh((1, 1), ('a', 'b')) mesh2 = jtu.create_mesh((1, 1), ('x', 'y')) x = jax.device_put( jnp.ones((32, 32)), jax.NamedSharding(mesh1, jax.P('a', 'b'))) y = jax.device_put( jnp.ones((32, 32)), jax.NamedSharding(mesh2, jax.P('x', 'y'))) f(x, y) # doesn't crash def test_side_effect(self): def callback(*_): nonlocal called called = True called = False fun = buffer_callback.buffer_callback( callback, jax.ShapeDtypeStruct((), jnp.float32), has_side_effect=True) jax.block_until_ready(fun()) self.assertTrue(called) if __name__ == "__main__": absltest.main(testLoader=jtu.JaxTestLoader())
BufferCallbackTest
python
ray-project__ray
python/ray/train/v2/_internal/execution/worker_group/worker.py
{ "start": 3699, "end": 8649 }
class ____: def __init__(self): self._callbacks: List[WorkerCallback] = [] def execute(self, fn: Callable[..., T], *fn_args, **fn_kwargs) -> T: return fn(*fn_args, **fn_kwargs) def run_train_fn(self, train_fn_ref: ObjectRefWrapper[Callable[[], None]]): """Run the training function in a separate thread. This function should return immediately, freeing up the main actor thread to perform other tasks such as polling the status. """ try: train_fn = train_fn_ref.get() except Exception as e: logger.error(f"Error deserializing the training function: {e}") raise def train_fn_with_final_checkpoint_flush(): train_fn() get_train_context().checkpoint_upload_threadpool.shutdown() # Create and start the training thread. get_train_context().execution_context.training_thread_runner.run( train_fn_with_final_checkpoint_flush ) def get_metadata(self) -> ActorMetadata: return ActorMetadata( hostname=socket.gethostname(), node_id=ray.get_runtime_context().get_node_id(), node_ip=ray.util.get_node_ip_address(), pid=os.getpid(), accelerator_ids=ray.get_runtime_context().get_accelerator_ids(), ) def poll_status(self) -> WorkerStatus: execution_context = get_train_context().execution_context # TODO: We can implement two phase commit here. # Only mark the task done when the result has been processed by the controller. try: training_report = execution_context.result_queue.get_nowait() execution_context.result_queue.task_done() except queue.Empty: training_report = None error = execution_context.training_thread_runner.get_error() # TODO: The running state should not be conflated with queue flushing. # Running should only be true if the user code is still running. # This relies on `worker_group_status.finished` returning False # until all training results have been flushed. running = execution_context.training_thread_runner.is_running() or bool( training_report ) return WorkerStatus( running=running, error=error, training_report=training_report ) def shutdown(self): """Shutdown the worker. This method is not doing the real shutdown, but it is used by the worker group to signal the worker to stop running the training function. Any shutdown worker callbacks can hook on this method to implement the corresponding shutdown logic. Note that the shutdown logic needs to be thread-safe if it is running in a separate thread. """ for callback in self._callbacks: callback.before_worker_shutdown() def init_train_context( self, train_run_context: TrainRunContext, distributed_context: DistributedContext, synchronization_actor: SynchronizationActor, storage_context: StorageContext, worker_callbacks: List[Union[WorkerCallback, TrainContextCallback]], controller_actor: ActorHandle, dataset_shard_provider: Optional["DatasetShardProvider"] = None, checkpoint: Optional[Checkpoint] = None, ): self._callbacks = [c for c in worker_callbacks if isinstance(c, WorkerCallback)] context_callbacks_to_propagate = [ c for c in worker_callbacks if isinstance(c, TrainContextCallback) ] context = TrainContext( train_run_context=train_run_context, distributed_context=distributed_context, execution_context=ExecutionContext( synchronization_actor=synchronization_actor, # Make the queue size 1 to avoid building up too # many unprocessed results. 
result_queue=queue.Queue(maxsize=1), training_thread_runner=ThreadRunner(), train_context_callbacks=context_callbacks_to_propagate, ), storage_context=storage_context, controller_actor=controller_actor, checkpoint=checkpoint, dataset_shard_provider=dataset_shard_provider, ) # Configure the train and root logger for the worker processes. if ray_constants.env_bool( ENABLE_WORKER_STRUCTURED_LOGGING_ENV_VAR, DEFAULT_ENABLE_WORKER_LOGGING ): LoggingManager.configure_worker_logger(context) patch_print_function() # Set the train context global variable for the worker. set_train_context(context) # user facing train fn utils set_train_fn_utils(DistributedTrainFnUtils()) for callback in self._callbacks: callback.after_init_train_context()
RayTrainWorker
python
wepe__MachineLearning
Ridge/kernel_ridge/kernel_ridge.py
{ "start": 94, "end": 2611 }
class ____(): """ Simple implementation of a Kernel Ridge Regression using the closed form for training. Doc: https://www.ics.uci.edu/~welling/classnotes/papers_class/Kernel-Ridge.pdf """ def __init__(self, kernel_type='linear', C=1.0, gamma=5.0): """ :param kernel_type: Kernel type to use in training. 'linear' use linear kernel function. 'quadratic' use quadratic kernel function. 'gaussian' use gaussian kernel function :param C: Value of regularization parameter C :param gamma: parameter for gaussian kernel or Polynomial kernel """ self.kernels = { 'linear': self.kernel_linear, 'quadratic': self.kernel_quadratic, 'gaussian': self.kernel_gaussian } self.kernel_type = kernel_type self.kernel = self.kernels[self.kernel_type] self.C = C self.gamma = gamma # Define kernels def kernel_linear(self, x1, x2): return np.dot(x1, x2.T) def kernel_quadratic(self, x1, x2): return (np.dot(x1, x2.T) ** 2) def kernel_gaussian(self, x1, x2, gamma=5.0): gamma = self.gamma return np.exp(-linalg.norm(x1 - x2) ** 2 / (2 * (gamma ** 2))) def compute_kernel_matrix(self, X1, X2): """ compute kernel matrix (gram matrix) give two input matrix """ # sample size n1 = X1.shape[0] n2 = X2.shape[0] # Gram matrix K = np.zeros((n1, n2)) for i in range(n1): for j in range(n2): K[i, j] = self.kernel(X1[i], X2[j]) return K def fit(self, X, y): """ training KRR :param X: training X :param y: training y :return: alpha vector, see document TODO """ K = self.compute_kernel_matrix(X, X) self.alphas = sp.dot(inv(K + self.C * np.eye(np.shape(K)[0])), y.transpose()) return self.alphas def predict(self, x_train, x_test): """ :param x_train: DxNtr array of Ntr train data points with D features :param x_test: DxNte array of Nte test data points with D features :return: y_test, D2xNte array """ k = self.compute_kernel_matrix(x_test, x_train) y_test = sp.dot(k, self.alphas) return y_test.transpose()
KernelRidge
python
walkccc__LeetCode
solutions/710. Random Pick with Blacklist/710.py
{ "start": 0, "end": 590 }
class ____: def __init__(self, n: int, blacklist: list[int]): self.validRange = n - len(blacklist) self.dict = {} maxAvailable = n - 1 for b in blacklist: self.dict[b] = -1 for b in blacklist: if b < self.validRange: # Find the slot that haven't been used. while maxAvailable in self.dict: maxAvailable -= 1 self.dict[b] = maxAvailable maxAvailable -= 1 def pick(self) -> int: value = random.randint(0, self.validRange - 1) if value in self.dict: return self.dict[value] return value
Solution
python
PyCQA__pylint
tests/functional/t/too/too_few_public_methods.py
{ "start": 62, "end": 260 }
class ____: # [too-few-public-methods] def __init__(self): pass def meth1(self): print(self) def _dontcount(self): print(self) # Don't emit for these cases.
Aaaa
python
mitsuhiko__rye
rye-devtools/src/rye_devtools/common.py
{ "start": 626, "end": 2246 }
class ____(NamedTuple): major: int minor: int patch: int @classmethod def from_str(cls, version: str) -> Self: major, minor, patch = version.split(".", 3) return cls(int(major), int(minor), int(patch)) def __str__(self) -> str: return f"{self.major}.{self.minor}.{self.patch}" def __neg__(self) -> Self: return Version(-self.major, -self.minor, -self.patch) async def fetch(client: httpx.AsyncClient, url: str) -> httpx.Response: """Fetch from GitHub API with rate limit awareness.""" resp = await client.get(url, timeout=15) if ( resp.status_code in [403, 429] and resp.headers.get("x-ratelimit-remaining") == "0" ): # See https://docs.github.com/en/rest/using-the-rest-api/troubleshooting-the-rest-api?apiVersion=2022-11-28 if (retry_after := resp.headers.get("retry-after")) is not None: log(f"Got retry-after header, retry in {retry_after} seconds.") time.sleep(int(retry_after)) return await fetch(client, url) if (retry_at := resp.headers.get("x-ratelimit-reset")) is not None: utc = datetime.now(timezone.utc).timestamp() retry_after = max(int(retry_at) - int(utc), 0) log(f"Got x-ratelimit-reset header, retry in {retry_after} seconds.") time.sleep(retry_after) return await fetch(client, url) log("Got rate limited but no information how long, wait for 2 minutes.") time.sleep(60 * 2) return await fetch(client, url) resp.raise_for_status() return resp
Version
python
sympy__sympy
sympy/plotting/series.py
{ "start": 57667, "end": 61824 }
class ____(Line2DBaseSeries): is_parametric = True def _set_parametric_line_label(self, label): """Logic to set the correct label to be shown on the plot. If `use_cm=True` there will be a colorbar, so we show the parameter. If `use_cm=False`, there might be a legend, so we show the expressions. Parameters ========== label : str label passed in by the pre-processor or the user """ self._label = str(self.var) if label is None else label self._latex_label = latex(self.var) if label is None else label if (self.use_cm is False) and (self._label == str(self.var)): self._label = str(self.expr) self._latex_label = latex(self.expr) # if the expressions is a lambda function and use_cm=False and no label # has been provided, then its better to do the following in order to # avoid surprises on the backend if any(callable(e) for e in self.expr) and (not self.use_cm): if self._label == str(self.expr): self._label = "" def get_label(self, use_latex=False, wrapper="$%s$"): # parametric lines returns the representation of the parameter to be # shown on the colorbar if `use_cm=True`, otherwise it returns the # representation of the expression to be placed on the legend. if self.use_cm: if str(self.var) == self._label: if use_latex: return self._get_wrapped_label(latex(self.var), wrapper) return str(self.var) # here the user has provided a custom label return self._label if use_latex: if self._label != str(self.expr): return self._latex_label return self._get_wrapped_label(self._latex_label, wrapper) return self._label def _get_data_helper(self): """Returns coordinates that needs to be postprocessed. Depending on the `adaptive` option, this function will either use an adaptive algorithm or it will uniformly sample the expression over the provided range. """ if self.adaptive: np = import_module("numpy") coords = self._adaptive_sampling() coords = [np.array(t) for t in coords] else: coords = self._uniform_sampling() if self.is_2Dline and self.is_polar: # when plot_polar is executed with polar_axis=True np = import_module('numpy') x, y, _ = coords r = np.sqrt(x**2 + y**2) t = np.arctan2(y, x) coords = [t, r, coords[-1]] if callable(self.color_func): coords = list(coords) coords[-1] = self.eval_color_func(*coords) return coords def _uniform_sampling(self): """Returns coordinates that needs to be postprocessed.""" np = import_module('numpy') results = self._evaluate() for i, r in enumerate(results): _re, _im = np.real(r), np.imag(r) _re[np.invert(np.isclose(_im, np.zeros_like(_im)))] = np.nan results[i] = _re return [*results[1:], results[0]] def get_parameter_points(self): return self.get_data()[-1] def get_points(self): """ Return lists of coordinates for plotting. Depending on the ``adaptive`` option, this function will either use an adaptive algorithm or it will uniformly sample the expression over the provided range. This function is available for back-compatibility purposes. Consider using ``get_data()`` instead. Returns ======= x : list List of x-coordinates y : list List of y-coordinates z : list List of z-coordinates, only for 3D parametric line plot. """ return self._get_data_helper()[:-1] @property def nb_of_points(self): return self.n[0] @nb_of_points.setter def nb_of_points(self, v): self.n = v
ParametricLineBaseSeries
python
PrefectHQ__prefect
tests/server/models/test_variables.py
{ "start": 1411, "end": 3085 }
class ____: async def test_create_variable( self, session, ): current_time = now("UTC") variable = VariableCreate( name="my_variable", value="my-value", tags=["123", "456"] ) model = await create_variable(session, variable) await session.commit() assert model assert model.id assert model.created and model.created > current_time assert model.updated and model.updated > current_time assert model.name == variable.name assert model.value == variable.value assert model.tags == variable.tags @pytest.mark.parametrize( "value", [ "string-value", '"string-value"', 123, 12.3, True, False, None, {"key": "value"}, ["value1", "value2"], {"key": ["value1", "value2"]}, ], ) async def test_create_variable_json_types(self, session, value): variable = VariableCreate(name="my_variable", value=value, tags=["123", "456"]) model = await create_variable(session, variable) await session.commit() assert model assert model.id assert model.value == variable.value == value async def test_create_variable_name_unique( self, session, variable, ): with pytest.raises(sa.exc.IntegrityError): await create_variable( session, VariableCreate( name="my_variable", value="my-value", tags=["123", "456"] ), )
TestCreateVariable
python
numba__llvmlite
setup.py
{ "start": 2492, "end": 2908 }
class ____(build_ext): def run(self): build_ext.run(self) build_library_files(self.dry_run) # HACK: this makes sure the library file (which is large) is only # included in binary builds, not source builds. from llvmlite.utils import get_library_files self.distribution.package_data = { "llvmlite.binding": get_library_files(), }
LlvmliteBuildExt
python
aimacode__aima-python
csp.py
{ "start": 37146, "end": 42234 }
class ____: """Solves a CSP with arc consistency and domain splitting""" def __init__(self, csp): """a CSP solver that uses arc consistency * csp is the CSP to be solved """ self.csp = csp def GAC(self, orig_domains=None, to_do=None, arc_heuristic=sat_up): """ Makes this CSP arc-consistent using Generalized Arc Consistency orig_domains: is the original domains to_do : is a set of (variable,constraint) pairs returns the reduced domains (an arc-consistent variable:domain dictionary) """ if orig_domains is None: orig_domains = self.csp.domains if to_do is None: to_do = {(var, const) for const in self.csp.constraints for var in const.scope} else: to_do = to_do.copy() domains = orig_domains.copy() to_do = arc_heuristic(to_do) checks = 0 while to_do: var, const = to_do.pop() other_vars = [ov for ov in const.scope if ov != var] new_domain = set() if len(other_vars) == 0: for val in domains[var]: if const.holds({var: val}): new_domain.add(val) checks += 1 # new_domain = {val for val in domains[var] # if const.holds({var: val})} elif len(other_vars) == 1: other = other_vars[0] for val in domains[var]: for other_val in domains[other]: checks += 1 if const.holds({var: val, other: other_val}): new_domain.add(val) break # new_domain = {val for val in domains[var] # if any(const.holds({var: val, other: other_val}) # for other_val in domains[other])} else: # general case for val in domains[var]: holds, checks = self.any_holds(domains, const, {var: val}, other_vars, checks=checks) if holds: new_domain.add(val) # new_domain = {val for val in domains[var] # if self.any_holds(domains, const, {var: val}, other_vars)} if new_domain != domains[var]: domains[var] = new_domain if not new_domain: return False, domains, checks add_to_do = self.new_to_do(var, const).difference(to_do) to_do |= add_to_do return True, domains, checks def new_to_do(self, var, const): """ Returns new elements to be added to to_do after assigning variable var in constraint const. 
""" return {(nvar, nconst) for nconst in self.csp.var_to_const[var] if nconst != const for nvar in nconst.scope if nvar != var} def any_holds(self, domains, const, env, other_vars, ind=0, checks=0): """ Returns True if Constraint const holds for an assignment that extends env with the variables in other_vars[ind:] env is a dictionary Warning: this has side effects and changes the elements of env """ if ind == len(other_vars): return const.holds(env), checks + 1 else: var = other_vars[ind] for val in domains[var]: # env = dict_union(env, {var:val}) # no side effects env[var] = val holds, checks = self.any_holds(domains, const, env, other_vars, ind + 1, checks) if holds: return True, checks return False, checks def domain_splitting(self, domains=None, to_do=None, arc_heuristic=sat_up): """ Return a solution to the current CSP or False if there are no solutions to_do is the list of arcs to check """ if domains is None: domains = self.csp.domains consistency, new_domains, _ = self.GAC(domains, to_do, arc_heuristic) if not consistency: return False elif all(len(new_domains[var]) == 1 for var in domains): return {var: first(new_domains[var]) for var in domains} else: var = first(x for x in self.csp.variables if len(new_domains[x]) > 1) if var: dom1, dom2 = partition_domain(new_domains[var]) new_doms1 = extend(new_domains, var, dom1) new_doms2 = extend(new_domains, var, dom2) to_do = self.new_to_do(var, None) return self.domain_splitting(new_doms1, to_do, arc_heuristic) or \ self.domain_splitting(new_doms2, to_do, arc_heuristic) def partition_domain(dom): """Partitions domain dom into two""" split = len(dom) // 2 dom1 = set(list(dom)[:split]) dom2 = dom - dom1 return dom1, dom2
ACSolver
python
pandas-dev__pandas
pandas/tests/generic/test_duplicate_labels.py
{ "start": 7471, "end": 13580 }
class ____: @pytest.mark.parametrize( "cls, axes", [ (pd.Series, {"index": ["a", "a"], "dtype": float}), (pd.DataFrame, {"index": ["a", "a"]}), (pd.DataFrame, {"index": ["a", "a"], "columns": ["b", "b"]}), (pd.DataFrame, {"columns": ["b", "b"]}), ], ) def test_set_flags_with_duplicates(self, cls, axes): result = cls(**axes) assert result.flags.allows_duplicate_labels is True msg = "Index has duplicates." with pytest.raises(pd.errors.DuplicateLabelError, match=msg): cls(**axes).set_flags(allows_duplicate_labels=False) @pytest.mark.parametrize( "data", [ pd.Series(index=[0, 0], dtype=float), pd.DataFrame(index=[0, 0]), pd.DataFrame(columns=[0, 0]), ], ) def test_setting_allows_duplicate_labels_raises(self, data): msg = "Index has duplicates." with pytest.raises(pd.errors.DuplicateLabelError, match=msg): data.flags.allows_duplicate_labels = False assert data.flags.allows_duplicate_labels is True def test_series_raises(self): a = pd.Series(0, index=["a", "b"]) b = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False) msg = "Index has duplicates." with pytest.raises(pd.errors.DuplicateLabelError, match=msg): pd.concat([a, b]) @pytest.mark.parametrize( "getter, target", [ (operator.itemgetter(["A", "A"]), None), # loc (operator.itemgetter(["a", "a"]), "loc"), pytest.param(operator.itemgetter(("a", ["A", "A"])), "loc"), (operator.itemgetter((["a", "a"], "A")), "loc"), # iloc (operator.itemgetter([0, 0]), "iloc"), pytest.param(operator.itemgetter((0, [0, 0])), "iloc"), pytest.param(operator.itemgetter(([0, 0], 0)), "iloc"), ], ) def test_getitem_raises(self, getter, target): df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags( allows_duplicate_labels=False ) if target: # df, df.loc, or df.iloc target = getattr(df, target) else: target = df msg = "Index has duplicates." with pytest.raises(pd.errors.DuplicateLabelError, match=msg): getter(target) def test_concat_raises(self): objs = [ pd.Series(1, index=[0, 1], name="a"), pd.Series(2, index=[0, 1], name="a"), ] objs = [x.set_flags(allows_duplicate_labels=False) for x in objs] msg = "Index has duplicates." with pytest.raises(pd.errors.DuplicateLabelError, match=msg): pd.concat(objs, axis=1) def test_merge_raises(self): a = pd.DataFrame({"A": [0, 1, 2]}, index=["a", "b", "c"]).set_flags( allows_duplicate_labels=False ) b = pd.DataFrame({"B": [0, 1, 2]}, index=["a", "b", "b"]) msg = "Index has duplicates." with pytest.raises(pd.errors.DuplicateLabelError, match=msg): pd.merge(a, b, left_index=True, right_index=True) @pytest.mark.parametrize( "idx", [ pd.Index([1, 1]), pd.Index(["a", "a"]), pd.Index([1.1, 1.1]), pd.PeriodIndex([pd.Period("2000", "D")] * 2), pd.DatetimeIndex([pd.Timestamp("2000")] * 2), pd.TimedeltaIndex([pd.Timedelta("1D")] * 2), pd.CategoricalIndex(["a", "a"]), pd.IntervalIndex([pd.Interval(0, 1)] * 2), pd.MultiIndex.from_tuples([("a", 1), ("a", 1)]), ], ids=lambda x: type(x).__name__, ) def test_raises_basic(idx): msg = "Index has duplicates." 
with pytest.raises(pd.errors.DuplicateLabelError, match=msg): pd.Series(1, index=idx).set_flags(allows_duplicate_labels=False) with pytest.raises(pd.errors.DuplicateLabelError, match=msg): pd.DataFrame({"A": [1, 1]}, index=idx).set_flags(allows_duplicate_labels=False) with pytest.raises(pd.errors.DuplicateLabelError, match=msg): pd.DataFrame([[1, 2]], columns=idx).set_flags(allows_duplicate_labels=False) def test_format_duplicate_labels_message(): idx = pd.Index(["a", "b", "a", "b", "c"]) result = idx._format_duplicate_message() expected = pd.DataFrame( {"positions": [[0, 2], [1, 3]]}, index=pd.Index(["a", "b"], name="label") ) tm.assert_frame_equal(result, expected) def test_format_duplicate_labels_message_multi(): idx = pd.MultiIndex.from_product([["A"], ["a", "b", "a", "b", "c"]]) result = idx._format_duplicate_message() expected = pd.DataFrame( {"positions": [[0, 2], [1, 3]]}, index=pd.MultiIndex.from_product([["A"], ["a", "b"]]), ) tm.assert_frame_equal(result, expected) def test_dataframe_insert_raises(): df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False) msg = "Cannot specify" with pytest.raises(ValueError, match=msg): df.insert(0, "A", [3, 4], allow_duplicates=True) @pytest.mark.parametrize( "method, frame_only", [ (operator.methodcaller("set_index", "A", inplace=True), True), (operator.methodcaller("reset_index", inplace=True), True), (operator.methodcaller("rename", lambda x: x, inplace=True), False), ], ) def test_inplace_raises(method, frame_only): df = pd.DataFrame({"A": [0, 0], "B": [1, 2]}).set_flags( allows_duplicate_labels=False ) s = df["A"] s.flags.allows_duplicate_labels = False msg = "Cannot specify" with pytest.raises(ValueError, match=msg): method(df) if not frame_only: with pytest.raises(ValueError, match=msg): method(s) def test_pickle(): a = pd.Series([1, 2]).set_flags(allows_duplicate_labels=False) b = tm.round_trip_pickle(a) tm.assert_series_equal(a, b) a = pd.DataFrame({"A": []}).set_flags(allows_duplicate_labels=False) b = tm.round_trip_pickle(a) tm.assert_frame_equal(a, b)
TestRaises
python
Lightning-AI__lightning
tests/tests_pytorch/trainer/optimization/test_manual_optimization.py
{ "start": 20030, "end": 25093 }
class ____(BoringModel): def __init__(self): super().__init__() self.automatic_optimization = False def loss_ones(self, batch, prediction): # An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction)) def loss_zeros(self, batch, prediction): # An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls return torch.nn.functional.mse_loss(prediction, torch.zeros_like(prediction)) def manual_sync_grad(self) -> bool: torch_distrib.all_reduce(self.layer.weight.grad.data, async_op=False) return True def training_step(self, batch, batch_idx): # emulate gans training opt_gen, opt_dis = self.optimizers() # Note: Be careful, don't log on the same key in self.log in both closure # as they will be aggregated together on epoch_end world_size = torch_distrib.get_world_size(torch_distrib.group.WORLD) assert world_size == 2 make_gen_optimizer_step = batch_idx % 2 == 1 make_dis_optimizer_step = batch_idx % 4 == 0 def compute_loss(): x = batch[0] x = F.dropout(x, 0.1) predictions = self(x) predictions = F.dropout(predictions, 0.1) loss_ones = self.loss_ones(None, predictions) loss_zeros = self.loss_zeros(None, predictions) return loss_ones, loss_zeros def make_manual_backward(loss, retain_graph=False, make_optimizer_step=True): self.manual_backward(loss, retain_graph=retain_graph) if make_optimizer_step: grad_clone = self.layer.weight.grad.clone() assert self.manual_sync_grad() self.layer.weight.grad /= world_size assert torch.equal(self.layer.weight.grad, grad_clone) def gen_closure(): loss_ones_gen, _ = compute_loss() make_manual_backward(loss_ones_gen, retain_graph=True, make_optimizer_step=make_gen_optimizer_step) make_manual_backward(loss_ones_gen, make_optimizer_step=make_gen_optimizer_step) def dis_closure(): loss_ones_gen, _ = compute_loss() make_manual_backward(loss_ones_gen, retain_graph=True, make_optimizer_step=make_dis_optimizer_step) make_manual_backward(loss_ones_gen, make_optimizer_step=make_dis_optimizer_step) # this will accumulate gradients for 2 batches and then call opt_gen.step() if make_gen_optimizer_step: opt_gen.step(closure=gen_closure) opt_gen.zero_grad() # update discriminator every 4 baches # therefore, no gradient accumulation for discriminator if make_dis_optimizer_step: opt_dis.step(closure=dis_closure) def configure_optimizers(self): optimizer_gen = torch.optim.SGD(self.layer.parameters(), lr=0.1) optimizer_dis = torch.optim.Adam(self.layer.parameters(), lr=0.001) return [optimizer_gen, optimizer_dis] def on_train_start(self): # this is done here instead of in the calling function due to `spawn` sgd, adam = self.optimizers() self.sgd_step_patch = patch.object(sgd, "step", wraps=sgd.step) self.sgd_step_mock = self.sgd_step_patch.start() self.adam_step_patch = patch.object(adam, "step", wraps=adam.step) self.adam_step_mock = self.adam_step_patch.start() def on_train_end(self): self.sgd_step_patch.stop() assert self.sgd_step_mock.call_count == 4 self.adam_step_patch.stop() assert self.adam_step_mock.call_count == 2 def train_manual_optimization(tmp_path, strategy, model_cls=TesManualOptimizationDDPModel): seed_everything(42) model = model_cls() model_copy = deepcopy(model) model.val_dataloader = None limit_train_batches = 8 trainer = Trainer( default_root_dir=tmp_path, limit_train_batches=limit_train_batches, limit_val_batches=2, max_epochs=1, log_every_n_steps=1, accelerator="gpu", devices=2, strategy=strategy, enable_progress_bar=False, 
enable_model_summary=False, ) trainer.fit(model) for param, param_copy in zip(model.parameters(), model_copy.parameters()): assert not torch.equal(param.cpu().data, param_copy.data) @RunIf(min_cuda_gpus=2, standalone=True) def test_step_with_optimizer_closure_with_different_frequencies_ddp(tmp_path): """Tests that `step` works with optimizer_closure and different accumulated_gradient frequency.""" train_manual_optimization(tmp_path, "ddp") @RunIf(min_cuda_gpus=2) def test_step_with_optimizer_closure_with_different_frequencies_ddp_spawn(tmp_path): """Tests that `step` works with optimizer_closure and different accumulated_gradient frequency.""" train_manual_optimization(tmp_path, "ddp_spawn")
TesManualOptimizationDDPModel
python
numpy__numpy
numpy/_core/tests/test_cpu_features.py
{ "start": 1671, "end": 3923 }
class ____: features = [] features_groups = {} features_map = {} features_flags = set() def load_flags(self): # a hook pass def test_features(self): self.load_flags() for gname, features in self.features_groups.items(): test_features = [self.cpu_have(f) for f in features] assert_features_equal(__cpu_features__.get(gname), all(test_features), gname) for feature_name in self.features: cpu_have = self.cpu_have(feature_name) npy_have = __cpu_features__.get(feature_name) assert_features_equal(npy_have, cpu_have, feature_name) def cpu_have(self, feature_name): map_names = self.features_map.get(feature_name, feature_name) if isinstance(map_names, str): return map_names in self.features_flags return any(f in self.features_flags for f in map_names) def load_flags_cpuinfo(self, magic_key): self.features_flags = self.get_cpuinfo_item(magic_key) def get_cpuinfo_item(self, magic_key): values = set() with open('/proc/cpuinfo') as fd: for line in fd: if not line.startswith(magic_key): continue flags_value = [s.strip() for s in line.split(':', 1)] if len(flags_value) == 2: values = values.union(flags_value[1].upper().split()) return values def load_flags_auxv(self): auxv = subprocess.check_output(['/bin/true'], env={"LD_SHOW_AUXV": "1"}) for at in auxv.split(b'\n'): if not at.startswith(b"AT_HWCAP"): continue hwcap_value = [s.strip() for s in at.split(b':', 1)] if len(hwcap_value) == 2: self.features_flags = self.features_flags.union( hwcap_value[1].upper().decode().split() ) @pytest.mark.skipif( sys.platform == 'emscripten', reason=( "The subprocess module is not available on WASM platforms and" " therefore this test class cannot be properly executed." ), ) @pytest.mark.thread_unsafe(reason="setup & tmp_path_factory threads-unsafe, modifies environment variables")
AbstractTest
python
astropy__astropy
astropy/nddata/utils.py
{ "start": 16358, "end": 34739 }
class ____: """ Create a cutout object from a 2D array. The returned object will contain a 2D cutout array. If ``copy=False`` (default), the cutout array is a view into the original ``data`` array, otherwise the cutout array will contain a copy of the original data. If a `~astropy.wcs.WCS` object is input, then the returned object will also contain a copy of the original WCS, but updated for the cutout array. For example usage, see :ref:`astropy:cutout_images`. .. warning:: The cutout WCS object does not currently handle cases where the input WCS object contains distortion lookup tables described in the `FITS WCS distortion paper <https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__. Parameters ---------- data : ndarray The 2D data array from which to extract the cutout array. position : tuple or `~astropy.coordinates.SkyCoord` The position of the cutout array's center with respect to the ``data`` array. The position can be specified either as a ``(x, y)`` tuple of pixel coordinates or a `~astropy.coordinates.SkyCoord`, in which case ``wcs`` is a required input. size : int, array-like, or `~astropy.units.Quantity` The size of the cutout array along each axis. If ``size`` is a scalar number or a scalar `~astropy.units.Quantity`, then a square cutout of ``size`` will be created. If ``size`` has two elements, they should be in ``(ny, nx)`` order. Scalar numbers in ``size`` are assumed to be in units of pixels. ``size`` can also be a `~astropy.units.Quantity` object or contain `~astropy.units.Quantity` objects. Such `~astropy.units.Quantity` objects must be in pixel or angular units. For all cases, ``size`` will be converted to an integer number of pixels, rounding the nearest integer. See the ``mode`` keyword for additional details on the final cutout size. .. note:: If ``size`` is in angular units, the cutout size is converted to pixels using the pixel scales along each axis of the image at the ``CRPIX`` location. Projection and other non-linear distortions are not taken into account. wcs : `~astropy.wcs.WCS`, optional A WCS object associated with the input ``data`` array. If ``wcs`` is not `None`, then the returned cutout object will contain a copy of the updated WCS for the cutout data array. mode : {'trim', 'partial', 'strict'}, optional The mode used for creating the cutout data array. For the ``'partial'`` and ``'trim'`` modes, a partial overlap of the cutout array and the input ``data`` array is sufficient. For the ``'strict'`` mode, the cutout array has to be fully contained within the ``data`` array, otherwise an `~astropy.nddata.utils.PartialOverlapError` is raised. In all modes, non-overlapping arrays will raise a `~astropy.nddata.utils.NoOverlapError`. In ``'partial'`` mode, positions in the cutout array that do not overlap with the ``data`` array will be filled with ``fill_value``. In ``'trim'`` mode only the overlapping elements are returned, thus the resulting cutout array may be smaller than the requested ``shape``. fill_value : float or int, optional If ``mode='partial'``, the value to fill pixels in the cutout array that do not overlap with the input ``data``. ``fill_value`` must have the same ``dtype`` as the input ``data`` array. copy : bool, optional If `False` (default), then the cutout data will be a view into the original ``data`` array. If `True`, then the cutout data will hold a copy of the original ``data`` array. limit_rounding_method : callable The rounding method when calculating the minimum and maximum pixel indices. 
This must be a callable function. Examples: `~numpy.ceil`, `~numpy.floor`, `~numpy.round`. Default is `~numpy.ceil`. Attributes ---------- data : 2D `~numpy.ndarray` The 2D cutout array. shape : (2,) tuple The ``(ny, nx)`` shape of the cutout array. shape_input : (2,) tuple The ``(ny, nx)`` shape of the input (original) array. input_position_cutout : (2,) tuple The (unrounded) ``(x, y)`` position with respect to the cutout array. input_position_original : (2,) tuple The original (unrounded) ``(x, y)`` input position (with respect to the original array). slices_original : (2,) tuple of slice object A tuple of slice objects for the minimal bounding box of the cutout with respect to the original array. For ``mode='partial'``, the slices are for the valid (non-filled) cutout values. slices_cutout : (2,) tuple of slice object A tuple of slice objects for the minimal bounding box of the cutout with respect to the cutout array. For ``mode='partial'``, the slices are for the valid (non-filled) cutout values. xmin_original, ymin_original, xmax_original, ymax_original : float The minimum and maximum ``x`` and ``y`` indices of the minimal rectangular region of the cutout array with respect to the original array. For ``mode='partial'``, the bounding box indices are for the valid (non-filled) cutout values. These values are the same as those in `bbox_original`. xmin_cutout, ymin_cutout, xmax_cutout, ymax_cutout : float The minimum and maximum ``x`` and ``y`` indices of the minimal rectangular region of the cutout array with respect to the cutout array. For ``mode='partial'``, the bounding box indices are for the valid (non-filled) cutout values. These values are the same as those in `bbox_cutout`. wcs : `~astropy.wcs.WCS` or None A WCS object associated with the cutout array if a ``wcs`` was input. Examples -------- >>> import numpy as np >>> from astropy.nddata.utils import Cutout2D >>> from astropy import units as u >>> data = np.arange(20.).reshape(5, 4) >>> cutout1 = Cutout2D(data, (2, 2), (3, 3)) >>> print(cutout1.data) # doctest: +FLOAT_CMP [[ 5. 6. 7.] [ 9. 10. 11.] [13. 14. 15.]] >>> print(cutout1.center_original) (2.0, 2.0) >>> print(cutout1.center_cutout) (1.0, 1.0) >>> print(cutout1.origin_original) (1, 1) >>> cutout2 = Cutout2D(data, (2, 2), 3) >>> print(cutout2.data) # doctest: +FLOAT_CMP [[ 5. 6. 7.] [ 9. 10. 11.] [13. 14. 15.]] >>> size = u.Quantity([3, 3], u.pixel) >>> cutout3 = Cutout2D(data, (0, 0), size) >>> print(cutout3.data) # doctest: +FLOAT_CMP [[0. 1.] [4. 5.]] >>> cutout4 = Cutout2D(data, (0, 0), (3 * u.pixel, 3)) >>> print(cutout4.data) # doctest: +FLOAT_CMP [[0. 1.] [4. 5.]] >>> cutout5 = Cutout2D(data, (0, 0), (3, 3), mode='partial') >>> print(cutout5.data) # doctest: +FLOAT_CMP [[nan nan nan] [nan 0. 1.] [nan 4. 
5.]] """ def __init__( self, data, position, size, wcs=None, mode="trim", fill_value=np.nan, copy=False, *, limit_rounding_method: LimitRoundingMethod = np.ceil, ): if wcs is None: wcs = getattr(data, "wcs", None) if isinstance(position, SkyCoord): if wcs is None: raise ValueError("wcs must be input if position is a SkyCoord") position = skycoord_to_pixel(position, wcs, mode="all") # (x, y) if np.isscalar(size): size = np.repeat(size, 2) # special handling for a scalar Quantity if isinstance(size, u.Quantity): size = np.atleast_1d(size) if len(size) == 1: size = np.repeat(size, 2) if len(size) > 2: raise ValueError("size must have at most two elements") shape = np.zeros(2).astype(int) pixel_scales = None # ``size`` can have a mixture of int and Quantity (and even units), # so evaluate each axis separately for axis, side in enumerate(size): if not isinstance(side, u.Quantity): shape[axis] = int(np.round(side)) # pixels else: if side.unit == u.pixel: shape[axis] = int(np.round(side.value)) elif side.unit.physical_type == "angle": if wcs is None: raise ValueError( "wcs must be input if any element of size has angular units" ) if pixel_scales is None: pixel_scales = u.Quantity( proj_plane_pixel_scales(wcs), wcs.wcs.cunit[axis] ) shape[axis] = int(np.round((side / pixel_scales[axis]).decompose())) else: raise ValueError( "shape can contain Quantities with only pixel or angular units" ) if not isinstance( data, (Section, CompImageSection) ): # Accept lazy-loaded image sections data = np.asanyarray(data) # reverse position because extract_array and overlap_slices # use (y, x), but keep the input position pos_yx = position[::-1] cutout_data, input_position_cutout = extract_array( data, tuple(shape), pos_yx, mode=mode, fill_value=fill_value, return_position=True, limit_rounding_method=limit_rounding_method, ) if copy: cutout_data = np.copy(cutout_data) self.data = cutout_data self.input_position_cutout = input_position_cutout[::-1] # (x, y) slices_original, slices_cutout = overlap_slices( data.shape, shape, pos_yx, mode=mode, limit_rounding_method=limit_rounding_method, ) self.slices_original = slices_original self.slices_cutout = slices_cutout self.shape = self.data.shape self.input_position_original = position self.shape_input = shape ( (self.ymin_original, self.ymax_original), (self.xmin_original, self.xmax_original), ) = self.bbox_original ( (self.ymin_cutout, self.ymax_cutout), (self.xmin_cutout, self.xmax_cutout), ) = self.bbox_cutout # the true origin pixel of the cutout array, including any # filled cutout values self._origin_original_true = ( self.origin_original[0] - self.slices_cutout[1].start, self.origin_original[1] - self.slices_cutout[0].start, ) if wcs is not None: self.wcs = deepcopy(wcs) self.wcs.wcs.crpix -= self._origin_original_true self.wcs.array_shape = self.data.shape if wcs.sip is not None: self.wcs.sip = Sip( wcs.sip.a, wcs.sip.b, wcs.sip.ap, wcs.sip.bp, wcs.sip.crpix - self._origin_original_true, ) else: self.wcs = None def to_original_position(self, cutout_position): """ Convert an ``(x, y)`` position in the cutout array to the original ``(x, y)`` position in the original large array. Parameters ---------- cutout_position : tuple The ``(x, y)`` pixel position in the cutout array. Returns ------- original_position : tuple The corresponding ``(x, y)`` pixel position in the original large array. 
See Also -------- to_cutout_position """ return tuple(cutout_position[i] + self.origin_original[i] for i in [0, 1]) def to_cutout_position(self, original_position): """ Convert an ``(x, y)`` position in the original large array to the ``(x, y)`` position in the cutout array. Parameters ---------- original_position : tuple The ``(x, y)`` pixel position in the original large array. Returns ------- cutout_position : tuple The corresponding ``(x, y)`` pixel position in the cutout array. See Also -------- to_original_position """ return tuple(original_position[i] - self.origin_original[i] for i in [0, 1]) def plot_on_original(self, ax=None, fill=False, **kwargs): """ Plot the cutout region on a matplotlib Axes instance. Parameters ---------- ax : `matplotlib.axes.Axes` instance, optional If `None`, then the current `matplotlib.axes.Axes` instance is used. fill : bool, optional Set whether to fill the cutout patch. The default is `False`. kwargs : optional Any keyword arguments accepted by `matplotlib.patches.Patch`. Returns ------- ax : `matplotlib.axes.Axes` instance The matplotlib Axes instance constructed in the method if ``ax=None``. Otherwise the output ``ax`` is the same as the input ``ax``. """ import matplotlib.patches as mpatches import matplotlib.pyplot as plt kwargs["fill"] = fill if ax is None: ax = plt.gca() height, width = self.shape hw, hh = width / 2.0, height / 2.0 pos_xy = self.position_original - np.array([hw, hh]) patch = mpatches.Rectangle(pos_xy, width, height, angle=0.0, **kwargs) ax.add_patch(patch) return ax @staticmethod def _calc_center(slices): """ Calculate the center position. The center position will be fractional for even-sized arrays. For ``mode='partial'``, the central position is calculated for the valid (non-filled) cutout values. """ return tuple(0.5 * (slices[i].start + slices[i].stop - 1) for i in [1, 0]) @staticmethod def _calc_bbox(slices): """ Calculate a minimal bounding box in the form ``((ymin, ymax), (xmin, xmax))``. Note these are pixel locations, not slice indices. For ``mode='partial'``, the bounding box indices are for the valid (non-filled) cutout values. """ # (stop - 1) to return the max pixel location, not the slice index return ( (slices[0].start, slices[0].stop - 1), (slices[1].start, slices[1].stop - 1), ) @functools.cached_property def origin_original(self): """ The ``(x, y)`` index of the origin pixel of the cutout with respect to the original array. For ``mode='partial'``, the origin pixel is calculated for the valid (non-filled) cutout values. """ return (self.slices_original[1].start, self.slices_original[0].start) @functools.cached_property def origin_cutout(self): """ The ``(x, y)`` index of the origin pixel of the cutout with respect to the cutout array. For ``mode='partial'``, the origin pixel is calculated for the valid (non-filled) cutout values. """ return (self.slices_cutout[1].start, self.slices_cutout[0].start) @staticmethod def _round(a): """ Round the input to the nearest integer. If two integers are equally close, the value is rounded up. Note that this is different from `np.round`, which rounds to the nearest even number. """ return int(np.floor(a + 0.5)) @functools.cached_property def position_original(self): """ The ``(x, y)`` position index (rounded to the nearest pixel) in the original array. 
""" return ( self._round(self.input_position_original[0]), self._round(self.input_position_original[1]), ) @functools.cached_property def position_cutout(self): """ The ``(x, y)`` position index (rounded to the nearest pixel) in the cutout array. """ return ( self._round(self.input_position_cutout[0]), self._round(self.input_position_cutout[1]), ) @functools.cached_property def center_original(self): """ The central ``(x, y)`` position of the cutout array with respect to the original array. For ``mode='partial'``, the central position is calculated for the valid (non-filled) cutout values. """ return self._calc_center(self.slices_original) @functools.cached_property def center_cutout(self): """ The central ``(x, y)`` position of the cutout array with respect to the cutout array. For ``mode='partial'``, the central position is calculated for the valid (non-filled) cutout values. """ return self._calc_center(self.slices_cutout) @functools.cached_property def bbox_original(self): """ The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal rectangular region of the cutout array with respect to the original array. For ``mode='partial'``, the bounding box indices are for the valid (non-filled) cutout values. """ return self._calc_bbox(self.slices_original) @functools.cached_property def bbox_cutout(self): """ The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal rectangular region of the cutout array with respect to the cutout array. For ``mode='partial'``, the bounding box indices are for the valid (non-filled) cutout values. """ return self._calc_bbox(self.slices_cutout)
Cutout2D
python
PyCQA__pylint
tests/functional/n/name/name_preset_snake_case.py
{ "start": 607, "end": 702 }
class ____(Enum): # [invalid-name] const_with_snake_case = 42 another_const = 43
FooEnum
python
tensorflow__tensorflow
tensorflow/python/autograph/pyct/testing/codegen.py
{ "start": 1905, "end": 1996 }
class ____(NodeSampler): sample_map = dict(((gast.USub, 1), (gast.UAdd, 0)))
UnaryOpSampler
python
langchain-ai__langchain
libs/core/langchain_core/output_parsers/pydantic.py
{ "start": 433, "end": 4464 }
class ____(JsonOutputParser, Generic[TBaseModel]): """Parse an output using a Pydantic model.""" pydantic_object: Annotated[type[TBaseModel], SkipValidation()] """The Pydantic model to parse.""" def _parse_obj(self, obj: dict) -> TBaseModel: try: if issubclass(self.pydantic_object, pydantic.BaseModel): return self.pydantic_object.model_validate(obj) if issubclass(self.pydantic_object, pydantic.v1.BaseModel): return self.pydantic_object.parse_obj(obj) msg = f"Unsupported model version for PydanticOutputParser: \ {self.pydantic_object.__class__}" raise OutputParserException(msg) except (pydantic.ValidationError, pydantic.v1.ValidationError) as e: raise self._parser_exception(e, obj) from e def _parser_exception( self, e: Exception, json_object: dict ) -> OutputParserException: json_string = json.dumps(json_object, ensure_ascii=False) name = self.pydantic_object.__name__ msg = f"Failed to parse {name} from completion {json_string}. Got: {e}" return OutputParserException(msg, llm_output=json_string) def parse_result( self, result: list[Generation], *, partial: bool = False ) -> TBaseModel | None: """Parse the result of an LLM call to a Pydantic object. Args: result: The result of the LLM call. partial: Whether to parse partial JSON objects. If `True`, the output will be a JSON object containing all the keys that have been returned so far. Raises: `OutputParserException`: If the result is not valid JSON or does not conform to the Pydantic model. Returns: The parsed Pydantic object. """ try: json_object = super().parse_result(result) return self._parse_obj(json_object) except OutputParserException: if partial: return None raise def parse(self, text: str) -> TBaseModel: """Parse the output of an LLM call to a Pydantic object. Args: text: The output of the LLM call. Returns: The parsed Pydantic object. """ return super().parse(text) def get_format_instructions(self) -> str: """Return the format instructions for the JSON output. Returns: The format instructions for the JSON output. """ # Copy schema to avoid altering original Pydantic schema. schema = dict(self._get_schema(self.pydantic_object).items()) # Remove extraneous fields. reduced_schema = schema if "title" in reduced_schema: del reduced_schema["title"] if "type" in reduced_schema: del reduced_schema["type"] # Ensure json in context is well-formed with double quotes. schema_str = json.dumps(reduced_schema, ensure_ascii=False) return _PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str) @property def _type(self) -> str: return "pydantic" @property @override def OutputType(self) -> type[TBaseModel]: """Return the Pydantic model.""" return self.pydantic_object _PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below. As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}} the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted. Here is the output schema: ``` {schema} ```""" # noqa: E501 # Re-exporting types for backwards compatibility __all__ = [ "PydanticBaseModel", "PydanticOutputParser", "TBaseModel", ]
PydanticOutputParser
python
redis__redis-py
tests/test_pubsub.py
{ "start": 23503, "end": 29434 }
class ____: "These tests only validate that we get unicode values back" channel = "uni" + chr(4456) + "code" pattern = "uni" + chr(4456) + "*" data = "abc" + chr(4458) + "123" def make_message(self, type, channel, data, pattern=None): return {"type": type, "channel": channel, "pattern": pattern, "data": data} def setup_method(self, method): self.message = None def message_handler(self, message): self.message = message @pytest.fixture() def r(self, request): return _get_client(redis.Redis, request=request, decode_responses=True) def test_channel_subscribe_unsubscribe(self, r): p = r.pubsub() p.subscribe(self.channel) assert wait_for_message(p) == self.make_message("subscribe", self.channel, 1) p.unsubscribe(self.channel) assert wait_for_message(p) == self.make_message("unsubscribe", self.channel, 0) def test_pattern_subscribe_unsubscribe(self, r): p = r.pubsub() p.psubscribe(self.pattern) assert wait_for_message(p) == self.make_message("psubscribe", self.pattern, 1) p.punsubscribe(self.pattern) assert wait_for_message(p) == self.make_message("punsubscribe", self.pattern, 0) @skip_if_server_version_lt("7.0.0") def test_shard_channel_subscribe_unsubscribe(self, r): p = r.pubsub() p.ssubscribe(self.channel) assert wait_for_message(p, func=p.get_sharded_message) == self.make_message( "ssubscribe", self.channel, 1 ) p.sunsubscribe(self.channel) assert wait_for_message(p, func=p.get_sharded_message) == self.make_message( "sunsubscribe", self.channel, 0 ) def test_channel_publish(self, r): p = r.pubsub() p.subscribe(self.channel) assert wait_for_message(p) == self.make_message("subscribe", self.channel, 1) r.publish(self.channel, self.data) assert wait_for_message(p) == self.make_message( "message", self.channel, self.data ) @pytest.mark.onlynoncluster def test_pattern_publish(self, r): p = r.pubsub() p.psubscribe(self.pattern) assert wait_for_message(p) == self.make_message("psubscribe", self.pattern, 1) r.publish(self.channel, self.data) assert wait_for_message(p) == self.make_message( "pmessage", self.channel, self.data, pattern=self.pattern ) @skip_if_server_version_lt("7.0.0") def test_shard_channel_publish(self, r): p = r.pubsub() p.ssubscribe(self.channel) assert wait_for_message(p, func=p.get_sharded_message) == self.make_message( "ssubscribe", self.channel, 1 ) r.spublish(self.channel, self.data) assert wait_for_message(p, func=p.get_sharded_message) == self.make_message( "smessage", self.channel, self.data ) def test_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe(**{self.channel: self.message_handler}) assert wait_for_message(p) is None r.publish(self.channel, self.data) assert wait_for_message(p) is None assert self.message == self.make_message("message", self.channel, self.data) # test that we reconnected to the correct channel self.message = None p.connection.disconnect() assert wait_for_message(p) is None # should reconnect new_data = self.data + "new data" r.publish(self.channel, new_data) assert wait_for_message(p) is None assert self.message == self.make_message("message", self.channel, new_data) def test_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.psubscribe(**{self.pattern: self.message_handler}) assert wait_for_message(p) is None r.publish(self.channel, self.data) assert wait_for_message(p) is None assert self.message == self.make_message( "pmessage", self.channel, self.data, pattern=self.pattern ) # test that we reconnected to the correct pattern self.message = None p.connection.disconnect() 
assert wait_for_message(p) is None # should reconnect new_data = self.data + "new data" r.publish(self.channel, new_data) assert wait_for_message(p) is None assert self.message == self.make_message( "pmessage", self.channel, new_data, pattern=self.pattern ) @skip_if_server_version_lt("7.0.0") def test_shard_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.ssubscribe(**{self.channel: self.message_handler}) assert wait_for_message(p, func=p.get_sharded_message) is None r.spublish(self.channel, self.data) assert wait_for_message(p, func=p.get_sharded_message) is None assert self.message == self.make_message("smessage", self.channel, self.data) # test that we reconnected to the correct channel self.message = None try: # cluster mode p.disconnect() except AttributeError: # standalone mode p.connection.disconnect() # should reconnect assert wait_for_message(p, func=p.get_sharded_message) is None new_data = self.data + "new data" r.spublish(self.channel, new_data) assert wait_for_message(p, func=p.get_sharded_message) is None assert self.message == self.make_message("smessage", self.channel, new_data) def test_context_manager(self, r): with r.pubsub() as pubsub: pubsub.subscribe("foo") assert pubsub.connection is not None assert pubsub.connection is None assert pubsub.channels == {} assert pubsub.patterns == {}
TestPubSubAutoDecoding
python
jmcnamara__XlsxWriter
xlsxwriter/test/vml/test_write_shapetype.py
{ "start": 289, "end": 1442 }
class ____(unittest.TestCase): """ Test the Vml _write_shapetype() method. """ def setUp(self): self.fh = StringIO() self.vml = Vml() self.vml._set_filehandle(self.fh) def test_write_comment_shapetype(self): """Test the _write_comment_shapetype() method""" self.vml._write_comment_shapetype() exp = """<v:shapetype id="_x0000_t202" coordsize="21600,21600" o:spt="202" path="m,l,21600r21600,l21600,xe"><v:stroke joinstyle="miter"/><v:path gradientshapeok="t" o:connecttype="rect"/></v:shapetype>""" got = self.fh.getvalue() self.assertEqual(exp, got) def test_write_button_shapetype(self): """Test the _write_button_shapetype() method""" self.vml._write_button_shapetype() exp = """<v:shapetype id="_x0000_t201" coordsize="21600,21600" o:spt="201" path="m,l,21600r21600,l21600,xe"><v:stroke joinstyle="miter"/><v:path shadowok="f" o:extrusionok="f" strokeok="f" fillok="f" o:connecttype="rect"/><o:lock v:ext="edit" shapetype="t"/></v:shapetype>""" got = self.fh.getvalue() self.assertEqual(exp, got)
TestWriteVshapetype
python
mlflow__mlflow
mlflow/genai/scorers/registry.py
{ "start": 637, "end": 1180 }
class ____(MlflowException): """Exception thrown when building a scorer store with an unsupported URI""" def __init__(self, unsupported_uri, supported_uri_schemes): message = ( f"Scorer registration functionality is unavailable; got unsupported URI" f" '{unsupported_uri}' for scorer data storage. Supported URI schemes are:" f" {supported_uri_schemes}." ) super().__init__(message) self.supported_uri_schemes = supported_uri_schemes
UnsupportedScorerStoreURIException
python
pytorch__pytorch
test/distributed/checkpoint/_experimental/test_checkpointer.py
{ "start": 16273, "end": 25248 }
class ____(TestCase): """Tests specific to AsyncCheckpointer functionality.""" def setUp(self): super().setUp() # Create a temporary directory for checkpoints self.temp_dir = tempfile.mkdtemp() # Create real objects for testing self.rank_info = RankInfo( global_world_size=1, global_rank=0, ) self.writer_config = CheckpointWriterConfig() # Create reader for testing self.reader = CheckpointReader( rank_info=self.rank_info, ) # Create test state dictionary self.state_dict = { "model": torch.nn.Linear(10, 5).state_dict(), "optimizer": {"param_groups": [{"lr": 0.01}]}, "epoch": 5, "step": 1000, } def tearDown(self): # Clean up the temporary directory shutil.rmtree(self.temp_dir) def _create_async_checkpointer(self) -> AsyncCheckpointer: """Helper method to create AsyncCheckpointer with real components.""" # Create staging config for async operations # Use conservative settings to avoid CUDA issues in test environment stager_config = CheckpointStagerConfig( use_async_staging=True, use_pinned_memory=False, # Disable to avoid CUDA memory issues use_shared_memory=True, use_non_blocking_copy=False, # Disable to avoid CUDA issues ) # Create process config process_config = CheckpointProcessConfig( subprocess_init_timeout_secs=30, subprocess_shutdown_timeout_secs=60, ) # Create stager checkpoint_stager = DefaultStager(stager_config) # Create checkpoint process checkpoint_process = CheckpointProcess( rank_info=self.rank_info, config=process_config, subprocess_init_fn=subprocess_init_fn, subprocess_init_args=( "test-async-checkpointer", os.getpid(), ), checkpoint_writer_init_fn=ckpt_writer_init_fn, checkpoint_writer_init_args={ "config": self.writer_config, "rank_info": self.rank_info, }, ) # Wait for process initialization checkpoint_process.process_creation_future.result() return AsyncCheckpointer( checkpoint_stager=checkpoint_stager, checkpoint_process=checkpoint_process, reader=self.reader, ) def test_async_returns_futures(self): """Test that async save returns futures.""" checkpointer = self._create_async_checkpointer() checkpoint_path = os.path.join(self.temp_dir, "checkpoint_futures") try: # Save the checkpoint asynchronously result = checkpointer.save(checkpoint_path, self.state_dict) # Verify that futures are returned self.assertIsInstance(result, tuple) self.assertEqual(len(result), 2) stage_future, write_future = result self.assertIsInstance(stage_future, Future) self.assertIsInstance(write_future, Future) # Wait for completion stage_future.result() write_future.result() finally: checkpointer.close() def test_async_sequential_saves_wait(self): """Test that sequential async saves wait for previous operations.""" checkpointer = self._create_async_checkpointer() try: # First save checkpoint_path1 = os.path.join(self.temp_dir, "checkpoint_seq_1") stage_future1, write_future1 = checkpointer.save( checkpoint_path1, self.state_dict ) # Second save (should wait for first to complete) checkpoint_path2 = os.path.join(self.temp_dir, "checkpoint_seq_2") modified_state_dict = self.state_dict.copy() modified_state_dict["epoch"] = 10 stage_future2, write_future2 = checkpointer.save( checkpoint_path2, modified_state_dict ) # Wait for both to complete write_future1.result() write_future2.result() # Verify both checkpoints were created with correct content checkpoint_file1 = os.path.join( checkpoint_path1, f"checkpoint_{self.rank_info.global_rank}.pt" ) checkpoint_file2 = os.path.join( checkpoint_path2, f"checkpoint_{self.rank_info.global_rank}.pt" ) self.assertTrue(os.path.exists(checkpoint_file1)) 
self.assertTrue(os.path.exists(checkpoint_file2)) loaded1 = torch.load(checkpoint_file1) loaded2 = torch.load(checkpoint_file2) self.assertEqual(loaded1["epoch"], 5) self.assertEqual(loaded2["epoch"], 10) finally: checkpointer.close() def test_async_multiple_saves_ordering(self): """Test that multiple async saves maintain proper ordering.""" checkpointer = self._create_async_checkpointer() try: # Create multiple state dicts state_dicts = [ {"epoch": 1, "model": torch.nn.Linear(5, 3).state_dict()}, {"epoch": 2, "model": torch.nn.Linear(5, 3).state_dict()}, {"epoch": 3, "model": torch.nn.Linear(5, 3).state_dict()}, ] # Save multiple checkpoints futures = [] checkpoint_paths = [] for i, state_dict in enumerate(state_dicts, 1): checkpoint_path = os.path.join(self.temp_dir, f"multi_{i}") checkpoint_paths.append(checkpoint_path) stage_future, write_future = checkpointer.save( checkpoint_path, state_dict ) futures.append((stage_future, write_future)) # Wait for all to complete for stage_future, write_future in futures: stage_future.result() write_future.result() # Verify all checkpoints exist and have correct content for i, checkpoint_path in enumerate(checkpoint_paths, 1): checkpoint_file = os.path.join( checkpoint_path, f"checkpoint_{self.rank_info.global_rank}.pt" ) self.assertTrue(os.path.exists(checkpoint_file)) loaded = torch.load(checkpoint_file) self.assertEqual(loaded["epoch"], i) finally: checkpointer.close() def test_async_error_handling(self): """Test error handling in async operations.""" # Create checkpointer with mocked components to simulate errors mock_stager = Mock() mock_process = Mock() mock_reader = Mock() # Mock staging to return a completed future mock_staging_future = Future() mock_staging_future.set_result({"staged": "data"}) mock_stager.stage.return_value = mock_staging_future # Mock process write to raise an error mock_write_future = Future() mock_write_future.set_exception(RuntimeError("Write failed")) mock_process.write.return_value = mock_write_future checkpointer = AsyncCheckpointer( checkpoint_stager=mock_stager, checkpoint_process=mock_process, reader=mock_reader, ) try: # This should not raise immediately stage_future, write_future = checkpointer.save("/tmp/test", self.state_dict) # But waiting for the write future should raise the error with self.assertRaises(RuntimeError) as cm: write_future.result() self.assertIn("Write failed", str(cm.exception)) finally: checkpointer.close() def test_async_future_results(self): """Test the results returned by async futures.""" checkpointer = self._create_async_checkpointer() checkpoint_path = os.path.join(self.temp_dir, "checkpoint_results") try: # Save checkpoint stage_future, write_future = checkpointer.save( checkpoint_path, self.state_dict ) # Both futures should complete successfully stage_result = stage_future.result() write_result = write_future.result() # Stage result is wrapped by wrap_future() so it returns None on success # This is intentional - the stage_future indicates completion, not data access self.assertIsNone(stage_result) # Write result should be None (success indicator) self.assertIsNone(write_result) finally: checkpointer.close() if __name__ == "__main__": run_tests()
TestAsyncCheckpointerSpecific
python
huggingface__transformers
src/transformers/models/sam3/modeling_sam3.py
{ "start": 86647, "end": 100914 }
class ____(Sam3PreTrainedModel): input_modalities = ["image", "text"] _checkpoint_conversion_mapping = { r"detector_model.(.+)": r"\1" # the regex allows to remove the prefix, and add it back in revert mode } _keys_to_ignore_on_load_unexpected = [ r"^tracker_model.", r"^tracker_neck.", ] def __init__(self, config: Sam3Config): # loading from a sam3_video config if hasattr(config, "detector_config") and config.detector_config is not None: detector_config = config.detector_config if isinstance(detector_config, dict): detector_config = Sam3Config(**detector_config) config = detector_config super().__init__(config) self.vision_encoder = Sam3VisionModel(config.vision_config) self.text_encoder = CLIPTextModelWithProjection(config.text_config) self.vocab_size = config.text_config.vocab_size # Project text features from text encoder hidden size to model hidden size # CLIP text encoder outputs 1024-dim features, but we need 256-dim for DETR self.text_projection = nn.Linear(config.text_config.hidden_size, config.detr_encoder_config.hidden_size) # Pass _attn_implementation to subconfigs BEFORE creating modules config.geometry_encoder_config._attn_implementation = config._attn_implementation config.detr_encoder_config._attn_implementation = config._attn_implementation config.detr_decoder_config._attn_implementation = config._attn_implementation config.mask_decoder_config._attn_implementation = config._attn_implementation self.geometry_encoder = Sam3GeometryEncoder(config.geometry_encoder_config) self.detr_encoder = Sam3DetrEncoder(config.detr_encoder_config) self.detr_decoder = Sam3DetrDecoder(config.detr_decoder_config) self.mask_decoder = Sam3MaskDecoder(config.mask_decoder_config) # Dot product scoring to compute classification scores self.dot_product_scoring = Sam3DotProductScoring(config) self.post_init() @auto_docstring def get_text_features( self, input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> torch.FloatTensor: r""" Returns: text_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Text embeddings that can be passed as `text_embeds` to the forward method. Example: ```python >>> from transformers import Sam3Model, Sam3Processor >>> from PIL import Image >>> import requests >>> model = Sam3Model.from_pretrained("facebook/sam3") >>> processor = Sam3Processor.from_pretrained("facebook/sam3") >>> # Pre-compute text embeddings >>> text_inputs = processor(text="cat", return_tensors="pt") >>> text_embeds = model.get_text_features(**text_inputs) >>> # Reuse text embeddings for multiple images >>> img_url = "http://images.cocodataset.org/val2017/000000077595.jpg" >>> image = Image.open(requests.get(img_url, stream=True).raw) >>> img_inputs = processor(images=image, return_tensors="pt") >>> outputs = model(pixel_values=img_inputs.pixel_values, text_embeds=text_embeds) ``` """ text_features = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, **kwargs ).last_hidden_state text_features = self.text_projection(text_features) return text_features @auto_docstring def get_vision_features( self, pixel_values: torch.FloatTensor, **kwargs: Unpack[TransformersKwargs], ) -> Sam3VisionEncoderOutput: r""" Returns: vision_embeds (`Sam3VisionEncoderOutput`): Vision embeddings that can be passed as `vision_embeds` to the forward method. 
Example: ```python >>> from transformers import Sam3Model, Sam3Processor >>> from PIL import Image >>> import requests >>> model = Sam3Model.from_pretrained("facebook/sam3") >>> processor = Sam3Processor.from_pretrained("facebook/sam3") >>> # Pre-compute vision embeddings >>> img_url = "http://images.cocodataset.org/val2017/000000077595.jpg" >>> image = Image.open(requests.get(img_url, stream=True).raw) >>> img_inputs = processor(images=image, return_tensors="pt") >>> vision_embeds = model.get_vision_features(pixel_values=img_inputs.pixel_values) >>> # Reuse vision embeddings for multiple text prompts >>> text_inputs = processor(text="cat", return_tensors="pt") >>> outputs = model(vision_embeds=vision_embeds, input_ids=text_inputs.input_ids) ``` """ vision_outputs = self.vision_encoder(pixel_values, **kwargs) return vision_outputs @check_model_inputs() @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, vision_embeds: Optional[Sam3VisionEncoderOutput] = None, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, text_embeds: Optional[torch.FloatTensor] = None, input_boxes: Optional[torch.FloatTensor] = None, input_boxes_labels: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Sam3ImageSegmentationOutput: r""" vision_embeds (`Sam3VisionEncoderOutput`, *optional*): Pre-computed vision embeddings. Can be used to easily reuse vision embeddings. If provided, `pixel_values` should not be passed. Mutually exclusive with `pixel_values`. text_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Pre-computed text embeddings. Can be used to easily reuse text embeddings. If provided, `input_ids` should not be passed. Mutually exclusive with `input_ids`. input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`, *optional*): Normalized box coordinates in [0, 1] range, in (cx, cy, w, h) format. input_boxes_labels (`torch.LongTensor` of shape `(batch_size, num_boxes)`, *optional*): Labels for boxes: 1 (positive), 0 (negative). 
Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoModel, AutoProcessor >>> model = AutoModel.from_pretrained("facebook/sam3") >>> processor = AutoProcessor.from_pretrained("facebook/sam3") >>> img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png" >>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") >>> text = "car" >>> inputs = processor(images=raw_image, text=text, return_tensors="pt") >>> # Get segmentation output >>> outputs = model(**inputs) >>> pred_masks = outputs.pred_masks >>> pred_boxes = outputs.pred_boxes ``` """ if (pixel_values is None) == (vision_embeds is None): raise ValueError("You must specify exactly one of pixel_values or vision_embeds") if (input_ids is None) == (text_embeds is None): raise ValueError("You must specify exactly one of input_ids or text_embeds") if pixel_values is not None: batch_size = pixel_values.shape[0] device = pixel_values.device else: batch_size = vision_embeds.fpn_hidden_states[0].shape[0] device = vision_embeds.fpn_hidden_states[0].device if vision_embeds is None: vision_outputs = self.vision_encoder(pixel_values, **kwargs) else: vision_outputs = vision_embeds fpn_hidden_states = vision_outputs.fpn_hidden_states[:-1] fpn_position_encoding = vision_outputs.fpn_position_encoding[:-1] if text_embeds is None: text_features = self.get_text_features(input_ids=input_ids, attention_mask=attention_mask, **kwargs) else: text_features = text_embeds text_mask = attention_mask.bool() if attention_mask is not None else None has_geometry_prompts = input_boxes is not None and input_boxes.numel() > 0 geometry_prompt_features = None geometry_prompt_mask = None if has_geometry_prompts: if input_boxes is not None and input_boxes.numel() > 0: box_embeddings = input_boxes # [batch_size, num_boxes, 4] box_labels = ( input_boxes_labels if input_boxes_labels is not None else torch.ones_like(box_embeddings[..., 0], dtype=torch.long) ) box_mask = ( (input_boxes_labels != -10) if input_boxes_labels is not None else torch.ones(batch_size, input_boxes.shape[1], dtype=torch.bool, device=device) ) box_labels = torch.where(box_labels == -10, 0, box_labels) else: box_embeddings = torch.zeros(batch_size, 0, 4, dtype=text_features.dtype, device=device) box_labels = torch.zeros(batch_size, 0, dtype=torch.long, device=device) box_mask = torch.zeros(batch_size, 0, dtype=torch.bool, device=device) geometry_outputs = self.geometry_encoder( box_embeddings=box_embeddings, box_mask=box_mask, box_labels=box_labels, img_feats=fpn_hidden_states, img_pos_embeds=fpn_position_encoding, ) geometry_prompt_features = geometry_outputs.last_hidden_state geometry_prompt_mask = geometry_outputs.attention_mask if geometry_prompt_features is not None: # Repeat text_features for all geometry prompts if text_features.shape[0] == 1 and geometry_prompt_features.shape[0] > 1: text_features = text_features.repeat(geometry_prompt_features.shape[0], 1, 1) combined_prompt_features = torch.cat([text_features, geometry_prompt_features], dim=1) if text_mask is not None and text_mask.shape[0] == 1 and geometry_prompt_mask.shape[0] > 1: text_mask = text_mask.repeat(geometry_prompt_mask.shape[0], 1) if text_mask is not None and geometry_prompt_mask is not None: combined_prompt_mask = torch.cat([text_mask, geometry_prompt_mask], dim=1) elif text_mask is not None: geo_valid_mask = torch.ones( batch_size, geometry_prompt_features.shape[1], dtype=torch.bool, device=device ) 
combined_prompt_mask = torch.cat([text_mask, geo_valid_mask], dim=1) elif geometry_prompt_mask is not None: text_valid_mask = torch.ones(batch_size, text_features.shape[1], dtype=torch.bool, device=device) combined_prompt_mask = torch.cat([text_valid_mask, geometry_prompt_mask], dim=1) else: combined_prompt_mask = None else: combined_prompt_features = text_features combined_prompt_mask = text_mask encoder_outputs = self.detr_encoder( vision_features=[fpn_hidden_states[-1]], text_features=combined_prompt_features, vision_pos_embeds=[fpn_position_encoding[-1]], text_mask=combined_prompt_mask, **kwargs, ) decoder_outputs = self.detr_decoder( vision_features=encoder_outputs.last_hidden_state, text_features=encoder_outputs.text_features, vision_pos_encoding=encoder_outputs.pos_embeds_flattened, text_mask=combined_prompt_mask, spatial_shapes=encoder_outputs.spatial_shapes, **kwargs, ) # Refine boxes from decoder all_box_offsets = self.detr_decoder.box_head(decoder_outputs.intermediate_hidden_states) reference_boxes_inv_sig = inverse_sigmoid(decoder_outputs.reference_boxes) all_pred_boxes_cxcywh = (reference_boxes_inv_sig + all_box_offsets).sigmoid() all_pred_boxes = box_cxcywh_to_xyxy(all_pred_boxes_cxcywh) all_pred_logits = self.dot_product_scoring( decoder_hidden_states=decoder_outputs.intermediate_hidden_states, text_features=encoder_outputs.text_features, text_mask=combined_prompt_mask, ).squeeze(-1) pred_logits = all_pred_logits[-1] pred_boxes = all_pred_boxes[-1] decoder_hidden_states = decoder_outputs.intermediate_hidden_states[-1] presence_logits = decoder_outputs.presence_logits[-1] mask_outputs = self.mask_decoder( decoder_queries=decoder_hidden_states, backbone_features=list(fpn_hidden_states), encoder_hidden_states=encoder_outputs.last_hidden_state, prompt_features=combined_prompt_features, prompt_mask=combined_prompt_mask, **kwargs, ) return Sam3ImageSegmentationOutput( pred_masks=mask_outputs.pred_masks, pred_boxes=pred_boxes, pred_logits=pred_logits, presence_logits=presence_logits, semantic_seg=mask_outputs.semantic_seg, decoder_hidden_states=decoder_outputs.hidden_states, decoder_reference_boxes=decoder_outputs.reference_boxes, encoder_hidden_states=encoder_outputs.hidden_states, vision_hidden_states=vision_outputs.hidden_states, vision_attentions=vision_outputs.attentions, detr_encoder_attentions=encoder_outputs.attentions, detr_decoder_attentions=decoder_outputs.attentions, mask_decoder_attentions=mask_outputs.attentions, ) __all__ = ["Sam3Model", "Sam3VisionModel", "Sam3ViTModel", "Sam3PreTrainedModel"]
Sam3Model
python
getsentry__sentry-python
sentry_sdk/integrations/loguru.py
{ "start": 679, "end": 1481 }
class ____(enum.IntEnum):
    TRACE = 5
    DEBUG = 10
    INFO = 20
    SUCCESS = 25
    WARNING = 30
    ERROR = 40
    CRITICAL = 50


DEFAULT_LEVEL = LoggingLevels.INFO.value
DEFAULT_EVENT_LEVEL = LoggingLevels.ERROR.value

SENTRY_LEVEL_FROM_LOGURU_LEVEL = {
    "TRACE": "DEBUG",
    "DEBUG": "DEBUG",
    "INFO": "INFO",
    "SUCCESS": "INFO",
    "WARNING": "WARNING",
    "ERROR": "ERROR",
    "CRITICAL": "CRITICAL",
}

# Map Loguru level numbers to corresponding OTel level numbers
SEVERITY_TO_OTEL_SEVERITY = {
    LoggingLevels.CRITICAL: 21,  # fatal
    LoggingLevels.ERROR: 17,  # error
    LoggingLevels.WARNING: 13,  # warn
    LoggingLevels.SUCCESS: 11,  # info
    LoggingLevels.INFO: 9,  # info
    LoggingLevels.DEBUG: 5,  # debug
    LoggingLevels.TRACE: 1,  # trace
}
LoggingLevels
python
gevent__gevent
src/greentest/3.11/test_socket.py
{ "start": 201450, "end": 204677 }
class ____(unittest.TestCase):

    class MockSocket(socket.socket):
        def connect(self, *args):
            raise TimeoutError('timed out')

    @contextlib.contextmanager
    def mocked_socket_module(self):
        """Return a socket which times out on connect"""
        old_socket = socket.socket
        socket.socket = self.MockSocket
        try:
            yield
        finally:
            socket.socket = old_socket

    @socket_helper.skip_if_tcp_blackhole
    def test_connect(self):
        port = socket_helper.find_unused_port()
        cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(cli.close)
        with self.assertRaises(OSError) as cm:
            cli.connect((HOST, port))
        self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)

    @socket_helper.skip_if_tcp_blackhole
    def test_create_connection(self):
        # Issue #9792: errors raised by create_connection() should have
        # a proper errno attribute.
        port = socket_helper.find_unused_port()
        with self.assertRaises(OSError) as cm:
            socket.create_connection((HOST, port))

        # Issue #16257: create_connection() calls getaddrinfo() against
        # 'localhost'. This may result in an IPV6 addr being returned
        # as well as an IPV4 one:
        #   >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
        #   >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
        #        (26, 2, 0, '', ('::1', 41230, 0, 0))]
        #
        # create_connection() enumerates through all the addresses returned
        # and if it doesn't successfully bind to any of them, it propagates
        # the last exception it encountered.
        #
        # On Solaris, ENETUNREACH is returned in this circumstance instead
        # of ECONNREFUSED. So, if that errno exists, add it to our list of
        # expected errnos.
        expected_errnos = socket_helper.get_socket_conn_refused_errs()
        self.assertIn(cm.exception.errno, expected_errnos)

    def test_create_connection_all_errors(self):
        port = socket_helper.find_unused_port()
        try:
            socket.create_connection((HOST, port), all_errors=True)
        except ExceptionGroup as e:
            eg = e
        else:
            self.fail('expected connection to fail')

        self.assertIsInstance(eg, ExceptionGroup)
        for e in eg.exceptions:
            self.assertIsInstance(e, OSError)

        addresses = socket.getaddrinfo(
            'localhost', port, 0, socket.SOCK_STREAM)
        # assert that we got an exception for each address
        self.assertEqual(len(addresses), len(eg.exceptions))

    def test_create_connection_timeout(self):
        # Issue #9792: create_connection() should not recast timeout errors
        # as generic socket errors.
        with self.mocked_socket_module():
            try:
                socket.create_connection((HOST, 1234))
            except TimeoutError:
                pass
            except OSError as exc:
                if socket_helper.IPV6_ENABLED or exc.errno != errno.EAFNOSUPPORT:
                    raise
            else:
                self.fail('TimeoutError not raised')
NetworkConnectionNoServer
python
openai__openai-python
src/openai/types/evals/run_cancel_response.py
{ "start": 6798, "end": 7166 }
class ____(BaseModel):
    template: List[DataSourceResponsesInputMessagesTemplateTemplate]
    """A list of chat messages forming the prompt or context.

    May include variable references to the `item` namespace, ie {{item.name}}.
    """

    type: Literal["template"]
    """The type of input messages. Always `template`."""
DataSourceResponsesInputMessagesTemplate
python
Textualize__textual
src/textual/timer.py
{ "start": 916, "end": 6255 }
class ____:
    """A class to send timer-based events.

    Args:
        event_target: The object which will receive the timer events.
        interval: The time between timer events, in seconds.
        name: A name to assign the event (for debugging).
        callback: An optional callback to invoke when the event is handled.
        repeat: The number of times to repeat the timer, or None to repeat forever.
        skip: Enable skipping of scheduled events that couldn't be sent in time.
        pause: Start the timer paused.
    """

    _timer_count: int = 1

    def __init__(
        self,
        event_target: MessageTarget,
        interval: float,
        *,
        name: str | None = None,
        callback: TimerCallback | None = None,
        repeat: int | None = None,
        skip: bool = True,
        pause: bool = False,
    ) -> None:
        self._target_repr = repr(event_target)
        self._target = weakref.ref(event_target)
        self._interval = interval
        self.name = f"Timer#{self._timer_count}" if name is None else name
        self._timer_count += 1
        self._callback = callback
        self._repeat = repeat
        self._skip = skip
        self._task: Task | None = None
        self._reset: bool = False
        self._original_pause = pause

    @cached_property
    def _active(self) -> Event:
        event = Event()
        if not self._original_pause:
            event.set()
        return event

    def __rich_repr__(self) -> Result:
        yield self._interval
        yield "name", self.name
        yield "repeat", self._repeat, None

    @property
    def target(self) -> MessageTarget:
        target = self._target()
        if target is None:
            raise EventTargetGone()
        return target

    def _start(self) -> None:
        """Start the timer."""
        self._task = create_task(self._run_timer(), name=self.name)

    def stop(self) -> None:
        """Stop the timer."""
        if self._task is None:
            return

        self._active.set()
        self._task.cancel()
        self._task = None

    @classmethod
    async def _stop_all(cls, timers: Iterable[Timer]) -> None:
        """Stop a number of timers, and await their completion.

        Args:
            timers: A number of timers.
        """

        async def stop_timer(timer: Timer) -> None:
            """Stop a timer and wait for it to finish.

            Args:
                timer: A Timer instance.
            """
            if timer._task is not None:
                timer._active.set()
                timer._task.cancel()
                try:
                    await timer._task
                except CancelledError:
                    pass
                timer._task = None

        await gather(*[stop_timer(timer) for timer in list(timers)])

    def pause(self) -> None:
        """Pause the timer.

        A paused timer will not send events until it is resumed.
        """
        self._active.clear()

    def reset(self) -> None:
        """Reset the timer, so it starts from the beginning."""
        self._active.set()
        self._reset = True

    def resume(self) -> None:
        """Resume a paused timer."""
        self._active.set()

    async def _run_timer(self) -> None:
        """Run the timer task."""
        try:
            await self._run()
        except CancelledError:
            pass

    async def _run(self) -> None:
        """Run the timer."""
        count = 0
        _repeat = self._repeat
        _interval = self._interval
        self._active  # Force instantiation in same thread
        await self._active.wait()
        start = _time.get_time()

        while _repeat is None or count <= _repeat:
            next_timer = start + ((count + 1) * _interval)
            now = _time.get_time()
            if self._skip and next_timer < now:
                count = int((now - start) / _interval + 1)
                continue
            now = _time.get_time()
            wait_time = max(0, next_timer - now)
            await sleep(wait_time)
            count += 1
            await self._active.wait()
            if self._reset:
                start = _time.get_time()
                count = 0
                self._reset = False
                continue
            try:
                await self._tick(next_timer=next_timer, count=count)
            except EventTargetGone:
                break

    async def _tick(self, *, next_timer: float, count: int) -> None:
        """Triggers the Timer's action: either call its callback, or sends an event to its target"""
        app = active_app.get()
        if app._exit:
            return
        if self._callback is not None:
            try:
                await invoke(self._callback)
            except CancelledError:
                # https://github.com/Textualize/textual/pull/2895
                # Re-raise CancelledErrors that would be caught by the following exception block in Python 3.7
                raise
            except Exception as error:
                app._handle_exception(error)
        else:
            event = events.Timer(
                timer=self,
                time=next_timer,
                count=count,
                callback=self._callback,
            )
            self.target.post_message(event)
Timer
python
PrefectHQ__prefect
src/prefect/server/database/dependencies.py
{ "start": 1315, "end": 6329 }
class ____(TypedDict):
    database_config: Optional[BaseDatabaseConfiguration]
    query_components: Optional["BaseQueryComponents"]
    orm: Optional["BaseORMConfiguration"]
    interface_class: Optional[type["PrefectDBInterface"]]


MODELS_DEPENDENCIES: _ModelDependencies = {
    "database_config": None,
    "query_components": None,
    "orm": None,
    "interface_class": None,
}


def provide_database_interface() -> "PrefectDBInterface":
    """
    Get the current Prefect REST API database interface.

    If components of the interface are not set, defaults will be inferred
    based on the dialect of the connection URL.
    """
    from prefect.server.database.interface import PrefectDBInterface
    from prefect.server.database.orm_models import (
        AioSqliteORMConfiguration,
        AsyncPostgresORMConfiguration,
    )
    from prefect.server.database.query_components import (
        AioSqliteQueryComponents,
        AsyncPostgresQueryComponents,
    )

    connection_url = PREFECT_API_DATABASE_CONNECTION_URL.value()

    database_config = MODELS_DEPENDENCIES.get("database_config")
    query_components = MODELS_DEPENDENCIES.get("query_components")
    orm = MODELS_DEPENDENCIES.get("orm")
    interface_class = MODELS_DEPENDENCIES.get("interface_class")
    dialect = get_dialect(connection_url)

    if database_config is None:
        if dialect.name == "postgresql":
            database_config = AsyncPostgresConfiguration(connection_url=connection_url)
        elif dialect.name == "sqlite":
            database_config = AioSqliteConfiguration(connection_url=connection_url)
        else:
            raise ValueError(
                "Unable to infer database configuration from provided dialect. Got"
                f" dialect name {dialect.name!r}"
            )

        MODELS_DEPENDENCIES["database_config"] = database_config

    if query_components is None:
        if dialect.name == "postgresql":
            query_components = AsyncPostgresQueryComponents()
        elif dialect.name == "sqlite":
            query_components = AioSqliteQueryComponents()
        else:
            raise ValueError(
                "Unable to infer query components from provided dialect. Got dialect"
                f" name {dialect.name!r}"
            )

        MODELS_DEPENDENCIES["query_components"] = query_components

    if orm is None:
        if dialect.name == "postgresql":
            orm = AsyncPostgresORMConfiguration()
        elif dialect.name == "sqlite":
            orm = AioSqliteORMConfiguration()
        else:
            raise ValueError(
                "Unable to infer orm configuration from provided dialect. Got dialect"
                f" name {dialect.name!r}"
            )

        MODELS_DEPENDENCIES["orm"] = orm

    if interface_class is None:
        interface_class = PrefectDBInterface

    return interface_class(
        database_config=database_config,
        query_components=query_components,
        orm=orm,
    )


def inject_db(fn: Callable[P, R]) -> Callable[P, R]:
    """
    Decorator that provides a database interface to a function.

    The decorated function _must_ take a `db` kwarg and if a db is passed
    when called it will be used instead of creating a new one.
    """

    # NOTE: this wrapper will not pass a iscoroutinefunction() check
    # unless the caller first uses inspect.unwrap()
    # or we start using inspect.markcoroutinefunction() (Python 3.12)
    # In the past this has only been an issue when @inject_db
    # was being used in tests.
    #
    # If this becomes an issue again in future, use the @db_injector decorator
    # instead.
    @wraps(fn)
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        if "db" not in kwargs or kwargs["db"] is None:
            kwargs["db"] = provide_database_interface()

        return fn(*args, **kwargs)

    return wrapper


@overload
def db_injector(func: _DBMethod[T, P, R]) -> _Method[T, P, R]: ...


@overload
def db_injector(func: _DBFunction[P, R]) -> _Function[P, R]: ...


def db_injector(
    func: Union[_DBMethod[T, P, R], _DBFunction[P, R]],
) -> Union[_Method[T, P, R], _Function[P, R]]:
    """
    Decorator to inject a PrefectDBInterface instance as the first positional
    argument to the decorated function.

    Unlike `inject_db`, which injects the database connection as a keyword
    argument, `db_injector` adds it explicitly as the first positional
    argument. This change enhances type hinting by making the dependency on
    PrefectDBInterface explicit in the function signature.

    When decorating a coroutine function, the result will continue to pass the
    iscoroutinefunction() test.

    Args:
        func: The function or method to decorate.

    Returns:
        A wrapped descriptor object which injects the PrefectDBInterface
        instance as the first argument to the function or method. This handles
        method binding transparently.
    """
    return DBInjector(func)
_ModelDependencies
python
viewflow__viewflow
viewflow/workflow/flow/views/create.py
{ "start": 1238, "end": 1736 }
class ____(
    FormLayoutMixin,
    FormAjaxCompleteMixin,
    FormDependentSelectMixin,
    mixins.SuccessMessageMixin,
    mixins.TaskSuccessUrlMixin,
    mixins.TaskViewTemplateNames,
    generic.CreateView,
):
    template_filename = "start.html"

    def form_valid(self, form):
        self.object = form.save()
        self.request.activation.process.artifact = self.object
        self.request.activation.execute()
        return HttpResponseRedirect(self.get_success_url())
CreateArtifactView
python
plotly__plotly.py
plotly/graph_objs/treemap/marker/colorbar/title/_font.py
{ "start": 233, "end": 9949 }
class ____(_BaseTraceHierarchyType):
    _parent_path_str = "treemap.marker.colorbar.title"
    _path_str = "treemap.marker.colorbar.title.font"
    _valid_props = {
        "color",
        "family",
        "lineposition",
        "shadow",
        "size",
        "style",
        "textcase",
        "variant",
        "weight",
    }

    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color: see https://plotly.com/python/css-colors/ for a list

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser can only apply a font if it is available
        on the system where it runs. Provide multiple font families,
        separated by commas, to indicate the order in which to apply fonts
        if they aren't available.

        The 'family' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    @property
    def lineposition(self):
        """
        Sets the kind of decoration line(s) with text, such as an "under",
        "over" or "through" as well as combinations e.g. "under+over", etc.

        The 'lineposition' property is a flaglist and may be specified
        as a string containing:
          - Any combination of ['under', 'over', 'through'] joined with '+' characters
            (e.g. 'under+over')
            OR exactly one of ['none'] (e.g. 'none')

        Returns
        -------
        Any
        """
        return self["lineposition"]

    @lineposition.setter
    def lineposition(self, val):
        self["lineposition"] = val

    @property
    def shadow(self):
        """
        Sets the shape and color of the shadow behind text. "auto" places
        minimal shadow and applies contrast text font color. See
        https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow for
        additional options.

        The 'shadow' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["shadow"]

    @shadow.setter
    def shadow(self, val):
        self["shadow"] = val

    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    @property
    def style(self):
        """
        Sets whether a font should be styled with a normal or italic face
        from its family.

        The 'style' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'italic']

        Returns
        -------
        Any
        """
        return self["style"]

    @style.setter
    def style(self, val):
        self["style"] = val

    @property
    def textcase(self):
        """
        Sets capitalization of text. It can be used to make text appear in
        all-uppercase or all-lowercase, or with each word capitalized.

        The 'textcase' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'word caps', 'upper', 'lower']

        Returns
        -------
        Any
        """
        return self["textcase"]

    @textcase.setter
    def textcase(self, val):
        self["textcase"] = val

    @property
    def variant(self):
        """
        Sets the variant of the font.

        The 'variant' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'small-caps', 'all-small-caps', 'all-petite-caps',
                'petite-caps', 'unicase']

        Returns
        -------
        Any
        """
        return self["variant"]

    @variant.setter
    def variant(self, val):
        self["variant"] = val

    @property
    def weight(self):
        """
        Sets the weight (or boldness) of the font.

        The 'weight' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [1, 1000]
            OR exactly one of ['normal', 'bold'] (e.g. 'bold')

        Returns
        -------
        int
        """
        return self["weight"]

    @weight.setter
    def weight(self, val):
        self["weight"] = val

    @property
    def _prop_descriptions(self):
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser can only apply a font if
            it is available on the system where it runs. Provide
            multiple font families, separated by commas, to indicate
            the order in which to apply fonts if they aren't
            available.
        lineposition
            Sets the kind of decoration line(s) with text, such as an
            "under", "over" or "through" as well as combinations e.g.
            "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size

        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make text
            appear in all-uppercase or all-lowercase, or with each
            word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        family=None,
        lineposition=None,
        shadow=None,
        size=None,
        style=None,
        textcase=None,
        variant=None,
        weight=None,
        **kwargs,
    ):
        """
        Construct a new Font object

        Sets this color bar's title font.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.treemap.marker
            .colorbar.title.Font`
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser can only apply a font if
            it is available on the system where it runs. Provide
            multiple font families, separated by commas, to indicate
            the order in which to apply fonts if they aren't
            available.
        lineposition
            Sets the kind of decoration line(s) with text, such as an
            "under", "over" or "through" as well as combinations e.g.
            "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size

        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make text
            appear in all-uppercase or all-lowercase, or with each
            word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.

        Returns
        -------
        Font
        """
        super().__init__("font")

        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.treemap.marker.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.marker.colorbar.title.Font`""")

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        self._set_property("color", arg, color)
        self._set_property("family", arg, family)
        self._set_property("lineposition", arg, lineposition)
        self._set_property("shadow", arg, shadow)
        self._set_property("size", arg, size)
        self._set_property("style", arg, style)
        self._set_property("textcase", arg, textcase)
        self._set_property("variant", arg, variant)
        self._set_property("weight", arg, weight)

        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
Font
python
modin-project__modin
asv_bench/benchmarks/io/csv.py
{ "start": 2721, "end": 5065 }
class ____:
    shapes = get_benchmark_shapes("TimeReadCsvNamesDtype")
    _dtypes_params = ["Int64", "Int64_Timestamp"]
    _timestamp_columns = ["col1", "col2"]

    param_names = ["shape", "names", "dtype"]
    params = [
        shapes,
        ["array-like"],
        _dtypes_params,
    ]

    def _get_file_id(self, shape, dtype):
        return get_shape_id(shape) + dtype

    def _add_timestamp_columns(self, df):
        df = df.copy()
        date_column = IMPL.date_range("2000", periods=df.shape[0], freq="ms")
        for col in self._timestamp_columns:
            df[col] = date_column
        return df

    def setup_cache(self, test_filename="io_test_file_csv_names_dtype"):
        # filenames with a metadata of saved dataframes
        cache = {}
        for shape in self.shapes:
            for dtype in self._dtypes_params:
                df = generate_dataframe(
                    "int", *shape, RAND_LOW, RAND_HIGH, impl="pandas"
                )
                if dtype == "Int64_Timestamp":
                    df = self._add_timestamp_columns(df)

                file_id = self._get_file_id(shape, dtype)
                cache[file_id] = (
                    f"{test_filename}_{file_id}.csv",
                    df.columns.to_list(),
                    df.dtypes.to_dict(),
                )
                df.to_csv(cache[file_id][0], index=False)
        return cache

    def setup(self, cache, shape, names, dtype):
        # ray init
        if ASV_USE_IMPL == "modin":
            IMPL.DataFrame([])

        file_id = self._get_file_id(shape, dtype)
        self.filename, self.names, self.dtype = cache[file_id]

        self.parse_dates = None
        if dtype == "Int64_Timestamp":
            # cached version of dtype should not change
            self.dtype = self.dtype.copy()
            for col in self._timestamp_columns:
                del self.dtype[col]
            self.parse_dates = self._timestamp_columns

    def time_read_csv_names_dtype(self, cache, shape, names, dtype):
        execute(
            IMPL.read_csv(
                self.filename,
                names=self.names,
                header=0,
                dtype=self.dtype,
                parse_dates=self.parse_dates,
            )
        )


from ..utils import setup  # noqa: E402, F401
TimeReadCsvNamesDtype
python
google__jax
tests/sourcemap_test.py
{ "start": 739, "end": 2366 }
class ____(jtu.JaxTestCase):

  @parameterized.parameters(
      (0,),
      (1,),
      (2,),
      (3,),
      (4,),
      (5,),
      (-1,),
      (-2,),
      (-3,),
      (-4,),
      (123,),
      (456,),
      (1024,),
      (1025,),
      (2**16,),
      (2**31 - 1,),
  )
  def test_roundtrip_vlq(self, value):
    actual = sourcemap.decode_vlq(sourcemap.encode_vlq(value))
    self.assertEqual(actual, value)

  @parameterized.parameters(
      (b"A",),
      (b"C",),
      (b"AAAA",),
      (b"ACDE",),
      (b"AACAA",),
  )
  def test_roundtrip_segment(self, enc):
    actual = sourcemap.encode_segment(sourcemap.decode_segment(enc))
    self.assertEqual(actual, enc)

  def test_roundtrip_sourcemap_json(self):
    data = {
        "version": 3,
        # "file": "out.js",
        # "sourceRoot": "",
        "sources": ["foo.js", "bar.js"],
        "sourcesContent": [None, None],
        "names": ["src", "maps", "are", "fun"],
        "mappings": "A,AAAC;;AACDE",
    }
    json_data = json.dumps(data)
    json_data_roundtripped = sourcemap.SourceMap.from_json(json_data).to_json()
    self.assertEqual(json.loads(json_data_roundtripped), data)

  def test_generate_mappings(self):
    expected = "A,AAAC;;AACDE"
    gen = sourcemap.MappingsGenerator()
    # A
    gen.new_group()
    gen.new_segment(0)
    # ,AAAC
    gen.new_segment(0, 0, 0, 1)
    # ;
    gen.new_group()
    # ;AACDE
    gen.new_group()
    gen.new_segment(0, 0, 1, 0, 2)
    self.assertEqual(sourcemap.serialize_mappings(gen.mappings()), expected)


if __name__ == "__main__":
  absltest.main(testLoader=jtu.JaxTestLoader())
SourceMapTest
python
huggingface__transformers
src/transformers/models/qwen2/modeling_qwen2.py
{ "start": 11909, "end": 12679 }
class ____(nn.Module):
    def __init__(self, hidden_size, eps: float = 1e-6) -> None:
        """
        Qwen2RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
Qwen2RMSNorm