language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
gevent__gevent
src/gevent/testing/testrunner.py
{ "start": 8497, "end": 9284 }
class ____(object): def __init__(self, runner, travis_fold_msg): self._runner = runner self._travis_fold_msg = travis_fold_msg self._travis_fold_name = str(int(util.perf_counter())) # A zope-style acquisition proxy would be convenient here. run_tests = runner._run_tests def _run_tests(): self._begin_fold() try: run_tests() finally: self._end_fold() runner._run_tests = _run_tests def _begin_fold(self): travis.fold_start(self._travis_fold_name, self._travis_fold_msg) def _end_fold(self): travis.fold_end(self._travis_fold_name) def __call__(self): return self._runner()
TravisFoldingRunner
python
pytorch__pytorch
torch/_functorch/_aot_autograd/descriptors.py
{ "start": 22676, "end": 23141 }
class ____(DifferentiableAOTOutput): """An intermediate base of multiple outputs which alias each other. We only report ONE of the outputs that contributed to this base""" base_of: "AOTOutput" def expr(self) -> str: return f"__intermediate_base({self.base_of.expr()})" # TODO: it's a little dodgy this is differentiable lol, but we do generate # these BEFORE autograd is handled @dataclasses.dataclass(frozen=True)
IntermediateBaseAOTOutput
python
pypa__pip
tests/unit/test_base_command.py
{ "start": 873, "end": 1613 }
class ____(Command): _name = "fake" def __init__( self, run_func: Callable[[], int] | None = None, error: bool = False ) -> None: if error: def run_func() -> int: raise SystemExit(1) self.run_func = run_func super().__init__(self._name, self._name) def main(self, args: list[str]) -> int: args.append("--disable-pip-version-check") return super().main(args) def run(self, options: Values, args: list[str]) -> int: logging.getLogger("pip.tests").info("fake") # Return SUCCESS from run if run_func is not provided if self.run_func: return self.run_func() else: return SUCCESS
FakeCommand
python
python-openxml__python-docx
tests/opc/test_pkgwriter.py
{ "start": 5041, "end": 7218 }
class ____: def it_can_compose_content_types_element(self, xml_for_fixture): cti, expected_xml = xml_for_fixture types_elm = cti._element assert types_elm.xml == expected_xml # fixtures --------------------------------------------- def _mock_part(self, request: FixtureRequest, name, partname_str, content_type): partname = PackURI(partname_str) return instance_mock(request, Part, name=name, partname=partname, content_type=content_type) @pytest.fixture( params=[ ("Default", "/ppt/MEDIA/image.PNG", CT.PNG), ("Default", "/ppt/media/image.xml", CT.XML), ("Default", "/ppt/media/image.rels", CT.OPC_RELATIONSHIPS), ("Default", "/ppt/media/image.jpeg", CT.JPEG), ("Override", "/docProps/core.xml", "app/vnd.core"), ("Override", "/ppt/slides/slide1.xml", "app/vnd.ct_sld"), ("Override", "/zebra/foo.bar", "app/vnd.foobar"), ] ) def xml_for_fixture(self, request: FixtureRequest): elm_type, partname_str, content_type = request.param part_ = self._mock_part(request, "part_", partname_str, content_type) cti = _ContentTypesItem.from_parts([part_]) # expected_xml ----------------- types_bldr = a_Types().with_nsdecls() ext = partname_str.split(".")[-1].lower() if elm_type == "Default" and ext not in ("rels", "xml"): default_bldr = a_Default() default_bldr.with_Extension(ext) default_bldr.with_ContentType(content_type) types_bldr.with_child(default_bldr) types_bldr.with_child( a_Default().with_Extension("rels").with_ContentType(CT.OPC_RELATIONSHIPS) ) types_bldr.with_child(a_Default().with_Extension("xml").with_ContentType(CT.XML)) if elm_type == "Override": override_bldr = an_Override() override_bldr.with_PartName(partname_str) override_bldr.with_ContentType(content_type) types_bldr.with_child(override_bldr) expected_xml = types_bldr.xml() return cti, expected_xml
Describe_ContentTypesItem
python
sympy__sympy
sympy/geometry/ellipse.py
{ "start": 42460, "end": 50305 }
class ____(Ellipse): r"""A circle in space. Constructed simply from a center and a radius, from three non-collinear points, or the equation of a circle. Parameters ========== center : Point radius : number or SymPy expression points : sequence of three Points equation : equation of a circle Attributes ========== radius (synonymous with hradius, vradius, major and minor) circumference equation Raises ====== GeometryError When the given equation is not that of a circle. When trying to construct circle from incorrect parameters. See Also ======== Ellipse, sympy.geometry.point.Point Examples ======== >>> from sympy import Point, Circle, Eq >>> from sympy.abc import x, y, a, b A circle constructed from a center and radius: >>> c1 = Circle(Point(0, 0), 5) >>> c1.hradius, c1.vradius, c1.radius (5, 5, 5) A circle constructed from three points: >>> c2 = Circle(Point(0, 0), Point(1, 1), Point(1, 0)) >>> c2.hradius, c2.vradius, c2.radius, c2.center (sqrt(2)/2, sqrt(2)/2, sqrt(2)/2, Point2D(1/2, 1/2)) A circle can be constructed from an equation in the form `ax^2 + by^2 + gx + hy + c = 0`, too: >>> Circle(x**2 + y**2 - 25) Circle(Point2D(0, 0), 5) If the variables corresponding to x and y are named something else, their name or symbol can be supplied: >>> Circle(Eq(a**2 + b**2, 25), x='a', y=b) Circle(Point2D(0, 0), 5) """ def __new__(cls, *args, **kwargs): evaluate = kwargs.get('evaluate', global_parameters.evaluate) if len(args) == 1 and isinstance(args[0], (Expr, Eq)): x = kwargs.get('x', 'x') y = kwargs.get('y', 'y') equation = args[0].expand() if isinstance(equation, Eq): equation = equation.lhs - equation.rhs x = find(x, equation) y = find(y, equation) try: a, b, c, d, e = linear_coeffs(equation, x**2, y**2, x, y) except ValueError: raise GeometryError("The given equation is not that of a circle.") if S.Zero in (a, b) or a != b: raise GeometryError("The given equation is not that of a circle.") center_x = -c/a/2 center_y = -d/b/2 r2 = (center_x**2) + (center_y**2) - e/a 
return Circle((center_x, center_y), sqrt(r2), evaluate=evaluate) else: c, r = None, None if len(args) == 3: args = [Point(a, dim=2, evaluate=evaluate) for a in args] t = Triangle(*args) if not isinstance(t, Triangle): return t c = t.circumcenter r = t.circumradius elif len(args) == 2: # Assume (center, radius) pair c = Point(args[0], dim=2, evaluate=evaluate) r = args[1] # this will prohibit imaginary radius try: r = Point(r, 0, evaluate=evaluate).x except ValueError: raise GeometryError("Circle with imaginary radius is not permitted") if not (c is None or r is None): if r == 0: return c return GeometryEntity.__new__(cls, c, r, **kwargs) raise GeometryError("Circle.__new__ received unknown arguments") def _eval_evalf(self, prec=15, **options): pt, r = self.args dps = prec_to_dps(prec) pt = pt.evalf(n=dps, **options) r = r.evalf(n=dps, **options) return self.func(pt, r, evaluate=False) @property def circumference(self): """The circumference of the circle. Returns ======= circumference : number or SymPy expression Examples ======== >>> from sympy import Point, Circle >>> c1 = Circle(Point(3, 4), 6) >>> c1.circumference 12*pi """ return 2 * S.Pi * self.radius def equation(self, x='x', y='y'): """The equation of the circle. Parameters ========== x : str or Symbol, optional Default value is 'x'. y : str or Symbol, optional Default value is 'y'. Returns ======= equation : SymPy expression Examples ======== >>> from sympy import Point, Circle >>> c1 = Circle(Point(0, 0), 5) >>> c1.equation() x**2 + y**2 - 25 """ x = _symbol(x, real=True) y = _symbol(y, real=True) t1 = (x - self.center.x)**2 t2 = (y - self.center.y)**2 return t1 + t2 - self.major**2 def intersection(self, o): """The intersection of this circle with another geometrical entity. 
Parameters ========== o : GeometryEntity Returns ======= intersection : list of GeometryEntities Examples ======== >>> from sympy import Point, Circle, Line, Ray >>> p1, p2, p3 = Point(0, 0), Point(5, 5), Point(6, 0) >>> p4 = Point(5, 0) >>> c1 = Circle(p1, 5) >>> c1.intersection(p2) [] >>> c1.intersection(p4) [Point2D(5, 0)] >>> c1.intersection(Ray(p1, p2)) [Point2D(5*sqrt(2)/2, 5*sqrt(2)/2)] >>> c1.intersection(Line(p2, p3)) [] """ return Ellipse.intersection(self, o) @property def radius(self): """The radius of the circle. Returns ======= radius : number or SymPy expression See Also ======== Ellipse.major, Ellipse.minor, Ellipse.hradius, Ellipse.vradius Examples ======== >>> from sympy import Point, Circle >>> c1 = Circle(Point(3, 4), 6) >>> c1.radius 6 """ return self.args[1] def reflect(self, line): """Override GeometryEntity.reflect since the radius is not a GeometryEntity. Examples ======== >>> from sympy import Circle, Line >>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1))) Circle(Point2D(1, 0), -1) """ c = self.center c = c.reflect(line) return self.func(c, -self.radius) def scale(self, x=1, y=1, pt=None): """Override GeometryEntity.scale since the radius is not a GeometryEntity. Examples ======== >>> from sympy import Circle >>> Circle((0, 0), 1).scale(2, 2) Circle(Point2D(0, 0), 2) >>> Circle((0, 0), 1).scale(2, 4) Ellipse(Point2D(0, 0), 2, 4) """ c = self.center if pt: pt = Point(pt, dim=2) return self.translate(*(-pt).args).scale(x, y).translate(*pt.args) c = c.scale(x, y) x, y = [abs(i) for i in (x, y)] if x == y: return self.func(c, x*self.radius) h = v = self.radius return Ellipse(c, hradius=h*x, vradius=v*y) @property def vradius(self): """ This Ellipse property is an alias for the Circle's radius. Whereas hradius, major and minor can use Ellipse's conventions, the vradius does not exist for a circle. 
It is always a positive value in order that the Circle, like Polygons, will have an area that can be positive or negative as determined by the sign of the hradius. Examples ======== >>> from sympy import Point, Circle >>> c1 = Circle(Point(3, 4), 6) >>> c1.vradius 6 """ return abs(self.radius) from .polygon import Polygon, Triangle
Circle
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-pgvector/destination_pgvector/config.py
{ "start": 183, "end": 522 }
class ____(BaseModel): password: str = Field( ..., title="Password", airbyte_secret=True, description="Enter the password you want to use to access the database", examples=["AIRBYTE_PASSWORD"], order=7, ) class Config: title = "Credentials"
PasswordBasedAuthorizationModel
python
plotly__plotly.py
plotly/graph_objs/parcoords/unselected/_line.py
{ "start": 233, "end": 3648 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "parcoords.unselected" _path_str = "parcoords.unselected.line" _valid_props = {"color", "opacity"} @property def color(self): """ Sets the base color of unselected lines. in connection with `unselected.line.opacity`. The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def opacity(self): """ Sets the opacity of unselected lines. The default "auto" decreases the opacity smoothly as the number of lines increases. Use 1 to achieve exact `unselected.line.color`. The 'opacity' property is a number and may be specified as: - An int or float in the interval [0, 1] Returns ------- int|float """ return self["opacity"] @opacity.setter def opacity(self, val): self["opacity"] = val @property def _prop_descriptions(self): return """\ color Sets the base color of unselected lines. in connection with `unselected.line.opacity`. opacity Sets the opacity of unselected lines. The default "auto" decreases the opacity smoothly as the number of lines increases. Use 1 to achieve exact `unselected.line.color`. """ def __init__(self, arg=None, color=None, opacity=None, **kwargs): """ Construct a new Line object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.parcoords.unselected.Line` color Sets the base color of unselected lines. in connection with `unselected.line.opacity`. opacity Sets the opacity of unselected lines. The default "auto" decreases the opacity smoothly as the number of lines increases. Use 1 to achieve exact `unselected.line.color`. 
Returns ------- Line """ super().__init__("line") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.parcoords.unselected.Line constructor must be a dict or an instance of :class:`plotly.graph_objs.parcoords.unselected.Line`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("opacity", arg, opacity) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Line
python
ansible__ansible
lib/ansible/errors/__init__.py
{ "start": 9490, "end": 9635 }
class ____(AnsibleTemplateError): """A syntax error was encountered while parsing a Jinja template or expression."""
AnsibleTemplateSyntaxError
python
huggingface__transformers
src/transformers/models/align/processing_align.py
{ "start": 718, "end": 975 }
class ____(ProcessingKwargs, total=False): # see processing_utils.ProcessingKwargs documentation for usage. _defaults = { "text_kwargs": { "padding": "max_length", "max_length": 64, }, }
AlignProcessorKwargs
python
apache__airflow
airflow-core/tests/unit/plugins/test_plugin.py
{ "start": 5790, "end": 6225 }
class ____(AirflowPlugin): name = "preload" def on_load(self, *args, **kwargs): self.name = "postload" # Example external view with invalid destination external_view_with_invalid_destination = { "name": "Invalid External View", "href": "https://example.com/invalid", "url_route": "invalid_external_view", "destination": "Assets", # <-- invalid destination "icon": "book", }
AirflowTestOnLoadPlugin
python
django-extensions__django-extensions
tests/testapp/models.py
{ "start": 465, "end": 670 }
class ____(models.Model): name = models.CharField(blank=True, max_length=255, null=True) text = models.TextField(blank=True, null=True) class Meta: app_label = "django_extensions"
Secret
python
kamyu104__LeetCode-Solutions
Python/largest-unique-number.py
{ "start": 50, "end": 278 }
class ____(object): def largestUniqueNumber(self, A): """ :type A: List[int] :rtype: int """ A.append(-1) return max(k for k,v in collections.Counter(A).items() if v == 1)
Solution
python
scipy__scipy
scipy/io/wavfile.py
{ "start": 2115, "end": 30691 }
class ____(IntEnum): """ WAVE form wFormatTag IDs Complete list is in mmreg.h in Windows 10 SDK. ALAC and OPUS are the newest additions, in v10.0.14393 2016-07 """ UNKNOWN = 0x0000 PCM = 0x0001 ADPCM = 0x0002 IEEE_FLOAT = 0x0003 VSELP = 0x0004 IBM_CVSD = 0x0005 ALAW = 0x0006 MULAW = 0x0007 DTS = 0x0008 DRM = 0x0009 WMAVOICE9 = 0x000A WMAVOICE10 = 0x000B OKI_ADPCM = 0x0010 DVI_ADPCM = 0x0011 IMA_ADPCM = 0x0011 # Duplicate MEDIASPACE_ADPCM = 0x0012 SIERRA_ADPCM = 0x0013 G723_ADPCM = 0x0014 DIGISTD = 0x0015 DIGIFIX = 0x0016 DIALOGIC_OKI_ADPCM = 0x0017 MEDIAVISION_ADPCM = 0x0018 CU_CODEC = 0x0019 HP_DYN_VOICE = 0x001A YAMAHA_ADPCM = 0x0020 SONARC = 0x0021 DSPGROUP_TRUESPEECH = 0x0022 ECHOSC1 = 0x0023 AUDIOFILE_AF36 = 0x0024 APTX = 0x0025 AUDIOFILE_AF10 = 0x0026 PROSODY_1612 = 0x0027 LRC = 0x0028 DOLBY_AC2 = 0x0030 GSM610 = 0x0031 MSNAUDIO = 0x0032 ANTEX_ADPCME = 0x0033 CONTROL_RES_VQLPC = 0x0034 DIGIREAL = 0x0035 DIGIADPCM = 0x0036 CONTROL_RES_CR10 = 0x0037 NMS_VBXADPCM = 0x0038 CS_IMAADPCM = 0x0039 ECHOSC3 = 0x003A ROCKWELL_ADPCM = 0x003B ROCKWELL_DIGITALK = 0x003C XEBEC = 0x003D G721_ADPCM = 0x0040 G728_CELP = 0x0041 MSG723 = 0x0042 INTEL_G723_1 = 0x0043 INTEL_G729 = 0x0044 SHARP_G726 = 0x0045 MPEG = 0x0050 RT24 = 0x0052 PAC = 0x0053 MPEGLAYER3 = 0x0055 LUCENT_G723 = 0x0059 CIRRUS = 0x0060 ESPCM = 0x0061 VOXWARE = 0x0062 CANOPUS_ATRAC = 0x0063 G726_ADPCM = 0x0064 G722_ADPCM = 0x0065 DSAT = 0x0066 DSAT_DISPLAY = 0x0067 VOXWARE_BYTE_ALIGNED = 0x0069 VOXWARE_AC8 = 0x0070 VOXWARE_AC10 = 0x0071 VOXWARE_AC16 = 0x0072 VOXWARE_AC20 = 0x0073 VOXWARE_RT24 = 0x0074 VOXWARE_RT29 = 0x0075 VOXWARE_RT29HW = 0x0076 VOXWARE_VR12 = 0x0077 VOXWARE_VR18 = 0x0078 VOXWARE_TQ40 = 0x0079 VOXWARE_SC3 = 0x007A VOXWARE_SC3_1 = 0x007B SOFTSOUND = 0x0080 VOXWARE_TQ60 = 0x0081 MSRT24 = 0x0082 G729A = 0x0083 MVI_MVI2 = 0x0084 DF_G726 = 0x0085 DF_GSM610 = 0x0086 ISIAUDIO = 0x0088 ONLIVE = 0x0089 MULTITUDE_FT_SX20 = 0x008A INFOCOM_ITS_G721_ADPCM = 0x008B CONVEDIA_G729 = 0x008C CONGRUENCY = 0x008D 
SBC24 = 0x0091 DOLBY_AC3_SPDIF = 0x0092 MEDIASONIC_G723 = 0x0093 PROSODY_8KBPS = 0x0094 ZYXEL_ADPCM = 0x0097 PHILIPS_LPCBB = 0x0098 PACKED = 0x0099 MALDEN_PHONYTALK = 0x00A0 RACAL_RECORDER_GSM = 0x00A1 RACAL_RECORDER_G720_A = 0x00A2 RACAL_RECORDER_G723_1 = 0x00A3 RACAL_RECORDER_TETRA_ACELP = 0x00A4 NEC_AAC = 0x00B0 RAW_AAC1 = 0x00FF RHETOREX_ADPCM = 0x0100 IRAT = 0x0101 VIVO_G723 = 0x0111 VIVO_SIREN = 0x0112 PHILIPS_CELP = 0x0120 PHILIPS_GRUNDIG = 0x0121 DIGITAL_G723 = 0x0123 SANYO_LD_ADPCM = 0x0125 SIPROLAB_ACEPLNET = 0x0130 SIPROLAB_ACELP4800 = 0x0131 SIPROLAB_ACELP8V3 = 0x0132 SIPROLAB_G729 = 0x0133 SIPROLAB_G729A = 0x0134 SIPROLAB_KELVIN = 0x0135 VOICEAGE_AMR = 0x0136 G726ADPCM = 0x0140 DICTAPHONE_CELP68 = 0x0141 DICTAPHONE_CELP54 = 0x0142 QUALCOMM_PUREVOICE = 0x0150 QUALCOMM_HALFRATE = 0x0151 TUBGSM = 0x0155 MSAUDIO1 = 0x0160 WMAUDIO2 = 0x0161 WMAUDIO3 = 0x0162 WMAUDIO_LOSSLESS = 0x0163 WMASPDIF = 0x0164 UNISYS_NAP_ADPCM = 0x0170 UNISYS_NAP_ULAW = 0x0171 UNISYS_NAP_ALAW = 0x0172 UNISYS_NAP_16K = 0x0173 SYCOM_ACM_SYC008 = 0x0174 SYCOM_ACM_SYC701_G726L = 0x0175 SYCOM_ACM_SYC701_CELP54 = 0x0176 SYCOM_ACM_SYC701_CELP68 = 0x0177 KNOWLEDGE_ADVENTURE_ADPCM = 0x0178 FRAUNHOFER_IIS_MPEG2_AAC = 0x0180 DTS_DS = 0x0190 CREATIVE_ADPCM = 0x0200 CREATIVE_FASTSPEECH8 = 0x0202 CREATIVE_FASTSPEECH10 = 0x0203 UHER_ADPCM = 0x0210 ULEAD_DV_AUDIO = 0x0215 ULEAD_DV_AUDIO_1 = 0x0216 QUARTERDECK = 0x0220 ILINK_VC = 0x0230 RAW_SPORT = 0x0240 ESST_AC3 = 0x0241 GENERIC_PASSTHRU = 0x0249 IPI_HSX = 0x0250 IPI_RPELP = 0x0251 CS2 = 0x0260 SONY_SCX = 0x0270 SONY_SCY = 0x0271 SONY_ATRAC3 = 0x0272 SONY_SPC = 0x0273 TELUM_AUDIO = 0x0280 TELUM_IA_AUDIO = 0x0281 NORCOM_VOICE_SYSTEMS_ADPCM = 0x0285 FM_TOWNS_SND = 0x0300 MICRONAS = 0x0350 MICRONAS_CELP833 = 0x0351 BTV_DIGITAL = 0x0400 INTEL_MUSIC_CODER = 0x0401 INDEO_AUDIO = 0x0402 QDESIGN_MUSIC = 0x0450 ON2_VP7_AUDIO = 0x0500 ON2_VP6_AUDIO = 0x0501 VME_VMPCM = 0x0680 TPC = 0x0681 LIGHTWAVE_LOSSLESS = 0x08AE OLIGSM = 0x1000 OLIADPCM = 0x1001 OLICELP 
= 0x1002 OLISBC = 0x1003 OLIOPR = 0x1004 LH_CODEC = 0x1100 LH_CODEC_CELP = 0x1101 LH_CODEC_SBC8 = 0x1102 LH_CODEC_SBC12 = 0x1103 LH_CODEC_SBC16 = 0x1104 NORRIS = 0x1400 ISIAUDIO_2 = 0x1401 SOUNDSPACE_MUSICOMPRESS = 0x1500 MPEG_ADTS_AAC = 0x1600 MPEG_RAW_AAC = 0x1601 MPEG_LOAS = 0x1602 NOKIA_MPEG_ADTS_AAC = 0x1608 NOKIA_MPEG_RAW_AAC = 0x1609 VODAFONE_MPEG_ADTS_AAC = 0x160A VODAFONE_MPEG_RAW_AAC = 0x160B MPEG_HEAAC = 0x1610 VOXWARE_RT24_SPEECH = 0x181C SONICFOUNDRY_LOSSLESS = 0x1971 INNINGS_TELECOM_ADPCM = 0x1979 LUCENT_SX8300P = 0x1C07 LUCENT_SX5363S = 0x1C0C CUSEEME = 0x1F03 NTCSOFT_ALF2CM_ACM = 0x1FC4 DVM = 0x2000 DTS2 = 0x2001 MAKEAVIS = 0x3313 DIVIO_MPEG4_AAC = 0x4143 NOKIA_ADAPTIVE_MULTIRATE = 0x4201 DIVIO_G726 = 0x4243 LEAD_SPEECH = 0x434C LEAD_VORBIS = 0x564C WAVPACK_AUDIO = 0x5756 OGG_VORBIS_MODE_1 = 0x674F OGG_VORBIS_MODE_2 = 0x6750 OGG_VORBIS_MODE_3 = 0x6751 OGG_VORBIS_MODE_1_PLUS = 0x676F OGG_VORBIS_MODE_2_PLUS = 0x6770 OGG_VORBIS_MODE_3_PLUS = 0x6771 ALAC = 0x6C61 _3COM_NBX = 0x7000 # Can't have leading digit OPUS = 0x704F FAAD_AAC = 0x706D AMR_NB = 0x7361 AMR_WB = 0x7362 AMR_WP = 0x7363 GSM_AMR_CBR = 0x7A21 GSM_AMR_VBR_SID = 0x7A22 COMVERSE_INFOSYS_G723_1 = 0xA100 COMVERSE_INFOSYS_AVQSBC = 0xA101 COMVERSE_INFOSYS_SBC = 0xA102 SYMBOL_G729_A = 0xA103 VOICEAGE_AMR_WB = 0xA104 INGENIENT_G726 = 0xA105 MPEG4_AAC = 0xA106 ENCORE_G726 = 0xA107 ZOLL_ASAO = 0xA108 SPEEX_VOICE = 0xA109 VIANIX_MASC = 0xA10A WM9_SPECTRUM_ANALYZER = 0xA10B WMF_SPECTRUM_ANAYZER = 0xA10C GSM_610 = 0xA10D GSM_620 = 0xA10E GSM_660 = 0xA10F GSM_690 = 0xA110 GSM_ADAPTIVE_MULTIRATE_WB = 0xA111 POLYCOM_G722 = 0xA112 POLYCOM_G728 = 0xA113 POLYCOM_G729_A = 0xA114 POLYCOM_SIREN = 0xA115 GLOBAL_IP_ILBC = 0xA116 RADIOTIME_TIME_SHIFT_RADIO = 0xA117 NICE_ACA = 0xA118 NICE_ADPCM = 0xA119 VOCORD_G721 = 0xA11A VOCORD_G726 = 0xA11B VOCORD_G722_1 = 0xA11C VOCORD_G728 = 0xA11D VOCORD_G729 = 0xA11E VOCORD_G729_A = 0xA11F VOCORD_G723_1 = 0xA120 VOCORD_LBC = 0xA121 NICE_G728 = 0xA122 FRACE_TELECOM_G729 = 
0xA123 CODIAN = 0xA124 FLAC = 0xF1AC EXTENSIBLE = 0xFFFE DEVELOPMENT = 0xFFFF KNOWN_WAVE_FORMATS = {WAVE_FORMAT.PCM, WAVE_FORMAT.IEEE_FLOAT} def _raise_bad_format(format_tag): try: format_name = WAVE_FORMAT(format_tag).name except ValueError: format_name = f'{format_tag:#06x}' raise ValueError(f"Unknown wave file format: {format_name}. Supported " "formats: " + ', '.join(x.name for x in KNOWN_WAVE_FORMATS)) def _read_fmt_chunk(fid, is_big_endian): """ Returns ------- size : int size of format subchunk in bytes (minus 8 for "fmt " and itself) format_tag : int PCM, float, or compressed format channels : int number of channels fs : int sampling frequency in samples per second bytes_per_second : int overall byte rate for the file block_align : int bytes per sample, including all channels bit_depth : int bits per sample Notes ----- Assumes file pointer is immediately after the 'fmt ' id """ if is_big_endian: fmt = '>' else: fmt = '<' size = struct.unpack(fmt+'I', fid.read(4))[0] if size < 16: raise ValueError("Binary structure of wave file is not compliant") res = struct.unpack(fmt+'HHIIHH', fid.read(16)) bytes_read = 16 format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res if format_tag == WAVE_FORMAT.EXTENSIBLE and size >= (16+2): ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0] bytes_read += 2 if ext_chunk_size >= 22: extensible_chunk_data = fid.read(22) bytes_read += 22 raw_guid = extensible_chunk_data[2+4:2+4+16] # GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361) # MS GUID byte order: first three groups are native byte order, # rest is Big Endian if is_big_endian: tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71' else: tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71' if raw_guid.endswith(tail): format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0] else: raise ValueError("Binary structure of wave file is not compliant") if format_tag not in KNOWN_WAVE_FORMATS: _raise_bad_format(format_tag) # move file 
pointer to next chunk if size > bytes_read: fid.read(size - bytes_read) # fmt should always be 16, 18 or 40, but handle it just in case _handle_pad_byte(fid, size) if format_tag == WAVE_FORMAT.PCM: if bytes_per_second != fs * block_align: raise ValueError("WAV header is invalid: nAvgBytesPerSec must" " equal product of nSamplesPerSec and" " nBlockAlign, but file has nSamplesPerSec =" f" {fs}, nBlockAlign = {block_align}, and" f" nAvgBytesPerSec = {bytes_per_second}") return (size, format_tag, channels, fs, bytes_per_second, block_align, bit_depth) def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian, is_rf64, block_align, mmap=False, rf64_chunk_size=None): """ Notes ----- Assumes file pointer is immediately after the 'data' id It's possible to not use all available bits in a container, or to store samples in a container bigger than necessary, so bytes_per_sample uses the actual reported container size (nBlockAlign / nChannels). Real-world examples: Adobe Audition's "24-bit packed int (type 1, 20-bit)" nChannels = 2, nBlockAlign = 6, wBitsPerSample = 20 http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Samples/AFsp/M1F1-int12-AFsp.wav is: nChannels = 2, nBlockAlign = 4, wBitsPerSample = 12 http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Docs/multichaudP.pdf gives an example of: nChannels = 2, nBlockAlign = 8, wBitsPerSample = 20 """ if is_big_endian: fmt = '>' else: fmt = '<' # Size of the data subchunk in bytes if not is_rf64: size = struct.unpack(fmt+'I', fid.read(4))[0] else: # chunk size is stored in global file header for RF64 size = rf64_chunk_size # skip data chunk size as it is 0xFFFFFFF fid.read(4) # Number of bytes per sample (sample container size) bytes_per_sample = block_align // channels n_samples = size // bytes_per_sample if format_tag == WAVE_FORMAT.PCM: if 1 <= bit_depth <= 8: dtype = 'u1' # WAV of 8-bit integer or less are unsigned elif bytes_per_sample in {3, 5, 6, 7}: # No compatible dtype. 
Load as raw bytes for reshaping later. dtype = 'V1' elif bit_depth <= 64: # Remaining bit depths can map directly to signed numpy dtypes dtype = f'{fmt}i{bytes_per_sample}' else: raise ValueError("Unsupported bit depth: the WAV file " f"has {bit_depth}-bit integer data.") elif format_tag == WAVE_FORMAT.IEEE_FLOAT: if bit_depth in {32, 64}: dtype = f'{fmt}f{bytes_per_sample}' else: raise ValueError("Unsupported bit depth: the WAV file " f"has {bit_depth}-bit floating-point data.") else: _raise_bad_format(format_tag) start = fid.tell() if not mmap: try: count = size if dtype == 'V1' else n_samples data = np.fromfile(fid, dtype=dtype, count=count) except io.UnsupportedOperation: # not a C-like file fid.seek(start, 0) # just in case it seeked, though it shouldn't data = np.frombuffer(fid.read(size), dtype=dtype) if dtype == 'V1': # Rearrange raw bytes into smallest compatible numpy dtype dt = f'{fmt}i4' if bytes_per_sample == 3 else f'{fmt}i8' a = np.zeros((len(data) // bytes_per_sample, np.dtype(dt).itemsize), dtype='V1') if is_big_endian: a[:, :bytes_per_sample] = data.reshape((-1, bytes_per_sample)) else: a[:, -bytes_per_sample:] = data.reshape((-1, bytes_per_sample)) data = a.view(dt).reshape(a.shape[:-1]) else: if bytes_per_sample in {1, 2, 4, 8}: start = fid.tell() data = np.memmap(fid, dtype=dtype, mode='c', offset=start, shape=(n_samples,)) fid.seek(start + size) else: raise ValueError("mmap=True not compatible with " f"{bytes_per_sample}-byte container size.") _handle_pad_byte(fid, size) if channels > 1: data = data.reshape(-1, channels) return data def _skip_unknown_chunk(fid, is_big_endian): if is_big_endian: fmt = '>I' else: fmt = '<I' data = fid.read(4) # call unpack() and seek() only if we have really read data from file # otherwise empty read at the end of the file would trigger # unnecessary exception at unpack() call # in case data equals somehow to 0, there is no need for seek() anyway if data: size = struct.unpack(fmt, data)[0] fid.seek(size, 1) 
_handle_pad_byte(fid, size) def _read_riff_chunk(fid): str1 = fid.read(4) # File signature if str1 == b'RIFF': is_rf64 = False is_big_endian = False fmt = '<I' elif str1 == b'RIFX': is_rf64 = False is_big_endian = True fmt = '>I' elif str1 == b'RF64': is_rf64 = True is_big_endian = False fmt = '<Q' else: # There are also .wav files with "FFIR" or "XFIR" signatures? raise ValueError(f"File format {repr(str1)} not understood. Only " "'RIFF', 'RIFX', and 'RF64' supported.") # Size of entire file if not is_rf64: file_size = struct.unpack(fmt, fid.read(4))[0] + 8 rf64_chunk_size = None str2 = fid.read(4) else: # Skip 0xFFFFFFFF (-1) bytes fid.read(4) str2 = fid.read(4) str3 = fid.read(4) if str3 != b'ds64': raise ValueError("Invalid RF64 file: ds64 chunk not found.") ds64_size = struct.unpack("<I", fid.read(4))[0] file_size = struct.unpack(fmt, fid.read(8))[0] + 8 rf64_chunk_size = struct.unpack('<Q', fid.read(8))[0] # Ignore additional attributes of ds64 chunk like sample count, tables, etc. # and just skip to the next chunk fid.seek(ds64_size - 16, 1) if str2 != b'WAVE': raise ValueError(f"Not a WAV file. RIFF form type is {repr(str2)}.") return file_size, is_big_endian, is_rf64, rf64_chunk_size def _handle_pad_byte(fid, size): # "If the chunk size is an odd number of bytes, a pad byte with value zero # is written after ckData." So we need to seek past this after each chunk. if size % 2: fid.seek(1, 1) def read(filename, mmap=False): """ Open a WAV file. Return the sample rate (in samples/sec) and data from an LPCM WAV file. Parameters ---------- filename : string or open file handle Input WAV file. mmap : bool, optional Whether to read data as memory-mapped (default: False). Not compatible with some bit depths; see Notes. Only to be used on real files. .. versionadded:: 0.12.0 Returns ------- rate : int Sample rate of WAV file. data : numpy array Data read from WAV file. Data-type is determined from the file; see Notes. 
Data is 1-D for 1-channel WAV, or 2-D of shape (Nsamples, Nchannels) otherwise. If a file-like input without a C-like file descriptor (e.g., :class:`python:io.BytesIO`) is passed, this will not be writeable. Notes ----- Common data types: [1]_ ===================== =========== =========== ============= WAV format Min Max NumPy dtype ===================== =========== =========== ============= 32-bit floating-point -1.0 +1.0 float32 32-bit integer PCM -2147483648 +2147483647 int32 24-bit integer PCM -2147483648 +2147483392 int32 16-bit integer PCM -32768 +32767 int16 8-bit integer PCM 0 255 uint8 ===================== =========== =========== ============= WAV files can specify arbitrary bit depth, and this function supports reading any integer PCM depth from 1 to 64 bits. Data is returned in the smallest compatible numpy int type, in left-justified format. 8-bit and lower is unsigned, while 9-bit and higher is signed. For example, 24-bit data will be stored as int32, with the MSB of the 24-bit data stored at the MSB of the int32, and typically the least significant byte is 0x00. (However, if a file actually contains data past its specified bit depth, those bits will be read and output, too. [2]_) This bit justification and sign matches WAV's native internal format, which allows memory mapping of WAV files that use 1, 2, 4, or 8 bytes per sample (so 24-bit files cannot be memory-mapped, but 32-bit can). IEEE float PCM in 32- or 64-bit format is supported, with or without mmap. Values exceeding [-1, +1] are not clipped. Non-linear PCM (mu-law, A-law) is not supported. References ---------- .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming Interface and Data Specifications 1.0", section "Data Format of the Samples", August 1991 http://www.tactilemedia.com/info/MCI_Control_Info.html .. 
[2] Adobe Systems Incorporated, "Adobe Audition 3 User Guide", section "Audio file formats: 24-bit Packed Int (type 1, 20-bit)", 2007 Examples -------- >>> from os.path import dirname, join as pjoin >>> from scipy.io import wavfile >>> import scipy.io Get the filename for an example .wav file from the tests/data directory. >>> data_dir = pjoin(dirname(scipy.io.__file__), 'tests', 'data') >>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav') Load the .wav file contents. >>> samplerate, data = wavfile.read(wav_fname) >>> print(f"number of channels = {data.shape[1]}") number of channels = 2 >>> length = data.shape[0] / samplerate >>> print(f"length = {length}s") length = 0.01s Plot the waveform. >>> import matplotlib.pyplot as plt >>> import numpy as np >>> time = np.linspace(0., length, data.shape[0]) >>> plt.plot(time, data[:, 0], label="Left channel") >>> plt.plot(time, data[:, 1], label="Right channel") >>> plt.legend() >>> plt.xlabel("Time [s]") >>> plt.ylabel("Amplitude") >>> plt.show() """ if hasattr(filename, 'read'): fid = filename mmap = False else: fid = open(filename, 'rb') if not (was_seekable := fid.seekable()): fid = SeekEmulatingReader(fid) try: file_size, is_big_endian, is_rf64, rf64_chunk_size = _read_riff_chunk(fid) fmt_chunk_received = False data_chunk_received = False while fid.tell() < file_size: # read the next chunk chunk_id = fid.read(4) if not chunk_id: if data_chunk_received: # End of file but data successfully read warnings.warn( f"Reached EOF prematurely; finished at {fid.tell():d} bytes, " f"expected {file_size:d} bytes from header.", WavFileWarning, stacklevel=2) break else: raise ValueError("Unexpected end of file.") elif len(chunk_id) < 4: msg = f"Incomplete chunk ID: {repr(chunk_id)}" # If we have the data, ignore the broken chunk if fmt_chunk_received and data_chunk_received: warnings.warn(msg + ", ignoring it.", WavFileWarning, stacklevel=2) else: raise ValueError(msg) if chunk_id == b'fmt ': fmt_chunk_received = 
True fmt_chunk = _read_fmt_chunk(fid, is_big_endian) format_tag, channels, fs = fmt_chunk[1:4] bit_depth = fmt_chunk[6] block_align = fmt_chunk[5] elif chunk_id == b'fact': _skip_unknown_chunk(fid, is_big_endian) elif chunk_id == b'data': data_chunk_received = True if not fmt_chunk_received: raise ValueError("No fmt chunk before data") data = _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian, is_rf64, block_align, mmap, rf64_chunk_size) elif chunk_id == b'LIST': # Someday this could be handled properly but for now skip it _skip_unknown_chunk(fid, is_big_endian) elif chunk_id in {b'JUNK', b'Fake'}: # Skip alignment chunks without warning _skip_unknown_chunk(fid, is_big_endian) else: warnings.warn("Chunk (non-data) not understood, skipping it.", WavFileWarning, stacklevel=2) _skip_unknown_chunk(fid, is_big_endian) finally: if not hasattr(filename, 'read'): fid.close() elif was_seekable: # Rewind, if we are able, so that caller can do something # else with the raw WAV stream. fid.seek(0) return fs, data def write(filename, rate, data): """ Write a NumPy array as a WAV file. Parameters ---------- filename : string or open file handle Output wav file. rate : int The sample rate (in samples/sec). data : ndarray A 1-D or 2-D NumPy array of either integer or float data-type. Notes ----- * Writes a simple uncompressed WAV file. * To write multiple-channels, use a 2-D array of shape (Nsamples, Nchannels). * The bits-per-sample and PCM/float will be determined by the data-type. Common data types: [1]_ ===================== =========== =========== ============= WAV format Min Max NumPy dtype ===================== =========== =========== ============= 32-bit floating-point -1.0 +1.0 float32 32-bit PCM -2147483648 +2147483647 int32 16-bit PCM -32768 +32767 int16 8-bit PCM 0 255 uint8 ===================== =========== =========== ============= Note that 8-bit PCM is unsigned. References ---------- .. 
[1] IBM Corporation and Microsoft Corporation, "Multimedia Programming Interface and Data Specifications 1.0", section "Data Format of the Samples", August 1991 http://www.tactilemedia.com/info/MCI_Control_Info.html Examples -------- Create a 100Hz sine wave, sampled at 44100Hz. Write to 16-bit PCM, Mono. >>> from scipy.io.wavfile import write >>> import numpy as np >>> samplerate = 44100; fs = 100 >>> t = np.linspace(0., 1., samplerate) >>> amplitude = np.iinfo(np.int16).max >>> data = amplitude * np.sin(2. * np.pi * fs * t) >>> write("example.wav", samplerate, data.astype(np.int16)) """ if hasattr(filename, 'write'): fid = filename else: fid = open(filename, 'wb') fs = rate try: dkind = data.dtype.kind allowed_dtypes = ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'] if data.dtype.name not in allowed_dtypes: raise ValueError(f"Unsupported data type '{data.dtype}'") header_data = b'' header_data += b'RIFF' header_data += b'\x00\x00\x00\x00' header_data += b'WAVE' # fmt chunk header_data += b'fmt ' if dkind == 'f': format_tag = WAVE_FORMAT.IEEE_FLOAT else: format_tag = WAVE_FORMAT.PCM if data.ndim == 1: channels = 1 else: channels = data.shape[1] bit_depth = data.dtype.itemsize * 8 bytes_per_second = fs*(bit_depth // 8)*channels block_align = channels * (bit_depth // 8) fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs, bytes_per_second, block_align, bit_depth) if not (dkind == 'i' or dkind == 'u'): # add cbSize field for non-PCM files fmt_chunk_data += b'\x00\x00' header_data += struct.pack('<I', len(fmt_chunk_data)) header_data += fmt_chunk_data # check data size (needs to be immediately before the data chunk) # if too large for standard RIFF, use RF64 instead resulting_file_size = len(header_data) + 4 + 4 + data.nbytes is_rf64 = (resulting_file_size - 8) > 0xFFFFFFFF if is_rf64: header_data = b'' header_data += b'RF64' header_data += b'\xFF\xFF\xFF\xFF' header_data += b'WAVE' header_data += b'ds64' # size of ds64 chunk header_data += 
struct.pack('<I', 28) # will be filled later with real file size header_data += struct.pack('<Q', 0) header_data += struct.pack('<Q', data.nbytes) header_data += struct.pack('<Q', data.shape[0]) # ignore 'table' field for now header_data += struct.pack('<I', 0) header_data += b'fmt ' header_data += struct.pack('<I', len(fmt_chunk_data)) header_data += fmt_chunk_data # fact chunk (non-PCM files) if not (dkind == 'i' or dkind == 'u'): header_data += b'fact' header_data += struct.pack('<II', 4, data.shape[0]) fid.write(header_data) # data chunk fid.write(b'data') # write data chunk size, unless its too big in which case 0xFFFFFFFF is written fid.write(struct.pack('<I', min(data.nbytes, 4294967295))) if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'): data = data.byteswap() _array_tofile(fid, data) # Determine file size and place it in correct # position at start of the file or the data chunk. size = fid.tell() if not is_rf64: fid.seek(4) fid.write(struct.pack('<I', size-8)) else: fid.seek(20) fid.write(struct.pack('<Q', size-8)) finally: if not hasattr(filename, 'write'): fid.close() else: fid.seek(0) def _array_tofile(fid, data): # ravel gives a c-contiguous buffer fid.write(data.ravel().view('b').data)
WAVE_FORMAT
python
PyCQA__pylint
tests/checkers/unittest_base_checker.py
{ "start": 1728, "end": 5311 }
class ____(BaseChecker): name = "message-with-options-checker" msgs = { "W0003": ( "Just a message with pre-defined options %s()", "message-with-options", "Message with options dict to test consistent hashing.", {"old_names": [("W1003", "old-message-with-options")], "shared": True}, ), } def test_base_checker_doc() -> None: basic = OtherBasicChecker() expected_beginning = """\ Basic checker ~~~~~~~~~~~~~ Verbatim name of the checker is ``basic``. """ expected_middle = """\ Basic checker Options ^^^^^^^^^^^^^^^^^^^^^ :example-args: Example of integer argument for the checker. Default: ``42`` """ expected_end = """\ Basic checker Messages ^^^^^^^^^^^^^^^^^^^^^^ :basic-checker-example (W0001): *Basic checker has an example.* Used nowhere and serves no purpose. """ assert str(basic) == expected_beginning + expected_end assert repr(basic) == "Checker 'basic' (responsible for 'W0001')" less_basic = LessBasicChecker() assert str(less_basic) == expected_beginning + expected_middle + expected_end assert repr(less_basic) == repr(basic) def test_base_checker_ordering() -> None: """Test ordering of checkers based on their __gt__ method.""" linter = PyLinter() imports_builtin = ImportsChecker(linter) typecheck_builtin = TypeChecker(linter) basic_1_ext = OtherBasicChecker() basic_2_ext = LessBasicChecker() basic_3_ext = DifferentBasicChecker() while_used_ext = WhileChecker(linter) broad_try_clause_ext = BroadTryClauseChecker(linter) list_of_checkers = [ 1, basic_1_ext, basic_2_ext, basic_3_ext, typecheck_builtin, broad_try_clause_ext, imports_builtin, while_used_ext, linter, ] assert sorted(list_of_checkers) == [ # type: ignore[type-var] linter, imports_builtin, typecheck_builtin, basic_3_ext, basic_1_ext, basic_2_ext, broad_try_clause_ext, while_used_ext, 1, ] # main checker is always smaller assert linter < basic_1_ext assert linter < while_used_ext assert linter < imports_builtin assert basic_2_ext > linter assert while_used_ext > linter assert imports_builtin > linter # 
builtin are smaller than extension (even when not alphabetically) assert imports_builtin < while_used_ext assert imports_builtin < broad_try_clause_ext assert while_used_ext > imports_builtin assert broad_try_clause_ext > imports_builtin # alphabetical order for builtin assert imports_builtin < typecheck_builtin assert typecheck_builtin > imports_builtin # alphabetical order for extension assert typecheck_builtin < while_used_ext assert while_used_ext > typecheck_builtin assert basic_1_ext > basic_3_ext assert basic_2_ext > basic_3_ext assert basic_1_ext == basic_2_ext def test_base_checker_invalid_message() -> None: linter = PyLinter() with pytest.raises(InvalidMessageError): linter.register_checker(MissingFieldsChecker(linter)) def test_base_checker_consistent_hash() -> None: linter = PyLinter() checker = MessageWithOptionsChecker(linter) some_set = {checker} original_hash = hash(checker) assert checker in some_set for msgid, msg in checker.msgs.items(): checker.create_message_definition_from_tuple(msgid, msg) assert hash(checker) == original_hash assert checker in some_set
MessageWithOptionsChecker
python
vyperlang__vyper
tests/venom_utils.py
{ "start": 1944, "end": 3304 }
class ____: passes: list[type] post_passes: list[type] pass_objects: list[IRPass] default_hevm: bool def __init__(self, passes: list[type], post: list[type] = None, default_hevm: bool = True): self.passes = passes if post is None: self.post_passes = [] else: self.post_passes = post self.default_hevm = default_hevm self.pass_objects = list() def __call__(self, pre: str, post: str, hevm: bool | None = None) -> list[IRPass]: from tests.hevm import hevm_check_venom self.pass_objects.clear() if hevm is None: hevm = self.default_hevm pre_ctx = parse_from_basic_block(pre) for fn in pre_ctx.functions.values(): ac = IRAnalysesCache(fn) for p in self.passes: obj = p(ac, fn) self.pass_objects.append(obj) obj.run_pass() post_ctx = parse_from_basic_block(post) for fn in post_ctx.functions.values(): ac = IRAnalysesCache(fn) for p in self.post_passes: obj = p(ac, fn) self.pass_objects.append(obj) obj.run_pass() assert_ctx_eq(pre_ctx, post_ctx) if hevm: hevm_check_venom(pre, post) return self.pass_objects
PrePostChecker
python
getsentry__sentry
tests/sentry/seer/explorer/test_tools.py
{ "start": 28058, "end": 28113 }
class ____(BaseModel): id: int slug: str
_Project
python
conda__conda
conda/gateways/repodata/jlap/interface.py
{ "start": 666, "end": 4006 }
class ____(RepoInterface): def __init__( self, url: str, repodata_fn: str | None, *, cache: RepodataCache, **kwargs, ) -> None: log.debug("Using %s", self.__class__.__name__) self._cache = cache self._url = url self._repodata_fn = repodata_fn self._log = logging.getLogger(__name__) self._stderrlog = logging.getLogger("conda.stderrlog") def repodata(self, state: dict | RepodataState) -> str | None: """ Fetch newest repodata if necessary. Always writes to ``cache_path_json``. """ self.repodata_parsed(state) raise RepodataOnDisk() def repodata_parsed(self, state: dict | RepodataState) -> dict | None: """ JLAP has to parse the JSON anyway. Use this to avoid a redundant parse when repodata is updated. When repodata is not updated, it doesn't matter whether this function or the caller reads from a file. """ session = get_session(self._url) if not context.ssl_verify: disable_ssl_verify_warning() repodata_url = f"{self._url}/{self._repodata_fn}" # XXX won't modify caller's state dict state_ = self._repodata_state_copy(state) # at this point, self._cache.state == state == state_ temp_path = ( self._cache.cache_dir / f"{self._cache.name}.{os.urandom(2).hex()}.tmp" ) try: with conda_http_errors(self._url, self._repodata_fn): repodata_json_or_none = fetch.request_url_jlap_state( repodata_url, state_, session=session, cache=self._cache, temp_path=temp_path, ) # update caller's state dict-or-RepodataState. Do this before # the self._cache.replace() call which also writes state, then # signal not to write state to caller. 
state.update(state_) state[URL_KEY] = self._url headers = state.get("jlap", {}).get( "headers" ) # XXX overwrite headers in jlapper.request_url_jlap_state if headers: state[ETAG_KEY] = headers.get("etag") state[LAST_MODIFIED_KEY] = headers.get("last-modified") state[CACHE_CONTROL_KEY] = headers.get("cache-control") self._cache.state.update(state) if temp_path.exists(): self._cache.replace(temp_path) except fetch.Jlap304NotModified: raise Response304ContentUnchanged() finally: # Clean up the temporary file. In the successful case it raises # OSError as self._cache_replace() removed temp_file. try: temp_path.unlink() except OSError: pass if repodata_json_or_none is None: # common # Indicate that subdir_data mustn't rewrite cache_path_json raise RepodataOnDisk() else: return repodata_json_or_none def _repodata_state_copy(self, state: dict | RepodataState): return RepodataState(dict=state)
JlapRepoInterface
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/array_ops/constant_op_test.py
{ "start": 37585, "end": 39373 }
class ____(test.TestCase): @test_util.run_deprecated_v1 def testFullShape(self): with self.session(force_gpu=test_util.is_gpu_available()): p = array_ops.placeholder_with_default([[2, 2], [2, 2]], shape=[2, 2]) a = array_ops.identity(p) self.assertAllEqual([[2, 2], [2, 2]], self.evaluate(a)) self.assertAllEqual( [[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]})) with self.assertRaises(ValueError): a.eval(feed_dict={p: [[6, 6, 6], [6, 6, 6]]}) @test_util.run_deprecated_v1 def testPartialShape(self): with self.session(force_gpu=test_util.is_gpu_available()): p = array_ops.placeholder_with_default([1, 2, 3], shape=[None]) a = array_ops.identity(p) self.assertAllEqual([1, 2, 3], self.evaluate(a)) self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]})) with self.assertRaises(ValueError): a.eval(feed_dict={p: [[2, 2], [2, 2]]}) @test_util.run_deprecated_v1 def testNoShape(self): with self.session(force_gpu=test_util.is_gpu_available()): p = array_ops.placeholder_with_default([17], shape=None) a = array_ops.identity(p) self.assertAllEqual([17], self.evaluate(a)) self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]})) self.assertAllEqual( [[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]})) @test_util.run_deprecated_v1 def testGradient(self): with self.session(force_gpu=test_util.is_gpu_available()): x = array_ops.placeholder(dtypes_lib.float32, [5, 7]) y = array_ops.placeholder_with_default(x, None) err = gradient_checker.compute_gradient_error(x, [5, 7], y, [5, 7]) self.assertLess(err, 1e-3) if __name__ == "__main__": test.main()
PlaceholderWithDefaultTest
python
agronholm__apscheduler
tests/test_schedulers.py
{ "start": 3139, "end": 47388 }
class ____: def test_repr(self) -> None: scheduler = AsyncScheduler(identity="my identity") assert repr(scheduler) == ( "AsyncScheduler(identity='my identity', role=<SchedulerRole.both: 3>, " "data_store=MemoryDataStore(), event_broker=LocalEventBroker())" ) async def test_use_before_initialized(self) -> None: scheduler = AsyncScheduler() with pytest.raises( RuntimeError, match="The scheduler has not been initialized yet" ): await scheduler.add_job(dummy_async_job) async def test_properties(self) -> None: async with AsyncScheduler() as scheduler: assert isinstance(scheduler.data_store, MemoryDataStore) assert isinstance(scheduler.event_broker, LocalEventBroker) assert scheduler.role is SchedulerRole.both assert isinstance(scheduler.identity, str) assert len(scheduler.job_executors) == 3 assert isinstance(scheduler.job_executors["async"], AsyncJobExecutor) assert isinstance( scheduler.job_executors["threadpool"], ThreadPoolJobExecutor ) assert isinstance( scheduler.job_executors["processpool"], ProcessPoolJobExecutor ) assert scheduler.task_defaults.job_executor == "async" assert scheduler.state is RunState.stopped @pytest.mark.parametrize("as_default", [False, True]) async def test_async_executor(self, as_default: bool) -> None: async with AsyncScheduler() as scheduler: await scheduler.start_in_background() if as_default: thread_id = await scheduler.run_job(threading.get_ident) else: thread_id = await scheduler.run_job( threading.get_ident, job_executor="async" ) assert thread_id == threading.get_ident() async def test_threadpool_executor(self) -> None: async with AsyncScheduler() as scheduler: await scheduler.start_in_background() thread_id = await scheduler.run_job( threading.get_ident, job_executor="threadpool" ) assert thread_id != threading.get_ident() async def test_processpool_executor(self) -> None: async with AsyncScheduler() as scheduler: await scheduler.start_in_background() pid = await scheduler.run_job(os.getpid, job_executor="processpool") assert pid 
!= os.getpid() async def test_configure_task(self, raw_datastore: DataStore) -> None: send, receive = create_memory_object_stream[Event](2) with send, receive: async with AsyncScheduler(data_store=raw_datastore) as scheduler: scheduler.subscribe(send.send) await scheduler.configure_task("mytask", func=dummy_async_job) await scheduler.configure_task("mytask", misfire_grace_time=2) tasks = await scheduler.get_tasks() assert len(tasks) == 1 assert tasks[0].id == "mytask" assert tasks[0].func == f"{__name__}:dummy_async_job" assert tasks[0].misfire_grace_time == timedelta(seconds=2) with fail_after(3): event = await receive.receive() assert isinstance(event, TaskAdded) assert event.task_id == "mytask" event = await receive.receive() assert isinstance(event, TaskUpdated) assert event.task_id == "mytask" async def test_configure_task_with_decorator(self) -> None: async with AsyncScheduler() as scheduler: await scheduler.configure_task("taskfunc", func=decorated_job) tasks = await scheduler.get_tasks() assert len(tasks) == 1 assert tasks[0].max_running_jobs == 3 assert tasks[0].misfire_grace_time == timedelta(seconds=6) assert tasks[0].job_executor == "threadpool" async def test_configure_local_task_with_decorator(self) -> None: @task( id="taskfunc", job_executor="threadpool", max_running_jobs=3, misfire_grace_time=timedelta(seconds=6), metadata={"local": 6}, ) def taskfunc() -> None: pass task_defaults = TaskDefaults(metadata={"global": "foo"}) async with AsyncScheduler(task_defaults=task_defaults) as scheduler: await scheduler.configure_task(taskfunc, metadata={"direct": [1, 9]}) tasks = await scheduler.get_tasks() assert len(tasks) == 1 assert tasks[0].id == "taskfunc" assert tasks[0].max_running_jobs == 3 assert tasks[0].misfire_grace_time == timedelta(seconds=6) assert tasks[0].job_executor == "threadpool" assert tasks[0].metadata == {"global": "foo", "local": 6, "direct": [1, 9]} async def test_add_pause_unpause_remove_schedule( self, raw_datastore: DataStore, 
timezone: ZoneInfo ) -> None: send, receive = create_memory_object_stream[Event](5) with send, receive: async with AsyncScheduler(data_store=raw_datastore) as scheduler: scheduler.subscribe(send.send) now = datetime.now(timezone) trigger = DateTrigger(now) schedule_id = await scheduler.add_schedule( dummy_async_job, trigger, id="foo" ) assert schedule_id == "foo" schedules = await scheduler.get_schedules() assert len(schedules) == 1 assert schedules[0].id == "foo" assert schedules[0].task_id == f"{__name__}:dummy_async_job" await scheduler.pause_schedule("foo") schedule = await scheduler.get_schedule("foo") assert schedule.paused assert schedule.next_fire_time == now await scheduler.unpause_schedule("foo") schedule = await scheduler.get_schedule("foo") assert not schedule.paused assert schedule.next_fire_time == now await scheduler.remove_schedule(schedule_id) assert not await scheduler.get_schedules() with fail_after(3): event = await receive.receive() assert isinstance(event, TaskAdded) assert event.task_id == f"{__name__}:dummy_async_job" event = await receive.receive() assert isinstance(event, ScheduleAdded) assert event.schedule_id == "foo" assert event.task_id == f"{__name__}:dummy_async_job" assert event.next_fire_time == now event = await receive.receive() assert isinstance(event, ScheduleUpdated) assert event.schedule_id == "foo" assert event.task_id == f"{__name__}:dummy_async_job" assert event.next_fire_time == now event = await receive.receive() assert isinstance(event, ScheduleUpdated) assert event.schedule_id == "foo" assert event.task_id == f"{__name__}:dummy_async_job" assert event.next_fire_time == now event = await receive.receive() assert isinstance(event, ScheduleRemoved) assert event.schedule_id == "foo" assert event.task_id == f"{__name__}:dummy_async_job" assert not event.finished async def test_add_job_wait_result(self, raw_datastore: DataStore) -> None: send, receive = create_memory_object_stream[Event](2) async with AsyncExitStack() as 
exit_stack: exit_stack.enter_context(send) exit_stack.enter_context(receive) scheduler = await exit_stack.enter_async_context( AsyncScheduler(data_store=raw_datastore) ) assert await scheduler.get_jobs() == [] scheduler.subscribe(send.send) job_id = await scheduler.add_job(dummy_async_job, result_expiration_time=10) with fail_after(3): event = await receive.receive() assert isinstance(event, TaskAdded) assert event.task_id == f"{__name__}:dummy_async_job" event = await receive.receive() assert isinstance(event, JobAdded) assert event.job_id == job_id jobs = await scheduler.get_jobs() assert len(jobs) == 1 assert jobs[0].id == job_id assert jobs[0].task_id == f"{__name__}:dummy_async_job" with pytest.raises(JobLookupError): await scheduler.get_job_result(job_id, wait=False) await scheduler.start_in_background() with fail_after(3): event = await receive.receive() assert isinstance(event, SchedulerStarted) event = await receive.receive() assert isinstance(event, JobAcquired) assert event.job_id == job_id assert event.task_id == f"{__name__}:dummy_async_job" assert event.schedule_id is None assert event.scheduled_start is None acquired_at = event.timestamp event = await receive.receive() assert isinstance(event, JobReleased) assert event.job_id == job_id assert event.task_id == f"{__name__}:dummy_async_job" assert event.schedule_id is None assert event.scheduled_start is None assert event.started_at is not None assert event.started_at >= acquired_at assert event.outcome is JobOutcome.success result = await scheduler.get_job_result(job_id) assert result assert result.outcome is JobOutcome.success assert result.return_value == "returnvalue" @pytest.mark.parametrize("success", [True, False]) async def test_run_job(self, raw_datastore: DataStore, success: bool) -> None: send, receive = create_memory_object_stream[Event](4) with send, receive: async with AsyncScheduler(data_store=raw_datastore) as scheduler: await scheduler.start_in_background() 
scheduler.subscribe(send.send) try: result = await scheduler.run_job( dummy_async_job, kwargs={"fail": not success} ) except RuntimeError as exc: assert str(exc) == "failing as requested" else: assert result == "returnvalue" assert not await scheduler.get_jobs() with fail_after(3): # The task was added event = await receive.receive() assert isinstance(event, TaskAdded) assert event.task_id == f"{__name__}:dummy_async_job" # The job was added event = await receive.receive() assert isinstance(event, JobAdded) job_id = event.job_id assert event.task_id == f"{__name__}:dummy_async_job" # The scheduler acquired the job event = await receive.receive() assert isinstance(event, JobAcquired) assert event.job_id == job_id assert event.task_id == f"{__name__}:dummy_async_job" assert event.schedule_id is None assert event.scheduled_start is None assert event.scheduler_id == scheduler.identity acquired_at = event.timestamp # The scheduler released the job event = await receive.receive() assert isinstance(event, JobReleased) assert event.job_id == job_id assert event.task_id == f"{__name__}:dummy_async_job" assert event.schedule_id is None assert event.scheduled_start is None assert event.started_at is not None assert event.started_at >= acquired_at assert event.scheduler_id == scheduler.identity # The scheduler was stopped event = await receive.receive() assert isinstance(event, SchedulerStopped) # There should be no more events on the list with pytest.raises(WouldBlock): receive.receive_nowait() @pytest.mark.parametrize( "target, expected_result", [ pytest.param(dummy_async_job, "returnvalue", id="async_func"), pytest.param(dummy_sync_job, "returnvalue", id="sync_func"), pytest.param(DummyClass.dummy_static_method, "static", id="staticmethod"), pytest.param( DummyClass.dummy_async_static_method, "static", id="async_staticmethod" ), pytest.param(DummyClass.dummy_class_method, "class", id="classmethod"), pytest.param( DummyClass.dummy_async_class_method, "class", 
id="async_classmethod" ), pytest.param(DummyClass(5).dummy_instance_method, 5, id="instancemethod"), pytest.param( DummyClass(6).dummy_async_instance_method, 6, id="async_instancemethod" ), pytest.param(bytes, b"", id="builtin_function"), pytest.param( datetime(2023, 10, 19, tzinfo=UTC).timestamp, 1697673600.0, id="builtin_method", ), pytest.param(partial(bytes, "foo", "ascii"), b"foo", id="partial"), ], ) @pytest.mark.parametrize( "use_scheduling", [ pytest.param(False, id="job"), pytest.param(True, id="schedule"), ], ) async def test_callable_types( self, target: Callable[..., Any], expected_result: object, use_scheduling: bool, raw_datastore: DataStore, timezone: ZoneInfo, ) -> None: now = datetime.now(timezone) send, receive = create_memory_object_stream[Event](4) async with AsyncExitStack() as exit_stack: exit_stack.enter_context(send) exit_stack.enter_context(receive) scheduler = await exit_stack.enter_async_context( AsyncScheduler(data_store=raw_datastore) ) scheduler.subscribe(send.send, {JobReleased}) await scheduler.start_in_background() if use_scheduling: trigger = DateTrigger(now) await scheduler.add_schedule(target, trigger, id="foo") else: await scheduler.add_job(target, result_expiration_time=10) with fail_after(3): event = await receive.receive() assert isinstance(event, JobReleased) if not use_scheduling: result = await scheduler.get_job_result(event.job_id) assert result assert result.outcome is JobOutcome.success assert result.return_value == expected_result async def test_scheduled_job_missed_deadline( self, raw_datastore: DataStore, timezone: ZoneInfo ) -> None: one_second_in_past = datetime.now(timezone) - timedelta(seconds=1) trigger = DateTrigger(one_second_in_past) scheduler_send, scheduler_receive = create_memory_object_stream[Event](4) worker_send, worker_receive = create_memory_object_stream[Event](2) with scheduler_send, scheduler_receive, worker_send, worker_receive: async with AsyncExitStack() as exit_stack: scheduler = await 
exit_stack.enter_async_context( AsyncScheduler(data_store=raw_datastore) ) await scheduler.add_schedule( dummy_async_job, trigger, misfire_grace_time=0, id="foo" ) await scheduler.start_in_background() exit_stack.enter_context( scheduler.subscribe( scheduler_send.send, {JobAdded, ScheduleUpdated} ) ) exit_stack.enter_context( scheduler.subscribe(worker_send.send, {JobAcquired, JobReleased}) ) with fail_after(3): # The schedule was processed and a job was added for it event = await scheduler_receive.receive() assert isinstance(event, JobAdded) assert event.schedule_id == "foo" assert event.task_id == "test_schedulers:dummy_async_job" job_id = event.job_id # The schedule was updated with a null next fire time event = await scheduler_receive.receive() assert isinstance(event, ScheduleUpdated) assert event.schedule_id == "foo" assert event.next_fire_time is None # The new job was acquired event = await worker_receive.receive() assert isinstance(event, JobReleased) assert event.job_id == job_id assert event.task_id == "test_schedulers:dummy_async_job" assert event.schedule_id == "foo" assert event.scheduled_start == one_second_in_past assert event.started_at is None assert event.outcome is JobOutcome.missed_start_deadline # There should be no more events on the list with pytest.raises(WouldBlock): scheduler_receive.receive_nowait() @pytest.mark.parametrize( "coalesce, expected_jobs, first_fire_time_delta", [ pytest.param( CoalescePolicy.all, 4, timedelta(minutes=3, seconds=5), id="all" ), pytest.param( CoalescePolicy.earliest, 1, timedelta(minutes=3, seconds=5), id="earliest", ), pytest.param(CoalescePolicy.latest, 1, timedelta(seconds=5), id="latest"), ], ) async def test_coalesce_policy( self, coalesce: CoalescePolicy, expected_jobs: int, first_fire_time_delta: timedelta, raw_datastore: DataStore, timezone: ZoneInfo, ) -> None: now = datetime.now(timezone) first_start_time = now - timedelta(minutes=3, seconds=5) trigger = IntervalTrigger(minutes=1, 
start_time=first_start_time) send, receive = create_memory_object_stream[Event](4) with send, receive: async with AsyncScheduler( data_store=raw_datastore, role=SchedulerRole.scheduler, cleanup_interval=None, ) as scheduler: await scheduler.add_schedule( dummy_async_job, trigger, id="foo", coalesce=coalesce ) scheduler.subscribe(send.send) await scheduler.start_in_background() with fail_after(3): # The scheduler was started event = await receive.receive() assert isinstance(event, SchedulerStarted) # The schedule was processed and one or more jobs weres added for index in range(expected_jobs): event = await receive.receive() assert isinstance(event, JobAdded) assert event.schedule_id == "foo" assert event.task_id == "test_schedulers:dummy_async_job" event = await receive.receive() assert isinstance(event, ScheduleUpdated) assert event.next_fire_time == now + timedelta(seconds=55) expected_scheduled_fire_time = now - first_fire_time_delta jobs = await scheduler.get_jobs() for job in sorted(jobs, key=lambda job: job.scheduled_fire_time): assert job.scheduled_fire_time assert job.scheduled_fire_time < now assert job.scheduled_fire_time == expected_scheduled_fire_time expected_scheduled_fire_time += timedelta(minutes=1) # The scheduler was stopped event = await receive.receive() assert isinstance(event, SchedulerStopped) # There should be no more events on the list with pytest.raises(WouldBlock): receive.receive_nowait() @pytest.mark.parametrize( "max_jitter, expected_upper_bound", [pytest.param(2, 2, id="within"), pytest.param(4, 2.999999, id="exceed")], ) async def test_jitter( self, mocker: MockerFixture, timezone: ZoneInfo, max_jitter: float, expected_upper_bound: float, raw_datastore: DataStore, ) -> None: jitter = 1.569374 now = datetime.now(timezone) fake_uniform = mocker.patch("random.uniform") fake_uniform.configure_mock(side_effect=lambda a, b: jitter) send, receive = create_memory_object_stream[Event](4) async with AsyncExitStack() as exit_stack: 
exit_stack.enter_context(send) exit_stack.enter_context(receive) scheduler = await exit_stack.enter_async_context( AsyncScheduler(data_store=raw_datastore, role=SchedulerRole.scheduler) ) scheduler.subscribe(send.send) trigger = DateTrigger(now) schedule_id = await scheduler.add_schedule( dummy_async_job, trigger, id="foo", max_jitter=max_jitter ) schedule = await scheduler.get_schedule(schedule_id) assert schedule.max_jitter == timedelta(seconds=max_jitter) await scheduler.start_in_background() with fail_after(3): # The task was added event = await receive.receive() assert isinstance(event, TaskAdded) assert event.task_id == "test_schedulers:dummy_async_job" # The schedule was added event = await receive.receive() assert isinstance(event, ScheduleAdded) assert event.schedule_id == "foo" assert event.next_fire_time == now # The scheduler was started event = await receive.receive() assert isinstance(event, SchedulerStarted) # The schedule was processed and a job was added for it event = await receive.receive() assert isinstance(event, JobAdded) assert event.schedule_id == "foo" assert event.task_id == "test_schedulers:dummy_async_job" # Check that the job was created with the proper amount of jitter in its # scheduled time jobs = await scheduler.get_jobs() assert len(jobs) == 1 assert jobs[0].jitter == timedelta(seconds=jitter) assert jobs[0].scheduled_fire_time == now + timedelta(seconds=jitter) assert jobs[0].original_scheduled_time == now async def test_add_job_get_result_success(self, raw_datastore: DataStore) -> None: async with AsyncScheduler(data_store=raw_datastore) as scheduler: job_id = await scheduler.add_job( dummy_async_job, kwargs={"delay": 0.2}, result_expiration_time=5 ) await scheduler.start_in_background() with fail_after(3): result = await scheduler.get_job_result(job_id) assert result assert result.job_id == job_id assert result.outcome is JobOutcome.success assert result.return_value == "returnvalue" async def test_add_job_get_result_empty(self, 
raw_datastore: DataStore) -> None: send, receive = create_memory_object_stream[Event](4) async with AsyncExitStack() as exit_stack: exit_stack.enter_context(send) exit_stack.enter_context(receive) scheduler = await exit_stack.enter_async_context( AsyncScheduler(data_store=raw_datastore) ) await scheduler.start_in_background() scheduler.subscribe(send.send) job_id = await scheduler.add_job(dummy_async_job) with fail_after(3): event = await receive.receive() assert isinstance(event, TaskAdded) event = await receive.receive() assert isinstance(event, JobAdded) assert event.job_id == job_id event = await receive.receive() assert isinstance(event, JobAcquired) assert event.job_id == job_id assert event.task_id == "test_schedulers:dummy_async_job" assert event.schedule_id is None event = await receive.receive() assert isinstance(event, JobReleased) assert event.job_id == job_id assert event.task_id == "test_schedulers:dummy_async_job" assert event.schedule_id is None with pytest.raises(JobLookupError): await scheduler.get_job_result(job_id, wait=False) async def test_add_job_get_result_error(self) -> None: async with AsyncScheduler() as scheduler: job_id = await scheduler.add_job( dummy_async_job, kwargs={"delay": 0.2, "fail": True}, result_expiration_time=5, ) await scheduler.start_in_background() with fail_after(3): result = await scheduler.get_job_result(job_id) assert result assert result.job_id == job_id assert result.outcome is JobOutcome.error assert isinstance(result.exception, RuntimeError) assert str(result.exception) == "failing as requested" async def test_add_job_get_result_no_ready_yet(self) -> None: send, receive = create_memory_object_stream[Event](4) async with AsyncExitStack() as exit_stack: exit_stack.enter_context(send) exit_stack.enter_context(receive) scheduler = await exit_stack.enter_async_context(AsyncScheduler()) scheduler.subscribe(send.send) job_id = await scheduler.add_job(dummy_async_job, kwargs={"delay": 0.2}) with fail_after(3): event = 
await receive.receive() assert isinstance(event, TaskAdded) event = await receive.receive() assert isinstance(event, JobAdded) assert event.job_id == job_id with pytest.raises(JobLookupError), fail_after(1): await scheduler.get_job_result(job_id, wait=False) async def test_add_job_not_rewriting_task_config( self, raw_datastore: DataStore ) -> None: async with AsyncScheduler(data_store=raw_datastore) as scheduler: TASK_ID = "task_dummy_async_job" JOB_EXECUTOR = "async" MISFIRE_GRACE_TIME = timedelta(seconds=10) MAX_RUNNING_JOBS = 5 METADATA = {"key": "value"} await scheduler.configure_task( func_or_task_id=TASK_ID, func=dummy_async_job, job_executor=JOB_EXECUTOR, misfire_grace_time=MISFIRE_GRACE_TIME, max_running_jobs=MAX_RUNNING_JOBS, metadata=METADATA, ) assert await scheduler.add_job(TASK_ID) task = await scheduler.data_store.get_task(TASK_ID) assert task.job_executor == JOB_EXECUTOR assert task.misfire_grace_time == MISFIRE_GRACE_TIME assert task.max_running_jobs == MAX_RUNNING_JOBS assert task.metadata == METADATA async def test_contextvars(self, mocker: MockerFixture, timezone: ZoneInfo) -> None: def check_contextvars() -> None: assert current_async_scheduler.get() is scheduler info = current_job.get() assert isinstance(info, Job) assert info.task_id == "contextvars" assert info.schedule_id == "foo" assert info.original_scheduled_time == now assert info.scheduled_fire_time == now + timedelta(seconds=2.16) assert info.jitter == timedelta(seconds=2.16) assert info.start_deadline == now + timedelta(seconds=2.16) + timedelta( seconds=10 ) fake_uniform = mocker.patch("random.uniform") fake_uniform.configure_mock(return_value=2.16) now = datetime.now(timezone) send, receive = create_memory_object_stream[Event](1) async with AsyncExitStack() as exit_stack: exit_stack.enter_context(send) exit_stack.enter_context(receive) scheduler = await exit_stack.enter_async_context(AsyncScheduler()) await scheduler.configure_task("contextvars", func=check_contextvars) await 
scheduler.add_schedule( "contextvars", DateTrigger(now), id="foo", max_jitter=3, misfire_grace_time=10, ) scheduler.subscribe(send.send, {JobReleased}) await scheduler.start_in_background() with fail_after(3): event = await receive.receive() assert event.outcome is JobOutcome.success async def test_explicit_cleanup(self, raw_datastore: DataStore) -> None: send, receive = create_memory_object_stream[Event](1) async with AsyncExitStack() as exit_stack: exit_stack.enter_context(send) exit_stack.enter_context(receive) scheduler = await exit_stack.enter_async_context( AsyncScheduler(raw_datastore, cleanup_interval=None) ) scheduler.subscribe(send.send, {ScheduleRemoved}) event = anyio.Event() scheduler.subscribe(lambda _: event.set(), {JobReleased}, one_shot=True) await scheduler.start_in_background() # Add a job whose result expires after 1 ms job_id = await scheduler.add_job( dummy_async_job, result_expiration_time=0.001 ) with fail_after(3): await event.wait() # After the sleeping past the expiration time and performing a cleanup, the # result should not be there anymore await sleep(0.1) await scheduler.cleanup() with pytest.raises(JobLookupError): await scheduler.get_job_result(job_id) # Add a schedule to immediately set the event event = anyio.Event() scheduler.subscribe(lambda _: event.set(), {JobReleased}, one_shot=True) await scheduler.add_schedule( dummy_async_job, DateTrigger(datetime.now(timezone.utc)), id="event_set" ) with fail_after(3): await event.wait() # The schedule should still be around, but with a null next_fire_time schedule = await scheduler.get_schedule("event_set") assert schedule.next_fire_time is None # After the cleanup, the schedule should be gone await scheduler.cleanup() with pytest.raises(ScheduleLookupError): await scheduler.get_schedule("event_set") # Check that the corresponding event was received with fail_after(3): event = await receive.receive() assert isinstance(event, ScheduleRemoved) assert event.schedule_id == schedule.id assert 
event.finished async def test_explicit_cleanup_avoid_schedules_still_having_jobs( self, raw_datastore: DataStore ) -> None: send, receive = create_memory_object_stream[Event](4) async with AsyncExitStack() as exit_stack: exit_stack.enter_context(send) exit_stack.enter_context(receive) scheduler = await exit_stack.enter_async_context( AsyncScheduler(raw_datastore, cleanup_interval=None) ) scheduler.subscribe(send.send, {ScheduleUpdated, JobAdded, JobReleased}) await scheduler.start_in_background() # Add a schedule to immediately set the event dummy_event = anyio.Event() await scheduler.configure_task("event_set", func=dummy_event.wait) schedule_id = await scheduler.add_schedule( "event_set", DateTrigger(datetime.now(timezone.utc)), id="event_set" ) # Wait for the job to be submitted event = await receive.receive() assert isinstance(event, JobAdded) assert event.schedule_id == schedule_id # Wait for the schedule to be updated event = await receive.receive() assert isinstance(event, ScheduleUpdated) assert event.schedule_id == schedule_id assert event.next_fire_time is None # Check that there is a job for the schedule jobs = await scheduler.get_jobs() assert len(jobs) == 1 assert jobs[0].schedule_id == schedule_id # After the cleanup, the schedule should still be around, with a # null next_fire_time await scheduler.cleanup() schedule = await scheduler.get_schedule("event_set") assert schedule.next_fire_time is None # Wait for the job to finish dummy_event.set() event = await receive.receive() assert isinstance(event, JobReleased) async def test_implicit_cleanup(self, mocker: MockerFixture) -> None: """ Test that the scheduler's cleanup() method is called when the scheduler is started. 
""" async with AsyncScheduler() as scheduler: event = anyio.Event() mocker.patch.object(scheduler.data_store, "cleanup", side_effect=event.set) await scheduler.start_in_background() with fail_after(3): await event.wait() async def test_wait_until_stopped(self) -> None: async with AsyncScheduler() as scheduler: await scheduler.add_job(scheduler.stop) await scheduler.wait_until_stopped() # This should be a no-op await scheduler.wait_until_stopped() async def test_max_concurrent_jobs(self) -> None: lock = Lock() scheduler = AsyncScheduler(max_concurrent_jobs=1) tasks_done = 0 async def acquire_release() -> None: nonlocal tasks_done lock.acquire_nowait() await sleep(0.1) tasks_done += 1 if tasks_done == 2: await scheduler.stop() lock.release() with fail_after(3): async with scheduler: await scheduler.configure_task("dummyjob", func=acquire_release) await scheduler.add_job("dummyjob") await scheduler.add_job("dummyjob") await scheduler.run_until_stopped() @pytest.mark.parametrize( "trigger_type, run_job", [ pytest.param("cron", False, id="cron"), pytest.param("date", True, id="date"), ], ) async def test_pause_unpause_schedule( self, raw_datastore: DataStore, timezone: ZoneInfo, trigger_type: str, run_job: bool, ) -> None: if trigger_type == "cron": trigger = CronTrigger() else: trigger = DateTrigger(datetime.now(timezone)) async with AsyncExitStack() as exit_stack: send, receive = create_memory_object_stream[Event](4) exit_stack.enter_context(send) exit_stack.enter_context(receive) scheduler = await exit_stack.enter_async_context( AsyncScheduler(data_store=raw_datastore, role=SchedulerRole.scheduler) ) schedule_id = await scheduler.add_schedule( dummy_async_job, trigger, id="foo" ) scheduler.subscribe(send.send, {ScheduleUpdated, JobAdded}) # Pause the schedule and wait for the schedule update event await scheduler.pause_schedule(schedule_id) schedule = await scheduler.get_schedule(schedule_id) assert schedule.paused event = await receive.receive() assert 
isinstance(event, ScheduleUpdated) if run_job: # Make sure that no jobs are added when the scheduler is started await scheduler.start_in_background() assert not await scheduler.get_jobs() # Unpause the schedule and wait for the schedule update event await scheduler.unpause_schedule(schedule_id) schedule = await scheduler.get_schedule(schedule_id) assert not schedule.paused event = await receive.receive() assert isinstance(event, ScheduleUpdated) if run_job: with fail_after(3): job_added_event = await receive.receive() assert isinstance(job_added_event, JobAdded) assert job_added_event.schedule_id == schedule_id async def test_schedule_job_result_expiration_time( self, raw_datastore: DataStore, timezone: ZoneInfo ) -> None: trigger = DateTrigger(datetime.now(timezone)) send, receive = create_memory_object_stream[Event](4) with send, receive: async with AsyncExitStack() as exit_stack: scheduler = await exit_stack.enter_async_context( AsyncScheduler(data_store=raw_datastore) ) await scheduler.add_schedule( dummy_async_job, trigger, id="foo", job_result_expiration_time=10 ) exit_stack.enter_context(scheduler.subscribe(send.send, {JobAdded})) await scheduler.start_in_background() # Wait for the scheduled job to be added with fail_after(3): event = await receive.receive() assert isinstance(event, JobAdded) assert event.schedule_id == "foo" # Get its result result = await scheduler.get_job_result(event.job_id) assert result assert result.outcome is JobOutcome.success assert result.return_value == "returnvalue" async def test_scheduler_crash_restart_schedule_immediately( self, raw_datastore: DataStore, timezone: ZoneInfo ) -> None: """ Test that the scheduler can immediately start processing a schedule it had acquired while the crash occurred. 
""" scheduler = AsyncScheduler(data_store=raw_datastore) error_patch = patch.object( raw_datastore, "release_schedules", side_effect=RuntimeError("Fake failure") ) with pytest.raises(ExceptionGroup) as exc_info, error_patch: async with scheduler: await scheduler.add_schedule( dummy_async_job, IntervalTrigger(minutes=1), id="foo" ) with move_on_after(3): await scheduler.run_until_stopped() pytest.fail("The scheduler did not crash") exc = exc_info.value while isinstance(exc, ExceptionGroup) and len(exc.exceptions) == 1: exc = exc.exceptions[0] assert isinstance(exc, RuntimeError) assert exc.args == ("Fake failure",) # Don't clear the data store at launch if isinstance(raw_datastore, BaseExternalDataStore): raw_datastore.start_from_scratch = False # Now reinitialize the scheduler and make sure the schedule gets processed # immediately async with scheduler: # Check that the schedule was left in an acquired state schedules = await scheduler.get_schedules() assert len(schedules) == 1 assert schedules[0].acquired_by == scheduler.identity assert schedules[0].acquired_until > datetime.now(timezone) # Start the scheduler and wait for the schedule to be processed await scheduler.start_in_background() with fail_after(scheduler.lease_duration.total_seconds() / 2): job_added_event = await scheduler.get_next_event(JobAdded) assert job_added_event.schedule_id == "foo" async def test_scheduler_crash_reap_abandoned_jobs( self, raw_datastore: DataStore, timezone: ZoneInfo ) -> None: """ Test that after the scheduler has crashed and been restarted, it immediately detects an abandoned job and releases it with the appropriate result code. 
""" scheduler = AsyncScheduler(data_store=raw_datastore) error_patch = patch.object( raw_datastore, "release_job", side_effect=RuntimeError("Fake failure") ) with pytest.raises(ExceptionGroup) as exc_info, error_patch: async with scheduler: job_id = await scheduler.add_job(dummy_async_job) with move_on_after(3): await scheduler.run_until_stopped() pytest.fail("The scheduler did not crash") exc = exc_info.value while isinstance(exc, ExceptionGroup) and len(exc.exceptions) == 1: exc = exc.exceptions[0] assert isinstance(exc, RuntimeError) assert exc.args == ("Fake failure",) # Don't clear the data store at launch if isinstance(raw_datastore, BaseExternalDataStore): raw_datastore.start_from_scratch = False # Now reinitialize the scheduler and make sure the job gets processed # immediately async with scheduler: # Check that the job was left in an acquired state jobs = await scheduler.get_jobs() assert len(jobs) == 1 assert jobs[0].acquired_by == scheduler.identity assert jobs[0].acquired_until > datetime.now(timezone) trigger_event = anyio.Event() job_released_event: JobReleased | None = None def event_callback(event: Event) -> None: nonlocal job_released_event job_released_event = cast(JobReleased, event) trigger_event.set() # Start the scheduler and wait for the job to be processed with scheduler.subscribe(event_callback, {JobReleased}): await scheduler.start_in_background() with fail_after(scheduler.lease_duration.total_seconds() / 2): await trigger_event.wait() assert job_released_event assert job_released_event.job_id == job_id assert job_released_event.outcome is JobOutcome.abandoned assert not await scheduler.get_jobs()
TestAsyncScheduler
python
pandas-dev__pandas
asv_bench/benchmarks/indexing.py
{ "start": 15690, "end": 15944 }
class ____: # GH#19299 def setup(self): N = 1000 cols = 500 self.df = DataFrame(index=range(N), columns=range(cols), dtype=object) def time_setitem_object_dtype(self): self.df.loc[0, 1] = 1.0
SetitemObjectDtype
python
django__django
tests/check_framework/test_commands.py
{ "start": 424, "end": 1025 }
class ____(SimpleTestCase): def test_migrate_and_makemigrations_autodetector_different(self): expected_error = Error( "The migrate and makemigrations commands must have the same " "autodetector.", hint=( "makemigrations.Command.autodetector is int, but " "migrate.Command.autodetector is MigrationAutodetector." ), id="commands.E001", ) self.assertEqual( checks.run_checks(app_configs=self.apps.get_app_configs()), [expected_error], )
CommandCheckTests
python
cython__cython
tests/run/pure_cdef_class_dataclass.py
{ "start": 1043, "end": 2430 }
class ____: """ >>> NoInitFields() NoInitFields(has_default=DummyObj(), has_factory='From a lambda', neither=None) >>> NoInitFields().has_default is NoInitFields().has_default True >>> NoInitFields(1) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): TypeError: NoInitFields.__init__() takes 1 positional argument but 2 were given >>> NoInitFields(has_default=1) # doctest: +ELLIPSIS Traceback (most recent call last): TypeError: ...has_default... >>> NoInitFields(has_factory=1) # doctest: +ELLIPSIS Traceback (most recent call last): TypeError: ...has_factory... >>> NoInitFields(neither=1) # doctest: +ELLIPSIS Traceback (most recent call last): TypeError: ...neither... """ has_default : object = cython.dataclasses.field(default=DummyObj(), init=False) has_factory : object = cython.dataclasses.field(default_factory=lambda: "From a lambda", init=False) # Cython will default-initialize to None neither : object = cython.dataclasses.field(init=False) def __post_init__(self): if not cython.compiled: # Cython will default-initialize this to None, while Python won't # and not initializing it will mess up repr assert not hasattr(self, "neither") self.neither = None @cython.dataclasses.dataclass
NoInitFields
python
pydantic__pydantic
tests/mypy/outputs/mypy-default_ini/metaclass_args.py
{ "start": 740, "end": 1021 }
class ____(BaseModel): i: int = Field(2, alias='j') NoArguments(i=1) # MYPY: error: Unexpected keyword argument "i" for "NoArguments" [call-arg] NoArguments(j=None) # MYPY: error: Argument "j" to "NoArguments" has incompatible type "None"; expected "int" [arg-type]
NoArguments
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/renderer.py
{ "start": 11969, "end": 12130 }
class ____(Enum): "Enum: whether or not CPR is supported." SUPPORTED = "SUPPORTED" NOT_SUPPORTED = "NOT_SUPPORTED" UNKNOWN = "UNKNOWN"
CPR_Support
python
pytorch__pytorch
test/export/test_export.py
{ "start": 99618, "end": 609495 }
class ____(torch.nn.Module): def forward(self, x): x: "f32[3, 3]"; x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec) _guards_fn = self._guards_fn(x); _guards_fn = None sum_1: "f32[]" = torch.ops.aten.sum.default(x) gt: "b8[]" = torch.ops.aten.gt.Scalar(sum_1, 3); sum_1 = None true_graph_0 = self.true_graph_0 false_graph_0 = self.false_graph_0 cond = torch.ops.higher_order.cond(gt, true_graph_0, false_graph_0, ()); gt = true_graph_0 = false_graph_0 = None getitem = cond[0]; cond = getitem = None select: "f32[3]" = torch.ops.aten.select.int(x, 0, 0); x = None return pytree.tree_unflatten((select,), self._out_spec) class true_graph_0(torch.nn.Module): def forward(self): return (0,) class false_graph_0(torch.nn.Module): def forward(self): return (0,) """, # noqa: B950 ) self.assertEqual(m(*args), ep.module()(*args)) @torch._dynamo.config.patch(capture_scalar_outputs=True) def test_cond_contains_unbacked_no_escape(self): class M(torch.nn.Module): def forward(self, a, b1, b2, c): def true_fn(x): return x * b1.item() def false_fn(x): return x * b2.item() r = torch.cond(a, true_fn, false_fn, (c,)) return r * 2 args = ( torch.tensor(True), torch.tensor([4]), torch.tensor([4]), torch.randn(10, requires_grad=True), ) torch.export.export(M(), args) def test_cond_int_closure(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.num = 4 def forward(self, a, x): def true_fn(x): return x * self.num def false_fn(x): return x + self.num r = torch.cond(a, true_fn, false_fn, (x,)) return r * 2 args = (torch.tensor(True), torch.randn(10)) ep = torch.export.export(M(), args) self.assertEqual(ep.module()(*args), M()(*args)) def test_state_tensors(self): class M(torch.nn.Module): # simple with register buffer def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.ones(2, 3), persistent=False) def forward(self, x): # x = 2 y = self.buf # y = 1 w1 = self.buf + 3 w2 = self.buf + 4 w3 = self.buf + 5 self.buf = w1 z = self.buf self.buf = 
w3 # z = 4 return x + y + z + w2 ep = export(M(), (torch.randn(2, 3),), strict=False).run_decompositions({}) self.assertEqual(list(ep.graph_signature.buffers_to_mutate.values()), ["buf"]) self.assertTrue( torch.allclose(ep.module()(torch.ones(2, 3) + 1), torch.ones(2, 3) * 12) ) class M(torch.nn.Module): # simple without register buffer def __init__(self) -> None: super().__init__() self.buf = torch.ones(2, 3) def forward(self, x): # x = 2 y = self.buf # y = 1 self.buf = self.buf + 3 z = self.buf # z = 3 return x + y + z with self.assertWarnsRegex( UserWarning, "The tensor attribute self.buf was assigned during export", ): export(M(), (torch.randn(2, 3),), strict=False) class M(torch.nn.Module): # complex with register buffer def __init__(self) -> None: super().__init__() tensors = [torch.ones(2, 3), torch.ones(2, 3)] for i, tensor in enumerate(tensors): self.register_buffer(f"buf_{i}", tensor, persistent=False) def get_tensor(self, i): return getattr(self, f"buf_{i}") def set_tensor(self, i, val): setattr(self, f"buf_{i}", val) def forward(self, x): # x = 2 y = self.get_tensor(0) + self.get_tensor(1) # y = 1 + 1 self.set_tensor(0, torch.ones(2, 3) + 2) self.set_tensor(1, torch.ones(2, 3) + 2) z = self.get_tensor(0) + self.get_tensor(1) # z = 3 + 3 return x + y + z ep = export(M(), (torch.randn(2, 3),), strict=False).run_decompositions({}) self.assertEqual( list(ep.graph_signature.buffers_to_mutate.values()), ["buf_0", "buf_1"] ) self.assertTrue( torch.allclose(ep.module()(torch.ones(2, 3) + 1), torch.ones(2, 3) * 10) ) class M(torch.nn.Module): # complex without register buffer def __init__(self) -> None: super().__init__() self.tensors = [torch.ones(2, 3), torch.ones(2, 3)] def get_tensor(self, i): return self.tensors[i] def set_tensor(self, i, val): self.tensors[i] = val def forward(self, x): # x = 2 y = self.get_tensor(0) + self.get_tensor(1) # y = 1 + 1 self.set_tensor(0, torch.ones(2, 3) + 2) self.set_tensor(1, torch.ones(2, 3) + 2) z = self.get_tensor(0) + 
self.get_tensor(1) # z = 3 + 3 return x + y + z with self.assertWarnsRegex( UserWarning, "The tensor attributes self.tensors\\[0\\], self.tensors\\[1\\] were assigned during export", ): export(M(), (torch.randn(2, 3),), strict=False) @torch._dynamo.config.patch(capture_scalar_outputs=True) def test_while_loop_tensor_constant_idx(self): def while_loop_decomp(x, y0): out = torch.zeros_like(x) def cond_fn(idx, out, y0): return idx < out.size(0) def body_fn(idx, out, y0): i = idx.item() # TODO removing those causes PendingUnbackedSymbolNotFound. torch._check(i >= 0) torch._check(i < x.size(0)) y0 = x[i] + y0 out = out.clone() out[i] = y0 return idx + 1, out, y0 cnt = torch.tensor(0) _, out, _ = while_loop(cond_fn, body_fn, [cnt, out, y0]) return out class TestModel(torch.nn.Module): def forward(self, x, y0): return while_loop_decomp(x, y0) x, y0 = torch.randn(16, 8), torch.randn(8) exp_out = TestModel()(x, y0) ep = export(TestModel(), (x, y0)) out = ep.module()(x, y0) self.assertEqual(exp_out, out) def test_malformed_fqn_from_source_name(self): # See https://github.com/pytorch/pytorch/issues/141939 from types import MethodType class Block(torch.nn.Module): def __init__(self, i, o): super().__init__() self.to_out = torch.nn.ModuleList([]) self.to_out.append(torch.nn.Linear(i, o, bias=True)) self.to_out.append(torch.nn.Dropout(0.5)) def forward(self, x): for l in self.to_out: x = l(x) return x class Problem1(torch.nn.Module): def __init__(self): super().__init__() self.blocks = torch.nn.ModuleDict( {f"{i}": Block(64, 64) for i in range(5)} ) def forward(self, x): for k, m in self.blocks.items(): x = m(x) return x class Problem2(torch.nn.Module): def __init__(self): super().__init__() self.blocks = torch.nn.ModuleList([Block(64, 64) for i in range(5)]) def forward(self, x): x = self.blocks[0](x) for m in self.blocks[1:4]: x = m(x) return x def _split_after_forward(self, *args, **kwargs): return self._orig_forward(*args, **kwargs) def annotate_split_points(mod: 
torch.nn.Module, spec): for qualname, split_type in spec.items(): atoms = qualname.split(".") predecessor_module = mod for i, atom in enumerate(atoms[:-1]): try: predecessor_module = getattr(predecessor_module, atom) except AttributeError as e: raise e mod_to_wrap = getattr(predecessor_module, atoms[-1]) mod_to_wrap._orig_forward = mod_to_wrap.forward mod_to_wrap.forward = MethodType(_split_after_forward, mod_to_wrap) for problem in [Problem1, Problem2]: m = problem() m(torch.rand(64, 64)) # simplified torch.distributed.pipeline code annotate_split_points(m, {"blocks.1": 1, "blocks.3": 1}) gm = export(m, (torch.rand(64, 64),)) torch.export.unflatten(gm) @testing.expectedFailureStrictV2 def test_unflatten_closure(self): class Dummy(torch.nn.Module): def forward(self, fn, x): y = x + 2 z = fn(y) return z + 4 class N(torch.nn.Module): def forward(self, x): return x + 3 class M(torch.nn.Module): def __init__(self): super().__init__() self.dummy = Dummy() self.n = N() def forward(self, x): y = x + 1 z = self.dummy(lambda k: self.n(y + k) + y, y) return z + 5 m = M() x = torch.randn(3) ep = export(m, (x,)) ufm = torch.export.unflatten(ep) self.assertExpectedInline( str(ufm.graph_module.code).strip(), """\ def forward(self, x): add = torch.ops.aten.add.Tensor(x, 1); x = None dummy = self.dummy(add); add = None add_6 = torch.ops.aten.add.Tensor(dummy, 5); dummy = None return (add_6,)""", ) self.assertExpectedInline( str(ufm.dummy.graph_module.code).strip(), """\ def forward(self, add): add_1 = torch.ops.aten.add.Tensor(add, 2) add_2 = torch.ops.aten.add.Tensor(add, add_1); add_1 = None add_3 = torch.ops.aten.add.Tensor(add_2, 3); add_2 = None add_4 = torch.ops.aten.add.Tensor(add_3, add); add_3 = add = None add_5 = torch.ops.aten.add.Tensor(add_4, 4); add_4 = None return add_5""", ) def test_state_primitives(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.x = 1 self.y = {"k": 2} self.z = (3,) def forward(self, x): self.x = self.x + 4 
self.y["k"] = self.y["k"] + 5 self.z = (self.z[0] + 6,) return x + self.x + self.y["k"] + self.z[0] ep = export(M(), (torch.randn(2, 3),)) self.assertTrue( torch.allclose(ep.module()(torch.zeros(2, 3)), torch.ones(2, 3) * 21) ) def test_state_shape_attribute_assignment(self): class TestModule(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(10, 10) self.last_z_shape = self.linear.weight.shape def forward(self, x): self.last_z_shape = x.shape return self.linear(x) model = TestModule() x = torch.randn(20, 10) ep_model = export(model, (x,), strict=False).module() self.assertTrue(torch.allclose(model(x), ep_model(x))) def test_output_node_name(self): class TestModule(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(10, 10) def forward(self, x): return self.linear(x) model = TestModule() x = torch.randn(20, 10) ep_model = export(model, (x,), strict=False).module() self.assertEqual(list(ep_model.graph.nodes)[-1].name, "output") self.assertTrue(torch.allclose(model(x), ep_model(x))) def test_real_tensor_size_mismatch(self): from torch._subclasses.fake_tensor import MetadataMismatchError class M(torch.nn.Module): def forward(self, a, b): return torch.ops.mylib.foo(a, b) @torch.library.custom_op("mylib::foo", mutates_args={}) def foo(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: return a + b @foo.register_fake def foo_fake_impl(a, b): m, n = a.shape return torch.empty(n, m) # incorrectly permute error_type = ( MetadataMismatchError if is_non_strict_test(self._testMethodName) else torch._dynamo.exc.TorchRuntimeError ) with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True): # won't catch anything if dims are equal export( M(), (torch.randn(4, 4), torch.randn(4, 4)), ) # catch concrete inequality with self.assertRaisesRegex( error_type, r"Real tensor propagation found an output size mismatch between fake shape 8 and real shape 4, " r"at output\.size\(0\), for func: 
mylib.foo.default", ): export( M(), (torch.randn(4, 8), torch.randn(4, 8)), ) # same test with dynamic shapes d0 = Dim("d0") d1 = Dim("d1") export( M(), (torch.randn(4, 4), torch.randn(4, 4)), dynamic_shapes={ "a": (d0, d1), "b": (d0, d1), }, ) with self.assertRaisesRegex( error_type, r"Real tensor propagation found an output size mismatch between fake shape s\d+ and real shape 4, " r"at output\.size\(0\), for func: mylib.foo.default", ): export( M(), (torch.randn(4, 8), torch.randn(4, 8)), dynamic_shapes={ "a": (d0, d1), "b": (d0, d1), }, ) def test_real_tensor_alias_dtype_mismatch(self): from torch._subclasses.fake_tensor import MetadataMismatchError error_type = ( MetadataMismatchError if is_non_strict_test(self._testMethodName) else torch._dynamo.exc.TorchRuntimeError ) # test alias case class M(torch.nn.Module): def forward(self, a): return torch.ops.mylib.foo_alias(a) @torch.library.custom_op("mylib::foo_alias", mutates_args={}) def foo_alias(a: torch.Tensor) -> torch.Tensor: return a * 2 @foo_alias.register_fake def foo_fake_impl(a): return a with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True): with self.assertRaisesRegex( error_type, r"Real tensor propagation found an aliasing mismatch between fake output (.*\n)*.* " r"and real output (.*\n)*.* for func: mylib.foo_alias.default", ): ep = export(M(), (torch.randn(4, 4),)) # test dtype case class N(torch.nn.Module): def forward(self, a): return torch.ops.mylib.foo_dtype(a) @torch.library.custom_op("mylib::foo_dtype", mutates_args={}) def foo_dtype(a: torch.Tensor) -> torch.Tensor: return a * 2 @foo_dtype.register_fake def foo_fake_impl(a): m, n = a.shape return torch.empty([m, n], dtype=torch.int32) with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True): with self.assertRaisesRegex( error_type, r"Real tensor propagation found a metadata mismatch between fake tensor (.*\n)*.* " r"and real tensor (.*\n)*.* at output, for func: mylib.foo_dtype.default", ): ep = 
export(N(), (torch.randn(4, 4),)) def test_real_tensor_for_max_op(self): class Foo(torch.nn.Module): def forward(self, x, y): x = x[x > 0] y = y[y > 0] return max(x.shape[0], y.shape[0]) model = Foo() inputs = (torch.zeros(64), torch.ones(64)) with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True): ep = export(model, inputs) self.assertEqual(ep.module()(*inputs), model(*inputs)) x = torch.zeros(64) y = torch.ones(64) # This seems to be a bug with old export because when we pass in x, x # as input, runtime assertion should fail. This is because we would create # guard on y.shape[0] > x.shape[0] but somehow in old export, we dce this # assertion. self.assertEqual(ep.module()(x, x), model(x, x)) self.assertEqual(ep.module()(x, y), model(x, y)) def test_draft_export_checks_mutation_with_nan(self): @torch.library.custom_op("export::foo", mutates_args={}) def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: return x + y @foo.register_fake def _(x, y): return x + y class Foo(torch.nn.Module): def forward(self, x, y): return foo(x, y) model = Foo() inputs = (torch.full((64,), torch.nan), torch.full((64,), torch.nan)) with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True): ep = export(model, inputs) def test_draft_export_checks_mutation(self): @torch.library.custom_op("export::foo", mutates_args={}) def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: y.add_(1) return x.clone() @foo.register_fake def _(x, y): return x.clone() class Foo(torch.nn.Module): def forward(self, x, y): return foo(x, y) model = Foo() inputs = (torch.randn(64), torch.randn(64)) with self.assertRaisesRegex(RuntimeError, "for argument 'y'"): with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True): ep = export(model, inputs) @torch.library.custom_op("export::foo", mutates_args={"y"}) def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: y.add_(1) return x.clone() @foo.register_fake def _(x, y): return x.clone() # No errors 
with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True): ep = export(model, inputs) def test_draft_export_checks_mutation_list(self): @torch.library.custom_op("export::foo", mutates_args={}) def foo(xs: List[torch.Tensor]) -> torch.Tensor: x, y = xs y.add_(1) return x.clone() @foo.register_fake def _(xs): x, y = xs return x.clone() class Foo(torch.nn.Module): def forward(self, xs): return foo(xs) model = Foo() inputs = ([torch.randn(64), torch.randn(64)],) with self.assertRaisesRegex(RuntimeError, "for argument 'xs'"): with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True): ep = export(model, inputs) @torch.library.custom_op("export::foo", mutates_args={"xs"}) def foo(xs: List[torch.Tensor]) -> torch.Tensor: x, y = xs y.add_(1) return x.clone() @foo.register_fake def _(xs): x, y = xs return x.clone() # No errors with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True): ep = export(model, inputs) def test_draft_export_checks_aliasing(self): @torch.library.custom_op("export::foo", mutates_args={}) def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: return x @foo.register_fake def _(x, y): return x.clone() class Foo(torch.nn.Module): def forward(self, x, y): return foo(x, y) model = Foo() inputs = (torch.randn(64), torch.randn(64)) with self.assertRaisesRegex(RuntimeError, "may not alias"): with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True): ep = export(model, inputs) @torch.library.custom_op("export::foo", mutates_args={}) def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: return x.clone() @foo.register_fake def _(x, y): return x.clone() # No errors with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True): ep = export(model, inputs) def test_draft_export_infers_fake_kernel(self): strict = True with torch.library._scoped_library("export", "FRAGMENT") as lib: lib.define("bar(Tensor x) -> Tensor") lib.impl("bar", lambda x: x[0].clone(), "CPU") 
@torch.library.custom_op("export::foo", mutates_args={}) def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: return x * y class Foo(torch.nn.Module): def forward(self, x, y): return foo(x, y), torch.ops.export.bar(y) model = Foo() inputs = (torch.randn(1, 3), torch.randn(2, 1)) with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True): ep = export(model, inputs, strict=strict) # expecttest only works for the base TestExport class. if self.__class__ != TestExport: return self.assertExpectedInline( str(ep.graph_module.code).strip(), """\ def forward(self, x, y): foo = torch.ops.export.foo.default(x, y); x = None sym_size_int = torch.ops.aten.sym_size.int(foo, 0) sym_size_int_1 = torch.ops.aten.sym_size.int(foo, 1) sym_constrain_range_for_size_default = torch.ops.aten.sym_constrain_range_for_size.default(sym_size_int); sym_constrain_range_for_size_default = None ge = sym_size_int >= 0; sym_size_int = None _assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None sym_constrain_range_for_size_default_1 = torch.ops.aten.sym_constrain_range_for_size.default(sym_size_int_1); sym_constrain_range_for_size_default_1 = None ge_1 = sym_size_int_1 >= 0; sym_size_int_1 = None _assert_scalar_default_1 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u1 >= 0 on node 'ge_1'"); ge_1 = _assert_scalar_default_1 = None bar = torch.ops.export.bar.default(y); y = None sym_size_int_2 = torch.ops.aten.sym_size.int(bar, 0) sym_constrain_range_for_size_default_2 = torch.ops.aten.sym_constrain_range_for_size.default(sym_size_int_2); sym_constrain_range_for_size_default_2 = None ge_2 = sym_size_int_2 >= 0; sym_size_int_2 = None _assert_scalar_default_2 = torch.ops.aten._assert_scalar.default(ge_2, "Runtime assertion failed for expression u2 >= 0 on node 'ge_2'"); ge_2 = _assert_scalar_default_2 = None return (foo, bar)""", ) 
    def test_draft_export_fake_kernel_inference_errors(self):
        """Fake-kernel inference must reject un-fakeable real outputs.

        With ``fake_tensor_propagate_real_tensors=True``, export infers fake
        kernels from the real kernel's outputs.  Outputs whose layout cannot
        be described by a fake tensor — a non-zero storage offset, or a
        strided view that is not dense in memory — must raise.
        """

        @torch.library.custom_op("export::foo", mutates_args={})
        def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
            # Row 4 of an expanded-then-contiguous tensor -> the returned
            # tensor has a non-zero storage offset.
            return x.expand(32, 3).contiguous()[4]

        class Foo(torch.nn.Module):
            def forward(self, x, y):
                return foo(x, y)

        model = Foo()
        inputs = (torch.randn(1, 3), torch.randn(2, 1))

        with self.assertRaisesRegex(RuntimeError, "non-zero storage offset"):
            with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True):
                ep = export(model, inputs)

        # Re-register the op: diagonal() is a strided view that is not
        # dense in memory, which is also un-fakeable.
        @torch.library.custom_op("export::foo", mutates_args={})
        def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
            return torch.randn(3, 3).diagonal()

        with self.assertRaisesRegex(RuntimeError, "not dense in memory"):
            with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True):
                ep = export(model, inputs)

    def test_subclasses_parameterization(self):
        """Export of a module holding a tensor-subclass Parameter.

        Checks that the (pre-decomposition) training IR keeps the subclass
        parameter as a single placeholder.
        """
        if "cpp_runtime_nonstrict" in self.id():
            self.skipTest("TODO Unexpected success in OSS but not in fbcode.")

        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.p1 = torch.nn.Parameter(torch.ones(3, 4))
                # Subclass parameter: a CustomTensorPlainOut wrapping two
                # plain tensors (project-local test subclass).
                self.p2 = torch.nn.Parameter(
                    CustomTensorPlainOut(torch.ones(3, 4), torch.ones(3, 4))
                )

            def forward(self, x):
                a = (2 * self.p1 + self.p2).sum()
                return x + a

        m = Foo()
        ref_x = torch.randn(3, 4)
        ref_out = m(ref_x)
        ep_training = torch.export.export(m, (ref_x,))
        self.assertExpectedInline(
            str(ep_training.graph).strip(),
            """\
graph():
    %p_p1 : [num_users=1] = placeholder[target=p_p1]
    %p_p2 : [num_users=1] = placeholder[target=p_p2]
    %x : [num_users=1] = placeholder[target=x]
    %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%p_p1, 2), kwargs = {})
    %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %p_p2), kwargs = {})
    %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {})
    %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %sum_1), kwargs = {})
return (add_1,)""", ) ep = export(m, (ref_x,)).run_decompositions({}) self.assertExpectedInline( str(ep.graph).strip(), """\ graph(): %p_p1 : [num_users=1] = placeholder[target=p_p1] %p_parametrizations_p2_original0 : [num_users=1] = placeholder[target=p_parametrizations_p2_original0] %p_parametrizations_p2_original1 : [num_users=1] = placeholder[target=p_parametrizations_p2_original1] %x : [num_users=1] = placeholder[target=x] %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%p_p1, 2), kwargs = {}) %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %p_parametrizations_p2_original0), kwargs = {}) %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %p_parametrizations_p2_original1), kwargs = {}) %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %add_1), kwargs = {}) %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add_2,), kwargs = {}) %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %sum_1), kwargs = {}) return (add_3,)""", ) res = ep.module()(ref_x) self.assertEqual(res, ref_out) @testing.expectedFailureCppRuntimeNonStrict def test_subclasses_parameterization_nested(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() self.p1 = torch.nn.Parameter(torch.ones(2, 2)) self.p2 = torch.nn.Parameter( CustomTensorPlainOut( CustomTensorPlainOut( torch.Tensor([[0, 0], [0, 1]]), torch.Tensor([[0, 0], [1, 0]]), ), CustomTensorPlainOut( torch.Tensor([[1, 0], [0, 0]]), torch.Tensor([[0, 1], [0, 0]]), ), ) ) def forward(self, x): a = (x + 2 * self.p1 + self.p2).sum().sum() return x + a m = Foo() ref_x = torch.randn(2, 2) ref_out = m(ref_x) ep_training = torch.export.export(m, (ref_x,), strict=False) self.assertExpectedInline( str(ep_training.graph).strip(), """\ graph(): %p_p1 : [num_users=1] = placeholder[target=p_p1] %p_p2 : [num_users=1] = 
placeholder[target=p_p2] %x : [num_users=2] = placeholder[target=x] %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%p_p1, 2), kwargs = {}) %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %mul), kwargs = {}) %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %p_p2), kwargs = {}) %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add_1,), kwargs = {}) %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sum_1,), kwargs = {}) %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %sum_2), kwargs = {}) return (add_2,)""", ) ep = export(m, (ref_x,)) ep = ep.run_decompositions({}) self.assertExpectedInline( str(ep.graph).strip(), """\ graph(): %p_p1 : [num_users=1] = placeholder[target=p_p1] %p_parametrizations_p2_original0 : [num_users=1] = placeholder[target=p_parametrizations_p2_original0] %p_parametrizations_p2_original1 : [num_users=1] = placeholder[target=p_parametrizations_p2_original1] %p_parametrizations_p2_original2 : [num_users=1] = placeholder[target=p_parametrizations_p2_original2] %p_parametrizations_p2_original3 : [num_users=1] = placeholder[target=p_parametrizations_p2_original3] %x : [num_users=2] = placeholder[target=x] %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%p_p1, 2), kwargs = {}) %add : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %mul), kwargs = {}) %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %p_parametrizations_p2_original0), kwargs = {}) %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %p_parametrizations_p2_original1), kwargs = {}) %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %add_2), kwargs = {}) %add_4 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%add, %p_parametrizations_p2_original2), kwargs = {}) %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %p_parametrizations_p2_original3), kwargs = {}) %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %add_5), kwargs = {}) %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %add_6), kwargs = {}) %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add_7,), kwargs = {}) %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sum_1,), kwargs = {}) %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %sum_2), kwargs = {}) return (add_8,)""", ) res = ep.module()(ref_x) self.assertEqual(res, ref_out) @testing.expectedFailureSerDer # can't serialize functorch ops @testing.expectedFailureSerDerNonStrict # can't serialize functorch ops @testing.expectedFailureCppRuntime def test_vmap(self): class Vmap(torch.nn.Module): def forward(self, x, y): f = lambda x, y: (x * y + 1).sum(dim=0) # noqa: E731 vmapped = torch.vmap(f)(x, y) return vmapped.sum(dim=0) DYN = torch.export.Dim.DYNAMIC inputs = (torch.tensor([1.0, 2.0, 3.0]), torch.tensor([0.1, 0.2, 0.3])) dynamic = {"x": {0: DYN}, "y": {0: DYN}} ep = torch.export.export(Vmap(), inputs, {}, dynamic_shapes=dynamic) self.assertExpectedInline( str(ep.graph).strip(), """\ graph(): %x : [num_users=1] = placeholder[target=x] %y : [num_users=2] = placeholder[target=y] %sym_size_int_3 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%y, 0), kwargs = {}) %lazy_load_decompositions : [num_users=0] = call_function[target=torch._functorch.predispatch.lazy_load_decompositions](args = (), kwargs = {}) %_vmap_increment_nesting : [num_users=0] = call_function[target=torch._functorch.predispatch._vmap_increment_nesting](args = (%sym_size_int_3, error), kwargs = {}) 
%_add_batch_dim : [num_users=1] = call_function[target=torch._functorch.predispatch._add_batch_dim](args = (%x, 0, 1), kwargs = {}) %_add_batch_dim_1 : [num_users=1] = call_function[target=torch._functorch.predispatch._add_batch_dim](args = (%y, 0, 1), kwargs = {}) %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_add_batch_dim, %_add_batch_dim_1), kwargs = {}) %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {}) %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add, [0]), kwargs = {}) %_remove_batch_dim : [num_users=1] = call_function[target=torch._functorch.predispatch._remove_batch_dim](args = (%sum_1, 1, %sym_size_int_3, 0), kwargs = {}) %_vmap_decrement_nesting : [num_users=0] = call_function[target=torch._functorch.predispatch._vmap_decrement_nesting](args = (), kwargs = {}) %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%_remove_batch_dim, [0]), kwargs = {}) return (sum_2,)""", ) ep = torch.export.export( Vmap(), inputs, {}, dynamic_shapes=dynamic, strict=True ) self.assertExpectedInline( str(ep.graph).strip(), """\ graph(): %x : [num_users=1] = placeholder[target=x] %y : [num_users=2] = placeholder[target=y] %sym_size_int_2 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%y, 0), kwargs = {}) %lazy_load_decompositions : [num_users=0] = call_function[target=torch._functorch.predispatch.lazy_load_decompositions](args = (), kwargs = {}) %_vmap_increment_nesting : [num_users=0] = call_function[target=torch._functorch.predispatch._vmap_increment_nesting](args = (%sym_size_int_2, error), kwargs = {}) %_add_batch_dim : [num_users=1] = call_function[target=torch._functorch.predispatch._add_batch_dim](args = (%x, 0, 1), kwargs = {}) %_add_batch_dim_1 : [num_users=1] = call_function[target=torch._functorch.predispatch._add_batch_dim](args = (%y, 0, 1), kwargs = {}) %mul : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%_add_batch_dim, %_add_batch_dim_1), kwargs = {}) %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {}) %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add, [0]), kwargs = {}) %_remove_batch_dim : [num_users=1] = call_function[target=torch._functorch.predispatch._remove_batch_dim](args = (%sum_1, 1, %sym_size_int_2, 0), kwargs = {}) %_vmap_decrement_nesting : [num_users=0] = call_function[target=torch._functorch.predispatch._vmap_decrement_nesting](args = (), kwargs = {}) %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%_remove_batch_dim, [0]), kwargs = {}) return (sum_2,)""", ) self.assertTrue(torch.allclose(ep.module()(*inputs), Vmap()(*inputs))) ep = export(Vmap(), inputs, {}, dynamic_shapes=dynamic).run_decompositions({}) self.assertTrue(torch.allclose(ep.module()(*inputs), Vmap()(*inputs))) @testing.expectedFailureLegacyExportNonStrict # Old export doesn't work with subclasses @testing.expectedFailureLegacyExportStrict # Old export doesn't work with subclasses def test_subclass_nested_attr_access(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() self.p1 = torch.nn.Parameter(torch.ones(3, 4)) self.p2 = torch.nn.Parameter( TwoTensor( TwoTensor(torch.ones(3, 4), torch.ones(3, 4)), TwoTensor(torch.ones(3, 4), torch.ones(3, 4)), ) ) self.b1 = torch.nn.Buffer( TwoTensor( TwoTensor(torch.ones(3, 4), torch.ones(3, 4)), TwoTensor(torch.ones(3, 4), torch.ones(3, 4)), ) ) def forward(self, x): res = (2 * self.p1 + self.p2 + self.b1).sum() return x + res.get_elem_a().b m = Foo() ref_x = torch.randn(3, 4) ref_out = m(ref_x) ep_training = torch.export.export(m, (ref_x,), strict=False) self.assertTrue(torch.allclose(ep_training.module()(ref_x), ref_out)) self.assertExpectedInline( str(ep_training.graph).strip(), """\ graph(): %p_p1 : [num_users=1] = placeholder[target=p_p1] 
%p_p2 : [num_users=1] = placeholder[target=p_p2] %b_b1 : [num_users=1] = placeholder[target=b_b1] %x : [num_users=1] = placeholder[target=x] %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%p_p1, 2), kwargs = {}) %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %p_p2), kwargs = {}) %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %b_b1), kwargs = {}) %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add_1,), kwargs = {}) %access_subclass_inner_tensor_default_64 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%sum_1, a), kwargs = {}) %access_subclass_inner_tensor_default_69 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%access_subclass_inner_tensor_default_64, b), kwargs = {}) %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %access_subclass_inner_tensor_default_69), kwargs = {}) return (add_2,)""", ) ep = export(m, (ref_x,)) self.assertTrue(torch.allclose(ep.module()(ref_x), ref_out)) def test_subclass_nested_attr_access_submodule(self): class Bar(torch.nn.Module): def __init__(self): super().__init__() self.p1 = torch.nn.Parameter(torch.ones(3, 4)) self.p2 = torch.nn.Parameter( TwoTensor( TwoTensor(torch.ones(3, 4), torch.ones(3, 4)), TwoTensor(torch.ones(3, 4), torch.ones(3, 4)), ) ) self.b1 = torch.nn.Buffer( TwoTensor( TwoTensor(torch.ones(3, 4), torch.ones(3, 4)), TwoTensor(torch.ones(3, 4), torch.ones(3, 4)), ) ) def forward(self, x): return x class Foo(torch.nn.Module): def __init__(self): super().__init__() self.bar = Bar() def forward(self, x): res = (2 * self.bar.p1 + self.bar.p2 + self.bar.b1).sum() return x + res.get_elem_a().b m = Foo() ref_x = torch.randn(3, 4) ref_out = m(ref_x) ep_training = torch.export.export(m, (ref_x,), strict=False) self.assertExpectedInline( 
str(ep_training.graph).strip(), """\ graph(): %p_bar_p1 : [num_users=1] = placeholder[target=p_bar_p1] %p_bar_p2 : [num_users=1] = placeholder[target=p_bar_p2] %b_bar_b1 : [num_users=1] = placeholder[target=b_bar_b1] %x : [num_users=1] = placeholder[target=x] %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%p_bar_p1, 2), kwargs = {}) %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %p_bar_p2), kwargs = {}) %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %b_bar_b1), kwargs = {}) %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add_1,), kwargs = {}) %access_subclass_inner_tensor_default_64 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%sum_1, a), kwargs = {}) %access_subclass_inner_tensor_default_69 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%access_subclass_inner_tensor_default_64, b), kwargs = {}) %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %access_subclass_inner_tensor_default_69), kwargs = {}) return (add_2,)""", ) ep = export(m, (ref_x,)) self.assertTrue(torch.allclose(ep.module()(ref_x), ref_out)) def test_subclass_nested_attr_access_const_metadata(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() self.p1 = torch.nn.Parameter(torch.ones(3, 4)) self.p2 = torch.nn.Parameter( ConstantExtraMetadataTensor( ConstantExtraMetadataTensor(torch.ones(3, 4)), ) ) def forward(self, x): res = 2 * self.p1 + self.p2 res2 = res + res.constant_attribute return x + res2.elem.elem m = Foo() ref_x = torch.randn(3, 4) ref_out = m(ref_x) ep_training = torch.export.export(m, (ref_x,), strict=False) self.assertExpectedInline( str(ep_training.graph).strip(), """\ graph(): %p_p1 : [num_users=1] = placeholder[target=p_p1] %p_p2 : [num_users=1] = placeholder[target=p_p2] %x : 
[num_users=1] = placeholder[target=x] %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%p_p1, 2), kwargs = {}) %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %p_p2), kwargs = {}) %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 4), kwargs = {}) %access_subclass_inner_tensor_default_10 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%add_1, elem), kwargs = {}) %access_subclass_inner_tensor_default_13 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%access_subclass_inner_tensor_default_10, elem), kwargs = {}) %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %access_subclass_inner_tensor_default_13), kwargs = {}) return (add_2,)""", ) ep = export(m, (ref_x,)) self.assertTrue(torch.allclose(ep.module()(ref_x), ref_out)) def test_subclass_nested_attr_access_const_metadata_not_top_level(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() self.p1 = torch.nn.Parameter(torch.ones(3, 4)) self.p2 = torch.nn.Parameter( ConstantExtraMetadataTensor( ConstantExtraMetadataTensor(torch.ones(3, 4)), ) ) def forward(self, x): res = 2 * self.p1 + self.p2 res2 = res + res.constant_attribute return x + res2.elem.elem m = Foo() ref_x = torch.randn(3, 4) ref_out = m(ref_x) ep_training = torch.export.export(m, (ref_x,), strict=False) self.assertExpectedInline( str(ep_training.graph).strip(), """\ graph(): %p_p1 : [num_users=1] = placeholder[target=p_p1] %p_p2 : [num_users=1] = placeholder[target=p_p2] %x : [num_users=1] = placeholder[target=x] %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%p_p1, 2), kwargs = {}) %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %p_p2), kwargs = {}) %add_1 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%add, 4), kwargs = {}) %getattr_22 : [num_users=1] = call_function[target=builtins.getattr](args = (%add_1, elem), kwargs = {}) %getattr_27 : [num_users=1] = call_function[target=builtins.getattr](args = (%getattr_22, elem), kwargs = {}) %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %getattr_27), kwargs = {}) return (add_2,)""", ) ep = export(m, (ref_x,)) self.assertTrue(torch.allclose(ep.module()(ref_x), ref_out)) def test_subclass_nested_attr_access_const_metadata_not_top_level(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() self.p1 = torch.nn.Parameter(torch.ones(3, 4)) self.p2 = torch.nn.Parameter( TwoTensor( ConstantExtraMetadataTensor(torch.ones(3, 4)), ConstantExtraMetadataTensor(torch.ones(3, 4)), ) ) def forward(self, x): res = 2 * self.p1 + self.p2 res2 = res + res.a.elem + res.b.constant_attribute return x + res2.a.elem m = Foo() ref_x = torch.randn(3, 4) ref_out = m(ref_x) ep_training = torch.export.export(m, (ref_x,), strict=False) self.assertExpectedInline( str(ep_training.graph).strip(), """\ graph(): %p_p1 : [num_users=1] = placeholder[target=p_p1] %p_p2 : [num_users=1] = placeholder[target=p_p2] %x : [num_users=1] = placeholder[target=x] %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%p_p1, 2), kwargs = {}) %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %p_p2), kwargs = {}) %access_subclass_inner_tensor_default_18 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%add, a), kwargs = {}) %access_subclass_inner_tensor_default_21 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%access_subclass_inner_tensor_default_18, elem), kwargs = {}) %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 
%access_subclass_inner_tensor_default_21), kwargs = {}) %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 4), kwargs = {}) %access_subclass_inner_tensor_default_25 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%add_2, a), kwargs = {}) %access_subclass_inner_tensor_default_28 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%access_subclass_inner_tensor_default_25, elem), kwargs = {}) %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %access_subclass_inner_tensor_default_28), kwargs = {}) return (add_3,)""", ) ep = export(m, (ref_x,)) self.assertTrue(torch.allclose(ep.module()(ref_x), ref_out)) def test_subclass_nested_attr_access_complicated_metadata(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() self.p1 = torch.nn.Parameter(torch.ones(3, 4)) self.p2 = torch.nn.Parameter( ConstantExtraMetadataTensor( ConstantExtraMetadataTensor(torch.ones(3, 4)), ) ) def forward(self, x): res = x + 2 * self.p1 + self.p2 return res.elem.elem + self.p2.get_complicated_metadata().foo m = Foo() ref_x = torch.randn(3, 4) ref_out = m(ref_x) ep_training = torch.export.export(m, (ref_x,), strict=False) self.assertExpectedInline( str(ep_training.graph).strip(), """\ graph(): %p_p1 : [num_users=1] = placeholder[target=p_p1] %p_p2 : [num_users=1] = placeholder[target=p_p2] %x : [num_users=1] = placeholder[target=x] %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%p_p1, 2), kwargs = {}) %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %mul), kwargs = {}) %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %p_p2), kwargs = {}) %access_subclass_inner_tensor_default_10 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%add_1, elem), kwargs = 
{})
    %access_subclass_inner_tensor_default_13 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%access_subclass_inner_tensor_default_10, elem), kwargs = {})
    %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%access_subclass_inner_tensor_default_13, 4), kwargs = {})
    return (add_2,)""",
        )
        ep = export(m, (ref_x,))
        self.assertTrue(torch.allclose(ep.module()(ref_x), ref_out))

    def test_real_tensor_errors_on_aliasing_custom_op(self):
        """Custom ops returning an alias of an input must be rejected.

        With real-tensor propagation, calling a custom op whose real kernel
        returns its input unchanged should raise torch.library's aliasing
        error.  The surfaced exception type differs between non-strict
        (plain RuntimeError) and strict (dynamo-wrapped) tracing.
        """

        @torch.library.custom_op("export::foo_alias", mutates_args={})
        def foo(x: torch.Tensor) -> torch.Tensor:
            # Deliberately returns the input itself -> illegal alias.
            return x

        class Foo(torch.nn.Module):
            def forward(self, x):
                return torch.ops.export.foo_alias(x) * 2

        model = Foo()
        inputs = (torch.randn(4, 4),)
        error_type = (
            RuntimeError
            if is_non_strict_test(self._testMethodName)
            else torch._dynamo.exc.TorchRuntimeError
        )
        with self.assertRaisesRegex(
            error_type,
            (
                r"The output of this custom operator \(1\) must not also be an input "
                r"to this custom operator and \(2\) may not alias any inputs to this "
                r"custom operator or other returns"
            ),
        ):
            with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True):
                ep = export(model, inputs)

    def test_real_tensor_bool_cast(self):
        """bool() on a data-dependent scalar exports cleanly.

        Real-tensor propagation resolves the data-dependent branch, so
        non-strict export of ``bool(x.eq(0.1).any())`` should not raise.
        """

        class Foo(torch.nn.Module):
            def forward(self, x):
                return bool(x.eq(0.1).any())

        model = Foo()
        inputs = (torch.randn(64),)
        with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True):
            ep = export(model, inputs, strict=False)

    def test_is_nonzero(self):
        """torch.is_nonzero on 0-d tensors of several dtypes.

        Exports under real-tensor propagation for long, float and bool 0-d
        inputs, and checks the exported module agrees with eager for both
        truthy and falsy values.
        """

        class Foo(torch.nn.Module):
            def forward(self, x):
                return torch.is_nonzero(x)

        # Helpers build 0-d tensors of each dtype from a bool flag.
        def _long_tensor(nz):
            return torch.full((), int(nz))

        def _float_tensor(nz):
            return torch.full((), int(nz), dtype=torch.float32)

        def _bool_tensor(nz):
            return torch.full((), int(nz)).bool()

        mod = Foo()
        for _tensor in [
            _long_tensor,
            _float_tensor,
            _bool_tensor,
            # local_scalar_dense on complex NYI for fake tensors
        ]:
            with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True):
                for nz in [True,
False]: sample_input = _tensor(nz=nz) ep = export(mod, (sample_input,), strict=False) self.assertEqual(ep.module()(sample_input), nz) def test_export_script_module(self): class Foo(torch.nn.Module): def forward(self, rv: torch.Tensor, t: torch.Tensor): i = t.item() return rv + i foo = Foo() foo_script = torch.jit.script(foo) inp = (torch.zeros(3, 4), torch.tensor(7)) with self.assertRaisesRegex( ValueError, "Exporting a ScriptModule is not supported" ): export(foo_script, inp) from torch._export.converter import TS2EPConverter TS2EPConverter(foo_script, inp).convert() def test_dim_auto_and_dim(self): # test basic Dims class Foo(torch.nn.Module): def forward(self, x, y): return x - y inputs = (torch.randn(4, 4), torch.randn(4, 4)) shapes = { "x": (Dim.AUTO, Dim("d1", min=3)), "y": (Dim("d0", max=8), Dim.DYNAMIC), } ep = export(Foo(), inputs, dynamic_shapes=shapes) x, y = [node for node in ep.graph.nodes if node.op == "placeholder"] self.assertEqual((s0 := x.meta["val"].shape[0]), y.meta["val"].shape[0]) self.assertEqual((s1 := x.meta["val"].shape[1]), y.meta["val"].shape[1]) vr0 = ep.range_constraints[s0.node.expr] vr1 = ep.range_constraints[s1.node.expr] self.assertEqual([vr0.upper, vr1.lower], [8, 3]) # test derived Dims class Bar(torch.nn.Module): def forward(self, x, y, z): return x + y[1::3] + z inputs = (torch.randn(4), torch.randn(13), torch.randn(4)) dx = Dim("dx", min=2, max=10) shapes = { "x": (dx,), "y": (3 * dx + 1,), "z": (Dim.AUTO,), } ep = export(Bar(), inputs, dynamic_shapes=shapes) x, y, z = [node for node in ep.graph.nodes if node.op == "placeholder"] self.assertEqual((s0 := x.meta["val"].shape[0]), z.meta["val"].shape[0]) expr = y.meta["val"].shape[0] free_symbols = expr.node.expr.free_symbols self.assertEqual(len(free_symbols), 1) self.assertEqual(next(iter(free_symbols)), s0.node.expr) # test specialization still complains inputs = (torch.randn(4), torch.randn(4)) shapes = { "x": (Dim.STATIC,), "y": (Dim("dy"),), } with self.assertRaisesRegex( 
torch._dynamo.exc.UserError, r"You marked.*but your code specialized it to be a constant.*" r"If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO", ): export(Foo(), inputs, dynamic_shapes=shapes) def test_issue_157289(self): class MyModule(torch.nn.Module): def __init__(self): super(MyModule, self).__init__() def forward(self, causal_mask, fill_value): causal_mask = causal_mask.clone() mask_length = fill_value.shape[-1] causal_mask[:, :, :, :mask_length] = fill_value return causal_mask causal_mask = torch.randn(2, 2, 3, 4) fill_value = torch.randn(2, 2, 3, 3) dynamic_shapes = { "causal_mask": {3: Dim("M")}, "fill_value": {3: Dim("N")}, } ep = export( MyModule(), (causal_mask, fill_value), dynamic_shapes=dynamic_shapes ) if not is_training_ir_test(self._testMethodName) and not is_retracebility_test( self._testMethodName ): self.assertExpectedInline( str(ep.graph_module.code).strip(), """\ def forward(self, causal_mask, fill_value): sym_size_int_4 = torch.ops.aten.sym_size.int(fill_value, 3) clone = torch.ops.aten.clone.default(causal_mask); causal_mask = None slice_1 = torch.ops.aten.slice.Tensor(clone, 3, 0, sym_size_int_4); sym_size_int_4 = None copy_ = torch.ops.aten.copy_.default(slice_1, fill_value); slice_1 = fill_value = copy_ = None return (clone,)""", ) decomposed_ep = ep.run_decompositions() self.assertExpectedInline( str(decomposed_ep.graph_module.code).strip(), """\ def forward(self, causal_mask, fill_value): sym_size_int_5 = torch.ops.aten.sym_size.int(fill_value, 3) clone = torch.ops.aten.clone.default(causal_mask); causal_mask = None slice_1 = torch.ops.aten.slice.Tensor(clone, 3, 0, sym_size_int_5) copy = torch.ops.aten.copy.default(slice_1, fill_value); slice_1 = fill_value = None slice_scatter = torch.ops.aten.slice_scatter.default(clone, copy, 3, 0, sym_size_int_5); clone = copy = sym_size_int_5 = None return (slice_scatter,)""", ) def test_dim_dynamic_specialization(self): class Foo(torch.nn.Module): def forward(self, x): 
return x + 2 # 0/1 specialization with self.assertRaisesRegex( ValueError, r"Received user-specified dim hint Dim.DYNAMIC.*" r"but export 0/1 specialized due to hint of 0 for dimension " r"inputs\['x'\]\.shape\[0\](.*\n)*.*" r"Received user-specified dim hint Dim.DYNAMIC.*" r"but export 0/1 specialized due to hint of 1 for dimension " r"inputs\['x'\]\.shape\[1\].*", ): export( Foo(), (torch.randn(0, 1),), dynamic_shapes={ "x": {0: Dim.DYNAMIC, 1: Dim.DYNAMIC}, }, ) class Bar(torch.nn.Module): def forward(self, x): assert x.shape[0] <= 32 return x + 2 # static specialization with self.assertRaisesRegex( ValueError, r"Received user-specified dim hint Dim.DYNAMIC.*" r"but tracing inferred a static shape of 32 for dimension " r"inputs\['x'\]\.shape\[0\](.*\n)*.*", ): export( Bar(), (torch.randn(32),), dynamic_shapes={ "x": {0: Dim.DYNAMIC(min=32)}, }, ) def test_unbacked_slice_forward(self): class Foo(torch.nn.Module): def forward(self, x, xs): u0, u1 = xs.tolist() out = x[u0:u1] return out x = torch.randn(10) idxs = torch.tensor([3, 6]) mod = Foo() ep = export(mod, (x, idxs)) for xs in [ idxs, torch.tensor([-9, -1]), torch.tensor([-10000, 10000]), torch.tensor([0, -10]), ]: self.assertTrue(torch.allclose(ep.module()(x, xs), mod(x, xs))) # check unbacked bindings # should be 4 symbols: u0, u1, output size, output storage offset bound_unbacked = set() for node in ep.graph.nodes: bound_unbacked |= node.meta.get("unbacked_bindings", {}).keys() self.assertEqual(len(bound_unbacked), 4) def test_dim_hint_ranges(self): class Foo(torch.nn.Module): def forward(self, x, y): return x + y inputs = ( torch.randn(6, 4), torch.randn(6, 4), ) shapes = { "x": (Dim.AUTO(min=4), Dim.AUTO), "y": (Dim.DYNAMIC(max=16), Dim.AUTO(max=32)), } ep = export(Foo(), inputs, dynamic_shapes=shapes) ep.module()(torch.randn(8, 5), torch.randn(8, 5)) with self.assertRaisesRegex( AssertionError, escape("Guard failed: x.size()[0] >= 4"), ): # expected >= 4, but got 3 ep.module()(torch.randn(3, 5), 
torch.randn(3, 5))
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: x.size()[0] <= 16"),
        ):
            # expected <= 16, but got 17
            ep.module()(torch.randn(17, 5), torch.randn(17, 5))
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: x.size()[1] <= 32"),
        ):
            # expected <= 32, but got 33
            ep.module()(torch.randn(9, 33), torch.randn(9, 33))

    def test_dim_hint_range_violations(self):
        # User-specified Dim hint ranges that conflict with ranges inferred
        # from the model (asserts / slicing) must raise ValueError at export.
        class Foo(torch.nn.Module):
            def forward(self, xs):
                x, y = xs["data"][0]
                assert y.shape[0] <= 32
                return x[6:], y + 2

        x, y = torch.randn(8), torch.randn(8)

        # conflict with lower bound
        shapes = torch.export.ShapesCollection()
        shapes[x] = [Dim.DYNAMIC(max=5)]
        with self.assertRaisesRegex(
            ValueError,
            r"Received user-specified .* \[None, 5\], conflicting with the inferred .*"
            r"\[8, int_oo\],.* for inputs\['xs'\]\['data'\]\[0\]\[0\]\.shape\[0\]",
        ):
            export(Foo(), ({"data": [[x, y]]},), dynamic_shapes=shapes)

        # conflict with upper bound
        shapes = torch.export.ShapesCollection()
        shapes[y] = [Dim.AUTO(min=48, max=62)]
        with self.assertRaisesRegex(
            ValueError,
            r"Received user-specified .* \[48, 62\], conflicting with the inferred .*"
            r"\[2, 32\],.* for inputs\['xs'\]\['data'\]\[0\]\[1\]\.shape\[0\]",
        ):
            export(Foo(), ({"data": [[x, y]]},), dynamic_shapes=shapes)

        class Bar(torch.nn.Module):
            def forward(self, x):
                return x + 2

        # conflict with static range
        shapes = {"x": [Dim.STATIC(min=6, max=8)]}
        with self.assertRaisesRegex(
            ValueError,
            r"Received user-specified .* \[6, 8\], conflicting with the inferred .*"
            r"\[4, 4\],.* for inputs\['x'\].shape\[0\]",
        ):
            export(Bar(), (torch.randn(4),), dynamic_shapes=shapes)

        # multiple conflicts
        class Moo(torch.nn.Module):
            def forward(self, x, y):
                assert x.shape[0] <= 32
                assert y.shape[0] >= 128
                return x + 2, y + 2

        inps = (torch.randn(16), torch.randn(256))
        shapes = {
            "x": (Dim.DYNAMIC(min=33),),
            "y": (Dim.DYNAMIC(max=127),),
        }
        with self.assertRaisesRegex(
            ValueError,
            r"Received user-specified .* \[33, None\], conflicting with the inferred .*"
            r"\[2, 32\],.* for inputs\['x'\].shape\[0\](.*\n)*.*"
            r"Received user-specified .* \[None, 127\], conflicting with the inferred .*"
            r"\[128, int_oo\],.* for inputs\['y'\].shape\[0\]",
        ):
            export(Moo(), inps, dynamic_shapes=shapes)

    def test_torch_fn(self):
        # Each call_function node surviving decomposition should carry
        # "torch_fn" metadata naming the originating public torch op.
        class M1(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(3, 3)
                self.relu = torch.nn.ReLU()

            def forward(self, x):
                x = self.linear(x)
                x = self.linear(x)
                x = self.relu(x)
                x = x + x
                return x

        ep1 = export(M1(), (torch.randn(3, 3),)).run_decompositions()
        expected_result = [
            ("linear_1", "builtin_function_or_method.linear"),
            ("linear_1", "builtin_function_or_method.linear"),
            ("linear_2", "builtin_function_or_method.linear"),
            ("linear_2", "builtin_function_or_method.linear"),
            ("relu_1", "function.relu"),
            ("add_1", "method_descriptor.add"),
        ]
        actual_result = []
        for i, node in enumerate(ep1.graph.nodes):
            if node.op == "call_function":
                actual_result.append(node.meta.get("torch_fn"))
        self.assertEqual(actual_result, expected_result)

        class M2(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()

            def forward(self, x, weight, bias):
                x = torch.nn.functional.linear(x, weight, bias)
                x = torch.nn.functional.relu(x)
                x = torch.add(x, x)
                return x

        ep2 = export(
            M2(), (torch.randn(3, 3), torch.randn(3, 3), torch.randn(3))
        ).run_decompositions()
        expected_result = [
            ("linear_1", "builtin_function_or_method.linear"),
            ("linear_1", "builtin_function_or_method.linear"),
            ("relu_1", "function.relu"),
            ("add_1", "builtin_function_or_method.add"),
        ]
        actual_result = []
        for i, node in enumerate(ep2.graph.nodes):
            if node.op == "call_function":
                actual_result.append(node.meta.get("torch_fn"))
        self.assertEqual(actual_result, expected_result)

    def test_hoo_inline_users_issue(self):
        # This came from an issue where replace_with_hop passes would inline
        # subgraphs, and mess up node.users for nodes present in multiple
        # subgraphs (e.g. _x in SetGradCase below, since it's used in both
        # set_grad_enabled HOO modules).
        # This checks that node.users and node.args are in correspondence.
        def check_users_for_graph(graph):
            def _tuple_contains(_tuple, val):
                # check nested, since output node args have format ((x, y, ...),)
                return any(
                    _tuple_contains(x, val) if isinstance(x, tuple) else x == val
                    for x in _tuple
                )

            for node in graph.nodes:
                # check node.users
                for user in node.users.keys():
                    assert _tuple_contains(user.args, node)
                # check node.args
                for arg in node.args:
                    if isinstance(arg, torch.fx.Node):
                        assert _tuple_contains(arg.users, node)

        # check set grad enabled
        class SetGradCase(torch.nn.Module):
            def forward(self, x):
                _x = x.shape[0] + 2
                _xx = _x + 2
                with torch.no_grad():
                    y = _x * 4
                return _xx, y

        ep = export(
            SetGradCase(),
            (torch.randn(6),),
            dynamic_shapes={"x": (Dim("dx"),)},
            strict=False,
        )
        check_users_for_graph(ep.graph)

    def test_export_custom_op_lib(self):
        # A scoped library registration must not leak ops once the scope exits.
        ops_registered_before = set(torch.ops.mylib)

        # Assert warning for CompositeImplicitAutograd op
        with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
            lib.define("foo123(Tensor x) -> Tensor")
            lib.impl("foo123", lambda x: x.sin(), "CompositeImplicitAutograd")

        ops_registered_after = set(torch.ops.mylib)
        self.assertEqual(ops_registered_after, ops_registered_before)

    def test_export_preserve_linear_but_not_custom_op(self):
        # Removing linear from the decomp table preserves aten.linear, while
        # the custom CompositeImplicitAutograd op still decomposes (to sin).
        table = torch.export.default_decompositions()
        del table[torch.ops.aten.linear.default]

        with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
            lib.define("foo123(Tensor x) -> Tensor")
            lib.impl("foo123", lambda x: x.sin(), "CompositeImplicitAutograd")

            class Bar(torch.nn.Module):
                def __init__(self):
                    super().__init__()
                    self.linear = torch.nn.Linear(4, 4)

                def forward(self, x):
                    lin = self.linear(x)
                    return torch.ops.mylib.foo123(lin)

            x = torch.randn(4, 4)
            ep = export(Bar(), (x,)).run_decompositions(table)

            self.assertExpectedInline(
                str(ep.graph_module.code).strip(),
                """\
def forward(self, p_linear_weight, p_linear_bias, x):
    linear = torch.ops.aten.linear.default(x, p_linear_weight, p_linear_bias); x = p_linear_weight = p_linear_bias = None
    sin = torch.ops.aten.sin.default(linear); linear = None
    return (sin,)""",
            )

    def test_export_preserve_linear_at_aot_level(self):
        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(3, 3)

            def forward(self, x):
                x = self.linear(x)
                return torch.ops.aten.chunk.default(x, 3, 0)

        ep = torch.export.export(Foo(), (torch.randn(3, 3),))
        decomp_table = default_decompositions()
        del decomp_table[torch.ops.aten.linear.default]
        ep = ep.run_decompositions(decomp_table)

        gm = ep.graph_module
        # linear is CompositeImplicitAutograd functional op so we should preserve it
        # chunk is CompositeImplicitAutograd non-functional op we decompose.
        self.assertExpectedInline(
            str(gm.code).strip(),
            """\
def forward(self, p_linear_weight, p_linear_bias, x):
    linear = torch.ops.aten.linear.default(x, p_linear_weight, p_linear_bias); x = p_linear_weight = p_linear_bias = None
    split_with_sizes = torch.ops.aten.split_with_sizes.default(linear, [1, 1, 1]); linear = None
    getitem = split_with_sizes[0]
    getitem_1 = split_with_sizes[1]
    getitem_2 = split_with_sizes[2]; split_with_sizes = None
    return (getitem, getitem_1, getitem_2)""",
        )

    def test_export_cond_preserve_torch_fn_for_subgraphs(self):
        # "torch_fn" metadata must also be populated inside cond() subgraphs.
        class MySubModule(torch.nn.Module):
            def foo(self, x):
                return x.cos()

            def forward(self, x):
                return self.foo(x)

        class CondBranchClassMethod(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.subm = MySubModule()

            def bar(self, x):
                return x.sin()

            def forward(self, x):
                return cond(x.sum() <= 2, self.subm.forward, self.bar, [x])

        example_inputs = (torch.randn(1, 3, 3, 3),)
        m = CondBranchClassMethod()
        m.eval()
        gm = export(m, example_inputs).module()

        actual_torch_fns = []
        for mod in gm.modules():
            if hasattr(mod, "graph"):
                for node in mod.graph.nodes:
                    if node.name in {"sin", "cos"}:
                        torch_fn = node.meta.get("torch_fn")
                        print(torch_fn)
                        actual_torch_fns.append(torch_fn)
        exp_torch_fns = [
            ("cos_1", "method_descriptor.cos"),
            ("sin_1", "method_descriptor.sin"),
        ]
        self.assertEqual(actual_torch_fns, exp_torch_fns)

    def test_is_exporting(self):
        # torch.compiler.is_exporting() must report True while tracing, both in
        # the outer graph and inside cond() branch subgraphs.
        class Mod(torch.nn.Module):
            def forward(self, pred, x):
                def f(x):
                    return x.sin() if torch.compiler.is_exporting() else x.cos()

                y = f(x)

                def true_fn(x):
                    return f(x) - 1 if torch.compiler.is_exporting() else f(x) + 1

                def false_fn(x):
                    return f(x) + 1 if torch.compiler.is_exporting() else f(x) - 1

                return torch.cond(pred, true_fn, false_fn, (x,)) * y

        ep = export(
            Mod(),
            (
                torch.tensor(False),
                torch.randn(3, 4),
            ),
        )
        FileCheck().check_count("torch.ops.aten.sin", 1, exactly=True).run(
            ep.graph_module.code
        )
        FileCheck().check_count("torch.ops.higher_order.cond", 1, exactly=True).run(
            ep.graph_module.code
        )

        # True graph should contain sin and sub
        FileCheck().check_count("torch.ops.aten.sub", 1, exactly=True).run(
            ep.graph_module.true_graph_0.code
        )
        FileCheck().check_count("torch.ops.aten.sin", 1, exactly=True).run(
            ep.graph_module.true_graph_0.code
        )

        # False graph should contain sin and add
        FileCheck().check_count("torch.ops.aten.add", 1, exactly=True).run(
            ep.graph_module.false_graph_0.code
        )
        FileCheck().check_count("torch.ops.aten.sin", 1, exactly=True).run(
            ep.graph_module.false_graph_0.code
        )

    def test_ends_of_bounds_oblivious(self):
        # With backed_size_oblivious, slicing up to a dynamic dim's full range
        # (including size-1 inputs) must re-run without guard failures.
        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.register_buffer("buf", torch.zeros(10))

            def forward(self, x, y):
                self.buf[0 : x.shape[0]] = x
                return x + 2, y[:, ::1]

        inps = (torch.randn(10), torch.randn(32, 36))
        dynamic_shapes = {
            "x": {0: Dim("dx", min=1, max=10)},
            "y": {0: Dim("dy0"), 1: Dim("dy1")},
        }
        with torch.fx.experimental._config.patch(backed_size_oblivious=True):
            ep = export(Foo(), inps, dynamic_shapes=dynamic_shapes)
        ep.module()(torch.randn(9), torch.randn(4, 4))
        ep.module()(torch.randn(1), torch.randn(1, 1))

    def test_colin_unbacked_backed_vr_sub(self):
        class Model(torch.nn.Module):
            def forward(self, a, b, c):
                nz = torch.nonzero(a)
                ones = a.new_ones([nz.size(0), b.size(0)])
                torch._check(ones.size(0) >= 1)
                equals = torch.add(ones, c)
                return equals

        model = Model()
        example_inputs = (
            torch.ones(64),
            torch.randn(32),
            torch.randn(64, 32),
        )
        dynamic_shapes = {"a": None, "b": None, "c": (Dim.DYNAMIC, Dim.STATIC)}
        with torch.fx.experimental._config.patch(backed_size_oblivious=True):
            ep = export(model, example_inputs, dynamic_shapes=dynamic_shapes)
        # check lower bound
        for sym, vr in ep.range_constraints.items():
            if str(sym) in ["u0", "s0"]:
                self.assertEqual(vr.lower, 1)

    @testing.expectedFailureStrictV2
    def test_duplicate_modules_with_non_persistent_buffers(self):
        # The same submodule (with a non-persistent buffer) shared by two
        # parents must still export and match eager numerics.
        class FooWithBuf(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.register_buffer("buf", torch.randn(4), persistent=False)

            def forward(self, x):
                return x + self.buf

        class BarWithFoo(torch.nn.Module):
            def __init__(self, foo):
                super().__init__()
                self.foo = foo

            def forward(self, x):
                return self.foo(x)

        class ModWith2Bars(torch.nn.Module):
            def __init__(self):
                super().__init__()
                foo = FooWithBuf()
                self.b1 = BarWithFoo(foo)
                self.b2 = BarWithFoo(foo)

            def forward(self, x):
                return self.b1(x) + self.b2(x)

        mod = ModWith2Bars()
        inputs = (torch.randn(4),)
        ep = export(mod, inputs)
        self.assertTrue(torch.allclose(ep.module()(*inputs), mod(*inputs)))

    def test_derived_dim_basic(self):
        class Foo(torch.nn.Module):
            def forward(self, x, y):
                return x + y[1:]

        foo = Foo()

        x, y = torch.randn(5), torch.randn(6)
        dimx = torch.export.Dim("dimx", min=3, max=6)

        dimy = torch.export.Dim("dimy", min=4, max=7)  # doesn't work
        with self.assertRaisesRegex(
            torch._dynamo.exc.UserError,
            (
                "Constraints violated \\(dimy\\)!(.*\n)*.*"
                "The values of dimy.*must always be related to the values of dimx.*by.*(.*\n)*.*"
                "Suggested fixes:(.*\n)*.*"
                "dimy = dimx \\+ 1"
            ),
        ):
            export(
                foo,
                (x, y),
                dynamic_shapes=({0: dimx}, {0: dimy}),
            )

        dimy = dimx * 2  # doesn't work
        with self.assertRaisesRegex(
            torch._dynamo.exc.UserError,
            "Expected input.*size.* to be equal to 2\\*dimx, where dimx = 5, but got 6",
        ):
            export(
                foo,
                (x, y),
                dynamic_shapes=({0: dimx}, {0: dimy}),
            )

        dimy = dimx + 1  # works
        ep = export(
            foo,
            (x, y),
            dynamic_shapes=({0: dimx}, {0: dimy}),
        )
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: x.size()[0] == -1 + y.size()[0]"),
        ):
            # expected 5, but got 6
            ep.module()(torch.randn(4), torch.randn(6))

        self.assertEqual(ep.module()(torch.randn(4), torch.randn(5)).size()[0], 4)

    def test_derived_dim_nested(self):
        class Foo(torch.nn.Module):
            def forward(self, x, y):
                return x + y[1::2]

        foo = Foo()

        x, y = torch.randn(5), torch.randn(11)
        dimx = torch.export.Dim("dimx", min=3, max=6)
        dimy = dimx * 2 + 1  # works
        ep = export(
            foo,
            (x, y),
            dynamic_shapes=({0: dimx}, {0: dimy}),
        )
        self.assertEqual(ep.module()(torch.randn(4), torch.randn(9)).size()[0], 4)

        class Foo(torch.nn.Module):
            def forward(self, z, y):
                return z[1:] + y[1::2]

        foo = Foo()

        z, y = torch.randn(6), torch.randn(11)

        dimz = dimx
        dimy = dimx * 2 - 1  # works
        ep = export(
            foo,
            (z, y),
            dynamic_shapes=({0: dimz}, {0: dimy}),
        )
        self.assertEqual(ep.module()(torch.randn(5), torch.randn(9)).size()[0], 4)

        dimz = dimx + 1
        dimy = dimx * 2 - 1  # doesn't work
        with self.assertRaisesRegex(
            torch._dynamo.exc.UserError,
            "Expected input.*size.*to be equal to 2\\*dimx - 1, where dimx = 5, but got 11",
        ):
            export(
                foo,
                (z, y),
                dynamic_shapes=({0: dimz}, {0: dimy}),
            )

        dimy = dimx * 2 + 1  # works
        ep = export(
            foo,
            (z, y),
            dynamic_shapes=({0: dimz}, {0: dimy}),
        )
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: z.size()[0] <= 7"),
        ):
            # expected <= 7, but got 8
            ep.module()(torch.randn(8), torch.randn(15))
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: -1 + 2 * z.size()[0] == y.size()[0]"),
        ):
            # expected 9, but got 8
            ep.module()(torch.randn(5), torch.randn(8))

        self.assertEqual(ep.module()(torch.randn(5), torch.randn(9)).size()[0], 4)

    def test_derived_dim_integer(self):
        class Foo(torch.nn.Module):
            def forward(self, w):
                if w.shape[0] % 2 == 0:
                    return w[::2]
                else:
                    return w[1:-1:2]

        foo = Foo()

        w = torch.randn(10)
        dimx = torch.export.Dim("dimx", min=3, max=6)
        dimw = dimx * 2 + 1  # doesn't work
        with self.assertRaisesRegex(
            torch._dynamo.exc.UserError,
            "Expected shape.*= 10 of input Tensor to be "
            "of the form 2\\*dimx \\+ 1, where dimx is an integer",
        ):
            export(
                foo,
                (w,),
                dynamic_shapes=({0: dimw},),
            )

        dimw = dimx * 2  # works
        ep = export(
            foo,
            (w,),
            dynamic_shapes=({0: dimw},),
        )
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: w.size()[0] % 2 == 0"),
        ):
            # expected 2*..., got 9
            ep.module()(torch.randn(9))

        self.assertEqual(ep.module()(torch.randn(8)).size()[0], 4)
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: w.size()[0] <= 12"),
        ):
            # expected <= 12, but got 14
            ep.module()(torch.randn(14))

    def test_derived_dim_repeat_derived(self):
        class Foo(torch.nn.Module):
            def forward(self, u, v):
                return u[::2] + v[::2]

        foo = Foo()

        u, v = torch.randn(10), torch.randn(10)
        dimx = torch.export.Dim("dimx", min=3, max=6)
        dimw = dimx * 2  # works
        ep = export(
            foo,
            (u, v),
            dynamic_shapes=({0: dimw}, {0: dimw}),
        )
        self.assertEqual(ep.module()(torch.randn(8), torch.randn(8)).size()[0], 4)

    def test_derived_dim_out_of_order(self):
        dimy = torch.export.Dim("dimy", min=5, max=7)
        dimx = dimy - 1  # out of order, effectively dimy = dimx + 1
        dimz = dimy + 1  # out of order, effectively dimz = dimx + 2

        class Foo(torch.nn.Module):
            def forward(self, x, y, z):
                return x + y[1:] + z[2:]

        foo = Foo()

        u, v, w = torch.randn(5), torch.randn(6), torch.randn(7)
        ep = export(
            foo,
            (u, v, w),
            dynamic_shapes=({0: dimx}, {0: dimy}, {0: dimz}),
        )
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: z.size()[0] >= 6"),
        ):
            # expected 8, but got 5
            ep.module()(torch.randn(6), torch.randn(7), torch.randn(5))

        self.assertEqual(
            ep.module()(torch.randn(6), torch.randn(7), torch.randn(8)).size()[0], 6
        )

    def test_derived_dim_out_of_order_repeat_derived(self):
        dimy = torch.export.Dim("dimy", min=5, max=7)
        dimx = dimy - 1  # out of order, effectively dimy = dimx + 1
        dimz = dimy + 1  # out of order, effectively dimz = dimx + 2
        dimx1 = dimx
        dimx2 = dimz - 2  # works, effectively = dimx

        class Foo(torch.nn.Module):
            def forward(self, x, y, z, x1, x2):
                return x + y[1:] + z[2:] + x1 + x2

        foo = Foo()

        u, v, w, u1, u2 = (
            torch.randn(5),
            torch.randn(6),
            torch.randn(7),
            torch.randn(5),
            torch.randn(5),
        )
        ep = export(
            foo,
            (u, v, w, u1, u2),
            dynamic_shapes=({0: dimx}, {0: dimy}, {0: dimz}, {0: dimx1}, {0: dimx2}),
        )
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: x2.size()[0] == x.size()[0]"),
        ):
            # expected 6, but got 5
            ep.module()(
                torch.randn(6),
                torch.randn(7),
                torch.randn(8),
                torch.randn(6),
                torch.randn(5),
            )

        self.assertEqual(
            ep.module()(
                torch.randn(6),
                torch.randn(7),
                torch.randn(8),
                torch.randn(6),
                torch.randn(6),
            ).size()[0],
            6,
        )

        ep = export(
            foo,
            (u, v, w, u, u),  # reused inputs
            dynamic_shapes=({0: dimx}, {0: dimy}, {0: dimz}, {0: dimx1}, {0: dimx2}),
        )
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: x2.size()[0] == x.size()[0]"),
        ):
            # expected 6, but got 5
            ep.module()(
                torch.randn(6),
                torch.randn(7),
                torch.randn(8),
                torch.randn(6),
                torch.randn(5),
            )

        self.assertEqual(
            ep.module()(
                torch.randn(6),
                torch.randn(7),
                torch.randn(8),
                torch.randn(6),
                torch.randn(6),
            ).size()[0],
            6,
        )

    def test_specialize_derived_dim_roots(self):
        # dim & derived dim both specialize
        class Foo(torch.nn.Module):
            def forward(self, x, y):
                return x.reshape([-1]) + y

        dy = Dim("dy", min=6)
        x, y = torch.randn(6, 2), torch.randn(12)
        dynamic_shapes = {
            "x": (dy - 6, 2),
            "y": (dy,),
        }
        try:
            export(Foo(), (x, y), dynamic_shapes=dynamic_shapes)
            raise Exception(
                "export() call should have failed with dynamic shapes error."
            )
        except torch._dynamo.exc.UserError as exc:
            expected_error_msg = (
                "Specializations unexpectedly required \(dy\)!(.*\n)*.*"
                ".*solving the guards generated for dy - 6.*resulted in a specialized value of 6(.*\n)*.*"
                "Suggested fixes(.*\n)*.*"
                ".*dy = 12(.*\n)*.*"
            )
            self.assertTrue(re.search(expected_error_msg, exc.args[0]) is not None)
            self.assertTrue(
                "dy - 6 = 6" not in exc.args[0]
            )  # don't suggest fix for non-root dim

    @unittest.skip("See https://github.com/pytorch/pytorch/issues/135759")
    def test_keep_composite_ops_invalid(self):
        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(3, 3)

            def forward(self, x):
                x = self.linear(x)
                return torch.ops.aten.chunk.default(x, 3, 0)

        def _(*args, **kwargs):
            return NotImplemented

        with self.assertWarnsRegex(UserWarning, "The op aten.chunk.default"):
            _ = torch.export.export(
                Foo(),
                (torch.randn(3, 3),),
            ).run_decompositions({torch.ops.aten.chunk.default: _})

        with self.assertWarnsRegex(UserWarning, "The op aten.sym_size.default"):
            _ = torch.export.export(
                Foo(),
                (torch.randn(3, 3),),
            ).run_decompositions({torch.ops.aten.sym_size.default: _})

        with self.assertWarnsRegex(
            UserWarning,
            "The op aten.native_batch_norm.default",
        ):
            _ = torch.export.export(
                Foo(),
                (torch.randn(3, 3),),
            ).run_decompositions({torch.ops.aten.native_batch_norm.default: _})

    def test_keep_composite_ops_linear_convd(self):
        # Deleting entries from the decomp table must keep the corresponding
        # composite ops (linear/conv2d/conv1d) intact across successive
        # run_decompositions() calls; everything else lowers to core aten.
        class MyLinear(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.weight = torch.randn(20, 98)
                self.bias = torch.randn(20)

            def forward(self, x):
                return torch.nn.functional.linear(x, self.weight, self.bias)

        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(16, 33, 3)
                self.conv1d = torch.nn.Conv1d(16, 33, 3)
                self.linear = MyLinear()

            def forward(self, x, y):
                x_conv = self.conv(x)
                y_conv_1d = self.conv1d(y)
                x_linear = self.linear(x_conv)
                return x_linear.cos() + y_conv_1d.sum()

        ep = torch.export.export(
            Foo(), (torch.randn(20, 16, 50, 100), torch.randn(20, 16, 50))
        )

        ep_has_linear_convd = ep.run_decompositions({})
        self.assertExpectedInline(
            str(ep_has_linear_convd.graph_module.code).strip(),
            """\
def forward(self, p_conv_weight, p_conv_bias, p_conv1d_weight, p_conv1d_bias, c_linear_weight, c_linear_bias, x, y):
    conv2d = torch.ops.aten.conv2d.default(x, p_conv_weight, p_conv_bias); x = p_conv_weight = p_conv_bias = None
    conv1d = torch.ops.aten.conv1d.default(y, p_conv1d_weight, p_conv1d_bias); y = p_conv1d_weight = p_conv1d_bias = None
    linear = torch.ops.aten.linear.default(conv2d, c_linear_weight, c_linear_bias); conv2d = c_linear_weight = c_linear_bias = None
    cos = torch.ops.aten.cos.default(linear); linear = None
    sum_1 = torch.ops.aten.sum.default(conv1d); conv1d = None
    add = torch.ops.aten.add.Tensor(cos, sum_1); cos = sum_1 = None
    return (add,)""",
        )

        decomp_table = default_decompositions()
        del decomp_table[torch.ops.aten.conv2d.default]
        del decomp_table[torch.ops.aten.conv1d.default]

        ep_has_convd = ep.run_decompositions(decomp_table=decomp_table)
        self.assertExpectedInline(
            str(ep_has_convd.graph_module.code).strip(),
            """\
def forward(self, p_conv_weight, p_conv_bias, p_conv1d_weight, p_conv1d_bias, c_linear_weight, c_linear_bias, x, y):
    conv2d = torch.ops.aten.conv2d.default(x, p_conv_weight, p_conv_bias); x = p_conv_weight = p_conv_bias = None
    conv1d = torch.ops.aten.conv1d.default(y, p_conv1d_weight, p_conv1d_bias); y = p_conv1d_weight = p_conv1d_bias = None
    view = torch.ops.aten.view.default(conv2d, [31680, 98]); conv2d = None
    permute = torch.ops.aten.permute.default(c_linear_weight, [1, 0]); c_linear_weight = None
    addmm = torch.ops.aten.addmm.default(c_linear_bias, view, permute); c_linear_bias = view = permute = None
    view_1 = torch.ops.aten.view.default(addmm, [20, 33, 48, 20]); addmm = None
    cos = torch.ops.aten.cos.default(view_1); view_1 = None
    sum_1 = torch.ops.aten.sum.dim_IntList(conv1d, []); conv1d = None
    add = torch.ops.aten.add.Tensor(cos, sum_1); cos = sum_1 = None
    return (add,)""",
        )

        decomp_table = default_decompositions()
        del decomp_table[torch.ops.aten.conv2d.default]

        ep_has_convd = ep_has_convd.run_decompositions(decomp_table=decomp_table)
        self.assertExpectedInline(
            str(ep_has_convd.graph_module.code).strip(),
            """\
def forward(self, p_conv_weight, p_conv_bias, p_conv1d_weight, p_conv1d_bias, c_linear_weight, c_linear_bias, x, y):
    conv2d = torch.ops.aten.conv2d.default(x, p_conv_weight, p_conv_bias); x = p_conv_weight = p_conv_bias = None
    convolution = torch.ops.aten.convolution.default(y, p_conv1d_weight, p_conv1d_bias, [1], [0], [1], False, [0], 1); y = p_conv1d_weight = p_conv1d_bias = None
    view = torch.ops.aten.view.default(conv2d, [31680, 98]); conv2d = None
    permute = torch.ops.aten.permute.default(c_linear_weight, [1, 0]); c_linear_weight = None
    addmm = torch.ops.aten.addmm.default(c_linear_bias, view, permute); c_linear_bias = view = permute = None
    view_1 = torch.ops.aten.view.default(addmm, [20, 33, 48, 20]); addmm = None
    cos = torch.ops.aten.cos.default(view_1); view_1 = None
    sum_1 = torch.ops.aten.sum.dim_IntList(convolution, []); convolution = None
    add = torch.ops.aten.add.Tensor(cos, sum_1); cos = sum_1 = None
    return (add,)""",
        )

    def test_keep_composite_ops_linear_convd_for_training_ir(self):
        # Same as test_keep_composite_ops_linear_convd, but the linear
        # weight/bias are registered buffers (b_ prefix in the graph).
        class MyLinear(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.weight = torch.nn.Buffer(torch.randn(20, 98))
                self.bias = torch.nn.Buffer(torch.randn(20))

            def forward(self, x):
                return torch.nn.functional.linear(x, self.weight, self.bias)

        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(16, 33, 3)
                self.conv1d = torch.nn.Conv1d(16, 33, 3)
                self.linear = MyLinear()

            def forward(self, x, y):
                x_conv = self.conv(x)
                y_conv_1d = self.conv1d(y)
                x_linear = self.linear(x_conv)
                return x_linear.cos() + y_conv_1d.sum()

        ep = torch.export.export(
            Foo(), (torch.randn(20, 16, 50, 100), torch.randn(20, 16, 50))
        )

        ep_has_linear_convd = ep.run_decompositions(
            decomp_table={},
        )

        self.assertExpectedInline(
            str(ep_has_linear_convd.graph_module.code).strip(),
            """\
def forward(self, p_conv_weight, p_conv_bias, p_conv1d_weight, p_conv1d_bias, b_linear_weight, b_linear_bias, x, y):
    conv2d = torch.ops.aten.conv2d.default(x, p_conv_weight, p_conv_bias); x = p_conv_weight = p_conv_bias = None
    conv1d = torch.ops.aten.conv1d.default(y, p_conv1d_weight, p_conv1d_bias); y = p_conv1d_weight = p_conv1d_bias = None
    linear = torch.ops.aten.linear.default(conv2d, b_linear_weight, b_linear_bias); conv2d = b_linear_weight = b_linear_bias = None
    cos = torch.ops.aten.cos.default(linear); linear = None
    sum_1 = torch.ops.aten.sum.default(conv1d); conv1d = None
    add = torch.ops.aten.add.Tensor(cos, sum_1); cos = sum_1 = None
    return (add,)""",
        )

        decomp_table = default_decompositions()
        del decomp_table[torch.ops.aten.conv2d.default]
        del decomp_table[torch.ops.aten.conv1d.default]

        ep_has_convd = ep.run_decompositions(decomp_table=decomp_table)

        self.assertExpectedInline(
            str(ep_has_convd.graph_module.code).strip(),
            """\
def forward(self, p_conv_weight, p_conv_bias, p_conv1d_weight, p_conv1d_bias, b_linear_weight, b_linear_bias, x, y):
    conv2d = torch.ops.aten.conv2d.default(x, p_conv_weight, p_conv_bias); x = p_conv_weight = p_conv_bias = None
    conv1d = torch.ops.aten.conv1d.default(y, p_conv1d_weight, p_conv1d_bias); y = p_conv1d_weight = p_conv1d_bias = None
    view = torch.ops.aten.view.default(conv2d, [31680, 98]); conv2d = None
    permute = torch.ops.aten.permute.default(b_linear_weight, [1, 0]); b_linear_weight = None
    addmm = torch.ops.aten.addmm.default(b_linear_bias, view, permute); b_linear_bias = view = permute = None
    view_1 = torch.ops.aten.view.default(addmm, [20, 33, 48, 20]); addmm = None
    cos = torch.ops.aten.cos.default(view_1); view_1 = None
    sum_1 = torch.ops.aten.sum.dim_IntList(conv1d, []); conv1d = None
    add = torch.ops.aten.add.Tensor(cos, sum_1); cos = sum_1 = None
    return (add,)""",
        )

        decomp_table = default_decompositions()
        del decomp_table[torch.ops.aten.conv2d.default]

        ep_has_convd = ep_has_convd.run_decompositions(decomp_table=decomp_table)

        self.assertExpectedInline(
            str(ep_has_convd.graph_module.code).strip(),
            """\
def forward(self, p_conv_weight, p_conv_bias, p_conv1d_weight, p_conv1d_bias, b_linear_weight, b_linear_bias, x, y):
    conv2d = torch.ops.aten.conv2d.default(x, p_conv_weight, p_conv_bias); x = p_conv_weight = p_conv_bias = None
    convolution = torch.ops.aten.convolution.default(y, p_conv1d_weight, p_conv1d_bias, [1], [0], [1], False, [0], 1); y = p_conv1d_weight = p_conv1d_bias = None
    view = torch.ops.aten.view.default(conv2d, [31680, 98]); conv2d = None
    permute = torch.ops.aten.permute.default(b_linear_weight, [1, 0]); b_linear_weight = None
    addmm = torch.ops.aten.addmm.default(b_linear_bias, view, permute); b_linear_bias = view = permute = None
    view_1 = torch.ops.aten.view.default(addmm, [20, 33, 48, 20]); addmm = None
    cos = torch.ops.aten.cos.default(view_1); view_1 = None
    sum_1 = torch.ops.aten.sum.dim_IntList(convolution, []); convolution = None
    add = torch.ops.aten.add.Tensor(cos, sum_1); cos = sum_1 = None
    return (add,)""",
        )

    @unittest.skip("See https://github.com/pytorch/pytorch/issues/135759")
    def test_error_when_passing_mutating_primitive_op(self):
        class Foo(torch.nn.Module):
            def forward(self, x):
                return x.sin()

        ep = export(Foo(), (torch.ones(3, 3),))
        with self.assertWarnsRegex(
            UserWarning,
            "The op aten.index_put_.default",
        ):
            ep.run_decompositions({torch.ops.aten.index_put_.default: None})

    def test_export_cond_warns_constant_pred(self):
        class Mod(torch.nn.Module):
            def forward(self, pred, x):
                return torch.cond(pred, lambda x: x.sin(), lambda x: x.cos(), (x,))

        mod = Mod()
        with self.assertWarnsRegex(UserWarning, "Pred is a Python constant"):
            ep = export(mod, (True, torch.randn(3, 3)))

        nodes = ep.module().graph.find_nodes(
            op="call_function", target=torch.ops.aten.sin.default
        )
        self.assertEqual(len(nodes), 1)

    def test_export_custom_decomp_table_basic_pop(self):
        # pop() works lazily before the decomp table materializes, and the
        # popped entry stays gone once materialization happens.
        with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
            lib.define("foo123(Tensor x) -> Tensor")
            lib.impl("foo123", lambda x: x.sin(), "CompositeImplicitAutograd")
            lib.define("foo456(Tensor x) -> Tensor")
            lib.impl("foo456", lambda x: x.sin(), "CompositeImplicitAutograd")

            table = default_decompositions()
            # Since this table hasn't been materialized yet, we shouldn't error
            val = table.pop(torch.ops.mylib.foo123.default)
            self.assertIsNotNone(val)

            with self.assertRaisesRegex(KeyError, "mylib.foo123.default"):
                table.pop(torch.ops.mylib.foo123.default)

            val = table.pop(torch.ops.mylib.foo123.default, "HELLO")
            self.assertEqual(val, "HELLO")

            all_ops = set(k for k, v in table.items())
            self.assertTrue(table.has_materialized)
            # When we force materialize, torch.ops.mylib.foo123.default should have gone
            self.assertFalse(torch.ops.mylib.foo123.default in all_ops)
            self.assertTrue(torch.ops.mylib.foo456.default in all_ops)

    def test_export_custom_decomp_table_container_methods(self):
        # tests __len__
        with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
            table = default_decompositions()
            length_before = len(table)
            lib.define("foo123(Tensor x) -> Tensor")
            lib.impl("foo123", lambda x: x.sin(), "CompositeImplicitAutograd")
            lib.define("foo456(Tensor x) -> Tensor")
            lib.impl("foo456", lambda x: x.sin(), "CompositeImplicitAutograd")

            table = default_decompositions()
            self.assertEqual(len(table) - length_before, 2)

        # tests __contains__
        with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
            lib.define("foo123(Tensor x) -> Tensor")
            lib.impl("foo123", lambda x: x.sin(), "CompositeImplicitAutograd")

            table = default_decompositions()
            self.assertTrue(torch.ops.mylib.foo123.default in table)
            del table[torch.ops.mylib.foo123.default]
            self.assertFalse(torch.ops.mylib.foo123.default in table)

        # Lot of ppl do
        # for op in all_ops:
        #     if op in table:
        #         del table[op]
        with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
            lib.define("foo123(Tensor x) -> Tensor")
            lib.impl("foo123", lambda x: x.sin(), "CompositeImplicitAutograd")

            table = default_decompositions()
            if torch.ops.mylib.foo123.default in table:
                del table[torch.ops.mylib.foo123.default]

            self.assertFalse(torch.ops.mylib.foo123.default in table)
            table.materialize()
            self.assertFalse(torch.ops.mylib.foo123.default in table)

    @testing.expectedFailureStrictV2
    def test_if_post_autograd_op_preserved(self):
        class Foo(torch.nn.Module):
            def forward(self, x):
                return x.sin() + x.sum()

        ep = export(Foo(), (torch.ones(3, 3),))
        decomp_table = default_decompositions()
        del decomp_table[torch.ops.aten.sum.default]
        ep_preserve_sum = ep.run_decompositions(decomp_table)

        # Even though we are decomposing to core aten which should make
        # sum into sum.dim_IntList, we explicitly marked it to not do that.
        self.assertExpectedInline(
            str(ep_preserve_sum.graph_module.code).strip(),
            """\
def forward(self, x):
    sin = torch.ops.aten.sin.default(x)
    sum_1 = torch.ops.aten.sum.default(x); x = None
    add = torch.ops.aten.add.Tensor(sin, sum_1); sin = sum_1 = None
    return (add,)""",
        )

        ep_no_preserve_sum = ep.run_decompositions()
        self.assertExpectedInline(
            str(ep_no_preserve_sum.graph_module.code).strip(),
            """\
def forward(self, x):
    sin = torch.ops.aten.sin.default(x)
    sum_1 = torch.ops.aten.sum.dim_IntList(x, []); x = None
    add = torch.ops.aten.add.Tensor(sin, sum_1); sin = sum_1 = None
    return (add,)""",
        )

    def test_set_grad_empty(self):
        class M(torch.nn.Module):
            def forward(self, x):
                with torch.no_grad():
                    x = x + 1
                    return x, None

        ep = export(M(), (torch.ones(3, 3),))
        inp = torch.randn(3, 3)
        self.assertTrue(torch.allclose(ep.module()(inp)[0], inp + 1))

    def test_set_grad_as_side_effect(self):
        # Export must not leak a model's _set_grad_enabled side effect into
        # the caller's global grad mode.
        class Foo(torch.nn.Module):
            def forward(self, x):
                torch._C._set_grad_enabled(False)
                return x.sum()

        before = torch.is_grad_enabled()
        ep = torch.export.export(Foo(), (torch.randn(4, 4),))
        after = torch.is_grad_enabled()
        self.assertEqual(before, after)

    def test_derived_dim_out_of_order_simplified(self):
        _dimz = torch.export.Dim("_dimz", min=6, max=8)
        dimy = _dimz - 1
        dimx = dimy - 1
        dimz = torch.export.Dim("dimz", min=6, max=8)  # doesn't work, should be = _dimz

        class Foo(torch.nn.Module):
            def forward(self, x, y, z):
                return x + y[1:] + z[2:]

        foo = Foo()
        u, v, w = torch.randn(5), torch.randn(6), torch.randn(7)
        try:
            export(
                foo,
                (u, v, w),
                dynamic_shapes=({0: dimx}, {0: dimy}, {0: dimz}),
            )
        except torch._dynamo.exc.UserError as exc:
            expected_error_msg = (
                "Constraints violated \(dimz\)!(.*\n)*.*"
                "The values of dimz.*must always be related to the values of _dimz - 2.*by.*(.*\n)*.*"
                "Suggested fixes:(.*\n)*.*"
                "dimz = _dimz"
            )
            self.assertTrue(re.search(expected_error_msg, exc.args[0]) is not None)
            # don't suggest fix for non-root dims, and no need to update root here
            self.assertTrue("_dimz - 2 = Dim(" not in exc.args[0])
            self.assertTrue("_dimz - 1 = _dimz - 1" not in exc.args[0])
            self.assertTrue("_dimz = Dim(" not in exc.args[0])

        dimz = dimx + 2  # works, effectively = _dimz
        ep = export(
            foo,
            (u, v, w),
            dynamic_shapes=({0: dimx}, {0: dimy}, {0: dimz}),
        )
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: z.size()[0] >= 6"),
        ):
            # expected 8, but got 5
            ep.module()(torch.randn(6), torch.randn(7), torch.randn(5))

        self.assertEqual(
            ep.module()(torch.randn(6), torch.randn(7), torch.randn(8)).size()[0], 6
        )

    def test_simple_export_for_training(self):
        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(2, 2)

            def forward(self, x):
                return self.linear(x)

        eager_model = Foo()
        ep_for_training = torch.export.export(eager_model, (torch.ones(2, 2),))
        self.assertExpectedInline(
            str(ep_for_training.graph_module.code).strip(),
            """\
def forward(self, p_linear_weight, p_linear_bias, x):
    linear = torch.ops.aten.linear.default(x, p_linear_weight, p_linear_bias); x = p_linear_weight = p_linear_bias = None
    return (linear,)""",
        )
        gm = ep_for_training.module()
        self.assertExpectedInline(
            str(gm.code).strip(),
            """\
def forward(self, x):
    x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
    linear_weight = self.linear.weight
    linear_bias = self.linear.bias
    _guards_fn = self._guards_fn(x); _guards_fn = None
    linear = torch.ops.aten.linear.default(x, linear_weight, linear_bias); x = linear_weight = linear_bias = None
    return pytree.tree_unflatten((linear,), self._out_spec)""",
        )

        self.assertTrue(
            torch.allclose(gm(torch.ones(2, 2)), eager_model(torch.ones(2, 2)))
        )

    def test_export_for_training_with_mutation(self):
        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.buffer = torch.nn.Buffer(torch.ones(4, 4))

            def forward(self, x):
                x.add_(5)
                self.buffer.add_(5)
                return x + self.buffer

        eager_model_for_export = Foo()
        eager_model_for_testing = Foo()
        ep_for_training = torch.export.export(
            eager_model_for_export, (torch.ones(4, 4),)
        )
        self.assertExpectedInline(
            str(ep_for_training.graph_module.code).strip(),
            """\
def forward(self, b_buffer, x):
    add_ = torch.ops.aten.add_.Tensor(x, 5); x = None
    add__1 = torch.ops.aten.add_.Tensor(b_buffer, 5); b_buffer = None
    add = torch.ops.aten.add.Tensor(add_, add__1); add_ = add__1 = None
    return (add,)""",
        )
        gm = ep_for_training.module()
        self.assertExpectedInline(
            str(gm.code).strip(),
            """\
def forward(self, x):
    x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
    buffer = self.buffer
    _guards_fn = self._guards_fn(x); _guards_fn = None
    add_ = torch.ops.aten.add_.Tensor(x, 5); x = None
    add__1 = torch.ops.aten.add_.Tensor(buffer, 5); buffer = None
    add = torch.ops.aten.add.Tensor(add_, add__1); add_ = add__1 = None
    return pytree.tree_unflatten((add,), self._out_spec)""",
        )

        self.assertTrue(
            torch.allclose(
                gm(torch.ones(4, 4)), eager_model_for_testing(torch.ones(4, 4))
            )
        )

    def test_export_for_training_with_dynamic_shapes(self):
        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.buffer = torch.nn.Buffer(torch.ones(4, 4))

            def forward(self, x):
                x.add_(5)
                self.buffer.add_(5)
                return x + self.buffer.sum()

        eager_model_for_export_training = Foo()
        eager_model_for_export_inference = Foo()
        eager_model_for_testing = Foo()
        ep_for_training = torch.export.export(
            eager_model_for_export_training,
            (torch.ones(4, 4),),
            dynamic_shapes=({0: Dim("x")},),
        )

        self.assertTrue(
            torch.allclose(
                ep_for_training.module()(torch.ones(2, 4)),
                eager_model_for_testing(torch.ones(2, 4)),
            )
        )

        ep_for_real = export(
            eager_model_for_export_inference,
            (torch.ones(4, 4),),
            dynamic_shapes=({0: Dim("x")},),
        )

        # Since symbol names are based on hash of source names, and these differ across inference and
        # training, we do range comparisons instead.
        self.assertEqual(
            str(ep_for_training.range_constraints.values()),
            str(ep_for_real.range_constraints.values()),
        )

    def test_unbacked_unsqueeze(self):
        class Unsqueeze(torch.nn.Module):
            def forward(self, xs):
                u0, u1 = xs.tolist()
                x = torch.zeros(1, u0 + u1).contiguous()
                return x.unsqueeze(-1)

        mod = Unsqueeze()
        x = torch.tensor([5, 6])
        ep = export(mod, (x,), strict=False)
        self.assertTrue(torch.allclose(mod(x), ep.module()(x)))

        x = torch.tensor([1, 2])
        self.assertTrue(torch.allclose(mod(x), ep.module()(x)))

    def test_export_for_training_with_container_type(self):
        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.buffer = torch.nn.Buffer(torch.ones(4, 4))

            def forward(self, container):
                x = container[0][0]
                y = container[0][1]
                x.add_(5)
                y.add_(5)
                return x + y + self.buffer.sum()

        eager_model = Foo()
        ep_for_training = torch.export.export(
            eager_model,
            ([torch.ones(4, 4), torch.ones(4, 4)],),
        )

        self.assertTrue(
            torch.allclose(
                ep_for_training.module()(
                    ([torch.ones(4, 4), torch.ones(4, 4)]),
                ),
                eager_model(([torch.ones(4, 4), torch.ones(4, 4)])),
            )
        )

    def test_function_holding_tensor(self):
        # A closure stored outside the model keeps a fake tensor alive; the
        # leak detector should warn about exactly one surviving fake tensor.
        global_storage = []

        class FunctionClosureLeak(torch.nn.Module):
            def forward(self, x):
                fake_tensor = x + 1  # In real export, this would be a FakeTensor

                def closure():
                    return fake_tensor.shape  # Captures fake_tensor

                # Store closure globally - this creates the leak
                global_storage.append(closure)
                return x.sin()

        with (
            torch._export.config.patch(detect_non_strict_fake_tensor_leaks=True),
            self.assertWarnsRegex(
                UserWarning, "Detected 1 fake tensors that are still alive after export"
            ),
        ):
            export(FunctionClosureLeak(), (torch.randn(4, 4),), strict=False)

    def test_detect_leak_nonstrict(self):
        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x, y):
                return x + y

        global_list = []

        class ReferenceControl:
            def __init__(self, mod):
                self.bank = []
                self.bank_dict = {}
                self.mod = mod

                def hacked_up_forward(self_, x, y):
                    # Stash clones in three places so three fake tensors leak.
                    self.bank.append(x.clone())
                    self.bank_dict["x"] = x.clone()
                    global_list.append(x.clone())
                    return x + y

                self.mod.forward = hacked_up_forward.__get__(self.mod, Foo)

            def __call__(self, x, y):
                ep = export(self.mod, (x, y), strict=False).module()
                out = ep(x, y)
                return out

            def update(self):
                return self.bank

        foo = Foo()
        ref = ReferenceControl(foo)
        ref(torch.randn(4, 4), torch.randn(4, 4))

        self.assertTrue(
            isinstance(ref.bank[0], torch._subclasses.fake_tensor.FakeTensor)
        )

        with (
            torch._export.config.patch(detect_non_strict_fake_tensor_leaks=True),
            self.assertWarnsRegex(
                UserWarning, "Detected 3 fake tensors that are still alive after export"
            ),
        ):
            ref(torch.randn(4, 4), torch.randn(4, 4))

    def test_detect_leak_nonstrict_with_stacktrace(self):
        # The leak warning should include a stack trace pointing at the
        # user line that captured the fake tensor.
        global_list = []

        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x, y):
                nonlocal global_list
                global_list.append(x + y)
                return x + y

        foo = Foo()
        ep = export(foo, (torch.randn(4, 4), torch.randn(4, 4)), strict=False)
        self.assertTrue(
            isinstance(global_list[0], torch._subclasses.fake_tensor.FakeTensor)
        )

        with torch._export.config.patch(detect_non_strict_fake_tensor_leaks=True):
            warn_re = re.compile(
                r"Detected\s+\d+\s+fake\s+tensors?"
                r".*test_export\.py.*global_list\.append\(x \+ y\)",
                re.S,
            )
            with self.assertWarnsRegex(UserWarning, warn_re):
                ep = export(foo, (torch.randn(4, 4), torch.randn(4, 4)), strict=False)

    def test_export_cyclic_reference_leak(self):
        class Node:
            def __init__(self, tag):
                self.tag = tag
                self.ref = None
                self.tensor = None

        bank = []

        class LeakyCycle(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x, y):
                z = x + y
                node1 = Node("A")
                node2 = Node("B")
                node1.ref = node2
                node2.ref = node1
                node1.tensor = z
                # Keep the cycle alive intentionally -> leak
                nonlocal bank
                bank.append(node1)
                return (z.sin()).cos()

        lc = LeakyCycle()
        ep = export(lc, (torch.randn(4, 4), torch.randn(4, 4)), strict=False)

        node1_ref = weakref.ref(bank[0])
        node2_ref = weakref.ref(bank[0].ref)

        bank.clear()
        del bank
        bank = []

        self.assertIsNotNone(node1_ref(), "node1 should still be alive due to cycle")
        self.assertIsNotNone(node2_ref(), "node2 should still be alive due to cycle")

        with torch._export.config.patch(detect_non_strict_fake_tensor_leaks=True):
            warn_re = re.compile(
                r"Detected\s+\d+\s+fake\s+tensors?"
r'.*?[/\\]test_export\.py",\s+line\s+\d+,\s+in\s+forward' r"(?:\\n|\n)\s*z\s*=\s*x\s*\+\s*y", re.S, ) with self.assertWarnsRegex(UserWarning, warn_re): ep = export(lc, (torch.randn(4, 4), torch.randn(4, 4)), strict=False) def test_export_for_training_run_decomp(self): class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.buffer = torch.nn.Buffer(torch.ones(2, 2)) self.linear = torch.nn.Linear(2, 2) def forward(self, x): self.buffer.add_(5) return self.linear(x) + self.buffer.sum() eager_model = Foo() ep_for_training = torch.export.export( eager_model, (torch.ones(2, 2),), ) ep_for_inference = ep_for_training.run_decompositions() self.assertExpectedInline( str(ep_for_inference.graph_module.code).strip(), """\ def forward(self, p_linear_weight, p_linear_bias, b_buffer, x): add = torch.ops.aten.add.Tensor(b_buffer, 5); b_buffer = None permute = torch.ops.aten.permute.default(p_linear_weight, [1, 0]); p_linear_weight = None addmm = torch.ops.aten.addmm.default(p_linear_bias, x, permute); p_linear_bias = x = permute = None sum_1 = torch.ops.aten.sum.dim_IntList(add, []) add_1 = torch.ops.aten.add.Tensor(addmm, sum_1); addmm = sum_1 = None return (add, add_1)""", ) def test_derived_dim_out_of_order_simplified_repeat_non_derived(self): class Foo(torch.nn.Module): def forward(self, x, y, y1, z): return x + y[1:] + y1[1:] + z[2:] foo = Foo() u, v, v1, w = torch.randn(5), torch.randn(6), torch.randn(6), torch.randn(7) _dimz = torch.export.Dim("_dimz", min=6, max=8) dimy = _dimz - 1 dimx = dimy - 1 dimz = dimx + 2 # works, effectively = _dimz ep = export( foo, (u, v, v1, w), dynamic_shapes=({0: dimx}, {0: dimy}, {0: dimy}, {0: dimz}), ) with self.assertRaisesRegex( AssertionError, escape("Guard failed: y1.size()[0] == y.size()[0]"), ): # expected 7, but got 5 ep.module()( torch.randn(6), torch.randn(7), torch.randn(5), torch.randn(8), ) self.assertEqual( ep.module()( torch.randn(6), torch.randn(7), torch.randn(7), torch.randn(8), ).size()[0], 6, ) 
    def test_static_dim_constraints(self):
        """Exercise static-dimension handling in `dynamic_shapes`.

        Checks that (a) several equivalent spellings of a mixed static/dynamic
        spec all export and run, (b) wrong-shaped inputs are rejected at module
        call time, (c) static dims do not appear in `range_constraints`, and
        (d) a static spec that contradicts the example input size raises at
        export time.
        """

        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                # Linear fixes x's last dim to 6 and the output's last dim to 4.
                self.l = torch.nn.Linear(6, 4)

            def forward(self, x, y, z):
                # x0 has y's length minus 1, so dim0(y) must equal dim0(x) + 1.
                x0 = self.l(x) + y[1:]
                return x0, z * 2.0

        foo = Foo()
        inputs = (torch.randn(4, 6), torch.randn(5, 4), torch.randn(3, 3))
        dx = Dim("dx", min=3, max=6)
        dy = dx + 1  # derived dim: always dx + 1
        dz = Dim("dz", min=3, max=6)

        # test that tweaking shapes fails — each tuple perturbs exactly one
        # dimension that the spec (or the model) pins down
        wrong_shape_inputs = [
            (torch.randn(4, 7), torch.randn(5, 4), torch.randn(3, 3)),
            (torch.randn(4, 6), torch.randn(5, 5), torch.randn(3, 3)),
            (torch.randn(4, 6), torch.randn(5, 4), torch.randn(3, 4)),
        ]

        # all of these should be fine: dict-by-index, tuple-of-dims, ints,
        # None (auto/static), and Dim.STATIC are interchangeable spellings
        for dynamic_shapes in [
            ({0: dx, 1: 6}, {0: dy, 1: 4}, {0: dz, 1: 3}),
            ((dx, None), (dy, 4), (dz, 3)),
            ((None, 6), (5, None), (None, None)),
            ((4, 6), {0: None, 1: 4}, {0: None, 1: 3}),
            (None, None, (Dim.STATIC, Dim.STATIC)),
        ]:
            ep = export(foo, inputs, dynamic_shapes=dynamic_shapes)
            self.assertEqual(foo(*inputs), ep.module()(*inputs))
            for wrong_inputs in wrong_shape_inputs:
                with self.assertRaisesRegex(AssertionError, "Guard failed"):
                    with self.assertRaises(RuntimeError):
                        ep.module()(*wrong_inputs)

        # check range_constraints - static dims shouldn't be present
        ep = export(foo, inputs, dynamic_shapes=((dx, None), (dy, 4), (dz, 3)))
        self.assertEqual(len(ep.range_constraints), 3)
        for vr in ep.range_constraints.values():
            self.assertTrue(vr.lower < vr.upper)

        # check raised errors: a static int in the spec that disagrees with the
        # example input's actual size must fail at export time
        with self.assertRaisesRegex(
            (
                torch.fx.experimental.symbolic_shapes.ConstraintViolationError,
                torch._dynamo.exc.UserError,
            ),
            "Static shape constraint of 5 does not match input size of 4, for .*",
        ):
            _ = export(foo, inputs, dynamic_shapes=((5, None), None, None))
        with self.assertRaisesRegex(
            (
                torch.fx.experimental.symbolic_shapes.ConstraintViolationError,
                torch._dynamo.exc.UserError,
            ),
            "Static shape constraint of 9 does not match input size of 6, for .*",
        ):
            _ = export(foo, inputs, dynamic_shapes=((dx, 9), (dy, 4), (3, 3)))

    def test_dim_1_2(self):
        class Foo(torch.nn.Module):
            def forward(self, x):
                return x * 2

        # Dims restricted to the tiny range [1, 2]; historically 0/1 sizes are
        # specialized, so this checks min=1 is honored without specialization.
        dx = Dim("dx", min=1, max=2)
        ep = export(Foo(), (torch.randn(2, 2),), dynamic_shapes=({0: dx, 1: None},))
        # both in-range sizes must run
        ep.module()(torch.randn(1, 2))
        ep.module()(torch.randn(2, 2))
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: x.size()[0] <= 2"),
        ):
            # expected <= 2, but got 3
            ep.module()(torch.randn(3, 2))

        # the single recorded range constraint must match [1, 2]
        vr = list(ep.range_constraints.values())[0]
        self.assertEqual(vr.lower, 1)
        self.assertEqual(vr.upper, 2)

    def test_derived_dim_1_2(self):
        """Derived dim (dx + 1) with dx in [1, 2]: check the call-time guard
        and that both the base and derived ranges are recorded."""

        class Bar(torch.nn.Module):
            def forward(self, x, y):
                return x + y[1:]

        dx = Dim("dx", min=1, max=2)
        ep = export(
            Bar(),
            (torch.randn(2, 2), torch.randn(3, 2)),
            dynamic_shapes=({0: dx, 1: None}, {0: dx + 1, 1: None}),
        )
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: -1 + y.size()[0] != 1"),
        ):
            # TODO: this should not error?
            ep.module()(torch.randn(1, 2), torch.randn(2, 2))
        # ranges for dx ([1, 2]) and dx + 1 ([2, 3]), in sorted order
        range_lower_bounds = sorted(vr.lower for vr in ep.range_constraints.values())
        range_upper_bounds = sorted(vr.upper for vr in ep.range_constraints.values())
        self.assertEqual(range_lower_bounds, [1, 2])
        self.assertEqual(range_upper_bounds, [2, 3])

    def test_issue_161902(self):
        """Repro for gh-161902: conflicting derived-dim specs (2*dx vs dx + 1
        on equal-sized inputs) must surface a ConstraintViolation-style
        UserError naming both specialized expressions."""

        class Add(torch.nn.Module):
            def forward(self, x, y):
                return x + y

        m = Add()
        x = torch.randn(2, 3)
        y = torch.randn(2, 3)
        dx = Dim("dx", min=1, max=2)
        # both dims describe size 2, forcing dx == 1 for both expressions —
        # i.e. each marked-dynamic dim specializes to a constant
        conflicting = {"x": (2 * dx, Dim.STATIC), "y": (dx + 1, Dim.STATIC)}
        with self.assertRaisesRegex(
            torch._dynamo.exc.UserError,
            r"Constraints violated.*"
            r"\n.*You marked 2\*dx as dynamic but your code specialized it to be a constant \(2\).*"
            r"\n.*You marked dx \+ 1 as dynamic but your code specialized it to be a constant \(2\).*",
        ):
            export(m, (x, y), dynamic_shapes=conflicting)

    def test_range_constraints_with_replacement(self):
        """Two DYNAMIC dims forced equal by `x + y` should be unified
        (replaced) into a single symbol in range_constraints."""

        class M(torch.nn.Module):
            def forward(self, x, y):
                # addition equates the two input sizes; [:3] bounds them below
                return (x + y)[:3]

        m = M()
        inp = (torch.randn(4), torch.randn(4))
        dynamic_shapes = ((torch.export.Dim.DYNAMIC,), (torch.export.Dim.DYNAMIC,))
        ep = export(m, inp, dynamic_shapes=dynamic_shapes)
        assert
len(ep.range_constraints) == 1 vr = next(iter(ep.range_constraints.values())) self.assertEqual(vr.lower, 3) def test_unbacked_linear_layer_norm_input(self): class MyModel(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(387, 128, bias=True) self.layer_norm = torch.nn.LayerNorm(387) def forward(self, x, mask): masked_select = x.masked_select(mask) view = masked_select.view(-1, 387) linear = self.linear(view) layer_norm = self.layer_norm(view) return linear, layer_norm inputs = ( torch.randn((256, 387), dtype=torch.float), torch.randint(low=0, high=1, size=(256, 1), dtype=torch.bool), ) model = MyModel() ep = export(model, inputs) ref = model(*inputs) actual = ep.module()(*inputs) self.assertTrue(torch.allclose(ref[0], actual[0])) self.assertTrue(torch.allclose(ref[1], actual[1])) @torch._dynamo.config.patch(capture_scalar_outputs=True) def test_layer_norm_unbacked_normalized_shape(self): class MyModel(torch.nn.Module): def forward(self, scalar, weight, bias): u1 = scalar.item() y = torch.ones(2, u1) return torch.nn.functional.layer_norm( input=y, normalized_shape=(u1,), weight=weight, bias=bias ) model = MyModel() inputs = ( torch.scalar_tensor(16, dtype=torch.int32), torch.randn(16), torch.randn(16), ) ep = export(model, inputs) actual = ep.module()(*inputs) ref = model(*inputs) self.assertTrue(torch.allclose(ref[0], actual[0])) def test_unbacked_3d_matmul(self): class Model(torch.nn.Module): def forward(self, x, repeat): u0 = repeat.item() t1 = x.unsqueeze(1).expand(x.size(0), u0 // 2, x.size(-1)) t2 = torch.ones(3) return torch.matmul(t1, t2) model = Model() inputs = (torch.randn(4, 3), torch.scalar_tensor(2, dtype=torch.int)) exported = export(model, inputs).module() self.assertEqual(model(*inputs), exported(*inputs)) def test_dynamic_shapes_wrapped_with_shape_guards(self): class Neuron(torch.nn.Module): def __init__(self, n_dims: int = 5, n_targets: int = 3): super().__init__() self.linear = torch.nn.Linear(n_dims, 
n_targets) def forward(self, x, y): return torch.sigmoid(self.linear(x + y)) args = (torch.randn(2, 5), torch.randn(2, 5)) batch = torch.export.Dim.DYNAMIC n = Neuron() compiled = export(n, args, dynamic_shapes=({0: batch}, {0: batch})) expected = n(*args) mod = compiled.module() got = mod(*args) self.assertTrue(torch.allclose(expected, got)) class Wrapped(Neuron): def forward(self, *args): return super().forward(*args) w = Wrapped() if is_retracebility_test(self._testMethodName): with self.assertRaisesRegex( torch._dynamo.exc.UserError, "Detected mismatch between the structure of `inputs` and `dynamic_shapes`" ": `inputs` has 2 elements, but `dynamic_shapes` has 1 elements", ): export(w, args, dynamic_shapes={"args": ({0: batch}, {0: batch})}) else: compiled = export( w, args, dynamic_shapes={"args": ({0: batch}, {0: batch})} ) expected = w(*args) mod = compiled.module() got = mod(*args) self.assertTrue(torch.allclose(expected, got)) def test_dynamic_shapes_builder_basic(self): class M(torch.nn.Module): def forward(self, x, y, z): return x + y[0] + z["k"] m = M() x = torch.randn(4) y = [torch.randn(4)] z = {"k": torch.randn(4)} args = (x, y, z) shapes_collection = torch.export.ShapesCollection() dim = torch.export.Dim("dim", max=10) # specify shape of tensor shapes_collection[x] = (dim,) # tensor can be arbitrarily deep shapes_collection[y[0]] = (dim,) # can also specify some dimension in shape of tensor shapes_collection[z["k"]][0] = dim ep = export(m, args, dynamic_shapes=shapes_collection) sym = next(iter(ep.range_constraints.keys())) for node in ep.graph.nodes: if node.op == "placeholder": self.assertEqual(str(tuple(node.meta["val"].shape)), f"({sym},)") def test_dynamic_shapes_builder_kwargs(self): class M(torch.nn.Module): def forward(self, x, y, z): return x + y[0] + z["k"] m = M() x = torch.randn(4) y = [torch.randn(4)] z = {"k": torch.randn(4)} args = (x,) kwargs = {"z": z, "y": y} shapes_collection = torch.export.ShapesCollection() dim = 
torch.export.Dim("dim", max=10) shapes_collection[x] = (dim,) shapes_collection[y[0]] = (dim,) shapes_collection[z["k"]] = (dim,) ep = export(m, args, kwargs=kwargs, dynamic_shapes=shapes_collection) sym = next(iter(ep.range_constraints.keys())) for node in ep.graph.nodes: if node.op == "placeholder": self.assertEqual(str(tuple(node.meta["val"].shape)), f"({sym},)") def test_dynamic_shapes_builder_pytree(self): torch.export.register_dataclass( Inp1, serialized_type_name="test_dynamic_shapes_builder_pytree.Inp1", ) class M(torch.nn.Module): def forward(self, inp: Inp1): return inp.x + inp.y[0] + inp.z["k"] m = M() x = torch.randn(4) y = [torch.randn(4)] z = {"k": torch.randn(4)} args = (Inp1(x, y, z),) shapes_collection = torch.export.ShapesCollection() dim = torch.export.Dim("dim", max=10) shapes_collection[x] = (dim,) shapes_collection[y[0]] = (dim,) shapes_collection[z["k"]] = (dim,) ep = export(m, args, dynamic_shapes=shapes_collection.dynamic_shapes(m, args)) sym = next(iter(ep.range_constraints.keys())) for node in ep.graph.nodes: if node.op == "placeholder": self.assertEqual(str(tuple(node.meta["val"].shape)), f"({sym},)") def test_dynamic_shapes_inferred_basic(self): class M(torch.nn.Module): def forward(self, x, y, z): # x and y[0] must have same dynamic shape (say `dim`) >= 3 tmp = (x + y[0])[:3] # z["k"] must have static shape = 3 return tmp * z["k"] m = M() args = (torch.randn(4), [torch.randn(4)], {"k": torch.randn(3)}) additional_inputs = torch.export.AdditionalInputs() # 4->5, 4->5, 3->3 good_args = (torch.randn(5), [torch.randn(5)], {"k": torch.randn(3)}) additional_inputs.add(good_args) ep = export(m, args, dynamic_shapes=additional_inputs) got_shapes = [ str(tuple(node.meta["val"].shape)) for node in ep.graph.find_nodes(op="placeholder") ] dim = next(iter(ep.range_constraints.keys())) expected_shapes = [f"({dim},)", f"({dim},)", "(3,)"] self.assertEqual(got_shapes, expected_shapes) def expect_error(bad_args, run_time_msg, compile_time_msg): with 
self.assertRaisesRegex(AssertionError, run_time_msg): ep.module()(*bad_args) additional_inputs = torch.export.AdditionalInputs() additional_inputs.add(bad_args) with self.assertRaisesRegex(RuntimeError, compile_time_msg): export(m, args, dynamic_shapes=additional_inputs) expect_error( # 4->2, 4->2, 3->3 bad_args=(torch.randn(2), [torch.randn(2)], {"k": torch.randn(3)}), run_time_msg=escape( "Guard failed: x.size()[0] >= 3" ), # expected >= 3, but got 2 compile_time_msg="Expected input.*to be >= 3, but got 2", ) expect_error( # 4->6, 4->7, 3->3 bad_args=(torch.randn(6), [torch.randn(7)], {"k": torch.randn(3)}), run_time_msg=escape( "Guard failed: y[0].size()[0] == x.size()[0]" ), # expected 6, but got 7 compile_time_msg="Expected input.*to be equal to 6, but got 7", ) expect_error( # 4->5, 4->5, 3->4 bad_args=(torch.randn(5), [torch.randn(5)], {"k": torch.randn(4)}), run_time_msg=escape( "Guard failed: z['k'].size()[0] == 3" ), # expected 3, but got 4 compile_time_msg=r"You marked.*but your code specialized it to be a constant.*If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO", ) def test_additional_inputs_constants(self): @dataclass class D: b: bool i: int f: float t: torch.Tensor pytree.register_dataclass(D) class M(torch.nn.Module): def forward(self, d: D): return d.i + d.f + d.t input1 = (D(True, 3, 3.0, torch.ones(3)),) # int and tensor change input2 = (D(True, 4, 3.0, torch.ones(4)),) ai = torch.export.AdditionalInputs() ai.add(input1) ai.add(input2) dynamic_shapes = ai.dynamic_shapes(M(), input1) self.assertEqual( dynamic_shapes, {"d": [None, Dim.DYNAMIC, None, (Dim.DYNAMIC,)]} ) torch.export.export(M(), input1, dynamic_shapes=ai) # float changes, error input2 = (D(True, 3, 4.0, torch.ones(3)),) ai = torch.export.AdditionalInputs() ai.add(input1) ai.add(input2) with self.assertRaisesRegex( ValueError, r"they cannot be marked as dynamic: \(3\.0, 3\.0, 4\.0\)" ): ai.dynamic_shapes(M(), input1) with self.assertRaisesRegex( ValueError, 
r"they cannot be marked as dynamic: \(3\.0, 3\.0, 4\.0\)" ): torch.export.export(M(), input1, dynamic_shapes=ai) # bool changes, error input2 = (D(False, 3, 3.0, torch.ones(3)),) ai = torch.export.AdditionalInputs() ai.add(input1) ai.add(input2) with self.assertRaisesRegex( ValueError, r"they cannot be marked as dynamic: \(True, True, False\)" ): ai.dynamic_shapes(M(), input1) with self.assertRaisesRegex( ValueError, r"they cannot be marked as dynamic: \(True, True, False\)" ): torch.export.export(M(), input1, dynamic_shapes=ai) # Differing types input1 = (D(True, 0, 3.0, torch.ones(3)),) input2 = (D(True, False, 3.0, torch.ones(3)),) ai = torch.export.AdditionalInputs() ai.add(input1) ai.add(input2) with self.assertRaisesRegex( ValueError, r"differing types, so they cannot be marked as dynamic: \(0, 0, False\)", ): print(ai.dynamic_shapes(M(), input1)) with self.assertRaisesRegex( ValueError, r"differing types, so they cannot be marked as dynamic: \(0, 0, False\)", ): torch.export.export(M(), input1, dynamic_shapes=ai) def test_mismatched_dynamic_shapes(self): AUTO, STATIC = Dim.AUTO, Dim.STATIC class M(torch.nn.Module): def forward(self, x): return x["k"]["k"][0] + x["k"]["k"][1] inputs = ({"k": {"k": [torch.rand(4), torch.rand(4)]}},) dim = torch.export.Dim("dim") dynamic_shapes = { "k": {"k": [dim, dim]} } # ValueError: Node keys mismatch; missing key(s): {'x'}; extra key(s): {'k'}. with self.assertRaisesRegex( torch._dynamo.exc.UserError, re.escape( "When `dynamic_shapes` is specified as a dict, its top-level keys " "must be the arg names ['x'] of `inputs`, but here they are ['k']. " "Since here `inputs` is a list/tuple enclosing a single dict, " "maybe you just forgot to enclose `dynamic_shapes` in a list/tuple?" 
), ): export(M(), inputs, dynamic_shapes=dynamic_shapes) dynamic_shapes = ( {"k": {"k": [dim, dim]}}, ) # torch._dynamo.exc.UserError: Unexpected dynamic_shape .*dim.* of Tensor, try None instead with self.assertRaisesRegex( torch._dynamo.exc.UserError, "Unexpected input tensor shape .*dim.* " + re.escape( "specified at `dynamic_shapes[0]['k']['k'][0]` " "(expected either a list/tuple of dimensions, or a dict mapping indices to dimensions," " where each dimension is an int, a Dim, Dim.AUTO, Dim.STATIC, or Dim.DYNAMIC)" ), ): export(M(), inputs, dynamic_shapes=dynamic_shapes) dynamic_shapes = ( {"k": {"k": (dim, dim)}}, ) # ValueError: Node type mismatch; expected <class 'list'>, but got <class 'tuple'>. with self.assertRaisesRegex( torch._dynamo.exc.UserError, re.escape( "Detected mismatch between the structure of `inputs` and `dynamic_shapes`: " "`inputs[0]['k']['k']` is a <class 'list'>, but `dynamic_shapes[0]['k']['k']` is a <class 'tuple'>" ), ): export(M(), inputs, dynamic_shapes=dynamic_shapes) dynamic_shapes = ({"k": {"k": [(dim,), (dim,)]}},) # ok export(M(), inputs, dynamic_shapes=dynamic_shapes) dynamic_shapes = ( {"k": {"k": dim}}, ) # ValueError: Node type mismatch; expected <class 'list'>, but got .*Dim.*. with self.assertRaisesRegex( torch._dynamo.exc.UserError, re.escape( "Detected mismatch between the structure of `inputs` and `dynamic_shapes`: " "`inputs[0]['k']['k']` is a <class 'list'>, but `dynamic_shapes[0]['k']['k']` is not" ), ): export(M(), inputs, dynamic_shapes=dynamic_shapes) dynamic_shapes = { "x": {"k": [(dim,), (dim,)]}, "k": {"k": [(dim,), (dim,)]}, } # ValueError: Node arity mismatch; expected 1, but got 2. with self.assertRaisesRegex( torch._dynamo.exc.UserError, re.escape( "When `dynamic_shapes` is specified as a dict, its top-level keys " "must be the arg names ['x'] of `inputs`, but here they are ['x', 'k']. 
" "Alternatively, you could also ignore arg names entirely " "and specify `dynamic_shapes` as a list/tuple matching `inputs`." ), ): export(M(), inputs, dynamic_shapes=dynamic_shapes) dynamic_shapes = ( {"k": {"k": [(dim,), (dim,), (dim,)]}}, ) # ValueError: Node arity mismatch; expected 2, but got 3. with self.assertRaisesRegex( torch._dynamo.exc.UserError, re.escape( "Detected mismatch between the structure of `inputs` and `dynamic_shapes`: " "`inputs[0]['k']['k']` has 2 elements, but `dynamic_shapes[0]['k']['k']` has 3 elements" ), ): export(M(), inputs, dynamic_shapes=dynamic_shapes) dynamic_shapes = ( {"k": {"K": [(dim,), (dim,), (dim,)]}}, ) # ValueError: Node keys mismatch; missing key(s): {'k'}; extra key(s): {'K'}. with self.assertRaisesRegex( torch._dynamo.exc.UserError, re.escape( "Detected mismatch between the structure of `inputs` and `dynamic_shapes`: " "`inputs[0]['k']` has keys ['k'], but `dynamic_shapes[0]['k']` has keys ['K']" ), ): export(M(), inputs, dynamic_shapes=dynamic_shapes) class N(torch.nn.Module): def forward(self, x): return x["k"]["k1"][0] + x["k"]["k2"][0] inputs = ({"k": {"k1": [torch.rand(4)], "k2": [torch.rand(4)]}},) dim = torch.export.Dim("dim") dynamic_shapes = ({"k": {"k2": [(dim,)], "k1": [(dim,)]}},) # ok export(N(), inputs, dynamic_shapes=dynamic_shapes) class O(torch.nn.Module): def forward(self, x): return x + 2 inputs = (torch.randn(4, 8, 6),) dynamic_shapes = {"x": (dim, None)} with self.assertRaisesRegex( torch._dynamo.exc.UserError, r"Expected dynamic shape spec .* at `dynamic_shapes\['x'\]` to have the same length " r"as the actual tensor shape torch\.Size\(\[4, 8, 6\]\) \(expected 3, but got 2 instead\)", ): export(O(), inputs, dynamic_shapes=dynamic_shapes) def test_unbacked_bindings_for_divisible_u_symint(self): from torch._export.utils import _get_shape_env_from_gm from torch.utils._sympy.symbol import prefix_str, symbol_is_type, SymT class M(torch.nn.Module): def forward(self, a, b): return 
torch.ops.mylib.foo_unbacked(a, b) @torch.library.custom_op("mylib::foo_unbacked", mutates_args={}) def foo_unbacked(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: return a[b.item()] @foo_unbacked.register_fake def foo_unbacked_fake_impl(a, b): ctx = torch.library.get_ctx() u = ctx.new_dynamic_size(min=0, max=len(a) // 10) * 10 return torch.empty(u, a.shape[1], dtype=a.dtype) # check binding path is correct ep = export( M(), (torch.randn(100, 4), torch.tensor(10)), ) foo = [node for node in ep.graph.nodes if node.name == "foo_unbacked"][0] unbacked_bindings = foo.meta["unbacked_bindings"] self.assertEqual(len(unbacked_bindings), 1) # check binding is {u: path} u = next(iter(unbacked_bindings.keys())) self.assertEqual( type(u).__name__, "Symbol" ) # check binding is symbol, not expr path = unbacked_bindings[u] self.assertEqual(len(path), 3) # check path is [size, 0, DivideByKey(10)] self.assertEqual(type(path[2]).__name__, "DivideByKey") self.assertEqual(path[2].divisor, 10) # collect bound symbols bound = set() for node in ep.graph.nodes: bound.update(node.meta.get("unbacked_bindings", {})) # check ShapeEnv counters compared to binding indices shape_env = _get_shape_env_from_gm(ep.graph_module) next_index = shape_env.unbacked_symint_counter shape_env.unbacked_symint_counter += 1 for symbol in bound: self.assertTrue(symbol_is_type(symbol, SymT.UNBACKED_INT)) self.assertTrue( int(str(symbol)[len(prefix_str[SymT.UNBACKED_INT]) :]) < next_index ) def test_torch_check_eq_commutativity(self): class M1(torch.nn.Module): def forward(self, x1, x2, x3, y): z1 = x1.item() z2 = x2.item() z3 = x3.item() # instead of: torch._check((z2 + z3) == z1) torch._check(z1 == (z2 + z3)) if z2 + z3 == z1: return y * 2 else: return y + 3 export( M1(), (torch.tensor(6), torch.tensor(3), torch.tensor(3), torch.randn(1)), ) class M2(torch.nn.Module): def forward(self, x1, x2, x3, y): z1 = x1.item() z2 = x2.item() z3 = x3.item() # instead of: torch._check((z2 + z3) != z1) torch._check(z1 != 
(z2 + z3)) if z2 + z3 == z1: return y * 2 else: return y + 3 export( M2(), (torch.tensor(6), torch.tensor(6), torch.tensor(6), torch.randn(1)), ) def test_replaced_unbacked_bindings(self): import sympy from torch.utils._sympy.symbol import prefix_str, symbol_is_type, SymT class Foo(torch.nn.Module): def forward(self, x, y, z): m, n = x.item(), y.item() torch._check(m == 4) torch._check(n == z.shape[0]) return m + n + z inps = ( torch.tensor(4), torch.tensor(5), torch.randn(5), ) dynamic_shapes = { "x": None, "y": None, "z": (Dim("dx", max=16),), } ep = export(Foo(), inps, dynamic_shapes=dynamic_shapes) # values should have no unbacked symbols, bindings should be empty for node in ep.graph.nodes: val = node.meta.get("val") bindings = node.meta.get("unbacked_bindings") self.assertTrue( not ( isinstance(val, sympy.Symbol) and symbol_is_type(val, SymT.UNBACKED_INT) ) ) self.assertTrue(bindings is None) def test_raise_user_error_when_guard_on_data_dependent_operation(self): class M(torch.nn.Module): def forward(self, x): y = x.nonzero() z = y.shape[0] if z > 2: return x.cos() else: return x.sin() with self.assertRaisesRegex( ( torchdynamo.exc.UserError, torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode, ), "Could not guard on data-dependent expression", ): _ = export(M(), (torch.tensor([2, 3, 5]),)) def test_unbacked_infer_size(self): class Foo(torch.nn.Module): def forward(self, x): u0 = x.item() t = torch.empty(u0 - 1) return t + t ep = torch.export.export(Foo(), (torch.tensor([5]),)) ep.module()(torch.tensor([5])) ep.module()(torch.tensor([1])) def test_unbacked_pad(self): class Foo(torch.nn.Module): def forward(self, xs, pad): u0, u1, u2 = xs.tolist() x = torch.ones(u0, u1, u2) pl0, pr0, pl1, pr1 = pad.tolist() return torch.nn.functional.pad(x, (pl0, pr0, pl1, pr1)) x = torch.tensor([64, 64, 64]) pad = torch.tensor([8, -8, 4, 0]) m = Foo() ep = export(m, (x, pad)) self.assertEqual(ep.module()(x, pad).shape, m(x, pad).shape) # don't guard on 
negative/positive pad values pad2 = torch.tensor([-5, 9, 0, 8]) self.assertEqual(ep.module()(x, pad2).shape, m(x, pad2).shape) def test_suggested_fixes_for_data_dependent_errors_basic(self): # suggested fixes for data-dependent errors only work in non-strict mode strict = False error_type = torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode # Just to introduce some indirection: N is a top-level module N that calls # module M, defined next. class N(torch.nn.Module): def __init__(self) -> None: super().__init__() self.m = M() def forward(self, t): return self.m(t) + 1 # example input t = torch.tensor([1, 4, 4], dtype=torch.int32) # We define a series of versions of M() below. Each version has # raises a data-dependent error that the next version fixes, by # copy-pasting a suggested fix in the error message. The fix is # always a torch.check() on an unresolved condition (or its negation) # on unbacked symints mentioned in the error message. # Note that the suggested fixes are in terms of local variables # near the location of error that "contain" the unbacked symints # in the unresolved condition (either directly or indirectly, e.g., # inside a list or inside the shape of a tensor). class M_v0(torch.nn.Module): def forward(self, t): items = [t[i].item() for i in range(t.numel())] r = torch.randn([items[0], items[1]]) return r.view(items[0], items[2]) M = M_v0 export(N(), (t,), strict=strict) def test_suggested_fixes_for_data_dependent_errors_puzzlers(self): # suggested fixes for data-dependent errors only work in non-strict mode strict = False error_type = torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode def retry_export(m, inp, fixes): # API that applies a series of fixes, retrying export after applying each fix, # and asserting the applied fix was suggested in the previous try. # Using this API avoids the need to define multiple versions of the same test # module, as in `test_suggested_fixes_for_data_dependent_errors_basic` above. 
def code(snippets): return f"[{', '.join(snippets)}]" for i in range(len(fixes)): with self.assertRaisesRegex(error_type, re.escape(fixes[i])): export(m, (*inp, code(fixes[:i])), strict=strict) export(m, (*inp, code(fixes)), strict=strict) # The following examples are lifted from @ezyang's "Data-dependent shape puzzlers" # notebook at https://www.internalfb.com/intern/anp/view/?id=5330476 # These test modules are written in a way that works well with retry_export above. # Specifically, they take an extra `fixes` argument and `eval` it at the location # that is expected to raise errors. class cf_implicitsize(torch.nn.Module): def forward(self, x, y, fixes): i = x.item() eval(fixes) # instead of y[i] return y.narrow(0, i, 1).squeeze() retry_export( cf_implicitsize(), (torch.tensor(2), torch.randn(10)), fixes=[], ) class cf_stacklist(torch.nn.Module): def forward(self, xs, y, fixes): i = y.item() eval(fixes) return torch.stack(xs, 0).narrow(0, i, 1).squeeze() retry_export( cf_stacklist(), ([torch.ones(5) * i for i in range(10)], torch.tensor(2)), fixes=[], ) class cf_tensorsplit(torch.nn.Module): def forward(self, x, offsets_t, fixes): lengths = torch.diff(offsets_t).tolist() rs = [] start = 0 for length in lengths: eval(fixes) rs.append(x.narrow(0, start, length)) start += length return rs retry_export( cf_tensorsplit(), (torch.arange(10), torch.tensor([0, 2, 5, 7, 10])), fixes=[], # nothing to fix! 
) def test_simple_unbacked_view(self): if "cpp_runtime_nonstrict" in self.id(): self.skipTest("TODO Unexpected success in OSS but not in fbcode.") class Foo(torch.nn.Module): def forward(self, x): u0 = x.item() y = torch.empty(5, u0) return y.view(u0, 5) # [5, u0] -> [u0, 5] ep = export(Foo(), (torch.tensor([9]),)) self.assertEqual(ep.module()(torch.tensor([8])).size(0), 8) self.assertEqual(ep.module()(torch.tensor([5])).size(0), 5) class Foov2(torch.nn.Module): def forward(self, xs): xsl = xs.tolist() a, b = xsl x = torch.zeros(a) return x.reshape(b) xs = torch.tensor([4, 4]) ep = export(Foov2(), (xs,)) self.assertEqual(ep.module()(xs).size(0), 4) self.assertEqual(ep.module()(torch.tensor([5, 5])).size(0), 5) def test_no_suggested_fixes_for_data_dependent_errors(self): # suggested fixes for data-dependent errors only work in non-strict mode strict = False error_type = torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode class cf_stacklist(torch.nn.Module): def forward(self, xs, y): # y.item() is not a local, so we can't suggest a fix if y.item() < 0: return ( torch.stack(xs, 0).narrow(0, y.item() + xs.size(), 1).squeeze() ) else: return torch.stack(xs, 0).narrow(0, y.item(), 1).squeeze() with self.assertRaisesRegex( error_type, "Could not guard on data-dependent expression u0 < 0", ): export( cf_stacklist(), ([torch.ones(5) * i for i in range(10)], torch.tensor(2)), strict=strict, ) class Box: def __init__(self, content): self.content = content from torch.utils._pytree import register_pytree_node register_pytree_node( Box, lambda box: ([box.content], None), # flatten_fn lambda contents, _context: Box(*contents), # unflatten_fn flatten_with_keys_fn=None, # unflatten_fn serialized_type_name="test_no_suggested_fixes_for_data_dependent_errors.Box", ) class cf_stacklist_udd(torch.nn.Module): def forward(self, xs, y): box = Box(y.item()) # box.content is not a local, so we can't suggest a fix if box.content < 0: return ( torch.stack(xs, 0) .narrow(0, 
box.content + xs.size(), 1) .squeeze() ) else: return ( torch.stack(xs, 0) .narrow(0, box.content + xs.size(), 1) .squeeze() ) with self.assertRaisesRegex( error_type, "Could not guard on data-dependent expression u0 < 0", ): export( cf_stacklist_udd(), ([torch.ones(5) * i for i in range(10)], torch.tensor(2)), strict=strict, ) def test_tolist(self): class M(torch.nn.Module): def forward(self, x): return x.tolist() ep = export(M(), (torch.ones(3, dtype=torch.int),)) self.assertEqual(ep.module()(torch.tensor([1, 2, 3])), [1, 2, 3]) def test_if_functional(self): class Module(torch.nn.Module): def forward(self, x): z = x + 4 z.add_(4) y = z.view(x.shape) return x.cos() + y.cos() foo = Module() gm = export(foo, (torch.tensor([2, 3, 5]),)).run_decompositions({}) view_count = 0 for node in gm.graph.nodes: if node.op == "call_function" and node.target == torch.ops.aten.add_.Tensor: # No more inplace mutation self.assertNotEqual( node.target, torch.ops.aten.add_.Tensor, "There shouldn't be any inplace mutation node in the graph.", ) if ( node.op == "call_function" and node.target == torch.ops.aten.view.default ): view_count += 1 # There should be nonzero view nodes in the graph self.assertTrue(view_count > 0) def test_solver_unsupported_sympy_function(self): # repro of https://github.com/pytorch/pytorch/issues/131897 class MyModule(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x, y): x = torch.nn.functional.interpolate( x, scale_factor=0.5, mode="bilinear" ) x = torch.nn.functional.interpolate( x, scale_factor=2.0, mode="bilinear" ) x = x + y return x model = MyModule().eval() inputs = ( torch.rand((1, 1, 32, 32)), torch.rand((1, 1, 32, 32)), ) dim = torch.export.Dim.AUTO dynamic_shapes = {"x": {2: dim, 3: dim}, "y": {2: dim, 3: dim}} exported_program = export(model, inputs, dynamic_shapes=dynamic_shapes) self.assertEqual(exported_program.module()(*inputs), model(*inputs)) def test_export_max_onnx_reported(self): class Model(torch.nn.Module): 
def forward(self, x, y): s1 = max(x.shape[0], y.shape[0]) s2 = max(x.shape[1], y.shape[1]) z = torch.zeros((s1, s2), dtype=x.dtype) z[: x.shape[0], : x.shape[1]] = x z[: y.shape[0], : y.shape[1]] += y return z model = Model() x = torch.arange(6).reshape((2, 3)) y = torch.arange(6).reshape((3, 2)) * 10 DYN = torch.export.Dim.DYNAMIC ep = export( model, (x, y), dynamic_shapes=({0: DYN, 1: DYN}, {0: DYN, 1: DYN}), strict=True, ) self.assertTrue(torch.allclose(ep.module()(x, y), model(x, y))) x2 = torch.arange(4).reshape((2, 2)) y2 = torch.arange(9).reshape((3, 3)) with self.assertRaisesRegex( AssertionError, ( escape("Guard failed: max(x.size()[1], y.size()[1]) == x.size()[1]") if is_retracebility_test(self._testMethodName) else escape( "Guard failed: max(1, x.size()[1], y.size()[1]) == x.size()[1]" ) ), ): # TODO: this should not error? self.assertTrue(torch.allclose(ep.module()(x2, y2), model(x2, y2))) def test_export_max_nonstrict(self): class FooMax(torch.nn.Module): def forward(self, x): return torch.ones(max(x.item(), 1024)) ep_non_strict_foo_max_symint = export( FooMax(), (torch.tensor(4),), strict=False ).graph FileCheck().check_count("torch.sym_max", count=1, exactly=True).run( str(ep_non_strict_foo_max_symint) ) class FooMaxTensors(torch.nn.Module): def forward(self, x): return torch.ones(max(x, x)) + torch.ones(min(x, x)) ep_non_strict_foo_max_symint = export( FooMaxTensors(), (torch.tensor(4),), strict=False ).graph FileCheck().check_count( "torch.ops.aten.maximum.default", count=1, exactly=True ).run(str(ep_non_strict_foo_max_symint)) FileCheck().check_count( "torch.ops.aten.minimum.default", count=1, exactly=True ).run(str(ep_non_strict_foo_max_symint)) class FooMaxTensorsIter(torch.nn.Module): def forward(self, x): return max([x, x]) + min([x, x]) + max(x, 5) + min(x, 3) ep_non_strict_foo_max_symint = export( FooMaxTensorsIter(), (torch.tensor(4),), strict=False ).graph FileCheck().check_count( "torch.ops.aten.maximum.default", count=1, exactly=True 
        ).run(str(ep_non_strict_foo_max_symint))
        FileCheck().check_count(
            "torch.ops.aten.minimum.default", count=1, exactly=True
        ).run(str(ep_non_strict_foo_max_symint))
        # max(x, 5) / min(x, 3) with a scalar bound lower to clamp, not
        # maximum/minimum, hence two clamp nodes.
        FileCheck().check_count(
            "torch.ops.aten.clamp.default", count=2, exactly=True
        ).run(str(ep_non_strict_foo_max_symint))

        # max/min over SymInt shapes should lower to torch.sym_max/sym_min.
        class FooMaxTensorsSymInt(torch.nn.Module):
            def forward(self, x, y):
                return max([x.shape[0], y.shape[0], x.shape[0]]) + min(
                    [x.shape[0], y.shape[0], x.shape[0]]
                )

        dynamic_shapes = {
            "x": {0: torch.export.Dim.AUTO},
            "y": {0: torch.export.Dim.AUTO},
        }
        ep_non_strict_foo_max_symint = export(
            FooMaxTensorsSymInt(),
            (torch.randn(4, 4), torch.randn(4, 4)),
            dynamic_shapes=dynamic_shapes,
            strict=False,
        ).graph
        FileCheck().check_count("torch.sym_max", count=1, exactly=True).run(
            str(ep_non_strict_foo_max_symint)
        )
        FileCheck().check_count("torch.sym_min", count=1, exactly=True).run(
            str(ep_non_strict_foo_max_symint)
        )

        # Mixing a Tensor and a SymInt in builtin max() is unsupported: strict
        # mode fails in Dynamo fake-tensor propagation, non-strict fails on the
        # ambiguous Tensor truth value.
        class FooMaxTensorsSymShape(torch.nn.Module):
            def forward(self, x):
                return max(x, x.shape[0])

        dynamic_shapes = {
            "x": {0: torch.export.Dim.AUTO},
        }
        with self.assertRaisesRegex(
            RuntimeError, "Dynamo failed to run FX node with fake tensors"
        ):
            _ = export(
                FooMaxTensorsSymShape(),
                (torch.randn(4, 4),),
                dynamic_shapes=dynamic_shapes,
                strict=True,
            ).graph

        with self.assertRaisesRegex(
            RuntimeError,
            "Boolean value of Tensor with more than one value is ambiguous",
        ):
            _t = export(
                FooMaxTensorsSymShape(),
                (torch.randn(4, 4),),
                dynamic_shapes=dynamic_shapes,
                strict=False,
            ).graph

    def test_math_pow(self):
        # math.pow applied to a min() of an unbacked item(): the graph should
        # contain one sym_min and one operator.pow call.
        class M(torch.nn.Module):
            def forward(self, x, y):
                b = x.item()
                p = min(b, 10)
                p = math.pow(p, 10)
                return y * p

        ep = export(M(), (torch.tensor(5), torch.randn(5)), strict=False)
        FileCheck().check_count("torch.sym_min", count=1, exactly=True).run(
            str(ep.graph)
        )
        FileCheck().check_count("operator.pow", count=1, exactly=True).run(
            str(ep.graph)
        )

    def test_export_mod_constraints(self):
        class BasicDynamiShapeModel(torch.nn.Module):
            def forward(self, x: torch.Tensor) -> torch.Tensor:
                return
x.view(x.shape[0] - 1, -1) m = BasicDynamiShapeModel() a = torch.randn(3, 4) dim0_x = torch.export.Dim("dim0_x", min=3) dim1_x = torch.export.Dim("dim1_x", max=8000) dynamic_shapes = {"x": (dim0_x, dim1_x)} em = torch.export.export( m, (a,), dynamic_shapes=dynamic_shapes, prefer_deferred_runtime_asserts_over_guards=True, ) em.module()(torch.randn(4, 3)) with self.assertRaisesRegex( RuntimeError, r"Runtime assertion failed for expression Eq\(Mod\(s27\*s77, s77 \- 1\), 0\)", ): em.module()(torch.randn(4, 5)) dim0_x = None dim1_x = 2 * torch.export.Dim("_dim1_x", max=4000) dynamic_shapes = {"x": (dim0_x, dim1_x)} em = torch.export.export(m, (a,), dynamic_shapes=dynamic_shapes) x = torch.randn(3, 5) with self.assertRaisesRegex( AssertionError, escape("Guard failed: 3 * x.size()[1] % 2 == 0"), ): # expected 2*..., but got 5 em.module()(x) def test_dont_duck_size_for_auto_dynamic(self): AUTO, STATIC = Dim.AUTO, Dim.STATIC class Foo(torch.nn.Module): def forward(self, x, y): # x: [s0, s1], y: [s0 + 1, 4] assert y.shape[1] == 4 assert x.shape[0] == y.shape[0] - 1 return x * 2, y * 2 # duck sizing would make all static based on these sample inputs inputs = (torch.randn(4, 4), torch.randn(5, 4)) shapes = { "x": (AUTO, AUTO), "y": (AUTO, AUTO), } ep = export(Foo(), inputs, dynamic_shapes=shapes) ep.module()(torch.randn(6, 3), torch.randn(7, 4)) def test_map(self): if "cpp_runtime_nonstrict" in self.id(): self.skipTest("TODO Unexpected success in OSS but not in fbcode.") class Module(torch.nn.Module): def forward(self, xs, y, z): def body(x, y, z): return x + y + z return map(body, xs, y, z) list_tensor_map = Module() inps = (torch.ones(6, 4), torch.tensor(5), torch.tensor(4)) self._test_export_same_as_eager(list_tensor_map, inps) @unittest.expectedFailure def test_crop_like(self): # https://fb.workplace.com/groups/1405155842844877/posts/8195050017188725/ # Minimal crop code copied from https://github.com/pytorch/vision/blob/main/torchvision/transforms/v2/functional class 
CropLike(torch.nn.Module): def forward(self, image, crop_height, crop_width): c, image_height, image_width = image.shape crop_top = int(round((image_height - crop_height) / 2.0)) crop_left = int(round((image_width - crop_width) / 2.0)) return image[ ..., crop_top : crop_top + crop_height, crop_left : crop_left + crop_width, ] crop = CropLike() imagew = Dim("width") imageh = Dim("height") dynamic_dims = { "image": {0: None, 1: imageh, 2: imagew}, "crop_height": None, "crop_width": None, } args = (torch.rand(3, 512, 512), 150, 150) ecrop = export(crop, args=args, dynamic_shapes=dynamic_dims) args = (torch.rand(3, 700, 700), 150, 150) self.assertEqual(ecrop.module()(*args), ecrop(*args)) def test_dim_dynamic_divisibility(self): class M(torch.nn.Module): def forward(self, x): if x.size(0) % 2 == 0: return x.clone() * 2 else: return x.clone() * 0 input1 = (torch.randn(4),) model = M() dynamic_shapes = { "x": {0: torch.export.Dim.DYNAMIC}, } export(model, input1, dynamic_shapes=dynamic_shapes) def test_export_func_with_kwargs(self): class Module(torch.nn.Module): def forward(self, arg1, arg2, kw1, kw2): return arg1 + arg2, kw1 + kw2 kw_func = Module() args = (torch.ones(6, 4), torch.ones(1, 1)) kwargs = {"kw1": torch.ones(1, 1), "kw2": torch.ones(6, 4)} self._test_export_same_as_eager(kw_func, args, kwargs) def test_export_func_with_pytree_kwargs(self): class Module(torch.nn.Module): def forward(self, arg1, arg2, a, b): return arg1 + a["kw1"] + b[0], arg2 + a["kw2"] + b[1] kw_func = Module() args = (torch.ones(2, 3), torch.ones(3, 4)) kwargs = { "a": {"kw1": torch.ones(2, 3), "kw2": torch.ones(3, 4)}, "b": [torch.ones(2, 3), torch.ones(3, 4)], } self._test_export_same_as_eager(kw_func, args, kwargs) def test_export_func_with_default_kwargs(self): class Module(torch.nn.Module): def forward(self, arg1, arg2, a, b=1): return arg1 + arg2, a["kw1"] + a["kw2"] + b kw_func = Module() class Module2(torch.nn.Module): def forward(self, arg1, arg2, a=1, b=2): return arg1 + a, arg2 
                + b

        kw_func2 = Module2()

        args = (torch.ones(6, 4), torch.ones(1, 1))
        kwargs1 = {"a": {"kw1": torch.ones(1, 1), "kw2": torch.ones(6, 4)}}
        kwargs2 = {"a": {"kw1": torch.ones(1, 1), "kw2": torch.ones(6, 4)}, "b": 2}
        self._test_export_same_as_eager(kw_func, args, kwargs1)
        self._test_export_same_as_eager(kw_func, args, kwargs2)

        kwargs3 = {"b": 1}
        self._test_export_same_as_eager(kw_func2, args, kwargs3)

    def test_kwargs_reorder(self):
        # Keyword-only args may be supplied to the exported module in any
        # order (dict or explicit kwargs); export must canonicalize them.
        class M(torch.nn.Module):
            def forward(self, *, x, y, z):
                return x + y + z

        ep = export(
            M(), (), {"z": torch.ones(3), "y": torch.ones(3), "x": torch.ones(3)}
        )
        ep.module()(**{"z": torch.ones(3), "y": torch.ones(3), "x": torch.ones(3)})
        ep.module()(z=torch.ones(3), y=torch.ones(3), x=torch.ones(3))
        ep.module()(x=torch.ones(3), z=torch.ones(3), y=torch.ones(3))

    def test_set_example_inputs(self):
        # ExportedProgram.example_inputs can be re-assigned after export, but
        # only with a (args, kwargs) tuple whose flattened structure matches
        # the original trace; mismatches raise ValueError.
        class M(torch.nn.Module):
            def forward(self, a, *, x, y, z):
                return a, x + y + z

        inp = (
            (torch.ones(3),),
            {"z": torch.ones(3), "y": torch.ones(3), "x": torch.ones(3)},
        )
        ep = export(M(), inp[0], inp[1])
        ep.module()(*ep.example_inputs[0], **ep.example_inputs[1])

        # Re-ordering the kwargs dict is allowed.
        ep.example_inputs = (
            (torch.ones(3),),
            {"x": torch.ones(3), "z": torch.ones(3), "y": torch.ones(3)},
        )
        ep.module()(*ep.example_inputs[0], **ep.example_inputs[1])

        with self.assertRaisesRegex(ValueError, "Example inputs should be a tuple"):
            ep.example_inputs = (torch.ones(3),)

        with self.assertRaisesRegex(ValueError, "Ran into a kwarg keyword mismatch"):
            ep.example_inputs = ((torch.ones(3),), {})

        with self.assertRaisesRegex(ValueError, "Trying to flatten user inputs"):
            ep.example_inputs = (
                (),
                {"x": torch.ones(3), "z": torch.ones(3), "y": torch.ones(3)},
            )

    def test_export_func_with_var_postional_args(self):
        # Variadic *args positional inputs must round-trip through export.
        class Module(torch.nn.Module):
            def forward(self, arg1, arg2, *args):
                return arg1 + args[0], arg2 + args[1]

        kw_func = Module()
        args = (torch.ones(2, 3), torch.ones(3, 4), torch.ones(2, 3), torch.ones(3, 4))
        self._test_export_same_as_eager(kw_func, args)

    @testing.expectedFailureCppRuntime
    def
test_export_module(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() self.p1 = torch.nn.Parameter(torch.ones(3, 4)) self.p2 = torch.nn.Parameter( CustomTensorPlainOut( torch.ones(3, 4), torch.ones(3, 4), ) ) def forward(self, x): a = (2 * self.p1 + self.p2).sum() return x + a model = Foo() example_inputs = (torch.randn(3, 4),) ep = export(model, example_inputs, strict=False) before = list(ep.state_dict.keys()) ep.run_decompositions() after = list(ep.state_dict.keys()) self.assertEqual(before, after) def test_export_func_with_keyword_only_args(self): class Module(torch.nn.Module): def forward(self, arg1, arg2, *args, kw1, kw2): return arg1 + args[0] + kw1, arg2 + args[1] + kw2 kw_func = Module() args = (torch.ones(2, 3), torch.ones(3, 4), torch.ones(2, 3), torch.ones(3, 4)) kwargs = {"kw1": torch.ones(2, 3), "kw2": torch.ones(3, 4)} self._test_export_same_as_eager(kw_func, args, kwargs) def test_export_func_with_var_keyword_args(self): class Module(torch.nn.Module): def forward(self, arg1, arg2, *args, kw1, kw2, **kwargs): return ( arg1 + args[0] + kw1 + kwargs["kw3"], arg2 + args[1] + kw2 + kwargs["kw4"], ) kw_func = Module() args = (torch.ones(2, 3), torch.ones(3, 4), torch.ones(2, 3), torch.ones(3, 4)) kwargs = { "kw1": torch.ones(2, 3), "kw2": torch.ones(3, 4), "kw3": torch.ones(2, 3), "kw4": torch.ones(3, 4), } self._test_export_same_as_eager(kw_func, args, kwargs) def test_unbacked_stack(self): class M(torch.nn.Module): def forward(self, x): nz = torch.nonzero(x) nz_size = nz.size(0) torch._check(nz_size % 4 == 0) # Create two tensors whose leading dimensions are equivalent at # runtime but expressed via different SymInt formulas. 
                first = torch.zeros((nz_size // 2, 4))
                second = torch.zeros(((nz_size // 4) * 2, 4))
                return torch.stack([first, second], dim=0)

        inputs = (torch.ones((32,)),)
        ep = export(M(), inputs)
        orig_res = M()(*inputs)
        ep_res = ep.module()(*inputs)
        self.assertTrue(torch.allclose(orig_res, ep_res))

    def test_unbacked_slice_simple(self):
        # Top-k style filtering where the slice length (num_topk) is an
        # unbacked value derived from a boolean mask; export must handle the
        # data-dependent slicing and match eager on all three outputs.
        class M(torch.nn.Module):
            def forward(self, scores, score_thr, topk: torch.Tensor, results=None):
                valid_mask = scores > score_thr
                scores = scores[valid_mask]
                valid_idxs = torch.nonzero(valid_mask).to(scores.device)

                num_topk = torch.minimum(topk, torch.tensor(valid_idxs.shape[0])).item()
                scores, idxs = scores.sort(descending=True)
                scores = scores[:num_topk]
                topk_idxs = valid_idxs[idxs[:num_topk]]
                keep_idxs, labels = topk_idxs.unbind(dim=1)
                return scores, labels, keep_idxs

        score = torch.tensor(
            [[0.1, 0.3, 0.2], [0.12, 0.7, 0.9], [0.02, 0.8, 0.08], [0.4, 0.1, 0.08]]
        )
        bbox_pred = torch.tensor([[0.2, 0.3], [0.4, 0.7], [0.1, 0.1], [0.5, 0.1]])
        score_thr = 0.15
        nms_pre = torch.tensor(4)
        inputs = (score, score_thr, nms_pre, dict(bbox_pred=bbox_pred))

        ep = export(M(), inputs)
        orig_res = M()(*inputs)
        ep_res = ep.module()(*inputs)
        self.assertTrue(torch.allclose(orig_res[0], ep_res[0]))
        self.assertTrue(torch.allclose(orig_res[1], ep_res[1]))
        self.assertTrue(torch.allclose(orig_res[2], ep_res[2]))

    def test_multidimensional_slicing(self):
        # Indexing y[0, b] with an unbacked int b; torch._check bounds make
        # the access provably in-range. Only exercised in non-strict mode.
        class M(torch.nn.Module):
            def forward(self, x, y):
                b = x.item()
                torch._check(b >= 0)
                torch._check(b < y.shape[0])
                return y[0, b]

        if is_non_strict_test(self._testMethodName):
            m = M()
            inp = (torch.tensor(4), torch.ones(10, 10))
            r = m(*inp)
            epm = export(m, inp).module()
            er = epm(*inp)
            self.assertTrue(torch.allclose(er, r))

    @testing.expectedFailureSerDerNonStrict
    @testing.expectedFailureCppRuntimeNonStrict
    def test_more_multidimensional_slicing(self):
        # Inputs: a 3d tensor t and a 1d tensor x of indices into t
        # Output: a 3-tuple of indices
        @torch.library.custom_op("demo::indices3d", mutates_args=())
        def indices3d(t:
torch.Tensor, x: torch.Tensor) -> tuple[int, int, int]: assert t.ndim == 3 assert x.ndim == 1 and x.shape[0] == 3 return tuple(x[i].item() for i in range(3)) # The meta-kernel for this op constrains the indices in x # to be within bounds of t via torch._checks. @torch.library.register_fake("demo::indices3d") def _(t, x): assert t.ndim == 3 assert x.ndim == 1 and x.shape[0] == 3 sizes = tuple(torch.library.get_ctx().new_dynamic_size() for i in range(3)) for i, size in enumerate(sizes): torch._check(size >= 0) torch._check(size <= t.shape[i]) return sizes # example inputs t = torch.randn([4, 5, 6]) x = torch.tensor([2, 3, 4]) def test(m, g, debug=False): # Dynamo does not yet support some cases of indexing tested here, # so don't export in strict mode. if is_non_strict_test(self._testMethodName): em = export(m, (t, x)).module() if debug: print(em) self.assertTrue(torch.allclose(m(t, x), g(t, x))) self.assertTrue(torch.allclose(em(t, x), m(t, x))) # In the following series of test cases, M_* corresponds to indexing code # that a user might write, and G_* corresponds to equivalent code that # export might generate by rewriting the indexing in terms of a sequence # of lower-level ops. 
# indexing with ints class M_ints(torch.nn.Module): def forward(self, t, x): i, j, k = indices3d(t, x) return t[i, j, k] class G_ints(torch.nn.Module): def forward(self, t, x): i, j, k = indices3d(t, x) a = torch.select(t, 0, i) b = torch.select(a, 0, j) c = torch.select(b, 0, k) return c test(M_ints(), G_ints()) # indexing with slices class M_slices(torch.nn.Module): def forward(self, t, x): i, j, k = indices3d(t, x) return t[:i, :j, :k] class G_slices(torch.nn.Module): def forward(self, t, x): i, j, k = indices3d(t, x) a = torch.narrow(t, 0, 0, i) b = torch.narrow(a, 1, 0, j) c = torch.narrow(b, 2, 0, k) return c test(M_slices(), G_slices()) # indexing with ints and slices class M_ints_slices(torch.nn.Module): def forward(self, t, x): i, j, k = indices3d(t, x) return t[:i, j, :k] class G_ints_slices(torch.nn.Module): def forward(self, t, x): i, j, k = indices3d(t, x) a = torch.narrow(t, 0, 0, i) b = torch.select(a, 1, j) c = torch.narrow(b, 1, 0, k) return c test(M_ints_slices(), G_ints_slices()) # indexing with ints and None class M_ints_None(torch.nn.Module): def forward(self, t, x): i, j, k = indices3d(t, x) return t[None, i, None] class G_ints_None(torch.nn.Module): def forward(self, t, x): i, j, k = indices3d(t, x) a = torch.unsqueeze(t, 0) b = torch.select(a, 1, i) c = torch.unsqueeze(b, 1) return c test(M_ints_None(), G_ints_None()) # indexing with slices and None class M_slices_None(torch.nn.Module): def forward(self, t, x): i, j, k = indices3d(t, x) return t[:i, None, :j, None, None, :k] class G_slices_None(torch.nn.Module): def forward(self, t, x): i, j, k = indices3d(t, x) a = torch.narrow(t, 0, 0, i) b = torch.unsqueeze(a, 1) c = torch.narrow(b, 2, 0, j) d = torch.unsqueeze(c, 3) e = torch.unsqueeze(d, 4) f = torch.narrow(e, 5, 0, k) return f test(M_slices_None(), G_slices_None()) # indexing with None, Ellipsis, and int class M_None_Ellipsis_int(torch.nn.Module): def forward(self, t, x): i, j, k = indices3d(t, x) return t[None, ..., None, j] class 
G_None_Ellipsis_int(torch.nn.Module): def forward(self, t, x): i, j, k = indices3d(t, x) a = torch.unsqueeze(t, 0) b = torch.unsqueeze(a, 3) c = torch.select(b, 4, j) return c test(M_None_Ellipsis_int(), G_None_Ellipsis_int()) # indexing with slice, None, Ellipsis, and int class M_slice_None_Ellipsis_int(torch.nn.Module): def forward(self, t, x): i, j, k = indices3d(t, x) return t[:i, None, ..., None, j] class G_slice_None_Ellipsis_int(torch.nn.Module): def forward(self, t, x): i, j, k = indices3d(t, x) a = torch.narrow(t, 0, 0, i) b = torch.unsqueeze(a, 1) c = torch.unsqueeze(b, 3) d = torch.select(c, 4, j) return d test(M_slice_None_Ellipsis_int(), G_slice_None_Ellipsis_int()) def test_sequential_slicing(self): # See https://github.com/pytorch/pytorch/issues/137455 class TestModule1(torch.nn.Module): def __init__(self) -> None: super().__init__() self.seq = torch.nn.Sequential( torch.nn.Linear(4, 4), torch.nn.Linear(4, 4), torch.nn.Linear(4, 4), ) def forward(self, x: torch.Tensor) -> torch.Tensor: # seq_last as local variable works seq_last = self.seq[1:] return seq_last(x) class TestModule2(torch.nn.Module): def __init__(self) -> None: super().__init__() self.seq = torch.nn.Sequential( torch.nn.Linear(4, 4), torch.nn.Linear(4, 4), torch.nn.Linear(4, 4), ) # seq_last as initialized submodule works self.seq_last = self.seq[1:] def forward(self, x: torch.Tensor) -> torch.Tensor: return self.seq_last(x) inp = (torch.randn(4, 4),) for mod in [TestModule1(), TestModule2()]: epm = export(mod, inp).module() self.assertTrue(torch.allclose(epm(*inp), mod(*inp))) def test_unflatten_isinstance(self): class N(torch.nn.Module): def forward(self, x, b): if b: return x + 1 else: return x + 2 class M(torch.nn.Module): def __init__(self): super().__init__() self.n = N() def forward(self, x): return self.n(x + 1, True) + self.n(x + 1, False) x = torch.zeros(4) types = {"n": N} ep = export( M(), (x,), preserve_module_call_signature=tuple(types.keys()), ) ufm = 
torch.export.unflatten(ep) self.assertTrue(torch.allclose(ufm(x), x + 5)) for fqn, mod in ufm.named_modules(remove_duplicate=False): if cls := types.get(fqn): ty = f"{cls.__module__}.{cls.__qualname__}" self.assertTrue(ty, mod.type_name()) def test_unflatten_asserts(self): # TODO: strict-export fails class M1(torch.nn.Module): def forward(self, x, y): b = x.item() torch._check(b >= 0) torch._check(b < y.size(0)) return y[:b] class M3(torch.nn.Module): def forward(self, x, y): b = x.item() torch._check(b >= 0) torch._check(b < y.size(0) * 2) return y[:b] class M2(torch.nn.Module): def __init__(self) -> None: super().__init__() self.m1 = M1() self.m3 = M3() def forward(self, x, y): return self.m1(x, y) + self.m3(x, y) inputs = (torch.tensor(3), torch.randn(10)) ep = torch.export.export( M2(), inputs, dynamic_shapes={"x": None, "y": (Dim("moo"),)}, strict=False ) orig_res = M2()(*inputs) ep_res = ep.module()(*inputs) self.assertTrue(torch.allclose(orig_res[0], ep_res[0])) self.assertTrue(torch.allclose(orig_res[1], ep_res[1])) self.assertTrue(torch.allclose(orig_res[2], ep_res[2])) unflattened = torch.export.unflatten(ep) ep_res = unflattened(*inputs) self.assertTrue(torch.allclose(orig_res[0], ep_res[0])) self.assertTrue(torch.allclose(orig_res[1], ep_res[1])) self.assertTrue(torch.allclose(orig_res[2], ep_res[2])) def test_unflatten_placeholder_update_child2parent_swap(self): class Child(torch.nn.Module): def forward(self, x): torch.ops.aten.dropout_(x, 0.5, False) # Applying dropout inplace return x - 2 class Foo(torch.nn.Module): def __init__(self): super().__init__() self.child = Child() def forward(self, x): f1 = self.child(x) f2 = x * 4 return f1 + f2 m = Foo() inp = torch.ones(3, 10, dtype=torch.float32) orig_result = m(inp) if not is_retracebility_test(self._testMethodName): inp = torch.ones(3, 10, dtype=torch.float32) ep = export(m, (inp,), preserve_module_call_signature=("child",)) unf = unflatten(ep) unf.print_readable() inp = torch.ones(3, 10, 
dtype=torch.float32) ep_result = ep.module()(inp) self.assertTrue(torch.allclose(ep_result, orig_result)) unf.set_submodule("child", m.child) inp = torch.ones(3, 10, dtype=torch.float32) unf_result = unf(inp) self.assertTrue(torch.allclose(unf_result, orig_result)) def test_unflatten_placeholder_update_grandchild2cousin_swap(self): class Grandchild(torch.nn.Module): def forward(self, x): a = x.to(torch.float32) # .to is considered a mutation return x + 4, a class Child(torch.nn.Module): def __init__(self): super().__init__() self.grandchild = Grandchild() def forward(self, x): y, a = self.grandchild(x) return y + a class OtherGrandchild(torch.nn.Module): def forward(self, x): return x * 2 class OtherChild(torch.nn.Module): def __init__(self): super().__init__() self.other_grandchild = OtherGrandchild() def forward(self, x): return x + self.other_grandchild(x) class Foo(torch.nn.Module): def __init__(self): super().__init__() self.child = Child() self.other_child = OtherChild() def forward(self, x): f1 = self.child(x) f2 = self.other_child(x) return f1 + f2 inp = torch.ones(2, 3, dtype=torch.float32) orig_result = Foo()(inp) self.assertTrue(torch.allclose(orig_result, torch.ones(2, 3) * 9)) if not is_retracebility_test(self._testMethodName): inp = torch.ones(2, 3, dtype=torch.float32) ep = export(Foo(), (inp,), preserve_module_call_signature=("child",)) unf = unflatten(ep) inp = torch.ones(2, 3, dtype=torch.float32) ep_result = ep.module()(inp) self.assertTrue(torch.allclose(ep_result, orig_result)) unf.set_submodule("child", Child()) inp = torch.ones(2, 3, dtype=torch.float32) unf_result = unf(inp) self.assertTrue(torch.allclose(unf_result, orig_result)) def test_unflatten_buffer_update_child2parent_swap(self): class Child(torch.nn.Module): def __init__(self): super().__init__() self.buf = torch.nn.Buffer(torch.tensor(10)) def forward(self, x): self.buf.add_(1) return x + 2 class Foo(torch.nn.Module): def __init__(self): super().__init__() self.child = Child() def 
forward(self, x): y = self.child(x) # child.buf <- 10 + 1 = 11, x + 2 = 3 x = y + self.child.buf # 14 y = self.child(x) # child.buf <- 11 + 1 = 12, x + 2 = 16 x = y + self.child.buf # 28 y = self.child(x) # child.buf <- 12 + 1 = 13, x + 2 = 30 x = y + self.child.buf # 43 return x inp = torch.ones(2, 3, dtype=torch.float32) orig_result = Foo()(inp) self.assertTrue(torch.allclose(orig_result, torch.ones(2, 3) * 43)) if not is_retracebility_test(self._testMethodName): inp = torch.ones(2, 3, dtype=torch.float32) ep = export(Foo(), (inp,), preserve_module_call_signature=("child",)) unf = unflatten(ep) inp = torch.ones(2, 3, dtype=torch.float32) ep_result = ep.module()(inp) self.assertTrue(torch.allclose(ep_result, orig_result)) unf.set_submodule("child", Child()) inp = torch.ones(2, 3, dtype=torch.float32) unf_result = unf(inp) self.assertTrue(torch.allclose(unf_result, orig_result)) def test_export_func_with_var_keyword_pytree_args(self): class Module(torch.nn.Module): def forward(self, arg1, arg2, *args, kw1, kw2, **kwargs): return ( arg1 + arg2[0][0] + args[0] + kw1[0] + kwargs["kw3"][0], arg2[1] + args[1] + kw2 + kwargs["kw4"], ) kw_func = Module() args = ( torch.ones(2, 3), [(torch.ones(2, 3),), torch.ones(3, 4)], torch.ones(2, 3), torch.ones(3, 4), ) kwargs = { "kw1": (torch.ones(2, 3),), "kw2": torch.ones(3, 4), "kw3": (torch.ones(2, 3), torch.ones(3, 4)), "kw4": torch.ones(3, 4), } self._test_export_same_as_eager(kw_func, args, kwargs) @testing.expectedFailureSerDer # we don't save placeholder metadata @testing.expectedFailureCppSerDes # we don't save placeholder metadata @testing.expectedFailureSerDerNonStrict @testing.expectedFailureStrictV2 def test_linear_conv(self): strict = True class MyLinear(torch.nn.Module): def __init__(self) -> None: super().__init__() self.weight = torch.randn(20, 98) self.bias = torch.randn(20) def forward(self, x): return torch.nn.functional.linear(x, self.weight, self.bias) class Foo(torch.nn.Module): def __init__(self) -> None: 
super().__init__() self.conv = torch.nn.Conv2d(16, 33, 3) self.linear = MyLinear() def forward(self, x): x_conv = self.conv(x) x_linear = self.linear(x_conv) return x_linear.cos() ep = export(Foo(), (torch.randn(20, 16, 50, 100),), strict=strict) for node in ep.graph.nodes: if ( node.op == "placeholder" and node.name in ep.graph_signature.inputs_to_buffers or node.name in ep.graph_signature.inputs_to_parameters ): self.assertTrue("source_fn_stack" in node.meta) def test_dynamic_shapes_dataclass(self): torch.export.register_dataclass( Inp2, serialized_type_name="test_export_api_with_dynamic_shapes.Inp2", ) class Foo(torch.nn.Module): def forward(self, inputs): return torch.matmul(inputs.a, inputs.b) foo = Foo() inputs = (Inp2(a=torch.randn(10, 2, 3), b=torch.randn(10, 3, 4)),) batch = Dim("batch") efoo = export( foo, inputs, dynamic_shapes={"inputs": [{0: batch}, {0: batch}]}, ) self.assertEqual( [ # First dimension varies across strict and non-strict # since the source names are different, resulting in # different symbol names. str(node.meta["val"].shape[1:]) for node in efoo.graph_module.graph.nodes if node.op == "placeholder" ], ["torch.Size([2, 3])", "torch.Size([3, 4])"], ) @testing.expectedFailureCppSerDes def test_export_method(self): from torch._export.utils import sync_state, wrap_method strict = True class M(torch.nn.Module): def __init__(self): super().__init__() self.t = torch.nn.Buffer(torch.tensor(10)) def forward(self, x): return self.foo(x) * self.bar(x) def foo(self, x): self.t.mul_(2) return x + self.t def bar(self, x): return x - self.t # exporting... em = M() ex = torch.randn(4) # ...foo epm_foo = export( wrap_method(em.foo), (ex,), dynamic_shapes={"x": (Dim.DYNAMIC,)}, strict=strict, ).module() # ...bar epm_bar = export( wrap_method(em.bar), (ex,), dynamic_shapes=((Dim.DYNAMIC,),), strict=strict, ).module() if is_serdes_test(self._testMethodName): sync_state(epm_foo, epm_bar) # running... 
m = M() rx = torch.randn(5) self.assertTrue(torch.allclose(m.t, epm_foo.t)) self.assertTrue(torch.allclose(m.t, epm_bar.t)) # ...foo self.assertTrue(torch.allclose(epm_foo(rx), m.foo(rx))) self.assertTrue(torch.allclose(m.t, epm_foo.t)) self.assertTrue(torch.allclose(m.t, epm_bar.t)) # ...bar self.assertTrue(torch.allclose(epm_bar(rx), m.bar(rx))) self.assertTrue(torch.allclose(m.t, epm_foo.t)) self.assertTrue(torch.allclose(m.t, epm_bar.t)) def test_export_api_with_dynamic_shapes(self): from torch.export import Dim, dims # pass dynamic shapes of inputs [args] class Foo(torch.nn.Module): def forward(self, x, y): return torch.matmul(x, y) foo = Foo() inputs = (torch.randn(10, 2, 3), torch.randn(10, 3, 4)) batch = Dim("batch") efoo = export( foo, inputs, dynamic_shapes={k: {0: batch} for k in ["x", "y"]}, ) self.assertEqual(efoo.module()(*inputs).shape, foo(*inputs).shape) foo = Foo() inputs = (torch.randn(10, 2, 3),) kwinputs = {"y": torch.randn(10, 3, 4)} batch = Dim("batch") efoo = export( foo, inputs, kwinputs, dynamic_shapes={k: {0: batch} for k in ["x", "y"]} ) self.assertEqual( efoo.module()(*inputs, **kwinputs).shape, foo(*inputs, **kwinputs).shape ) # pass dynamic shapes of inputs [partial, error] foo = Foo() inputs = (torch.randn(10, 2, 3),) kwinputs = {"y": torch.randn(10, 3, 4)} batch = Dim("batch") with self.assertRaisesRegex( torch._dynamo.exc.UserError, ( "You marked.*but your code specialized it to be a constant.*" "If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO(.*\n)*.*" "Suggested fixes:(.*\n)*.*" "batch = 10" ), ): export( foo, inputs, kwinputs, dynamic_shapes={"x": {0: batch}, "y": None}, ) # pass dynamic shapes of inputs [module] foo = Foo() inputs = (torch.randn(10, 2, 3), torch.randn(10, 3, 4)) batch = Dim("batch") efoo = export( foo, inputs, dynamic_shapes={"x": {0: batch}, "y": {0: batch}}, ) self.assertEqual(efoo.module()(*inputs).shape, foo(*inputs).shape) # pass dynamic shapes of inputs [bounds, mostly shared] 
foo = Foo() inputs = (torch.randn(10, 3, 3), torch.randn(10, 3, 3)) batch = Dim("batch", min=8, max=64) size = Dim("size") efoo = export( foo, inputs, dynamic_shapes={ "x": (batch, size, size), "y": (batch, size, size), }, ) for node in efoo.graph_module.graph.nodes: if node.op == "placeholder": self.assertEqual(node.meta["val"].shape[1], node.meta["val"].shape[2]) self.assertEqual(efoo.module()(*inputs).shape, foo(*inputs).shape) # pass dynamic shapes of inputs [multiple, mostly distinct] inputs = (torch.randn(10, 2, 3), torch.randn(10, 3, 4)) batch, M, K, N = dims("batch", "M", "K", "N") efoo = export( Foo(), inputs, dynamic_shapes={"x": (batch, M, K), "y": (batch, K, N)}, ) placeholders = [ node.meta["val"].shape for node in efoo.graph_module.graph.nodes if node.op == "placeholder" ] self.assertEqual( placeholders[0][2], placeholders[1][1], ) self.assertEqual(efoo.module()(*inputs).shape, foo(*inputs).shape) # pass dynamic shapes of inputs [dict] class Foo(torch.nn.Module): def forward(self, inputs): return torch.matmul(inputs["x"], inputs["y"]) foo = Foo() inputs = ({"x": torch.randn(10, 2, 3), "y": torch.randn(10, 3, 4)},) batch = Dim("batch") efoo = export( foo, inputs, dynamic_shapes={"inputs": {k: {0: batch} for k in ["x", "y"]}} ) self.assertEqual( [ # First dimension varies across strict and non-strict # since the source names are different, resulting in # different symbol names. 
str(node.meta["val"].shape[1:]) for node in efoo.graph_module.graph.nodes if node.op == "placeholder" ], ["torch.Size([2, 3])", "torch.Size([3, 4])"], ) self.assertEqual(efoo.module()(*inputs).shape, foo(*inputs).shape) # pass dynamic shapes of inputs [list] class Foo(torch.nn.Module): def forward(self, inputs): return torch.matmul(inputs[0], inputs[1]) foo = Foo() inputs = ([torch.randn(10, 2, 3), torch.randn(10, 3, 4)],) batch = Dim("batch") efoo = export( foo, inputs, dynamic_shapes={"inputs": [{0: batch} for _ in range(2)]} ) self.assertEqual( [ # First dimension varies across strict and non-strict # since the source names are different, resulting in # different symbol names. str(node.meta["val"].shape[1:]) for node in efoo.graph_module.graph.nodes if node.op == "placeholder" ], ["torch.Size([2, 3])", "torch.Size([3, 4])"], ) self.assertEqual(efoo.module()(*inputs).shape, foo(*inputs).shape) # pass dynamic shapes of inputs [pytree-registered classes] if HAS_TORCHREC: # skipping tests if torchrec not available class Foo(torch.nn.Module): def forward(self, kjt) -> torch.Tensor: return kjt.values() + 0, kjt.offsets() + 0 foo = Foo() kjt = KeyedJaggedTensor( values=torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]), keys=["index_0", "index_1"], lengths=torch.IntTensor([0, 2, 0, 1, 1, 1, 0, 3]), offsets=torch.IntTensor([0, 0, 2, 2, 3, 4, 5, 5, 8]), ) inputs = (kjt,) dim = Dim("dim") dim_plus_one = Dim("dim_plus_one") efoo = torch.export.export( foo, inputs, dynamic_shapes={ "kjt": [{0: dim}, None, {0: dim}, {0: dim_plus_one}, None, None] }, ) self.assertEqual( [out.shape for out in efoo.module()(*inputs)], [out.shape for out in foo(*inputs)], ) # pass dynamic shapes of inputs [distinct, error] class Foo(torch.nn.Module): def forward(self, x, y): return torch.matmul(x, y) foo = Foo() inputs = (torch.randn(10, 2, 3), torch.randn(10, 3, 4)) batch, M, K1, K2, N = dims("batch", "M", "K1", "K2", "N") with self.assertRaisesRegex( torch._dynamo.exc.UserError, ( 
"Constraints violated \\(K2\\)!(.*\n)*.*" "K2.*and.*K1.*must always be equal(.*\n)*.*" "Suggested fixes:(.*\n)*.*" "K2 = K1" ), ): export( foo, inputs, dynamic_shapes={"x": (batch, M, K1), "y": (batch, K2, N)}, ) # pass dynamic shapes of inputs [specialized, error] foo = Foo() inputs = (torch.randn(10, 2, 3), torch.randn(10, 3, 4)) batch, M, K1, N = dims("batch", "M", "K1", "N") with self.assertRaisesRegex( torch._dynamo.exc.UserError, ( "You marked.*but your code specialized it to be a constant.*" "If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO(.*\n)*" "Suggested fixes:(.*\n)*.*" "K1 = 3" ), ): export( foo, inputs, dynamic_shapes={"x": (batch, M, K1), "y": (batch, None, N)}, ) # pass dynamic shapes of inputs [guards, error] class Foo(torch.nn.Module): def forward(self, x, y): if x.shape[0] < 16 and y.shape[1] % 3 == 0: return torch.matmul(x, y) else: return x + y foo = Foo() inputs = (torch.randn(10, 2, 3), torch.randn(10, 3, 4)) batch, M, K, N = dims("batch", "M", "K", "N") with self.assertRaisesRegex( torch._dynamo.exc.UserError, ( "Constraints violated.*!(.*\n)*.*" "Not all values of K.*satisfy the generated guard(.*\n)*.*" "Not all values of batch.*satisfy the generated guard(.*\n)*.*" "Suggested fixes:(.*\n)*.*" "batch = Dim\\('batch', max=15\\)(.*\n)*.*" "K = 3\\*_K" ), ): export( foo, inputs, dynamic_shapes={"x": (batch, M, K), "y": (batch, K, N)}, ) def test_suggested_fixes_new_roots(self): from torch.export import dims # suggested fixes should introduce new root dim for modulo guard class Foo(torch.nn.Module): def forward(self, x, y, z): # dy = 3 * _dx # dx = 3 * _dx - 1 # dz = 3 * _dx + 2 # suggested fixes results will look something like # {"dx": {"eq": 3*_dx-1, "min": 5, "max": 36}, "dy": {"eq": dx+1}, ...} if x.shape[0] >= 5 and x.shape[0] <= 36 and y.shape[0] % 3 == 0: return x + y[1:] + z[3:] foo = Foo() inputs = ( torch.randn( 11, ), torch.randn( 12, ), torch.randn( 14, ), ) dx, dy, dz = dims("dx", "dy", "dz") 
dynamic_shapes = { "x": (dx,), "y": (dy,), "z": (dz,), } with self.assertRaisesRegex( # figure out regex later torch._dynamo.exc.UserError, ( "Constraints violated.*!(.*\n)*.*" "Suggested fixes(.*\n)*.*" "_dx = Dim\(\\'_dx\\', max=12\)(.*\n)*.*" "dx = 3\*_dx - 1(.*\n)*.*" "dy = 3\*_dx(.*\n)*.*" "dz = 3\*_dx \+ 2" ), ): export(Foo(), inputs, dynamic_shapes=dynamic_shapes) # retry export _dx = Dim("_dx", min=2, max=12) dynamic_shapes = {"x": (3 * _dx - 1,), "y": (3 * _dx,), "z": (3 * _dx + 2,)} export(Foo(), inputs, dynamic_shapes=dynamic_shapes) def test_refine_dynamic_shapes_from_suggested_fixes(self): from torch.export.dynamic_shapes import ( refine_dynamic_shapes_from_suggested_fixes, ) def helper(model, inputs, dynamic_shapes): # export, fail, parse & refine suggested fixes, re-export try: export(Foo(), inps, dynamic_shapes=dynamic_shapes) raise Exception("should have raised constraint violation error") except torch._dynamo.exc.UserError as exc: new_shapes = refine_dynamic_shapes_from_suggested_fixes( exc.msg, dynamic_shapes ) export(Foo(), inps, dynamic_shapes=new_shapes) return new_shapes # specialize dims + derived dims class Foo(torch.nn.Module): def forward(self, x, y, z): x0 = x + y[1:] + z[2:] x1 = x @ torch.randn(4, 4) return x0, x1 inps = ( torch.randn( 4, ), torch.randn( 5, ), torch.randn( 6, ), ) dx = Dim("dx", max=16) dynamic_shapes = {"x": (dx,), "y": (dx + 1,), "z": (dx + 2,)} new_shapes = helper(Foo(), inps, dynamic_shapes) self.assertEqual(new_shapes["x"][0], 4) self.assertEqual(new_shapes["z"][0], 6) # refine lower, upper bound class Foo(torch.nn.Module): def forward(self, x, y): if x.shape[0] >= 6 and y.shape[0] <= 16: return x * 2.0, y + 1 inps = (torch.randn(16), torch.randn(12)) dynamic_shapes = {"x": (Dim("dx"),), "y": (Dim("dy"),)} new_shapes = helper(Foo(), inps, dynamic_shapes) self.assertEqual(new_shapes["x"][0].min, 6) self.assertEqual(new_shapes["y"][0].max, 16) # divisiblity, will introduce new root class Foo(torch.nn.Module): def 
forward(self, x): if x.shape[0] >= 9: return x.reshape([-1, 3]) inps = ( torch.randn( 15, ), ) dynamic_shapes = ((Dim("dx"),),) new_shapes = helper(Foo(), inps, dynamic_shapes) dim = new_shapes[0][0] root = dim.root self.assertEqual(dim.fn(2), 6) self.assertEqual(root.min, 3) # turn dim into derived dim/relation class Foo(torch.nn.Module): def forward(self, x, y): return x + y[4:] inps = (torch.randn(6, 4), torch.randn(10, 4)) dynamic_shapes = { "x": (Dim("dx0"), Dim("dx1")), "y": (Dim("dy0"), Dim("dy1")), } new_shapes = helper(Foo(), inps, dynamic_shapes) self.assertEqual(new_shapes["x"][0], new_shapes["y"][0].root) # dy0 = dx0 + 4 self.assertEqual(new_shapes["y"][0].fn(5), 9) self.assertEqual(new_shapes["x"][1], new_shapes["y"][1]) # dx1 = dy1 # nested dynamic shapes spec class Foo(torch.nn.Module): def forward(self, x, y): x0 = x[0]["data"] + x[1] + x[2][2:] x1 = y["a"] @ torch.randn(4, 4) x2 = y["b"] @ torch.randn(6, 6) return x0, x1, x2 inps = ( ( {"data": torch.randn(4, 4)}, torch.randn(4, 4), torch.randn(6, 4), ), { "a": torch.randn(8, 4), "b": torch.randn(9, 6), }, ) dynamic_shapes = { "x": ( {"data": (Dim("dx00"), Dim("dx01"))}, (Dim("dx10"), Dim("dx11")), (Dim("dx20"), Dim("dx21")), ), "y": { "a": (Dim("dya0"), Dim("dya1")), "b": (Dim("dyb0"), Dim("dyb1")), }, } new_shapes = helper(Foo(), inps, dynamic_shapes) self.assertEqual( new_shapes["x"][0]["data"][0], new_shapes["x"][1][0] ) # dx10 = dx00 self.assertEqual( new_shapes["x"][2][0].root, new_shapes["x"][0]["data"][0] ) # dx20 = dx00 + 2 self.assertEqual(new_shapes["x"][2][0].fn(10), 12) self.assertEqual( new_shapes["x"][0]["data"][1], new_shapes["x"][1][1] ) # dx11 = dx01 self.assertEqual(new_shapes["y"]["a"][1], 4) self.assertEqual(new_shapes["y"]["b"][1], 6) self.assertEqual(new_shapes["y"]["b"][0].__name__, "dyb0") # unchanged def test_dynamic_shapes_spec_with_pytree(self): from torch.export import Dim, export from torch.utils._pytree import tree_map inputs = { "tensor": torch.randn(3), 
"dict_of_tensors": {k: torch.randn(3) for k in ["A", "B", "C", "D"]}, "list_of_tensors": [torch.randn(3) for _ in range(4)], } batch = Dim("batch") # uniformly specify dynamic shapes for all inputs spec = tree_map(lambda x: {0: batch}, inputs) class Foo(torch.nn.Module): def forward(self, inputs): return ( inputs["tensor"] + inputs["dict_of_tensors"]["A"] + inputs["list_of_tensors"][0] ) ep = export(Foo(), (inputs,), dynamic_shapes={"inputs": spec}) input_shapes = [ str(node.meta["val"].shape) for node in ep.graph_module.graph.nodes if node.op == "placeholder" ] self.assertEqual(len(input_shapes), 9) self.assertEqual(len(set(input_shapes)), 1) def test_error_does_not_reference_eager_fallback(self): class Module(torch.nn.Module): def forward(self, x): y = x.nonzero() z = y.shape[0] if z > 2: return x.cos() else: return x.sin() fn_ddo = Module() if is_non_strict_test(self._testMethodName): error = torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode error_msg = r"Could not guard on data-dependent expression" else: error = torchdynamo.exc.UserError error_msg = r"^(?!.*fall back to eager).*" with self.assertRaisesRegex(error, error_msg): _ = export(fn_ddo, (torch.tensor([2, 3, 5]),)) def test_pytree_register_data_class(self): @dataclass class MyDataClass: x: int y: int z: int = None dt = MyDataClass(x=3, y=4) flat, spec = tree_flatten(dt) self.assertTrue(spec, treespec_leaf()) self.assertTrue(len(flat) == 1) torch.export.register_dataclass( MyDataClass, serialized_type_name="test_pytree_register_data_class.MyDataClass", ) flat, spec = tree_flatten(dt) self.assertEqual( spec, TreeSpec( MyDataClass, [["x", "y"], ["z"]], [treespec_leaf(), treespec_leaf()] ), ) self.assertEqual(flat, [3, 4]) orig_dt = tree_unflatten(flat, spec) self.assertTrue(isinstance(orig_dt, MyDataClass)) self.assertEqual(orig_dt.x, 3) self.assertEqual(orig_dt.y, 4) self.assertEqual(orig_dt.z, None) roundtrip_spec = treespec_loads(treespec_dumps(spec)) self.assertEqual(roundtrip_spec, 
spec) @dataclass class MyOtherDataClass: # the pytree registration don't allow registering the same class twice x: int y: int z: int = None # Override the registration with keep none fields register_dataclass_as_pytree_node( MyOtherDataClass, return_none_fields=True, serialized_type_name="test_pytree_regster_data_class.MyOtherDataClass", ) dt = MyOtherDataClass(x=3, y=4) flat, spec = tree_flatten(dt) self.assertEqual( spec, TreeSpec( MyOtherDataClass, [["x", "y", "z"], []], [treespec_leaf(), treespec_leaf(), treespec_leaf()], ), ) self.assertEqual(flat, [3, 4, None]) orig_dt = tree_unflatten(flat, spec) self.assertTrue(isinstance(orig_dt, MyOtherDataClass)) self.assertEqual(orig_dt.x, 3) self.assertEqual(orig_dt.y, 4) self.assertEqual(orig_dt.z, None) roundtrip_spec = treespec_loads(treespec_dumps(spec)) self.assertEqual(roundtrip_spec, spec) def test_pytree_register_nested_data_class(self): @dataclass class Inner: x: int y: int @dataclass class Outer: xy: Inner ab: Inner xy = Inner(1, 2) ab = Inner(3, 4) dt = Outer(xy, ab) inp = {"dt1": (dt, ({},)), "dt2": ((torch.ones(1),), dt)} torch.export.register_dataclass( Inner, serialized_type_name="test_pytree_register_nested_data_class.Inner" ) torch.export.register_dataclass( Outer, serialized_type_name="test_pytree_register_nested_data_class.Outer" ) flat, spec = tree_flatten(inp) self.assertEqual(flat, [1, 2, 3, 4, torch.ones(1), 1, 2, 3, 4]) unflat = tree_unflatten(flat, spec) self.assertEqual(unflat, inp) roundtrip_spec = treespec_loads(treespec_dumps(spec)) self.assertEqual(roundtrip_spec, spec) def test_param_util(self): class Basic(torch.nn.Module): def __init__(self) -> None: super().__init__() self.lin = torch.nn.Linear(10, 1) def forward(self, x): return self.lin(x) ep = export(Basic(), (torch.randn(5, 10),)) num_params = 0 params = [] for node in ep.graph.nodes: if is_param(ep, node): num_params += 1 params.append(get_param(ep, node)) self.assertEqual(num_params, 2) self.assertEqual(params[0].shape, [1, 10]) 
        # weight
        self.assertEqual(params[1].shape, [1])  # bias

    def test_buffer_util(self):
        """is_buffer/get_buffer must locate every buffer of an exported BatchNorm2d."""
        ep = export(
            torch.nn.BatchNorm2d(100, affine=False), (torch.ones(20, 100, 35, 45),)
        )
        num_buffer = 0
        buffer = []

        for node in ep.graph.nodes:
            if is_buffer(ep, node):
                num_buffer += 1
                buffer.append(get_buffer(ep, node))
        # BatchNorm2d carries exactly three buffers (see commented lines below).
        self.assertEqual(num_buffer, 3)
        # The insertion order is not guaranteed to be same for strict vs
        # non-strict, so commenting this out.
        # self.assertEqual(buffer[0].shape, torch.Size([100])) # running_mean
        # self.assertEqual(buffer[1].shape, torch.Size([100])) # running_var
        # self.assertEqual(buffer[2].shape, torch.Size([])) # num_batches_tracked

    def test_export_dynamo_config(self):
        """Patching allow_rnn=False in the export dynamo config makes LSTM export fail."""

        class MyModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.lstm = torch.nn.LSTM(input_size=4, hidden_size=5, num_layers=1)

            def forward(self, inputs: torch.Tensor) -> torch.Tensor:
                return self.lstm(inputs)

        config = DEFAULT_EXPORT_DYNAMO_CONFIG
        mod = MyModule()

        @contextmanager
        def _patch_config(kwargs):
            # Temporarily set attributes on the shared config object and
            # restore the original values on exit (even on error).
            orig_config_dict = dataclasses.asdict(config)
            try:
                for k, v in kwargs.items():
                    setattr(config, k, v)
                yield
            finally:
                for k, v in orig_config_dict.items():
                    setattr(config, k, v)

        inp = (torch.rand(5, 4),)
        # Default config: export succeeds.
        exported_program = export(mod, inp, strict=True)

        with _patch_config({"allow_rnn": False}):
            with self.assertRaisesRegex(
                torch._dynamo.exc.Unsupported,
                "Dynamo does not support RNN, GRU, or LSTM.",
            ):
                _ = export(mod, inp, strict=True)

    def test_device_to_static(self):
        """x.to("cpu") on a CPU tensor: check which aten ops the exported graph keeps."""

        class Module(torch.nn.Module):
            def forward(self, x):
                return x.to("cpu")

        ep = export(Module(), (torch.tensor(1, device="cpu"),))
        # Collect the call_function targets of the exported graph.
        ops = []
        for node in ep.graph.nodes:
            if node.op == "call_function":
                ops.append(node.target)
        if is_training_ir_test(self._testMethodName):
            # aten.to will just specialize by decomposing to a no-op
            self.assertEqual(
                ops,
                [
                    torch.ops.aten._assert_tensor_metadata.default,
                ],
            )
        else:
            self.assertEqual(
                ops,
                [
                    torch.ops.aten._assert_tensor_metadata.default,
                    torch.ops.aten.to.dtype_layout,
                ],
            )
        # After decomposition only a single call_function op remains.
        ep = ep.run_decompositions({})
        ops = []
        for node in ep.graph.nodes:
            if node.op == "call_function":
                ops.append(node.target)
        self.assertEqual(len(ops), 1)

    def test_device_to_dynamic(self):
        """Same as test_device_to_static but with a dynamic first dimension."""

        class Module(torch.nn.Module):
            def forward(self, x):
                return x.to("cpu")

        ep = export(
            Module(),
            (torch.tensor([1, 2], device="cpu"),),
            dynamic_shapes={"x": {0: Dim("i")}},
        )
        ops = []
        for node in ep.graph.nodes:
            if node.op == "call_function":
                ops.append(node.target)
        if is_training_ir_test(self._testMethodName):
            # aten.to will just specialize by decomposing to a no-op
            self.assertEqual(
                ops,
                [
                    torch.ops.aten._assert_tensor_metadata.default,
                ],
            )
        else:
            self.assertEqual(
                ops,
                [
                    torch.ops.aten._assert_tensor_metadata.default,
                    torch.ops.aten.to.dtype_layout,
                ],
            )
        # After decomposition only a single call_function op remains.
        ep = ep.run_decompositions({})
        ops = []
        for node in ep.graph.nodes:
            if node.op == "call_function":
                ops.append(node.target)
        self.assertEqual(len(ops), 1)

    def test_device_to_mutation(self):
        """A no-op .to("cpu") must still alias the input, so in-place add_ is visible
        through both the returned tensor and the original input."""

        class Module(torch.nn.Module):
            def forward(self, x):
                y = x.to("cpu")
                y.add_(1)
                return y, x

        ep = export(Module(), (torch.tensor(1, device="cpu"),))
        ops = []
        for node in ep.graph.nodes:
            if node.op == "call_function":
                ops.append(node.target)
        if is_training_ir_test(self._testMethodName):
            # aten.to decomposes to no-op, add_ decomposes to functional variant
            self.assertEqual(
                ops,
                [
                    torch.ops.aten._assert_tensor_metadata.default,
                    torch.ops.aten.add.Tensor,
                ],
            )
        else:
            self.assertEqual(
                ops,
                [
                    torch.ops.aten._assert_tensor_metadata.default,
                    torch.ops.aten.to.dtype_layout,
                    torch.ops.aten.add_.Tensor,
                ],
            )
        # test mutation
        x = torch.tensor(2, device="cpu")
        y, _ = ep.module()(x)
        self.assertEqual(x.item(), 3)
        # y aliases x (same object identity).
        self.assertEqual(id(y), id(x))

        # test decomp ep
        ep = ep.run_decompositions({})
        for node in ep.graph.nodes:
            if node.op == "call_function":
                self.assertNotEqual(node.target, torch.ops.aten.to.dtype_layout)

        # test mutation for decomposed program
        y, _ = ep.module()(x)
        self.assertEqual(x.item(), 4)
        self.assertEqual(id(y), id(x))

    @requires_gpu
    @testing.expectedFailureCppRuntime
    def test_device_to_gpu(self):
        """GPU->CPU .to: check the traced/decomposed ops and that the baked-in
        input device is asserted at runtime."""

        class Foo(torch.nn.Module):
            def forward(self, x):
                return x.to("cpu")

        ep = export(Foo(), (torch.randn(64).to(GPU_TYPE),))
        ops = []
        for node in ep.graph.nodes:
            if node.op == "call_function":
                ops.append(node.target)
        if is_training_ir_test(self._testMethodName):
            # aten.to decomposes to _to_copy
            self.assertEqual(
                ops,
                [
                    torch.ops.aten._assert_tensor_metadata.default,
                    torch.ops.aten._to_copy.default,
                ],
            )
        else:
            self.assertEqual(
                ops,
                [
                    torch.ops.aten._assert_tensor_metadata.default,
                    torch.ops.aten.to.dtype_layout,
                ],
            )

        # Check device assertion
        with self.assertRaisesRegex(RuntimeError, "Tensor device mismatch!"):
            ep.module()(torch.randn(64))

        ep = ep.run_decompositions()
        ops = []
        for node in ep.graph.nodes:
            if node.op == "call_function":
                ops.append(node.target)
        self.assertEqual(len(ops), 2)
        self.assertEqual(
            ops,
            [
                torch.ops.aten._assert_tensor_metadata.default,
                torch.ops.aten._to_copy.default,
            ],
        )
        # Check device assertion again after decomp
        with self.assertRaisesRegex(RuntimeError, "Tensor device mismatch!"):
            ep.module()(torch.randn(64))

    def test_tensor_constant_aten_to(self):
        """A .to(dtype) on a lifted tensor constant must survive decomposition
        as _to_copy and still produce the eager result."""

        class Module(torch.nn.Module):
            def __init__(self):
                super(Module, self).__init__()
                # Tensor attribute (not a Parameter/buffer) -> lifted constant.
                self.t = torch.tensor([1.0])

            def forward(self, x):
                return x + self.t.to(torch.float64)

        inputs = (torch.randn(1, 10),)
        model = Module()
        ep = export(model, inputs).run_decompositions({})
        ops = []
        for node in ep.graph.nodes:
            if node.op == "call_function":
                ops.append(node.target)
        self.assertGreater(len(ops), 0)
        self.assertIn(torch.ops.aten._to_copy.default, ops)
        # Exported module matches eager.
        self.assertEqual(ep.module()(*inputs), model(*inputs))

    def test_export_aten_to_unflatten(self):
        """Exporting with a preserved submodule call signature while the input to
        the submodule comes from an aten.to; unflattened module matches eager."""

        class Bar(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x):
                return x.sum()

        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.bar = Bar()

            def forward(self, x):
                to = x.to(torch.float)
                return self.bar(to).sum()

        inp = torch.randn(4, 4)
        ep = export(
            Foo(),
(inp,), strict=False, preserve_module_call_signature=("bar",) ) mod = ep.module() self.assertTrue(torch.allclose(mod(inp), Foo()(inp))) @testing.expectedFailureLegacyExportNonStrict @testing.expectedFailureLegacyExportStrict @testing.expectedFailureRetraceabilityNonStrict # when we retrace, ep.module() is hierarchical @testing.expectedFailureRetraceability # when we retrace, ep.module() is hierarchical def test_export_aten_to_unflatten_subclass(self): class Bar(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x): return x.sum() class Foo(torch.nn.Module): def __init__(self): super().__init__() self.bar = Bar() self.param = torch.nn.Parameter( TwoTensor(torch.ones(4, 4), torch.ones(4, 4)) ) def forward(self, x): to = self.param.to(torch.float) return (self.bar(to).sum() + x.sum()).get_elem_a() inp = torch.randn(4, 4) with self.assertRaisesRegex( ValueError, "It looks like p_param is a tensor subclass." ): export( Foo(), (inp,), strict=False, preserve_module_call_signature=("bar",) ).run_decompositions({}) def test_export_aten_to_unflatten_subclass_pre_dispatch(self): class Bar(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x): return x.sum() class Foo(torch.nn.Module): def __init__(self): super().__init__() self.bar = Bar() self.param = torch.nn.Parameter( TwoTensor(torch.ones(4, 4), torch.ones(4, 4)) ) def forward(self, x): to = self.param.to(torch.float) return (self.bar(to).sum() + x.sum()).get_elem_a() inp = torch.randn(4, 4) ep = torch.export.export( Foo(), (inp,), strict=False, preserve_module_call_signature=("bar",) ) unflat = unflatten(ep).bar self.assertExpectedInline( str(unflat.graph).strip(), """\ graph(): %_positional_arg_0 : [num_users=1] = placeholder[target=_positional_arg_0] %_spec_0 : [num_users=1] = get_attr[target=_spec_0] %tree_flatten_spec : [num_users=1] = call_function[target=torch.fx._pytree.tree_flatten_spec](args = (((%_positional_arg_0,), {}), %_spec_0), kwargs = {}) %to : 
[num_users=1] = call_function[target=operator.getitem](args = (%tree_flatten_spec, 0), kwargs = {}) %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%to,), kwargs = {}) %_spec_1 : [num_users=1] = get_attr[target=_spec_1] %tree_unflatten : [num_users=1] = call_function[target=torch.utils._pytree.tree_unflatten](args = ((%sum_1,), %_spec_1), kwargs = {}) return tree_unflatten""", ) with self.assertRaisesRegex( ValueError, "It looks like p_param is a tensor subclass." ): ep.run_decompositions() def test_float_conversion(self): class Module(torch.nn.Module): def forward(self, x): return x.float() ep = export(Module(), (torch.tensor(1, dtype=torch.float),)) ops = [] for node in ep.graph.nodes: if node.op == "call_function": ops.append(node.target) if is_training_ir_test(self._testMethodName): # .float() decomposes to no-op self.assertEqual( ops, [ torch.ops.aten._assert_tensor_metadata.default, ], ) else: self.assertEqual( ops, [ torch.ops.aten._assert_tensor_metadata.default, torch.ops.aten.to.dtype, ], ) ep = ep.run_decompositions({}) ops = [] for node in ep.graph.nodes: if node.op == "call_function": ops.append(node.target) self.assertEqual(len(ops), 1) # test aliasing x = torch.tensor(1, dtype=torch.float) out = ep.module()(x) self.assertEqual(id(x), id(out)) def test_float_conversion_from_int(self): class Module(torch.nn.Module): def forward(self, x): return x.float() ep = export(Module(), (torch.tensor(1, dtype=torch.int32),)) ops = [] for node in ep.graph.nodes: if node.op == "call_function": ops.append(node.target) if is_training_ir_test(self._testMethodName): # .float() decomposes to _to_copy() self.assertEqual( ops, [ torch.ops.aten._assert_tensor_metadata.default, torch.ops.aten._to_copy.default, ], ) else: self.assertEqual( ops, [ torch.ops.aten._assert_tensor_metadata.default, torch.ops.aten.to.dtype, ], ) # Raises error because the input dtype is not the same as the input # tensor when exporting. 
with self.assertRaisesRegex(RuntimeError, "Tensor dtype mismatch!"): ep.module()(torch.tensor(1, dtype=torch.float32)) ep = ep.run_decompositions({}) ops = [] for node in ep.graph.nodes: if node.op == "call_function": ops.append(node.target) self.assertEqual( ops, [ torch.ops.aten._assert_tensor_metadata.default, torch.ops.aten._to_copy.default, ], ) # Check dtype assertion again after decomp with self.assertRaisesRegex(RuntimeError, "Tensor dtype mismatch!"): ep.module()(torch.tensor(1, dtype=torch.float32)) self.assertEqual(ep.module()(torch.tensor(1, dtype=torch.int32)), 1) def test_device_to_mutation_float(self): class Module(torch.nn.Module): def forward(self, x): y = x.float() y.add_(1) return y, x ep = export(Module(), (torch.tensor(1, dtype=torch.float),)) ops = [] for node in ep.graph.nodes: if node.op == "call_function": ops.append(node.target) if is_training_ir_test(self._testMethodName): # aten.to decomposes to no-op, add_ decomposes to functional variant self.assertEqual( ops, [ torch.ops.aten._assert_tensor_metadata.default, torch.ops.aten.add.Tensor, ], ) else: self.assertEqual( ops, [ torch.ops.aten._assert_tensor_metadata.default, torch.ops.aten.to.dtype, torch.ops.aten.add_.Tensor, ], ) # test mutation x = torch.tensor(2, dtype=torch.float) y, _ = ep.module()(x) self.assertEqual(x.item(), 3.0) self.assertEqual(id(y), id(x)) # test decomp ep ep = ep.run_decompositions({}) for node in ep.graph.nodes: if node.op == "call_function": self.assertNotEqual(node.target, torch.ops.aten.to.dtype) # test mutation for decomposed program y, _ = ep.module()(x) self.assertEqual(x.item(), 4.0) self.assertEqual(id(y), id(x)) def test_module(self): class MyLinear(torch.nn.Module): def __init__(self) -> None: super().__init__() self.weight = torch.randn(20, 98) self.bias = torch.randn(20) def forward(self, x): return torch.nn.functional.linear(x, self.weight, self.bias) class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = 
torch.nn.Conv2d(16, 33, 3) self.linear = MyLinear() def forward(self, x): a, b = x a_conv = self.conv(a) a_linear = self.linear(a_conv) b_conv = self.conv(b) b_linear = self.linear(b_conv) return ( a_linear.cos() + b_linear.sin(), a_linear.sin() + b_linear.cos(), ) inp_container = ((torch.randn(20, 16, 50, 100), torch.randn(20, 16, 50, 100)),) ep = export(Foo(), inp_container) ep_rexported = export(ep.module(), inp_container) inp_test = ((torch.randn(20, 16, 50, 100), torch.randn(20, 16, 50, 100)),) self.assertTrue( torch.allclose( ep.module()(*inp_test)[0], ep_rexported.module()(*inp_test)[0] ) ) self.assertTrue( torch.allclose( ep.module()(*inp_test)[1], ep_rexported.module()(*inp_test)[1] ) ) def test_use_embedding_twice(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() self.embed = torch.nn.Embedding(4, 4) def forward(self, x): return self.embed(x) + self.embed.weight[x] inputs = (torch.tensor([0, 1, 2, 3]),) ep = export(Foo(), inputs) def test_module_with_dict_container_inp_out(self): class MyLinear(torch.nn.Module): def __init__(self) -> None: super().__init__() self.weight = torch.randn(20, 98) self.bias = torch.randn(20) def forward(self, x): return torch.nn.functional.linear(x, self.weight, self.bias) class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(16, 33, 3) self.linear = MyLinear() def forward(self, x): a1, a2 = x["a"] b = x["b"] a1_conv = self.conv(a1) a1_linear = self.linear(a1_conv) a2_conv = self.conv(a2) a2_linear = self.linear(a2_conv) b_conv = self.conv(b) b_linear = self.linear(b_conv) return { "a": a1_linear.cos() + b_linear.sin(), "b": a2_linear.sin() + b_linear.cos(), } inp_container = ( { "a": (torch.randn(20, 16, 50, 100), torch.randn(20, 16, 50, 100)), "b": torch.randn(20, 16, 50, 100), }, ) ep = export(Foo(), inp_container) ep_rexported = export(ep.module(), inp_container) inp_test = ( { "a": (torch.randn(20, 16, 50, 100), torch.randn(20, 16, 50, 100)), "b": 
torch.randn(20, 16, 50, 100), }, ) self.assertTrue( torch.allclose( ep.module()(*inp_test)["a"], ep_rexported.module()(*inp_test)["a"] ) ) self.assertTrue( torch.allclose( ep.module()(*inp_test)["b"], ep_rexported.module()(*inp_test)["b"] ) ) def test_args_type_checked(self): class M(torch.nn.Module): def forward(self, x): return x + 1 inp = torch.rand(2, 2) with self.assertRaisesRegex(torch._dynamo.exc.UserError, "to be a tuple"): # Intentionally not wrapping `inp` in a tuple to trigger the error _ = export(M(), inp) def test_decomp_item_in_prim_before_decomposition(self): class M(torch.nn.Module): def forward(self, x): torch.ops.aten._assert_async.msg(torch.tensor(True), "Fail") return x ep = export(M(), (torch.randn(2, 2),)) FileCheck().check_count( "torch.ops.aten._assert_async.msg", 1, exactly=True ).run(ep.graph_module.code) def test_decomp_item_in_prim_after_decomposition(self): class M(torch.nn.Module): def forward(self, x): torch.ops.aten._assert_async.msg(torch.tensor(True), "Fail") return x decomp_table = {**default_decompositions(), **decomposition_table} ep = torch.export.export(M(), (torch.randn(2, 2),)).run_decompositions( decomp_table ) self.assertExpectedInline( str(ep.graph_module.code).strip(), """\ def forward(self, c_lifted_tensor_0, x): clone = torch.ops.prims.clone.default(c_lifted_tensor_0, memory_format = torch.preserve_format); c_lifted_tensor_0 = None _assert_async = torch.ops.aten._assert_async.msg(clone, 'Fail'); clone = _assert_async = None return (x,)""", ) def test_decomp_batch_norm_functional_predispatch(self): class ConvBatchnorm(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 3, 1, 1) self.bn = torch.nn.BatchNorm2d(3) def forward(self, x): x = self.conv(x) x = self.bn(x) return (x,) mod = ConvBatchnorm() mod.eval() inp = torch.randn(1, 1, 3, 3) gm = torch.export.export(mod, (inp,)).module() self.assertExpectedInline( str(gm.code).strip(), """\ def forward(self, x): x, = 
fx_pytree.tree_flatten_spec(([x], {}), self._in_spec) conv_weight = self.conv.weight conv_bias = self.conv.bias bn_weight = self.bn.weight bn_bias = self.bn.bias bn_running_mean = self.bn.running_mean bn_running_var = self.bn.running_var bn_num_batches_tracked = self.bn.num_batches_tracked; bn_num_batches_tracked = None _guards_fn = self._guards_fn(x); _guards_fn = None conv2d = torch.ops.aten.conv2d.default(x, conv_weight, conv_bias); x = conv_weight = conv_bias = None batch_norm = torch.ops.aten.batch_norm.default(conv2d, bn_weight, bn_bias, bn_running_mean, bn_running_var, False, 0.1, 1e-05, True); conv2d = bn_weight = bn_bias = bn_running_mean = bn_running_var = None return pytree.tree_unflatten((batch_norm,), self._out_spec)""", ) mod.train() gm_train = torch.export.export(mod, (inp,)).module() self.assertExpectedInline( str(gm_train.code).strip(), """\ def forward(self, x): x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec) conv_weight = self.conv.weight conv_bias = self.conv.bias bn_weight = self.bn.weight bn_bias = self.bn.bias bn_running_mean = self.bn.running_mean bn_running_var = self.bn.running_var bn_num_batches_tracked = self.bn.num_batches_tracked _guards_fn = self._guards_fn(x); _guards_fn = None conv2d = torch.ops.aten.conv2d.default(x, conv_weight, conv_bias); x = conv_weight = conv_bias = None add_ = torch.ops.aten.add_.Tensor(bn_num_batches_tracked, 1); bn_num_batches_tracked = add_ = None batch_norm = torch.ops.aten.batch_norm.default(conv2d, bn_weight, bn_bias, bn_running_mean, bn_running_var, True, 0.1, 1e-05, True); conv2d = bn_weight = bn_bias = bn_running_mean = bn_running_var = None return pytree.tree_unflatten((batch_norm,), self._out_spec)""", ) def test_constrain_size_in_eager(self): class Module(torch.nn.Module): def forward(self, x, y): n = x.max().item() torch._check(n >= 0) return y + n fn = Module() ep = export( fn, (torch.randint(1, 2, (2, 2)), torch.randint(3, 5, (2, 3))), ) test_inp = (torch.randint(1, 2, (2, 2)), 
torch.randint(3, 5, (2, 3))) self.assertTrue(torch.allclose(ep.module()(*test_inp), fn(*test_inp))) def test_constrain_size_with_constrain_value(self): class Module(torch.nn.Module): def forward(self, x, y): n = x.max().item() torch._check(n >= 2) torch._check(n <= 10) return y + n fn = Module() with self.assertRaisesRegex( RuntimeError, r"Expected cond to be True, but got False" ): _ = fn(torch.randint(1, 2, (2, 2)), torch.randint(3, 5, (2, 3))) ep = export( fn, (torch.randint(3, 4, (2, 2)), torch.randint(3, 5, (2, 3))), ) with self.assertRaisesRegex( RuntimeError, r"Runtime assertion failed for expression u[\d+] \>\= 2" ): test_inp = (torch.randint(1, 2, (2, 2)), torch.randint(3, 5, (2, 3))) _ = ep.module()(*test_inp) def test_while_loop_simple(self): class Simple(torch.nn.Module): def forward(self, ci, a, b): def cond_fn(i, x, y): return i > 0 def body_fn(i, x, y): return i - 1, x + y, y - x return torch._higher_order_ops.while_loop(cond_fn, body_fn, [ci, a, b]) example_inputs = ( torch.tensor(1), torch.randn(10, 20), torch.randn(10, 20), ) ep = export(Simple(), example_inputs) self.assertEqual(ep.module()(*example_inputs), Simple()(*example_inputs)) def test_constrain_size_with_various_cases(self): class Module1(torch.nn.Module): def forward(self, x, y): n = x.item() torch._check(n >= 0) return y.sum() + torch.ones(n, 5).sum() case1 = Module1() class Module2(torch.nn.Module): def forward(self, x, y): n = x.item() torch._check(n >= 0) torch._check(n <= 6) return y.sum() + torch.ones(n, 5).sum() case2 = Module2() class Module3(torch.nn.Module): def forward(self, x, y): n = x.item() torch._check(n >= 0) torch._check(n <= 1) return y.sum() + torch.ones(n, 5).sum() case3 = Module3() class Module4(torch.nn.Module): def forward(self, x, y): n = x.item() torch._check(n >= 2) return y.sum() + torch.ones(n, 5).sum() case4 = Module4() class Module5(torch.nn.Module): def forward(self, x, y): n = x.item() torch._check(n >= 1) return y.sum() + torch.ones(n, 5).sum() case5 = 
Module5() ep = export(case1, (torch.tensor(1), torch.ones(4, 5))) with self.assertRaisesRegex( RuntimeError, r"Expected cond to be True, but got False" ): _ = case1(torch.tensor(-1), torch.randn(4, 5)) self.assertTrue( torch.allclose( ep.module()(torch.tensor(1), torch.ones(4, 5)), case1(torch.tensor(1), torch.ones(4, 5)), ) ) ep = export(case2, (torch.tensor(5), torch.randn(4, 5))) with self.assertRaisesRegex( RuntimeError, r"Expected cond to be True, but got False", ): _ = case2(torch.tensor(7), torch.randn(4, 5)) with self.assertRaisesRegex( RuntimeError, r"Expected cond to be True, but got False", ): _ = case2(torch.tensor(9), torch.randn(4, 5)) self.assertTrue( torch.allclose( ep.module()(torch.tensor(5), torch.ones(4, 5)), case2(torch.tensor(5), torch.ones(4, 5)), ) ) _ = case3(torch.tensor(1), torch.randn(4, 5)) with self.assertRaisesRegex( RuntimeError, r"Expected cond to be True, but got False", ): _ = case4(torch.tensor(1), torch.randn(4, 5)) ep = export(case4, (torch.tensor(5), torch.randn(4, 5))) with self.assertRaisesRegex( RuntimeError, r"Expected cond to be True, but got False", ): _ = case4(torch.tensor(1), torch.randn(4, 5)) self.assertTrue( torch.allclose( ep.module()(torch.tensor(5), torch.ones(4, 5)), case4(torch.tensor(5), torch.ones(4, 5)), ) ) ep = export(case5, (torch.tensor(5), torch.randn(4, 5))) with self.assertRaisesRegex( RuntimeError, r"Expected cond to be True, but got False", ): _ = case5(torch.tensor(0), torch.randn(4, 5)) self.assertTrue( torch.allclose( ep.module()(torch.tensor(5), torch.ones(4, 5)), case5(torch.tensor(5), torch.ones(4, 5)), ) ) @testing.expectedFailureStrictV2 def test_automatic_constrain_size(self): class M(torch.nn.Module): def forward(self, x, y): n = x.item() return y.sum() + torch.ones(n, 5).sum() ep = export(M(), (torch.tensor(1), torch.ones(4, 5))) # This is because we insert sym_constrain_range in the graph now error_msg = r".* failed for expression u0 >= 0 on node .*" with 
self.assertRaisesRegex(RuntimeError, error_msg): _ = ep.module()(torch.tensor(-1), torch.randn(4, 5)) self.assertTrue( torch.allclose( ep.module()(torch.tensor(1), torch.ones(4, 5)), M()(torch.tensor(1), torch.ones(4, 5)), ) ) def test_cleanup_dynamic_markers(self) -> None: class Foo(torch.nn.Module): def forward(self, inputs): x, y = inputs["x"], inputs["y"] return x + y inputs = ( { "x": torch.randn(4, 8), "y": torch.randn(4, 8), }, ) shapes = { "inputs": { "x": (Dim.AUTO, Dim.STATIC), "y": (Dim.DYNAMIC, Dim.STATIC), }, } ep = export(Foo(), inputs, dynamic_shapes=shapes) for tensor in inputs[0].values(): for attr in [ "_dynamo_weak_dynamic_indices", "_dynamo_dynamic_indices", "_dynamo_dynamic_range", "_dynamo_static_indices", "_dynamo_unbacked_indices", ]: self.assertFalse(hasattr(tensor, attr)) @testing.expectedFailureCppRuntime def test_while_loop_index_assertions(self): from torch._higher_order_ops import while_loop class Foo(torch.nn.Module): def forward(self, x): def cond_fn(idx, acc): i = idx.item() return i < x.size(0) def body_fn(idx, acc): # this check_is_size call needs to be traced by this subgraph for the select call, # it can't be in the cond graph, as that fires & fails right before loop termination. 
i = idx.item()
                    return idx + 1, acc + x[i]

                acc = torch.zeros(x.size(1))
                n = torch.full((), 0, dtype=torch.int64)
                _, out = while_loop(cond_fn, body_fn, [n, acc])
                return out

        x = torch.randn(8, 4)
        ep = export(Foo(), (x,), strict=False)
        # the loop sums rows of x, so the result equals x.sum(dim=0)
        self.assertTrue(torch.allclose(x.sum(dim=0), ep.module()(x)))

    @testing.expectedFailureCppRuntime
    def test_while_loop_assert_separation(self):
        """Check that torch._check assertions traced inside a while_loop's cond_fn
        and body_fn end up in their respective subgraphs, not mixed together."""
        from torch._higher_order_ops import while_loop

        class Bar(torch.nn.Module):
            def forward(self, idx, x):
                i = idx.item()

                def cond_fn(idx, x):
                    i = idx.item()
                    torch._check(i != 5)
                    return i <= 9

                def body_fn(idx, x):
                    i = idx.item()
                    torch._check(i % 2 == 0)
                    return idx + 2, x + i

                return while_loop(cond_fn, body_fn, [idx, x + i])

        inps = (torch.tensor([0]), torch.zeros(1))
        ep = export(Bar(), inps, strict=False)
        i, out = ep.module()(*inps)
        # loop runs for idx = 0, 2, 4, 6, 8 then exits at 10; accumulated sum is 20
        self.assertEqual(i, 10)
        self.assertEqual(out.item(), 20)

        # check assertions are separate for each subgraph
        with self.assertRaisesRegex(
            RuntimeError, r"Runtime assertion failed for expression Ne\(u[\d]+, 5\).*"
        ):
            ep.graph_module.while_loop_cond_graph_0(torch.tensor([5]), torch.zeros(1))
        with self.assertRaisesRegex(
            RuntimeError,
            r"Runtime assertion failed for expression Eq\(PythonMod\(u[\d]+, 2\), 0\).*",
        ):
            ep.graph_module.while_loop_body_graph_0(torch.tensor([5]), torch.zeros(1))

    @testing.expectedFailureStrictV2
    def test_constrain_decomp(self) -> None:
        """Verify that _assert_scalar runtime-assert nodes inserted for torch._check
        range constraints survive run_decompositions()."""

        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.freq = torch.ones(5, 5)

            def forward(self, start_pos: torch.Tensor):
                pos = start_pos.item()
                # bound the data-dependent index into self.freq (5 rows)
                torch._check(pos >= 0)
                torch._check(pos <= 4)
                return self.freq[pos] * self.freq[pos]

        ep = export(M(), (torch.tensor(1),))
        FileCheck().check_count(
            "torch.ops.aten._assert_scalar.default", 2, exactly=True
        ).run(ep.graph_module.code)
        decompose_ep = ep.run_decompositions()
        # same two asserts must still be present after decomposition
        FileCheck().check_count(
            "torch.ops.aten._assert_scalar.default", 2, exactly=True
        ).run(ep.graph_module.code)

    def test_mixed_input(self):
        class Module(torch.nn.Module):
            def forward(self, a, b,
alpha: int): return torch.add(a, b, alpha=alpha) func = Module() a = torch.rand(1, 2) b = torch.rand(1, 2) alpha = 10 exported = export(func, (a, b, alpha)) for node in exported.graph_module.graph.nodes: if node.op == "placeholder": self.assertTrue(isinstance(node.meta["val"], (Tensor, int))) @testing.expectedFailureRetraceability # size gets unflattened into a tuple def test_size_input(self): class Model(torch.nn.Module): def __init__(self): super(Model, self).__init__() def forward(self, theta, size): return torch.nn.functional.affine_grid(theta, size, align_corners=None) model = Model() theta = torch.ones((1, 2, 3)) size = torch.Size((1, 3, 24, 24)) inp = (theta, size) eager_result = model(*inp) ep = export(model, inp) epm = ep.module() ep_result = epm(*inp) self.assertTrue(torch.allclose(ep_result, eager_result)) args, _kwargs = ep.example_inputs self.assertTrue(torch.allclose(arg, i) for arg, i in zip(args, inp)) def test_tensor_constant_with_wrapped_method(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.constant = torch.ones(4, 4) def forward(self, x): return x + self.constant, self.constant class Wrapper(torch.nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, *arg, **kwargs): return self.fn(*arg, **kwargs) inp = (torch.zeros(4, 4),) def test(m): m_result = m(*inp) ep_result = export(m, inp).module()(*inp) for m_t, ep_t in zip(m_result, ep_result): self.assertTrue(torch.allclose(m_t, ep_t)) test(M()) test(Wrapper(M().forward)) def test_export_with_inline_constraints(self): class Module(torch.nn.Module): def forward(self, x): a = x.item() torch._check(a >= 4) torch._check(a <= 7) return torch.randn((a, 4)) f = Module() ep = export(f, (torch.tensor([5]),)) self.assertEqual(ep.module()(torch.tensor([6])).shape, (6, 4)) FileCheck().check_count( "torch.ops.aten._assert_scalar.default", 2, exactly=True ).run(ep.graph_module.code) with self.assertRaisesRegex( RuntimeError, r"Runtime assertion failed 
for expression u[\d+] \<\= 7", ) as cm: ep.module()(torch.tensor([30])) def test_export_with_inline_constraints_complex(self): class Module(torch.nn.Module): def forward(self, x): a = x.item() torch._check(a >= 4) torch._check(a <= 7) randn = torch.randn((a, 4)) return torch.cat((randn.transpose(0, 1), torch.zeros(6, a)), 0) f = Module() ep = export(f, (torch.tensor([6]),)) self.assertEqual(ep.module()(torch.tensor([5])).shape, (10, 5)) FileCheck().check_count( "torch.ops.aten._assert_scalar.default", 2, exactly=True ).run(ep.graph_module.code) def test_to_module_with_mutated_buffer(self): class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.zeros(1)) def forward(self, x): self.buf.add_(1) return x.sum() + self.buf.sum() exported = export(Foo(), (torch.ones(5, 5),)) stateful_gm = exported.module() export_return_val = stateful_gm(torch.ones(5, 5)) eager = Foo() eager_return_val = eager(torch.ones(5, 5)) self.assertTrue(torch.allclose(eager_return_val, export_return_val)) for name, buffer in stateful_gm.named_buffers(): self.assertTrue(torch.allclose(torch.ones(1), buffer)) changed = stateful_gm.graph.eliminate_dead_code() self.assertFalse(changed) self.assertTrue( torch.allclose(stateful_gm(torch.ones(5, 5)), eager(torch.ones(5, 5))) ) for name, buffer in stateful_gm.named_buffers(): self.assertTrue(torch.allclose(torch.tensor(2, dtype=torch.float), buffer)) def test_to_module_with_mutated_buffer_multiple(self): class Bar(torch.nn.Module): def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.ones(1)) def forward(self, x): self.buf.add_(1) return x.sum() + self.buf.sum() class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.zeros(1)) self.bar = Bar() def forward(self, x): self.buf.add_(1) self.bar.buf.add_(2) bar = self.bar(x) return bar.sum() + self.buf.sum() exported = export(Foo(), (torch.ones(5, 5),)) stateful_gm = 
exported.module()

        export_return_val = stateful_gm(torch.ones(5, 5))
        eager = Foo()
        eager_return_val = eager(torch.ones(5, 5))
        self.assertTrue(torch.allclose(eager_return_val, export_return_val))

        # after one call: outer buf 0 -> 1, nested bar.buf 1 + 2 + 1 -> 4
        for name, buffer in stateful_gm.named_buffers():
            if name == "L__self___buf":
                self.assertTrue(torch.allclose(torch.ones(1), buffer))
            if name == "L__self___bar_buf":
                self.assertTrue(
                    torch.allclose(torch.tensor(4, dtype=torch.float), buffer)
                )

        # the buffer mutations must not be removable as dead code
        changed = stateful_gm.graph.eliminate_dead_code()
        self.assertFalse(changed)
        self.assertTrue(
            torch.allclose(stateful_gm(torch.ones(5, 5)), eager(torch.ones(5, 5)))
        )

        # after the second call the buffers advance again (1 -> 2, 4 -> 7)
        for name, buffer in stateful_gm.named_buffers():
            if name == "L__self___buf":
                self.assertTrue(
                    torch.allclose(torch.tensor(2, dtype=torch.float), buffer)
                )
            if name == "L__self___bar_buf":
                self.assertTrue(
                    torch.allclose(torch.tensor(7, dtype=torch.float), buffer)
                )

    def test_module_input(self):
        """An nn.Module passed as a forward() argument (non-strict export)."""

        class Foo(torch.nn.Module):
            def forward(self, x, y, m):
                return m(x, y) + x + y

        i = InputModule()
        f = Foo()
        ep = export(f, (torch.randn(3), torch.randn(3), i), strict=False)
        # call the exported program with a *fresh* module instance
        m = InputModule()
        inputs = (torch.randn(3), torch.randn(3), m)
        self.assertEqual(f(*inputs), ep.module()(*inputs))

    def test_module_input_subclasses_parameterization_nested(self):
        """Module input whose parameters use nested tensor subclasses."""

        class Module(torch.nn.Module):
            def forward(self, x, m):
                return m(x) * 2

        mod = InputModuleWithNestedSubclass()
        f = Module()
        ref_x = torch.randn(2, 2)
        ref_out = f(ref_x, mod)
        ep = torch.export.export(f, (torch.randn(2, 2), mod), strict=False)
        self.assertEqual(ref_out, ep.module()(ref_x, mod))

    def test_unbacked_noncontig_lin(self):
        if "cpp_runtime_nonstrict" in self.id():
            self.skipTest("TODO Unexpected success in OSS but not in fbcode.")

        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.lin = torch.nn.Linear(32, 64)

            def forward(self, x):
                # n is an unbacked (data-dependent) size
                n = x.item()
                y = torch.empty(x).view(1, -1, 32)
                return self.lin(y)

        mod = Foo()
        x = torch.tensor([128])
        ep = export(mod, (x,))
        self.assertEqual(mod(x).shape, ep.module()(x).shape)
        x =
torch.tensor([512]) self.assertEqual(mod(x).shape, ep.module()(x).shape) def test_runtime_assert_for_prim(self): class Foo(torch.nn.Module): def forward(self, x, y): return x + y foo = Foo() tensor_inp = torch.ones(7, 5) dim0_x = torch.export.Dim("dim0_x", min=6) dynamic_shapes = {"x": {0: dim0_x}, "y": None} exported = torch.export.export( foo, (tensor_inp, 5), dynamic_shapes=dynamic_shapes ) self.assertTrue( torch.allclose( exported.module()(torch.ones(8, 5), 5), foo(torch.ones(8, 5), 5) ) ) with self.assertRaisesRegex( AssertionError, escape("Guard failed: y == 5"), ): # expected 5, but got 6 _ = exported.module()(torch.ones(8, 5), 6) exported = torch.export.export( foo, (tensor_inp, 5.0), dynamic_shapes=dynamic_shapes ) with self.assertRaisesRegex( AssertionError, escape("Guard failed: y == 5.0"), ): # expected 5.0, but got 6.0 _ = exported.module()(torch.ones(7, 5), 6.0) def test_runtime_assert_for_prm_str(self): class Foo(torch.nn.Module): def forward(self, a, b, mode): return torch.div(a, b, rounding_mode=mode) foo = Foo() inps = (torch.randn(4, 4), torch.randn(4), "trunc") exported = export(foo, inps) with self.assertRaisesRegex( AssertionError, escape("Guard failed: mode == 'trunc'"), ): # expected 'trunc', but got 'floor' _ = exported.module()(torch.randn(4, 4), torch.randn(4), "floor") self.assertTrue(torch.allclose(exported.module()(*inps), foo(*inps))) def test_sym_or_sym_and(self): if "cpp_runtime_nonstrict" in self.id(): self.skipTest("TODO Unexpected success in OSS but not in fbcode.") from torch.fx.experimental.symbolic_shapes import sym_and, sym_or class Foo(torch.nn.Module): def forward(self, xs): u0, u1, u2 = xs.tolist() torch._check(sym_or(u0 == 2, u0 == 4, u0 == 6)) torch._check(sym_and(u1 >= 4, u1 <= 8, u2 == 5)) return u0 + u1 + u2 ep = export(Foo(), (torch.tensor([2, 6, 5]),), strict=False) ep.module()(torch.tensor([2, 6, 5])) ep.module()(torch.tensor([4, 7, 5])) ep.module()(torch.tensor([6, 5, 5])) with self.assertRaisesRegex( 
RuntimeError, r".* expression Eq\(u0, 2\) \| Eq\(u0, 4\) \| Eq\(u0, 6\) .*" ): ep.module()(torch.tensor([3, 6, 5])) with self.assertRaisesRegex(RuntimeError, r".* expression u[\d]+ <= 5 .*"): ep.module()(torch.tensor([6, 6, 6])) def test_redundant_assert_max_upper_bound(self): class M(torch.nn.Module): def forward(self, x): b = x.nonzero() torch._check(b.shape[0] >= 3) return b m = M() inp = (torch.tensor([1, 1, 1, 0, 1]),) dim = torch.export.Dim("dim") ep = export(m, inp, dynamic_shapes=((dim,),)) FileCheck().check_count( "torch.ops.aten._assert_scalar.default", 1, exactly=True ).run(ep.graph_module.code) def test_to_module_with_mutated_buffer_multiple_update_sub_later(self): class Bar(torch.nn.Module): def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.ones(1)) def forward(self, x): self.buf.add_(1) return x.sum() + self.buf.sum() class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.zeros(1)) self.bar = Bar() def forward(self, x): self.buf.add_(1) bar = self.bar(x) self.bar.buf.add_(2) return bar.sum() + self.buf.sum() exported = export(Foo(), (torch.ones(5, 5),)) stateful_gm = exported.module() export_return_val = stateful_gm(torch.ones(5, 5)) eager = Foo() eager_return_val = eager(torch.ones(5, 5)) self.assertTrue(torch.allclose(eager_return_val, export_return_val)) for name, buffer in stateful_gm.named_buffers(): if name == "L__self___buf": self.assertTrue(torch.allclose(torch.ones(1), buffer)) if name == "L__self___bar_buf": self.assertTrue( torch.allclose(torch.tensor(4, dtype=torch.float), buffer) ) changed = stateful_gm.graph.eliminate_dead_code() self.assertFalse(changed) self.assertTrue( torch.allclose(stateful_gm(torch.ones(5, 5)), eager(torch.ones(5, 5))) ) for name, buffer in stateful_gm.named_buffers(): if name == "L__self___buf": self.assertTrue( torch.allclose(torch.tensor(2, dtype=torch.float), buffer) ) if name == "L__self___bar_buf": self.assertTrue( 
torch.allclose(torch.tensor(7, dtype=torch.float), buffer) ) def test_retracable_ep(self): class Bar(torch.nn.Module): def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.ones(1)) def forward(self, x): self.buf.add_(1) return x.sum() + self.buf.sum() class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.zeros(1)) self.bar = Bar() def forward(self, x): self.buf.add_(1) bar = self.bar(x) self.bar.buf.add_(2) return bar.sum() + self.buf.sum() inp = torch.ones(5, 5) exported = torch.export.export(Foo(), (inp,)) reexported = torch.export.export(exported.module(), (inp,)) self.assertTrue(torch.allclose(Foo()(inp), reexported.module()(inp))) dim0_x = torch.export.Dim("dim0_x") exported = torch.export.export(Foo(), (inp,), dynamic_shapes=({0: dim0_x},)) reexported = torch.export.export(exported.module(), (inp,)) with self.assertRaisesRegex( AssertionError, escape("Guard failed: x.size()[0] == 5"), ): # expected 5, but got 7 reexported.module()(torch.ones(7, 5)) reexported = torch.export.export( exported.module(), (inp,), dynamic_shapes=({0: dim0_x},) ) self.assertTrue( torch.allclose( Foo()(torch.ones(7, 5)), reexported.module()(torch.ones(7, 5)) ) ) # can't retrace with invalid inputs with respect to the original ExportedProgram dim0_x_v2 = torch.export.Dim("dim0_x_v2", min=3) exported_v2 = torch.export.export( Foo(), (inp,), dynamic_shapes={"x": {0: dim0_x_v2}} ) with self.assertRaisesRegex( AssertionError, escape("Guard failed: x.size()[0] >= 3"), ): # expected >= 3, but got 2 torch.export.export(exported_v2.module(), (torch.randn(2, 2),)) def test_export_cond_symbool_pred(self): class A(torch.nn.Module): def __init__(self) -> None: super().__init__() self.buffer = torch.nn.Buffer(torch.ones(6, 4)) def forward(self): return self.buffer.cos() class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.a = A() def forward(self, x): def true_fn(x): return x.cos() + 
self.a().sum() def false_fn(x): return x.sin() return cond(x.shape[0] > 4, true_fn, false_fn, [x]) dim0 = torch.export.Dim("dim0", min=3) inp = torch.ones(6, 4) ep = export(Foo(), (inp,), dynamic_shapes={"x": {0: dim0}}) schema = get_hop_schema(ep) self.assertExpectedInline( str(schema), """cond(SymBool pred, GraphModule true_fn, GraphModule false_fn, Tensor[2] operands) -> Tensor[1]""", ) # serdes deserializes tuple as list if need_serdes_test(self._testMethodName): self.assertExpectedInline( ep.graph_module.code.strip(), """\ def forward(self, b_a_buffer, x): sym_size_int_1 = torch.ops.aten.sym_size.int(x, 0) gt = sym_size_int_1 > 4; sym_size_int_1 = None true_graph_0 = self.true_graph_0 false_graph_0 = self.false_graph_0 cond = torch.ops.higher_order.cond(gt, true_graph_0, false_graph_0, [x, b_a_buffer]); gt = true_graph_0 = false_graph_0 = x = b_a_buffer = None getitem = cond[0]; cond = None return (getitem,)""", ) else: self.assertExpectedInline( ep.graph_module.code.strip(), """\ def forward(self, b_a_buffer, x): sym_size_int_1 = torch.ops.aten.sym_size.int(x, 0) gt = sym_size_int_1 > 4; sym_size_int_1 = None true_graph_0 = self.true_graph_0 false_graph_0 = self.false_graph_0 cond = torch.ops.higher_order.cond(gt, true_graph_0, false_graph_0, (x, b_a_buffer)); gt = true_graph_0 = false_graph_0 = x = b_a_buffer = None getitem = cond[0]; cond = None return (getitem,)""", ) self.assertTrue( torch.allclose(ep.module()(torch.ones(6, 4)), Foo()(torch.ones(6, 4))) ) def test_ccode_python_mod(self): import sympy from torch.utils._sympy.functions import PythonMod class Foo(torch.nn.Module): def forward(self, xs): u0, u1 = xs.tolist() return u0, u1 ep = export(Foo(), (torch.tensor([2, 3]),), strict=False) u0_node, u1_node = list(ep.graph.nodes)[-1].args[0] u0 = u0_node.meta["val"] u1 = u1_node.meta["val"] self.assertExpectedInline( sympy.ccode(PythonMod(u0, 3)), """(u0 % 3) < 0 ? 
u0 % 3 + 3 : u0 % 3""" ) self.assertExpectedInline( sympy.ccode(PythonMod(u0, u1)), """(u0 % u1) < 0 ? u0 % u1 + abs(u1) : u0 % u1""", ) def test_aten_lift_fresh_copy(self): class M(torch.nn.Module): def forward(self, x): return torch.ops.aten.lift_fresh_copy(x) ep = export(M(), (torch.ones(6, 4),)).run_decompositions({}) found = False op = "torch.ops.aten.clone.default" FileCheck().check_count(op, 1, exactly=True).run(ep.graph_module.code) def test_cond_buffers(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.register_parameter( "param", torch.nn.Parameter(torch.ones(2, 3), requires_grad=False) ) self.buffer = torch.nn.Buffer(torch.ones(2, 3) + 1) def true_fn(self, x): return x + self.param def false_fn(self, x): return x + self.buffer def forward(self, x): return cond(x.shape[0] == 4, self.true_fn, self.false_fn, [x]) inp = torch.ones(2, 3) ep = torch.export.export(M(), (inp,)) inp = torch.randn(2, 3) epm = ep.module() self.assertTrue(torch.allclose(epm(inp), M()(inp))) for gm in epm.named_modules(): if not isinstance(gm, torch.fx.GraphModule): continue self.assertEqual( len([node for node in gm.graph.nodes if node.op == "placeholder"]), 1 ) @requires_cuda_and_triton @testing.expectedFailureCppRuntime def test_export_associative_scan_symbol_dim(self): device = torch.device("cuda") combine_mode = "pointwise" dim1 = torch.export.Dim("dim0", min=5, max=15) xs = torch.ones(3, 10, 2, device=device) class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() def combine_fn(self, x, y): return x + y def forward(self, x): return associative_scan( self.combine_fn, x, 2, combine_mode=combine_mode ) ep = export(Foo(), (xs,), dynamic_shapes={"x": {1: dim1}}) module_out = Foo()(xs) self.assertTrue(torch.allclose(ep.module()(xs), module_out)) @requires_cuda_and_triton @testing.expectedFailureCppRuntime def test_export_associative_scan_symbol_scandim(self): device = torch.device("cuda") combine_mode = "pointwise" dim1 = 
torch.export.Dim("dim0", min=5, max=15) xs = torch.ones(3, 10, 2, device=device) class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() def combine_fn(self, x, y): return x + y def forward(self, x): return associative_scan( self.combine_fn, x, 1, combine_mode=combine_mode ) ep = export(Foo(), (xs,), dynamic_shapes={"x": {1: dim1}}) module_out = Foo()(xs) self.assertTrue(torch.allclose(ep.module()(xs), module_out)) @requires_cuda_and_triton @testing.expectedFailureStrictV2 def test_export_associative_scan_lifted_buffers(self): if "cpp_runtime_nonstrict" in self.id(): self.skipTest("TODO Unexpected success in OSS but not in fbcode.") device = torch.device("cuda") combine_mode = "pointwise" class A(torch.nn.Module): def __init__(self) -> None: super().__init__() self.buffer = torch.nn.Buffer(torch.ones(3, 2, device=device)) def forward(self): return self.buffer.cos() class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.a = A() def combine_fn(self, x, y): return (x + y) * self.a() def forward(self, x): return associative_scan( self.combine_fn, x, 1, combine_mode=combine_mode ) inp = torch.ones(3, 10, 2, device=device) ep = export(M(), (inp,)) epm = ep.module() self.assertTrue(torch.allclose(epm(inp), M()(inp))) for gm in epm.named_modules(): if not isinstance(gm, torch.fx.GraphModule): continue self.assertEqual( len([node for node in gm.graph.nodes if node.op == "placeholder"]), 1, ) # scan is not supported in sigmoid yet @testing.expectedFailureCppRuntime def test_export_scan_pytree_output(self): def add(carry, accum): return carry + carry, (accum[0]["moo"] + 1, accum[0]["moo2"] + 1) class M(torch.nn.Module): def forward(self, init, accum): return scan(add, init, accum) inp = torch.randn(3) init, xs = torch.ones(3), ({"moo": torch.ones(3), "moo2": torch.ones(3)},) ep = export(M(), (init, xs)) self.assertEqual(ep.module()(init, xs), M()(init, xs)) def test_map_buffers(self): class M1(torch.nn.Module): def __init__(self) -> 
None: super().__init__() self.register_parameter( "param", torch.nn.Parameter(torch.tensor(5), requires_grad=False) ) self.buffer = torch.nn.Buffer(torch.tensor(6) + 1) m1 = M1() def map_fn(x, y): z = x + y + m1.param + m1.buffer z.add_(4) return z class M(torch.nn.Module): def forward(self, xs, y): return map(map_fn, xs, y) example_inputs = (torch.ones(3, 2), torch.tensor(3)) ep = torch.export.export(M(), example_inputs) example_inputs = (torch.randn(3, 2), torch.tensor(3)) epm = ep.module() self.assertTrue(torch.allclose(epm(*example_inputs), M()(*example_inputs))) for gm in epm.named_modules(): if not isinstance(gm, torch.fx.GraphModule): continue self.assertEqual( len([node for node in gm.graph.nodes if node.op == "placeholder"]), 2 ) @testing.expectedFailureStrictV2 def test_no_check_is_size_error(self): class Module(torch.nn.Module): def forward(self, x): a = x.item() return torch.randn(24).view(a, 4) f = Module() ep = export(f, (torch.tensor(6),)) ep.module()(torch.tensor(6)) with self.assertRaisesRegex( RuntimeError, r"Runtime assertion failed for .* u.* 6" ): ep.module()(torch.tensor(5)) def test_suggest_torch_checks_with_non_negative_check(self): from unittest.mock import patch import sympy from torch.export.dynamic_shapes import defaultdict from torch.fx.experimental.symbolic_shapes import _suggest_torch_checks u = sympy.Symbol("u") cond = u >= 0 mock_exception = MagicMock( spec=torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode ) mock_exception.args = ["Test error message"] mock_exception.cond = cond mock_printer = MagicMock() mock_printer.doprint.side_effect = lambda expr: ( str(cond) if expr == cond else "u < 0" # Simulating the condition ) with patch( "torch.fx.experimental.symbolic_shapes._PythonMsgPrinter", return_value=mock_printer, ): src_map = defaultdict(list) src_map["u"] = ["u"] _suggest_torch_checks(mock_exception, src_map) error_msg = mock_exception.args[0] self.assertIn("torch._check(u < 0)", error_msg) def 
test_suggest_torch_checks_with_regular_check(self):
        """_suggest_torch_checks should append both the condition and its negation
        as torch._check suggestions to the GuardOnDataDependentSymNode message."""
        import sympy
        from torch.export.dynamic_shapes import defaultdict
        from torch.fx.experimental.symbolic_shapes import _suggest_torch_checks

        mock_exception = MagicMock(
            spec=torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode
        )
        mock_exception.args = ["Test error message"]
        mock_cond = MagicMock()
        mock_cond.free_symbols = {sympy.Symbol("u")}
        mock_exception.cond = mock_cond
        mock_printer = MagicMock()
        # printer renders the guard condition and its negation
        mock_printer.doprint.side_effect = lambda expr: (
            "u > 5" if expr == mock_cond else "u <= 5"
        )
        with patch(
            "torch.fx.experimental.symbolic_shapes._PythonMsgPrinter",
            return_value=mock_printer,
        ):
            src_map = defaultdict(list)
            src_map["u"] = ["u"]
            _suggest_torch_checks(mock_exception, src_map)
        error_msg = mock_exception.args[0]
        # both branches of the guard are offered as fixes
        self.assertIn("torch._check(u > 5)", error_msg)
        self.assertIn("torch._check(u <= 5)", error_msg)

    def test_train_eval_on_exported_preautograd_module(self):
        """train()/eval() are unsupported on a pre-dispatch exported module and
        must raise NotImplementedError rather than silently toggling modes."""

        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()

            def forward(self, x):
                if x.shape[0] > 4:
                    return x.cos()
                return x.sin()

        graph_module = _export(Foo(), (torch.ones(7, 5),), pre_dispatch=True).module()
        with self.assertRaisesRegex(
            NotImplementedError, r"Calling train\(\) is not supported yet."
        ):
            graph_module.train()

        with self.assertRaisesRegex(
            NotImplementedError, r"Calling eval\(\) is not supported yet."
): graph_module.eval() def test_lifted_constants(self) -> None: class Module(torch.nn.Module): def forward(self, x): return x + torch.tensor(3) f = Module() ep = export(f, (torch.tensor(1),)) self.assertEqual(len(ep.graph_signature.input_specs), 2) self.assertEqual(len(ep.constants), 1) class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.a = torch.tensor(3) def forward(self, x): list_tensor = [torch.tensor(3), torch.tensor(4)] return x + self.a + list_tensor[0] + list_tensor[1] ep = export(Foo(), (torch.tensor(1),)) self.assertEqual(len(ep.graph_signature.input_specs), 4) self.assertEqual(len(ep.state_dict), 0) self.assertEqual(len(ep.constants), 3) inp = (torch.tensor(5),) self.assertTrue(torch.allclose(ep.module()(*inp), Foo()(*inp))) transform = ep.run_decompositions() self.assertEqual(len(ep.graph_signature.input_specs), 4) self.assertTrue(torch.allclose(ep.module()(*inp), transform.module()(*inp))) class Boo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.a = torch.tensor(True) def forward(self, x): list_tensor = [torch.tensor(False), torch.tensor(True)] return x + self.a + list_tensor[0] + list_tensor[1] ep = export(Boo(), (torch.tensor(False),)) self.assertEqual(len(ep.graph_signature.input_specs), 4) self.assertEqual(len(ep.state_dict), 0) self.assertEqual(len(ep.constants), 3) inp = (torch.tensor(True),) self.assertTrue(torch.allclose(ep.module()(*inp), Boo()(*inp))) transform = ep.run_decompositions() self.assertEqual(len(ep.graph_signature.input_specs), 4) self.assertTrue(torch.allclose(ep.module()(*inp), transform.module()(*inp))) @testing.expectedFailureStrictV2 def test_tensor_attribute_zero_args(self): class Foo(torch.nn.Module): def __init__(self, value): super().__init__() self.x = torch.tensor(value) def forward(self): return self.x.clone() m = Foo([1, 2]) ep = export(m, ()) self.assertEqual(ep.graph_signature.lifted_tensor_constants, ["x"]) @testing.expectedFailureStrictV2 def 
test_preserve_shape_dynamism_for_unused_inputs(self):
        """Dynamic-shape markings must be preserved even for dataclass fields
        that forward() never reads (x.p is unused below)."""
        torch.export.register_dataclass(
            Inp3,
            serialized_type_name="test_preserve_shape_dynamism_for_unused_inputs.Inp3",
        )

        class Module(torch.nn.Module):
            def forward(self, x: Inp3):
                return x.f + 1

        mod = Module()
        example_inputs = (Inp3(f=torch.ones(10, 4), p=torch.zeros(10, 4)),)
        # with no dynamic_shapes every placeholder dim is a concrete int
        ep_static = export(mod, example_inputs)
        for node in ep_static.graph.nodes:
            if node.op == "placeholder":
                for s in node.meta["val"].shape:
                    self.assertIsInstance(s, int)

        # mark dim 0 of both fields dynamic; only dim 0 becomes a SymInt
        dim0_x_f, dim0_x_p = torch.export.dims("dim0_x_f", "dim0_x_p")
        dynamic_shapes = {"x": [{0: dim0_x_f}, {0: dim0_x_p}]}
        ep_dynamic = export(mod, example_inputs, dynamic_shapes=dynamic_shapes)
        for node in ep_dynamic.graph.nodes:
            if node.op == "placeholder":
                for i, s in enumerate(node.meta["val"].shape):
                    if i == 0:
                        self.assertIsInstance(s, torch.SymInt)
                    else:
                        self.assertIsInstance(s, int)

    def test_multiple_definitions_same_name_dim(self):
        """Two distinct Dim objects sharing the name "C" must be rejected."""

        class Foo(torch.nn.Module):
            def forward(self, x, y):
                return torch.matmul(x, y)

        A = torch.export.Dim("C", min=3)
        B = torch.export.Dim("C", max=12)
        with self.assertRaisesRegex(
            torch._dynamo.exc.UserError,
            "Found different definitions Dim\\(.*min=3\\) and Dim\\(.*max=12\\) "
            "for the same symbolic dimension",
        ):
            torch.export.export(
                Foo(),
                (torch.randn(10, 10), torch.randn(10, 10)),
                dynamic_shapes={"x": (A, B), "y": (B, A)},
            )

    def test_multinomial_dynamic(self):
        """torch.multinomial with a data-dependent sample count under fully
        dynamic shapes: valid counts work, oversampling raises."""

        class Model(torch.nn.Module):
            def forward(self, x, y):
                return torch.multinomial(x, y.shape[0])

        model = Model()
        DYNAMIC = torch.export.Dim.DYNAMIC

        def exported_module(inputs):
            # mark every dim of every input dynamic
            dynamic_shapes = tuple(tuple(DYNAMIC for _ in inp.shape) for inp in inputs)
            ep = export(model, inputs, dynamic_shapes=dynamic_shapes)
            return ep.module()

        def check(inputs, epm):
            eager_result = model(*inputs)
            ep_result = epm(*inputs)
            self.assertEqual(ep_result.shape, eager_result.shape)

        inputs = (
            torch.tensor([0, 10, 3, 0], dtype=torch.float32),
            torch.ones(2, dtype=torch.int64),
        )
        epm = exported_module(inputs)
        # output
shape is (2,), where n_sample 2 <= dist_size 4 check(inputs, epm) inputs = ( torch.tensor([0, 10, 3, 7, 6, 0], dtype=torch.float32), torch.ones(3, dtype=torch.int64), ) # output shape is (3,), with n_sample 3 <= dist_size 6 check(inputs, epm) inputs = ( torch.tensor([0, 10, 3, 0], dtype=torch.float32), torch.ones(5, dtype=torch.int64), ) with self.assertRaisesRegex(RuntimeError, "cannot sample"): # n_sample 5 > dist_size 4 epm(*inputs) inputs = ( torch.tensor([[4, 5], [6, 7], [8, 9]], dtype=torch.float32), torch.ones(2, dtype=torch.int64), ) epm = exported_module(inputs) # output shape is (3, 2), with n_row 3 and n_sample 2 <= dist_size 2 check(inputs, epm) inputs = ( torch.tensor([[4, 5], [6, 7], [8, 9]], dtype=torch.float32), torch.ones(3, dtype=torch.int64), ) epm = exported_module(inputs) with self.assertRaisesRegex(RuntimeError, "cannot sample"): # n_sample 3 > dist_size 2 epm(*inputs) def test_export_with_wrong_inputs(self): class MyModule(torch.nn.Module): def forward(self, x): return x + x exported_program = export(MyModule(), (torch.rand(2, 3),), {}) with self.assertRaisesRegex(ValueError, "Trying to flatten user inputs"): exported_program.module()(torch.rand(2, 3), torch.rand(2, 3)) def test_export_decomps_simple(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.lin = torch.nn.Linear(10, 1) def forward(self, x): return self.lin(x) inp = (torch.randn(5, 10),) m = M() ep = export(m, inp) state_dict = ep.state_dict self.assertTrue(torch.allclose(ep.module()(*inp), m(*inp))) core_aten_ep = ep.run_decompositions() FileCheck().check_count("torch.ops.aten.permute.default", 1, exactly=True).run( core_aten_ep.graph_module.code ) FileCheck().check_count("torch.ops.aten.t.default", 0, exactly=True).run( core_aten_ep.graph_module.code ) self.assertTrue(torch.allclose(core_aten_ep.module()(*inp), m(*inp))) self.assertEqual(id(state_dict), id(ep.state_dict)) @unittest.skipIf(IS_FBCODE, "We can't customize decomp in fbcode") def 
test_export_decomp_torture_case_1(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.lin = torch.nn.Linear(10, 1) def forward(self, x): return self.lin(x) inp = (torch.randn(5, 10),) m = M() ep = export(m, inp) def custom_decomp_callable(x, weight, bias): return x + bias decomp_table = default_decompositions() decomp_table[torch.ops.aten.linear.default] = custom_decomp_callable core_aten_ep = ep.run_decompositions(decomp_table) self.assertExpectedInline( str(core_aten_ep.graph_module.code).strip(), """\ def forward(self, p_lin_weight, p_lin_bias, x): add = torch.ops.aten.add.Tensor(x, p_lin_bias); x = p_lin_bias = None return (add,)""", ) @unittest.skipIf(IS_FBCODE, "We can't customize decomp in fbcode") @testing.expectedFailureStrictV2 def test_export_decomp_torture_case_2(self): class MyLinear(torch.nn.Module): def __init__(self) -> None: super().__init__() self.weight = torch.randn(20, 98) self.bias = torch.randn(20) def forward(self, x): return torch.nn.functional.linear(x, self.weight, self.bias) class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(16, 33, 3) self.conv1d = torch.nn.Conv1d(16, 33, 3) self.linear = MyLinear() def forward(self, x, y): x_conv = self.conv(x) y_conv_1d = self.conv1d(y) x_linear = self.linear(x_conv) return x_linear.cos() + y_conv_1d.sum() ep = export(Foo(), (torch.randn(20, 16, 50, 100), torch.randn(20, 16, 50))) ep_has_linear_convd = ep.run_decompositions(decomp_table={}) def _decompose_linear_custom(x, weight, bias): return torch.matmul(x, weight.T) + 2 * bias ep_decompose_linear = ep_has_linear_convd.run_decompositions( decomp_table={torch.ops.aten.linear.default: _decompose_linear_custom} ) self.assertExpectedInline( str(ep_decompose_linear.graph_module.code).strip(), """\ def forward(self, p_conv_weight, p_conv_bias, p_conv1d_weight, p_conv1d_bias, c_linear_weight, c_linear_bias, x, y): conv2d = torch.ops.aten.conv2d.default(x, p_conv_weight, 
p_conv_bias); x = p_conv_weight = p_conv_bias = None conv1d = torch.ops.aten.conv1d.default(y, p_conv1d_weight, p_conv1d_bias); y = p_conv1d_weight = p_conv1d_bias = None permute = torch.ops.aten.permute.default(c_linear_weight, [1, 0]); c_linear_weight = None matmul = torch.ops.aten.matmul.default(conv2d, permute); conv2d = permute = None mul = torch.ops.aten.mul.Tensor(c_linear_bias, 2); c_linear_bias = None add = torch.ops.aten.add.Tensor(matmul, mul); matmul = mul = None cos = torch.ops.aten.cos.default(add); add = None sum_1 = torch.ops.aten.sum.default(conv1d); conv1d = None add_1 = torch.ops.aten.add.Tensor(cos, sum_1); cos = sum_1 = None return (add_1,)""", ) def test_export_decomps_dynamic(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.lin = torch.nn.Linear(10, 1) def forward(self, x): return self.lin(x) inp = (torch.randn(5, 10),) m = M() ep = export(m, inp, dynamic_shapes={"x": {0: Dim("batch")}}) core_aten_ep = ep.run_decompositions() input_node = [ node for node in core_aten_ep.graph.nodes if node.op == "placeholder" ][-1] self.assertTrue(isinstance(input_node.meta["val"].shape[0], torch.SymInt)) FileCheck().check_count("torch.ops.aten.permute.default", 1, exactly=True).run( core_aten_ep.graph_module.code ) FileCheck().check_count("torch.ops.aten.t.default", 0, exactly=True).run( core_aten_ep.graph_module.code ) self.assertTrue(torch.allclose(core_aten_ep.module()(*inp), m(*inp))) def test_nonzero_2(self): class Module(torch.nn.Module): def forward(self, x): return torch.nonzero(x) f = Module() ep = export(f, (torch.ones(2),)) inp = torch.randn(2) self.assertTrue(torch.allclose(ep.module()(inp), torch.nonzero(inp))) def test_redundant_asserts(self): class Foo(torch.nn.Module): def forward(self, x): y = x.item() return torch.zeros(y) f = Foo() ep = export(f, (torch.tensor([3]),)) FileCheck().check_count( "torch.ops.aten._assert_scalar.default", 1, exactly=True ).run(ep.graph_module.code) ep = ep.run_decompositions() 
        # (tail of test_redundant_asserts) The scalar assert must survive
        # decomposition without being duplicated: still exactly one occurrence.
        FileCheck().check_count(
            "torch.ops.aten._assert_scalar.default", 1, exactly=True
        ).run(ep.graph_module.code)

    def test_non_arg_name_dynamic_shapes_api(self):
        # Dynamic shapes can be specified positionally (a tuple aligned with
        # the forward() signature) rather than by argument name.
        class Foo(torch.nn.Module):
            def forward(self, a, b):
                return a.sum() + b.sum()

        foo = Foo()
        dim = torch.export.Dim("dim")
        ep = torch.export.export(
            foo,
            (torch.randn(4, 4), torch.randn(4, 4)),
            dynamic_shapes=(None, {0: dim}),
        )

        # b's dim 0 is dynamic, so a different batch size must be accepted.
        test_inp = (torch.randn(4, 4), torch.randn(7, 4))
        self.assertEqual(ep.module()(*test_inp), foo(*test_inp))

        # With no dynamism declared, the trace specializes b's dim 0 to 4.
        ep_v2 = torch.export.export(
            foo,
            (torch.randn(4, 4), torch.randn(4, 4)),
            dynamic_shapes=(None, None),
        )
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: b.size()[0] == 4"),
        ):
            # expected 4, but got 7
            ep_v2.module()(*test_inp)

    @testing.expectedFailureStrictV2
    def test_constant_output(self):
        # Modules whose outputs are (or contain) constant attributes rather
        # than functions of the inputs should still round-trip through export.
        class ModuleConstant(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.b = torch.randn(3, 2)

            def forward(self):
                return self.b

        class ModuleNestedConstant(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.bff = torch.randn(3, 2)

            def forward(self, x, y):
                # Constant appears nested inside a dict/tuple pytree output.
                return {"prediction": (x + y, self.bff)}

        mod = ModuleConstant()
        ep = export(mod, ())
        self.assertEqual(ep.module()(), mod())

        args = (torch.randn(3, 2), torch.randn(3, 2))
        mod = ModuleNestedConstant()
        ep = export(mod, args)
        self.assertEqual(ep.module()(*args), mod(*args))

    def test_non_arg_name_dynamic_shapes_api_with_kwarg(self):
        # Positional dynamic_shapes spec covers kwargs in signature order,
        # independent of the order the caller passes them in.
        class Foo(torch.nn.Module):
            def forward(self, a, b, kw1, kw2):
                return a.sum() + b.sum() + kw1.sum() - kw2.sum()

        foo = Foo()
        dim = torch.export.Dim("dim")
        dim_for_kw1 = torch.export.Dim("dim_for_kw1")
        ep = torch.export.export(
            foo,
            (torch.randn(4, 4), torch.randn(4, 4)),
            {"kw2": torch.ones(4, 4), "kw1": torch.zeros(4, 4)},
            # We are specifying dynamism on the first kwarg even though user passed in
            # different order
            dynamic_shapes=(None, {0: dim}, {0: dim_for_kw1}, None),
        )

        test_inp = (torch.randn(4, 4), torch.randn(7, 4))
        test_kwargs = {"kw2": torch.ones(4, 4), "kw1": torch.zeros(9, 4)}
        # This should work even if the kwarg order are flipped.
        self.assertEqual(
            ep.module()(*test_inp, **test_kwargs), foo(*test_inp, **test_kwargs)
        )

    def test_non_arg_name_dynamic_shapes_api_with_container_type(self):
        # dynamic_shapes can be produced with tree_map over a nested
        # container input, marking only selected leaves dynamic.
        class Foo(torch.nn.Module):
            def forward(self, a, b):
                return a[0].sum() + a[1].sum() + b.sum()

        inp_a = (torch.randn(4, 4), torch.randn(4, 4))
        inp_b = torch.randn(4, 4)
        inp = (inp_a, inp_b)

        count = 0

        def dynamify_inp(x):
            # Mark the second input a[1] dynamic
            nonlocal count
            if count == 1:
                dim = torch.export.Dim("dim", min=3)
                count += 1
                return {0: dim}
            count += 1
            return None

        dynamic_shapes = tree_map(dynamify_inp, inp)
        foo = Foo()
        ep = torch.export.export(foo, inp, dynamic_shapes=dynamic_shapes)

        # a[1] has min=3, so a size-2 tensor must trip the guard.
        test_inp = ((torch.randn(4, 4), torch.randn(2, 4)), torch.randn(4, 4))
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: a[1].size()[0] >= 3"),
        ):
            # expected >= 3, but got 2
            ep.module()(*test_inp)

    @testing.expectedFailureStrictV2
    def test_nested_module(self):
        # A module constructed *inside* forward (not a registered child)
        # should still trace, with nn_module_stack attributed to M2.
        class M1(torch.nn.Module):
            def forward(self, x):
                return x + x

        class M2(torch.nn.Module):
            def forward(self, x):
                m = M1()
                return m(x) * x

        inps = (torch.randn(3, 3),)
        ep = export(M2(), inps)
        self.assertTrue(torch.allclose(ep.module()(*inps), M2()(*inps)))

        add_nodes = [
            node
            for node in ep.graph.nodes
            if node.op == "call_function" and node.target == torch.ops.aten.add.Tensor
        ]
        self.assertEqual(len(add_nodes), 1)
        add_node = add_nodes[0]
        # The add from the ad-hoc M1 instance is attributed to M2's stack.
        self.assertEqual(len(add_node.meta["nn_module_stack"]), 1)
        self.assertTrue("M2" in list(add_node.meta["nn_module_stack"].values())[0][1])

        self.assertExpectedInline(
            str(ep.graph).strip(),
            """\
graph():
    %x : [num_users=2] = placeholder[target=x]
    %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %x), kwargs = {})
    %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %x), kwargs = {})
    return (mul,)""",
        )

        unflattened = unflatten(ep)
        self.assertTrue(torch.allclose(unflattened(*inps),
            M2()(*inps)))

    @testing.expectedFailureStrictV2
    def test_nested_module_with_init_buffer(self):
        # A tensor attribute created in __init__ of a module instantiated
        # inside forward gets inlined (as aten.ones here) rather than lifted
        # into state_dict/constants.
        class M1(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.b = torch.ones(3, 3)

            def forward(self, x):
                return x + self.b

        class M2(torch.nn.Module):
            def forward(self, x):
                m = M1()
                return m(x) * x

        inps = (torch.randn(3, 3),)
        ep = export(M2(), inps)
        self.assertTrue(torch.allclose(ep.module()(*inps), M2()(*inps)))

        # Nothing should have been lifted to state or constants.
        self.assertEqual(len(ep.state_dict), 0)
        self.assertEqual(len(ep.constants), 0)

        self.assertExpectedInline(
            str(ep.graph).strip(),
            """\
graph():
    %x : [num_users=2] = placeholder[target=x]
    %ones : [num_users=1] = call_function[target=torch.ops.aten.ones.default](args = ([3, 3],), kwargs = {device: cpu, pin_memory: False})
    %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %ones), kwargs = {})
    %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %x), kwargs = {})
    return (mul,)""",
        )

        unflattened = unflatten(ep)
        self.assertTrue(torch.allclose(unflattened(*inps), M2()(*inps)))

    def test_nested_module_with_constant_buffer(self):
        # A concrete tensor constant on an inner, forward-constructed module
        # is lifted as a constant input (c_lifted_tensor_0), not state.
        class M1(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.b = torch.tensor(5)

            def forward(self, x):
                return x + self.b

        class M2(torch.nn.Module):
            def forward(self, x):
                m = M1()
                return m(x) * x

        inps = (torch.randn(3, 3),)
        ep = torch.export.export(M2(), inps).run_decompositions({})
        self.assertTrue(torch.allclose(ep.module()(*inps), M2()(*inps)))

        self.assertEqual(len(ep.state_dict), 0)
        # Exactly one lifted constant: M1.b.
        self.assertEqual(len(ep.constants), 1)

        self.assertExpectedInline(
            str(ep.graph).strip(),
            """\
graph():
    %c_lifted_tensor_0 : [num_users=1] = placeholder[target=c_lifted_tensor_0]
    %x : [num_users=2] = placeholder[target=x]
    %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%c_lifted_tensor_0,), kwargs = {})
    %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %clone), kwargs = {})
    %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %x), kwargs = {})
    return (mul,)""",
        )

        unflattened = unflatten(ep)
        self.assertTrue(torch.allclose(unflattened(*inps), M2()(*inps)))

    def test_nested_module_with_parameter(self):
        # Parameters of a forward-constructed module: the ones-initialized
        # parameter is inlined, the scalar one is lifted as a constant.
        class M1(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.nn.Parameter(torch.ones(3, 3))
                self.b = torch.nn.Parameter(torch.tensor(5.0))

            def forward(self, x):
                return x + self.a * self.b

        class M2(torch.nn.Module):
            def forward(self, x):
                m = M1()
                return m(x) * x

        inps = (torch.randn(3, 3),)
        # Strict export segfaults (Issue #128109)
        ep = torch.export.export(M2(), inps, strict=False).run_decompositions({})
        self.assertTrue(torch.allclose(ep.module()(*inps), M2()(*inps)))

        self.assertEqual(len(ep.state_dict), 0)
        self.assertEqual(len(ep.constants), 1)

        self.assertExpectedInline(
            str(ep.graph).strip(),
            """\
graph():
    %c_lifted_tensor_0 : [num_users=1] = placeholder[target=c_lifted_tensor_0]
    %x : [num_users=2] = placeholder[target=x]
    %ones : [num_users=1] = call_function[target=torch.ops.aten.ones.default](args = ([3, 3],), kwargs = {device: cpu, pin_memory: False})
    %detach : [num_users=1] = call_function[target=torch.ops.aten.detach.default](args = (%ones,), kwargs = {})
    %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%c_lifted_tensor_0,), kwargs = {})
    %detach_1 : [num_users=1] = call_function[target=torch.ops.aten.detach.default](args = (%clone,), kwargs = {})
    %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%detach, %detach_1), kwargs = {})
    %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %mul), kwargs = {})
    %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %x), kwargs = {})
    return (mul_1,)""",
        )

        unflattened = unflatten(ep)
        self.assertTrue(torch.allclose(unflattened(*inps), M2()(*inps)))

    def test_module_dict_key(self):
        # Inputs may use an nn.Module as a dict key; export must handle the
        # non-tensor key when flattening.
        class Module(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.mod = torch.nn.Linear(10, 10)

            def forward(self, x, d):
                # Rebuild the dict so the module object itself is the key.
                d = {m: d[name] for name, m in self.named_children()}
                return x + d[self.mod]

        m = Module()
        sample_inputs = (torch.randn(10), {"mod": torch.randn(10)})
        ep = export(m, sample_inputs)
        self.assertEqual(ep.module()(*sample_inputs), m(*sample_inputs))

    @testing.expectedFailureStrictV2
    def test_lazy_module_kwargs(self):
        # Lazy modules (LazyModuleMixin) should be exportable once their
        # (no-op here) parameter initialization hook exists, incl. kwargs.
        class LazyModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):
            def initialize_parameters(self, *args, **kwargs):
                pass

            def forward(self, x, y):
                return x + y

        m = LazyModule()
        ep = export(m, (), {"x": torch.randn(3, 3), "y": torch.randn(3, 3)})
        inputs = {"x": torch.randn(3, 3), "y": torch.randn(3, 3)}
        self.assertEqual(ep.module()(**inputs), m(**inputs))

    def test_retrace_pre_autograd(self):
        # Re-exporting an already-exported module must preserve the dynamic
        # shape guards (dim >= 3) of the first export.
        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.buffer = torch.nn.Buffer(torch.ones(4, 4))

            def forward(self, x):
                # In-place buffer mutation inside forward.
                self.buffer.add_(4)
                return x.sum() + self.buffer.sum()

        inp = torch.randn(4, 4)
        gm = export(
            Foo(),
            (inp,),
            dynamic_shapes=({0: torch.export.Dim("dim", min=3)},),
        ).module()

        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: x.size()[0] >= 3"),
        ):
            # expected >= 3, got 2
            gm(torch.randn(2, 2))

        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: x.size()[0] >= 3"),
        ):
            # expected >= 3, got 2
            export(gm, (torch.randn(2, 2),))

        # Retrace with a valid sample and the same dynamic spec.
        ep = export(
            gm,
            (torch.randn(5, 4),),
            dynamic_shapes=({0: torch.export.Dim("dim", min=3)},),
        )
        test_inp = torch.ones(8, 4)
        self.assertTrue(torch.allclose(ep.module()(test_inp), Foo().forward(test_inp)))

    def test_runtime_assert_with_size(self):
        # Data-dependent slicing: y[:x.item()] introduces an unbacked symint
        # and runtime asserts rather than failing to trace.
        class M(torch.nn.Module):
            def forward(self, x, y):
                a = x.item()
                return y[:a]

        ep = export(
            M(),
            (torch.tensor(5), torch.ones(10)),
            dynamic_shapes={"x": None, "y": {0: torch.export.Dim("t")}},
        )
        inp = (torch.tensor(6), torch.randn(13))
        self.assertTrue(torch.allclose(ep.module()(*inp), M()(*inp)))

    @unittest.skip("Test is only supposed to work with non-strict mode")
    def test_issue_113041(self):
        # Regression test: a forward hook on a shared Sequential submodule.
        class TestModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.tensor(1.0)

            def forward(self, x: torch.Tensor) -> torch.Tensor:
                return x + self.a

        def forward_hook(module: torch.nn.Module, inputs, output) -> torch.Tensor:
            return 2 * output

        seq = torch.nn.Sequential(TestModule()).eval()
        seq.b = torch.tensor(2)
        handle = seq.register_forward_hook(forward_hook)

        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.seq = seq

            def forward(self, x):
                return self.seq(x) + self.seq.b

        inp = (torch.randn(2, 8),)
        ep = export(M(), inp)  # This errors because dynamo adds an extra input

    def test_export_with_fake_tensor_inputs(self):
        # Export driven entirely by FakeTensors on the meta device: the
        # traced graph's meta["val"]s must share device and fake mode.
        fake_mode = torch._subclasses.fake_tensor.FakeTensorMode()

        class Model(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(2, 2)

            def forward(self, x):
                out = self.linear(x)
                return out

        # Put the inputs on a device
        with fake_mode, torch.device("meta"):
            x = torch.rand(5, 2, 2)
            model = Model()

        exported_program = torch.export.export(model, (x,))
        export_res = exported_program.module()(x)
        exp_res = model(x)
        all_meta_val = [
            node.meta["val"]
            for node in exported_program.graph_module.graph.nodes
            if "val" in node.meta
        ]
        self.assertTrue(export_res.size() == exp_res.size())
        self.assertTrue(all(val.device == x.device for val in all_meta_val))
        self.assertTrue(
            all(val.fake_mode is all_meta_val[0].fake_mode for val in all_meta_val)
        )
        # Decomposition should preserve output shape as well.
        decomposed_ep = exported_program.run_decompositions()
        export_res = decomposed_ep.module()(x)
        self.assertTrue(export_res.size() == exp_res.size())

    @skipIfXpu
    def test_export_with_fake_tensor_inputs_on_cuda_devices(self):
        # Same as above, but the fake tensors report a CUDA device.
        fake_mode = torch._subclasses.fake_tensor.FakeTensorMode()

        class Model(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(2, 2)

            def forward(self, x):
                out = self.linear(x)
                return out

        # Put the inputs on a device
        with fake_mode, torch.device("meta"):
x = torch.rand(5, 2, 2) model = Model() # Manually set the fake_device of fake tensors. x.fake_device = torch.device("cuda:0") for n, p in model.named_parameters(): p.fake_device = torch.device("cuda:0") # Need to set all the requires_grad of tensors to False, because fake_tensor with CUDA device # doesn't quite work well with aot_autograd right now due to some logic fails # the check in call getDeviceGuardImpl in InputMetadata. x.requires_grad = False for n, p in model.named_parameters(): p.requires_grad = False def check_device_and_fake_mode(): exported_program = torch.export.export(model, (x,)) export_res = exported_program.module()(x) exp_res = model(x) all_meta_val = [ node.meta["val"] for node in exported_program.graph_module.graph.nodes if "val" in node.meta ] self.assertTrue(export_res.size() == exp_res.size()) self.assertTrue(all(val.device == x.device for val in all_meta_val)) self.assertTrue( all(val.fake_mode is all_meta_val[0].fake_mode for val in all_meta_val) ) check_device_and_fake_mode() def test_run_decomposition_supports_user_input_mutation(self): class SingleOp(torch.nn.Module): def __init__(self) -> None: super().__init__() self.op = torch.ops.aten.native_batch_norm def forward( self, input, weight, bias, running_mean, running_var, training, momentum, eps, **kwargs, ): return self.op( input, weight, bias, running_mean, running_var, training, momentum, eps, **kwargs, ) input = torch.randn(5, 5, 5) weight = torch.randn(5) bias = torch.randn(5) running_mean = torch.randn(5) running_var = torch.randn(5) training = True momentum = 0.5 eps = 0.6 model = SingleOp() output = model( input, weight, bias, running_mean, running_var, training, momentum, eps ) ep = torch.export.export( model, args=( input, weight, bias, running_mean, running_var, training, momentum, eps, ), ) ep.run_decompositions() self.assertEqual( ep.module()( input, weight, bias, running_mean, running_var, training, momentum, eps ), output, ) def test_export_graph_with_no_inputs(self): # 
We saw this pattern when users want to export # a graph that initlizes the states of a model. class Module(torch.nn.Module): def forward(self): return torch.randn(3, 4), torch.randn(3, 4) f = Module() ep = torch.export.export(f, ()) a, b = ep.module()() self.assertEqual(a.size(), torch.Size([3, 4])) self.assertEqual(b.size(), torch.Size([3, 4])) # Contains unbacked symint class M(torch.nn.Module): def forward(self): full = torch.full((), 11) i0 = full.item() return (torch.full((i0,), 0.0),) f = M() ep = export(f, ()) a = ep.module()()[0] self.assertEqual(a.size(), torch.Size([11])) self.assertEqual(a, torch.zeros(11)) def test_pad_sequence(self): class Module(torch.nn.Module): def forward(self, x): return torch._C._nn.pad_sequence([x]) m0 = Module() inputs = (torch.randn(3, 2),) ep = torch.export.export( m0, inputs, dynamic_shapes={"x": {0: Dim("batch_size")}} ) self.assertEqual(ep.module()(*inputs), m0(*inputs)) class ModuleBatchFirst(torch.nn.Module): def forward(self, x): return torch._C._nn.pad_sequence([x], batch_first=True) m1 = ModuleBatchFirst() inputs = (torch.randn(3, 2),) ep = torch.export.export( m1, inputs, dynamic_shapes={"x": {0: Dim("batch_size")}} ) self.assertEqual(ep.module()(*inputs), m1(*inputs)) class ModuleMulti(torch.nn.Module): def forward(self, x, y, z): return torch._C._nn.pad_sequence([x, y, z]) m2 = ModuleMulti() inputs = (torch.randn(5, 2), torch.randn(4, 2), torch.randn(3, 2)) ep = torch.export.export( m2, inputs, dynamic_shapes={ "x": {0: Dim("batch_size")}, "y": {0: Dim("y")}, "z": {0: Dim("z")}, }, ) self.assertEqual(ep.module()(*inputs), m2(*inputs)) class ModuleMultiBatchFirst(torch.nn.Module): def forward(self, x, y, z): return torch._C._nn.pad_sequence([x, y, z], batch_first=True) m3 = ModuleMulti() inputs = (torch.randn(5, 2), torch.randn(4, 2), torch.randn(3, 2)) ep = torch.export.export( m2, inputs, dynamic_shapes={ "x": {0: Dim("batch_size")}, "y": {0: Dim("y")}, "z": {0: Dim("z")}, }, ) 
        self.assertEqual(ep.module()(*inputs), m3(*inputs))

    def test_operator_aten_tensor_mode_variant(self):
        # Overload with a keyword-only rounding_mode argument must export.
        class Module(torch.nn.Module):
            def forward(self, x):
                return torch.ops.aten.div.Tensor_mode(x, 2, rounding_mode="floor")

        m = Module()
        args = (torch.randn(4, 3),)
        ep = export(m, args)
        self.assertEqual(ep.module()(*args), m(*args))

    def test_cdist_forward_compute_mode_zero_export(self):
        # _cdist_forward takes an Optional[int] compute_mode; both None and 0
        # must survive export unchanged.
        class CDistModel(torch.nn.Module):
            def __init__(self):
                super(CDistModel, self).__init__()

            def forward(self, x, y, compute_mode):
                return torch.ops.aten._cdist_forward(
                    x, y, p=2.0, compute_mode=compute_mode
                )

        x = torch.ones([3, 3])
        y = torch.ones([3, 3])
        model = CDistModel()

        # compute_mode=None case.
        expected_none = model(x, y, None)
        ep_none = torch.export.export(model, (x, y, None))
        self.assertTrue(torch.equal(ep_none.module()(x, y, None), expected_none))

        # compute_mode=0 case.
        expected_0 = model(x, y, 0)
        ep_0 = torch.export.export(model, (x, y, 0))
        self.assertTrue(torch.equal(ep_0.module()(x, y, 0), expected_0))

    def test_export_then_compile_tensor_ctor(self):
        # A tensor constructed inline inside forward (torch.tensor(finfo.min))
        # must survive pre-dispatch export followed by torch.compile.
        class M(torch.nn.Module):
            def forward(self, scores, mask):
                scores = scores.masked_fill(
                    mask, torch.tensor(torch.finfo(scores.dtype).min)
                )  # (bs, n_heads, q_length, k_length)
                return scores

        tensor_cpu = torch.randn(2, 4)
        mask_cpu = torch.BoolTensor(
            [[False, True, False, False], [False, False, False, False]]
        )

        m = M().eval()
        # res_ref = m(tensor_cpu, mask_cpu)
        # print("res_ref is: {}".format(res_ref), flush=True)

        exported_model = _export(m, (tensor_cpu, mask_cpu), pre_dispatch=True).module()
        optimized_model = torch.compile(exported_model)
        optimized_model(tensor_cpu, mask_cpu)

    def test_export_input_mutation_static_shape(self):
        # In-place mutation through a view of a user input: the exported
        # module must apply the same mutation the eager module does.
        class MutationModel(torch.nn.Module):
            def forward(self, x, y):
                x.view(3, 2, -1).add_(y)
                return x

        inputs = (torch.randn(12), torch.tensor(2))
        model = MutationModel()
        ep = export(model, inputs)
        # Separate deep copies so eager and exported runs mutate
        # independent tensors, leaving `inputs` itself untouched.
        inputs_export = copy.deepcopy(inputs)
        inputs_model = copy.deepcopy(inputs)
        self.assertEqual(ep.module()(*inputs_export), model(*inputs_model))
        # Both the eager and the exported run must have applied the +2
        # mutation to their own copy of the input.
        self.assertEqual(inputs[0] + torch.tensor(2), inputs_model[0])
        self.assertEqual(inputs[0] + torch.tensor(2), inputs_export[0])

    def test_export_input_mutation_dynamic_shape(self):
        # Input mutation combined with a dynamic leading dimension.
        class MutationModel(torch.nn.Module):
            def forward(self, x, y):
                x[0].mul_(y)
                return x

        inputs = ((torch.randn(12), torch.randn(3, 2)), 2.0)
        model = MutationModel()
        ep = torch.export.export(
            model,
            inputs,
            dynamic_shapes={"x": ({0: torch.export.Dim("dim")}, None), "y": None},
        )
        nodes = list(ep.graph.nodes)
        # First placeholder carries a fake tensor with a symbolic dim 0.
        self.assertEqual(nodes[0].op, "placeholder")
        self.assertIsInstance(nodes[0].meta["val"], torch.Tensor)
        self.assertIsInstance(nodes[0].meta["val"].shape[0], torch.SymInt)

        inputs_export = copy.deepcopy(inputs)
        inputs_model = copy.deepcopy(inputs)
        self.assertEqual(ep.module()(*inputs_export), model(*inputs_model))
        # The in-place *2.0 must be visible on both mutated copies.
        self.assertEqual(inputs[0][0] * 2.0, inputs_model[0][0])
        self.assertEqual(inputs[0][0] * 2.0, inputs_export[0][0])

    def test_export_input_mutation_bug(self):
        # Regression test: re-export after renaming placeholders to names
        # that collide with aot_export's generated placeholder names.
        class M(torch.nn.Module):
            def forward(self, x):
                x[:, :2, :] = x[:, :2, :] + 1
                return x

        inputs = (torch.ones(4, 4, 4),)
        ep = torch.export.export(M(), inputs)
        m = ep.module()

        # Make the name conflict with a placeholder name that we get from
        # aot_export
        for i, node in enumerate(m.graph.nodes):
            if node.op == "placeholder":
                node.name = f"arg0_{i + 1}"
        m.recompile()

        ep = torch.export.export(m, inputs)

        inputs = (torch.randn(4, 4, 4),)
        self.assertEqual(
            ep.module()(*copy.deepcopy(inputs)), M()(*copy.deepcopy(inputs))
        )

    def test__scaled_dot_product_flash_attention(self):
        # SDPA returns a tuple-like result; the module forwards only res[0].
        class Module(torch.nn.Module):
            def forward(self, q, k, v):
                res = torch.nn.functional.scaled_dot_product_attention(q, k, v)
                return res[0]

        m = Module()
        inputs = (
            torch.randn(5, 4, 3, 2),
            torch.randn(5, 4, 3, 2),
            torch.randn(5, 4, 3, 2),
        )
        ep = export(m, inputs)
        self.assertEqual(ep.module()(*inputs), m(*inputs))

    def test_sym_sqrt(self):
        # torch.sym_sqrt of a symbolic size must be captured symbolically.
        import math

        class M(torch.nn.Module):
            def forward(self, x):
                return x / torch.sym_sqrt(x.shape[0])

        ep = export(M(), (torch.ones(16, 4),),
            dynamic_shapes={"x": {0: Dim("dim")}})
        _ExportPassBaseDeprecatedDoNotUse()(ep.graph_module)
        # Exactly one symbolic sqrt call should remain in the graph.
        FileCheck().check_count("torch._sym_sqrt", 1, exactly=True).run(
            ep.graph_module.code
        )

    def test_check_specialized_int(self):
        # scatter_add takes a plain int `dim`; export must specialize it
        # rather than treating it as a tensor input.
        class SingleOp(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.op = torch.ops.aten.scatter_add

            def forward(self, t, dim, index, src, **kwargs):
                return self.op(t, dim, index, src, **kwargs)

        t = torch.randn(10, 5)
        dim = -1
        index = torch.tensor(
            [
                [2, 4, 3, 1, 0],
                [0, 2, 1, 4, 3],
                [3, 1, 4, 2, 0],
                [4, 0, 3, 1, 2],
                [3, 0, 4, 1, 2],
            ]
        )
        src = torch.randn(5, 5)

        model = SingleOp()
        output = model(t, dim, index, src)

        ep = torch.export.export(model, args=(t, dim, index, src))
        ep = ep.run_decompositions()
        self.assertEqual(ep.module()(t, dim, index, src), output)

    def test_fqn(self):
        # Fully-qualified state_dict names must be preserved across the
        # various export entry points (torch IR, pre-dispatch, export()).
        class NestedChild(torch.nn.Module):
            def forward(self, x):
                return x / x

        class Child1(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.nested = NestedChild()
                self.register_parameter(
                    "child1param", torch.nn.Parameter(torch.ones(2, 3))
                )

            def forward(self, x):
                x = self.nested(x)
                return x + self.child1param

        class Child2(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.child2buffer = torch.nn.Buffer(torch.ones(2, 3))

            def forward(self, x):
                return x - self.child2buffer

        class MyModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.foo = Child1()
                self.bar = Child2()
                self.register_parameter(
                    "rootparam", torch.nn.Parameter(torch.ones(2, 3))
                )

            def forward(self, x):
                x = x * self.rootparam
                x = self.foo(x)
                x = self.bar(x)
                return x

        orig_eager = MyModule()
        test_inp = torch.randn(2, 3)

        torch_gm = _export_to_torch_ir(orig_eager, (torch.rand(2, 3),), {})
        torch_gm.state_dict().keys()
        for k, v in orig_eager.state_dict().items():
            self.assertIn(k, torch_gm.state_dict())
            self.assertEqual(v, torch_gm.state_dict()[k])
        self.assertTrue(torch.allclose(torch_gm(test_inp), orig_eager(test_inp)))

        pre_autograd_gm = torch.export._trace._export(
            orig_eager, (torch.rand(2, 3),), {}, pre_dispatch=True
        ).module()
        for k, v in orig_eager.state_dict().items():
            self.assertIn(k, pre_autograd_gm.state_dict())
            self.assertEqual(v, pre_autograd_gm.state_dict()[k])
        self.assertTrue(torch.allclose(pre_autograd_gm(test_inp), orig_eager(test_inp)))

        ep = export(orig_eager, (torch.rand(2, 3),), {})
        for k, v in orig_eager.state_dict().items():
            # We do not need to normalize the key here because exported
            # program's state dict is able to contain the module information.
            self.assertIn(k, ep.state_dict)
            self.assertEqual(v, ep.state_dict[k])
        self.assertTrue(torch.allclose(ep.module()(test_inp), orig_eager(test_inp)))
        # NOTE(review): assertTrue here takes the second argument as a msg,
        # so this never compares the key sets — assertEqual was likely meant.
        self.assertTrue(torch_gm.state_dict().keys(), orig_eager.state_dict().keys())

    def test_nn_module_stack(self):
        # nn_module_stack metadata must survive strict and non-strict export,
        # and unflatten() must rebuild the bar/leaf module hierarchy.
        class Leaf(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(4, 4)

            def forward(self, x):
                return self.linear(x)

        class Bar(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.leaf = Leaf()
                self.buffer = torch.nn.Buffer(torch.randn(4, 4))

            def forward(self, x):
                return self.buffer.sum() + self.leaf(x).sum()

        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.bar = Bar()

            def forward(self, x):
                y = self.bar.buffer + x
                return (self.bar(x) + y.sum(),)

        inp = (torch.randn(4, 4),)
        mod = Foo()
        ep_strict = torch.export.export(mod, inp).run_decompositions()
        ep_non_strict = torch.export.export(mod, inp, strict=False).run_decompositions()

        gm_unflat_non_strict = unflatten(ep_non_strict)
        self.assertTrue(hasattr(gm_unflat_non_strict, "bar"))
        self.assertTrue(hasattr(gm_unflat_non_strict.bar, "buffer"))
        self.assertTrue(hasattr(gm_unflat_non_strict.bar, "leaf"))

        gm_unflat_strict = unflatten(ep_strict)

        self.assertEqual(gm_unflat_non_strict(*inp), gm_unflat_strict(*inp))
        # After decomposition the leaf linear appears as permute + addmm.
        self.assertExpectedInline(
            str(gm_unflat_non_strict.bar.leaf.linear.graph).strip(),
            """\
graph():
    %x : [num_users=1] = placeholder[target=x]
    %weight : [num_users=1] = get_attr[target=weight]
    %bias : [num_users=1] = get_attr[target=bias]
    %permute : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%weight, [1, 0]), kwargs = {})
    %addmm : [num_users=1] = call_function[target=torch.ops.aten.addmm.default](args = (%bias, %x, %permute), kwargs = {})
    return addmm""",
        )

        gm_flat_non_strict = ep_non_strict.module()
        gm_flat_strict = ep_strict.module()

        self.assertEqual(gm_flat_non_strict(*inp), gm_flat_strict(*inp))

    def test_nn_module_stack_shared_submodule(self):
        # Like test_nn_module_stack, but the same Leaf type is instantiated
        # in two parents, one of which calls its leaf twice.
        class Leaf(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(4, 4)

            def forward(self, x):
                return self.linear(x)

        class Bar(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.leaf = Leaf()
                self.buffer = torch.nn.Buffer(torch.randn(4, 4))

            def forward(self, x):
                return self.buffer.sum() + self.leaf(x).sum()

        class BarDifferent(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.leaf = Leaf()

            def forward(self, x):
                a = self.leaf(x).sum()
                b = self.leaf(x).sum()
                return a + b

        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.bar = Bar()
                self.bar_different = BarDifferent()

            def forward(self, x):
                y = self.bar.buffer + x
                return (
                    self.bar(x) + self.bar_different(x + 2),
                    y.sum(),
                )

        inp = (torch.randn(4, 4),)
        mod = Foo()
        ep_strict = export(mod, inp)
        ep_non_strict = export(mod, inp, strict=False)

        gm_unflat_non_strict = unflatten(ep_non_strict)
        self.assertTrue(hasattr(gm_unflat_non_strict, "bar"))
        self.assertTrue(hasattr(gm_unflat_non_strict.bar, "buffer"))
        self.assertTrue(hasattr(gm_unflat_non_strict.bar, "leaf"))
        self.assertTrue(hasattr(gm_unflat_non_strict.bar_different, "leaf"))

        gm_unflat_strict = unflatten(ep_strict)

        self.assertEqual(gm_unflat_non_strict(*inp), gm_unflat_strict(*inp))
        self.assertExpectedInline(
            str(gm_unflat_non_strict.bar.leaf.linear.graph).strip(),
            """\
graph():
    %x : [num_users=1] = placeholder[target=x]
    %weight : [num_users=1] = get_attr[target=weight]
    %bias : [num_users=1] = get_attr[target=bias]
    %linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %weight, %bias), kwargs = {})
    return linear""",
        )
        self.assertExpectedInline(
            str(gm_unflat_non_strict.bar_different.leaf.linear.graph).strip(),
            """\
graph():
    %add_2 : [num_users=1] = placeholder[target=add_2]
    %weight : [num_users=1] = get_attr[target=weight]
    %bias : [num_users=1] = get_attr[target=bias]
    %linear_1 : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%add_2, %weight, %bias), kwargs = {})
    return linear_1""",
        )

        gm_flat_non_strict = ep_non_strict.module()
        gm_flat_strict = ep_strict.module()

        self.assertEqual(gm_flat_non_strict(*inp), gm_flat_strict(*inp))

    def test_unflatten_random_dag_5(self):
        # dag: {0: [1, 2, 3], 1: [2, 4], 2: [4], 3: [], 4: []}
        class N4(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x):
                return x + 1

        class N3(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n4 = N4()

            def forward(self, x):
                return x + 1

        class N2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n3 = N3()

            def forward(self, x):
                # Calls a grandchild directly, skipping self.n3's forward.
                x = self.n3.n4(x + 1)
                return x + 1

        class N1(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n2 = N2()

            def forward(self, x):
                x = self.n2(x + 1)
                x = self.n2.n3.n4(x + 1)
                return x + 1

        class N0(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n1 = N1()

            def forward(self, x):
                x = self.n1(x + 1)
                x = self.n1.n2(x + 1)
                x = self.n1.n2.n3(x + 1)
                return x + 1

        n0 = N0()
        inp = (torch.ones(1),)
        eager = n0(*inp)
        ep = export(n0, inp)
        epm = ep.module()
        ufm = torch.export.unflatten(ep)
        # Flat and unflattened exported modules must both match eager.
        self.assertTrue(torch.allclose(epm(*inp), eager))
        self.assertTrue(torch.allclose(ufm(*inp), eager))

    def test_unflatten_random_dag_6(self):
        # dag: {0: [1, 2, 4, 5], 1: [3, 5], 2: [4, 5], 3: [], 4: [5], 5: []}
        class N5(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x):
                return x + 1
        # --- tail of test_unflatten_random_dag_6 (the test def and class N5
        # appear earlier in the file): modules N4..N0 call submodules at
        # varying nesting depths, then export/unflatten parity is checked. ---
        class N4(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n5 = N5()

            def forward(self, x):
                x = self.n5(x + 1)
                return x + 1

        class N3(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n4 = N4()

            def forward(self, x):
                return x + 1

        class N2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n3 = N3()

            def forward(self, x):
                # calls grandchild and great-grandchild directly, skipping n3.forward
                x = self.n3.n4(x + 1)
                x = self.n3.n4.n5(x + 1)
                return x + 1

        class N1(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n2 = N2()

            def forward(self, x):
                x = self.n2.n3(x + 1)
                x = self.n2.n3.n4.n5(x + 1)
                return x + 1

        class N0(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n1 = N1()

            def forward(self, x):
                x = self.n1(x + 1)
                x = self.n1.n2(x + 1)
                x = self.n1.n2.n3.n4(x + 1)
                x = self.n1.n2.n3.n4.n5(x + 1)
                return x + 1

        n0 = N0()
        inp = (torch.ones(1),)
        eager = n0(*inp)
        ep = export(n0, inp)
        epm = ep.module()
        ufm = torch.export.unflatten(ep)
        # exported module and unflattened module must both match eager
        self.assertTrue(torch.allclose(epm(*inp), eager))
        self.assertTrue(torch.allclose(ufm(*inp), eager))

    # Stress test: 8-level module DAG where each level owns a buffer that is
    # read (never mutated here) by ancestors at arbitrary depths; checks that
    # export() and unflatten() both reproduce eager results.
    def test_unflatten_random_dag_buf_8(self):
        class N7(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))

            def forward(self, x):
                return x + 1

        class N6(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n7 = N7()

            def forward(self, x):
                x = self.n7(x + 1)
                return x + 1

        class N5(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n6 = N6()

            def forward(self, x):
                x = x + self.n6.n7.buf
                x = self.n6(x + 1)
                return x + 1

        class N4(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n5 = N5()

            def forward(self, x):
                x = x + self.n5.buf
                x = self.n5(x + 1)
                x = self.n5.n6(x + 1)
                return x + 1

        class N3(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n4 = N4()

            def forward(self, x):
                x = x + self.n4.buf
                x = x + self.n4.n5.n6.n7.buf
                x = self.n4(x + 1)
                x = self.n4.n5.n6(x + 1)
                return x + 1

        class N2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n3 = N3()

            def forward(self, x):
                x = x + self.n3.n4.n5.n6.n7.buf
                x = self.n3(x + 1)
                x = self.n3.n4(x + 1)
                return x + 1

        class N1(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n2 = N2()

            def forward(self, x):
                x = x + self.n2.n3.n4.n5.buf
                x = x + self.n2.n3.n4.n5.n6.n7.buf
                x = self.n2(x + 1)
                x = self.n2.n3.n4(x + 1)
                x = self.n2.n3.n4.n5(x + 1)
                x = self.n2.n3.n4.n5.n6.n7(x + 1)
                return x + 1

        class N0(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n1 = N1()

            def forward(self, x):
                x = x + self.n1.n2.buf
                x = x + self.n1.n2.n3.n4.buf
                x = self.n1(x + 1)
                x = self.n1.n2(x + 1)
                x = self.n1.n2.n3.n4(x + 1)
                x = self.n1.n2.n3.n4.n5.n6.n7(x + 1)
                return x + 1

        n0 = N0()
        inp = (torch.ones(1),)
        eager = n0(*inp)
        ep = export(n0, inp)
        epm = ep.module()
        ufm = torch.export.unflatten(ep)
        assert torch.allclose(epm(*inp), eager)
        assert torch.allclose(ufm(*inp), eager)

    # Like the DAG tests above, but buffers are also mutated in place
    # (buf.add_(1)) from ancestor modules, so statement order matters.
    def test_unflatten_random_dag_mutating_buf_4(self):
        class N3(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))

            def forward(self, x):
                return x + 1

        class N2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n3 = N3()

            def forward(self, x):
                x = x + self.n3.buf
                x = self.n3(x + 1)
                return x + 1

        class N1(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n2 = N2()

            def forward(self, x):
                x = x + self.n2.n3.buf
                x = self.n2(x + 1)
                x = self.n2.n3(x + 1)
                self.n2.n3.buf.add_(1)  # in-place mutation of grandchild buffer
                return x + 1

        class N0(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n1 = N1()

            def forward(self, x):
                x = x + self.n1.buf
                x = x + self.n1.n2.n3.buf
                x = self.n1(x + 1)
                x = self.n1.n2(x + 1)
                x = self.n1.n2.n3(x + 1)
                self.n1.buf.add_(1)
                self.n1.n2.buf.add_(1)
                return x + 1

        n0 = N0()
        inp = (torch.ones(1),)
        eager = n0(*inp)
        # NOTE: eager was computed on n0, but a fresh N0() is exported here, so
        # the eager run's buffer mutations do not affect the exported module.
        ep = export(N0(), inp)
        epm = ep.module()
        ufm = torch.export.unflatten(ep)
        assert torch.allclose(epm(*inp), eager)
        assert torch.allclose(ufm(*inp), eager)

    # 6-level variant of the mutating-buffer DAG test.
    def test_unflatten_random_dag_mutating_buf_6(self):
        class N5(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))

            def forward(self, x):
                return x + 1

        class N4(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n5 = N5()

            def forward(self, x):
                x = x + self.n5.buf
                self.n5.buf.add_(1)
                return x + 1

        class N3(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n4 = N4()

            def forward(self, x):
                x = x + self.n4.buf
                x = self.n4(x + 1)
                return x + 1

        class N2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n3 = N3()

            def forward(self, x):
                x = x + self.n3.buf
                x = x + self.n3.n4.n5.buf
                x = self.n3(x + 1)
                x = self.n3.n4(x + 1)
                x = self.n3.n4.n5(x + 1)
                self.n3.n4.buf.add_(1)
                return x + 1

        class N1(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n2 = N2()

            def forward(self, x):
                x = x + self.n2.n3.n4.buf
                x = self.n2.n3(x + 1)
                x = self.n2.n3.n4(x + 1)
                x = self.n2.n3.n4.n5(x + 1)
                self.n2.buf.add_(1)
                self.n2.n3.buf.add_(1)
                self.n2.n3.n4.n5.buf.add_(1)
                return x + 1

        class N0(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n1 = N1()

            def forward(self, x):
                x = x + self.n1.n2.buf
                x = x + self.n1.n2.n3.buf
                x = x + self.n1.n2.n3.n4.buf
                x = self.n1(x + 1)
                x = self.n1.n2(x + 1)
                x = self.n1.n2.n3(x + 1)
                x = self.n1.n2.n3.n4(x + 1)
                x = self.n1.n2.n3.n4.n5(x + 1)
                self.n1.n2.buf.add_(1)
                self.n1.n2.n3.n4.buf.add_(1)
                return x + 1

        n0 = N0()
        inp = (torch.ones(1),)
        eager = n0(*inp)
        # export a fresh instance so eager-run buffer mutations are not baked in
        ep = export(N0(), inp)
        epm = ep.module()
        ufm = torch.export.unflatten(ep)
        assert torch.allclose(epm(*inp), eager)
        assert torch.allclose(ufm(*inp), eager)

    # 9-level variant of the mutating-buffer DAG test.
    def test_unflatten_random_dag_mutating_buf_9(self):
        class N8(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))

            def forward(self, x):
                return x + 1

        class N7(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n8 = N8()

            def forward(self, x):
                x = self.n8(x + 1)
                self.n8.buf.add_(1)
                return x + 1

        class N6(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n7 = N7()

            def forward(self, x):
                x = x + self.n7.buf
                x = x + self.n7.n8.buf
                x = self.n7.n8(x + 1)
                self.n7.buf.add_(1)
                self.n7.n8.buf.add_(1)
                return x + 1

        class N5(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n6 = N6()

            def forward(self, x):
                x = x + self.n6.n7.buf
                x = self.n6.n7(x + 1)
                self.n6.buf.add_(1)
                self.n6.n7.n8.buf.add_(1)
                return x + 1

        class N4(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n5 = N5()

            def forward(self, x):
                x = x + self.n5.buf
                x = x + self.n5.n6.buf
                x = self.n5(x + 1)
                x = self.n5.n6.n7(x + 1)
                x = self.n5.n6.n7.n8(x + 1)
                self.n5.n6.n7.buf.add_(1)
                self.n5.n6.n7.n8.buf.add_(1)
                return x + 1

        class N3(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n4 = N4()

            def forward(self, x):
                x = x + self.n4.buf
                x = x + self.n4.n5.n6.n7.buf
                x = x + self.n4.n5.n6.n7.n8.buf
                x = self.n4(x + 1)
                x = self.n4.n5.n6(x + 1)
                self.n4.n5.n6.n7.buf.add_(1)
                self.n4.n5.n6.n7.n8.buf.add_(1)
                return x + 1

        class N2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n3 = N3()

            def forward(self, x):
                x = x + self.n3.n4.n5.n6.buf
                x = x + self.n3.n4.n5.n6.n7.buf
                x = self.n3(x + 1)
                x = self.n3.n4(x + 1)
                x = self.n3.n4.n5(x + 1)
                x = self.n3.n4.n5.n6.n7.n8(x + 1)
                self.n3.n4.buf.add_(1)
                self.n3.n4.n5.buf.add_(1)
                self.n3.n4.n5.n6.buf.add_(1)
                self.n3.n4.n5.n6.n7.n8.buf.add_(1)
                return x + 1

        class N1(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n2 = N2()

            def forward(self, x):
                x = x + self.n2.n3.buf
                x = x + self.n2.n3.n4.n5.buf
                x = x + self.n2.n3.n4.n5.n6.buf
                x = x + self.n2.n3.n4.n5.n6.n7.n8.buf
                x = self.n2(x + 1)
                x = self.n2.n3.n4(x + 1)
                self.n2.buf.add_(1)
                self.n2.n3.n4.n5.n6.buf.add_(1)
                self.n2.n3.n4.n5.n6.n7.buf.add_(1)
                return x + 1

        class N0(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n1 = N1()

            def forward(self, x):
                x = x + self.n1.buf
                x = x + self.n1.n2.n3.n4.buf
                x = x + self.n1.n2.n3.n4.n5.n6.n7.buf
                x = self.n1(x + 1)
                x = self.n1.n2(x + 1)
                x = self.n1.n2.n3(x + 1)
                x = self.n1.n2.n3.n4(x + 1)
                x = self.n1.n2.n3.n4.n5.n6.n7(x + 1)
                self.n1.n2.n3.buf.add_(1)
                self.n1.n2.n3.n4.n5.n6.buf.add_(1)
                self.n1.n2.n3.n4.n5.n6.n7.n8.buf.add_(1)
                return x + 1

        n0 = N0()
        inp = (torch.ones(1),)
        eager = n0(*inp)
        ep = export(N0(), inp)
        epm = ep.module()
        ufm = torch.export.unflatten(ep)
        assert torch.allclose(epm(*inp), eager)
        assert torch.allclose(ufm(*inp), eager)

    # DAG test with preserve_module_call_signature for every submodule fqn.
    def test_unflatten_random_dag_preserving_4(self):
        # {0: [1, 2, 3], 1: [2], 2: [], 3: []}
        class N3(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x):
                return x + 1

        class N2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n3 = N3()

            def forward(self, x):
                return x + 1

        class N1(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n2 = N2()

            def forward(self, x):
                x = self.n2(x + 1)
                return x + 1

        class N0(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n1 = N1()

            def forward(self, x):
                x = self.n1(x + 1)
                x = self.n1.n2(x + 1)
                x = self.n1.n2.n3(x + 1)
                return x + 1

        inp = (torch.ones(1),)
        eager = N0()(*inp)
        fqns = (
            "n1",
            "n1.n2",
            "n1.n2.n3",
        )
        ep = export(N0(), inp, preserve_module_call_signature=fqns)
        epm = ep.module()
        ufm = torch.export.unflatten(ep)
        assert torch.allclose(epm(*inp), eager)
        assert torch.allclose(ufm(*inp), eager)

    # Mutating-buffer DAG test combined with preserved call signatures.
    def test_unflatten_random_dag_mutating_buf_preserving_4(self):
        # {0: [2, 3], 1: [2], 2: [3], 3: []}
        # {0: [], 1: [3], 2: [3], 3: []}
        # {0: [2, 3], 1: [2], 2: [3], 3: []}
        class N3(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))

            def forward(self, x):
                return x + 1

        class N2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n3 = N3()

            def forward(self, x):
                x = x + self.n3.buf
                x = self.n3(x + 1)
                self.n3.buf.add_(1)
                return x + 1

        class N1(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n2 = N2()

            def forward(self, x):
                x = x + self.n2.buf
                x = self.n2(x + 1)
                self.n2.n3.buf.add_(1)
                return x + 1

        class N0(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n1 = N1()

            def forward(self, x):
                x = x + self.n1.n2.buf
                x = x + self.n1.n2.n3.buf
                x = self.n1.n2(x + 1)
                x = self.n1.n2.n3(x + 1)
                return x + 1

        inp = (torch.ones(1),)
        eager = N0()(*inp)
        fqns = (
            "n1",
            "n1.n2",
            "n1.n2.n3",
        )
        ep = export(N0(), inp, preserve_module_call_signature=fqns)
        epm = ep.module()
        ufm = torch.export.unflatten(ep)
        assert torch.allclose(epm(*inp), eager)
        assert torch.allclose(ufm(*inp), eager)

    # Variant of the above with a different random DAG shape.
    def test_unflatten_random_dag_mutating_buf_preserving_4_1(self):
        # {0: [2], 1: [3], 2: [3], 3: []}
        # {0: [2, 3], 1: [3], 2: [3], 3: []}
        # {0: [1], 1: [3], 2: [], 3: []}
        class N3(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))

            def forward(self, x):
                return x + 1

        class N2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n3 = N3()

            def forward(self, x):
                x = x + self.n3.buf
                self.n3.buf.add_(1)
                return x + 1

        class N1(torch.nn.Module):
            def __init__(self):
super().__init__() self.buf = torch.nn.Buffer(torch.ones(1)) self.n2 = N2() def forward(self, x): x = x + self.n2.n3.buf x = self.n2.n3(x + 1) self.n2.n3.buf.add_(1) return x + 1 class N0(torch.nn.Module): def __init__(self): super().__init__() self.buf = torch.nn.Buffer(torch.ones(1)) self.n1 = N1() def forward(self, x): x = x + self.n1.n2.buf x = self.n1(x + 1) self.n1.n2.buf.add_(1) self.n1.n2.n3.buf.add_(1) return x + 1 inp = (torch.ones(1),) eager = N0()(*inp) fqns = ( "n1", "n1.n2", "n1.n2.n3", ) ep = export(N0(), inp, preserve_module_call_signature=fqns) epm = ep.module() ufm = torch.export.unflatten(ep) assert torch.allclose(epm(*inp), eager) assert torch.allclose(ufm(*inp), eager) def test_unflatten_random_dag_mutating_buf_preserving_5(self): # {0: [1, 2, 3], 1: [3, 4], 2: [3, 4], 3: [4], 4: []} # {0: [3], 1: [4], 2: [3, 4], 3: [4], 4: []} # {0: [1, 2], 1: [2, 3], 2: [3, 4], 3: [], 4: []} class N4(torch.nn.Module): def __init__(self): super().__init__() self.buf = torch.nn.Buffer(torch.ones(1)) def forward(self, x): return x + 1 class N3(torch.nn.Module): def __init__(self): super().__init__() self.buf = torch.nn.Buffer(torch.ones(1)) self.n4 = N4() def forward(self, x): x = x + self.n4.buf self.n4.buf.add_(1) return x + 1 class N2(torch.nn.Module): def __init__(self): super().__init__() self.buf = torch.nn.Buffer(torch.ones(1)) self.n3 = N3() def forward(self, x): x = x + self.n3.buf x = x + self.n3.n4.buf x = self.n3(x + 1) x = self.n3.n4(x + 1) self.n3.buf.add_(1) self.n3.n4.buf.add_(1) return x + 1 class N1(torch.nn.Module): def __init__(self): super().__init__() self.buf = torch.nn.Buffer(torch.ones(1)) self.n2 = N2() def forward(self, x): x = x + self.n2.n3.buf x = x + self.n2.n3.n4.buf x = self.n2(x + 1) x = self.n2.n3(x + 1) self.n2.n3.n4.buf.add_(1) return x + 1 class N0(torch.nn.Module): def __init__(self): super().__init__() self.buf = torch.nn.Buffer(torch.ones(1)) self.n1 = N1() def forward(self, x): x = x + self.n1.buf x = x + self.n1.n2.buf 
                # --- continuation of N0.forward of
                # test_unflatten_random_dag_mutating_buf_preserving_5 ---
                x = x + self.n1.n2.n3.buf
                x = self.n1(x + 1)
                x = self.n1.n2(x + 1)
                self.n1.n2.n3.buf.add_(1)
                return x + 1

        inp = (torch.ones(1),)
        eager = N0()(*inp)
        fqns = (
            "n1",
            "n1.n2",
            "n1.n2.n3",
            "n1.n2.n3.n4",
        )
        ep = export(N0(), inp, preserve_module_call_signature=fqns)
        epm = ep.module()
        ufm = torch.export.unflatten(ep)
        assert torch.allclose(epm(*inp), eager)
        assert torch.allclose(ufm(*inp), eager)

    # 7-level mutating-buffer DAG with all submodule call signatures preserved.
    def test_unflatten_random_dag_mutating_buf_preserving_7(self):
        # {0: [3, 4, 5, 6], 1: [2, 3, 4, 5, 6], 2: [3, 4, 5], 3: [5, 6], 4: [6], 5: [6], 6: []}
        # {0: [2, 4, 5, 6], 1: [3, 4, 6], 2: [6], 3: [5], 4: [], 5: [], 6: []}
        # {0: [1, 2, 3, 4, 5, 6], 1: [2, 3, 4], 2: [4, 5, 6], 3: [4, 5, 6], 4: [5, 6], 5: [6], 6: []}
        class N6(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))

            def forward(self, x):
                return x + 1

        class N5(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n6 = N6()

            def forward(self, x):
                x = x + self.n6.buf
                x = self.n6(x + 1)
                return x + 1

        class N4(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n5 = N5()

            def forward(self, x):
                x = x + self.n5.n6.buf
                x = self.n5(x + 1)
                x = self.n5.n6(x + 1)
                return x + 1

        class N3(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n4 = N4()

            def forward(self, x):
                x = x + self.n4.n5.buf
                x = x + self.n4.n5.n6.buf
                x = self.n4(x + 1)
                x = self.n4.n5(x + 1)
                x = self.n4.n5.n6(x + 1)
                self.n4.n5.buf.add_(1)
                return x + 1

        class N2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n3 = N3()

            def forward(self, x):
                x = x + self.n3.buf
                x = x + self.n3.n4.buf
                x = x + self.n3.n4.n5.buf
                x = self.n3.n4(x + 1)
                x = self.n3.n4.n5(x + 1)
                x = self.n3.n4.n5.n6(x + 1)
                self.n3.n4.n5.n6.buf.add_(1)
                return x + 1

        class N1(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n2 = N2()

            def forward(self, x):
                x = x + self.n2.buf
                x = x + self.n2.n3.buf
                x = x + self.n2.n3.n4.buf
                x = x + self.n2.n3.n4.n5.buf
                x = x + self.n2.n3.n4.n5.n6.buf
                x = self.n2(x + 1)
                x = self.n2.n3(x + 1)
                x = self.n2.n3.n4(x + 1)
                self.n2.n3.buf.add_(1)
                self.n2.n3.n4.buf.add_(1)
                self.n2.n3.n4.n5.n6.buf.add_(1)
                return x + 1

        class N0(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n1 = N1()

            def forward(self, x):
                x = x + self.n1.n2.n3.buf
                x = x + self.n1.n2.n3.n4.buf
                x = x + self.n1.n2.n3.n4.n5.buf
                x = x + self.n1.n2.n3.n4.n5.n6.buf
                x = self.n1(x + 1)
                x = self.n1.n2(x + 1)
                x = self.n1.n2.n3(x + 1)
                x = self.n1.n2.n3.n4(x + 1)
                x = self.n1.n2.n3.n4.n5(x + 1)
                x = self.n1.n2.n3.n4.n5.n6(x + 1)
                self.n1.n2.buf.add_(1)
                self.n1.n2.n3.n4.buf.add_(1)
                self.n1.n2.n3.n4.n5.buf.add_(1)
                self.n1.n2.n3.n4.n5.n6.buf.add_(1)
                return x + 1

        inp = (torch.ones(1),)
        eager = N0()(*inp)
        fqns = (
            "n1",
            "n1.n2",
            "n1.n2.n3",
            "n1.n2.n3.n4",
            "n1.n2.n3.n4.n5",
            "n1.n2.n3.n4.n5.n6",
        )
        ep = export(N0(), inp, preserve_module_call_signature=fqns)
        epm = ep.module()
        ufm = torch.export.unflatten(ep)
        assert torch.allclose(epm(*inp), eager)
        assert torch.allclose(ufm(*inp), eager)

    # 10-level mutating-buffer DAG with all submodule call signatures preserved.
    def test_unflatten_random_dag_mutating_buf_preserving_10(self):
        class N9(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))

            def forward(self, x):
                return x + 1

        class N8(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n9 = N9()

            def forward(self, x):
                x = x + self.n9.buf
                self.n9.buf.add_(1)
                return x + 1

        class N7(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n8 = N8()

            def forward(self, x):
                x = self.n8(x + 1)
                x = self.n8.n9(x + 1)
                self.n8.buf.add_(1)
                return x + 1

        class N6(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n7 = N7()

            def forward(self, x):
                x = x + self.n7.n8.buf
                x = self.n7(x + 1)
                x = self.n7.n8.n9(x + 1)
                return x + 1

        class N5(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n6 = N6()

            def forward(self, x):
                x = x + self.n6.buf
                x = x + self.n6.n7.buf
                x = x + self.n6.n7.n8.buf
                x = self.n6(x + 1)
                x = self.n6.n7.n8.n9(x + 1)
                self.n6.n7.buf.add_(1)
                self.n6.n7.n8.buf.add_(1)
                return x + 1

        class N4(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n5 = N5()

            def forward(self, x):
                x = x + self.n5.n6.buf
                x = x + self.n5.n6.n7.n8.buf
                x = x + self.n5.n6.n7.n8.n9.buf
                x = self.n5(x + 1)
                x = self.n5.n6(x + 1)
                x = self.n5.n6.n7.n8(x + 1)
                x = self.n5.n6.n7.n8.n9(x + 1)
                self.n5.buf.add_(1)
                self.n5.n6.buf.add_(1)
                self.n5.n6.n7.buf.add_(1)
                self.n5.n6.n7.n8.buf.add_(1)
                return x + 1

        class N3(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n4 = N4()

            def forward(self, x):
                x = x + self.n4.buf
                x = x + self.n4.n5.n6.buf
                x = x + self.n4.n5.n6.n7.buf
                x = x + self.n4.n5.n6.n7.n8.n9.buf
                x = self.n4(x + 1)
                x = self.n4.n5(x + 1)
                x = self.n4.n5.n6(x + 1)
                x = self.n4.n5.n6.n7.n8(x + 1)
                x = self.n4.n5.n6.n7.n8.n9(x + 1)
                self.n4.n5.n6.buf.add_(1)
                self.n4.n5.n6.n7.n8.buf.add_(1)
                return x + 1

        class N2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n3 = N3()

            def forward(self, x):
                x = x + self.n3.buf
                x = x + self.n3.n4.buf
                x = x + self.n3.n4.n5.n6.n7.n8.buf
                x = self.n3(x + 1)
                x = self.n3.n4(x + 1)
                x = self.n3.n4.n5(x + 1)
                x = self.n3.n4.n5.n6(x + 1)
                x = self.n3.n4.n5.n6.n7.n8(x + 1)
                x = self.n3.n4.n5.n6.n7.n8.n9(x + 1)
                self.n3.buf.add_(1)
                self.n3.n4.buf.add_(1)
                self.n3.n4.n5.buf.add_(1)
                self.n3.n4.n5.n6.buf.add_(1)
                self.n3.n4.n5.n6.n7.buf.add_(1)
                self.n3.n4.n5.n6.n7.n8.n9.buf.add_(1)
                return x + 1

        class N1(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                # --- continuation of N1.__init__ of
                # test_unflatten_random_dag_mutating_buf_preserving_10 ---
                self.n2 = N2()

            def forward(self, x):
                x = x + self.n2.buf
                x = x + self.n2.n3.buf
                x = x + self.n2.n3.n4.n5.n6.buf
                x = x + self.n2.n3.n4.n5.n6.n7.n8.buf
                x = self.n2(x + 1)
                x = self.n2.n3(x + 1)
                x = self.n2.n3.n4(x + 1)
                x = self.n2.n3.n4.n5(x + 1)
                x = self.n2.n3.n4.n5.n6(x + 1)
                x = self.n2.n3.n4.n5.n6.n7(x + 1)
                self.n2.buf.add_(1)
                self.n2.n3.n4.n5.n6.n7.n8.buf.add_(1)
                self.n2.n3.n4.n5.n6.n7.n8.n9.buf.add_(1)
                return x + 1

        class N0(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.buf = torch.nn.Buffer(torch.ones(1))
                self.n1 = N1()

            def forward(self, x):
                x = x + self.n1.n2.buf
                x = x + self.n1.n2.n3.buf
                x = x + self.n1.n2.n3.n4.n5.buf
                x = x + self.n1.n2.n3.n4.n5.n6.buf
                x = x + self.n1.n2.n3.n4.n5.n6.n7.buf
                x = self.n1(x + 1)
                x = self.n1.n2(x + 1)
                x = self.n1.n2.n3.n4(x + 1)
                x = self.n1.n2.n3.n4.n5.n6.n7(x + 1)
                x = self.n1.n2.n3.n4.n5.n6.n7.n8(x + 1)
                x = self.n1.n2.n3.n4.n5.n6.n7.n8.n9(x + 1)
                self.n1.n2.n3.buf.add_(1)
                self.n1.n2.n3.n4.n5.n6.n7.n8.buf.add_(1)
                self.n1.n2.n3.n4.n5.n6.n7.n8.n9.buf.add_(1)
                return x + 1

        inp = (torch.ones(1),)
        eager = N0()(*inp)
        fqns = (
            "n1",
            "n1.n2",
            "n1.n2.n3",
            "n1.n2.n3.n4",
            "n1.n2.n3.n4.n5",
            "n1.n2.n3.n4.n5.n6",
            "n1.n2.n3.n4.n5.n6.n7",
            "n1.n2.n3.n4.n5.n6.n7.n8",
            "n1.n2.n3.n4.n5.n6.n7.n8.n9",
        )
        ep = export(
            N0(),
            inp,
            strict=False,  # strict export is slow with large random dags
            preserve_module_call_signature=fqns,
        )
        epm = ep.module()
        ufm = torch.export.unflatten(ep)
        assert torch.allclose(epm(*inp), eager)
        assert torch.allclose(ufm(*inp), eager)

    # Like the buffer DAG tests, but with plain (non-buffer) constant tensors
    # read across nesting levels, plus preserved call signatures.
    def test_unflatten_random_dag_const_preserving_3(self):
        class N2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.const = torch.ones(1)

            def forward(self, x):
                return x + 1

        class N1(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.const = torch.ones(1)
                self.n2 = N2()

            def forward(self, x):
                x = x + self.n2.const
                x = self.n2(x + 1)
                return x + 1

        class N0(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.const = torch.ones(1)
                self.n1 = N1()

            def forward(self, x):
                x = x + self.n1.n2.const
                x = self.n1(x + 1)
                x = self.n1.n2(x + 1)
                return x + 1

        inp = (torch.ones(1),)
        eager = N0()(*inp)
        fqns = (
            "n1",
            "n1.n2",
        )
        ep = export(N0(), inp, preserve_module_call_signature=fqns)
        epm = ep.module()
        ufm = torch.export.unflatten(ep)
        assert torch.allclose(epm(*inp), eager)
        assert torch.allclose(ufm(*inp), eager)

    # InstanceNorm1d carries None-valued buffers (running stats disabled);
    # export must tolerate them.
    def test_none_buffers(self):
        mod = torch.nn.InstanceNorm1d(1)
        args = (torch.randn(1, 2),)
        ep = torch.export.export(mod, args, strict=False)
        self.assertTrue(torch.allclose(ep.module()(*args), mod(*args)))

    # Export of modules whose forward was monkeypatched with functools.partial,
    # both on the class and on an instance.
    def test_partial_patched_forward(self):
        class Foo(torch.nn.Module):
            def forward(self, x):
                return x + 2

        if sys.version_info >= (3, 14):
            # functools.partial is now a method descriptor:
            # https://docs.python.org/3/whatsnew/3.14.html#changes-in-the-python-api
            def fancy_forward(self, x, y):
                return x + 2 + y

        else:

            def fancy_forward(x, y):
                return x + 2 + y

        Foo.forward = functools.partial(fancy_forward, y=torch.randn(4, 4))

        x = torch.randn(4, 4)
        # strict unsupported: "Tracing through optional input"
        ep = export(Foo(), (x,), strict=False)
        ep.module()(x)

        class Bar(torch.nn.Module):
            def forward(self, x, y, z):
                return x + y + z

        mod = Bar()
        mod.forward = functools.partial(mod.forward, z=2)
        mod.forward = functools.partial(mod.forward, y=torch.randn(4))
        ep = export(mod, (x,), strict=False)
        ep.module()(x)

    # Scalar-int inputs: a static export specializes on the traced value and
    # guards reject other values; Dim.DYNAMIC / Dim.AUTO lift the guard.
    @testing.expectedFailureCppRuntime
    @testing.expectedFailureStrictV2
    def test_symint_input_basic(self):
        class M(torch.nn.Module):
            def forward(self, x, y):
                return x * y

        ep = export(M(), (4, 5))
        self.assertEqual(ep.module()(4, 5), 20)

        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: x == 4"),
        ):
            # expected 4, but got 3
            self.assertEqual(ep.module()(3, 6), 18)

        ep = export(M(), (4, 5), dynamic_shapes={"x": Dim.DYNAMIC, "y": Dim.AUTO})
        self.assertEqual(ep.module()(4, 5), 20)
        self.assertEqual(ep.module()(3, 6), 18)

        ep = export(M(), (4, 5), dynamic_shapes={"x": Dim.DYNAMIC, "y": Dim.AUTO})
        self.assertEqual(ep.module()(4, 5), 20)
        # --- continuation of test_symint_input_basic ---
        self.assertEqual(ep.module()(3, 6), 18)

        ep = export(M(), (5, 5), dynamic_shapes={"x": None, "y": Dim.AUTO})
        self.assertEqual(ep.module()(5, 6), 30)
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: x == 5"),
        ):
            # expected 5, but got 3
            self.assertEqual(ep.module()(3, 5), 18)

        class M(torch.nn.Module):
            def forward(self, x, y):
                return x["moo"] * y

        # symint nested inside a dict input, alongside a dynamic tensor dim
        ep = export(
            M(),
            ({"moo": 2}, torch.ones(3, 3)),
            dynamic_shapes={"x": {"moo": Dim.DYNAMIC}, "y": {0: Dim.DYNAMIC}},
        )
        inp = ({"moo": 3}, torch.ones(4, 3))
        self.assertTrue(torch.allclose(ep.module()(*inp), M()(*inp)))

    # A symint marked Dim.DYNAMIC but specialized by the model code (assert
    # x == 3) must raise; Dim.AUTO accepts the specialization and guards on it.
    @testing.expectedFailureCppRuntime
    def test_symint_input_specialization(self):
        class M(torch.nn.Module):
            def forward(self, x, y):
                assert x == 3
                assert y.shape[0] == 4
                return x * y

        inp = (3, torch.randn(4, 4))
        with self.assertRaisesRegex(
            torch._dynamo.exc.UserError,
            r"You marked.*but your code specialized it to be a constant.*"
            r"If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO",
        ):
            ep = export(
                M(),
                inp,
                dynamic_shapes=(Dim.DYNAMIC, None),
            )

        ep = export(
            M(),
            inp,
            dynamic_shapes=(Dim.AUTO, None),
        )
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: x == 3"),
        ):
            # expected 3, but got 4
            ep.module()(4, torch.randn(4, 4))

    # min/max bounds on a dynamic symint input: out-of-range calls fail, and
    # traced asserts intersect with (or conflict with) the declared range.
    @testing.expectedFailureCppRuntime
    def test_symint_input_ranges(self):
        class M(torch.nn.Module):
            def forward(self, x, y):
                return x * y

        inp = (3, torch.randn(4, 4))
        ep = export(
            M(),
            inp,
            dynamic_shapes=(Dim.DYNAMIC(min=3, max=10), None),
        )
        ep.module()(4, torch.randn(4, 4))
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: x <= 10"),
        ):
            # expected <= 10, but got 16
            ep.module()(16, torch.randn(4, 4))
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: x >= 3"),
        ):
            # expected >= 3, but got 2
            ep.module()(2, torch.randn(4, 4))

        # While tracing the range was found to be a subset of the original range
        class M(torch.nn.Module):
            def forward(self, x, y):
                assert x > 3
                assert x <= 5
                return x * y

        inp = (4, torch.randn(4, 4))
        ep = export(
            M(),
            inp,
            dynamic_shapes=(Dim.DYNAMIC(min=3, max=10), None),
        )
        constraints = list(ep.range_constraints.values())
        constraint = constraints[0]
        # range narrowed to the intersection [4, 5]
        self.assertEqual(constraint.lower, 4)
        self.assertEqual(constraint.upper, 5)

        # While tracing the range was found to be bigger than the original range
        class M(torch.nn.Module):
            def forward(self, x, y):
                assert x > 1
                assert x < 20
                return x * y

        inp = (4, torch.randn(4, 4))
        ep = export(
            M(),
            inp,
            dynamic_shapes=(Dim.DYNAMIC(min=3, max=10), None),
        )
        constraints = list(ep.range_constraints.values())
        constraint = constraints[0]
        # declared range wins when traced asserts are looser
        self.assertEqual(constraint.lower, 3)
        self.assertEqual(constraint.upper, 10)

        # While tracing the range was found to be outside of the original range
        class M(torch.nn.Module):
            def forward(self, x, y):
                assert x > 10
                assert x < 20
                return x * y

        inp = (14, torch.randn(4, 4))
        with self.assertRaisesRegex(
            ValueError, r"\[3, 10\], conflicting with .* \[11, 19\]"
        ):
            ep = export(
                M(),
                inp,
                dynamic_shapes=(Dim.DYNAMIC(min=3, max=10), None),
            )

    # AdditionalInputs: example inputs supplied after the fact drive the
    # dynamic-shape inference for symint arguments.
    @testing.expectedFailureCppRuntime
    def test_symint_input_additional_inputs(self):
        class M(torch.nn.Module):
            def forward(self, x, y):
                return x + y

        additional_inputs = torch.export.AdditionalInputs()
        additional_inputs.add((5, 5))
        additional_inputs.add((3, 5))
        additional_inputs.add((5, 4))
        ep = torch.export.export(M(), (6, 7), dynamic_shapes=additional_inputs)
        self.assertEqual(ep.module()(5, 5), 10)
        self.assertEqual(ep.module()(3, 5), 8)
        self.assertEqual(ep.module()(5, 4), 9)

    # ShapesCollection keyed on _IntWrapper-wrapped scalar args marks symint
    # inputs dynamic.
    @testing.expectedFailureCppRuntime
    def test_symint_input_shapes_collection(self):
        class M(torch.nn.Module):
            def forward(self, x, y):
                return x + y

        import torch.utils._pytree as pytree
        from torch.export.dynamic_shapes import _IntWrapper

        args = (_IntWrapper(5), _IntWrapper(5))
        shapes_collection = torch.export.ShapesCollection()
        shapes_collection[args[0]] = Dim.DYNAMIC
        shapes_collection[args[1]] = Dim.DYNAMIC
        ep = torch.export.export(M(), args, dynamic_shapes=shapes_collection)
        # --- continuation of test_symint_input_shapes_collection ---
        self.assertEqual(ep.module()(5, 5), 10)
        self.assertEqual(ep.module()(3, 5), 8)
        self.assertEqual(ep.module()(5, 4), 9)

    # A Dim.DYNAMIC dim that the code specializes (x[:3] forces y's first dim
    # to 3 via the matmul) must raise a constraint-violation error.
    def test_dynamic_shapes_bounds(self):
        class M(torch.nn.Module):
            """
            Example: bounds on dynamic shapes
            """

            def forward(self, x: torch.Tensor, y: torch.Tensor, zs: list[torch.Tensor]):
                return x[:3] + y @ torch.cat(zs)

        m = M()
        x = torch.randn(7, 5)
        y = torch.randn(3, 6)
        zs = [torch.randn(2, 5), torch.randn(4, 5)]

        from torch.export import Dim, ShapesCollection

        dynamic_shapes = ShapesCollection()
        dynamic_shapes[x] = (Dim.DYNAMIC, Dim.DYNAMIC)
        dynamic_shapes[y] = (Dim.DYNAMIC, Dim.DYNAMIC)
        for z in zs:
            dynamic_shapes[z] = (Dim.DYNAMIC, Dim.DYNAMIC)

        with self.assertRaisesRegex(
            torch._dynamo.exc.UserError,
            r"Constraints violated.*\n.*"
            r"You marked L\['y'\].size\(\)\[0\] as dynamic but your code specialized it to be a constant \(3\).*"
            r"If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO.",
        ):
            export(m, (x, y, zs), dynamic_shapes=dynamic_shapes)

    # Variant of test_unflatten_random_dag_const_preserving_3: N0 also reads
    # its direct child's constant.
    def test_unflatten_random_dag_const_preserving_3_1(self):
        class N2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.const = torch.ones(1)

            def forward(self, x):
                return x + 1

        class N1(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.const = torch.ones(1)
                self.n2 = N2()

            def forward(self, x):
                x = x + self.n2.const
                x = self.n2(x + 1)
                return x + 1

        class N0(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.const = torch.ones(1)
                self.n1 = N1()

            def forward(self, x):
                x = x + self.n1.const
                x = self.n1(x + 1)
                x = self.n1.n2(x + 1)
                return x + 1

        inp = (torch.ones(1),)
        eager = N0()(*inp)
        fqns = (
            "n1",
            "n1.n2",
        )
        ep = export(N0(), inp, preserve_module_call_signature=fqns)
        epm = ep.module()
        ufm = torch.export.unflatten(ep)
        assert torch.allclose(epm(*inp), eager)
        assert torch.allclose(ufm(*inp), eager)

    # A shared submodule called with different boolean args (two distinct
    # specializations); unflatten must not unroll it into one body, and
    # submodule swapping after unflatten must keep working.
    def test_unflatten_no_unroll(self):
        inp = (torch.ones(1),)

        class N(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.const = torch.ones(1) * 4
                self.buf = torch.nn.Buffer(torch.ones(1) * 4)

            def forward(self, x, b):
                if b:
                    return x + self.const + 1
                else:
                    return x + 2 * (self.buf + 1) - self.const

        class K(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n = N()

            def forward(self, x0):
                return self.n(x0, True)

        class P(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n = N()

            def forward(self, x):
                x0 = x + 3
                x1 = self.n(x0, True)
                x2 = self.n(x0, False)
                return x1 + x2

        class Q(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.k = K()

            def forward(self, x):
                x0 = x + 3
                x1 = self.k.n(x0, True)
                x2 = self.k.n(x0, False)
                return x1 + x2

        class R(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.k = K()

            def forward(self, x):
                x0 = x + 3
                x1 = self.k(x0)
                x2 = self.k.n(x0, False)
                return x1 + x2

        # replacement modules used when swapping per-specialization copies
        class _N(torch.nn.Module):
            def forward(self, x):
                return x + 5

        class _N_1(torch.nn.Module):
            def forward(self, x):
                return x + 6

        for Mod, path_n in [(P, "n"), (Q, "k.n"), (R, "k.n")]:
            m = Mod()
            eager_result = m(*inp)

            def test(ep, swap):
                epm = ep.module()
                ufm = torch.export.unflatten(ep)

                exported_result = epm(*inp)
                self.assertTrue(torch.allclose(exported_result, eager_result))

                unflattened_result = ufm(*inp)
                self.assertTrue(torch.allclose(unflattened_result, eager_result))

                for fqn, mod in swap.items():
                    ufm.set_submodule(fqn, mod)
                unflattened_result = ufm(*inp)
                self.assertTrue(torch.allclose(unflattened_result, eager_result))

            if not is_retracebility_test(self._testMethodName):
                # swapping will not work with retrace
                test(
                    export(Mod(), inp, preserve_module_call_signature=(path_n,)),
                    swap={path_n: N()},
                )
                # without preservation, the second call site gets an "@1" copy
                test(
                    export(Mod(), inp),
                    swap={path_n: _N(), path_n + "@1": _N_1()},
                )

    # A preserved submodule called with a specializing (boolean) argument;
    # unflatten + submodule swap must still match eager.
    def test_preserve_module_call_signature_unflatten_specialization(self):
        class N(torch.nn.Module):
            def forward(self, x, b):
                if b:
                    return x + 1
                else:
                    return x + 2

        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n = N()

            def forward(self, x):
                x0 = x + 3
                x1 = self.n(x0, True)
                return x1 + 4

        inp = (torch.ones(1),)
        m = M()
        eager_result = m(*inp)

        if not is_retracebility_test(self._testMethodName):
            # swapping will not work with retrace
            ep = export(M(), inp, preserve_module_call_signature=("n",))
            epm = ep.module()
            ufm = torch.export.unflatten(ep)

            # Baseline: the exported (flat) module matches eager.
            exported_result = epm(*inp)
            self.assertTrue(torch.allclose(exported_result, eager_result))

            # Unflattened module must match eager as well.
            unflattened_result = ufm(*inp)
            self.assertTrue(torch.allclose(unflattened_result, eager_result))

            # Because the call signature of "n" was preserved, the traced
            # submodule can be swapped for a fresh eager N() and still agree.
            ufm.set_submodule("n", N())
            unflattened_result = ufm(*inp)
            self.assertTrue(torch.allclose(unflattened_result, eager_result))

    def test_unflatten_multiple_graphs_dispatch(self):
        # A shared submodule is called three times, twice on the b=True branch
        # and once on b=False; unflatten must dispatch each call site to the
        # correct specialized graph.
        class N(torch.nn.Module):
            def forward(self, x, b):
                if b:
                    return x + 1
                else:
                    return x + 2

        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n = N()

            def forward(self, x):
                x = x + 3
                x = self.n(x, True)
                x = x + 4
                x = self.n(x, True)
                x = x + 5
                x = self.n(x, False)
                x = x + 6
                return x

        inp = (torch.ones(1),)
        m = M()
        eager_result = m(*inp)

        def test(ep):
            # Both the flat exported module and its unflattened form must
            # reproduce the eager result.
            epm = ep.module()
            ufm = torch.export.unflatten(ep)

            exported_result = epm(*inp)
            self.assertTrue(torch.allclose(exported_result, eager_result))

            unflattened_result = ufm(*inp)
            self.assertTrue(torch.allclose(unflattened_result, eager_result))

        if is_training_ir_test(self._testMethodName):
            test(
                torch.export.export(
                    M(),
                    inp,
                    strict=not is_non_strict_test(self._testMethodName),
                    preserve_module_call_signature=("n",),
                )
            )

        test(export(M(), inp, preserve_module_call_signature=("n",)))

    def test_unflatten_multiple_graphs_preserve_signature_no_error(self):
        # The shared submodule is specialized on both branches (True and
        # False); exporting with a preserved signature must not error, and
        # swapping in an eager replacement must still match.
        class N(torch.nn.Module):
            def forward(self, x, b):
                if b:
                    return x + 1
                else:
                    return x + 2

        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n = N()

            def forward(self, x):
                x = x + 3
                x = self.n(x, True)
                x = x + 4
                x = self.n(x, False)
                x = x + 5
                return x

        inp = (torch.ones(1),)
        m = M()
        eager_result = m(*inp)

        def test(ep, swap=None):
            epm = ep.module()
            ufm = torch.export.unflatten(ep)

            exported_result = epm(*inp)
            self.assertTrue(torch.allclose(exported_result, eager_result))

            unflattened_result = ufm(*inp)
            self.assertTrue(torch.allclose(unflattened_result, eager_result))

            if swap:
                # Replace traced submodules with eager ones and re-check.
                for fqn, mod in swap.items():
                    ufm.set_submodule(fqn, mod)
                unflattened_result = ufm(*inp)
                self.assertTrue(torch.allclose(unflattened_result, eager_result))

        if not is_retracebility_test(self._testMethodName):
            # swapping will not work with retrace
            test(
                export(M(), inp, preserve_module_call_signature=("n",)),
                swap={"n": N()},
            )

        test(export(M(), inp))

    def test_unflatten_multiple_graphs_state(self):
        # Same as above but the shared submodule mutates a non-persistent
        # buffer on both branches, so each specialized graph carries a state
        # mutation that must survive unflattening (and re-decomposition).
        class N(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.register_buffer("buf", torch.ones(1), persistent=False)

            def forward(self, x, b):
                if b:
                    self.buf.add_(1)
                else:
                    self.buf.add_(2)
                return x + self.buf

        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.n = N()

            def forward(self, x):
                x = self.n(x, True)
                x = x + 1
                x = self.n(x, False)
                x = x + 1
                x = self.n(x, True)
                x = x + 1
                x = self.n(x, False)
                return x

        inp = (torch.ones(1),)
        m = M()
        eager_result = m(*inp)

        def test(ep, swap=None):
            epm = ep.module()
            ufm = torch.export.unflatten(ep)

            exported_result = epm(*inp)
            self.assertTrue(torch.allclose(exported_result, eager_result))

            unflattened_result = ufm(*inp)
            self.assertTrue(torch.allclose(unflattened_result, eager_result))

            if swap:
                for fqn, mod in swap.items():
                    ufm.set_submodule(fqn, mod)
                unflattened_result = ufm(*inp)
                self.assertTrue(torch.allclose(unflattened_result, eager_result))

        if not is_retracebility_test(self._testMethodName):
            # swapping will not work with retrace
            test(
                export(M(), inp, preserve_module_call_signature=("n",)),
                swap={"n": N()},
            )
            # running decompositions again should work for all IRs
            ep = export(M(), inp, preserve_module_call_signature=("n",))
            test(ep.run_decompositions({}), swap={"n": N()})

        test(export(M(), inp))

        strict = not is_non_strict_test(self._testMethodName)
        ept = torch.export.export(
            M(),
            inp,
            strict=strict,
            preserve_module_call_signature=("n",),
        )
        test(ept)

    def test_set_grad_unflatten(self):
        class
M1(torch.nn.Module):
            # NOTE(review): `torch.no_grad()` inside forward — checks that the
            # set_grad HOP round-trips through unflatten.
            def forward(self, a, b):
                with torch.no_grad():
                    return a + b

        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.m1 = M1()

            def forward(self, a, b):
                return self.m1(a, b)

        inp = (torch.ones(3, 3), torch.ones(3, 3))
        ep = export(M(), inp)
        epm = ep.module()
        ufm = torch.export.unflatten(ep)
        self.assertTrue(torch.allclose(ufm(*inp), epm(*inp)))

    def test_placeholder_update_preserving(self):
        # The child mutates its input in place (x.add_); with the child's call
        # signature preserved, the mutation must still be visible to the
        # parent's later use of x so exported results match eager.
        class Child(torch.nn.Module):
            def forward(self, x):
                a = x.add_(3)
                return a - 2

        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.child = Child()

            def forward(self, x):
                f1 = self.child(x)  # x <- 1 + 3 = 4, x - 2 = 2
                f2 = x * 4  # x * 4 = 16
                return f1 + f2

        inp = torch.ones(2, 3, dtype=torch.float32)
        ep1 = export(Foo(), (inp,))

        # Fresh inputs each time because forward mutates its argument.
        inp = torch.ones(2, 3, dtype=torch.float32)
        ep2 = export(Foo(), (inp,), preserve_module_call_signature=("child",))

        inp = torch.ones(2, 3, dtype=torch.float32)
        orig_result = Foo()(inp)

        inp = torch.ones(2, 3, dtype=torch.float32)
        ep1_result = ep1.module()(inp)
        self.assertTrue(torch.allclose(ep1_result, orig_result))

        inp = torch.ones(2, 3, dtype=torch.float32)
        ep2_result = ep2.module()(inp)
        self.assertTrue(torch.allclose(ep2_result, orig_result))

    def test_constant_tensor_with_non_functional(self):
        # An in-place aten op (sub_) applied to values derived from a constant
        # attribute: run_decompositions must functionalize it without changing
        # the state dict keys.
        class TestModel(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.params = torch.ones((4, 4, 10))

            def forward(self, x):
                ff = self.params + 2
                ff2 = self.params + 1
                buf = torch.ops.aten.sub_.Tensor(ff, ff2)
                return buf.sum() + x.sum()

        model = TestModel()
        x = torch.zeros((4, 4, 10))
        ep_training = torch.export.export(model, (x,), strict=False)
        state_dict_before = ep_training.state_dict

        ep = export(model, (x,), strict=False).run_decompositions()
        state_dict_after = ep.state_dict

        self.assertEqual(state_dict_before.keys(), state_dict_after.keys())

        self.assertExpectedInline(
            str(ep.graph_module.code).strip(),
            """\
def forward(self, c_params, x):
    add = torch.ops.aten.add.Tensor(c_params, 2)
    add_1 = torch.ops.aten.add.Tensor(c_params, 1); c_params = None
    sub = torch.ops.aten.sub.Tensor(add, add_1); add = add_1 = None
    sum_1 = torch.ops.aten.sum.dim_IntList(sub, []); sub = None
    sum_2 = torch.ops.aten.sum.dim_IntList(x, []); x = None
    add_2 = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
    return (add_2,)""",
        )

    def test_constant_tensor_with_non_functional_nested(self):
        # Same as above, but the constant lives on a nested submodule, so the
        # lifted constant gets a qualified name (c_submod_params).
        class SubMod(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.params = torch.ones((4, 4, 10))

            def forward(self, x):
                return x

        class TestModel(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.submod = SubMod()

            def forward(self, x):
                ff = self.submod.params + 2
                ff2 = self.submod.params + 1
                buf = torch.ops.aten.sub_.Tensor(ff, ff2)
                return buf.sum() + x.sum()

        model = TestModel()
        x = torch.zeros((4, 4, 10))
        ep_training = torch.export.export(model, (x,), strict=False)
        state_dict_before = ep_training.state_dict

        ep = export(model, (x,), strict=False).run_decompositions()
        state_dict_after = ep.state_dict

        self.assertEqual(state_dict_before.keys(), state_dict_after.keys())

        self.assertExpectedInline(
            str(ep.graph_module.code).strip(),
            """\
def forward(self, c_submod_params, x):
    add = torch.ops.aten.add.Tensor(c_submod_params, 2)
    add_1 = torch.ops.aten.add.Tensor(c_submod_params, 1); c_submod_params = None
    sub = torch.ops.aten.sub.Tensor(add, add_1); add = add_1 = None
    sum_1 = torch.ops.aten.sum.dim_IntList(sub, []); sub = None
    sum_2 = torch.ops.aten.sum.dim_IntList(x, []); x = None
    add_2 = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
    return (add_2,)""",
        )

    def test_cond_unflatten(self):
        # torch.cond inside a submodule must survive unflattening.
        class M1(torch.nn.Module):
            def forward(self, p, a, b):
                def true_fn(x, y):
                    return x + y

                def false_fn(x, y):
                    return x - y

                return torch.cond(p, true_fn, false_fn, [a, b])

        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.m1 = M1()

            def forward(self, p, a, b):
                return self.m1(p, a, b)

        inp = (torch.tensor(False), torch.ones(3, 3), torch.ones(3, 3))
        ep = 
export(M(), inp) epm = ep.module() ufm = torch.export.unflatten(ep) self.assertTrue(torch.allclose(ufm(*inp), epm(*inp))) @testing.expectedFailureStrictV2 def test_unflatten_multiple_graphs_shared_submodule(self): class N(torch.nn.Module): def forward(self, x, b): if b: return x + 1 else: return x + 2 def gen_m(n, n_1, p, p_1): # Create a module instance where self.n and self.p # share the same submodule instance. # The booleans n, n_1 and p, p_1 are passed to two calls each # to self.n and self.p, and they determine which path through # the shared submodule instance is taken during export. class M(torch.nn.Module): def __init__(self): super().__init__() self.n = N() self.p = self.n def forward(self, x): x = x + 3 x = self.n(x, n) x = x + 4 x = self.n(x, n_1) x = x + 5 x = self.p(x, p) x = x + 6 x = self.p(x, p_1) return x + 7 return M() inp = (torch.ones(1),) def test(m, expected_graph, expected_fqns, expected_duplicates): eager_result = m(*inp) ep = export(m, inp) exported_result = ep.module()(*inp) # exported and eager results should match (baseline) self.assertTrue(torch.allclose(exported_result, eager_result)) unflattened = torch.export.unflatten(ep) unflattened_result = unflattened(*inp) # unflattened and eager results should match # (needs multiple specialized graphs for shared submodule instance) self.assertTrue(torch.allclose(unflattened_result, eager_result)) # expected graph should call minimal number of specialized submodules self.assertExpectedInline( str(unflattened.graph).strip(), expected_graph, ) # expected graph should contain minimal number of specialized submodule fqns self.assertEqual( sorted( [ fqn for fqn, _ in unflattened.named_modules(remove_duplicate=False) if fqn != "_guards_fn" ] ), expected_fqns, ) # expected graph should contain minimal number of specialized submodule instances for a, b in expected_duplicates: if is_non_strict_test(self._testMethodName): # NOTE: non-strict does not de-duplicate shared submodules through different fqns. 
# In particular, we use different module ids for self.n and self.p calls in non-strict, # but in strict we use the same module id, which enables additional reuse. # This is pre-existing behavior that might need to be fixed orthogonally. self.assertNotEqual( id(getattr(unflattened, a)), id(getattr(unflattened, b)) ) else: self.assertEqual( id(getattr(unflattened, a)), id(getattr(unflattened, b)) ) ep = export(m, inp, preserve_module_call_signature=("n", "p")) exported_result = ep.module()(*inp) self.assertTrue(torch.allclose(exported_result, eager_result)) unflattened = torch.export.unflatten(ep) unflattened_result = unflattened(*inp) self.assertTrue(torch.allclose(unflattened_result, eager_result)) test( gen_m(n=True, n_1=False, p=False, p_1=False), # p should share n_1 graph, p_1 should be optimized away """\ graph(): %x : [num_users=1] = placeholder[target=x] %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 3), kwargs = {}) %n : [num_users=1] = call_module[target=n](args = (%add,), kwargs = {}) %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%n, 4), kwargs = {}) %n_1 : [num_users=1] = call_module[target=n@1](args = (%add_2,), kwargs = {}) %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%n_1, 5), kwargs = {}) %p : [num_users=1] = call_module[target=p](args = (%add_4,), kwargs = {}) %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%p, 6), kwargs = {}) %p_1 : [num_users=1] = call_module[target=p](args = (%add_6,), kwargs = {}) %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%p_1, 7), kwargs = {}) return (add_8,)""", ["", "n", "n@1", "p"], [("n@1", "p")], ) test( gen_m(n=True, n_1=False, p=True, p_1=False), # p should reuse n graph, p_1 should reuse n_1 graph """\ graph(): %x : [num_users=1] = placeholder[target=x] %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 3), 
kwargs = {}) %n : [num_users=1] = call_module[target=n](args = (%add,), kwargs = {}) %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%n, 4), kwargs = {}) %n_1 : [num_users=1] = call_module[target=n@1](args = (%add_2,), kwargs = {}) %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%n_1, 5), kwargs = {}) %p : [num_users=1] = call_module[target=p](args = (%add_4,), kwargs = {}) %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%p, 6), kwargs = {}) %p_1 : [num_users=1] = call_module[target=p@1](args = (%add_6,), kwargs = {}) %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%p_1, 7), kwargs = {}) return (add_8,)""", ["", "n", "n@1", "p", "p@1"], [("n", "p"), ("n@1", "p@1")], ) test( gen_m(n=True, n_1=True, p=True, p_1=False), # n_1 should be optimized away, p should reuse n graph """\ graph(): %x : [num_users=1] = placeholder[target=x] %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 3), kwargs = {}) %n : [num_users=1] = call_module[target=n](args = (%add,), kwargs = {}) %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%n, 4), kwargs = {}) %n_1 : [num_users=1] = call_module[target=n](args = (%add_2,), kwargs = {}) %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%n_1, 5), kwargs = {}) %p : [num_users=1] = call_module[target=p](args = (%add_4,), kwargs = {}) %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%p, 6), kwargs = {}) %p_1 : [num_users=1] = call_module[target=p@1](args = (%add_6,), kwargs = {}) %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%p_1, 7), kwargs = {}) return (add_8,)""", ["", "n", "p", "p@1"], [("n", "p")], ) test( gen_m(n=True, n_1=False, p=False, p_1=True), # p should reuse n_1 graph, p_1 should reuse n graph """\ graph(): %x : [num_users=1] = 
placeholder[target=x] %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 3), kwargs = {}) %n : [num_users=1] = call_module[target=n](args = (%add,), kwargs = {}) %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%n, 4), kwargs = {}) %n_1 : [num_users=1] = call_module[target=n@1](args = (%add_2,), kwargs = {}) %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%n_1, 5), kwargs = {}) %p : [num_users=1] = call_module[target=p](args = (%add_4,), kwargs = {}) %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%p, 6), kwargs = {}) %p_1 : [num_users=1] = call_module[target=p@1](args = (%add_6,), kwargs = {}) %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%p_1, 7), kwargs = {}) return (add_8,)""", ["", "n", "n@1", "p", "p@1"], [("n", "p@1"), ("p", "n@1")], ) def test_stack_trace(self): class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) def forward(self, x): x = self.linear(x) x *= 2.0 return x ep = export( Foo(), (torch.randn(4, 4),), ).run_decompositions({}) # check correct lines are in stack trace trace_mul = [node for node in ep.graph.nodes if node.name == "mul"][0].meta.get( "stack_trace", "" ) self.assertTrue( re.search(r"test_export.py.*in forward\n.*x \*= 2.0", trace_mul) ) trace_addmm = [ node for node in ep.graph.nodes if node.name in ["addmm", "linear"] ][0].meta.get("stack_trace", "") self.assertTrue( re.search( r"test_export.py.*in forward\n.*x = self.linear\(x\)", trace_addmm ) ) def test_stack_trace_make_fx(self): class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) def forward(self, x): x = self.linear(x) x *= 2.0 return x inp = torch.randn(4, 4) gm = torch.fx.experimental.proxy_tensor.make_fx( Foo(), record_stack_traces=True )( inp, ) # check correct lines are in stack trace trace_mul = 
[node for node in gm.graph.nodes if node.name == "mul_"][ 0 ].meta.get("stack_trace", "") self.assertTrue( re.search(r"test_export.py.*in forward\n.*x \*= 2.0", trace_mul) ) trace_addmm = [node for node in gm.graph.nodes if node.name in ["addmm", "t"]][ 0 ].meta.get("stack_trace", "") self.assertTrue( re.search( r"test_export.py.*in forward\n.*x = self.linear\(x\)", trace_addmm ) ) # check correct lines are still in stack trace after export ep = export( gm, (torch.randn(4, 4),), ).run_decompositions({}) # check correct lines are in stack trace trace_mul = [node for node in ep.graph.nodes if node.name == "mul"][0].meta.get( "stack_trace", "" ) self.assertTrue( re.search(r"test_export.py.*in forward\n.*x \*= 2.0", trace_mul) ) trace_addmm = [ node for node in ep.graph.nodes if node.name in ["addmm", "linear"] ][0].meta.get("stack_trace", "") self.assertTrue( re.search( r"test_export.py.*in forward\n.*x = self.linear\(x\)", trace_addmm ) ) def test_filter_traceback_frames(self): class TestTracer(torch.fx.Tracer): def __init__(self) -> None: super().__init__() self.record_stack_traces = True def _filter_traceback_frames( self, user_stack_summary: traceback.StackSummary ) -> traceback.StackSummary: # Keep the last frame user_frames = [user_stack_summary[-1]] return traceback.StackSummary.from_list(user_frames) class Foo(torch.nn.Module): def forward(self, x): x *= 2.0 return x graph = TestTracer().trace(Foo()) trace_x = [node for node in graph.nodes if node.name == "x"][0].stack_trace self.assertTrue(re.search(r"proxy.py.*in create_node\n", trace_x)) @testing.expectedFailureSerDerNonStrict # register_constant needs to handle serialization @testing.expectedFailureSerDer # register_constant needs to handle serialization def test_register_constant(self): @dataclass(frozen=True) class MyInput: int_1: int int_2: int class Foo(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x, f): return x + f.int_1 + f.int_2 register_constant(MyInput) ep = 
export(Foo(), (torch.randn(2, 2), MyInput(4, 4)), strict=False) inp = torch.ones(2, 2) self.assertEqual(ep.module()(inp, MyInput(4, 4)), Foo()(inp, MyInput(4, 4))) def test_cond_with_module_stack_export_with(self): class Bar(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) def forward(self, x): def true_fn(x): return self.linear(x).cos() def false_fn(x): return self.linear(x).sin() return torch.cond(x.sum() > 4, true_fn, false_fn, [x]) class CondExport(torch.nn.Module): def __init__(self) -> None: super().__init__() self.bar = Bar() def forward(self, x): return x.cos() + self.bar(x) inp = (torch.randn(4, 4),) ep = torch.export.export(CondExport(), inp, strict=False) self.assertExpectedInline( ep.graph_module.code.strip(), """\ def forward(self, p_bar_linear_weight, p_bar_linear_bias, x): cos = torch.ops.aten.cos.default(x) sum_1 = torch.ops.aten.sum.default(x) gt = torch.ops.aten.gt.Scalar(sum_1, 4); sum_1 = None true_graph_0 = self.true_graph_0 false_graph_0 = self.false_graph_0 cond = torch.ops.higher_order.cond(gt, true_graph_0, false_graph_0, (p_bar_linear_bias, p_bar_linear_weight, x)); gt = true_graph_0 = false_graph_0 = p_bar_linear_bias = p_bar_linear_weight = x = None getitem = cond[0]; cond = None add = torch.ops.aten.add.Tensor(cos, getitem); cos = getitem = None return (add,)""", ) schema = get_hop_schema(ep) self.assertExpectedInline( str(schema), """cond(Tensor pred, GraphModule true_fn, GraphModule false_fn, Tensor[3] operands) -> Tensor[1]""", ) cond_top_level_nn_module_stack = [ node.meta["nn_module_stack"] for node in ep.graph.nodes if node.name == "true_graph_0" ][0] self.assertTrue( "test_cond_with_module_stack_export_with.<locals>.Bar" in str(cond_top_level_nn_module_stack) ) # TODO: See https://github.com/pytorch/pytorch/issues/115790 @unittest.expectedFailure def test_cond_with_module_stack_export_with_unflatten(self): class Bar(torch.nn.Module): def __init__(self) -> None: 
super().__init__() self.linear = torch.nn.Linear(4, 4) def forward(self, x): def true_fn(x): return self.linear(x).cos() def false_fn(x): return self.linear(x).sin() return torch.cond(x.shape[0] > 4, true_fn, false_fn, [x]) class CondExport(torch.nn.Module): def __init__(self) -> None: super().__init__() self.bar = Bar() def forward(self, x): return x.cos() + self.bar(x) inp = (torch.randn(4, 4),) ep = torch.export.export(CondExport(), inp, strict=False) cond_top_level_nn_module_stack = [ node.meta["nn_module_stack"] for node in ep.graph.nodes if node.name == "true_graph_0" ][0] # we can't preserve nn_module_stack for the subgraphs for now. for node in ep.graph_module.true_graph_0.graph.nodes: self.assertEqual( node.meta["nn_module_stack"], cond_top_level_nn_module_stack ) # this doesn't work today gm_unflat_strict = unflatten(ep) def test_modules_access_for_deleted_submodule(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(10, 10) self.foo = torch.nn.Linear(10, 10) def forward(self, x): for name, mod in self._modules.items(): if mod is None: continue pass return self.linear(x) mod = Foo() mod.foo = None mod(torch.randn(10, 10)) export(mod, (torch.randn(10, 10),), strict=False) def test_profiling_code(self): class Foo(torch.nn.Module): def forward(self, x): with torch.profiler.record_function("foo"): return x.sin() ep = export(Foo(), (torch.randn(5, 5),), strict=True) FileCheck().check_count( "torch.ops.profiler._record_function_enter_new.default", 0, exactly=True ).run(ep.graph_module.code) def test_replace_unbacked_with_very_large_upperbound(self): strict = True # beyond 2^53 where python floats lose precision VERY_LARGE_INT = 1000000007999999992 class Model(torch.nn.Module): def forward(self, x, t): unbacked = t.item() torch._check(unbacked <= VERY_LARGE_INT) y = torch.ones(unbacked) return x.reshape([-1]) + y inp = ( torch.randn(6, 2), torch.tensor([12]), ) spec = { "x": (Dim.AUTO, Dim.STATIC), "t": 
(Dim.STATIC,), } ep = export(Model(), inp, dynamic_shapes=spec, strict=strict) self.assertTrue(torch.allclose(Model()(*inp), ep.module()(*inp))) def test_predispatch_cond(self): class Model(torch.nn.Module): def __init__(self) -> None: super().__init__() self.pred = torch.nn.Buffer(torch.tensor(False)) self.t = torch.nn.Buffer(torch.tensor(10)) def forward(self, x, y): def true_fn(x, y): with torch.enable_grad(): return x - 1 + self.t + y return torch.cond( self.pred, true_fn, lambda x, y: x + 1 - self.t + y, [x, y], ) model = Model() with torch.no_grad(): exported_program = torch.export.export( model, (torch.tensor(10), torch.tensor(12)), {}, dynamic_shapes=None, strict=False, ) schema = get_hop_schema(exported_program) self.assertExpectedInline( str(schema), """cond(Tensor pred, GraphModule true_fn, GraphModule false_fn, Tensor[3] operands) -> Tensor[1]""", # noqa: B950 ) self.assertExpectedInline( str(exported_program.graph_module.code.strip()), """\ def forward(self, b_pred, b_t, x, y): true_graph_0 = self.true_graph_0 false_graph_0 = self.false_graph_0 cond = torch.ops.higher_order.cond(b_pred, true_graph_0, false_graph_0, (b_t, x, y)); b_pred = true_graph_0 = false_graph_0 = b_t = x = y = None getitem = cond[0]; cond = None return (getitem,)""", ) # noqa: B950 self.assertExpectedInline( str(exported_program.graph_module.true_graph_0.code.strip()), """\ def forward(self, b_t, x, y): submod_3 = self.submod_1 add_1 = torch.ops.higher_order.wrap_with_set_grad_enabled(True, submod_3, x, b_t, y); submod_3 = x = b_t = y = None getitem = add_1[0]; add_1 = None return (getitem,)""", ) self.assertExpectedInline( str(exported_program.graph_module.true_graph_0.submod_1.code.strip()), """\ def forward(self, x, b_t, y): sub = torch.ops.aten.sub.Tensor(x, 1); x = None add = torch.ops.aten.add.Tensor(sub, b_t); sub = b_t = None add_1 = torch.ops.aten.add.Tensor(add, y); add = y = None return (add_1,)""", ) def test_python_asserts_with_sym_int(self): class 
Model(torch.nn.Module): def forward(self, x): y = x + 1 assert y.max().item() > 0 return y model = Model() ep = torch.export.export(model, (torch.zeros(4, dtype=torch.int),)) """ Graph should look like: class GraphModule(torch.nn.Module): def forward(self, x: "i32[4]"): add: "i32[4]" = torch.ops.aten.add.Tensor(x, 1); x = None max_1: "i32[]" = torch.ops.aten.max.default(add) item: "Sym(u0)" = torch.ops.aten.item.default(max_1); max_1 = None ge: "Sym(u0 >= 1)" = item >= 1 _assert_scalar_default = torch.ops.aten._assert_scalar.default( ge, "Runtime assertion failed for expression u0 >= 1 on node 'ge'" ); ge = _assert_scalar_default = None gt_1: "Sym(u0 > 0)" = item > 0; item = None _assert_scalar_default_1 = torch.ops.aten._assert_scalar.default( gt_1, "Runtime assertion failed for expression 0 < u0 on node 'gt_1'" ); gt_1 = _assert_scalar_default_1 = None return (add,) """ inputs = (torch.ones(4, dtype=torch.int),) self.assertEqual(ep.module()(*inputs), model(*inputs)) inputs = (-torch.ones(4, dtype=torch.int),) with self.assertRaisesRegex( RuntimeError, "Runtime assertion failed for expression" ): ep.module()(*inputs) def test_predispatch_grad_wrappers(self): class Model(torch.nn.Module): def forward(self, x, y): with torch.enable_grad(): x = x - y with torch.no_grad(): x = x + y return x # no grad model = Model() with torch.no_grad(): ep_nograd = torch.export.export( model, (torch.tensor(10), torch.tensor(12)), {}, dynamic_shapes=None, strict=False, ) # check that only sub op is wrapped with grad_enabled getattr_nodes = [ node for node in ep_nograd.graph.nodes if node.op == "get_attr" ] self.assertEqual(len(getattr_nodes), 1) grad_subgraph = getattr(ep_nograd.graph_module, getattr_nodes[0].target) op_node = [ node for node in grad_subgraph.graph.nodes if node.op == "call_function" ][0] self.assertEqual(op_node.target._name, "aten::sub.Tensor") # enable grad model = Model() ep_grad = torch.export.export( model, (torch.tensor(10), torch.tensor(12)), {}, 
            dynamic_shapes=None,
        )
        # check that only add op is wrapped with grad_enabled
        getattr_nodes = [node for node in ep_grad.graph.nodes if node.op == "get_attr"]
        self.assertEqual(len(getattr_nodes), 1)
        grad_subgraph = getattr(ep_grad.graph_module, getattr_nodes[0].target)
        op_node = [
            node for node in grad_subgraph.graph.nodes if node.op == "call_function"
        ][0]
        self.assertEqual(op_node.target._name, "aten::add.Tensor")

    @testing.expectedFailureRetraceability
    def test_layer_sharing(self):
        # The same LayerNorm instance appears twice in a ModuleList; export
        # must neither duplicate nor mutate the shared state dict entries.
        N, C, H, W = 1, 2, 2, 3

        class Module(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                layer = torch.nn.LayerNorm([C, H, W])
                self.norms = torch.nn.ModuleList(
                    [
                        layer,
                        layer,
                    ]
                )

            def forward(self, x):
                for norm in self.norms:
                    x = norm(x)
                return x

        m = Module()
        copied_m = copy.deepcopy(m)
        ep = export(copied_m, (torch.randn(N, C, H, W),))
        self.assertEqual(copied_m.state_dict(), m.state_dict())
        self.assertEqual(ep.state_dict, m.state_dict())

    def test_module_list_slice(self):
        # Iterating a slice of a ModuleList (self.fcs[:1]) must trace only the
        # modules in the slice.
        class ModuleListTruncated(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.fcs = torch.nn.ModuleList(
                    [torch.nn.Linear(1, 1) for _ in range(2)]
                )

            def forward(self, x):
                for fc in self.fcs[:1]:
                    x = fc(x)
                return x

        x = torch.rand(2, 1)
        mod = ModuleListTruncated()
        epm = export(mod, (x,)).module()
        self.assertTrue(torch.allclose(mod(x), epm(x)))

    def test_non_persistent_buffer(self):
        # Non-persistent buffers must be lifted as constants: absent from the
        # state dict but present in named_buffers() and ep.constants, both on
        # the ExportedProgram and on its unlifted module.
        class MyModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.foo = torch.nn.Buffer(torch.rand(2, 3), persistent=False)

            def forward(self, x):
                return self.foo + x

        class MyOuterModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.inner = MyModule()

            def forward(self, x):
                return self.inner(x)

        inp = torch.rand(2, 3)

        def _test(m, non_persistent_buffer):
            ep = export(m, (inp,), {})
            self.assertEqual(ep.module()(inp), m(inp))
            # Non-persistent buffers should not show up in the state dict
            self.assertNotIn(non_persistent_buffer, ep.state_dict)
            named_buffers = {name: buffer for (name, buffer) in ep.named_buffers()}
            # But they should show up in named_buffers()
            self.assertIn(non_persistent_buffer, named_buffers)
            self.assertIn(non_persistent_buffer, ep.constants)
            self.assertEqual(len(ep.constants), 1)

            # Check the same properties of the unlifted module
            mod = ep.module()
            self.assertNotIn(non_persistent_buffer, mod.state_dict())
            mod_named_buffers = {name: buffer for (name, buffer) in mod.named_buffers()}
            self.assertIn(non_persistent_buffer, mod_named_buffers)
            self.assertIn(non_persistent_buffer, ep.constants)
            self.assertEqual(len(ep.constants), 1)
            self.assertEqual(mod(inp), m(inp))

        _test(MyModule(), "foo")
        _test(MyOuterModule(), "inner.foo")

    @testing.expectedFailureTrainingIRToRunDecomp  # set_grad disappears after decomp
    @testing.expectedFailureTrainingIRToRunDecompNonStrict  # set_grad disappears after decomp
    def test_export_with_set_grad_enabled(self):
        # A no_grad region in forward should show up as exactly one
        # wrap_with_set_grad_enabled HOP in the exported graph.
        class Model(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(4, 4)

            def forward(self, x):
                with torch.no_grad():
                    return self.linear(x)

        model = Model()
        ep = export(model, (torch.randn(4, 4),), {})
        FileCheck().check_count(
            "torch.ops.higher_order.wrap_with_set_grad_enabled", 1, exactly=True
        ).run(ep.graph_module.code)

    def test_export_with_autocast(self):
        # Nested autocast regions should be captured as wrap_with_autocast
        # HOPs (pre-dispatch IR only; they decompose away afterwards).
        class Model(torch.nn.Module):
            def forward(self, x):
                with torch.autocast(
                    device_type="cuda", dtype=torch.int16, enabled=True
                ):
                    y = x.sin().sum()
                with torch.autocast(
                    device_type="cpu", dtype=torch.float16, enabled=True
                ):
                    z = y.sin().sum()
                return z

        model = Model()
        ep = export(model, (torch.randn(4, 4),), {})
        # autocast nodes do not exist after run_decomposition()
        if not is_training_ir_test(self._testMethodName):
            self.assertIn(
                "torch.ops.higher_order.wrap_with_autocast",
                ep.graph_module.code,
            )
        # _export_for_traininig is using pre_dispatch=False
        # Therefore the autocast calls are not replaced with a hop.
gm = torch.export.export(model, (torch.randn(4, 4),)).module() self.assertIn( "autocast", gm.code, ) def test_export_as_backend(self): def f(x, y): return x + y def my_custom_backend(gm, example_inputs): gm = ( torch.export.export(gm, tuple(example_inputs), strict=False) .run_decompositions() .module() ) return gm inp = (torch.randn(3, 3), torch.randn(3, 3)) new_res = torch.compile(f, backend=my_custom_backend)(*inp) self.assertTrue(torch.allclose(f(*inp), new_res)) def test_nonstrict_retrace_preserves_metadata(self): class MyModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) def forward(self, x): return self.linear(x) inp = torch.randn(4, 4) m = MyModule() ep = torch.export.export(m, (inp,), {}, strict=False) # retrace ep2 = torch.export.export(ep.module(), (inp,), {}, strict=False) for n1, n2 in zip(list(ep.graph.nodes), list(ep2.graph.nodes)): self.assertEqual(n1.meta.get("stack_trace"), n2.meta.get("stack_trace")) def test_fake_weights(self): class MyModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.foo = torch.nn.Parameter(torch.randn(4, 4)) self.bar = torch.nn.Buffer(torch.randn(4, 4), persistent=False) self.baz = torch.nn.Buffer(torch.randn(4, 4), persistent=True) def forward(self, x): return self.foo + x + self.bar + self.baz fake_mode = torch._subclasses.FakeTensorMode( shape_env=ShapeEnv(tracked_fakes=[]) ) with fake_mode: m = MyModule() inp = torch.randn(4, 4) ep = export(m, (inp,)) # Can't compare outputs because the module has fake weights. 
    def test_fake_inputs(self):
        """Export a real-weight module using a fake (meta) example input.

        Only the example input is created under FakeTensorMode; the module and
        its parameters are real, so the exported module's outputs can be
        compared numerically against eager.
        """

        class MyModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.foo = torch.nn.Parameter(torch.randn(4, 4))

            def forward(self, x):
                return self.foo + x

        fake_mode = torch._subclasses.FakeTensorMode(
            shape_env=ShapeEnv(tracked_fakes=[])
        )
        # Module (and weights) constructed OUTSIDE the fake mode on purpose.
        m = MyModule()

        with fake_mode:
            inp = torch.randn(4, 4)
            ep = export(m, (inp,))

        # Real weights -> exported module must agree with eager on real inputs.
        self.assertEqual(ep.module()(torch.ones(4, 4)), m(torch.ones(4, 4)))

    def test_double_lifted_constants(self):
        """Two distinct constant tensors returned from forward() must each be
        lifted separately and reproduced by the exported module."""

        class EmptyM(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self):
                # No inputs, no state: both outputs are lifted constants.
                return (torch.tensor([1, 2, 3]), torch.tensor([4, 5, 6]))

        m = EmptyM()
        ep = torch.export.export(m, ())
        for out, real_out in zip(ep.module()(), m()):
            self.assertTrue(torch.allclose(out, real_out))

    def test_trace_under_fake(self):
        """Export a module whose weights AND inputs are both fake: everything,
        including module construction, happens under FakeTensorMode."""

        class MyModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.foo = torch.nn.Parameter(torch.randn(4, 4))

            def forward(self, x):
                return self.foo + x

        fake_mode = torch._subclasses.FakeTensorMode(
            shape_env=ShapeEnv(tracked_fakes=[])
        )
        with fake_mode:
            m = MyModule()
            inp = torch.randn(4, 4)
            # Can't use unqualified export() as it will attempt to deserialize
            # under a new FakeTensorMode.
ep = torch.export.export(m, (inp,)) def test_constant_no_user_inp(self): class Bar(torch.nn.Module): def __init__(self): super().__init__() self.a = torch.ones(4, 4) def forward(self, x): return x.sin() a = torch.ones(4, 4) class Foo(torch.nn.Module): def __init__(self): super().__init__() self.bar = Bar() self.register_buffer("buf", torch.ones(4, 4)) def forward(self): return self.bar(self.bar.a) + a + self.bar.a + self.buf export(Foo(), (), strict=False) def test_compiling_state(self): class TestModule1(torch.nn.Module): def forward(self, x): if torch._dynamo.is_compiling(): return x * 2 else: return x * 3 class TestModule2(torch.nn.Module): def forward(self, x): if torch._utils.is_compiling(): return x * 2 else: return x * 3 class TestModule3(torch.nn.Module): def forward(self, x): if torch.compiler.is_compiling(): return x * 2 else: return x * 3 for m in [TestModule1(), TestModule2(), TestModule3()]: input = torch.randn(5) ep_strict = export(m, (input,), strict=True) ep_non_strict = export(m, (input,), strict=False) self.assertTrue(torch.allclose(input * 3, m(input))) self.assertTrue(torch.allclose(input * 2, ep_strict.module()(input))) self.assertTrue(torch.allclose(input * 2, ep_non_strict.module()(input))) def test_user_input_and_buffer_mutation(self): class MyModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.foo = torch.nn.Buffer(torch.randn(4, 4)) def forward(self, x): self.foo.add_(1) x.add_(1) return self.foo + x mod = MyModule() mod_copy = copy.deepcopy(mod) ep = export(mod_copy, (torch.rand(4, 4),)) self.assertEqual(mod.foo, ep.module().foo) self.assertEqual(mod(torch.ones(4, 4)), ep.module()(torch.ones(4, 4))) def test_unbacked_scalar_constructor(self): class Foo(torch.nn.Module): def forward(self, u, zuf, b): return ( torch.tensor([u.item()]), torch.tensor([zuf.item()]), torch.tensor([b.item()]), ) mod = Foo() inps = (torch.tensor([3]), torch.tensor([3.14]), torch.tensor([True])) ep = torch.export.export(mod, inps) for 
eager_out, ep_out in zip(mod(*inps), ep.module()(*inps)): self.assertTrue(torch.allclose(eager_out, ep_out)) # test with other inputs inps = (torch.tensor([5]), torch.tensor([-1.2]), torch.tensor([False])) for eager_out, ep_out in zip(mod(*inps), ep.module()(*inps)): self.assertTrue(torch.allclose(eager_out, ep_out)) def test_symint_tensor_return(self): class Module(torch.nn.Module): def forward(self, x): a, b = torch.ops.testlib.returns_tensor_symint(x) return a, b self._test_export_same_as_eager(Module(), (torch.randn(4, 4),)) def test_custom_op_auto_functionalize(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() def forward(self, x, z): return torch.ops.testlib.foo(x, z) inps = (torch.ones(5), torch.ones(5)) inps_for_export = (torch.ones(5), torch.ones(5)) inps_for_export_with_decomp = (torch.ones(5), torch.ones(5)) ep = torch.export.export(M(), inps_for_export) x_new_eager, z_new_eager, legit_eager = M()(*inps) x_new_export, z_new_export, legit_export = ep.module()(*inps_for_export) self.assertTrue(torch.allclose(x_new_eager, x_new_export)) self.assertTrue(torch.allclose(z_new_eager, z_new_export)) self.assertTrue(torch.allclose(legit_eager, legit_export)) ep = ep.run_decompositions() x_new_export, z_new_export, legit_export = ep.module()( *inps_for_export_with_decomp ) self.assertTrue(torch.allclose(x_new_eager, x_new_export)) self.assertTrue(torch.allclose(z_new_eager, z_new_export)) self.assertTrue(torch.allclose(legit_eager, legit_export)) def test_custom_op_auto_functionalize_pre_dispatch(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() def forward(self, x): return torch.ops.testlib.foo_mutated(x) inps = (torch.ones(5),) ep = torch.export.export(M(), inps).run_decompositions({}) if IS_FBCODE: self.assertExpectedInline( str(ep.graph_module.code.strip()), """\ def forward(self, x): cos = torch.ops.aten.cos.default(x) auto_functionalized = 
torch.ops.higher_order.auto_functionalized(torch.ops.testlib.foo.default, x = x, z = cos); x = cos = None getitem_3 = auto_functionalized[3]; auto_functionalized = None cos_1 = torch.ops.aten.cos.default(getitem_3) return (getitem_3, getitem_3, cos_1)""", ) else: self.assertExpectedInline( str(ep.graph_module.code.strip()), """\ def forward(self, x): cos = torch.ops.aten.cos.default(x) auto_functionalized_v2 = torch.ops.higher_order.auto_functionalized_v2(torch.ops.testlib.foo.default, _x_base_index = 0, _z_base_index = 1, _all_bases = [x, cos]); x = cos = None getitem_3 = auto_functionalized_v2[3]; auto_functionalized_v2 = None cos_1 = torch.ops.aten.cos.default(getitem_3) return (getitem_3, getitem_3, cos_1)""", ) def test_custom_op_auto_warn_pre_dispatch(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() def forward(self, x): return torch.ops.testlib.foo_functional(x) inps = (torch.ones(5),) ep = torch.export.export(M(), inps).run_decompositions() if IS_FBCODE: self.assertExpectedInline( str(ep.graph_module.code.strip()), """\ def forward(self, x): cos = torch.ops.aten.cos.default(x) cos_1 = torch.ops.aten.cos.default(x); x = None auto_functionalized = torch.ops.higher_order.auto_functionalized(torch.ops.testlib.foo.default, x = cos, z = cos_1); cos = cos_1 = None getitem_3 = auto_functionalized[3]; auto_functionalized = None cos_2 = torch.ops.aten.cos.default(getitem_3); getitem_3 = None return (cos_2,)""", ) else: self.assertExpectedInline( str(ep.graph_module.code.strip()), """\ def forward(self, x): cos = torch.ops.aten.cos.default(x) cos_1 = torch.ops.aten.cos.default(x); x = None auto_functionalized_v2 = torch.ops.higher_order.auto_functionalized_v2(torch.ops.testlib.foo.default, _x_base_index = 0, _z_base_index = 1, _all_bases = [cos, cos_1]); cos = cos_1 = None getitem_3 = auto_functionalized_v2[3]; auto_functionalized_v2 = None cos_2 = torch.ops.aten.cos.default(getitem_3); getitem_3 = None return (cos_2,)""", ) ep = 
torch.export._trace._export(M(), inps, pre_dispatch=True) self.assertExpectedInline( str(ep.graph_module.code.strip()), """\ def forward(self, x): foo_functional = torch.ops.testlib.foo_functional.default(x); x = None return (foo_functional,)""", ) @testing.expectedFailureStrictV2 def test_placeholder_naming_order(self): # See https://github.com/pytorch/pytorch/issues/143732 class Mod(torch.nn.Module): def __init__(self): super().__init__() self.layer1 = torch.nn.Linear(3, 16) self.layer2 = torch.nn.Linear(3, 32) def forward(self, x1, x2, flag=True): x1o = self.layer1(x1) x2o = self.layer2(x2) return torch.cat([x1o, x2o], dim=1) mod = Mod() args = (torch.rand(1, 3),) kwargs = {"flag": False, "x2": torch.rand(1, 3)} ep = export(mod, args, kwargs) # check that graph is behaviorally correct self.assertTrue( torch.allclose(ep.module()(*args, **kwargs), mod(*args, **kwargs)) ) # check that graph input names are as expected self.assertEqual(ep.graph_signature.user_inputs, ("x1", False, "x2")) def test_kwarg_dynamic_shapes_diff_order(self): class DummyModel(torch.nn.Module): def __init__(self): super().__init__() self.a = torch.ones(4, 4) def forward(self, baba, *, start, end): return baba.sum() + start.sum() + end.sum() f = DummyModel() kwargs = { "end": torch.ones(4, 4, 4), "start": torch.ones(4, 4), } dynamic_shapes = { "baba": {0: torch.export.Dim("end_dim")}, "end": {0: torch.export.Dim("end_dim")}, "start": {0: torch.export.Dim("end_dim"), 1: torch.export.Dim("end_dim")}, } ep = torch.export.export( f, (torch.ones(4, 4),), kwargs, dynamic_shapes=dynamic_shapes ).run_decompositions() ep.module()(torch.ones(4, 4), **kwargs) @testing.expectedFailureStrictV2 def test_placeholder_naming_order_variadic(self): class Mod(torch.nn.Module): def forward(self, a, b, c, **kwargs): return a - b + c * kwargs["d"] mod = Mod() args = (torch.randn(3),) kwargs = {"c": torch.randn(3), "b": torch.randn(3), "d": torch.randn(3)} ep = export(mod, args, kwargs) self.assertTrue( 
torch.allclose(ep.module()(*args, **kwargs), mod(*args, **kwargs)) ) self.assertEqual(ep.graph_signature.user_inputs, ("a", "c", "b", "d")) def test_isnonzero(self): class Foo(torch.nn.Module): def forward(self, x): return torch.ops.aten.is_nonzero(x) with self.assertRaisesRegex( RuntimeError, "Boolean value of Tensor with more than" ): export(Foo(), (torch.randn(4, 4),), strict=False) @testing.expectedFailureStrictV2 def test_placeholder_naming_collisions(self): # test collisions between nested user inputs class Foo(torch.nn.Module): def forward(self, x, x_foo, x_foo_0): return x["foo"][0] + x_foo[0] + x_foo_0 inputs = ( {"foo": [torch.randn(4, 4)]}, (torch.randn(4, 4),), torch.randn(4, 4), ) ep = export(Foo(), inputs) expected_names = ["x_foo_0", "x_foo_0_1", "x_foo_0_2"] real_names = [spec.arg.name for spec in ep.graph_signature.input_specs] self.assertEqual(expected_names, real_names) # test collisions between user inputs and params, buffers, constants class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.randn(4)) self.alpha = torch.nn.Buffer(torch.randn(4), persistent=True) self.beta = torch.nn.Buffer(torch.randn(4), persistent=False) self.gamma = torch.randn(4) def forward(self, p, b_alpha, b, c_gamma): p = p["param"] + self.param b = self.alpha + self.beta + b_alpha + b["beta"] c = self.gamma + c_gamma return p, b, c inputs = ( {"param": torch.randn(4)}, torch.randn(4), {"beta": torch.randn(4)}, torch.randn(4), ) ep = export(Foo(), inputs) expected_names = [ # user inputs should be prioritized, unprefixed ("p_param_1", InputKind.PARAMETER), ("b_alpha_1", InputKind.BUFFER), ("b_beta_1", InputKind.BUFFER), ("c_gamma_1", InputKind.CONSTANT_TENSOR), ("p_param", InputKind.USER_INPUT), ("b_alpha", InputKind.USER_INPUT), ("b_beta", InputKind.USER_INPUT), ("c_gamma", InputKind.USER_INPUT), ] real_names = [ (spec.arg.name, spec.kind) for spec in ep.graph_signature.input_specs ] 
self.assertEqual(expected_names, real_names) # test collisions between user inputs & call_function nodes class Foo(torch.nn.Module): def forward(self, mul, add, add_1): return mul * mul + add * add_1 ep = export(Foo(), (torch.randn(4, 4), torch.randn(4, 4), torch.randn(4, 4))) expected_names_and_ops = [ ("mul", "placeholder"), ("add", "placeholder"), ("add_1", "placeholder"), ("mul_1", "call_function"), ("mul_2", "call_function"), ("add_2", "call_function"), ("output", "output"), ] real_names_and_ops = [(node.name, node.op) for node in ep.graph.nodes] self.assertEqual(expected_names_and_ops, real_names_and_ops) @skipIfCrossRef # Dynamo changes the order of ops under Torch function modes @testing.expectedFailureStrictV2 def test_placeholder_naming_collisions_hoo_subgraphs(self): # test collisions between user inputs, top-level nodes, and HOO subgraph nodes class Foo(torch.nn.Module): def forward(self, x, mul, mul_1): _mul = x * x y = cond( _mul.sum() > 0, lambda x, y, z: x * y * z, lambda x, y, z: x + y + z, [_mul, mul, mul_1], ) with torch.enable_grad(): y = y * y return y with torch.no_grad(): ep = torch.export._trace._export( Foo(), (torch.randn(4), torch.randn(4), torch.randn(4)), pre_dispatch=True, ) schema = get_hop_schema(ep) self.assertExpectedInline( str(schema), """cond(Tensor pred, GraphModule true_fn, GraphModule false_fn, Tensor[3] operands) -> Tensor[1]""", ) # test cond subgraph expected_names_and_ops = [ ("mul_2", "placeholder"), ("mul", "placeholder"), ("mul_1", "placeholder"), ("mul_3", "call_function"), ("mul_4", "call_function"), ("output", "output"), ] real_names_and_ops = [ (node.name, node.op) for node in ep.graph_module.true_graph_0.graph.nodes ] self.assertEqual(expected_names_and_ops, real_names_and_ops) # test set_grad_enabled subgraph expected_names_and_ops = [ ("getitem", "placeholder"), ("mul_1", "call_function"), ("output", "output"), ] real_names_and_ops = [ (node.name, node.op) for node in ep.graph_module.submod_1.graph.nodes ] 
self.assertEqual(expected_names_and_ops, real_names_and_ops) # test collisions between user inputs & higher order op subgraphs # (please never do this) class Foo(torch.nn.Module): def forward(self, input, true_graph, body_graph): x = input + true_graph[0] + true_graph[1] x = cond(x.sum() > 0, lambda x: x * 2.0, lambda x: x + 2.0, [x]) x = cond(x.sum() > 0, lambda x: x * 2.0, lambda x: x + 2.0, [x]) return x inputs = ( torch.randn(10, 4), (torch.randn(4), torch.randn(4)), (torch.randn(4),), ) ep = export(Foo(), inputs) expected_getattr_names = [ "true_graph_2", "false_graph_0", "true_graph_3", "false_graph_1", ] real_getattr_names = [ node.name for node in ep.graph.nodes if node.op == "get_attr" ] self.assertEqual(expected_getattr_names, real_getattr_names) @testing.expectedFailureStrictV2 def test_constant_input_naming(self): class Foo(torch.nn.Module): def forward(self, x, y, div="floor"): return torch.div(x, y, rounding_mode=div) f = Foo() inputs = (torch.randn(4), torch.randn(4), "floor") ep = export(f, inputs) div_spec = ep.graph_signature.input_specs[2] self.assertEqual(div_spec.arg.name, "div") self.assertEqual(div_spec.arg.value, "floor") def test_attr_assignment_extra(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x): self.bar = x.sum() return x + 2 with self.assertWarnsRegex( UserWarning, "The tensor attribute self.bar was assigned during export", ): _ = export(Foo(), (torch.randn(4, 4),), strict=False) def test_vmap_custom_autograd_function(self): from torch._dynamo._trace_wrapped_higher_order_op import TransformGetItemToIndex class IndexingModule(torch.nn.Module): def __init__(self, base_size: int = 10): super().__init__() self.register_buffer("base", torch.arange(base_size)) def forward(self, indices: torch.Tensor) -> torch.Tensor: with TransformGetItemToIndex(): # Each element of `indices` is a scalar tensor, so our override kicks in return torch.vmap(lambda i: self.base[i])(indices) m = IndexingModule(10) 
idxs = torch.tensor([0, 3, 7, 9]) ep = torch.export.export(m, (idxs,), strict=False) self.assertExpectedInline( ep.graph, """\ graph(): %b_base : [num_users=1] = placeholder[target=b_base] %indices : [num_users=1] = placeholder[target=indices] %lazy_load_decompositions : [num_users=0] = call_function[target=torch._functorch.predispatch.lazy_load_decompositions](args = (), kwargs = {}) %_vmap_increment_nesting : [num_users=0] = call_function[target=torch._functorch.predispatch._vmap_increment_nesting](args = (4, error), kwargs = {}) %_add_batch_dim : [num_users=1] = call_function[target=torch._functorch.predispatch._add_batch_dim](args = (%indices, 0, 1), kwargs = {}) %torch__dynamo__trace_wrapped_higher_order_op_mod_index0 : [num_users=1] = get_attr[target=torch__dynamo__trace_wrapped_higher_order_op_ModIndex0] %function_const_func_spec0 : [num_users=1] = get_attr[target=function_const_func_spec0] %flat_apply : [num_users=1] = call_function[target=torch.ops.higher_order.flat_apply](args = (%function_const_func_spec0, %torch__dynamo__trace_wrapped_higher_order_op_mod_index0, torch._dynamo._trace_wrapped_higher_order_op.ModIndex, %b_base, %_add_batch_dim), kwargs = {}) %_remove_batch_dim : [num_users=1] = call_function[target=torch._functorch.predispatch._remove_batch_dim](args = (%flat_apply, 1, 4, 0), kwargs = {}) %_vmap_decrement_nesting : [num_users=0] = call_function[target=torch._functorch.predispatch._vmap_decrement_nesting](args = (), kwargs = {}) return (_remove_batch_dim,)""", ) self.assertEqual(m(idxs), ep.module()(idxs)) ep = ep.run_decompositions({}) self.assertExpectedInline( ep.graph, """\ graph(): %b_base : [num_users=1] = placeholder[target=b_base] %indices : [num_users=1] = placeholder[target=indices] %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%b_base, [%indices]), kwargs = {}) return (index,)""", ) self.assertEqual(m(idxs), ep.module()(idxs)) def test_unbacked_deferred_runtime_retrace(self): class 
Foo(torch.nn.Module): def forward(self, x, y): y_sum = y.sin().sum() with torch.no_grad(): a = x.item() torch._check(a > 2) torch._check(a < 6) unbacked_shape = torch.ops.testlib.foo_unbacked(a) return y + y_sum + unbacked_shape.sum() inps = (torch.tensor(4), torch.randn(5, 5)) ep_pre = torch.export.export(Foo(), inps, strict=False) self.assertExpectedInline( str(ep_pre.graph_module.submod_1.code).strip(), """\ def forward(self, x): item = torch.ops.aten.item.default(x); x = None ge = item >= 3 _assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 3 on node 'ge'"); ge = _assert_scalar_default = None le = item <= 5 _assert_scalar_default_1 = torch.ops.aten._assert_scalar.default(le, "Runtime assertion failed for expression u0 <= 5 on node 'le'"); le = _assert_scalar_default_1 = None gt_1 = item > 2 _assert_scalar_default_2 = torch.ops.aten._assert_scalar.default(gt_1, "Runtime assertion failed for expression 2 < u0 on node 'gt_1'"); gt_1 = _assert_scalar_default_2 = None lt_1 = item < 6 _assert_scalar_default_3 = torch.ops.aten._assert_scalar.default(lt_1, "Runtime assertion failed for expression u0 < 6 on node 'lt_1'"); lt_1 = _assert_scalar_default_3 = None foo_unbacked = torch.ops.testlib.foo_unbacked.default(item); item = None return (foo_unbacked,)""", ) ep_aot = ep_pre.run_decompositions() self.assertExpectedInline( str(ep_aot.graph_module.code).strip(), """\ def forward(self, x, y): sin = torch.ops.aten.sin.default(y) sum_1 = torch.ops.aten.sum.dim_IntList(sin, []); sin = None _local_scalar_dense = torch.ops.aten._local_scalar_dense.default(x); x = None ge_1 = _local_scalar_dense >= 3 _assert_scalar_default = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u2 >= 3 on node 'ge_1'"); ge_1 = _assert_scalar_default = None le_1 = _local_scalar_dense <= 5 _assert_scalar_default_1 = torch.ops.aten._assert_scalar.default(le_1, "Runtime assertion failed for expression u2 
<= 5 on node 'le_1'"); le_1 = _assert_scalar_default_1 = None gt = _local_scalar_dense > 2 _assert_scalar_2 = torch.ops.aten._assert_scalar.default(gt, "Runtime assertion failed for expression 2 < u0 on node 'gt_1'"); gt = _assert_scalar_2 = None lt = _local_scalar_dense < 6; _local_scalar_dense = None _assert_scalar_3 = torch.ops.aten._assert_scalar.default(lt, "Runtime assertion failed for expression u0 < 6 on node 'lt_1'"); lt = _assert_scalar_3 = None full = torch.ops.aten.full.default([4, 4], 1, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False) add = torch.ops.aten.add.Tensor(y, sum_1); y = sum_1 = None sum_2 = torch.ops.aten.sum.dim_IntList(full, []); full = None add_1 = torch.ops.aten.add.Tensor(add, sum_2); add = sum_2 = None return (add_1,)""", ) def test_nested_dynamic_shapes_spec(self): class Foo(torch.nn.Module): def forward(self, x): (a0, a1), (b0, b1), (c0, c1, c2) = x return a0 + a1 + b0 + b1 + c0 + c1 + c2 f = Foo() inputs = ( (1, 2), ( torch.randn(4, 4), torch.randn(4, 4), ), ( torch.randn(4, 4), torch.randn(4, 4), torch.randn(4, 4), ), ) # make sure this gets parsed correctly as 7 individual inputs, not 3 tensors dynamic_shapes = { "x": ( (None, None), (None, None), (None, None, None), ) } export(f, (inputs,), dynamic_shapes=dynamic_shapes) def test_disable_forced_specializations_ok(self): # check that we don't force specialization, and defer to runtime asserts # with prefer_deferred_runtime_asserts_over_guards=True to successfully export # case 1: modulo guards from torch.export import dims class Mod4Reshape(torch.nn.Module): def forward(self, x): return x.reshape(x.shape[0] - 1, 4, -1) # Mod(s0*s1, 4*(s0-1)) = 0 inputs = (torch.randn(10, 72),) dx, dy = dims("dx", "dy") for use_new_tracer in [True, False]: with torch._export.config.patch(use_new_tracer_experimental=use_new_tracer): ep = torch.export._trace._export( Mod4Reshape(), inputs, dynamic_shapes={"x": (dx, dy)}, 
prefer_deferred_runtime_asserts_over_guards=True, pre_dispatch=True, ) out1 = ep.module()(torch.randn(8, 7)) self.assertEqual(out1.shape, torch.ones(7, 4, 2).shape) out2 = ep.module()(torch.randn(12, 11)) self.assertEqual(out2.shape, torch.ones(11, 4, 3).shape) with self.assertRaisesRegex( RuntimeError, r"^Runtime assertion failed for expression Eq\(Mod\(s\d+\*s\d+, 4\*s\d+\s*-\s*4\), 0\) on node 'eq[^']*'$", ): ep.module()(torch.randn(8, 8)) # fail # case 2: 2d reshape class FreeReshape(torch.nn.Module): def forward(self, x, y, z): return x.reshape([-1]) + y.reshape([-1]) + z # s0*s1 = s2*s3 = s4 inputs = ( torch.randn(6, 8), torch.randn(3, 16), torch.randn(48), ) dynamic_shapes = { "x": [Dim(f"dx{i}", min=2) for i in range(2)], "y": [Dim(f"dy{i}", min=2) for i in range(2)], "z": [Dim(f"dz{i}", min=4) for i in range(1)], } for private_api in (True, False): if private_api: ep = torch.export.export( FreeReshape(), inputs, dynamic_shapes=dynamic_shapes, prefer_deferred_runtime_asserts_over_guards=True, ) else: ep = export(FreeReshape(), inputs, dynamic_shapes=dynamic_shapes) out1 = ep.module()(torch.randn(48, 1), torch.randn(4, 12), torch.randn(48)) self.assertEqual(out1.shape, torch.ones(48).shape) out2 = ep.module()(torch.randn(5, 8), torch.randn(4, 10), torch.randn(40)) self.assertEqual(out2.shape, torch.ones(40).shape) if private_api: with self.assertRaisesRegex( RuntimeError, r"Runtime assertion failed for expression Eq\((.*)\) on node '.*'", ): # fail only at runtime ep.module()( torch.randn(5, 8), torch.randn(4, 5), torch.randn(30) ) # fail else: # no runtime assert in exported module but it fails anyway with wrong inputs with self.assertRaisesRegex( AssertionError, escape( "Guard failed: x.size()[1] * x.size()[0] == y.size()[0] * y.size()[1]" ), ): # expected 40, but got 20 ep.module()(torch.randn(5, 8), torch.randn(4, 5), torch.randn(30)) # case 3: 3d reshape (previously failing with different issue) class Reshape3d(torch.nn.Module): def forward(self, x, y): 
return x.reshape([-1]) + y # s0*s1*s2 = s3 inputs = ( torch.randn(4, 3, 2), torch.randn(24), ) dynamic_shapes = { "x": (Dim("dx0", min=2), Dim("dx1", min=2), Dim("dx2", min=2)), "y": (Dim("dy", min=8),), } ep = torch.export.export( Reshape3d(), inputs, dynamic_shapes=dynamic_shapes, prefer_deferred_runtime_asserts_over_guards=True, ) out1 = ep.module()(torch.randn(9, 7, 2), torch.randn(126)) self.assertEqual(out1.shape, torch.ones(126).shape) with self.assertRaisesRegex( RuntimeError, r"Runtime assertion failed for expression Eq\((.*)\) on node '.*'", ): # fail only at runtime ep.module()(torch.randn(4, 3, 2), torch.randn(10)) # fail def test_disable_forced_specializations_errors(self): # check error messages with hybrid symints class Foo(torch.nn.Module): def forward(self, w, x, y, z): return w.reshape([-1]) + x, y + z # simple: s0*s1 = s2, s3 = s4 inputs = ( torch.randn(3, 4), torch.randn(12), torch.randn(4), torch.randn(4), ) dynamic_shapes = { "w": [Dim(f"dw{i}") for i in range(2)], "x": [Dim(f"dx{i}") for i in range(1)], "y": [Dim("dy")], # y & z incorrect, export is supposed to fail. "z": [Dim("dz")], # suggested fix should be to match these up. } with ( self.assertRaisesRegex( # if disable=True, suggested fixes should not specialize. 
torch._dynamo.exc.UserError, r".*Constraints violated(.*\n)*" r"Suggested fixes:(.*\n)*" r".*dz = dy(.*\n)*", ) as msg ): export( Foo(), inputs, dynamic_shapes=dynamic_shapes, strict=False, ) def test_preserve_requires_grad_placeholders(self): class Module(torch.nn.Module): def __init__(self) -> None: super().__init__() self.p = torch.nn.Parameter(torch.randn(3, 3)) def forward(self, x, y): return self.p + x + y m = Module() ep = export(m, (torch.randn(3, 3), torch.randn(3, 3, requires_grad=True))) placeholders = [ node for node in ep.graph_module.graph.nodes if node.op == "placeholder" ] self.assertTrue(placeholders[0].meta["val"].requires_grad) self.assertFalse(placeholders[1].meta["val"].requires_grad) self.assertTrue(placeholders[2].meta["val"].requires_grad) def test_expand_copy_export_handles_implicit_true(self): class ExpandModel(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x, implicit): return torch.expand_copy(x, [3, 3], implicit=implicit) model = ExpandModel() x = torch.ones([3]) model(x, False) model(x, True) export(model, (x, False)) export(model, (x, True)) def test_unbacked_expand(self): if "cpp_runtime_nonstrict" in self.id(): self.skipTest("TODO Unexpected success in OSS but not in fbcode.") class Foo(torch.nn.Module): def forward(self, xs): u0, u1, u2 = xs.tolist() x = torch.empty(u0, u1, 1) return x.expand(-1, u1, u2) ep = export(Foo(), (torch.tensor([1, 2, 3]),)) self.assertEqual( list(ep.module()(torch.tensor([3, 4, 5])).shape), [3, 4, 5], ) self.assertEqual( list(ep.module()(torch.tensor([0, 1, 0])).shape), [0, 1, 0], ) class Bar(torch.nn.Module): def forward(self, xs): u0, u1 = xs.tolist() x = torch.empty(u0) return x.expand(u1) ep = export(Bar(), (torch.tensor([2, 2]),)) self.assertEqual( ep.module()(torch.tensor([5, 5])).shape[0], 5, ) self.assertEqual( ep.module()(torch.tensor([1, 1])).shape[0], 1, ) with self.assertRaisesRegex( RuntimeError, r"Runtime assertion failed for expression Eq\(u0, u1\) .*", ): 
ep.module()(torch.tensor([1, 5])) def test_reshape_view_helper(self): # see: https://github.com/pytorch/pytorch/issues/126607 class Model(torch.nn.Module): def __init__(self) -> None: super().__init__() def forward(self, x): x = x.view(x.size(1), -1) # torch/_refs/__init__/_reshape_view_helper() will generate guards on reshape kernel(?) # Ne(s0, 20), so that reshape isn't no-op # Ne(Mod(s0, 20), 0), so that reshape needs to first flatten [s0, 20, 16] -> [s0*20, 16] # then split_dim -> [20, s0, 16] # check that these show up in graph return torch.nn.functional.softmax( x, dim=0 ) # don't think softmax actually creates any issues, just part of original test model = Model() x = torch.rand(1024, 20, 16) dynamic_shapes = {"x": {0: Dim("batch")}} ep = torch.export.export( model, (x,), dynamic_shapes=dynamic_shapes, prefer_deferred_runtime_asserts_over_guards=True, ) with self.assertRaisesRegex( RuntimeError, r"Runtime assertion failed for expression Ne\(s77, 20\)", ): ep.module()(torch.randn(20, 20, 16)) with self.assertRaisesRegex( RuntimeError, r"Runtime assertion failed for expression Ne\(Mod\(s77, 20\), 0\)", ): ep.module()(torch.randn(400, 20, 16)) ep.module()(torch.randn(42, 20, 16)) def test_full_on_scalar_tensor(self): class Foo(torch.nn.Module): def forward(self, val): return torch.full((80, 2), val, dtype=torch.float32) export(Foo(), args=(torch.tensor(1),)) def test_custom_pytree(self): class Foo: def __init__(self, attr1, attr2): if attr1 is None: raise ValueError("Shouldn't be None") self.attr1 = attr1 self.attr2 = attr2 class FooModel(torch.nn.Module): def __init__(self): super().__init__() self.foo_attr = Foo(torch.ones(4, 4), torch.ones(4, 4)) def forward(self, foo): return foo.attr1.sum() + foo.attr2.sum() + self.foo_attr.attr1.sum() def flat(foo): return torch.utils._pytree._list_flatten([foo.attr1, foo.attr2]) def flat_with_keys(foo): return torch.utils._pytree._list_flatten_with_keys([foo.attr1, foo.attr2]) def unflat(val, context): l = 
torch.utils._pytree._list_unflatten(val, context) return Foo(l[0], l[1]) torch.utils._pytree.register_pytree_node( Foo, flat, unflat, flatten_with_keys_fn=flat_with_keys, serialized_type_name=f"{Foo.__module__}.{Foo.__name__}", ) torch.export.export( FooModel(), (Foo(torch.ones(4, 4), torch.ones(4, 4)),), strict=False ) def test_allow_explicit_guards_as_runtime_asserts(self): # check that explicit guards are treated as runtime assertions class Foo(torch.nn.Module): def forward(self, x, y): # check that negation of first guard also shows up as runtime assertion if x.shape[0] == y.shape[0]: # False return x + y elif x.shape[0] == y.shape[0] ** 3: # False return x + 2, y + 3 elif x.shape[0] ** 2 == y.shape[0] * 3: # True return x * 2.0, y * 3.0 inputs = (torch.randn(6), torch.randn(12)) dynamic_shapes = {"x": [Dim("dx", min=4)], "y": [Dim("dy", min=4)]} ep = torch.export.export( Foo(), inputs, dynamic_shapes=dynamic_shapes, prefer_deferred_runtime_asserts_over_guards=True, ) # check forward pass out0, out1 = ep.module()(torch.randn(9), torch.randn(27)) self.assertEqual(out0.shape, torch.ones(9).shape) self.assertEqual(out1.shape, torch.ones(27).shape) with self.assertRaisesRegex( RuntimeError, r"Runtime assertion failed for expression Ne\(s77, s17\)", ): # fail only at runtime ep.module()(torch.randn(4), torch.randn(4)) # fail with self.assertRaisesRegex( RuntimeError, r"Runtime assertion failed for expression Ne\(s77, s17\**3\)", ): ep.module()(torch.randn(64), torch.randn(4)) # fail with self.assertRaisesRegex( RuntimeError, r"Runtime assertion failed for expression Eq\(s77\**2, 3\*s17\)", ): ep.module()(torch.randn(10), torch.randn(9)) # fail # this should be set with command line flag TORCH_DYNAMO_DO_NOT_EMIT_RUNTIME_ASSERTS=1, # but dynamo checks that at torch import time, so setting os.environ makes no difference # instead, manually patch dynamo config and test. 
# test that setting this flag removes runtime asserts from torch._dynamo import config as _dynamo_config with _dynamo_config.patch( do_not_emit_runtime_asserts=True, ): ep = torch.export._trace._export( Foo(), inputs, dynamic_shapes=dynamic_shapes, prefer_deferred_runtime_asserts_over_guards=True, ).run_decompositions() self.assertEqual( [ node.target == torch.ops.aten._assert_scalar.default for node in ep.graph.nodes ].count(True), 0, ) def test_unbacked_kth_value(self): class Foo(torch.nn.Module): def forward(self, x, y): n = y.item() k = min(n, 128) return x.kthvalue(k, dim=0, keepdim=True).values inps = (torch.arange(64), torch.tensor([32])) ep = export(Foo(), inps) self.assertEqual(ep.module()(*inps).item(), 31) def test_constant_output_dup(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.constant = torch.ones(4, 4) def forward(self, x): return x + self.constant, self.constant ep = export(M(), (torch.ones(4, 4),)).run_decompositions() mod = ep.module() a, b = mod(torch.zeros(4, 4)) self.assertTrue(torch.allclose(a, torch.ones(4, 4))) self.assertTrue(torch.allclose(b, torch.ones(4, 4))) def test_constant_tensor_mutation(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.foo = torch.randn(2, 2) def forward(self, x): self.foo.add_(5) return self.foo + x with self.assertRaisesRegex(RuntimeError, "Constant foo is"): _ = ( export(M(), (torch.ones(2, 2),), strict=False) .run_decompositions() .graph ) def test_constant_return(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.foo = torch.randn(2, 2) def forward(self, x): return self.foo, self.foo + x graph = ( export(M(), (torch.ones(2, 2),), strict=False).run_decompositions().graph ) self.assertExpectedInline( str(graph).strip(), """\ graph(): %c_foo : [num_users=2] = placeholder[target=c_foo] %x : [num_users=1] = placeholder[target=x] %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%c_foo, %x), kwargs = {}) 
return (c_foo, add)""", ) def test_constant_requires_grad_const(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.foo = torch.randn(2, 2, requires_grad=True) def forward(self, x): return x.cos() + self.foo.sum() gm = export(M(), (torch.ones(2, 2),)).module() self.assertFalse(gm.foo.requires_grad) def test_constant_aliasing(self): class M1(torch.nn.Module): def __init__(self, m2, foo): super().__init__() self.m2 = m2 self.foo = foo def forward(self, x): return x + self.foo + self.m2(x) class M2(torch.nn.Module): def __init__(self) -> None: super().__init__() self.foo = torch.ones(3, 3, requires_grad=True) def forward(self, x): return x + self.foo m2 = M2() m1 = M1(m2, m2.foo) inps = (torch.ones(3, 3),) ep = export(m1, inps, strict=False) # check both constants appear in list self.assertEqual(sorted(list(ep.constants)), ["foo", "m2.foo"]) # check only one input spec exists num_constant_inputs = [ spec.kind == InputKind.CONSTANT_TENSOR for spec in ep.graph_signature.input_specs ].count(True) self.assertEqual(num_constant_inputs, 1) # unflatten unflattened = unflatten(ep) self.assertTrue(torch.allclose(m1(*inps), unflattened(*inps))) @testing.expectedFailureRetraceability def test_unused_aliases(self): class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() # param self.alpha = torch.nn.Parameter(torch.randn(4)) self.beta = self.alpha self.gamma = self.alpha def forward(self, x): return x + self.gamma inps = (torch.randn(4),) ep = export(Foo(), inps) # placeholder nodes will be deduplicated in strict-mode, # but check that all params still appear in state dict for param in ["alpha", "beta", "gamma"]: self.assertTrue(param in ep.state_dict) # check that they also appear in unflattened state dict unep = unflatten(ep) for param in ["alpha", "beta", "gamma"]: self.assertTrue(param in unep.state_dict()) def test_intermediate_shape_comp(self): class Foo(torch.nn.Module): def forward(self, x, y): z = torch.cat([x, x], dim=0) w = 
z.repeat(y.shape[0]) return w.shape[0] + x.shape[0] inputs = (torch.randn(6), torch.randn(4)) shapes = { "x": (Dim("dx0"),), "y": (Dim("dy"),), } ep = export( Foo(), inputs, dynamic_shapes=shapes, ).run_decompositions({}) # test that shape is from size compute, not sym_size call add_node = [node for node in ep.graph.nodes if node.target == operator.add][0] self.assertTrue(add_node.args[0].target == operator.mul) # test sym_size calls only happen on placeholders sym_size_nodes = [ node for node in ep.graph.nodes if node.target == torch.ops.aten.sym_size.int ] self.assertEqual(len(sym_size_nodes), 2) self.assertTrue( all(node.args[0].op == "placeholder" for node in sym_size_nodes) ) # dynamo will DCE the repeat node, AOTAutograd will leave it # training IR will also DCE due to retracing repeat_nodes = [ node for node in ep.graph.nodes if node.target == torch.ops.aten.repeat.default ] self.assertEqual(len(repeat_nodes), 0) @testing.expectedFailureStrictV2 def test_checks_to_constrain_range(self): class Foo(torch.nn.Module): def forward(self, x, y): n = y.item() m = y.item() torch._check(m >= 0) torch._check(n >= 3) torch._check(-m >= -9) # m <= 9 torch._check(n <= 6) # n has range [3, 9] return x[:n] inputs = (torch.randn(10), torch.tensor(6)) ep = export(Foo(), inputs) FileCheck().check_count( "torch.ops.aten._assert_scalar.default", 2, exactly=True ).run(ep.graph_module.code) ep = ep.run_decompositions() FileCheck().check_count( "torch.ops.aten._assert_scalar.default", 2, exactly=True ).run(ep.graph_module.code) # check runtime ep.module()(torch.randn(10), torch.tensor(5)) with self.assertRaisesRegex( RuntimeError, r"Runtime assertion failed for expression u[\d+] \>\= 3", ): ep.module()(torch.randn(10), torch.tensor(2)) @torch.fx.experimental._config.patch(backed_size_oblivious=True) def test_baddbmm(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.weight = torch.nn.Parameter( torch.randn(64, 64, 192, dtype=torch.float16) ) self.bias = 
torch.nn.Parameter( torch.randn(64, 1, 192, dtype=torch.float16) ) def forward(self, x): return torch.ops.aten.baddbmm.default(self.bias, x, self.weight) x1 = torch.randn(64, 2048, 64, dtype=torch.float16) x2 = torch.randn(64, 1, 64, dtype=torch.float16) m = M() ep = export(m, (x2,), dynamic_shapes=({1: Dim("batch")},)) self.assertTrue(torch.allclose(m(x2), ep.module()(x2))) self.assertTrue(torch.allclose(m(x1), ep.module()(x1))) @testing.expectedFailureSerDerNonStrict # constructor is not serialized today @testing.expectedFailureSerDer # constructor is not serialized today @testing.expectedFailureRetraceability # dynamo doesn't work with FlatApply op def test_capture_subclass_constructor(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() self.buffer = torch.nn.Buffer( TwoTensor(torch.randn(4, 4), torch.randn(4, 4)) ) def forward(self, x): two_tensor = TwoTensor(x, TwoTensor(x, x)) + self.buffer val = x + two_tensor return val.b.a mod = Foo() ep = torch.export.export(mod, (torch.randn(4, 4),), strict=False) self.assertExpectedInline( str(ep.graph).strip(), """\ graph(): %b_buffer : [num_users=1] = placeholder[target=b_buffer] %x : [num_users=1] = placeholder[target=x] %twotensor___init__0 : [num_users=1] = get_attr[target=twotensor___init__0] %twotensor_const_func_spec0 : [num_users=1] = get_attr[target=twotensor_const_func_spec0] %flat_apply : [num_users=2] = call_function[target=torch.ops.higher_order.flat_apply](args = (%twotensor_const_func_spec0, %twotensor___init__0, %x, %x), kwargs = {}) %access_subclass_inner_tensor_default_7 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%flat_apply, b), kwargs = {}) %twotensor___init__1 : [num_users=1] = get_attr[target=twotensor___init__1] %twotensor_const_func_spec0_1 : [num_users=1] = get_attr[target=twotensor_const_func_spec0] %flat_apply_1 : [num_users=2] = call_function[target=torch.ops.higher_order.flat_apply](args = 
(%twotensor_const_func_spec0_1, %twotensor___init__1, %access_subclass_inner_tensor_default_7, %flat_apply), kwargs = {}) %access_subclass_inner_tensor_default_17 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%flat_apply_1, b), kwargs = {}) %access_subclass_inner_tensor_default_23 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%access_subclass_inner_tensor_default_17, b), kwargs = {}) %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%flat_apply_1, %b_buffer), kwargs = {}) %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%access_subclass_inner_tensor_default_23, %add), kwargs = {}) %access_subclass_inner_tensor_default_24 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%add_1, b), kwargs = {}) %access_subclass_inner_tensor_default_29 : [num_users=1] = call_function[target=torch.ops.export.access_subclass_inner_tensor.default](args = (%access_subclass_inner_tensor_default_24, a), kwargs = {}) return (access_subclass_inner_tensor_default_29,)""", ) inp = torch.randn(4, 4) self.assertEqual(ep.module()(inp), mod(inp)) with torch.inference_mode(): ep = ep.run_decompositions({}) # There should be no subclases self.assertExpectedInline( str(ep.graph).strip(), """\ graph(): %b_parametrizations_buffer_original0 : [num_users=0] = placeholder[target=b_parametrizations_buffer_original0] %b_parametrizations_buffer_original1 : [num_users=1] = placeholder[target=b_parametrizations_buffer_original1] %x : [num_users=2] = placeholder[target=x] %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %b_parametrizations_buffer_original1), kwargs = {}) %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %add_1), kwargs = {}) return (add_4,)""", ) self.assertEqual(ep.module()(inp), mod(inp)) mod = Foo() 
ep = export(mod, (torch.randn(4, 4),)).run_decompositions({}) self.assertEqual(ep.module()(inp), mod(inp)) if is_training_ir_test(self._testMethodName): self.assertExpectedInline( str(ep.graph).strip(), """\ graph(): %b_parametrizations_buffer_original0 : [num_users=0] = placeholder[target=b_parametrizations_buffer_original0] %b_parametrizations_buffer_original1 : [num_users=1] = placeholder[target=b_parametrizations_buffer_original1] %x : [num_users=2] = placeholder[target=x] %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %b_parametrizations_buffer_original1), kwargs = {}) %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %add), kwargs = {}) return (add_1,)""", ) else: self.assertExpectedInline( str(ep.graph).strip(), """\ graph(): %b_parametrizations_buffer_original0 : [num_users=0] = placeholder[target=b_parametrizations_buffer_original0] %b_parametrizations_buffer_original1 : [num_users=1] = placeholder[target=b_parametrizations_buffer_original1] %x : [num_users=2] = placeholder[target=x] %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %b_parametrizations_buffer_original1), kwargs = {}) %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %add_1), kwargs = {}) return (add_4,)""", ) def test_capture_subclass_wrong(self): from torch._export.wrappers import ( mark_subclass_constructor_exportable_experimental, ) with self.assertRaisesRegex(RuntimeError, "on fn which is not supported. 
If"): @torch._disable_dynamo @mark_subclass_constructor_exportable_experimental def fn(a, b): return a + b class Foo(torch.nn.Module): @torch._disable_dynamo @mark_subclass_constructor_exportable_experimental def __init__(self): super().__init__() def forward(self, x): return x.cos() export(Foo(), (torch.randn(4, 4),)) def test_capture_subclass_constructor_torch_ir(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() self.buffer = torch.nn.Buffer( TwoTensor(torch.randn(4, 4), torch.randn(4, 4)) ) def forward(self, x): two_tensor = TwoTensor(x, TwoTensor(x, x)) + self.buffer val = x + two_tensor return val.b.a mod = Foo() gm_torch_ir = _export_to_torch_ir(mod, (torch.randn(4, 4),)) FileCheck().check_count( "torch.testing._internal.two_tensor.TwoTensor", 2, exactly=True ).run(gm_torch_ir.code) def test_sym_float_operators(self): class Module(torch.nn.Module): def forward(self, x): return -(x.max().item() / 2) + x m = Module() args = (torch.ones(4),) ep = export(m, args) self.assertEqual(ep.module()(*args), m(*args)) def test_cse_for_symint(self): class Foo(torch.nn.Module): # check sym ops only get computed once def forward(self, x, y): if ( x.shape[0] ** 2 - y.shape[0] ** 2 >= 4 # 16 and x.shape[0] ** 2 - y.shape[0] ** 2 <= 20 and x.shape[0] ** 2 - y.shape[0] ** 2 != 15 ): return x * 2, y * 2 inputs = (torch.randn(5), torch.randn(3)) shapes = {"x": (Dim("dx"),), "y": (Dim("dy"),)} ep = torch.export.export( Foo(), inputs, dynamic_shapes=shapes, prefer_deferred_runtime_asserts_over_guards=True, ) # count 2 pow nodes, 2 sym_size.int nodes self.assertEqual( [node.target for node in ep.graph.nodes].count( operator.pow, ), 2, ) FileCheck().check_count("torch.ops.aten.sym_size.int", 2, exactly=True).run( ep.graph_module.code ) ep = ep.run_decompositions() self.assertEqual( [node.target for node in ep.graph.nodes].count( operator.pow, ), 2, ) FileCheck().check_count("torch.ops.aten.sym_size.int", 2, exactly=True).run( ep.graph_module.code ) def 
test_shared_submodule_nn_module_stack(self): class Shared(torch.nn.Module): def __init__(self) -> None: super().__init__() layernorm = torch.nn.LayerNorm(10) self.sub_net = torch.nn.Sequential( layernorm, torch.nn.ReLU(), layernorm, torch.nn.ReLU(), ) def forward(self, x): return self.sub_net(x) eager_module = Shared() inps = (torch.rand(10),) export_module = export(eager_module, inps, {}) nn_module_stacks = [ node.meta.get("nn_module_stack") for node in export_module.graph.nodes if node.op == "call_function" and "norm" in str(node.target) ] self.assertEqual(len(nn_module_stacks), 2) filtered_nn_module_stack = [ list(nn_module_stack.values())[-1][0] for nn_module_stack in nn_module_stacks ] self.assertEqual(filtered_nn_module_stack[0], "sub_net.0") self.assertEqual(filtered_nn_module_stack[1], "sub_net.2") def test_slice_nn_module_stack(self): class N(torch.nn.Module): def forward(self, x, y): return x + y class M(torch.nn.Module): def __init__(self): super().__init__() self.n = N() self.mod_list_1 = torch.nn.Sequential(*tuple(self.n for _ in range(5))) self.mod_list_2 = torch.nn.ModuleList(self.n for _ in range(5)) def forward(self, x, y): for m in self.mod_list_1[2:3]: x = m(x, y) for m in self.mod_list_2[4:5]: x = m(x, y) return x export_module = export(M(), (torch.randn(8), torch.randn(8))) nn_module_stacks = [ node.meta.get("nn_module_stack") for node in export_module.graph.nodes if node.op == "call_function" and "add" in str(node.target) ] self.assertEqual(len(nn_module_stacks), 2) filtered_nn_module_stack = [ list(nn_module_stack.values())[-1][0] for nn_module_stack in nn_module_stacks ] if is_strict_test(self._testMethodName) or is_strict_v2_test( self._testMethodName ): self.assertEqual(filtered_nn_module_stack[0], "mod_list_1.2") self.assertEqual(filtered_nn_module_stack[1], "mod_list_2.4") else: self.assertEqual( filtered_nn_module_stack[0], "mod_list_1.slice(2, 3, None).2" ) self.assertEqual( filtered_nn_module_stack[1], "mod_list_2.slice(4, 5, None).0" 
) def test_invalid_pytree_dynamo_graph_capture(self): class Block: def __init__(self, a, b): self.a = a self.b = b class Foo(torch.nn.Module): def forward(self, block): return block.a + block.b from torch._dynamo.functional_export import dynamo_graph_capture_for_export with self.assertRaisesRegex( torch._dynamo.exc.UserError, "It looks like one of the inputs with type" ): dynamo_graph_capture_for_export(Foo())( Block(torch.randn(4, 4), torch.randn(4, 4)) ) @testing.expectedFailureStrictV2 def test_enum_str(self): class TensorDim(str, enum.Enum): DDP = "ddp" FSDP = "fsdp" CP = "cp" TP = "tp" class Foo(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x): val = x.sin() if TensorDim.DDP in {"ddp"}: val += x.cos() if "ddp" in {TensorDim.DDP}: val += x.cos() return val from torch._dynamo.functional_export import _dynamo_graph_capture_for_export inp = torch.randn(4, 4) gm = export(Foo(), (inp,)).run_decompositions().module() self.assertExpectedInline( str(gm.graph).strip(), """\ graph(): %x : [num_users=4] = placeholder[target=x] %_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {}) %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%x,), kwargs = {}) %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%x,), kwargs = {}) %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sin, %cos), kwargs = {}) %cos_1 : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%x,), kwargs = {}) %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %cos_1), kwargs = {}) return (add_1,)""", ) self.assertEqual(gm(inp), Foo()(inp)) def test_split_const_gm_with_lifted_constants(self): class Model(torch.nn.Module): def __init__(self) -> None: super().__init__() self.w_pre = torch.randn(4, 4) self.b = torch.randn(4) def forward(self, x): w_transpose = torch.transpose(self.w_pre, 0, 1) w_relu = 
torch.nn.functional.relu(w_transpose) w = w_relu + self.b return ( torch.matmul(x, w) + self.b + torch.arange(4, dtype=torch.float16) ) example_inputs = (torch.randn(4, 4),) mod = Model() ep = torch.export.export(mod, example_inputs) new_gm = copy.deepcopy(ep.graph_module) new_sig = copy.deepcopy(ep.graph_signature) placeholder_nodes = [ node for node in new_gm.graph.nodes if node.op == "placeholder" ] constants = {**ep.state_dict, **ep.constants} lifted_constants = { n.name: constants[spec.target] for n, spec in zip(placeholder_nodes, new_sig.input_specs) if spec.target is not None } # [self.w_pre, self.b] lifted_constant_names = list(lifted_constants) lifted_constant_values = [lifted_constants[n] for n in lifted_constant_names] const_gm, _ = split_const_gm(new_gm, False, lifted_constant_names) counter = 0 for node in const_gm.graph.nodes: if node.op == "call_function": counter += 1 self.assertTrue(counter == 4) counter = 0 for n in new_gm.graph.nodes: if n.op == "placeholder": counter += 1 # expect 3 existing placeholders and 2 folded constant self.assertTrue(counter == 5) # return (self.b, folded_const, folded_const) const_folded_value = const_gm(*lifted_constant_values) test_input = torch.randn(4, 4) # new_gm(c_w_pre, b, x, folded_const, folded_const) actual = new_gm( lifted_constant_values[0], const_folded_value[0], test_input, const_folded_value[1], const_folded_value[2], )[0] expected = mod(test_input) self.assertEqual(actual, expected) const_gm, _ = split_const_gm( ep.graph_module, False, lifted_constant_names, lambda x: True ) counter = 0 for node in const_gm.graph.nodes: if node.op == "call_function": self.assertTrue(False) def test_istft_op(self): class istft_class(torch.nn.Module): def forward(self, spec): window = torch.hann_window(1024).type(torch.FloatTensor) return torch.istft( spec, n_fft=1024, hop_length=512, window=window, length=144000, ) model = istft_class() real_part = torch.randn(1, 513, 282, dtype=torch.float32) imaginary_part = 
torch.randn(1, 513, 282, dtype=torch.float32) spec = torch.complex(real_part, imaginary_part) export(model, (spec,)) def test_custom_op_preserve(self): class M(torch.nn.Module): def forward(self, x): y = torch.ops.testlib.foo_functional.default(x) return torch.ops.testlib.foo_mutated.default(y) decomp_table = torch.export.default_decompositions() del decomp_table[torch.ops.testlib.foo_functional.default] ep = torch.export.export(M(), (torch.randn(4, 4),)).run_decompositions( decomp_table, ) if IS_FBCODE: self.assertExpectedInline( str(ep.graph_module.code).strip(), """\ def forward(self, x): foo_functional = torch.ops.testlib.foo_functional.default(x); x = None cos = torch.ops.aten.cos.default(foo_functional) auto_functionalized = torch.ops.higher_order.auto_functionalized(torch.ops.testlib.foo.default, x = foo_functional, z = cos); foo_functional = cos = None getitem_3 = auto_functionalized[3]; auto_functionalized = None cos_1 = torch.ops.aten.cos.default(getitem_3) return (getitem_3, cos_1)""", ) else: self.assertExpectedInline( str(ep.graph_module.code).strip(), """\ def forward(self, x): foo_functional = torch.ops.testlib.foo_functional.default(x); x = None cos = torch.ops.aten.cos.default(foo_functional) auto_functionalized_v2 = torch.ops.higher_order.auto_functionalized_v2(torch.ops.testlib.foo.default, _x_base_index = 0, _z_base_index = 1, _all_bases = [foo_functional, cos]); foo_functional = cos = None getitem_3 = auto_functionalized_v2[3]; auto_functionalized_v2 = None cos_1 = torch.ops.aten.cos.default(getitem_3) return (getitem_3, cos_1)""", ) @testing.expectedFailureStrictV2 def test_run_decompositions_keep_metadata(self): """Make sure the metadata is kept after exported program run_decompositions.""" @torch.library.custom_op("mylib::add", mutates_args=()) def add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: ... 
        # Fake (meta) kernel so export can trace through mylib::add without a real
        # implementation; it only needs to produce a correctly-shaped empty tensor.
        @torch.library.register_fake("mylib::add")
        def _(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
            return torch.empty_like(x)

        class TestModel(torch.nn.Module):
            def forward(self, x, y):
                return torch.ops.mylib.add(x, y)

        model = TestModel()
        x_example = torch.randn(2, 3)
        y_example = torch.randn(2, 3)
        exported_program = export(model, (x_example, y_example))
        # Stamp a custom metadata field on every node, then verify it survives
        # run_decompositions() unchanged.
        for node in exported_program.graph.nodes:
            node.meta["custom"] = {"my_field": "dummy"}
        for node in exported_program.graph.nodes:
            self.assertEqual(node.meta["custom"]["my_field"], "dummy")

        decomposed_program = exported_program.run_decompositions()
        for node in decomposed_program.graph.nodes:
            self.assertEqual(node.meta["custom"]["my_field"], "dummy")

    @testing.expectedFailureStrictV2
    def test_run_decompositions_keep_tensor_constant_metadata(self):
        """Make sure the metadata of tensor constants are kept after run_decompositions."""

        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                # Plain tensor attribute (lifted as a constant) plus a real parameterized
                # submodule, so both constant and parameter placeholders appear in the graph.
                self.b = torch.ones(3, 3)
                self.linear = torch.nn.Linear(3, 3)

            def forward(self, x):
                return self.b + self.linear(x)

        ep = export(M(), (torch.ones(3, 3),))
        # Same pattern as above: attach custom metadata, then check it is preserved
        # across run_decompositions(), including on constant-tensor placeholders.
        for node in ep.graph.nodes:
            node.meta["custom"] = {"my_field": "dummy"}
        for node in ep.graph.nodes:
            self.assertEqual(node.meta["custom"]["my_field"], "dummy")

        decomp_ep = ep.run_decompositions()
        for node in decomp_ep.graph.nodes:
            self.assertEqual(node.meta["custom"]["my_field"], "dummy")

    def test_export_linear_preserve_dynamic_shape(self):
        # Export a Linear with a dynamic batch dim, keep aten.linear un-decomposed
        # (by deleting it from the decomp table), and check the resulting module
        # still works for batch sizes other than the example's.
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.lin = torch.nn.Linear(4, 4)

            def forward(self, x):
                return self.lin(x)

        mod = M()
        ep = export(
            mod,
            (torch.randn(8, 4),),
            dynamic_shapes={
                "x": {
                    0: Dim("x"),
                }
            },
        )

        table = torch.export.default_decompositions()
        # Removing the entry preserves torch.ops.aten.linear.default in the graph
        # instead of decomposing it into addmm/mm.
        del table[torch.ops.aten.linear.default]
        ep = ep.run_decompositions(table)
        comp_mod = ep.module()
        inp1 = torch.randn(3, 4)
        inp2 = torch.randn(7, 4)
        self.assertTrue(torch.allclose(comp_mod(inp1), mod(inp1)))
        self.assertTrue(torch.allclose(comp_mod(inp2), mod(inp2)))
    # backed_size_oblivious=True makes the symbolic tracer avoid specializing on the
    # size-1 example inputs below, so the exported program also accepts size-2 inputs.
    @torch.fx.experimental._config.patch(backed_size_oblivious=True)
    def test_repeat_interleave(self):
        class M(torch.nn.Module):
            def forward(self, values, batch_sizes):
                return torch.repeat_interleave(
                    torch.arange(
                        values.shape[0],
                    ),
                    batch_sizes,
                )

        inp = (torch.randint(0, 10, (1, 3)), torch.randint(0, 10, (1,)))
        ep = torch.export.export(
            M(), inp, dynamic_shapes=({0: Dim("dim")}, {0: Dim("dim")})
        )
        self.assertTrue(torch.allclose(M()(*inp), ep.module()(*inp)))
        # Re-run with a larger leading dim to confirm the dim really stayed dynamic.
        inp = (torch.randint(0, 10, (2, 3)), torch.randint(0, 10, (2,)))
        self.assertTrue(torch.allclose(M()(*inp), ep.module()(*inp)))

    def test_automatic_dynamic_shapes_simple_equality(self):
        # The next 3 test cases tests for automatic dynamic shapes specs, verifying that automatic dynamism
        # leads to replacement symbols being set for equalities, and inferred relationships being checked
        # with runtime asserts. Check that we specialize to static values when the program says so.
        AUTO, STATIC = Dim.AUTO, Dim.STATIC

        # case 1: direct equality between symbols
        class SimpleEquality(torch.nn.Module):
            def forward(self, x, y, z):
                # all inputs should have shape [s0, s1]
                return x + y + z

        inputs = tuple(torch.randn(6, 3) for _ in range(3))
        # fully dynamic
        self._check_dynamic_shapes_specs_and_shapes(
            SimpleEquality(),
            inputs,
            specs=[
                ((AUTO, AUTO), (AUTO, AUTO), (AUTO, AUTO)),
                [[AUTO, AUTO], [AUTO, AUTO], [AUTO, AUTO]],
                {"x": (AUTO, AUTO), "y": (AUTO, AUTO), "z": (AUTO, AUTO)},
            ],
            passing_shapes=[
                ((4, 4), (4, 4), (4, 4)),
                ((1, 1), (1, 1), (1, 1)),
                ((0, 9), (0, 9), (0, 9)),
            ],
            failing_shapes=[
                ((4, 4), (4, 4), (4, 3)),
                ((4, 4), (5, 4), (4, 5)),
            ],
            test_serdes=True,
        )
        # static s1
        self._check_dynamic_shapes_specs_and_shapes(
            # specifying just one dimension as static should be enough to specialize all s1
            SimpleEquality(),
            inputs,
            specs=[
                [{0: AUTO, 1: AUTO}, {0: AUTO, 1: AUTO}, (AUTO, None)],
                {"x": (AUTO, AUTO), "y": (AUTO, AUTO), "z": (AUTO, None)},
            ],
            passing_shapes=[
                ((4, 3), (4, 3), (4, 3)),
                ((1, 3), (1, 3), (1, 3)),
                ((0, 3), (0, 3), (0, 3)),
            ],
            failing_shapes=[
                ((4, 4), (4, 4), (4, 4)),
                ((1, 1), (1, 1), (1, 1)),
                ((0, 9), (0, 9), (0, 9)),
            ],
            test_serdes=True,
        )
        # fully static
        self._check_dynamic_shapes_specs_and_shapes(
            # this should specialize all
            SimpleEquality(),
            inputs,
            specs=[{"x": (None, AUTO), "y": (AUTO, AUTO), "z": (AUTO, None)}],
            passing_shapes=[
                ((6, 3), (6, 3), (6, 3)),
            ],
            failing_shapes=[
                ((6, 4), (6, 4), (6, 4)),
                ((1, 3), (1, 3), (1, 3)),
                ((0, 9), (0, 9), (0, 9)),
            ],
            test_serdes=True,
        )

    def test_automatic_dynamic_shapes_constant_relation(self):
        AUTO, STATIC = Dim.AUTO, Dim.STATIC

        # case 2: related by constant: s0 + 4 = s1
        class OffBy4(torch.nn.Module):
            def forward(self, x, y):
                return x + y[4:]

        inputs = (torch.randn(6), torch.randn(10))
        # fully dynamic
        self._check_dynamic_shapes_specs_and_shapes(
            OffBy4(),
            inputs,
            specs=[
                ((AUTO,), (AUTO,)),
                {"x": (AUTO,), "y": (AUTO,)},
            ],
            passing_shapes=[
                ((10,), (14,)),
                ((3,), (7,)),
                ((2,), (6,)),
            ],
            failing_shapes=[
                ((10,), (13,)),
            ],
            test_serdes=True,
        )
        # static s1 should specialize s0
        self._check_dynamic_shapes_specs_and_shapes(
            OffBy4(),
            inputs,
            specs=[
                {"x": (AUTO,), "y": (None,)},
            ],
            passing_shapes=[
                ((6,), (10,)),
            ],
            failing_shapes=[
                ((10,), (14,)),
                ((3,), (7,)),
                ((2,), (6,)),
            ],
            test_serdes=True,
        )

    def test_automatic_dynamic_shapes_linear_relation(self):
        AUTO, STATIC = Dim.AUTO, Dim.STATIC

        # case 3: linear relation
        class LinearRel(torch.nn.Module):
            def forward(self, x, y):
                # x: [s0], y: [s1]
                # relation seems to be (s0 + 2) // 4 == s1
                return x[1::4] + y

        inputs = (torch.randn(21), torch.randn(5))

        # fully dynamic
        self._check_dynamic_shapes_specs_and_shapes(
            LinearRel(),
            inputs,
            specs=[
                ((AUTO,), (AUTO,)),
                {"x": (AUTO,), "y": (AUTO,)},
            ],
            passing_shapes=[
                ((33,), (8,)),
                ((32,), (8,)),
                ((31,), (8,)),
                ((30,), (8,)),
            ],
            failing_shapes=[
                ((34,), (8,)),
                ((22,), (5,)),
            ],
            test_serdes=False,
        )
        # static s1 shouldn't actually specialize s0 (guard: (s0 + 2) // 4 == 5)
        self._check_dynamic_shapes_specs_and_shapes(
            LinearRel(),
            inputs,
            specs=[
                ((AUTO,), None),
                {"x": (AUTO,), "y": None},
            ],
            passing_shapes=[
                ((21,), (5,)),
                ((20,), (5,)),
                ((19,), (5,)),
                ((18,), (5,)),
            ],
            failing_shapes=[
                ((33,), (8,)),
            ],
            test_serdes=False,
        )
        # but static s0 will definitely specialize s1 (guard: (21 + 2) // 4 == s1 -> 5 == s1)
        self._check_dynamic_shapes_specs_and_shapes(
            LinearRel(),
            inputs,
            specs=[
                (None, (AUTO,)),
            ],
            passing_shapes=[
                ((21,), (5,)),
            ],
            failing_shapes=[
                ((22,), (5,)),
            ],
            test_serdes=True,
        )

    def test_preserve_annotation(self):
        # Checks that fx_traceback.annotate() regions (nested and sequential) end up
        # as "custom" metadata on the corresponding nodes after export.
        class M(torch.nn.Module):
            def forward(self, x):
                with fx_traceback.annotate({"pp_stage": 0}):
                    with fx_traceback.annotate({"fdsp_bucket": 0}):
                        x = x + 1
                    x = x - 2
                    with fx_traceback.annotate({"cuda_stream": 2, "fsdp_bucket": 1}):
                        x = x * 2
                x = x / 3
                return x

        m = M()

        # preserve_node_meta() is required for the annotations to survive tracing.
        with fx_traceback.preserve_node_meta():
            ep = export(m, (torch.randn(10),))

        # Each arithmetic op maps 1:1 to an annotation scope above; div is outside
        # all annotate() blocks, so it may carry no "custom" metadata at all.
        for node in ep.graph.nodes:
            if node.op in ("placeholder", "output"):
                continue
            if node.target == torch.ops.aten.add.Tensor:
                self.assertTrue(node.meta["custom"], {"pp_stage": 0, "fdsp_bucket": 0})
            elif node.target == torch.ops.aten.sub.Tensor:
                self.assertTrue(node.meta["custom"], {"pp_stage": 0})
            elif node.target == torch.ops.aten.mul.Tensor:
                self.assertTrue(
                    node.meta["custom"],
                    {"pp_stage": 0, "cuda_stream": 2, "fsdp_bucket": 1},
                )
            elif node.target == torch.ops.aten.div.Tensor:
                if "custom" in node.meta:
                    self.assertTrue(node.meta["custom"], {})
            else:
                raise AssertionError(f"Node not checked: {node}, {node.target}")

    def test_dynamic_shapes_serdes_generic(self):
        from torch._export.serde.dynamic_shapes import (
            _dump_dynamic_shapes,
            _load_dynamic_shapes,
        )

        class Foo(torch.nn.Module):
            def forward(self, a, b, c, d):
                if d == "hello":
                    x = a[0] + a[1][1:]
                    b = torch.cat([b, b], dim=0).reshape([-1, 1])
                    return x + b, c * 2

        # test de/serialization on some generic specs
        # dx and dy are derived dims of the root dim dz: dx = 2*dz, dy = 2*dz + 1.
        dz = Dim("dz", min=4, max=16)
        dx = 2 * dz
        dy = dx + 1
        inputs = (
            [
                torch.randn(8, 4),
                torch.randn(9, 4),
            ],
            torch.randn(4),
            torch.randn(4, 4),
            "hello",
        )
        dynamic_shapes = {
            "a": [
                (dx, 4),
                (dy, 4),
            ],
            "b": (dz,),
            "c": None,
            "d": None,
        }
        ep = export(Foo(), inputs, dynamic_shapes=dynamic_shapes)
        self._check_dynamic_shapes_specs_and_shapes(
            Foo(),
            inputs,
            [dynamic_shapes],
            [
                ([(16, 4), (17, 4)], (8,), (4, 4), "hello"),
                ([(24, 4), (25, 4)], (12,), (4, 4), "hello"),
            ],
            [
                ([(16, 4), (17, 4)], (8,), (5, 5), "hello"),
            ],
            test_serdes=True,
        )
        self.assertExpectedInline(
            _dump_dynamic_shapes(dynamic_shapes, inputs),
            """DynamicShapesSpec(dynamic_shapes=([['2*dz', 4], ['2*dz + 1', 4]], ['dz'], ['_DimHint.STATIC', '_DimHint.STATIC'], None), dims={'dz': RootDim(min=4, max=16, derived=['2*dz', '2*dz + 1'])})""",
        )
        self.assertExpectedInline(
            _dump_dynamic_shapes(dynamic_shapes, inputs, to_dict=True),
            """{'dynamic_shapes': ([['2*dz', 4], ['2*dz + 1', 4]], ['dz'], ['_DimHint.STATIC', '_DimHint.STATIC'], None), 'dims': {'dz': {'min': 4, 'max': 16, 'derived': ['2*dz', '2*dz + 1']}}}""",
        )
        # Round-trip: loading the dumped spec must reconstruct the derived dims with
        # the same root Dim object relationship.
        ((dx, _), (dy, _)), (dz,), (_, _), _ = _load_dynamic_shapes(
            _dump_dynamic_shapes(dynamic_shapes, inputs)
        )
        self.assertEqual(dx.root, dz)
        self.assertEqual(dy.root, dz)

    def test_dynamic_shapes_serdes_various(self):
        # serialization for dataclass inputs, Dim.AUTO/STATIC, and kwargs
        from torch._export.serde.dynamic_shapes import (
            _dump_dynamic_shapes,
            _load_dynamic_shapes,
        )

        auto, static = Dim.AUTO, Dim.STATIC

        @dataclass
        class Input:
            a: Tensor
            b: Tensor

        torch.export.register_dataclass(
            Input,
            serialized_type_name="test_dynamic_shapes_serdes_various.Input",
        )

        class Foo(torch.nn.Module):
            def forward(self, x, y, z):
                return x - torch.randn(4), y.a + y.b + z[1:]

        args = (torch.randn(4, 4),)
        kwargs = {
            "y": Input(a=torch.randn(8, 8), b=torch.randn(8, 8)),
            "z": torch.randn(9, 8),
        }
        dynamic_shapes = {
            "x": (auto, static),
            "y": [(auto, auto), (auto, auto)],
            "z": (auto, 8),
        }

        # dump dynamic_shapes
        self.assertExpectedInline(
            _dump_dynamic_shapes(dynamic_shapes, args, kwargs),
            """DynamicShapesSpec(dynamic_shapes=(['_DimHint.AUTO', '_DimHint.STATIC'], [['_DimHint.AUTO', '_DimHint.AUTO'], ['_DimHint.AUTO', '_DimHint.AUTO']], ['_DimHint.AUTO', 8]), dims={})""",
        )
        self.assertExpectedInline(
            _dump_dynamic_shapes(dynamic_shapes, args, kwargs, to_dict=True),
            """{'dynamic_shapes': (['_DimHint.AUTO', '_DimHint.STATIC'], [['_DimHint.AUTO', '_DimHint.AUTO'], ['_DimHint.AUTO', '_DimHint.AUTO']], ['_DimHint.AUTO', 8]), 'dims': {}}""",
        )

    def test_dynamic_shapes_serdes_user_errors(self):
        # check error messages for dynamic shapes de/serialization
        from torch._export.serde.dynamic_shapes import (
            _dump_dynamic_shapes,
            _load_dynamic_shapes,
            DynamicShapesSpec,
            RootDim,
        )
        from torch._export.serde.serialize import _dataclass_to_dict

        # this stuff should be well tested in `test_mismatched_dynamic_shapes`
        with self.assertRaisesRegex(
            torch._dynamo.exc.UserError,
            re.escape(
                "Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs[0]['k']` "
                "is a <class 'list'>, but `dynamic_shapes[0]['k']` is a <class 'tuple'>"
            ),
        ):
            dynamic_shapes = {"x": {"k": (Dim("dx"), Dim("dy"))}}
            _dump_dynamic_shapes(dynamic_shapes, ({"k": [torch.randn(4, 4)]},))

        # loading with from_dict=True/False
        spec = DynamicShapesSpec(
            dynamic_shapes=[["dx"]],
            dims={"dx": RootDim(min=4, max=16, derived=[])},
        )
        spec_dict = _dataclass_to_dict(spec)
        # from_dict=True rejects a dataclass; from_dict=False rejects a plain dict.
        with self.assertRaisesRegex(
            torch._dynamo.exc.UserError,
            re.escape(
                "With from_dict=True, expected `spec` to be a dict, "
                "got <class 'torch._export.serde.dynamic_shapes.DynamicShapesSpec'>"
            ),
        ):
            _load_dynamic_shapes(spec, from_dict=True)

        with self.assertRaisesRegex(
            torch._dynamo.exc.UserError,
            re.escape("Expected `spec` to be a DynamicShapesSpec, got <class 'dict'>"),
        ):
            _load_dynamic_shapes(spec_dict, from_dict=False)

        self.assertExpectedInline(
            _load_dynamic_shapes(spec, from_dict=False),
            """[[Dim('dx', min=4, max=16)]]""",
        )

        # check incorrect info in dims
        with self.assertRaisesRegex(
            torch._dynamo.exc.UserError,
            re.escape(
                "Expected dims in `spec['dims']` to map `min` to an int, got dx: None"
            ),
        ):
            spec = {
                "dynamic_shapes": [["dx"]],
                "dims": {
                    "dx": {
                        "min": None,
                        "max": 4,
                        "derived": [],
                    },
                },
            }
            _load_dynamic_shapes(spec, from_dict=True)

        with self.assertRaisesRegex(
            torch._dynamo.exc.UserError,
            re.escape(
                "Expected dims in `spec['dynamic_shapes']` to be tracked in `spec['dims']`, "
                "got dx which is not in dict_keys(['dy'])"
            ),
        ):
            spec = {
                "dynamic_shapes": [["dx"]],
                "dims": {
                    "dy": {
                        "min": 2,
                        "max": 4,
                        "derived": [],
                    },
                },
            }
            _load_dynamic_shapes(spec, from_dict=True)

        with self.assertRaisesRegex(
            torch._dynamo.exc.UserError,
            re.escape(
                "Expected derived expressions to be linear expressions, got dx**2 + 4"
            ),
        ):
            spec = {
                "dynamic_shapes": [["dx"]],
                "dims": {
                    "dx": {
                        "min": 2,
                        "max": 4,
                        "derived": ["dx**2 + 4"],
                    },
                },
            }
            _load_dynamic_shapes(spec, from_dict=True)

    # Previously export run_decomp would dispatch
    # sdpa to math backend which doesn't guarantee
    # to return contiguous tensor. As a result, downstream
    # view op would fail. In eager (or normal export), sdpa
    # decomps to flash_attention which has correct handling
    # for non-contiguous output. Since in normal export, we
    # dispatch to flash_attention, we also force run_decomp
    # to follow flash_attention.
def test_attention(self): class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.embed_dim = 768 self.num_heads = 12 self.dropout = 0.0 self.batch_first = True self.self_attention = torch.nn.MultiheadAttention( self.embed_dim, self.num_heads, dropout=self.dropout, batch_first=self.batch_first, ) def forward(self, input1: torch.Tensor): x, _ = self.self_attention(input1, input1, input1, need_weights=False) return x inps = (torch.randn(1, 224, 768, device="cpu"),) export(Foo(), inps) def test_dim_dynamic(self): dynamic = Dim.DYNAMIC # dynamic should infer equalities and relations class Relations(torch.nn.Module): def forward(self, u, w, x, y, z): a = u[1:] + w + x # s0 == s1 + 1 == s2 + 1 b = y.flatten() + z # s2*s3 == s4 return a, b inputs = ( torch.randn(5), torch.randn(4), torch.randn(4), torch.randn(4, 4), torch.randn(16), ) ep = export( Relations(), inputs, dynamic_shapes={ "u": (dynamic,), "w": (dynamic,), "x": (dynamic,), "y": (dynamic, dynamic), "z": (dynamic,), }, ) ep.module()( torch.randn(6), torch.randn(5), torch.randn(5), torch.randn(7, 8), torch.randn(56), ) # dynamic should complain when force specialized class Specialize(torch.nn.Module): def forward(self, x): torch._check(x.shape[0] == 4) return x + 2 with self.assertRaisesRegex( torch._dynamo.exc.UserError, r"You marked.*but your code specialized it to be a constant.*" r"If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO", ): ep = export( Specialize(), (torch.randn(4, 8),), dynamic_shapes={ "x": (dynamic, dynamic), }, ) # dynamic should handle complex guards in the same way as auto class ModConstraint(torch.nn.Module): def forward(self, x: torch.Tensor) -> torch.Tensor: return x.view(x.shape[0] - 1, -1) for private_api in (True, False): if private_api: ep = torch.export.export( ModConstraint(), (torch.randn(3, 4),), dynamic_shapes={"x": (dynamic, dynamic)}, prefer_deferred_runtime_asserts_over_guards=True, ) else: ep = export( ModConstraint(), 
(torch.randn(3, 4),), dynamic_shapes={"x": (dynamic, dynamic)}, ) ep.module()(torch.randn(5, 8)) num_asserts = [ node.target == torch.ops.aten._assert_scalar.default for node in ep.graph.nodes ].count(True) if private_api: self.assertEqual(num_asserts, 6) with self.assertRaisesRegex( RuntimeError, r"Runtime assertion failed for expression Eq\(Mod\(s27\*s77, s77 - 1\), 0\)", ): ep.module()(torch.randn(4, 2)) else: # no runtime assert in exported module self.assertEqual(num_asserts, 0) # but it fails anyway with wrong inputs with self.assertRaisesRegex( AssertionError, escape( "Guard failed: x.size()[1] * x.size()[0] % (-1 + x.size()[0]) == 0" ), ): # expected 3*..., but got 8 ep.module()(torch.randn(4, 2)) @testing.expectedFailureStrictV2 def test_hints_wrapper(self): strict = True class M(torch.nn.Module): def __init__(self) -> None: super().__init__() def forward(self, x, y): x = x + y def inner_body_fn(x, y): x = torch.relu(x) x = x + y return x def outer_body_fn(x, y): x = hints_wrapper( inner_body_fn, (x, y), {}, hints={"inner_body": True} ) x = torch.abs(x) return x res = hints_wrapper( outer_body_fn, (x, y), {}, hints={"outer_body": True} ) return res x = torch.randn(2, 4) y = torch.ones(4) ep_for_training = torch.export.export(M(), (x, y), strict=strict) self.assertExpectedInline( normalize_gm( ep_for_training.graph_module.print_readable(print_output=False) ), """\
GraphModule
python
django__django
tests/postgres_tests/test_hstore.py
{ "start": 12734, "end": 15305 }
class ____(PostgreSQLSimpleTestCase): def test_valid(self): field = forms.HStoreField() value = field.clean('{"a": "b"}') self.assertEqual(value, {"a": "b"}) def test_invalid_json(self): field = forms.HStoreField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean('{"a": "b"') self.assertEqual(cm.exception.messages[0], "Could not load JSON data.") self.assertEqual(cm.exception.code, "invalid_json") def test_non_dict_json(self): field = forms.HStoreField() msg = "Input must be a JSON dictionary." with self.assertRaisesMessage(exceptions.ValidationError, msg) as cm: field.clean('["a", "b", 1]') self.assertEqual(cm.exception.code, "invalid_format") def test_not_string_values(self): field = forms.HStoreField() value = field.clean('{"a": 1}') self.assertEqual(value, {"a": "1"}) def test_none_value(self): field = forms.HStoreField() value = field.clean('{"a": null}') self.assertEqual(value, {"a": None}) def test_empty(self): field = forms.HStoreField(required=False) value = field.clean("") self.assertEqual(value, {}) def test_model_field_formfield(self): model_field = HStoreField() form_field = model_field.formfield() self.assertIsInstance(form_field, forms.HStoreField) def test_field_has_changed(self): class HStoreFormTest(Form): f1 = forms.HStoreField() form_w_hstore = HStoreFormTest() self.assertFalse(form_w_hstore.has_changed()) form_w_hstore = HStoreFormTest({"f1": '{"a": 1}'}) self.assertTrue(form_w_hstore.has_changed()) form_w_hstore = HStoreFormTest({"f1": '{"a": 1}'}, initial={"f1": '{"a": 1}'}) self.assertFalse(form_w_hstore.has_changed()) form_w_hstore = HStoreFormTest({"f1": '{"a": 2}'}, initial={"f1": '{"a": 1}'}) self.assertTrue(form_w_hstore.has_changed()) form_w_hstore = HStoreFormTest({"f1": '{"a": 1}'}, initial={"f1": {"a": 1}}) self.assertFalse(form_w_hstore.has_changed()) form_w_hstore = HStoreFormTest({"f1": '{"a": 2}'}, initial={"f1": {"a": 1}}) self.assertTrue(form_w_hstore.has_changed()) def test_prepare_value(self): field = 
forms.HStoreField() self.assertEqual( field.prepare_value({"aira_maplayer": "Αρδευτικό δίκτυο"}), '{"aira_maplayer": "Αρδευτικό δίκτυο"}', )
TestFormField
python
gevent__gevent
src/gevent/_hub_primitives.py
{ "start": 1338, "end": 4577 }
class ____(SwitchOutGreenletWithLoop): # pylint:disable=undefined-variable def wait(self, watcher): """ Wait until the *watcher* (which must not be started) is ready. The current greenlet will be unscheduled during this time. """ waiter = Waiter(self) # pylint:disable=undefined-variable watcher.start(waiter.switch, waiter) try: result = waiter.get() if result is not waiter: raise InvalidSwitchError( 'Invalid switch into %s: got %r (expected %r; waiting on %r with %r)' % ( getcurrent(), # pylint:disable=undefined-variable result, waiter, self, watcher ) ) finally: watcher.stop() def cancel_waits_close_and_then(self, watchers, exc_kind, then, *then_args): deferred = [] for watcher in watchers: if watcher is None: continue if watcher.callback is None: watcher.close() else: deferred.append(watcher) if deferred: self.loop.run_callback(self._cancel_waits_then, deferred, exc_kind, then, then_args) else: then(*then_args) def _cancel_waits_then(self, watchers, exc_kind, then, then_args): for watcher in watchers: self._cancel_wait(watcher, exc_kind, True) then(*then_args) def cancel_wait(self, watcher, error, close_watcher=False): """ Cancel an in-progress call to :meth:`wait` by throwing the given *error* in the waiting greenlet. .. versionchanged:: 1.3a1 Added the *close_watcher* parameter. If true, the watcher will be closed after the exception is thrown. The watcher should then be discarded. Closing the watcher is important to release native resources. .. versionchanged:: 1.3a2 Allow the *watcher* to be ``None``. No action is taken in that case. """ if watcher is None: # Presumably already closed. # See https://github.com/gevent/gevent/issues/1089 return if watcher.callback is not None: self.loop.run_callback(self._cancel_wait, watcher, error, close_watcher) return if close_watcher: watcher.close() def _cancel_wait(self, watcher, error, close_watcher): # Running in the hub. 
Switches to the waiting greenlet to raise # the error; assuming the waiting greenlet dies, switches back # to this (because the waiting greenlet's parent is the hub.) # We have to check again to see if it was still active by the time # our callback actually runs. active = watcher.active cb = watcher.callback if close_watcher: watcher.close() if active: # The callback should be greenlet.switch(). It may or may not be None. glet = getattr(cb, '__self__', None) if glet is not None: glet.throw(error)
WaitOperationsGreenlet
python
huggingface__transformers
tests/models/aya_vision/test_processing_aya_vision.py
{ "start": 928, "end": 5517 }
class ____(ProcessorTesterMixin, unittest.TestCase): processor_class = AyaVisionProcessor model_id = "hf-internal-testing/namespace-CohereForAI-repo_name_aya-vision-8b" @classmethod def _setup_test_attributes(cls, processor): cls.image_token = processor.image_token @classmethod def _setup_tokenizer(cls): tokenizer_class = cls._get_component_class_from_processor("tokenizer") return tokenizer_class.from_pretrained(cls.model_id, padding_side="left") @classmethod def _setup_image_processor(cls): image_processor_class = cls._get_component_class_from_processor("image_processor") return image_processor_class( do_resize=True, size={"height": 20, "width": 20}, max_patches=2, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.485, 0.456, 0.406], image_std=[0.229, 0.224, 0.225], do_convert_rgb=True, ) @staticmethod def prepare_processor_dict(): return {"patch_size": 10, "img_size": 20} @unittest.skip(reason="Text needs image tokens, tested in other tests") def test_processor_with_multiple_inputs(self): pass def test_get_num_vision_tokens(self): "Tests general functionality of the helper used internally in vLLM" processor = self.get_processor() output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)]) self.assertTrue("num_image_tokens" in output) self.assertEqual(len(output["num_image_tokens"]), 3) self.assertTrue("num_image_patches" in output) self.assertEqual(len(output["num_image_patches"]), 3) @require_torch def test_process_interleaved_images_videos(self): processor = self.get_processor() messages = [ [ { "role": "user", "content": [ { "type": "image", "url": url_to_local_path( "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" ), }, { "type": "image", "url": url_to_local_path( "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg" ), }, {"type": "text", "text": "What are the differences between these two 
images?"}, ], }, ], [ { "role": "user", "content": [ { "type": "image", "url": url_to_local_path("https://llava-vl.github.io/static/images/view.jpg"), }, {"type": "text", "text": "Write a haiku for this image"}, ], } ], ] inputs_batched = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", padding=True, ) # Process non batched inputs to check if the pixel_values and input_ids are reconstructed in the correct order when batched together images_patches_index = 0 for i, message in enumerate(messages): inputs = processor.apply_chat_template( message, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", padding=True, ) # We slice with [-inputs["input_ids"].shape[1] :] as the input_ids are left padded torch.testing.assert_close( inputs["input_ids"][0], inputs_batched["input_ids"][i][-inputs["input_ids"].shape[1] :] ) torch.testing.assert_close( inputs["pixel_values"], inputs_batched["pixel_values"][ images_patches_index : images_patches_index + inputs["pixel_values"].shape[0] ], ) images_patches_index += inputs["pixel_values"].shape[0]
AyaVisionProcessorTest
python
pytorch__pytorch
torch/_dynamo/variables/misc.py
{ "start": 28227, "end": 28606 }
class ____(VariableTracker): def __init__(self, **kwargs) -> None: super().__init__(**kwargs) def produce_trampoline_autograd_apply(fn_cls): def trampoline_autograd_apply(*args, **kwargs): return fn_cls.apply(*args, **kwargs) trampoline_autograd_apply._origin = produce_trampoline_autograd_apply return trampoline_autograd_apply
NewGlobalVariable
python
django__django
tests/auth_tests/test_views.py
{ "start": 40238, "end": 41054 }
class ____(AuthViewsTestCase): """Tests for settings.LOGIN_REDIRECT_URL.""" def assertLoginRedirectURLEqual(self, url): response = self.login() self.assertRedirects(response, url, fetch_redirect_response=False) def test_default(self): self.assertLoginRedirectURLEqual("/accounts/profile/") @override_settings(LOGIN_REDIRECT_URL="/custom/") def test_custom(self): self.assertLoginRedirectURLEqual("/custom/") @override_settings(LOGIN_REDIRECT_URL="password_reset") def test_named(self): self.assertLoginRedirectURLEqual("/password_reset/") @override_settings(LOGIN_REDIRECT_URL="http://remote.example.com/welcome/") def test_remote(self): self.assertLoginRedirectURLEqual("http://remote.example.com/welcome/")
LoginRedirectUrlTest
python
pytorch__pytorch
torch/distributions/relaxed_categorical.py
{ "start": 3825, "end": 5752 }
class ____(TransformedDistribution): r""" Creates a RelaxedOneHotCategorical distribution parametrized by :attr:`temperature`, and either :attr:`probs` or :attr:`logits`. This is a relaxed version of the :class:`OneHotCategorical` distribution, so its samples are on simplex, and are reparametrizable. Example:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> m = RelaxedOneHotCategorical(torch.tensor([2.2]), ... torch.tensor([0.1, 0.2, 0.3, 0.4])) >>> m.sample() tensor([ 0.1294, 0.2324, 0.3859, 0.2523]) Args: temperature (Tensor): relaxation temperature probs (Tensor): event probabilities logits (Tensor): unnormalized log probability for each event """ arg_constraints = {"probs": constraints.simplex, "logits": constraints.real_vector} # pyrefly: ignore [bad-override] support = constraints.simplex has_rsample = True # pyrefly: ignore [bad-override] base_dist: ExpRelaxedCategorical def __init__( self, temperature: Tensor, probs: Optional[Tensor] = None, logits: Optional[Tensor] = None, validate_args: Optional[bool] = None, ) -> None: base_dist = ExpRelaxedCategorical( temperature, probs, logits, validate_args=validate_args ) super().__init__(base_dist, ExpTransform(), validate_args=validate_args) def expand(self, batch_shape, _instance=None): new = self._get_checked_instance(RelaxedOneHotCategorical, _instance) return super().expand(batch_shape, _instance=new) @property def temperature(self) -> Tensor: return self.base_dist.temperature @property def logits(self) -> Tensor: return self.base_dist.logits @property def probs(self) -> Tensor: return self.base_dist.probs
RelaxedOneHotCategorical
python
encode__django-rest-framework
tests/test_fields.py
{ "start": 39859, "end": 40995 }
class ____(FieldValues): """ Valid and invalid values for `DecimalField` with min and max limits. """ valid_inputs = { '10.0': Decimal('10.0'), '20.0': Decimal('20.0'), } invalid_inputs = { '9.9': ['Ensure this value is greater than or equal to 10.0.'], '20.1': ['Ensure this value is less than or equal to 20.0.'], } outputs = {} field = serializers.DecimalField( max_digits=3, decimal_places=1, min_value=10.0, max_value=20.0 ) def test_warning_when_not_decimal_types(self, caplog): with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') serializers.DecimalField( max_digits=3, decimal_places=1, min_value=10.0, max_value=20.0 ) assert len(w) == 2 assert all(issubclass(i.category, UserWarning) for i in w) assert 'max_value should be an integer or Decimal instance' in str(w[0].message) assert 'min_value should be an integer or Decimal instance' in str(w[1].message)
TestMinMaxDecimalField
python
getsentry__sentry
tests/sentry/auth/test_access.py
{ "start": 3237, "end": 23861 }
class ____(AccessFactoryTestCase): def test_no_access(self) -> None: organization = self.create_organization() team = self.create_team(organization=organization) project = self.create_project(organization=organization, teams=[team]) user = self.create_user() request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert not result.sso_is_valid assert not result.requires_sso assert not result.scopes assert not result.has_team_access(team) assert not result.has_team_scope(team, "project:read") assert not result.has_project_access(project) assert not result.has_projects_access([project]) assert not result.has_project_scope(project, "project:read") assert not result.has_project_membership(project) assert not result.permissions def test_no_deleted_projects(self) -> None: user = self.create_user() organization = self.create_organization(owner=self.user) team = self.create_team(organization=organization) self.create_member(organization=organization, user=user, role="owner", teams=[team]) deleted_project = self.create_project( organization=organization, status=ObjectStatus.PENDING_DELETION, teams=[team] ) request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert result.has_project_access(deleted_project) is False assert result.has_project_membership(deleted_project) is False assert len(result.project_ids_with_team_membership) == 0 def test_no_deleted_teams(self) -> None: user = self.create_user() organization = self.create_organization(owner=self.user) team = self.create_team(organization=organization) deleted_team = self.create_team( organization=organization, status=TeamStatus.PENDING_DELETION ) self.create_member( organization=organization, user=user, role="owner", teams=[team, deleted_team] ) request = self.make_request(user=user) results = [self.from_user(user, organization), 
self.from_request(request, organization)] for result in results: assert result.has_team_access(team) is True assert result.has_team_access(deleted_team) is False assert result.team_ids_with_membership == frozenset({team.id}) def test_unique_projects(self) -> None: user = self.create_user() organization = self.create_organization(owner=self.user) team = self.create_team(organization=organization) other_team = self.create_team(organization=organization) self.create_member( organization=organization, user=user, role="owner", teams=[team, other_team] ) project = self.create_project(organization=organization, teams=[team, other_team]) request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert result.has_project_access(project) assert len(result.project_ids_with_team_membership) == 1 def test_mixed_access(self) -> None: user = self.create_user() organization = self.create_organization(flags=0) # disable default allow_joinleave team = self.create_team(organization=organization) team_no_access = self.create_team(organization=organization) project = self.create_project(organization=organization, teams=[team]) project_no_access = self.create_project(organization=organization, teams=[team_no_access]) self.create_member(organization=organization, user=user, teams=[team]) request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert result.has_project_access(project) assert not result.has_project_access(project_no_access) assert not result.has_projects_access([project, project_no_access]) def test_owner_all_teams(self) -> None: user = self.create_user() organization = self.create_organization(owner=self.user) member = self.create_member(organization=organization, user=user, role="owner") team = self.create_team(organization=organization) project = 
self.create_project(organization=organization, teams=[team]) request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert result.sso_is_valid assert not result.requires_sso assert result.scopes == member.get_scopes() assert result.has_team_access(team) assert result.has_team_scope(team, "project:read") assert result.has_project_access(project) assert result.has_projects_access([project]) assert result.has_project_scope(project, "project:read") assert result.has_any_project_scope(project, ["project:read", "project:admin"]) # owners should have access but not membership assert result.has_project_membership(project) is False def test_member_no_teams_closed_membership(self) -> None: user = self.create_user() organization = self.create_organization( owner=self.user, flags=0 # disable default allow_joinleave ) member = self.create_member(organization=organization, user=user, role="member") team = self.create_team(organization=organization) project = self.create_project(organization=organization, teams=[team]) request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert result.sso_is_valid assert not result.requires_sso assert result.scopes == member.get_scopes() assert not result.has_team_access(team) assert not result.has_team_scope(team, "project:read") assert not result.has_project_access(project) assert not result.has_projects_access([project]) assert not result.has_project_scope(project, "project:read") assert not result.has_any_project_scope(project, ["project:read", "project:admin"]) assert not result.has_project_membership(project) def test_member_no_teams_open_membership(self) -> None: user = self.create_user() organization = self.create_organization( owner=self.user, flags=Organization.flags.allow_joinleave ) member = self.create_member(organization=organization, 
user=user, role="member", teams=()) team = self.create_team(organization=organization) project = self.create_project(organization=organization, teams=[team]) request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert result.sso_is_valid assert not result.requires_sso assert result.scopes == member.get_scopes() assert result.has_team_access(team) assert result.has_team_scope(team, "project:read") assert result.has_project_access(project) assert result.has_projects_access([project]) assert result.has_project_scope(project, "project:read") assert not result.has_project_scope(project, "project:write") assert result.has_any_project_scope(project, ["project:read", "project:write"]) assert not result.has_any_project_scope(project, ["project:write", "project:admin"]) assert not result.has_project_membership(project) def test_team_restricted_org_member_access(self) -> None: user = self.create_user() organization = self.create_organization() team = self.create_team(organization=organization) project = self.create_project(organization=organization, teams=[team]) member = self.create_member(organization=organization, user=user, teams=[team]) request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert result.sso_is_valid assert not result.requires_sso assert result.scopes == member.get_scopes() assert result.has_team_access(team) assert result.has_team_scope(team, "project:read") assert result.has_project_access(project) assert result.has_projects_access([project]) assert result.has_project_scope(project, "project:read") assert not result.has_project_scope(project, "project:write") assert result.has_any_project_scope(project, ["project:read", "project:write"]) assert not result.has_any_project_scope(project, ["project:write", "project:admin"]) assert 
result.has_project_membership(project) @with_feature("organizations:team-roles") def test_has_project_scope_from_team_role(self) -> None: organization = self.create_organization() team = self.create_team(organization=organization) project = self.create_project(organization=organization, teams=[team]) team_other = self.create_team(organization=organization) project_other = self.create_project(organization=organization, teams=[team_other]) # Team Contributor user = self.create_user() member = self.create_member(organization=organization, user=user) self.create_team_membership(team, member, role="contributor") request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: # Does not have scopes from org-role assert not result.has_scope("team:admin") assert not result.has_scope("team:write") assert not result.has_scope("project:admin") assert not result.has_scope("project:write") # Has scopes from team-role assert not result.has_team_scope(team, "team:admin") assert not result.has_team_scope(team, "team:write") assert result.has_team_scope(team, "team:read") assert not result.has_project_scope(project, "project:admin") assert not result.has_project_scope(project, "project:write") assert result.has_project_scope(project, "project:read") # Team Admin user = self.create_user() member = self.create_member(organization=organization, user=user) self.create_team_membership(team, member, role="admin") request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: # Does not have scopes from org-role assert not result.has_scope("team:admin") assert not result.has_scope("team:write") assert not result.has_scope("project:admin") assert not result.has_scope("project:write") # Has scopes from team-role assert result.has_team_scope(team, "team:admin") assert result.has_team_scope(team, "team:write") assert 
result.has_team_scope(team, "team:read") assert result.has_project_scope(project, "project:admin") assert result.has_project_scope(project, "project:write") assert result.has_project_scope(project, "project:read") # Does not have scope from other team assert not result.has_team_scope(team_other, "team:admin") assert not result.has_team_scope(team_other, "team:write") assert result.has_team_scope(team_other, "team:read") assert not result.has_project_scope(project_other, "project:admin") assert not result.has_project_scope(project_other, "project:write") assert result.has_project_scope(project_other, "project:read") def test_unlinked_sso(self) -> None: user = self.create_user() organization = self.create_organization(owner=user) self.create_team(organization=organization) ap = self.create_auth_provider(organization=organization, provider="dummy") self.create_auth_identity(auth_provider=ap, user=user) request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert not result.sso_is_valid assert result.requires_sso def test_last_verified_sso(self) -> None: user = self.create_user() organization = self.create_organization(owner=user) ap = self.create_auth_provider(organization=organization, provider="dummy") ai = self.create_auth_identity(auth_provider=ap, user=user) om = organization_service.check_membership_by_id( organization_id=organization.id, user_id=ai.user_id ) assert om setattr(om.flags, "sso:linked", True) organization_service.update_membership_flags(organization_member=om) request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert result.sso_is_valid assert result.requires_sso # If the auth identity has not been updated in awhile, it is not valid. 
with assume_test_silo_mode(SiloMode.CONTROL): ai.update(last_verified=timezone.now() - timedelta(days=10)) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert not result.sso_is_valid assert result.requires_sso # but it is valid if the requires_fresh is False with patch.object(DummyProvider, "requires_refresh", False): results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert result.sso_is_valid assert result.requires_sso def test_unlinked_sso_with_owner_from_team(self) -> None: organization = self.create_organization() ap = self.create_auth_provider(organization=organization, provider="dummy") user = self.create_user() owner_team = self.create_team(organization=organization) self.create_member(organization=organization, user=user, teams=[owner_team]) self.create_auth_identity(auth_provider=ap, user=user) request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert not result.sso_is_valid assert result.requires_sso def test_unlinked_sso_with_no_owners(self) -> None: user = self.create_user() organization = self.create_organization(owner=user) self.create_team(organization=organization) self.create_auth_provider(organization=organization, provider="dummy") request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert not result.sso_is_valid assert not result.requires_sso def test_sso_without_link_requirement(self) -> None: user = self.create_user() organization = self.create_organization(owner=user) self.create_team(organization=organization) self.create_auth_provider( organization=organization, provider="dummy", flags=AuthProvider.flags.allow_unlinked ) request = self.make_request(user=user) results = [self.from_user(user, organization), 
self.from_request(request, organization)] for result in results: assert result.sso_is_valid assert not result.requires_sso def test_anonymous_user(self) -> None: user = self.create_user() anon_user = AnonymousUser() organization = self.create_organization(owner=user) # TODO: make test work with from_request result = self.from_user(anon_user, organization) assert result is access.DEFAULT def test_inactive_user(self) -> None: user = self.create_user(is_active=False) organization = self.create_organization(owner=user) request = self.make_request(user=user) results = [self.from_user(user, organization), self.from_request(request, organization)] for result in results: assert result is access.DEFAULT def test_user_permissions_as_superuser(self) -> None: user = self.create_user(is_superuser=True) self.add_user_permission(user, "test.permission") result = self.from_user(user) assert not result.has_permission("test.permission") result = self.from_user(user, is_superuser=True) assert result.has_permission("test.permission") def test_user_permissions_as_staff(self) -> None: user = self.create_user(is_staff=True) self.add_user_permission(user, "test.permission") result = self.from_user(user) assert not result.has_permission("test.permission") result = self.from_user(user, is_staff=True) assert result.has_permission("test.permission") @with_feature("organizations:team-roles") def test_enforce_upper_bound_scope(self) -> None: organization = self.create_organization() team = self.create_team(organization=organization) project = self.create_project(organization=organization, teams=[team]) team_other = self.create_team(organization=organization) project_other = self.create_project(organization=organization, teams=[team_other]) # Team Admin user = self.create_user() member = self.create_member(organization=organization, user=user) self.create_team_membership(team, member, role="admin") request = self.make_request(user=user) results = [ self.from_user(user, organization, 
scopes=["org:read", "team:admin"]), self.from_request(request, organization, scopes=["org:read", "team:admin"]), ] for result in results: # Does not have scopes from org-role assert not result.has_scope("org:admin") assert not result.has_scope("org:write") assert result.has_scope("org:read") assert not result.has_scope("team:admin") # Org-member do not have team:admin scope assert not result.has_scope("team:read") assert not result.has_scope("team:write") assert not result.has_scope("project:admin") assert not result.has_scope("project:write") assert not result.has_scope("project:read") # Has scopes from team-role assert result.has_team_scope(team, "team:admin") # From being a team-admin assert not result.has_team_scope(team, "team:write") assert not result.has_team_scope(team, "team:read") assert not result.has_project_scope(project, "project:admin") assert not result.has_project_scope(project, "project:write") assert not result.has_project_scope(project, "project:read") # Does not have scope from other team assert not result.has_team_scope(team_other, "team:admin") assert not result.has_team_scope(team_other, "team:write") assert not result.has_team_scope(team_other, "team:read") assert not result.has_project_scope(project_other, "project:admin") assert not result.has_project_scope(project_other, "project:write") assert not result.has_project_scope(project_other, "project:read") @all_silo_test
FromUserTest
python
huggingface__transformers
src/transformers/models/dinov3_convnext/modeling_dinov3_convnext.py
{ "start": 8204, "end": 10115 }
class ____(DINOv3ConvNextPreTrainedModel): def __init__(self, config: DINOv3ConvNextConfig): super().__init__(config) self.config = config self.stages = nn.ModuleList([DINOv3ConvNextStage(config, stage_idx) for stage_idx in range(config.num_stages)]) self.layer_norm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps) # final norm layer self.pool = nn.AdaptiveAvgPool2d(1) self.post_init() @can_return_tuple @auto_docstring def forward( self, pixel_values: torch.FloatTensor, output_hidden_states: Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: hidden_states = pixel_values output_hidden_states = output_hidden_states or self.config.output_hidden_states all_hidden_states = [hidden_states] if output_hidden_states else [] for stage in self.stages: hidden_states = stage(hidden_states) # store intermediate stage outputs if output_hidden_states: all_hidden_states.append(hidden_states) # make global representation, a.k.a [CLS] token pooled_output = self.pool(hidden_states) # (batch_size, channels, height, width) -> (batch_size, height * width, channels) pooled_output = pooled_output.flatten(2).transpose(1, 2) hidden_states = hidden_states.flatten(2).transpose(1, 2) # concat "cls" and "patch tokens" as (batch_size, 1 + height * width, channels) hidden_states = torch.cat([pooled_output, hidden_states], dim=1) hidden_states = self.layer_norm(hidden_states) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=hidden_states, pooler_output=hidden_states[:, 0], hidden_states=tuple(all_hidden_states) if output_hidden_states else None, ) @auto_docstring
DINOv3ConvNextModel
python
huggingface__transformers
src/transformers/models/janus/modeling_janus.py
{ "start": 31450, "end": 31859 }
class ____(nn.Module): def __init__(self, in_channels): super().__init__() self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) def forward(self, hidden_states): hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest") hidden_states = self.conv(hidden_states) return hidden_states
JanusVQVAEConvUpsample
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-opendal/llama_index/readers/opendal/s3/base.py
{ "start": 348, "end": 2363 }
class ____(BaseReader):
    """General reader for any S3 file or directory, backed by OpenDAL."""

    def __init__(
        self,
        bucket: str,
        path: str = "/",
        endpoint: str = "",
        region: str = "",
        access_key_id: str = "",
        secret_access_key: str = "",
        file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
    ) -> None:
        """
        Initialize S3 bucket and path, along with credentials if needed.

        Args:
            bucket (str): the name of your S3 bucket
            path (str): the path of the data. If none is provided,
                this loader will iterate through the entire bucket. If the path
                ends with `/`, this loader will iterate through the entire
                directory. Otherwise, this loader will load the single file.
            endpoint Optional[str]: the endpoint of the S3 service.
            region: Optional[str]: the region of the S3 service.
            access_key_id (Optional[str]): provide AWS access key directly.
            secret_access_key (Optional[str]): provide AWS secret key directly.
            file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
                extension to a BaseReader class that specifies how to convert that file
                to text. See `SimpleDirectoryReader` for more details.
        """
        super().__init__()

        self.path = path
        self.file_extractor = file_extractor

        # opendal service related config.
        self.options = {
            "access_key": access_key_id,
            "secret_key": secret_access_key,
            "endpoint": endpoint,
            "region": region,
            "bucket": bucket,
        }

    def load_data(self) -> List[Document]:
        """Load file(s) from OpenDAL by delegating to the generic OpendalReader."""
        loader = OpendalReader(
            scheme="s3",
            path=self.path,
            file_extractor=self.file_extractor,
            **self.options,
        )

        return loader.load_data()
OpendalS3Reader
python
python-openxml__python-docx
src/docx/oxml/simpletypes.py
{ "start": 12315, "end": 12785 }
class ____(BaseSimpleType):
    """Simple type for a universal measurement string such as "1.5in" or "12pt"."""

    # EMU per unit of measure, keyed by the two-character unit suffix.
    # 914400 EMU == 1 inch. Unknown suffixes raise KeyError, as before.
    _EMU_PER_UNIT = {
        "mm": 36000,
        "cm": 360000,
        "in": 914400,
        "pt": 12700,
        "pc": 152400,
        "pi": 152400,
    }

    @classmethod
    def convert_from_xml(cls, str_value: str) -> Emu:
        """Parse *str_value* ("<number><unit>") and return its value as |Emu|."""
        magnitude = float(str_value[:-2])
        units = str_value[-2:]
        return Emu(int(round(magnitude * cls._EMU_PER_UNIT[units])))
ST_UniversalMeasure
python
HypothesisWorks__hypothesis
hypothesis-python/tests/django/toystore/forms.py
{ "start": 1575, "end": 1688 }
class ____(ReprModelForm):
    """ModelForm exposing every field of the ManyNumerics model."""

    class Meta:
        model = ManyNumerics
        fields = "__all__"
ManyNumericsForm
python
apache__airflow
airflow-ctl/src/airflowctl/api/datamodels/generated.py
{ "start": 9306, "end": 9668 }
class ____(BaseModel):
    """
    DAG Source serializer for responses.
    """

    # NOTE(review): this lives in a generated datamodels module — keep field
    # names and Field titles in sync with the upstream schema.
    content: Annotated[str | None, Field(title="Content")] = None  # raw DAG file text; optional
    dag_id: Annotated[str, Field(title="Dag Id")]
    version_number: Annotated[int | None, Field(title="Version Number")] = None  # optional
    dag_display_name: Annotated[str, Field(title="Dag Display Name")]
DAGSourceResponse
python
sqlalchemy__sqlalchemy
test/orm/declarative/test_abs_import_only.py
{ "start": 471, "end": 2599 }
class ____(
    sqlalchemy.testing.fixtures.TestBase, sqlalchemy.testing.AssertsCompiledSQL
):
    # Regression tests exercising fully-qualified ``sqlalchemy.orm.Mapped``
    # annotations (as opposed to a direct ``Mapped`` import).
    __dialect__ = "default"

    def test_fully_qualified_mapped_name(self, decl_base):
        """test #8853 *again*, as reported in #9335 this failed to be fixed"""

        class Foo(decl_base):
            __tablename__ = "foo"

            id: sqlalchemy.orm.Mapped[int] = sqlalchemy.orm.mapped_column(
                primary_key=True
            )

            data: sqlalchemy.orm.Mapped[int] = sqlalchemy.orm.mapped_column()

            data2: sqlalchemy.orm.Mapped[int]

            data3: orm.Mapped[int]

        self.assert_compile(
            sqlalchemy.select(Foo),
            "SELECT foo.id, foo.data, foo.data2, foo.data3 FROM foo",
        )

    @sqlalchemy.testing.variation(
        "construct", ["Mapped", "WriteOnlyMapped", "DynamicMapped"]
    )
    def test_fully_qualified_writeonly_mapped_name(self, decl_base, construct):
        """further variation in issue #10412"""

        class Foo(decl_base):
            __tablename__ = "foo"

            id: sqlalchemy.orm.Mapped[int] = sqlalchemy.orm.mapped_column(
                primary_key=True
            )

            # Bar is referenced before it is defined; resolution is deferred
            # by the declarative registry.
            if construct.Mapped:
                bars: orm.Mapped[typing.List[Bar]] = orm.relationship()
            elif construct.WriteOnlyMapped:
                bars: orm.WriteOnlyMapped[typing.List[Bar]] = (
                    orm.relationship()
                )
            elif construct.DynamicMapped:
                bars: orm.DynamicMapped[typing.List[Bar]] = orm.relationship()
            else:
                construct.fail()

        class Bar(decl_base):
            __tablename__ = "bar"
            id: sqlalchemy.orm.Mapped[int] = sqlalchemy.orm.mapped_column(
                primary_key=True
            )
            foo_id: sqlalchemy.orm.Mapped[int] = sqlalchemy.orm.mapped_column(
                sqlalchemy.ForeignKey("foo.id")
            )

        self.assert_compile(
            sqlalchemy.select(Foo).join(Foo.bars),
            "SELECT foo.id FROM foo JOIN bar ON foo.id = bar.foo_id",
        )
MappedColumnTest
python
pyca__cryptography
tests/hazmat/primitives/test_ec.py
{ "start": 25248, "end": 29186 }
class ____:
    """Equality, hashing-adjacent comparison, and copy semantics for EC keys/numbers."""

    def test_public_numbers_eq(self):
        pub = ec.EllipticCurvePublicNumbers(1, 2, ec.SECP192R1())
        assert pub == ec.EllipticCurvePublicNumbers(1, 2, ec.SECP192R1())

    def test_public_numbers_ne(self):
        # Inequality must trigger on any differing component: curve, y, x,
        # and on comparison with an unrelated type.
        pub = ec.EllipticCurvePublicNumbers(1, 2, ec.SECP192R1())
        assert pub != ec.EllipticCurvePublicNumbers(1, 2, ec.SECP384R1())
        assert pub != ec.EllipticCurvePublicNumbers(1, 3, ec.SECP192R1())
        assert pub != ec.EllipticCurvePublicNumbers(2, 2, ec.SECP192R1())
        assert pub != object()

    def test_private_numbers_eq(self):
        pub = ec.EllipticCurvePublicNumbers(1, 2, ec.SECP192R1())
        priv = ec.EllipticCurvePrivateNumbers(1, pub)
        assert priv == ec.EllipticCurvePrivateNumbers(
            1, ec.EllipticCurvePublicNumbers(1, 2, ec.SECP192R1())
        )

    def test_private_numbers_ne(self):
        pub = ec.EllipticCurvePublicNumbers(1, 2, ec.SECP192R1())
        priv = ec.EllipticCurvePrivateNumbers(1, pub)
        assert priv != ec.EllipticCurvePrivateNumbers(
            2, ec.EllipticCurvePublicNumbers(1, 2, ec.SECP192R1())
        )
        assert priv != ec.EllipticCurvePrivateNumbers(
            1, ec.EllipticCurvePublicNumbers(2, 2, ec.SECP192R1())
        )
        assert priv != ec.EllipticCurvePrivateNumbers(
            1, ec.EllipticCurvePublicNumbers(1, 3, ec.SECP192R1())
        )
        assert priv != ec.EllipticCurvePrivateNumbers(
            1, ec.EllipticCurvePublicNumbers(1, 2, ec.SECP521R1())
        )
        assert priv != object()

    def test_public_key_equality(self, backend):
        _skip_curve_unsupported(backend, ec.SECP256R1())
        key_bytes = load_vectors_from_file(
            os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
            lambda pemfile: pemfile.read().encode(),
        )
        # Two loads of the same PEM must compare equal; a fresh key must not.
        key1 = serialization.load_pem_private_key(key_bytes, None).public_key()
        key2 = serialization.load_pem_private_key(key_bytes, None).public_key()
        key3 = ec.generate_private_key(ec.SECP256R1()).public_key()
        assert key1 == key2
        assert key1 != key3
        assert key1 != object()
        with pytest.raises(TypeError):
            key1 < key2  # type: ignore[operator]

    def test_public_key_copy(self, backend):
        _skip_curve_unsupported(backend, ec.SECP256R1())
        key_bytes = load_vectors_from_file(
            os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
            lambda pemfile: pemfile.read().encode(),
        )
        key1 = serialization.load_pem_private_key(key_bytes, None).public_key()
        key2 = copy.copy(key1)

        assert key1 == key2

    def test_public_key_deepcopy(self, backend):
        _skip_curve_unsupported(backend, ec.SECP256R1())
        key_bytes = load_vectors_from_file(
            os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
            lambda pemfile: pemfile.read().encode(),
        )
        key1 = serialization.load_pem_private_key(key_bytes, None).public_key()
        key2 = copy.deepcopy(key1)

        assert key1 == key2

    def test_private_key_copy(self, backend):
        _skip_curve_unsupported(backend, ec.SECP256R1())
        key_bytes = load_vectors_from_file(
            os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
            lambda pemfile: pemfile.read().encode(),
        )
        key1 = serialization.load_pem_private_key(key_bytes, None)
        key2 = copy.copy(key1)

        assert key1 == key2

    def test_private_key_deepcopy(self, backend):
        _skip_curve_unsupported(backend, ec.SECP256R1())
        key_bytes = load_vectors_from_file(
            os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
            lambda pemfile: pemfile.read().encode(),
        )
        key1 = serialization.load_pem_private_key(key_bytes, None)
        key2 = copy.deepcopy(key1)

        assert key1 == key2
TestECEquality
python
cython__cython
Cython/Compiler/Annotate.py
{ "start": 325, "end": 13208 }
class ____(CCodeWriter):
    """C code writer that additionally records per-source-line annotation data
    and can render it as an HTML report (the ``.html`` "annotate" output)."""

    # also used as marker for detection of complete code emission in tests
    COMPLETE_CODE_TITLE = "Complete cythonized code"

    def __init__(self, create_from=None, buffer=None, copy_formatting=True,
                 show_entire_c_code=False, source_desc=None):
        CCodeWriter.__init__(self, create_from, buffer, copy_formatting=copy_formatting)
        self.show_entire_c_code = show_entire_c_code
        if create_from is None:
            self.annotation_buffer = StringIO()
            self.last_annotated_pos = None
            # annotations[filename][line] -> [(column, AnnotationItem)*]
            self.annotations = defaultdict(partial(defaultdict, list))
            # code[filename][line] -> str
            self.code = defaultdict(partial(defaultdict, str))
            # scopes[filename][line] -> set(scopes)
            self.scopes = defaultdict(partial(defaultdict, set))
        else:
            # When creating an insertion point, keep references to the same database
            self.annotation_buffer = create_from.annotation_buffer
            self.annotations = create_from.annotations
            self.code = create_from.code
            self.scopes = create_from.scopes
            self.last_annotated_pos = create_from.last_annotated_pos

    def create_new(self, create_from, buffer, copy_formatting):
        return AnnotationCCodeWriter(create_from, buffer, copy_formatting)

    def _write_to_buffer(self, s):
        # Mirror every write into the annotation buffer so mark_pos() can
        # attribute the emitted C code to the current source position.
        self.buffer.write(s)
        self.annotation_buffer.write(s)

    def mark_pos(self, pos, trace=True):
        if pos is not None:
            CCodeWriter.mark_pos(self, pos, trace)
            if self.funcstate and self.funcstate.scope:
                # lambdas and genexprs can result in multiple scopes per line => keep them in a set
                self.scopes[pos[0].filename][pos[1]].add(self.funcstate.scope)
        if self.last_annotated_pos:
            # Flush C code accumulated since the previous position marker.
            source_desc, line, _ = self.last_annotated_pos
            pos_code = self.code[source_desc.filename]
            pos_code[line] += self.annotation_buffer.getvalue()
            self.annotation_buffer = StringIO()
        self.last_annotated_pos = pos

    def annotate(self, pos, item):
        self.annotations[pos[0].filename][pos[1]].append((pos[2], item))

    def _css(self):
        """css template will later allow to choose a colormap"""
        css = [self._css_template]
        for i in range(255):
            color_shade = int(255.0 // (1.0 + i/10.0))
            css.append(f'.cython.score-{i:d} {{background-color: #FFFF{color_shade:02x};}}')
        try:
            from pygments.formatters import HtmlFormatter
        except ImportError:
            pass
        else:
            css.append(HtmlFormatter().get_style_defs('.cython'))
        return '\n'.join(css)

    _css_template = textwrap.dedent("""
        body.cython { font-family: courier; font-size: 12; }

        .cython.tag  {  }
        .cython.line { color: #000000; margin: 0em }
        .cython.code { font-size: 9; color: #444444; display: none; margin: 0px 0px 0px 8px; border-left: 8px none; }

        .cython.line .run { background-color: #B0FFB0; }
        .cython.line .mis { background-color: #FFB0B0; }
        .cython.code.run  { border-left: 8px solid #B0FFB0; }
        .cython.code.mis  { border-left: 8px solid #FFB0B0; }

        .cython.code .py_c_api  { color: red; }
        .cython.code .py_macro_api  { color: #FF7000; }
        .cython.code .pyx_c_api  { color: #FF3000; }
        .cython.code .pyx_macro_api  { color: #FF7000; }
        .cython.code .refnanny  { color: #FFA000; }
        .cython.code .trace  { color: #FFA000; }
        .cython.code .error_goto  { color: #FFA000; }

        .cython.code .coerce  { color: #008000; border: 1px dotted #008000 }
        .cython.code .py_attr { color: #FF0000; font-weight: bold; }
        .cython.code .c_attr  { color: #0000FF; }
        .cython.code .py_call { color: #FF0000; font-weight: bold; }
        .cython.code .c_call  { color: #0000FF; }
    """)

    # on-click toggle function to show/hide C source code
    _onclick_attr = ' onclick="{}"'.format((
        # Use local JS variables by declaring them as function arguments.
        "(function(f, s, c) {"
        " c = f.nodeValue == '+';"
        " s.display = c ? 'block' : 'none';"
        " f.nodeValue = c ? '−' : '+'"
        "})(this.firstChild, this.nextElementSibling.style)"
    ).replace(' ', '')  # poor dev's JS minification
    )

    def save_annotation(self, source_filename, target_filename, coverage_xml=None):
        with Utils.open_source_file(source_filename) as f:
            code = f.read()
        generated_code = self.code.get(source_filename, {})
        c_file = Utils.decode_filename(os.path.basename(target_filename))
        html_filename = os.path.splitext(target_filename)[0] + ".html"

        with open(html_filename, "w", encoding="UTF-8") as out_buffer:
            out_buffer.write(self._save_annotation(code, generated_code, c_file, source_filename, coverage_xml))

    def _save_annotation_header(self, c_file, source_filename, coverage_timestamp=None):
        coverage_info = ''
        if coverage_timestamp:
            coverage_info = ' with coverage data from {timestamp}'.format(
                timestamp=datetime.fromtimestamp(int(coverage_timestamp) // 1000))

        outlist = [
            textwrap.dedent('''\
            <!DOCTYPE html>
            <!-- Generated by Cython {watermark} -->
            <html>
            <head>
                <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
                <title>Cython: (unknown)</title>
                <style type="text/css">
                {css}
                </style>
            </head>
            <body class="cython">
            <p><span style="border-bottom: solid 1px grey;">Generated by Cython {watermark}</span>{more_info}</p>
            <p>
                <span style="background-color: #FFFF00">Yellow lines</span> hint at Python interaction.<br />
                Click on a line that starts with a "<code>+</code>" to see the C code that Cython generated for it.
            </p>
            ''').format(css=self._css(), watermark=Version.watermark,
                        filename=os.path.basename(source_filename) if source_filename else '',
                        more_info=coverage_info)
        ]
        if c_file:
            outlist.append('<p>Raw output: <a href="%s">%s</a></p>\n' % (c_file, c_file))
        return outlist

    def _save_annotation_footer(self):
        return ('</body></html>\n',)

    def _save_annotation(self, code, generated_code, c_file=None, source_filename=None, coverage_xml=None):
        """
        lines : original cython source code split by lines
        generated_code : generated c code keyed by line number in original file
        target filename : name of the file in which to store the generated html
        c_file : filename in which the c_code has been written
        """
        if coverage_xml is not None and source_filename:
            coverage_timestamp = coverage_xml.get('timestamp', '').strip()
            covered_lines = self._get_line_coverage(coverage_xml, source_filename)
        else:
            coverage_timestamp = covered_lines = None
        annotation_items = dict(self.annotations[source_filename])
        scopes = dict(self.scopes[source_filename])

        outlist = []
        outlist.extend(self._save_annotation_header(c_file, source_filename, coverage_timestamp))
        outlist.extend(self._save_annotation_body(code, generated_code, annotation_items, scopes, covered_lines))
        outlist.extend(self._save_annotation_footer())
        return ''.join(outlist)

    def _get_line_coverage(self, coverage_xml, source_filename):
        # Find the <class> entry in the Cobertura-style XML whose filename
        # best matches *source_filename*, preferring exact/absolute matches.
        coverage_data = None
        for entry in coverage_xml.iterfind('.//class'):
            if not entry.get('filename'):
                continue
            if (entry.get('filename') == source_filename or
                    os.path.abspath(entry.get('filename')) == source_filename):
                coverage_data = entry
                break
            elif source_filename.endswith(entry.get('filename')):
                coverage_data = entry
                # but we might still find a better match...
        if coverage_data is None:
            return None
        return {
            int(line.get('number')): int(line.get('hits'))
            for line in coverage_data.iterfind('lines/line')
        }

    def _htmlify_code(self, code, language):
        try:
            from pygments import highlight
            from pygments.lexers import CythonLexer, CppLexer
            from pygments.formatters import HtmlFormatter
        except ImportError:
            # no Pygments, just escape the code
            return html_escape(code)

        if language == "cython":
            lexer = CythonLexer(stripnl=False, stripall=False)
        elif language == "c/cpp":
            lexer = CppLexer(stripnl=False, stripall=False)
        else:
            # unknown language, use fallback
            return html_escape(code)

        html_code = highlight(
            code, lexer, HtmlFormatter(nowrap=True))
        return html_code

    def _save_annotation_body(self, cython_code, generated_code, annotation_items, scopes, covered_lines=None):
        outlist = ['<div class="cython">']
        pos_comment_marker = '/* \N{HORIZONTAL ELLIPSIS} */\n'
        new_calls_map = {
            name: 0 for name in
            'refnanny trace py_macro_api py_c_api pyx_macro_api pyx_c_api error_goto'.split()
        }.copy
        self.mark_pos(None)

        def annotate(match):
            group_name = match.lastgroup
            calls[group_name] += 1
            return f"<span class='{group_name}'>{match.group(group_name)}</span>"

        lines = self._htmlify_code(cython_code, "cython").splitlines()
        lineno_width = len(str(len(lines)))
        if not covered_lines:
            covered_lines = None

        for k, line in enumerate(lines, 1):
            try:
                c_code = generated_code[k]
            except KeyError:
                c_code = ''
            else:
                c_code = _replace_pos_comment(pos_comment_marker, c_code)
                if c_code.startswith(pos_comment_marker):
                    c_code = c_code[len(pos_comment_marker):]
                c_code = html_escape(c_code)

            calls = new_calls_map()
            c_code = _parse_code(annotate, c_code)
            # Heuristic "Python interaction" score: C-API calls weigh most.
            score = (5 * calls['py_c_api'] + 2 * calls['pyx_c_api'] +
                     calls['py_macro_api'] + calls['pyx_macro_api'])

            if c_code:
                onclick = self._onclick_attr
                expandsymbol = '+'
            else:
                onclick = ''
                expandsymbol = '&#xA0;'

            covered = ''
            if covered_lines is not None and k in covered_lines:
                hits = covered_lines[k]
                if hits is not None:
                    covered = 'run' if hits else 'mis'

            outlist.append(
                f'<pre class="cython line score-{score}"{onclick}>'
                # generate line number with expand symbol in front,
                # and the right number of digit
                f'{expandsymbol}<span class="{covered}">{k:0{lineno_width}d}</span>: {line.rstrip()}</pre>\n'
            )
            if c_code:
                outlist.append(f"<pre class='cython code score-{score} {covered}'>{c_code}</pre>")
        outlist.append("</div>")

        # now the whole c-code if needed:
        if self.show_entire_c_code:
            complete_code_as_html = self._htmlify_code(self.buffer.getvalue(), "c/cpp")
            outlist.append(
                '<p><div class="cython">'
                f"<pre class='cython line'{self._onclick_attr}>+ {AnnotationCCodeWriter.COMPLETE_CODE_TITLE}</pre>\n"
                f"<pre class='cython code'>{complete_code_as_html}</pre>"
                "</div></p>"
            )
        return outlist


_parse_code = re.compile((
    br'(?P<refnanny>__Pyx_X?(?:GOT|GIVE)REF|__Pyx_RefNanny[A-Za-z]+)|'
    br'(?P<trace>__Pyx_Trace[A-Za-z]+)|'
    br'(?:'
    br'(?P<pyx_macro_api>__Pyx_[A-Z][A-Z_]+)|'
    br'(?P<pyx_c_api>(?:__Pyx_[A-Z][a-z_][A-Za-z_]*)|__pyx_convert_[A-Za-z_]*)|'
    br'(?P<py_macro_api>Py[A-Z][a-z]+_[A-Z][A-Z_]+)|'
    br'(?P<py_c_api>Py[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]*)'
    br')(?=\()|'  # look-ahead to exclude subsequent '(' from replacement
    br'(?P<error_goto>(?:(?<=;) *if [^;]* +)?__PYX_ERR\([^)]+\))'
).decode('ascii')).sub


_replace_pos_comment = re.compile(
    # this matches what Cython generates as code line marker comment
    br'^\s*/\*(?:(?:[^*]|\*[^/])*\n)+\s*\*/\s*\n'.decode('ascii'),
    re.M
).sub
AnnotationCCodeWriter
python
walkccc__LeetCode
solutions/1071. Greatest Common Divisor of Strings/1071.py
{ "start": 0, "end": 504 }
class ____: def gcdOfStrings(self, str1: str, str2: str) -> str: for sz in range(min(len(str1), len(str2)), 0, -1): if self._isDivisible(str1, str2, sz): return str1[:sz] return '' def _isDivisible(self, str1: str, str2: str, sz: int) -> bool: """Returns True if str1 and str2 are divisible by str1[0..sz).""" if len(str1) % sz > 0 or len(str2) % sz > 0: return False gcd = str1[:sz] return str1.replace(gcd, '') == '' and str2.replace(gcd, '') == ''
Solution
python
realpython__materials
python-import/namespace_package/third_party/serializers/xml.py
{ "start": 71, "end": 476 }
class ____: def __init__(self): self._element = None def start_object(self, object_name, object_id): self._element = et.Element(object_name, attrib={"id": object_id}) def add_property(self, name, value): prop = et.SubElement(self._element, name) prop.text = value def __str__(self): return et.tostring(self._element, encoding="unicode")
XmlSerializer
python
openai__openai-python
src/openai/types/responses/response_output_text.py
{ "start": 804, "end": 1252 }
class ____(BaseModel):
    """Annotation marking a span of output text that cites a web URL."""

    end_index: int
    """The index of the last character of the URL citation in the message."""

    start_index: int
    """The index of the first character of the URL citation in the message."""

    title: str
    """The title of the web resource."""

    type: Literal["url_citation"]
    """The type of the URL citation. Always `url_citation`."""

    url: str
    """The URL of the web resource."""
AnnotationURLCitation
python
cython__cython
Cython/Debugger/libpython.py
{ "start": 3932, "end": 5160 }
class ____(RuntimeError):
    # Signals a NULL PyObject* encountered in the debuggee; raised/caught by
    # the pointer-wrapper code elsewhere in this module.
    pass


def safety_limit(val):
    # Given an integer value from the process being debugged, limit it to some
    # safety threshold so that arbitrary breakage within said process doesn't
    # break the gdb process too much (e.g. sizes of iterations, sizes of lists)
    return min(val, 1000)


def safe_range(val):
    # As per range, but don't trust the value too much: cap it to a safety
    # threshold in case the data was corrupted
    return range(safety_limit(int(val)))


def write_unicode(file, text):
    # Write *text* to *file*; encoding is left to the stream itself.
    file.write(text)


try:
    os_fsencode = os.fsencode
except AttributeError:
    # Fallback for Pythons without os.fsencode: emulate it, including the
    # 'surrogateescape' handling of undecodable filename bytes.
    def os_fsencode(filename):
        if not isinstance(filename, unicode):
            return filename
        encoding = sys.getfilesystemencoding()
        if encoding == 'mbcs':
            # mbcs doesn't support surrogateescape
            return filename.encode(encoding)
        encoded = []
        for char in filename:
            # surrogateescape error handler
            if 0xDC80 <= ord(char) <= 0xDCFF:
                byte = chr(ord(char) - 0xDC00)
            else:
                byte = char.encode(encoding)
            encoded.append(byte)
        return ''.join(encoded)
NullPyObjectPtr
python
getsentry__sentry
src/sentry/db/models/fields/slug.py
{ "start": 418, "end": 1962 }
class ____(Lookup):
    # Lookup that matches against the numeric ``id`` column when the
    # right-hand value is purely decimal, and against ``slug`` otherwise.
    lookup_name = "id_or_slug"

    def as_sql(self, compiler, connection):
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.process_rhs(compiler, connection)

        # Use Django's built-in SQL compiler methods to properly quote table and column names
        # lhs initially looks like "table_name"."column_name"
        table_name = lhs.split(".")[0] if "." in lhs else None

        if table_name:
            table_name_quoted = compiler.quote_name_unless_alias(table_name)
            id_column_quoted = compiler.quote_name_unless_alias("id")
            slug_column_quoted = compiler.quote_name_unless_alias("slug")
        else:
            # If no table name, assume default quoting
            id_column_quoted = '"id"'
            slug_column_quoted = '"slug"'

        if rhs_params and str(rhs_params[0]).isdecimal():
            # If numeric, use the 'id' field for comparison
            if table_name:
                return f"{table_name_quoted}.{id_column_quoted} = {rhs}", rhs_params
            else:
                return f"{id_column_quoted} = {rhs}", rhs_params
        else:
            # If not numeric, use the 'slug' field for comparison
            if table_name:
                return f"{table_name_quoted}.{slug_column_quoted} = {rhs}", rhs_params
            else:
                return f"{slug_column_quoted} = {rhs}", rhs_params


SentrySlugField.register_lookup(IdOrSlugLookup)
SentryOrgSlugField.register_lookup(IdOrSlugLookup)
IdOrSlugLookup
python
dagster-io__dagster
python_modules/dagster/dagster/components/testing/test_cases.py
{ "start": 5812, "end": 6908 }
class ____:
    """Pytest test class for testing customization of op spec.

    You can subclass this class and implement a test_op_customization function
    using the various fixtures in order to comprehensively test op spec
    customization options for your component.
    """

    # Each param is an (attributes, assertion) pair: the attributes dict is the
    # customization to apply, the assertion is a predicate over the resulting op.
    @pytest.fixture(
        params=[
            (
                {"name": "my_op"},
                lambda op: op.name == "my_op",
            ),
            (
                {"tags": {"foo": "bar"}},
                lambda op: op.tags.get("foo") == "bar",
            ),
            (
                {"backfill_policy": {"type": "single_run"}},
                lambda op: op.backfill_policy.max_partitions_per_run is None,
            ),
        ],
        ids=["name", "tags", "backfill_policy"],
    )
    def translation_test_case(self, request):
        return request.param

    @pytest.fixture
    def attributes(self, translation_test_case):
        # The customization attributes to apply to the op spec.
        return translation_test_case[0]

    @pytest.fixture
    def assertion(self, translation_test_case):
        # The predicate that verifies the customization took effect.
        return translation_test_case[1]
TestOpCustomization
python
davidhalter__jedi
jedi/inference/value/klass.py
{ "start": 4704, "end": 7892 }
class ____(ParserTreeFilter):
    """Name filter over a class body, wrapping results as ``ClassName`` and
    hiding name-mangled private attributes from outside the class."""

    def __init__(self, class_value, node_context=None, until_position=None,
                 origin_scope=None, is_instance=False):
        super().__init__(
            class_value.as_context(), node_context,
            until_position=until_position,
            origin_scope=origin_scope,
        )
        self._class_value = class_value
        self._is_instance = is_instance

    def _convert_names(self, names):
        return [
            ClassName(
                class_value=self._class_value,
                tree_name=name,
                name_context=self._node_context,
                # Decorators only apply on class access, not instance access.
                apply_decorators=not self._is_instance,
            ) for name in names
        ]

    def _equals_origin_scope(self):
        # Walk the origin scope's parents to check whether access happens
        # from within this class (mangled names are then allowed).
        node = self._origin_scope
        while node is not None:
            if node == self._parser_scope or node == self.parent_context:
                return True
            node = get_cached_parent_scope(self._parso_cache_node, node)
        return False

    def _access_possible(self, name):
        # Filter for name mangling of private variables like __foo
        return not name.value.startswith('__') or name.value.endswith('__') \
            or self._equals_origin_scope()

    def _filter(self, names):
        names = super()._filter(names)
        return [name for name in names if self._access_possible(name)]


def init_param_value(arg_nodes) -> Optional[bool]:
    """
    Returns:
    - ``True`` if ``@dataclass(init=True)``
    - ``False`` if ``@dataclass(init=False)``
    - ``None`` if not specified ``@dataclass()``
    """
    for arg_node in arg_nodes:
        if (
            arg_node.type == "argument"
            and arg_node.children[0].value == "init"
        ):
            if arg_node.children[2].value == "False":
                return False
            elif arg_node.children[2].value == "True":
                return True
    return None


def get_dataclass_param_names(cls) -> List[DataclassParamName]:
    """
    ``cls`` is a :class:`ClassMixin`. The type is only documented as mypy
    would complain that some fields are missing.

    .. code:: python

        @dataclass
        class A:
            a: int
            b: str = "toto"

    For the previous example, the param names would be ``a`` and ``b``.
    """
    param_names = []
    filter_ = cls.as_context().get_global_filter()
    # Sort by source position so generated __init__ params keep declaration order.
    for name in sorted(filter_.values(), key=lambda name: name.start_pos):
        d = name.tree_name.get_definition()
        annassign = d.children[1]
        if d.type == 'expr_stmt' and annassign.type == 'annassign':
            node = annassign.children[1]
            # ClassVar annotations are not dataclass fields.
            if node.type == "atom_expr" and node.children[0].value == "ClassVar":
                continue

            if len(annassign.children) < 4:
                default = None
            else:
                default = annassign.children[3]

            param_names.append(DataclassParamName(
                parent_context=cls.parent_context,
                tree_name=name.tree_name,
                annotation_node=annassign.children[1],
                default_node=default,
            ))
    return param_names
ClassFilter
python
realpython__materials
python-print/custom_class.py
{ "start": 0, "end": 275 }
class ____:
    """Minimal record type demonstrating a repr-style __str__."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __str__(self):
        # Look up the class name at runtime so subclasses print their own name.
        return f"{type(self).__name__}(name={self.name!r}, age={self.age!r})"


jdoe = Person("John Doe", 42)
print(jdoe)
Person
python
Textualize__textual
src/textual/widgets/_select.py
{ "start": 1203, "end": 5607 }
class ____(OptionList):
    """The 'pop-up' overlay for the Select control."""

    BINDINGS = [("escape", "dismiss", "Dismiss menu")]

    @dataclass
    class Dismiss(Message):
        """Inform ancestor the overlay should be dismissed."""

        lost_focus: bool = False
        """True if the overlay lost focus."""

    @dataclass
    class UpdateSelection(Message):
        """Inform ancestor the selection was changed."""

        option_index: int
        """The index of the new selection."""

    def __init__(self, type_to_search: bool = True) -> None:
        super().__init__()
        self._type_to_search = type_to_search
        """If True (default), the user can type to search for a matching option and the cursor will jump to it."""
        self._search_query: str = ""
        """The current search query used to find a matching option and jump to it."""
        self._search_reset_delay: float = 0.7
        """The number of seconds to wait after the most recent key press before resetting the search query."""

    def on_mount(self) -> None:
        # Timer that clears the incremental search query after a pause in typing.
        def reset_query() -> None:
            self._search_query = ""

        self._search_reset_timer = Timer(
            self, self._search_reset_delay, callback=reset_query
        )

    def watch_has_focus(self, value: bool) -> None:
        # Start the reset timer while focused; stop it (and clear the query) on blur.
        self._search_query = ""
        if value:
            self._search_reset_timer._start()
        else:
            self._search_reset_timer.reset()
            self._search_reset_timer.stop()
        super().watch_has_focus(value)

    async def _on_key(self, event: events.Key) -> None:
        if not self._type_to_search:
            return
        self._search_reset_timer.reset()
        if event.character is not None and event.is_printable:
            event.time = 0
            event.stop()
            event.prevent_default()
            # Update the search query and jump to the next option that matches.
            self._search_query += event.character
            index = self._find_search_match(self._search_query)
            if index is not None:
                self.select(index)

    def check_consume_key(self, key: str, character: str | None = None) -> bool:
        """Check if the widget may consume the given key."""
        return (
            self._type_to_search and character is not None and character.isprintable()
        )

    def select(self, index: int | None) -> None:
        """Move selection.

        Args:
            index: Index of new selection.
        """
        self.highlighted = index
        self.scroll_to_highlight()

    def _find_search_match(self, query: str) -> int | None:
        """A simple substring search which favors options containing the
        substring earlier in the prompt.

        Args:
            query: The substring to search for.

        Returns:
            The index of the option that matches the query, or `None` if no
            match is found.
        """
        best_match: int | None = None
        minimum_index: int | None = None
        query = query.lower()
        for index, option in enumerate(self._options):
            prompt = option.prompt
            # Case-insensitive match against either a Rich Text or plain str prompt;
            # other renderable types are not searchable and are skipped.
            if isinstance(prompt, Text):
                lower_prompt = prompt.plain.lower()
            elif isinstance(prompt, str):
                lower_prompt = prompt.lower()
            else:
                continue
            match_index = lower_prompt.find(query)
            if match_index != -1 and (
                minimum_index is None or match_index < minimum_index
            ):
                best_match = index
                minimum_index = match_index
        return best_match

    def action_dismiss(self) -> None:
        """Dismiss the overlay."""
        self.post_message(self.Dismiss())

    def _on_blur(self, _event: events.Blur) -> None:
        """On blur we want to dismiss the overlay."""
        self.post_message(self.Dismiss(lost_focus=True))
        self.suppress_click()

    def on_option_list_option_selected(self, event: OptionList.OptionSelected) -> None:
        """Inform parent when an option is selected."""
        event.stop()
        self.post_message(self.UpdateSelection(event.option_index))

    def on_option_list_option_highlighted(
        self, event: OptionList.OptionHighlighted
    ) -> None:
        """Stop option list highlighted messages leaking."""
        event.stop()
SelectOverlay
python
wandb__wandb
wandb/vendor/pygments/styles/monokai.py
{ "start": 506, "end": 5080 }
class ____(Style):
    """
    This style mimics the Monokai color scheme.
    """

    background_color = "#272822"
    highlight_color = "#49483e"

    # Maps Pygments token types to style strings. NOTE(review): an empty
    # string appears to mean "no explicit style" so the token presumably
    # inherits from its parent token type — confirm against Pygments docs.
    styles = {
        # No corresponding class for the following:
        Text: "#f8f8f2",  # class: ''
        Whitespace: "",  # class: 'w'
        Error: "#960050 bg:#1e0010",  # class: 'err'
        Other: "",  # class 'x'

        Comment: "#75715e",  # class: 'c'
        Comment.Multiline: "",  # class: 'cm'
        Comment.Preproc: "",  # class: 'cp'
        Comment.Single: "",  # class: 'c1'
        Comment.Special: "",  # class: 'cs'

        Keyword: "#66d9ef",  # class: 'k'
        Keyword.Constant: "",  # class: 'kc'
        Keyword.Declaration: "",  # class: 'kd'
        Keyword.Namespace: "#f92672",  # class: 'kn'
        Keyword.Pseudo: "",  # class: 'kp'
        Keyword.Reserved: "",  # class: 'kr'
        Keyword.Type: "",  # class: 'kt'

        Operator: "#f92672",  # class: 'o'
        Operator.Word: "",  # class: 'ow' - like keywords

        Punctuation: "#f8f8f2",  # class: 'p'

        Name: "#f8f8f2",  # class: 'n'
        Name.Attribute: "#a6e22e",  # class: 'na' - to be revised
        Name.Builtin: "",  # class: 'nb'
        Name.Builtin.Pseudo: "",  # class: 'bp'
        Name.Class: "#a6e22e",  # class: 'nc' - to be revised
        Name.Constant: "#66d9ef",  # class: 'no' - to be revised
        Name.Decorator: "#a6e22e",  # class: 'nd' - to be revised
        Name.Entity: "",  # class: 'ni'
        Name.Exception: "#a6e22e",  # class: 'ne'
        Name.Function: "#a6e22e",  # class: 'nf'
        Name.Property: "",  # class: 'py'
        Name.Label: "",  # class: 'nl'
        Name.Namespace: "",  # class: 'nn' - to be revised
        Name.Other: "#a6e22e",  # class: 'nx'
        Name.Tag: "#f92672",  # class: 'nt' - like a keyword
        Name.Variable: "",  # class: 'nv' - to be revised
        Name.Variable.Class: "",  # class: 'vc' - to be revised
        Name.Variable.Global: "",  # class: 'vg' - to be revised
        Name.Variable.Instance: "",  # class: 'vi' - to be revised

        Number: "#ae81ff",  # class: 'm'
        Number.Float: "",  # class: 'mf'
        Number.Hex: "",  # class: 'mh'
        Number.Integer: "",  # class: 'mi'
        Number.Integer.Long: "",  # class: 'il'
        Number.Oct: "",  # class: 'mo'

        Literal: "#ae81ff",  # class: 'l'
        Literal.Date: "#e6db74",  # class: 'ld'

        String: "#e6db74",  # class: 's'
        String.Backtick: "",  # class: 'sb'
        String.Char: "",  # class: 'sc'
        String.Doc: "",  # class: 'sd' - like a comment
        String.Double: "",  # class: 's2'
        String.Escape: "#ae81ff",  # class: 'se'
        String.Heredoc: "",  # class: 'sh'
        String.Interpol: "",  # class: 'si'
        String.Other: "",  # class: 'sx'
        String.Regex: "",  # class: 'sr'
        String.Single: "",  # class: 's1'
        String.Symbol: "",  # class: 'ss'

        Generic: "",  # class: 'g'
        Generic.Deleted: "#f92672",  # class: 'gd',
        Generic.Emph: "italic",  # class: 'ge'
        Generic.Error: "",  # class: 'gr'
        Generic.Heading: "",  # class: 'gh'
        Generic.Inserted: "#a6e22e",  # class: 'gi'
        Generic.Output: "",  # class: 'go'
        Generic.Prompt: "",  # class: 'gp'
        Generic.Strong: "bold",  # class: 'gs'
        Generic.Subheading: "#75715e",  # class: 'gu'
        Generic.Traceback: "",  # class: 'gt'
    }
MonokaiStyle
python
sqlalchemy__sqlalchemy
test/dialect/postgresql/test_reflection.py
{ "start": 2118, "end": 2555 }
class ____: @testing.fixture( params=[ "engine", "connection", ] ) def inspect_fixture(self, request, metadata, testing_engine): engine = request.param eng = testing_engine() conn = eng.connect() if engine == "connection": yield inspect(eng), conn else: yield inspect(conn), conn conn.close()
ReflectionFixtures
python
bokeh__bokeh
tests/unit/bokeh/models/test_mappers.py
{ "start": 3239, "end": 3532 }
class ____: def test_basic(self) -> None: mapper = bmm.CategoricalPatternMapper() check_properties_existence(mapper, [ "factors", "patterns", "start", "end", "default_value"], )
Test_CategoricalPatternMapper
python
django__django
tests/queries/tests.py
{ "start": 150937, "end": 153289 }
class ____(TestCase): """ The queries reuse joins sensibly (for example, direct joins are always reused). """ def test_fk_reuse(self): qs = Annotation.objects.filter(tag__name="foo").filter(tag__name="bar") self.assertEqual(str(qs.query).count("JOIN"), 1) def test_fk_reuse_select_related(self): qs = Annotation.objects.filter(tag__name="foo").select_related("tag") self.assertEqual(str(qs.query).count("JOIN"), 1) def test_fk_reuse_annotation(self): qs = Annotation.objects.filter(tag__name="foo").annotate(cnt=Count("tag__name")) self.assertEqual(str(qs.query).count("JOIN"), 1) def test_fk_reuse_disjunction(self): qs = Annotation.objects.filter(Q(tag__name="foo") | Q(tag__name="bar")) self.assertEqual(str(qs.query).count("JOIN"), 1) def test_fk_reuse_order_by(self): qs = Annotation.objects.filter(tag__name="foo").order_by("tag__name") self.assertEqual(str(qs.query).count("JOIN"), 1) def test_revo2o_reuse(self): qs = Detail.objects.filter(member__name="foo").filter(member__name="foo") self.assertEqual(str(qs.query).count("JOIN"), 1) def test_revfk_noreuse(self): qs = Author.objects.filter(report__name="r4").filter(report__name="r1") self.assertEqual(str(qs.query).count("JOIN"), 2) def test_inverted_q_across_relations(self): """ When a trimmable join is specified in the query (here school__), the ORM detects it and removes unnecessary joins. The set of reusable joins are updated after trimming the query so that other lookups don't consider that the outer query's filters are in effect for the subquery (#26551). """ springfield_elementary = School.objects.create() hogward = School.objects.create() Student.objects.create(school=springfield_elementary) hp = Student.objects.create(school=hogward) Classroom.objects.create(school=hogward, name="Potion") Classroom.objects.create(school=springfield_elementary, name="Main") qs = Student.objects.filter( ~( Q(school__classroom__name="Main") & Q(school__classroom__has_blackboard=None) ) ) self.assertSequenceEqual(qs, [hp])
JoinReuseTest
python
django__django
tests/forms_tests/models.py
{ "start": 3235, "end": 3337 }
class ____(models.Model): file = models.FileField(storage=temp_storage, upload_to="tests")
FileModel
python
pypa__setuptools
setuptools/_distutils/compilers/C/cygwin.py
{ "start": 954, "end": 8532 }
class ____(unix.Compiler):
    """Handles the Cygwin port of the GNU C compiler to Windows."""

    compiler_type = 'cygwin'
    obj_extension = ".o"
    static_lib_extension = ".a"
    shared_lib_extension = ".dll.a"
    dylib_lib_extension = ".dll"
    static_lib_format = "lib%s%s"
    shared_lib_format = "lib%s%s"
    dylib_lib_format = "cyg%s%s"
    exe_extension = ".exe"

    def __init__(self, verbose=False, dry_run=False, force=False):
        super().__init__(verbose, dry_run, force)

        # Warn (but continue) if pyconfig.h was produced by a different
        # compiler: preprocessor macros may be missing at compile time.
        status, details = check_config_h()
        self.debug_print(f"Python's GCC status: {status} (details: {details})")
        if status is not CONFIG_H_OK:
            self.warn(
                "Python's pyconfig.h doesn't seem to support your compiler. "
                f"Reason: {details}. "
                "Compiling may fail because of undefined preprocessor macros."
            )

        self.cc, self.cxx = get_config_vars('CC', 'CXX')

        # Override 'CC' and 'CXX' environment variables for
        # building using MINGW compiler for MSVC python.
        self.cc = os.environ.get('CC', self.cc or 'gcc')
        self.cxx = os.environ.get('CXX', self.cxx or 'g++')

        self.linker_dll = self.cc
        self.linker_dll_cxx = self.cxx
        shared_option = "-shared"

        self.set_executables(
            compiler=f'{self.cc} -mcygwin -O -Wall',
            compiler_so=f'{self.cc} -mcygwin -mdll -O -Wall',
            compiler_cxx=f'{self.cxx} -mcygwin -O -Wall',
            compiler_so_cxx=f'{self.cxx} -mcygwin -mdll -O -Wall',
            linker_exe=f'{self.cc} -mcygwin',
            linker_so=f'{self.linker_dll} -mcygwin {shared_option}',
            linker_exe_cxx=f'{self.cxx} -mcygwin',
            linker_so_cxx=f'{self.linker_dll_cxx} -mcygwin {shared_option}',
        )

        self.dll_libraries = get_msvcr()

    @property
    def gcc_version(self):
        # Older numpy depended on this existing to check for ancient
        # gcc versions. This doesn't make much sense with clang etc so
        # just hardcode to something recent.
        # https://github.com/numpy/numpy/pull/20333
        warnings.warn(
            "gcc_version attribute of CygwinCCompiler is deprecated. "
            "Instead of returning actual gcc version a fixed value 11.2.0 is returned.",
            DeprecationWarning,
            stacklevel=2,
        )
        with suppress_known_deprecation():
            return LooseVersion("11.2.0")

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compiles the source by spawning GCC and windres if needed."""
        if ext in ('.rc', '.res'):
            # gcc needs '.res' and '.rc' compiled to object files !!!
            try:
                self.spawn(["windres", "-i", src, "-o", obj])
            except DistutilsExecError as msg:
                raise CompileError(msg)
        else:
            # for other files use the C-compiler
            try:
                if self.detect_language(src) == 'c++':
                    self.spawn(
                        self.compiler_so_cxx
                        + cc_args
                        + [src, '-o', obj]
                        + extra_postargs
                    )
                else:
                    self.spawn(
                        self.compiler_so + cc_args + [src, '-o', obj] + extra_postargs
                    )
            except DistutilsExecError as msg:
                raise CompileError(msg)

    def link(
        self,
        target_desc,
        objects,
        output_filename,
        output_dir=None,
        libraries=None,
        library_dirs=None,
        runtime_library_dirs=None,
        export_symbols=None,
        debug=False,
        extra_preargs=None,
        extra_postargs=None,
        build_temp=None,
        target_lang=None,
    ):
        """Link the objects."""
        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])

        if runtime_library_dirs:
            self.warn(_runtime_library_dirs_msg)

        # Additional libraries
        libraries.extend(self.dll_libraries)

        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if (export_symbols is not None) and (
            target_desc != self.EXECUTABLE or self.linker_dll == "gcc"
        ):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably better to check if we really need this,
            # but for this we had to insert some unchanged parts of
            # UnixCCompiler, and this is not what we want.)

            # we want to put some files in the same directory as the
            # object files are, build_temp doesn't help much
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename)
            )

            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")

            # Generate .def file
            contents = [f"LIBRARY {os.path.basename(output_filename)}", "EXPORTS"]
            contents.extend(export_symbols)
            self.execute(write_file, (def_file, contents), f"writing {def_file}")

            # next add options for def-file
            # for gcc/ld the def-file is specified as any object files
            objects.append(def_file)

        # end: if ((export_symbols is not None) and
        #        (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):

        # who wants symbols and a many times larger output file
        # should explicitly switch the debug mode on
        # otherwise we let ld strip the output file
        # (On my machine: 10KiB < stripped_file < ??100KiB
        #   unstripped_file = stripped_file + XXX KiB
        #  ( XXX=254 for a typical python extension))
        if not debug:
            extra_preargs.append("-s")

        super().link(
            target_desc,
            objects,
            output_filename,
            output_dir,
            libraries,
            library_dirs,
            runtime_library_dirs,
            None,  # export_symbols, we do this in our def-file
            debug,
            extra_preargs,
            extra_postargs,
            build_temp,
            target_lang,
        )

    def runtime_library_dir_option(self, dir):
        # cygwin doesn't support rpath. While in theory we could error
        # out like MSVC does, code might expect it to work like on Unix, so
        # just warn and hope for the best.
        self.warn(_runtime_library_dirs_msg)
        return []

    # -- Miscellaneous methods -----------------------------------------

    def _make_out_path(self, output_dir, strip_dir, src_name):
        # use normcase to make sure '.rc' is really '.rc' and not '.RC'
        norm_src_name = os.path.normcase(src_name)
        return super()._make_out_path(output_dir, strip_dir, norm_src_name)

    @property
    def out_extensions(self):
        """
        Add support for rc and res files.
        """
        return {
            **super().out_extensions,
            **{ext: ext + self.obj_extension for ext in ('.res', '.rc')},
        }


# the same as cygwin plus some additional parameters
Compiler
python
huggingface__transformers
src/transformers/models/clvp/modeling_clvp.py
{ "start": 36920, "end": 43764 }
class ____(ClvpPreTrainedModel):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`ClvpEncoderLayer`].

    Args:
        config: ClvpConfig
    """

    def __init__(self, config: ClvpConfig):
        super().__init__(config)

        self.config = config
        self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        # Rotary embeddings are optional; when disabled, layers receive None.
        self.rotary_pos_emb = ClvpRotaryPositionalEmbedding(config) if config.use_rotary_embedding else None
        self.layers = nn.ModuleList([ClvpEncoderLayer(config) for _ in range(config.num_hidden_layers)])

        self.sequence_summary = ClvpSequenceSummary(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Projects the pooled representation into the shared embedding space.
        self.projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.token_embedding

    def set_input_embeddings(self, value):
        self.token_embedding = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutput]:
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
                Indices of input sequence tokens in the vocabulary.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                input embeddings for the model. This bypasses the model's internal embedding lookup matrix.
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            position_ids (`torch.LongTensor`, *optional*):
                Denotes the position ids of `input_ids`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Exactly one of input_ids / inputs_embeds must be supplied.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            inputs_embeds = self.token_embedding(input_ids)
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        # expand attention_mask and create position_ids if needed
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(input_shape[1], dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        rotary_pos_emb = self.rotary_pos_emb(inputs_embeds) if self.rotary_pos_emb is not None else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # Checkpointing trades compute for memory during training.
            if self.gradient_checkpointing and self.training:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    encoder_layer.__call__,
                    hidden_states,
                    rotary_pos_emb,
                    attention_mask,
                    position_ids,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    rotary_pos_emb,
                    attention_mask,
                    position_ids,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        last_hidden_state = hidden_states
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        # take the mean over axis 1 and get pooled output
        pooled_output = self.sequence_summary(last_hidden_state)

        # apply the projection layer
        embeds = self.projection(pooled_output)

        if not return_dict:
            return tuple(
                v for v in [embeds, last_hidden_state, pooled_output, encoder_states, all_attentions] if v is not None
            )

        return ClvpEncoderOutput(
            embeds=embeds,
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_states,
            attentions=all_attentions,
        )
ClvpEncoder
python
pypa__pipenv
tests/integration/conftest.py
{ "start": 3469, "end": 5654 }
class ____: def __init__(self, path, index): self.path = path self.index = index if self.path.exists(): self.loads() else: self.document = tomlkit.document() self.document["source"] = self.document.get("source", tomlkit.aot()) self.document["requires"] = self.document.get("requires", tomlkit.table()) self.document["packages"] = self.document.get("packages", tomlkit.table()) self.document["dev-packages"] = self.document.get("dev-packages", tomlkit.table()) self.write() def install(self, package, value, dev=False): section = "packages" if not dev else "dev-packages" if isinstance(value, dict): table = tomlkit.inline_table() table.update(value) self.document[section][package] = table else: self.document[section][package] = value self.write() def remove(self, package, dev=False): section = "packages" if not dev else "dev-packages" if ( not dev and package not in self.document[section] and package in self.document["dev-packages"] ): section = "dev-packages" del self.document[section][package] self.write() def add(self, package, value, dev=False): self.install(package, value, dev=dev) def update(self, package, value, dev=False): self.install(package, value, dev=dev) def loads(self): self.document = tomlkit.loads(self.path.read_text()) def dumps(self): if not self.document.get("source"): source_table = tomlkit.table() source_table["url"] = self.index source_table["verify_ssl"] = bool(self.index.startswith("https")) source_table["name"] = "pipenv_test_index" self.document["source"].append(source_table) return tomlkit.dumps(self.document) def write(self): self.path.write_text(self.dumps()) @classmethod def get_fixture_path(cls, path, fixtures="test_artifacts"): return Path(Path(__file__).resolve().parent.parent / fixtures / path)
_Pipfile
python
gevent__gevent
src/greentest/3.10/test_signal.py
{ "start": 24078, "end": 28080 }
class ____(unittest.TestCase):
    """Tests for signal.setitimer()/getitimer() (real, virtual and profiling timers)."""

    def setUp(self):
        self.hndl_called = False
        self.hndl_count = 0
        self.itimer = None
        # Install our SIGALRM handler, remembering the previous one for tearDown.
        self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)

    def tearDown(self):
        signal.signal(signal.SIGALRM, self.old_alarm)
        if self.itimer is not None: # test_itimer_exc doesn't change this attr
            # just ensure that itimer is stopped
            signal.setitimer(self.itimer, 0)

    def sig_alrm(self, *args):
        self.hndl_called = True

    def sig_vtalrm(self, *args):
        self.hndl_called = True

        if self.hndl_count > 3:
            # it shouldn't be here, because it should have been disabled.
            raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
                                     "timer.")
        elif self.hndl_count == 3:
            # disable ITIMER_VIRTUAL, this function shouldn't be called anymore
            signal.setitimer(signal.ITIMER_VIRTUAL, 0)

        self.hndl_count += 1

    def sig_prof(self, *args):
        # Disable the profiling timer on first delivery.
        self.hndl_called = True
        signal.setitimer(signal.ITIMER_PROF, 0)

    def test_itimer_exc(self):
        # XXX I'm assuming -1 is an invalid itimer, but maybe some platform
        # defines it ?
        self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
        # Negative times are treated as zero on some platforms.
        if 0:
            self.assertRaises(signal.ItimerError,
                              signal.setitimer, signal.ITIMER_REAL, -1)

    def test_itimer_real(self):
        # A 1s real timer should deliver SIGALRM and wake signal.pause().
        self.itimer = signal.ITIMER_REAL
        signal.setitimer(self.itimer, 1.0)
        signal.pause()
        self.assertEqual(self.hndl_called, True)

    # Issue 3864, unknown if this affects earlier versions of freebsd also
    @unittest.skipIf(sys.platform in ('netbsd5',),
                     'itimer not reliable (does not mix well with threading) on some BSDs.')
    def test_itimer_virtual(self):
        self.itimer = signal.ITIMER_VIRTUAL
        signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
        signal.setitimer(self.itimer, 0.3, 0.2)

        start_time = time.monotonic()
        while time.monotonic() - start_time < 60.0:
            # use up some virtual time by doing real work
            _ = pow(12345, 67890, 10000019)
            if signal.getitimer(self.itimer) == (0.0, 0.0):
                break  # sig_vtalrm handler stopped this itimer
        else:  # Issue 8424
            self.skipTest("timeout: likely cause: machine too slow or load too "
                          "high")

        # virtual itimer should be (0.0, 0.0) now
        self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
        # and the handler should have been called
        self.assertEqual(self.hndl_called, True)

    def test_itimer_prof(self):
        self.itimer = signal.ITIMER_PROF
        signal.signal(signal.SIGPROF, self.sig_prof)
        signal.setitimer(self.itimer, 0.2, 0.2)

        start_time = time.monotonic()
        while time.monotonic() - start_time < 60.0:
            # do some work
            _ = pow(12345, 67890, 10000019)
            if signal.getitimer(self.itimer) == (0.0, 0.0):
                break  # sig_prof handler stopped this itimer
        else:  # Issue 8424
            self.skipTest("timeout: likely cause: machine too slow or load too "
                          "high")

        # profiling itimer should be (0.0, 0.0) now
        self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
        # and the handler should have been called
        self.assertEqual(self.hndl_called, True)

    def test_setitimer_tiny(self):
        # bpo-30807: C setitimer() takes a microsecond-resolution interval.
        # Check that float -> timeval conversion doesn't round
        # the interval down to zero, which would disable the timer.
        self.itimer = signal.ITIMER_REAL
        signal.setitimer(self.itimer, 1e-6)
        time.sleep(1)
        self.assertEqual(self.hndl_called, True)
ItimerTest
python
chroma-core__chroma
chromadb/api/types.py
{ "start": 59896, "end": 59994 }
class ____: bool_inverted_index: Optional[BoolInvertedIndexType] = None @dataclass
BoolValueType
python
apache__airflow
providers/opsgenie/tests/unit/opsgenie/notifications/test_opsgenie.py
{ "start": 1084, "end": 5202 }
class ____:
    """Tests that the Opsgenie notifier forwards its payload verbatim to the hook."""

    @pytest.fixture(autouse=True)
    def setup_connections(self, create_connection_without_db):
        # Provide the default Opsgenie connection each test expects.
        create_connection_without_db(
            Connection(
                conn_id="opsgenie_default",
                conn_type="opsgenie",
                host="https://api.opsgenie.com/",
                password="eb243592-faa2-4ba2-a551q-1afdf565c889",
            )
        )

    # Input payload covering every supported alert field.
    _config = {
        "message": "An example alert message",
        "alias": "Life is too short for no alias",
        "description": "Every alert needs a description",
        "responders": [
            {"id": "4513b7ea-3b91-438f-b7e4-e3e54af9147c", "type": "team"},
            {"name": "NOC", "type": "team"},
            {"id": "bb4d9938-c3c2-455d-aaab-727aa701c0d8", "type": "user"},
            {"username": "trinity@opsgenie.com", "type": "user"},
            {"id": "aee8a0de-c80f-4515-a232-501c0bc9d715", "type": "escalation"},
            {"name": "Nightwatch Escalation", "type": "escalation"},
            {"id": "80564037-1984-4f38-b98e-8a1f662df552", "type": "schedule"},
            {"name": "First Responders Schedule", "type": "schedule"},
        ],
        "visible_to": [
            {"id": "4513b7ea-3b91-438f-b7e4-e3e54af9147c", "type": "team"},
            {"name": "rocket_team", "type": "team"},
            {"id": "bb4d9938-c3c2-455d-aaab-727aa701c0d8", "type": "user"},
            {"username": "trinity@opsgenie.com", "type": "user"},
        ],
        "actions": ["Restart", "AnExampleAction"],
        "tags": ["OverwriteQuietHours", "Critical"],
        "details": {"key1": "value1", "key2": "value2"},
        "entity": "An example entity",
        "source": "Airflow",
        "priority": "P1",
        "user": "Jesse",
        "note": "Write this down",
    }

    # The notifier is expected to pass the payload through unchanged.
    expected_payload_dict = {
        "message": _config["message"],
        "alias": _config["alias"],
        "description": _config["description"],
        "responders": _config["responders"],
        "visible_to": _config["visible_to"],
        "actions": _config["actions"],
        "tags": _config["tags"],
        "details": _config["details"],
        "entity": _config["entity"],
        "source": _config["source"],
        "priority": _config["priority"],
        "user": _config["user"],
        "note": _config["note"],
    }

    @mock.patch.object(OpsgenieAlertHook, "get_conn")
    def test_notifier(self, mock_opsgenie_alert_hook):
        notifier = send_opsgenie_notification(payload=self._config)
        notifier.notify({})
        # The hook's create_alert must receive the exact payload.
        args, _ = mock_opsgenie_alert_hook.return_value.create_alert.call_args
        assert args[0] == self.expected_payload_dict

    @mock.patch.object(OpsgenieAlertHook, "get_conn")
    def test_notifier_with_notifier_class(self, mock_opsgenie_alert_hook):
        notifier = OpsgenieNotifier(payload=self._config)
        notifier.notify({})
        args, _ = mock_opsgenie_alert_hook.return_value.create_alert.call_args
        assert args[0] == self.expected_payload_dict

    @mock.patch.object(OpsgenieAlertHook, "get_conn")
    def test_notifier_templated(self, mock_opsgenie_alert_hook, create_dag_without_db):
        dag_id = "test_notifier"

        # Only these fields support Jinja templating.
        template_fields = ("message", "alias", "description", "entity", "priority", "note")

        # Append a template expression to every templatable field...
        templated_config = {}
        for key, value in self._config.items():
            if key in template_fields:
                templated_config[key] = value + " {{dag.dag_id}}"
            else:
                templated_config[key] = value

        # ...and expect it rendered with the actual dag_id after notify().
        templated_expected_payload_dict = {}
        for key, value in self.expected_payload_dict.items():
            if key in template_fields:
                templated_expected_payload_dict[key] = value + f" {dag_id}"
            else:
                templated_expected_payload_dict[key] = value

        notifier = OpsgenieNotifier(payload=templated_config)
        notifier({"dag": create_dag_without_db(dag_id=dag_id)})
        args, _ = mock_opsgenie_alert_hook.return_value.create_alert.call_args
        assert args[0] == templated_expected_payload_dict
TestOpsgenieNotifier
python
django__django
tests/select_related_regress/models.py
{ "start": 1976, "end": 2078 }
class ____(Client): value = models.IntegerField() # Some model inheritance exercises
SpecialClient
python
huggingface__transformers
src/transformers/models/udop/modeling_udop.py
{ "start": 38347, "end": 43111 }
class ____(nn.Module, ABC):
    """
    Base class of relative biases.

    Args:
        num_heads (`int`):
            Number of attention heads in the model, it will create embeddings of size `num_heads`, which will be added to the scores of each token pair.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            Pair token metric (distance in the sequence, distance in pixels etc.) will be bucketed, parameter is defining number of such buckets.
        bidirectional (`bool`, *optional*, defaults to `True`):
            Whether the distance should be bidirectional for a pair of tokens. If `False`, then distance(tok1, tok2) == distance(tok2, tok1).
        scaling_factor (`int`, *optional*, defaults to 1):
            Defining factor which will be used to scale relative distance.
        max_distance (`int`, *optional*, defaults to 128):
            All distances above this value will end up in the one/same bucket.
        augmentation (`bool`, *optional*, defaults to `False`):
            Whether to multiply relative distances by a random scalar.
        expand (`bool`, *optional*, defaults to `False`):
            Whether to expand an existing pretrained model with subsequent additions of prefix_bucket.
    """

    def __init__(
        self,
        num_heads=None,
        relative_attention_num_buckets=32,
        bidirectional=True,
        scaling_factor=1,
        max_distance=128,
        level="tokens",
        augmentation=False,
        prefix_bucket=False,
        expand=False,
    ):
        super().__init__()
        self.prefix_bucket = prefix_bucket
        self.augmentation = augmentation
        self.level = level
        self.max_distance = max_distance
        self.scaling_factor = scaling_factor
        self.bidirectional = bidirectional
        self.num_heads = num_heads
        self.expand = expand
        self.relative_attention_num_buckets = relative_attention_num_buckets
        # Two extra embedding rows are reserved for prefix<->suffix buckets,
        # unless we are expanding a pretrained model (handled lazily in forward).
        extra_head = 2 if prefix_bucket and not self.expand else 0
        self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets + extra_head, self.num_heads)

    @abstractmethod
    def prepare_input(
        self,
        attention_mask: Optional[Tensor] = None,
        bbox: Optional[dict[str, Any]] = None,
    ) -> Tensor:
        # Subclasses produce the raw relative-position tensor to be bucketed.
        pass

    def get_bucket(self, attention_mask: Optional[Tensor] = None, bbox: Optional[dict[str, Any]] = None) -> Tensor:
        relative_position = self.prepare_input(attention_mask, bbox)
        rp_bucket: Tensor = get_relative_position_bucket(
            relative_position,
            bidirectional=self.bidirectional,
            num_buckets=self.relative_attention_num_buckets,
            max_distance=self.max_distance,
        )
        return rp_bucket

    def get_relative_position(self, positions):
        context_position = positions[:, :, None]
        memory_position = positions[:, None, :]
        relative_position = memory_position - context_position
        # Optional training-time jitter of the distances.
        if self.augmentation and self.training:
            relative_position *= random.uniform(*AUGMENTATION_RANGE)
        relative_position *= self.scaling_factor
        return relative_position.to(torch.long)

    def forward(self, attention_mask: Optional[Tensor] = None, bbox: Optional[dict[str, Any]] = None) -> Tensor:
        # re-using pretrained model with subsequent addition of prefix_bucket
        if self.expand and self.prefix_bucket:
            new_bias = nn.Embedding(self.relative_attention_num_buckets + 2, self.num_heads)
            new_bias.weight.data[: self.relative_attention_num_buckets] = self.relative_attention_bias.weight.data
            new_bias.weight.data[self.relative_attention_num_buckets :] = 0.1
            self.relative_attention_bias = new_bias
            # One-shot expansion: don't repeat on subsequent forward passes.
            self.expand = False

        rp_bucket = self.get_bucket(attention_mask, bbox)

        if self.prefix_bucket:
            if rp_bucket.size(0) == 1 and attention_mask.size(0) > 1:
                rp_bucket = rp_bucket.repeat(attention_mask.size(0), 1, 1)
            # based on assumption that prefix bboxes are negative
            is_prefix = bbox[:, :, 1] < 0
            num_prefix = is_prefix.sum(-1)
            # Route prefix->suffix and suffix->prefix pairs to the two
            # dedicated extra buckets appended after the regular ones.
            for idx, num_prefix_row in enumerate(num_prefix.cpu().numpy()):
                rp_bucket[idx, :num_prefix_row, num_prefix_row:] = self.relative_attention_num_buckets
                rp_bucket[idx, num_prefix_row:, :num_prefix_row] = self.relative_attention_num_buckets + 1

        values: Tensor = self.relative_attention_bias(rp_bucket)
        if values.dim() != 4:
            raise ValueError("Wrong dimension of values tensor")
        values = values.permute([0, 3, 1, 2])

        return values
RelativePositionBiasBase
python
doocs__leetcode
solution/2100-2199/2161.Partition Array According to Given Pivot/Solution.py
{ "start": 0, "end": 318 }
class ____: def pivotArray(self, nums: List[int], pivot: int) -> List[int]: a, b, c = [], [], [] for x in nums: if x < pivot: a.append(x) elif x == pivot: b.append(x) else: c.append(x) return a + b + c
Solution
python
pytorch__pytorch
benchmarks/operator_benchmark/pt/add_test.py
{ "start": 542, "end": 1508 }
class ____(op_bench.TorchBenchmarkBase): def init(self, M, N, K, device): self.inputs = { "input_one": torch.rand( M, N, K, device=device, requires_grad=self.auto_set() ), "input_two": torch.rand( M, N, K, device=device, requires_grad=self.auto_set() ), } self.set_module_name("add") def forward(self, input_one, input_two): return torch.add(input_one, input_two) # The generated test names based on add_short_configs will be in the following pattern: # add_M8_N16_K32_devicecpu # add_M8_N16_K32_devicecpu_bwdall # add_M8_N16_K32_devicecpu_bwd1 # add_M8_N16_K32_devicecpu_bwd2 # ... # Those names can be used to filter tests. op_bench.generate_pt_test(add_long_configs + add_short_configs, AddBenchmark) op_bench.generate_pt_gradient_test(add_long_configs + add_short_configs, AddBenchmark) """Mircobenchmark for addr operator."""
AddBenchmark
python
more-itertools__more-itertools
tests/test_more.py
{ "start": 128383, "end": 129777 }
class ____(TestCase): def test_empty(self): # empty iterable -> empty list self.assertEqual(list(mi.circular_shifts([])), []) def test_simple_circular_shifts(self): # test the a simple iterator case self.assertEqual( list(mi.circular_shifts(range(4))), [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)], ) def test_duplicates(self): # test non-distinct entries self.assertEqual( list(mi.circular_shifts([0, 1, 0, 1])), [(0, 1, 0, 1), (1, 0, 1, 0), (0, 1, 0, 1), (1, 0, 1, 0)], ) def test_steps_positive(self): actual = list(mi.circular_shifts(range(5), steps=2)) expected = [ (0, 1, 2, 3, 4), (2, 3, 4, 0, 1), (4, 0, 1, 2, 3), (1, 2, 3, 4, 0), (3, 4, 0, 1, 2), ] self.assertEqual(actual, expected) def test_steps_negative(self): actual = list(mi.circular_shifts(range(5), steps=-2)) expected = [ (0, 1, 2, 3, 4), (3, 4, 0, 1, 2), (1, 2, 3, 4, 0), (4, 0, 1, 2, 3), (2, 3, 4, 0, 1), ] self.assertEqual(actual, expected) def test_steps_zero(self): with self.assertRaises(ValueError): list(mi.circular_shifts(range(5), steps=0))
CircularShiftsTests
python
tensorflow__tensorflow
tensorflow/python/ops/distributions/exponential.py
{ "start": 5023, "end": 5752 }
class ____(Exponential): """Exponential with softplus transform on `rate`.""" @deprecation.deprecated( "2019-01-01", "Use `tfd.Exponential(tf.nn.softplus(rate)).", warn_once=True) def __init__(self, rate, validate_args=False, allow_nan_stats=True, name="ExponentialWithSoftplusRate"): parameters = dict(locals()) with ops.name_scope(name, values=[rate]) as name: super(ExponentialWithSoftplusRate, self).__init__( rate=nn.softplus(rate, name="softplus_rate"), validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=name) self._parameters = parameters
ExponentialWithSoftplusRate
python
kamyu104__LeetCode-Solutions
Python/count-the-number-of-incremovable-subarrays-i.py
{ "start": 681, "end": 1159 }
class ____(object): def incremovableSubarrayCount(self, nums): """ :type nums: List[int] :rtype: int """ return sum((left == 0 or right == len(nums)-1 or nums[left-1] < nums[right+1]) and all(nums[i] < nums[i+1] for i in xrange(left-1)) and all(nums[i] < nums[i+1] for i in xrange(right+1, len(nums)-1)) for left in xrange(len(nums)) for right in xrange(left, len(nums)))
Solution2
python
encode__django-rest-framework
tests/test_serializer.py
{ "start": 22186, "end": 23399 }
class ____: def setup_method(self): class ExampleSerializer(serializers.Serializer): char = serializers.CharField(default='abc') integer = serializers.IntegerField() self.Serializer = ExampleSerializer def test_default_should_included_on_create(self): serializer = self.Serializer(data={'integer': 456}) assert serializer.is_valid() assert serializer.validated_data == {'char': 'abc', 'integer': 456} assert serializer.errors == {} def test_default_should_be_included_on_update(self): instance = MockObject(char='def', integer=123) serializer = self.Serializer(instance, data={'integer': 456}) assert serializer.is_valid() assert serializer.validated_data == {'char': 'abc', 'integer': 456} assert serializer.errors == {} def test_default_should_not_be_included_on_partial_update(self): instance = MockObject(char='def', integer=123) serializer = self.Serializer(instance, data={'integer': 456}, partial=True) assert serializer.is_valid() assert serializer.validated_data == {'integer': 456} assert serializer.errors == {}
TestDefaultInclusions
python
apache__airflow
airflow-core/src/airflow/timetables/base.py
{ "start": 2711, "end": 3502 }
class ____(NamedTuple): """ Restriction on when a DAG can be scheduled for a run. Specifically, the run must not be earlier than ``earliest``, nor later than ``latest``. If ``catchup`` is *False*, the run must also not be earlier than the current time, i.e. "missed" schedules are not backfilled. These values are generally set on the DAG or task's ``start_date``, ``end_date``, and ``catchup`` arguments. Both ``earliest`` and ``latest``, if not *None*, are inclusive; a DAG run can happen exactly at either point of time. They are guaranteed to be aware (i.e. contain timezone information) for ``TimeRestriction`` instances created by Airflow. """ earliest: DateTime | None latest: DateTime | None catchup: bool
TimeRestriction
python
PyCQA__isort
isort/exceptions.py
{ "start": 6498, "end": 7008 }
class ____(ISortError): """Raised when isort encounters an import that matches a section that is not defined""" def __init__(self, import_module: str, section: str): super().__init__( f"Found {import_module} import while parsing, but {section} was not included " "in the `sections` setting of your config. Please add it before continuing\n" "See https://pycqa.github.io/isort/#custom-sections-and-ordering " "for more info." )
MissingSection
python
readthedocs__readthedocs.org
readthedocs/search/faceted_search.py
{ "start": 9213, "end": 14131 }
class ____(RTDFacetedSearch): facets = { "project": TermsFacet(field="project"), } doc_types = [PageDocument] index = PageDocument._index._name # boosting for these fields need to be close enough # to be re-boosted by the page rank. _outer_fields = ["title^1.5"] _section_fields = ["sections.title^2", "sections.content"] fields = _outer_fields excludes = ["rank", "sections", "commit", "build"] def _get_projects_query(self): """ Get filter by projects query. If it's a dict, filter by project and version, if it's a list filter by project. """ if not self.projects: return None if isinstance(self.projects, dict): versions_query = [ Bool(must=[Term(project=project), Term(version=version)]) for project, version in self.projects.items() ] return Bool(should=versions_query) if isinstance(self.projects, list): return Terms(project=self.projects) raise ValueError("projects must be a list or a dict!") def query(self, search, query): """ Manipulates the query to support nested queries and a custom rank for pages. If `self.projects` was given, we use it to filter the documents that match the same project and version. 
""" search = search.highlight_options(**self._highlight_options) search = search.source(excludes=self.excludes) queries = self._get_queries( query=query, fields=self.fields, ) sections_nested_query = self._get_nested_query( query=query, path="sections", fields=self._section_fields, limit=3, ) queries.append(sections_nested_query) bool_query = Bool(should=queries) projects_query = self._get_projects_query() if projects_query: bool_query = Bool(must=[bool_query], filter=projects_query) final_query = FunctionScore( query=bool_query, script_score=self._get_script_score(), ) search = search.query(final_query) return search def _get_nested_query(self, *, query, path, fields, limit=3): """Generate a nested query with passed parameters.""" queries = self._get_queries( query=query, fields=fields, ) bool_query = Bool(should=queries) raw_fields = [ # Remove boosting from the field re.sub(r"\^.*$", "", field) for field in fields ] highlight = dict( self._highlight_options, fields={field: {} for field in raw_fields}, ) return Nested( path=path, inner_hits={"highlight": highlight, "size": limit}, query=bool_query, ) def _get_script_score(self): """ Gets an ES script to map the page rank to a valid score weight. ES expects the rank to be a number greater than 0, but users can set this between [-10, +10]. We map that range to [0.01, 2] (21 possible values). The first lower rank (0.8) needs to bring the score from the highest boost (sections.title^2) close to the lowest boost (title^1.5), that way exact results take priority: - 2.0 * 0.8 = 1.6 (score close to 1.5, but not lower than it) - 1.5 * 0.8 = 1.2 (score lower than 1.5) The first higher rank (1.2) needs to bring the score from the lowest boost (title^1.5) close to the highest boost (sections.title^2), that way exact results take priority: - 2.0 * 1.3 = 2.6 (score higher thank 2.0) - 1.5 * 1.3 = 1.95 (score close to 2.0, but not higher than it) The next lower and higher ranks need to decrease/increase both scores. 
See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-score-query.html#field-value-factor # noqa """ ranking = [ 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 1.93, 1.96, 2, ] # Each rank maps to a element in the ranking list. # -10 will map to the first element (-10 + 10 = 0) and so on. source = """ int rank = doc['rank'].size() == 0 ? 0 : (int) doc['rank'].value; return params.ranking[rank + 10] * _score; """ return { "script": { "source": source, "params": {"ranking": ranking}, }, }
PageSearch
python
pytorch__pytorch
torch/_dynamo/variables/misc.py
{ "start": 74830, "end": 75106 }
class ____(ConstantLikeVariable): _error_prefix = "torch.__version__" def __init__(self, **kwargs) -> None: kwargs.setdefault("value", torch.__version__) assert kwargs["value"] is torch.__version__ super().__init__(**kwargs)
TorchVersionVariable
python
tensorflow__tensorflow
tensorflow/python/util/lock_util_test.py
{ "start": 875, "end": 1838 }
class ____(test.TestCase, parameterized.TestCase): @parameterized.parameters(1, 2, 3, 5, 10) def testGroups(self, num_groups): lock = lock_util.GroupLock(num_groups) num_threads = 10 finished = set() def thread_fn(thread_id): time.sleep(random.random() * 0.1) group_id = thread_id % num_groups with lock.group(group_id): time.sleep(random.random() * 0.1) self.assertGreater(lock._group_member_counts[group_id], 0) for g, c in enumerate(lock._group_member_counts): if g != group_id: self.assertEqual(0, c) finished.add(thread_id) threads = [ self.checkedThread(target=thread_fn, args=(i,)) for i in range(num_threads) ] for i in range(num_threads): threads[i].start() for i in range(num_threads): threads[i].join() self.assertEqual(set(range(num_threads)), finished) if __name__ == "__main__": test.main()
GroupLockTest
python
google__pytype
pytype/tests/test_enums.py
{ "start": 108, "end": 40653 }
class ____(test_base.BaseTest): """Tests the overlay.""" def test_can_import_module_members(self): self.Check(""" import enum enum.Enum enum.IntEnum enum.IntFlag enum.Flag enum.unique enum.auto """) def test_create_basic_enum(self): self.Check(""" import enum class Colors(enum.Enum): RED = 1 GREEN = 2 BLUE = 3 _ = (Colors.RED, Colors.GREEN, Colors.BLUE) _ = Colors.RED.name _ = Colors.RED.value """) def test_output_basic_enum(self): ty = self.Infer(""" import enum class Colors(enum.Enum): RED = 1 GREEN = 2 BLUE = 3 """) self.assertTypesMatchPytd( ty, """ import enum from typing import Literal class Colors(enum.Enum): BLUE: Literal[3] GREEN: Literal[2] RED: Literal[1] """, ) def test_access_members_and_values(self): self.CheckWithErrors(""" import enum class Colors(enum.Enum): RED = 1 GREEN = 2 BLUE = 3 ### Will pass: assert_type(Colors.RED.value, "int") assert_type(Colors.BLUE, "Colors") ### Will fail: assert_type(Colors.RED, "int") # assert-type assert_type(Colors.GREEN.value, "Colors") # assert-type """) def test_sunderscore_name_value(self): self.Check(""" import enum class M(enum.Enum): A = 1 assert_type(M.A._name_, str) assert_type(M.A._value_, int) def f(m: M): assert_type(m._name_, str) assert_type(m._value_, int) """) def test_sunderscore_name_value_pytd(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum class M(enum.Enum): A: int """, ) self.Check( """ from typing import Any import foo assert_type(foo.M.A._name_, str) assert_type(foo.M.A._value_, int) def f(m: foo.M): assert_type(m._name_, str) assert_type(m._value_, Any) """, pythonpath=[d.path], ) def test_basic_enum_from_pyi(self): with test_utils.Tempdir() as d: d.create_file( "e.pyi", """ import enum class Colors(enum.Enum): RED: int BLUE: int GREEN: int """, ) ty = self.Infer( """ import e c = e.Colors.RED n = e.Colors.BLUE.name v = e.Colors.GREEN.value """, pythonpath=[d.path], ) self.assertTypesMatchPytd( ty, """ import e c: e.Colors n: str v: int """, ) def 
test_enum_from_pyi_recur(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum class Recur(enum.Enum): A: Recur """, ) self.Check( """ import foo Recur = foo.Recur """, pythonpath=[d.path], ) def test_canonical_enum_members(self): # Checks that enum members created by instantiate() behave similarly to # real enum members. with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum class F(enum.Enum): X: int """, ) self.Check( """ import enum from foo import F class M(enum.Enum): A = 1 def get_name(x: M) -> str: return x.name def get_pyi_name(x: F) -> str: return x.name def get_value(x: M) -> int: return x.value def get_pyi_value(x: F) -> int: return x.value """, pythonpath=[d.path], ) def test_pytd_returns_enum(self): # Ensure that canonical enums created by PytdSignature.instantiate_return # have name and value fields. with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum class M(enum.Enum): A: int def get_m(name: str) -> M: ... """, ) self.Check( """ import foo def print_m(name: str): print(foo.get_m(name).name) print(foo.get_m(name).value) """, pythonpath=[d.path], ) def test_name_value_overlap(self): # Make sure enum members named "name" and "value" work correctly. self.Check(""" import enum class M(enum.Enum): name = 1 value = "hello" assert_type(M.name, "M") assert_type(M.name.name, "str") assert_type(M.name.value, "int") assert_type(M.value, "M") assert_type(M.value.name, "str") assert_type(M.value.value, "str") """) def test_name_value_overlap_pyi(self): # Make sure enum members named "name" and "value" work correctly. 
with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum class M(enum.Enum): name: int value: str """, ) self.Check( """ import foo assert_type(foo.M.name, "foo.M") assert_type(foo.M.name.name, "str") assert_type(foo.M.name.value, "int") assert_type(foo.M.value, "foo.M") assert_type(foo.M.value.name, "str") assert_type(foo.M.value.value, "str") """, pythonpath=[d.path], ) def test_name_lookup(self): with test_utils.Tempdir() as d: d.create_file("e.pyi", "a_string: str") self.CheckWithErrors( """ import enum import e class M(enum.Enum): A = 1 B = "b" assert_type(M["A"].value, "int") assert_type(M["B"].value, "str") assert_type(M[e.a_string].value, "Union[int, str]") _ = M["C"] # attribute-error """, pythonpath=[d.path], ) def test_name_lookup_pytd(self): with test_utils.Tempdir() as d: d.create_file( "e.pyi", """ import enum a_string: str class M(enum.Enum): A: int B: str """, ) self.CheckWithErrors( """ import e assert_type(e.M["A"].value, "int") assert_type(e.M["B"].value, "str") assert_type(e.M[e.a_string].value, "Any") _ = e.M["C"] # attribute-error """, pythonpath=[d.path], ) def test_name_lookup_from_canonical(self): # Canonical enum members should have non-atomic names. 
self.Check(""" import enum class M(enum.Enum): A = 1 def get(m: M): m = M[m.name] """) def test_bad_name_lookup(self): self.CheckWithErrors(""" import enum class M(enum.Enum): A = 1 M[1] # unsupported-operands """) def test_enum_named_name(self): self.Check(""" import enum class M(enum.Enum): name = 1 value = "hello" assert_type(M.name, "M") assert_type(M.name.name, "str") assert_type(M.name.value, "int") assert_type(M.value, "M") assert_type(M.value.name, "str") assert_type(M.value.value, "str") """) def test_enum_pytd_named_name(self): with test_utils.Tempdir() as d: d.create_file( "m.pyi", """ import enum class M(enum.Enum): name: int value: str """, ) self.Check( """ from m import M assert_type(M.name, "m.M") assert_type(M.name.name, "str") assert_type(M.name.value, "int") assert_type(M.value, "m.M") assert_type(M.value.name, "str") assert_type(M.value.value, "str") """, pythonpath=[d.path], ) def test_value_lookup(self): self.CheckWithErrors(""" import enum from typing import Union class M(enum.Enum): A = 1 assert_type(M(1), "M") assert_type(M(1).value, "int") assert_type(M(-500), "M") assert_type(M(M.A), "M") M("str") # wrong-arg-types class N(enum.Enum): A = 1 B = "str" assert_type(N(1), "N") assert_type(N("str"), "N") assert_type(N(499).value, "Union[int, str]") N(M.A) # wrong-arg-types """) def test_value_lookup_pytd(self): with test_utils.Tempdir() as d: d.create_file( "m.pyi", """ import enum class M(enum.Enum): A: int class N(enum.Enum): A: int B: str """, ) self.CheckWithErrors( """ from typing import Union from m import M, N assert_type(M(1), "m.M") assert_type(M(M.A), "m.M") # assert_type(M(1).value, "int") assert_type(M(-500), "m.M") M("str") # wrong-arg-types assert_type(N(1), "m.N") assert_type(N("str"), "m.N") # assert_type(N(499).value, "Union[int, str]") N(M.A) # wrong-arg-types """, pythonpath=[d.path], ) def test_value_lookup_no_members(self): self.Check(""" import enum class M(enum.Enum): pass x = M(1) """) def 
test_value_lookup_no_members_pytd(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum class M(enum.Enum): ... """, ) self.Check( """ import foo x = foo.M # to force foo.M to be loaded by the overlay. y = foo.M(1) """, pythonpath=[d.path], ) def test_reingest_literal_members(self): with self.DepTree([( "foo.py", """ import enum class A(enum.Enum): FOO = 1 BAR = 2 """, )]): self.Check(""" from typing import Literal from foo import A def f(x: Literal[1]): ... def g(x: int): a = A(x) # this should take a non-concrete int b = f(A.FOO.value) # this should preserve the concrete pyval """) @test_base.skip("Stricter equality disabled due to b/195136939") def test_enum_eq(self): # Note that this test only checks __eq__'s behavior. Though enums support # comparisons using `is`, pytype doesn't check `is` the same way as __eq__. self.Check(""" import enum class M(enum.Enum): A = 1 class N(enum.Enum): A = 1 # Boolean values indicate the expected result. if M.A == N.A: a = None else: a = False if M.A == M.A: b = True else: b = None if M["A"] == M.A: c = True else: c = None assert_type(a, "bool") assert_type(b, "bool") assert_type(c, "bool") """) @test_base.skip("Stricter equality disabled due to b/195136939") def test_enum_pytd_eq(self): with test_utils.Tempdir() as d: d.create_file( "m.pyi", """ import enum class M(enum.Enum): A: int class N(enum.Enum): A: int """, ) self.Check( """ from m import M, N # Boolean values indicate the expected result. 
if M.A == N.A: a = None else: a = False if M.A == M.A: b = True else: b = None if M["A"] == M.A: c = True else: c = None assert_type(a, "bool") assert_type(b, "bool") assert_type(c, "bool") """, pythonpath=[d.path], ) def test_metaclass_methods(self): self.CheckWithErrors(""" import enum class M(enum.Enum): A = 1 class N(enum.Enum): A = 1 # __contains__ M.A in M N.A in M 1 in M # unsupported-operands # __iter__ assert_type([e for e in M], "list[M]") # __len__ assert_type(len(M), "int") # __bool__ assert_type(bool(M), "bool") """) def test_pytd_metaclass_methods(self): with test_utils.Tempdir() as d: d.create_file( "m.pyi", """ import enum class M(enum.Enum): A: int """, ) self.CheckWithErrors( """ import enum from m import M class N(enum.Enum): A = 1 # __contains__ M.A in M N.A in M 1 in M # unsupported-operands # __iter__ assert_type([e for e in M], "list[m.M]") # __len__ assert_type(len(M), "int") # __bool__ assert_type(bool(M), "bool") """, pythonpath=[d.path], ) def test_functional_api(self): self.Check(""" import enum M = enum.Enum("M", "A, B") assert_type(M.B, "M") assert_type(M.B.value, "int") N = enum.Enum("N", ["A", "B"]) assert_type(N.B, "N") assert_type(N.B.value, "int") class Marker: pass O = enum.Enum("O", [("A", Marker()), ("B", Marker())]) assert_type(O.B, "O") assert_type(O.B.value, "Marker") P = enum.Enum("P", {"A": "a", "B": "b"}) assert_type(P.B, "P") assert_type(P.B.value, "str") """) def test_functional_api_empty_enum(self): # Empty enums can be extended (subclassed) so they can be used for the # functional api. self.Check(""" import enum class Pretty(enum.Enum): def __str__(self) -> str: return self.name.replace("_", " ").title() M = Pretty("M", "A B C") """) def test_functional_api_empty_pytd_enum(self): # Empty enums can be extended (subclassed) so they can be used for the # functional api. with test_utils.Tempdir() as d: d.create_file( "pretty.pyi", """ enum: module class Pretty(enum.Enum): def __str__(self) -> str: ... 
""", ) self.Check( """ from pretty import Pretty M = Pretty("M", "A B C") """, pythonpath=[d.path], ) def test_functional_api_errors(self): self.CheckWithErrors(""" import enum enum.Enum(1, "A") # wrong-arg-types enum.Enum("X", [1, 2]) # wrong-arg-types enum.Enum("X", object()) # wrong-arg-types enum.Enum("Y", "A", start="4") # wrong-arg-types """) def test_functional_api_no_constants(self): with test_utils.Tempdir() as d: d.create_file("m.pyi", "A: str") self.Check( """ import enum import m F = enum.Enum("F", [(m.A, m.A)]) for x in F: print(x) """, pythonpath=[d.path], ) def test_functional_api_intenum(self): # Technically, any subclass of Enum without any members can be used for the # functional API. This is annoying and hard to detect, but we should support # it for the other classes in the enum library. self.Check(""" import enum FI = enum.IntEnum("FI", ["A", "B", "C"]) assert_type(FI.A, FI) assert_type(FI.A.value, int) """) def test_functional_api_actually_lookup(self): # Sometimes a Type[Enum] will be called to lookup a value, which will go # to EnumBuilder.call instead of a specific EnumInstance.__new__. 
self.Check(""" import enum from typing import Type def just_a_lookup(name: str, category: Type[enum.Enum]): category(name) """) def test_auto_basic(self): self.Check(""" import enum class M(enum.Enum): A = enum.auto() assert_type(M.A, "M") assert_type(M.A.value, "int") """) def test_auto_mixed(self): self.Check(""" import enum class M(enum.Enum): A = "hello" B = enum.auto() assert_type(M.A.value, "str") assert_type(M.B.value, "int") """) def test_auto_generate_basic(self): self.Check(""" import enum class M(enum.Enum): def _generate_next_value_(name, start, count, last_values): return name A = enum.auto() assert_type(M.A, "M") assert_type(M.A.value, "str") """) def test_auto_generate_staticmethod(self): self.Check(""" import enum class M(enum.Enum): @staticmethod def _generate_next_value_(name, start, count, last_values): return name A = enum.auto() assert_type(M.A, "M") assert_type(M.A.value, "str") """) def test_auto_generate_error(self): self.CheckWithErrors(""" import enum class M(enum.Enum): def _generate_next_value_(name, start, count, last_values): return name + count # unsupported-operands A = enum.auto() """) def test_auto_generate_wrong_annots(self): self.CheckWithErrors(""" import enum class M(enum.Enum): # wrong-arg-types def _generate_next_value_(name: int, start: int, count: int, last_values: int): return name A = enum.auto() """) def test_auto_generate_from_pyi_base(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum class Base(enum.Enum): def _generate_next_value_(name: str, start: int, count: int, last_values: list) -> str: ... """, ) self.Check( """ import enum import foo class M(foo.Base): A = enum.auto() assert_type(M.A.value, "str") """, pythonpath=[d.path], ) def test_auto_generate_from_pyi_base_staticmethod(self): # It's possible that _generate_next_value_ will appear in a type stub as a # staticmethod. This should not change how pytype handles it. 
with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum class Base(enum.Enum): @staticmethod def _generate_next_value_(name: str, start: int, count: int, last_values: list) -> str: ... """, ) self.Check( """ import enum import foo class M(foo.Base): A = enum.auto() assert_type(M.A.value, "str") """, pythonpath=[d.path], ) def test_auto_pytd(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum class M(enum.Enum): A: int B: int def _generate_next_value_(name: str, start: int, count: int, last_values: list) -> str: ... """, ) self.Check( """ from typing import Callable from foo import M assert_type(M.A, "foo.M") assert_type(M.A.value, "int") assert_type(M.B.value, "int") assert_type(M._generate_next_value_, Callable[[str, int, int, list], str]) """, pythonpath=[d.path], ) def test_auto_flag(self): # Flag enums can be defined using bitwise ops, even when using auto. self.Check(""" from enum import auto, Flag class Color(Flag): RED = auto() BLUE = auto() GREEN = auto() WHITE = RED | BLUE | GREEN assert_type(Color.RED, Color) assert_type(Color.BLUE, Color) assert_type(Color.GREEN, Color) assert_type(Color.WHITE, Color) assert_type(Color.RED.value, int) assert_type(Color.BLUE.value, int) assert_type(Color.GREEN.value, int) assert_type(Color.WHITE.value, int) """) def test_subclassing_simple(self): self.Check(""" import enum class Base(enum.Enum): pass class M(Base): A = 1 assert_type(M.A, "M") assert_type(M.A.name, "str") assert_type(M.A.value, "int") assert_type(M["A"], "M") assert_type(M(1), "M") """) def test_subclassing_pytd_simple(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum class Base(enum.Enum): ... 
class M(Base): A: int """, ) self.Check( """ from foo import M assert_type(M.A, "foo.M") assert_type(M.A.name, "str") assert_type(M.A.value, "int") assert_type(M["A"], "foo.M") assert_type(M(1), "foo.M") """, pythonpath=[d.path], ) def test_subclassing_pytd_cross_file(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum class Base(enum.Enum): ... """, ) self.Check( """ from foo import Base class M(Base): A = 1 assert_type(M.A, "M") assert_type(M.A.name, "str") assert_type(M.A.value, "int") assert_type(M["A"], "M") assert_type(M(1), "M") """, pythonpath=[d.path], ) def test_subclassing_base_types(self): self.Check(""" import enum class Base(enum.Enum): pass class M(Base): A = 1 assert_type(M.A, M) assert_type(M.A.value, int) class F(float, enum.Enum): A = 1 assert_type(F.A.value, float) class C(complex, enum.Enum): pass class C2(C): A = 1 assert_type(C2.A.value, complex) class D(str, enum.Enum): pass class D1(D): pass class D2(D1): A = 1 assert_type(D2.A.value, str) class X(D): def _generate_next_value(n, s, c, l): return float(c) A = enum.auto() assert_type(X.A.value, str) """) def test_subclassing_base_types_pyi(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum class NoBase(enum.Enum): ... class StrBase(str, enum.Enum): ... class OnceRemoved(StrBase): ... 
""", ) self.Check( """ import enum import foo class M(foo.NoBase): A = 1 assert_type(M.A.value, int) class N(float, foo.NoBase): A = 1 assert_type(N.A.value, float) class O(foo.NoBase): pass class O2(O): A = 1 assert_type(O2.A.value, int) class P(foo.StrBase): A = 1 assert_type(P.A.value, str) class Q(foo.StrBase): pass class Q2(Q): A = 1 assert_type(Q2.A.value, str) class R(foo.OnceRemoved): A = 1 assert_type(R.A.value, str) class Y(foo.StrBase): def _generate_next_value(n, s, c, l): return float(c) A = enum.auto() assert_type(Y.A.value, str) """, pythonpath=[d.path], ) def test_base_types(self): self.CheckWithErrors(""" import enum from typing import Tuple class T(tuple, enum.Enum): A = (1, 2) assert_type(T.A.value, Tuple[int, ...]) class S(str, enum.Enum): # wrong-arg-types A = (1, 2) """) def test_submeta(self): self.Check(""" import enum from typing import Any class Custom(enum.EnumMeta): pass class Base(enum.Enum, metaclass=Custom): pass class M(Base): A = 1 # Ideally, this would be "M" and "int", but M is a dynamic enum. assert_type(M.A, Any) assert_type(M.A.value, Any) def take_m(m: M): print(m.value) """) def test_submeta_withmetaclass(self): # Ensure six.with_metaclass works with enums, even with a custom metaclass. self.Check(""" import enum import six from typing import Any class Custom(enum.EnumMeta): pass class C(six.with_metaclass(Custom, enum.Enum)): pass class C2(C): A = 1 # Ideally, this would be "C2" and "int", but C2 is a dynamic enum. 
assert_type(C2.A, Any) assert_type(C2.A.value, Any) def print_c(c: C): print(c.value) """) @test_base.skip("Fails due to __getattr__ in pytd.") def test_dynamic_attributes(self): self.CheckWithErrors(""" import enum class Normal(enum.Enum): A = 1 Normal.B # attribute-error class Custom(enum.EnumMeta): def __new__(cls, name, bases, dct): for name in ["FOO", "BAR", "QUUX"]: dct[name] = name return super().__new__(cls, name, bases, dct) class Yes(enum.Enum, metaclass=Custom): A = 1 Yes.B """) def test_dynamic_attributes_pytd(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum class Normal: A: int class Custom(enum.EnumMeta): def __new__(cls, name, bases, dct): ... class Yes(enum.Enum, metaclass=Custom): A: int """, ) self.CheckWithErrors( """ import foo foo.Normal.B # attribute-error foo.Yes.B """, pythonpath=[d.path], ) def test_typical_subclassed_meta(self): # The typical pattern when subclassing EnumMeta is to create a base enum # using that metaclass, then subclass that enum in other files. # In this case, all enums that have the custom metaclass should be dynamic. 
with test_utils.Tempdir() as d: d.create_file( "base_enum.pyi", """ import enum class CustomMeta(enum.EnumMeta): pass class Base(enum.Enum, metaclass=CustomMeta): pass """, ) self.Check( """ import base_enum class M(base_enum.Base): A = 1 M.A M.B base_enum.Base.A """, pythonpath=[d.path], ) def test_intenum_basic(self): self.Check(""" import enum class I(enum.IntEnum): A = 1 """) def test_flag_basic(self): self.Check(""" import enum class F(enum.Flag): A = 1 """) def test_intflag_basic(self): self.Check(""" import enum class IF(enum.IntFlag): A = 1 """) def test_strenum(self): self.Check(""" import enum class MyEnum(enum.StrEnum): A = 'A' for x in MyEnum: assert_type(x, MyEnum) """) def test_unique_enum_in_dict(self): # Regression test for a recursion error in matcher.py self.assertNoCrash( self.Check, """ import enum from typing import Dict, Generic, TypeVar Feature = enum.unique(enum.Enum) F = TypeVar('F', bound=Feature) class Features(Dict[F, bool], Generic[F]): def __setitem__(self, feature: F, on: bool): super(Features, self).__setitem__(feature, on) class _FeaturesParser(Generic[F]): def parse(self) -> Features[F]: result = Features() result[Feature('')] = True return result """, ) def test_if_statement(self): # See b/195136939 self.Check(""" import enum class M(enum.Enum): A = 1 B = 2 def f(m: M) -> int: if m == M.A: x = 1 elif m == M.B: x = 2 else: x = 3 return x + 1 class A: def __init__(self, m: M): if m == M.A: self._x = 1 elif m == M.B: self._x = 2 def do(self): return self._x + 1 """) def test_own_init_simple(self): self.Check(""" from enum import Enum class M(Enum): A = 1 def __init__(self, a): self._value_ = str(a + self._value_) assert_type(M.A, M) assert_type(M.A.value, str) """) def test_own_init_tuple_value(self): # https://docs.python.org/3/library/enum.html#planet self.Check(""" from enum import Enum from typing import Tuple class Planet(Enum): MERCURY = (3.303e+23, 2.4397e6) VENUS = (4.869e+24, 6.0518e6) EARTH = (5.976e+24, 6.37814e6) MARS = 
(6.421e+23, 3.3972e6) JUPITER = (1.9e+27, 7.1492e7) SATURN = (5.688e+26, 6.0268e7) URANUS = (8.686e+25, 2.5559e7) NEPTUNE = (1.024e+26, 2.4746e7) def __init__(self, mass, radius): self.mass = mass # in kilograms self.radius = radius # in meters @property def surface_gravity(self): # universal gravitational constant (m3 kg-1 s-2) G = 6.67300E-11 return G * self.mass / (self.radius * self.radius) assert_type(Planet.EARTH, Planet) assert_type(Planet.EARTH.name, str) assert_type(Planet.EARTH.value, Tuple[float, float]) assert_type(Planet.EARTH.mass, float) assert_type(Planet.EARTH.radius, float) assert_type(Planet.EARTH.surface_gravity, float) """) def test_own_init_canonical(self): self.Check(""" import enum class Protocol(enum.Enum): ssh = 22 def __init__(self, port_number): self.port_number = port_number def get_port(protocol: str) -> int: return Protocol[protocol].port_number """) def test_own_init_errors(self): self.CheckWithErrors(""" import enum class X(enum.Enum): # missing-parameter A = 1 def __init__(self, a, b, c): self.x = a + b + c """) def test_own_new_with_base_type(self): self.Check(""" import enum class M(str, enum.Enum): def __new__(cls, value, a, b, c, d): obj = str.__new__(cls, [value]) obj._value_ = value obj.a = a obj.b = b obj.c = c obj.d = d return obj A = ('a', 1, 2, 3, 4) B = ('b', 2, 3, 4, 5) def lookup(m: M): m = M(m) """) def test_own_member_new(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum from typing import Annotated, Any, Type, TypeVar _TOrderedEnum = TypeVar('_TOrderedEnum', bound=OrderedEnum) class OrderedEnum(enum.Enum): _pos: Annotated[int, 'property'] @classmethod def __new_member__(cls: Type[_TOrderedEnum], value: Any) -> _TOrderedEnum: ... 
""", ) self.Check( """ import foo class Stage(foo.OrderedEnum): DEMAND = 1 QUOTA = 2 AGGREGATION = 3 HEADROOM = 4 ORDER = 5 """, pythonpath=[d.path], ) def test_dynamic_base_enum(self): self.Check(""" import enum class DynBase(enum.Enum): _HAS_DYNAMIC_ATTRIBUTES = True class M(DynBase): A = 1 M.B """) def test_dynamic_base_enum_pyi(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum class DynBase(enum.Enum): _HAS_DYNAMIC_ATTRIBUTES = True """, ) self.Check( """ import foo class M(foo.DynBase): A = 1 M.B """, pythonpath=[d.path], ) def test_instance_attrs_property_output(self): ty = self.Infer(""" import enum class M(enum.Enum): A = 1 def __init__(self, val): self.str_v = str(val) @property def combo(self) -> str: return f"{self.str_v}+{self.value}" """) self.assertTypesMatchPytd( ty, """ import enum from typing import Annotated, Literal class M(enum.Enum): A: Literal[1] combo: Annotated[str, 'property'] str_v: Annotated[str, 'property'] def __init__(self, val) -> None: ... """, ) def test_instance_attrs_property_input(self): # Instance attributes are marked using @property. with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum from typing import Annotated class Fn(enum.Enum): A: int @property def x(self) -> str: ... class NoFn(enum.Enum): A: int x: Annotated[str, 'property'] """, ) self.Check( """ import foo assert_type(foo.Fn.A.value, int) assert_type(foo.Fn.A.x, str) assert_type(foo.NoFn.A.value, int) assert_type(foo.NoFn.A.x, str) # These should be attribute errors but pytype does not differentiate # between class and instance attributes for PyTDClass. foo.Fn.x foo.NoFn.x """, pythonpath=[d.path], ) def test_instance_attrs_canonical(self): # Test that canonical instances have instance attributes. 
with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import enum from typing import Annotated class F(enum.Enum): A: str x = Annotated[int, 'property'] """, ) self.Check( """ import enum import foo class M(enum.Enum): A = 'a' @property def x(self) -> int: return 1 def take_f(f: foo.F): return f.x def take_m(m: M): return m.x """, pythonpath=[d.path], ) def test_instance_attrs_self_referential(self): self.Check(""" from dataclasses import dataclass from enum import Enum from typing import Optional @dataclass class O: thing: Optional["Thing"] = None class Thing(Enum): A = O() def __init__(self, o: O): self.b = o.thing """) def test_enum_bases(self): self.CheckWithErrors(""" import enum class BadBaseOrder(enum.Enum, int): # base-class-error A = 1 """) def test_multiple_value_bindings(self): self.Check(""" import enum class M(str, enum.Enum): A = (__any_object__ or '') + "1" """) def test_classvar_attributes_out(self): ty = self.Infer(""" import enum class M(enum.Enum): A = 1 M.class_attr = 2 """) self.assertTypesMatchPytd( ty, """ import enum from typing import ClassVar, Literal class M(enum.Enum): A: Literal[1] class_attr: ClassVar[int] """, ) def test_classvar_attributes_in(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ from typing import ClassVar import enum class M(enum.Enum): A: int class_attr: ClassVar[int] """, ) self.Check( """ from foo import M assert_type(M.A, M) assert_type(M.class_attr, int) assert_type(M.A.class_attr, int) """, pythonpath=[d.path], ) def test_namedtuple_base_type(self): # This is a fun case for the base type. The value for A is an Item, but # the enum has a base type, which is also Item. # When there's a base type, the enum std lib calls the base's `__new__` to # create the values for the enums. So this looks like it should fail, except # __new__ is called like: Item.__new__(Item, *value). # Since value is a NamedTuple, it unpacks cleanly and a new Item is made. 
# So this test basically checks that the enum overlay correctly prepares # value as an argument to __new__. self.Check(""" import enum from typing import NamedTuple class Item(NamedTuple): x: int class M(Item, enum.Enum): A = Item(1) """) def test_mixin_base_type(self): # Don't try to use a base class as a base_type if it's actually a mixin. self.Check(""" import enum class Token: name: str value: str def __str__(self) -> str: return self.value # Each member of M is a Token, but Token.__init__ is never called. class M(Token, enum.Enum): A = "hello" def take_token(t: Token) -> str: return str(t) take_token(M.A) assert_type(M.A.value, str) """) def test_valid_members_functions(self): self.Check(""" import enum from typing import Any, Callable class M(enum.Enum): A = lambda x: x B = 1 assert_type(M.A, Callable[[Any], Any]) assert_type(M.B, M) """) def test_valid_members_pytd_functions(self): with test_utils.Tempdir() as d: d.create_file("foo.pyi", "def a(x) -> None: ...") self.Check( """ import enum from typing import Any, Callable import foo class M(enum.Enum): A = foo.a B = 1 assert_type(M.A, Callable[[Any], None]) assert_type(M.B, M) """, pythonpath=[d.path], ) def test_valid_members_dundername(self): self.Check(""" import enum class M(enum.Enum): __A__ = "hello" B = "world" assert_type(M.__A__, str) assert_type(M.B, M) """) def test_valid_members_dundername_pytd(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ enum: module class M(enum.Enum): __A__: str B: str """, ) self.Check( """ import foo assert_type(foo.M.__A__, str) assert_type(foo.M.B, foo.M) """, pythonpath=[d.path], ) def test_valid_members_class(self): # Class are callables, but they aren't descriptors. self.Check(""" import enum class Vclass: pass class M(enum.Enum): V = Vclass assert_type(M.V, M) """) def test_valid_members_class_descriptor(self): # Classes that have __get__ are descriptors though. # TODO(b/172045608): M.V should be Vclass, not str. 
self.Check(""" import enum class Vclass: def __get__(self, *args, **kwargs): return "I'm a descriptor" class M(enum.Enum): V = Vclass I = Vclass() assert_type(M.V, str) # Should be Vclass (b/172045608) assert_type(M.I, str) """) def test_not_supported_yet(self): self.CheckWithErrors(""" import enum enum.ReprEnum # not-supported-yet """) def test_members_with_value_attribute(self): with self.DepTree([( "foo.py", """ import enum from typing import List class Attr: def __init__(self, values: list[str]): self.value = [v for v in values] @classmethod def make(cls, values: List[str]) -> 'Attr': return cls(values) class MyEnum(enum.Enum): A = Attr(['a']) B = Attr.make(['b']) @property def value_alias(self): return self.value """, )]): self.Check(""" import foo assert_type(foo.MyEnum.A, foo.MyEnum) assert_type(foo.MyEnum.A.value_alias, foo.Attr) assert_type(foo.MyEnum.B, foo.MyEnum) assert_type(foo.MyEnum.B.value_alias, foo.Attr) """) def test_missing(self): self.Check(""" import enum class E(enum.Enum): X = 42 @classmethod def _missing_(cls, value: object) -> "E": return cls.X assert_type(E("FOO"), E) """) def test_missing_pyi(self): with self.DepTree([( "foo.pyi", """ import enum class E(enum.Enum): X = 42 @classmethod def _missing_(cls, value: object) -> E: ... """, )]): self.Check(""" import foo assert_type(foo.E("FOO"), foo.E) """) def test_member(self): """Tests that enum.member acts as a no-op.""" self.Check(""" import enum class E(enum.Enum): X = enum.member(1) assert_type(E.X, E) assert_type(E.X.value, int) """) def test_member_with_partial(self): """Tests that enum.member acts as a no-op even with a partial.""" self.Check(""" import enum import functools def foo(x: int) -> int: return x + 1 class E(enum.Enum): X = enum.member(functools.partial(foo)) assert_type(E.X, E) """) if __name__ == "__main__": test_base.main()
EnumOverlayTest
python
huggingface__transformers
src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
{ "start": 131770, "end": 147171 }
class ____(SeamlessM4TPreTrainedModel, GenerationMixin): output_modalities = ("audio",) _keys_to_ignore_on_load_missing = ["speech_encoder"] main_input_name = "input_ids" _tied_weights_keys = { "lm_head.weight": "shared.weight", "text_encoder.embed_tokens.weight": "shared.weight", "text_decoder.embed_tokens.weight": "shared.weight", } def __init__(self, config: SeamlessM4TConfig): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) self.text_encoder = SeamlessM4TEncoder(config) self.text_decoder = SeamlessM4TDecoder(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.t2u_model = SeamlessM4TTextToUnitForConditionalGeneration(config) self.vocoder = SeamlessM4TCodeHifiGan(config) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.text_encoder def get_decoder(self): return self.text_decoder def get_input_embeddings(self): return self.text_decoder.embed_tokens def set_input_embeddings(self, value): self.text_encoder.embed_tokens = value self.text_decoder.embed_tokens = value self.shared = value @auto_docstring(custom_args=SEAMLESS_M4T_COMMON_CUSTOM_ARGS) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape 
`(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn logger.warning( "This is the same forward method as `SeamlessM4TForTextToText`." "It doesn't use the text-to-unit model `SeamlessM4TTextToUnitForConditionalGeneration`." "If you want to generate speech, use the `.generate` method." 
) encoder_outputs = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) encoder_attention_mask = attention_mask # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn) decoder_outputs = self.text_decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) lm_logits = self.lm_head(decoder_outputs[0]) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(lm_logits.device) masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: outputs = decoder_outputs + encoder_outputs output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, 
encoder_attentions=encoder_outputs.attentions, ) @torch.no_grad() def generate( self, input_ids: Optional[torch.Tensor] = None, return_intermediate_token_ids: Optional[bool] = None, tgt_lang: Optional[str] = None, spkr_id: Optional[int] = 0, **kwargs, ) -> Union[torch.Tensor, SeamlessM4TGenerationOutput]: """ Generates translated audio waveforms. <Tip> This method successively calls the `.generate` function of two different sub-models. You can specify keyword arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments that will be passed to one of them. For example, calling `.generate(input_ids, num_beams=4, speech_do_sample=True)` will successively perform beam-search decoding on the text model, and multinomial beam-search sampling on the speech model. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) return_intermediate_token_ids (`bool`, *optional*): If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want to get translated text alongside the audio. tgt_lang (`str`, *optional*): The language to use as target language for translation. spkr_id (`int`, *optional*, defaults to 0): The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`. kwargs (*optional*): Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. 
Keyword arguments are of two types: - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model, except for `decoder_input_ids` which will only be passed through the text components. - With a *text_* or *speech_* prefix, they will be input for the `generate` method of the text model and speech model respectively. It has the priority over the keywords without a prefix. This means you can, for example, specify a generation strategy for one generation but not for the other. Returns: `Union[SeamlessM4TGenerationOutput, tuple[Tensor]]`: - If `return_intermediate_token_ids`, returns [`SeamlessM4TGenerationOutput`]. - If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size, sequence_length)` and `waveform_lengths` which gives the length of each sample. """ batch_size = len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds")) if tgt_lang is None: raise ValueError("You must specify a `tgt_lang` to generate translated speech.") else: # also accept __xxx__ tgt_lang = tgt_lang.replace("__", "") for key in ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]: lang_code_to_id = getattr(self.generation_config, key, None) if lang_code_to_id is None: raise ValueError( f"""This model generation config doesn't have a `{key}` key which maps the target language to the right token id. Make sure to load the right generation config.""" ) elif tgt_lang not in lang_code_to_id: raise ValueError( f"""`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in {",".join(lang_code_to_id.keys())}. 
Note that SeamlessM4T supports more languages for text translation than for speech synthesis.""" ) kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs) kwargs_text["output_hidden_states"] = True kwargs_text["return_dict_in_generate"] = True kwargs_text["output_scores"] = True text_decoder_input_ids = kwargs_text.get("decoder_input_ids") # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids. text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang) text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device) kwargs_text["decoder_input_ids"] = text_decoder_input_ids # first generation text_generation_output = super().generate(input_ids, **kwargs_text) sequences = text_generation_output.sequences # prepare second generation num_return_sequences = len(sequences) // batch_size attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None)) encoder_hidden_states = text_generation_output.encoder_hidden_states[-1] # take care of num_return_sequences # take most probable hidden states per batch of return_sequences # (batch_size*num_return_sequences, ...) -> (batch_size,...) 
if num_return_sequences > 1: idx_most_probable_sequences_per_batch = text_generation_output.sequences_scores.view(batch_size, -1) idx_most_probable_sequences_per_batch = idx_most_probable_sequences_per_batch.argmax(-1) idx_most_probable_sequences_per_batch = ( idx_most_probable_sequences_per_batch + torch.arange(batch_size, device=self.device) * num_return_sequences ) sequences = sequences[idx_most_probable_sequences_per_batch] # get decoder last hidden state - must do a pass through the text decoder t2u_input_embeds = self.text_decoder( input_ids=sequences, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask, ).last_hidden_state pad_token_id = self.generation_config.pad_token_id # Compute new attention mask seq_lens = (sequences != pad_token_id).int().sum(1) t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens) kwargs_speech["attention_mask"] = t2u_model_attention_mask # Compute t2u decoder_input_ids t2u_decoder_input_ids = kwargs_speech.get("decoder_input_ids") t2u_tgt_lang_id = self.generation_config.t2u_lang_code_to_id.get(tgt_lang) t2u_decoder_input_ids = torch.tensor( [[self.config.t2u_eos_token_id, t2u_tgt_lang_id]] * batch_size, device=self.device ) kwargs_speech["decoder_input_ids"] = t2u_decoder_input_ids # second generation unit_ids = self.t2u_model.generate(inputs_embeds=t2u_input_embeds, **kwargs_speech) output_unit_ids = unit_ids.detach().clone() # get rid of t2u_decoder_input_ids unit_ids = unit_ids[:, kwargs_speech["decoder_input_ids"].shape[1] :] # replace eos per pad unit_ids[unit_ids == self.config.t2u_eos_token_id] = self.config.t2u_pad_token_id # offset of control symbols unit_ids = torch.where( unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset ) vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang) vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids), device=self.device) spkr_id = 
torch.tensor([[spkr_id]] * len(unit_ids), device=self.device) waveform, waveform_lengths = self.vocoder(input_ids=unit_ids, spkr_id=spkr_id, lang_id=vocoder_tgt_lang_id) if return_intermediate_token_ids: return SeamlessM4TGenerationOutput( waveform=waveform, waveform_lengths=waveform_lengths, sequences=sequences, unit_sequences=output_unit_ids, ) return waveform, waveform_lengths @auto_docstring( custom_intro=""" The speech-to-speech SeamlessM4T Model transformer which can be used for S2ST. """ )
SeamlessM4TForTextToSpeech
python
numpy__numpy
tools/swig/test/testTensor.py
{ "start": 14065, "end": 14375 }
class ____(TensorTestCase): def __init__(self, methodName="runTest"): TensorTestCase.__init__(self, methodName) self.typeStr = "longLong" self.typeCode = "q" self.result = int(self.result) ######################################################################
longLongTestCase
python
PrefectHQ__prefect
src/prefect/client/schemas/actions.py
{ "start": 26190, "end": 26711 }
class ____(ActionBaseModel): """Data used to create block document reference.""" id: UUID = Field(default_factory=uuid4) parent_block_document_id: UUID = Field( default=..., description="ID of block document the reference is nested within" ) reference_block_document_id: UUID = Field( default=..., description="ID of the nested block document" ) name: str = Field( default=..., description="The name that the reference is nested under" )
BlockDocumentReferenceCreate
python
spyder-ide__spyder
spyder/plugins/updatemanager/widgets/status.py
{ "start": 787, "end": 3452 }
class ____(StatusBarWidget): """Status bar widget for update manager.""" ID = 'update_manager_status' INTERACT_ON_CLICK = True sig_check_update = Signal() """Signal to request checking for updates.""" sig_start_update = Signal() """Signal to start the update process.""" sig_show_progress_dialog = Signal() """Signal to show the progress dialog.""" CUSTOM_WIDGET_CLASS = QLabel def __init__(self, parent): self.tooltip = "" super().__init__(parent) # Check for updates action menu self.menu = SpyderMenu(self) # Set aligment attributes for custom widget to match default label # values self.custom_widget.setAlignment(Qt.AlignRight | Qt.AlignVCenter) # Signals self.sig_clicked.connect(self.show_dialog_or_menu) def set_value(self, value): """Set update manager status.""" if value == DOWNLOADING_INSTALLER: self.tooltip = _( "Downloading the update will continue in the background.\n" "Click here to show the download dialog again." ) self.custom_widget.show() self.show() elif value == CHECKING: self.tooltip = value self.custom_widget.hide() self.hide() elif value == PENDING: self.tooltip = value self.custom_widget.hide() self.show() elif value == UPDATING_UPDATER: self.tooltip = value self.custom_widget.hide() self.show() else: self.tooltip = "" if self.custom_widget: self.custom_widget.hide() self.hide() self.update_tooltip() logger.debug(f"Update manager status: {value}") super().set_value(value) def set_no_status(self): """Convenience method to set status to NO_STATUS""" self.set_value(NO_STATUS) def get_tooltip(self): """Reimplementation to get a dynamic tooltip.""" return self.tooltip def get_icon(self): return ima.icon('update') def set_download_progress(self, percent_progress): """Set download progress in status bar""" self.custom_widget.setText(f"{percent_progress}%") @Slot() def show_dialog_or_menu(self): """Show download dialog or status bar menu.""" if self.value in (DOWNLOADING_INSTALLER, UPDATING_UPDATER): self.sig_show_progress_dialog.emit() elif self.value in 
(PENDING, DOWNLOAD_FINISHED, INSTALL_ON_CLOSE): self.sig_start_update.emit()
UpdateManagerStatus
python
pypa__warehouse
warehouse/sitemap/models.py
{ "start": 126, "end": 300 }
class ____: sitemap_bucket: Mapped[str] = mapped_column( server_default=FetchedValue(), server_onupdate=FetchedValue(), index=True, )
SitemapMixin
python
doocs__leetcode
solution/1400-1499/1498.Number of Subsequences That Satisfy the Given Sum Condition/Solution.py
{ "start": 0, "end": 466 }
class ____: def numSubseq(self, nums: List[int], target: int) -> int: mod = 10**9 + 7 nums.sort() n = len(nums) f = [1] + [0] * n for i in range(1, n + 1): f[i] = f[i - 1] * 2 % mod ans = 0 for i, x in enumerate(nums): if x * 2 > target: break j = bisect_right(nums, target - x, i + 1) - 1 ans = (ans + f[j - i]) % mod return ans
Solution
python
anthropics__anthropic-sdk-python
tests/test_legacy_response.py
{ "start": 3595, "end": 4240 }
class ____(pydantic.BaseModel): a: str @pytest.mark.parametrize("client", [False], indirect=True) # loose validation def test_response_parse_expect_model_union_non_json_content(client: Anthropic) -> None: response = LegacyAPIResponse( raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}), client=client, stream=False, stream_cls=None, cast_to=str, options=FinalRequestOptions.construct(method="get", url="/foo"), ) obj = response.parse(to=cast(Any, Union[CustomModel, OtherModel])) assert isinstance(obj, str) assert obj == "foo"
OtherModel
python
plotly__plotly.py
plotly/express/_special_inputs.py
{ "start": 590, "end": 958 }
class ____(object): """ Objects of this class can be passed to Plotly Express functions that expect column identifiers or list-like objects to indicate that this attribute should take on a constant value. An optional label can be provided. """ def __init__(self, value, label=None): self.value = value self.label = label
Constant
python
jazzband__django-pipeline
pipeline/storage.py
{ "start": 1732, "end": 3030 }
class ____: gzip_patterns = ("*.css", "*.js") def _compress(self, original_file): content = BytesIO() gzip_file = gzip.GzipFile(mode="wb", fileobj=content) gzip_file.write(original_file.read()) gzip_file.close() content.seek(0) return File(content) def post_process(self, paths, dry_run=False, **options): super_class = super() if hasattr(super_class, "post_process"): for name, hashed_name, processed in super_class.post_process( paths.copy(), dry_run, **options ): if hashed_name != name: paths[hashed_name] = (self, hashed_name) yield name, hashed_name, processed if dry_run: return for path in paths: if path: if not matches_patterns(path, self.gzip_patterns): continue original_file = self.open(path) gzipped_path = f"{path}.gz" if self.exists(gzipped_path): self.delete(gzipped_path) gzipped_file = self._compress(original_file) gzipped_path = self.save(gzipped_path, gzipped_file) yield gzipped_path, gzipped_path, True
GZIPMixin
python
kamyu104__LeetCode-Solutions
Python/find-the-xor-of-numbers-which-appear-twice.py
{ "start": 42, "end": 333 }
class ____(object): def duplicateNumbersXOR(self, nums): """ :type nums: List[int] :rtype: int """ return reduce(lambda x, y: x^y, nums, 0)^reduce(lambda x, y: x^y, set(nums), 0) # Time: O(n) # Space: O(n) import collections # freq table
Solution
python
dateutil__dateutil
src/dateutil/zoneinfo/__init__.py
{ "start": 313, "end": 660 }
class ____(_tzfile): def __reduce__(self): return (gettz, (self._filename,)) def getzoneinfofile_stream(): try: return BytesIO(get_data(__name__, ZONEFILENAME)) except IOError as e: # TODO switch to FileNotFoundError? warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror)) return None
tzfile
python
getsentry__sentry
src/sentry/users/web/accounts_form.py
{ "start": 3073, "end": 4678 }
class ____(forms.Form): username = forms.CharField(max_length=128, required=False, widget=forms.TextInput()) password = forms.CharField(widget=forms.PasswordInput()) tos_check = forms.BooleanField( label=_( f"I agree to the <a href={settings.TERMS_URL}>Terms of Service</a> and <a href={settings.PRIVACY_URL}>Privacy Policy</a>" ), widget=forms.CheckboxInput(), required=False, initial=False, ) def __init__(self, *args: Any, **kwargs: Any) -> None: self.user = kwargs.pop("user", None) super().__init__(*args, **kwargs) self.fields["username"].widget.attrs.update(placeholder=self.user.username) def clean_username(self) -> str | None: value = self.cleaned_data.get("username") or self.user.username value = re.sub(r"[ \n\t\r\0]*", "", value) if not value: return None if User.objects.filter(username__iexact=value).exclude(id=self.user.id).exists(): raise forms.ValidationError(_("An account is already registered with that username.")) return value.lower() def clean_password(self) -> str: password = self.cleaned_data["password"] password_validation.validate_password(password, user=self.user) return password def clean_tos_check(self) -> None: value = self.cleaned_data.get("tos_check") if not value: raise forms.ValidationError( _("You must agree to the Terms of Service and Privacy Policy before proceeding.") ) return None
RelocationForm
python
django__django
tests/proxy_model_inheritance/models.py
{ "start": 201, "end": 295 }
class ____(ConcreteModelSubclass): class Meta: proxy = True
ConcreteModelSubclassProxy
python
matplotlib__matplotlib
lib/matplotlib/backend_tools.py
{ "start": 11618, "end": 11897 }
class ____(ToolBase): """Tool to call the figure manager destroy method.""" description = 'Quit all figures' default_keymap = property(lambda self: mpl.rcParams['keymap.quit_all']) def trigger(self, sender, event, data=None): Gcf.destroy_all()
ToolQuitAll
python
cython__cython
Cython/Compiler/ExprNodes.py
{ "start": 318976, "end": 347890 }
class ____(ExprNode): # obj.attribute # # obj ExprNode # attribute string # needs_none_check boolean Used if obj is an extension type. # If set to True, it is known that the type is not None. # # Used internally: # # is_py_attr boolean Is a Python getattr operation # member string C name of struct member # is_called boolean Function call is being done on result # entry Entry Symbol table entry of attribute is_attribute = 1 subexprs = ['obj'] entry = None is_called = 0 needs_none_check = True is_memslice_transpose = False is_special_lookup = False is_py_attr = 0 def as_cython_attribute(self): if (isinstance(self.obj, NameNode) and self.obj.is_cython_module and not self.attribute == "parallel"): return self.attribute cy = self.obj.as_cython_attribute() if cy: return "%s.%s" % (cy, self.attribute) return None def coerce_to(self, dst_type, env): # If coercing to a generic pyobject and this is a cpdef function # we can create the corresponding attribute if dst_type is py_object_type: entry = self.entry if entry and entry.is_cfunction and entry.as_variable: # must be a cpdef function self.is_temp = 1 self.entry = entry.as_variable self.analyse_as_python_attribute(env) return self elif entry and entry.is_cfunction and self.obj.type is not Builtin.type_type: # "bound" cdef function. # This implementation is likely a little inefficient and could be improved. 
# Essentially it does: # __import__("functools").partial(coerce_to_object(self), self.obj) from .UtilNodes import EvalWithTempExprNode, ResultRefNode # take self.obj out to a temp because it's used twice obj_node = ResultRefNode(self.obj, type=self.obj.type) obj_node.result_ctype = self.obj.result_ctype self.obj = obj_node unbound_node = ExprNode.coerce_to(self, dst_type, env) utility_code=UtilityCode.load_cached( "PyMethodNew2Arg", "ObjectHandling.c" ) func_type = PyrexTypes.CFuncType( PyrexTypes.py_object_type, [ PyrexTypes.CFuncTypeArg("func", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("self", PyrexTypes.py_object_type, None) ], ) binding_call = PythonCapiCallNode( self.pos, function_name="__Pyx_PyMethod_New2Arg", func_type=func_type, args=[unbound_node, obj_node], utility_code=utility_code, ) complete_call = EvalWithTempExprNode(obj_node, binding_call) return complete_call.analyse_types(env) return ExprNode.coerce_to(self, dst_type, env) def calculate_constant_result(self): attr = self.attribute if attr.startswith("__") and attr.endswith("__"): return self.constant_result = getattr(self.obj.constant_result, attr) def compile_time_value(self, denv): attr = self.attribute if attr.startswith("__") and attr.endswith("__"): error(self.pos, "Invalid attribute name '%s' in compile-time expression" % attr) return None obj = self.obj.compile_time_value(denv) try: return getattr(obj, attr) except Exception as e: self.compile_time_value_error(e) def type_dependencies(self, env): return self.obj.type_dependencies(env) def infer_type(self, env): # FIXME: this is way too redundant with analyse_types() node = self.analyse_as_cimported_attribute_node(env, target=False) if node is not None: if node.entry.type and node.entry.type.is_cfunction: # special-case - function converted to pointer return PyrexTypes.CPtrType(node.entry.type) else: return node.entry.type node = self.analyse_as_type_attribute(env) if node is not None: return node.entry.type obj_type = 
self.obj.infer_type(env) self.analyse_attribute(env, obj_type=obj_type) if obj_type.is_builtin_type and self.type.is_cfunction: # special case: C-API replacements for C methods of # builtin types cannot be inferred as C functions as # that would prevent their use as bound methods return py_object_type elif self.entry and self.entry.is_cmethod: # special case: bound methods should not be inferred # as their unbound method types return py_object_type return self.type def analyse_target_declaration(self, env): self.is_target = True def analyse_target_types(self, env): node = self.analyse_types(env, target = 1) if node.type.is_const: error(self.pos, "Assignment to const attribute '%s'" % self.attribute) if not node.is_lvalue(): error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type) return node def analyse_types(self, env, target = 0): if not self.type: self.type = PyrexTypes.error_type # default value if it isn't analysed successfully self.initialized_check = env.directives['initializedcheck'] node = self.analyse_as_cimported_attribute_node(env, target) if node is None and not target: node = self.analyse_as_type_attribute(env) if node is None: node = self.analyse_as_ordinary_attribute_node(env, target) assert node is not None if (node.is_attribute or node.is_name) and node.entry: node.entry.used = True if node.is_attribute: node.wrap_obj_in_nonecheck(env) return node def analyse_as_cimported_attribute_node(self, env, target): # Try to interpret this as a reference to an imported # C const, type, var or function. If successful, mutates # this node into a NameNode and returns 1, otherwise # returns 0. 
module_scope = self.obj.analyse_as_module(env) if module_scope: entry = module_scope.lookup_here(self.attribute) if entry and not entry.known_standard_library_import and ( entry.is_cglobal or entry.is_cfunction or entry.is_type or entry.is_const): return self.as_name_node(env, entry, target) if self.is_cimported_module_without_shadow(env): # TODO: search for submodule error(self.pos, "cimported module has no attribute '%s'" % self.attribute) return self return None def analyse_as_type_attribute(self, env): # Try to interpret this as a reference to an unbound # C method of an extension type or builtin type. If successful, # creates a corresponding NameNode and returns it, otherwise # returns None. if self.obj.is_string_literal: return type = self.obj.analyse_as_type(env) if type: if type.is_extension_type or type.is_builtin_type or type.is_cpp_class: entry = type.scope.lookup_here(self.attribute) if entry and (entry.is_cmethod or type.is_cpp_class and entry.type.is_cfunction): if type.is_builtin_type: if not self.is_called: # must handle this as Python object return None ubcm_entry = entry else: ubcm_entry = self._create_unbound_cmethod_entry(type, entry, env) ubcm_entry.overloaded_alternatives = [ self._create_unbound_cmethod_entry(type, overloaded_alternative, env) for overloaded_alternative in entry.overloaded_alternatives ] return self.as_name_node(env, ubcm_entry, target=False) elif type.is_enum or type.is_cpp_enum: if self.attribute in type.values: for entry in type.entry.enum_values: if entry.name == self.attribute: return self.as_name_node(env, entry, target=False) else: error(self.pos, "%s not a known value of %s" % (self.attribute, type)) elif self.attribute.startswith('__') and self.attribute.endswith('__'): # Special attribute, look up at runtime. 
return None else: error(self.pos, "%s not a known value of %s" % (self.attribute, type)) return None def _create_unbound_cmethod_entry(self, type, entry, env): # Create a temporary entry describing the unbound C method in `entry` # as an ordinary function. if entry.func_cname and entry.type.op_arg_struct is None: cname = entry.func_cname if entry.type.is_static_method or ( env.parent_scope and env.parent_scope.is_cpp_class_scope): ctype = entry.type elif type.is_cpp_class: error(self.pos, "%s not a static member of %s" % (entry.name, type)) ctype = PyrexTypes.error_type else: # Fix self type. ctype = copy.copy(entry.type) ctype.args = ctype.args[:] ctype.args[0] = PyrexTypes.CFuncTypeArg('self', type, 'self', None) else: cname = "%s->%s" % (type.vtabptr_cname, entry.cname) ctype = entry.type ubcm_entry = Symtab.Entry(entry.name, cname, ctype) ubcm_entry.is_cfunction = 1 ubcm_entry.func_cname = entry.func_cname ubcm_entry.is_unbound_cmethod = 1 ubcm_entry.scope = entry.scope return ubcm_entry def analyse_as_type(self, env): module_scope = self.obj.analyse_as_module(env) if module_scope: return module_scope.lookup_type(self.attribute) if not self.obj.is_string_literal: base_type = self.obj.analyse_as_type(env) if base_type and getattr(base_type, 'scope', None) is not None: return base_type.scope.lookup_type(self.attribute) return None def analyse_as_extension_type(self, env): # Try to interpret this as a reference to an extension type # in a cimported module. Returns the extension type, or None. module_scope = self.obj.analyse_as_module(env) if module_scope: entry = module_scope.lookup_here(self.attribute) if entry and entry.is_type: if entry.type.is_extension_type or entry.type.is_builtin_type: return entry.type return None def analyse_as_module(self, env): # Try to interpret this as a reference to a cimported module # in another cimported module. Returns the module scope, or None. 
module_scope = self.obj.analyse_as_module(env) if module_scope: entry = module_scope.lookup_here(self.attribute) if entry and entry.as_module: return entry.as_module return None def as_name_node(self, env, entry, target): # Create a corresponding NameNode from this node and complete the # analyse_types phase. node = NameNode.from_node(self, name=self.attribute, entry=entry) if target: node = node.analyse_target_types(env) else: node = node.analyse_rvalue_entry(env) node.entry.used = 1 return node def analyse_as_ordinary_attribute_node(self, env, target): self.obj = self.obj.analyse_types(env) self.analyse_attribute(env) if self.entry and self.entry.is_cmethod and not self.is_called: # error(self.pos, "C method can only be called") pass ## Reference to C array turns into pointer to first element. #while self.type.is_array: # self.type = self.type.element_ptr_type() if self.is_py_attr: if not target: self.is_temp = 1 self.result_ctype = py_object_type elif target and self.obj.type.is_builtin_type: error(self.pos, "Assignment to an immutable object field") elif self.entry and self.entry.is_cproperty: if not target: return SimpleCallNode.for_cproperty(self.pos, self.obj, self.entry).analyse_types(env) # TODO: implement writable C-properties? error(self.pos, "Assignment to a read-only property") #elif self.type.is_memoryviewslice and not target: # self.is_temp = True return self def analyse_attribute(self, env, obj_type = None): # Look up attribute and set self.type and self.member. 
immutable_obj = obj_type is not None # used during type inference self.is_py_attr = 0 self.member = self.attribute if obj_type is None: if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr: self.obj = self.obj.coerce_to_pyobject(env) obj_type = self.obj.type else: if obj_type.is_string or obj_type.is_pyunicode_ptr: obj_type = py_object_type if obj_type.is_ptr or obj_type.is_array: obj_type = obj_type.base_type self.op = "->" elif obj_type.is_extension_type or obj_type.is_builtin_type: self.op = "->" elif obj_type.is_reference and obj_type.is_fake_reference: self.op = "->" else: self.op = "." if obj_type.has_attributes: if obj_type.attributes_known(): entry = obj_type.scope.lookup_here(self.attribute) if obj_type.is_memoryviewslice and not entry: if self.attribute == 'T': self.is_memslice_transpose = True self.is_temp = True self.use_managed_ref = True self.type = self.obj.type.transpose(self.pos) return else: obj_type.declare_attribute(self.attribute, env, self.pos) entry = obj_type.scope.lookup_here(self.attribute) if entry and entry.is_member: entry = None else: error(self.pos, "Cannot select attribute of incomplete type '%s'" % obj_type) self.type = PyrexTypes.error_type return self.entry = entry if entry: if obj_type.is_extension_type and entry.name == "__weakref__": error(self.pos, "Illegal use of special attribute __weakref__") # def methods need the normal attribute lookup # because they do not have struct entries # fused function go through assignment synthesis # (foo = pycfunction(foo_func_obj)) and need to go through # regular Python lookup as well if entry.is_cproperty: self.type = entry.type return elif (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod: self.type = entry.type self.member = entry.cname return else: # If it's not a variable or C method, it must be a Python # method of an extension type, so we treat it like a Python # attribute. 
pass # If we get here, the base object is not a struct/union/extension # type, or it is an extension type and the attribute is either not # declared or is declared as a Python method. Treat it as a Python # attribute reference. self.analyse_as_python_attribute(env, obj_type, immutable_obj) def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False): if obj_type is None: obj_type = self.obj.type # mangle private '__*' Python attributes used inside of a class self.attribute = env.mangle_class_private_name(self.attribute) self.member = self.attribute self.type = py_object_type self.is_py_attr = 1 if not obj_type.is_pyobject and not obj_type.is_error: # Expose python methods for immutable objects. if (obj_type.is_string or obj_type.is_cpp_string or obj_type.is_buffer or obj_type.is_memoryviewslice or obj_type.is_numeric or (obj_type.is_ctuple and obj_type.can_coerce_to_pyobject(env)) or (obj_type.is_struct and obj_type.can_coerce_to_pyobject(env))): if not immutable_obj: self.obj = self.obj.coerce_to_pyobject(env) elif (obj_type.is_cfunction and (self.obj.is_name or self.obj.is_attribute) and self.obj.entry.as_variable and self.obj.entry.as_variable.type.is_pyobject): # might be an optimised builtin function => unpack it if not immutable_obj: self.obj = self.obj.coerce_to_pyobject(env) else: error(self.pos, "Object of type '%s' has no attribute '%s'" % (obj_type, self.attribute)) def wrap_obj_in_nonecheck(self, env): if not env.directives['nonecheck']: return msg = None format_args = () if (self.obj.type.is_extension_type and self.needs_none_check and not self.is_py_attr): msg = "'NoneType' object has no attribute '%{}s'".format('.30' if len(self.attribute) <= 30 else '') format_args = (self.attribute,) elif self.obj.type.is_memoryviewslice: if self.is_memslice_transpose: msg = "Cannot transpose None memoryview slice" else: entry = self.obj.type.scope.lookup_here(self.attribute) if entry: # copy/is_c_contig/shape/strides etc msg = "Cannot access '%s' 
attribute of None memoryview slice" format_args = (entry.name,) if msg: self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError', format_args=format_args) def nogil_check(self, env): if self.is_py_attr: self.gil_error() gil_message = "Accessing Python attribute" def is_cimported_module_without_shadow(self, env): return self.obj.is_cimported_module_without_shadow(env) def is_simple(self): if self.obj: return self.result_in_temp() or self.obj.is_simple() else: return NameNode.is_simple(self) def is_lvalue(self): if self.obj: return True else: return NameNode.is_lvalue(self) def is_ephemeral(self): if self.obj: return self.obj.is_ephemeral() else: return NameNode.is_ephemeral(self) def calculate_result_code(self): result = self.calculate_access_code() if self.entry and self.entry.is_cpp_optional and not self.is_target: result = "(*%s)" % result return result def calculate_access_code(self): # Does the job of calculate_result_code but doesn't dereference cpp_optionals # Therefore allowing access to the holder variable obj = self.obj obj_code = obj.result_as(obj.type) #print "...obj_code =", obj_code ### if self.entry and self.entry.is_cmethod: if obj.type.is_extension_type and not self.entry.is_builtin_cmethod: if self.entry.final_func_cname: return self.entry.final_func_cname if self.type.from_fused: # If the attribute was specialized through indexing, make # sure to get the right fused name, as our entry was # replaced by our parent index node # (AnalyseExpressionsTransform) self.member = self.entry.cname return "((struct %s *)%s%s%s)->%s" % ( obj.type.vtabstruct_cname, obj_code, self.op, obj.type.vtabslot_cname, self.member) elif self.result_is_used: return self.member # Generating no code at all for unused access to optimised builtin # methods fixes the problem that some optimisations only exist as # macros, i.e. there is no function pointer to them, so we would # generate invalid C code here. 
return elif obj.type.is_complex: return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code) else: if obj.type.is_builtin_type and self.entry and self.entry.is_variable: # accessing a field of a builtin type, need to cast better than result_as() does obj_code = obj.type.cast_code(obj.result(), to_object_struct = True) return "%s%s%s" % (obj_code, self.op, self.member) def generate_result_code(self, code): if self.is_py_attr: if self.is_special_lookup: code.globalstate.use_utility_code( UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c")) lookup_func_name = '__Pyx_PyObject_LookupSpecial' else: code.globalstate.use_utility_code( UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c")) lookup_func_name = '__Pyx_PyObject_GetAttrStr' code.putln( '%s = %s(%s, %s); %s' % ( self.result(), lookup_func_name, self.obj.py_result(), code.intern_identifier(self.attribute), code.error_goto_if_null(self.result(), self.pos))) self.generate_gotref(code) elif self.type.is_memoryviewslice: if self.is_memslice_transpose: # transpose the slice for access, packing in self.type.axes: if access == 'ptr': error(self.pos, "Transposing not supported for slices " "with indirect dimensions") return code.putln("%s = %s;" % (self.result(), self.obj.result())) code.put_incref_memoryviewslice(self.result(), self.type, have_gil=True) T = "__pyx_memslice_transpose(&%s)" % self.result() code.putln(code.error_goto_if_neg(T, self.pos)) elif self.initialized_check: code.putln( 'if (unlikely(!%s.memview)) {' 'PyErr_SetString(PyExc_AttributeError,' '"Memoryview is not initialized");' '%s' '}' % (self.result(), code.error_goto(self.pos))) elif self.entry.is_cpp_optional and self.initialized_check: if self.is_target: undereferenced_result = self.result() else: assert not self.is_temp # calculate_access_code() only makes sense for non-temps undereferenced_result = self.calculate_access_code() unbound_check_code = self.type.cpp_optional_check_for_null_code(undereferenced_result) 
code.put_error_if_unbound(self.pos, self.entry, self.in_nogil_context, unbound_check_code=unbound_check_code) else: # result_code contains what is needed, but we may need to insert # a check and raise an exception if self.obj.type and self.obj.type.is_extension_type: pass elif self.entry and self.entry.is_cmethod: # C method implemented as function call with utility code code.globalstate.use_entry_utility_code(self.entry) def generate_disposal_code(self, code): if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose: # mirror condition for putting the memview incref here: code.put_xdecref_clear(self.result(), self.type, have_gil=True) else: ExprNode.generate_disposal_code(self, code) def generate_assignment_code(self, rhs, code, overloaded_assignment=False, exception_check=None, exception_value=None): self.obj.generate_evaluation_code(code) if self.is_py_attr: code.globalstate.use_utility_code( UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c")) code.put_error_if_neg(self.pos, '__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % ( self.obj.py_result(), code.intern_identifier(self.attribute), rhs.py_result())) rhs.generate_disposal_code(code) rhs.free_temps(code) elif self.obj.type.is_complex: code.putln("__Pyx_SET_C%s%s(%s, %s);" % ( self.member.upper(), self.obj.type.implementation_suffix, self.obj.result_as(self.obj.type), rhs.result_as(self.ctype()))) rhs.generate_disposal_code(code) rhs.free_temps(code) else: select_code = self.result() if self.type.is_pyobject and self.use_managed_ref: rhs.make_owned_reference(code) rhs.generate_giveref(code) code.put_gotref(select_code, self.type) code.put_decref(select_code, self.ctype()) elif self.type.is_memoryviewslice: from . 
import MemoryView MemoryView.put_assign_to_memviewslice( select_code, rhs, rhs.result(), self.type, code) if not self.type.is_memoryviewslice: code.putln( "%s = %s;" % ( select_code, rhs.move_result_rhs_as(self.ctype()))) #rhs.result())) rhs.generate_post_assignment_code(code) rhs.free_temps(code) self.obj.generate_disposal_code(code) self.obj.free_temps(code) def generate_deletion_code(self, code, ignore_nonexisting=False): self.obj.generate_evaluation_code(code) if self.is_py_attr or (self.entry.scope.is_property_scope and '__del__' in self.entry.scope.entries): code.globalstate.use_utility_code( UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c")) code.put_error_if_neg(self.pos, '__Pyx_PyObject_DelAttrStr(%s, %s)' % ( self.obj.py_result(), code.intern_identifier(self.attribute))) else: error(self.pos, "Cannot delete C attribute of extension type") self.obj.generate_disposal_code(code) self.obj.free_temps(code) def annotate(self, code): if self.is_py_attr: style, text = 'py_attr', 'python attribute (%s)' else: style, text = 'c_attr', 'c attribute (%s)' code.annotate(self.pos, AnnotationItem(style, text % self.type, size=len(self.attribute))) def get_known_standard_library_import(self): module_name = self.obj.get_known_standard_library_import() if module_name: return StringEncoding.EncodedString("%s.%s" % (module_name, self.attribute)) return None #------------------------------------------------------------------- # # Constructor nodes # #-------------------------------------------------------------------
AttributeNode
python
getsentry__sentry
src/sentry/incidents/metric_issue_detector.py
{ "start": 3251, "end": 4889 }
class ____(BaseDataConditionValidator): supported_conditions = frozenset( ( Condition.GREATER, Condition.LESS, Condition.GREATER_OR_EQUAL, Condition.LESS_OR_EQUAL, Condition.ANOMALY_DETECTION, ) ) supported_condition_results = frozenset( (DetectorPriorityLevel.HIGH, DetectorPriorityLevel.MEDIUM, DetectorPriorityLevel.OK) ) def validate_type(self, value: str) -> Condition: try: type = Condition(value) except ValueError: type = None if type not in self.supported_conditions: raise serializers.ValidationError(f"Unsupported type {value}") return type def validate_comparison(self, value: dict | float | int | str) -> float | dict: if isinstance(value, (float, int)): try: value = float(value) except ValueError: raise serializers.ValidationError("A valid number is required.") return value elif isinstance(value, dict): return super().validate_comparison(value) else: raise serializers.ValidationError("A valid number or dict is required.") def validate_condition_result(self, value: str) -> DetectorPriorityLevel: try: result = DetectorPriorityLevel(int(value)) except ValueError: result = None if result not in self.supported_condition_results: raise serializers.ValidationError("Unsupported condition result") return result
MetricIssueComparisonConditionValidator
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/coercions.py
{ "start": 27011, "end": 27211 }
class ____(_CoerceLiterals, RoleImpl): __slots__ = () _coerce_consts = True def _text_coercion(self, element, argname=None): return elements.TextClause(element)
StatementOptionImpl
python
spack__spack
lib/spack/spack/vendor/ruamel/yaml/nodes.py
{ "start": 2785, "end": 3236 }
class ____(Node): __slots__ = ('flow_style',) def __init__( self, tag, value, start_mark=None, end_mark=None, flow_style=None, comment=None, anchor=None, ): # type: (Any, Any, Any, Any, Any, Any, Any) -> None Node.__init__(self, tag, value, start_mark, end_mark, comment=comment) self.flow_style = flow_style self.anchor = anchor
CollectionNode
python
kamyu104__LeetCode-Solutions
Python/find-xor-sum-of-all-pairs-bitwise-and.py
{ "start": 47, "end": 283 }
class ____(object): def getXORSum(self, arr1, arr2): """ :type arr1: List[int] :type arr2: List[int] :rtype: int """ return reduce(operator.xor, arr1) & reduce(operator.xor, arr2)
Solution
python
scrapy__scrapy
scrapy/commands/view.py
{ "start": 203, "end": 892 }
class ____(fetch.Command): def short_desc(self) -> str: return "Open URL in browser, as seen by Scrapy" def long_desc(self) -> str: return ( "Fetch a URL using the Scrapy downloader and show its contents in a browser" ) def add_options(self, parser: argparse.ArgumentParser) -> None: super().add_options(parser) parser.add_argument("--headers", help=argparse.SUPPRESS) def _print_response(self, response: Response, opts: argparse.Namespace) -> None: if not isinstance(response, TextResponse): logger.error("Cannot view a non-text response.") return open_in_browser(response)
Command
python
cython__cython
Cython/Compiler/Nodes.py
{ "start": 21509, "end": 23859 }
class ____(CDeclaratorNode): # base CDeclaratorNode # dimension ExprNode child_attrs = ["base", "dimension"] def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False): if ((base_type.is_cpp_class and base_type.is_template_type()) or base_type.is_cfunction or base_type.python_type_constructor_name): from .ExprNodes import TupleNode if isinstance(self.dimension, TupleNode): args = self.dimension.args else: args = self.dimension, values = [v.analyse_as_type(env) for v in args] if None in values: ix = values.index(None) error(args[ix].pos, "Template parameter not a type") base_type = error_type else: base_type = base_type.specialize_here(self.pos, env, values) return self.base.analyse(base_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd) size = None if self.dimension: self.dimension = self.dimension.analyse_const_expression(env) if not self.dimension.type.is_int: error(self.dimension.pos, "Array dimension not integer") if self.dimension.type.is_const and self.dimension.entry.visibility != 'extern': # extern const variables declaring C constants are allowed error(self.dimension.pos, "Array dimension cannot be const variable") size = (self.dimension.constant_result if isinstance(self.dimension.constant_result, int) else self.dimension.get_constant_c_result_code()) try: size = int(size) except ValueError: # runtime constant? pass if not base_type.is_complete(): error(self.pos, "Array element type '%s' is incomplete" % base_type) if base_type.is_pyobject: error(self.pos, "Array element cannot be a Python object") if base_type.is_cfunction: error(self.pos, "Array element cannot be a function") array_type = PyrexTypes.c_array_type(base_type, size) return self.base.analyse(array_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
CArrayDeclaratorNode
python
huggingface__transformers
tests/utils/test_cache_utils.py
{ "start": 1952, "end": 4958 }
class ____(unittest.TestCase): """Cache tests that don't require loading models""" def test_static_cache_mha_mqa_gqa(self): """ Tests that static cache works with multi-head attention (MHA), grouped query attention (GQA), and multi-query attention (MQA) """ def _random_kvs(config): # shape for key and values: (batch_size, num_heads, seq_len, head_dim) random_keys = torch.rand( (1, config.num_key_value_heads, 1, config.hidden_size // config.num_attention_heads), device=torch_device, ) random_values = torch.rand( (1, config.num_key_value_heads, 1, config.hidden_size // config.num_attention_heads), device=torch_device, ) return random_keys, random_values mha_config = LlamaConfig(num_attention_heads=32) mha_static_cache = StaticCache(config=mha_config, max_cache_len=10) cached_keys, cached_values = mha_static_cache.update( *_random_kvs(mha_config), 0, cache_kwargs={"cache_position": torch.arange(1).to(torch_device)} ) self.assertTrue(cached_keys.shape == (1, 32, 10, 128)) self.assertTrue(cached_values.shape == (1, 32, 10, 128)) gqa_config = LlamaConfig(num_attention_heads=32, num_key_value_heads=4) gqa_static_cache = StaticCache(config=gqa_config, max_cache_len=10) cached_keys, cached_values = gqa_static_cache.update( *_random_kvs(gqa_config), 0, cache_kwargs={"cache_position": torch.arange(1).to(torch_device)} ) self.assertTrue(cached_keys.shape == (1, 4, 10, 128)) self.assertTrue(cached_values.shape == (1, 4, 10, 128)) mqa_config = LlamaConfig(num_attention_heads=32, num_key_value_heads=1) mqa_static_cache = StaticCache(config=mqa_config, max_cache_len=10) cached_keys, cached_values = mqa_static_cache.update( *_random_kvs(mqa_config), 0, cache_kwargs={"cache_position": torch.arange(1).to(torch_device)} ) self.assertTrue(cached_keys.shape == (1, 1, 10, 128)) self.assertTrue(cached_values.shape == (1, 1, 10, 128)) def _skip_on_failed_cache_prerequisites(test, cache_implementation): """Function to skip tests on failed cache prerequisites, given a cache implementation""" 
# Installed dependencies if cache_implementation == "quantized" and not is_optimum_quanto_available(): test.skipTest("Quanto is not available") # Devices if "offloaded" in cache_implementation: has_accelerator = torch_device is not None and torch_device != "cpu" if not has_accelerator: test.skipTest("Offloaded caches require an accelerator") if cache_implementation in ["offloaded_static", "offloaded_hybrid_chunked"]: if backend_device_count(torch_device) != 1: test.skipTest("Offloaded static caches require exactly 1 accelerator")
CacheTest
python
numba__numba
numba/cuda/cudadrv/dummyarray.py
{ "start": 3374, "end": 14209 }
class ____(object): """A dummy numpy array-like object. Consider it an array without the actual data, but offset from the base data pointer. Attributes ---------- dims: tuple of Dim describing each dimension of the array ndim: int number of dimension shape: tuple of int size of each dimension strides: tuple of int stride of each dimension itemsize: int itemsize extent: (start, end) start and end offset containing the memory region """ is_array = True @classmethod def from_desc(cls, offset, shape, strides, itemsize): dims = [] for ashape, astride in zip(shape, strides): dim = Dim(offset, offset + ashape * astride, ashape, astride, single=False) dims.append(dim) offset = 0 # offset only applies to first dimension return cls(dims, itemsize) def __init__(self, dims, itemsize): self.dims = tuple(dims) self.ndim = len(self.dims) self.shape = tuple(dim.size for dim in self.dims) self.strides = tuple(dim.stride for dim in self.dims) self.itemsize = itemsize self.size = functools.reduce(operator.mul, self.shape, 1) self.extent = self._compute_extent() self.flags = self._compute_layout() def _compute_layout(self): # The logic here is based on that in _UpdateContiguousFlags from # numpy/core/src/multiarray/flagsobject.c in NumPy v1.19.1 (commit # 13661ac70). 
# https://github.com/numpy/numpy/blob/maintenance/1.19.x/numpy/core/src/multiarray/flagsobject.c#L123-L191 # Records have no dims, and we can treat them as contiguous if not self.dims: return {'C_CONTIGUOUS': True, 'F_CONTIGUOUS': True} # If this is a broadcast array then it is not contiguous if any([dim.stride == 0 for dim in self.dims]): return {'C_CONTIGUOUS': False, 'F_CONTIGUOUS': False} flags = {'C_CONTIGUOUS': True, 'F_CONTIGUOUS': True} # Check C contiguity sd = self.itemsize for dim in reversed(self.dims): if dim.size == 0: # Contiguous by definition return {'C_CONTIGUOUS': True, 'F_CONTIGUOUS': True} if dim.size != 1: if dim.stride != sd: flags['C_CONTIGUOUS'] = False sd *= dim.size # Check F contiguity sd = self.itemsize for dim in self.dims: if dim.size != 1: if dim.stride != sd: flags['F_CONTIGUOUS'] = False return flags sd *= dim.size return flags def _compute_extent(self): firstidx = [0] * self.ndim lastidx = [s - 1 for s in self.shape] start = compute_index(firstidx, self.dims) stop = compute_index(lastidx, self.dims) + self.itemsize stop = max(stop, start) # ensure positive extent return Extent(start, stop) def __repr__(self): return '<Array dims=%s itemsize=%s>' % (self.dims, self.itemsize) def __getitem__(self, item): if not isinstance(item, tuple): item = [item] else: item = list(item) nitem = len(item) ndim = len(self.dims) if nitem > ndim: raise IndexError("%d extra indices given" % (nitem - ndim,)) # Add empty slices for missing indices while len(item) < ndim: item.append(slice(None, None)) dims = [dim.__getitem__(it) for dim, it in zip(self.dims, item)] newshape = [d.size for d in dims if not d.single] arr = Array(dims, self.itemsize) if newshape: return arr.reshape(*newshape)[0] else: return Element(arr.extent) @property def is_c_contig(self): return self.flags['C_CONTIGUOUS'] @property def is_f_contig(self): return self.flags['F_CONTIGUOUS'] def iter_contiguous_extent(self): """ Generates extents """ if self.is_c_contig or 
self.is_f_contig: yield self.extent else: if self.dims[0].stride < self.dims[-1].stride: innerdim = self.dims[0] outerdims = self.dims[1:] outershape = self.shape[1:] else: innerdim = self.dims[-1] outerdims = self.dims[:-1] outershape = self.shape[:-1] if innerdim.is_contiguous(self.itemsize): oslen = [range(s) for s in outershape] for indices in itertools.product(*oslen): base = compute_index(indices, outerdims) yield base + innerdim.start, base + innerdim.stop else: oslen = [range(s) for s in self.shape] for indices in itertools.product(*oslen): offset = compute_index(indices, self.dims) yield offset, offset + self.itemsize def reshape(self, *newdims, **kws): oldnd = self.ndim newnd = len(newdims) if newdims == self.shape: return self, None order = kws.pop('order', 'C') if kws: raise TypeError('unknown keyword arguments %s' % kws.keys()) if order not in 'CFA': raise ValueError('order not C|F|A') # check for exactly one instance of -1 in newdims # https://github.com/numpy/numpy/blob/623bc1fae1d47df24e7f1e29321d0c0ba2771ce0/numpy/core/src/multiarray/shape.c#L470-L515 # noqa: E501 unknownidx = -1 knownsize = 1 for i, dim in enumerate(newdims): if dim < 0: if unknownidx == -1: unknownidx = i else: raise ValueError("can only specify one unknown dimension") else: knownsize *= dim # compute the missing dimension if unknownidx >= 0: if knownsize == 0 or self.size % knownsize != 0: raise ValueError("cannot infer valid shape " "for unknown dimension") else: newdims = newdims[0:unknownidx] \ + (self.size // knownsize,) \ + newdims[unknownidx + 1:] newsize = functools.reduce(operator.mul, newdims, 1) if order == 'A': order = 'F' if self.is_f_contig else 'C' if newsize != self.size: raise ValueError("reshape changes the size of the array") if self.is_c_contig or self.is_f_contig: if order == 'C': newstrides = list(iter_strides_c_contig(self, newdims)) elif order == 'F': newstrides = list(iter_strides_f_contig(self, newdims)) else: raise AssertionError("unreachable") else: 
newstrides = np.empty(newnd, np.ctypeslib.c_intp) # need to keep these around in variables, not temporaries, so they # don't get GC'ed before we call into the C code olddims = np.array(self.shape, dtype=np.ctypeslib.c_intp) oldstrides = np.array(self.strides, dtype=np.ctypeslib.c_intp) newdims = np.array(newdims, dtype=np.ctypeslib.c_intp) if not attempt_nocopy_reshape( oldnd, olddims, oldstrides, newnd, newdims, newstrides, self.itemsize, order == 'F', ): raise NotImplementedError('reshape would require copy') ret = self.from_desc(self.extent.begin, shape=newdims, strides=newstrides, itemsize=self.itemsize) return ret, list(self.iter_contiguous_extent()) def squeeze(self, axis=None): newshape, newstrides = [], [] if axis is None: for length, stride in zip(self.shape, self.strides): if length != 1: newshape.append(length) newstrides.append(stride) else: if not isinstance(axis, tuple): axis = (axis,) for ax in axis: if self.shape[ax] != 1: raise ValueError( "cannot select an axis to squeeze out which has size " "not equal to one" ) for i, (length, stride) in enumerate(zip(self.shape, self.strides)): if i not in axis: newshape.append(length) newstrides.append(stride) newarr = self.from_desc( self.extent.begin, shape=newshape, strides=newstrides, itemsize=self.itemsize, ) return newarr, list(self.iter_contiguous_extent()) def ravel(self, order='C'): if order not in 'CFA': raise ValueError('order not C|F|A') if (order in 'CA' and self.is_c_contig or order in 'FA' and self.is_f_contig): newshape = (self.size,) newstrides = (self.itemsize,) arr = self.from_desc(self.extent.begin, newshape, newstrides, self.itemsize) return arr, list(self.iter_contiguous_extent()) else: raise NotImplementedError("ravel on non-contiguous array") def iter_strides_f_contig(arr, shape=None): """yields the f-contiguous strides """ shape = arr.shape if shape is None else shape itemsize = arr.itemsize yield itemsize sum = 1 for s in shape[:-1]: sum *= s yield sum * itemsize def 
iter_strides_c_contig(arr, shape=None): """yields the c-contiguous strides """ shape = arr.shape if shape is None else shape itemsize = arr.itemsize def gen(): yield itemsize sum = 1 for s in reversed(shape[1:]): sum *= s yield sum * itemsize for i in reversed(list(gen())): yield i def is_element_indexing(item, ndim): if isinstance(item, slice): return False elif isinstance(item, tuple): if len(item) == ndim: if not any(isinstance(it, slice) for it in item): return True else: return True return False def _compute_size(start, stop, step): """Algorithm adapted from cpython rangeobject.c """ if step > 0: lo = start hi = stop else: lo = stop hi = start step = -step if lo >= hi: return 0 return (hi - lo - 1) // step + 1
Array
python
django__django
tests/postgres_tests/test_indexes.py
{ "start": 8020, "end": 8611 }
class ____(IndexTestMixin, PostgreSQLSimpleTestCase): index_class = SpGistIndex def test_suffix(self): self.assertEqual(SpGistIndex.suffix, "spgist") def test_deconstruction(self): index = SpGistIndex(fields=["title"], name="test_title_spgist", fillfactor=80) path, args, kwargs = index.deconstruct() self.assertEqual(path, "django.contrib.postgres.indexes.SpGistIndex") self.assertEqual(args, ()) self.assertEqual( kwargs, {"fields": ["title"], "name": "test_title_spgist", "fillfactor": 80} )
SpGistIndexTests