Dataset columns (name, type, value range):

  language     stringclasses   1 value
  repo         stringclasses   346 values
  path         stringlengths   6 to 201
  class_span   dict
  source       stringlengths   21 to 2.38M
  target       stringlengths   1 to 96
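Read as a table, each record pairs a Python class definition whose name is masked with `____` (the `source` column) with the class name to recover (the `target` column), plus the repository, file path, and character span the class was taken from. Below is a minimal sketch of loading and inspecting such records, assuming the data is published as a Hugging Face dataset; the dataset identifier is a placeholder, not the real one.

# Sketch only: "user/masked-class-names" is a placeholder dataset id.
from datasets import load_dataset

ds = load_dataset("user/masked-class-names", split="train")
row = ds[0]
print(row["language"], row["repo"], row["path"])
print(row["class_span"])           # e.g. {"start": 3678, "end": 4173}
print(row["source"][:80], "...")   # class definition with its name masked as ____
print(row["target"])               # the masked class name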
python
simonw__datasette
datasette/events.py
{ "start": 3678, "end": 4173 }
class ____(Event):
    """
    Event name: ``upsert-rows``

    Rows were upserted into a table.

    :ivar database: The name of the database where the rows were inserted.
    :type database: str
    :ivar table: The name of the table where the rows were inserted.
    :type table: str
    :ivar num_rows: The number of rows that were requested to be inserted.
    :type num_rows: int
    """

    name = "upsert-rows"
    database: str
    table: str
    num_rows: int


@dataclass
UpsertRowsEvent
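In each record the class name is replaced by the literal token `____`; substituting the record's `target` back in reconstructs the original definition, and `class_span` appears to give the character offsets of that definition in the source file. A minimal sketch under those assumptions:

# Sketch only: assumes the mask is the literal string "____" and that
# class_span holds character offsets into the original file.
MASK = "____"

def unmask(source: str, target: str) -> str:
    # Restore the true class name in the masked definition.
    return source.replace(MASK, target, 1)

def extract_span(file_text: str, class_span: dict) -> str:
    # Cut the class definition out of the full file text.
    return file_text[class_span["start"]:class_span["end"]]

print(unmask('class ____(Event): ...', "UpsertRowsEvent"))
# -> class UpsertRowsEvent(Event): ...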
python
numba__numba
numba/core/cpu.py
{ "start": 914, "end": 1045 }
class ____(cgutils.Structure):
    _fields = [
        ('globals', types.pyobject),
        ('consts', types.pyobject),
    ]
EnvBody
python
python__mypy
mypy/plugins/singledispatch.py
{ "start": 730, "end": 823 }
class ____(NamedTuple):
    return_type: Type
    fallback: CallableType
SingledispatchTypeVars
python
PyCQA__pylint
tests/functional/r/regression/regression_2913.py
{ "start": 184, "end": 272 }
class ____:  # pylint: disable=too-few-public-methods
    """My base class."""
BaseCorrect
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_theme_color01.py
{ "start": 350, "end": 1185 }
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename("theme_color01.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with a theme color."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        # Add theme colors to the worksheet.
        for row in range(6):
            col = 0
            color = col  # Theme color index.
            shade = row  # Theme shade index.
            theme_color = Color((color, shade))
            color_format = workbook.add_format({"bg_color": theme_color})
            worksheet.write(row, col, "", color_format)

        workbook.close()

        self.assertExcelEqual()
TestCompareXLSXFiles
python
joke2k__faker
tests/providers/test_automotive.py
{ "start": 11594, "end": 11764 }
class ____(_SimpleAutomotiveTestMixin):
    """Test sv_SE automotive provider methods"""

    license_plate_pattern: Pattern = re.compile(r"[A-Z]{3} \d{2}[\dA-Z]")
TestSvSe
python
astropy__astropy
astropy/io/fits/tests/test_core.py
{ "start": 22268, "end": 56672 }
class ____(FitsTestCase): """ Tests various basic I/O operations, specifically in the astropy.io.fits.file._File class. """ def test_open_nonexistent(self): """Test that trying to open a non-existent file results in an OSError (and not some other arbitrary exception). """ with pytest.raises(OSError, match=r"No such file or directory"): fits.open(self.temp("foobar.fits")) # But opening in ostream or append mode should be okay, since they # allow writing new files for mode in ("ostream", "append"): with fits.open(self.temp("foobar.fits"), mode=mode) as _: pass assert os.path.exists(self.temp("foobar.fits")) os.remove(self.temp("foobar.fits")) def test_open_file_handle(self): # Make sure we can open a FITS file from an open file handle with open(self.data("test0.fits"), "rb") as handle: with fits.open(handle) as _: pass with open(self.temp("temp.fits"), "wb") as handle: with fits.open(handle, mode="ostream") as _: pass # Opening without explicitly specifying binary mode should fail with pytest.raises(ValueError): with open(self.data("test0.fits")) as handle: with fits.open(handle) as _: pass # All of these read modes should fail for mode in ["r", "rt"]: with pytest.raises(ValueError): with open(self.data("test0.fits"), mode=mode) as handle: with fits.open(handle) as _: pass # These update or write modes should fail as well for mode in ["w", "wt", "w+", "wt+", "r+", "rt+", "a", "at", "a+", "at+"]: with pytest.raises(ValueError): with open(self.temp("temp.fits"), mode=mode) as handle: with fits.open(handle) as _: pass def test_fits_file_handle_mode_combo(self): # This should work fine since no mode is given with open(self.data("test0.fits"), "rb") as handle: with fits.open(handle) as _: pass # This should work fine since the modes are compatible with open(self.data("test0.fits"), "rb") as handle: with fits.open(handle, mode="readonly") as _: pass # This should not work since the modes conflict with pytest.raises(ValueError): with open(self.data("test0.fits"), "rb") as handle: with fits.open(handle, mode="ostream") as _: pass def test_open_from_url(self): file_url = "file:///" + self.data("test0.fits").lstrip("/") with urllib.request.urlopen(file_url) as urlobj: with fits.open(urlobj) as _: pass # It will not be possible to write to a file that is from a URL object for mode in ("ostream", "append", "update"): with pytest.raises(ValueError): with urllib.request.urlopen(file_url) as urlobj: with fits.open(urlobj, mode=mode) as _: pass @pytest.mark.remote_data(source="astropy") def test_open_from_remote_url(self): for dataurl in (conf.dataurl, conf.dataurl_mirror): remote_url = f"{dataurl}/allsky/allsky_rosat.fits" try: with urllib.request.urlopen(remote_url) as urlobj: with fits.open(urlobj) as fits_handle: assert len(fits_handle) == 1 for mode in ("ostream", "append", "update"): with pytest.raises(ValueError): with urllib.request.urlopen(remote_url) as urlobj: with fits.open(urlobj, mode=mode) as fits_handle: assert len(fits_handle) == 1 except (urllib.error.HTTPError, urllib.error.URLError): continue else: break else: raise Exception("Could not download file") def test_open_gzipped(self): gzip_file = self._make_gzip_file() with fits.open(gzip_file) as fits_handle: assert fits_handle._file.compression == "gzip" assert len(fits_handle) == 5 with fits.open(gzip_file, decompress_in_memory=True) as fits_handle: assert fits_handle._file.compression == "gzip" assert len(fits_handle) == 5 with fits.open(gzip.GzipFile(gzip_file)) as fits_handle: assert fits_handle._file.compression == "gzip" assert 
len(fits_handle) == 5 def test_open_gzipped_from_handle(self): with open(self._make_gzip_file(), "rb") as handle: with fits.open(handle) as fits_handle: assert fits_handle._file.compression == "gzip" def test_detect_gzipped(self): """Test detection of a gzip file when the extension is not .gz.""" with fits.open(self._make_gzip_file("test0.fz")) as fits_handle: assert fits_handle._file.compression == "gzip" assert len(fits_handle) == 5 def test_writeto_append_mode_gzip(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/33 Check that a new GzipFile opened in append mode can be used to write out a new FITS file. """ # Note: when opening a GzipFile the 'b+' is superfluous, but this was # still how the original test case looked # Note: with statement not supported on GzipFile in older Python # versions fileobj = gzip.GzipFile(self.temp("test.fits.gz"), "ab+") h = fits.PrimaryHDU() try: h.writeto(fileobj) finally: fileobj.close() with fits.open(self.temp("test.fits.gz")) as hdul: assert hdul[0].header == h.header def test_fits_update_mode_gzip(self): """Test updating a GZipped FITS file""" with fits.open(self._make_gzip_file("update.gz"), mode="update") as fits_handle: hdu = fits.ImageHDU(data=list(range(100))) fits_handle.append(hdu) with fits.open(self.temp("update.gz")) as new_handle: assert len(new_handle) == 6 assert (new_handle[-1].data == list(range(100))).all() def test_fits_append_mode_gzip(self): """Make sure that attempting to open an existing GZipped FITS file in 'append' mode raises an error""" with pytest.raises(OSError): with fits.open(self._make_gzip_file("append.gz"), mode="append") as _: pass @pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module") def test_open_bzipped(self): bzip_file = self._make_bzip2_file() with fits.open(bzip_file) as fits_handle: assert fits_handle._file.compression == "bzip2" assert len(fits_handle) == 5 with fits.open(bzip_file, decompress_in_memory=True) as fits_handle: assert fits_handle._file.compression == "bzip2" assert len(fits_handle) == 5 with fits.open(bz2.BZ2File(bzip_file)) as fits_handle: assert fits_handle._file.compression == "bzip2" assert len(fits_handle) == 5 for mode in ("append", "update"): with pytest.raises( OSError, match="update and append modes are not supported with bzip2 files", ): with fits.open(bzip_file, mode=mode) as fits_handle: pass @pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module") def test_open_bzipped_from_handle(self): with open(self._make_bzip2_file(), "rb") as handle: with fits.open(handle) as fits_handle: assert fits_handle._file.compression == "bzip2" assert len(fits_handle) == 5 @pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module") def test_detect_bzipped(self): """Test detection of a bzip2 file when the extension is not .bz2.""" with fits.open(self._make_bzip2_file("test0.xx")) as fits_handle: assert fits_handle._file.compression == "bzip2" assert len(fits_handle) == 5 @pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module") def test_writeto_bzip2_fileobj(self): """Test writing to a bz2.BZ2File file like object""" fileobj = bz2.BZ2File(self.temp("test.fits.bz2"), "w") h = fits.PrimaryHDU() try: h.writeto(fileobj) finally: fileobj.close() with fits.open(self.temp("test.fits.bz2")) as hdul: assert hdul[0].header == h.header @pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module") def test_writeto_bzip2_filename(self): """Test writing to a bzip2 file by name""" filename = 
self.temp("testname.fits.bz2") h = fits.PrimaryHDU() h.writeto(filename) with fits.open(self.temp("testname.fits.bz2")) as hdul: assert hdul[0].header == h.header @pytest.mark.skipif(not HAS_LZMA, reason="Python built without lzma module") def test_open_lzma(self): lzma_file = self._make_lzma_file() with fits.open(lzma_file) as fits_handle: assert fits_handle._file.compression == "lzma" assert len(fits_handle) == 5 with fits.open(lzma_file, decompress_in_memory=True) as fits_handle: assert fits_handle._file.compression == "lzma" assert len(fits_handle) == 5 with fits.open(lzma.LZMAFile(lzma_file)) as fits_handle: assert fits_handle._file.compression == "lzma" assert len(fits_handle) == 5 for mode in ("append", "update"): with pytest.raises( OSError, match="update and append modes are not supported with lzma files", ): with fits.open(lzma_file, mode=mode) as fits_handle: pass @pytest.mark.skipif(not HAS_LZMA, reason="Python built without lzma module") def test_open_lzma_from_handle(self): with open(self._make_lzma_file(), "rb") as handle: with fits.open(handle) as fits_handle: assert fits_handle._file.compression == "lzma" assert len(fits_handle) == 5 @pytest.mark.skipif(not HAS_LZMA, reason="Python built without lzma module") def test_detect_lzma(self): """Test detection of a lzma file when the extension is not .xz.""" with fits.open(self._make_lzma_file("test0.xx")) as fits_handle: assert fits_handle._file.compression == "lzma" assert len(fits_handle) == 5 @pytest.mark.skipif(not HAS_LZMA, reason="Python built without lzma module") def test_writeto_lzma_fileobj(self): """Test writing to a lzma.LZMAFile file like object""" fileobj = lzma.LZMAFile(self.temp("test.fits.xz"), "w") h = fits.PrimaryHDU() try: h.writeto(fileobj) finally: fileobj.close() with fits.open(self.temp("test.fits.xz")) as hdul: assert hdul[0].header == h.header @pytest.mark.skipif(not HAS_LZMA, reason="Python built without lzma module") def test_writeto_lzma_filename(self): """Test writing to a lzma file by name""" filename = self.temp("testname.fits.xz") h = fits.PrimaryHDU() h.writeto(filename) with fits.open(self.temp("testname.fits.xz")) as hdul: assert hdul[0].header == h.header @pytest.mark.skipif( not HAS_UNCOMPRESSPY, reason="Optional package uncompresspy not installed" ) def test_open_lzw(self): lzw_file = self._make_lzw_file() arcfile = "ONTT.1991-12-30T08:55:46.000.fits" last_datapoint = 53 with fits.open(lzw_file) as fits_handle: assert fits_handle._file.compression == "lzw" assert len(fits_handle) == 1 assert fits_handle[0].header["ARCFILE"] == arcfile assert fits_handle[0].data[-1, -1] == last_datapoint with fits.open(lzw_file, decompress_in_memory=True) as fits_handle: assert fits_handle._file.compression == "lzw" assert len(fits_handle) == 1 assert fits_handle[0].header["ARCFILE"] == arcfile assert fits_handle[0].data[-1, -1] == last_datapoint with fits.open(uncompresspy.LZWFile(lzw_file)) as fits_handle: assert fits_handle._file.compression == "lzw" assert len(fits_handle) == 1 assert fits_handle[0].header["ARCFILE"] == arcfile assert fits_handle[0].data[-1, -1] == last_datapoint for mode in ("append", "update"): with pytest.raises( OSError, match=f"{mode} mode not supported with LZW files" ): with fits.open(lzw_file, mode=mode) as fits_handle: pass @pytest.mark.skipif( not HAS_UNCOMPRESSPY, reason="Optional package uncompresspy not installed" ) def test_open_lzw_from_handle(self): arcfile = "ONTT.1991-12-30T08:55:46.000.fits" last_datapoint = 53 with open(self._make_lzw_file(), "rb") as handle: with 
fits.open(handle) as fits_handle: assert fits_handle._file.compression == "lzw" assert len(fits_handle) == 1 assert fits_handle[0].header["ARCFILE"] == arcfile assert fits_handle[0].data[-1, -1] == last_datapoint @pytest.mark.skipif( not HAS_UNCOMPRESSPY, reason="Optional package uncompresspy not installed" ) def test_detect_lzw(self): """Test detection of a lzw file when the extension is not .Z.""" arcfile = "ONTT.1991-12-30T08:55:46.000.fits" last_datapoint = 53 with fits.open(self._make_lzw_file("test0.xx")) as fits_handle: assert fits_handle._file.compression == "lzw" assert len(fits_handle) == 1 assert fits_handle[0].header["ARCFILE"] == arcfile assert fits_handle[0].data[-1, -1] == last_datapoint @pytest.mark.skipif( not HAS_UNCOMPRESSPY, reason="Optional package uncompresspy not installed" ) def test_writeto_lzw_filename(self): """Test writing to a LZW file by name. This should fail as writing LZW is not supported.""" filename = self.temp("testname.fits.Z") h = fits.PrimaryHDU() with pytest.raises(OSError, match="mode not supported with LZW files"): h.writeto(filename) def test_open_zipped(self): zip_file = self._make_zip_file() with fits.open(zip_file) as fits_handle: assert fits_handle._file.compression == "zip" assert len(fits_handle) == 5 with fits.open(zip_file, decompress_in_memory=True) as fits_handle: assert fits_handle._file.compression == "zip" assert len(fits_handle) == 5 with fits.open(zipfile.ZipFile(zip_file)) as fits_handle: assert fits_handle._file.compression == "zip" assert len(fits_handle) == 5 def test_open_zipped_from_handle(self): with open(self._make_zip_file(), "rb") as handle: with fits.open(handle) as fits_handle: assert fits_handle._file.compression == "zip" assert len(fits_handle) == 5 def test_detect_zipped(self): """Test detection of a zip file when the extension is not .zip.""" zf = self._make_zip_file(filename="test0.fz") with fits.open(zf) as fits_handle: assert len(fits_handle) == 5 def test_open_zipped_writeable(self): """Opening zipped files in a writeable mode should fail.""" zf = self._make_zip_file() pytest.raises(OSError, fits.open, zf, "update") pytest.raises(OSError, fits.open, zf, "append") zf = zipfile.ZipFile(zf, "a") pytest.raises(OSError, fits.open, zf, "update") pytest.raises(OSError, fits.open, zf, "append") def test_read_open_astropy_gzip_file(self): """ Regression test for https://github.com/astropy/astropy/issues/2774 This tests reading from a ``GzipFile`` object from Astropy's compatibility copy of the ``gzip`` module. """ gf = gzip.GzipFile(self._make_gzip_file()) try: assert len(fits.open(gf)) == 5 finally: gf.close() def test_open_multiple_member_zipfile(self): """ Opening zip files containing more than one member files should fail as there's no obvious way to specify which file is the FITS file to read. 
""" zfile = zipfile.ZipFile(self.temp("test0.zip"), "w") zfile.write(self.data("test0.fits")) zfile.writestr("foo", "bar") zfile.close() with pytest.raises(OSError): fits.open(zfile.filename) def test_read_open_file(self): """Read from an existing file object.""" with open(self.data("test0.fits"), "rb") as f: assert len(fits.open(f)) == 5 def test_read_closed_file(self): """Read from an existing file object that's been closed.""" f = open(self.data("test0.fits"), "rb") f.close() with fits.open(f) as f2: assert len(f2) == 5 def test_read_open_gzip_file(self): """Read from an open gzip file object.""" gf = gzip.GzipFile(self._make_gzip_file()) try: assert len(fits.open(gf)) == 5 finally: gf.close() def test_open_gzip_file_for_writing(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/195.""" gf = self._make_gzip_file() with fits.open(gf, mode="update") as h: h[0].header["EXPFLAG"] = "ABNORMAL" h[1].data[0, 0] = 1 with fits.open(gf) as h: # Just to make sure the update worked; if updates work # normal writes should work too... assert h[0].header["EXPFLAG"] == "ABNORMAL" assert h[1].data[0, 0] == 1 def test_write_read_gzip_file(self, home_is_temp): """ Regression test for https://github.com/astropy/astropy/issues/2794 Ensure files written through gzip are readable. """ data = np.arange(100) hdu = fits.PrimaryHDU(data=data) hdu.writeto(self.temp("test.fits.gz")) with open(os.path.expanduser(self.temp("test.fits.gz")), "rb") as f: assert f.read(3) == GZIP_MAGIC with fits.open(self.temp("test.fits.gz")) as hdul: assert np.all(hdul[0].data == data) @pytest.mark.parametrize("ext", ["gz", "bz2", "zip", "xz", "Z"]) def test_compressed_ext_but_not_compressed(self, ext): testfile = self.temp(f"test0.fits.{ext}") shutil.copy(self.data("test0.fits"), testfile) with fits.open(testfile) as hdul: assert len(hdul) == 5 fits.append(testfile, np.arange(5)) with fits.open(testfile) as hdul: assert len(hdul) == 6 def test_read_file_like_object(self): """Test reading a FITS file from a file-like object.""" filelike = io.BytesIO() with open(self.data("test0.fits"), "rb") as f: filelike.write(f.read()) filelike.seek(0) assert len(fits.open(filelike)) == 5 def test_updated_file_permissions(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/79 Tests that when a FITS file is modified in update mode, the file permissions are preserved. """ filename = self.temp("test.fits") hdul = [fits.PrimaryHDU(), fits.ImageHDU()] hdul = fits.HDUList(hdul) hdul.writeto(filename) old_mode = os.stat(filename).st_mode hdul = fits.open(filename, mode="update") hdul.insert(1, fits.ImageHDU()) hdul.flush() hdul.close() assert old_mode == os.stat(filename).st_mode def test_fileobj_mode_guessing(self): """Tests whether a file opened without a specified io.fits mode ('readonly', etc.) is opened in a mode appropriate for the given file object. 
""" testfile = self.copy_file("test0.fits") # Opening in text mode should outright fail for mode in ("r", "w", "a"): with open(testfile, mode) as f: pytest.raises(ValueError, fits.HDUList.fromfile, f) # Need to re-copy the file since opening it in 'w' mode blew it away testfile = self.copy_file("test0.fits") with open(testfile, "rb") as f: with fits.HDUList.fromfile(f) as h: assert h.fileinfo(0)["filemode"] == "readonly" for mode in ("wb", "ab"): with open(testfile, mode) as f: with fits.HDUList.fromfile(f) as h: # Basically opening empty files for output streaming assert len(h) == 0 # Need to re-copy the file since opening it in 'w' mode blew it away with open(self.copy_file("test0.fits"), "wb+") as f: with fits.HDUList.fromfile(f) as h: # wb+ still causes an existing file to be overwritten so there # are no HDUs assert len(h) == 0 # Need to re-copy the file since opening it in 'w' mode blew it away testfile = self.copy_file("test0.fits") with open(testfile, "rb+") as f: with fits.HDUList.fromfile(f) as h: assert h.fileinfo(0)["filemode"] == "update" with open(testfile, "ab+") as f: with fits.HDUList.fromfile(f) as h: assert h.fileinfo(0)["filemode"] == "append" def test_mmap_unwriteable(self): """Regression test for https://github.com/astropy/astropy/issues/968 Temporarily patches mmap.mmap to exhibit platform-specific bad behavior. """ class MockMmap(mmap.mmap): def flush(self): raise OSError("flush is broken on this platform") old_mmap = mmap.mmap mmap.mmap = MockMmap # Force the mmap test to be rerun _File.__dict__["_mmap_available"]._cache.clear() try: testfile = self.copy_file("test0.fits") with pytest.warns( AstropyUserWarning, match=r"mmap\.flush is unavailable" ) as w: with fits.open(testfile, mode="update", memmap=True) as h: h[1].data[0, 0] = 999 assert len(w) == 1 # Double check that writing without mmap still worked with fits.open(testfile) as h: assert h[1].data[0, 0] == 999 finally: mmap.mmap = old_mmap _File.__dict__["_mmap_available"]._cache.clear() def test_mmap_allocate_error(self): """ Regression test for https://github.com/astropy/astropy/issues/1380 Temporarily patches mmap.mmap to raise an OSError if mode is ACCESS_COPY. """ mmap_original = mmap.mmap # We patch mmap here to raise an error if access=mmap.ACCESS_COPY, which # emulates an issue that an OSError is raised if the available address # space is less than the size of the file even if memory mapping is used. def mmap_patched(*args, **kwargs): if kwargs.get("access") == mmap.ACCESS_COPY: exc = OSError() if sys.platform.startswith("win32"): exc.errno = errno.EINVAL exc.winerror = 1455 else: exc.errno = errno.ENOMEM raise exc else: return mmap_original(*args, **kwargs) with fits.open(self.data("test0.fits"), memmap=True) as hdulist: with patch.object(mmap, "mmap", side_effect=mmap_patched) as p: with pytest.warns( AstropyUserWarning, match=r"Could not memory map array with mode='readonly'", ): data = hdulist[1].data p.reset_mock() assert not data.flags.writeable def test_mmap_closing(self): """ Tests that the mmap reference is closed/removed when there aren't any HDU data references left. 
""" if not _File._mmap_available: pytest.xfail("not expected to work on platforms without mmap support") with fits.open(self.data("test0.fits"), memmap=True) as hdul: assert hdul._file._mmap is None hdul[1].data assert hdul._file._mmap is not None del hdul[1].data # Should be no more references to data in the file so close the # mmap assert hdul._file._mmap is None hdul[1].data hdul[2].data del hdul[1].data # hdul[2].data is still references so keep the mmap open assert hdul._file._mmap is not None del hdul[2].data assert hdul._file._mmap is None assert hdul._file._mmap is None with fits.open(self.data("test0.fits"), memmap=True) as hdul: hdul[1].data # When the only reference to the data is on the hdu object, and the # hdulist it belongs to has been closed, the mmap should be closed as # well assert hdul._file._mmap is None with fits.open(self.data("test0.fits"), memmap=True) as hdul: data = hdul[1].data # also make a copy data_copy = data.copy() # The HDUList is closed; in fact, get rid of it completely del hdul # The data array should still work though... assert np.all(data == data_copy) def test_uncloseable_file(self): """ Regression test for https://github.com/astropy/astropy/issues/2356 Demonstrates that FITS files can still be read from file-like objects that don't have an obvious "open" or "closed" state. """ class MyFileLike: def __init__(self, foobar): self._foobar = foobar def read(self, n): return self._foobar.read(n) def seek(self, offset, whence=os.SEEK_SET): self._foobar.seek(offset, whence) def tell(self): return self._foobar.tell() with open(self.data("test0.fits"), "rb") as f: fileobj = MyFileLike(f) with fits.open(fileobj) as hdul1: with fits.open(self.data("test0.fits")) as hdul2: assert hdul1.info(output=False) == hdul2.info(output=False) for hdu1, hdu2 in zip(hdul1, hdul2): assert hdu1.header == hdu2.header if hdu1.data is not None and hdu2.data is not None: assert np.all(hdu1.data == hdu2.data) def test_write_bytesio_discontiguous(self): """ Regression test related to https://github.com/astropy/astropy/issues/2794#issuecomment-55441539 Demonstrates that writing an HDU containing a discontiguous Numpy array should work properly. """ data = np.arange(100)[::3] hdu = fits.PrimaryHDU(data=data) fileobj = io.BytesIO() hdu.writeto(fileobj) fileobj.seek(0) with fits.open(fileobj) as h: assert np.all(h[0].data == data) def test_write_bytesio(self): """ Regression test for https://github.com/astropy/astropy/issues/2463 Test against `io.BytesIO`. `io.StringIO` is not supported. """ self._test_write_string_bytes_io(io.BytesIO()) @pytest.mark.skipif( sys.platform.startswith("win32"), reason="Cannot test on Windows" ) def test_filename_with_colon(self): """ Test reading and writing a file with a colon in the filename. Regression test for https://github.com/astropy/astropy/issues/3122 """ # Skip on Windows since colons in filenames makes NTFS sad. filename = "APEXHET.2014-04-01T15:18:01.000.fits" hdu = fits.PrimaryHDU(data=np.arange(10)) hdu.writeto(self.temp(filename)) with fits.open(self.temp(filename)) as hdul: assert np.all(hdul[0].data == hdu.data) def test_writeto_full_disk(self, monkeypatch): """ Test that it gives a readable error when trying to write an hdulist to a full disk. """ def _writeto(self, array): raise OSError("Fake error raised when writing file.") def get_free_space_in_dir(path): return 0 msg = ( "Not enough space on disk: requested 8000, available 0. " "Fake error raised when writing file." 
) with pytest.raises(OSError, match=msg) as exc: monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writeto", _writeto) monkeypatch.setattr(data, "get_free_space_in_dir", get_free_space_in_dir) n = np.arange(0, 1000, dtype="int64") hdu = fits.PrimaryHDU(n) hdulist = fits.HDUList(hdu) filename = self.temp("test.fits") with open(filename, mode="wb") as fileobj: hdulist.writeto(fileobj) def test_flush_full_disk(self, monkeypatch): """ Test that it gives a readable error when trying to update an hdulist to a full disk. """ filename = self.temp("test.fits") hdul = [fits.PrimaryHDU(), fits.ImageHDU()] hdul = fits.HDUList(hdul) hdul[0].data = np.arange(0, 1000, dtype="int64") hdul.writeto(filename) def _writedata(self, fileobj): raise OSError("Fake error raised when writing file.") def get_free_space_in_dir(path): return 0 monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writedata", _writedata) monkeypatch.setattr(data, "get_free_space_in_dir", get_free_space_in_dir) msg = ( "Not enough space on disk: requested 8000, available 0. " "Fake error raised when writing file." ) with pytest.raises(OSError, match=msg) as exc: with fits.open(filename, mode="update") as hdul: hdul[0].data = np.arange(0, 1000, dtype="int64") hdul.insert(1, fits.ImageHDU()) hdul.flush() def _test_write_string_bytes_io(self, fileobj): """ Implemented for both test_write_stringio and test_write_bytesio. """ with fits.open(self.data("test0.fits")) as hdul: hdul.writeto(fileobj) hdul2 = fits.HDUList.fromstring(fileobj.getvalue()) assert FITSDiff(hdul, hdul2).identical def _make_gzip_file(self, filename="test0.fits.gz"): gzfile = self.temp(filename) with open(self.data("test0.fits"), "rb") as f: gz = gzip.open(gzfile, "wb") gz.write(f.read()) gz.close() return gzfile def test_write_overwrite(self, home_is_temp): filename = self.temp("test_overwrite.fits") hdu = fits.PrimaryHDU(data=np.arange(10)) hdu.writeto(filename) with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH): hdu.writeto(filename) hdu.writeto(filename, overwrite=True) def _make_zip_file(self, mode="copyonwrite", filename="test0.fits.zip"): zfile = zipfile.ZipFile(self.temp(filename), "w") zfile.write(self.data("test0.fits")) zfile.close() return zfile.filename def _make_bzip2_file(self, filename="test0.fits.bz2"): bzfile = self.temp(filename) with open(self.data("test0.fits"), "rb") as f: bz = bz2.BZ2File(bzfile, "w") bz.write(f.read()) bz.close() return bzfile def _make_lzma_file(self, filename="test0.fits.xz"): lzmafile = self.temp(filename) with open(self.data("test0.fits"), "rb") as f: lz = lzma.LZMAFile(lzmafile, "w") lz.write(f.read()) lz.close() return lzmafile def _make_lzw_file(self, new_filename=None): return self.copy_file("lzw.fits.Z", new_filename) def test_simulateonly(self): """Write to None simulates writing.""" with fits.open(self.data("test0.fits")) as hdul: hdul.writeto(None) hdul[0].writeto(None) hdul[0].header.tofile(None) def test_bintablehdu_zero_bytes(self): """Make sure we don't have any zero-byte writes in BinTableHDU""" bright = np.rec.array( [ (1, "Sirius", -1.45, "A1V"), (2, "Canopus", -0.73, "F0Ib"), (3, "Rigil Kent", -0.1, "G2V"), ], formats="int16,S20,float32,S10", names="order,name,mag,Sp", ) hdu_non_zero = fits.BinTableHDU(bright) # use safeio, a special file handler meant to fail on zero-byte writes fh = safeio.CatchZeroByteWriter(open(self.temp("bright.fits"), mode="wb")) hdu_non_zero.writeto(fh) fh.close() def test_primaryhdu_zero_bytes(self): """ Make sure we don't have any zero-byte writes from an ImageHDU (or other) of `size % 
BLOCK_SIZE == 0` """ hdu_img_2880 = fits.PrimaryHDU(data=np.arange(720, dtype="i4")) # use safeio, a special file handler meant to fail on zero-byte writes fh = safeio.CatchZeroByteWriter(open(self.temp("image.fits"), mode="wb")) hdu_img_2880.writeto(fh) fh.close() def test_HDUList_writeto_stdout(self): # see https://github.com/astropy/astropy/issues/3427 hdul = fits.HDUList([fits.PrimaryHDU()]) hdul.writeto(sys.stdout)
TestFileFunctions
python
huggingface__transformers
tests/models/camembert/test_tokenization_camembert.py
{ "start": 243, "end": 2114 }
class ____(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = ["almanach/camembert-base"] tokenizer_class = CamembertTokenizer integration_expected_tokens = ['▁This', '▁is', '▁a', '▁test', '▁', '😊', '▁I', '▁was', '▁', 'born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 'sé', '.', '▁', '生活的真谛是', '▁Hi', '▁Hello', '▁Hi', '▁Hello', '▁Hello', '<s>', '▁hi', '<s>', '▁the', 're', '▁The', '▁', 'follow', 'ing', '▁string', '▁s', 'h', 'ould', '▁be', '▁pro', 'per', 'ly', '▁en', 'code', 'd', ':', '▁Hello', '.', '▁But', '▁i', 'rd', '▁and', '▁', 'ปี', '▁i', 'rd', '▁', 'ด', '▁Hey', '▁h', 'ow', '▁are', '▁you', '▁do', 'ing'] # fmt: skip integration_expected_token_ids = [17526, 2856, 33, 2006, 21, 3, 551, 15760, 21, 24900, 378, 419, 13233, 7, 1168, 9098, 2856, 19289, 5100, 9, 21, 3, 5108, 9774, 5108, 9774, 9774, 5, 7874, 5, 808, 346, 908, 21, 31189, 402, 20468, 52, 133, 19306, 2446, 909, 1399, 1107, 22, 14420, 204, 92, 9774, 9, 10503, 1723, 6682, 1168, 21, 3, 1723, 6682, 21, 3, 20128, 616, 3168, 9581, 4835, 7503, 402] # fmt: skip expected_tokens_from_ids = ['▁This', '▁is', '▁a', '▁test', '▁', '<unk>', '▁I', '▁was', '▁', 'born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 'sé', '.', '▁', '<unk>', '▁Hi', '▁Hello', '▁Hi', '▁Hello', '▁Hello', '<s>', '▁hi', '<s>', '▁the', 're', '▁The', '▁', 'follow', 'ing', '▁string', '▁s', 'h', 'ould', '▁be', '▁pro', 'per', 'ly', '▁en', 'code', 'd', ':', '▁Hello', '.', '▁But', '▁i', 'rd', '▁and', '▁', '<unk>', '▁i', 'rd', '▁', '<unk>', '▁Hey', '▁h', 'ow', '▁are', '▁you', '▁do', 'ing'] # fmt: skip integration_expected_decoded_text = "This is a test <unk> I was born in 92000, and this is falsé. <unk> Hi Hello Hi Hello Hello<s> hi<s> there The following string should be properly encoded: Hello. But ird and <unk> ird <unk> Hey how are you doing"
CamembertTokenizationTest
python
pandas-dev__pandas
pandas/io/stata.py
{ "start": 19966, "end": 23502 }
class ____: """ Parse a categorical column and prepare formatted output Parameters ---------- catarray : Series Categorical Series to encode encoding : {"latin-1", "utf-8"} Encoding to use for value labels. """ def __init__( self, catarray: Series, encoding: Literal["latin-1", "utf-8"] = "latin-1" ) -> None: if encoding not in ("latin-1", "utf-8"): raise ValueError("Only latin-1 and utf-8 are supported.") self.labname = catarray.name self._encoding = encoding categories = catarray.cat.categories self.value_labels = enumerate(categories) self._prepare_value_labels() def _prepare_value_labels(self) -> None: """Encode value labels.""" self.text_len = 0 self.txt: list[bytes] = [] self.n = 0 # Offsets (length of categories), converted to int32 self.off = np.array([], dtype=np.int32) # Values, converted to int32 self.val = np.array([], dtype=np.int32) self.len = 0 # Compute lengths and setup lists of offsets and labels offsets: list[int] = [] values: list[float] = [] for vl in self.value_labels: category: str | bytes = vl[1] if not isinstance(category, str): category = str(category) warnings.warn( value_label_mismatch_doc.format(self.labname), ValueLabelTypeMismatch, stacklevel=find_stack_level(), ) category = category.encode(self._encoding) offsets.append(self.text_len) self.text_len += len(category) + 1 # +1 for the padding values.append(vl[0]) self.txt.append(category) self.n += 1 # Ensure int32 self.off = np.array(offsets, dtype=np.int32) self.val = np.array(values, dtype=np.int32) # Total length self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len def generate_value_label(self, byteorder: str) -> bytes: """ Generate the binary representation of the value labels. Parameters ---------- byteorder : str Byte order of the output Returns ------- value_label : bytes Bytes containing the formatted value label """ encoding = self._encoding bio = BytesIO() null_byte = b"\x00" # len bio.write(struct.pack(byteorder + "i", self.len)) # labname labname = str(self.labname)[:32].encode(encoding) lab_len = 32 if encoding not in ("utf-8", "utf8") else 128 labname = _pad_bytes(labname, lab_len + 1) bio.write(labname) # padding - 3 bytes for i in range(3): bio.write(struct.pack("c", null_byte)) # value_label_table # n - int32 bio.write(struct.pack(byteorder + "i", self.n)) # textlen - int32 bio.write(struct.pack(byteorder + "i", self.text_len)) # off - int32 array (n elements) for offset in self.off: bio.write(struct.pack(byteorder + "i", offset)) # val - int32 array (n elements) for value in self.val: bio.write(struct.pack(byteorder + "i", value)) # txt - Text labels, null terminated for text in self.txt: bio.write(text + null_byte) return bio.getvalue()
StataValueLabel
python
donnemartin__system-design-primer
solutions/object_oriented_design/deck_of_cards/deck_of_cards.py
{ "start": 459, "end": 1186 }
class ____(Card):

    def __init__(self, value, suit):
        super(BlackJackCard, self).__init__(value, suit)

    def is_ace(self):
        return True if self._value == 1 else False

    def is_face_card(self):
        """Jack = 11, Queen = 12, King = 13"""
        return True if 10 < self._value <= 13 else False

    @property
    def value(self):
        if self.is_ace() == 1:
            return 1
        elif self.is_face_card():
            return 10
        else:
            return self._value

    @value.setter
    def value(self, new_value):
        if 1 <= new_value <= 13:
            self._value = new_value
        else:
            raise ValueError('Invalid card value: {}'.format(new_value))
BlackJackCard
python
pyinstaller__pyinstaller
bootloader/waflib/Task.py
{ "start": 1377, "end": 2333 }
class ____(type):
    def __init__(cls, name, bases, dict):
        super(store_task_type, cls).__init__(name, bases, dict)
        name = cls.__name__
        if name != 'evil' and name != 'Task':
            if getattr(cls, 'run_str', None):
                (f, dvars) = compile_fun(cls.run_str, cls.shell)
                cls.hcode = Utils.h_cmd(cls.run_str)
                cls.orig_run_str = cls.run_str
                cls.run_str = None
                cls.run = f
                cls.vars = list(set(cls.vars + dvars))
                cls.vars.sort()
                if cls.vars:
                    fun = compile_sig_vars(cls.vars)
                    if fun:
                        cls.sig_vars = fun
            elif getattr(cls, 'run', None) and not 'hcode' in cls.__dict__:
                cls.hcode = Utils.h_cmd(cls.run)
            getattr(cls, 'register', classes)[name] = cls


evil = store_task_type('evil', (object,), {})
store_task_type
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_area06.py
{ "start": 315, "end": 1575 }
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename("chart_area06.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({"type": "area"})
        chart.axis_ids = [60957824, 60959360]

        data = [
            [1, 2, 3, 4, 5],
            [8, 7, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column("A1", data[0])
        worksheet.write_column("B1", data[1])
        worksheet.write_column("C1", data[2])

        chart.add_series(
            {
                "categories": "=Sheet1!$A$1:$A$5",
                "values": "=Sheet1!$B$1:$B$5",
            }
        )

        chart.add_series(
            {
                "categories": "=Sheet1!$A$1:$A$5",
                "values": "=Sheet1!$C$1:$C$5",
            }
        )

        worksheet.insert_chart(
            "E9", chart, {"description": "Some alternative text", "decorative": 1}
        )

        workbook.close()

        self.assertExcelEqual()
TestCompareXLSXFiles
python
coleifer__peewee
tests/schema.py
{ "start": 593, "end": 819 }
class ____(TestModel):
    alpha = IntegerField()
    beta = IntegerField()
    gamma = IntegerField()

    class Meta:
        indexes = (
            (('alpha', 'beta'), True),
            (('beta', 'gamma'), False))
TMIndexes
python
spyder-ide__spyder
spyder/plugins/console/widgets/internalshell.py
{ "start": 2327, "end": 3470 }
class ____(QObject):
    """Handle Shell widget refresh signal"""

    sig_new_prompt = Signal(str)
    sig_set_readonly = Signal(bool)
    sig_edit = Signal(str, bool)
    sig_wait_input = Signal(str)

    def __init__(self, input_condition):
        QObject.__init__(self)

        # External editor
        self._gotoline = None
        self._path = None

        self.input_data = None
        self.input_condition = input_condition

    def new_prompt(self, prompt):
        self.sig_new_prompt.emit(prompt)

    def set_readonly(self, state):
        self.sig_set_readonly.emit(state)

    def edit(self, filename, external_editor=False):
        self.sig_edit.emit(filename, external_editor)

    def data_available(self):
        """Return True if input data is available"""
        return self.input_data is not WidgetProxyData

    def wait_input(self, prompt=''):
        self.input_data = WidgetProxyData
        self.sig_wait_input.emit(prompt)

    def end_input(self, cmd):
        self.input_condition.acquire()
        self.input_data = cmd
        self.input_condition.notify()
        self.input_condition.release()
WidgetProxy
python
mlflow__mlflow
tests/sagemaker/mock/__init__.py
{ "start": 38086, "end": 38645 }
class ____(TimestampedResource):
    """
    Object representing a SageMaker model. The SageMakerBackend will create and
    manage Models.
    """

    def __init__(self, model_name, primary_container, execution_role_arn, tags, vpc_config):
        super().__init__()
        self.model_name = model_name
        self.primary_container = primary_container
        self.execution_role_arn = execution_role_arn
        self.tags = tags
        self.vpc_config = vpc_config

    @property
    def arn_descriptor(self):
        return f":model/{self.model_name}"
Model
python
scrapy__scrapy
scrapy/utils/request.py
{ "start": 3701, "end": 7345 }
class ____: """Default fingerprinter. It takes into account a canonical version (:func:`w3lib.url.canonicalize_url`) of :attr:`request.url <scrapy.Request.url>` and the values of :attr:`request.method <scrapy.Request.method>` and :attr:`request.body <scrapy.Request.body>`. It then generates an `SHA1 <https://en.wikipedia.org/wiki/SHA-1>`_ hash. """ @classmethod def from_crawler(cls, crawler: Crawler) -> Self: return cls(crawler) def __init__(self, crawler: Crawler | None = None): self._fingerprint = fingerprint def fingerprint(self, request: Request) -> bytes: return self._fingerprint(request) def request_httprepr(request: Request) -> bytes: """Return the raw HTTP representation (as bytes) of the given request. This is provided only for reference since it's not the actual stream of bytes that will be send when performing the request (that's controlled by Twisted). """ parsed = urlparse_cached(request) path = urlunparse(("", "", parsed.path or "/", parsed.params, parsed.query, "")) s = to_bytes(request.method) + b" " + to_bytes(path) + b" HTTP/1.1\r\n" s += b"Host: " + to_bytes(parsed.hostname or b"") + b"\r\n" if request.headers: s += request.headers.to_string() + b"\r\n" s += b"\r\n" s += request.body return s def referer_str(request: Request) -> str | None: """Return Referer HTTP header suitable for logging.""" referrer = request.headers.get("Referer") if referrer is None: return referrer return to_unicode(referrer, errors="replace") def request_from_dict(d: dict[str, Any], *, spider: Spider | None = None) -> Request: """Create a :class:`~scrapy.Request` object from a dict. If a spider is given, it will try to resolve the callbacks looking at the spider for methods with the same name. """ request_cls: type[Request] = load_object(d["_class"]) if "_class" in d else Request kwargs = {key: value for key, value in d.items() if key in request_cls.attributes} if d.get("callback") and spider: kwargs["callback"] = _get_method(spider, d["callback"]) if d.get("errback") and spider: kwargs["errback"] = _get_method(spider, d["errback"]) return request_cls(**kwargs) def _get_method(obj: Any, name: Any) -> Any: """Helper function for request_from_dict""" name = str(name) try: return getattr(obj, name) except AttributeError: raise ValueError(f"Method {name!r} not found in: {obj}") def request_to_curl(request: Request) -> str: """ Converts a :class:`~scrapy.Request` object to a curl command. :param :class:`~scrapy.Request`: Request object to be converted :return: string containing the curl command """ method = request.method data = f"--data-raw '{request.body.decode('utf-8')}'" if request.body else "" headers = " ".join( f"-H '{k.decode()}: {v[0].decode()}'" for k, v in request.headers.items() ) url = request.url cookies = "" if request.cookies: if isinstance(request.cookies, dict): cookie = "; ".join(f"{k}={v}" for k, v in request.cookies.items()) cookies = f"--cookie '{cookie}'" elif isinstance(request.cookies, list): cookie = "; ".join( f"{next(iter(c.keys()))}={next(iter(c.values()))}" for c in request.cookies ) cookies = f"--cookie '{cookie}'" curl_cmd = f"curl -X {method} {url} {data} {headers} {cookies}".strip() return " ".join(curl_cmd.split())
RequestFingerprinter
python
chroma-core__chroma
chromadb/auth/token_authn/__init__.py
{ "start": 3264, "end": 3594 }
class ____(TypedDict):
    """
    A simple User class for use in this module only. If you need a generic way
    to represent a User, please use UserIdentity as this class keeps track of
    sensitive tokens.
    """

    id: str
    role: str
    tenant: Optional[str]
    databases: Optional[List[str]]
    tokens: List[str]
User
python
astropy__astropy
astropy/cosmology/_src/tests/parameter/test_parameter.py
{ "start": 926, "end": 2261 }
class ____:
    """Test :class:`astropy.cosmology.Parameter` not on a cosmology."""

    @pytest.mark.parametrize(
        "kwargs",
        [
            {},
            dict(
                default=1.0,
                fvalidate="float",
                doc="DOCSTRING",
                unit="km",
                equivalencies=[u.mass_energy()],
                derived=True,
            ),
        ],
    )
    def test_Parameter_init(self, kwargs):
        """Test :class:`astropy.cosmology.Parameter` instantiation."""
        unit = kwargs.get("unit")
        param = Parameter(**kwargs)
        assert param.default == kwargs.get("default", MISSING)
        assert param.fvalidate is _REGISTRY_FVALIDATORS.get(
            kwargs.get("fvalidate"), validate_with_unit
        )
        assert param.doc == kwargs.get("doc")
        assert param.unit is (u.Unit(unit) if unit is not None else None)
        assert param.equivalencies == kwargs.get("equivalencies", [])
        assert param.derived is kwargs.get("derived", False)
        assert param.name == "name not initialized"

    def test_Parameter_default(self):
        """Test :attr:`astropy.cosmology.Parameter.default`."""
        parameter = Parameter()
        assert parameter.default is MISSING
        assert repr(parameter.default) == "<MISSING>"
Test_Parameter
python
wandb__wandb
wandb/sdk/data_types/object_3d.py
{ "start": 7578, "end": 19043 }
class ____(BatchableMedia): """W&B class for 3D point clouds.""" SUPPORTED_TYPES: ClassVar[Set[str]] = { "obj", "gltf", "glb", "babylon", "stl", "pts.json", } SUPPORTED_POINT_CLOUD_TYPES: ClassVar[Set[str]] = {"lidar/beta"} _log_type: ClassVar[str] = "object3D-file" def __init__( self, data_or_path: Union["np.ndarray", str, pathlib.Path, "TextIO", dict], caption: Optional[str] = None, **kwargs: Optional[Union[str, "FileFormat3D"]], ) -> None: """Creates a W&B Object3D object. Args: data_or_path: Object3D can be initialized from a file or a numpy array. caption: Caption associated with the object for display. Examples: The shape of the numpy array must be one of either ```text [[x y z], ...] nx3 [[x y z c], ...] nx4 where c is a category with supported range [1, 14] [[x y z r g b], ...] nx6 where is rgb is color ``` """ super().__init__(caption=caption) if hasattr(data_or_path, "name") and not isinstance(data_or_path, pathlib.Path): # if the file has a path, we just detect the type and copy it from there. # this does not work for pathlib.Path objects, # where `.name` returns the last directory in the path. data_or_path = data_or_path.name if hasattr(data_or_path, "read"): if hasattr(data_or_path, "seek"): data_or_path.seek(0) object_3d = data_or_path.read() extension = kwargs.pop("file_type", None) if extension is None: raise ValueError( "Must pass file type keyword argument when using io objects." ) if extension not in Object3D.SUPPORTED_TYPES: raise ValueError( "Object 3D only supports numpy arrays or files of the type: " + ", ".join(Object3D.SUPPORTED_TYPES) ) extension = "." + extension tmp_path = os.path.join(MEDIA_TMP.name, runid.generate_id() + extension) with open(tmp_path, "w") as f: f.write(object_3d) self._set_file(tmp_path, is_tmp=True, extension=extension) elif isinstance(data_or_path, (str, pathlib.Path)): data_or_path = str(data_or_path) path = data_or_path extension = None for supported_type in Object3D.SUPPORTED_TYPES: if path.endswith(supported_type): extension = "." + supported_type break if not extension: raise ValueError( "File '" + path + "' is not compatible with Object3D: supported types are: " + ", ".join(Object3D.SUPPORTED_TYPES) ) self._set_file(data_or_path, is_tmp=False, extension=extension) # Supported different types and scene for 3D scenes elif isinstance(data_or_path, dict) and "type" in data_or_path: if data_or_path["type"] == "lidar/beta": data = { "type": data_or_path["type"], "vectors": data_or_path["vectors"].tolist() if "vectors" in data_or_path else [], "points": data_or_path["points"].tolist() if "points" in data_or_path else [], "boxes": data_or_path["boxes"].tolist() if "boxes" in data_or_path else [], } else: raise ValueError( "Type not supported, only 'lidar/beta' is currently supported" ) tmp_path = os.path.join(MEDIA_TMP.name, runid.generate_id() + ".pts.json") with codecs.open(tmp_path, "w", encoding="utf-8") as fp: json.dump( data, fp, separators=(",", ":"), sort_keys=True, indent=4, ) self._set_file(tmp_path, is_tmp=True, extension=".pts.json") elif util.is_numpy_array(data_or_path): np_data = data_or_path # The following assertion is required for numpy to trust that # np_data is numpy array. The reason it is behind a False # guard is to ensure that this line does not run at runtime, # which would cause a runtime error if the user's machine did # not have numpy installed. 
if TYPE_CHECKING: assert isinstance(np_data, np.ndarray) if len(np_data.shape) != 2 or np_data.shape[1] not in {3, 4, 6}: raise ValueError( """ The shape of the numpy array must be one of either [[x y z], ...] nx3 [x y z c], ...] nx4 where c is a category with supported range [1, 14] [x y z r g b], ...] nx6 where rgb is color """ ) list_data = np_data.tolist() tmp_path = os.path.join(MEDIA_TMP.name, runid.generate_id() + ".pts.json") with codecs.open(tmp_path, "w", encoding="utf-8") as fp: json.dump( list_data, fp, separators=(",", ":"), sort_keys=True, indent=4, ) self._set_file(tmp_path, is_tmp=True, extension=".pts.json") else: raise ValueError("data must be a numpy array, dict or a file object") @classmethod def from_file( cls, data_or_path: Union["TextIO", str], file_type: Optional["FileFormat3D"] = None, ) -> "Object3D": """Initializes Object3D from a file or stream. Args: data_or_path (Union["TextIO", str]): A path to a file or a `TextIO` stream. file_type (str): Specifies the data format passed to `data_or_path`. Required when `data_or_path` is a `TextIO` stream. This parameter is ignored if a file path is provided. The type is taken from the file extension. <!-- lazydoc-ignore-classmethod: internal --> """ # if file_type is not None and file_type not in cls.SUPPORTED_TYPES: # raise ValueError( # f"Unsupported file type: {file_type}. Supported types are: {cls.SUPPORTED_TYPES}" # ) return cls(data_or_path, file_type=file_type) @classmethod def from_numpy(cls, data: "np.ndarray") -> "Object3D": """Initializes Object3D from a numpy array. Args: data (numpy array): Each entry in the array will represent one point in the point cloud. The shape of the numpy array must be one of either: ```text [[x y z], ...] # nx3. [[x y z c], ...] # nx4 where c is a category with supported range [1, 14]. [[x y z r g b], ...] # nx6 where is rgb is color. ``` <!-- lazydoc-ignore-classmethod: internal --> """ if not util.is_numpy_array(data): raise ValueError("`data` must be a numpy array") if len(data.shape) != 2 or data.shape[1] not in {3, 4, 6}: raise ValueError( """ The shape of the numpy array must be one of either: [[x y z], ...] nx3 [x y z c], ...] nx4 where c is a category with supported range [1, 14] [x y z r g b], ...] nx6 where rgb is color """ ) return cls(data) @classmethod def from_point_cloud( cls, points: Sequence["Point"], boxes: Sequence["Box3D"], vectors: Optional[Sequence["Vector3D"]] = None, point_cloud_type: "PointCloudType" = "lidar/beta", # camera: Optional[Camera] = None, ) -> "Object3D": """Initializes Object3D from a python object. Args: points (Sequence["Point"]): The points in the point cloud. boxes (Sequence["Box3D"]): 3D bounding boxes for labeling the point cloud. Boxes are displayed in point cloud visualizations. vectors (Optional[Sequence["Vector3D"]]): Each vector is displayed in the point cloud visualization. Can be used to indicate directionality of bounding boxes. Defaults to None. point_cloud_type ("lidar/beta"): At this time, only the "lidar/beta" type is supported. Defaults to "lidar/beta". <!-- lazydoc-ignore-classmethod: internal --> """ if point_cloud_type not in cls.SUPPORTED_POINT_CLOUD_TYPES: raise ValueError("Point cloud type not supported") numpy = wandb.util.get_module( "numpy", required="wandb.Object3D.from_point_cloud requires numpy. 
Install with `pip install numpy`", ) data = { "type": point_cloud_type, "points": numpy.array(points), "boxes": numpy.array(boxes), "vectors": numpy.array(vectors) if vectors is not None else numpy.array([]), } return cls(data) @classmethod def get_media_subdir(cls: Type["Object3D"]) -> str: """Get media subdirectory. <!-- lazydoc-ignore-classmethod: internal --> """ return os.path.join("media", "object3D") def to_json(self, run_or_artifact: Union["LocalRun", "Artifact"]) -> dict: """Returns the JSON representation expected by the backend. <!-- lazydoc-ignore: internal --> """ json_dict = super().to_json(run_or_artifact) json_dict["_type"] = Object3D._log_type if isinstance(run_or_artifact, wandb.Artifact): if self._path is None or not self._path.endswith(".pts.json"): raise ValueError( "Non-point cloud 3D objects are not yet supported with Artifacts" ) return json_dict @classmethod def seq_to_json( cls: Type["Object3D"], seq: Sequence["BatchableMedia"], run: "LocalRun", key: str, step: Union[int, str], ) -> dict: """Convert a sequence of Audio objects to a JSON representation. <!-- lazydoc-ignore-classmethod: internal --> """ seq = list(seq) jsons = [obj.to_json(run) for obj in seq] for obj in jsons: expected = LogicalPath(cls.get_media_subdir()) if not obj["path"].startswith(expected): raise ValueError( "Files in an array of Object3D's must be in the {} directory, not {}".format( expected, obj["path"] ) ) return { "_type": "object3D", "filenames": [ os.path.relpath(j["path"], cls.get_media_subdir()) for j in jsons ], "count": len(jsons), "objects": jsons, }
Object3D
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/math_ops/zero_division_test.py
{ "start": 974, "end": 2457 }
class ____(test.TestCase):

  def testZeros(self):
    with test_util.use_gpu():
      for dtype in dtypes.uint8, dtypes.int16, dtypes.int32, dtypes.int64:
        zero = constant_op.constant(0, dtype=dtype)
        one = constant_op.constant(1, dtype=dtype)
        bads = [lambda x, y: x // y]
        if dtype in (dtypes.int32, dtypes.int64):
          bads.append(lambda x, y: x % y)
        for bad in bads:
          try:
            result = self.evaluate(bad(one, zero))
          except (errors.OpError, errors.InvalidArgumentError) as e:
            # Ideally, we'd get a nice exception. In theory, this should only
            # happen on CPU, but 32 bit integer GPU division is actually on
            # CPU due to a placer bug.
            # TODO(irving): Make stricter once the placer bug is fixed.
            self.assertIn('Integer division by zero', str(e))
          else:
            # On the GPU, integer division by zero produces all bits set.
            # But apparently on some GPUs "all bits set" for 64 bit division
            # means 32 bits set, so we allow 0xffffffff as well. This isn't
            # very portable, so we may need to expand this list if other GPUs
            # do different things.
            #
            # XLA constant folds integer division by zero to 1.
            self.assertTrue(test.is_gpu_available())
            self.assertIn(result, (-1, 1, 2, 0xff, 0xffffffff))


if __name__ == '__main__':
  test.main()
ZeroDivisionTest
python
gevent__gevent
src/gevent/tests/test__local.py
{ "start": 1125, "end": 1365 }
class ____(local):
    CLASS_PROP = 42

    def __init__(self):
        local.__init__(self)
        self.sentinel = Sentinel()
        created_sentinels.append(id(self.sentinel))

    @property
    def desc(self):
        return self
MyLocal
python
apache__airflow
providers/google/tests/unit/google/cloud/hooks/test_video_intelligence.py
{ "start": 1375, "end": 4121 }
class ____: def setup_method(self): with mock.patch( "airflow.providers.google.cloud.hooks.video_intelligence.CloudVideoIntelligenceHook.__init__", new=mock_base_gcp_hook_default_project_id, ): self.hook = CloudVideoIntelligenceHook(gcp_conn_id="test") @mock.patch( "airflow.providers.google.cloud.hooks.video_intelligence.CloudVideoIntelligenceHook.get_credentials" ) @mock.patch("airflow.providers.google.cloud.hooks.video_intelligence.VideoIntelligenceServiceClient") def test_video_intelligence_service_client_creation(self, mock_client, mock_get_creds): result = self.hook.get_conn() mock_client.assert_called_once_with(credentials=mock_get_creds.return_value, client_info=CLIENT_INFO) assert mock_client.return_value == result assert self.hook._conn == result @mock.patch("airflow.providers.google.cloud.hooks.video_intelligence.CloudVideoIntelligenceHook.get_conn") def test_annotate_video(self, get_conn): # Given annotate_video_method = get_conn.return_value.annotate_video get_conn.return_value.annotate_video.return_value = ANNOTATE_VIDEO_RESPONSE # When result = self.hook.annotate_video(input_uri=INPUT_URI, features=FEATURES) # Then assert result is ANNOTATE_VIDEO_RESPONSE annotate_video_method.assert_called_once_with( request={ "input_uri": INPUT_URI, "input_content": None, "features": FEATURES, "video_context": None, "output_uri": None, "location_id": None, }, retry=DEFAULT, timeout=None, metadata=(), ) @mock.patch("airflow.providers.google.cloud.hooks.video_intelligence.CloudVideoIntelligenceHook.get_conn") def test_annotate_video_with_output_uri(self, get_conn): # Given annotate_video_method = get_conn.return_value.annotate_video get_conn.return_value.annotate_video.return_value = ANNOTATE_VIDEO_RESPONSE # When result = self.hook.annotate_video(input_uri=INPUT_URI, output_uri=OUTPUT_URI, features=FEATURES) # Then assert result is ANNOTATE_VIDEO_RESPONSE annotate_video_method.assert_called_once_with( request={ "input_uri": INPUT_URI, "output_uri": OUTPUT_URI, "input_content": None, "features": FEATURES, "video_context": None, "location_id": None, }, retry=DEFAULT, timeout=None, metadata=(), )
TestCloudVideoIntelligenceHook
python
allegroai__clearml
clearml/backend_api/services/v2_23/frames.py
{ "start": 310904, "end": 331861 }
class ____(Response): """ Response of frames.get_next_for_dataview_id endpoint. :param frames: Frames list :type frames: Sequence[Frame] :param frames_returned: Number of frames returned :type frames_returned: int :param scroll_state: JSON object representing the scroll state :type scroll_state: dict :param scroll_id: Scroll session id to be provided in order to get the next batch of images :type scroll_id: str :param roi_stats: Json object containing the count per labels in frames, e.g. { 'background': 312, 'boat': 2, 'bus': 4, 'car': 2, } :type roi_stats: dict :param eof: When 'frames' is empty, represents whether there are no more frames left. If "false", client can retry the operation. :type eof: bool :param random_seed: Random seed used for frame selection :type random_seed: int """ _service = "frames" _action = "get_next_for_dataview_id" _version = "2.23" _schema = { "definitions": { "augmentation": { "properties": { "arguments": { "additionalProperties": True, "description": "Arguments dictionary, passed to custom augmentations.", "type": ["object", "null"], }, "cls": { "description": "Augmentation class (see global definitions)", "type": ["string", "null"], }, "params": { "description": ( "Transform parameters, an array ot 3 randomly generated values. Fixed values are passed in" " case of affine reflect augmentation." ), "items": {"type": "number"}, "type": ["array", "null"], }, "strength": { "description": "Transform strength. Required for pixel transforms.", "type": ["number", "null"], }, "trans_mat": { "description": "Transform matrix (list of lists). Required for affine transforms.", "items": {"items": {"type": "number"}, "type": "array"}, "type": ["array", "null"], }, "type": { "description": "Augmentation type (see global definitions)", "type": ["string", "null"], }, }, "type": "object", }, "dataset_version": { "properties": { "id": {"description": "Dataset id", "type": ["string", "null"]}, "version": { "description": "Dataset version id", "type": ["string", "null"], }, }, "type": "object", }, "frame": { "properties": { "augmentation": { "description": "List of augmentations", "items": {"$ref": "#/definitions/augmentation"}, "type": ["array", "null"], }, "blob": { "description": "Raw data (blob) for the frame", "type": ["string", "null"], }, "context_id": { "description": ( "Context ID. Used for the default frames sorting. If not set then it is filled from the uri" " of the first source." ), "type": ["string", "null"], }, "dataset": { "description": "Frame's dataset version", "oneOf": [ {"$ref": "#/definitions/dataset_version"}, {"type": "null"}, ], }, "id": {"description": "Frame id", "type": ["string", "null"]}, "is_key_frame": { "description": "Is this a key frame (only applicable in frames who'se src is a video)", "type": ["boolean", "null"], }, "key_frame": { "description": "ID of the key frame that this frame belongs to", "type": ["string", "null"], }, "label_rule_counts": { "additionalProperties": True, "description": "The number of matched roi per lable rule", "type": ["object", "null"], }, "labels_size": { "description": "Number of labels returned", "type": ["integer", "null"], }, "meta": { "additionalProperties": True, "description": ( "Additional metadata dictionary for the frame. Please note that using this field" " effectively defines a schema (dictionary structure and types used as values) - frames" " within the same dataset cannot use conflicting schemas for this field (see documentation" " for more details)." 
), "type": ["object", "null"], }, "meta_blob": { "additionalProperties": True, "description": ( "Non searchable metadata dictionary for the frame. The fields in this object cannot be" " searched by and are not added to the frame schema" ), "type": ["object", "null"], }, "new_ver": { "description": "Newer version of this frame, if asked to merge", "oneOf": [{"$ref": "#/definitions/frame"}, {"type": "null"}], }, "rois": { "description": "Frame regions of interest", "items": {"$ref": "#/definitions/roi"}, "type": ["array", "null"], }, "rule_name": { "description": ( "Name of the filtering rule according to which this frame was provided (if applicable)" ), "type": ["string", "null"], }, "saved": { "description": "Last time frame was saved (timestamp)", "type": ["integer", "null"], }, "saved_in_version": { "description": "Last version this frame was saved in (version ID)", "type": ["string", "null"], }, "sources": { "description": "Sources of this frame", "items": {"$ref": "#/definitions/source"}, "type": ["array", "null"], }, "timestamp": { "description": ( "Frame's offset in milliseconds, used primarily for video content. Used for the default" " frames sorting as the secondary key (with the primary key being 'context_id'). For" " images, this value should typically be 0. If not set, value is filled from the timestamp" " of the first source. We recommend using this field only in cases concerning the default" " sorting behavior." ), "type": ["integer", "null"], }, "updated": { "description": "Last time frame was saved (timestamp)", "type": ["integer", "null"], }, "updated_in_version": { "description": "Last version this frame was updated in (version ID)", "type": ["string", "null"], }, "video_gop": { "description": ( "Video encoding GOP value for the source of this frame. Only valid for video frames" ), "type": ["number", "null"], }, }, "type": "object", }, "mask": { "properties": { "content_type": { "description": "Content type (e.g. 'image/jpeg', 'image/png')", "type": ["string", "null"], }, "height": { "description": "Height in pixels", "type": ["integer", "null"], }, "id": { "description": "unique ID (in this frame)", "type": ["string", "null"], }, "timestamp": { "default": 0, "description": ( "Timestamp in the source data (for video content. for images, this value should be 0)" ), "type": ["integer", "null"], }, "uri": {"description": "Data URI", "type": ["string", "null"]}, "width": { "description": "Width in pixels", "type": ["integer", "null"], }, }, "type": "object", }, "preview": { "properties": { "content_type": { "description": "Content type (e.g. 'image/jpeg', 'image/png')", "type": ["string", "null"], }, "height": { "description": "Height in pixels", "type": ["integer", "null"], }, "timestamp": { "default": 0, "description": ( "Timestamp in the source data (for video content. 
for images, this value should be 0)" ), "type": ["integer", "null"], }, "uri": {"description": "Data URI", "type": ["string", "null"]}, "width": { "description": "Width in pixels", "type": ["integer", "null"], }, }, "type": "object", }, "roi": { "properties": { "area": { "description": "ROI area (not used)", "type": ["integer", "null"], }, "confidence": { "description": "ROI confidence", "type": ["number", "null"], }, "id": {"description": "ROI id", "type": ["string", "null"]}, "label": { "description": "ROI labels", "items": {"type": "string"}, "type": ["array", "null"], }, "label_num": { "description": ( "Label number according to the specified labels mapping Used only when ROI is returned as" " part of a task's frame." ), "type": ["integer", "null"], }, "mask": { "description": "Mask info for this ROI", "oneOf": [{"$ref": "#/definitions/roi_mask"}, {"type": "null"}], }, "meta": { "additionalProperties": True, "description": "Additional metadata dictionary for the roi", "type": ["object", "null"], }, "poly": { "description": "ROI polygon (x0, y0, ..., xn, yn)", "items": {"type": "number"}, "type": ["array", "null"], }, "sources": { "description": "Sources that this ROI belongs to", "items": {"type": "string"}, "type": ["array", "null"], }, }, "type": "object", }, "roi_mask": { "properties": { "id": {"description": "Mask ID", "type": "string"}, "value": { "description": "Mask value", "items": {"type": "integer"}, "type": "array", }, }, "required": ["id", "value"], "type": "object", }, "source": { "properties": { "content_type": { "description": "Content type (e.g. 'image/jpeg', 'image/png')", "type": ["string", "null"], }, "height": { "description": "Height in pixels", "type": ["integer", "null"], }, "id": { "description": "unique ID (in this frame)", "type": ["string", "null"], }, "masks": { "items": {"$ref": "#/definitions/mask"}, "type": ["array", "null"], }, "meta": { "additionalProperties": True, "description": "Additional metadata dictionary for the source", "type": ["object", "null"], }, "preview": { "oneOf": [{"$ref": "#/definitions/preview"}, {"type": "null"}] }, "timestamp": { "default": 0, "description": ( "Timestamp in the source data (for video content. for images, this value should be 0)" ), "type": ["integer", "null"], }, "uri": {"description": "Data URI", "type": ["string", "null"]}, "width": { "description": "Width in pixels", "type": ["integer", "null"], }, }, "type": "object", }, }, "properties": { "eof": { "description": ( "When 'frames' is empty, represents whether there are no more frames left. " 'If "false",\n' " client can retry the " "operation." 
), "type": ["boolean", "null"], }, "frames": { "description": "Frames list", "items": {"$ref": "#/definitions/frame"}, "type": ["array", "null"], }, "frames_returned": { "description": "Number of frames returned", "type": ["integer", "null"], }, "random_seed": { "description": "Random seed used for frame selection", "type": ["integer", "null"], }, "roi_stats": { "additionalProperties": {"type": "integer"}, "description": ( "Json object containing the count per labels in frames, e.g.\n {\n " " 'background': 312,\n 'boat': 2,\n 'bus': 4,\n " " 'car': 2,\n }" ), "type": ["object", "null"], }, "scroll_id": { "description": "Scroll session id to be provided in order to get the next batch of images", "type": ["string", "null"], }, "scroll_state": { "additionalProperties": True, "description": "JSON object representing the scroll state", "type": ["object", "null"], }, }, "type": "object", } def __init__( self, frames=None, frames_returned=None, scroll_state=None, scroll_id=None, roi_stats=None, eof=None, random_seed=None, **kwargs ): super(GetNextForDataviewIdResponse, self).__init__(**kwargs) self.frames = frames self.frames_returned = frames_returned self.scroll_state = scroll_state self.scroll_id = scroll_id self.roi_stats = roi_stats self.eof = eof self.random_seed = random_seed @schema_property("frames") def frames(self): return self._property_frames @frames.setter def frames(self, value): if value is None: self._property_frames = None return self.assert_isinstance(value, "frames", (list, tuple)) if any(isinstance(v, dict) for v in value): value = [Frame.from_dict(v) if isinstance(v, dict) else v for v in value] else: self.assert_isinstance(value, "frames", Frame, is_array=True) self._property_frames = value @schema_property("frames_returned") def frames_returned(self): return self._property_frames_returned @frames_returned.setter def frames_returned(self, value): if value is None: self._property_frames_returned = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "frames_returned", six.integer_types) self._property_frames_returned = value @schema_property("scroll_state") def scroll_state(self): return self._property_scroll_state @scroll_state.setter def scroll_state(self, value): if value is None: self._property_scroll_state = None return self.assert_isinstance(value, "scroll_state", (dict,)) self._property_scroll_state = value @schema_property("scroll_id") def scroll_id(self): return self._property_scroll_id @scroll_id.setter def scroll_id(self, value): if value is None: self._property_scroll_id = None return self.assert_isinstance(value, "scroll_id", six.string_types) self._property_scroll_id = value @schema_property("roi_stats") def roi_stats(self): return self._property_roi_stats @roi_stats.setter def roi_stats(self, value): if value is None: self._property_roi_stats = None return self.assert_isinstance(value, "roi_stats", (dict,)) self._property_roi_stats = value @schema_property("eof") def eof(self): return self._property_eof @eof.setter def eof(self, value): if value is None: self._property_eof = None return self.assert_isinstance(value, "eof", (bool,)) self._property_eof = value @schema_property("random_seed") def random_seed(self): return self._property_random_seed @random_seed.setter def random_seed(self, value): if value is None: self._property_random_seed = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "random_seed", six.integer_types) 
self._property_random_seed = value
GetNextForDataviewIdResponse
python
getsentry__sentry
src/sentry/issues/endpoints/organization_group_search_views_starred.py
{ "start": 724, "end": 1636 }
class ____(OrganizationEndpoint):
    publish_status = {
        "GET": ApiPublishStatus.EXPERIMENTAL,
    }
    owner = ApiOwner.ISSUES
    permission_classes = (MemberPermission,)

    def get(self, request: Request, organization: Organization) -> Response:
        """
        Retrieve a list of starred views for the current organization member.
        """
        starred_views = GroupSearchViewStarred.objects.filter(
            organization=organization, user_id=request.user.id
        )

        return self.paginate(
            request=request,
            queryset=starred_views,
            order_by="position",
            on_results=lambda x: serialize(
                x,
                request.user,
                serializer=GroupSearchViewStarredSerializer(
                    organization=organization,
                ),
            ),
        )
OrganizationGroupSearchViewsStarredEndpoint
python
pypa__warehouse
tests/unit/test_views.py
{ "start": 12000, "end": 13699 }
class ____:
    def test_renders_503(self, pyramid_config, pyramid_request):
        renderer = pyramid_config.testing_add_renderer("503.html")
        renderer.string_response = "A 503 Error"

        resp = service_unavailable(pretend.stub(), pyramid_request)

        assert resp.status_code == 503
        assert resp.content_type == "text/html"
        assert resp.body == b"A 503 Error"
        _assert_has_cors_headers(resp.headers)


def test_favicon(pyramid_request):
    pyramid_request.static_path = pretend.call_recorder(lambda path: f"/static/{path}")

    # Construct the path to the favicon.ico file relative to the codebase directory
    codebase_dir = Path(__file__).resolve().parent.parent.parent
    favicon_path = (
        codebase_dir / "warehouse" / "static" / "dist" / "images" / "favicon.ico"
    )

    # Create a dummy file to test the favicon
    favicon_path.parent.mkdir(parents=True, exist_ok=True)
    favicon_path.touch()

    response = views.favicon(pyramid_request)

    assert isinstance(response, FileResponse)
    assert pyramid_request.response.content_type == "image/x-icon"


def test_robotstxt(pyramid_request):
    assert robotstxt(pyramid_request) == {}
    assert pyramid_request.response.content_type == "text/plain"


def test_funding_manifest_urls(pyramid_request):
    response = funding_manifest_urls(pyramid_request)
    assert response.text == "https://www.python.org/funding.json"
    assert response.content_type == "text/plain"
    assert response.charset == "utf-8"


def test_opensearchxml(pyramid_request):
    assert opensearchxml(pyramid_request) == {}
    assert pyramid_request.response.content_type == "text/xml"
TestServiceUnavailableView
python
astropy__astropy
astropy/utils/masked/tests/test_function_helpers.py
{ "start": 46959, "end": 48079 }
class ____:
    @classmethod
    def setup_class(cls):
        cls.a = np.array([15, 255, 0], dtype="u1")
        cls.mask_a = np.array([False, True, False])
        cls.ma = Masked(cls.a, mask=cls.mask_a)
        cls.b = np.unpackbits(cls.a).reshape(6, 4)
        cls.mask_b = np.array([False] * 15 + [True, True] + [False] * 7).reshape(6, 4)
        cls.mb = Masked(cls.b, mask=cls.mask_b)

    @pytest.mark.parametrize("axis", [None, 1, 0])
    def test_packbits(self, axis):
        out = np.packbits(self.mb, axis=axis)
        if axis is None:
            expected = self.a
        else:
            expected = np.packbits(self.b, axis=axis)
        expected_mask = np.packbits(self.mask_b, axis=axis) > 0
        assert_array_equal(out.unmasked, expected)
        assert_array_equal(out.mask, expected_mask)

    def test_unpackbits(self):
        out = np.unpackbits(self.ma)
        mask = np.where(self.mask_a, np.uint8(255), np.uint8(0))
        expected_mask = np.unpackbits(mask) > 0
        assert_array_equal(out.unmasked, self.b.ravel())
        assert_array_equal(out.mask, expected_mask)
TestBitFunctions
python
tensorflow__tensorflow
tensorflow/python/framework/ops_test.py
{ "start": 15989, "end": 17884 }
class ____(test_util.TensorFlowTestCase):

  def testToTensor(self):
    values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
    indices = constant_op.constant([0, 2])
    x = indexed_slices.IndexedSlices(values, indices)
    with self.assertRaises(ValueError):
      tensor = ops.convert_to_tensor(x, name="tensor")
    self.assertEqual(tensor_shape.TensorShape(None), x.shape)
    dense_shape = constant_op.constant([3, 2])
    y = indexed_slices.IndexedSlices(values, indices, dense_shape)
    tensor = ops.convert_to_tensor(y, name="tensor")
    self.assertAllEqual(tensor.shape, y.shape)
    self.assertAllEqual(self.evaluate(tensor), [[2, 3], [0, 0], [5, 7]])

  @test_util.run_gpu_only
  def testEagerCopy(self):
    with context.eager_mode():
      var = variables.Variable([[0.0], [0.0], [0.0], [0.0]], name="tensor")
      with backprop.GradientTape() as tape:
        a = array_ops.gather(array_ops.gather(var, [0, 1]), [0, 1])
        b = array_ops.gather(array_ops.gather(var, [2, 3]), [0, 1])
        r = special_math_ops.einsum("ij,ij->i", a, b)
      g = tape.gradient(r, [var])[0]
      values = g.values if isinstance(g, indexed_slices.IndexedSlices) else g
      self.assertAllEqual(values.get_shape(), [4, 1])

  def testNegation(self):
    values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
    indices = constant_op.constant([0, 2])
    x = -indexed_slices.IndexedSlices(values, indices)
    self.assertAllEqual(x.values, [[-2, -3], [-5, -7]])
    self.assertAllEqual(x.indices, [0, 2])

  def testScalarMul(self):
    values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
    indices = constant_op.constant([0, 2])
    x = math_ops.scalar_mul(-2, indexed_slices.IndexedSlices(values, indices))
    self.assertAllEqual(x.values, [[-4, -6], [-10, -14]])
    self.assertAllEqual(x.indices, [0, 2])


@test_util.run_all_in_graph_and_eager_modes
IndexedSlicesTest
python
doocs__leetcode
solution/2800-2899/2896.Apply Operations to Make Two Strings Equal/Solution.py
{ "start": 0, "end": 519 }
class ____:
    def minOperations(self, s1: str, s2: str, x: int) -> int:
        @cache
        def dfs(i: int, j: int) -> int:
            if i > j:
                return 0
            a = dfs(i + 1, j - 1) + x
            b = dfs(i + 2, j) + idx[i + 1] - idx[i]
            c = dfs(i, j - 2) + idx[j] - idx[j - 1]
            return min(a, b, c)

        n = len(s1)
        idx = [i for i in range(n) if s1[i] != s2[i]]
        m = len(idx)
        if m & 1:
            return -1
        return dfs(0, m - 1)
Solution
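A minimal usage sketch for the snippet above (illustrative only; it assumes the masked class is instantiated under its target name, Solution, and that functools.cache is imported as in the original file):

from functools import cache  # required by the @cache decorator in the snippet

sol = Solution()  # hypothetical instance of the masked class above
print(sol.minOperations("10", "01", x=1))   # 1: the two mismatched bits are adjacent, so a single unit-cost swap fixes both
print(sol.minOperations("10", "00", x=1))   # -1: an odd number of mismatches can never be paired up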
python
ray-project__ray
rllib/models/tf/tf_action_dist.py
{ "start": 7832, "end": 9529 }
class ____(Categorical):
    """MultiCategorical distribution for MultiDiscrete action spaces.

    The action space must be uniform, meaning all nvec items have the same size,
    e.g. MultiDiscrete([10, 10, 10]), where 10 is the number of candidates to
    pick from and 3 is the slate size (pick 3 out of 10). When picking
    candidates, no candidate must be picked more than once.
    """

    def __init__(
        self,
        inputs: List[TensorType],
        model: ModelV2 = None,
        temperature: float = 1.0,
        action_space: Optional[gym.spaces.MultiDiscrete] = None,
        all_slates=None,
    ):
        assert temperature > 0.0, "Categorical `temperature` must be > 0.0!"
        # Allow softmax formula w/ temperature != 1.0:
        # Divide inputs by temperature.
        super().__init__(inputs / temperature, model)
        self.action_space = action_space
        # Assert uniformness of the action space (all discrete buckets have the same
        # size).
        assert isinstance(self.action_space, gym.spaces.MultiDiscrete) and all(
            n == self.action_space.nvec[0] for n in self.action_space.nvec
        )
        self.all_slates = all_slates

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # Get a sample from the underlying Categorical (batch of ints).
        sample = super().deterministic_sample()
        # Use the sampled ints to pick the actual slates.
        return tf.gather(self.all_slates, sample)

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # TODO: Implement.
        return tf.ones_like(self.inputs[:, 0])


@OldAPIStack
SlateMultiCategorical
python
PyCQA__pylint
doc/data/messages/o/overridden-final-method/bad.py
{ "start": 101, "end": 189 }
class ____(Animal):
    def can_breathe(self):  # [overridden-final-method]
        pass
Cat
python
dask__dask
dask/dataframe/backends.py
{ "start": 1666, "end": 17026 }
class ____(DaskBackendEntrypoint): """Dask-DataFrame version of ``DaskBackendEntrypoint`` See Also -------- PandasBackendEntrypoint """ @staticmethod def from_dict(data: dict, *, npartitions: int, **kwargs): """Create a DataFrame collection from a dictionary Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. npartitions : int The desired number of output partitions. **kwargs : Optional backend kwargs. See Also -------- dask.dataframe.io.io.from_dict """ raise NotImplementedError @staticmethod def read_parquet(path: str | list, **kwargs): """Read Parquet files into a DataFrame collection Parameters ---------- path : str or list Source path(s). **kwargs : Optional backend kwargs. See Also -------- dask.dataframe.io.parquet.core.read_parquet """ raise NotImplementedError @staticmethod def read_json(url_path: str | list, **kwargs): """Read json files into a DataFrame collection Parameters ---------- url_path : str or list Source path(s). **kwargs : Optional backend kwargs. See Also -------- dask.dataframe.io.json.read_json """ raise NotImplementedError @staticmethod def read_orc(path: str | list, **kwargs): """Read ORC files into a DataFrame collection Parameters ---------- path : str or list Source path(s). **kwargs : Optional backend kwargs. See Also -------- dask.dataframe.io.orc.core.read_orc """ raise NotImplementedError @staticmethod def read_csv(urlpath: str | list, **kwargs): """Read CSV files into a DataFrame collection Parameters ---------- urlpath : str or list Source path(s). **kwargs : Optional backend kwargs. See Also -------- dask.dataframe.io.csv.read_csv """ raise NotImplementedError @staticmethod def read_hdf(pattern: str | list, key: str, **kwargs): """Read HDF5 files into a DataFrame collection Parameters ---------- pattern : str or list Source path(s). key : str Group identifier in the store. **kwargs : Optional backend kwargs. See Also -------- dask.dataframe.io.hdf.read_hdf """ raise NotImplementedError dataframe_creation_dispatch = CreationDispatch( module_name="dataframe", default="pandas", entrypoint_class=DataFrameBackendEntrypoint, name="dataframe_creation_dispatch", ) ########## # Pandas # ########## @make_scalar.register(np.dtype) def _(dtype): return _scalar_from_dtype(dtype) @make_scalar.register(pd.Timestamp) @make_scalar.register(pd.Timedelta) @make_scalar.register(pd.Period) @make_scalar.register(pd.Interval) def _(x): return x @make_meta_dispatch.register((pd.Series, pd.DataFrame)) def _(x, index=None): out = x.iloc[:0].copy(deep=True) # https://github.com/pandas-dev/pandas/issues/61930 # pandas shallow copies arrow-backed extension arrays. # Use pyarrow.compute.take to get a new array that doesn't # share any memory with the original array. for k, v in out.items(): if isinstance(v.array, pd.arrays.ArrowExtensionArray): values = pyarrow.compute.take( pyarrow.array(v.array), pyarrow.array([], type="int32") ) out[k] = v._constructor( pd.array(values, dtype=v.array.dtype), index=v.index, name=v.name ) # index isn't copied by default in pandas, even if deep=true out.index = out.index.copy(deep=True) return out @make_meta_dispatch.register(pd.Index) def _(x, index=None): return x[0:0].copy(deep=True) meta_object_types: tuple[type, ...] 
= (pd.Series, pd.DataFrame, pd.Index, pd.MultiIndex) try: import scipy.sparse as sp meta_object_types += (sp.spmatrix,) except ImportError: pass @pyarrow_schema_dispatch.register((pd.DataFrame,)) def get_pyarrow_schema_pandas(obj, preserve_index=None): return pa.Schema.from_pandas(obj, preserve_index=preserve_index) @to_pyarrow_table_dispatch.register((pd.DataFrame,)) def get_pyarrow_table_from_pandas(obj, **kwargs): # `kwargs` must be supported by `pyarrow.Table.to_pandas` return pa.Table.from_pandas(obj, **kwargs) @from_pyarrow_table_dispatch.register((pd.DataFrame,)) def get_pandas_dataframe_from_pyarrow(meta, table, **kwargs): # `kwargs` must be supported by `pyarrow.Table.to_pandas` def default_types_mapper(pyarrow_dtype: pa.DataType) -> object: # Avoid converting strings from `string[pyarrow]` to # `string[python]` if we have *any* `string[pyarrow]` if ( pyarrow_dtype in {pa.large_string(), pa.string()} and pd.StringDtype("pyarrow") in meta.dtypes.values ): return pd.StringDtype("pyarrow") return None types_mapper = kwargs.pop("types_mapper", default_types_mapper) return table.to_pandas(types_mapper=types_mapper, **kwargs) @partd_encode_dispatch.register(pd.DataFrame) def partd_pandas_blocks(_): from partd import PandasBlocks return PandasBlocks @meta_nonempty.register(pd.DatetimeTZDtype) @make_meta_dispatch.register(pd.DatetimeTZDtype) def make_meta_pandas_datetime_tz(x, index=None): return _nonempty_scalar(x) @make_meta_obj.register(meta_object_types) def make_meta_object(x, index=None): """Create an empty pandas object containing the desired metadata. Parameters ---------- x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or an iterable of `(name, dtype)` tuples. To create a `Series`, provide a tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index should match the desired output. If a dtype or scalar, a scalar of the same dtype is returned. index : pd.Index, optional Any pandas index to use in the metadata. If none provided, a `RangeIndex` will be used. Examples -------- >>> make_meta_object([('a', 'i8'), ('b', 'O')]) Empty DataFrame Columns: [a, b] Index: [] >>> make_meta_object(('a', 'f8')) Series([], Name: a, dtype: float64) >>> make_meta_object('i8') np.int64(1) """ if is_arraylike(x) and x.shape: return x[:0] if index is not None: index = make_meta_dispatch(index) if isinstance(x, dict): return pd.DataFrame( {c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index ) if isinstance(x, tuple) and len(x) == 2: return _empty_series(x[0], x[1], index=index) elif isinstance(x, Iterable) and not isinstance(x, str): if not all(isinstance(i, tuple) and len(i) == 2 for i in x): raise ValueError(f"Expected iterable of tuples of (name, dtype), got {x}") return pd.DataFrame( {c: _empty_series(c, d, index=index) for (c, d) in x}, columns=[c for c, d in x], index=index, ) elif not hasattr(x, "dtype") and x is not None: # could be a string, a dtype object, or a python type. Skip `None`, # because it is implicitly converted to `dtype('f8')`, which we don't # want here. try: dtype = np.dtype(x) return _scalar_from_dtype(dtype) except Exception: # Continue on to next check pass if is_scalar(x): return _nonempty_scalar(x) raise TypeError(f"Don't know how to create metadata from {x}") @meta_nonempty.register(object) def meta_nonempty_object(x): """Create a nonempty pandas object from the given metadata. 
Returns a pandas DataFrame, Series, or Index that contains two rows of fake data. """ if is_scalar(x): return _nonempty_scalar(x) else: raise TypeError( "Expected Pandas-like Index, Series, DataFrame, or scalar, " f"got {typename(type(x))}" ) @meta_nonempty.register(pd.DataFrame) def meta_nonempty_dataframe(x): idx = meta_nonempty(x.index) dt_s_dict = dict() data = dict() for i in range(len(x.columns)): series = x.iloc[:, i] dt = series.dtype if dt not in dt_s_dict: dt_s_dict[dt] = _nonempty_series(x.iloc[:, i], idx=idx) data[i] = dt_s_dict[dt] res = pd.DataFrame(data, index=idx, columns=np.arange(len(x.columns))) res.columns = x.columns res.attrs = x.attrs return res @meta_nonempty.register(pd.Index) def _nonempty_index(idx): typ = type(idx) if typ is pd.RangeIndex: return pd.RangeIndex(2, name=idx.name, dtype=idx.dtype) elif is_any_real_numeric_dtype(idx): return typ([1, 2], name=idx.name, dtype=idx.dtype) elif typ is pd.DatetimeIndex: start = "1970-01-01" # Need a non-monotonic decreasing index to avoid issues with # partial string indexing see https://github.com/dask/dask/issues/2389 # and https://github.com/pandas-dev/pandas/issues/16515 # This doesn't mean `_meta_nonempty` should ever rely on # `self.monotonic_increasing` or `self.monotonic_decreasing` try: return pd.date_range( start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name, unit=idx.unit, ) except ValueError: # older pandas versions data = [start, "1970-01-02"] if idx.freq is None else None return pd.DatetimeIndex( data, start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name ) elif typ is pd.PeriodIndex: return pd.period_range( start="1970-01-01", periods=2, freq=idx.freq, name=idx.name ) elif typ is pd.TimedeltaIndex: start = np.timedelta64(1, "D") try: return pd.timedelta_range( start=start, periods=2, freq=idx.freq, name=idx.name ) except ValueError: # older pandas versions start = np.timedelta64(1, "D") data = [start, start + 1] if idx.freq is None else None return pd.TimedeltaIndex( data, start=start, periods=2, freq=idx.freq, name=idx.name ) elif typ is pd.CategoricalIndex: if len(idx.categories) == 0: data = pd.Categorical(_nonempty_index(idx.categories), ordered=idx.ordered) else: data = pd.Categorical.from_codes( [-1, 0], categories=idx.categories, ordered=idx.ordered ) return pd.CategoricalIndex(data, name=idx.name) elif typ is pd.MultiIndex: levels = [_nonempty_index(l) for l in idx.levels] codes = [[0, 0] for i in idx.levels] try: return pd.MultiIndex(levels=levels, codes=codes, names=idx.names) except TypeError: # older pandas versions return pd.MultiIndex(levels=levels, labels=codes, names=idx.names) elif typ is pd.Index: if type(idx.dtype) in make_array_nonempty._lookup: return pd.Index( make_array_nonempty(idx.dtype), dtype=idx.dtype, name=idx.name ) elif idx.dtype == bool: # pd 1.5 introduce bool dtypes and respect non-uniqueness return pd.Index([True, False], name=idx.name) else: # for pd 1.5 in the case of bool index this would be cast as [True, True] # breaking uniqueness return pd.Index(["a", "b"], name=idx.name, dtype=idx.dtype) raise TypeError(f"Don't know how to handle index of type {typename(type(idx))}") @meta_nonempty.register(pd.Series) def _nonempty_series(s, idx=None): # TODO: Use register dtypes with make_array_nonempty if idx is None: idx = _nonempty_index(s.index) dtype = s.dtype if len(s) > 0: # use value from meta if provided data = [s.iloc[0]] * 2 elif isinstance(dtype, pd.DatetimeTZDtype): entry = pd.Timestamp("1970-01-01", tz=dtype.tz) data = pd.array([entry, entry], 
dtype=dtype) elif isinstance(dtype, pd.CategoricalDtype): if len(s.cat.categories): codes = [0, 0] else: codes = [-1, -1] data = pd.Categorical.from_codes(codes, dtype=s.dtype) elif is_integer_na_dtype(dtype): data = pd.array([1, None], dtype=dtype) elif is_float_na_dtype(dtype): data = pd.array([1.0, None], dtype=dtype) elif isinstance(dtype, pd.PeriodDtype): # pandas 0.24.0+ should infer this to be Series[Period[freq]] freq = dtype.freq data = [pd.Period("2000", freq), pd.Period("2001", freq)] elif isinstance(dtype, pd.SparseDtype): entry = _scalar_from_dtype(dtype.subtype) data = pd.array([entry, entry], dtype=dtype) elif isinstance(dtype, pd.IntervalDtype): entry = _scalar_from_dtype(dtype.subtype) data = pd.array([entry, entry], dtype=dtype) elif type(dtype) in make_array_nonempty._lookup: data = make_array_nonempty(dtype) else: entry = _scalar_from_dtype(dtype) data = np.array([entry, entry], dtype=dtype) out = pd.Series(data, name=s.name, index=idx) out.attrs = s.attrs return out @meta_lib_from_array.register(Array) def _meta_lib_from_array_da(x): # Use x._meta for dask arrays return meta_lib_from_array(x._meta) @meta_lib_from_array.register(np.ndarray) def _meta_lib_from_array_numpy(x): # numpy -> pandas return pd @union_categoricals_dispatch.register( (pd.DataFrame, pd.Series, pd.Index, pd.Categorical) ) def union_categoricals_pandas(to_union, sort_categories=False, ignore_order=False): return pd.api.types.union_categoricals( to_union, sort_categories=sort_categories, ignore_order=ignore_order ) @get_parallel_type.register(pd.Series) def get_parallel_type_series(_): from dask.dataframe.dask_expr._collection import Series return Series @get_parallel_type.register(pd.DataFrame) def get_parallel_type_dataframe(_): from dask.dataframe.dask_expr._collection import DataFrame return DataFrame @get_parallel_type.register(pd.Index) def get_parallel_type_index(_): from dask.dataframe.dask_expr._collection import Index return Index @get_parallel_type.register(object) def get_parallel_type_object(_): from dask.dataframe.dask_expr._collection import Scalar return Scalar @hash_object_dispatch.register((pd.DataFrame, pd.Series, pd.Index)) def hash_object_pandas( obj, index=True, encoding="utf8", hash_key=None, categorize=True ): return pd.util.hash_pandas_object( obj, index=index, encoding=encoding, hash_key=hash_key, categorize=categorize )
DataFrameBackendEntrypoint
python
django__django
tests/backends/sqlite/test_creation.py
{ "start": 270, "end": 2044 }
class ____(SimpleTestCase):
    def test_custom_test_name(self):
        test_connection = copy.copy(connections[DEFAULT_DB_ALIAS])
        test_connection.settings_dict = copy.deepcopy(
            connections[DEFAULT_DB_ALIAS].settings_dict
        )
        test_connection.settings_dict["NAME"] = None
        test_connection.settings_dict["TEST"]["NAME"] = "custom.sqlite.db"
        signature = test_connection.creation_class(test_connection).test_db_signature()
        self.assertEqual(signature, (None, "custom.sqlite.db"))

    def test_get_test_db_clone_settings_name(self):
        test_connection = copy.copy(connections[DEFAULT_DB_ALIAS])
        test_connection.settings_dict = copy.deepcopy(
            connections[DEFAULT_DB_ALIAS].settings_dict,
        )
        tests = [
            ("test.sqlite3", "test_1.sqlite3"),
            ("test", "test_1"),
        ]
        for test_db_name, expected_clone_name in tests:
            with self.subTest(test_db_name=test_db_name):
                test_connection.settings_dict["NAME"] = test_db_name
                test_connection.settings_dict["TEST"]["NAME"] = test_db_name
                creation_class = test_connection.creation_class(test_connection)
                clone_settings_dict = creation_class.get_test_db_clone_settings("1")
                self.assertEqual(clone_settings_dict["NAME"], expected_clone_name)

    @mock.patch.object(multiprocessing, "get_start_method", return_value="unsupported")
    def test_get_test_db_clone_settings_not_supported(self, *mocked_objects):
        msg = "Cloning with start method 'unsupported' is not supported."
        with self.assertRaisesMessage(NotSupportedError, msg):
            connection.creation.get_test_db_clone_settings(1)
TestDbSignatureTests
python
great-expectations__great_expectations
tests/core/test_validation_definition.py
{ "start": 6621, "end": 18773 }
class ____: @pytest.fixture def mock_validator(self, mocker: MockerFixture): """Set up our ProjectManager to return a mock Validator""" with mock.patch.object(ProjectManager, "get_validator") as mock_get_validator: with mock.patch.object(OldValidator, "graph_validate"): gx.get_context(mode="ephemeral") mock_execution_engine = mocker.MagicMock( spec=ExecutionEngine, batch_manager=mocker.MagicMock( active_batch_id=BATCH_ID, active_batch_spec=ACTIVE_BATCH_SPEC, active_batch_definition=ACTIVE_BATCH_DEFINITION, active_batch_markers=BATCH_MARKERS, ), ) mock_validator = OldValidator(execution_engine=mock_execution_engine) mock_get_validator.return_value = mock_validator yield mock_validator @pytest.mark.unit def test_passes_simple_data_to_validator( self, mock_validator: MagicMock, validation_definition: ValidationDefinition, ): validation_definition.suite.add_expectation( gxe.ExpectColumnMaxToBeBetween(column="foo", max_value=1) ) mock_validator.graph_validate.return_value = [ExpectationValidationResult(success=True)] validation_definition.run() mock_validator.graph_validate.assert_called_with( configurations=[ ExpectationConfiguration( type="expect_column_max_to_be_between", kwargs={"column": "foo", "max_value": 1.0}, ) ], runtime_configuration={"result_format": "SUMMARY"}, ) @mock.patch.object(_PandasDataAsset, "build_batch_request", autospec=True) @pytest.mark.unit def test_passes_complex_data_to_validator( self, mock_build_batch_request, mock_validator: MagicMock, validation_definition: ValidationDefinition, ): validation_definition.suite.add_expectation( gxe.ExpectColumnMaxToBeBetween(column="foo", max_value={"$PARAMETER": "max_value"}) ) mock_validator.graph_validate.return_value = [ExpectationValidationResult(success=True)] validation_definition.run( batch_parameters={"year": 2024}, expectation_parameters={"max_value": 9000}, result_format=ResultFormat.COMPLETE, ) mock_validator.graph_validate.assert_called_with( configurations=[ ExpectationConfiguration( type="expect_column_max_to_be_between", kwargs={"column": "foo", "max_value": 9000}, ) ], runtime_configuration={"result_format": "COMPLETE"}, ) @pytest.mark.unit def test_returns_expected_data( self, mock_validator: MagicMock, validation_definition: ValidationDefinition, ): graph_validate_results = [ExpectationValidationResult(success=True)] mock_validator.graph_validate.return_value = graph_validate_results output = validation_definition.run() # Ignore meta for purposes of this test output["meta"] = {} assert output == ExpectationSuiteValidationResult( results=graph_validate_results, success=True, suite_name="empty_suite", statistics={ "evaluated_expectations": 1, "successful_expectations": 1, "unsuccessful_expectations": 0, "success_percent": 100.0, }, meta={}, ) @pytest.mark.parametrize("checkpoint_id", [None, "my_checkpoint_id"]) @pytest.mark.unit def test_adds_requisite_fields( self, mock_validator: MagicMock, validation_definition: ValidationDefinition, checkpoint_id: str | None, ): mock_validator.graph_validate.return_value = [] output = validation_definition.run(checkpoint_id=checkpoint_id) assert output.meta == { "validation_id": validation_definition.id, "checkpoint_id": checkpoint_id, "batch_parameters": None, "batch_spec": ACTIVE_BATCH_SPEC, "batch_markers": BATCH_MARKERS, "active_batch_definition": ACTIVE_BATCH_DEFINITION, "great_expectations_version": GX_VERSION, "run_id": ANY, "validation_time": ANY, } assert isinstance(output.meta["run_id"], RunIdentifier) assert isinstance(output.meta["validation_time"], 
datetime.datetime) @pytest.mark.unit def test_adds_correct_batch_parameter_field_for_dataframes( self, mock_validator: MagicMock, dataframe_validation_definition: ValidationDefinition, ) -> None: mock_validator.graph_validate.return_value = [] output = dataframe_validation_definition.run( checkpoint_id=None, batch_parameters={"dataframe": pd.DataFrame({"a": ["1", "2", "3", "4", "5"]})}, ) assert output.meta == { "validation_id": dataframe_validation_definition.id, "checkpoint_id": None, "batch_parameters": {"dataframe": "<DATAFRAME>"}, "batch_spec": ACTIVE_BATCH_SPEC, "batch_markers": BATCH_MARKERS, "active_batch_definition": ACTIVE_BATCH_DEFINITION, "great_expectations_version": GX_VERSION, "run_id": ANY, "validation_time": ANY, } assert isinstance(output.meta["run_id"], RunIdentifier) assert isinstance(output.meta["validation_time"], datetime.datetime) @pytest.mark.parametrize( "batch_parameters", [ pytest.param(None), pytest.param({"year": 2024}), pytest.param({"year": 2024, "month": 10}), ], ) @pytest.mark.postgresql def test_adds_correct_batch_parameter_fields_for_postgres( self, mock_validator: MagicMock, postgres_validation_definition: ValidationDefinition, batch_parameters: dict | None, ) -> None: mock_validator.graph_validate.return_value = [] output = postgres_validation_definition.run( checkpoint_id=None, batch_parameters=batch_parameters, ) assert output.meta == { "validation_id": postgres_validation_definition.id, "checkpoint_id": None, "batch_parameters": batch_parameters, "batch_spec": ACTIVE_BATCH_SPEC, "batch_markers": BATCH_MARKERS, "active_batch_definition": ACTIVE_BATCH_DEFINITION, "great_expectations_version": GX_VERSION, "run_id": ANY, "validation_time": ANY, } assert isinstance(output.meta["run_id"], RunIdentifier) assert isinstance(output.meta["validation_time"], datetime.datetime) @mock.patch.object(ValidationResultsStore, "set") @pytest.mark.unit def test_persists_validation_results_for_non_cloud( self, mock_validation_results_store_set: MagicMock, mock_validator: MagicMock, validation_definition: ValidationDefinition, ): validation_definition.suite.add_expectation( gxe.ExpectColumnMaxToBeBetween(column="foo", max_value=1) ) mock_validator.graph_validate.return_value = [ExpectationValidationResult(success=True)] validation_definition.run() mock_validator.graph_validate.assert_called_with( configurations=[ ExpectationConfiguration( type="expect_column_max_to_be_between", kwargs={"column": "foo", "max_value": 1.0}, ) ], runtime_configuration={"result_format": "SUMMARY"}, ) # validate we are calling set on the store with data that's roughly the right shape [(_, kwargs)] = mock_validation_results_store_set.call_args_list key = kwargs["key"] value = kwargs["value"] assert isinstance(key, ValidationResultIdentifier) assert key.batch_identifier == BATCH_ID assert value.success is True @mock.patch.object(ValidationResultsStore, "set") @pytest.mark.unit def test_persists_validation_results_for_cloud( self, mock_validation_results_store_set: MagicMock, mock_validator: MagicMock, cloud_validation_definition: ValidationDefinition, ): expectation = gxe.ExpectColumnMaxToBeBetween(column="foo", max_value=1) cloud_validation_definition.suite.add_expectation(expectation=expectation) cloud_validation_definition.suite.save() mock_validator.graph_validate.return_value = [ ExpectationValidationResult(success=True, expectation_config=expectation.configuration) ] cloud_validation_definition.run() # validate we are calling set on the store with data that's roughly the right shape [(_, 
kwargs)] = mock_validation_results_store_set.call_args_list key = kwargs["key"] value = kwargs["value"] assert isinstance(key, GXCloudIdentifier) assert value.success is True @mock.patch.object( ValidationResultsStore, "set", return_value=GXCloudResourceRef( resource_type="validation_result", id="59b72ca5-4636-44be-a367-46b54ae51fe1", url="https://api.greatexpectations.io/api/v1/organizations/11111111-ba69-4295-8fe1-61eef96f12b4/validation-results", response_json={"data": {"result_url": "my_result_url"}}, ), ) @pytest.mark.unit def test_cloud_validation_def_adds_id_and_url_to_result( self, mock_validation_results_store_set: MagicMock, mock_validator: MagicMock, cloud_validation_definition: ValidationDefinition, ): expectation = gxe.ExpectColumnMaxToBeBetween(column="foo", max_value=1) cloud_validation_definition.suite.add_expectation(expectation=expectation) cloud_validation_definition.suite.save() mock_validator.graph_validate.return_value = [ ExpectationValidationResult(success=True, expectation_config=expectation.configuration) ] result = cloud_validation_definition.run() assert result.id == "59b72ca5-4636-44be-a367-46b54ae51fe1" assert result.result_url == "my_result_url" @mock.patch.object(ValidationResultsStore, "set") @pytest.mark.unit def test_cloud_validation_def_creates_rendered_content( self, mock_validation_results_store_set: MagicMock, mock_validator: MagicMock, cloud_validation_definition: ValidationDefinition, ): expectation = gxe.ExpectColumnMaxToBeBetween(column="foo", max_value=1) cloud_validation_definition.suite.add_expectation(expectation=expectation) cloud_validation_definition.suite.save() mock_validator.graph_validate.return_value = [ ExpectationValidationResult(success=True, expectation_config=expectation.configuration) ] result = cloud_validation_definition.run() assert len(result.results) == 1 assert result.results[0].expectation_config is not None assert result.results[0].expectation_config.rendered_content is not None assert result.results[0].rendered_content is not None @pytest.mark.unit def test_dependencies_not_added_raises_error(self, validation_definition: ValidationDefinition): validation_definition.suite.id = None validation_definition.data.id = None with pytest.raises(ValidationDefinitionRelatedResourcesFreshnessError) as e: validation_definition.run() assert [type(err) for err in e.value.errors] == [ BatchDefinitionNotAddedError, ExpectationSuiteNotAddedError, ]
TestValidationRun
python
plotly__plotly.py
tests/test_optional/test_figure_factory/test_figure_factory.py
{ "start": 28478, "end": 40751 }
class ____(NumpyTestUtilsMixin, TestCaseNoTemplate): def test_default_dendrogram(self): X = np.array([[1, 2, 3, 4], [1, 1, 3, 4], [1, 2, 1, 4], [1, 2, 3, 1]]) dendro = ff.create_dendrogram(X=X) expected_dendro = go.Figure( data=[ go.Scatter( x=np.array([25.0, 25.0, 35.0, 35.0]), y=np.array([0.0, 1.0, 1.0, 0.0]), marker=go.scatter.Marker(color="rgb(61,153,112)"), mode="lines", xaxis="x", yaxis="y", hoverinfo="text", text=None, ), go.Scatter( x=np.array([15.0, 15.0, 30.0, 30.0]), y=np.array([0.0, 2.23606798, 2.23606798, 1.0]), marker=go.scatter.Marker(color="rgb(61,153,112)"), mode="lines", xaxis="x", yaxis="y", hoverinfo="text", text=None, ), go.Scatter( x=np.array([5.0, 5.0, 22.5, 22.5]), y=np.array([0.0, 3.60555128, 3.60555128, 2.23606798]), marker=go.scatter.Marker(color="rgb(0,116,217)"), mode="lines", xaxis="x", yaxis="y", hoverinfo="text", text=None, ), ], layout=go.Layout( autosize=False, height=np_inf(), hovermode="closest", showlegend=False, width=np_inf(), xaxis=go.layout.XAxis( mirror="allticks", rangemode="tozero", showgrid=False, showline=True, showticklabels=True, tickmode="array", ticks="outside", ticktext=np.array(["3", "2", "0", "1"]), tickvals=[5.0, 15.0, 25.0, 35.0], type="linear", zeroline=False, ), yaxis=go.layout.YAxis( mirror="allticks", rangemode="tozero", showgrid=False, showline=True, showticklabels=True, ticks="outside", type="linear", zeroline=False, ), ), ) self.assertEqual(len(dendro["data"]), 3) # this is actually a bit clearer when debugging tests. self.assert_fig_equal(dendro["data"][0], expected_dendro["data"][0]) self.assert_fig_equal(dendro["data"][1], expected_dendro["data"][1]) self.assert_fig_equal(dendro["data"][2], expected_dendro["data"][2]) self.assert_fig_equal(dendro["layout"], expected_dendro["layout"]) def test_dendrogram_random_matrix(self): # create a random uncorrelated matrix X = np.random.rand(5, 5) # variable 2 is correlated with all the other variables X[2, :] = sum(X, 0) names = ["Jack", "Oxana", "John", "Chelsea", "Mark"] dendro = ff.create_dendrogram(X, labels=names) expected_dendro = go.Figure( data=[ go.Scatter( marker=go.scatter.Marker(color="rgb(61,153,112)"), mode="lines", xaxis="x", yaxis="y", hoverinfo="text", text=None, ), go.Scatter( marker=go.scatter.Marker(color="rgb(61,153,112)"), mode="lines", xaxis="x", yaxis="y", hoverinfo="text", text=None, ), go.Scatter( marker=go.scatter.Marker(color="rgb(61,153,112)"), mode="lines", xaxis="x", yaxis="y", hoverinfo="text", text=None, ), go.Scatter( marker=go.scatter.Marker(color="rgb(0,116,217)"), mode="lines", xaxis="x", yaxis="y", hoverinfo="text", text=None, ), ], layout=go.Layout( autosize=False, height=np_inf(), hovermode="closest", showlegend=False, width=np_inf(), xaxis=go.layout.XAxis( mirror="allticks", rangemode="tozero", showgrid=False, showline=True, showticklabels=True, tickmode="array", ticks="outside", tickvals=[5.0, 15.0, 25.0, 35.0, 45.0], type="linear", zeroline=False, ), yaxis=go.layout.YAxis( mirror="allticks", rangemode="tozero", showgrid=False, showline=True, showticklabels=True, ticks="outside", type="linear", zeroline=False, ), ), ) self.assertEqual(len(dendro["data"]), 4) # it's random, so we can only check that the values aren't equal y_vals = [ dendro["data"][0].to_plotly_json().pop("y"), dendro["data"][1].to_plotly_json().pop("y"), dendro["data"][2].to_plotly_json().pop("y"), dendro["data"][3].to_plotly_json().pop("y"), ] for i in range(len(y_vals)): for j in range(len(y_vals)): if i != j: self.assertFalse(np.allclose(y_vals[i], y_vals[j])) x_vals = [ 
dendro["data"][0].to_plotly_json().pop("x"), dendro["data"][1].to_plotly_json().pop("x"), dendro["data"][2].to_plotly_json().pop("x"), dendro["data"][3].to_plotly_json().pop("x"), ] for i in range(len(x_vals)): for j in range(len(x_vals)): if i != j: self.assertFalse(np.allclose(x_vals[i], x_vals[j])) # we also need to check the ticktext manually xaxis_ticktext = dendro["layout"].to_plotly_json()["xaxis"].pop("ticktext") self.assertEqual(xaxis_ticktext[0], "John") # this is actually a bit clearer when debugging tests. self.assert_fig_equal( dendro["data"][0], expected_dendro["data"][0], ignore=["uid", "x", "y"] ) self.assert_fig_equal( dendro["data"][1], expected_dendro["data"][1], ignore=["uid", "x", "y"] ) self.assert_fig_equal( dendro["data"][2], expected_dendro["data"][2], ignore=["uid", "x", "y"] ) self.assert_fig_equal( dendro["data"][3], expected_dendro["data"][3], ignore=["uid", "x", "y"] ) # layout except xaxis self.assert_fig_equal( dendro["layout"], expected_dendro["layout"], ignore=["xaxis"] ) # xaxis self.assert_fig_equal( dendro["layout"]["xaxis"], expected_dendro["layout"]["xaxis"], ignore=["ticktext"], ) def test_dendrogram_orientation(self): X = np.random.rand(5, 5) dendro_left = ff.create_dendrogram(X, orientation="left") self.assertEqual(len(dendro_left["layout"]["yaxis"]["ticktext"]), 5) tickvals_left = np.array(dendro_left["layout"]["yaxis"]["tickvals"]) self.assertTrue((tickvals_left <= 0).all()) dendro_right = ff.create_dendrogram(X, orientation="right") tickvals_right = np.array(dendro_right["layout"]["yaxis"]["tickvals"]) self.assertTrue((tickvals_right >= 0).all()) dendro_bottom = ff.create_dendrogram(X, orientation="bottom") self.assertEqual(len(dendro_bottom["layout"]["xaxis"]["ticktext"]), 5) tickvals_bottom = np.array(dendro_bottom["layout"]["xaxis"]["tickvals"]) self.assertTrue((tickvals_bottom >= 0).all()) dendro_top = ff.create_dendrogram(X, orientation="top") tickvals_top = np.array(dendro_top["layout"]["xaxis"]["tickvals"]) self.assertTrue((tickvals_top <= 0).all()) def test_dendrogram_colorscale(self): X = np.array([[1, 2, 3, 4], [1, 1, 3, 4], [1, 2, 1, 4], [1, 2, 3, 1]]) greyscale = [ "rgb(0,0,0)", # black "rgb(05,105,105)", # dim grey "rgb(128,128,128)", # grey "rgb(169,169,169)", # dark grey "rgb(192,192,192)", # silver "rgb(211,211,211)", # light grey "rgb(220,220,220)", # gainsboro "rgb(245,245,245)", # white smoke ] dendro = ff.create_dendrogram(X, colorscale=greyscale) expected_dendro = go.Figure( data=[ go.Scatter( x=np.array([25.0, 25.0, 35.0, 35.0]), y=np.array([0.0, 1.0, 1.0, 0.0]), marker=go.scatter.Marker(color="rgb(128,128,128)"), mode="lines", xaxis="x", yaxis="y", hoverinfo="text", text=None, ), go.Scatter( x=np.array([15.0, 15.0, 30.0, 30.0]), y=np.array([0.0, 2.23606798, 2.23606798, 1.0]), marker=go.scatter.Marker(color="rgb(128,128,128)"), mode="lines", xaxis="x", yaxis="y", hoverinfo="text", text=None, ), go.Scatter( x=np.array([5.0, 5.0, 22.5, 22.5]), y=np.array([0.0, 3.60555128, 3.60555128, 2.23606798]), marker=go.scatter.Marker(color="rgb(0,0,0)"), mode="lines", xaxis="x", yaxis="y", hoverinfo="text", text=None, ), ], layout=go.Layout( autosize=False, height=np_inf(), hovermode="closest", showlegend=False, width=np_inf(), xaxis=go.layout.XAxis( mirror="allticks", rangemode="tozero", showgrid=False, showline=True, showticklabels=True, tickmode="array", ticks="outside", ticktext=np.array(["3", "2", "0", "1"]), tickvals=[5.0, 15.0, 25.0, 35.0], type="linear", zeroline=False, ), yaxis=go.layout.YAxis( mirror="allticks", 
rangemode="tozero", showgrid=False, showline=True, showticklabels=True, ticks="outside", type="linear", zeroline=False, ), ), ) self.assertEqual(len(dendro["data"]), 3) # this is actually a bit clearer when debugging tests. self.assert_fig_equal(dendro["data"][0], expected_dendro["data"][0]) self.assert_fig_equal(dendro["data"][1], expected_dendro["data"][1]) self.assert_fig_equal(dendro["data"][2], expected_dendro["data"][2]) def test_dendrogram_ticklabels(self): X = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 3, 5, 6], [1, 4, 2, 3]]) dendro = ff.create_dendrogram(X=X) self.assertEqual(len(dendro.layout.xaxis.ticktext), 4) self.assertEqual(len(dendro.layout.xaxis.tickvals), 4)
TestDendrogram
python
pypa__warehouse
warehouse/admin/views/organizations.py
{ "start": 1323, "end": 1627 }
class ____(wtforms.Form):
    role_name = wtforms.SelectField(
        choices=[(role.value, role.value) for role in OrganizationRoleType],
        coerce=OrganizationRoleType,
        validators=[
            wtforms.validators.InputRequired(message="Select a role"),
        ],
    )
OrganizationRoleForm
python
sqlalchemy__sqlalchemy
test/orm/test_composites.py
{ "start": 29549, "end": 32401 }
class ____(fixtures.MappedTest):
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "graphs",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("version_id", Integer, primary_key=True, nullable=True),
            Column("name", String(30)),
        )

    @classmethod
    def setup_mappers(cls):
        graphs = cls.tables.graphs

        class Version(cls.Comparable):
            def __init__(self, id_, version):
                self.id = id_
                self.version = version

            def __composite_values__(self):
                return (self.id, self.version)

            __hash__ = None

            def __eq__(self, other):
                return (
                    isinstance(other, Version)
                    and other.id == self.id
                    and other.version == self.version
                )

            def __ne__(self, other):
                return not self.__eq__(other)

        class Graph(cls.Comparable):
            def __init__(self, version):
                self.version = version

        cls.mapper_registry.map_imperatively(
            Graph,
            graphs,
            properties={
                "version": sa.orm.composite(
                    Version, graphs.c.id, graphs.c.version_id
                )
            },
        )

    def _fixture(self):
        Graph, Version = self.classes.Graph, self.classes.Version
        sess = fixture_session()
        g = Graph(Version(1, 1))
        sess.add(g)
        sess.commit()
        return sess

    def test_get_by_col(self):
        Graph = self.classes.Graph
        sess = self._fixture()
        g = sess.query(Graph).first()
        g2 = sess.get(Graph, [g.id, g.version_id])
        eq_(g.version, g2.version)

    def test_get_by_composite(self):
        Graph, Version = self.classes.Graph, self.classes.Version
        sess = self._fixture()
        g = sess.query(Graph).first()
        g2 = sess.get(Graph, Version(g.id, g.version_id))
        eq_(g.version, g2.version)

    def test_pk_mutation(self):
        Graph, Version = self.classes.Graph, self.classes.Version
        sess = self._fixture()
        g = sess.query(Graph).first()
        g.version = Version(2, 1)
        sess.commit()
        g2 = sess.get(Graph, Version(2, 1))
        eq_(g.version, g2.version)

    @testing.fails_on_everything_except("sqlite")
    def test_null_pk(self):
        Graph, Version = self.classes.Graph, self.classes.Version
        sess = fixture_session()

        # test pk with one column NULL
        # only sqlite can really handle this
        g = Graph(Version(2, None))
        sess.add(g)
        sess.commit()
        g2 = sess.query(Graph).filter_by(version=Version(2, None)).one()
        eq_(g.version, g2.version)
PrimaryKeyTest
python
PrefectHQ__prefect
tests/test_states.py
{ "start": 1193, "end": 6484 }
class ____: def test_works_in_sync_context(self, state_cls): with pytest.raises(ValueError, match="Test"): @flow def test_flow(): raise_state_exception(state_cls(data=ValueError("Test"))) test_flow() async def test_raises_state_exception(self, state_cls): with pytest.raises(ValueError, match="Test"): await raise_state_exception(state_cls(data=ValueError("Test"))) async def test_returns_without_error_for_completed_states(self, state_cls): assert await raise_state_exception(Completed()) is None async def test_raises_nested_state_exception(self, state_cls): with pytest.raises(ValueError, match="Test"): await raise_state_exception(state_cls(data=Failed(data=ValueError("Test")))) async def test_raises_value_error_if_nested_state_is_not_failed(self, state_cls): with pytest.raises( ValueError, match="Expected failed or crashed state got Completed" ): await raise_state_exception(state_cls(data=Completed(data="test"))) async def test_raises_first_nested_multistate_exception(self, state_cls): # TODO: We may actually want to raise a "multi-error" here where we have several # exceptions displayed at once inner_states = [ Completed(data="test"), Failed(data=ValueError("Test")), Failed(data=ValueError("Should not be raised")), ] with pytest.raises(ValueError, match="Test"): await raise_state_exception(state_cls(data=inner_states)) async def test_value_error_if_all_multistates_are_not_failed(self, state_cls): inner_states = [ Completed(), Completed(), Completed(data=ValueError("Should not be raised")), ] with pytest.raises( ValueError, match="Failed state result was an iterable of states but none were failed", ): await raise_state_exception(state_cls(data=inner_states)) async def test_raises_wrapper_with_message_if_result_is_string(self, state_cls): state_to_exception = { Failed: FailedRun, Crashed: CrashedRun, Cancelled: CancelledRun, } with pytest.raises(state_to_exception[state_cls]): await raise_state_exception(state_cls(data="foo")) async def test_raises_base_exception(self, state_cls): with pytest.raises(BaseException): await raise_state_exception(state_cls(data=BaseException("foo"))) async def test_raises_wrapper_with_state_message_if_result_is_null(self, state_cls): state_to_exception = { Failed: FailedRun, Crashed: CrashedRun, Cancelled: CancelledRun, } with pytest.raises(state_to_exception[state_cls]): await raise_state_exception(state_cls(data=None, message="foo")) async def test_raises_error_if_failed_state_does_not_contain_exception( self, state_cls ): with pytest.raises(TypeError, match="int cannot be resolved into an exception"): await raise_state_exception(state_cls(data=2)) async def test_quoted_state_does_not_raise_state_exception(self, state_cls): @flow def test_flow(): return quote(state_cls()) actual = test_flow() assert isinstance(actual, quote) assert isinstance(actual.unquote(), State) async def test_aget_state_exception_from_result_record_metadata(self, state_cls): store = ResultStore() exception = ValueError("persisted error") record = store.create_result_record(exception) await store.apersist_result_record(record) state = state_cls(data=record.metadata) result = await aget_state_exception(state) assert isinstance(result, ValueError) assert str(result) == "persisted error" def test_get_state_exception_from_result_record_metadata(self, state_cls): store = ResultStore() exception = ValueError("persisted error") record = store.create_result_record(exception) store.persist_result_record(record) state = state_cls(data=record.metadata) result = get_state_exception(state) assert 
isinstance(result, ValueError) assert str(result) == "persisted error" async def test_araise_state_exception_from_result_record_metadata(self, state_cls): store = ResultStore() exception = ValueError("persisted error") record = store.create_result_record(exception) await store.apersist_result_record(record) state = state_cls(data=record.metadata) with pytest.raises(ValueError, match="persisted error"): await araise_state_exception(state) def test_raise_state_exception_from_result_record_metadata(self, state_cls): store = ResultStore() exception = ValueError("persisted error") record = store.create_result_record(exception) store.persist_result_record(record) state = state_cls(data=record.metadata) with pytest.raises(ValueError, match="persisted error"): raise_state_exception(state)
TestRaiseStateException
python
jmcnamara__XlsxWriter
xlsxwriter/core.py
{ "start": 357, "end": 5434 }
class ____(xmlwriter.XMLwriter): """ A class for writing the Excel XLSX Core file. """ ########################################################################### # # Public API. # ########################################################################### def __init__(self) -> None: """ Constructor. """ super().__init__() self.properties = {} self.iso_date = "" ########################################################################### # # Private API. # ########################################################################### def _assemble_xml_file(self) -> None: # Assemble and write the XML file. # Set the creation date for the file. date = self.properties.get("created") if not isinstance(date, datetime): date = datetime.now(timezone.utc) self.iso_date = date.strftime("%Y-%m-%dT%H:%M:%SZ") # Write the XML declaration. self._xml_declaration() self._write_cp_core_properties() self._write_dc_title() self._write_dc_subject() self._write_dc_creator() self._write_cp_keywords() self._write_dc_description() self._write_cp_last_modified_by() self._write_dcterms_created() self._write_dcterms_modified() self._write_cp_category() self._write_cp_content_status() self._xml_end_tag("cp:coreProperties") # Close the file. self._xml_close() def _set_properties(self, properties: Dict[str, Union[str, datetime]]) -> None: # Set the document properties. self.properties = properties ########################################################################### # # XML methods. # ########################################################################### def _write_cp_core_properties(self) -> None: # Write the <cp:coreProperties> element. xmlns_cp = ( "http://schemas.openxmlformats.org/package/2006/" + "metadata/core-properties" ) xmlns_dc = "http://purl.org/dc/elements/1.1/" xmlns_dcterms = "http://purl.org/dc/terms/" xmlns_dcmitype = "http://purl.org/dc/dcmitype/" xmlns_xsi = "http://www.w3.org/2001/XMLSchema-instance" attributes = [ ("xmlns:cp", xmlns_cp), ("xmlns:dc", xmlns_dc), ("xmlns:dcterms", xmlns_dcterms), ("xmlns:dcmitype", xmlns_dcmitype), ("xmlns:xsi", xmlns_xsi), ] self._xml_start_tag("cp:coreProperties", attributes) def _write_dc_creator(self) -> None: # Write the <dc:creator> element. data = self.properties.get("author", "") self._xml_data_element("dc:creator", data) def _write_cp_last_modified_by(self) -> None: # Write the <cp:lastModifiedBy> element. data = self.properties.get("author", "") self._xml_data_element("cp:lastModifiedBy", data) def _write_dcterms_created(self) -> None: # Write the <dcterms:created> element. attributes = [("xsi:type", "dcterms:W3CDTF")] self._xml_data_element("dcterms:created", self.iso_date, attributes) def _write_dcterms_modified(self) -> None: # Write the <dcterms:modified> element. attributes = [("xsi:type", "dcterms:W3CDTF")] self._xml_data_element("dcterms:modified", self.iso_date, attributes) def _write_dc_title(self) -> None: # Write the <dc:title> element. if "title" in self.properties: data = self.properties["title"] else: return self._xml_data_element("dc:title", data) def _write_dc_subject(self) -> None: # Write the <dc:subject> element. if "subject" in self.properties: data = self.properties["subject"] else: return self._xml_data_element("dc:subject", data) def _write_cp_keywords(self) -> None: # Write the <cp:keywords> element. if "keywords" in self.properties: data = self.properties["keywords"] else: return self._xml_data_element("cp:keywords", data) def _write_dc_description(self) -> None: # Write the <dc:description> element. 
if "comments" in self.properties: data = self.properties["comments"] else: return self._xml_data_element("dc:description", data) def _write_cp_category(self) -> None: # Write the <cp:category> element. if "category" in self.properties: data = self.properties["category"] else: return self._xml_data_element("cp:category", data) def _write_cp_content_status(self) -> None: # Write the <cp:contentStatus> element. if "status" in self.properties: data = self.properties["status"] else: return self._xml_data_element("cp:contentStatus", data)
Core
python
apache__airflow
airflow-core/src/airflow/api_fastapi/auth/tokens.py
{ "start": 1738, "end": 3210 }
class ____(ValueError): """Raised when a claim in the JWT is invalid.""" def __init__(self, claim: str): super().__init__(f"Invalid claim: {claim}") def key_to_jwk_dict(key: AllowedKeys, kid: str | None = None): """Convert a public or private key into a valid JWKS dict.""" from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey, Ed25519PublicKey from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey, RSAPublicKey from jwt.algorithms import OKPAlgorithm, RSAAlgorithm if isinstance(key, (RSAPrivateKey, Ed25519PrivateKey)): key = key.public_key() if isinstance(key, RSAPublicKey): jwk_dict = RSAAlgorithm(RSAAlgorithm.SHA256).to_jwk(key, as_dict=True) elif isinstance(key, Ed25519PublicKey): jwk_dict = OKPAlgorithm().to_jwk(key, as_dict=True) else: raise ValueError(f"Unknown key object {type(key)}") if not kid: kid = thumbprint(jwk_dict) jwk_dict["kid"] = kid return jwk_dict def _guess_best_algorithm(key: AllowedPrivateKeys): from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey if isinstance(key, RSAPrivateKey): return "RS512" if isinstance(key, Ed25519PrivateKey): return "EdDSA" raise ValueError(f"Unknown key object {type(key)}") @attrs.define(repr=False)
InvalidClaimError
python
huggingface__transformers
src/transformers/models/glm4/modeling_glm4.py
{ "start": 23056, "end": 23305 }
class ____(GenericForTokenClassification, Glm4PreTrainedModel): pass __all__ = [ "Glm4PreTrainedModel", "Glm4Model", "Glm4ForCausalLM", "Glm4ForSequenceClassification", "Glm4ForTokenClassification", ]
Glm4ForTokenClassification
python
pytorch__pytorch
torch/distributed/elastic/rendezvous/dynamic_rendezvous.py
{ "start": 5887, "end": 6779 }
class ____: """Hold the settings of the rendezvous. Attributes: run_id: The run id of the rendezvous. min_nodes: The minimum number of nodes to admit to the rendezvous. max_nodes: The maximum number of nodes to admit to the rendezvous. timeout: The timeout configuration of the rendezvous. keep_alive_interval: The amount of time a node waits before sending a heartbeat to keep it alive in the rendezvous. keep_alive_max_attempt: The maximum number of failed heartbeat attempts after which a node is considered dead. """ run_id: str min_nodes: int max_nodes: int timeout: RendezvousTimeout keep_alive_interval: timedelta keep_alive_max_attempt: int @dataclass(eq=True, order=True, frozen=True)
RendezvousSettings
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 931623, "end": 932000 }
class ____( sgqlc.types.Type, Node, AuditEntry, RepositoryAuditEntryData, OrganizationAuditEntryData, ): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("visibility",) visibility = sgqlc.types.Field( RepoArchivedAuditEntryVisibility, graphql_name="visibility" )
RepoArchivedAuditEntry
python
langchain-ai__langchain
libs/core/langchain_core/runnables/retry.py
{ "start": 1154, "end": 13682 }
class ____(RunnableBindingBase[Input, Output]): # type: ignore[no-redef] """Retry a Runnable if it fails. RunnableRetry can be used to add retry logic to any object that subclasses the base Runnable. Such retries are especially useful for network calls that may fail due to transient errors. The RunnableRetry is implemented as a RunnableBinding. The easiest way to use it is through the `.with_retry()` method on all Runnables. Example: Here's an example that uses a RunnableLambda to raise an exception ```python import time def foo(input) -> None: '''Fake function that raises an exception.''' raise ValueError(f"Invoking foo failed. At time {time.time()}") runnable = RunnableLambda(foo) runnable_with_retries = runnable.with_retry( retry_if_exception_type=(ValueError,), # Retry only on ValueError wait_exponential_jitter=True, # Add jitter to the exponential backoff stop_after_attempt=2, # Try twice exponential_jitter_params={"initial": 2}, # if desired, customize backoff ) # The method invocation above is equivalent to the longer form below: runnable_with_retries = RunnableRetry( bound=runnable, retry_exception_types=(ValueError,), max_attempt_number=2, wait_exponential_jitter=True, exponential_jitter_params={"initial": 2}, ) ``` This logic can be used to retry any Runnable, including a chain of Runnables, but in general it's best practice to keep the scope of the retry as small as possible. For example, if you have a chain of Runnables, you should only retry the Runnable that is likely to fail, not the entire chain. Example: ```python from langchain_core.chat_models import ChatOpenAI from langchain_core.prompts import PromptTemplate template = PromptTemplate.from_template("tell me a joke about {topic}.") model = ChatOpenAI(temperature=0.5) # Good chain = template | model.with_retry() # Bad chain = template | model retryable_chain = chain.with_retry() ``` """ retry_exception_types: tuple[type[BaseException], ...] = (Exception,) """The exception types to retry on. By default all exceptions are retried. In general you should only retry on exceptions that are likely to be transient, such as network errors. Good exceptions to retry are all server errors (5xx) and selected client errors (4xx) such as 429 Too Many Requests. """ wait_exponential_jitter: bool = True """Whether to add jitter to the exponential backoff.""" exponential_jitter_params: ExponentialJitterParams | None = None """Parameters for `tenacity.wait_exponential_jitter`. Namely: `initial`, `max`, `exp_base`, and `jitter` (all `float` values). 
""" max_attempt_number: int = 3 """The maximum number of attempts to retry the Runnable.""" @property def _kwargs_retrying(self) -> dict[str, Any]: kwargs: dict[str, Any] = {} if self.max_attempt_number: kwargs["stop"] = stop_after_attempt(self.max_attempt_number) if self.wait_exponential_jitter: kwargs["wait"] = wait_exponential_jitter( **(self.exponential_jitter_params or {}) ) if self.retry_exception_types: kwargs["retry"] = retry_if_exception_type(self.retry_exception_types) return kwargs def _sync_retrying(self, **kwargs: Any) -> Retrying: return Retrying(**self._kwargs_retrying, **kwargs) def _async_retrying(self, **kwargs: Any) -> AsyncRetrying: return AsyncRetrying(**self._kwargs_retrying, **kwargs) @staticmethod def _patch_config( config: RunnableConfig, run_manager: "T", retry_state: RetryCallState, ) -> RunnableConfig: attempt = retry_state.attempt_number tag = f"retry:attempt:{attempt}" if attempt > 1 else None return patch_config(config, callbacks=run_manager.get_child(tag)) def _patch_config_list( self, config: list[RunnableConfig], run_manager: list["T"], retry_state: RetryCallState, ) -> list[RunnableConfig]: return [ self._patch_config(c, rm, retry_state) for c, rm in zip(config, run_manager, strict=False) ] def _invoke( self, input_: Input, run_manager: "CallbackManagerForChainRun", config: RunnableConfig, **kwargs: Any, ) -> Output: for attempt in self._sync_retrying(reraise=True): with attempt: result = super().invoke( input_, self._patch_config(config, run_manager, attempt.retry_state), **kwargs, ) if attempt.retry_state.outcome and not attempt.retry_state.outcome.failed: attempt.retry_state.set_result(result) return result @override def invoke( self, input: Input, config: RunnableConfig | None = None, **kwargs: Any ) -> Output: return self._call_with_config(self._invoke, input, config, **kwargs) async def _ainvoke( self, input_: Input, run_manager: "AsyncCallbackManagerForChainRun", config: RunnableConfig, **kwargs: Any, ) -> Output: async for attempt in self._async_retrying(reraise=True): with attempt: result = await super().ainvoke( input_, self._patch_config(config, run_manager, attempt.retry_state), **kwargs, ) if attempt.retry_state.outcome and not attempt.retry_state.outcome.failed: attempt.retry_state.set_result(result) return result @override async def ainvoke( self, input: Input, config: RunnableConfig | None = None, **kwargs: Any ) -> Output: return await self._acall_with_config(self._ainvoke, input, config, **kwargs) def _batch( self, inputs: list[Input], run_manager: list["CallbackManagerForChainRun"], config: list[RunnableConfig], **kwargs: Any, ) -> list[Output | Exception]: results_map: dict[int, Output] = {} not_set: list[Output] = [] result = not_set try: for attempt in self._sync_retrying(): with attempt: # Retry for inputs that have not yet succeeded # Determine which original indices remain. remaining_indices = [ i for i in range(len(inputs)) if i not in results_map ] if not remaining_indices: break pending_inputs = [inputs[i] for i in remaining_indices] pending_configs = [config[i] for i in remaining_indices] pending_run_managers = [run_manager[i] for i in remaining_indices] # Invoke underlying batch only on remaining elements. result = super().batch( pending_inputs, self._patch_config_list( pending_configs, pending_run_managers, attempt.retry_state ), return_exceptions=True, **kwargs, ) # Register the results of the inputs that have succeeded, mapping # back to their original indices. 
first_exception = None for offset, r in enumerate(result): if isinstance(r, Exception): if not first_exception: first_exception = r continue orig_idx = remaining_indices[offset] results_map[orig_idx] = r # If any exception occurred, raise it, to retry the failed ones if first_exception: raise first_exception if ( attempt.retry_state.outcome and not attempt.retry_state.outcome.failed ): attempt.retry_state.set_result(result) except RetryError as e: if result is not_set: result = cast("list[Output]", [e] * len(inputs)) outputs: list[Output | Exception] = [] for idx in range(len(inputs)): if idx in results_map: outputs.append(results_map[idx]) else: outputs.append(result.pop(0)) return outputs @override def batch( self, inputs: list[Input], config: RunnableConfig | list[RunnableConfig] | None = None, *, return_exceptions: bool = False, **kwargs: Any, ) -> list[Output]: return self._batch_with_config( self._batch, inputs, config, return_exceptions=return_exceptions, **kwargs ) async def _abatch( self, inputs: list[Input], run_manager: list["AsyncCallbackManagerForChainRun"], config: list[RunnableConfig], **kwargs: Any, ) -> list[Output | Exception]: results_map: dict[int, Output] = {} not_set: list[Output] = [] result = not_set try: async for attempt in self._async_retrying(): with attempt: # Retry for inputs that have not yet succeeded # Determine which original indices remain. remaining_indices = [ i for i in range(len(inputs)) if i not in results_map ] if not remaining_indices: break pending_inputs = [inputs[i] for i in remaining_indices] pending_configs = [config[i] for i in remaining_indices] pending_run_managers = [run_manager[i] for i in remaining_indices] result = await super().abatch( pending_inputs, self._patch_config_list( pending_configs, pending_run_managers, attempt.retry_state ), return_exceptions=True, **kwargs, ) # Register the results of the inputs that have succeeded, mapping # back to their original indices. first_exception = None for offset, r in enumerate(result): if isinstance(r, Exception): if not first_exception: first_exception = r continue orig_idx = remaining_indices[offset] results_map[orig_idx] = r # If any exception occurred, raise it, to retry the failed ones if first_exception: raise first_exception if ( attempt.retry_state.outcome and not attempt.retry_state.outcome.failed ): attempt.retry_state.set_result(result) except RetryError as e: if result is not_set: result = cast("list[Output]", [e] * len(inputs)) outputs: list[Output | Exception] = [] for idx in range(len(inputs)): if idx in results_map: outputs.append(results_map[idx]) else: outputs.append(result.pop(0)) return outputs @override async def abatch( self, inputs: list[Input], config: RunnableConfig | list[RunnableConfig] | None = None, *, return_exceptions: bool = False, **kwargs: Any, ) -> list[Output]: return await self._abatch_with_config( self._abatch, inputs, config, return_exceptions=return_exceptions, **kwargs ) # stream() and transform() are not retried because retrying a stream # is not very intuitive.
RunnableRetry
python
numba__numba
numba/np/ufunc/dufunc.py
{ "start": 5768, "end": 40089 }
class ____(serialize.ReduceMixin, _internal._DUFunc, UfuncBase): """ Dynamic universal function (DUFunc) intended to act like a normal Numpy ufunc, but capable of call-time (just-in-time) compilation of fast loops specialized to inputs. """ # NOTE: __base_kwargs must be kept in synch with the kwlist in # _internal.c:dufunc_init() __base_kwargs = set(('identity', '_keepalive', 'nin', 'nout')) def __init__(self, py_func, identity=None, cache=False, targetoptions=None): if targetoptions is None: targetoptions = {} if is_jitted(py_func): py_func = py_func.py_func with ufuncbuilder._suppress_deprecation_warning_nopython_not_supplied(): dispatcher = jit(_target='npyufunc', cache=cache, **targetoptions)(py_func) self._initialize(dispatcher, identity) functools.update_wrapper(self, py_func) def _initialize(self, dispatcher, identity): identity = ufuncbuilder.parse_identity(identity) super(DUFunc, self).__init__(dispatcher, identity=identity) # Loop over a copy of the keys instead of the keys themselves, # since we're changing the dictionary while looping. self.reorderable = (identity != _internal.PyUFunc_None) self.__name__ = dispatcher.py_func.__name__ self.__doc__ = dispatcher.py_func.__doc__ self._lower_me = DUFuncLowerer(self) self._install_cg() self._install_type() def _reduce_states(self): """ NOTE: part of ReduceMixin protocol """ siglist = list(self._dispatcher.overloads.keys()) return dict( dispatcher=self._dispatcher, identity=self.identity, frozen=self._frozen, siglist=siglist, ) @classmethod def _rebuild(cls, dispatcher, identity, frozen, siglist): """ NOTE: part of ReduceMixin protocol """ self = _internal._DUFunc.__new__(cls) self._initialize(dispatcher, identity) # Re-add signatures for sig in siglist: self.add(sig) if frozen: self.disable_compile() return self def build_ufunc(self): """ For compatibility with the various *UFuncBuilder classes. """ return self @property def targetoptions(self): return self._dispatcher.targetoptions @property def nin(self): return self.ufunc.nin @property def nout(self): return self.ufunc.nout @property def nargs(self): return self.ufunc.nargs @property def ntypes(self): return self.ufunc.ntypes @property def types(self): return self.ufunc.types @property def identity(self): return self.ufunc.identity @property def signature(self): return self.ufunc.signature def disable_compile(self): """ Disable the compilation of new signatures at call time. """ # If disabling compilation then there must be at least one signature assert len(self._dispatcher.overloads) > 0 self._frozen = True def add(self, sig): """ Compile the DUFunc for the given signature. """ args, return_type = sigutils.normalize_signature(sig) return self._compile_for_argtys(args, return_type) def __call__(self, *args, **kws): """ Allow any argument that has overridden __array_ufunc__ (NEP-18) to take control of DUFunc.__call__. 
""" default = numpy_support.np.ndarray.__array_ufunc__ for arg in args + tuple(kws.values()): if getattr(type(arg), "__array_ufunc__", default) is not default: output = arg.__array_ufunc__(self, "__call__", *args, **kws) if output is not NotImplemented: return output else: return super().__call__(*args, **kws) def _compile_for_args(self, *args, **kws): nin = self.ufunc.nin if kws: if 'out' in kws: out = kws.pop('out') args += (out,) if kws: raise TypeError("unexpected keyword arguments to ufunc: %s" % ", ".join(repr(k) for k in sorted(kws))) args_len = len(args) assert (args_len == nin) or (args_len == nin + self.ufunc.nout) assert not kws argtys = [] for arg in args[:nin]: argty = typeof(arg) if isinstance(argty, types.Array): argty = argty.dtype else: # To avoid a mismatch in how Numba types scalar values as # opposed to Numpy, we need special logic for scalars. # For example, on 64-bit systems, numba.typeof(3) => int32, but # np.array(3).dtype => int64. # Note: this will not handle numpy "duckarrays" correctly, # including but not limited to those implementing `__array__` # and `__array_ufunc__`. argty = numpy_support.map_arrayscalar_type(arg) argtys.append(argty) return self._compile_for_argtys(tuple(argtys)) @global_compiler_lock def _compile_for_argtys(self, argtys, return_type=None): """ Given a tuple of argument types (these should be the array dtypes, and not the array types themselves), compile the element-wise function for those inputs, generate a UFunc loop wrapper, and register the loop with the Numpy ufunc object for this DUFunc. """ if self._frozen: raise RuntimeError("compilation disabled for %s" % (self,)) assert isinstance(argtys, tuple) if return_type is None: sig = argtys else: sig = return_type(*argtys) for k, cres in self._dispatcher.overloads.items(): if argtys == k.args: msg = ("Compilation requested for previously compiled argument" f" types ({argtys}). This has no effect and perhaps " "indicates a bug in the calling code (compiling a " "ufunc more than once for the same signature") warnings.warn(msg, errors.NumbaWarning) return cres cres, argtys, return_type = ufuncbuilder._compile_element_wise_function( self._dispatcher, self.targetoptions, sig) actual_sig = ufuncbuilder._finalize_ufunc_signature( cres, argtys, return_type) dtypenums, ptr, env = ufuncbuilder._build_element_wise_ufunc_wrapper( cres, actual_sig) self._add_loop(int(ptr), dtypenums) self._keepalive.append((ptr, cres.library, env)) self._lower_me.libs.append(cres.library) return cres def match_signature(self, ewise_types, sig): return sig.args == ewise_types def _install_ufunc_attributes(self, template) -> None: def get_attr_fn(attr: str) -> Callable: def impl(ufunc): val = getattr(ufunc.key[0], attr) return lambda ufunc: val return impl # ntypes/types needs "at" to be a BoundFunction rather than a Function # But this fails as it cannot a weak reference to an ufunc due to NumPy # not setting the "tp_weaklistoffset" field. 
See: # https://github.com/numpy/numpy/blob/7fc72776b972bfbfdb909e4b15feb0308cf8adba/numpy/core/src/umath/ufunc_object.c#L6968-L6983 # noqa: E501 at = types.Function(template) attributes = ('nin', 'nout', 'nargs', # 'ntypes', # 'types', 'identity', 'signature') for attr in attributes: attr_fn = get_attr_fn(attr) overload_attribute(at, attr)(attr_fn) def _install_ufunc_methods(self, template) -> None: self._install_ufunc_reduce(template) self._install_ufunc_reduceat(template) self._install_ufunc_at(template) def _install_ufunc_at(self, template) -> None: at = types.Function(template) @overload_method(at, 'at') def ol_at(ufunc, a, indices, b=None): warnings.warn("ufunc.at feature is experimental", category=errors.NumbaExperimentalFeatureWarning) if not isinstance(a, types.Array): msg = 'The first argument "a" must be array-like' raise errors.NumbaTypeError(msg) indices_arr = isinstance(indices, types.Array) indices_list = isinstance(indices, types.List) indices_tuple = isinstance(indices, types.Tuple) indices_slice = isinstance(indices, types.SliceType) indices_scalar = not (indices_arr or indices_slice or indices_tuple) indices_empty_tuple = indices_tuple and len(indices) == 0 b_array = isinstance(b, (types.Array, types.Sequence, types.List, types.Tuple)) b_none = cgutils.is_nonelike(b) b_scalar = not (b_array or b_none) need_cast = any([indices_list]) nin = self.ufunc.nin # missing second argument? if nin == 2 and cgutils.is_nonelike(b): raise errors.TypingError('second operand needed for ufunc') # extra second argument if nin == 1 and not cgutils.is_nonelike(b): msg = 'second operand provided when ufunc is unary' raise errors.TypingError(msg) if cgutils.is_nonelike(b): self.add((a.dtype,)) elif b_scalar: self.add((a.dtype, b)) else: self.add((a.dtype, b.dtype)) def apply_ufunc_codegen(context, builder, sig, args): from numba.np.arrayobj import make_array if len(args) == 4: _, aty, idxty, bty = sig.args _, a, indices, b = args else: _, aty, idxty, bty = sig.args + (None,) _, a, indices, b = args + (None,) a = make_array(aty)(context, builder, a) at_iter = UfuncAtIterator(ufunc, a, aty, indices, idxty, b, bty) at_iter.run(context, builder) @intrinsic def apply_a_b_ufunc(typingctx, ufunc, a, indices, b): sig = types.none(ufunc, a, indices, b) return sig, apply_ufunc_codegen @intrinsic def apply_a_ufunc(typingctx, ufunc, a, indices): sig = types.none(ufunc, a, indices) return sig, apply_ufunc_codegen def impl_cast(ufunc, a, indices, b=None): if b_none: return ufunc.at(a, np.asarray(indices)) else: return ufunc.at(a, np.asarray(indices), np.asarray(b)) def impl_generic(ufunc, a, indices, b=None): if b_none: apply_a_ufunc(ufunc, a, indices,) else: b_ = np.asarray(b) a_ = a[indices] b_ = np.broadcast_to(b_, a_.shape) apply_a_b_ufunc(ufunc, a, indices, b_.flat) def impl_indices_empty_b_scalar(ufunc, a, indices, b=None): a[()] = ufunc(a[()], b) def impl_scalar_scalar(ufunc, a, indices, b=None): if b_none: a[indices] = ufunc(a[indices]) else: a[indices] = ufunc(a[indices], b) if need_cast: return impl_cast elif indices_empty_tuple and b_scalar: return impl_indices_empty_b_scalar elif indices_scalar and b_scalar: return impl_scalar_scalar else: return impl_generic def _install_ufunc_reduce(self, template) -> None: at = types.Function(template) @overload_method(at, 'reduce') def ol_reduce(ufunc, array, axis=0, dtype=None, initial=None): warnings.warn("ufunc.reduce feature is experimental", category=errors.NumbaExperimentalFeatureWarning) if not isinstance(array, types.Array): msg = 'The first argument 
"array" must be array-like' raise errors.NumbaTypeError(msg) axis_int_tuple = isinstance(axis, types.UniTuple) and \ isinstance(axis.dtype, types.Integer) axis_empty_tuple = isinstance(axis, types.Tuple) and len(axis) == 0 axis_none = cgutils.is_nonelike(axis) identity_none = self.ufunc.identity is None ufunc_name = self.ufunc.__name__ # In NumPy, a ufunc is reorderable if its identity type is **not** # PyUfunc_None. if not self.reorderable and axis_int_tuple and len(axis) > 1: msg = (f"reduction operation '{ufunc_name}' is not " "reorderable, so at most one axis may be specified") raise errors.NumbaTypeError(msg) tup_init = (0,) * (array.ndim) tup_init_m1 = (0,) * (array.ndim - 1) nb_dtype = array.dtype if cgutils.is_nonelike(dtype) else dtype identity = self.identity id_none = cgutils.is_nonelike(identity) init_none = cgutils.is_nonelike(initial) @register_jitable def tuple_slice(tup, pos): # Same as # tup = tup[0 : pos] + tup[pos + 1:] s = tup_init_m1 i = 0 for j, e in enumerate(tup): if j == pos: continue s = tuple_setitem(s, i, e) i += 1 return s @register_jitable def tuple_slice_append(tup, pos, val): # Same as # tup = tup[0 : pos] + val + tup[pos + 1:] s = tup_init i, j, sz = 0, 0, len(s) while j < sz: if j == pos: s = tuple_setitem(s, j, val) else: e = tup[i] s = tuple_setitem(s, j, e) i += 1 j += 1 return s @intrinsic def compute_flat_idx(typingctx, strides, itemsize, idx, axis): sig = types.intp(strides, itemsize, idx, axis) len_idx = len(idx) def gen_block(builder, block_pos, block_name, bb_end, args): strides, _, idx, _ = args bb = builder.append_basic_block(name=block_name) with builder.goto_block(bb): zero = ir.IntType(64)(0) flat_idx = zero if block_pos == 0: for i in range(1, len_idx): stride = builder.extract_value(strides, i - 1) idx_i = builder.extract_value(idx, i) m = builder.mul(stride, idx_i) flat_idx = builder.add(flat_idx, m) elif 0 < block_pos < len_idx - 1: for i in range(0, block_pos): stride = builder.extract_value(strides, i) idx_i = builder.extract_value(idx, i) m = builder.mul(stride, idx_i) flat_idx = builder.add(flat_idx, m) for i in range(block_pos + 1, len_idx): stride = builder.extract_value(strides, i - 1) idx_i = builder.extract_value(idx, i) m = builder.mul(stride, idx_i) flat_idx = builder.add(flat_idx, m) else: for i in range(0, len_idx - 1): stride = builder.extract_value(strides, i) idx_i = builder.extract_value(idx, i) m = builder.mul(stride, idx_i) flat_idx = builder.add(flat_idx, m) builder.branch(bb_end) return bb, flat_idx def codegen(context, builder, sig, args): strides, itemsize, idx, axis = args bb = builder.basic_block switch_end = builder.append_basic_block(name='axis_end') l = [] for i in range(len_idx): block, flat_idx = gen_block(builder, i, f"axis_{i}", switch_end, args) l.append((block, flat_idx)) with builder.goto_block(bb): switch = builder.switch(axis, l[-1][0]) for i in range(len_idx): switch.add_case(i, l[i][0]) builder.position_at_end(switch_end) phi = builder.phi(l[0][1].type) for block, value in l: phi.add_incoming(value, block) return builder.sdiv(phi, itemsize) return sig, codegen @register_jitable def fixup_axis(axis, ndim): ax = axis for i in range(len(axis)): val = axis[i] + ndim if axis[i] < 0 else axis[i] ax = tuple_setitem(ax, i, val) return ax @register_jitable def find_min(tup): idx, e = 0, tup[0] for i in range(len(tup)): if tup[i] < e: idx, e = i, tup[i] return idx, e def impl_1d(ufunc, array, axis=0, dtype=None, initial=None): if identity_none and initial is None and len(array) == 0: msg = ('zero-size array to 
reduction operation ' f'{ufunc_name} which has no identity') raise ValueError(msg) start = 0 if init_none and id_none: start = 1 r = array[0] elif init_none: r = identity else: r = initial sz = array.shape[0] for i in range(start, sz): r = ufunc(r, array[i]) return r def impl_nd_axis_int(ufunc, array, axis=0, dtype=None, initial=None): if axis is None: raise ValueError("'axis' must be specified") if axis < 0: axis += array.ndim if axis < 0 or axis >= array.ndim: raise ValueError("Invalid axis") if identity_none and initial is None and array.shape[axis] == 0: msg = ('zero-size array to reduction operation ' f'{ufunc_name} which has no identity') raise ValueError(msg) # create result array shape = tuple_slice(array.shape, axis) if initial is None and identity is None: r = np.empty(shape, dtype=nb_dtype) for idx, _ in np.ndenumerate(r): # shape[0:axis] + 0 + shape[axis:] result_idx = tuple_slice_append(idx, axis, 0) r[idx] = array[result_idx] elif initial is None and identity is not None: # Checking if identity is not none is redundant but required # compile this block r = np.full(shape, fill_value=identity, dtype=nb_dtype) else: r = np.full(shape, fill_value=initial, dtype=nb_dtype) # One approach to implement reduce is to remove the axis index # from the indexing tuple returned by "np.ndenumerate". For # instance, if idx = (X, Y, Z) and axis=1, the result index # is (X, Y). # Another way is to compute the result index using strides, # which is faster than manipulating tuples. view = r.ravel() if initial is None and identity is None: for idx, val in np.ndenumerate(array): if idx[axis] == 0: continue else: flat_pos = compute_flat_idx(r.strides, r.itemsize, idx, axis) lhs, rhs = view[flat_pos], val view[flat_pos] = ufunc(lhs, rhs) else: for idx, val in np.ndenumerate(array): if initial is None and identity is None and \ idx[axis] == 0: continue flat_pos = compute_flat_idx(r.strides, r.itemsize, idx, axis) lhs, rhs = view[flat_pos], val view[flat_pos] = ufunc(lhs, rhs) return r def impl_nd_axis_tuple(ufunc, array, axis=0, dtype=None, initial=None): axis_ = fixup_axis(axis, array.ndim) for i in range(0, len(axis_)): if axis_[i] < 0 or axis_[i] >= array.ndim: raise ValueError("Invalid axis") for j in range(i + 1, len(axis_)): if axis_[i] == axis_[j]: raise ValueError("duplicate value in 'axis'") min_idx, min_elem = find_min(axis_) r = ufunc.reduce(array, axis=min_elem, dtype=dtype, initial=initial) if len(axis) == 1: return r elif len(axis) == 2: return ufunc.reduce(r, axis=axis_[(min_idx + 1) % 2] - 1) else: ax = axis_tup for i in range(len(ax)): if i != min_idx: ax = tuple_setitem(ax, i, axis_[i]) return ufunc.reduce(r, axis=ax) def impl_axis_empty_tuple(ufunc, array, axis=0, dtype=None, initial=None): return array def impl_axis_none(ufunc, array, axis=0, dtype=None, initial=None): return ufunc.reduce(array, axis_tup, dtype, initial) if array.ndim == 1 and not axis_empty_tuple: return impl_1d elif axis_empty_tuple: # ufunc(array, axis=()) return impl_axis_empty_tuple elif axis_none: # ufunc(array, axis=None) axis_tup = tuple(range(array.ndim)) return impl_axis_none elif axis_int_tuple: # axis is tuple of integers # ufunc(array, axis=(1, 2, ...)) axis_tup = (0,) * (len(axis) - 1) return impl_nd_axis_tuple elif axis == 0 or isinstance(axis, (types.Integer, types.Omitted, types.IntegerLiteral)): # axis is default value (0) or an integer # ufunc(array, axis=0) return impl_nd_axis_int def _install_ufunc_reduceat(self, template) -> None: at = types.Function(template) @overload_method(at, 'reduceat') 
def ol_reduceat(ufunc, array, indices, axis=0, dtype=None, out=None): warnings.warn("ufunc.reduceat feature is experimental", category=errors.NumbaExperimentalFeatureWarning) if self.ufunc.nin != 2: msg = 'reduceat only supported for binary functions' raise errors.NumbaTypeError(msg) if not numpy_support.type_can_asarray(array): msg = 'The first argument "array" must be array-like' raise errors.NumbaTypeError(msg) if not numpy_support.type_can_asarray(indices): msg = 'The second argument "indices" must be array-like' raise errors.NumbaTypeError(msg) if not isinstance(axis, (types.Integer, types.Omitted, types.IntegerLiteral, int)): msg = '"axis" must be an integer' raise errors.NumbaTypeError(msg) out_none = cgutils.is_nonelike(out) if not out_none and not isinstance(out, types.Array): raise errors.NumbaTypeError('output must be an array') array_arr = isinstance(array, types.Array) array_ndim = array.ndim if array_arr else 0 tup_m1 = (0,) * (array_ndim - 1) indices_arr = isinstance(indices, types.Array) dt = array.dtype if cgutils.is_nonelike(dtype) else dtype need_cast = not (array_arr and indices_arr) if indices_arr and indices.ndim != 1: msg = ('Expect "indices" array to have at most 1 dimension. ' f'Got {indices.ndim}') raise errors.NumbaValueError(msg) if need_cast: def impl_cast(ufunc, array, indices, axis=0, dtype=None, out=None): # noqa: E501 return ufunc.reduceat(np.asarray(array), np.asarray(indices), axis, dtype=dtype, out=out) return impl_cast @register_jitable def tuple_slice(tup, pos): s = tup_m1 i, j = 0, 0 while i < len(tup): if i != pos: s = tuple_setitem(s, j, tup[i]) j += 1 i += 1 return s # importing here as an import at the top scope brings unwanted # stuff. See numba/tests/test_import.py::test_no_impl_import from numba.np.arrayobj import generate_getitem_setitem_with_axis setitem = generate_getitem_setitem_with_axis(array.ndim, 'setitem') def impl(ufunc, array, indices, axis=0, dtype=None, out=None): sz = indices.shape[0] ax = axis if axis < 0: axis += array_ndim if axis < 0 or axis >= array_ndim: raise ValueError(f"axis {ax} is out of bounds for array " f"of dimension {array_ndim}") shape = tuple_setitem(array.shape, axis, sz) if not out_none and out.shape != shape: # TODO: improve error message once #9524 gets merged msg = ('operands could not be broadcast together with ' 'remmaped shapes') raise ValueError(msg) if out_none: out = np.zeros(shape, dtype=dt) # short-circuit to avoid overflow on Windows if len(indices) == 0: return out j = 0 for i in range(len(indices) - 1): if indices[i] < indices[i + 1]: idx = np.arange(indices[i], indices[i + 1]) if array_ndim > 1: arr_slice = np.take(array, idx, axis) else: arr_slice = array[idx] arr_reduce = ufunc.reduce(arr_slice, axis=axis) setitem(out, j, axis, arr_reduce) elif indices[i] >= indices[i + 1]: idx = indices[i] arr_slice = np.take(array, idx, axis) if array_ndim > 1: _slice = tuple_slice(array.shape, axis) arr_slice = arr_slice.reshape(_slice) setitem(out, j, axis, arr_slice) elif indices[i] >= sz or indices[i] < 0: raise ValueError('Invalid value for indices') j += 1 # last index idx = np.arange(indices[-1], array.shape[axis]) if array_ndim > 1: arr_slice = np.take(array, idx, axis) else: arr_slice = array[idx] arr_reduce = ufunc.reduce(arr_slice, axis) setitem(out, j, axis, arr_reduce) return out return impl def at(self, a, indices, b=None): # dynamic compile ufunc.at args = (a,) if cgutils.is_nonelike(b) else (a, b) argtys = (typeof(arg) for arg in args) ewise_types = tuple(arg.dtype if isinstance(arg, 
types.Array) else arg for arg in argtys) if self.find_ewise_function(ewise_types) == (None, None): # cannot find a matching function and compilation is disabled if self._frozen: msg = "compilation disabled for %s.at(...)" % (self,) raise RuntimeError(msg) self._compile_for_args(*args) # all good, just dispatch to the function if cgutils.is_nonelike(b): return super().at(a, indices) else: return super().at(*(a, indices, b)) def _install_type(self, typingctx=None): """Constructs and installs a typing class for a DUFunc object in the input typing context. If no typing context is given, then _install_type() installs into the typing context of the dispatcher object (should be same default context used by jit() and njit()). """ if typingctx is None: typingctx = self._dispatcher.targetdescr.typing_context _ty_cls = type('DUFuncTyping_' + self.ufunc.__name__, (AbstractTemplate,), dict(key=self, generic=self._type_me)) typingctx.insert_user_function(self, _ty_cls) self._install_ufunc_attributes(_ty_cls) self._install_ufunc_methods(_ty_cls) def find_ewise_function(self, ewise_types): """ Given a tuple of element-wise argument types, find a matching signature in the dispatcher. Return a 2-tuple containing the matching signature, and compilation result. Will return two None's if no matching signature was found. """ if self._frozen: # If we cannot compile, coerce to the best matching loop loop = numpy_support.ufunc_find_matching_loop(self, ewise_types) if loop is None: return None, None ewise_types = tuple(loop.inputs + loop.outputs)[:len(ewise_types)] for sig, cres in self._dispatcher.overloads.items(): if sig.args == ewise_types: return sig, cres return None, None def _type_me(self, argtys, kwtys): """ Implement AbstractTemplate.generic() for the typing class built by DUFunc._install_type(). Return the call-site signature after either validating the element-wise signature or compiling for it. """ assert not kwtys ufunc = self.ufunc _handle_inputs_result = npydecl.Numpy_rules_ufunc._handle_inputs( ufunc, argtys, kwtys) base_types, explicit_outputs, ndims, layout = _handle_inputs_result explicit_output_count = len(explicit_outputs) if explicit_output_count > 0: ewise_types = tuple(base_types[:-len(explicit_outputs)]) else: ewise_types = tuple(base_types) sig, cres = self.find_ewise_function(ewise_types) if sig is None: # Matching element-wise signature was not found; must # compile. if self._frozen: raise errors.NumbaTypeError("cannot call %s with types %s" % (self, argtys)) self._compile_for_argtys(ewise_types) sig, cres = self.find_ewise_function(ewise_types) assert sig is not None if explicit_output_count > 0: outtys = list(explicit_outputs) elif ufunc.nout == 1: if ndims > 0: outtys = [types.Array(sig.return_type, ndims, layout)] else: outtys = [sig.return_type] else: raise errors.NumbaNotImplementedError("typing gufuncs (nout > 1)") outtys.extend(argtys) return signature(*outtys) array_analysis.MAP_TYPES.append(DUFunc)
DUFunc
python
great-expectations__great_expectations
great_expectations/core/data_context_key.py
{ "start": 193, "end": 2073 }
class ____(metaclass=ABCMeta): """DataContextKey objects are used to uniquely identify resources used by the DataContext. A DataContextKey is designed to support clear naming with multiple representations including a hashable version making it suitable for use as the key in a dictionary. """ # noqa: E501 # FIXME CoP @abstractmethod def to_tuple(self) -> tuple: raise NotImplementedError @classmethod def from_tuple(cls, tuple_): return cls(*tuple_) def to_fixed_length_tuple(self) -> tuple: raise NotImplementedError @classmethod def from_fixed_length_tuple(cls, tuple_) -> DataContextKey: raise NotImplementedError @override def __eq__(self, other): if not isinstance(other, self.__class__): # Delegate comparison to the other instance's __eq__. return NotImplemented return self.to_tuple() == other.to_tuple() @override def __ne__(self, other): return not self == other def __lt__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self.to_tuple() < other.to_tuple() def __le__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self.to_tuple() <= other.to_tuple() def __gt__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self.to_tuple() > other.to_tuple() def __ge__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self.to_tuple() >= other.to_tuple() @override def __hash__(self): return hash(self.to_tuple()) @override def __repr__(self): return f"{self.__class__.__name__}::{'/'.join(self.to_tuple())}"
DataContextKey
python
facebook__pyre-check
client/commands/infer.py
{ "start": 11976, "end": 12652 }
class ____: annotate_attributes: bool = False use_future_annotations: bool = False quote_annotations: bool = False simple_annotations: bool = False dequalify: bool = False debug_infer: bool = False def __post__init__(self) -> None: if self.quote_annotations and (self.use_future_annotations or self.dequalify): raise ValueError( "You should not mix the `quote_annotations` option, which causes pyre " "to generate quoted and qualified annotations, with the " "`use_future_annotations` or `dequalify` options." ) @dataclasses.dataclass(frozen=True)
StubGenerationOptions
python
huggingface__transformers
src/transformers/models/bitnet/modeling_bitnet.py
{ "start": 15065, "end": 15611 }
class ____(PreTrainedModel): config: BitNetConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["BitNetDecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = True _supports_attention_backend = True _can_record_outputs = { "hidden_states": BitNetDecoderLayer, "attentions": BitNetAttention, } @auto_docstring
BitNetPreTrainedModel
python
great-expectations__great_expectations
tests/integration/metrics/column/test_null_count.py
{ "start": 548, "end": 1470 }
class ____: @parameterize_batch_for_data_sources( data_source_configs=SQL_DATA_SOURCES + PANDAS_DATA_SOURCES, data=DATA_FRAME, ) def test_success(self, batch_for_datasource: Batch) -> None: metric = ColumnNullCount(column=STRING_COLUMN_NAME) metric_result = batch_for_datasource.compute_metrics(metric) assert isinstance(metric_result, ColumnNullCountResult) assert metric_result.value == 2 @parameterize_batch_for_data_sources( data_source_configs=SPARK_DATA_SOURCES, data=DATA_FRAME, ) @pytest.mark.xfail(strict=True) def test_spark(self, batch_for_datasource: Batch) -> None: metric = ColumnNullCount(column=STRING_COLUMN_NAME) metric_result = batch_for_datasource.compute_metrics(metric) assert isinstance(metric_result, ColumnNullCountResult) assert metric_result.value == 2
TestColumnNullCount
python
walkccc__LeetCode
solutions/3063. Linked List Frequency/3063.py
{ "start": 0, "end": 364 }
class ____: def frequenciesOfElements(self, head: ListNode | None) -> ListNode | None: count = collections.Counter() curr = head while curr: count[curr.val] += 1 curr = curr.next dummy = ListNode(0) tail = dummy for freq in count.values(): tail.next = ListNode(freq) tail = tail.next return dummy.next
Solution
python
huggingface__transformers
src/transformers/models/bart/modeling_bart.py
{ "start": 46225, "end": 56152 }
class ____(BartPreTrainedModel, GenerationMixin): base_model_prefix = "model" _tied_weights_keys = { "lm_head.weight": "model.shared.weight", } _keys_to_ignore_on_load_missing = ["final_logits_bias"] def __init__(self, config: BartConfig): super().__init__(config) self.model = BartModel(config) self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) # Initialize weights and apply final processing self.post_init() def tie_weights(self, missing_keys: Optional[set[str]] = None, recompute_mapping: bool = True): """We need to overload here to handle the wrong key saved in some main checkpoints.""" if self.config.tie_word_embeddings: # Some model checkpoints like "facebook/bart-large-cnn"'s embedding weight is in decoder.embed_tokens, # need check here, see issue #36247 if missing_keys is not None: if "model.shared.weight" in missing_keys and "model.decoder.embed_tokens.weight" not in missing_keys: self.model.encoder.embed_tokens.weight = self.model.decoder.embed_tokens.weight self.model.shared.weight = self.model.decoder.embed_tokens.weight missing_keys.discard("model.encoder.embed_token.weight") missing_keys.discard("model.shared.weight") # needs to be done after, otherwise it raises an Error because the correct weights are not present super().tie_weights(missing_keys=missing_keys, recompute_mapping=recompute_mapping) def resize_token_embeddings( self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True ) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing) self._resize_final_logits_bias(new_embeddings.weight.shape[0]) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer("final_logits_bias", new_bias) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[list[torch.FloatTensor]] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple, Seq2SeqLMOutput]: r""" decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). 
For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_bart._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example summarization: ```python >>> from transformers import AutoTokenizer, BartForConditionalGeneration >>> model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn") >>> ARTICLE_TO_SUMMARIZE = ( ... "PG&E stated it scheduled the blackouts in response to forecasts for high winds " ... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were " ... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow." ... ) >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt") >>> # Generate Summary >>> summary_ids = model.generate(inputs["input_ids"], num_beams=2, min_length=0, max_length=20) >>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 'PG&E scheduled the blackouts in response to forecasts for high winds amid dry conditions' ``` Mask filling example: ```python >>> from transformers import AutoTokenizer, BartForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base") >>> model = BartForConditionalGeneration.from_pretrained("facebook/bart-base") >>> TXT = "My friends are <mask> but they eat too many carbs." 
>>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"] >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = logits[0, masked_index].softmax(dim=0) >>> values, predictions = probs.topk(5) >>> tokenizer.decode(predictions).split() ['not', 'good', 'healthy', 'great', 'very'] ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) lm_logits = self.lm_head(outputs[0]) lm_logits = lm_logits + self.final_logits_bias.to(lm_logits.device) masked_lm_loss = None if labels is not None: labels = labels.to(lm_logits.device) loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) @auto_docstring( custom_intro=""" Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """ )
BartForConditionalGeneration
python
modin-project__modin
modin/core/execution/ray/common/engine_wrapper.py
{ "start": 1960, "end": 7600 }
class ____: """Mixin that provides means of running functions remotely and getting local results.""" _func_cache = {} @classmethod def deploy( cls, func, f_args=None, f_kwargs=None, return_pandas_df=None, num_returns=1 ): """ Run local `func` remotely. Parameters ---------- func : callable or ray.ObjectID The function to perform. f_args : list or tuple, optional Positional arguments to pass to ``func``. f_kwargs : dict, optional Keyword arguments to pass to ``func``. return_pandas_df : bool, optional Whether to convert the result of `func` to a pandas DataFrame or not. num_returns : int, default: 1 Amount of return values expected from `func`. Returns ------- ray.ObjectRef or list Ray identifier of the result being put to Plasma store. """ args = [] if f_args is None else f_args kwargs = {} if f_kwargs is None else f_kwargs return _deploy_ray_func.options( num_returns=num_returns, resources=RayTaskCustomResources.get() ).remote(func, *args, return_pandas_df=return_pandas_df, **kwargs) @classmethod def is_future(cls, item): """ Check if the item is a Future. Parameters ---------- item : ray.ObjectID or object Future or object to check. Returns ------- boolean If the value is a future. """ return isinstance(item, ObjectRefTypes) @classmethod def materialize(cls, obj_id): """ Get the value of object from the Plasma store. Parameters ---------- obj_id : ray.ObjectID Ray object identifier to get the value by. Returns ------- object Whatever was identified by `obj_id`. """ if isinstance(obj_id, MaterializationHook): obj = obj_id.pre_materialize() return ( obj_id.post_materialize(ray.get(obj)) if isinstance(obj, ray.ObjectRef) else obj ) if not isinstance(obj_id, Sequence): return ray.get(obj_id) if isinstance(obj_id, ray.ObjectRef) else obj_id if all(isinstance(obj, ray.ObjectRef) for obj in obj_id): return ray.get(obj_id) ids = {} result = [] for obj in obj_id: if not isinstance(obj, ObjectRefTypes): result.append(obj) continue if isinstance(obj, MaterializationHook): oid = obj.pre_materialize() if isinstance(oid, ray.ObjectRef): hook = obj obj = oid else: result.append(oid) continue else: hook = None idx = ids.get(obj, None) if idx is None: ids[obj] = idx = len(ids) if hook is None: result.append(obj) else: hook._materialized_idx = idx result.append(hook) if len(ids) == 0: return result materialized = ray.get(list(ids.keys())) for i in range(len(result)): if isinstance((obj := result[i]), ObjectRefTypes): if isinstance(obj, MaterializationHook): result[i] = obj.post_materialize( materialized[obj._materialized_idx] ) else: result[i] = materialized[ids[obj]] return result @classmethod def put(cls, data, **kwargs): """ Store an object in the object store. Parameters ---------- data : object The Python object to be stored. **kwargs : dict Additional keyword arguments. Returns ------- ray.ObjectID Ray object identifier to get the value by. """ if isinstance(data, FunctionType): qname = data.__qualname__ if "<locals>" not in qname and "<lambda>" not in qname: ref = cls._func_cache.get(data, None) if ref is None: if len(cls._func_cache) < 1024: ref = ray.put(data) cls._func_cache[data] = ref else: msg = "To many functions in the RayWrapper cache!" assert "MODIN_GITHUB_CI" not in os.environ, msg ErrorMessage.warn(msg) return ref return ray.put(data, **kwargs) @classmethod def wait(cls, obj_ids, num_returns=None): """ Wait on the objects without materializing them (blocking operation). 
``ray.wait`` assumes a list of unique object references: see https://github.com/modin-project/modin/issues/5045 Parameters ---------- obj_ids : list, scalar num_returns : int, optional """ if not isinstance(obj_ids, Sequence): obj_ids = list(obj_ids) ids = set() for obj in obj_ids: if isinstance(obj, MaterializationHook): obj = obj.pre_materialize() if isinstance(obj, ray.ObjectRef): ids.add(obj) if num_ids := len(ids): ray.wait(list(ids), num_returns=num_returns or num_ids) @ray.remote
RayWrapper
python
fastai__fastai
fastai/text/data.py
{ "start": 8601, "end": 10419 }
class ____(TfmdDL): "A `DataLoader` that goes throught the item in the order given by `sort_func`" def __init__(self, dataset, sort_func=None, res=None, **kwargs): super().__init__(dataset, **kwargs) self.sort_func = _default_sort if sort_func is None else sort_func if res is None and self.sort_func == _default_sort: res = _get_lengths(dataset) self.res = [self.sort_func(self.do_item(i)) for i in range_of(self.dataset)] if res is None else res if len(self.res) > 0: self.idx_max = np.argmax(self.res) def get_idxs(self): idxs = super().get_idxs() if self.shuffle: return idxs return sorted(idxs, key=lambda i: self.res[i], reverse=True) def shuffle_fn(self,idxs): idxs = np.random.permutation(len(self.dataset)) idx_max = np.where(idxs==self.idx_max)[0][0] idxs[0],idxs[idx_max] = idxs[idx_max],idxs[0] sz = self.bs*50 chunks = [idxs[i:i+sz] for i in range(0, len(idxs), sz)] chunks = [sorted(s, key=lambda i: self.res[i], reverse=True) for s in chunks] sort_idx = np.concatenate(chunks) sz = self.bs batches = [sort_idx[i:i+sz] for i in range(0, len(sort_idx), sz)] sort_idx = np.concatenate(np.random.permutation(batches[1:-1])) if len(batches) > 2 else np.array([],dtype=int) sort_idx = np.concatenate((batches[0], sort_idx) if len(batches)==1 else (batches[0], sort_idx, batches[-1])) return iter(sort_idx) @delegates(TfmdDL.new) def new(self, dataset=None, **kwargs): if 'val_res' in kwargs and kwargs['val_res'] is not None: res = kwargs['val_res'] else: res = self.res if dataset is None else None return super().new(dataset=dataset, res=res, **kwargs) # %% ../../nbs/31_text.data.ipynb 62
SortedDL
python
python-attrs__attrs
typing-examples/mypy.py
{ "start": 394, "end": 447 }
class ____: y = attr.ib(type="List[int]") @attr.s
E
python
getsentry__sentry
src/sentry/analytics/events/first_feedback_sent.py
{ "start": 76, "end": 276 }
class ____(analytics.Event): organization_id: int project_id: int platform: str | None = None user_id: int | None = None analytics.register(FirstFeedbackSentEvent)
FirstFeedbackSentEvent
python
sphinx-doc__sphinx
utils/bump_version.py
{ "start": 3291, "end": 5106 }
class ____: def __init__(self, path: Path) -> None: self.path = path with open(self.path, encoding='utf-8') as f: version = f.readline().strip() matched = re.fullmatch(r'Release (.*) \((.*)\)', version) if matched is None: msg = f'Unknown CHANGES format: {version}' raise RuntimeError(msg) self.version, release_date = matched.groups() self.in_development = release_date == 'in development' self.version_tuple = parse_version(self.version).version_tuple def finalise_release_date(self) -> None: release_date = time.strftime('%b %d, %Y') heading = f'Release {self.version} (released {release_date})' with open(self.path, 'r+', encoding='utf-8') as f: f.readline() # skip first two lines f.readline() body = f.read() f.seek(0) f.truncate(0) f.write(heading + '\n') f.write('=' * len(heading) + '\n') f.write(self.filter_empty_sections(body)) def add_release(self, version_info: VersionInfo) -> None: heading = f'Release {version_info.version} (in development)' tmpl = (script_dir / 'CHANGES_template.rst').read_text(encoding='utf-8') with open(self.path, 'r+', encoding='utf-8') as f: body = f.read() f.seek(0) f.truncate(0) f.write(heading + '\n') f.write('=' * len(heading) + '\n') f.write('\n') f.write(tmpl) f.write('\n') f.write(body) @staticmethod def filter_empty_sections(body: str) -> str: return re.sub( '^\n.+\n-{3,}\n+(?=\n.+\n[-=]{3,}\n)', '', body, flags=re.MULTILINE )
Changes
python
scipy__scipy
scipy/stats/_continuous_distns.py
{ "start": 85960, "end": 89362 }
class ____(rv_continuous): r"""A doubly truncated Weibull minimum continuous random variable. %(before_notes)s See Also -------- weibull_min, truncexpon Notes ----- The probability density function for `truncweibull_min` is: .. math:: f(x, a, b, c) = \frac{c x^{c-1} \exp(-x^c)}{\exp(-a^c) - \exp(-b^c)} for :math:`a < x <= b`, :math:`0 \le a < b` and :math:`c > 0`. `truncweibull_min` takes :math:`a`, :math:`b`, and :math:`c` as shape parameters. Notice that the truncation values, :math:`a` and :math:`b`, are defined in standardized form: .. math:: a = (u_l - loc)/scale b = (u_r - loc)/scale where :math:`u_l` and :math:`u_r` are the specific left and right truncation values, respectively. In other words, the support of the distribution becomes :math:`(a*scale + loc) < x <= (b*scale + loc)` when :math:`loc` and/or :math:`scale` are provided. %(after_notes)s References ---------- .. [1] Rinne, H. "The Weibull Distribution: A Handbook". CRC Press (2009). %(example)s """ def _argcheck(self, c, a, b): return (a >= 0.) & (b > a) & (c > 0.) def _shape_info(self): ic = _ShapeInfo("c", False, (0, np.inf), (False, False)) ia = _ShapeInfo("a", False, (0, np.inf), (True, False)) ib = _ShapeInfo("b", False, (0, np.inf), (False, False)) return [ic, ia, ib] def _fitstart(self, data): # Arbitrary, but default a=b=c=1 is not valid return super()._fitstart(data, args=(1, 0, 1)) def _get_support(self, c, a, b): return a, b def _pdf(self, x, c, a, b): denum = (np.exp(-pow(a, c)) - np.exp(-pow(b, c))) return (c * pow(x, c-1) * np.exp(-pow(x, c))) / denum def _logpdf(self, x, c, a, b): logdenum = np.log(np.exp(-pow(a, c)) - np.exp(-pow(b, c))) return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c) - logdenum def _cdf(self, x, c, a, b): num = (np.exp(-pow(a, c)) - np.exp(-pow(x, c))) denum = (np.exp(-pow(a, c)) - np.exp(-pow(b, c))) return num / denum def _logcdf(self, x, c, a, b): lognum = np.log(np.exp(-pow(a, c)) - np.exp(-pow(x, c))) logdenum = np.log(np.exp(-pow(a, c)) - np.exp(-pow(b, c))) return lognum - logdenum def _sf(self, x, c, a, b): num = (np.exp(-pow(x, c)) - np.exp(-pow(b, c))) denum = (np.exp(-pow(a, c)) - np.exp(-pow(b, c))) return num / denum def _logsf(self, x, c, a, b): lognum = np.log(np.exp(-pow(x, c)) - np.exp(-pow(b, c))) logdenum = np.log(np.exp(-pow(a, c)) - np.exp(-pow(b, c))) return lognum - logdenum def _isf(self, q, c, a, b): return pow( -np.log((1 - q) * np.exp(-pow(b, c)) + q * np.exp(-pow(a, c))), 1/c ) def _ppf(self, q, c, a, b): return pow( -np.log((1 - q) * np.exp(-pow(a, c)) + q * np.exp(-pow(b, c))), 1/c ) def _munp(self, n, c, a, b): gamma_fun = sc.gamma(n/c + 1.) * ( sc.gammainc(n/c + 1., pow(b, c)) - sc.gammainc(n/c + 1., pow(a, c)) ) denum = (np.exp(-pow(a, c)) - np.exp(-pow(b, c))) return gamma_fun / denum truncweibull_min = truncweibull_min_gen(name='truncweibull_min') truncweibull_min._support = ('a', 'b')
truncweibull_min_gen
python
getsentry__sentry
tests/sentry/integrations/github_enterprise/test_repository.py
{ "start": 293, "end": 1839 }
class ____(TestCase): _IP_ADDRESS = "35.232.149.196" def setUp(self) -> None: super().setUp() self.integration = self.create_integration( organization=self.organization, provider="github_enterprise", external_id="github_external_id", metadata={ "domain_name": f"{self._IP_ADDRESS}/getsentry", "installation_id": "installation_id", "installation": {"id": 2, "private_key": "private_key", "verify_ssl": True}, }, ) @cached_property def provider(self) -> GitHubEnterpriseRepositoryProvider: return GitHubEnterpriseRepositoryProvider("integrations:github_enterprise") @responses.activate def test_build_repository_config(self) -> None: organization = self.create_organization() with assume_test_silo_mode(SiloMode.CONTROL): self.integration.add_organization(organization, self.user) data = { "identifier": "getsentry/example-repo", "external_id": "654321", "integration_id": self.integration.id, } rc_data = self.provider.build_repository_config(organization, data) assert rc_data == { "config": {"name": "getsentry/example-repo"}, "external_id": "654321", "integration_id": self.integration.id, "name": "getsentry/example-repo", "url": f"https://{self._IP_ADDRESS}/getsentry/example-repo", }
GitHubEnterpriseRepositoryTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-google-ads/source_google_ads/streams.py
{ "start": 1057, "end": 4266 }
class ____(Stream, ABC): CATCH_CUSTOMER_NOT_ENABLED_ERROR = True def __init__(self, api: GoogleAds, customers: List[CustomerModel]): self.google_ads_client = api self.customers = customers def get_query(self, stream_slice: Mapping[str, Any]) -> str: fields = GoogleAds.get_fields_from_schema(self.get_json_schema()) table_name = get_resource_name(self.name) query = GoogleAds.convert_schema_into_query(fields=fields, table_name=table_name) return query def parse_response(self, response: SearchPager, stream_slice: Optional[Mapping[str, Any]] = None) -> Iterable[Mapping]: for result in response: yield self.google_ads_client.parse_single_result(self.get_json_schema(), result) def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]: for customer in self.customers: yield {"customer_id": customer.id, "login_customer_id": customer.login_customer_id} @generator_backoff( wait_gen=backoff.constant, exception=(TimeoutError), max_tries=5, on_backoff=lambda details: logger.info( f"Caught retryable error {details['exception']} after {details['tries']} tries. Waiting {details['wait']} seconds then retrying..." ), interval=1, ) @detached(timeout_minutes=5) def request_records_job(self, customer_id, login_customer_id, query, stream_slice): response_records = self.google_ads_client.send_request(query=query, customer_id=customer_id, login_customer_id=login_customer_id) yield from self.parse_records_with_backoff(response_records, stream_slice) def read_records(self, sync_mode, stream_slice: Optional[Mapping[str, Any]] = None, **kwargs) -> Iterable[Mapping[str, Any]]: if stream_slice is None: return [] customer_id = stream_slice["customer_id"] login_customer_id = stream_slice["login_customer_id"] try: yield from self.request_records_job(customer_id, login_customer_id, self.get_query(stream_slice), stream_slice) except (GoogleAdsException, Unauthenticated) as exception: traced_exception(exception, customer_id, self.CATCH_CUSTOMER_NOT_ENABLED_ERROR) except TimeoutError as exception: # Prevent sync failure logger.warning(f"Timeout: Failed to access {self.name} stream data. {str(exception)}") @generator_backoff( wait_gen=backoff.expo, exception=(InternalServerError, ServerError, ServiceUnavailable, TooManyRequests), max_tries=5, max_time=600, on_backoff=lambda details: logger.info( f"Caught retryable error {details['exception']} after {details['tries']} tries. Waiting {details['wait']} seconds then retrying..." ), factor=5, ) def parse_records_with_backoff( self, response_records: Iterator[SearchGoogleAdsResponse], stream_slice: Optional[Mapping[str, Any]] = None ) -> Iterable[Mapping[str, Any]]: for response in response_records: yield from self.parse_response(response, stream_slice)
GoogleAdsStream
python
getsentry__sentry
tests/sentry/workflow_engine/endpoints/validators/test_base_data_condition_group.py
{ "start": 3511, "end": 4795 }
class ____(TestBaseDataConditionGroupValidator): def test_create(self) -> None: # Validate the data and raise any exceptions if invalid to halt test self.validator.is_valid(raise_exception=True) result = self.validator.create(self.validator.validated_data) # Validate the condition group is created correctly assert result.logic_type == DataConditionGroup.Type.ANY assert result.organization_id == self.organization.id assert result.conditions.count() == 0 def test_create__with_conditions(self) -> None: self.valid_data["conditions"] = [ { "type": Condition.EQUAL, "comparison": 1, "conditionResult": True, } ] validator = BaseDataConditionGroupValidator(data=self.valid_data, context=self.context) validator.is_valid(raise_exception=True) result = validator.create(validator.validated_data) assert result.conditions.count() == 1 condition = result.conditions.first() assert condition is not None assert condition.type == Condition.EQUAL assert condition.comparison == 1 assert condition.condition_group == result
TestBaseDataConditionGroupValidatorCreate
python
altair-viz__altair
altair/vegalite/v6/schema/channels.py
{ "start": 3465, "end": 6444 }
class ____: _encoding_name: str def to_dict( self, validate: bool = True, ignore: list[str] | None = None, context: dict[str, Any] | None = None, ) -> dict | list[dict]: context = context or {} ignore = ignore or [] shorthand = self._get("shorthand") # type: ignore[attr-defined] field = self._get("field") # type: ignore[attr-defined] if shorthand is not Undefined and field is not Undefined: msg = f"{self.__class__.__name__} specifies both shorthand={shorthand} and field={field}. " raise ValueError(msg) if isinstance(shorthand, (tuple, list)): # If given a list of shorthands, then transform it to a list of classes kwds = self._kwds.copy() # type: ignore[attr-defined] kwds.pop("shorthand") return [ self.__class__(sh, **kwds).to_dict( # type: ignore[call-arg] validate=validate, ignore=ignore, context=context ) for sh in shorthand ] if shorthand is Undefined: parsed = {} elif isinstance(shorthand, str): data: nw.DataFrame | Any = context.get("data", None) parsed = parse_shorthand(shorthand, data=data) type_required = "type" in self._kwds # type: ignore[attr-defined] type_in_shorthand = "type" in parsed type_defined_explicitly = self._get("type") is not Undefined # type: ignore[attr-defined] if not type_required: # Secondary field names don't require a type argument in VegaLite 3+. # We still parse it out of the shorthand, but drop it here. parsed.pop("type", None) elif not (type_in_shorthand or type_defined_explicitly): if isinstance(data, nw.DataFrame): msg = ( f'Unable to determine data type for the field "{shorthand}";' " verify that the field name is not misspelled." " If you are referencing a field from a transform," " also confirm that the data type is specified correctly." ) raise ValueError(msg) else: msg = ( f"{shorthand} encoding field is specified without a type; " "the type cannot be automatically inferred because " "the data is not specified as a pandas.DataFrame." ) raise ValueError(msg) else: # Shorthand is not a string; we pass the definition to field, # and do not do any parsing. parsed = {"field": shorthand} context["parsed_shorthand"] = parsed return super().to_dict(validate=validate, ignore=ignore, context=context)
FieldChannelMixin
python
kamyu104__LeetCode-Solutions
Python/minimum-number-of-coins-to-be-added.py
{ "start": 689, "end": 1337 }
class ____(object): def minimumAddedCoins(self, coins, target): """ :type coins: List[int] :type target: int :rtype: int """ coins.sort() result = reachable = 0 for x in coins: while not reachable >= x-1: result += 1 reachable += reachable+1 if reachable >= target: return result reachable += x if reachable >= target: return result while not reachable >= target: result += 1 reachable += reachable+1 return result
Solution2
python
sphinx-doc__sphinx
sphinx/roles.py
{ "start": 9861, "end": 11420 }
class ____(ReferenceRole): def run(self) -> tuple[list[Node], list[system_message]]: target_id = 'index-%s' % self.env.new_serialno('index') entries = [ ( 'single', _('Python Enhancement Proposals; PEP %s') % self.target, target_id, '', None, ) ] index = addnodes.index(entries=entries) target = nodes.target('', '', ids=[target_id]) self.inliner.document.note_explicit_target(target) try: refuri = self.build_uri() reference = nodes.reference( '', '', internal=False, refuri=refuri, classes=['pep'] ) if self.has_explicit_title: reference += nodes.strong(self.title, self.title) else: title = 'PEP ' + self.title reference += nodes.strong(title, title) except ValueError: msg = self.inliner.reporter.error( __('invalid PEP number %s') % self.target, line=self.lineno ) prb = self.inliner.problematic(self.rawtext, self.rawtext, msg) return [prb], [msg] return [index, target, reference], [] def build_uri(self) -> str: base_url = self.inliner.document.settings.pep_base_url ret = self.target.partition('#') if ret[1]: return base_url + 'pep-%04d/#%s' % (int(ret[0]), ret[2]) else: return base_url + 'pep-%04d/' % int(ret[0])
PEP
python
sqlalchemy__sqlalchemy
test/sql/test_computed.py
{ "start": 499, "end": 2888 }
class ____(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = "default" @combinations( ("no_persisted", "", "ignore"), ("persisted_none", "", None), ("persisted_true", " STORED", True), ("persisted_false", " VIRTUAL", False), id_="iaa", ) def test_column_computed(self, text, persisted): m = MetaData() kwargs = {"persisted": persisted} if persisted != "ignore" else {} t = Table( "t", m, Column("x", Integer), Column("y", Integer, Computed("x + 2", **kwargs)), ) self.assert_compile( CreateTable(t), "CREATE TABLE t (x INTEGER, y INTEGER GENERATED " "ALWAYS AS (x + 2)%s)" % text, ) def test_other_options(self): t = Table( "t", MetaData(), Column( "y", Integer, Computed("x + 2"), nullable=False, unique=True ), ) self.assert_compile( CreateTable(t), "CREATE TABLE t (" "y INTEGER GENERATED ALWAYS AS (x + 2) NOT NULL, UNIQUE (y))", ) def test_server_default_onupdate(self): text = ( "A generated column cannot specify a server_default or a " "server_onupdate argument" ) def fn(**kwargs): m = MetaData() Table( "t", m, Column("x", Integer), Column("y", Integer, Computed("x + 2"), **kwargs), ) assert_raises_message(ArgumentError, text, fn, server_default="42") assert_raises_message(ArgumentError, text, fn, server_onupdate="42") def test_to_metadata(self): comp1 = Computed("x + 2") m = MetaData() t = Table("t", m, Column("x", Integer), Column("y", Integer, comp1)) is_(comp1.column, t.c.y) is_(t.c.y.server_onupdate, comp1) is_(t.c.y.server_default, comp1) m2 = MetaData() t2 = t.to_metadata(m2) comp2 = t2.c.y.server_default is_not(comp1, comp2) is_(comp1.column, t.c.y) is_(t.c.y.server_onupdate, comp1) is_(t.c.y.server_default, comp1) is_(comp2.column, t2.c.y) is_(t2.c.y.server_onupdate, comp2) is_(t2.c.y.server_default, comp2)
DDLComputedTest
python
marshmallow-code__marshmallow
src/marshmallow/fields.py
{ "start": 53921, "end": 58793 }
class ____(Field[_MappingT]): """An abstract class for objects with key-value pairs. This class should not be used within schemas. :param keys: A field class or instance for dict keys. :param values: A field class or instance for dict values. :param kwargs: The same keyword arguments that :class:`Field` receives. .. note:: When the structure of nested data is not known, you may omit the `keys` and `values` arguments to prevent content validation. .. versionadded:: 3.0.0rc4 .. versionchanged:: 3.24.0 `Mapping <marshmallow.fields.Mapping>` should no longer be used as a field within a `Schema <marshmallow.Schema>`. Use `Dict <marshmallow.fields.Dict>` instead. """ mapping_type: type[_MappingT] #: Default error messages. default_error_messages = {"invalid": "Not a valid mapping type."} def __init__( self, keys: Field | type[Field] | None = None, values: Field | type[Field] | None = None, **kwargs: Unpack[_BaseFieldKwargs], ): super().__init__(**kwargs) if keys is None: self.key_field = None else: try: self.key_field = _resolve_field_instance(keys) except _FieldInstanceResolutionError as error: raise ValueError( '"keys" must be a subclass or instance of marshmallow.fields.Field.' ) from error if values is None: self.value_field = None else: try: self.value_field = _resolve_field_instance(values) except _FieldInstanceResolutionError as error: raise ValueError( '"values" must be a subclass or instance of ' "marshmallow.fields.Field." ) from error if isinstance(self.value_field, Nested): self.only = self.value_field.only self.exclude = self.value_field.exclude def _bind_to_schema(self, field_name, parent): super()._bind_to_schema(field_name, parent) if self.value_field: self.value_field = copy.deepcopy(self.value_field) self.value_field._bind_to_schema(field_name, self) if isinstance(self.value_field, Nested): self.value_field.only = self.only self.value_field.exclude = self.exclude if self.key_field: self.key_field = copy.deepcopy(self.key_field) self.key_field._bind_to_schema(field_name, self) def _serialize(self, value, attr, obj, **kwargs): if value is None: return None if not self.value_field and not self.key_field: return self.mapping_type(value) # Serialize keys if self.key_field is None: keys = {k: k for k in value} else: keys = { k: self.key_field._serialize(k, None, None, **kwargs) for k in value } # Serialize values result = self.mapping_type() if self.value_field is None: for k, v in value.items(): if k in keys: result[keys[k]] = v else: for k, v in value.items(): result[keys[k]] = self.value_field._serialize(v, None, None, **kwargs) return result def _deserialize(self, value, attr, data, **kwargs): if not isinstance(value, _Mapping): raise self.make_error("invalid") if not self.value_field and not self.key_field: return self.mapping_type(value) errors = collections.defaultdict(dict) # Deserialize keys if self.key_field is None: keys = {k: k for k in value} else: keys = {} for key in value: try: keys[key] = self.key_field.deserialize(key, **kwargs) except ValidationError as error: errors[key]["key"] = error.messages # Deserialize values result = self.mapping_type() if self.value_field is None: for k, v in value.items(): if k in keys: result[keys[k]] = v else: for key, val in value.items(): try: deser_val = self.value_field.deserialize(val, **kwargs) except ValidationError as error: errors[key]["value"] = error.messages if error.valid_data is not None and key in keys: result[keys[key]] = error.valid_data else: if key in keys: result[keys[key]] = deser_val if errors: raise 
ValidationError(errors, valid_data=result) return result
Mapping
python
Netflix__metaflow
metaflow/_vendor/click/_termui_impl.py
{ "start": 13345, "end": 20702 }
class ____(object): def __init__(self, editor=None, env=None, require_save=True, extension=".txt"): self.editor = editor self.env = env self.require_save = require_save self.extension = extension def get_editor(self): if self.editor is not None: return self.editor for key in "VISUAL", "EDITOR": rv = os.environ.get(key) if rv: return rv if WIN: return "notepad" for editor in "sensible-editor", "vim", "nano": if os.system("which {} >/dev/null 2>&1".format(editor)) == 0: return editor return "vi" def edit_file(self, filename): import subprocess editor = self.get_editor() if self.env: environ = os.environ.copy() environ.update(self.env) else: environ = None try: c = subprocess.Popen( '{} "{}"'.format(editor, filename), env=environ, shell=True, ) exit_code = c.wait() if exit_code != 0: raise ClickException("{}: Editing failed!".format(editor)) except OSError as e: raise ClickException("{}: Editing failed: {}".format(editor, e)) def edit(self, text): import tempfile text = text or "" if text and not text.endswith("\n"): text += "\n" fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension) try: if WIN: encoding = "utf-8-sig" text = text.replace("\n", "\r\n") else: encoding = "utf-8" text = text.encode(encoding) f = os.fdopen(fd, "wb") f.write(text) f.close() timestamp = os.path.getmtime(name) self.edit_file(name) if self.require_save and os.path.getmtime(name) == timestamp: return None f = open(name, "rb") try: rv = f.read() finally: f.close() return rv.decode("utf-8-sig").replace("\r\n", "\n") finally: os.unlink(name) def open_url(url, wait=False, locate=False): import subprocess def _unquote_file(url): try: import urllib except ImportError: import urllib if url.startswith("file://"): url = urllib.unquote(url[7:]) return url if sys.platform == "darwin": args = ["open"] if wait: args.append("-W") if locate: args.append("-R") args.append(_unquote_file(url)) null = open("/dev/null", "w") try: return subprocess.Popen(args, stderr=null).wait() finally: null.close() elif WIN: if locate: url = _unquote_file(url) args = 'explorer /select,"{}"'.format(_unquote_file(url.replace('"', ""))) else: args = 'start {} "" "{}"'.format( "/WAIT" if wait else "", url.replace('"', "") ) return os.system(args) elif CYGWIN: if locate: url = _unquote_file(url) args = 'cygstart "{}"'.format(os.path.dirname(url).replace('"', "")) else: args = 'cygstart {} "{}"'.format("-w" if wait else "", url.replace('"', "")) return os.system(args) try: if locate: url = os.path.dirname(_unquote_file(url)) or "." else: url = _unquote_file(url) c = subprocess.Popen(["xdg-open", url]) if wait: return c.wait() return 0 except OSError: if url.startswith(("http://", "https://")) and not locate and not wait: import webbrowser webbrowser.open(url) return 0 return 1 def _translate_ch_to_exc(ch): if ch == u"\x03": raise KeyboardInterrupt() if ch == u"\x04" and not WIN: # Unix-like, Ctrl+D raise EOFError() if ch == u"\x1a" and WIN: # Windows, Ctrl+Z raise EOFError() if WIN: import msvcrt @contextlib.contextmanager def raw_terminal(): yield def getchar(echo): # The function `getch` will return a bytes object corresponding to # the pressed character. Since Windows 10 build 1803, it will also # return \x00 when called a second time after pressing a regular key. # # `getwch` does not share this probably-bugged behavior. Moreover, it # returns a Unicode object by default, which is what we want. 
# # Either of these functions will return \x00 or \xe0 to indicate # a special key, and you need to call the same function again to get # the "rest" of the code. The fun part is that \u00e0 is # "latin small letter a with grave", so if you type that on a French # keyboard, you _also_ get a \xe0. # E.g., consider the Up arrow. This returns \xe0 and then \x48. The # resulting Unicode string reads as "a with grave" + "capital H". # This is indistinguishable from when the user actually types # "a with grave" and then "capital H". # # When \xe0 is returned, we assume it's part of a special-key sequence # and call `getwch` again, but that means that when the user types # the \u00e0 character, `getchar` doesn't return until a second # character is typed. # The alternative is returning immediately, but that would mess up # cross-platform handling of arrow keys and others that start with # \xe0. Another option is using `getch`, but then we can't reliably # read non-ASCII characters, because return values of `getch` are # limited to the current 8-bit codepage. # # Anyway, Click doesn't claim to do this Right(tm), and using `getwch` # is doing the right thing in more situations than with `getch`. if echo: func = msvcrt.getwche else: func = msvcrt.getwch rv = func() if rv in (u"\x00", u"\xe0"): # \x00 and \xe0 are control characters that indicate special key, # see above. rv += func() _translate_ch_to_exc(rv) return rv else: import tty import termios @contextlib.contextmanager def raw_terminal(): if not isatty(sys.stdin): f = open("/dev/tty") fd = f.fileno() else: fd = sys.stdin.fileno() f = None try: old_settings = termios.tcgetattr(fd) try: tty.setraw(fd) yield fd finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) sys.stdout.flush() if f is not None: f.close() except termios.error: pass def getchar(echo): with raw_terminal() as fd: ch = os.read(fd, 32) ch = ch.decode(get_best_encoding(sys.stdin), "replace") if echo and isatty(sys.stdout): sys.stdout.write(ch) _translate_ch_to_exc(ch) return ch
Editor
python
pennersr__django-allauth
allauth/socialaccount/providers/basecamp/provider.py
{ "start": 223, "end": 404 }
class ____(ProviderAccount): def get_avatar_url(self): return None def get_user_data(self): return self.account.extra_data.get("identity", {})
BasecampAccount
python
dask__dask
dask/dataframe/dask_expr/_reductions.py
{ "start": 32625, "end": 33036 }
class ____(Reduction): reduction_aggregate = sum @staticmethod def reduction_chunk(df): return df.size def _simplify_down(self): if is_dataframe_like(self.frame._meta) and len(self.frame.columns) > 1: return len(self.frame.columns) * Len(self.frame) else: return Len(self.frame) def _simplify_up(self, parent, dependents): return
Size
python
google__jax
jax/_src/compilation_cache_interface.py
{ "start": 673, "end": 865 }
class ____(util.StrictABC): _path: pathlib.Path @abc.abstractmethod def get(self, key: str): pass @abc.abstractmethod def put(self, key: str, value: bytes): pass
CacheInterface
python
getsentry__sentry
src/sentry/integrations/repository/notification_action.py
{ "start": 602, "end": 1515 }
class ____(BaseNotificationMessage): action: Action | None = None group: Group | None = None open_period_start: datetime | None = None @classmethod def from_model(cls, instance: NotificationMessage) -> NotificationActionNotificationMessage: return NotificationActionNotificationMessage( id=instance.id, error_code=instance.error_code, error_details=instance.error_details, message_identifier=instance.message_identifier, parent_notification_message_id=( instance.parent_notification_message.id if instance.parent_notification_message else None ), action=instance.action, group=instance.group, open_period_start=instance.open_period_start, date_added=instance.date_added, )
NotificationActionNotificationMessage
python
modin-project__modin
asv_bench/benchmarks/benchmarks.py
{ "start": 19593, "end": 19919 }
class ____(BaseTimeValueCounts): param_names = ["shape", "ngroups", "subset"] params = [ get_benchmark_shapes("TimeValueCountsFrame"), GROUPBY_NGROUPS, [2, 10], ] def time_value_counts(self, *args, **kwargs): execute(self.df.value_counts(subset=self.subset))
TimeValueCountsFrame
python
protocolbuffers__protobuf
python/google/protobuf/internal/text_format_test.py
{ "start": 1904, "end": 2901 }
class ____(unittest.TestCase): def ReadGolden(self, golden_filename): with test_util.GoldenFile(golden_filename) as f: return [golden_line.decode('utf-8') for golden_line in f] def CompareToGoldenFile(self, text, golden_filename): golden_lines = self.ReadGolden(golden_filename) self.assertMultiLineEqual(text, ''.join(golden_lines)) def CompareToGoldenText(self, text, golden_text): self.assertEqual(text, golden_text) def RemoveRedundantZeros(self, text): # Some platforms print 1e+5 as 1e+005. This is fine, but we need to remove # these zeros in order to match the golden file. text = text.replace('e+0','e+').replace('e+0','e+') \ .replace('e-0','e-').replace('e-0','e-') # Floating point fields are printed with .0 suffix even if they are # actually integer numbers. text = re.compile(r'\.0$', re.MULTILINE).sub('', text) return text @parameterized.parameters(unittest_pb2, unittest_proto3_arena_pb2)
TextFormatBase
python
huggingface__transformers
src/transformers/models/blt/modular_blt.py
{ "start": 30667, "end": 38522 }
class ____(BltPreTrainedModel): def __init__(self, config: BltConfig): super().__init__(config) self.gradient_checkpointing = False self.config = config self.local_encoder = BltLocalEncoder(config.encoder_config) self.global_transformer = BltGlobalTransformer(config.global_config) self.local_decoder = BltLocalDecoder(config.decoder_config) num_embeddings = config.encoder_hash_byte_group_nb_functions * len(config.encoder_hash_byte_group_size) total_vocab_size = config.encoder_hash_byte_group_vocab * num_embeddings self.encoder_hash_tok_embedding = nn.Embedding(total_vocab_size, config.encoder_config.hidden_size) if self.config.patch_in_forward: self.patcher = BltPatcher(config.patcher_config) self.patcher.eval() for param in self.patcher.parameters(): param.requires_grad = False else: self.patcher = None self.post_init() @check_model_inputs() def forward( self, input_ids: Optional[torch.LongTensor] = None, patch_lengths: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) # Extract input embeddings as early as possible if inputs_embeds is not None: encoder_embeds = inputs_embeds batch_size, sequence_length, _ = inputs_embeds.shape else: batch_size, sequence_length = input_ids.shape encoder_embeds = compute_hash_embeddings( input_ids, self.local_encoder, self.encoder_hash_tok_embedding, self.config.encoder_hash_byte_group_nb_functions, self.config.encoder_hash_byte_group_size, self.config.encoder_hash_byte_group_vocab, ) if patch_lengths is None: if self.config.patching_mode == "entropy" and self.patcher is not None: if input_ids is None: raise ValueError("input_ids is required for entropy-based patching") _, patch_lengths, _ = self.patcher( input_ids, patch_size=self.config.patch_size, threshold=self.config.patching_threshold, max_patch_length=self.config.max_patch_length, patching_batch_size=self.config.patching_batch_size, device=input_ids.device, ) else: device = input_ids.device if input_ids is not None else inputs_embeds.device dtype = input_ids.dtype if input_ids is not None else inputs_embeds.dtype patch_lengths = process_patch_lengths( torch.ones((batch_size, sequence_length + 1), dtype=dtype, device=device), self.config.max_patch_length, ) patch_ids = self._patch_ids_from_lengths(patch_lengths, sequence_length) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + encoder_embeds.shape[1], device=encoder_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=encoder_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values.self_attention_cache if past_key_values is not None else None, position_ids=position_ids, ) cross_attn_mask_enc = _prepare_patch_cross_attention_mask( patch_ids=patch_ids, num_patches=patch_lengths.shape[1], 
sequence_length=sequence_length, patches_as_queries=True, cross_attn_k=self.config.cross_attn_k, dtype=encoder_embeds.dtype, ) encoder_hidden_states, encoder_cross_states = self.local_encoder( input_ids=input_ids, inputs_embeds=encoder_embeds, attention_mask=causal_mask, position_ids=position_ids, encoder_attention_mask=cross_attn_mask_enc, num_patches=patch_lengths.shape[1], patch_ids=patch_ids, past_key_values=past_key_values.self_attention_cache if past_key_values is not None else None, **kwargs, ) encoder_cross_states = encoder_cross_states.view(batch_size, patch_lengths.shape[1], -1) global_cache_position = torch.arange(0, encoder_cross_states.shape[1], device=encoder_cross_states.device) global_position_ids = global_cache_position.unsqueeze(0) global_causal_mask = create_causal_mask( config=self.config, input_embeds=encoder_cross_states, attention_mask=None, cache_position=global_cache_position, past_key_values=None, position_ids=None, ) global_hidden_states = self.global_transformer( input_embeds=encoder_cross_states, attention_mask=global_causal_mask, position_ids=global_position_ids, **kwargs, ) decoder_patch_ids = self._patch_ids_from_lengths(patch_lengths[:, 1:], sequence_length) cross_attn_mask_dec = _prepare_patch_cross_attention_mask( patch_ids=decoder_patch_ids, num_patches=patch_lengths.shape[1], sequence_length=sequence_length, patches_as_queries=False, cross_attn_k=self.config.cross_attn_k, dtype=encoder_embeds.dtype, ) output = self.local_decoder( input_ids=input_ids, inputs_embeds=encoder_hidden_states, patch_embeds=global_hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values.cross_attention_cache if past_key_values is not None else None, cache_position=cache_position, encoder_attention_mask=cross_attn_mask_dec, **kwargs, ) return BaseModelOutputWithPast( last_hidden_state=output, past_key_values=past_key_values, ) def get_input_embeddings(self): return self.local_encoder.embed_tokens def set_input_embeddings(self, value): self.local_encoder.embed_tokens = value def _patch_ids_from_lengths(self, patch_lengths: torch.Tensor, seq_len: int) -> torch.Tensor: batch_size = patch_lengths.shape[0] patch_starts = torch.cat( [ torch.zeros(batch_size, 1, dtype=patch_lengths.dtype, device=patch_lengths.device), patch_lengths.cumsum(dim=-1)[:, :-1], ], dim=-1, ) token_positions = torch.arange(seq_len, device=patch_lengths.device) return (patch_starts.unsqueeze(1) <= token_positions.unsqueeze(0).unsqueeze(-1)).sum(dim=-1) - 1
BltModel
python
tensorflow__tensorflow
tensorflow/python/trackable/data_structures_test.py
{ "start": 12014, "end": 18367 }
class ____(test.TestCase): def testJSONSerialization(self): obj = autotrackable.AutoTrackable() obj.d = {"a": 2} json.dumps(obj.d, default=serialization.get_json_type) def testNoOverwrite(self): mapping = data_structures.Mapping() original = data_structures.List() mapping["a"] = original with self.assertRaises(ValueError): mapping["a"] = data_structures.List() self.assertIs(original, mapping["a"]) with self.assertRaises(AttributeError): del mapping["a"] # pylint: disable=unsupported-delete-operation mapping.update(b=data_structures.Mapping()) with self.assertRaises(ValueError): mapping.update({"b": data_structures.Mapping()}) def testNonStringKeys(self): mapping = data_structures.Mapping() with self.assertRaises(TypeError): mapping[1] = data_structures.List() def testHashing(self): has_mappings = set([data_structures.Mapping(), data_structures.Mapping()]) self.assertEqual(2, len(has_mappings)) self.assertNotIn(data_structures.Mapping(), has_mappings) # In contrast to Mapping, dict wrappers are not hashable a = autotrackable.AutoTrackable() a.d = {} self.assertEqual({}, a.d) self.assertFalse({} != a.d) # pylint: disable=g-explicit-bool-comparison self.assertNotEqual({1: 2}, a.d) with self.assertRaisesRegex(TypeError, "unhashable"): set([a.d]) def testListShallowCopy(self): root = autotrackable.AutoTrackable() orig_list = [[1.]] root.a = orig_list copied = copy.copy(root.a) self.assertAllEqual([[1.]], copied) self.assertIsNot(root.a, copied) self.assertIs(root.a[0], copied[0]) # Dirtiness should be inherited util.list_objects(root.a) orig_list.append(1.) with self.assertRaises(ValueError): util.list_objects(root.a) with self.assertRaises(ValueError): util.list_objects(copy.copy(root.a)) def testListDeepCopy(self): root = autotrackable.AutoTrackable() orig_list = [[1.]] root.a = orig_list copied = copy.deepcopy(root.a) self.assertAllEqual([[1.]], copied) self.assertIsNot(root.a, copied) self.assertIsNot(root.a[0], copied[0]) # Dirtiness should be inherited util.list_objects(root.a) orig_list.append(1.) 
with self.assertRaises(ValueError): util.list_objects(root.a) with self.assertRaises(ValueError): util.list_objects(copy.deepcopy(root.a)) def testDictShallowCopy(self): root = autotrackable.AutoTrackable() orig_dict = {"a": [1.]} root.a = orig_dict copied = copy.copy(root.a) self.assertAllEqual([1.], copied["a"]) self.assertIsNot(root.a, copied) self.assertIs(root.a["a"], copied["a"]) copied = root.a.copy() self.assertAllEqual([1.], copied["a"]) self.assertIsNot(root.a, copied) self.assertIs(root.a["a"], copied["a"]) # Dirtiness should be inherited util.list_objects(root.a) orig_dict["b"] = [] with self.assertRaises(ValueError): util.list_objects(root.a) with self.assertRaises(ValueError): util.list_objects(copy.copy(root.a)) def testDictDeepCopy(self): root = autotrackable.AutoTrackable() orig_dict = {"a": [1.]} root.a = orig_dict copied = copy.deepcopy(root.a) self.assertAllEqual([1.], copied["a"]) self.assertIsNot(root.a, copied) self.assertIsNot(root.a["a"], copied["a"]) # Dirtiness should be inherited util.list_objects(root.a) orig_dict["b"] = [] with self.assertRaises(ValueError): util.list_objects(root.a) with self.assertRaises(ValueError): util.list_objects(copy.deepcopy(root.a)) def testShallowCopyTrackable(self): original = autotrackable.AutoTrackable() original_sub = autotrackable.AutoTrackable() original.a = [[1.]] original.b = {"a": original_sub} shallow_copied = copy.copy(original) self.assertIs(original_sub, shallow_copied.b["a"]) self.assertIsNot(original, shallow_copied) self.assertEqual([[1.]], shallow_copied.a) shallow_deps = util.list_objects(shallow_copied) self.assertIn(shallow_copied.a, shallow_deps) self.assertIn(shallow_copied.b, shallow_deps) self.assertIn(shallow_copied.b["a"], shallow_deps) def testDeepCopyTrackable(self): original = autotrackable.AutoTrackable() original_sub = autotrackable.AutoTrackable() original.a = [[1.]] original.b = {"a": original_sub} self.assertIsInstance(original.b, dict) deep_copied = copy.deepcopy(original) self.assertIsInstance(deep_copied.b, dict) self.assertIsNot(original, deep_copied) self.assertIsNot(original_sub, deep_copied.b["a"]) self.assertEqual([[1.]], deep_copied.a) self.assertIsInstance(deep_copied.b["a"], autotrackable.AutoTrackable) deps = util.list_objects(deep_copied) self.assertIn(deep_copied.a, deps) self.assertIn(deep_copied.b, deps) self.assertIn(deep_copied.b["a"], deps) self.assertNotIn(original_sub, deps) def testConstructableFromSequence(self): result = data_structures._DictWrapper([(1, 2), (3, 4)]) self.assertIsInstance(result, dict) self.assertEqual({1: 2, 3: 4}, result) def testPickle(self): original = data_structures._DictWrapper(dict(a=1, b=2)) serialized = pickle.dumps(original) del original deserialized = pickle.loads(serialized) self.assertEqual(dict(a=1, b=2), deserialized) def testListAddOrder(self): self.assertEqual([1., 2.], data_structures.ListWrapper([1.]) + data_structures.ListWrapper([2.])) self.assertEqual([1., 2.], data_structures.ListWrapper([1.]) + [2.]) self.assertEqual([1., 2.], [1.] + data_structures.ListWrapper([2.])) def testSameStructure(self): d = {1: "a"} nest.assert_same_structure(d, data_structures._DictWrapper(d.copy())) def testFunctionCaching(self): @def_function.function def f(dict_input): return dict_input["x"] + constant_op.constant(1.) first_trace = f.get_concrete_function({"x": constant_op.constant(2.)}) second_trace = f.get_concrete_function( data_structures._DictWrapper({"x": constant_op.constant(3.)})) self.assertIs(first_trace, second_trace)
MappingTests
python
getsentry__sentry
tests/sentry/integrations/utils/test_sync.py
{ "start": 787, "end": 9000 }
class ____(TestCase): def setUp(self) -> None: self.example_integration = self.create_integration( organization=self.group.organization, external_id="123456", provider="example", oi_params={ "config": { "sync_comments": True, "sync_status_outbound": True, "sync_status_inbound": True, "sync_assignee_outbound": True, "sync_assignee_inbound": True, } }, ) self.test_user = self.create_user("test@example.com") self.create_member(organization=self.organization, user=self.test_user, teams=[self.team]) with assume_test_silo_mode_of(UserEmail): UserEmail.objects.filter(user=self.test_user).update(is_verified=True) assert UserEmail.objects.filter( user=self.test_user, email="test@example.com", is_verified=True ).exists() def assign_default_group_to_user(self, user: User, group: Group | None = None): group_to_update: Group = group or self.group GroupAssignee.objects.assign(group_to_update, serialize_rpc_user(user)) group_to_update.refresh_from_db() group_assignee = group_to_update.get_assignee() assert group_assignee is not None and group_assignee.id == user.id @mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") def test_no_affected_groups(self, mock_record_event: mock.MagicMock) -> None: self.assign_default_group_to_user(self.test_user) sync_group_assignee_inbound( integration=self.example_integration, email="foo@example.com", external_issue_key="this-does-not-exist", assign=True, ) mock_record_event.record_event(EventLifecycleOutcome.SUCCESS) @mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") def test_unassign(self, mock_record_event: mock.MagicMock) -> None: self.assign_default_group_to_user(self.test_user) external_issue = self.create_integration_external_issue( group=self.group, key="foo-123", integration=self.example_integration, ) sync_group_assignee_inbound( integration=self.example_integration, email="test@example.com", external_issue_key=external_issue.key, assign=False, ) assert self.group.get_assignee() is None mock_record_event.assert_called_with(EventLifecycleOutcome.SUCCESS, None, False, None) @mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") def test_assignment(self, mock_record_event: mock.MagicMock) -> None: assert self.group.get_assignee() is None external_issue = self.create_integration_external_issue( group=self.group, key="foo-123", integration=self.example_integration, ) sync_group_assignee_inbound( integration=self.example_integration, email="test@example.com", external_issue_key=external_issue.key, assign=True, ) updated_assignee = self.group.get_assignee() assert updated_assignee is not None assert updated_assignee.id == self.test_user.id assert updated_assignee.email == "test@example.com" mock_record_event.assert_called_with(EventLifecycleOutcome.SUCCESS, None, False, None) @mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") def test_assign_with_multiple_groups(self, mock_record_event: mock.MagicMock) -> None: # Create a couple new test unassigned test groups groups_to_assign: list[Group] = [] for _ in range(2): org = self.create_organization(owner=self.create_user()) team = self.create_team(organization=org) project = self.create_project(organization=org, teams=[team]) self.create_member(organization=org, user=self.test_user, teams=[team]) self.create_organization_integration( organization_id=org.id, integration=self.example_integration, config={ "sync_comments": True, "sync_status_outbound": True, "sync_status_inbound": True, "sync_assignee_outbound": True, 
"sync_assignee_inbound": True, }, ) groups_to_assign.append( self.create_group(project=project), ) external_issue_key = "foo-123" for group in groups_to_assign: assert group.get_assignee() is None self.create_integration_external_issue( group=group, key="foo-123", integration=self.example_integration, ) sync_group_assignee_inbound( integration=self.example_integration, email="test@example.com", external_issue_key=external_issue_key, assign=True, ) for group in groups_to_assign: assignee = group.get_assignee() assert assignee is not None assert isinstance(assignee, RpcUser) assert assignee.id == self.test_user.id assert assignee.email == "test@example.com" mock_record_event.assert_called_with(EventLifecycleOutcome.SUCCESS, None, False, None) @mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_halt") def test_assign_with_no_user_found(self, mock_record_halt: mock.MagicMock) -> None: assert self.group.get_assignee() is None external_issue = self.create_integration_external_issue( group=self.group, key="foo-123", integration=self.example_integration, ) sync_group_assignee_inbound( integration=self.example_integration, email="oopsnotfound@example.com", external_issue_key=external_issue.key, assign=True, ) updated_assignee = self.group.get_assignee() assert updated_assignee is None mock_record_halt.assert_called_with( "inbound-assignee-not-found", extra={ "integration_id": self.example_integration.id, "email": "oopsnotfound@example.com", "issue_key": external_issue.key, "method": AssigneeInboundSyncMethod.EMAIL.value, "assign": True, }, ) @mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_failure") @mock.patch("sentry.models.groupassignee.GroupAssigneeManager.assign") def test_assignment_fails( self, mock_group_assign: mock.MagicMock, mock_record_failure: mock.MagicMock ) -> None: def raise_exception(*args, **kwargs): raise Exception("oops, something went wrong") assert self.group.get_assignee() is None mock_group_assign.side_effect = raise_exception external_issue = self.create_integration_external_issue( group=self.group, key="foo-123", integration=self.example_integration, ) with pytest.raises(Exception) as exc: sync_group_assignee_inbound( integration=self.example_integration, email="test@example.com", external_issue_key=external_issue.key, assign=True, ) assert exc.match("oops, something went wrong") updated_assignee = self.group.get_assignee() assert updated_assignee is None mock_record_failure.assert_called_once_with(mock.ANY, create_issue=True) exception_param = mock_record_failure.call_args_list[0].args[0] assert isinstance(exception_param, Exception) assert exception_param.args[0] == "oops, something went wrong" @region_silo_test @with_feature("organizations:integrations-github-project-management")
TestSyncAssigneeInbound
python
keras-team__keras
keras/src/saving/serialization_lib_test.py
{ "start": 286, "end": 536 }
class ____(keras.layers.Layer): def __init__(self, factor): super().__init__() self.factor = factor def call(self, x): return x * self.factor def get_config(self): return {"factor": self.factor}
CustomLayer
python
huggingface__transformers
src/transformers/models/mobilevit/modeling_mobilevit.py
{ "start": 8797, "end": 9258 }
class ____(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int) -> None: super().__init__() self.dense = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states
MobileViTSelfOutput
python
astropy__astropy
astropy/visualization/wcsaxes/wcsapi.py
{ "start": 12125, "end": 13836 }
class ____(CurvedTransform): """ WCS transformation from world to pixel coordinates. """ has_inverse = True frame_in = None units_in = None def __init__(self, wcs, invert_xy=False): super().__init__() if wcs.pixel_n_dim > 2: raise ValueError("Only pixel_n_dim =< 2 is supported") self.wcs = wcs self.invert_xy = invert_xy self.frame_in = wcsapi_to_celestial_frame(wcs) self.units_in = wcs.world_axis_units def __hash__(self): return hash((type(self), self.wcs, self.invert_xy)) def __eq__(self, other): return ( isinstance(other, type(self)) and self.wcs is other.wcs and self.invert_xy == other.invert_xy ) @property def input_dims(self): return self.wcs.world_n_dim def transform(self, world): # Convert to a list of arrays world = list(world.T) if len(world) != 2: raise ValueError(f"Expected 2 world coordinates, got {len(world)}") if self.wcs.world_n_dim == 1: world_non_wcs = world[1] world = world[0:1] if len(world[0]) == 0: pixel = np.zeros((0, 2)) else: pixel = self.wcs.world_to_pixel_values(*world) if self.invert_xy: pixel = pixel[::-1] if self.wcs.world_n_dim == 1: pixel = [pixel, world_non_wcs] return np.array(pixel).T transform_non_affine = transform def inverted(self): """ Return the inverse of the transform. """ return WCSPixel2WorldTransform(self.wcs, invert_xy=self.invert_xy)
WCSWorld2PixelTransform
python
langchain-ai__langchain
libs/partners/anthropic/tests/unit_tests/test_output_parsers.py
{ "start": 615, "end": 654 }
class ____(BaseModel): bar: int
_Foo1
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 769037, "end": 770012 }
class ____(sgqlc.types.Type, Node): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("database_id", "name", "protection_rules") database_id = sgqlc.types.Field(Int, graphql_name="databaseId") name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name") protection_rules = sgqlc.types.Field( sgqlc.types.non_null(DeploymentProtectionRuleConnection), graphql_name="protectionRules", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), )
Environment
python
pandas-dev__pandas
asv_bench/benchmarks/array.py
{ "start": 3210, "end": 4282 }
class ____: params = [ [ "boolean[pyarrow]", "float64[pyarrow]", "int64[pyarrow]", "string[pyarrow]", "timestamp[ns][pyarrow]", ], [False, True], ] param_names = ["dtype", "hasna"] def setup(self, dtype, hasna): N = 100_000 if dtype == "boolean[pyarrow]": data = np.random.choice([True, False], N, replace=True) elif dtype == "float64[pyarrow]": data = np.random.randn(N) elif dtype == "int64[pyarrow]": data = np.arange(N) elif dtype == "string[pyarrow]": data = np.array([str(i) for i in range(N)], dtype=object) elif dtype == "timestamp[ns][pyarrow]": data = pd.date_range("2000-01-01", freq="s", periods=N) else: raise NotImplementedError arr = pd.array(data, dtype=dtype) if hasna: arr[::2] = pd.NA self.arr = arr def time_to_numpy(self, dtype, hasna): self.arr.to_numpy()
ArrowExtensionArray
python
allegroai__clearml
clearml/utilities/pyhocon/config_tree.py
{ "start": 16789, "end": 16881 }
class ____(object): def __init__(self, tokens): self.tokens = tokens
ConfigInclude
python
django__django
tests/syndication_tests/feeds.py
{ "start": 7398, "end": 7730 }
class ____(TestAtomFeed): """ A feed to test that Atom feeds work with a single enclosure. """ def item_enclosure_url(self, item): return "http://example.com" def item_enclosure_size(self, item): return 0 def item_mime_type(self, item): return "image/png"
TestSingleEnclosureAtomFeed
python
great-expectations__great_expectations
tests/expectations/core/test_expect_column_values_to_match_regex_parameterized.py
{ "start": 182, "end": 1404 }
class ____(gxe.ExpectColumnValuesToMatchRegex): regex: str = "^\\d+$" @pytest.mark.big def test_expect_column_values_as_string_to_be_positive_integers_pass( empty_data_context: AbstractDataContext, ): df = pd.DataFrame({"a": ["1", "2", "3", "4", "5"]}) data_asset = empty_data_context.data_sources.pandas_default.add_dataframe_asset("my_dataframe") batch = data_asset.add_batch_definition_whole_dataframe("my_batch_definition").get_batch( batch_parameters={"dataframe": df} ) result = batch.validate(ExpectColumnValuesAsStringToBePositiveInteger(column="a")) assert result.success @pytest.mark.big def test_expect_column_values_as_string_to_be_positive_integers_fail( empty_data_context: AbstractDataContext, ): df = pd.DataFrame({"a": ["1", "2", "3", "4", "a"]}) data_asset = empty_data_context.data_sources.pandas_default.add_dataframe_asset("my_dataframe") batch = data_asset.add_batch_definition_whole_dataframe("my_batch_definition").get_batch( batch_parameters={"dataframe": df} ) result = batch.validate(ExpectColumnValuesAsStringToBePositiveInteger(column="a")) assert not result.success
ExpectColumnValuesAsStringToBePositiveInteger
python
kamyu104__LeetCode-Solutions
Python/find-consecutive-integers-from-a-data-stream.py
{ "start": 37, "end": 476 }
class ____(object): def __init__(self, value, k): """ :type value: int :type k: int """ self.__value = value self.__k = k self.__cnt = 0 def consec(self, num): """ :type num: int :rtype: bool """ if num == self.__value: self.__cnt += 1 else: self.__cnt = 0 return self.__cnt >= self.__k
DataStream
python
tensorflow__tensorflow
tensorflow/python/training/moving_averages_test.py
{ "start": 1406, "end": 6945 }
class ____(test.TestCase): @test_util.run_in_graph_and_eager_modes def testAssignMovingAverageWithoutZeroDebias(self): var = variables.Variable([10.0, 11.0]) val = constant_op.constant([1.0, 2.0], dtypes.float32) decay = 0.25 if context.executing_eagerly(): self.assertAllClose([10.0, 11.0], self.evaluate(var)) assign = moving_averages.assign_moving_average( var, val, decay, zero_debias=False) self.assertAllClose( [10.0 * 0.25 + 1.0 * (1.0 - 0.25), 11.0 * 0.25 + 2.0 * (1.0 - 0.25)], self.evaluate(var)) else: assign = moving_averages.assign_moving_average( var, val, decay, zero_debias=False) self.evaluate(variables.global_variables_initializer()) self.assertAllClose([10.0, 11.0], self.evaluate(var)) assign.op.run() self.assertAllClose( [10.0 * 0.25 + 1.0 * (1.0 - 0.25), 11.0 * 0.25 + 2.0 * (1.0 - 0.25)], self.evaluate(var)) @test_util.run_in_graph_and_eager_modes def testAssignMovingAverage(self): var = variables.Variable([0.0, 0.0]) val = constant_op.constant([1.0, 2.0], dtypes.float32) decay = 0.25 if context.executing_eagerly(): self.assertAllClose([0.0, 0.0], self.evaluate(var)) assign = moving_averages.assign_moving_average(var, val, decay) self.assertAllClose( [1.0 * (1.0 - 0.25) / (1 - 0.25), 2.0 * (1.0 - 0.25) / (1 - 0.25)], self.evaluate(var)) else: assign = moving_averages.assign_moving_average(var, val, decay) self.evaluate(variables.global_variables_initializer()) self.assertAllClose([0.0, 0.0], self.evaluate(var)) assign.op.run() self.assertAllClose( [1.0 * (1.0 - 0.25) / (1 - 0.25), 2.0 * (1.0 - 0.25) / (1 - 0.25)], self.evaluate(var)) @test_util.deprecated_graph_mode_only def testAssignMovingAverageNewNamingMultipleCalls(self): with variable_scope.variable_scope("scope1") as vs1: with variable_scope.variable_scope("scope2"): var = variables.Variable(1.0, name="Var") moving_averages.assign_moving_average(var, 0.0, 0.99) moving_averages.assign_moving_average(var, 0.0, 0.99) expected_names = ["scope1/scope2/Var:0", "scope1/scope2/scope1/scope2/Var/biased:0", "scope1/scope2/scope1/scope2/Var/local_step:0", "scope1/scope2/scope1/scope2/Var/biased_1:0", "scope1/scope2/scope1/scope2/Var/local_step_1:0"] actual_names = [v.name for v in vs1.global_variables()] self.assertSetEqual(set(expected_names), set(actual_names)) @test_util.deprecated_graph_mode_only def testAssignMovingAverageNewNamingMultipleCallsWithReuse(self): with variable_scope.variable_scope("scope1") as vs1: var = variable_scope.get_variable("Var", shape=[]) moving_averages.assign_moving_average(var, 0.0, 0.99) moving_averages.assign_moving_average(var, 0.0, 0.99) with variable_scope.variable_scope(vs1, reuse=True): var = variable_scope.get_variable("Var", shape=[]) moving_averages.assign_moving_average(var, 0.0, 0.99) moving_averages.assign_moving_average(var, 0.0, 0.99) @test_util.deprecated_graph_mode_only def testWeightedMovingAverage(self): with self.cached_session() as sess: decay = 0.5 weight = array_ops.placeholder(dtypes.float32, []) val = array_ops.placeholder(dtypes.float32, []) wma = moving_averages.weighted_moving_average(val, decay, weight) self.evaluate(variables.global_variables_initializer()) # Get the first weighted moving average. val_1 = 3.0 weight_1 = 4.0 wma_array = sess.run(wma, feed_dict={val: val_1, weight: weight_1}) numerator_1 = val_1 * weight_1 * (1.0 - decay) denominator_1 = weight_1 * (1.0 - decay) self.assertAllClose(numerator_1 / denominator_1, wma_array) # Get the second weighted moving average. 
val_2 = 11.0 weight_2 = 22.0 wma_array = sess.run(wma, feed_dict={val: val_2, weight: weight_2}) numerator_2 = numerator_1 * decay + val_2 * weight_2 * (1.0 - decay) denominator_2 = denominator_1 * decay + weight_2 * (1.0 - decay) self.assertAllClose(numerator_2 / denominator_2, wma_array) @test_util.deprecated_graph_mode_only def testWeightedMovingAverageBfloat16(self): with self.cached_session() as sess: decay = 0.5 weight = array_ops.placeholder(dtypes.bfloat16, []) val = array_ops.placeholder(dtypes.bfloat16, []) wma = moving_averages.weighted_moving_average(val, decay, weight) self.evaluate(variables.global_variables_initializer()) # Get the first weighted moving average. val_1 = 3.0 weight_1 = 4.0 wma_array = sess.run(wma, feed_dict={val: val_1, weight: weight_1}) numerator_1 = val_1 * weight_1 * (1.0 - decay) denominator_1 = weight_1 * (1.0 - decay) self.assertAllClose(numerator_1 / denominator_1, wma_array) # Get the second weighted moving average. val_2 = 11.0 weight_2 = 22.0 wma_array = sess.run(wma, feed_dict={val: val_2, weight: weight_2}) numerator_2 = numerator_1 * decay + val_2 * weight_2 * (1.0 - decay) denominator_2 = denominator_1 * decay + weight_2 * (1.0 - decay) self.assertAllClose( dtypes._np_bfloat16(numerator_2 / denominator_2), wma_array) def _Repeat(value, dim): if dim == 1: return value return [value] * dim
MovingAveragesTest
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 36248, "end": 36439 }
class ____(sgqlc.types.Enum): """ See source code for more info. """ __schema__ = graphql_schema __choices__ = ("DIRECT_MEMBER", "OWNER", "UNAFFILIATED")
RoleInOrganization
python
tensorflow__tensorflow
tensorflow/tools/proto_splitter/split_graph_def.py
{ "start": 9249, "end": 9922 }
class ____(SplitBasedOnSize):
  """Splits a message into a separate chunk if it's over a certain size."""

  __slots__ = ("size_check",)

  def __init__(self, proto, proto_size, size_check=_GREEDY_SPLIT, **kwargs):
    """Initializer."""
    self.size_check = size_check
    super().__init__(proto, proto_size, **kwargs)

  def build_chunks(self) -> int:
    """Creates a chunk for the entire proto and returns the original size."""
    if self.size_check(self.proto_size):
      new_proto = type(self._proto)()
      new_proto.MergeFrom(self._proto)
      self._proto.Clear()
      self.add_chunk(new_proto, [])
      return self.proto_size
    return 0
LargeMessageSplitter
python
scrapy__scrapy
tests/spiders.py
{ "start": 8279, "end": 8938 }
class ____(FollowAllSpider): def __init__(self, max_items=10, max_requests=10, *args, **kwargs): super().__init__(*args, **kwargs) self.max_items = max_items self.max_requests = max_requests def parse(self, response): self.items_scraped = 0 self.pages_crawled = 1 # account for the start url for request in super().parse(response): if self.pages_crawled < self.max_requests: yield request self.pages_crawled += 1 if self.items_scraped < self.max_items: yield Item() self.items_scraped += 1
MaxItemsAndRequestsSpider
python
python-pillow__Pillow
src/PIL/PngImagePlugin.py
{ "start": 37748, "end": 51138 }
class ____(NamedTuple): im: Image.Image bbox: tuple[int, int, int, int] | None encoderinfo: dict[str, Any] def _write_multiple_frames( im: Image.Image, fp: IO[bytes], chunk: Callable[..., None], mode: str, rawmode: str, default_image: Image.Image | None, append_images: list[Image.Image], ) -> Image.Image | None: duration = im.encoderinfo.get("duration") loop = im.encoderinfo.get("loop", im.info.get("loop", 0)) disposal = im.encoderinfo.get("disposal", im.info.get("disposal", Disposal.OP_NONE)) blend = im.encoderinfo.get("blend", im.info.get("blend", Blend.OP_SOURCE)) if default_image: chain = itertools.chain(append_images) else: chain = itertools.chain([im], append_images) im_frames: list[_Frame] = [] frame_count = 0 for im_seq in chain: for im_frame in ImageSequence.Iterator(im_seq): if im_frame.mode == mode: im_frame = im_frame.copy() else: im_frame = im_frame.convert(mode) encoderinfo = im.encoderinfo.copy() if isinstance(duration, (list, tuple)): encoderinfo["duration"] = duration[frame_count] elif duration is None and "duration" in im_frame.info: encoderinfo["duration"] = im_frame.info["duration"] if isinstance(disposal, (list, tuple)): encoderinfo["disposal"] = disposal[frame_count] if isinstance(blend, (list, tuple)): encoderinfo["blend"] = blend[frame_count] frame_count += 1 if im_frames: previous = im_frames[-1] prev_disposal = previous.encoderinfo.get("disposal") prev_blend = previous.encoderinfo.get("blend") if prev_disposal == Disposal.OP_PREVIOUS and len(im_frames) < 2: prev_disposal = Disposal.OP_BACKGROUND if prev_disposal == Disposal.OP_BACKGROUND: base_im = previous.im.copy() dispose = Image.core.fill("RGBA", im.size, (0, 0, 0, 0)) bbox = previous.bbox if bbox: dispose = dispose.crop(bbox) else: bbox = (0, 0) + im.size base_im.paste(dispose, bbox) elif prev_disposal == Disposal.OP_PREVIOUS: base_im = im_frames[-2].im else: base_im = previous.im delta = ImageChops.subtract_modulo( im_frame.convert("RGBA"), base_im.convert("RGBA") ) bbox = delta.getbbox(alpha_only=False) if ( not bbox and prev_disposal == encoderinfo.get("disposal") and prev_blend == encoderinfo.get("blend") and "duration" in encoderinfo ): previous.encoderinfo["duration"] += encoderinfo["duration"] continue else: bbox = None im_frames.append(_Frame(im_frame, bbox, encoderinfo)) if len(im_frames) == 1 and not default_image: return im_frames[0].im # animation control chunk( fp, b"acTL", o32(len(im_frames)), # 0: num_frames o32(loop), # 4: num_plays ) # default image IDAT (if it exists) if default_image: if im.mode != mode: im = im.convert(mode) ImageFile._save( im, cast(IO[bytes], _idat(fp, chunk)), [ImageFile._Tile("zip", (0, 0) + im.size, 0, rawmode)], ) seq_num = 0 for frame, frame_data in enumerate(im_frames): im_frame = frame_data.im if not frame_data.bbox: bbox = (0, 0) + im_frame.size else: bbox = frame_data.bbox im_frame = im_frame.crop(bbox) size = im_frame.size encoderinfo = frame_data.encoderinfo frame_duration = int(round(encoderinfo.get("duration", 0))) frame_disposal = encoderinfo.get("disposal", disposal) frame_blend = encoderinfo.get("blend", blend) # frame control chunk( fp, b"fcTL", o32(seq_num), # sequence_number o32(size[0]), # width o32(size[1]), # height o32(bbox[0]), # x_offset o32(bbox[1]), # y_offset o16(frame_duration), # delay_numerator o16(1000), # delay_denominator o8(frame_disposal), # dispose_op o8(frame_blend), # blend_op ) seq_num += 1 # frame data if frame == 0 and not default_image: # first frame must be in IDAT chunks for backwards compatibility ImageFile._save( im_frame, 
cast(IO[bytes], _idat(fp, chunk)), [ImageFile._Tile("zip", (0, 0) + im_frame.size, 0, rawmode)], ) else: fdat_chunks = _fdat(fp, chunk, seq_num) ImageFile._save( im_frame, cast(IO[bytes], fdat_chunks), [ImageFile._Tile("zip", (0, 0) + im_frame.size, 0, rawmode)], ) seq_num = fdat_chunks.seq_num return None def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: _save(im, fp, filename, save_all=True) def _save( im: Image.Image, fp: IO[bytes], filename: str | bytes, chunk: Callable[..., None] = putchunk, save_all: bool = False, ) -> None: # save an image to disk (called by the save method) if save_all: default_image = im.encoderinfo.get( "default_image", im.info.get("default_image") ) modes = set() sizes = set() append_images = im.encoderinfo.get("append_images", []) for im_seq in itertools.chain([im], append_images): for im_frame in ImageSequence.Iterator(im_seq): modes.add(im_frame.mode) sizes.add(im_frame.size) for mode in ("RGBA", "RGB", "P"): if mode in modes: break else: mode = modes.pop() size = tuple(max(frame_size[i] for frame_size in sizes) for i in range(2)) else: size = im.size mode = im.mode outmode = mode if mode == "P": # # attempt to minimize storage requirements for palette images if "bits" in im.encoderinfo: # number of bits specified by user colors = min(1 << im.encoderinfo["bits"], 256) else: # check palette contents if im.palette: colors = max(min(len(im.palette.getdata()[1]) // 3, 256), 1) else: colors = 256 if colors <= 16: if colors <= 2: bits = 1 elif colors <= 4: bits = 2 else: bits = 4 outmode += f";{bits}" # encoder options im.encoderconfig = ( im.encoderinfo.get("optimize", False), im.encoderinfo.get("compress_level", -1), im.encoderinfo.get("compress_type", -1), im.encoderinfo.get("dictionary", b""), ) # get the corresponding PNG mode try: rawmode, bit_depth, color_type = _OUTMODES[outmode] except KeyError as e: msg = f"cannot write mode {mode} as PNG" raise OSError(msg) from e if outmode == "I": deprecate("Saving I mode images as PNG", 13, stacklevel=4) # # write minimal PNG file fp.write(_MAGIC) chunk( fp, b"IHDR", o32(size[0]), # 0: size o32(size[1]), bit_depth, color_type, b"\0", # 10: compression b"\0", # 11: filter category b"\0", # 12: interlace flag ) chunks = [b"cHRM", b"cICP", b"gAMA", b"sBIT", b"sRGB", b"tIME"] icc = im.encoderinfo.get("icc_profile", im.info.get("icc_profile")) if icc: # ICC profile # according to PNG spec, the iCCP chunk contains: # Profile name 1-79 bytes (character string) # Null separator 1 byte (null character) # Compression method 1 byte (0) # Compressed profile n bytes (zlib with deflate compression) name = b"ICC Profile" data = name + b"\0\0" + zlib.compress(icc) chunk(fp, b"iCCP", data) # You must either have sRGB or iCCP. # Disallow sRGB chunks when an iCCP-chunk has been emitted. 
chunks.remove(b"sRGB") info = im.encoderinfo.get("pnginfo") if info: chunks_multiple_allowed = [b"sPLT", b"iTXt", b"tEXt", b"zTXt"] for info_chunk in info.chunks: cid, data = info_chunk[:2] if cid in chunks: chunks.remove(cid) chunk(fp, cid, data) elif cid in chunks_multiple_allowed: chunk(fp, cid, data) elif cid[1:2].islower(): # Private chunk after_idat = len(info_chunk) == 3 and info_chunk[2] if not after_idat: chunk(fp, cid, data) if im.mode == "P": palette_byte_number = colors * 3 palette_bytes = im.im.getpalette("RGB")[:palette_byte_number] while len(palette_bytes) < palette_byte_number: palette_bytes += b"\0" chunk(fp, b"PLTE", palette_bytes) transparency = im.encoderinfo.get("transparency", im.info.get("transparency", None)) if transparency or transparency == 0: if im.mode == "P": # limit to actual palette size alpha_bytes = colors if isinstance(transparency, bytes): chunk(fp, b"tRNS", transparency[:alpha_bytes]) else: transparency = max(0, min(255, transparency)) alpha = b"\xff" * transparency + b"\0" chunk(fp, b"tRNS", alpha[:alpha_bytes]) elif im.mode in ("1", "L", "I", "I;16"): transparency = max(0, min(65535, transparency)) chunk(fp, b"tRNS", o16(transparency)) elif im.mode == "RGB": red, green, blue = transparency chunk(fp, b"tRNS", o16(red) + o16(green) + o16(blue)) else: if "transparency" in im.encoderinfo: # don't bother with transparency if it's an RGBA # and it's in the info dict. It's probably just stale. msg = "cannot use transparency for this mode" raise OSError(msg) else: if im.mode == "P" and im.im.getpalettemode() == "RGBA": alpha = im.im.getpalette("RGBA", "A") alpha_bytes = colors chunk(fp, b"tRNS", alpha[:alpha_bytes]) dpi = im.encoderinfo.get("dpi") if dpi: chunk( fp, b"pHYs", o32(int(dpi[0] / 0.0254 + 0.5)), o32(int(dpi[1] / 0.0254 + 0.5)), b"\x01", ) if info: chunks = [b"bKGD", b"hIST"] for info_chunk in info.chunks: cid, data = info_chunk[:2] if cid in chunks: chunks.remove(cid) chunk(fp, cid, data) exif = im.encoderinfo.get("exif") if exif: if isinstance(exif, Image.Exif): exif = exif.tobytes(8) if exif.startswith(b"Exif\x00\x00"): exif = exif[6:] chunk(fp, b"eXIf", exif) single_im: Image.Image | None = im if save_all: single_im = _write_multiple_frames( im, fp, chunk, mode, rawmode, default_image, append_images ) if single_im: ImageFile._save( single_im, cast(IO[bytes], _idat(fp, chunk)), [ImageFile._Tile("zip", (0, 0) + single_im.size, 0, rawmode)], ) if info: for info_chunk in info.chunks: cid, data = info_chunk[:2] if cid[1:2].islower(): # Private chunk after_idat = len(info_chunk) == 3 and info_chunk[2] if after_idat: chunk(fp, cid, data) chunk(fp, b"IEND", b"") if hasattr(fp, "flush"): fp.flush() # -------------------------------------------------------------------- # PNG chunk converter def getchunks(im: Image.Image, **params: Any) -> list[tuple[bytes, bytes, bytes]]: """Return a list of PNG chunks representing this image.""" from io import BytesIO chunks = [] def append(fp: IO[bytes], cid: bytes, *data: bytes) -> None: byte_data = b"".join(data) crc = o32(_crc32(byte_data, _crc32(cid))) chunks.append((cid, byte_data, crc)) fp = BytesIO() try: im.encoderinfo = params _save(im, fp, "", append) finally: del im.encoderinfo return chunks # -------------------------------------------------------------------- # Registry Image.register_open(PngImageFile.format, PngImageFile, _accept) Image.register_save(PngImageFile.format, _save) Image.register_save_all(PngImageFile.format, _save_all) Image.register_extensions(PngImageFile.format, [".png", ".apng"]) 
Image.register_mime(PngImageFile.format, "image/png")
_Frame
python
walkccc__LeetCode
solutions/1817. Finding the Users Active Minutes/1817.py
{ "start": 0, "end": 340 }
class ____:
  def findingUsersActiveMinutes(
      self,
      logs: list[list[int]],
      k: int,
  ) -> list[int]:
    idToTimes = collections.defaultdict(set)

    for id, time in logs:
      idToTimes[id].add(time)

    c = collections.Counter(len(times) for times in idToTimes.values())
    return [c[i] for i in range(1, k + 1)]
Solution
python
numpy__numpy
numpy/_pytesttester.py
{ "start": 1429, "end": 6328 }
class ____:
    """
    Pytest test runner.

    A test function is typically added to a package's __init__.py like so::

      from numpy._pytesttester import PytestTester
      test = PytestTester(__name__).test
      del PytestTester

    Calling this test function finds and runs all tests associated with the
    module and all its sub-modules.

    Attributes
    ----------
    module_name : str
        Full path to the package to test.

    Parameters
    ----------
    module_name : module name
        The name of the module to test.

    Notes
    -----
    Unlike the previous ``nose``-based implementation, this class is not
    publicly exposed as it performs some ``numpy``-specific warning
    suppression.

    """
    def __init__(self, module_name):
        self.module_name = module_name
        self.__module__ = module_name

    def __call__(self, label='fast', verbose=1, extra_argv=None,
                 doctests=False, coverage=False, durations=-1, tests=None):
        """
        Run tests for module using pytest.

        Parameters
        ----------
        label : {'fast', 'full'}, optional
            Identifies the tests to run. When set to 'fast', tests decorated
            with `pytest.mark.slow` are skipped, when 'full', the slow marker
            is ignored.
        verbose : int, optional
            Verbosity value for test outputs, in the range 1-3. Default is 1.
        extra_argv : list, optional
            List with any extra arguments to pass to pytests.
        doctests : bool, optional
            .. note:: Not supported
        coverage : bool, optional
            If True, report coverage of NumPy code. Default is False.
            Requires installation of (pip) pytest-cov.
        durations : int, optional
            If < 0, do nothing, If 0, report time of all tests, if > 0,
            report the time of the slowest `timer` tests. Default is -1.
        tests : test or list of tests
            Tests to be executed with pytest '--pyargs'

        Returns
        -------
        result : bool
            Return True on success, false otherwise.

        Notes
        -----
        Each NumPy module exposes `test` in its namespace to run all tests for
        it. For example, to run all tests for numpy.lib:

        >>> np.lib.test() #doctest: +SKIP

        Examples
        --------
        >>> result = np.lib.test() #doctest: +SKIP
        ...
        1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
        >>> result
        True

        """
        import warnings
        import pytest

        module = sys.modules[self.module_name]
        module_path = os.path.abspath(module.__path__[0])

        # setup the pytest arguments
        pytest_args = ["-l"]

        # offset verbosity. The "-q" cancels a "-v".
        pytest_args += ["-q"]

        if sys.version_info < (3, 12):
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                # Filter out distutils cpu warnings (could be localized to
                # distutils tests). ASV has problems with top level import,
                # so fetch module for suppression here.
                from numpy.distutils import cpuinfo  # noqa: F401

        # Filter out annoying import messages. Want these in both develop and
        # release mode.
        pytest_args += [
            "-W ignore:Not importing directory",
            "-W ignore:numpy.dtype size changed",
            "-W ignore:numpy.ufunc size changed",
            "-W ignore::UserWarning:cpuinfo",
        ]

        # When testing matrices, ignore their PendingDeprecationWarnings
        pytest_args += [
            "-W ignore:the matrix subclass is not",
            "-W ignore:Importing from numpy.matlib is",
        ]

        if doctests:
            pytest_args += ["--doctest-modules"]

        if extra_argv:
            pytest_args += list(extra_argv)

        if verbose > 1:
            pytest_args += ["-" + "v" * (verbose - 1)]

        if coverage:
            pytest_args += ["--cov=" + module_path]

        if label == "fast":
            # not importing at the top level to avoid circular import of module
            from numpy.testing import IS_PYPY
            if IS_PYPY:
                pytest_args += ["-m", "not slow and not slow_pypy"]
            else:
                pytest_args += ["-m", "not slow"]

        elif label != "full":
            pytest_args += ["-m", label]

        if durations >= 0:
            pytest_args += [f"--durations={durations}"]

        if tests is None:
            tests = [self.module_name]

        pytest_args += ["--pyargs"] + list(tests)

        # run tests.
        _show_numpy_info()

        try:
            code = pytest.main(pytest_args)
        except SystemExit as exc:
            code = exc.code

        return code == 0
PytestTester
python
tensorflow__tensorflow
tensorflow/python/debug/lib/source_utils_test.py
{ "start": 5391, "end": 13485 }
class ____(test_util.TensorFlowTestCase): def createAndRunGraphHelper(self): """Create and run a TensorFlow Graph to generate debug dumps. This is intentionally done in separate method, to make it easier to test the stack-top mode of source annotation. """ self.dump_root = self.get_temp_dir() self.curr_file_path = os.path.abspath( tf_inspect.getfile(tf_inspect.currentframe())) # Run a simple TF graph to generate some debug dumps that can be used in # source annotation. with session.Session() as sess: self.u_init = constant_op.constant( np.array([[5.0, 3.0], [-1.0, 0.0]]), shape=[2, 2], name="u_init") self.u_init_line_number = line_number_above() self.u = variables.Variable(self.u_init, name="u") self.u_line_number = line_number_above() self.v_init = constant_op.constant( np.array([[2.0], [-1.0]]), shape=[2, 1], name="v_init") self.v_init_line_number = line_number_above() self.v = variables.Variable(self.v_init, name="v") self.v_line_number = line_number_above() self.w = math_ops.matmul(self.u, self.v, name="w") self.w_line_number = line_number_above() self.evaluate(self.u.initializer) self.evaluate(self.v.initializer) run_options = config_pb2.RunOptions(output_partition_graphs=True) debug_utils.watch_graph( run_options, sess.graph, debug_urls=["file://%s" % self.dump_root]) run_metadata = config_pb2.RunMetadata() sess.run(self.w, options=run_options, run_metadata=run_metadata) self.dump = debug_data.DebugDumpDir( self.dump_root, partition_graphs=run_metadata.partition_graphs) self.dump.set_python_graph(sess.graph) def setUp(self): self.createAndRunGraphHelper() self.helper_line_number = line_number_above() def tearDown(self): if os.path.isdir(self.dump_root): file_io.delete_recursively(self.dump_root) ops.reset_default_graph() def testAnnotateWholeValidSourceFileGivesCorrectResult(self): source_annotation = source_utils.annotate_source(self.dump, self.curr_file_path) self.assertIn(self.u_init.op.name, source_annotation[self.u_init_line_number]) self.assertIn(self.u.op.name, source_annotation[self.u_line_number]) self.assertIn(self.v_init.op.name, source_annotation[self.v_init_line_number]) self.assertIn(self.v.op.name, source_annotation[self.v_line_number]) self.assertIn(self.w.op.name, source_annotation[self.w_line_number]) # In the non-stack-top (default) mode, the helper line should be annotated # with all the ops as well. self.assertIn(self.u_init.op.name, source_annotation[self.helper_line_number]) self.assertIn(self.u.op.name, source_annotation[self.helper_line_number]) self.assertIn(self.v_init.op.name, source_annotation[self.helper_line_number]) self.assertIn(self.v.op.name, source_annotation[self.helper_line_number]) self.assertIn(self.w.op.name, source_annotation[self.helper_line_number]) def testAnnotateWithStackTopGivesCorrectResult(self): source_annotation = source_utils.annotate_source( self.dump, self.curr_file_path, file_stack_top=True) self.assertIn(self.u_init.op.name, source_annotation[self.u_init_line_number]) self.assertIn(self.u.op.name, source_annotation[self.u_line_number]) self.assertIn(self.v_init.op.name, source_annotation[self.v_init_line_number]) self.assertIn(self.v.op.name, source_annotation[self.v_line_number]) self.assertIn(self.w.op.name, source_annotation[self.w_line_number]) # In the stack-top mode, the helper line should not have been annotated. 
self.assertNotIn(self.helper_line_number, source_annotation) def testAnnotateSubsetOfLinesGivesCorrectResult(self): source_annotation = source_utils.annotate_source( self.dump, self.curr_file_path, min_line=self.u_line_number, max_line=self.u_line_number + 1) self.assertIn(self.u.op.name, source_annotation[self.u_line_number]) self.assertNotIn(self.v_line_number, source_annotation) def testAnnotateDumpedTensorsGivesCorrectResult(self): source_annotation = source_utils.annotate_source( self.dump, self.curr_file_path, do_dumped_tensors=True) # Note: Constant Tensors u_init and v_init may not get dumped due to # constant-folding. self.assertIn(self.u.name, source_annotation[self.u_line_number]) self.assertIn(self.v.name, source_annotation[self.v_line_number]) self.assertIn(self.w.name, source_annotation[self.w_line_number]) self.assertNotIn(self.u.op.name, source_annotation[self.u_line_number]) self.assertNotIn(self.v.op.name, source_annotation[self.v_line_number]) self.assertNotIn(self.w.op.name, source_annotation[self.w_line_number]) self.assertIn(self.u.name, source_annotation[self.helper_line_number]) self.assertIn(self.v.name, source_annotation[self.helper_line_number]) self.assertIn(self.w.name, source_annotation[self.helper_line_number]) def testCallingAnnotateSourceWithoutPythonGraphRaisesException(self): self.dump.set_python_graph(None) with self.assertRaises(ValueError): source_utils.annotate_source(self.dump, self.curr_file_path) def testCallingAnnotateSourceOnUnrelatedSourceFileDoesNotError(self): # Create an unrelated source file. fd, unrelated_source_path = tempfile.mkstemp() with open(fd, "wt") as source_file: source_file.write("print('hello, world')\n") self.assertEqual({}, source_utils.annotate_source(self.dump, unrelated_source_path)) # Clean up unrelated source file. os.remove(unrelated_source_path) def testLoadingPythonSourceFileWithNonAsciiChars(self): fd, source_path = tempfile.mkstemp() with open(fd, "wb") as source_file: source_file.write(u"print('\U0001f642')\n".encode("utf-8")) source_lines, _ = source_utils.load_source(source_path) self.assertEqual(source_lines, [u"print('\U0001f642')", u""]) # Clean up unrelated source file. os.remove(source_path) def testLoadNonexistentNonParPathFailsWithIOError(self): bad_path = os.path.join(self.get_temp_dir(), "nonexistent.py") with self.assertRaisesRegex(IOError, "neither exists nor can be loaded.*par.*"): source_utils.load_source(bad_path) def testLoadingPythonSourceFileInParFileSucceeds(self): # Create the .par file first. temp_file_path = os.path.join(self.get_temp_dir(), "model.py") with open(temp_file_path, "wb") as f: f.write(b"import tensorflow as tf\nx = tf.constant(42.0)\n") par_path = os.path.join(self.get_temp_dir(), "train_model.par") with zipfile.ZipFile(par_path, "w") as zf: zf.write(temp_file_path, os.path.join("tensorflow_models", "model.py")) source_path = os.path.join(par_path, "tensorflow_models", "model.py") source_lines, _ = source_utils.load_source(source_path) self.assertEqual( source_lines, ["import tensorflow as tf", "x = tf.constant(42.0)", ""]) def testLoadingPythonSourceFileInParFileFailsRaisingIOError(self): # Create the .par file first. 
temp_file_path = os.path.join(self.get_temp_dir(), "model.py") with open(temp_file_path, "wb") as f: f.write(b"import tensorflow as tf\nx = tf.constant(42.0)\n") par_path = os.path.join(self.get_temp_dir(), "train_model.par") with zipfile.ZipFile(par_path, "w") as zf: zf.write(temp_file_path, os.path.join("tensorflow_models", "model.py")) source_path = os.path.join(par_path, "tensorflow_models", "nonexistent.py") with self.assertRaisesRegex(IOError, "neither exists nor can be loaded.*par.*"): source_utils.load_source(source_path) @test_util.run_v1_only("Sessions are not available in TF 2.x")
SourceHelperTest
python
sympy__sympy
sympy/physics/biomechanics/tests/test_curve.py
{ "start": 19043, "end": 27149 }
class ____: @pytest.fixture(autouse=True) def _fiber_force_length_passive_arguments_fixture(self): self.l_M_tilde = Symbol('l_M_tilde') self.c0 = Symbol('c_0') self.c1 = Symbol('c_1') self.constants = (self.c0, self.c1) @staticmethod def test_class(): assert issubclass(FiberForceLengthPassiveDeGroote2016, Function) assert issubclass(FiberForceLengthPassiveDeGroote2016, CharacteristicCurveFunction) assert FiberForceLengthPassiveDeGroote2016.__name__ == 'FiberForceLengthPassiveDeGroote2016' def test_instance(self): fl_M_pas = FiberForceLengthPassiveDeGroote2016(self.l_M_tilde, *self.constants) assert isinstance(fl_M_pas, FiberForceLengthPassiveDeGroote2016) assert str(fl_M_pas) == 'FiberForceLengthPassiveDeGroote2016(l_M_tilde, c_0, c_1)' def test_doit(self): fl_M_pas = FiberForceLengthPassiveDeGroote2016(self.l_M_tilde, *self.constants).doit() assert fl_M_pas == (exp((self.c1*(self.l_M_tilde - 1))/self.c0) - 1)/(exp(self.c1) - 1) def test_doit_evaluate_false(self): fl_M_pas = FiberForceLengthPassiveDeGroote2016(self.l_M_tilde, *self.constants).doit(evaluate=False) assert fl_M_pas == (exp((self.c1*UnevaluatedExpr(self.l_M_tilde - 1))/self.c0) - 1)/(exp(self.c1) - 1) def test_with_defaults(self): constants = ( Float('0.6'), Float('4.0'), ) fl_M_pas_manual = FiberForceLengthPassiveDeGroote2016(self.l_M_tilde, *constants) fl_M_pas_constants = FiberForceLengthPassiveDeGroote2016.with_defaults(self.l_M_tilde) assert fl_M_pas_manual == fl_M_pas_constants def test_differentiate_wrt_l_M_tilde(self): fl_M_pas = FiberForceLengthPassiveDeGroote2016(self.l_M_tilde, *self.constants) expected = self.c1*exp(self.c1*UnevaluatedExpr(self.l_M_tilde - 1)/self.c0)/(self.c0*(exp(self.c1) - 1)) assert fl_M_pas.diff(self.l_M_tilde) == expected def test_differentiate_wrt_c0(self): fl_M_pas = FiberForceLengthPassiveDeGroote2016(self.l_M_tilde, *self.constants) expected = ( -self.c1*exp(self.c1*UnevaluatedExpr(self.l_M_tilde - 1)/self.c0) *UnevaluatedExpr(self.l_M_tilde - 1)/(self.c0**2*(exp(self.c1) - 1)) ) assert fl_M_pas.diff(self.c0) == expected def test_differentiate_wrt_c1(self): fl_M_pas = FiberForceLengthPassiveDeGroote2016(self.l_M_tilde, *self.constants) expected = ( -exp(self.c1)*(-1 + exp(self.c1*UnevaluatedExpr(self.l_M_tilde - 1)/self.c0))/(exp(self.c1) - 1)**2 + exp(self.c1*UnevaluatedExpr(self.l_M_tilde - 1)/self.c0)*(self.l_M_tilde - 1)/(self.c0*(exp(self.c1) - 1)) ) assert fl_M_pas.diff(self.c1) == expected def test_inverse(self): fl_M_pas = FiberForceLengthPassiveDeGroote2016(self.l_M_tilde, *self.constants) assert fl_M_pas.inverse() is FiberForceLengthPassiveInverseDeGroote2016 def test_function_print_latex(self): fl_M_pas = FiberForceLengthPassiveDeGroote2016(self.l_M_tilde, *self.constants) expected = r'\operatorname{fl}^M_{pas} \left( l_{M tilde} \right)' assert LatexPrinter().doprint(fl_M_pas) == expected def test_expression_print_latex(self): fl_M_pas = FiberForceLengthPassiveDeGroote2016(self.l_M_tilde, *self.constants) expected = r'\frac{e^{\frac{c_{1} \left(l_{M tilde} - 1\right)}{c_{0}}} - 1}{e^{c_{1}} - 1}' assert LatexPrinter().doprint(fl_M_pas.doit()) == expected @pytest.mark.parametrize( 'code_printer, expected', [ ( C89CodePrinter, '(0.01865736036377405*(-1 + exp(6.666666666666667*(l_M_tilde - 1))))', ), ( C99CodePrinter, '(0.01865736036377405*(-1 + exp(6.666666666666667*(l_M_tilde - 1))))', ), ( C11CodePrinter, '(0.01865736036377405*(-1 + exp(6.666666666666667*(l_M_tilde - 1))))', ), ( CXX98CodePrinter, '(0.01865736036377405*(-1 + exp(6.666666666666667*(l_M_tilde - 1))))', ), ( 
CXX11CodePrinter, '(0.01865736036377405*(-1 + std::exp(6.666666666666667*(l_M_tilde - 1))))', ), ( CXX17CodePrinter, '(0.01865736036377405*(-1 + std::exp(6.666666666666667*(l_M_tilde - 1))))', ), ( FCodePrinter, ' (0.0186573603637741d0*(-1 + exp(6.666666666666667d0*(l_M_tilde - 1\n' ' @ ))))', ), ( OctaveCodePrinter, '(0.0186573603637741*(-1 + exp(6.66666666666667*(l_M_tilde - 1))))', ), ( PythonCodePrinter, '(0.0186573603637741*(-1 + math.exp(6.66666666666667*(l_M_tilde - 1))))', ), ( NumPyPrinter, '(0.0186573603637741*(-1 + numpy.exp(6.66666666666667*(l_M_tilde - 1))))', ), ( SciPyPrinter, '(0.0186573603637741*(-1 + numpy.exp(6.66666666666667*(l_M_tilde - 1))))', ), ( CuPyPrinter, '(0.0186573603637741*(-1 + cupy.exp(6.66666666666667*(l_M_tilde - 1))))', ), ( JaxPrinter, '(0.0186573603637741*(-1 + jax.numpy.exp(6.66666666666667*(l_M_tilde - 1))))', ), ( MpmathPrinter, '(mpmath.mpf((0, 672202249456079, -55, 50))*(-1 + mpmath.exp(' 'mpmath.mpf((0, 7505999378950827, -50, 53))*(l_M_tilde - 1))))', ), ( LambdaPrinter, '(0.0186573603637741*(-1 + math.exp(6.66666666666667*(l_M_tilde - 1))))', ), ] ) def test_print_code(self, code_printer, expected): fl_M_pas = FiberForceLengthPassiveDeGroote2016.with_defaults(self.l_M_tilde) assert code_printer().doprint(fl_M_pas) == expected def test_derivative_print_code(self): fl_M_pas = FiberForceLengthPassiveDeGroote2016.with_defaults(self.l_M_tilde) fl_M_pas_dl_M_tilde = fl_M_pas.diff(self.l_M_tilde) expected = '0.12438240242516*math.exp(6.66666666666667*(l_M_tilde - 1))' assert PythonCodePrinter().doprint(fl_M_pas_dl_M_tilde) == expected def test_lambdify(self): fl_M_pas = FiberForceLengthPassiveDeGroote2016.with_defaults(self.l_M_tilde) fl_M_pas_callable = lambdify(self.l_M_tilde, fl_M_pas) assert fl_M_pas_callable(1.0) == pytest.approx(0.0) @pytest.mark.skipif(numpy is None, reason='NumPy not installed') def test_lambdify_numpy(self): fl_M_pas = FiberForceLengthPassiveDeGroote2016.with_defaults(self.l_M_tilde) fl_M_pas_callable = lambdify(self.l_M_tilde, fl_M_pas, 'numpy') l_M_tilde = numpy.array([0.5, 0.8, 0.9, 1.0, 1.1, 1.2, 1.5]) expected = numpy.array([ -0.0179917778, -0.0137393336, -0.0090783522, 0.0, 0.0176822155, 0.0521224686, 0.5043387669, ]) numpy.testing.assert_allclose(fl_M_pas_callable(l_M_tilde), expected) @pytest.mark.skipif(jax is None, reason='JAX not installed') def test_lambdify_jax(self): fl_M_pas = FiberForceLengthPassiveDeGroote2016.with_defaults(self.l_M_tilde) fl_M_pas_callable = jax.jit(lambdify(self.l_M_tilde, fl_M_pas, 'jax')) l_M_tilde = jax.numpy.array([0.5, 0.8, 0.9, 1.0, 1.1, 1.2, 1.5]) expected = jax.numpy.array([ -0.0179917778, -0.0137393336, -0.0090783522, 0.0, 0.0176822155, 0.0521224686, 0.5043387669, ]) numpy.testing.assert_allclose(fl_M_pas_callable(l_M_tilde), expected)
TestFiberForceLengthPassiveDeGroote2016
python
walkccc__LeetCode
solutions/611. Valid Triangle Number/611.py
{ "start": 0, "end": 314 }
class ____:
  def triangleNumber(self, nums: list[int]) -> int:
    ans = 0

    nums.sort()

    for k in range(len(nums) - 1, 1, -1):
      i = 0
      j = k - 1
      while i < j:
        if nums[i] + nums[j] > nums[k]:
          ans += j - i
          j -= 1
        else:
          i += 1

    return ans
Solution
python
ray-project__ray
python/ray/train/v2/api/data_parallel_trainer.py
{ "start": 2397, "end": 13178 }
class ____: """Base class for distributed data parallel training on Ray. This class supports the SPMD parallelization pattern, where a single training function is executed in parallel across multiple workers, and different shards of data are processed by each worker. """ def __init__( self, train_loop_per_worker: Union[Callable[[], None], Callable[[Dict], None]], *, train_loop_config: Optional[Dict] = None, backend_config: Optional[BackendConfig] = None, scaling_config: Optional[ScalingConfig] = None, run_config: Optional[RunConfig] = None, datasets: Optional[Dict[str, GenDataset]] = None, dataset_config: Optional[DataConfig] = None, # TODO: [Deprecated] Remove in future release resume_from_checkpoint: Optional[Checkpoint] = None, metadata: Optional[Dict[str, Any]] = None, ): self.run_config = run_config or RunConfig() self.train_loop_per_worker = train_loop_per_worker self.train_loop_config = train_loop_config self.scaling_config = scaling_config or ScalingConfig() self.backend_config = backend_config or BackendConfig() self.datasets = datasets or {} self.data_config = dataset_config or DataConfig() self.running_in_local_mode = self.scaling_config.num_workers == 0 self.train_run_context = TrainRunContext( run_config=self.run_config, train_loop_config=self.train_loop_config, scaling_config=self.scaling_config, backend_config=self.backend_config, datasets=self.datasets, dataset_config=self.data_config, ) if resume_from_checkpoint is not None: raise DeprecationWarning(_RESUME_FROM_CHECKPOINT_DEPRECATION_WARNING) if metadata is not None: raise DeprecationWarning(_GET_METADATA_DEPRECATION_MESSAGE) self._validate_configs() usage_lib.record_library_usage("train") tag_train_v2_trainer(self) def _validate_configs(self): if not is_v2_enabled(): raise ValueError( f"Ray Train V2 must be enabled with `{V2_ENABLED_ENV_VAR}=1` " "when using this V2 Trainer API." ) from ray.train.v2.api.config import ( RunConfig as RunConfigV2, ScalingConfig as ScalingConfigV2, ) if not isinstance(self.run_config, RunConfigV2): raise ValueError( f"Invalid `RunConfig` type: {self.run_config.__class__}. " "Use `ray.train.RunConfig` instead. " "See this issue for more context: " "https://github.com/ray-project/ray/issues/49454" ) if not isinstance(self.scaling_config, ScalingConfigV2): raise ValueError( f"Invalid `ScalingConfig` type: {self.scaling_config.__class__}. " "Use `ray.train.ScalingConfig` instead. " "See this issue for more context: " "https://github.com/ray-project/ray/issues/49454" ) def _get_train_func(self) -> Callable[[], None]: return construct_train_func( self.train_loop_per_worker, config=self.train_loop_config, train_func_context=self.backend_config.train_func_context, fn_arg_name="train_loop_per_worker", ) def fit(self) -> Result: """Launches the Ray Train controller to run training on workers. Returns: A Result object containing the training result. Raises: ray.train.TrainingFailedError: This is a union of the ControllerError and WorkerGroupError. This returns a :class:`ray.train.ControllerError` if internal Ray Train controller logic encounters a non-retryable error or reaches the controller failure limit configured in `FailureConfig`. This returns a :class:`ray.train.WorkerGroupError` if one or more workers fail during training and reaches the worker group failure limit configured in `FailureConfig(max_failures)`. 
""" train_fn = self._get_train_func() if self.running_in_local_mode: return self._initialize_and_run_local_controller(train_fn) else: train_fn_ref = ObjectRefWrapper(train_fn) result = self._initialize_and_run_controller( train_fn_ref=train_fn_ref, scaling_policy=create_scaling_policy(self.scaling_config), failure_policy=create_failure_policy(self.run_config.failure_config), train_run_context=self.train_run_context, callbacks=self._create_default_callbacks(), ) if result.error: # NOTE: If the training run errored out, raise an error back to the # user's driver script. # For example, if the Train `FailurePolicy` runs out of retries, # and one of the workers errors. The controller will exit, and # the error will be raised here. raise result.error return result def _get_local_controller(self) -> LocalController: return LocalController( experiment_name=self.run_config.name, datasets=self.datasets, ) def _create_default_callbacks(self) -> List[RayTrainCallback]: # Initialize callbacks from environment variable callbacks = _initialize_env_callbacks() accelerator_setup_callback = AcceleratorSetupCallback( self.backend_config, self.scaling_config ) backend_setup_callback = BackendSetupCallback(self.backend_config) datasets_callback = DatasetsSetupCallback( train_run_context=self.train_run_context ) tpu_reservation_setup_callback = TPUReservationCallback() callbacks.extend( [ accelerator_setup_callback, tpu_reservation_setup_callback, backend_setup_callback, datasets_callback, ] ) if env_bool(RAY_CHDIR_TO_TRIAL_DIR, True): working_directory_setup_callback = WorkingDirectorySetupCallback() callbacks.append(working_directory_setup_callback) if env_bool(METRICS_ENABLED_ENV_VAR, True): callbacks.append(ControllerMetricsCallback()) callbacks.append(WorkerMetricsCallback(self.train_run_context)) if env_bool(RAY_TRAIN_ENABLE_STATE_TRACKING, False): callbacks.append(StateManagerCallback()) run_config_callbacks = ( self.run_config.callbacks if self.run_config.callbacks is not None else [] ) # Add internal callback that invokes all user-defined callbacks. user_callbacks = [ cb for cb in run_config_callbacks if isinstance(cb, UserCallback) ] callbacks.append( UserCallbackHandler( user_callbacks=user_callbacks, train_run_context=self.train_run_context ) ) # Append all other callbacks to the full list. This allows custom workarounds # built on top of internal callbacks to work. callbacks.extend( [cb for cb in run_config_callbacks if not isinstance(cb, UserCallback)] ) return callbacks def _initialize_and_run_local_controller( self, train_func: Callable[[], None] ) -> Result: return self._get_local_controller().run(train_func) def _initialize_and_run_controller(self, **controller_init_kwargs) -> Result: env_vars = get_env_vars_to_propagate() env_vars.setdefault( RAY_WARN_BLOCKING_GET_INSIDE_ASYNC_ENV_VAR, DEFAULT_RAY_WARN_BLOCKING_GET_INSIDE_ASYNC_VALUE, ) # Attach the controller to the node running the driver script. controller_actor_cls = ray.remote( num_cpus=0, scheduling_strategy=NodeAffinitySchedulingStrategy( node_id=ray.get_runtime_context().get_node_id(), soft=False ), # TODO: Extract env variables that affect controller behavior # and pass them as explicit args runtime_env={"env_vars": env_vars}, )(TrainController) controller = controller_actor_cls.remote(**controller_init_kwargs) # If this is not the main thread - as is the case when running in Tune - # registering the SIGINT handler raises an exception. 
if threading.current_thread() is threading.main_thread(): self._register_sigint_handler(controller) ray.get(controller.run.remote()) return ray.get(controller.get_result.remote()) def _register_sigint_handler(self, controller: ActorHandle[TrainController]): """Register SIGINT handler so user Ctrl C gracefully aborts run.""" sigint_count = 0 def sigint_handler(signum, frame): logger.info( "Received SIGINT. Gracefully aborting the training run — this " "may take a few seconds. To forcefully abort immediately, you " "can send a different signal, such as SIGKILL." ) nonlocal sigint_count sigint_count += 1 if sigint_count >= 3: logger.info( "Received SIGINT at least 3 times. " "Forcefully aborting the training run." ) sys.exit(0) if sigint_count <= 1: try: ray.get(controller.abort.remote()) except ray.exceptions.ActorDiedError: # We catch the error and exit 0 to indicate graceful termination. # However, for some reason the process still exits with 1. sys.exit(0) signal.signal(signal.SIGINT, sigint_handler) @classmethod @Deprecated def restore(cls, *args, **kwargs): """[Deprecated] Restores a Train experiment from a previously interrupted/failed run. This method is deprecated and will be removed in a future release. """ raise DeprecationWarning(_TRAINER_RESTORE_DEPRECATION_WARNING) @classmethod @Deprecated def can_restore(cls, *args, **kwargs): """[Deprecated] Checks if a Train experiment can be restored from a previously interrupted/failed run. This method is deprecated and will be removed in a future release. """ raise DeprecationWarning(_TRAINER_RESTORE_DEPRECATION_WARNING)
DataParallelTrainer
python
tensorflow__tensorflow
tensorflow/compiler/tests/pooling_ops_test.py
{ "start": 2235, "end": 9806 }
class ____(xla_test.XLATestCase): def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding, data_format, expected): """Verifies the output values of the pooling function. Args: pool_func: Function to be called, currently only co.MaxPool. input_sizes: Input tensor dimensions. ksize: The kernel size dimensions strides: The stride dimensions padding: Padding type. data_format: The data format we use to run the pooling operation. expected: An array containing the expected operation outputs. """ total_size = np.prod(input_sizes) # Initializes the input tensor with array containing incrementing # numbers from 1. x = np.array([f * 1.0 for f in range(1, total_size + 1)], dtype=np.float32) x = x.reshape(input_sizes) with self.session() as sess: with self.test_scope(): inputs = array_ops.placeholder(dtypes.float32) t = inputs if data_format == "NCHW": t = NHWCToNCHW(t) ksize = NHWCToNCHW(ksize) strides = NHWCToNCHW(strides) t = pool_func(t, ksize=ksize, strides=strides, padding=padding, data_format=data_format) if data_format == "NCHW": t = NCHWToNHWC(t) actual = sess.run(t, {inputs: x}) self.assertAllClose(expected, actual.flatten(), rtol=1e-5, atol=1e-6) def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding, expected): """Verifies the output values of the pooling function. Args: pool_func: Function to be called, co.MaxPool, co.AvgPool, or the Lua version. input_sizes: Input tensor dimensions. ksize: The kernel size dimensions strides: The stride dimensions padding: Padding type. expected: An array containing the expected operation outputs. """ for data_format in GetTestConfigs(): self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding, data_format, expected) def testMaxPoolValidPadding(self): expected_output = [13.0, 14.0, 15.0] self._VerifyValues(nn_ops.max_pool, input_sizes=[1, 3, 3, 3], ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID", expected=expected_output) def testMaxPoolSamePadding(self): expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0] self._VerifyValues(nn_ops.max_pool, input_sizes=[1, 2, 3, 3], ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME", expected=expected_output) def testMaxPoolSamePaddingNonSquareWindow(self): # input is: # [1.0, 2.0 # 3.0 4.0] # # Window of [x, x] should do: # # [max(1.0, 2.0), max(2.0, padded0), # max(3.0, 4.0), max(4.0, padded0)] self._VerifyValues( nn_ops.max_pool, input_sizes=[1, 2, 2, 1], ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1], padding="SAME", expected=[2.0, 2.0, 4.0, 4.0]) def testMaxPoolValidPaddingUnevenStride(self): self._VerifyValues( nn_ops.max_pool, input_sizes=[1, 4, 4, 1], ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1], padding="VALID", expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0]) self._VerifyValues( nn_ops.max_pool, input_sizes=[1, 4, 4, 1], ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1], padding="VALID", expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0]) def testMaxPoolSamePaddingFilter4(self): expected_output = [ 21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0, 61.0, 62.0, 63.0, 64.0 ] self._VerifyValues( nn_ops.max_pool, input_sizes=[1, 4, 4, 4], ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME", expected=expected_output) def testMaxPoolSamePaddingFilter8(self): expected_output = [ 145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 161.0, 162.0, 163.0, 164.0, 165.0, 166.0, 167.0, 168.0, 177.0, 178.0, 179.0, 180.0, 181.0, 182.0, 183.0, 184.0, 185.0, 186.0, 187.0, 188.0, 189.0, 190.0, 191.0, 192.0, 273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0, 289.0, 
290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0, 305.0, 306.0, 307.0, 308.0, 309.0, 310.0, 311.0, 312.0, 313.0, 314.0, 315.0, 316.0, 317.0, 318.0, 319.0, 320.0, 401.0, 402.0, 403.0, 404.0, 405.0, 406.0, 407.0, 408.0, 417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0, 433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0, 441.0, 442.0, 443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 465.0, 466.0, 467.0, 468.0, 469.0, 470.0, 471.0, 472.0, 481.0, 482.0, 483.0, 484.0, 485.0, 486.0, 487.0, 488.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0, 505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0 ] self._VerifyValues( nn_ops.max_pool, input_sizes=[1, 8, 8, 8], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME", expected=expected_output) # Tests for DepthwiseMaxPooling on CPU only. def testDepthwiseMaxPool1x1DepthWindow1(self): # input is: # [1.0, ..., 10.0] along depth, # # We maxpool by depth in patches of 2. self._VerifyValues( nn_ops.max_pool, input_sizes=[1, 1, 1, 10], ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2], padding="SAME", expected=[2.0, 4.0, 6.0, 8.0, 10.0]) def testDepthwiseMaxPool2x2DepthWindow3(self): # input is: # # a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2 # output. Each node has contiguous values, so the depthwise max # should be multiples of 3.0. self._VerifyValues( nn_ops.max_pool, input_sizes=[1, 2, 2, 6], ksize=[1, 1, 1, 3], strides=[1, 1, 1, 3], padding="SAME", expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0]) def testKernelSmallerThanStrideValid(self): self._VerifyValues( nn_ops.max_pool, input_sizes=[1, 7, 7, 1], ksize=[1, 2, 2, 1], strides=[1, 3, 3, 1], padding="VALID", expected=[9, 12, 30, 33]) def testKernelSmallerThanStrideSame(self): self._VerifyValues( nn_ops.max_pool, input_sizes=[1, 3, 3, 1], ksize=[1, 1, 1, 1], strides=[1, 2, 2, 1], padding="SAME", expected=[1, 3, 7, 9]) self._VerifyValues( nn_ops.max_pool, input_sizes=[1, 4, 4, 1], ksize=[1, 1, 1, 1], strides=[1, 2, 2, 1], padding="SAME", expected=[1, 3, 9, 11]) # Average pooling def testAvgPoolValidPadding(self): expected_output = [7, 8, 9] self._VerifyValues( nn_ops.avg_pool, input_sizes=[1, 3, 3, 3], ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID", expected=expected_output) def testAvgPoolSamePadding(self): expected_output = [7., 8., 9., 11.5, 12.5, 13.5] self._VerifyValues( nn_ops.avg_pool, input_sizes=[1, 2, 3, 3], ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME", expected=expected_output)
PoolingTest
python
walkccc__LeetCode
solutions/2561. Rearranging Fruits/2561.py
{ "start": 0, "end": 671 }
class ____:
  def minCost(self, basket1: list[int], basket2: list[int]) -> int:
    swapped = []
    count = collections.Counter(basket1)
    count.subtract(collections.Counter(basket2))

    for num, freq in count.items():
      if freq % 2 != 0:
        return -1
      swapped += [num] * abs(freq // 2)

    swapped.sort()
    minNum = min(min(basket1), min(basket2))
    # Other than directly swap basket1[i] and basket2[j], we can swap basket1[i]
    # with `minNum` first then swap `minNum` with basket2[j], and vice versa.
    # That's why we take min(2 * minNum, num) in the below.
    return sum(min(2 * minNum, num) for num in swapped[0:len(swapped) // 2])
Solution
python
huggingface__transformers
tests/models/recurrent_gemma/test_modeling_recurrent_gemma.py
{ "start": 4435, "end": 13288 }
class ____(unittest.TestCase): input_text = ["Hello I am doing", "Hi today"] input_long_text = ['<bos><s>Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a phone at the wreckage site. The two publications described the supposed video, but did not post it on their websites. The publications said that they watched the video, which was found by a source close to the investigation. "One can hear cries of \'My God\' in several languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt, editor-in-chief of Bild online. An official with France\'s accident investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col.'] # fmt: skip model_id = "google/recurrentgemma-2b" @require_read_token def test_2b_generate(self): EXPECTED_TEXTS = ['Hello I am doing a project on the topic of "The impact of the internet on the society" and I am looking for some information on the topic. I am looking for some information on the impact of the internet on the society. I am looking for some information on the impact of the internet on the society. I am looking for some', 'Hi today is a new app that allows you to make money by watching videos.\n\nThe app is very simple to use and you can earn money by watching videos.\n\nThe app is available for both Android and iOS devices and you can download it from the Google Play Store or the App Store.\n\nOnce you have downloaded the app'] # fmt: skip model = AutoModelForCausalLM.from_pretrained( self.model_id, ).to(torch_device) tokenizer = AutoTokenizer.from_pretrained(self.model_id) tokenizer.padding_side = "right" inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=64, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS) tokenizer.padding_side = "left" EXPECTED_TEXTS = ['Hello I am doing a project on the topic of "The impact of the internet on the society" and I am looking for some information on the topic. I am looking for some information on the impact of the internet on the society. I am looking for some information on the impact of the internet on the society. 
I am looking for some', 'Hi today I’m going to show you how to make a simple and easy to make a <strong>DIY</strong> <strong>DIY</strong> <strong>DIY</strong> <strong>DIY</strong> <strong>DIY</strong> <strong>DIY</strong> <strong>DIY</strong> <strong>DIY</strong> <strong>DIY</strong> <strong>DIY</strong> <strong>DIY</strong> <strong>DIY'] # fmt: skip inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=64, do_sample=False) del model output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS) model = AutoModelForCausalLM.from_pretrained(self.model_id, dtype=torch.float16).to(torch_device) output = model.generate(**inputs, max_new_tokens=64, do_sample=False) del model output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS) @require_read_token def test_2b_sample(self): set_seed(0) expectations = Expectations( { (None, None): [ "What is Deep learning ?\n\nDeep learning is the next frontier in computer vision. It is an Artificial Intelligence (AI) discipline that is rapidly being adopted across industries. The success of Deep" ], ("cuda", 8): [ "What is Deep learning ?\n\nDeep learning is the next frontier in computer vision, it’s an incredibly powerful branch of artificial intelligence.\n\nWhat is Dalle?\n\nDalle is", ], } ) EXPECTED_TEXT = expectations.get_expectation() model = AutoModelForCausalLM.from_pretrained(self.model_id).to(torch_device) tokenizer = AutoTokenizer.from_pretrained(self.model_id) inputs = tokenizer("What is Deep learning ?", return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=32, do_sample=True) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXT) @require_bitsandbytes @require_read_token def test_model_2b_8bit(self): # fmt: off EXPECTED_TEXTS = Expectations( { ("xpu", None): ['Hello I am doing a project on the topic of "The impact of the internet on the society" and I am stuck', "Hi today I'm going to show you how to make a simple and easy to make a 3D"], (None, None): ['Hello I am doing a project on the topic of "The impact of social media on the society" and I am looking', "Hi today I'm going to show you how to make a simple and easy to make a 3D"], } ) # fmt: on EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation() model = AutoModelForCausalLM.from_pretrained( "gg-hf/recurrent-gemma-2b-hf", device_map={"": torch_device}, quantization_config=BitsAndBytesConfig(load_in_8bit=True), dtype=torch.bfloat16, ) tokenizer = AutoTokenizer.from_pretrained(self.model_id, padding_side="left") inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXT) @require_read_token def test_long_context(self): EXPECTED_GENERATION = [' Jean-Paul Delannoy told CNN that the BEA is "not aware of any video footage that could have been taken on board the plane." He added that the BEA is "not aware of any video footage that could have been taken on board the plane." 
The BEA is the French equivalent of the National Transportation Safety Board'] # fmt: skip model = AutoModelForCausalLM.from_pretrained(self.model_id, dtype=torch.float16).to(torch_device) tokenizer = AutoTokenizer.from_pretrained(self.model_id, padding_side="left") inputs = tokenizer(self.input_long_text, return_tensors="pt").to(torch_device) output = model.generate(**inputs, max_new_tokens=64, do_sample=False) output_text = tokenizer.batch_decode(output[:, inputs.input_ids.shape[1] :], skip_special_tokens=True) print(output_text) self.assertEqual(output_text, EXPECTED_GENERATION) @require_read_token def test_longer_than_window(self): EXPECTED_GENERATION = [" Robin's comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French Alps. All 150 on board were killed. Paris Match and Bild reported that the"] # fmt: skip model = AutoModelForCausalLM.from_pretrained(self.model_id, dtype=torch.float16).to(torch_device) model.config.attention_window_size = 256 # Make the attention window size shorter than the current prompt tokenizer = AutoTokenizer.from_pretrained(self.model_id, padding_side="left") inputs = tokenizer(self.input_long_text, return_tensors="pt").to(torch_device) output = model.generate(**inputs, max_new_tokens=64, do_sample=False) output_text = tokenizer.batch_decode(output[:, inputs.input_ids.shape[1] :], skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_GENERATION)
RecurrentGemmaIntegrationTest
python
dagster-io__dagster
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/plus/constants.py
{ "start": 122, "end": 205 }
class ____(Enum):
    SERVERLESS = "SERVERLESS"
    HYBRID = "HYBRID"
DgPlusAgentType
python
huggingface__transformers
tests/models/marian/test_modeling_marian.py
{ "start": 20587, "end": 20952 }
class ____(MarianIntegrationTest):
    src = "ru"
    tgt = "fr"
    src_text = ["Он показал мне рукопись своей новой пьесы."]
    expected_text = ["Il m'a montré le manuscrit de sa nouvelle pièce."]

    @slow
    def test_batch_generation_ru_fr(self):
        self._assert_generated_batch_equal_expected()


@require_sentencepiece
@require_tokenizers
TestMarian_RU_FR