language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
pydata__xarray
xarray/tests/test_plot.py
{ "start": 104435, "end": 111471 }
class ____(PlotTestCase): @pytest.fixture(autouse=True) def setUp(self) -> None: das = [ DataArray( np.random.randn(3, 3, 4, 4), dims=["x", "row", "col", "hue"], coords=[range(k) for k in [3, 3, 4, 4]], ) for _ in [1, 2] ] ds = Dataset({"A": das[0], "B": das[1]}) ds.hue.name = "huename" ds.hue.attrs["units"] = "hunits" ds.x.attrs["units"] = "xunits" ds.col.attrs["units"] = "colunits" ds.row.attrs["units"] = "rowunits" ds.A.attrs["units"] = "Aunits" ds.B.attrs["units"] = "Bunits" self.ds = ds def test_accessor(self) -> None: from xarray.plot.accessor import DatasetPlotAccessor assert Dataset.plot is DatasetPlotAccessor assert isinstance(self.ds.plot, DatasetPlotAccessor) @pytest.mark.parametrize( "add_guide, hue_style, legend, colorbar", [ (None, None, False, True), (False, None, False, False), (True, None, False, True), (True, "continuous", False, True), (False, "discrete", False, False), (True, "discrete", True, False), ], ) def test_add_guide( self, add_guide: bool | None, hue_style: Literal["continuous", "discrete"] | None, legend: bool, colorbar: bool, ) -> None: meta_data = _infer_meta_data( self.ds, x="A", y="B", hue="hue", hue_style=hue_style, add_guide=add_guide, funcname="scatter", ) assert meta_data["add_legend"] is legend assert meta_data["add_colorbar"] is colorbar def test_facetgrid_shape(self) -> None: g = self.ds.plot.scatter(x="A", y="B", row="row", col="col") assert g.axs.shape == (len(self.ds.row), len(self.ds.col)) g = self.ds.plot.scatter(x="A", y="B", row="col", col="row") assert g.axs.shape == (len(self.ds.col), len(self.ds.row)) def test_default_labels(self) -> None: g = self.ds.plot.scatter(x="A", y="B", row="row", col="col", hue="hue") # Top row should be labeled for label, ax in zip(self.ds.coords["col"].values, g.axs[0, :], strict=True): assert substring_in_axes(str(label), ax) # Bottom row should have name of x array name and units for ax in g.axs[-1, :]: assert ax.get_xlabel() == "A [Aunits]" # Leftmost column should have name of y 
array name and units for ax in g.axs[:, 0]: assert ax.get_ylabel() == "B [Bunits]" def test_axes_in_faceted_plot(self) -> None: with pytest.raises(ValueError): self.ds.plot.scatter(x="A", y="B", row="row", ax=plt.axes()) def test_figsize_and_size(self) -> None: with pytest.raises(ValueError): self.ds.plot.scatter(x="A", y="B", row="row", size=3, figsize=(4, 3)) @pytest.mark.parametrize( "x, y, hue, add_legend, add_colorbar, error_type", [ pytest.param( "A", "The Spanish Inquisition", None, None, None, KeyError, id="bad_y" ), pytest.param( "The Spanish Inquisition", "B", None, None, True, ValueError, id="bad_x" ), ], ) def test_bad_args( self, x: Hashable, y: Hashable, hue: Hashable | None, add_legend: bool | None, add_colorbar: bool | None, error_type: type[Exception], ) -> None: with pytest.raises(error_type): self.ds.plot.scatter( x=x, y=y, hue=hue, add_legend=add_legend, add_colorbar=add_colorbar ) def test_datetime_hue(self) -> None: ds2 = self.ds.copy() # TODO: Currently plots as categorical, should it behave as numerical? ds2["hue"] = pd.date_range("2000-1-1", periods=4) ds2.plot.scatter(x="A", y="B", hue="hue") ds2["hue"] = pd.timedelta_range("-1D", periods=4, freq="D") ds2.plot.scatter(x="A", y="B", hue="hue") def test_facetgrid_hue_style(self) -> None: ds2 = self.ds.copy() # Numbers plots as continuous: g = ds2.plot.scatter(x="A", y="B", row="row", col="col", hue="hue") assert isinstance(g._mappables[-1], mpl.collections.PathCollection) # Datetimes plots as categorical: # TODO: Currently plots as categorical, should it behave as numerical? 
ds2["hue"] = pd.date_range("2000-1-1", periods=4) g = ds2.plot.scatter(x="A", y="B", row="row", col="col", hue="hue") assert isinstance(g._mappables[-1], mpl.collections.PathCollection) # Strings plots as categorical: ds2["hue"] = ["a", "a", "b", "b"] g = ds2.plot.scatter(x="A", y="B", row="row", col="col", hue="hue") assert isinstance(g._mappables[-1], mpl.collections.PathCollection) @pytest.mark.parametrize( ["x", "y", "hue", "markersize"], [("A", "B", "x", "col"), ("x", "row", "A", "B")], ) def test_scatter( self, x: Hashable, y: Hashable, hue: Hashable, markersize: Hashable ) -> None: self.ds.plot.scatter(x=x, y=y, hue=hue, markersize=markersize) with pytest.raises(ValueError, match=r"u, v"): self.ds.plot.scatter(x=x, y=y, u="col", v="row") def test_non_numeric_legend(self) -> None: ds2 = self.ds.copy() ds2["hue"] = ["a", "b", "c", "d"] pc = ds2.plot.scatter(x="A", y="B", markersize="hue") axes = pc.axes assert axes is not None # should make a discrete legend assert hasattr(axes, "legend_") assert axes.legend_ is not None def test_legend_labels(self) -> None: # regression test for #4126: incorrect legend labels ds2 = self.ds.copy() ds2["hue"] = ["a", "a", "b", "b"] pc = ds2.plot.scatter(x="A", y="B", markersize="hue") axes = pc.axes assert axes is not None legend = axes.get_legend() assert legend is not None actual = [t.get_text() for t in legend.texts] expected = ["hue", "a", "b"] assert actual == expected def test_legend_labels_facetgrid(self) -> None: ds2 = self.ds.copy() ds2["hue"] = ["d", "a", "c", "b"] g = ds2.plot.scatter(x="A", y="B", hue="hue", markersize="x", col="col") legend = g.figlegend assert legend is not None actual = tuple(t.get_text() for t in legend.texts) expected = ( "x [xunits]", "$\\mathdefault{0}$", "$\\mathdefault{1}$", "$\\mathdefault{2}$", ) assert actual == expected def test_add_legend_by_default(self) -> None: sc = self.ds.plot.scatter(x="A", y="B", hue="hue") fig = sc.figure assert fig is not None assert len(fig.axes) == 2
TestDatasetScatterPlots
python
neetcode-gh__leetcode
python/1845-seat-reservation-manager.py
{ "start": 14, "end": 285 }
class ____: def __init__(self, n: int): self.seats = [i for i in range(1, n + 1)] def reserve(self) -> int: return heapq.heappop(self.seats) def unreserve(self, seatNumber: int) -> None: heapq.heappush(self.seats, seatNumber)
SeatManager
python
PyCQA__pylint
tests/functional/m/method_hidden.py
{ "start": 1777, "end": 1883 }
class ____: def __init__(self): self._protected = None self._protected_two = None
Parent
python
dagster-io__dagster
examples/docs_snippets/docs_snippets/guides/components/shell-script-component/pythonic/2-shell-command-empty-no-model-dataclass.py
{ "start": 69, "end": 381 }
class ____(dg.Component, dg.Resolvable): """COMPONENT SUMMARY HERE. COMPONENT DESCRIPTION HERE. """ # Add schema fields here def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions: # Add definition construction logic here. return dg.Definitions()
ShellCommand
python
django__django
tests/migrations/test_migrations_manual_porting/0004_fourth.py
{ "start": 81, "end": 590 }
class ____(migrations.Migration): dependencies = [ ("migrations", "0002_second"), ] replaces = [ ("migrations", "0003_third"), ] operations = [ migrations.AlterUniqueTogether( name="somemodel", unique_together={("id", "name")}, ), migrations.AlterUniqueTogether( name="somemodel", unique_together={("name",)}, ), migrations.RunPython(forwards, migrations.RunPython.noop), ]
Migration
python
sqlalchemy__sqlalchemy
test/orm/test_dynamic.py
{ "start": 1278, "end": 4851 }
class ____: lazy = "dynamic" @testing.fixture def user_address_fixture(self): users, Address, addresses, User = ( self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User, ) def _user_address_fixture(addresses_args={}): self.mapper_registry.map_imperatively( User, users, properties={ "addresses": relationship( Address, lazy=self.lazy, **addresses_args ) }, ) self.mapper_registry.map_imperatively(Address, addresses) return User, Address yield _user_address_fixture @testing.fixture def order_item_fixture(self): def _order_item_fixture(items_args={}): items, Order, orders, order_items, Item = ( self.tables.items, self.classes.Order, self.tables.orders, self.tables.order_items, self.classes.Item, ) self.mapper_registry.map_imperatively( Order, orders, properties={ "items": relationship( Item, secondary=order_items, lazy=self.lazy, **items_args, ) }, ) self.mapper_registry.map_imperatively(Item, items) return Order, Item yield _order_item_fixture @testing.fixture def user_order_item_fixture(self): ( users, Keyword, items, order_items, item_keywords, Item, User, keywords, Order, orders, ) = ( self.tables.users, self.classes.Keyword, self.tables.items, self.tables.order_items, self.tables.item_keywords, self.classes.Item, self.classes.User, self.tables.keywords, self.classes.Order, self.tables.orders, ) def _user_order_item_fixture(): self.mapper_registry.map_imperatively( User, users, properties={ "orders": relationship( Order, order_by=orders.c.id, lazy=self.lazy ) }, ) self.mapper_registry.map_imperatively( Order, orders, properties={ "items": relationship( Item, secondary=order_items, order_by=items.c.id ), }, ) self.mapper_registry.map_imperatively( Item, items, properties={ "keywords": relationship( Keyword, secondary=item_keywords ) # m2m }, ) self.mapper_registry.map_imperatively(Keyword, keywords) return User, Order, Item, Keyword yield _user_order_item_fixture def _expect_no_iteration(self): return expect_raises_message( 
exc.InvalidRequestError, 'Collection "User.addresses" does not support implicit ' "iteration", )
_DynamicFixture
python
kamyu104__LeetCode-Solutions
Python/merge-in-between-linked-lists.py
{ "start": 151, "end": 697 }
class ____(object): def mergeInBetween(self, list1, a, b, list2): """ :type list1: ListNode :type a: int :type b: int :type list2: ListNode :rtype: ListNode """ prev_first, last = None, list1 for i in xrange(b): if i == a-1: prev_first = last last = last.next prev_first.next = list2 while list2.next: list2 = list2.next list2.next = last.next last.next = None return list1
Solution
python
numba__numba
numba/core/ir.py
{ "start": 29458, "end": 29672 }
class ____(Stmt): """Marker statement for a pop block op code""" def __init__(self, loc): assert isinstance(loc, Loc) self.loc = loc def __str__(self): return 'pop_block'
PopBlock
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/triggers/dataproc.py
{ "start": 24075, "end": 27262 }
class ____(DataprocBaseTrigger): """ Trigger that periodically polls information on a long running operation from Dataproc API to verify status. Implementation leverages asynchronous transport. """ def __init__(self, name: str, operation_type: str | None = None, **kwargs: Any): super().__init__(**kwargs) self.name = name self.operation_type = operation_type def serialize(self): return ( "airflow.providers.google.cloud.triggers.dataproc.DataprocOperationTrigger", { "name": self.name, "operation_type": self.operation_type, "project_id": self.project_id, "region": self.region, "gcp_conn_id": self.gcp_conn_id, "impersonation_chain": self.impersonation_chain, "polling_interval_seconds": self.polling_interval_seconds, }, ) async def run(self) -> AsyncIterator[TriggerEvent]: hook = self.get_async_hook() try: while True: operation = await hook.get_operation(region=self.region, operation_name=self.name) if operation.done: if operation.error.message: status = "error" message = operation.error.message else: status = "success" message = "Operation is successfully ended." if self.operation_type == DataprocOperationType.DIAGNOSE.value: gcs_regex = rb"gs:\/\/[a-z0-9][a-z0-9_-]{1,61}[a-z0-9_\-\/]*" gcs_uri_value = operation.response.value match = re.search(gcs_regex, gcs_uri_value) if match: output_uri = match.group(0).decode("utf-8", "ignore") else: output_uri = gcs_uri_value yield TriggerEvent( { "status": status, "message": message, "output_uri": output_uri, } ) else: yield TriggerEvent( { "operation_name": operation.name, "operation_done": operation.done, "status": status, "message": message, } ) return else: self.log.info("Sleeping for %s seconds.", self.polling_interval_seconds) await asyncio.sleep(self.polling_interval_seconds) except Exception as e: self.log.exception("Exception occurred while checking operation status.") yield TriggerEvent( { "status": "failed", "message": str(e), } )
DataprocOperationTrigger
python
tiangolo__fastapi
docs_src/request_form_models/tutorial001_an.py
{ "start": 124, "end": 278 }
class ____(BaseModel): username: str password: str @app.post("/login/") async def login(data: Annotated[FormData, Form()]): return data
FormData
python
numpy__numpy
numpy/_core/tests/test_umath.py
{ "start": 59528, "end": 78445 }
class ____: def test_exp_values(self): with np.errstate(under='raise', over='raise'): x = [np.nan, np.nan, np.inf, 0.] y = [np.nan, -np.nan, np.inf, -np.inf] for dt in ['e', 'f', 'd', 'g']: xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) assert_equal(np.exp(yf), xf) # See: https://github.com/numpy/numpy/issues/19192 @pytest.mark.xfail( _glibc_older_than("2.17"), reason="Older glibc versions may not raise appropriate FP exceptions" ) def test_exp_exceptions(self): with np.errstate(over='raise'): assert_raises(FloatingPointError, np.exp, np.float16(11.0899)) assert_raises(FloatingPointError, np.exp, np.float32(100.)) assert_raises(FloatingPointError, np.exp, np.float32(1E19)) assert_raises(FloatingPointError, np.exp, np.float64(800.)) assert_raises(FloatingPointError, np.exp, np.float64(1E19)) with np.errstate(under='raise'): assert_raises(FloatingPointError, np.exp, np.float16(-17.5)) assert_raises(FloatingPointError, np.exp, np.float32(-1000.)) assert_raises(FloatingPointError, np.exp, np.float32(-1E19)) assert_raises(FloatingPointError, np.exp, np.float64(-1000.)) assert_raises(FloatingPointError, np.exp, np.float64(-1E19)) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_log_values(self): with np.errstate(all='ignore'): x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan] y = [np.nan, -np.nan, np.inf, -np.inf, 0.0, -1.0] y1p = [np.nan, -np.nan, np.inf, -np.inf, -1.0, -2.0] for dt in ['e', 'f', 'd', 'g']: xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) yf1p = np.array(y1p, dtype=dt) assert_equal(np.log(yf), xf) assert_equal(np.log2(yf), xf) assert_equal(np.log10(yf), xf) assert_equal(np.log1p(yf1p), xf) with np.errstate(divide='raise'): for dt in ['e', 'f', 'd']: assert_raises(FloatingPointError, np.log, np.array(0.0, dtype=dt)) assert_raises(FloatingPointError, np.log2, np.array(0.0, dtype=dt)) assert_raises(FloatingPointError, np.log10, np.array(0.0, dtype=dt)) assert_raises(FloatingPointError, np.log1p, np.array(-1.0, 
dtype=dt)) with np.errstate(invalid='raise'): for dt in ['e', 'f', 'd']: assert_raises(FloatingPointError, np.log, np.array(-np.inf, dtype=dt)) assert_raises(FloatingPointError, np.log, np.array(-1.0, dtype=dt)) assert_raises(FloatingPointError, np.log2, np.array(-np.inf, dtype=dt)) assert_raises(FloatingPointError, np.log2, np.array(-1.0, dtype=dt)) assert_raises(FloatingPointError, np.log10, np.array(-np.inf, dtype=dt)) assert_raises(FloatingPointError, np.log10, np.array(-1.0, dtype=dt)) assert_raises(FloatingPointError, np.log1p, np.array(-np.inf, dtype=dt)) assert_raises(FloatingPointError, np.log1p, np.array(-2.0, dtype=dt)) # See https://github.com/numpy/numpy/issues/18005 with assert_no_warnings(): a = np.array(1e9, dtype='float32') np.log(a) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") @pytest.mark.parametrize('dtype', ['e', 'f', 'd', 'g']) def test_sincos_values(self, dtype): with np.errstate(all='ignore'): x = [np.nan, np.nan, np.nan, np.nan] y = [np.nan, -np.nan, np.inf, -np.inf] xf = np.array(x, dtype=dtype) yf = np.array(y, dtype=dtype) assert_equal(np.sin(yf), xf) assert_equal(np.cos(yf), xf) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") @pytest.mark.xfail( sys.platform.startswith("darwin"), reason="underflow is triggered for scalar 'sin'" ) def test_sincos_underflow(self): with np.errstate(under='raise'): underflow_trigger = np.array( float.fromhex("0x1.f37f47a03f82ap-511"), dtype=np.float64 ) np.sin(underflow_trigger) np.cos(underflow_trigger) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") @pytest.mark.parametrize('callable', [np.sin, np.cos]) @pytest.mark.parametrize('dtype', ['e', 'f', 'd']) @pytest.mark.parametrize('value', [np.inf, -np.inf]) def test_sincos_errors(self, callable, dtype, value): with np.errstate(invalid='raise'): assert_raises(FloatingPointError, callable, np.array([value], dtype=dtype)) @pytest.mark.parametrize('callable', [np.sin, np.cos]) 
@pytest.mark.parametrize('dtype', ['f', 'd']) @pytest.mark.parametrize('stride', [-1, 1, 2, 4, 5]) def test_sincos_overlaps(self, callable, dtype, stride): N = 100 M = N // abs(stride) rng = np.random.default_rng(42) x = rng.standard_normal(N, dtype) y = callable(x[::stride]) callable(x[::stride], out=x[:M]) assert_equal(x[:M], y) @pytest.mark.parametrize('dt', ['e', 'f', 'd', 'g']) def test_sqrt_values(self, dt): with np.errstate(all='ignore'): x = [np.nan, np.nan, np.inf, np.nan, 0.] y = [np.nan, -np.nan, np.inf, -np.inf, 0.] xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) assert_equal(np.sqrt(yf), xf) # with np.errstate(invalid='raise'): # assert_raises( # FloatingPointError, np.sqrt, np.array(-100., dtype=dt) # ) def test_abs_values(self): x = [np.nan, np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0] y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0] for dt in ['e', 'f', 'd', 'g']: xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) assert_equal(np.abs(yf), xf) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_square_values(self): x = [np.nan, np.nan, np.inf, np.inf] y = [np.nan, -np.nan, np.inf, -np.inf] with np.errstate(all='ignore'): for dt in ['e', 'f', 'd', 'g']: xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) assert_equal(np.square(yf), xf) with np.errstate(over='raise'): assert_raises(FloatingPointError, np.square, np.array(1E3, dtype='e')) assert_raises(FloatingPointError, np.square, np.array(1E32, dtype='f')) assert_raises(FloatingPointError, np.square, np.array(1E200, dtype='d')) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_reciprocal_values(self): with np.errstate(all='ignore'): x = [np.nan, np.nan, 0.0, -0.0, np.inf, -np.inf] y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.] 
for dt in ['e', 'f', 'd', 'g']: xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) assert_equal(np.reciprocal(yf), xf) with np.errstate(divide='raise'): for dt in ['e', 'f', 'd', 'g']: assert_raises(FloatingPointError, np.reciprocal, np.array(-0.0, dtype=dt)) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_tan(self): with np.errstate(all='ignore'): in_ = [np.nan, -np.nan, 0.0, -0.0, np.inf, -np.inf] out = [np.nan, np.nan, 0.0, -0.0, np.nan, np.nan] for dt in ['e', 'f', 'd']: in_arr = np.array(in_, dtype=dt) out_arr = np.array(out, dtype=dt) assert_equal(np.tan(in_arr), out_arr) with np.errstate(invalid='raise'): for dt in ['e', 'f', 'd']: assert_raises(FloatingPointError, np.tan, np.array(np.inf, dtype=dt)) assert_raises(FloatingPointError, np.tan, np.array(-np.inf, dtype=dt)) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_arcsincos(self): with np.errstate(all='ignore'): in_ = [np.nan, -np.nan, np.inf, -np.inf] out = [np.nan, np.nan, np.nan, np.nan] for dt in ['e', 'f', 'd']: in_arr = np.array(in_, dtype=dt) out_arr = np.array(out, dtype=dt) assert_equal(np.arcsin(in_arr), out_arr) assert_equal(np.arccos(in_arr), out_arr) for callable in [np.arcsin, np.arccos]: for value in [np.inf, -np.inf, 2.0, -2.0]: for dt in ['e', 'f', 'd']: with np.errstate(invalid='raise'): assert_raises(FloatingPointError, callable, np.array(value, dtype=dt)) def test_arctan(self): with np.errstate(all='ignore'): in_ = [np.nan, -np.nan] out = [np.nan, np.nan] for dt in ['e', 'f', 'd']: in_arr = np.array(in_, dtype=dt) out_arr = np.array(out, dtype=dt) assert_equal(np.arctan(in_arr), out_arr) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_sinh(self): in_ = [np.nan, -np.nan, np.inf, -np.inf] out = [np.nan, np.nan, np.inf, -np.inf] for dt in ['e', 'f', 'd']: in_arr = np.array(in_, dtype=dt) out_arr = np.array(out, dtype=dt) assert_equal(np.sinh(in_arr), out_arr) with np.errstate(over='raise'): 
assert_raises(FloatingPointError, np.sinh, np.array(12.0, dtype='e')) assert_raises(FloatingPointError, np.sinh, np.array(120.0, dtype='f')) assert_raises(FloatingPointError, np.sinh, np.array(1200.0, dtype='d')) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") @pytest.mark.skipif('bsd' in sys.platform, reason="fallback implementation may not raise, see gh-2487") def test_cosh(self): in_ = [np.nan, -np.nan, np.inf, -np.inf] out = [np.nan, np.nan, np.inf, np.inf] for dt in ['e', 'f', 'd']: in_arr = np.array(in_, dtype=dt) out_arr = np.array(out, dtype=dt) assert_equal(np.cosh(in_arr), out_arr) with np.errstate(over='raise'): assert_raises(FloatingPointError, np.cosh, np.array(12.0, dtype='e')) assert_raises(FloatingPointError, np.cosh, np.array(120.0, dtype='f')) assert_raises(FloatingPointError, np.cosh, np.array(1200.0, dtype='d')) def test_tanh(self): in_ = [np.nan, -np.nan, np.inf, -np.inf] out = [np.nan, np.nan, 1.0, -1.0] for dt in ['e', 'f', 'd']: in_arr = np.array(in_, dtype=dt) out_arr = np.array(out, dtype=dt) assert_array_max_ulp(np.tanh(in_arr), out_arr, 3) def test_arcsinh(self): in_ = [np.nan, -np.nan, np.inf, -np.inf] out = [np.nan, np.nan, np.inf, -np.inf] for dt in ['e', 'f', 'd']: in_arr = np.array(in_, dtype=dt) out_arr = np.array(out, dtype=dt) assert_equal(np.arcsinh(in_arr), out_arr) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_arccosh(self): with np.errstate(all='ignore'): in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, 0.0] out = [np.nan, np.nan, np.inf, np.nan, 0.0, np.nan] for dt in ['e', 'f', 'd']: in_arr = np.array(in_, dtype=dt) out_arr = np.array(out, dtype=dt) assert_equal(np.arccosh(in_arr), out_arr) for value in [0.0, -np.inf]: with np.errstate(invalid='raise'): for dt in ['e', 'f', 'd']: assert_raises(FloatingPointError, np.arccosh, np.array(value, dtype=dt)) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_arctanh(self): with np.errstate(all='ignore'): in_ 
= [np.nan, -np.nan, np.inf, -np.inf, 1.0, -1.0, 2.0] out = [np.nan, np.nan, np.nan, np.nan, np.inf, -np.inf, np.nan] for dt in ['e', 'f', 'd']: in_arr = np.array(in_, dtype=dt) out_arr = np.array(out, dtype=dt) assert_equal(np.arctanh(in_arr), out_arr) for value in [1.01, np.inf, -np.inf, 1.0, -1.0]: with np.errstate(invalid='raise', divide='raise'): for dt in ['e', 'f', 'd']: assert_raises(FloatingPointError, np.arctanh, np.array(value, dtype=dt)) # Make sure glibc < 2.18 atanh is not used, issue 25087 assert np.signbit(np.arctanh(-1j).real) # See: https://github.com/numpy/numpy/issues/20448 @pytest.mark.xfail( _glibc_older_than("2.17"), reason="Older glibc versions may not raise appropriate FP exceptions" ) def test_exp2(self): with np.errstate(all='ignore'): in_ = [np.nan, -np.nan, np.inf, -np.inf] out = [np.nan, np.nan, np.inf, 0.0] for dt in ['e', 'f', 'd']: in_arr = np.array(in_, dtype=dt) out_arr = np.array(out, dtype=dt) assert_equal(np.exp2(in_arr), out_arr) for value in [2000.0, -2000.0]: with np.errstate(over='raise', under='raise'): for dt in ['e', 'f', 'd']: assert_raises(FloatingPointError, np.exp2, np.array(value, dtype=dt)) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_expm1(self): with np.errstate(all='ignore'): in_ = [np.nan, -np.nan, np.inf, -np.inf] out = [np.nan, np.nan, np.inf, -1.0] for dt in ['e', 'f', 'd']: in_arr = np.array(in_, dtype=dt) out_arr = np.array(out, dtype=dt) assert_equal(np.expm1(in_arr), out_arr) for value in [200.0, 2000.0]: with np.errstate(over='raise'): for dt in ['e', 'f']: assert_raises(FloatingPointError, np.expm1, np.array(value, dtype=dt)) # test to ensure no spurious FP exceptions are raised due to SIMD INF_INVALID_ERR = [ np.cos, np.sin, np.tan, np.arccos, np.arcsin, np.spacing, np.arctanh ] NEG_INVALID_ERR = [ np.log, np.log2, np.log10, np.log1p, np.sqrt, np.arccosh, np.arctanh ] ONE_INVALID_ERR = [ np.arctanh, ] LTONE_INVALID_ERR = [ np.arccosh, ] BYZERO_ERR = [ np.log, np.log2, 
np.log10, np.reciprocal, np.arccosh ] @pytest.mark.parametrize("ufunc", UFUNCS_UNARY_FP) @pytest.mark.parametrize("dtype", ('e', 'f', 'd')) @pytest.mark.parametrize("data, escape", ( ([0.03], LTONE_INVALID_ERR), ([0.03] * 32, LTONE_INVALID_ERR), # neg ([-1.0], NEG_INVALID_ERR), ([-1.0] * 32, NEG_INVALID_ERR), # flat ([1.0], ONE_INVALID_ERR), ([1.0] * 32, ONE_INVALID_ERR), # zero ([0.0], BYZERO_ERR), ([0.0] * 32, BYZERO_ERR), ([-0.0], BYZERO_ERR), ([-0.0] * 32, BYZERO_ERR), # nan ([0.5, 0.5, 0.5, np.nan], LTONE_INVALID_ERR), ([0.5, 0.5, 0.5, np.nan] * 32, LTONE_INVALID_ERR), ([np.nan, 1.0, 1.0, 1.0], ONE_INVALID_ERR), ([np.nan, 1.0, 1.0, 1.0] * 32, ONE_INVALID_ERR), ([np.nan], []), ([np.nan] * 32, []), # inf ([0.5, 0.5, 0.5, np.inf], INF_INVALID_ERR + LTONE_INVALID_ERR), ([0.5, 0.5, 0.5, np.inf] * 32, INF_INVALID_ERR + LTONE_INVALID_ERR), ([np.inf, 1.0, 1.0, 1.0], INF_INVALID_ERR), ([np.inf, 1.0, 1.0, 1.0] * 32, INF_INVALID_ERR), ([np.inf], INF_INVALID_ERR), ([np.inf] * 32, INF_INVALID_ERR), # ninf ([0.5, 0.5, 0.5, -np.inf], NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR), ([0.5, 0.5, 0.5, -np.inf] * 32, NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR), ([-np.inf, 1.0, 1.0, 1.0], NEG_INVALID_ERR + INF_INVALID_ERR), ([-np.inf, 1.0, 1.0, 1.0] * 32, NEG_INVALID_ERR + INF_INVALID_ERR), ([-np.inf], NEG_INVALID_ERR + INF_INVALID_ERR), ([-np.inf] * 32, NEG_INVALID_ERR + INF_INVALID_ERR), )) def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape): if escape and ufunc in escape: return # FIXME: NAN raises FP invalid exception: # - ceil/float16 on MSVC:32-bit # - spacing/float16 on almost all platforms # - spacing/float32,float64 on Windows MSVC with VS2022 if ufunc in (np.spacing, np.ceil) and dtype == 'e': return # Skip spacing tests with NaN on Windows MSVC (all dtypes) import platform if (ufunc == np.spacing and platform.system() == 'Windows' and any(np.isnan(d) if isinstance(d, (int, float)) else False for d in data)): pytest.skip("spacing with 
NaN generates warnings on Windows/VS2022") array = np.array(data, dtype=dtype) with assert_no_warnings(): ufunc(array) @pytest.mark.parametrize("dtype", ('e', 'f', 'd')) def test_divide_spurious_fpexception(self, dtype): dt = np.dtype(dtype) dt_info = np.finfo(dt) subnorm = dt_info.smallest_subnormal # Verify a bug fix caused due to filling the remaining lanes of the # partially loaded dividend SIMD vector with ones, which leads to # raising an overflow warning when the divisor is denormal. # see https://github.com/numpy/numpy/issues/25097 with assert_no_warnings(): np.zeros(128 + 1, dtype=dt) / subnorm
TestSpecialFloats
python
mwaskom__seaborn
seaborn/_core/typing.py
{ "start": 1481, "end": 1601 }
class ____: def __repr__(self): return "<deprecated>" default = Default() deprecated = Deprecated()
Deprecated
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/dataplex.py
{ "start": 152786, "end": 158140 }
class ____(DataplexCatalogBaseOperator): """ Create an Entry resource. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:DataplexCatalogCreateEntryOperator` :param entry_id: Required. Entry identifier. It has to be unique within an Entry Group. Entries corresponding to Google Cloud resources use an Entry ID format based on `full resource names <https://cloud.google.com/apis/design/resource_names#full_resource_name>`__. The format is a full resource name of the resource without the prefix double slashes in the API service name part of the full resource name. This allows retrieval of entries using their associated resource name. For example, if the full resource name of a resource is ``//library.googleapis.com/shelves/shelf1/books/book2``, then the suggested entry_id is ``library.googleapis.com/shelves/shelf1/books/book2``. It is also suggested to follow the same convention for entries corresponding to resources from providers or systems other than Google Cloud. The maximum size of the field is 4000 characters. :param entry_group_id: Required. EntryGroup resource name to which created Entry will belong to. :param entry_configuration: Required. Entry configuration. For more details please see API documentation: https://cloud.google.com/dataplex/docs/reference/rest/v1/projects.locations.entryGroups.entries#Entry :param project_id: Required. The ID of the Google Cloud project where the service is used. :param location: Required. The ID of the Google Cloud region where the service is used. :param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud. :param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not be retried. :param timeout: Optional. The amount of time, in seconds, to wait for the request to complete. Note that if `retry` is specified, the timeout applies to each individual attempt. :param metadata: Optional. 
Additional metadata that is provided to the method. :param impersonation_chain: Optional. Service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). """ template_fields: Sequence[str] = tuple( {"entry_id", "entry_group_id", "entry_configuration"} | set(DataplexCatalogBaseOperator.template_fields) ) operator_extra_links = (DataplexCatalogEntryLink(),) def __init__( self, entry_id: str, entry_group_id: str, entry_configuration: Entry | dict, *args, **kwargs, ) -> None: super().__init__(*args, **kwargs) self.entry_id = entry_id self.entry_group_id = entry_group_id self.entry_configuration = entry_configuration def _validate_fields(self, entry_configuration): required_fields = ["name", "entry_type"] missing_fields = [field for field in required_fields if not entry_configuration.get(field)] if missing_fields: raise AirflowException( f"Missing required fields in Entry configuration: {', '.join(missing_fields)}. 
" ) @property def extra_links_params(self) -> dict[str, Any]: return { **super().extra_links_params, "entry_id": self.entry_id, "entry_group_id": self.entry_group_id, } def execute(self, context: Context): DataplexCatalogEntryLink.persist(context=context) self._validate_fields(self.entry_configuration) try: entry = self.hook.create_entry( entry_id=self.entry_id, entry_group_id=self.entry_group_id, entry_configuration=self.entry_configuration, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) except AlreadyExists: entry = self.hook.get_entry( entry_id=self.entry_id, entry_group_id=self.entry_group_id, location=self.location, project_id=self.project_id, ) self.log.info( "Dataplex Catalog Entry %s already exists.", self.entry_id, ) result = Entry.to_dict(entry) return result except Exception as ex: raise AirflowException(ex) else: result = Entry.to_dict(entry) self.log.info("Dataplex Catalog Entry %s was successfully created.", self.entry_id) return result
DataplexCatalogCreateEntryOperator
python
tensorflow__tensorflow
tensorflow/python/debug/lib/debug_events_reader.py
{ "start": 13105, "end": 14541 }
class ____(BaseDigest): """Light-weight digest summarizing top-level execution event. Use `DebugDataReader.read_execution(execution_digest)` to load the more detailed data object concerning the execution event (`Execution`). Properties: op_type: Type name of the executed op. In the case of the eager execution of an individual op, it is the name of the op (e.g., "MatMul"). In the case of the execution of a tf.function (FuncGraph), this is the internally-generated name of the function (e.g., "__inference_my_func_123"). output_tensor_device_ids: IDs of the devices on which the output tensors of the execution reside. For no-output execution, this is `None`. """ def __init__(self, wall_time, locator, op_type, output_tensor_device_ids=None): super().__init__(wall_time, locator) self._op_type = op_type self._output_tensor_device_ids = _tuple_or_none(output_tensor_device_ids) @property def op_type(self): return self._op_type @property def output_tensor_device_ids(self): return self._output_tensor_device_ids def to_json(self): output = super().to_json() output.update({ "op_type": self.op_type, "output_tensor_device_ids": self.output_tensor_device_ids, }) return output def _tuple_or_none(data): return tuple(data) if data else None
ExecutionDigest
python
huggingface__transformers
src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py
{ "start": 54992, "end": 60613 }
class ____(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[BigBirdPegasusConfig] = None, layer_idx: Optional[int] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once( f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and " "will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." 
) self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, cache_position: Optional[torch.Tensor] = None, # TODO: we need a refactor so that the different attention modules can get their specific kwargs # ATM, we have mixed things encoder, decoder, and encoder-decoder attn **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None # determine input shapes bsz, tgt_len = hidden_states.shape[:-1] src_len = key_value_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) # get query proj query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) is_updated = False if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache curr_past_key_values = past_key_values.cross_attention_cache else: curr_past_key_values = past_key_values.self_attention_cache else: curr_past_key_values = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_states = curr_past_key_values.layers[self.layer_idx].keys 
value_states = curr_past_key_values.layers[self.layer_idx].values else: key_states = self.k_proj(current_states) value_states = self.v_proj(current_states) key_states = key_states.view(*kv_input_shape).transpose(1, 2) value_states = value_states.view(*kv_input_shape).transpose(1, 2) if past_key_values is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_values.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): past_key_values.is_updated[self.layer_idx] = True attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, **kwargs, ) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return attn_output, attn_weights
BigBirdPegasusDecoderAttention
python
pytorch__pytorch
torch/_numpy/_dtypes.py
{ "start": 1059, "end": 1108 }
class ____(inexact): name = "floating"
floating
python
dagster-io__dagster
python_modules/libraries/dagster-dg-core/dagster_dg_core/check.py
{ "start": 1834, "end": 7776 }
class ____(NamedTuple): object_key: Optional[EnvRegistryKey] error: "ValidationError" source_position_tree: ValueAndSourcePositionTree def check_yaml( dg_context: DgContext, resolved_paths: Sequence[Path], validate_requirements: bool, ) -> bool: # defer for import performance from jsonschema import Draft202012Validator, ValidationError top_level_component_validator = Draft202012Validator(schema=COMPONENT_FILE_SCHEMA) validation_errors: list[ErrorInput] = [] all_specified_env_var_deps = set() component_contents_by_key: dict[EnvRegistryKey, Any] = {} modules_to_fetch = set() for component_dir in dg_context.defs_path.rglob("*"): if resolved_paths and not any( path == component_dir or path in component_dir.parents for path in resolved_paths ): continue defs_yaml_path = component_dir / "defs.yaml" component_yaml_path = component_dir / "component.yaml" yaml_path = ( defs_yaml_path if defs_yaml_path.exists() else component_yaml_path if component_yaml_path.exists() else None ) if yaml_path: text = yaml_path.read_text() try: component_doc_trees = parse_yamls_with_source_position( text, filename=str(yaml_path) ) except ScannerError as se: validation_errors.append( ErrorInput( None, ValidationError(f"Unable to parse YAML: {se.context}, {se.problem}"), _scaffold_value_and_source_position_tree( filename=str(yaml_path), row=se.problem_mark.line + 1 if se.problem_mark else 1, col=se.problem_mark.column + 1 if se.problem_mark else 1, ), ) ) continue # Validate each YAML document in multi-document files for component_doc_tree in component_doc_trees: if validate_requirements: specified_env_var_deps = get_specified_env_var_deps(component_doc_tree.value) used_env_vars = get_used_env_vars(component_doc_tree.value) all_specified_env_var_deps.update(specified_env_var_deps) if used_env_vars - specified_env_var_deps: msg = ( "Component uses environment variables that are not specified in the component file: " + ", ".join(sorted(used_env_vars - specified_env_var_deps)) ) 
validation_errors.append( ErrorInput( None, ValidationError( msg, path=["requirements", "env"], ), component_doc_tree, ) ) # First, validate the top-level structure of the component file # (type and params keys) before we try to validate the params themselves. top_level_errs = list( top_level_component_validator.iter_errors(component_doc_tree.value) ) for err in top_level_errs: validation_errors.append(ErrorInput(None, err, component_doc_tree)) if top_level_errs: continue raw_key = component_doc_tree.value.get("type") component_instance_module = dg_context.get_component_instance_module_name( component_dir.name ) qualified_key = ( f"{component_instance_module}{raw_key}" if raw_key.startswith(".") else raw_key ) key = EnvRegistryKey.from_typename(qualified_key) component_contents_by_key[key] = component_doc_tree # Add every module referenced to be explicitly fetched. If we don't do this, only # modules that are explicitly declared as registry modules will work. # `from_dg_context()` on the registry ensures that modules aren't double-fetched. 
modules_to_fetch.add(key.namespace) # Fetch the local component types, if we need any local components component_registry = EnvRegistry.from_dg_context( dg_context, extra_modules=list(modules_to_fetch) ) for key, component_doc_tree in component_contents_by_key.items(): try: json_schema = component_registry.get(key).component_schema or {} v = Draft202012Validator(json_schema) for err in v.iter_errors(component_doc_tree.value.get("attributes", {})): validation_errors.append(ErrorInput(key, err, component_doc_tree)) except KeyError: # No matching component type found validation_errors.append( ErrorInput( None, ValidationError(f"Component type '{key.to_typename()}' not found."), component_doc_tree, ) ) if validation_errors: for key, error, component_doc_tree in validation_errors: click.echo( error_dict_to_formatted_error( key, error, source_position_tree=component_doc_tree.source_position_tree, prefix=["attributes"] if key else [], ) ) return False else: click.echo("All component YAML validated successfully.") return True
ErrorInput
python
sqlalchemy__sqlalchemy
test/base/test_utils.py
{ "start": 3710, "end": 5943 }
class ____(fixtures.TestBase): def test_odict(self): o = util.OrderedDict() o["a"] = 1 o["b"] = 2 o["snack"] = "attack" o["c"] = 3 eq_(list(o.keys()), ["a", "b", "snack", "c"]) eq_(list(o.values()), [1, 2, "attack", 3]) o.pop("snack") eq_(list(o.keys()), ["a", "b", "c"]) eq_(list(o.values()), [1, 2, 3]) try: o.pop("eep") assert False except KeyError: pass eq_(o.pop("eep", "woot"), "woot") try: o.pop("whiff", "bang", "pow") assert False except TypeError: pass eq_(list(o.keys()), ["a", "b", "c"]) eq_(list(o.values()), [1, 2, 3]) o2 = util.OrderedDict(d=4) o2["e"] = 5 eq_(list(o2.keys()), ["d", "e"]) eq_(list(o2.values()), [4, 5]) o.update(o2) eq_(list(o.keys()), ["a", "b", "c", "d", "e"]) eq_(list(o.values()), [1, 2, 3, 4, 5]) o.setdefault("c", "zzz") o.setdefault("f", 6) eq_(list(o.keys()), ["a", "b", "c", "d", "e", "f"]) eq_(list(o.values()), [1, 2, 3, 4, 5, 6]) def test_odict_constructor(self): o = util.OrderedDict( [("name", "jbe"), ("fullname", "jonathan"), ("password", "")] ) eq_(list(o.keys()), ["name", "fullname", "password"]) def test_odict_copy(self): o = util.OrderedDict() o["zzz"] = 1 o["aaa"] = 2 eq_(list(o.keys()), ["zzz", "aaa"]) o2 = o.copy() eq_(list(o2.keys()), list(o.keys())) o3 = copy.copy(o) eq_(list(o3.keys()), list(o.keys())) def test_no_sort_legacy_dictionary(self): d1 = {"c": 1, "b": 2, "a": 3} util.sort_dictionary(d1) eq_(list(d1), ["a", "b", "c"]) def test_sort_dictionary(self): o = util.OrderedDict() o["za"] = 1 o["az"] = 2 o["cc"] = 3 eq_( list(o), ["za", "az", "cc"], ) util.sort_dictionary(o) eq_(list(o), ["az", "cc", "za"]) util.sort_dictionary(o, lambda key: key[1]) eq_(list(o), ["za", "cc", "az"])
OrderedDictTest
python
Pylons__pyramid
tests/test_util.py
{ "start": 25465, "end": 26168 }
class ____(unittest.TestCase): def _callFUT(self, val): from pyramid.util import get_callable_name return get_callable_name(val) def test_valid_ascii_bytes(self): name = b'hello world' self.assertEqual(self._callFUT(name), 'hello world') def test_valid_ascii_string(self): from pyramid.exceptions import ConfigurationError name = b'La Pe\xc3\xb1a'.decode('utf-8') self.assertRaises(ConfigurationError, self._callFUT, name) def test_invalid_ascii(self): from pyramid.exceptions import ConfigurationError name = b'La Pe\xc3\xb1a' self.assertRaises(ConfigurationError, self._callFUT, name)
TestCallableName
python
scipy__scipy
scipy/stats/_odds_ratio.py
{ "start": 5029, "end": 17005 }
class ____: """ Result of `scipy.stats.contingency.odds_ratio`. See the docstring for `odds_ratio` for more details. Attributes ---------- statistic : float The computed odds ratio. * If `kind` is ``'sample'``, this is sample (or unconditional) estimate, given by ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``. * If `kind` is ``'conditional'``, this is the conditional maximum likelihood estimate for the odds ratio. It is the noncentrality parameter of Fisher's noncentral hypergeometric distribution with the same hypergeometric parameters as `table` and whose mean is ``table[0, 0]``. Methods ------- confidence_interval : Confidence interval for the odds ratio. """ def __init__(self, _table, _kind, statistic): # for now, no need to make _table and _kind public, since this sort of # information is returned in very few `scipy.stats` results self._table = _table self._kind = _kind self.statistic = statistic def __repr__(self): return f"OddsRatioResult(statistic={self.statistic})" def confidence_interval(self, confidence_level=0.95, alternative='two-sided'): """ Confidence interval for the odds ratio. Parameters ---------- confidence_level: float Desired confidence level for the confidence interval. The value must be given as a fraction between 0 and 1. Default is 0.95 (meaning 95%). alternative : {'two-sided', 'less', 'greater'}, optional The alternative hypothesis of the hypothesis test to which the confidence interval corresponds. That is, suppose the null hypothesis is that the true odds ratio equals ``OR`` and the confidence interval is ``(low, high)``. Then the following options for `alternative` are available (default is 'two-sided'): * 'two-sided': the true odds ratio is not equal to ``OR``. There is evidence against the null hypothesis at the chosen `confidence_level` if ``high < OR`` or ``low > OR``. * 'less': the true odds ratio is less than ``OR``. 
The ``low`` end of the confidence interval is 0, and there is evidence against the null hypothesis at the chosen `confidence_level` if ``high < OR``. * 'greater': the true odds ratio is greater than ``OR``. The ``high`` end of the confidence interval is ``np.inf``, and there is evidence against the null hypothesis at the chosen `confidence_level` if ``low > OR``. Returns ------- ci : ``ConfidenceInterval`` instance The confidence interval, represented as an object with attributes ``low`` and ``high``. Notes ----- When `kind` is ``'conditional'``, the limits of the confidence interval are the conditional "exact confidence limits" as described by Fisher [1]_. The conditional odds ratio and confidence interval are also discussed in Section 4.1.2 of the text by Sahai and Khurshid [2]_. When `kind` is ``'sample'``, the confidence interval is computed under the assumption that the logarithm of the odds ratio is normally distributed with standard error given by:: se = sqrt(1/a + 1/b + 1/c + 1/d) where ``a``, ``b``, ``c`` and ``d`` are the elements of the contingency table. (See, for example, [2]_, section 3.1.3.2, or [3]_, section 2.3.3). References ---------- .. [1] R. A. Fisher (1935), The logic of inductive inference, Journal of the Royal Statistical Society, Vol. 98, No. 1, pp. 39-82. .. [2] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology: Methods, Techniques, and Applications, CRC Press LLC, Boca Raton, Florida. .. [3] Alan Agresti, An Introduction to Categorical Data Analysis (second edition), Wiley, Hoboken, NJ, USA (2007). 
""" if alternative not in ['two-sided', 'less', 'greater']: raise ValueError("`alternative` must be 'two-sided', 'less' or " "'greater'.") if confidence_level < 0 or confidence_level > 1: raise ValueError('confidence_level must be between 0 and 1') if self._kind == 'conditional': ci = self._conditional_odds_ratio_ci(confidence_level, alternative) else: ci = self._sample_odds_ratio_ci(confidence_level, alternative) return ci def _conditional_odds_ratio_ci(self, confidence_level=0.95, alternative='two-sided'): """ Confidence interval for the conditional odds ratio. """ table = self._table if 0 in table.sum(axis=0) or 0 in table.sum(axis=1): # If both values in a row or column are zero, the p-value is 1, # the odds ratio is NaN and the confidence interval is (0, inf). ci = (0, np.inf) else: ci = _conditional_oddsratio_ci(table, confidence_level=confidence_level, alternative=alternative) return ConfidenceInterval(low=ci[0], high=ci[1]) def _sample_odds_ratio_ci(self, confidence_level=0.95, alternative='two-sided'): """ Confidence interval for the sample odds ratio. """ if confidence_level < 0 or confidence_level > 1: raise ValueError('confidence_level must be between 0 and 1') table = self._table if 0 in table.sum(axis=0) or 0 in table.sum(axis=1): # If both values in a row or column are zero, the p-value is 1, # the odds ratio is NaN and the confidence interval is (0, inf). ci = (0, np.inf) else: ci = _sample_odds_ratio_ci(table, confidence_level=confidence_level, alternative=alternative) return ConfidenceInterval(low=ci[0], high=ci[1]) def odds_ratio(table, *, kind='conditional'): r""" Compute the odds ratio for a 2x2 contingency table. Parameters ---------- table : array_like of ints A 2x2 contingency table. Elements must be non-negative integers. kind : str, optional Which kind of odds ratio to compute, either the sample odds ratio (``kind='sample'``) or the conditional odds ratio (``kind='conditional'``). Default is ``'conditional'``. 
Returns ------- result : `~scipy.stats._result_classes.OddsRatioResult` instance The returned object has two computed attributes: statistic : float * If `kind` is ``'sample'``, this is sample (or unconditional) estimate, given by ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``. * If `kind` is ``'conditional'``, this is the conditional maximum likelihood estimate for the odds ratio. It is the noncentrality parameter of Fisher's noncentral hypergeometric distribution with the same hypergeometric parameters as `table` and whose mean is ``table[0, 0]``. The object has the method `confidence_interval` that computes the confidence interval of the odds ratio. See Also -------- scipy.stats.fisher_exact relative_risk :ref:`hypothesis_odds_ratio` : Extended example Notes ----- The conditional odds ratio was discussed by Fisher (see "Example 1" of [1]_). Texts that cover the odds ratio include [2]_ and [3]_. .. versionadded:: 1.10.0 References ---------- .. [1] R. A. Fisher (1935), The logic of inductive inference, Journal of the Royal Statistical Society, Vol. 98, No. 1, pp. 39-82. .. [2] Breslow NE, Day NE (1980). Statistical methods in cancer research. Volume I - The analysis of case-control studies. IARC Sci Publ. (32):5-338. PMID: 7216345. (See section 4.2.) .. [3] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology: Methods, Techniques, and Applications, CRC Press LLC, Boca Raton, Florida. Examples -------- In epidemiology, individuals are classified as "exposed" or "unexposed" to some factor or treatment. If the occurrence of some illness is under study, those who have the illness are often classified as "cases", and those without it are "noncases". The counts of the occurrences of these classes gives a contingency table:: exposed unexposed cases a b noncases c d The sample odds ratio may be written ``(a/c) / (b/d)``. 
``a/c`` can be interpreted as the odds of a case occurring in the exposed group, and ``b/d`` as the odds of a case occurring in the unexposed group. The sample odds ratio is the ratio of these odds. If the odds ratio is greater than 1, it suggests that there is a positive association between being exposed and being a case. Interchanging the rows or columns of the contingency table inverts the odds ratio, so it is important to understand the meaning of labels given to the rows and columns of the table when interpreting the odds ratio. Consider a hypothetical example where it is hypothesized that exposure to a certain chemical is associated with increased occurrence of a certain disease. Suppose we have the following table for a collection of 410 people:: exposed unexposed cases 7 15 noncases 58 472 The question we ask is "Is exposure to the chemical associated with increased risk of the disease?" Compute the odds ratio: >>> from scipy.stats.contingency import odds_ratio >>> res = odds_ratio([[7, 15], [58, 472]]) >>> res.statistic 3.7836687705553493 For this sample, the odds of getting the disease for those who have been exposed to the chemical are almost 3.8 times that of those who have not been exposed. We can compute the 95% confidence interval for the odds ratio: >>> res.confidence_interval(confidence_level=0.95) ConfidenceInterval(low=1.2514829132266785, high=10.363493716701269) The 95% confidence interval for the conditional odds ratio is approximately (1.25, 10.4). For a more detailed example, see :ref:`hypothesis_odds_ratio`. """ if kind not in ['conditional', 'sample']: raise ValueError("`kind` must be 'conditional' or 'sample'.") c = np.asarray(table) if c.shape != (2, 2): raise ValueError(f"Invalid shape {c.shape}. 
The input `table` must be " "of shape (2, 2).") if not np.issubdtype(c.dtype, np.integer): raise ValueError("`table` must be an array of integers, but got " f"type {c.dtype}") c = c.astype(np.int64) if np.any(c < 0): raise ValueError("All values in `table` must be nonnegative.") if 0 in c.sum(axis=0) or 0 in c.sum(axis=1): # If both values in a row or column are zero, the p-value is NaN and # the odds ratio is NaN. result = OddsRatioResult(_table=c, _kind=kind, statistic=np.nan) return result if kind == 'sample': oddsratio = _sample_odds_ratio(c) else: # kind is 'conditional' oddsratio = _conditional_oddsratio(c) result = OddsRatioResult(_table=c, _kind=kind, statistic=oddsratio) return result
OddsRatioResult
python
airbytehq__airbyte
airbyte-integrations/connectors/source-shopify/source_shopify/auth.py
{ "start": 325, "end": 629 }
class ____(Exception): """Not implemented Auth option error""" logger = logging.getLogger("airbyte") def __init__(self, auth_method: str = None): self.message = f"Not implemented Auth method = {auth_method}" super().__init__(self.logger.error(self.message))
NotImplementedAuth
python
dagster-io__dagster
docs/sphinx/_ext/dagster-sphinx/dagster_sphinx/__init__.py
{ "start": 2796, "end": 8942 }
class ____(ClassDocumenter): """Overrides the default autodoc ClassDocumenter to adds some extra options.""" objtype = "class" def get_object_members(self, want_all: bool) -> tuple[bool, list[ObjectMember]]: # the @record transform creates a new outer class, so redirect # sphinx to target the original class for scraping members out of __dict__ if is_record(self.object): self.object = get_original_class(self.object) _, unfiltered_members = super().get_object_members(want_all) # Use form `is_public(self.object, attr_name) if possible, because to access a descriptor # object (returned by e.g. `@staticmethod`) you need to go in through # `self.object.__dict__`-- the value provided in the member list is _not_ the descriptor! filtered_members = [ m for m in unfiltered_members if m.__name__ in self.object.__dict__ and self._is_member_public(self.object.__dict__[m.__name__]) ] for member in filtered_members: check_public_method_has_docstring(self.env, member.__name__, member.object) return False, filtered_members def _is_member_public(self, member: object) -> bool: return self.fullname.startswith("dagster_pipes") or is_public(member) # This is a hook that will be executed for every processed docstring. It modifies the lines of the # docstring in place. def process_docstring( app: Sphinx, what: AutodocObjectType, name: str, obj: object, options: AutodocOptions, lines: list[str], ) -> None: assert app.env is not None if has_attrs(lines): record_error(f'Object {name} has "Attributes:" in docstring. 
Use "Args:" instead.') if is_deprecated(obj): inject_object_flag(obj, get_deprecated_info(obj), lines) if is_superseded(obj): inject_object_flag(obj, get_superseded_info(obj), lines) if is_preview(obj): inject_object_flag(obj, get_preview_info(obj), lines) if is_beta(obj): inject_object_flag(obj, get_beta_info(obj), lines) if has_beta_params(obj): params = get_beta_params(obj) for param, info in params.items(): inject_param_flag(lines, param, info) if has_deprecated_params(obj): params = get_deprecated_params(obj) for param, info in params.items(): inject_param_flag(lines, param, info) T_Node = TypeVar("T_Node", bound=nodes.Node) def get_child_as(node: nodes.Node, index: int, node_type: type[T_Node]) -> T_Node: child = node.children[index] assert isinstance(child, node_type), ( f"Docutils node not of expected type. Expected `{node_type}`, got `{type(child)}`." ) return child def transform_inventory_uri(uri: str) -> str: """Transform Sphinx source paths to final documentation URLs. Transforms paths like: sections/api/apidocs/dagster/internals/ to: api/dagster/internals """ # Remove the 'sections/api/apidocs/' prefix if uri.startswith("sections/api/apidocs/"): transformed = uri.replace("sections/api/apidocs/", "api/", 1) # Remove trailing slash if present if transformed.endswith("/"): transformed = transformed[:-1] return transformed return uri def fix_inventory_uris(app: Sphinx, env) -> None: """Fix URIs in the Sphinx inventory before it's written. This hook runs during env-updated which happens after all documents are read and before the build writes output files, allowing us to transform the URIs in the domain data. 
""" if env is None: return # Access the inventory data from the Python domain py_domain = env.domaindata.get("py", {}) objects = py_domain.get("objects", {}) # Transform each URI # In modern Sphinx (8.x), objects is dict[str, ObjectEntry] # ObjectEntry is a namedtuple/dataclass with (docname, node_id, objtype, aliased) modified_count = 0 for name, obj_data in list(objects.items()): if isinstance(obj_data, ObjectEntry): # New format: ObjectEntry with docname attribute old_docname = obj_data.docname new_docname = transform_inventory_uri(old_docname) if new_docname != old_docname: # Create a new ObjectEntry with the transformed docname objects[name] = ObjectEntry( docname=new_docname, node_id=obj_data.node_id, objtype=obj_data.objtype, aliased=obj_data.aliased, ) modified_count += 1 elif isinstance(obj_data, tuple): # Old format: (docname, node_id, objtype, aliased) docname, node_id, objtype, aliased = obj_data new_docname = transform_inventory_uri(docname) if new_docname != docname: objects[name] = (new_docname, node_id, objtype, aliased) modified_count += 1 if modified_count > 0: logger.info( f"[dagster_sphinx] Transformed {modified_count} inventory URIs for correct URL structure" ) def setup(app): app.setup_extension("sphinx.ext.autodoc") # Require autodoc extension app.add_autodocumenter(ConfigurableDocumenter) # override allows `.. 
autoclass::` to invoke DagsterClassDocumenter instead of default app.add_autodocumenter(DagsterClassDocumenter, override=True) app.add_directive("flag", FlagDirective) app.add_node(inline_flag, html=(visit_inline_flag, depart_flag)) app.add_node(flag, html=(visit_flag, depart_flag)) app.add_role("inline-flag", inline_flag_role) app.connect("autodoc-process-docstring", process_docstring) # Connect to env-updated event which happens after reading all docs and before writing app.connect("env-updated", fix_inventory_uris) # app.connect("doctree-resolved", substitute_deprecated_text) return { "version": "0.1", "parallel_read_safe": True, "parallel_write_safe": True, }
DagsterClassDocumenter
python
protocolbuffers__protobuf
python/google/protobuf/internal/field_mask_test.py
{ "start": 683, "end": 19110 }
class ____(unittest.TestCase): def testStringFormat(self): mask = field_mask_pb2.FieldMask() self.assertEqual('', mask.ToJsonString()) mask.paths.append('foo') self.assertEqual('foo', mask.ToJsonString()) mask.paths.append('bar') self.assertEqual('foo,bar', mask.ToJsonString()) mask.FromJsonString('') self.assertEqual('', mask.ToJsonString()) mask.FromJsonString('foo') self.assertEqual(['foo'], mask.paths) mask.FromJsonString('foo,bar') self.assertEqual(['foo', 'bar'], mask.paths) # Test camel case mask.Clear() mask.paths.append('foo_bar') self.assertEqual('fooBar', mask.ToJsonString()) mask.paths.append('bar_quz') self.assertEqual('fooBar,barQuz', mask.ToJsonString()) mask.FromJsonString('') self.assertEqual('', mask.ToJsonString()) self.assertEqual([], mask.paths) mask.FromJsonString('fooBar') self.assertEqual(['foo_bar'], mask.paths) mask.FromJsonString('fooBar,barQuz') self.assertEqual(['foo_bar', 'bar_quz'], mask.paths) def testDescriptorToFieldMask(self): mask = field_mask_pb2.FieldMask() msg_descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR mask.AllFieldsFromDescriptor(msg_descriptor) self.assertEqual(80, len(mask.paths)) self.assertTrue(mask.IsValidForDescriptor(msg_descriptor)) for field in msg_descriptor.fields: self.assertTrue(field.name in mask.paths) def testIsValidForDescriptor(self): msg_descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR # Empty mask mask = field_mask_pb2.FieldMask() self.assertTrue(mask.IsValidForDescriptor(msg_descriptor)) # All fields from descriptor mask.AllFieldsFromDescriptor(msg_descriptor) self.assertTrue(mask.IsValidForDescriptor(msg_descriptor)) # Child under optional message mask.paths.append('optional_nested_message.bb') self.assertTrue(mask.IsValidForDescriptor(msg_descriptor)) # Repeated field is only allowed in the last position of path mask.paths.append('repeated_nested_message.bb') self.assertFalse(mask.IsValidForDescriptor(msg_descriptor)) # Invalid top level field mask = field_mask_pb2.FieldMask() 
mask.paths.append('xxx') self.assertFalse(mask.IsValidForDescriptor(msg_descriptor)) # Invalid field in root mask = field_mask_pb2.FieldMask() mask.paths.append('xxx.zzz') self.assertFalse(mask.IsValidForDescriptor(msg_descriptor)) # Invalid field in internal node mask = field_mask_pb2.FieldMask() mask.paths.append('optional_nested_message.xxx.zzz') self.assertFalse(mask.IsValidForDescriptor(msg_descriptor)) # Invalid field in leaf mask = field_mask_pb2.FieldMask() mask.paths.append('optional_nested_message.xxx') self.assertFalse(mask.IsValidForDescriptor(msg_descriptor)) def testCanonicalFrom(self): mask = field_mask_pb2.FieldMask() out_mask = field_mask_pb2.FieldMask() # Paths will be sorted. mask.FromJsonString('baz.quz,bar,foo') out_mask.CanonicalFormFromMask(mask) self.assertEqual('bar,baz.quz,foo', out_mask.ToJsonString()) # Duplicated paths will be removed. mask.FromJsonString('foo,bar,foo') out_mask.CanonicalFormFromMask(mask) self.assertEqual('bar,foo', out_mask.ToJsonString()) # Sub-paths of other paths will be removed. mask.FromJsonString('foo.b1,bar.b1,foo.b2,bar') out_mask.CanonicalFormFromMask(mask) self.assertEqual('bar,foo.b1,foo.b2', out_mask.ToJsonString()) # Test more deeply nested cases. 
mask.FromJsonString('foo.bar.baz1,foo.bar.baz2.quz,foo.bar.baz2') out_mask.CanonicalFormFromMask(mask) self.assertEqual('foo.bar.baz1,foo.bar.baz2', out_mask.ToJsonString()) mask.FromJsonString('foo.bar.baz1,foo.bar.baz2,foo.bar.baz2.quz') out_mask.CanonicalFormFromMask(mask) self.assertEqual('foo.bar.baz1,foo.bar.baz2', out_mask.ToJsonString()) mask.FromJsonString('foo.bar.baz1,foo.bar.baz2,foo.bar.baz2.quz,foo.bar') out_mask.CanonicalFormFromMask(mask) self.assertEqual('foo.bar', out_mask.ToJsonString()) mask.FromJsonString('foo.bar.baz1,foo.bar.baz2,foo.bar.baz2.quz,foo') out_mask.CanonicalFormFromMask(mask) self.assertEqual('foo', out_mask.ToJsonString()) def testUnion(self): mask1 = field_mask_pb2.FieldMask() mask2 = field_mask_pb2.FieldMask() out_mask = field_mask_pb2.FieldMask() mask1.FromJsonString('foo,baz') mask2.FromJsonString('bar,quz') out_mask.Union(mask1, mask2) self.assertEqual('bar,baz,foo,quz', out_mask.ToJsonString()) # Overlap with duplicated paths. mask1.FromJsonString('foo,baz.bb') mask2.FromJsonString('baz.bb,quz') out_mask.Union(mask1, mask2) self.assertEqual('baz.bb,foo,quz', out_mask.ToJsonString()) # Overlap with paths covering some other paths. mask1.FromJsonString('foo.bar.baz,quz') mask2.FromJsonString('foo.bar,bar') out_mask.Union(mask1, mask2) self.assertEqual('bar,foo.bar,quz', out_mask.ToJsonString()) src = unittest_pb2.TestAllTypes() with self.assertRaises(ValueError): out_mask.Union(src, mask2) def testIntersect(self): mask1 = field_mask_pb2.FieldMask() mask2 = field_mask_pb2.FieldMask() out_mask = field_mask_pb2.FieldMask() # Test cases without overlapping. mask1.FromJsonString('foo,baz') mask2.FromJsonString('bar,quz') out_mask.Intersect(mask1, mask2) self.assertEqual('', out_mask.ToJsonString()) self.assertEqual(len(out_mask.paths), 0) self.assertEqual(out_mask.paths, []) # Overlap with duplicated paths. 
mask1.FromJsonString('foo,baz.bb') mask2.FromJsonString('baz.bb,quz') out_mask.Intersect(mask1, mask2) self.assertEqual('baz.bb', out_mask.ToJsonString()) # Overlap with paths covering some other paths. mask1.FromJsonString('foo.bar.baz,quz') mask2.FromJsonString('foo.bar,bar') out_mask.Intersect(mask1, mask2) self.assertEqual('foo.bar.baz', out_mask.ToJsonString()) mask1.FromJsonString('foo.bar,bar') mask2.FromJsonString('foo.bar.baz,quz') out_mask.Intersect(mask1, mask2) self.assertEqual('foo.bar.baz', out_mask.ToJsonString()) # Intersect '' with '' mask1.Clear() mask2.Clear() mask1.paths.append('') mask2.paths.append('') self.assertEqual(mask1.paths, ['']) self.assertEqual('', mask1.ToJsonString()) out_mask.Intersect(mask1, mask2) self.assertEqual(out_mask.paths, []) def testMergeMessageWithoutMapFields(self): # Test merge one field. src = unittest_pb2.TestAllTypes() test_util.SetAllFields(src) for field in src.DESCRIPTOR.fields: if field.containing_oneof: continue field_name = field.name dst = unittest_pb2.TestAllTypes() # Only set one path to mask. mask = field_mask_pb2.FieldMask() mask.paths.append(field_name) mask.MergeMessage(src, dst) # The expected result message. msg = unittest_pb2.TestAllTypes() if field.is_repeated: repeated_src = getattr(src, field_name) repeated_msg = getattr(msg, field_name) if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: for item in repeated_src: repeated_msg.add().CopyFrom(item) else: repeated_msg.extend(repeated_src) elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: getattr(msg, field_name).CopyFrom(getattr(src, field_name)) else: setattr(msg, field_name, getattr(src, field_name)) # Only field specified in mask is merged. self.assertEqual(msg, dst) # Test merge nested fields. 
nested_src = unittest_pb2.NestedTestAllTypes() nested_dst = unittest_pb2.NestedTestAllTypes() nested_src.child.payload.optional_int32 = 1234 nested_src.child.child.payload.optional_int32 = 5678 mask = field_mask_pb2.FieldMask() mask.FromJsonString('child.payload') mask.MergeMessage(nested_src, nested_dst) self.assertEqual(1234, nested_dst.child.payload.optional_int32) self.assertEqual(0, nested_dst.child.child.payload.optional_int32) mask.FromJsonString('child.child.payload') mask.MergeMessage(nested_src, nested_dst) self.assertEqual(1234, nested_dst.child.payload.optional_int32) self.assertEqual(5678, nested_dst.child.child.payload.optional_int32) nested_dst.Clear() mask.FromJsonString('child.child.payload') mask.MergeMessage(nested_src, nested_dst) self.assertEqual(0, nested_dst.child.payload.optional_int32) self.assertEqual(5678, nested_dst.child.child.payload.optional_int32) nested_dst.Clear() mask.FromJsonString('child') mask.MergeMessage(nested_src, nested_dst) self.assertEqual(1234, nested_dst.child.payload.optional_int32) self.assertEqual(5678, nested_dst.child.child.payload.optional_int32) # Test MergeOptions. nested_dst.Clear() nested_dst.child.payload.optional_int64 = 4321 # Message fields will be merged by default. mask.FromJsonString('child.payload') mask.MergeMessage(nested_src, nested_dst) self.assertEqual(1234, nested_dst.child.payload.optional_int32) self.assertEqual(4321, nested_dst.child.payload.optional_int64) # Change the behavior to replace message fields. mask.FromJsonString('child.payload') mask.MergeMessage(nested_src, nested_dst, True, False) self.assertEqual(1234, nested_dst.child.payload.optional_int32) self.assertEqual(0, nested_dst.child.payload.optional_int64) # By default, fields missing in source are not cleared in destination. 
nested_dst.payload.optional_int32 = 1234 self.assertTrue(nested_dst.HasField('payload')) mask.FromJsonString('payload') mask.MergeMessage(nested_src, nested_dst) self.assertTrue(nested_dst.HasField('payload')) # But they are cleared when replacing message fields. nested_dst.Clear() nested_dst.payload.optional_int32 = 1234 mask.FromJsonString('payload') mask.MergeMessage(nested_src, nested_dst, True, False) self.assertFalse(nested_dst.HasField('payload')) nested_src.payload.repeated_int32.append(1234) nested_dst.payload.repeated_int32.append(5678) # Repeated fields will be appended by default. mask.FromJsonString('payload.repeatedInt32') mask.MergeMessage(nested_src, nested_dst) self.assertEqual(2, len(nested_dst.payload.repeated_int32)) self.assertEqual(5678, nested_dst.payload.repeated_int32[0]) self.assertEqual(1234, nested_dst.payload.repeated_int32[1]) # Change the behavior to replace repeated fields. mask.FromJsonString('payload.repeatedInt32') mask.MergeMessage(nested_src, nested_dst, False, True) self.assertEqual(1, len(nested_dst.payload.repeated_int32)) self.assertEqual(1234, nested_dst.payload.repeated_int32[0]) # Test Merge oneof field. new_msg = unittest_pb2.TestOneof2() dst = unittest_pb2.TestOneof2() dst.foo_message.moo_int = 1 mask = field_mask_pb2.FieldMask() mask.FromJsonString('fooMessage,fooLazyMessage.mooInt') mask.MergeMessage(new_msg, dst) self.assertTrue(dst.HasField('foo_message')) self.assertFalse(dst.HasField('foo_lazy_message')) def testMergeMessageWithoutMapFieldsOrFieldPresence(self): # Test merge one field. src = unittest_no_field_presence_pb2.TestAllTypes() test_util.SetAllFields(src) for field in src.DESCRIPTOR.fields: if field.containing_oneof: continue field_name = field.name dst = unittest_no_field_presence_pb2.TestAllTypes() # Only set one path to mask. mask = field_mask_pb2.FieldMask() mask.paths.append(field_name) mask.MergeMessage(src, dst) # The expected result message. 
msg = unittest_no_field_presence_pb2.TestAllTypes() if field.is_repeated: repeated_src = getattr(src, field_name) repeated_msg = getattr(msg, field_name) if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: for item in repeated_src: repeated_msg.add().CopyFrom(item) else: repeated_msg.extend(repeated_src) elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: getattr(msg, field_name).CopyFrom(getattr(src, field_name)) else: setattr(msg, field_name, getattr(src, field_name)) # Only field specified in mask is merged. self.assertEqual(msg, dst) def testMergeMessageWithMapField(self): empty_map = map_unittest_pb2.TestRecursiveMapMessage() src_level_2 = map_unittest_pb2.TestRecursiveMapMessage() src_level_2.a['src level 2'].CopyFrom(empty_map) src = map_unittest_pb2.TestRecursiveMapMessage() src.a['common key'].CopyFrom(src_level_2) src.a['src level 1'].CopyFrom(src_level_2) dst_level_2 = map_unittest_pb2.TestRecursiveMapMessage() dst_level_2.a['dst level 2'].CopyFrom(empty_map) dst = map_unittest_pb2.TestRecursiveMapMessage() dst.a['common key'].CopyFrom(dst_level_2) dst.a['dst level 1'].CopyFrom(empty_map) mask = field_mask_pb2.FieldMask() mask.FromJsonString('a') mask.MergeMessage(src, dst) # map from dst is replaced with map from src. self.assertEqual(dst.a['common key'], src_level_2) self.assertEqual(dst.a['src level 1'], src_level_2) self.assertEqual(dst.a['dst level 1'], empty_map) def testMergeMessageWithUnsetFieldsWithFieldPresence(self): # Test merging each empty field one at a time. src = unittest_pb2.TestAllTypes() for field in src.DESCRIPTOR.fields: if field.containing_oneof: continue field_name = field.name dst = unittest_pb2.TestAllTypes() # Only set one path to mask. mask = field_mask_pb2.FieldMask() mask.paths.append(field_name) mask.MergeMessage(src, dst) # Nothing should be merged. self.assertEqual(unittest_pb2.TestAllTypes(), dst) # Test merge clears previously set fields when source is unset. 
dst_template = unittest_pb2.TestAllTypes() test_util.SetAllFields(dst_template) for field in src.DESCRIPTOR.fields: if field.containing_oneof: continue dst = unittest_pb2.TestAllTypes() dst.CopyFrom(dst_template) # Only set one path to mask. mask = field_mask_pb2.FieldMask() mask.paths.append(field.name) mask.MergeMessage( src, dst, replace_message_field=True, replace_repeated_field=True ) msg = unittest_pb2.TestAllTypes() msg.CopyFrom(dst_template) msg.ClearField(field.name) self.assertEqual(msg, dst) def testMergeMessageWithUnsetFieldsWithoutFieldPresence(self): # Test merging each empty field one at a time. src = unittest_no_field_presence_pb2.TestAllTypes() for field in src.DESCRIPTOR.fields: if field.containing_oneof: continue field_name = field.name dst = unittest_no_field_presence_pb2.TestAllTypes() # Only set one path to mask. mask = field_mask_pb2.FieldMask() mask.paths.append(field_name) mask.MergeMessage(src, dst) # Nothing should be merged. self.assertEqual(unittest_no_field_presence_pb2.TestAllTypes(), dst) # Test merge clears previously set fields when source is unset. dst_template = unittest_no_field_presence_pb2.TestAllTypes() test_util.SetAllFields(dst_template) for field in src.DESCRIPTOR.fields: if field.containing_oneof: continue dst = unittest_no_field_presence_pb2.TestAllTypes() dst.CopyFrom(dst_template) # Only set one path to mask. 
mask = field_mask_pb2.FieldMask() mask.paths.append(field.name) mask.MergeMessage( src, dst, replace_message_field=True, replace_repeated_field=True ) msg = unittest_no_field_presence_pb2.TestAllTypes() msg.CopyFrom(dst_template) msg.ClearField(field.name) self.assertEqual(msg, dst) def testMergeErrors(self): src = unittest_pb2.TestAllTypes() dst = unittest_pb2.TestAllTypes() mask = field_mask_pb2.FieldMask() test_util.SetAllFields(src) mask.FromJsonString('optionalInt32.field') with self.assertRaises(ValueError) as e: mask.MergeMessage(src, dst) self.assertEqual( 'Error: Field optional_int32 in message ' 'proto2_unittest.TestAllTypes is not a singular ' 'message field and cannot have sub-fields.', str(e.exception), ) def testSnakeCaseToCamelCase(self): self.assertEqual('fooBar', field_mask._SnakeCaseToCamelCase('foo_bar')) self.assertEqual('FooBar', field_mask._SnakeCaseToCamelCase('_foo_bar')) self.assertEqual('foo3Bar', field_mask._SnakeCaseToCamelCase('foo3_bar')) # No uppercase letter is allowed. self.assertRaisesRegex( ValueError, 'Fail to print FieldMask to Json string: Path name Foo must ' 'not contain uppercase letters.', field_mask._SnakeCaseToCamelCase, 'Foo', ) # Any character after a "_" must be a lowercase letter. # 1. "_" cannot be followed by another "_". # 2. "_" cannot be followed by a digit. # 3. "_" cannot appear as the last character. 
self.assertRaisesRegex( ValueError, 'Fail to print FieldMask to Json string: The character after a ' '"_" must be a lowercase letter in path name foo__bar.', field_mask._SnakeCaseToCamelCase, 'foo__bar', ) self.assertRaisesRegex( ValueError, 'Fail to print FieldMask to Json string: The character after a ' '"_" must be a lowercase letter in path name foo_3bar.', field_mask._SnakeCaseToCamelCase, 'foo_3bar', ) self.assertRaisesRegex( ValueError, 'Fail to print FieldMask to Json string: Trailing "_" in path ' 'name foo_bar_.', field_mask._SnakeCaseToCamelCase, 'foo_bar_', ) def testCamelCaseToSnakeCase(self): self.assertEqual('foo_bar', field_mask._CamelCaseToSnakeCase('fooBar')) self.assertEqual('_foo_bar', field_mask._CamelCaseToSnakeCase('FooBar')) self.assertEqual('foo3_bar', field_mask._CamelCaseToSnakeCase('foo3Bar')) self.assertRaisesRegex( ValueError, 'Fail to parse FieldMask: Path name foo_bar must not contain "_"s.', field_mask._CamelCaseToSnakeCase, 'foo_bar', ) if __name__ == '__main__': unittest.main()
FieldMaskTest
python
sympy__sympy
sympy/functions/special/singularity_functions.py
{ "start": 595, "end": 8346 }
class ____(DefinedFunction): r""" Singularity functions are a class of discontinuous functions. Explanation =========== Singularity functions take a variable, an offset, and an exponent as arguments. These functions are represented using Macaulay brackets as: SingularityFunction(x, a, n) := <x - a>^n The singularity function will automatically evaluate to ``Derivative(DiracDelta(x - a), x, -n - 1)`` if ``n < 0`` and ``(x - a)**n*Heaviside(x - a, 1)`` if ``n >= 0``. Examples ======== >>> from sympy import SingularityFunction, diff, Piecewise, DiracDelta, Heaviside, Symbol >>> from sympy.abc import x, a, n >>> SingularityFunction(x, a, n) SingularityFunction(x, a, n) >>> y = Symbol('y', positive=True) >>> n = Symbol('n', nonnegative=True) >>> SingularityFunction(y, -10, n) (y + 10)**n >>> y = Symbol('y', negative=True) >>> SingularityFunction(y, 10, n) 0 >>> SingularityFunction(x, 4, -1).subs(x, 4) oo >>> SingularityFunction(x, 10, -2).subs(x, 10) oo >>> SingularityFunction(4, 1, 5) 243 >>> diff(SingularityFunction(x, 1, 5) + SingularityFunction(x, 1, 4), x) 4*SingularityFunction(x, 1, 3) + 5*SingularityFunction(x, 1, 4) >>> diff(SingularityFunction(x, 4, 0), x, 2) SingularityFunction(x, 4, -2) >>> SingularityFunction(x, 4, 5).rewrite(Piecewise) Piecewise(((x - 4)**5, x >= 4), (0, True)) >>> expr = SingularityFunction(x, a, n) >>> y = Symbol('y', positive=True) >>> n = Symbol('n', nonnegative=True) >>> expr.subs({x: y, a: -10, n: n}) (y + 10)**n The methods ``rewrite(DiracDelta)``, ``rewrite(Heaviside)``, and ``rewrite('HeavisideDiracDelta')`` returns the same output. One can use any of these methods according to their choice. 
>>> expr = SingularityFunction(x, 4, 5) + SingularityFunction(x, -3, -1) - SingularityFunction(x, 0, -2) >>> expr.rewrite(Heaviside) (x - 4)**5*Heaviside(x - 4, 1) + DiracDelta(x + 3) - DiracDelta(x, 1) >>> expr.rewrite(DiracDelta) (x - 4)**5*Heaviside(x - 4, 1) + DiracDelta(x + 3) - DiracDelta(x, 1) >>> expr.rewrite('HeavisideDiracDelta') (x - 4)**5*Heaviside(x - 4, 1) + DiracDelta(x + 3) - DiracDelta(x, 1) See Also ======== DiracDelta, Heaviside References ========== .. [1] https://en.wikipedia.org/wiki/Singularity_function """ is_real = True def fdiff(self, argindex=1): """ Returns the first derivative of a DiracDelta Function. Explanation =========== The difference between ``diff()`` and ``fdiff()`` is: ``diff()`` is the user-level function and ``fdiff()`` is an object method. ``fdiff()`` is a convenience method available in the ``Function`` class. It returns the derivative of the function without considering the chain rule. ``diff(function, x)`` calls ``Function._eval_derivative`` which in turn calls ``fdiff()`` internally to compute the derivative of the function. """ if argindex == 1: x, a, n = self.args if n in (S.Zero, S.NegativeOne, S(-2), S(-3)): return self.func(x, a, n-1) elif n.is_positive: return n*self.func(x, a, n-1) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, variable, offset, exponent): """ Returns a simplified form or a value of Singularity Function depending on the argument passed by the object. Explanation =========== The ``eval()`` method is automatically called when the ``SingularityFunction`` class is about to be instantiated and it returns either some simplified instance or the unevaluated instance depending on the argument passed. In other words, ``eval()`` method is not needed to be called explicitly, it is being called and evaluated once the object is called. 
Examples ======== >>> from sympy import SingularityFunction, Symbol, nan >>> from sympy.abc import x, a, n >>> SingularityFunction(x, a, n) SingularityFunction(x, a, n) >>> SingularityFunction(5, 3, 2) 4 >>> SingularityFunction(x, a, nan) nan >>> SingularityFunction(x, 3, 0).subs(x, 3) 1 >>> SingularityFunction(4, 1, 5) 243 >>> x = Symbol('x', positive = True) >>> a = Symbol('a', negative = True) >>> n = Symbol('n', nonnegative = True) >>> SingularityFunction(x, a, n) (-a + x)**n >>> x = Symbol('x', negative = True) >>> a = Symbol('a', positive = True) >>> SingularityFunction(x, a, n) 0 """ x = variable a = offset n = exponent shift = (x - a) if fuzzy_not(im(shift).is_zero): raise ValueError("Singularity Functions are defined only for Real Numbers.") if fuzzy_not(im(n).is_zero): raise ValueError("Singularity Functions are not defined for imaginary exponents.") if shift is S.NaN or n is S.NaN: return S.NaN if (n + 4).is_negative: raise ValueError("Singularity Functions are not defined for exponents less than -4.") if shift.is_extended_negative: return S.Zero if n.is_nonnegative: if shift.is_zero: # use literal 0 in case of Symbol('z', zero=True) return S.Zero**n if shift.is_extended_nonnegative: return shift**n if n in (S.NegativeOne, -2, -3, -4): if shift.is_negative or shift.is_extended_positive: return S.Zero if shift.is_zero: return oo def _eval_rewrite_as_Piecewise(self, *args, **kwargs): ''' Converts a Singularity Function expression into its Piecewise form. ''' x, a, n = self.args if n in (S.NegativeOne, S(-2), S(-3), S(-4)): return Piecewise((oo, Eq(x - a, 0)), (0, True)) elif n.is_nonnegative: return Piecewise(((x - a)**n, x - a >= 0), (0, True)) def _eval_rewrite_as_Heaviside(self, *args, **kwargs): ''' Rewrites a Singularity Function expression using Heavisides and DiracDeltas. 
''' x, a, n = self.args if n == -4: return diff(Heaviside(x - a), x.free_symbols.pop(), 4) if n == -3: return diff(Heaviside(x - a), x.free_symbols.pop(), 3) if n == -2: return diff(Heaviside(x - a), x.free_symbols.pop(), 2) if n == -1: return diff(Heaviside(x - a), x.free_symbols.pop(), 1) if n.is_nonnegative: return (x - a)**n*Heaviside(x - a, 1) def _eval_as_leading_term(self, x, logx, cdir): z, a, n = self.args shift = (z - a).subs(x, 0) if n < 0: return S.Zero elif n.is_zero and shift.is_zero: return S.Zero if cdir == -1 else S.One elif shift.is_positive: return shift**n return S.Zero def _eval_nseries(self, x, n, logx=None, cdir=0): z, a, n = self.args shift = (z - a).subs(x, 0) if n < 0: return S.Zero elif n.is_zero and shift.is_zero: return S.Zero if cdir == -1 else S.One elif shift.is_positive: return ((z - a)**n)._eval_nseries(x, n, logx=logx, cdir=cdir) return S.Zero _eval_rewrite_as_DiracDelta = _eval_rewrite_as_Heaviside _eval_rewrite_as_HeavisideDiracDelta = _eval_rewrite_as_Heaviside
SingularityFunction
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 247564, "end": 248207 }
class ____(sgqlc.types.Input): """Autogenerated input type of LinkProjectV2ToTeam""" __schema__ = github_schema __field_names__ = ("project_id", "team_id", "client_mutation_id") project_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectId") """The ID of the project to link to the team.""" team_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="teamId") """The ID of the team to link to the project.""" client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation."""
LinkProjectV2ToTeamInput
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py
{ "start": 28402, "end": 28658 }
class ____(graphene.ObjectType): """Output indicating that runless asset events were reported.""" assetKey = graphene.NonNull(GrapheneAssetKey) class Meta: name = "ReportRunlessAssetEventsSuccess"
GrapheneReportRunlessAssetEventsSuccess
python
graphql-python__graphene
graphene/types/definitions.py
{ "start": 742, "end": 817 }
class ____(GrapheneGraphQLType, GraphQLUnionType): pass
GrapheneUnionType
python
django__django
tests/select_related_regress/models.py
{ "start": 2750, "end": 2813 }
class ____(Base): a_field = models.CharField(max_length=10)
A
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/links/vertex_ai.py
{ "start": 4730, "end": 4942 }
class ____(BaseGoogleLink): """Helper class for constructing Vertex AI Datasets Link.""" name = "Dataset List" key = "datasets_conf" format_str = VERTEX_AI_DATASET_LIST_LINK
VertexAIDatasetListLink
python
huggingface__transformers
src/transformers/models/evolla/modeling_evolla.py
{ "start": 53202, "end": 59158 }
class ____(EvollaPreTrainedModel): def __init__(self, config: EvollaConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(self.vocab_size, config.hidden_size, self.padding_idx) self.protein_encoder = EvollaProteinEncoder(config=config) self.layers = nn.ModuleList( [ EvollaDecoderLayer( config=config, layer_idx=layer_idx, ) for layer_idx in range(config.num_hidden_layers) ] ) self.norm = EvollaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = getattr(config, "gradient_checkpointing", False) self.rotary_emb = EvollaRotaryEmbedding(config=config) self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @auto_docstring @check_model_inputs() def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, protein_input_ids: Optional[torch.LongTensor] = None, protein_attention_mask: Optional[torch.Tensor] = None, structure_feats: Optional[torch.FloatTensor] = None, msa_feats: Optional[torch.FloatTensor] = None, structure_batch_mask: Optional[torch.Tensor] = None, msa_batch_mask: Optional[torch.Tensor] = None, **kwargs, ) -> Union[tuple, BaseModelOutputWithPast]: r""" protein_input_ids (torch.LongTensor): The input IDs for the protein sequence in structure-aware tokens. Should be of shape `(batch_size, protein_seq_length)` and type `torch.LongTensor`. protein_attention_mask (torch.Tensor): The attention mask for the protein sequence. Should be of shape `(batch_size, protein_seq_length)` and type `torch.Tensor`. structure_feats (torch.FloatTensor): The input IDs for purely structure-based features. 
Should be of shape `(batch_size, structure_seq_length, structure_feat_dim)` and type `torch.FloatTensor`. Dummy input for now. msa_feats (torch.FloatTensor): The input IDs for purely MSA-based features. Should be of shape `(batch_size, msa_seq_length, msa_feat_dim)` and type `torch.FloatTensor`. Dummy input for now. structure_batch_mask (torch.Tensor): The batch mask to decide which protein sequences are purely structure-based. Should be of shape `(batch_size)` and type `torch.Tensor`. Should be paired with `structure_feats`. Dummpy input for now. msa_batch_mask (torch.Tensor): The batch mask to decide which protein sequences are purely MSA-based. Should be of shape `(batch_size)` and type `torch.Tensor`. Should be paired with `msa_feats`. Dummpy input for now. """ if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) protein_feats = None protein_batch_mask = None # If provided, actually compute them if protein_input_ids is not None and protein_attention_mask is not None: protein_outputs = self.protein_encoder( input_ids=protein_input_ids, attention_mask=protein_attention_mask, ) protein_feats = protein_outputs.sequence_compressor_output protein_batch_mask = torch.tensor([True] * protein_input_ids.shape[0], device=protein_input_ids.device) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, ) 
hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids) for decoder_layer in self.layers: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, protein_kv_states=protein_feats, structure_kv_states=structure_feats, msa_kv_states=msa_feats, protein_batch_mask=protein_batch_mask, structure_batch_mask=structure_batch_mask, msa_batch_mask=msa_batch_mask, query_attn_mask=attention_mask, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) output = BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) return output
EvollaModel
python
pydata__xarray
xarray/core/accessor_str.py
{ "start": 4530, "end": 99670 }
class ____(Generic[T_DataArray]): r"""Vectorized string functions for string-like arrays. Similar to pandas, fields can be accessed through the `.str` attribute for applicable DataArrays. >>> da = xr.DataArray(["some", "text", "in", "an", "array"]) >>> da.str.len() <xarray.DataArray (dim_0: 5)> Size: 40B array([4, 4, 2, 2, 5]) Dimensions without coordinates: dim_0 It also implements ``+``, ``*``, and ``%``, which operate as elementwise versions of the corresponding ``str`` methods. These will automatically broadcast for array-like inputs. >>> da1 = xr.DataArray(["first", "second", "third"], dims=["X"]) >>> da2 = xr.DataArray([1, 2, 3], dims=["Y"]) >>> da1.str + da2 <xarray.DataArray (X: 3, Y: 3)> Size: 252B array([['first1', 'first2', 'first3'], ['second1', 'second2', 'second3'], ['third1', 'third2', 'third3']], dtype='<U7') Dimensions without coordinates: X, Y >>> da1 = xr.DataArray(["a", "b", "c", "d"], dims=["X"]) >>> reps = xr.DataArray([3, 4], dims=["Y"]) >>> da1.str * reps <xarray.DataArray (X: 4, Y: 2)> Size: 128B array([['aaa', 'aaaa'], ['bbb', 'bbbb'], ['ccc', 'cccc'], ['ddd', 'dddd']], dtype='<U4') Dimensions without coordinates: X, Y >>> da1 = xr.DataArray(["%s_%s", "%s-%s", "%s|%s"], dims=["X"]) >>> da2 = xr.DataArray([1, 2], dims=["Y"]) >>> da3 = xr.DataArray([0.1, 0.2], dims=["Z"]) >>> da1.str % (da2, da3) <xarray.DataArray (X: 3, Y: 2, Z: 2)> Size: 240B array([[['1_0.1', '1_0.2'], ['2_0.1', '2_0.2']], <BLANKLINE> [['1-0.1', '1-0.2'], ['2-0.1', '2-0.2']], <BLANKLINE> [['1|0.1', '1|0.2'], ['2|0.1', '2|0.2']]], dtype='<U5') Dimensions without coordinates: X, Y, Z .. note:: When using ``%`` formatting with a dict, the values are always used as a single value, they are not applied elementwise. 
>>> da1 = xr.DataArray(["%(a)s"], dims=["X"]) >>> da2 = xr.DataArray([1, 2, 3], dims=["Y"]) >>> da1 % {"a": da2} <xarray.DataArray (X: 1)> Size: 8B array(['<xarray.DataArray (Y: 3)> Size: 24B\narray([1, 2, 3])\nDimensions without coordinates: Y'], dtype=object) Dimensions without coordinates: X """ __slots__ = ("_obj",) def __init__(self, obj: T_DataArray) -> None: self._obj = obj def _stringify(self, invar: Any) -> str | bytes | Any: """ Convert a string-like to the correct string/bytes type. This is mostly here to tell mypy a pattern is a str/bytes not a re.Pattern. """ if hasattr(invar, "astype"): return invar.astype(self._obj.dtype.kind) else: return self._obj.dtype.type(invar) def _apply( self, *, func: Callable, dtype: DTypeLike | None = None, output_core_dims: list | tuple = ((),), output_sizes: Mapping[Any, int] | None = None, func_args: tuple = (), func_kwargs: Mapping = {}, ) -> T_DataArray: return _apply_str_ufunc( obj=self._obj, func=func, dtype=dtype, output_core_dims=output_core_dims, output_sizes=output_sizes, func_args=func_args, func_kwargs=func_kwargs, ) def _re_compile( self, *, pat: str | bytes | Pattern | Any, flags: int = 0, case: bool | None = None, ) -> Pattern | Any: is_compiled_re = isinstance(pat, re.Pattern) if is_compiled_re and flags != 0: raise ValueError("Flags cannot be set when pat is a compiled regex.") if is_compiled_re and case is not None: raise ValueError("Case cannot be set when pat is a compiled regex.") if is_compiled_re: # no-op, needed to tell mypy this isn't a string return re.compile(pat) if case is None: case = True # The case is handled by the re flags internally. # Add it to the flags if necessary. 
if not case: flags |= re.IGNORECASE if getattr(pat, "dtype", None) != np.object_: pat = self._stringify(pat) def func(x): return re.compile(x, flags=flags) if isinstance(pat, np.ndarray): # apply_ufunc doesn't work for numpy arrays with output object dtypes func_ = np.vectorize(func) return func_(pat) else: return _apply_str_ufunc(func=func, obj=pat, dtype=np.object_) def len(self) -> T_DataArray: """ Compute the length of each string in the array. Returns ------- lengths array : array of int """ return self._apply(func=len, dtype=int) def __getitem__( self, key: int | slice, ) -> T_DataArray: if isinstance(key, slice): return self.slice(start=key.start, stop=key.stop, step=key.step) else: return self.get(key) def __add__(self, other: Any) -> T_DataArray: return self.cat(other, sep="") def __mul__( self, num: int | Any, ) -> T_DataArray: return self.repeat(num) def __mod__( self, other: Any, ) -> T_DataArray: if isinstance(other, dict): other = {key: self._stringify(val) for key, val in other.items()} return self._apply(func=lambda x: x % other) elif isinstance(other, tuple): other = tuple(self._stringify(x) for x in other) return self._apply(func=lambda x, *y: x % y, func_args=other) else: return self._apply(func=lambda x, y: x % y, func_args=(other,)) def get( self, i: int | Any, default: str | bytes = "", ) -> T_DataArray: """ Extract character number `i` from each string in the array. If `i` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- i : int or array-like of int Position of element to extract. If array-like, it is broadcast. default : str or bytes, default: "" Value for out-of-range index. 
Returns ------- items : array of object """ def f(x, iind): islice = slice(-1, None) if iind == -1 else slice(iind, iind + 1) item = x[islice] return item or default return self._apply(func=f, func_args=(i,)) def slice( self, start: int | Any | None = None, stop: int | Any | None = None, step: int | Any | None = None, ) -> T_DataArray: """ Slice substrings from each string in the array. If `start`, `stop`, or 'step` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- start : int or array-like of int, optional Start position for slice operation. If array-like, it is broadcast. stop : int or array-like of int, optional Stop position for slice operation. If array-like, it is broadcast. step : int or array-like of int, optional Step size for slice operation. If array-like, it is broadcast. Returns ------- sliced strings : same type as values """ f = lambda x, istart, istop, istep: x[slice(istart, istop, istep)] return self._apply(func=f, func_args=(start, stop, step)) def slice_replace( self, start: int | Any | None = None, stop: int | Any | None = None, repl: str | bytes | Any = "", ) -> T_DataArray: """ Replace a positional slice of a string with another value. If `start`, `stop`, or 'repl` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- start : int or array-like of int, optional Left index position to use for the slice. If not specified (None), the slice is unbounded on the left, i.e. slice from the start of the string. If array-like, it is broadcast. stop : int or array-like of int, optional Right index position to use for the slice. If not specified (None), the slice is unbounded on the right, i.e. slice until the end of the string. If array-like, it is broadcast. repl : str or array-like of str, default: "" String for replacement. If not specified, the sliced region is replaced with an empty string. If array-like, it is broadcast. 
Returns ------- replaced : same type as values """ repl = self._stringify(repl) def func(x, istart, istop, irepl): if len(x[istart:istop]) == 0: local_stop = istart else: local_stop = istop y = self._stringify("") if istart is not None: y += x[:istart] y += irepl if istop is not None: y += x[local_stop:] return y return self._apply(func=func, func_args=(start, stop, repl)) def cat(self, *others, sep: str | bytes | Any = "") -> T_DataArray: """ Concatenate strings elementwise in the DataArray with other strings. The other strings can either be string scalars or other array-like. Dimensions are automatically broadcast together. An optional separator `sep` can also be specified. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- *others : str or array-like of str Strings or array-like of strings to concatenate elementwise with the current DataArray. sep : str or array-like of str, default: "". Separator to use between strings. It is broadcast in the same way as the other input strings. If array-like, its dimensions will be placed at the end of the output array dimensions. Returns ------- concatenated : same type as values Examples -------- Create a string array >>> myarray = xr.DataArray( ... ["11111", "4"], ... dims=["X"], ... ) Create some arrays to concatenate with it >>> values_1 = xr.DataArray( ... ["a", "bb", "cccc"], ... dims=["Y"], ... ) >>> values_2 = np.array(3.4) >>> values_3 = "" >>> values_4 = np.array("test", dtype=np.str_) Determine the separator to use >>> seps = xr.DataArray( ... [" ", ", "], ... dims=["ZZ"], ... 
) Concatenate the arrays using the separator >>> myarray.str.cat(values_1, values_2, values_3, values_4, sep=seps) <xarray.DataArray (X: 2, Y: 3, ZZ: 2)> Size: 1kB array([[['11111 a 3.4 test', '11111, a, 3.4, , test'], ['11111 bb 3.4 test', '11111, bb, 3.4, , test'], ['11111 cccc 3.4 test', '11111, cccc, 3.4, , test']], <BLANKLINE> [['4 a 3.4 test', '4, a, 3.4, , test'], ['4 bb 3.4 test', '4, bb, 3.4, , test'], ['4 cccc 3.4 test', '4, cccc, 3.4, , test']]], dtype='<U24') Dimensions without coordinates: X, Y, ZZ See Also -------- pandas.Series.str.cat str.join """ sep = self._stringify(sep) others = tuple(self._stringify(x) for x in others) others = others + (sep,) # sep will go at the end of the input arguments. func = lambda *x: x[-1].join(x[:-1]) return self._apply( func=func, func_args=others, dtype=self._obj.dtype.kind, ) def join( self, dim: Hashable = None, sep: str | bytes | Any = "", ) -> T_DataArray: """ Concatenate strings in a DataArray along a particular dimension. An optional separator `sep` can also be specified. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable, optional Dimension along which the strings should be concatenated. Only one dimension is allowed at a time. Optional for 0D or 1D DataArrays, required for multidimensional DataArrays. sep : str or array-like, default: "". Separator to use between strings. It is broadcast in the same way as the other input strings. If array-like, its dimensions will be placed at the end of the output array dimensions. Returns ------- joined : same type as values Examples -------- Create an array >>> values = xr.DataArray( ... [["a", "bab", "abc"], ["abcd", "", "abcdef"]], ... dims=["X", "Y"], ... ) Determine the separator >>> seps = xr.DataArray( ... ["-", "_"], ... dims=["ZZ"], ... 
) Join the strings along a given dimension >>> values.str.join(dim="Y", sep=seps) <xarray.DataArray (X: 2, ZZ: 2)> Size: 192B array([['a-bab-abc', 'a_bab_abc'], ['abcd--abcdef', 'abcd__abcdef']], dtype='<U12') Dimensions without coordinates: X, ZZ See Also -------- pandas.Series.str.join str.join """ if self._obj.ndim > 1 and dim is None: raise ValueError("Dimension must be specified for multidimensional arrays.") if self._obj.ndim > 1: # Move the target dimension to the start and split along it dimshifted = list(self._obj.transpose(dim, ...)) elif self._obj.ndim == 1: dimshifted = list(self._obj) else: dimshifted = [self._obj] start, *others = dimshifted # concatenate the resulting arrays return start.str.cat(*others, sep=sep) def format( self, *args: Any, **kwargs: Any, ) -> T_DataArray: """ Perform python string formatting on each element of the DataArray. This is equivalent to calling `str.format` on every element of the DataArray. The replacement values can either be a string-like scalar or array-like of string-like values. If array-like, the values will be broadcast and applied elementwiseto the input DataArray. .. note:: Array-like values provided as `*args` will have their dimensions added even if those arguments are not used in any string formatting. .. warning:: Array-like arguments are only applied elementwise for `*args`. For `**kwargs`, values are used as-is. Parameters ---------- *args : str or bytes or array-like of str or bytes Values for positional formatting. If array-like, the values are broadcast and applied elementwise. The dimensions will be placed at the end of the output array dimensions in the order they are provided. **kwargs : str or bytes or array-like of str or bytes Values for keyword-based formatting. These are **not** broadcast or applied elementwise. Returns ------- formatted : same type as values Examples -------- Create an array to format. >>> values = xr.DataArray( ... ["{} is {adj0}", "{} and {} are {adj1}"], ... dims=["X"], ... 
) Set the values to fill. >>> noun0 = xr.DataArray( ... ["spam", "egg"], ... dims=["Y"], ... ) >>> noun1 = xr.DataArray( ... ["lancelot", "arthur"], ... dims=["ZZ"], ... ) >>> adj0 = "unexpected" >>> adj1 = "like a duck" Insert the values into the array >>> values.str.format(noun0, noun1, adj0=adj0, adj1=adj1) <xarray.DataArray (X: 2, Y: 2, ZZ: 2)> Size: 1kB array([[['spam is unexpected', 'spam is unexpected'], ['egg is unexpected', 'egg is unexpected']], <BLANKLINE> [['spam and lancelot are like a duck', 'spam and arthur are like a duck'], ['egg and lancelot are like a duck', 'egg and arthur are like a duck']]], dtype='<U33') Dimensions without coordinates: X, Y, ZZ See Also -------- str.format """ args = tuple(self._stringify(x) for x in args) kwargs = {key: self._stringify(val) for key, val in kwargs.items()} return self._apply( func=self._obj.dtype.type.format, func_args=args, func_kwargs={"kwargs": kwargs}, ) def capitalize(self) -> T_DataArray: """ Convert strings in the array to be capitalized. Returns ------- capitalized : same type as values Examples -------- >>> da = xr.DataArray( ... ["temperature", "PRESSURE", "PreCipiTation", "daily rainfall"], dims="x" ... ) >>> da <xarray.DataArray (x: 4)> Size: 224B array(['temperature', 'PRESSURE', 'PreCipiTation', 'daily rainfall'], dtype='<U14') Dimensions without coordinates: x >>> capitalized = da.str.capitalize() >>> capitalized <xarray.DataArray (x: 4)> Size: 224B array(['Temperature', 'Pressure', 'Precipitation', 'Daily rainfall'], dtype='<U14') Dimensions without coordinates: x """ return self._apply(func=lambda x: x.capitalize()) def lower(self) -> T_DataArray: """ Convert strings in the array to lowercase. 
Returns ------- lowered : same type as values Examples -------- >>> da = xr.DataArray(["Temperature", "PRESSURE"], dims="x") >>> da <xarray.DataArray (x: 2)> Size: 88B array(['Temperature', 'PRESSURE'], dtype='<U11') Dimensions without coordinates: x >>> lowered = da.str.lower() >>> lowered <xarray.DataArray (x: 2)> Size: 88B array(['temperature', 'pressure'], dtype='<U11') Dimensions without coordinates: x """ return self._apply(func=lambda x: x.lower()) def swapcase(self) -> T_DataArray: """ Convert strings in the array to be swapcased. Returns ------- swapcased : same type as values Examples -------- >>> import xarray as xr >>> da = xr.DataArray(["temperature", "PRESSURE", "HuMiDiTy"], dims="x") >>> da <xarray.DataArray (x: 3)> Size: 132B array(['temperature', 'PRESSURE', 'HuMiDiTy'], dtype='<U11') Dimensions without coordinates: x >>> swapcased = da.str.swapcase() >>> swapcased <xarray.DataArray (x: 3)> Size: 132B array(['TEMPERATURE', 'pressure', 'hUmIdItY'], dtype='<U11') Dimensions without coordinates: x """ return self._apply(func=lambda x: x.swapcase()) def title(self) -> T_DataArray: """ Convert strings in the array to titlecase. Returns ------- titled : same type as values Examples -------- >>> da = xr.DataArray(["temperature", "PRESSURE", "HuMiDiTy"], dims="x") >>> da <xarray.DataArray (x: 3)> Size: 132B array(['temperature', 'PRESSURE', 'HuMiDiTy'], dtype='<U11') Dimensions without coordinates: x >>> titled = da.str.title() >>> titled <xarray.DataArray (x: 3)> Size: 132B array(['Temperature', 'Pressure', 'Humidity'], dtype='<U11') Dimensions without coordinates: x """ return self._apply(func=lambda x: x.title()) def upper(self) -> T_DataArray: """ Convert strings in the array to uppercase. 
Returns ------- uppered : same type as values Examples -------- >>> da = xr.DataArray(["temperature", "HuMiDiTy"], dims="x") >>> da <xarray.DataArray (x: 2)> Size: 88B array(['temperature', 'HuMiDiTy'], dtype='<U11') Dimensions without coordinates: x >>> uppered = da.str.upper() >>> uppered <xarray.DataArray (x: 2)> Size: 88B array(['TEMPERATURE', 'HUMIDITY'], dtype='<U11') Dimensions without coordinates: x """ return self._apply(func=lambda x: x.upper()) def casefold(self) -> T_DataArray: """ Convert strings in the array to be casefolded. Casefolding is similar to converting to lowercase, but removes all case distinctions. This is important in some languages that have more complicated cases and case conversions. For example, the 'ß' character in German is case-folded to 'ss', whereas it is lowercased to 'ß'. Returns ------- casefolded : same type as values Examples -------- >>> da = xr.DataArray(["TEMPERATURE", "HuMiDiTy"], dims="x") >>> da <xarray.DataArray (x: 2)> Size: 88B array(['TEMPERATURE', 'HuMiDiTy'], dtype='<U11') Dimensions without coordinates: x >>> casefolded = da.str.casefold() >>> casefolded <xarray.DataArray (x: 2)> Size: 88B array(['temperature', 'humidity'], dtype='<U11') Dimensions without coordinates: x >>> da = xr.DataArray(["ß", "İ"], dims="x") >>> da <xarray.DataArray (x: 2)> Size: 8B array(['ß', 'İ'], dtype='<U1') Dimensions without coordinates: x >>> casefolded = da.str.casefold() >>> casefolded <xarray.DataArray (x: 2)> Size: 16B array(['ss', 'i̇'], dtype='<U2') Dimensions without coordinates: x """ return self._apply(func=lambda x: x.casefold()) def normalize( self, form: str, ) -> T_DataArray: """ Return the Unicode normal form for the strings in the datarray. For more information on the forms, see the documentation for :func:`unicodedata.normalize`. Parameters ---------- form : {"NFC", "NFKC", "NFD", "NFKD"} Unicode form. 
Returns ------- normalized : same type as values """ return self._apply(func=lambda x: normalize(form, x)) # type: ignore[arg-type] def isalnum(self) -> T_DataArray: """ Check whether all characters in each string are alphanumeric. Returns ------- isalnum : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["H2O", "NaCl-"], dims="x") >>> da <xarray.DataArray (x: 2)> Size: 40B array(['H2O', 'NaCl-'], dtype='<U5') Dimensions without coordinates: x >>> isalnum = da.str.isalnum() >>> isalnum <xarray.DataArray (x: 2)> Size: 2B array([ True, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isalnum(), dtype=bool) def isalpha(self) -> T_DataArray: """ Check whether all characters in each string are alphabetic. Returns ------- isalpha : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["Mn", "H2O", "NaCl-"], dims="x") >>> da <xarray.DataArray (x: 3)> Size: 60B array(['Mn', 'H2O', 'NaCl-'], dtype='<U5') Dimensions without coordinates: x >>> isalpha = da.str.isalpha() >>> isalpha <xarray.DataArray (x: 3)> Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isalpha(), dtype=bool) def isdecimal(self) -> T_DataArray: """ Check whether all characters in each string are decimal. Returns ------- isdecimal : array of bool Array of boolean values with the same shape as the original array. 
Examples -------- >>> da = xr.DataArray(["2.3", "123", "0"], dims="x") >>> da <xarray.DataArray (x: 3)> Size: 36B array(['2.3', '123', '0'], dtype='<U3') Dimensions without coordinates: x >>> isdecimal = da.str.isdecimal() >>> isdecimal <xarray.DataArray (x: 3)> Size: 3B array([False, True, True]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isdecimal(), dtype=bool) def isdigit(self) -> T_DataArray: """ Check whether all characters in each string are digits. Returns ------- isdigit : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["123", "1.2", "0", "CO2", "NaCl"], dims="x") >>> da <xarray.DataArray (x: 5)> Size: 80B array(['123', '1.2', '0', 'CO2', 'NaCl'], dtype='<U4') Dimensions without coordinates: x >>> isdigit = da.str.isdigit() >>> isdigit <xarray.DataArray (x: 5)> Size: 5B array([ True, False, True, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isdigit(), dtype=bool) def islower(self) -> T_DataArray: """ Check whether all characters in each string are lowercase. Returns ------- islower : array of bool Array of boolean values with the same shape as the original array indicating whether all characters of each element of the string array are lowercase (True) or not (False). Examples -------- >>> da = xr.DataArray(["temperature", "HUMIDITY", "pREciPiTaTioN"], dims="x") >>> da <xarray.DataArray (x: 3)> Size: 156B array(['temperature', 'HUMIDITY', 'pREciPiTaTioN'], dtype='<U13') Dimensions without coordinates: x >>> islower = da.str.islower() >>> islower <xarray.DataArray (x: 3)> Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.islower(), dtype=bool) def isnumeric(self) -> T_DataArray: """ Check whether all characters in each string are numeric. Returns ------- isnumeric : array of bool Array of boolean values with the same shape as the original array. 
Examples -------- >>> da = xr.DataArray(["123", "2.3", "H2O", "NaCl-", "Mn"], dims="x") >>> da <xarray.DataArray (x: 5)> Size: 100B array(['123', '2.3', 'H2O', 'NaCl-', 'Mn'], dtype='<U5') Dimensions without coordinates: x >>> isnumeric = da.str.isnumeric() >>> isnumeric <xarray.DataArray (x: 5)> Size: 5B array([ True, False, False, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isnumeric(), dtype=bool) def isspace(self) -> T_DataArray: """ Check whether all characters in each string are spaces. Returns ------- isspace : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["", " ", "\\t", "\\n"], dims="x") >>> da <xarray.DataArray (x: 4)> Size: 16B array(['', ' ', '\\t', '\\n'], dtype='<U1') Dimensions without coordinates: x >>> isspace = da.str.isspace() >>> isspace <xarray.DataArray (x: 4)> Size: 4B array([False, True, True, True]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isspace(), dtype=bool) def istitle(self) -> T_DataArray: """ Check whether all characters in each string are titlecase. Returns ------- istitle : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray( ... [ ... "The Evolution Of Species", ... "The Theory of relativity", ... "the quantum mechanics of atoms", ... ], ... dims="title", ... ) >>> da <xarray.DataArray (title: 3)> Size: 360B array(['The Evolution Of Species', 'The Theory of relativity', 'the quantum mechanics of atoms'], dtype='<U30') Dimensions without coordinates: title >>> istitle = da.str.istitle() >>> istitle <xarray.DataArray (title: 3)> Size: 3B array([ True, False, False]) Dimensions without coordinates: title """ return self._apply(func=lambda x: x.istitle(), dtype=bool) def isupper(self) -> T_DataArray: """ Check whether all characters in each string are uppercase. 
Returns ------- isupper : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["TEMPERATURE", "humidity", "PreCIpiTAtioN"], dims="x") >>> da <xarray.DataArray (x: 3)> Size: 156B array(['TEMPERATURE', 'humidity', 'PreCIpiTAtioN'], dtype='<U13') Dimensions without coordinates: x >>> isupper = da.str.isupper() >>> isupper <xarray.DataArray (x: 3)> Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isupper(), dtype=bool) def count( self, pat: str | bytes | Pattern | Any, flags: int = 0, case: bool | None = None ) -> T_DataArray: """ Count occurrences of pattern in each string of the array. This function is used to count the number of times a particular regex pattern is repeated in each of the string elements of the :class:`~xarray.DataArray`. The pattern `pat` can either be a single ``str`` or `re.Pattern` or array-like of ``str`` or `re.Pattern`. If array-like, it is broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern or array-like of str or re.Pattern A string containing a regular expression or a compiled regular expression object. If array-like, it is broadcast. flags : int, default: 0 Flags to pass through to the re module, e.g. `re.IGNORECASE`. see `compilation-flags <https://docs.python.org/3/howto/regex.html#compilation-flags>`_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. 
Returns ------- counts : array of int Examples -------- >>> da = xr.DataArray(["jjklmn", "opjjqrs", "t-JJ99vwx"], dims="x") >>> da <xarray.DataArray (x: 3)> Size: 108B array(['jjklmn', 'opjjqrs', 't-JJ99vwx'], dtype='<U9') Dimensions without coordinates: x Using a string: >>> da.str.count("jj") <xarray.DataArray (x: 3)> Size: 24B array([1, 1, 0]) Dimensions without coordinates: x Enable case-insensitive matching by setting case to false: >>> counts = da.str.count("jj", case=False) >>> counts <xarray.DataArray (x: 3)> Size: 24B array([1, 1, 1]) Dimensions without coordinates: x Using regex: >>> pat = "JJ[0-9]{2}[a-z]{3}" >>> counts = da.str.count(pat) >>> counts <xarray.DataArray (x: 3)> Size: 24B array([0, 0, 1]) Dimensions without coordinates: x Using an array of strings (the pattern will be broadcast against the array): >>> pat = xr.DataArray(["jj", "JJ"], dims="y") >>> counts = da.str.count(pat) >>> counts <xarray.DataArray (x: 3, y: 2)> Size: 48B array([[1, 0], [1, 0], [0, 1]]) Dimensions without coordinates: x, y """ pat = self._re_compile(pat=pat, flags=flags, case=case) func = lambda x, ipat: len(ipat.findall(x)) return self._apply(func=func, func_args=(pat,), dtype=int) def startswith(self, pat: str | bytes | Any) -> T_DataArray: """ Test if the start of each string in the array matches a pattern. The pattern `pat` can either be a ``str`` or array-like of ``str``. If array-like, it will be broadcast and applied elementwise. Parameters ---------- pat : str Character sequence. Regular expressions are not accepted. If array-like, it is broadcast. Returns ------- startswith : array of bool An array of booleans indicating whether the given pattern matches the start of each string element. 
Examples -------- >>> da = xr.DataArray(["$100", "£23", "100"], dims="x") >>> da <xarray.DataArray (x: 3)> Size: 48B array(['$100', '£23', '100'], dtype='<U4') Dimensions without coordinates: x >>> startswith = da.str.startswith("$") >>> startswith <xarray.DataArray (x: 3)> Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ pat = self._stringify(pat) func = lambda x, y: x.startswith(y) return self._apply(func=func, func_args=(pat,), dtype=bool) def endswith(self, pat: str | bytes | Any) -> T_DataArray: """ Test if the end of each string in the array matches a pattern. The pattern `pat` can either be a ``str`` or array-like of ``str``. If array-like, it will be broadcast and applied elementwise. Parameters ---------- pat : str Character sequence. Regular expressions are not accepted. If array-like, it is broadcast. Returns ------- endswith : array of bool A Series of booleans indicating whether the given pattern matches the end of each string element. Examples -------- >>> da = xr.DataArray(["10C", "10c", "100F"], dims="x") >>> da <xarray.DataArray (x: 3)> Size: 48B array(['10C', '10c', '100F'], dtype='<U4') Dimensions without coordinates: x >>> endswith = da.str.endswith("C") >>> endswith <xarray.DataArray (x: 3)> Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ pat = self._stringify(pat) func = lambda x, y: x.endswith(y) return self._apply(func=func, func_args=(pat,), dtype=bool) def pad( self, width: int | Any, side: str = "left", fillchar: str | bytes | Any = " ", ) -> T_DataArray: """ Pad strings in the array up to width. If `width` or 'fillchar` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- width : int or array-like of int Minimum width of resulting string; additional characters will be filled with character defined in ``fillchar``. If array-like, it is broadcast. side : {"left", "right", "both"}, default: "left" Side from which to fill resulting string. 
        fillchar : str or array-like of str, default: " "
            Additional character for filling, default is a space.
            If array-like, it is broadcast.

        Returns
        -------
        filled : same type as values
            Array with a minimum number of char in each element.

        Examples
        --------
        Pad strings in the array with a single string on the left side.

        Define the string in the array.

        >>> da = xr.DataArray(["PAR184", "TKO65", "NBO9139", "NZ39"], dims="x")
        >>> da
        <xarray.DataArray (x: 4)> Size: 112B
        array(['PAR184', 'TKO65', 'NBO9139', 'NZ39'], dtype='<U7')
        Dimensions without coordinates: x

        Pad the strings

        >>> filled = da.str.pad(8, side="left", fillchar="0")
        >>> filled
        <xarray.DataArray (x: 4)> Size: 128B
        array(['00PAR184', '000TKO65', '0NBO9139', '0000NZ39'], dtype='<U8')
        Dimensions without coordinates: x

        Pad strings on the right side

        >>> filled = da.str.pad(8, side="right", fillchar="0")
        >>> filled
        <xarray.DataArray (x: 4)> Size: 128B
        array(['PAR18400', 'TKO65000', 'NBO91390', 'NZ390000'], dtype='<U8')
        Dimensions without coordinates: x

        Pad strings on both sides

        >>> filled = da.str.pad(8, side="both", fillchar="0")
        >>> filled
        <xarray.DataArray (x: 4)> Size: 128B
        array(['0PAR1840', '0TKO6500', 'NBO91390', '00NZ3900'], dtype='<U8')
        Dimensions without coordinates: x

        Using an array-like width

        >>> width = xr.DataArray([8, 10], dims="y")
        >>> filled = da.str.pad(width, side="left", fillchar="0")
        >>> filled
        <xarray.DataArray (x: 4, y: 2)> Size: 320B
        array([['00PAR184', '0000PAR184'],
               ['000TKO65', '00000TKO65'],
               ['0NBO9139', '000NBO9139'],
               ['0000NZ39', '000000NZ39']], dtype='<U10')
        Dimensions without coordinates: x, y

        Using an array-like value for fillchar

        >>> fillchar = xr.DataArray(["0", "-"], dims="y")
        >>> filled = da.str.pad(8, side="left", fillchar=fillchar)
        >>> filled
        <xarray.DataArray (x: 4, y: 2)> Size: 256B
        array([['00PAR184', '--PAR184'],
               ['000TKO65', '---TKO65'],
               ['0NBO9139', '-NBO9139'],
               ['0000NZ39', '----NZ39']], dtype='<U8')
        Dimensions without coordinates: x, y
        """
        # NB: padding on the "left" means the text is right-justified (and
        # vice versa), hence the seemingly inverted mapping below.
        if side == "left":
            func = self.rjust
        elif side == "right":
            func = self.ljust
        elif side == "both":
            func = self.center
        else:  # pragma: no cover
            raise ValueError("Invalid side")

        return func(width=width, fillchar=fillchar)

    def _padder(
        self,
        *,
        func: Callable,
        width: int | Any,
        fillchar: str | bytes | Any = " ",
    ) -> T_DataArray:
        """
        Wrapper function to handle padding operations.

        ``func`` is an unbound str/bytes padding method (e.g.
        ``str.center``); ``width`` and ``fillchar`` may be array-like and
        are broadcast elementwise by ``_apply``.
        """
        fillchar = self._stringify(fillchar)

        def overfunc(x, iwidth, ifillchar):
            # Each broadcast fill value must be exactly one character,
            # matching the behavior of str.center/ljust/rjust.
            if len(ifillchar) != 1:
                raise TypeError("fillchar must be a character, not str")
            return func(x, int(iwidth), ifillchar)

        return self._apply(func=overfunc, func_args=(width, fillchar))

    def center(
        self, width: int | Any, fillchar: str | bytes | Any = " "
    ) -> T_DataArray:
        """
        Pad left and right side of each string in the array.

        If `width` or `fillchar` is array-like, they are broadcast
        against the array and applied elementwise.

        Parameters
        ----------
        width : int or array-like of int
            Minimum width of resulting string; additional characters will be
            filled with ``fillchar``. If array-like, it is broadcast.
        fillchar : str or array-like of str, default: " "
            Additional character for filling, default is a space.
            If array-like, it is broadcast.

        Returns
        -------
        filled : same type as values
        """
        func = self._obj.dtype.type.center
        return self._padder(func=func, width=width, fillchar=fillchar)

    def ljust(
        self,
        width: int | Any,
        fillchar: str | bytes | Any = " ",
    ) -> T_DataArray:
        """
        Pad right side of each string in the array.

        If `width` or `fillchar` is array-like, they are broadcast
        against the array and applied elementwise.

        Parameters
        ----------
        width : int or array-like of int
            Minimum width of resulting string; additional characters will be
            filled with ``fillchar``. If array-like, it is broadcast.
        fillchar : str or array-like of str, default: " "
            Additional character for filling, default is a space.
            If array-like, it is broadcast.

        Returns
        -------
        filled : same type as values
        """
        func = self._obj.dtype.type.ljust
        return self._padder(func=func, width=width, fillchar=fillchar)

    def rjust(
        self,
        width: int | Any,
        fillchar: str | bytes | Any = " ",
    ) -> T_DataArray:
        """
        Pad left side of each string in the array.

        If `width` or `fillchar` is array-like, they are broadcast
        against the array and applied elementwise.

        Parameters
        ----------
        width : int or array-like of int
            Minimum width of resulting string; additional characters will be
            filled with ``fillchar``. If array-like, it is broadcast.
        fillchar : str or array-like of str, default: " "
            Additional character for filling, default is a space.
            If array-like, it is broadcast.

        Returns
        -------
        filled : same type as values
        """
        func = self._obj.dtype.type.rjust
        return self._padder(func=func, width=width, fillchar=fillchar)

    def zfill(self, width: int | Any) -> T_DataArray:
        """
        Pad each string in the array by prepending '0' characters.

        Strings in the array are padded with '0' characters on the
        left of the string to reach a total string length `width`. Strings
        in the array with length greater or equal to `width` are unchanged.

        If `width` is array-like, it is broadcast against the array and applied
        elementwise.

        Parameters
        ----------
        width : int or array-like of int
            Minimum length of resulting string; strings with length less
            than `width` will be prepended with '0' characters.
            If array-like, it is broadcast.

        Returns
        -------
        filled : same type as values
        """
        # zfill is simply right-justification with a fixed '0' fill character.
        return self.rjust(width, fillchar="0")

    def contains(
        self,
        pat: str | bytes | Pattern | Any,
        case: bool | None = None,
        flags: int = 0,
        regex: bool = True,
    ) -> T_DataArray:
        """
        Test if pattern or regex is contained within each string of the array.

        Return boolean array based on whether a given pattern or regex is
        contained within a string of the array.

        The pattern `pat` can either be a single ``str`` or `re.Pattern` or
        array-like of ``str`` or `re.Pattern`.
        If array-like, it is broadcast against the array and applied
        elementwise.

        Parameters
        ----------
        pat : str or re.Pattern or array-like of str or re.Pattern
            Character sequence, a string containing a regular expression,
            or a compiled regular expression object. If array-like, it
            is broadcast.
        case : bool, default: True
            If True, case sensitive.
            Cannot be set if `pat` is a compiled regex.
            Equivalent to setting the `re.IGNORECASE` flag.
        flags : int, default: 0
            Flags to pass through to the re module, e.g. `re.IGNORECASE`.
            see `compilation-flags
            <https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
            ``0`` means no flags. Flags can be combined with the bitwise
            or operator ``|``. Cannot be set if `pat` is a compiled regex.
        regex : bool, default: True
            If True, assumes the pat is a regular expression.
            If False, treats the pat as a literal string.
            Cannot be set to `False` if `pat` is a compiled regex.

        Returns
        -------
        contains : array of bool
            An array of boolean values indicating whether the
            given pattern is contained within the string of each
            element of the array.
        """
        is_compiled_re = _contains_compiled_re(pat)
        if is_compiled_re and not regex:
            raise ValueError(
                "Must use regular expression matching for regular expression object."
            )

        if regex:
            if not is_compiled_re:
                pat = self._re_compile(pat=pat, flags=flags, case=case)

            def func(x, ipat):
                if ipat.groups > 0:  # pragma: no cover
                    raise ValueError("This pattern has match groups.")
                return bool(ipat.search(x))

        else:
            pat = self._stringify(pat)
            if case or case is None:
                # Case-sensitive literal containment: plain `in` check.
                func = lambda x, ipat: ipat in x
            elif self._obj.dtype.char == "U":
                # Case-insensitive literal match on str data: casefold both
                # sides and recurse into the case-sensitive path.
                uppered = self.casefold()
                uppat = StringAccessor(pat).casefold()  # type: ignore[type-var]  # hack?
                return uppered.str.contains(uppat, regex=False)  # type: ignore[return-value]
            else:
                # Bytes have no casefold(); upper() both sides instead.
                uppered = self.upper()
                uppat = StringAccessor(pat).upper()  # type: ignore[type-var]  # hack?
                return uppered.str.contains(uppat, regex=False)  # type: ignore[return-value]

        return self._apply(func=func, func_args=(pat,), dtype=bool)

    def match(
        self,
        pat: str | bytes | Pattern | Any,
        case: bool | None = None,
        flags: int = 0,
    ) -> T_DataArray:
        """
        Determine if each string in the array matches a regular expression.

        The pattern `pat` can either be a single ``str`` or `re.Pattern` or
        array-like of ``str`` or `re.Pattern`. If array-like, it is broadcast
        against the array and applied elementwise.

        Parameters
        ----------
        pat : str or re.Pattern or array-like of str or re.Pattern
            A string containing a regular expression or
            a compiled regular expression object. If array-like, it is
            broadcast.
        case : bool, default: True
            If True, case sensitive.
            Cannot be set if `pat` is a compiled regex.
            Equivalent to setting the `re.IGNORECASE` flag.
        flags : int, default: 0
            Flags to pass through to the re module, e.g. `re.IGNORECASE`.
            see `compilation-flags
            <https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
            ``0`` means no flags. Flags can be combined with the bitwise
            or operator ``|``. Cannot be set if `pat` is a compiled regex.

        Returns
        -------
        matched : array of bool
        """
        pat = self._re_compile(pat=pat, flags=flags, case=case)

        # re.match anchors at the start of the string only.
        func = lambda x, ipat: bool(ipat.match(x))
        return self._apply(func=func, func_args=(pat,), dtype=bool)

    def strip(
        self, to_strip: str | bytes | Any = None, side: str = "both"
    ) -> T_DataArray:
        """
        Remove leading and trailing characters.

        Strip whitespaces (including newlines) or a set of specified
        characters from each string in the array from left and/or right sides.

        `to_strip` can either be a ``str`` or array-like of ``str``.
        If array-like, it will be broadcast and applied elementwise.

        Parameters
        ----------
        to_strip : str or array-like of str or None, default: None
            Specifying the set of characters to be removed.
            All combinations of this set of characters
            will be stripped. If None then whitespaces are removed.
            If array-like, it is broadcast.
        side : {"left", "right", "both"}, default: "both"
            Side from which to strip.

        Returns
        -------
        stripped : same type as values
        """
        if to_strip is not None:
            to_strip = self._stringify(to_strip)

        if side == "both":
            func = lambda x, y: x.strip(y)
        elif side == "left":
            func = lambda x, y: x.lstrip(y)
        elif side == "right":
            func = lambda x, y: x.rstrip(y)
        else:  # pragma: no cover
            raise ValueError("Invalid side")

        return self._apply(func=func, func_args=(to_strip,))

    def lstrip(self, to_strip: str | bytes | Any = None) -> T_DataArray:
        """
        Remove leading characters.

        Strip whitespaces (including newlines) or a set of specified
        characters from each string in the array from the left side.

        `to_strip` can either be a ``str`` or array-like of ``str``.
        If array-like, it will be broadcast and applied elementwise.

        Parameters
        ----------
        to_strip : str or array-like of str or None, default: None
            Specifying the set of characters to be removed.
            All combinations of this set of characters
            will be stripped. If None then whitespaces are removed.
            If array-like, it is broadcast.

        Returns
        -------
        stripped : same type as values
        """
        return self.strip(to_strip, side="left")

    def rstrip(self, to_strip: str | bytes | Any = None) -> T_DataArray:
        """
        Remove trailing characters.

        Strip whitespaces (including newlines) or a set of specified
        characters from each string in the array from the right side.

        `to_strip` can either be a ``str`` or array-like of ``str``.
        If array-like, it will be broadcast and applied elementwise.

        Parameters
        ----------
        to_strip : str or array-like of str or None, default: None
            Specifying the set of characters to be removed.
            All combinations of this set of characters
            will be stripped. If None then whitespaces are removed.
            If array-like, it is broadcast.

        Returns
        -------
        stripped : same type as values
        """
        return self.strip(to_strip, side="right")

    def wrap(self, width: int | Any, **kwargs) -> T_DataArray:
        """
        Wrap long strings in the array in paragraphs with length less than `width`.
        This method has the same keyword parameters and defaults as
        :class:`textwrap.TextWrapper`.

        If `width` is array-like, it is broadcast against the array and applied
        elementwise.

        Parameters
        ----------
        width : int or array-like of int
            Maximum line-width.
            If array-like, it is broadcast.
        **kwargs
            keyword arguments passed into :class:`textwrap.TextWrapper`.

        Returns
        -------
        wrapped : same type as values
        """
        # Build one TextWrapper per (broadcast) width, then apply it to
        # each string, joining wrapped lines with newlines.
        ifunc = lambda x: textwrap.TextWrapper(width=x, **kwargs)
        tw = StringAccessor(width)._apply(func=ifunc, dtype=np.object_)  # type: ignore[type-var]  # hack?
        func = lambda x, itw: "\n".join(itw.wrap(x))
        return self._apply(func=func, func_args=(tw,))

    # Mapping is only covariant in its values, maybe use a custom CovariantMapping?
    def translate(self, table: Mapping[Any, str | bytes | int | None]) -> T_DataArray:
        """
        Map characters of each string through the given mapping table.

        Parameters
        ----------
        table : dict-like from and to str or bytes or int
            A mapping of Unicode ordinals to Unicode ordinals, strings, int
            or None. Unmapped characters are left untouched. Characters mapped
            to None are deleted. :meth:`str.maketrans` is a helper function
            for making translation tables.

        Returns
        -------
        translated : same type as values
        """
        func = lambda x: x.translate(table)
        return self._apply(func=func)

    def repeat(
        self,
        repeats: int | Any,
    ) -> T_DataArray:
        """
        Repeat each string in the array.

        If `repeats` is array-like, it is broadcast
        against the array and applied elementwise.

        Parameters
        ----------
        repeats : int or array-like of int
            Number of repetitions.
            If array-like, it is broadcast.

        Returns
        -------
        repeated : same type as values
            Array of repeated string objects.
        """
        # str/bytes multiplication performs the repetition.
        func = lambda x, y: x * y
        return self._apply(func=func, func_args=(repeats,))

    def find(
        self,
        sub: str | bytes | Any,
        start: int | Any = 0,
        end: int | Any = None,
        side: str = "left",
    ) -> T_DataArray:
        """
        Return lowest or highest indexes in each strings in the array
        where the substring is fully contained between [start:end].
        Return -1 on failure.

        If `start`, `end`, or `sub` is array-like, they are broadcast
        against the array and applied elementwise.

        Parameters
        ----------
        sub : str or array-like of str
            Substring being searched.
            If array-like, it is broadcast.
        start : int or array-like of int
            Left edge index.
            If array-like, it is broadcast.
        end : int or array-like of int
            Right edge index.
            If array-like, it is broadcast.
        side : {"left", "right"}, default: "left"
            Starting side for search.

        Returns
        -------
        found : array of int
        """
        sub = self._stringify(sub)

        if side == "left":
            method = "find"
        elif side == "right":
            method = "rfind"
        else:  # pragma: no cover
            raise ValueError("Invalid side")

        func = lambda x, isub, istart, iend: getattr(x, method)(isub, istart, iend)
        return self._apply(func=func, func_args=(sub, start, end), dtype=int)

    def rfind(
        self,
        sub: str | bytes | Any,
        start: int | Any = 0,
        end: int | Any = None,
    ) -> T_DataArray:
        """
        Return highest indexes in each strings in the array
        where the substring is fully contained between [start:end].
        Return -1 on failure.

        If `start`, `end`, or `sub` is array-like, they are broadcast
        against the array and applied elementwise.

        Parameters
        ----------
        sub : str or array-like of str
            Substring being searched.
            If array-like, it is broadcast.
        start : int or array-like of int
            Left edge index.
            If array-like, it is broadcast.
        end : int or array-like of int
            Right edge index.
            If array-like, it is broadcast.

        Returns
        -------
        found : array of int
        """
        return self.find(sub, start=start, end=end, side="right")

    def index(
        self,
        sub: str | bytes | Any,
        start: int | Any = 0,
        end: int | Any = None,
        side: str = "left",
    ) -> T_DataArray:
        """
        Return lowest or highest indexes in each strings where the substring is
        fully contained between [start:end]. This is the same as
        ``str.find`` except instead of returning -1, it raises a ValueError
        when the substring is not found.

        If `start`, `end`, or `sub` is array-like, they are broadcast
        against the array and applied elementwise.

        Parameters
        ----------
        sub : str or array-like of str
            Substring being searched.
            If array-like, it is broadcast.
        start : int or array-like of int
            Left edge index.
            If array-like, it is broadcast.
        end : int or array-like of int
            Right edge index.
            If array-like, it is broadcast.
        side : {"left", "right"}, default: "left"
            Starting side for search.

        Returns
        -------
        found : array of int

        Raises
        ------
        ValueError
            substring is not found
        """
        sub = self._stringify(sub)

        if side == "left":
            method = "index"
        elif side == "right":
            method = "rindex"
        else:  # pragma: no cover
            raise ValueError("Invalid side")

        func = lambda x, isub, istart, iend: getattr(x, method)(isub, istart, iend)
        return self._apply(func=func, func_args=(sub, start, end), dtype=int)

    def rindex(
        self,
        sub: str | bytes | Any,
        start: int | Any = 0,
        end: int | Any = None,
    ) -> T_DataArray:
        """
        Return highest indexes in each strings where the substring is
        fully contained between [start:end]. This is the same as
        ``str.rfind`` except instead of returning -1, it raises a ValueError
        when the substring is not found.

        If `start`, `end`, or `sub` is array-like, they are broadcast
        against the array and applied elementwise.

        Parameters
        ----------
        sub : str or array-like of str
            Substring being searched.
            If array-like, it is broadcast.
        start : int or array-like of int
            Left edge index.
            If array-like, it is broadcast.
        end : int or array-like of int
            Right edge index.
            If array-like, it is broadcast.

        Returns
        -------
        found : array of int

        Raises
        ------
        ValueError
            substring is not found
        """
        return self.index(sub, start=start, end=end, side="right")

    def replace(
        self,
        pat: str | bytes | Pattern | Any,
        repl: str | bytes | Callable | Any,
        n: int | Any = -1,
        case: bool | None = None,
        flags: int = 0,
        regex: bool = True,
    ) -> T_DataArray:
        """
        Replace occurrences of pattern/regex in the array with some string.

        If `pat`, `repl`, or `n` is array-like, they are broadcast
        against the array and applied elementwise.

        Parameters
        ----------
        pat : str or re.Pattern or array-like of str or re.Pattern
            String can be a character sequence or regular expression.
            If array-like, it is broadcast.
        repl : str or callable or array-like of str or callable
            Replacement string or a callable. The callable is passed the regex
            match object and must return a replacement string to be used.
            See :func:`re.sub`.
            If array-like, it is broadcast.
        n : int or array of int, default: -1
            Number of replacements to make from start. Use ``-1`` to replace all.
            If array-like, it is broadcast.
        case : bool, default: True
            If True, case sensitive.
            Cannot be set if `pat` is a compiled regex.
            Equivalent to setting the `re.IGNORECASE` flag.
        flags : int, default: 0
            Flags to pass through to the re module, e.g. `re.IGNORECASE`.
            see `compilation-flags
            <https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
            ``0`` means no flags. Flags can be combined with the bitwise
            or operator ``|``. Cannot be set if `pat` is a compiled regex.
        regex : bool, default: True
            If True, assumes the passed-in pattern is a regular expression.
            If False, treats the pattern as a literal string.
            Cannot be set to False if `pat` is a compiled regex or `repl` is
            a callable.

        Returns
        -------
        replaced : same type as values
            A copy of the object with all matching occurrences of `pat`
            replaced by `repl`.
        """
        if _contains_str_like(repl):
            repl = self._stringify(repl)
        elif not _contains_callable(repl):  # pragma: no cover
            raise TypeError("repl must be a string or callable")

        is_compiled_re = _contains_compiled_re(pat)
        if not regex and is_compiled_re:
            raise ValueError(
                "Cannot use a compiled regex as replacement pattern with regex=False"
            )

        if not regex and callable(repl):
            raise ValueError("Cannot use a callable replacement when regex=False")

        if regex:
            pat = self._re_compile(pat=pat, flags=flags, case=case)
            # re.sub treats count=0 as "replace all", so negative n (e.g. the
            # default -1) is clamped to 0 to get replace-all semantics.
            func = lambda x, ipat, irepl, i_n: ipat.sub(
                repl=irepl, string=x, count=max(i_n, 0)
            )
        else:
            pat = self._stringify(pat)
            func = lambda x, ipat, irepl, i_n: x.replace(ipat, irepl, i_n)
        return self._apply(func=func, func_args=(pat, repl, n))

    def extract(
        self,
        pat: str | bytes | Pattern | Any,
        dim: Hashable,
        case: bool | None = None,
        flags: int = 0,
    ) -> T_DataArray:
        r"""
        Extract the first match of capture groups in the regex pat as a new
        dimension in a DataArray.

        For each string in the DataArray, extract groups from the first match
        of regular expression pat.

        If `pat` is array-like, it is broadcast against the array and applied
        elementwise.

        Parameters
        ----------
        pat : str or re.Pattern or array-like of str or re.Pattern
            A string containing a regular expression or a compiled regular
            expression object. If array-like, it is broadcast.
        dim : hashable or None
            Name of the new dimension to store the captured strings in.
            If None, the pattern must have only one capture group and the
            resulting DataArray will have the same size as the original.
        case : bool, default: True
            If True, case sensitive.
            Cannot be set if `pat` is a compiled regex.
            Equivalent to setting the `re.IGNORECASE` flag.
        flags : int, default: 0
            Flags to pass through to the re module, e.g. `re.IGNORECASE`.
            see `compilation-flags
            <https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
            ``0`` means no flags. Flags can be combined with the bitwise
            or operator ``|``. Cannot be set if `pat` is a compiled regex.

        Returns
        -------
        extracted : same type as values or object array

        Raises
        ------
        ValueError
            `pat` has no capture groups.
        ValueError
            `dim` is None and there is more than one capture group.
        ValueError
            `case` is set when `pat` is a compiled regular expression.
        KeyError
            The given dimension is already present in the DataArray.

        Examples
        --------
        Create a string array

        >>> value = xr.DataArray(
        ...     [
        ...         [
        ...             "a_Xy_0",
        ...             "ab_xY_10-bab_Xy_110-baab_Xy_1100",
        ...             "abc_Xy_01-cbc_Xy_2210",
        ...         ],
        ...         [
        ...             "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210",
        ...             "",
        ...             "abcdef_Xy_101-fef_Xy_5543210",
        ...         ],
        ...     ],
        ...     dims=["X", "Y"],
        ... )

        Extract matches

        >>> value.str.extract(r"(\w+)_Xy_(\d*)", dim="match")
        <xarray.DataArray (X: 2, Y: 3, match: 2)> Size: 288B
        array([[['a', '0'],
                ['bab', '110'],
                ['abc', '01']],
        <BLANKLINE>
               [['abcd', ''],
                ['', ''],
                ['abcdef', '101']]], dtype='<U6')
        Dimensions without coordinates: X, Y, match

        See Also
        --------
        DataArray.str.extractall
        DataArray.str.findall
        re.compile
        re.search
        pandas.Series.str.extract
        """
        pat = self._re_compile(pat=pat, flags=flags, case=case)

        if isinstance(pat, re.Pattern):
            maxgroups = pat.groups
        else:
            # pat is an array of compiled patterns: use the largest group
            # count across all of them for the output dimension size.
            maxgroups = (
                _apply_str_ufunc(obj=pat, func=lambda x: x.groups, dtype=np.int_)
                .max()
                .data.tolist()
            )

        if maxgroups == 0:
            raise ValueError("No capture groups found in pattern.")

        if dim is None and maxgroups != 1:
            raise ValueError(
                "Dimension must be specified if more than one capture group is given."
            )

        if dim is not None and dim in self._obj.dims:
            raise KeyError(f"Dimension '{dim}' already present in DataArray.")

        def _get_res_single(val, pat):
            # Single capture group: return the captured text ("" on no match).
            match = pat.search(val)
            if match is None:
                return ""
            res = match.group(1)
            if res is None:
                res = ""
            return res

        def _get_res_multi(val, pat):
            # Multiple capture groups: return one array entry per group,
            # with "" standing in for non-participating groups / no match.
            match = pat.search(val)
            if match is None:
                return np.array([""], val.dtype)
            match = match.groups()
            match = [grp if grp is not None else "" for grp in match]
            return np.array(match, val.dtype)

        if dim is None:
            return self._apply(func=_get_res_single, func_args=(pat,))
        else:
            # dtype MUST be object or strings can be truncated
            # See: https://github.com/numpy/numpy/issues/8352
            return duck_array_ops.astype(
                self._apply(
                    func=_get_res_multi,
                    func_args=(pat,),
                    dtype=np.object_,
                    output_core_dims=[[dim]],
                    output_sizes={dim: maxgroups},
                ),
                self._obj.dtype.kind,
            )

    def extractall(
        self,
        pat: str | bytes | Pattern | Any,
        group_dim: Hashable,
        match_dim: Hashable,
        case: bool | None = None,
        flags: int = 0,
    ) -> T_DataArray:
        r"""
        Extract all matches of capture groups in the regex pat as new
        dimensions in a DataArray.

        For each string in the DataArray, extract groups from all matches
        of regular expression pat.
        Equivalent to applying re.findall() to all the elements in the DataArray
        and splitting the results across dimensions.

        If `pat` is array-like, it is broadcast against the array and applied
        elementwise.

        Parameters
        ----------
        pat : str or re.Pattern
            A string containing a regular expression or a compiled regular
            expression object. If array-like, it is broadcast.
        group_dim : hashable
            Name of the new dimensions corresponding to the capture groups.
            This dimension is added to the new DataArray first.
        match_dim : hashable
            Name of the new dimensions corresponding to the matches for each group.
            This dimension is added to the new DataArray second.
        case : bool, default: True
            If True, case sensitive.
            Cannot be set if `pat` is a compiled regex.
            Equivalent to setting the `re.IGNORECASE` flag.
        flags : int, default: 0
            Flags to pass through to the re module, e.g. `re.IGNORECASE`.
            see `compilation-flags
            <https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
            ``0`` means no flags. Flags can be combined with the bitwise
            or operator ``|``. Cannot be set if `pat` is a compiled regex.

        Returns
        -------
        extracted : same type as values or object array

        Raises
        ------
        ValueError
            `pat` has no capture groups.
        ValueError
            `case` is set when `pat` is a compiled regular expression.
        KeyError
            Either of the given dimensions is already present in the DataArray.
        KeyError
            The given dimensions names are the same.

        Examples
        --------
        Create a string array

        >>> value = xr.DataArray(
        ...     [
        ...         [
        ...             "a_Xy_0",
        ...             "ab_xY_10-bab_Xy_110-baab_Xy_1100",
        ...             "abc_Xy_01-cbc_Xy_2210",
        ...         ],
        ...         [
        ...             "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210",
        ...             "",
        ...             "abcdef_Xy_101-fef_Xy_5543210",
        ...         ],
        ...     ],
        ...     dims=["X", "Y"],
        ... )

        Extract matches

        >>> value.str.extractall(
        ...     r"(\w+)_Xy_(\d*)", group_dim="group", match_dim="match"
        ... )
        <xarray.DataArray (X: 2, Y: 3, group: 3, match: 2)> Size: 1kB
        array([[[['a', '0'],
                 ['', ''],
                 ['', '']],
        <BLANKLINE>
                [['bab', '110'],
                 ['baab', '1100'],
                 ['', '']],
        <BLANKLINE>
                [['abc', '01'],
                 ['cbc', '2210'],
                 ['', '']]],
        <BLANKLINE>
        <BLANKLINE>
               [[['abcd', ''],
                 ['dcd', '33210'],
                 ['dccd', '332210']],
        <BLANKLINE>
                [['', ''],
                 ['', ''],
                 ['', '']],
        <BLANKLINE>
                [['abcdef', '101'],
                 ['fef', '5543210'],
                 ['', '']]]], dtype='<U7')
        Dimensions without coordinates: X, Y, group, match

        See Also
        --------
        DataArray.str.extract
        DataArray.str.findall
        re.compile
        re.findall
        pandas.Series.str.extractall
        """
        pat = self._re_compile(pat=pat, flags=flags, case=case)

        if group_dim in self._obj.dims:
            raise KeyError(
                f"Group dimension '{group_dim}' already present in DataArray."
            )

        if match_dim in self._obj.dims:
            raise KeyError(
                f"Match dimension '{match_dim}' already present in DataArray."
            )

        if group_dim == match_dim:
            raise KeyError(
                f"Group dimension '{group_dim}' is the same as match dimension '{match_dim}'."
            )

        # Size of the match dimension: the most matches found in any element.
        _get_count = lambda x, ipat: len(ipat.findall(x))
        maxcount = (
            self._apply(func=_get_count, func_args=(pat,), dtype=np.int_)
            .max()
            .data.tolist()
        )

        if isinstance(pat, re.Pattern):
            maxgroups = pat.groups
        else:
            maxgroups = (
                _apply_str_ufunc(obj=pat, func=lambda x: x.groups, dtype=np.int_)
                .max()
                .data.tolist()
            )

        # NOTE(review): the `imaxcount` parameter below is never used; the
        # body reads the closure variable `maxcount` directly (they hold the
        # same value, so behavior is unaffected) — consider unifying.
        def _get_res(val, ipat, imaxcount=maxcount, dtype=self._obj.dtype):
            if ipat.groups == 0:
                raise ValueError("No capture groups found in pattern.")
            matches = ipat.findall(val)
            res = np.zeros([maxcount, ipat.groups], dtype)
            if ipat.groups == 1:
                # findall returns plain strings when there is one group.
                for imatch, match in enumerate(matches):
                    res[imatch, 0] = match
            else:
                # findall returns tuples of group captures otherwise.
                for imatch, match in enumerate(matches):
                    for jmatch, submatch in enumerate(match):
                        res[imatch, jmatch] = submatch
            return res

        return duck_array_ops.astype(
            self._apply(
                # dtype MUST be object or strings can be truncated
                # See: https://github.com/numpy/numpy/issues/8352
                func=_get_res,
                func_args=(pat,),
                dtype=np.object_,
                output_core_dims=[[group_dim, match_dim]],
                output_sizes={group_dim: maxgroups, match_dim: maxcount},
            ),
            self._obj.dtype.kind,
        )

    def findall(
        self,
        pat: str | bytes | Pattern | Any,
        case: bool | None = None,
        flags: int = 0,
    ) -> T_DataArray:
        r"""
        Find all occurrences of pattern or regular expression in the DataArray.

        Equivalent to applying re.findall() to all the elements in the DataArray.
        Results in an object array of lists.
        If there is only one capture group, the lists will be a sequence of matches.
        If there are multiple capture groups, the lists will be a sequence of lists,
        each of which contains a sequence of matches.

        If `pat` is array-like, it is broadcast against the array and applied
        elementwise.

        Parameters
        ----------
        pat : str or re.Pattern
            A string containing a regular expression or a compiled regular
            expression object. If array-like, it is broadcast.
        case : bool, default: True
            If True, case sensitive.
            Cannot be set if `pat` is a compiled regex.
            Equivalent to setting the `re.IGNORECASE` flag.
        flags : int, default: 0
            Flags to pass through to the re module, e.g. `re.IGNORECASE`.
            see `compilation-flags
            <https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
            ``0`` means no flags. Flags can be combined with the bitwise
            or operator ``|``. Cannot be set if `pat` is a compiled regex.

        Returns
        -------
        extracted : object array

        Raises
        ------
        ValueError
            `pat` has no capture groups.
        ValueError
            `case` is set when `pat` is a compiled regular expression.

        Examples
        --------
        Create a string array

        >>> value = xr.DataArray(
        ...     [
        ...         [
        ...             "a_Xy_0",
        ...             "ab_xY_10-bab_Xy_110-baab_Xy_1100",
        ...             "abc_Xy_01-cbc_Xy_2210",
        ...         ],
        ...         [
        ...             "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210",
        ...             "",
        ...             "abcdef_Xy_101-fef_Xy_5543210",
        ...         ],
        ...     ],
        ...     dims=["X", "Y"],
        ... )

        Extract matches

        >>> value.str.findall(r"(\w+)_Xy_(\d*)")
        <xarray.DataArray (X: 2, Y: 3)> Size: 48B
        array([[list([('a', '0')]), list([('bab', '110'), ('baab', '1100')]),
                list([('abc', '01'), ('cbc', '2210')])],
               [list([('abcd', ''), ('dcd', '33210'), ('dccd', '332210')]),
                list([]), list([('abcdef', '101'), ('fef', '5543210')])]],
              dtype=object)
        Dimensions without coordinates: X, Y

        See Also
        --------
        DataArray.str.extract
        DataArray.str.extractall
        re.compile
        re.findall
        pandas.Series.str.findall
        """
        pat = self._re_compile(pat=pat, flags=flags, case=case)

        def func(x, ipat):
            if ipat.groups == 0:
                raise ValueError("No capture groups found in pattern.")

            return ipat.findall(x)

        return self._apply(func=func, func_args=(pat,), dtype=np.object_)

    def _partitioner(
        self,
        *,
        func: Callable,
        dim: Hashable | None,
        sep: str | bytes | Any | None,
    ) -> T_DataArray:
        """
        Implements logic for `partition` and `rpartition`.
""" sep = self._stringify(sep) if dim is None: listfunc = lambda x, isep: list(func(x, isep)) return self._apply(func=listfunc, func_args=(sep,), dtype=np.object_) # _apply breaks on an empty array in this case if not self._obj.size: return self._obj.copy().expand_dims({dim: 0}, axis=-1) arrfunc = lambda x, isep: np.array(func(x, isep), dtype=self._obj.dtype) # dtype MUST be object or strings can be truncated # See: https://github.com/numpy/numpy/issues/8352 return duck_array_ops.astype( self._apply( func=arrfunc, func_args=(sep,), dtype=np.object_, output_core_dims=[[dim]], output_sizes={dim: 3}, ), self._obj.dtype.kind, ) def partition( self, dim: Hashable | None, sep: str | bytes | Any = " ", ) -> T_DataArray: """ Split the strings in the DataArray at the first occurrence of separator `sep`. This method splits the string at the first occurrence of `sep`, and returns 3 elements containing the part before the separator, the separator itself, and the part after the separator. If the separator is not found, return 3 elements containing the string itself, followed by two empty strings. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable or None Name for the dimension to place the 3 elements in. If `None`, place the results as list elements in an object DataArray. sep : str or bytes or array-like, default: " " String to split on. If array-like, it is broadcast. Returns ------- partitioned : same type as values or object array See Also -------- DataArray.str.rpartition str.partition pandas.Series.str.partition """ return self._partitioner(func=self._obj.dtype.type.partition, dim=dim, sep=sep) def rpartition( self, dim: Hashable | None, sep: str | bytes | Any = " ", ) -> T_DataArray: """ Split the strings in the DataArray at the last occurrence of separator `sep`. 
This method splits the string at the last occurrence of `sep`, and returns 3 elements containing the part before the separator, the separator itself, and the part after the separator. If the separator is not found, return 3 elements containing two empty strings, followed by the string itself. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable or None Name for the dimension to place the 3 elements in. If `None`, place the results as list elements in an object DataArray. sep : str or bytes or array-like, default: " " String to split on. If array-like, it is broadcast. Returns ------- rpartitioned : same type as values or object array See Also -------- DataArray.str.partition str.rpartition pandas.Series.str.rpartition """ return self._partitioner(func=self._obj.dtype.type.rpartition, dim=dim, sep=sep) def _splitter( self, *, func: Callable, pre: bool, dim: Hashable, sep: str | bytes | Any | None, maxsplit: int, ) -> DataArray: """ Implements logic for `split` and `rsplit`. 
""" if sep is not None: sep = self._stringify(sep) if dim is None: f_none = lambda x, isep: func(x, isep, maxsplit) return self._apply(func=f_none, func_args=(sep,), dtype=np.object_) # _apply breaks on an empty array in this case if not self._obj.size: return self._obj.copy().expand_dims({dim: 0}, axis=-1) f_count = lambda x, isep: max(len(func(x, isep, maxsplit)), 1) maxsplit = ( self._apply(func=f_count, func_args=(sep,), dtype=np.int_).max().data.item() - 1 ) def _dosplit(mystr, sep, maxsplit=maxsplit, dtype=self._obj.dtype): res = func(mystr, sep, maxsplit) if len(res) < maxsplit + 1: pad = [""] * (maxsplit + 1 - len(res)) if pre: res += pad else: res = pad + res return np.array(res, dtype=dtype) # dtype MUST be object or strings can be truncated # See: https://github.com/numpy/numpy/issues/8352 return duck_array_ops.astype( self._apply( func=_dosplit, func_args=(sep,), dtype=np.object_, output_core_dims=[[dim]], output_sizes={dim: maxsplit}, ), self._obj.dtype.kind, ) def split( self, dim: Hashable | None, sep: str | bytes | Any = None, maxsplit: int = -1, ) -> DataArray: r""" Split strings in a DataArray around the given separator/delimiter `sep`. Splits the string in the DataArray from the beginning, at the specified delimiter string. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable or None Name for the dimension to place the results in. If `None`, place the results as list elements in an object DataArray. sep : str, default: None String to split on. If ``None`` (the default), split on any whitespace. If array-like, it is broadcast. maxsplit : int, default: -1 Limit number of splits in output, starting from the beginning. If -1 (the default), return all splits. Returns ------- splitted : same type as values or object array Examples -------- Create a string DataArray >>> values = xr.DataArray( ... [ ... ["abc def", "spam\t\teggs\tswallow", "red_blue"], ... 
["test0\ntest1\ntest2\n\ntest3", "", "abra ka\nda\tbra"], ... ], ... dims=["X", "Y"], ... ) Split once and put the results in a new dimension >>> values.str.split(dim="splitted", maxsplit=1) <xarray.DataArray (X: 2, Y: 3, splitted: 2)> Size: 864B array([[['abc', 'def'], ['spam', 'eggs\tswallow'], ['red_blue', '']], <BLANKLINE> [['test0', 'test1\ntest2\n\ntest3'], ['', ''], ['abra', 'ka\nda\tbra']]], dtype='<U18') Dimensions without coordinates: X, Y, splitted Split as many times as needed and put the results in a new dimension >>> values.str.split(dim="splitted") <xarray.DataArray (X: 2, Y: 3, splitted: 4)> Size: 768B array([[['abc', 'def', '', ''], ['spam', 'eggs', 'swallow', ''], ['red_blue', '', '', '']], <BLANKLINE> [['test0', 'test1', 'test2', 'test3'], ['', '', '', ''], ['abra', 'ka', 'da', 'bra']]], dtype='<U8') Dimensions without coordinates: X, Y, splitted Split once and put the results in lists >>> values.str.split(dim=None, maxsplit=1) <xarray.DataArray (X: 2, Y: 3)> Size: 48B array([[list(['abc', 'def']), list(['spam', 'eggs\tswallow']), list(['red_blue'])], [list(['test0', 'test1\ntest2\n\ntest3']), list([]), list(['abra', 'ka\nda\tbra'])]], dtype=object) Dimensions without coordinates: X, Y Split as many times as needed and put the results in a list >>> values.str.split(dim=None) <xarray.DataArray (X: 2, Y: 3)> Size: 48B array([[list(['abc', 'def']), list(['spam', 'eggs', 'swallow']), list(['red_blue'])], [list(['test0', 'test1', 'test2', 'test3']), list([]), list(['abra', 'ka', 'da', 'bra'])]], dtype=object) Dimensions without coordinates: X, Y Split only on spaces >>> values.str.split(dim="splitted", sep=" ") <xarray.DataArray (X: 2, Y: 3, splitted: 3)> Size: 2kB array([[['abc', 'def', ''], ['spam\t\teggs\tswallow', '', ''], ['red_blue', '', '']], <BLANKLINE> [['test0\ntest1\ntest2\n\ntest3', '', ''], ['', '', ''], ['abra', '', 'ka\nda\tbra']]], dtype='<U24') Dimensions without coordinates: X, Y, splitted See Also -------- DataArray.str.rsplit 
str.split pandas.Series.str.split """ return self._splitter( func=self._obj.dtype.type.split, pre=True, dim=dim, sep=sep, maxsplit=maxsplit, ) def rsplit( self, dim: Hashable | None, sep: str | bytes | Any = None, maxsplit: int | Any = -1, ) -> DataArray: r""" Split strings in a DataArray around the given separator/delimiter `sep`. Splits the string in the DataArray from the end, at the specified delimiter string. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable or None Name for the dimension to place the results in. If `None`, place the results as list elements in an object DataArray sep : str, default: None String to split on. If ``None`` (the default), split on any whitespace. If array-like, it is broadcast. maxsplit : int, default: -1 Limit number of splits in output, starting from the end. If -1 (the default), return all splits. The final number of split values may be less than this if there are no DataArray elements with that many values. Returns ------- rsplitted : same type as values or object array Examples -------- Create a string DataArray >>> values = xr.DataArray( ... [ ... ["abc def", "spam\t\teggs\tswallow", "red_blue"], ... ["test0\ntest1\ntest2\n\ntest3", "", "abra ka\nda\tbra"], ... ], ... dims=["X", "Y"], ... 
) Split once and put the results in a new dimension >>> values.str.rsplit(dim="splitted", maxsplit=1) <xarray.DataArray (X: 2, Y: 3, splitted: 2)> Size: 816B array([[['abc', 'def'], ['spam\t\teggs', 'swallow'], ['', 'red_blue']], <BLANKLINE> [['test0\ntest1\ntest2', 'test3'], ['', ''], ['abra ka\nda', 'bra']]], dtype='<U17') Dimensions without coordinates: X, Y, splitted Split as many times as needed and put the results in a new dimension >>> values.str.rsplit(dim="splitted") <xarray.DataArray (X: 2, Y: 3, splitted: 4)> Size: 768B array([[['', '', 'abc', 'def'], ['', 'spam', 'eggs', 'swallow'], ['', '', '', 'red_blue']], <BLANKLINE> [['test0', 'test1', 'test2', 'test3'], ['', '', '', ''], ['abra', 'ka', 'da', 'bra']]], dtype='<U8') Dimensions without coordinates: X, Y, splitted Split once and put the results in lists >>> values.str.rsplit(dim=None, maxsplit=1) <xarray.DataArray (X: 2, Y: 3)> Size: 48B array([[list(['abc', 'def']), list(['spam\t\teggs', 'swallow']), list(['red_blue'])], [list(['test0\ntest1\ntest2', 'test3']), list([]), list(['abra ka\nda', 'bra'])]], dtype=object) Dimensions without coordinates: X, Y Split as many times as needed and put the results in a list >>> values.str.rsplit(dim=None) <xarray.DataArray (X: 2, Y: 3)> Size: 48B array([[list(['abc', 'def']), list(['spam', 'eggs', 'swallow']), list(['red_blue'])], [list(['test0', 'test1', 'test2', 'test3']), list([]), list(['abra', 'ka', 'da', 'bra'])]], dtype=object) Dimensions without coordinates: X, Y Split only on spaces >>> values.str.rsplit(dim="splitted", sep=" ") <xarray.DataArray (X: 2, Y: 3, splitted: 3)> Size: 2kB array([[['', 'abc', 'def'], ['', '', 'spam\t\teggs\tswallow'], ['', '', 'red_blue']], <BLANKLINE> [['', '', 'test0\ntest1\ntest2\n\ntest3'], ['', '', ''], ['abra', '', 'ka\nda\tbra']]], dtype='<U24') Dimensions without coordinates: X, Y, splitted See Also -------- DataArray.str.split str.rsplit pandas.Series.str.rsplit """ return self._splitter( 
func=self._obj.dtype.type.rsplit, pre=False, dim=dim, sep=sep, maxsplit=maxsplit, ) def get_dummies( self, dim: Hashable, sep: str | bytes | Any = "|", ) -> DataArray: """ Return DataArray of dummy/indicator variables. Each string in the DataArray is split at `sep`. A new dimension is created with coordinates for each unique result, and the corresponding element of that dimension is `True` if that result is present and `False` if not. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable Name for the dimension to place the results in. sep : str, default: "|". String to split on. If array-like, it is broadcast. Returns ------- dummies : array of bool Examples -------- Create a string array >>> values = xr.DataArray( ... [ ... ["a|ab~abc|abc", "ab", "a||abc|abcd"], ... ["abcd|ab|a", "abc|ab~abc", "|a"], ... ], ... dims=["X", "Y"], ... ) Extract dummy values >>> values.str.get_dummies(dim="dummies") <xarray.DataArray (X: 2, Y: 3, dummies: 5)> Size: 30B array([[[ True, False, True, False, True], [False, True, False, False, False], [ True, False, True, True, False]], <BLANKLINE> [[ True, True, False, True, False], [False, False, True, False, True], [ True, False, False, False, False]]]) Coordinates: * dummies (dummies) <U6 120B 'a' 'ab' 'abc' 'abcd' 'ab~abc' Dimensions without coordinates: X, Y See Also -------- pandas.Series.str.get_dummies """ # _apply breaks on an empty array in this case if not self._obj.size: return self._obj.copy().expand_dims({dim: 0}, axis=-1) sep = self._stringify(sep) f_set = lambda x, isep: set(x.split(isep)) - {self._stringify("")} setarr = self._apply(func=f_set, func_args=(sep,), dtype=np.object_) vals = sorted(reduce(set_union, setarr.data.ravel())) func = lambda x: np.array([val in x for val in vals], dtype=np.bool_) res = _apply_str_ufunc( func=func, obj=setarr, output_core_dims=[[dim]], output_sizes={dim: len(vals)}, dtype=np.bool_, ) res.coords[dim] = vals return res 
def decode(self, encoding: str, errors: str = "strict") -> T_DataArray: """ Decode character string in the array using indicated encoding. Parameters ---------- encoding : str The encoding to use. Please see the Python documentation `codecs standard encoders <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ section for a list of encodings handlers. errors : str, default: "strict" The handler for encoding errors. Please see the Python documentation `codecs error handlers <https://docs.python.org/3/library/codecs.html#error-handlers>`_ for a list of error handlers. Returns ------- decoded : same type as values """ if encoding in _cpython_optimized_decoders: func = lambda x: x.decode(encoding, errors) else: decoder = codecs.getdecoder(encoding) func = lambda x: decoder(x, errors)[0] return self._apply(func=func, dtype=np.str_) def encode(self, encoding: str, errors: str = "strict") -> T_DataArray: """ Encode character string in the array using indicated encoding. Parameters ---------- encoding : str The encoding to use. Please see the Python documentation `codecs standard encoders <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ section for a list of encodings handlers. errors : str, default: "strict" The handler for encoding errors. Please see the Python documentation `codecs error handlers <https://docs.python.org/3/library/codecs.html#error-handlers>`_ for a list of error handlers. Returns ------- encoded : same type as values """ if encoding in _cpython_optimized_encoders: func = lambda x: x.encode(encoding, errors) else: encoder = codecs.getencoder(encoding) func = lambda x: encoder(x, errors)[0] return self._apply(func=func, dtype=np.bytes_)
StringAccessor
python
networkx__networkx
networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py
{ "start": 80818, "end": 85128 }
class ____: edges = [ (1, 3), (2, 3), (3, 4), (4, 9), (4, 5), (3, 9), (5, 8), (5, 7), (8, 7), (6, 7), ] mapped = { 0: "x", 1: "a", 2: "b", 3: "c", 4: "d", 5: "e", 6: "f", 7: "g", 8: "h", 9: "i", } G1 = nx.Graph() G1.add_edges_from(edges) G1.add_node(0) G2 = nx.relabel_nodes(G1, mapping=mapped) def test_updating(self): G2_degree = dict(self.G2.degree) gparams, sparams = _initialize_parameters(self.G1, self.G2, G2_degree) m, m_rev, T1, _, T1_tilde, _, T2, _, T2_tilde, _ = sparams # Add node to the mapping m[4] = self.mapped[4] m_rev[self.mapped[4]] = 4 _update_Tinout(4, self.mapped[4], gparams, sparams) assert T1 == {3, 5, 9} assert T2 == {"c", "i", "e"} assert T1_tilde == {0, 1, 2, 6, 7, 8} assert T2_tilde == {"x", "a", "b", "f", "g", "h"} # Add node to the mapping m[5] = self.mapped[5] m_rev.update({self.mapped[5]: 5}) _update_Tinout(5, self.mapped[5], gparams, sparams) assert T1 == {3, 9, 8, 7} assert T2 == {"c", "i", "h", "g"} assert T1_tilde == {0, 1, 2, 6} assert T2_tilde == {"x", "a", "b", "f"} # Add node to the mapping m[6] = self.mapped[6] m_rev.update({self.mapped[6]: 6}) _update_Tinout(6, self.mapped[6], gparams, sparams) assert T1 == {3, 9, 8, 7} assert T2 == {"c", "i", "h", "g"} assert T1_tilde == {0, 1, 2} assert T2_tilde == {"x", "a", "b"} # Add node to the mapping m[3] = self.mapped[3] m_rev.update({self.mapped[3]: 3}) _update_Tinout(3, self.mapped[3], gparams, sparams) assert T1 == {1, 2, 9, 8, 7} assert T2 == {"a", "b", "i", "h", "g"} assert T1_tilde == {0} assert T2_tilde == {"x"} # Add node to the mapping m[0] = self.mapped[0] m_rev.update({self.mapped[0]: 0}) _update_Tinout(0, self.mapped[0], gparams, sparams) assert T1 == {1, 2, 9, 8, 7} assert T2 == {"a", "b", "i", "h", "g"} assert T1_tilde == set() assert T2_tilde == set() def test_restoring(self): m = {0: "x", 3: "c", 4: "d", 5: "e", 6: "f"} m_rev = {"x": 0, "c": 3, "d": 4, "e": 5, "f": 6} T1 = {1, 2, 7, 9, 8} T2 = {"a", "b", "g", "i", "h"} T1_tilde = set() T2_tilde = set() gparams = 
_GraphParameters(self.G1, self.G2, {}, {}, {}, {}, {}) sparams = _StateParameters( m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None ) # Remove a node from the mapping m.pop(0) m_rev.pop("x") _restore_Tinout(0, self.mapped[0], gparams, sparams) assert T1 == {1, 2, 7, 9, 8} assert T2 == {"a", "b", "g", "i", "h"} assert T1_tilde == {0} assert T2_tilde == {"x"} # Remove a node from the mapping m.pop(6) m_rev.pop("f") _restore_Tinout(6, self.mapped[6], gparams, sparams) assert T1 == {1, 2, 7, 9, 8} assert T2 == {"a", "b", "g", "i", "h"} assert T1_tilde == {0, 6} assert T2_tilde == {"x", "f"} # Remove a node from the mapping m.pop(3) m_rev.pop("c") _restore_Tinout(3, self.mapped[3], gparams, sparams) assert T1 == {7, 9, 8, 3} assert T2 == {"g", "i", "h", "c"} assert T1_tilde == {0, 6, 1, 2} assert T2_tilde == {"x", "f", "a", "b"} # Remove a node from the mapping m.pop(5) m_rev.pop("e") _restore_Tinout(5, self.mapped[5], gparams, sparams) assert T1 == {9, 3, 5} assert T2 == {"i", "c", "e"} assert T1_tilde == {0, 6, 1, 2, 7, 8} assert T2_tilde == {"x", "f", "a", "b", "g", "h"} # Remove a node from the mapping m.pop(4) m_rev.pop("d") _restore_Tinout(4, self.mapped[4], gparams, sparams) assert T1 == set() assert T2 == set() assert T1_tilde == set(self.G1.nodes()) assert T2_tilde == set(self.G2.nodes())
TestGraphTinoutUpdating
python
kamyu104__LeetCode-Solutions
Python/three-consecutive-odds.py
{ "start": 29, "end": 326 }
class ____(object): def threeConsecutiveOdds(self, arr): """ :type arr: List[int] :rtype: bool """ count = 0 for x in arr: count = count+1 if x%2 else 0 if count == 3: return True return False
Solution
python
huggingface__transformers
src/transformers/models/bert_generation/configuration_bert_generation.py
{ "start": 741, "end": 5628 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`BertGenerationPreTrainedModel`]. It is used to instantiate a BertGeneration model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BertGeneration [google/bert_for_seq_generation_L-24_bbc_encoder](https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50358): Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`BertGeneration`]. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often called feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. 
max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. bos_token_id (`int`, *optional*, defaults to 2): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 1): End of stream token id. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. Examples: ```python >>> from transformers import BertGenerationConfig, BertGenerationEncoder >>> # Initializing a BertGeneration config >>> configuration = BertGenerationConfig() >>> # Initializing a model (with random weights) from the config >>> model = BertGenerationEncoder(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "bert-generation" def __init__( self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, use_cache=True, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size 
self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache __all__ = ["BertGenerationConfig"]
BertGenerationConfig
python
ray-project__ray
rllib/policy/tests/test_sample_batch.py
{ "start": 504, "end": 20165 }
class ____(unittest.TestCase): @classmethod def setUpClass(cls) -> None: ray.init(num_gpus=1) @classmethod def tearDownClass(cls) -> None: ray.shutdown() def test_len_and_size_bytes(self): s1 = SampleBatch( { "a": np.array([1, 2, 3]), "b": {"c": np.array([4, 5, 6])}, SampleBatch.SEQ_LENS: [1, 2], } ) check(len(s1), 3) check( s1.size_bytes(), s1["a"].nbytes + s1["b"]["c"].nbytes + s1[SampleBatch.SEQ_LENS].nbytes, ) def test_dict_properties_of_sample_batches(self): base_dict = { "a": np.array([1, 2, 3]), "b": np.array([[0.1, 0.2], [0.3, 0.4]]), "c": True, } batch = SampleBatch(base_dict) keys_ = list(base_dict.keys()) values_ = list(base_dict.values()) items_ = list(base_dict.items()) assert list(batch.keys()) == keys_ assert list(batch.values()) == values_ assert list(batch.items()) == items_ # Add an item and check, whether it's in the "added" list. batch["d"] = np.array(1) assert batch.added_keys == {"d"}, batch.added_keys # Access two keys and check, whether they are in the # "accessed" list. print(batch["a"], batch["b"]) assert batch.accessed_keys == {"a", "b"}, batch.accessed_keys # Delete a key and check, whether it's in the "deleted" list. 
del batch["c"] assert batch.deleted_keys == {"c"}, batch.deleted_keys def test_right_zero_padding(self): """Tests, whether right-zero-padding work properly.""" s1 = SampleBatch( { "a": np.array([1, 2, 3]), "b": {"c": np.array([4, 5, 6])}, SampleBatch.SEQ_LENS: [1, 2], } ) s1.right_zero_pad(max_seq_len=5) check( s1, { "a": [1, 0, 0, 0, 0, 2, 3, 0, 0, 0], "b": {"c": [4, 0, 0, 0, 0, 5, 6, 0, 0, 0]}, SampleBatch.SEQ_LENS: [1, 2], }, ) def test_concat(self): """Tests, SampleBatches.concat() and concat_samples().""" s1 = SampleBatch( { "a": np.array([1, 2, 3]), "b": {"c": np.array([4, 5, 6])}, } ) s2 = SampleBatch( { "a": np.array([2, 3, 4]), "b": {"c": np.array([5, 6, 7])}, } ) concatd = concat_samples([s1, s2]) check(concatd["a"], [1, 2, 3, 2, 3, 4]) check(concatd["b"]["c"], [4, 5, 6, 5, 6, 7]) check(next(concatd.rows()), {"a": 1, "b": {"c": 4}}) concatd_2 = s1.concat(s2) check(concatd, concatd_2) def test_concat_max_seq_len(self): """Tests, SampleBatches.concat_samples() max_seq_len.""" s1 = SampleBatch( { "a": np.array([1, 2, 3]), "b": {"c": np.array([4, 5, 6])}, SampleBatch.SEQ_LENS: [1, 2], } ) s2 = SampleBatch( { "a": np.array([2, 3, 4]), "b": {"c": np.array([5, 6, 7])}, SampleBatch.SEQ_LENS: [3], } ) s3 = SampleBatch( { "a": np.array([2, 3, 4]), "b": {"c": np.array([5, 6, 7])}, } ) concatd = concat_samples([s1, s2]) check(concatd.max_seq_len, s2.max_seq_len) with self.assertRaises(ValueError): concat_samples([s1, s2, s3]) def test_rows(self): s1 = SampleBatch( { "a": np.array([[1, 1], [2, 2], [3, 3]]), "b": {"c": np.array([[4, 4], [5, 5], [6, 6]])}, SampleBatch.SEQ_LENS: np.array([1, 2]), } ) check( next(s1.rows()), {"a": [1, 1], "b": {"c": [4, 4]}, SampleBatch.SEQ_LENS: 1}, ) def test_compression(self): """Tests, whether compression and decompression work properly.""" s1 = SampleBatch( { "a": np.array([1, 2, 3, 2, 3, 4]), "b": {"c": np.array([4, 5, 6, 5, 6, 7])}, } ) # Test, whether compressing happens in-place. 
s1.compress(columns={"a", "b"}, bulk=True) self.assertTrue(is_compressed(s1["a"])) self.assertTrue(is_compressed(s1["b"]["c"])) self.assertTrue(isinstance(s1["b"], dict)) # Test, whether de-compressing happens in-place. s1.decompress_if_needed(columns={"a", "b"}) check(s1["a"], [1, 2, 3, 2, 3, 4]) check(s1["b"]["c"], [4, 5, 6, 5, 6, 7]) it = s1.rows() next(it) check(next(it), {"a": 2, "b": {"c": 5}}) def test_slicing(self): """Tests, whether slicing can be done on SampleBatches.""" s1 = SampleBatch( { "a": np.array([1, 2, 3, 2, 3, 4]), "b": {"c": np.array([4, 5, 6, 5, 6, 7])}, } ) check( s1[:3], { "a": [1, 2, 3], "b": {"c": [4, 5, 6]}, }, ) check( s1[0:3], { "a": [1, 2, 3], "b": {"c": [4, 5, 6]}, }, ) check( s1[1:4], { "a": [2, 3, 2], "b": {"c": [5, 6, 5]}, }, ) check( s1[1:], { "a": [2, 3, 2, 3, 4], "b": {"c": [5, 6, 5, 6, 7]}, }, ) check( s1[3:4], { "a": [2], "b": {"c": [5]}, }, ) # When we change the slice, the original SampleBatch should also # change (shared underlying data). s1[:3]["a"][0] = 100 s1[1:2]["a"][0] = 200 check(s1["a"][0], 100) check(s1["a"][1], 200) # Seq-len batches should be auto-sliced along sequences, # no matter what. s2 = SampleBatch( { "a": np.array([1, 2, 3, 2, 3, 4]), "b": {"c": np.array([4, 5, 6, 5, 6, 7])}, SampleBatch.SEQ_LENS: [2, 3, 1], "state_in_0": [1.0, 3.0, 4.0], } ) # We would expect a=[1, 2, 3] now, but due to the sequence # boundary, we stop earlier. check( s2[:3], { "a": [1, 2], "b": {"c": [4, 5]}, SampleBatch.SEQ_LENS: [2], "state_in_0": [1.0], }, ) # Split exactly at a seq-len boundary. check( s2[:5], { "a": [1, 2, 3, 2, 3], "b": {"c": [4, 5, 6, 5, 6]}, SampleBatch.SEQ_LENS: [2, 3], "state_in_0": [1.0, 3.0], }, ) # Split above seq-len boundary. 
check( s2[:50], { "a": [1, 2, 3, 2, 3, 4], "b": {"c": [4, 5, 6, 5, 6, 7]}, SampleBatch.SEQ_LENS: [2, 3, 1], "state_in_0": [1.0, 3.0, 4.0], }, ) check( s2[:], { "a": [1, 2, 3, 2, 3, 4], "b": {"c": [4, 5, 6, 5, 6, 7]}, SampleBatch.SEQ_LENS: [2, 3, 1], "state_in_0": [1.0, 3.0, 4.0], }, ) def test_split_by_episode(self): s = SampleBatch( { "a": np.array([0, 1, 2, 3, 4, 5]), "eps_id": np.array([0, 0, 0, 0, 1, 1]), "terminateds": np.array([0, 0, 0, 1, 0, 1]), } ) true_split = [np.array([0, 1, 2, 3]), np.array([4, 5])] # Check that splitting by EPS_ID works correctly eps_split = [b["a"] for b in s.split_by_episode()] check(true_split, eps_split) # Check that splitting by EPS_ID works correctly when explicitly specified eps_split = [b["a"] for b in s.split_by_episode(key="eps_id")] check(true_split, eps_split) # Check that splitting by DONES works correctly when explicitly specified eps_split = [b["a"] for b in s.split_by_episode(key="dones")] check(true_split, eps_split) # Check that splitting by DONES works correctly del s["eps_id"] terminateds_split = [b["a"] for b in s.split_by_episode()] check(true_split, terminateds_split) # Check that splitting without the EPS_ID or DONES key raise an error del s["terminateds"] with self.assertRaises(KeyError): s.split_by_episode() # Check that splitting with DONES always False returns the whole batch s["terminateds"] = np.array([0, 0, 0, 0, 0, 0]) batch_split = [b["a"] for b in s.split_by_episode()] check(s["a"], batch_split[0]) def test_copy(self): s = SampleBatch( { "a": np.array([1, 2, 3, 2, 3, 4]), "b": {"c": np.array([4, 5, 6, 5, 6, 7])}, SampleBatch.SEQ_LENS: [2, 3, 1], "state_in_0": [1.0, 3.0, 4.0], } ) s_copy = s.copy(shallow=False) s_copy["a"][0] = 100 s_copy["b"]["c"][0] = 200 s_copy[SampleBatch.SEQ_LENS][0] = 3 s_copy[SampleBatch.SEQ_LENS][1] = 2 s_copy["state_in_0"][0] = 400.0 self.assertNotEqual(s["a"][0], s_copy["a"][0]) self.assertNotEqual(s["b"]["c"][0], s_copy["b"]["c"][0]) 
self.assertNotEqual(s[SampleBatch.SEQ_LENS][0], s_copy[SampleBatch.SEQ_LENS][0]) self.assertNotEqual(s[SampleBatch.SEQ_LENS][1], s_copy[SampleBatch.SEQ_LENS][1]) self.assertNotEqual(s["state_in_0"][0], s_copy["state_in_0"][0]) s_copy = s.copy(shallow=True) s_copy["a"][0] = 100 s_copy["b"]["c"][0] = 200 s_copy[SampleBatch.SEQ_LENS][0] = 3 s_copy[SampleBatch.SEQ_LENS][1] = 2 s_copy["state_in_0"][0] = 400.0 self.assertEqual(s["a"][0], s_copy["a"][0]) self.assertEqual(s["b"]["c"][0], s_copy["b"]["c"][0]) self.assertEqual(s[SampleBatch.SEQ_LENS][0], s_copy[SampleBatch.SEQ_LENS][0]) self.assertEqual(s[SampleBatch.SEQ_LENS][1], s_copy[SampleBatch.SEQ_LENS][1]) self.assertEqual(s["state_in_0"][0], s_copy["state_in_0"][0]) def test_shuffle_with_interceptor(self): """Tests, whether `shuffle()` clears the `intercepted_values` cache.""" s = SampleBatch( { "a": np.array([1, 2, 3, 2, 3, 4, 3, 4, 5, 4, 5, 6, 5, 6, 7]), } ) # Set a summy get-interceptor (returning all values, but plus 1). s.set_get_interceptor(lambda v: v + 1) # Make sure, interceptor works. check(s["a"], [2, 3, 4, 3, 4, 5, 4, 5, 6, 5, 6, 7, 6, 7, 8]) s.shuffle() # Make sure, intercepted values are NOT the original ones (before the shuffle), # but have also been shuffled. 
check(s["a"], [2, 3, 4, 3, 4, 5, 4, 5, 6, 5, 6, 7, 6, 7, 8], false=True) def test_to_device(self): """Tests whether to_device works properly under different circumstances""" torch, _ = try_import_torch() # sample batch includes # a numpy array (a) # a nested stucture of dict, tuple and lists (b) of numpys and None # info dict # a nested structure that ends up with tensors and ints(c) # a tensor with float64 values (d) # a float64 tensor with possibly wrong device (depends on if cuda available) # repeated value object with np.array leaves (f) cuda_available = int(os.environ.get("RLLIB_NUM_GPUS", "0")) > 0 cuda_if_possible = torch.device("cuda:0" if cuda_available else "cpu") s = SampleBatch( { "a": np.array([1, 2]), "b": {"c": (np.array([4, 5]), np.array([5, 6]))}, "c": {"d": torch.Tensor([1, 2]), "g": (torch.Tensor([3, 4]), 1)}, "d": torch.Tensor([1.0, 2.0]).double(), "e": torch.Tensor([1.0, 2.0]).double().to(cuda_if_possible), "f": RepeatedValues(np.array([[1, 2, 0, 0]]), lengths=[2], max_len=4), SampleBatch.SEQ_LENS: np.array([2, 3, 1]), "state_in_0": np.array([1.0, 3.0, 4.0]), # INFO can have arbitrary elements, others need to conform in size SampleBatch.INFOS: np.array([{"a": 1}, {"b": [1, 2]}, {"c": None}]), } ) # inplace operation for sample_batch s.to_device(cuda_if_possible, framework="torch") def _check_recursive_device_and_type(input_struct, target_device): def get_mismatched_types(v): if isinstance(v, torch.Tensor): if v.device.type != target_device.type: return (v.device, v.dtype) if v.is_floating_point() and v.dtype != torch.float32: return (v.device, v.dtype) tree_checks = {} for k, v in input_struct.items(): tree_checks[k] = tree.map_structure(get_mismatched_types, v) self.assertTrue( all(v is None for v in tree.flatten((tree_checks))), f"the device type check dict: {tree_checks}", ) # check if all tensors have the correct device and dtype _check_recursive_device_and_type(s, cuda_if_possible) # check repeated value check(s["f"].lengths, [2]) 
check(s["f"].max_len, 4) check(s["f"].values, torch.from_numpy(np.asarray([[1, 2, 0, 0]]))) # check infos check(s[SampleBatch.INFOS], np.array([{"a": 1}, {"b": [1, 2]}, {"c": None}])) # check c/g/1 self.assertEqual(s["c"]["g"][1], torch.from_numpy(np.asarray(1))) with self.assertRaises(NotImplementedError): # should raise an error if framework is not torch s.to_device(cuda_if_possible, framework="tf") def test_count(self): # Tests if counts are what we would expect from different batches input_dicts_and_lengths = [ ( { SampleBatch.OBS: { "a": np.array([[1], [2], [3]]), "b": np.array([[0], [0], [1]]), "c": np.array([[4], [5], [6]]), } }, 3, ), ( { SampleBatch.OBS: { "a": np.array([[1, 2, 3]]), "b": np.array([[0, 0, 1]]), "c": np.array([[4, 5, 6]]), } }, 1, ), ( { SampleBatch.INFOS: { "a": np.array([[1], [2], [3]]), "b": np.array([[0], [0], [1]]), "c": np.array([[4], [5], [6]]), } }, 0, # This should have a length of zero, since we can ignore INFO ), ( { "state_in_0": { "a": [[[1], [2], [3]], [[1], [2], [3]], [[1], [2], [3]]], "b": [[[1], [2], [3]], [[1], [2], [3]], [[1], [2], [3]]], "c": [[[1], [2], [3]], [[1], [2], [3]], [[1], [2], [3]]], }, "state_out_0": { "a": [[[1], [2], [3]], [[1], [2], [3]], [[1], [2], [3]]], "b": [[[1], [2], [3]], [[1], [2], [3]], [[1], [2], [3]]], "c": [[[1], [2], [3]], [[1], [2], [3]], [[1], [2], [3]]], }, SampleBatch.OBS: { "a": np.array([1, 2, 3]), "b": np.array([0, 0, 1]), "c": np.array([4, 5, 6]), }, }, 3, # This should have a length of three - we count from OBS ), ( { "state_in_0": { "a": [[[1], [2], [3]], [[1], [2], [3]], [[1], [2], [3]]], "b": [[[1], [2], [3]], [[1], [2], [3]], [[1], [2], [3]]], "c": [[[1], [2], [3]], [[1], [2], [3]], [[1], [2], [3]]], }, "state_out_0": { "a": [[[1], [2], [3]], [[1], [2], [3]], [[1], [2], [3]]], "b": [[[1], [2], [3]], [[1], [2], [3]], [[1], [2], [3]]], "c": [[[1], [2], [3]], [[1], [2], [3]], [[1], [2], [3]]], }, }, 0, # This should have a length of zero, we don't attempt to count ), ( { 
SampleBatch.OBS: { "a": np.array([[1], [2], [3]]), "b": np.array([[0], [0], [1]]), "c": np.array([[4], [5], [6]]), }, SampleBatch.SEQ_LENS: np.array([[1], [2], [3]]), }, 6, # This should have a length of six, since we don't try to infer # from inputs but count by sequence lengths ), ( { SampleBatch.NEXT_OBS: { "a": {"b": np.array([[1], [2], [3]])}, "c": np.array([[4], [5], [6]]), }, }, 3, # Test if we properly support nesting ), ] for input_dict, length in input_dicts_and_lengths: self.assertEqual(attempt_count_timesteps(copy.deepcopy(input_dict)), length) s = SampleBatch(input_dict) self.assertEqual(s.count, length) def test_interceptors(self): # Tests whether interceptors work as intended some_array = np.array([1, 2, 3]) batch = SampleBatch({SampleBatch.OBS: some_array}) device = torch.device("cpu") self.assertTrue(batch[SampleBatch.OBS] is some_array) batch.set_get_interceptor( functools.partial(convert_to_torch_tensor, device=device) ) self.assertTrue( all(convert_to_torch_tensor(some_array) == batch[SampleBatch.OBS]) ) # This test requires a GPU, otherwise we can't test whether we are # moving between devices if not torch.cuda.is_available(): raise ValueError("This test can only fail if cuda is available.") another_array = np.array([4, 5, 6]) another_batch = SampleBatch({SampleBatch.OBS: another_array}) another_device = torch.device("cuda") self.assertTrue(another_batch[SampleBatch.OBS] is another_array) another_batch.set_get_interceptor( functools.partial(convert_to_torch_tensor, device=another_device) ) check(another_batch[SampleBatch.OBS], another_array) self.assertFalse(another_batch[SampleBatch.OBS] is another_array) if __name__ == "__main__": import sys import pytest sys.exit(pytest.main(["-v", __file__]))
TestSampleBatch
python
kamyu104__LeetCode-Solutions
Python/intersection-of-two-arrays-ii.py
{ "start": 451, "end": 1459 }
class ____(object): def intersect(self, nums1, nums2): """ :type nums1: List[int] :type nums2: List[int] :rtype: List[int] """ if len(nums1) > len(nums2): return self.intersect(nums2, nums1) lookup = collections.defaultdict(int) for i in nums1: lookup[i] += 1 res = [] for i in nums2: if lookup[i] > 0: res += i, lookup[i] -= 1 return res def intersect2(self, nums1, nums2): """ :type nums1: List[int] :type nums2: List[int] :rtype: List[int] """ c = collections.Counter(nums1) & collections.Counter(nums2) intersect = [] for i in c: intersect.extend([i] * c[i]) return intersect # If the given array is already sorted, and the memory is limited, and (m << n or m >> n). # Time: O(min(m, n) * log(max(m, n))) # Space: O(1) # Binary search solution.
Solution
python
pytorch__pytorch
torch/ao/quantization/observer.py
{ "start": 64530, "end": 64757 }
class ____(Granularity): """ Represents per-tensor granularity in quantization. This granularity type calculates the quantization parameters based off the entire tensor. """ @dataclass(frozen=True)
PerTensor
python
pytorch__pytorch
torch/_inductor/runtime/triton_heuristics.py
{ "start": 80602, "end": 135595 }
class ____(CachingAutotuner): def __init__( self, *args, regex_filter="", with_profiler=False, with_bandwidth_info=True, **kwargs, ): self.regex_filter = regex_filter self.with_profiler = with_profiler self.with_bandwidth_info = with_bandwidth_info super().__init__(*args, **kwargs) self.cached = None def run(self, *args, stream, **kwargs): if not self.with_bandwidth_info: super().run(*args, stream=stream, **kwargs, benchmark_run=True) return else: possible_names = _find_names(self) kernel_name = f"{max(possible_names, key=len)}" if not re.match(self.regex_filter, kernel_name): return if len(self.launchers) != 1: if len(self.launchers) == 0: start_time = time.time_ns() self.precompile() self.precompile_time_taken_ns = time.time_ns() - start_time if len(self.launchers) > 1: self.autotune_to_one_config(*args, **kwargs) (launcher,) = self.launchers if launcher.store_cubin: self.save_gpu_kernel(stream, launcher) if self.cached is None: ms = self.bench(launcher, *args, with_profiler=self.with_profiler) num_in_out_ptrs = len( [ arg_name for arg_name in self.fn.arg_names if arg_name.startswith("in_out_ptr") ] ) num_gb = self.inductor_meta.get("kernel_num_gb", None) if num_gb is None: num_gb = get_num_bytes(*args, num_in_out_args=num_in_out_ptrs) / 1e9 gb_per_s = num_gb / (ms / 1e3) self.cached = ms, num_gb, gb_per_s, kernel_name collected_calls.append((ms, num_gb, gb_per_s, kernel_name)) log.info( "%s", create_bandwidth_info_str( ms, num_gb, gb_per_s, suffix=f" \t {kernel_name}" ), ) else: # in AOTI, we will call the kernel and its timing info has been cached already collected_calls.append(self.cached) def hash_configs(configs: list[Config]): """ Hash used to check for changes in configurations """ hasher = hashlib.sha256() for cfg in configs: hasher.update( f"{sorted(cfg.kwargs.items())} {cfg.num_warps} {cfg.num_stages}\n".encode() ) return hasher.hexdigest() def cached_autotune( size_hints: list[int] | None, configs: list[Config], triton_meta, heuristic_type, 
filename=None, inductor_meta=None, custom_kernel=False, ): """ A copy of triton.autotune that calls our subclass. Our subclass has additional debugging, error handling, and on-disk caching. """ configs = unique_configs(configs) assert len(configs) == 1 or filename inductor_meta = {} if inductor_meta is None else inductor_meta configs, autotune_cache, autotune_cache_info = check_autotune_cache( configs, filename, inductor_meta ) mutated_arg_names = inductor_meta.pop("mutated_arg_names", ()) optimize_mem = inductor_meta.pop("optimize_mem", True) if "restore_value" in triton_meta: mutated_arg_names += triton_meta.pop("restore_value") reset_to_zero_arg_names: list[str] = [] if "reset_to_zero" in triton_meta: reset_to_zero_arg_names.extend(triton_meta.pop("reset_to_zero")) def decorator(fn): # Remove XBLOCK from config if it's not a function argument. # This way, coordinate descent tuning will not try to tune it. # # Context: When TritonKernel.no_x_dim is True, we hardcode XBLOCK to 1. import inspect if "XBLOCK" not in inspect.signature(fn.fn).parameters: for tconfig in configs: if "XBLOCK" in tconfig.kwargs: assert tconfig.kwargs["XBLOCK"] == 1 tconfig.kwargs.pop("XBLOCK") if inductor_meta.get("profile_bandwidth"): return DebugAutotuner( fn, triton_meta=triton_meta, inductor_meta=inductor_meta, regex_filter=inductor_meta["profile_bandwidth_regex"], with_profiler=inductor_meta[ "profile_bandwidth_with_do_bench_using_profiling" ], configs=configs, save_cache_hook=autotune_cache and autotune_cache.save, mutated_arg_names=mutated_arg_names, reset_to_zero_arg_names=reset_to_zero_arg_names, optimize_mem=optimize_mem, heuristic_type=heuristic_type, size_hints=size_hints, custom_kernel=custom_kernel, filename=filename, with_bandwidth_info=True, ) return CachingAutotuner( fn, triton_meta=triton_meta, inductor_meta=inductor_meta, configs=configs, save_cache_hook=autotune_cache and autotune_cache.save, mutated_arg_names=mutated_arg_names, 
reset_to_zero_arg_names=reset_to_zero_arg_names, optimize_mem=optimize_mem, heuristic_type=heuristic_type, size_hints=size_hints, custom_kernel=custom_kernel, filename=filename, autotune_cache_info=autotune_cache_info, ) return decorator def unique_configs(configs: list[Config]): """Remove duplicate configurations""" seen: OrderedSet[Hashable] = OrderedSet() pruned_configs = [] for cfg in configs: key = triton_config_to_hashable(cfg) if key not in seen: seen.add(key) pruned_configs.append(cfg) return pruned_configs def check_config(cfg, *, xnumel=None, ynumel=None, znumel=None): for numel, label in zip((xnumel, ynumel, znumel), "XYZ"): if numel is None: continue block = cfg[f"{label}BLOCK"] if numel == 1: assert block == 1, ( f"TritonKernel.indexing assumes numel == 1 => BLOCK == 1" f" but {label.lower()}numel=={numel} and {label}BLOCK={block} (cfg={cfg})." ) max_block = TRITON_MAX_BLOCK[label] max_block_str = f'config.triton.max_block["{label}"]' assert max_block % block == 0, ( f"TritonKernel.indexing assumes {label}BLOCK divides {max_block_str}" f" but {label}BLOCK={block} and {max_block_str}={max_block} (cfg={cfg})." ) def check_max_block(cfg: dict[str, int]): """ Check that block sizes are within the maximum allowed. """ for var, val in cfg.items(): block_suffix = "BLOCK" if block_suffix in var: prefix = var.removesuffix(block_suffix) max_block = TRITON_MAX_BLOCK[prefix] assert val <= max_block, ( f"'{var}' too large. Maximum: {max_block}. Actual: {val}." ) def _num_warps(num_warps, max_num_warps=8, min_num_warps=2, register_intensive=False): # On AMD GPU each warp has 64 lanes which is double the size on NV GPU, # therefore using half the number of warps here correspondingly. 
if torch.version.hip: max_num_warps = (max_num_warps + 1) // 2 min_num_warps = (min_num_warps + 1) // 2 # persistent reduction is register intensive if register_intensive: max_num_warps = max_num_warps // 2 return next_power_of_2(min(max(num_warps, min_num_warps), max_num_warps)) def _check_max_grid_x(size_hints, x, num_warps): # Check if maxGridSize is exceeded - if so then must scale XBLOCK further max_grid_x = 2147483647 warp_size = ( 64 if torch.version.hip else 32 ) # TODO: query warp size once #129663 is merged num_blocks = (size_hints["x"] + x - 1) // x while (num_blocks * num_warps * warp_size) > max_grid_x and x < size_hints["x"]: x *= 2 # Scale up XBLOCK if grid exceeds limits num_blocks = num_blocks // 2 if (num_blocks * num_warps * warp_size) > max_grid_x: raise AssertionError( "Reduction config exceeds cudaDeviceProp maxGridSize. Please raise a pytorch issue" ) return x, num_blocks def triton_config( size_hints, x, y=None, z=None, num_stages=1, num_elements_per_warp=256, min_elem_per_thread=0, num_warps=None, matrix_instr=None, waves_per_eu=None, ) -> Config: """ Construct a pointwise triton config with some adjustment heuristics based on size_hints. Size_hints is a tuple of numels in each tile dimension and will be rounded up to the nearest power of 2. num_elements_per_warp is a suggestion for controlling how many warps the triton config should contain. e.g.: if x=16, y=8, z=4 then num_elements = 16*8*4 = 512. Then if we set num_elements_per_warp=128, we'll launch 512 (elem) / 128 (elem/warp) = 4 warps. Note that it's just a suggestion, and sometimes other adjustment heuristics will override the num_elements_per_warp. min_elem_per_thread controls the minimum number of elements processed by each thread. It's always enforced. 
""" # Ideally we want to read this from some device config maxGridSize = [2147483647, 65535, 65535] target = conditional_product(x, y, z) if conditional_product(*size_hints.values()) < target: target //= 8 # shrink sizes to size hints x = min(x, size_hints["x"]) if y: y = min(y, size_hints["y"]) if z: z = min(z, size_hints["z"]) # if we are below original block size, scale up where we can; # or if the calculated grid size is larger than the limit, we bump up the corresponding dimension while x < min(size_hints["x"], TRITON_MAX_BLOCK["X"]) and ( x * maxGridSize[0] < size_hints["x"] or conditional_product(x, y, z) < target ): x *= 2 while ( y and y < min(size_hints["y"], TRITON_MAX_BLOCK["Y"]) and ( y * maxGridSize[1] < size_hints["y"] or conditional_product(x, y, z) < target ) ): y *= 2 while ( z and z < min(size_hints["z"], TRITON_MAX_BLOCK["Z"]) and ( z * maxGridSize[2] < size_hints["z"] or conditional_product(x, y, z) < target ) ): z *= 2 # Calculate num_warps if they are not hard passed to config if num_warps is None: num_warps = _num_warps( conditional_product(x, y, z) // num_elements_per_warp, min_num_warps=1 ) # we are going to arrive at 2 warps only if bs was too small due to # numel being too small. However to workaround some ptx bugs we still # want at least 4 warps if there's enough elements per thread # given that this is a rare situation, don't expect this to affect perf # in general # see https://github.com/pytorch/pytorch/pull/97950 if conditional_product(x, y, z) >= 128 and not torch.version.hip: num_warps = max(num_warps, 4) xnumel = size_hints["x"] ynumel = size_hints.get("y") znumel = size_hints.get("z") # Increase x to satisfy min_elem_per_thread requirements. 
block_size = max( conditional_product(x, y, z), min_elem_per_thread * _NUM_THREADS_PER_WARP * num_warps, ) x *= math.ceil(block_size / conditional_product(x, y, z)) x, _num_blocks = _check_max_grid_x(size_hints, x, num_warps) x = min(x, size_hints["x"]) cfg = {"XBLOCK": x} if y: cfg["YBLOCK"] = y if z: cfg["ZBLOCK"] = z check_max_block(cfg) check_config(cfg, xnumel=xnumel, ynumel=ynumel, znumel=znumel) config = Config(cfg, num_warps=num_warps, num_stages=num_stages) if torch.version.hip: if matrix_instr is not None: config.kwargs["matrix_instr_nonkdim"] = matrix_instr if waves_per_eu is not None: config.kwargs["waves_per_eu"] = waves_per_eu return config def _get_nd_reduction_numels(r: int, size_hints: dict[str, int]) -> dict[str, int]: """ Converts a linear reduction numel to ND, in row major order. This order is often desirable as it presents opportunities to coalesce memory accesses. For example, if r = 64 and size_hints = [32,32], this function returns [32, 2]. This unraveling works because both r and size_hints are powers of 2. """ # Shrink r to size_hints. r = min(r, get_total_reduction_numel(size_hints)) num_reduction_dims = len( [prefix for prefix in size_hints if prefix_is_reduction(prefix)] ) remaining = r rnumels = {} for idx in range(num_reduction_dims - 1, -1, -1): prefix = f"r{idx}_" max_size = min(size_hints[prefix], TRITON_MAX_BLOCK[prefix.upper()]) dim = min(max_size, remaining) assert remaining % dim == 0, ( f"Expected dimension '{dim}' to divide remaining size '{remaining}'" ) rnumels[prefix] = dim remaining //= dim # Sanity check the results. final_numel = conditional_product(*rnumels.values()) assert r == final_numel, ( f"Expected ND reduction size ({rnumels}) to have {r} elements." ) assert all(rnumels[prefix] <= size_hints[prefix] for prefix in rnumels), ( f"rnumels exceed size_hints. 
{rnumels} > {size_hints}" ) return rnumels def triton_config_reduction( size_hints, x: int, r: int, num_stages=1, num_warps=None, register_intensive=False, waves_per_eu=None, dynamic_scale_rblock=True, reduction_hint=None, min_num_warps=None, ) -> Config: """ Construct a reduction triton config with some adjustment heuristics based on size_hints. Size_hints is a tuple of numels in each tile dimension and will be rounded up to the nearest power of 2. """ # Convert the linear reduction numel into a multi-dimensional block. rnumels = _get_nd_reduction_numels(r, size_hints) # shrink sizes to size hints x = min(x, size_hints["x"]) def total_numel() -> int: return conditional_product(x, *rnumels.values()) target = total_numel() if conditional_product(*size_hints.values()) < target: target //= 8 # if we are below original block size, scale up where we can while x < size_hints["x"] and total_numel() < target: x *= 2 for prefix in sorted(rnumels): while rnumels[prefix] < size_hints[prefix] and total_numel() < target: rnumels[prefix] *= 2 if num_warps is None: if reduction_hint == ReductionHint.INNER: # r is contiguous, ensure at least 8 elements per thread # xblock is usually 1-2, default to giving each thread more work num_warps = r // 128 else: num_warps = total_numel() // 128 max_num_warps = 16 if r <= 8192 else 32 if min_num_warps is not None: _num_warps_func = functools.partial(_num_warps, min_num_warps=min_num_warps) else: _num_warps_func = _num_warps num_warps = _num_warps_func( num_warps, max_num_warps=max_num_warps, register_intensive=register_intensive ) x, _num_blocks = _check_max_grid_x(size_hints, x, num_warps) for prefix in sorted(rnumels): while total_numel() > target: if rnumels[prefix] == 1: break rnumels[prefix] //= 2 cfg = _get_config({"x": x, **rnumels}) check_max_block(cfg) check_config(cfg, xnumel=size_hints["x"]) config = InductorConfig( cfg, num_warps=num_warps, num_stages=num_stages, dynamic_scale_rblock=dynamic_scale_rblock, ) if torch.version.hip: 
if waves_per_eu is not None: config.kwargs["waves_per_eu"] = waves_per_eu return config def _get_config(numels: dict[str, int]) -> dict[str, int]: """ Convert numels ("x", "r0_", etc.) to block sizes ("XBLOCK", "R0_BLOCK"), etc. """ return {prefix.upper() + "BLOCK": numel for prefix, numel in numels.items()} def triton_config_tiled_reduction( size_hints, x, y, r, num_stages=1, register_intensive=False, waves_per_eu=None ): """ Construct a tile reduction triton config with some adjustment heuristics based on size_hints. Size_hints is a tuple of numels in each tile dimension and will be rounded up to the nearest power of 2. """ # Convert the linear reduction numel into a multi-dimensional block. rnumels = _get_nd_reduction_numels(r, size_hints) # shrink sizes to size hints x = min(x, size_hints["x"]) y = min(y, size_hints["y"]) def total_numel() -> int: return conditional_product(x, y, *rnumels.values()) target = total_numel() if conditional_product(*size_hints.values()) < target: target //= 8 # if we are below original block size, scale up where we can while x < size_hints["x"] and total_numel() < target: x *= 2 for prefix in sorted(rnumels): while rnumels[prefix] < size_hints[prefix] and total_numel() < target: rnumels[prefix] *= 2 while y < size_hints["y"] and total_numel() < target: y *= 2 cfg = _get_config({"x": x, "y": y, **rnumels}) num_warps = _num_warps(total_numel() // 256, min_num_warps=1) num_warps = _num_warps( num_warps, max_num_warps=16, register_intensive=register_intensive ) check_config(cfg, xnumel=size_hints["x"], ynumel=size_hints["y"]) check_max_block(cfg) config = Config(cfg, num_warps=num_warps, num_stages=num_stages) if torch.version.hip: if waves_per_eu is not None: config.kwargs["waves_per_eu"] = waves_per_eu return config def _maybe_filter_configs_for_tma_restrictions(inductor_meta, configs: list[Config]): tma_min_block_sizes: dict[str, int] if (tma_min_block_sizes := inductor_meta.get("tma_min_block_sizes")) and configs: # Rn blocks are 
not provided to the kernel for persistent reductions if inductor_meta.get("persistent_reduction"): tma_min_block_sizes = { block_type: block_size for block_type, block_size in tma_min_block_sizes if not prefix_is_reduction(block_type.lower()) } assert all( block_type in configs[0].kwargs for block_type in tma_min_block_sizes ) # Add a config that is guaranteed to compile example_config = configs[0] config_block_sizes = {**example_config.kwargs} config_block_sizes.update(tma_min_block_sizes) new_configs = [ Config( config_block_sizes, num_warps=example_config.num_warps, num_stages=example_config.num_stages, maxnreg=example_config.maxnreg, pre_hook=example_config.pre_hook, ) ] # Remove configs that will not compile for c in configs: if all( c.kwargs.get(block_type) >= min_block_value for block_type, min_block_value in tma_min_block_sizes.items() ): new_configs.append(c) log.debug( "Filtering configs for TMA API restrictions. Input configs size: %d. Output configs size: %d", len(configs), len(new_configs), ) return new_configs return configs def pointwise( size_hints, triton_meta, tile_hint=None, filename=None, min_elem_per_thread=0, inductor_meta=None, ): """ Construct @triton.heuristics() based on size_hints. 
""" inductor_meta = {} if inductor_meta is None else inductor_meta assert not inductor_meta.get("no_x_dim") numel = functools.reduce(operator.mul, size_hints.values()) bs = max(256, min(numel // 128, 1024)) hinted_configs = autotune_hints_to_configs( inductor_meta.get("autotune_hints", OrderedSet()), size_hints, bs, triton_meta["device"], ) triton_config_with_settings = functools.partial( triton_config, min_elem_per_thread=min_elem_per_thread ) configs = None if len(size_hints) == 1: if not inductor_meta.get("autotune_pointwise", True) and not ( inductor_meta.get("max_autotune") or inductor_meta.get("max_autotune_pointwise") ): configs = [triton_config_with_settings(size_hints, bs)] else: configs = [ triton_config_with_settings(size_hints, bs, num_elements_per_warp=256), triton_config_with_settings( size_hints, bs // 2, num_elements_per_warp=64 ), *hinted_configs, ] # Additional configs appended for ROCm builds if torch.version.hip: configs.extend( [ triton_config_with_settings( size_hints, TRITON_MAX_BLOCK["X"], waves_per_eu=2 ), triton_config_with_settings( size_hints, 4096, # wrt: better than the max_block for some kernel ), triton_config_with_settings( size_hints, 2048, num_warps=8, num_stages=2, waves_per_eu=1, # 20% improvement ), ] ) if inductor_meta.get("atomic_add_found"): configs.extend( [ triton_config_with_settings( size_hints, 64, num_warps=1, num_stages=1, # 250% improvement ) ] ) if len(size_hints) == 2: # Only avoiding tuning on TileHint.SQUARE if not on ROCm builds # ROCm has observed improvement by diverging here if ( not inductor_meta.get("autotune_pointwise", True) or (torch.version.hip is None and tile_hint == TileHint.SQUARE) ) and not ( inductor_meta.get("max_autotune") or inductor_meta.get("max_autotune_pointwise") ): configs = [triton_config_with_settings(size_hints, 32, 32)] else: configs = [ triton_config_with_settings(size_hints, 32, 32), triton_config_with_settings(size_hints, 64, 64), # ~8% better for fp16 
triton_config_with_settings(size_hints, 256, 16), triton_config_with_settings(size_hints, 16, 256), triton_config_with_settings(size_hints, bs, 1), triton_config_with_settings(size_hints, 1, bs), *hinted_configs, ] # Additional configs appended for ROCm builds if torch.version.hip: configs.extend( [ triton_config_with_settings( size_hints, 64, 32 ), # better for some kernels triton_config_with_settings( size_hints, 128, 16 ), # +10% for some kernels triton_config_with_settings( size_hints, 128, 32 ), # additional 10% more triton_config_with_settings( size_hints, 32, 512 ), # +30% for some kernels ] ) if len(size_hints) == 3: if not inductor_meta.get("autotune_pointwise", True): configs = [triton_config_with_settings(size_hints, 16, 16, 16)] else: configs = [ triton_config_with_settings(size_hints, 16, 16, 16), triton_config_with_settings(size_hints, 64, 8, 8), triton_config_with_settings(size_hints, 8, 64, 8), triton_config_with_settings(size_hints, 8, 8, 64), triton_config_with_settings(size_hints, bs, 1, 1), triton_config_with_settings(size_hints, 1, bs, 1), triton_config_with_settings(size_hints, 1, 1, bs), *hinted_configs, ] if not configs: raise NotImplementedError(f"size_hints: {size_hints}") configs = _maybe_filter_configs_for_tma_restrictions(inductor_meta, configs) return cached_autotune( size_hints, configs, triton_meta=triton_meta, inductor_meta=inductor_meta, heuristic_type=HeuristicType.POINTWISE, filename=filename, ) def make_matmul_triton_config(sizes: dict[str, int], num_warps: int, num_stages: int): config = { "XBLOCK": sizes.get("x"), "YBLOCK": sizes.get("y"), "ZBLOCK": sizes.get("z"), "R0_BLOCK": sizes.get("r"), } # Remove keys with None values (i.e., missing in sizes) config = {k: v for k, v in config.items() if v is not None} return Config(config, num_warps=num_warps, num_stages=num_stages) def _config_helper(bmm=False, persistent=False): # Each entry is: (sizes_dict, num_warps, num_stages) _base_mm_configs = [ ({"x": 32, "y": 32, "r": 16}, 2, 
1), ({"x": 32, "y": 32, "r": 128}, 4, 2), ({"x": 32, "y": 64, "r": 32}, 8, 5), ({"x": 64, "y": 32, "r": 32}, 8, 5), ({"x": 64, "y": 32, "r": 128}, 4, 5), ({"x": 64, "y": 64, "r": 16}, 4, 2), ({"x": 64, "y": 64, "r": 32}, 4, 2), ({"x": 64, "y": 64, "r": 64}, 8, 3), ({"x": 64, "y": 64, "r": 128}, 4, 5), ({"x": 64, "y": 128, "r": 32}, 4, 3), ({"x": 64, "y": 128, "r": 32}, 8, 4), ({"x": 64, "y": 128, "r": 64}, 4, 3), ({"x": 64, "y": 128, "r": 128}, 4, 4), ({"x": 128, "y": 64, "r": 32}, 4, 3), ({"x": 128, "y": 64, "r": 32}, 8, 4), ({"x": 128, "y": 128, "r": 32}, 8, 2), ({"x": 128, "y": 128, "r": 32}, 4, 3), ({"x": 128, "y": 128, "r": 64}, 4, 3), ({"x": 128, "y": 128, "r": 64}, 8, 5), ] out = [] for sizes, w, s in _base_mm_configs: d = dict(sizes) if persistent: d.pop("r", None) if bmm: d["z"] = 1 out.append((d, w, s)) # Deduplicate by converting dicts to immutable frozensets deduped = {(frozenset(d.items()), w, s): (d, w, s) for d, w, s in out} return list(deduped.values()) triton_native_mm_configs = _config_helper(bmm=False, persistent=False) triton_native_persistent_mm_configs = _config_helper(bmm=False, persistent=True) triton_native_bmm_configs = _config_helper(bmm=True, persistent=False) triton_native_persistent_bmm_configs = _config_helper(bmm=True, persistent=True) def _reduction_configs( *, size_hints: dict[str, int], inductor_meta: dict[str, Any], triton_meta: dict[str, Any], num_dynamic=0, ) -> list[Config]: reduction_hint = inductor_meta.get("reduction_hint") # Convert reductions to 1D, to simplify heuristics. rnumel = get_total_reduction_numel(size_hints) # Is max autotune enabled max_autotune_enabled = inductor_meta.get("max_autotune") or inductor_meta.get( "max_autotune_pointwise" ) register_intensive = False MAX_R0_BLOCK = 2048 loads_and_red = inductor_meta.get("num_load", 0) + inductor_meta.get( "num_reduction", 0 ) if size_hints["x"] >= 1024 and loads_and_red >= 10: # A heuristics to reduce R0_BLOCK if a kernel potentially need many registers. 
# Consider load and reduction since load need move data into registers and # reduction needs an accumulator. # # The magic numbers are a bit arbitrary. # # We cannot rely on dynamically scaling down R0_BLOCK later, since sometimes # triton makes it to use less registers with worse perf. Check: # https://github.com/pytorch/pytorch/issues/126463 # # The heuristic is a very simple one since registers can be reused. But # hopefully it can be a good enough indicator. MAX_R0_BLOCK = 1024 register_intensive = True if triton_meta.get("native_matmul"): if len(size_hints) == 3: return [ make_matmul_triton_config(sizes, num_warps, num_stages) for sizes, num_warps, num_stages in triton_native_mm_configs ] elif len(size_hints) == 4: return [ make_matmul_triton_config(sizes, num_warps, num_stages) for sizes, num_warps, num_stages in triton_native_bmm_configs ] else: raise NotImplementedError("native matmul only supports mm/bmm pattern") def make_config( x, r, num_warps=None, num_stages=1, register_intensive=False, dynamic_scale_rblock=True, waves_per_eu=None, ): # For 3D case with tiling scores, create an adapted version if "y" in size_hints: assert "tiling_scores" in inductor_meta return adapt_config_for_tiling( size_hints, inductor_meta["tiling_scores"], x, r, num_warps=num_warps, num_stages=num_stages, register_intensive=register_intensive, waves_per_eu=waves_per_eu, ) else: # For other cases, use the original function return triton_config_reduction( size_hints, x, r, num_warps=num_warps, num_stages=num_stages, register_intensive=register_intensive, waves_per_eu=waves_per_eu, dynamic_scale_rblock=dynamic_scale_rblock, reduction_hint=reduction_hint, ) def outer_config_opt(): # Default to 64 for vectorized loads max_x_block, x_block = 256, 64 load_factor = inductor_meta.get("num_load", 0) x = size_hints["x"] num_warps = None # Try to use all SMs with small x if x <= 1024: x_block = max(min(x // 128, 8), 2) outer_r_block = min(rnumel, 64) # Lower bound x = 1024, 1024 // 16 = 128 
around # of SMs elif x // 4096 <= 8: x_block = 16 outer_r_block = 512 // x_block elif num_dynamic > 1: # Lots of compute with multiple dynamic shape per loop iteration # Larger RBLOCK minimizes loop iteration outer_r_block = max(min((rnumel // 64), 64), 8) elif num_dynamic == 1: # Dynamic shapes introduce a lot register pressure for indexing outer_r_block = ( 1 if load_factor >= 3 else min(next_power_of_2(max(rnumel, 128) // 128), 8) ) else: x_block = max(min(max_x_block, next_power_of_2(x // 4096)), x_block) if load_factor < 4 or rnumel <= 128: outer_r_block = 512 // x_block else: # Heavier reductions contain a lot more overhead per loop iteration # We minimize the overhead by enlarging r block if rnumel >= 2048: outer_r_block = 64 else: outer_r_block = 32 x_block = min(x_block, 32) num_warps = 4 # Set register intensive to true by default as we try to maximize tiles with heuristic return make_config( x_block, outer_r_block, num_warps=num_warps, register_intensive=register_intensive, ) contiguous_config = make_config( 2 if rnumel <= 2048 else 1, # 1024 or less is persistent min(rnumel, MAX_R0_BLOCK), register_intensive=register_intensive, ) tiny_config = make_config( 2 * (256 // rnumel) if rnumel <= 256 else 1, min(rnumel, MAX_R0_BLOCK), register_intensive=register_intensive, ) outer_config = make_config(64, 8, register_intensive=register_intensive) # TODO (paulzhan): Test heuristic on AMD and internal testing # for correctness if not torch.version.hip: outer_config = outer_config_opt() configs = [] if inductor_meta.get("add_persistent_rblock") and loads_and_red <= 8: xnumel = max(4096 // rnumel, 1) c = make_config( xnumel, min(rnumel, 32768), register_intensive=register_intensive, dynamic_scale_rblock=False, ) configs.append(c) result_configs = [] # For 3d tiling, default to more autotuning initially if "y" in size_hints: pass elif max_autotune_enabled: pass # skip all these cases elif reduction_hint == ReductionHint.INNER: return configs + [contiguous_config] 
elif reduction_hint == ReductionHint.OUTER: return configs + [outer_config] elif reduction_hint == ReductionHint.OUTER_TINY: return configs + [tiny_config] # We continue here under the following conditions: # - max_autotune_enabled is True # - max_autotune_enabled is False and reduction_hint is NOT one of the above cases result_configs = configs + [ contiguous_config, outer_config, tiny_config, make_config(64, 64), make_config(8, 512), # halve the XBLOCK/Rn_BLOCK compared to outer_config # TODO: this may only be beneficial when each iteration of the reduction # is quite heavy. E.g. https://gist.github.com/shunting314/189a8ef69f90db9d614a823385147a72 make_config(64, 4, num_warps=8), ] if torch.version.hip: result_configs.extend( [ make_config(1024, 8, num_warps=4, num_stages=1, waves_per_eu=2), make_config(512, 8, num_warps=4, num_stages=1, waves_per_eu=1), ] ) return result_configs def match_target_block_product( size_hints, tiling_scores, target_block_product, min_block_size=1 ): """ Distribute block sizes across dimensions according to tiling scores, aiming to match a target product of block sizes. 
""" total_score = sum(tiling_scores.values()) if total_score == 0: # just assume even score with no minimum block size min_block_size = 1 tiling_scores = dict.fromkeys(tiling_scores.keys(), target_block_product) # First, give each coalescing dimension at least min_block_size block_sizes = {} relative_scores = {} curr_block_product = 1 for dim, score in tiling_scores.items(): if score == 0: block_sizes[dim] = 1 continue block_sizes[dim] = min_block_size curr_block_product *= min_block_size relative_scores[dim] = score / total_score # Scale up dimensions by their relative scores until we reach the target while curr_block_product < target_block_product and relative_scores: dim, score = max(relative_scores.items(), key=lambda item: item[1]) # Check if we've hit the max for this dimension if ( block_sizes[dim] >= TRITON_MAX_BLOCK[dim.capitalize()] or block_sizes[dim] >= size_hints[dim] ): del relative_scores[dim] continue block_sizes[dim] *= 2 relative_scores[dim] /= 2 curr_block_product *= 2 return block_sizes def adapt_config_for_tiling( size_hints, tiling_scores, original_x, original_r, num_warps=None, num_stages=1, register_intensive=False, persistent_reduction=False, waves_per_eu=None, ) -> Config: """ Create an adapted configuration based on tiling scores, redistributing the same total block size (x * r) according to tiling scores. """ assert all(s in tiling_scores for s in size_hints) target_block_product = original_x * original_r block_sizes = match_target_block_product( size_hints, tiling_scores, target_block_product ) return triton_config_tiled_reduction( size_hints, block_sizes["x"], block_sizes["y"], block_sizes["r0_"], num_stages=num_stages, register_intensive=register_intensive, waves_per_eu=waves_per_eu, ) def filter_reduction_configs_for_determinism( inductor_meta: dict[str, Any], configs: list[Config] ) -> list[Config]: """ Filter configs for reduction so the numerics can be deterministic. 
Heuristics: - skip reduction configs with too small RBLOCK - skip reduction configs with XBLOCK==1 if we are confident it will not perform well - if there is a tie, pick the config with second largest RBLOCK - if there is still a tie, pick the config with second largest num_warps - if there is still a tie, pick the config with second largest XBLOCK """ configs = unique_configs(configs) assert len(configs) > 0 def _do_filter_due_to_inductor_config(): return ( inductor_meta.get("deterministic", False) or inductor_meta.get("force_filter_reduction_configs", False) ) or inductor_meta.get("are_deterministic_algorithms_enabled") if not _do_filter_due_to_inductor_config() or len(configs) == 1: # no filtering happening if NOT in deterministic mode return configs if log.isEnabledFor(logging.DEBUG): log.debug("reduction configs before filtering:") for c in configs: log.debug("%s", c) log.debug("") def _has_too_small_rblock(config): rblock = config.kwargs.get("R0_BLOCK") # too small RBLOCK is likely to be bad return rblock is not None and rblock <= 4 def _nonpromising_xblock_1(config): # kernel like https://gist.github.com/shunting314/0b3281c087e79bc915fe45985ff9d7d5 # without a load/store having contiguous rdim is unlikely to perform well with XBLOCK==1 return config.kwargs["XBLOCK"] == 1 and not inductor_meta.get( "has_loadstore_with_contiguous_rdim", True ) newconfigs = [*filter(lambda x: not _has_too_small_rblock(x), configs)] # accept the filtering only if there are configs left if len(newconfigs) > 0: configs = newconfigs newconfigs = [*filter(lambda x: not _nonpromising_xblock_1(x), configs)] if len(newconfigs) > 0: configs = newconfigs assert len(configs) > 0 def _r0_block(c): return c.kwargs.get("R0_BLOCK", -1) def _xblock(c): return c.kwargs.get("XBLOCK", -1) def _num_warps(c): return c.num_warps def _pick_second_largest(accessor): nonlocal configs configs = sorted(configs, key=lambda x: accessor(x)) if accessor(configs[0]) != accessor(configs[-1]): max_val = 
accessor(configs[-1]) configs = [*filter(lambda x: accessor(x) != max_val, configs)] second_max_val = accessor(configs[-1]) configs = [*filter(lambda x: accessor(x) == second_max_val, configs)] return configs def _pick_config(): nonlocal configs assert len(configs) > 0 if len(configs) == 1: return configs[0] # break tie by R0_BLOCK configs = _pick_second_largest(_r0_block) if len(configs) == 1: return configs[0] # break tie by num_warps configs = _pick_second_largest(_num_warps) if len(configs) == 1: return configs[0] # break tie by XBLOCK configs = _pick_second_largest(_xblock) # there is still a tie, pick the first one return configs[0] configs = [_pick_config()] if log.isEnabledFor(logging.DEBUG): log.debug("reduction configs after filtering:") for c in configs: log.debug("%s", c) log.debug("") return configs def reduction( size_hints, reduction_hint=False, triton_meta=None, filename=None, inductor_meta=None, ): """args to @triton.heuristics()""" inductor_meta = {} if inductor_meta is None else inductor_meta inductor_meta["reduction_hint"] = reduction_hint if inductor_meta.get("no_x_dim"): size_hints["x"] = 1 assert triton_meta is not None num_dynamic = 0 for k in triton_meta["signature"]: if "ks" in k: num_dynamic += 1 configs = _reduction_configs( size_hints=size_hints, inductor_meta=inductor_meta, triton_meta=triton_meta, num_dynamic=num_dynamic, ) configs = _maybe_filter_configs_for_tma_restrictions(inductor_meta, configs) configs = filter_reduction_configs_for_determinism(inductor_meta, configs) return cached_autotune( size_hints, configs=configs, triton_meta=triton_meta, inductor_meta=inductor_meta, heuristic_type=HeuristicType.REDUCTION, filename=filename, ) def cooperative_reduction( size_hints, reduction_hint, triton_meta, filename, inductor_meta, ): inductor_meta = {} if inductor_meta is None else inductor_meta inductor_meta["reduction_hint"] = reduction_hint if inductor_meta.get("no_x_dim"): size_hints["x"] = 1 # Cooperative reductions currently only 
support a single reduction dimension. assert len(size_hints) == 2, ( "Cooperative reductions don't support tiling reduction dims" ) xnumel, rnumel = size_hints["x"], size_hints["r0_"] # TODO(jansel): we should base target on the SM count of the local GPU target = 64 split = max(1, min(target // xnumel, TRITON_MAX_RSPLIT)) assert rnumel >= split assert split <= TRITON_MAX_RSPLIT if inductor_meta["persistent_reduction"]: configs = _persistent_reduction_configs( {"x": xnumel, "r0_": rnumel // split}, reduction_hint, inductor_meta, triton_meta, ) else: configs = _reduction_configs( size_hints={"x": xnumel, "r0_": rnumel // split}, inductor_meta=inductor_meta, triton_meta=triton_meta, ) for config in configs: config.kwargs["RSPLIT"] = split # TODO(jansel): add more configs in max_autotune configs = _maybe_filter_configs_for_tma_restrictions(inductor_meta, configs) configs = filter_reduction_configs_for_determinism(inductor_meta, configs) return cached_autotune( size_hints, configs=configs, triton_meta=triton_meta, inductor_meta=inductor_meta, heuristic_type=HeuristicType.REDUCTION, filename=filename, ) def _persistent_reduction_configs( size_hints, reduction_hint=False, inductor_meta=None, triton_meta=None, ): xnumel = size_hints["x"] rnumel = get_total_reduction_numel(size_hints) MAX_PERSISTENT_BLOCK_NUMEL = 4096 if triton_meta.get("native_matmul"): if len(size_hints) == 3: return [ make_matmul_triton_config(sizes, num_warps, num_stages) for sizes, num_warps, num_stages in triton_native_persistent_mm_configs ] elif len(size_hints) == 4: return [ make_matmul_triton_config(sizes, num_warps, num_stages) for sizes, num_warps, num_stages in triton_native_persistent_bmm_configs ] else: raise NotImplementedError("native matmul only supports mm/bmm pattern") max_autotune_enabled = inductor_meta.get("max_autotune") or inductor_meta.get( "max_autotune_pointwise" ) if torch.version.hip: xblock_vals = [1, 4, 8, 16, 32, 64, 128, 256] else: xblock_vals = [1, 8, 32, 128] if "y" not 
in size_hints: configs = [ triton_config_reduction( size_hints, xblock, rnumel, register_intensive=True, reduction_hint=reduction_hint, ) for xblock in xblock_vals if xblock == 1 or (rnumel * xblock <= MAX_PERSISTENT_BLOCK_NUMEL and xblock <= xnumel) ] else: configs = [] assert "tiling_scores" in inductor_meta x_y_scores = {dim: inductor_meta["tiling_scores"][dim] for dim in ("x", "y")} for target_block_size in xblock_vals: if target_block_size * rnumel > MAX_PERSISTENT_BLOCK_NUMEL: continue block_sizes = match_target_block_product( size_hints, x_y_scores, target_block_size ) configs.append( triton_config_tiled_reduction( size_hints, block_sizes["x"], block_sizes["y"], rnumel ) ) tiny_configs = [ triton_config_reduction( size_hints, 2 * (256 // rnumel) if rnumel <= 256 else 1, rnumel, ) ] # defer to more autotuning, initially if "y" in size_hints: pass # TODO(jansel): we should be able to improve these heuristics elif not max_autotune_enabled: # Do not filter configs when tuning if reduction_hint == ReductionHint.INNER and rnumel >= 256: if rnumel > 1024 or xnumel // 8 < 128 or inductor_meta.get("RSPLIT_SIZE"): configs = configs[:1] else: num_warps, min_num_warps = 1, 1 x_block = min(1024 // rnumel, 8) configs = [ triton_config_reduction( size_hints, x_block, rnumel, register_intensive=True, num_warps=num_warps, min_num_warps=min_num_warps, reduction_hint=reduction_hint, ) ] elif reduction_hint == ReductionHint.OUTER: configs = configs[-1:] elif reduction_hint == ReductionHint.OUTER_TINY: configs = tiny_configs else: if torch.version.hip: # If autotune is enabled append tiny configs for conf in tiny_configs: if conf not in configs: configs.append(conf) for c in configs: # we don't need Rn_BLOCK for persistent reduction for prefix in size_hints: if prefix_is_reduction(prefix): c.kwargs.pop(f"{prefix.upper()}BLOCK") return configs def persistent_reduction( size_hints, reduction_hint=False, triton_meta=None, filename=None, inductor_meta=None, ): inductor_meta = {} if 
inductor_meta is None else inductor_meta inductor_meta["reduction_hint"] = reduction_hint if inductor_meta.get("no_x_dim"): size_hints["x"] = 1 configs = _persistent_reduction_configs( size_hints, reduction_hint, inductor_meta, triton_meta ) # This key is not added to the inductor meta as its clear from the heuristic # choice that it is persistent. Add it and remove it below so that persistent # configs can be filtered appropriately by _maybe_filter_configs_for_tma_restrictions persistent_reduction_key = "persistent_reduction" inductor_meta[persistent_reduction_key] = True configs = _maybe_filter_configs_for_tma_restrictions(inductor_meta, configs) inductor_meta.pop(persistent_reduction_key) if inductor_meta.get("RSPLIT_SIZE"): new_configs = [] rsplit_size = inductor_meta.get("RSPLIT_SIZE") rnumel_hint = size_hints["r0_"] min_x_block = 1 if rnumel_hint <= 512: min_x_block = 4 x_block = min(max(rsplit_size // 32, min_x_block), 16) for c in configs: c.kwargs["RSPLIT_SIZE"] = rsplit_size # small XBLOCK to use less registers/smem c.kwargs["XBLOCK"] = x_block num_iters = rsplit_size // x_block c.kwargs["NUM_STAGES"] = min(max(num_iters // 4, 1), 3) if rnumel_hint <= 1024: c.num_warps //= 2 c.num_warps = max(c.num_warps, 1) new_configs.append(c) # less warps so potentially each sm can run more thread blocks # Inside each thread block, we handle the split sequentially, # more thread blocks is beneficial here. 
newc = copy.deepcopy(c) newc.num_warps = 2 new_configs.append(newc) else: # more warps for larger rows new_configs.append(c) if c.num_warps < 32: newc = copy.deepcopy(c) newc.num_warps *= 2 new_configs.append(newc) configs = unique_configs(new_configs) configs = filter_reduction_configs_for_determinism(inductor_meta, configs) return cached_autotune( size_hints, configs, triton_meta=triton_meta, inductor_meta=inductor_meta, filename=filename, heuristic_type=HeuristicType.PERSISTENT_REDUCTION, ) def split_scan( size_hints, reduction_hint=False, triton_meta=None, filename=None, inductor_meta=None, ): """Heuristic for TritonSplitScanKernel""" inductor_meta = {} if inductor_meta is None else inductor_meta inductor_meta["reduction_hint"] = reduction_hint if inductor_meta.get("no_x_dim"): size_hints["x"] = 1 assert triton_meta is not None if len(size_hints) != 2: raise NotImplementedError(f"size_hints: {size_hints}") configs = _reduction_configs( size_hints=size_hints, inductor_meta=inductor_meta, triton_meta=triton_meta ) # Fixup configs to enforce the minimum Rn_BLOCK size min_rblock = inductor_meta.get("min_split_scan_rblock", 256) for cfg in configs: for var in list(cfg.kwargs.keys()): if var.startswith("R") and cfg.kwargs[var] < min_rblock: cfg.kwargs[var] = min_rblock configs = _maybe_filter_configs_for_tma_restrictions(inductor_meta, configs) configs = filter_reduction_configs_for_determinism(inductor_meta, configs) return cached_autotune( size_hints, configs=configs, triton_meta=triton_meta, inductor_meta=inductor_meta, heuristic_type=HeuristicType.SPLIT_SCAN, filename=filename, ) def template( num_stages, num_warps, triton_meta, num_consumer_groups=0, num_buffers_warp_spec=0, filename=None, inductor_meta=None, ): """ Compile a triton template """ # Prepare the base configuration config_args = { "num_stages": num_stages, "num_warps": num_warps, } # Conditionally add arguments based on HAS_WARP_SPEC if HAS_WARP_SPEC: config_args.update( { "num_consumer_groups": 
num_consumer_groups, "num_buffers_warp_spec": num_buffers_warp_spec, } ) return cached_autotune( None, [triton.Config({}, **config_args)], triton_meta=triton_meta, inductor_meta=inductor_meta, heuristic_type=HeuristicType.TEMPLATE, filename=filename, ) def _pop_config_kwargs(config: dict[str, Any]) -> dict[str, Any]: """Extract triton.Config options that should become kwargs""" popped = {} for key in ( "num_warps", "num_stages", "num_ctas", "maxnreg", "num_consumer_groups", "num_buffers_warp_spec", ): val = config.pop(key, None) if val is not None: popped[key] = val return popped def config_to_dict(config: Config) -> dict[str, Any]: config_dict = { **config.kwargs, "num_warps": config.num_warps, "num_stages": config.num_stages, } if HAS_WARP_SPEC: config_dict.update( { "num_consumer_groups": getattr(config, "num_consumer_groups", 0), "num_buffers_warp_spec": getattr(config, "num_buffers_warp_spec", 0), } ) return config_dict def config_from_dict(config: dict[str, Any]) -> Config: config = {**config} return Config(config, **_pop_config_kwargs(config)) def fixed_config(config, filename, triton_meta, inductor_meta): """ Used when the configuration is already decided at compile time """ config = {**config} return cached_autotune( None, [triton.Config(config, **_pop_config_kwargs(config))], triton_meta=triton_meta, inductor_meta=inductor_meta, heuristic_type=HeuristicType.FIXED, filename=filename, ) def user_autotune( configs, triton_meta, filename=None, inductor_meta=None, custom_kernel=False ): """ Compile a user defined triton kernel """ if len(configs) == 0: configs = [triton.Config({})] else: configs = [*map(config_from_dict, configs)] return cached_autotune( None, configs, triton_meta=triton_meta, heuristic_type=HeuristicType.USER_AUTOTUNE, filename=filename, inductor_meta=inductor_meta, custom_kernel=custom_kernel, ) def foreach(triton_meta, filename=None, inductor_meta=None): """ Compile a triton foreach kernel """ configs = [] # Naive autotuning path for 
num_warps if not ( inductor_meta.get("max_autotune") or inductor_meta.get("max_autotune_pointwise") ): configs.append(triton.Config({}, num_stages=1, num_warps=8)) else: for warps in [1, 2, 4, 8]: configs.append(triton.Config({}, num_stages=1, num_warps=warps)) return cached_autotune( None, configs, triton_meta=triton_meta, inductor_meta=inductor_meta, heuristic_type=HeuristicType.TEMPLATE, filename=filename, ) @dataclasses.dataclass
DebugAutotuner
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 883410, "end": 883822 }
class ____(sgqlc.types.Type): """An edge in a connection.""" __schema__ = github_schema __field_names__ = ("cursor", "node") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") """A cursor for use in pagination.""" node = sgqlc.types.Field("PullRequestTimelineItems", graphql_name="node") """The item at the end of the edge."""
PullRequestTimelineItemsEdge
python
apache__airflow
providers/zendesk/tests/unit/zendesk/hooks/test_zendesk.py
{ "start": 1016, "end": 3774 }
class ____: conn_id = "zendesk_conn_id_test" @pytest.fixture(autouse=True) def setup_connections(self, create_connection_without_db): create_connection_without_db( Connection( conn_id=self.conn_id, conn_type="zendesk", host="yoursubdomain.zendesk.com", login="user@gmail.com", password="eb243592-faa2-4ba2-a551q-1afdf565c889", ) ) self.hook = ZendeskHook(zendesk_conn_id=self.conn_id) def test_hook_init_and_get_conn(self): # Verify config of zenpy APIs zenpy_client = self.hook.get_conn() assert zenpy_client.users.subdomain == "yoursubdomain" assert zenpy_client.users.domain == "zendesk.com" assert zenpy_client.users.session.auth == ("user@gmail.com", "eb243592-faa2-4ba2-a551q-1afdf565c889") assert not zenpy_client.cache.disabled assert self.hook._ZendeskHook__url == "https://yoursubdomain.zendesk.com" def test_get_ticket(self): zenpy_client = self.hook.get_conn() with patch.object(zenpy_client, "tickets") as tickets_mock: self.hook.get_ticket(ticket_id=1) tickets_mock.assert_called_once_with(id=1) def test_search_tickets(self): zenpy_client = self.hook.get_conn() with patch.object(zenpy_client, "search") as search_mock: self.hook.search_tickets(status="open", sort_order="desc") search_mock.assert_called_once_with(type="ticket", status="open", sort_order="desc") def test_create_tickets(self): zenpy_client = self.hook.get_conn() ticket = Ticket(subject="This is a test ticket to create") with patch.object(zenpy_client.tickets, "create") as search_mock: self.hook.create_tickets(ticket, extra_parameter="extra_parameter") search_mock.assert_called_once_with(ticket, extra_parameter="extra_parameter") def test_update_tickets(self): zenpy_client = self.hook.get_conn() ticket = Ticket(subject="This is a test ticket to update") with patch.object(zenpy_client.tickets, "update") as search_mock: self.hook.update_tickets(ticket, extra_parameter="extra_parameter") search_mock.assert_called_once_with(ticket, extra_parameter="extra_parameter") def test_delete_tickets(self): 
zenpy_client = self.hook.get_conn() ticket = Ticket(subject="This is a test ticket to delete") with patch.object(zenpy_client.tickets, "delete") as search_mock: self.hook.delete_tickets(ticket, extra_parameter="extra_parameter") search_mock.assert_called_once_with(ticket, extra_parameter="extra_parameter")
TestZendeskHook
python
django-extensions__django-extensions
tests/testapp/models.py
{ "start": 1455, "end": 2090 }
class ____(models.Model): name = models.ForeignKey(Name, on_delete=models.CASCADE) age = models.PositiveIntegerField() children = models.ManyToManyField("self") notes = models.ManyToManyField(Note) personality = models.OneToOneField( Personality, null=True, on_delete=models.CASCADE, ) clubs = models.ManyToManyField(Club, through="testapp.Membership") neighborhood = models.ForeignKey(Neighborhood, on_delete=models.SET_NULL, null=True) current_bank = models.ForeignKey(Bank, on_delete=models.PROTECT, null=True) class Meta: app_label = "django_extensions"
Person
python
tensorflow__tensorflow
tensorflow/python/framework/op_def_library_test.py
{ "start": 55856, "end": 56535 }
class ____(test_util.TensorFlowTestCase): def testPybind(self): x = constant_op.constant(32, dtype=dtypes.float32) y = constant_op.constant(32, dtype=dtypes.float32) attrs, inputs, input_types, output_structure = ( op_def_library_pybind.process_inputs("AddV2", 1, { "x": x, "y": y })) proto = text_format.Parse("type: DT_FLOAT", attr_value_pb2.AttrValue()) self.assertEqual(attrs, {"T": proto}) self.assertEqual(inputs, [x, y]) self.assertEqual(input_types, [dtypes.float32, dtypes.float32]) self.assertEqual(output_structure, [None]) if __name__ == "__main__": googletest.main()
OpDefLibraryPybindTest
python
django__django
tests/model_fields/models.py
{ "start": 935, "end": 1115 }
class ____(models.Model): a = models.CharField(max_length=10) d = models.DecimalField(max_digits=5, decimal_places=3) def get_foo(): return Foo.objects.get(id=1).pk
Foo
python
pandas-dev__pandas
asv_bench/benchmarks/io/excel.py
{ "start": 1546, "end": 2486 }
class ____: params = ["openpyxl", "odf"] param_names = ["engine"] fname_excel = "spreadsheet.xlsx" fname_odf = "spreadsheet.ods" def _create_odf(self): doc = OpenDocumentSpreadsheet() table = Table(name="Table1") for row in self.df.values: tr = TableRow() for val in row: tc = TableCell(valuetype="string") tc.addElement(P(text=val)) tr.addElement(tc) table.addElement(tr) doc.spreadsheet.addElement(table) doc.save(self.fname_odf) def setup_cache(self): self.df = _generate_dataframe() self.df.to_excel(self.fname_excel, sheet_name="Sheet1") self._create_odf() def time_read_excel(self, engine): if engine == "odf": fname = self.fname_odf else: fname = self.fname_excel read_excel(fname, engine=engine)
ReadExcel
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 53957, "end": 54289 }
class ____(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") result: Optional[bool] = Field(default=None, description="")
InlineResponse200
python
scipy__scipy
scipy/integrate/_odepack_py.py
{ "start": 199, "end": 11236 }
class ____(Warning): """Warning raised during the execution of `odeint`.""" pass _msgs = {2: "Integration successful.", 1: "Nothing was done; the integration time was 0.", -1: "Excess work done on this call (perhaps wrong Dfun type).", -2: "Excess accuracy requested (tolerances too small).", -3: "Illegal input detected (internal error).", -4: "Repeated error test failures (internal error).", -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).", -6: "Error weight became zero during problem.", -7: "Internal workspace insufficient to finish (internal error).", -8: "Run terminated (internal error)." } @xp_capabilities(out_of_scope=True) def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0, ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0, hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12, mxords=5, printmessg=0, tfirst=False): """ Integrate a system of ordinary differential equations. .. note:: For new code, use `scipy.integrate.solve_ivp` to solve a differential equation. Solve a system of ordinary differential equations using lsoda from the FORTRAN library odepack. Solves the initial value problem for stiff or non-stiff systems of first order ode-s:: dy/dt = func(y, t, ...) [or func(t, y, ...)] where y can be a vector. .. note:: By default, the required order of the first two arguments of `func` are in the opposite order of the arguments in the system definition function used by the `scipy.integrate.ode` class and the function `scipy.integrate.solve_ivp`. To use a function with the signature ``func(t, y, ...)``, the argument `tfirst` must be set to ``True``. Parameters ---------- func : callable(y, t, ...) or callable(t, y, ...) Computes the derivative of y at t. If the signature is ``callable(t, y, ...)``, then the argument `tfirst` must be set ``True``. `func` must not modify the data in `y`, as it is a view of the data used internally by the ODE solver. y0 : array Initial condition on y (can be a vector). 
t : array A sequence of time points for which to solve for y. The initial value point should be the first element of this sequence. This sequence must be monotonically increasing or monotonically decreasing; repeated values are allowed. args : tuple, optional Extra arguments to pass to function. Dfun : callable(y, t, ...) or callable(t, y, ...) Gradient (Jacobian) of `func`. If the signature is ``callable(t, y, ...)``, then the argument `tfirst` must be set ``True``. `Dfun` must not modify the data in `y`, as it is a view of the data used internally by the ODE solver. col_deriv : bool, optional True if `Dfun` defines derivatives down columns (faster), otherwise `Dfun` should define derivatives across rows. full_output : bool, optional True if to return a dictionary of optional outputs as the second output printmessg : bool, optional Whether to print the convergence message tfirst : bool, optional If True, the first two arguments of `func` (and `Dfun`, if given) must ``t, y`` instead of the default ``y, t``. .. versionadded:: 1.1.0 Returns ------- y : array, shape (len(t), len(y0)) Array containing the value of y for each desired time in t, with the initial value `y0` in the first row. 
infodict : dict, only returned if full_output == True Dictionary containing additional output information ======= ============================================================ key meaning ======= ============================================================ 'hu' vector of step sizes successfully used for each time step 'tcur' vector with the value of t reached for each time step (will always be at least as large as the input times) 'tolsf' vector of tolerance scale factors, greater than 1.0, computed when a request for too much accuracy was detected 'tsw' value of t at the time of the last method switch (given for each time step) 'nst' cumulative number of time steps 'nfe' cumulative number of function evaluations for each time step 'nje' cumulative number of jacobian evaluations for each time step 'nqu' a vector of method orders for each successful step 'imxer' index of the component of largest magnitude in the weighted local error vector (e / ewt) on an error return, -1 otherwise 'lenrw' the length of the double work array required 'leniw' the length of integer work array required 'mused' a vector of method indicators for each successful time step: 1: adams (nonstiff), 2: bdf (stiff) ======= ============================================================ Other Parameters ---------------- ml, mu : int, optional If either of these are not None or non-negative, then the Jacobian is assumed to be banded. These give the number of lower and upper non-zero diagonals in this banded matrix. For the banded case, `Dfun` should return a matrix whose rows contain the non-zero bands (starting with the lowest diagonal). Thus, the return matrix `jac` from `Dfun` should have shape ``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``. The data in `jac` must be stored such that ``jac[i - j + mu, j]`` holds the derivative of the ``i``\\ th equation with respect to the ``j``\\ th state variable. If `col_deriv` is True, the transpose of this `jac` must be returned. 
rtol, atol : float, optional The input parameters `rtol` and `atol` determine the error control performed by the solver. The solver will control the vector, e, of estimated local errors in y, according to an inequality of the form ``max-norm of (e / ewt) <= 1``, where ewt is a vector of positive error weights computed as ``ewt = rtol * abs(y) + atol``. rtol and atol can be either vectors the same length as y or scalars. Defaults to 1.49012e-8. tcrit : ndarray, optional Vector of critical points (e.g., singularities) where integration care should be taken. h0 : float, (0: solver-determined), optional The step size to be attempted on the first step. hmax : float, (0: solver-determined), optional The maximum absolute step size allowed. hmin : float, (0: solver-determined), optional The minimum absolute step size allowed. ixpr : bool, optional Whether to generate extra printing at method switches. mxstep : int, (0: solver-determined), optional Maximum number of (internally defined) steps allowed for each integration point in t. mxhnil : int, (0: solver-determined), optional Maximum number of messages printed. mxordn : int, (0: solver-determined), optional Maximum order to be allowed for the non-stiff (Adams) method. mxords : int, (0: solver-determined), optional Maximum order to be allowed for the stiff (BDF) method. See Also -------- solve_ivp : solve an initial value problem for a system of ODEs ode : a more object-oriented integrator based on VODE quad : for finding the area under a curve Examples -------- The second order differential equation for the angle `theta` of a pendulum acted on by gravity with friction can be written:: theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0 where `b` and `c` are positive constants, and a prime (') denotes a derivative. To solve this equation with `odeint`, we must first convert it to a system of first order equations. 
By defining the angular velocity ``omega(t) = theta'(t)``, we obtain the system:: theta'(t) = omega(t) omega'(t) = -b*omega(t) - c*sin(theta(t)) Let `y` be the vector [`theta`, `omega`]. We implement this system in Python as: >>> import numpy as np >>> def pend(y, t, b, c): ... theta, omega = y ... dydt = [omega, -b*omega - c*np.sin(theta)] ... return dydt ... We assume the constants are `b` = 0.25 and `c` = 5.0: >>> b = 0.25 >>> c = 5.0 For initial conditions, we assume the pendulum is nearly vertical with `theta(0)` = `pi` - 0.1, and is initially at rest, so `omega(0)` = 0. Then the vector of initial conditions is >>> y0 = [np.pi - 0.1, 0.0] We will generate a solution at 101 evenly spaced samples in the interval 0 <= `t` <= 10. So our array of times is: >>> t = np.linspace(0, 10, 101) Call `odeint` to generate the solution. To pass the parameters `b` and `c` to `pend`, we give them to `odeint` using the `args` argument. >>> from scipy.integrate import odeint >>> sol = odeint(pend, y0, t, args=(b, c)) The solution is an array with shape (101, 2). The first column is `theta(t)`, and the second is `omega(t)`. The following code plots both components. 
>>> import matplotlib.pyplot as plt >>> plt.plot(t, sol[:, 0], 'b', label='theta(t)') >>> plt.plot(t, sol[:, 1], 'g', label='omega(t)') >>> plt.legend(loc='best') >>> plt.xlabel('t') >>> plt.grid() >>> plt.show() """ if ml is None: ml = -1 # changed to zero inside function call if mu is None: mu = -1 # changed to zero inside function call dt = np.diff(t) if not ((dt >= 0).all() or (dt <= 0).all()): raise ValueError("The values in t must be monotonically increasing " "or monotonically decreasing; repeated values are " "allowed.") t = copy(t) y0 = copy(y0) output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu, full_output, rtol, atol, tcrit, h0, hmax, hmin, ixpr, mxstep, mxhnil, mxordn, mxords, int(bool(tfirst))) if output[-1] < 0: warning_msg = (f"{_msgs[output[-1]]} Run with full_output = 1 to " f"get quantitative information.") warnings.warn(warning_msg, ODEintWarning, stacklevel=2) elif printmessg: warning_msg = _msgs[output[-1]] warnings.warn(warning_msg, ODEintWarning, stacklevel=2) if full_output: output[1]['message'] = _msgs[output[-1]] output = output[:-1] if len(output) == 1: return output[0] else: return output
ODEintWarning
python
django__django
tests/many_to_many/tests.py
{ "start": 25369, "end": 28091 }
class ____(TestCase): """ SQL is optimized to reference the through table without joining against the related table when using count() and exists() functions on a queryset for many to many relations. The optimization applies to the case where there are no filters. """ @classmethod def setUpTestData(cls): cls.article = Article.objects.create( headline="Django lets you build Web apps easily" ) cls.nullable_target_article = NullableTargetArticle.objects.create( headline="The python is good" ) NullablePublicationThrough.objects.create( article=cls.nullable_target_article, publication=None ) @skipUnlessDBFeature("supports_foreign_keys") def test_count_join_optimization(self): with self.assertNumQueries(1) as ctx: self.article.publications.count() self.assertNotIn("JOIN", ctx.captured_queries[0]["sql"]) with self.assertNumQueries(1) as ctx: self.article.publications.count() self.assertNotIn("JOIN", ctx.captured_queries[0]["sql"]) self.assertEqual(self.nullable_target_article.publications.count(), 0) def test_count_join_optimization_disabled(self): with ( mock.patch.object(connection.features, "supports_foreign_keys", False), self.assertNumQueries(1) as ctx, ): self.article.publications.count() self.assertIn("JOIN", ctx.captured_queries[0]["sql"]) @skipUnlessDBFeature("supports_foreign_keys") def test_exists_join_optimization(self): with self.assertNumQueries(1) as ctx: self.article.publications.exists() self.assertNotIn("JOIN", ctx.captured_queries[0]["sql"]) self.article.publications.prefetch_related() with self.assertNumQueries(1) as ctx: self.article.publications.exists() self.assertNotIn("JOIN", ctx.captured_queries[0]["sql"]) self.assertIs(self.nullable_target_article.publications.exists(), False) def test_exists_join_optimization_disabled(self): with ( mock.patch.object(connection.features, "supports_foreign_keys", False), self.assertNumQueries(1) as ctx, ): self.article.publications.exists() self.assertIn("JOIN", ctx.captured_queries[0]["sql"]) def 
test_prefetch_related_no_queries_optimization_disabled(self): qs = Article.objects.prefetch_related("publications") article = qs.get() with self.assertNumQueries(0): article.publications.count() with self.assertNumQueries(0): article.publications.exists()
ManyToManyQueryTests
python
getsentry__sentry
src/sentry/analytics/events/manual_issue_assignment.py
{ "start": 80, "end": 308 }
class ____(analytics.Event): organization_id: int project_id: int group_id: int assigned_by: str | None = None had_to_deassign: bool | None = None analytics.register(ManualIssueAssignment)
ManualIssueAssignment
python
django__django
django/db/backends/base/validation.py
{ "start": 0, "end": 1119 }
class ____: """Encapsulate backend-specific validation.""" def __init__(self, connection): self.connection = connection def __del__(self): del self.connection def check(self, **kwargs): return [] def check_field(self, field, **kwargs): errors = [] # Backends may implement a check_field_type() method. if ( hasattr(self, "check_field_type") and # Ignore any related fields. not getattr(field, "remote_field", None) ): # Ignore fields with unsupported features. db_supports_all_required_features = all( getattr(self.connection.features, feature, False) for feature in field.model._meta.required_db_features ) if db_supports_all_required_features: field_type = field.db_type(self.connection) # Ignore non-concrete fields. if field_type is not None: errors.extend(self.check_field_type(field, field_type)) return errors
BaseDatabaseValidation
python
redis__redis-py
redis/commands/cluster.py
{ "start": 24955, "end": 25807 }
class ____( ClusterManagementCommands, AsyncManagementCommands ): """ A class for Redis Cluster management commands The class inherits from Redis's core ManagementCommands class and do the required adjustments to work with cluster mode """ async def cluster_delslots(self, *slots: EncodableT) -> List[bool]: """ Set hash slots as unbound in the cluster. It determines by it self what node the slot is in and sends it there Returns a list of the results for each processed slot. For more information see https://redis.io/commands/cluster-delslots """ return await asyncio.gather( *( asyncio.create_task(self.execute_command("CLUSTER DELSLOTS", slot)) for slot in slots ) )
AsyncClusterManagementCommands
python
pytorch__pytorch
torch/distributed/optim/functional_rprop.py
{ "start": 812, "end": 3807 }
class ____: def __init__( self, params: list[Tensor], lr: float = 1e-2, etas: tuple[float, float] = (0.5, 1.2), step_sizes: tuple[float, float] = (1e-6, 50), foreach: bool = False, maximize: bool = False, _allow_empty_param_list: bool = False, ): _scripted_functional_optimizer_deprecation_warning(stacklevel=2) self.defaults = { "lr": lr, } self.etas = etas self.step_sizes = step_sizes self.foreach = foreach self.maximize = maximize if len(params) == 0 and not _allow_empty_param_list: raise ValueError("optimizer got an empty parameter list") # NOTE: we only have one param_group and don't allow user to add additional # param group as it's not a common use case. self.param_group = {"params": params} self.state = torch.jit.annotate(dict[torch.Tensor, dict[str, torch.Tensor]], {}) def step(self, gradients: list[Tensor | None]): params = self.param_group["params"] params_with_grad = [] grads = [] prevs = [] step_sizes = [] state_steps = [] lr = self.defaults["lr"] etaminus, etaplus = self.etas step_size_min, step_size_max = self.step_sizes if len(params) != len(gradients): raise ValueError( "the gradients passed in does not equal to the size of the parameters!" + f"Params length: {len(params)}. 
" + f"Gradients length: {len(gradients)}" ) has_complex = False for param, gradient in zip(params, gradients): if gradient is not None: has_complex |= torch.is_complex(param) params_with_grad.append(param) grads.append(gradient) # Lazy state initialization if param not in self.state: self.state[param] = {} state = self.state[param] state["step"] = torch.tensor(0.0) state["prev"] = torch.zeros_like( param, memory_format=torch.preserve_format ) state["step_size"] = torch.full_like(gradient, lr) state = self.state[param] prevs.append(state["prev"]) step_sizes.append(state["step_size"]) state_steps.append(state["step"]) with torch.no_grad(): F.rprop( params_with_grad, grads, prevs, step_sizes, state_steps, step_size_min=step_size_min, step_size_max=step_size_max, etaminus=etaminus, etaplus=etaplus, foreach=self.foreach, maximize=self.maximize, has_complex=has_complex, )
_FunctionalRprop
python
astropy__astropy
astropy/utils/masked/tests/test_masked.py
{ "start": 58944, "end": 59799 }
class ____(MaskedArraySetup): def test_masked_array_from_masked(self): """Check that we can initialize a MaskedArray properly.""" np_ma = np.ma.MaskedArray(self.ma) assert type(np_ma) is np.ma.MaskedArray assert type(np_ma.data) is self._data_cls assert type(np_ma.mask) is np.ndarray assert_array_equal(np_ma.data, self.a) assert_array_equal(np_ma.mask, self.mask_a) def test_view_as_masked_array(self): """Test that we can be viewed as a MaskedArray.""" np_ma = self.ma.view(np.ma.MaskedArray) assert type(np_ma) is np.ma.MaskedArray assert type(np_ma.data) is self._data_cls assert type(np_ma.mask) is np.ndarray assert_array_equal(np_ma.data, self.a) assert_array_equal(np_ma.mask, self.mask_a)
TestMaskedArrayInteractionWithNumpyMA
python
PyCQA__pyflakes
pyflakes/checker.py
{ "start": 13379, "end": 15277 }
class ____(Binding): """ A binding created by an C{__all__} assignment. If the names in the list can be determined statically, they will be treated as names for export and additional checking applied to them. The only recognized C{__all__} assignment via list/tuple concatenation is in the following format: __all__ = ['a'] + ['b'] + ['c'] Names which are imported and not otherwise used but appear in the value of C{__all__} will not have an unused import warning reported for them. """ def __init__(self, name, source, scope): if '__all__' in scope and isinstance(source, ast.AugAssign): self.names = list(scope['__all__'].names) else: self.names = [] def _add_to_names(container): for node in container.elts: if isinstance(node, ast.Constant) and isinstance(node.value, str): self.names.append(node.value) if isinstance(source.value, (ast.List, ast.Tuple)): _add_to_names(source.value) # If concatenating lists or tuples elif isinstance(source.value, ast.BinOp): currentValue = source.value while isinstance(currentValue.right, (ast.List, ast.Tuple)): left = currentValue.left right = currentValue.right _add_to_names(right) # If more lists are being added if isinstance(left, ast.BinOp): currentValue = left # If just two lists are being added elif isinstance(left, (ast.List, ast.Tuple)): _add_to_names(left) # All lists accounted for - done break # If not list concatenation else: break super().__init__(name, source)
ExportBinding
python
sympy__sympy
sympy/matrices/expressions/factorizations.py
{ "start": 1120, "end": 1456 }
class ____(Factorization): @property def predicates(self): return (Q.orthogonal,) def lu(expr): return LofLU(expr), UofLU(expr) def qr(expr): return QofQR(expr), RofQR(expr) def eig(expr): return EigenValues(expr), EigenVectors(expr) def svd(expr): return UofSVD(expr), SofSVD(expr), VofSVD(expr)
VofSVD
python
pydata__xarray
xarray/tests/test_backends.py
{ "start": 64693, "end": 67223 }
class ____(CFEncodedBase): """Tests for all netCDF3 and netCDF4 backends.""" @pytest.mark.asyncio @pytest.mark.skip(reason="NetCDF backends don't support async loading") async def test_load_async(self) -> None: await super().test_load_async() @pytest.mark.skipif( ON_WINDOWS, reason="Windows does not allow modifying open files" ) def test_refresh_from_disk(self) -> None: # regression test for https://github.com/pydata/xarray/issues/4862 with create_tmp_file() as example_1_path: with create_tmp_file() as example_1_modified_path: with open_example_dataset("example_1.nc") as example_1: self.save(example_1, example_1_path) example_1.rh.values += 100 self.save(example_1, example_1_modified_path) a = open_dataset(example_1_path, engine=self.engine).load() # Simulate external process modifying example_1.nc while this script is running shutil.copy(example_1_modified_path, example_1_path) # Reopen example_1.nc (modified) as `b`; note that `a` has NOT been closed b = open_dataset(example_1_path, engine=self.engine).load() try: assert not np.array_equal(a.rh.values, b.rh.values) finally: a.close() b.close() def test_byte_attrs(self, byte_attrs_dataset: dict[str, Any]) -> None: # test for issue #9407 input = byte_attrs_dataset["input"] expected = byte_attrs_dataset["expected"] with self.roundtrip(input) as actual: assert_identical(actual, expected) _counter = itertools.count() @contextlib.contextmanager def create_tmp_file( suffix: str = ".nc", allow_cleanup_failure: bool = False ) -> Iterator[str]: temp_dir = tempfile.mkdtemp() path = os.path.join(temp_dir, f"temp-{next(_counter)}{suffix}") try: yield path finally: try: shutil.rmtree(temp_dir) except OSError: if not allow_cleanup_failure: raise @contextlib.contextmanager def create_tmp_files( nfiles: int, suffix: str = ".nc", allow_cleanup_failure: bool = False ) -> Iterator[list[str]]: with ExitStack() as stack: files = [ stack.enter_context(create_tmp_file(suffix, allow_cleanup_failure)) for _ in range(nfiles) ] yield files
NetCDFBase
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI012.py
{ "start": 714, "end": 795 }
class ____: pass # Y009 Empty body should contain `...`, not `pass`
EmptyClass
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/oracle/cx_oracle.py
{ "start": 22612, "end": 22713 }
class ____(sqltypes.Uuid): def get_dbapi_type(self, dbapi): return dbapi.STRING
_OracleUUID
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_textbox20.py
{ "start": 315, "end": 866 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("textbox20.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with textbox(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.insert_textbox("E9", "This is some text", {"font": {"bold": True}}) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
cherrypy__cherrypy
cherrypy/lib/profiler.py
{ "start": 1565, "end": 3936 }
class ____(object): """A profiling app.""" def __init__(self, path=None): """Prepare the profiling app resources.""" if not path: path = os.path.join(os.path.dirname(__file__), 'profile') self.path = path if not os.path.exists(path): os.makedirs(path) def run(self, func, *args, **params): """Dump profile data into self.path.""" global _count c = _count = _count + 1 path = os.path.join(self.path, 'cp_%04d.prof' % c) prof = profile.Profile() result = prof.runcall(func, *args, **params) prof.dump_stats(path) return result def statfiles(self): """Compose a list of statistics file names. :returns: A list of available profiles. :rtype: list[str] """ return [ f for f in os.listdir(self.path) if f.startswith('cp_') and f.endswith('.prof') ] def stats(self, filename, sortby='cumulative'): """Generate statistics from given profile. :returns: The sorted stats index printout. :rtype: str """ sio = io.StringIO() s = pstats.Stats(os.path.join(self.path, filename), stream=sio) s.strip_dirs() s.sort_stats(sortby) s.print_stats() response = sio.getvalue() sio.close() return response @cherrypy.expose def index(self): """Render the profiling viewer index page.""" return """<html> <head><title>CherryPy profile data</title></head> <frameset cols='200, 1*'> <frame src='menu' /> <frame name='main' src='' /> </frameset> </html> """ @cherrypy.expose def menu(self): """Render the profiler menu page html layout.""" yield '<h2>Profiling runs</h2>' yield '<p>Click on one of the runs below to see profiling data.</p>' runs = self.statfiles() runs.sort() for i in runs: yield "<a href='report?filename=%s' target='main'>%s</a><br />" % ( i, i, ) @cherrypy.expose def report(self, filename): """Render a statistics report.""" cherrypy.response.headers['Content-Type'] = 'text/plain' return self.stats(filename)
Profiler
python
pytorch__pytorch
test/inductor/test_flex_attention.py
{ "start": 181635, "end": 218819 }
class ____(InductorTestCase): def setUp(self): super().setUp() @supported_platform def test_block_mask_attributes(self, device): offset = torch.zeros(8, device=device) def causal_mask(b, h, q, kv): return (q + (offset[b] * 128)) >= kv block_mask = create_block_mask(causal_mask, 4, 2, 2048, 2048, device=device) self.assertEqual(block_mask.shape, (4, 2, 2048, 2048)) self.assertEqual(block_mask[0].shape, (1, 2, 2048, 2048)) self.assertEqual(block_mask[0, 0].shape, (1, 1, 2048, 2048)) self.assertEqual(block_mask.numel(), 4 * 2 * 2048 * 2048) self.assertEqual(block_mask.sparsity(), 46.875) self.assertEqual(block_mask[0].sparsity(), 46.875) self.assertEqual(block_mask[1, 0].sparsity(), 46.875) self.assertEqual(block_mask.sparsity(), block_mask[1].sparsity()) offset = torch.arange(8, device=device) block_mask = create_block_mask(causal_mask, 8, 1, 2048, 2048, device=device) self.assertEqual(block_mask.sparsity(), 29.1015625) self.assertTrue(block_mask.sparsity() < block_mask[0].sparsity()) self.assertTrue(block_mask[0].sparsity() > block_mask[1].sparsity()) @supported_platform @common_utils.parametrize("BLOCK_SIZE", [32, 64, 128, 256, (32, 64), (64, 32)]) def test_block_size_changes(self, device, BLOCK_SIZE: Union[int, tuple[int, int]]): B, H, Q_LEN, KV_LEN = 4, 2, 2048, 2048 if isinstance(BLOCK_SIZE, int): Q_BLOCK_SIZE = BLOCK_SIZE KV_BLOCK_SIZE = BLOCK_SIZE else: Q_BLOCK_SIZE, KV_BLOCK_SIZE = BLOCK_SIZE block_mask = create_block_mask( noop_mask, B, H, Q_LEN, KV_LEN, BLOCK_SIZE=BLOCK_SIZE, device=device ) self.assertEqual(block_mask.BLOCK_SIZE, (Q_BLOCK_SIZE, KV_BLOCK_SIZE)) self.assertEqual(block_mask.shape, (B, H, Q_LEN, KV_LEN)) @supported_platform def test_getitem(self, device): offset = torch.zeros(8, device=device) def causal_mask(b, h, q, kv): return (q + (offset[b] * 128)) >= kv block_mask = create_block_mask(causal_mask, 4, 2, 512, 512, device=device) assert block_mask.kv_num_blocks.shape == (4, 2, 4) assert block_mask.kv_indices.shape == (4, 2, 4, 4) # Index on 
batch dimension new_block_mask = block_mask[0] assert new_block_mask.kv_num_blocks.shape == (1, 2, 4) assert new_block_mask.kv_indices.shape == (1, 2, 4, 4) # Index on batch and head dimension new_block_mask = block_mask[0, 1] assert new_block_mask.kv_num_blocks.shape == ( 1, 1, 4, ) assert new_block_mask.kv_indices.shape == (1, 1, 4, 4) # Index on batch and head dimension with -1 semantics new_block_mask = block_mask[-1, -2] assert new_block_mask.kv_num_blocks.shape == ( 1, 1, 4, ) assert new_block_mask.kv_indices.shape == (1, 1, 4, 4) # slicing on batch and head dimension new_block_mask = block_mask[0:2, 1:2] assert new_block_mask.kv_num_blocks.shape == (2, 1, 4) assert new_block_mask.kv_indices.shape == (2, 1, 4, 4) # slicing on batch, head, and query dimension new_block_mask = block_mask[0:2, 1:2, torch.tensor([1], dtype=torch.int32)] assert new_block_mask.kv_num_blocks.shape == (2, 1, 1) assert new_block_mask.kv_indices.shape == (2, 1, 1, 4) # slicing on batch, head, and query dimension q_index = torch.tensor([0], dtype=torch.int32) new_block_mask = block_mask[:, :, q_index] self.assertEqual(new_block_mask.kv_num_blocks.ndim, 3) self.assertEqual(new_block_mask.kv_indices.ndim, 4) torch.testing.assert_close( new_block_mask.kv_num_blocks, block_mask.kv_num_blocks[:, :, q_index], ) torch.testing.assert_close( new_block_mask.kv_indices, block_mask.kv_indices[:, :, q_index, :] ) if block_mask.full_kv_num_blocks is not None: assert new_block_mask.full_kv_num_blocks is not None assert new_block_mask.full_kv_indices is not None torch.testing.assert_close( new_block_mask.full_kv_num_blocks, block_mask.full_kv_num_blocks[:, :, q_index], ) torch.testing.assert_close( new_block_mask.full_kv_indices, block_mask.full_kv_indices[:, :, q_index, :], ) @supported_platform def test_sliced_blockmask_mask_mod_error(self, device): """Test that sliced BlockMask raises helpful error when used with flex_attention""" def causal_mask(b, h, q_idx, kv_idx): return q_idx >= kv_idx 
base_mask = create_block_mask( causal_mask, B=1, H=1, Q_LEN=256, KV_LEN=256, device=device ) sliced_mask = base_mask[:, :, 0] q = torch.randn(1, 1, 1, 64, device=device) k = torch.randn(1, 1, 256, 64, device=device) v = torch.randn(1, 1, 256, 64, device=device) compiled_fa = torch.compile(flex_attention) with self.assertRaisesRegex( RuntimeError, "Cannot use mask_mod from a sliced BlockMask" ): compiled_fa(q, k, v, block_mask=sliced_mask) @supported_platform def test_block_mask_device_change(self, device): device = torch.device(device) offset = torch.zeros(8, device=device) def causal_mask(b, h, q, kv): return (q + (offset[b] * 128)) >= kv block_mask = create_block_mask(causal_mask, 1, 1, 512, 512, device=device) assert block_mask.kv_indices.device.type == device.type assert block_mask.kv_num_blocks.device.type == device.type assert block_mask.q_indices.device.type == device.type assert block_mask.q_num_blocks.device.type == device.type block_mask = block_mask.to("cpu") assert block_mask.kv_indices.is_cpu assert block_mask.kv_num_blocks.is_cpu assert block_mask.q_indices.is_cpu assert block_mask.q_num_blocks.is_cpu block_mask = block_mask.to(device) assert block_mask.kv_indices.device.type == device.type assert block_mask.kv_num_blocks.device.type == device.type assert block_mask.q_indices.device.type == device.type assert block_mask.q_num_blocks.device.type == device.type @supported_platform def test_compiling_create_block_mask(self, device): seq = torch.arange(512, device=device) // 127 def mask_mod(b, h, q, kv): return (q >= kv) & (seq[q] == seq[kv]) block_mask = torch.compile(create_block_mask, fullgraph=True)( mask_mod, 1, 1, 512, 512, device=device ) self.assertIsInstance(block_mask, BlockMask) self.assertEqual(block_mask.kv_num_blocks.shape, torch.Size((1, 1, 4))) self.assertEqual(block_mask.kv_indices.shape, torch.Size((1, 1, 4, 4))) @supported_platform def test_compiling_create_block_mask_no_recompile(self, device): def mask_mod(b, h, q, kv): return q >= 
kv torch._dynamo.reset() block_mask = torch.compile(create_block_mask)( mask_mod, 2, 4, 1024, 1024, device=device ) self.assertIsInstance(block_mask, BlockMask) self.assertEqual(block_mask.kv_num_blocks.shape, torch.Size((2, 4, 8))) self.assertEqual(block_mask.kv_indices.shape, torch.Size((2, 4, 8, 8))) self.assertEqual(torch._dynamo.utils.counters["aot_autograd"]["ok"], 1) # automatic dynamic shapes triggered and recompilation. block_mask = torch.compile(create_block_mask)( mask_mod, 4, 8, 2048, 2048, device=device ) self.assertIsInstance(block_mask, BlockMask) self.assertEqual(block_mask.kv_num_blocks.shape, torch.Size((4, 8, 16))) self.assertEqual(block_mask.kv_indices.shape, torch.Size((4, 8, 16, 16))) self.assertEqual(torch._dynamo.utils.counters["aot_autograd"]["ok"], 2) # no recompilation. block_mask = torch.compile(create_block_mask)( mask_mod, 6, 16, 3072, 3072, device=device ) self.assertIsInstance(block_mask, BlockMask) self.assertEqual(block_mask.kv_num_blocks.shape, torch.Size((6, 16, 24))) self.assertEqual(block_mask.kv_indices.shape, torch.Size((6, 16, 24, 24))) self.assertEqual(torch._dynamo.utils.counters["aot_autograd"]["ok"], 2) @supported_platform def test_block_mask_viz(self, device): def causal_mask(b, h, q, kv): return q >= kv block_mask = create_block_mask(causal_mask, 1, 1, 2048, 2048, device=device) def replace_non_printable(s): def replace(c): if c not in string.printable: return "@" elif c == " ": return "s" return c return "".join(replace(c) for c in s) self.assertExpectedInline( replace_non_printable(str(block_mask)), """\ BlockMask(shape=(1,s1,s2048,s2048),ssparsity=46.88%,s (0,s0) @@ssssssssssssssssssssssssssssss @@@@ssssssssssssssssssssssssssss @@@@@@ssssssssssssssssssssssssss @@@@@@@@ssssssssssssssssssssssss @@@@@@@@@@ssssssssssssssssssssss @@@@@@@@@@@@ssssssssssssssssssss @@@@@@@@@@@@@@ssssssssssssssssss @@@@@@@@@@@@@@@@ssssssssssssssss @@@@@@@@@@@@@@@@@@ssssssssssssss @@@@@@@@@@@@@@@@@@@@ssssssssssss 
@@@@@@@@@@@@@@@@@@@@@@ssssssssss @@@@@@@@@@@@@@@@@@@@@@@@ssssssss @@@@@@@@@@@@@@@@@@@@@@@@@@ssssss @@@@@@@@@@@@@@@@@@@@@@@@@@@@ssss @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ss @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ )""", ) offset = torch.arange(8, device=device) def causal_offset_mask(b, h, q, kv): return (q + offset[b] * 128) >= kv block_mask = create_block_mask( causal_offset_mask, 8, 1, 2048, 2048, device=device ) str_block_mask = str(block_mask) self.assertTrue("sparsity=29.10" in str_block_mask) def generate_test_inputs(self, full_seq_len: bool, device): if full_seq_len: kv_num_blocks = torch.tensor([1], dtype=torch.int32, device=device).view( 1, 1, 1 ) kv_indices = torch.tensor([1, -1], dtype=torch.int32, device=device).view( 1, 1, 1, 2 ) full_kv_num_blocks = torch.tensor( [1], dtype=torch.int32, device=device ).view(1, 1, 1) full_kv_indices = torch.tensor( [0, -1], dtype=torch.int32, device=device ).view(1, 1, 1, 2) else: kv_num_blocks = torch.tensor([2], dtype=torch.int32, device=device).view( 1, 1, 1 ) kv_indices = torch.tensor([0, 1], dtype=torch.int32, device=device).view( 1, 1, 1, 2 ) full_kv_indices = None full_kv_num_blocks = None return kv_num_blocks, kv_indices, full_kv_num_blocks, full_kv_indices @supported_platform @common_utils.parametrize("full_indices", [False, True]) def test_from_kv_blocks(self, device, full_indices: bool): ( kv_num_blocks, kv_indices, full_kv_num_blocks, full_kv_indices, ) = self.generate_test_inputs(full_indices, device=device) block_mask = BlockMask.from_kv_blocks( kv_num_blocks, kv_indices, full_kv_num_blocks, full_kv_indices ) self.assertIsInstance(block_mask, BlockMask) torch.testing.assert_close(block_mask.kv_num_blocks, kv_num_blocks) torch.testing.assert_close(block_mask.kv_indices, kv_indices) if full_indices: torch.testing.assert_close( block_mask.full_kv_num_blocks, full_kv_num_blocks ) torch.testing.assert_close(block_mask.full_kv_indices, full_kv_indices) torch.testing.assert_close( block_mask.q_num_blocks, torch.tensor([0, 1], 
dtype=torch.int32, device=device).view(1, 1, 2), ) torch.testing.assert_close( block_mask.q_indices, torch.tensor([0, 0], dtype=torch.int32, device=device).view(1, 1, 2, 1), ) torch.testing.assert_close( block_mask.full_q_num_blocks, torch.tensor([1, 0], dtype=torch.int32, device=device).view(1, 1, 2), ) torch.testing.assert_close( block_mask.full_q_indices, torch.tensor([0, 0], dtype=torch.int32, device=device).view(1, 1, 2, 1), ) else: torch.testing.assert_close( block_mask.q_num_blocks, torch.tensor([1, 1], dtype=torch.int32, device=device).view(1, 1, 2), ) torch.testing.assert_close( block_mask.q_indices, torch.tensor([0, 0], dtype=torch.int32, device=device).view(1, 1, 2, 1), ) self.assertIsNone(block_mask.full_kv_num_blocks) self.assertIsNone(block_mask.full_kv_indices) self.assertIsNone(block_mask.full_q_num_blocks) self.assertIsNone(block_mask.full_q_indices) @supported_platform def test_block_size(self, device): kv_num_blocks, kv_indices, _, _ = self.generate_test_inputs(False, device) block_mask = BlockMask.from_kv_blocks(kv_num_blocks, kv_indices) self.assertEqual( block_mask.BLOCK_SIZE, (_DEFAULT_SPARSE_BLOCK_SIZE, _DEFAULT_SPARSE_BLOCK_SIZE), ) custom_block_size = (64, 64) block_mask_custom = BlockMask.from_kv_blocks( kv_num_blocks, kv_indices, BLOCK_SIZE=custom_block_size ) self.assertEqual(block_mask_custom.BLOCK_SIZE, custom_block_size) @supported_platform def test_upcast_appropriately(self, device): q = torch.randn((1, 1, 128, 16), dtype=torch.float16, device=device) k = torch.randn((1, 1, 128, 16), dtype=torch.float16, device=device) v = torch.randn((1, 1, 128, 16), dtype=torch.float16, device=device) mass = torch.ones((1), dtype=torch.float16, device=device) def score_mod(score, b, h, q_idx, kv_idx): return score + torch.log(mass[0]) torch.compile(flex_attention)(q, k, v, score_mod=score_mod) @supported_platform def test_init_mismatched_full_kv(self, device): kv_num_blocks, kv_indices, full_kv_num_blocks, _ = self.generate_test_inputs( True, 
device ) with self.assertRaises(AssertionError): BlockMask( kv_num_blocks=kv_num_blocks, kv_indices=kv_indices, full_kv_num_blocks=full_kv_num_blocks, full_kv_indices=None, # Mismatched, should raise error q_num_blocks=kv_num_blocks, q_indices=kv_indices, full_q_num_blocks=None, full_q_indices=None, BLOCK_SIZE=(64, 64), mask_mod=noop_mask, seq_lengths=(1, 1), ) @supported_platform def test_init_mismatched_full_q(self, device): kv_num_blocks, kv_indices, _, _ = self.generate_test_inputs(False, device) with self.assertRaises(AssertionError): BlockMask( kv_num_blocks=kv_num_blocks, kv_indices=kv_indices, full_kv_num_blocks=None, full_kv_indices=None, q_num_blocks=kv_num_blocks, q_indices=kv_indices, full_q_num_blocks=kv_num_blocks, full_q_indices=None, # Mismatched, should raise error BLOCK_SIZE=(64, 64), mask_mod=noop_mask, seq_lengths=(1, 1), ) @supported_platform def test_doc_mask_clamped_repro(self, device): def _offsets_to_doc_ids_tensor(offsets): device = offsets.device counts = offsets[1:] - offsets[:-1] return torch.repeat_interleave( torch.arange(len(counts), device=device, dtype=torch.int32), counts ) def length_to_offsets( lengths: list[int], device: Union[str, torch.device] ) -> Tensor: offsets = [0] offsets.extend(lengths) offsets = torch.tensor(offsets, device=device, dtype=torch.int32) offsets = torch.cumsum(offsets, dim=-1) return offsets def generate_doc_mask_mod(offsets: Tensor) -> _mask_mod_signature: document_id = _offsets_to_doc_ids_tensor(offsets) def doc_mask_mod(b, h, q_idx, kv_idx): same_doc = document_id[q_idx] == document_id[kv_idx] return same_doc return doc_mask_mod random.seed(0) def generate_random_lengths(total_length, num_documents): lengths = [1] * num_documents remaining_length = total_length - num_documents for _ in range(remaining_length): index = random.randint(0, num_documents - 1) lengths[index] += 1 return lengths max_seq_len, doc_count = 128, 4 SEQ_LEN = max_seq_len lengths = generate_random_lengths(max_seq_len, doc_count) 
offsets = length_to_offsets(lengths, device) document_causal_mask = generate_doc_mask_mod(offsets) block_mask_compiled = torch.compile(create_block_mask)( document_causal_mask, 1, 1, SEQ_LEN, SEQ_LEN, device=device, ) block_mask = torch.compile(create_block_mask)( document_causal_mask, 1, 1, SEQ_LEN, SEQ_LEN, device=device, ) self.assertEqual(block_mask_compiled.kv_indices, block_mask.kv_indices) self.assertEqual( block_mask_compiled.full_kv_indices, block_mask.full_kv_indices ) for i in range(5): lengths = generate_random_lengths(1024 + i, 5) offsets = length_to_offsets(lengths, device) doc_ids = _offsets_to_doc_ids_tensor(offsets) def doc_mask_mod(b, h, q_idx, kv_idx): return ( doc_ids[q_idx.clamp(0, doc_ids.shape[0] - 1)] == doc_ids[kv_idx.clamp(0, doc_ids.shape[0] - 1)] ) q, k, v = ( torch.randn(1, 12, 1024 + i, 64, device=device) for _ in range(3) ) block_mask = create_block_mask( doc_mask_mod, None, None, 1024 + i, 1024 + i, device=device ) torch.compile(flex_attention)(q, k, v, block_mask=block_mask) @supported_platform def test_eager_tracing_correctness(self, device): qk_dims = 64 v_dims = 128 q_heads = 4 kv_heads = 2 seq_len = 256 batch_size = 1 make_tensor = functools.partial(torch.randn, device=device, dtype=torch.float16) q = make_tensor(*(batch_size, q_heads, seq_len, qk_dims)) k = make_tensor(*(batch_size, kv_heads, seq_len, qk_dims)) v = make_tensor(*(batch_size, kv_heads, seq_len, v_dims)) def flex_attention_fn(): out = flex_attention(q, k, v, enable_gqa=True) return out.view(batch_size, q_heads, seq_len, 2, 64) # Run with compilation compiled_fn = torch.compile(flex_attention_fn, fullgraph=True) result = compiled_fn() # Assert expected output shape expected_shape = (batch_size, q_heads, seq_len, 2, 64) self.assertEqual( result.shape, expected_shape, f"Expected output shape {expected_shape}, but got {result.shape}", ) @supported_platform @skip_on_xpu def test_create_is_cuda_graphable(self, device): def mask_mod(b, h, q, kv): return q >= kv g = 
torch.cuda.CUDAGraph() with torch.cuda.graph(g): create_block_mask(mask_mod, None, None, 256, 256) g.replay() @common_utils.parametrize("compile", [False, True]) @supported_platform def test_block_mask_vs_sequence_lengths(self, device, compile): if compile: flex_attention_call = torch.compile(flex_attention) else: flex_attention_call = flex_attention def mask_mod(b, h, q_idx, kv_idx): return q_idx >= kv_idx def create_inputs(S): q, k, v = ( torch.randn( 1, 8, S, 64, dtype=torch.float16, requires_grad=True, device=device ) for _ in range(3) ) return q, k, v block_mask = create_block_mask(mask_mod, None, None, 1024, 1024, device=device) flex_attention_call(*create_inputs(1024), block_mask=block_mask) with self.assertRaisesRegex(ValueError, "block_mask was created for"): flex_attention_call(*create_inputs(2048), block_mask=block_mask) block_mask = create_block_mask(mask_mod, None, None, 1023, 1023, device=device) with self.assertRaisesRegex(ValueError, "block_mask was created for"): flex_attention_call(*create_inputs(1024), block_mask=block_mask) @supported_platform @common_utils.parametrize("full_indices", [False, True]) def test_from_kv_blocks_without_q_computation(self, device, full_indices: bool): ( kv_num_blocks, kv_indices, full_kv_num_blocks, full_kv_indices, ) = self.generate_test_inputs(full_indices, device=device) block_mask = BlockMask.from_kv_blocks( kv_num_blocks, kv_indices, full_kv_num_blocks, full_kv_indices, compute_q_blocks=False, ) self.assertIsInstance(block_mask, BlockMask) self.assertEqual(block_mask.kv_num_blocks, kv_num_blocks) self.assertEqual(block_mask.kv_indices, kv_indices) self.assertIsNone(block_mask.q_num_blocks) self.assertIsNone(block_mask.q_indices) self.assertIsNone(block_mask.full_q_num_blocks) self.assertIsNone(block_mask.full_q_indices) if full_indices: self.assertEqual(block_mask.full_kv_num_blocks, full_kv_num_blocks) self.assertEqual(block_mask.full_kv_indices, full_kv_indices) else: 
self.assertIsNone(block_mask.full_kv_num_blocks) self.assertIsNone(block_mask.full_kv_indices) @supported_platform @skip_on_cpu def test_backward_error_with_none_q_indices(self, device): N_BLOCKS = 4 B, H, S, D = 1, 1, 128, 64 S_KV = N_BLOCKS * S kv_num_blocks = torch.tensor([[[N_BLOCKS]]], dtype=torch.int32, device=device) kv_indices = torch.tensor([[[[0, 1, 2, 3]]]], dtype=torch.int32, device=device) block_mask = BlockMask.from_kv_blocks( kv_num_blocks, kv_indices, compute_q_blocks=False ) q = torch.randn( B, H, S, D, dtype=torch.float16, device=device, requires_grad=True ) k = torch.randn( B, H, S_KV, D, dtype=torch.float16, device=device, requires_grad=True ) v = torch.randn( B, H, S_KV, D, dtype=torch.float16, device=device, requires_grad=True ) flex_compile = torch.compile(flex_attention, fullgraph=True) with torch.no_grad(): out_no_grad = flex_compile(q, k, v, block_mask=block_mask) self.assertEqual(out_no_grad.shape, (B, H, S, D)) # Forward pass with grad enabled should error immediately with self.assertRaisesRegex( RuntimeError, "BlockMask q_indices is None. Backward pass requires q_indices to be computed. 
" "Please create the BlockMask with compute_q_blocks=True", ): flex_compile(q, k, v, block_mask=block_mask) @supported_platform @skip_on_cpu def test_flex_attention_poisoned_rel_logits(self, device): B = 1 H = 1 S = 1025 D = 64 q, k, v = [ torch.randn(B, H, S, D, requires_grad=True, device=device) for _ in range(3) ] rel_logits = torch.randn(2 * B, H, S, S, device=device) rel_logits[B:] = float("nan") def score_mod(score, b, h, q, kv): return score + rel_logits[b, h, q, kv] def causal( b: torch.Tensor, h: torch.Tensor, q: torch.Tensor, kv: torch.Tensor ) -> torch.Tensor: return q >= kv block_mask = create_block_mask(causal, B, H, S, S, device=device) out = torch.compile(flex_attention)( q, k, v, score_mod=score_mod, block_mask=block_mask ) out.sum().backward() assert out.isfinite().all().item() assert q.grad.isfinite().all().item() assert k.grad.isfinite().all().item() assert v.grad.isfinite().all().item() @supported_platform @skip_on_cpu def test_flex_attention_poison_mod_fwd(self, device): """Div by score should cause our edge case handiling to NaN""" B = 1 H = 1 S = 257 D = 16 q, k, v = [ torch.randn(B, H, S, D, requires_grad=True, device=device) for _ in range(3) ] def score_mod(score, b, h, q, kv): return 1 / score def causal( b: torch.Tensor, h: torch.Tensor, q: torch.Tensor, kv: torch.Tensor ) -> torch.Tensor: return q >= kv block_mask = create_block_mask(causal, B, H, S, S, device=device) out = torch.compile(flex_attention, backend="inductor")( q, k, v, score_mod=score_mod, block_mask=block_mask ) out.sum().backward() assert out.isfinite().all().item() assert q.grad.isfinite().all().item() # assert k.grad.isfinite().all().item() assert v.grad.isfinite().all().item() @supported_platform @skip_on_cpu def test_flex_attention_poison_mod_bwd(self, device): """log score should cause our edge case handiling for NaN in grad score""" B = 1 H = 1 S = 257 D = 16 q, k, v = [ torch.randn(B, H, S, D, requires_grad=True, device=device) for _ in range(3) ] def 
score_mod(score, b, h, q, kv): return torch.where(score > 0, torch.log(score), score) def causal( b: torch.Tensor, h: torch.Tensor, q: torch.Tensor, kv: torch.Tensor ) -> torch.Tensor: return q >= kv block_mask = create_block_mask(causal, B, H, S, S, device=device) out = torch.compile(flex_attention, backend="inductor")( q, k, v, score_mod=score_mod, block_mask=block_mask ) out.sum().backward() assert out.isfinite().all().item() assert q.grad.isfinite().all().item() # assert k.grad.isfinite().all().item() assert v.grad.isfinite().all().item() @supported_platform @skip_on_cpu def test_forward_pass_with_none_q_indices(self, device): N_BLOCKS = 4 B, H, S, D = 1, 1, 128, 64 S_KV = N_BLOCKS * S kv_num_blocks = torch.tensor([[[N_BLOCKS]]], dtype=torch.int32, device=device) kv_indices = torch.tensor([[[[0, 1, 2, 3]]]], dtype=torch.int32, device=device) block_mask = BlockMask.from_kv_blocks( kv_num_blocks, kv_indices, compute_q_blocks=False ) q = torch.randn( B, H, S, D, dtype=torch.float16, device=device, ) k = torch.randn( B, H, S_KV, D, dtype=torch.float16, device=device, ) v = torch.randn( B, H, S_KV, D, dtype=torch.float16, device=device, ) flex_compile = torch.compile(flex_attention, fullgraph=True) out = flex_compile(q, k, v, block_mask=block_mask) self.assertEqual(out.shape, (B, H, S, D)) self.assertIsInstance(out, torch.Tensor) self.assertEqual(out.dtype, torch.float16) @supported_platform def test_block_mask_operations_with_none_q_indices(self, device): kv_num_blocks = torch.tensor([[[4]]], dtype=torch.int32, device=device) kv_indices = torch.tensor([[[[0, 1, 2, 3]]]], dtype=torch.int32, device=device) block_mask = BlockMask.from_kv_blocks( kv_num_blocks, kv_indices, compute_q_blocks=False ) self.assertEqual(block_mask.shape, (1, 1, 128, 512)) self.assertEqual(block_mask.BLOCK_SIZE, (128, 128)) sliced_mask = block_mask[0] self.assertEqual(sliced_mask.shape, (1, 1, 128, 512)) self.assertIsNone(sliced_mask.q_indices) self.assertIsNone(sliced_mask.q_num_blocks) # 
Test device movement if device != "cpu": cpu_mask = block_mask.to("cpu") self.assertEqual(cpu_mask.kv_num_blocks.device.type, "cpu") self.assertIsNone(cpu_mask.q_indices) @supported_platform @skip_on_cpu def test_broadcasted_head_block_mask(self, device): torch.manual_seed(42) def causal_mask(b, h, q_idx, kv_idx): return q_idx >= kv_idx def get_mask_mod_with_offset(mask_mod, offset_tensor): def _mask_mod(b, h, q, kv): return mask_mod(b, h, q + offset_tensor, kv) return _mask_mod B, T, H, D, current_pos = 4, 512, 8, 64, 128 dtype = torch.float32 q = torch.randn(B, H, 1, D, device=device, dtype=dtype) k_cache = torch.randn(B, H, T, D, device=device, dtype=dtype) v_cache = torch.randn(B, H, T, D, device=device, dtype=dtype) # Keep future tokens tiny to avoid numerical issues when using full caches k_cache[:, :, current_pos + 1 :, :] = ( torch.randn_like(k_cache[:, :, current_pos + 1 :, :]) * 1e-10 ) v_cache[:, :, current_pos + 1 :, :] = ( torch.randn_like(v_cache[:, :, current_pos + 1 :, :]) * 1e-10 ) k_cropped = k_cache[:, :, : current_pos + 1, :] v_cropped = v_cache[:, :, : current_pos + 1, :] sdpa_output = torch.nn.functional.scaled_dot_product_attention( q, k_cropped, v_cropped, attn_mask=None ) base_mask = create_block_mask( causal_mask, B=B, H=None, # broadcast across heads Q_LEN=T, KV_LEN=T, device=device, _compile=True, ) q_block_size = base_mask.BLOCK_SIZE[0] block_offset = current_pos // q_block_size mask_slice = base_mask[:, :, block_offset] offset_tensor = torch.tensor(current_pos, device=device) mask_slice.mask_mod = get_mask_mod_with_offset( base_mask.mask_mod, offset_tensor ) mask_slice.seq_lengths = (1, mask_slice.seq_lengths[1]) fa = torch.compile(flex_attention, dynamic=True) flex_output = fa(q, k_cache, v_cache, block_mask=mask_slice) self.assertEqual(flex_output, sdpa_output, atol=1e-3, rtol=1e-3) @supported_platform def test_pytree_flatten_unflatten(self, device): """Test that BlockMask can be correctly flattened and unflattened using class 
methods.""" def causal_mask(b, h, q_idx, kv_idx): return q_idx >= kv_idx # Create a BlockMask with various attributes set block_mask = create_block_mask( causal_mask, B=2, H=4, Q_LEN=512, KV_LEN=512, device=device ) # Flatten and unflatten using class methods tensors, context = block_mask._flatten() reconstructed_mask = BlockMask._unflatten(tensors, context) # Verify the reconstructed mask has the same attributes self.assertEqual(reconstructed_mask.shape, block_mask.shape) self.assertEqual(reconstructed_mask.sparsity(), block_mask.sparsity()) # Verify all tensor attributes are equal (using _TENSOR_ATTRS) for attr_name in BlockMask._TENSOR_ATTRS: original_value = getattr(block_mask, attr_name) reconstructed_value = getattr(reconstructed_mask, attr_name) if original_value is None: self.assertIsNone( reconstructed_value, f"Tensor attribute {attr_name} should be None but got {reconstructed_value}", ) else: self.assertIsInstance( original_value, torch.Tensor, f"Expected {attr_name} to be a Tensor", ) self.assertTrue( torch.equal(original_value, reconstructed_value), f"Tensor attribute {attr_name} not equal after reconstruction", ) # Verify all context attributes are equal (using _CONTEXT_ATTRS) for attr_name in BlockMask._CONTEXT_ATTRS: original_value = getattr(block_mask, attr_name) reconstructed_value = getattr(reconstructed_mask, attr_name) self.assertEqual( original_value, reconstructed_value, f"Context attribute {attr_name} not equal after reconstruction", ) @supported_platform def test_pytree_flatten_with_keys(self, device): """Test that BlockMask._flatten_with_keys works correctly for tracing.""" def causal_mask(b, h, q_idx, kv_idx): return q_idx >= kv_idx block_mask = create_block_mask( causal_mask, B=2, H=4, Q_LEN=512, KV_LEN=512, device=device ) tensors_with_keys, context_with_keys = block_mask._flatten_with_keys() self.assertEqual(len(tensors_with_keys), len(BlockMask._TENSOR_ATTRS)) self.assertEqual(len(context_with_keys), len(BlockMask._CONTEXT_ATTRS)) from 
torch.utils._pytree import GetAttrKey for key, _tensor in tensors_with_keys: self.assertIsInstance(key, GetAttrKey) self.assertIsNotNone(key) for key, _value in context_with_keys: self.assertIsInstance(key, GetAttrKey) self.assertIsNotNone(key) @supported_platform def test_pytree_preserves_new_attributes(self, device): """ Test that BlockMask._TENSOR_ATTRS and _CONTEXT_ATTRS are correctly defined and that flatten/unflatten preserves all attributes in these lists. """ def causal_mask(b, h, q_idx, kv_idx): return q_idx >= kv_idx block_mask = create_block_mask( causal_mask, B=2, H=4, Q_LEN=512, KV_LEN=512, device=device ) # Flatten and unflatten using class methods tensors, context = block_mask._flatten() reconstructed_mask = BlockMask._unflatten(tensors, context) # Verify the number of tensors and context values matches the attribute lists self.assertEqual( len(tensors), len(BlockMask._TENSOR_ATTRS), "Number of tensors should match _TENSOR_ATTRS length", ) self.assertEqual( len(context), len(BlockMask._CONTEXT_ATTRS), "Number of context values should match _CONTEXT_ATTRS length", ) # Verify all attributes from the lists exist and are equal after reconstruction for attr_name in BlockMask._TENSOR_ATTRS + BlockMask._CONTEXT_ATTRS: self.assertTrue( hasattr(reconstructed_mask, attr_name), f"Reconstructed mask missing attribute: {attr_name}", ) original_value = getattr(block_mask, attr_name) reconstructed_value = getattr(reconstructed_mask, attr_name) if isinstance(original_value, torch.Tensor): self.assertTrue( torch.equal(original_value, reconstructed_value), f"Tensor attribute {attr_name} not equal after reconstruction", ) elif original_value is None: self.assertIsNone( reconstructed_value, f"Attribute {attr_name} should be None but got {reconstructed_value}", ) else: self.assertEqual( original_value, reconstructed_value, f"Attribute {attr_name} not equal after reconstruction", ) @large_tensor_test_class("2GB", device=test_device[0])
TestBlockMask
python
pytorch__pytorch
torch/_inductor/compile_fx.py
{ "start": 25565, "end": 25991 }
class ____(TypedDict, total=False): cudagraphs: Optional[BoxedBool] static_input_idxs: Sequence[int] is_backward: bool graph_id: Optional[int] cpp_wrapper: bool aot_mode: bool is_inference: bool layout_opt: Optional[bool] extern_node_serializer: Optional[Callable[[list[ExternKernelNode]], Any]] boxed_forward_device_index: Optional[BoxedDeviceIndex] fx_wrapper: bool
_CompileFxKwargs
python
huggingface__transformers
src/transformers/modeling_outputs.py
{ "start": 24743, "end": 27679 }
class ____(ModelOutput): """ Base class for causal language model (or autoregressive) with mixture of experts outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): aux_loss for the sparse modules. router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Raw router logtis (post-softmax) that are computed by MoE routers, these terms are used to compute the auxiliary loss for Mixture of Experts models. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None aux_loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None router_logits: Optional[tuple[torch.FloatTensor]] = None @dataclass
MoeCausalLMOutputWithPast
python
numpy__numpy
numpy/f2py/tests/test_crackfortran.py
{ "start": 9939, "end": 10261 }
class ____(util.F2PyTest): def test_eval_scalar(self): eval_scalar = crackfortran._eval_scalar assert eval_scalar('123', {}) == '123' assert eval_scalar('12 + 3', {}) == '15' assert eval_scalar('a + b', {"a": 1, "b": 2}) == '3' assert eval_scalar('"123"', {}) == "'123'"
TestEval
python
openai__openai-python
src/openai/types/realtime/realtime_session_create_response.py
{ "start": 12589, "end": 13194 }
class ____(BaseModel): group_id: Optional[str] = None """ The group id to attach to this trace to enable filtering and grouping in the Traces Dashboard. """ metadata: Optional[object] = None """ The arbitrary metadata to attach to this trace to enable filtering in the Traces Dashboard. """ workflow_name: Optional[str] = None """The name of the workflow to attach to this trace. This is used to name the trace in the Traces Dashboard. """ Tracing: TypeAlias = Union[Literal["auto"], TracingTracingConfiguration, None]
TracingTracingConfiguration
python
ansible__ansible
test/lib/ansible_test/_internal/debugging.py
{ "start": 3709, "end": 7813 }
class ____(DebuggerSettings): """Settings for the pydevd debugger.""" package: str | None = None """ The Python package to install for debugging. If `None` then the package will be auto-detected. If an empty string, then no package will be installed. """ module: str | None = None """ The Python module to import for debugging. This should be pydevd or a derivative. If not provided it will be auto-detected. """ settrace: dict[str, object] = dataclasses.field(default_factory=dict) """ Options to pass to the `{module}.settrace` method. Used for running AnsiballZ modules only. The `host` and `port` options will be provided by ansible-test. The `suspend` option defaults to `False`. """ args: list[str] = dataclasses.field(default_factory=list) """ Arguments to pass to `pydevd` on the command line. Used for running Ansible CLI programs only. The `--client` and `--port` options will be provided by ansible-test. """ @classmethod def is_active(cls) -> bool: return detect_pydevd_port() is not None @classmethod def apply_defaults(cls, settings: t.Self) -> t.Self: if not settings.module: if not settings.package or 'pydevd-pycharm' in settings.package: module = 'pydevd_pycharm' else: module = 'pydevd' settings = dataclasses.replace(settings, module=module) if settings.package is None: if settings.module == 'pydevd_pycharm': if pycharm_version := detect_pycharm_version(): package = f'pydevd-pycharm~={pycharm_version}' else: package = None else: package = 'pydevd' settings = dataclasses.replace(settings, package=package) settings.settrace.setdefault('suspend', False) if port := detect_pydevd_port(): settings = dataclasses.replace(settings, port=port) if detect_pycharm_process(): # This only works with the default PyCharm debugger. # Using it with PyCharm's "Python Debug Server" results in hangs in Ansible workers. # Further investigation is required to understand the cause. 
settings = dataclasses.replace(settings, args=settings.args + ['--multiprocess']) return settings def get_python_package(self) -> str: if self.package is None and self.module == 'pydevd_pycharm': display.warning('Skipping installation of `pydevd-pycharm` since the running PyCharm version was not detected.') return self.package def activate_debugger(self, profile: DebuggerProfile) -> None: debugging_module = importlib.import_module(self.module) debugging_module.settrace(**self._get_settrace_arguments(profile)) def get_ansiballz_config(self, profile: DebuggerProfile) -> dict[str, object]: return dict( module=self.module, settrace=self._get_settrace_arguments(profile), source_mapping=profile.get_source_mapping(), ) def get_cli_arguments(self, profile: DebuggerProfile) -> list[str]: # Although `pydevd_pycharm` can be used to invoke `settrace`, it cannot be used to run the debugger on the command line. return ['-m', 'pydevd', '--client', profile.debugger_host, '--port', str(profile.debugger_port)] + self.args + ['--file'] def get_environment_variables(self, profile: DebuggerProfile) -> dict[str, str]: return dict( PATHS_FROM_ECLIPSE_TO_PYTHON=json.dumps(list(profile.get_source_mapping().items())), PYDEVD_DISABLE_FILE_VALIDATION="1", ) def _get_settrace_arguments(self, profile: DebuggerProfile) -> dict[str, object]: """Get settrace arguments for pydevd.""" return self.settrace | dict( host=profile.debugger_host, port=profile.debugger_port, ) @dataclasses.dataclass(frozen=True, kw_only=True)
PydevdSettings
python
getsentry__sentry
src/sentry/integrations/perforce/integration.py
{ "start": 8697, "end": 10756 }
class ____(IntegrationProvider): """Provider for Perforce integration.""" key = "perforce" name = "Perforce" metadata = metadata integration_cls = PerforceIntegration features = frozenset( [ IntegrationFeatures.STACKTRACE_LINK, IntegrationFeatures.COMMITS, ] ) requires_feature_flag = True def get_pipeline_views(self) -> Sequence[PipelineView]: """Get pipeline views for installation flow.""" return [PerforceInstallationView()] def build_integration(self, state: Mapping[str, Any]) -> IntegrationData: """ Build integration data from installation state. Args: state: Installation state from pipeline Returns: Integration data dictionary """ # Use p4port if available, otherwise fall back to host:port for legacy p4port = ( state.get("p4port") or f"{state.get('host', 'localhost')}:{state.get('port', '1666')}" ) return { "name": state.get("name", f"Perforce ({p4port})"), "external_id": p4port, "metadata": { "p4port": p4port, "user": state.get("user"), "password": state.get("password"), "client": state.get("client"), "ssl_fingerprint": state.get("ssl_fingerprint"), "web_url": state.get("web_url"), }, } def post_install( self, integration: Integration, organization: RpcOrganization, *, extra: dict[str, Any], ) -> None: """Actions after installation.""" pass def setup(self) -> None: """Setup integration provider.""" from sentry.plugins.base import bindings from .repository import PerforceRepositoryProvider bindings.add( "integration-repository.provider", PerforceRepositoryProvider, id="integrations:perforce", )
PerforceIntegrationProvider
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/triggers/redshift_cluster.py
{ "start": 6906, "end": 8176 }
class ____(AwsBaseWaiterTrigger): """ Trigger for RedshiftDeleteClusterOperator. :param cluster_identifier: A unique identifier for the cluster. :param waiter_max_attempts: The maximum number of attempts to be made. :param aws_conn_id: The Airflow connection used for AWS credentials. :param waiter_delay: The amount of time in seconds to wait between attempts. """ def __init__( self, cluster_identifier: str, aws_conn_id: str | None = "aws_default", waiter_delay: int = 30, waiter_max_attempts: int = 30, ): super().__init__( serialized_fields={"cluster_identifier": cluster_identifier}, waiter_name="cluster_deleted", waiter_args={"ClusterIdentifier": cluster_identifier}, failure_message="Delete Cluster Failed", status_message="Redshift Cluster deletion in progress", status_queries=["Clusters[].ClusterStatus"], return_value=None, waiter_delay=waiter_delay, waiter_max_attempts=waiter_max_attempts, aws_conn_id=aws_conn_id, ) def hook(self) -> AwsGenericHook: return RedshiftHook(aws_conn_id=self.aws_conn_id)
RedshiftDeleteClusterTrigger
python
redis__redis-py
redis/asyncio/client.py
{ "start": 32218, "end": 49892 }
class ____: """ PubSub provides publish, subscribe and listen support to Redis channels. After subscribing to one or more channels, the listen() method will block until a message arrives on one of the subscribed channels. That message will be returned and it's safe to start listening again. """ PUBLISH_MESSAGE_TYPES = ("message", "pmessage") UNSUBSCRIBE_MESSAGE_TYPES = ("unsubscribe", "punsubscribe") HEALTH_CHECK_MESSAGE = "redis-py-health-check" def __init__( self, connection_pool: ConnectionPool, shard_hint: Optional[str] = None, ignore_subscribe_messages: bool = False, encoder=None, push_handler_func: Optional[Callable] = None, event_dispatcher: Optional["EventDispatcher"] = None, ): if event_dispatcher is None: self._event_dispatcher = EventDispatcher() else: self._event_dispatcher = event_dispatcher self.connection_pool = connection_pool self.shard_hint = shard_hint self.ignore_subscribe_messages = ignore_subscribe_messages self.connection = None # we need to know the encoding options for this connection in order # to lookup channel and pattern names for callback handlers. 
self.encoder = encoder self.push_handler_func = push_handler_func if self.encoder is None: self.encoder = self.connection_pool.get_encoder() if self.encoder.decode_responses: self.health_check_response = [ ["pong", self.HEALTH_CHECK_MESSAGE], self.HEALTH_CHECK_MESSAGE, ] else: self.health_check_response = [ [b"pong", self.encoder.encode(self.HEALTH_CHECK_MESSAGE)], self.encoder.encode(self.HEALTH_CHECK_MESSAGE), ] if self.push_handler_func is None: _set_info_logger() self.channels = {} self.pending_unsubscribe_channels = set() self.patterns = {} self.pending_unsubscribe_patterns = set() self._lock = asyncio.Lock() async def __aenter__(self): return self async def __aexit__(self, exc_type, exc_value, traceback): await self.aclose() def __del__(self): if self.connection: self.connection.deregister_connect_callback(self.on_connect) async def aclose(self): # In case a connection property does not yet exist # (due to a crash earlier in the Redis() constructor), return # immediately as there is nothing to clean-up. 
if not hasattr(self, "connection"): return async with self._lock: if self.connection: await self.connection.disconnect() self.connection.deregister_connect_callback(self.on_connect) await self.connection_pool.release(self.connection) self.connection = None self.channels = {} self.pending_unsubscribe_channels = set() self.patterns = {} self.pending_unsubscribe_patterns = set() @deprecated_function(version="5.0.1", reason="Use aclose() instead", name="close") async def close(self) -> None: """Alias for aclose(), for backwards compatibility""" await self.aclose() @deprecated_function(version="5.0.1", reason="Use aclose() instead", name="reset") async def reset(self) -> None: """Alias for aclose(), for backwards compatibility""" await self.aclose() async def on_connect(self, connection: Connection): """Re-subscribe to any channels and patterns previously subscribed to""" # NOTE: for python3, we can't pass bytestrings as keyword arguments # so we need to decode channel/pattern names back to unicode strings # before passing them to [p]subscribe. 
self.pending_unsubscribe_channels.clear() self.pending_unsubscribe_patterns.clear() if self.channels: channels = {} for k, v in self.channels.items(): channels[self.encoder.decode(k, force=True)] = v await self.subscribe(**channels) if self.patterns: patterns = {} for k, v in self.patterns.items(): patterns[self.encoder.decode(k, force=True)] = v await self.psubscribe(**patterns) @property def subscribed(self): """Indicates if there are subscriptions to any channels or patterns""" return bool(self.channels or self.patterns) async def execute_command(self, *args: EncodableT): """Execute a publish/subscribe command""" # NOTE: don't parse the response in this function -- it could pull a # legitimate message off the stack if the connection is already # subscribed to one or more channels await self.connect() connection = self.connection kwargs = {"check_health": not self.subscribed} await self._execute(connection, connection.send_command, *args, **kwargs) async def connect(self): """ Ensure that the PubSub is connected """ if self.connection is None: self.connection = await self.connection_pool.get_connection() # register a callback that re-subscribes to any channels we # were listening to when we were disconnected self.connection.register_connect_callback(self.on_connect) else: await self.connection.connect() if self.push_handler_func is not None: self.connection._parser.set_pubsub_push_handler(self.push_handler_func) self._event_dispatcher.dispatch( AfterPubSubConnectionInstantiationEvent( self.connection, self.connection_pool, ClientType.ASYNC, self._lock ) ) async def _reconnect(self, conn): """ Try to reconnect """ await conn.disconnect() await conn.connect() async def _execute(self, conn, command, *args, **kwargs): """ Connect manually upon disconnection. If the Redis server is down, this will fail and raise a ConnectionError as desired. 
After reconnection, the ``on_connect`` callback should have been called by the # connection to resubscribe us to any channels and patterns we were previously listening to """ return await conn.retry.call_with_retry( lambda: command(*args, **kwargs), lambda _: self._reconnect(conn), ) async def parse_response(self, block: bool = True, timeout: float = 0): """Parse the response from a publish/subscribe command""" conn = self.connection if conn is None: raise RuntimeError( "pubsub connection not set: " "did you forget to call subscribe() or psubscribe()?" ) await self.check_health() if not conn.is_connected: await conn.connect() read_timeout = None if block else timeout response = await self._execute( conn, conn.read_response, timeout=read_timeout, disconnect_on_error=False, push_request=True, ) if conn.health_check_interval and response in self.health_check_response: # ignore the health check message as user might not expect it return None return response async def check_health(self): conn = self.connection if conn is None: raise RuntimeError( "pubsub connection not set: " "did you forget to call subscribe() or psubscribe()?" ) if ( conn.health_check_interval and asyncio.get_running_loop().time() > conn.next_health_check ): await conn.send_command( "PING", self.HEALTH_CHECK_MESSAGE, check_health=False ) def _normalize_keys(self, data: _NormalizeKeysT) -> _NormalizeKeysT: """ normalize channel/pattern names to be either bytes or strings based on whether responses are automatically decoded. this saves us from coercing the value for each message coming in. """ encode = self.encoder.encode decode = self.encoder.decode return {decode(encode(k)): v for k, v in data.items()} # type: ignore[return-value] # noqa: E501 async def psubscribe(self, *args: ChannelT, **kwargs: PubSubHandler): """ Subscribe to channel patterns. Patterns supplied as keyword arguments expect a pattern name as the key and a callable as the value. 
A pattern's callable will be invoked automatically when a message is received on that pattern rather than producing a message via ``listen()``. """ parsed_args = list_or_args((args[0],), args[1:]) if args else args new_patterns: Dict[ChannelT, PubSubHandler] = dict.fromkeys(parsed_args) # Mypy bug: https://github.com/python/mypy/issues/10970 new_patterns.update(kwargs) # type: ignore[arg-type] ret_val = await self.execute_command("PSUBSCRIBE", *new_patterns.keys()) # update the patterns dict AFTER we send the command. we don't want to # subscribe twice to these patterns, once for the command and again # for the reconnection. new_patterns = self._normalize_keys(new_patterns) self.patterns.update(new_patterns) self.pending_unsubscribe_patterns.difference_update(new_patterns) return ret_val def punsubscribe(self, *args: ChannelT) -> Awaitable: """ Unsubscribe from the supplied patterns. If empty, unsubscribe from all patterns. """ patterns: Iterable[ChannelT] if args: parsed_args = list_or_args((args[0],), args[1:]) patterns = self._normalize_keys(dict.fromkeys(parsed_args)).keys() else: parsed_args = [] patterns = self.patterns self.pending_unsubscribe_patterns.update(patterns) return self.execute_command("PUNSUBSCRIBE", *parsed_args) async def subscribe(self, *args: ChannelT, **kwargs: Callable): """ Subscribe to channels. Channels supplied as keyword arguments expect a channel name as the key and a callable as the value. A channel's callable will be invoked automatically when a message is received on that channel rather than producing a message via ``listen()`` or ``get_message()``. """ parsed_args = list_or_args((args[0],), args[1:]) if args else () new_channels = dict.fromkeys(parsed_args) # Mypy bug: https://github.com/python/mypy/issues/10970 new_channels.update(kwargs) # type: ignore[arg-type] ret_val = await self.execute_command("SUBSCRIBE", *new_channels.keys()) # update the channels dict AFTER we send the command. 
we don't want to # subscribe twice to these channels, once for the command and again # for the reconnection. new_channels = self._normalize_keys(new_channels) self.channels.update(new_channels) self.pending_unsubscribe_channels.difference_update(new_channels) return ret_val def unsubscribe(self, *args) -> Awaitable: """ Unsubscribe from the supplied channels. If empty, unsubscribe from all channels """ if args: parsed_args = list_or_args(args[0], args[1:]) channels = self._normalize_keys(dict.fromkeys(parsed_args)) else: parsed_args = [] channels = self.channels self.pending_unsubscribe_channels.update(channels) return self.execute_command("UNSUBSCRIBE", *parsed_args) async def listen(self) -> AsyncIterator: """Listen for messages on channels this client has been subscribed to""" while self.subscribed: response = await self.handle_message(await self.parse_response(block=True)) if response is not None: yield response async def get_message( self, ignore_subscribe_messages: bool = False, timeout: Optional[float] = 0.0 ): """ Get the next message if one is available, otherwise None. If timeout is specified, the system will wait for `timeout` seconds before returning. Timeout should be specified as a floating point number or None to wait indefinitely. """ response = await self.parse_response(block=(timeout is None), timeout=timeout) if response: return await self.handle_message(response, ignore_subscribe_messages) return None def ping(self, message=None) -> Awaitable[bool]: """ Ping the Redis server to test connectivity. Sends a PING command to the Redis server and returns True if the server responds with "PONG". """ args = ["PING", message] if message is not None else ["PING"] return self.execute_command(*args) async def handle_message(self, response, ignore_subscribe_messages=False): """ Parses a pub/sub message. If the channel or pattern was subscribed to with a message handler, the handler is invoked instead of a parsed message being returned. 
""" if response is None: return None if isinstance(response, bytes): response = [b"pong", response] if response != b"PONG" else [b"pong", b""] message_type = str_if_bytes(response[0]) if message_type == "pmessage": message = { "type": message_type, "pattern": response[1], "channel": response[2], "data": response[3], } elif message_type == "pong": message = { "type": message_type, "pattern": None, "channel": None, "data": response[1], } else: message = { "type": message_type, "pattern": None, "channel": response[1], "data": response[2], } # if this is an unsubscribe message, remove it from memory if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES: if message_type == "punsubscribe": pattern = response[1] if pattern in self.pending_unsubscribe_patterns: self.pending_unsubscribe_patterns.remove(pattern) self.patterns.pop(pattern, None) else: channel = response[1] if channel in self.pending_unsubscribe_channels: self.pending_unsubscribe_channels.remove(channel) self.channels.pop(channel, None) if message_type in self.PUBLISH_MESSAGE_TYPES: # if there's a message handler, invoke it if message_type == "pmessage": handler = self.patterns.get(message["pattern"], None) else: handler = self.channels.get(message["channel"], None) if handler: if inspect.iscoroutinefunction(handler): await handler(message) else: handler(message) return None elif message_type != "pong": # this is a subscribe/unsubscribe message. ignore if we don't # want them if ignore_subscribe_messages or self.ignore_subscribe_messages: return None return message async def run( self, *, exception_handler: Optional["PSWorkerThreadExcHandlerT"] = None, poll_timeout: float = 1.0, pubsub=None, ) -> None: """Process pub/sub messages using registered callbacks. This is the equivalent of :py:meth:`redis.PubSub.run_in_thread` in redis-py, but it is a coroutine. 
To launch it as a separate task, use ``asyncio.create_task``: >>> task = asyncio.create_task(pubsub.run()) To shut it down, use asyncio cancellation: >>> task.cancel() >>> await task """ for channel, handler in self.channels.items(): if handler is None: raise PubSubError(f"Channel: '{channel}' has no handler registered") for pattern, handler in self.patterns.items(): if handler is None: raise PubSubError(f"Pattern: '{pattern}' has no handler registered") await self.connect() while True: try: if pubsub is None: await self.get_message( ignore_subscribe_messages=True, timeout=poll_timeout ) else: await pubsub.get_message( ignore_subscribe_messages=True, timeout=poll_timeout ) except asyncio.CancelledError: raise except BaseException as e: if exception_handler is None: raise res = exception_handler(e, self) if inspect.isawaitable(res): await res # Ensure that other tasks on the event loop get a chance to run # if we didn't have to block for I/O anywhere. await asyncio.sleep(0)
PubSub
python
tiangolo__fastapi
tests/test_security_api_key_query_optional.py
{ "start": 262, "end": 2061 }
class ____(BaseModel): username: str def get_current_user(oauth_header: Optional[str] = Security(api_key)): if oauth_header is None: return None user = User(username=oauth_header) return user @app.get("/users/me") def read_current_user(current_user: Optional[User] = Depends(get_current_user)): if current_user is None: return {"msg": "Create an account first"} return current_user client = TestClient(app) def test_security_api_key(): response = client.get("/users/me?key=secret") assert response.status_code == 200, response.text assert response.json() == {"username": "secret"} def test_security_api_key_no_key(): response = client.get("/users/me") assert response.status_code == 200, response.text assert response.json() == {"msg": "Create an account first"} def test_openapi_schema(): response = client.get("/openapi.json") assert response.status_code == 200, response.text assert response.json() == { "openapi": "3.1.0", "info": {"title": "FastAPI", "version": "0.1.0"}, "paths": { "/users/me": { "get": { "responses": { "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, } }, "summary": "Read Current User", "operationId": "read_current_user_users_me_get", "security": [{"APIKeyQuery": []}], } } }, "components": { "securitySchemes": { "APIKeyQuery": {"type": "apiKey", "name": "key", "in": "query"} } }, }
User
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_axis02.py
{ "start": 315, "end": 1430 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_axis02.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "column"}) chart.axis_ids = [43704320, 43706624] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart.add_series({"values": "=Sheet1!$B$1:$B$5"}) chart.add_series({"values": "=Sheet1!$C$1:$C$5"}) chart.set_x_axis({"name": "XXX"}) chart.set_y_axis({"name": "YYY"}) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
keras-team__keras
keras/src/layers/convolutional/conv_transpose_test.py
{ "start": 8998, "end": 17095 }
class ____(testing.TestCase): @parameterized.parameters( { "filters": 5, "kernel_size": 2, "strides": 2, "padding": "valid", "output_padding": None, "data_format": "channels_last", "dilation_rate": 1, "input_shape": (2, 8, 4), "output_shape": (2, 16, 5), }, { "filters": 6, "kernel_size": 2, "strides": 3, "padding": "same", "output_padding": 2, "data_format": "channels_last", "dilation_rate": (1,), "input_shape": (2, 8, 4), "output_shape": (2, 23, 6), }, { "filters": 6, "kernel_size": (2,), "strides": (2,), "padding": "valid", "output_padding": None, "data_format": "channels_last", "dilation_rate": 1, "input_shape": (2, 8, 4), "output_shape": (2, 16, 6), }, ) @pytest.mark.requires_trainable_backend def test_conv1d_transpose_basic( self, filters, kernel_size, strides, padding, output_padding, data_format, dilation_rate, input_shape, output_shape, ): self.run_layer_test( layers.Conv1DTranspose, init_kwargs={ "filters": filters, "kernel_size": kernel_size, "strides": strides, "padding": padding, "output_padding": output_padding, "data_format": data_format, "dilation_rate": dilation_rate, }, input_shape=input_shape, expected_output_shape=output_shape, expected_num_trainable_weights=2, expected_num_non_trainable_weights=0, expected_num_losses=0, supports_masking=False, ) @parameterized.parameters( { "filters": 5, "kernel_size": 2, "strides": 2, "padding": "valid", "output_padding": None, "data_format": "channels_last", "dilation_rate": 1, "input_shape": (2, 8, 8, 4), "output_shape": (2, 16, 16, 5), }, { "filters": 6, "kernel_size": 2, "strides": 3, "padding": "same", "output_padding": 2, "data_format": "channels_last", "dilation_rate": (1, 1), "input_shape": (2, 8, 8, 4), "output_shape": (2, 23, 23, 6), }, { "filters": 6, "kernel_size": (2, 3), "strides": (2, 1), "padding": "valid", "output_padding": None, "data_format": "channels_first", "dilation_rate": (1, 1), "input_shape": (2, 4, 8, 8), "output_shape": (2, 6, 16, 10), }, { "filters": 2, "kernel_size": (7, 7), 
"strides": (16, 16), "padding": "valid", "output_padding": None, "data_format": "channels_last", "dilation_rate": (1, 1), "input_shape": (1, 14, 14, 2), "output_shape": (1, 224, 224, 2), }, ) @pytest.mark.requires_trainable_backend def test_conv2d_transpose_basic( self, filters, kernel_size, strides, padding, output_padding, data_format, dilation_rate, input_shape, output_shape, ): if ( data_format == "channels_first" and backend.backend() == "tensorflow" ): pytest.skip("channels_first unsupported on CPU with TF") self.run_layer_test( layers.Conv2DTranspose, init_kwargs={ "filters": filters, "kernel_size": kernel_size, "strides": strides, "padding": padding, "output_padding": output_padding, "data_format": data_format, "dilation_rate": dilation_rate, }, input_shape=input_shape, expected_output_shape=output_shape, expected_num_trainable_weights=2, expected_num_non_trainable_weights=0, expected_num_losses=0, supports_masking=False, ) @parameterized.parameters( { "filters": 5, "kernel_size": 2, "strides": 2, "padding": "valid", "output_padding": None, "data_format": "channels_last", "dilation_rate": 1, "input_shape": (2, 8, 8, 8, 4), "output_shape": (2, 16, 16, 16, 5), }, { "filters": 6, "kernel_size": 2, "strides": 3, "padding": "same", "output_padding": 2, "data_format": "channels_last", "dilation_rate": (1, 1, 1), "input_shape": (2, 8, 8, 8, 4), "output_shape": (2, 23, 23, 23, 6), }, { "filters": 6, "kernel_size": (2, 2, 3), "strides": (2, 1, 2), "padding": "valid", "output_padding": None, "data_format": "channels_last", "dilation_rate": (1, 1, 1), "input_shape": (2, 8, 8, 8, 4), "output_shape": (2, 16, 9, 17, 6), }, ) @pytest.mark.requires_trainable_backend def test_conv3d_transpose_basic( self, filters, kernel_size, strides, padding, output_padding, data_format, dilation_rate, input_shape, output_shape, ): self.run_layer_test( layers.Conv3DTranspose, init_kwargs={ "filters": filters, "kernel_size": kernel_size, "strides": strides, "padding": padding, 
"output_padding": output_padding, "data_format": data_format, "dilation_rate": dilation_rate, }, input_shape=input_shape, expected_output_shape=output_shape, expected_num_trainable_weights=2, expected_num_non_trainable_weights=0, expected_num_losses=0, supports_masking=False, ) def test_bad_init_args(self): # `filters` is not positive. with self.assertRaisesRegex( ValueError, "Invalid value for argument `filters`. Expected a " "strictly positive value. Received filters=0.", ): layers.Conv1DTranspose(filters=0, kernel_size=1) # `kernel_size` has 0. with self.assertRaisesRegex( ValueError, r"The `kernel_size` argument must be a tuple of " r"\d+ integers. Received kernel_size=\(1, 0\), including values" r" \{0\} that do not satisfy `value > 0`", ): layers.Conv2DTranspose(filters=2, kernel_size=(1, 0)) # `strides` has 0. with self.assertRaisesRegex( ValueError, r"The `strides` argument must be a tuple of \d+ " r"integers. Received strides=\(1, 0\), including values \{0\} " r"that do not satisfy `value > 0`", ): layers.Conv2DTranspose( filters=2, kernel_size=(2, 2), strides=(1, 0) ) # `dilation_rate > 1` while `strides > 1`. with self.assertRaisesRegex( ValueError, r"`strides > 1` not supported in conjunction with " r"`dilation_rate > 1`. Received: strides=\(2, 2\) and " r"dilation_rate=\(2, 1\)", ): layers.Conv2DTranspose( filters=2, kernel_size=(2, 2), strides=2, dilation_rate=(2, 1) )
ConvTransposeBasicTest
python
apache__airflow
providers/celery/src/airflow/providers/celery/executors/celery_kubernetes_executor.py
{ "start": 2000, "end": 13399 }
class ____(BaseExecutor): """ CeleryKubernetesExecutor consists of CeleryExecutor and KubernetesExecutor. It chooses an executor to use based on the queue defined on the task. When the queue is the value of ``kubernetes_queue`` in section ``[celery_kubernetes_executor]`` of the configuration (default value: `kubernetes`), KubernetesExecutor is selected to run the task, otherwise, CeleryExecutor is used. """ supports_ad_hoc_ti_run: bool = True # TODO: Remove this flag once providers depend on Airflow 3.0 supports_pickling: bool = True supports_sentry: bool = False is_local: bool = False is_single_threaded: bool = False is_production: bool = True serve_logs: bool = False callback_sink: BaseCallbackSink | None = None @cached_property @providers_configuration_loaded def kubernetes_queue(self) -> str: return conf.get("celery_kubernetes_executor", "kubernetes_queue") def __init__( self, celery_executor: CeleryExecutor | None = None, kubernetes_executor: KubernetesExecutor | None = None, ): if AIRFLOW_V_3_0_PLUS or not kubernetes_executor or not celery_executor: raise RuntimeError( f"{self.__class__.__name__} does not support Airflow 3.0+. See " "https://airflow.apache.org/docs/apache-airflow/stable/core-concepts/executor/index.html#using-multiple-executors-concurrently" " how to use multiple executors concurrently." 
) super().__init__() self._job_id: int | str | None = None self.celery_executor = celery_executor self.kubernetes_executor = kubernetes_executor self.kubernetes_executor.kubernetes_queue = self.kubernetes_queue @property def _task_event_logs(self): self.celery_executor._task_event_logs += self.kubernetes_executor._task_event_logs self.kubernetes_executor._task_event_logs.clear() return self.celery_executor._task_event_logs @_task_event_logs.setter def _task_event_logs(self, value): """Not implemented for hybrid executors.""" @property def queued_tasks(self) -> dict[TaskInstanceKey, Any]: """Return queued tasks from celery and kubernetes executor.""" queued_tasks = self.celery_executor.queued_tasks.copy() queued_tasks.update(self.kubernetes_executor.queued_tasks) return queued_tasks @queued_tasks.setter def queued_tasks(self, value) -> None: """Not implemented for hybrid executors.""" @property def running(self) -> set[TaskInstanceKey]: """Return running tasks from celery and kubernetes executor.""" return self.celery_executor.running.union(self.kubernetes_executor.running) @running.setter def running(self, value) -> None: """Not implemented for hybrid executors.""" @property def job_id(self) -> int | str | None: """ Inherited attribute from BaseExecutor. Since this is not really an executor, but a wrapper of executors we implemented it as property, so we can have custom setter. 
""" return self._job_id @job_id.setter def job_id(self, value: int | str | None) -> None: """Expose job ID for SchedulerJob.""" self._job_id = value self.kubernetes_executor.job_id = value self.celery_executor.job_id = value def start(self) -> None: """Start celery and kubernetes executor.""" self.celery_executor.start() self.kubernetes_executor.start() @property def slots_available(self) -> int: """Number of new tasks this executor instance can accept.""" return self.celery_executor.slots_available @property def slots_occupied(self): """Number of tasks this executor instance is currently managing.""" return len(self.running) + len(self.queued_tasks) def queue_command( self, task_instance: TaskInstance, command: CommandType, priority: int = 1, queue: str | None = None, ) -> None: """Queues command via celery or kubernetes executor.""" executor = self._router(task_instance) self.log.debug("Using executor: %s for %s", executor.__class__.__name__, task_instance.key) executor.queue_command(task_instance, command, priority, queue) # type: ignore[union-attr] def queue_task_instance( self, task_instance: TaskInstance, mark_success: bool = False, ignore_all_deps: bool = False, ignore_depends_on_past: bool = False, wait_for_past_depends_before_skipping: bool = False, ignore_task_deps: bool = False, ignore_ti_state: bool = False, pool: str | None = None, cfg_path: str | None = None, **kwargs, ) -> None: """Queues task instance via celery or kubernetes executor.""" from airflow.models.taskinstance import SimpleTaskInstance # type: ignore[attr-defined] executor = self._router(SimpleTaskInstance.from_ti(task_instance)) self.log.debug( "Using executor: %s to queue_task_instance for %s", executor.__class__.__name__, task_instance.key ) # TODO: Remove this once providers depend on Airflow 3.0 if not hasattr(task_instance, "pickle_id"): del kwargs["pickle_id"] executor.queue_task_instance( # type: ignore[union-attr] task_instance=task_instance, mark_success=mark_success, 
ignore_all_deps=ignore_all_deps, ignore_depends_on_past=ignore_depends_on_past, wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping, ignore_task_deps=ignore_task_deps, ignore_ti_state=ignore_ti_state, pool=pool, cfg_path=cfg_path, **kwargs, ) def get_task_log(self, ti: TaskInstance, try_number: int) -> tuple[list[str], list[str]]: """Fetch task log from Kubernetes executor.""" if ti.queue == self.kubernetes_executor.kubernetes_queue: return self.kubernetes_executor.get_task_log(ti=ti, try_number=try_number) return [], [] def has_task(self, task_instance: TaskInstance) -> bool: """ Check if a task is either queued or running in either celery or kubernetes executor. :param task_instance: TaskInstance :return: True if the task is known to this executor """ return self.celery_executor.has_task(task_instance) or self.kubernetes_executor.has_task( task_instance ) def heartbeat(self) -> None: """Heartbeat sent to trigger new jobs in celery and kubernetes executor.""" self.celery_executor.heartbeat() self.kubernetes_executor.heartbeat() def get_event_buffer( self, dag_ids: list[str] | None = None ) -> dict[TaskInstanceKey, EventBufferValueType]: """ Return and flush the event buffer from celery and kubernetes executor. :param dag_ids: dag_ids to return events for, if None returns all :return: a dict of events """ cleared_events_from_celery = self.celery_executor.get_event_buffer(dag_ids) cleared_events_from_kubernetes = self.kubernetes_executor.get_event_buffer(dag_ids) return {**cleared_events_from_celery, **cleared_events_from_kubernetes} def try_adopt_task_instances(self, tis: Sequence[TaskInstance]) -> Sequence[TaskInstance]: """ Try to adopt running task instances that have been abandoned by a SchedulerJob dying. 
Anything that is not adopted will be cleared by the scheduler (and then become eligible for re-scheduling) :return: any TaskInstances that were unable to be adopted """ celery_tis = [ti for ti in tis if ti.queue != self.kubernetes_queue] kubernetes_tis = [ti for ti in tis if ti.queue == self.kubernetes_queue] return [ *self.celery_executor.try_adopt_task_instances(celery_tis), *self.kubernetes_executor.try_adopt_task_instances(kubernetes_tis), ] @deprecated( reason="Replaced by function `revoke_task`. Upgrade airflow core to make this go away.", category=AirflowProviderDeprecationWarning, action="ignore", # ignoring since will get warning from the nested executors ) def cleanup_stuck_queued_tasks(self, tis: list[TaskInstance]) -> list[str]: celery_tis = [ti for ti in tis if ti.queue != self.kubernetes_queue] kubernetes_tis = [ti for ti in tis if ti.queue == self.kubernetes_queue] return [ *self.celery_executor.cleanup_stuck_queued_tasks(celery_tis), *self.kubernetes_executor.cleanup_stuck_queued_tasks(kubernetes_tis), ] def revoke_task(self, *, ti: TaskInstance): if ti.queue == self.kubernetes_queue: try: self.kubernetes_executor.revoke_task(ti=ti) except NotImplementedError: self.log.warning( "Your kubernetes provider version is old. Falling back to deprecated " "function, `cleanup_stuck_queued_tasks`. You must upgrade k8s " "provider to enable 'stuck in queue' retries and stuck in queue " "event logging." ) for ti_repr in self.kubernetes_executor.cleanup_stuck_queued_tasks(tis=[ti]): self.log.info( "task stuck in queued and will be marked failed. 
task_instance=%s", ti_repr, ) else: self.celery_executor.revoke_task(ti=ti) def end(self) -> None: """End celery and kubernetes executor.""" self.celery_executor.end() self.kubernetes_executor.end() def terminate(self) -> None: """Terminate celery and kubernetes executor.""" self.celery_executor.terminate() self.kubernetes_executor.terminate() def _router(self, simple_task_instance: SimpleTaskInstance) -> CeleryExecutor | KubernetesExecutor: """ Return either celery_executor or kubernetes_executor. :param simple_task_instance: SimpleTaskInstance :return: celery_executor or kubernetes_executor """ if simple_task_instance.queue == self.kubernetes_queue: return self.kubernetes_executor return self.celery_executor def debug_dump(self) -> None: """Debug dump; called in response to SIGUSR2 by the scheduler.""" self.log.info("Dumping CeleryExecutor state") self.celery_executor.debug_dump() self.log.info("Dumping KubernetesExecutor state") self.kubernetes_executor.debug_dump() def send_callback(self, request: CallbackRequest) -> None: """ Send callback for execution. :param request: Callback request to be executed. """ if not self.callback_sink: raise ValueError("Callback sink is not ready.") self.callback_sink.send(request) @staticmethod def get_cli_commands() -> list: return CeleryExecutor.get_cli_commands() + KubernetesExecutor.get_cli_commands()
CeleryKubernetesExecutor
python
weaviate__weaviate-python-client
weaviate/collections/classes/config_vector_index.py
{ "start": 793, "end": 1057 }
class ____(str, Enum): """The available vector index types in Weaviate. Attributes: HNSW: Hierarchical Navigable Small World (HNSW) index. FLAT: Flat index. """ HNSW = "hnsw" FLAT = "flat" DYNAMIC = "dynamic"
VectorIndexType
python
pytorch__pytorch
torch/_inductor/pattern_matcher.py
{ "start": 38160, "end": 38516 }
class ____(PatternEntry): """ A pattern that runs a function on the FX graph """ handler: Callable[..., Any] def apply(self, match: Match, graph: torch.fx.Graph, node: torch.fx.Node) -> None: with graph.inserting_before(node): self.handler(match, *match.args, **match.kwargs) @dataclasses.dataclass
GraphPatternEntry
python
kamyu104__LeetCode-Solutions
Python/equal-row-and-column-pairs.py
{ "start": 84, "end": 437 }
class ____(object): def equalPairs(self, grid): """ :type grid: List[List[int]] :rtype: int """ cnt1 = collections.Counter(tuple(row) for row in grid) cnt2 = collections.Counter(tuple(col) for col in itertools.izip(*grid)) return sum(cnt1[k]*cnt2[k] for k in cnt1.iterkeys() if k in cnt2)
Solution
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/dataplex.py
{ "start": 138289, "end": 141414 }
class ____(DataplexCatalogBaseOperator): """ Get an AspectType resource. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:DataplexCatalogGetAspectTypeOperator` :param aspect_type_id: Required. AspectType identifier. :param project_id: Required. The ID of the Google Cloud project where the service is used. :param location: Required. The ID of the Google Cloud region where the service is used. :param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud. :param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not be retried. :param timeout: Optional. The amount of time, in seconds, to wait for the request to complete. Note that if `retry` is specified, the timeout applies to each individual attempt. :param metadata: Optional. Additional metadata that is provided to the method. :param impersonation_chain: Optional. Service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). 
""" template_fields: Sequence[str] = tuple( {"aspect_type_id"} | set(DataplexCatalogBaseOperator.template_fields) ) operator_extra_links = (DataplexCatalogAspectTypeLink(),) def __init__( self, aspect_type_id: str, *args, **kwargs, ) -> None: super().__init__(*args, **kwargs) self.aspect_type_id = aspect_type_id @property def extra_links_params(self) -> dict[str, Any]: return { **super().extra_links_params, "aspect_type_id": self.aspect_type_id, } def execute(self, context: Context): DataplexCatalogAspectTypeLink.persist(context=context) self.log.info( "Retrieving Dataplex Catalog AspectType %s.", self.aspect_type_id, ) try: aspect_type = self.hook.get_aspect_type( aspect_type_id=self.aspect_type_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) except NotFound: self.log.info( "Dataplex Catalog AspectType %s not found.", self.aspect_type_id, ) raise AirflowException(NotFound) except Exception as ex: raise AirflowException(ex) return AspectType.to_dict(aspect_type)
DataplexCatalogGetAspectTypeOperator
python
ansible__ansible
test/units/mock/custom_types.py
{ "start": 998, "end": 1026 }
class ____(int): ...
CustomInt
python
dagster-io__dagster
python_modules/libraries/dagster-k8s/dagster_k8s/job.py
{ "start": 10970, "end": 43899 }
class ____( NamedTuple( "_K8sJobTaskConfig", [ ("job_image", Optional[str]), ("dagster_home", Optional[str]), ("image_pull_policy", str), ("image_pull_secrets", Sequence[Mapping[str, str]]), ("service_account_name", Optional[str]), ("instance_config_map", Optional[str]), ("postgres_password_secret", Optional[str]), ("env_config_maps", Sequence[str]), ("env_secrets", Sequence[str]), ("env_vars", Sequence[str]), ("volume_mounts", Sequence[Mapping[str, Any]]), ("volumes", Sequence[Mapping[str, Any]]), ("labels", Mapping[str, str]), ("resources", Mapping[str, Any]), ("scheduler_name", Optional[str]), ("security_context", Mapping[str, Any]), ], ) ): """Configuration parameters for launching Dagster Jobs on Kubernetes. Params: dagster_home (str): The location of DAGSTER_HOME in the Job container; this is where the ``dagster.yaml`` file will be mounted from the instance ConfigMap specified here. image_pull_policy (Optional[str]): Allows the image pull policy to be overridden, e.g. to facilitate local testing with `kind <https://kind.sigs.k8s.io/>`_. Default: ``"Always"``. See: https://kubernetes.io/docs/concepts/containers/images/#updating-images. image_pull_secrets (Optional[Sequence[Mapping[str, str]]]): Optionally, a list of dicts, each of which corresponds to a Kubernetes ``LocalObjectReference`` (e.g., ``{'name': 'myRegistryName'}``). This allows you to specify the ```imagePullSecrets`` on a pod basis. Typically, these will be provided through the service account, when needed, and you will not need to pass this argument. See: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod and https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#podspec-v1-core service_account_name (Optional[str]): The name of the Kubernetes service account under which to run the Job. 
Defaults to "default" instance_config_map (str): The ``name`` of an existing Volume to mount into the pod in order to provide a ConfigMap for the Dagster instance. This Volume should contain a ``dagster.yaml`` with appropriate values for run storage, event log storage, etc. postgres_password_secret (Optional[str]): The name of the Kubernetes Secret where the postgres password can be retrieved. Will be mounted and supplied as an environment variable to the Job Pod. env_config_maps (Optional[Sequence[str]]): A list of custom ConfigMapEnvSource names from which to draw environment variables (using ``envFrom``) for the Job. Default: ``[]``. See: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#define-an-environment-variable-for-a-container env_secrets (Optional[Sequence[str]]): A list of custom Secret names from which to draw environment variables (using ``envFrom``) for the Job. Default: ``[]``. See: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables env_vars (Optional[Sequence[str]]): A list of environment variables to inject into the Job. Default: ``[]``. See: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables job_image (Optional[str]): The docker image to use. The Job container will be launched with this image. Should not be specified if using userDeployments. volume_mounts (Optional[Sequence[Permissive]]): A list of volume mounts to include in the job's container. Default: ``[]``. See: https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#volumemount-v1-core volumes (Optional[List[Permissive]]): A list of volumes to include in the Job's Pod. Default: ``[]``. 
See: https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#volume-v1-core labels (Optional[Mapping[str, str]]): Additional labels that should be included in the Job's Pod. See: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels resources (Optional[Mapping[str, Any]]) Compute resource requirements for the container. See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ scheduler_name (Optional[str]): Use a custom Kubernetes scheduler for launched Pods. See: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/ security_context (Optional[Mapping[str,Any]]): Security settings for the container. See: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container """ def __new__( cls, job_image: Optional[str] = None, dagster_home: Optional[str] = None, image_pull_policy: Optional[str] = None, image_pull_secrets: Optional[Sequence[Mapping[str, str]]] = None, service_account_name: Optional[str] = None, instance_config_map: Optional[str] = None, postgres_password_secret: Optional[str] = None, env_config_maps: Optional[Sequence[str]] = None, env_secrets: Optional[Sequence[str]] = None, env_vars: Optional[Sequence[str]] = None, volume_mounts: Optional[Sequence[Mapping[str, Any]]] = None, volumes: Optional[Sequence[Mapping[str, Any]]] = None, labels: Optional[Mapping[str, str]] = None, resources: Optional[Mapping[str, Any]] = None, scheduler_name: Optional[str] = None, security_context: Optional[Mapping[str, Any]] = None, ): return super().__new__( cls, job_image=check.opt_str_param(job_image, "job_image"), dagster_home=check.opt_str_param(dagster_home, "dagster_home"), image_pull_policy=check.opt_str_param(image_pull_policy, "image_pull_policy", "Always"), image_pull_secrets=check.opt_sequence_param( image_pull_secrets, "image_pull_secrets", of_type=Mapping ), service_account_name=check.opt_str_param(service_account_name, 
"service_account_name"), instance_config_map=check.opt_str_param(instance_config_map, "instance_config_map"), postgres_password_secret=check.opt_str_param( postgres_password_secret, "postgres_password_secret" ), env_config_maps=check.opt_sequence_param( env_config_maps, "env_config_maps", of_type=str ), env_secrets=check.opt_sequence_param(env_secrets, "env_secrets", of_type=str), env_vars=check.opt_sequence_param(env_vars, "env_vars", of_type=str), volume_mounts=[ k8s_snake_case_dict(kubernetes.client.V1VolumeMount, mount) for mount in check.opt_sequence_param(volume_mounts, "volume_mounts") ], volumes=[ k8s_snake_case_dict(kubernetes.client.V1Volume, volume) for volume in check.opt_sequence_param(volumes, "volumes") ], labels=check.opt_mapping_param(labels, "labels", key_type=str, value_type=str), resources=check.opt_mapping_param(resources, "resources", key_type=str), scheduler_name=check.opt_str_param(scheduler_name, "scheduler_name"), security_context=check.opt_mapping_param(security_context, "security_context"), ) @classmethod def config_type_run_launcher(cls): """Configuration intended to be set on the Dagster instance for the run launcher.""" return merge_dicts( DagsterK8sJobConfig.config_type_job(), { "instance_config_map": Field( StringSource, is_required=True, description=( "The ``name`` of an existing Volume to mount into the pod in order to" " provide a ConfigMap for the Dagster instance. This Volume should contain" " a ``dagster.yaml`` with appropriate values for run storage, event log" " storage, etc." ), ), "postgres_password_secret": Field( StringSource, is_required=False, description=( "The name of the Kubernetes Secret where the postgres password can be" " retrieved. Will be mounted and supplied as an environment variable to the" ' Job Pod.Secret must contain the key ``"postgresql-password"`` which will' " be exposed in the Job environment as the environment variable" " ``DAGSTER_PG_PASSWORD``." 
), ), "dagster_home": Field( StringSource, is_required=False, default_value=DAGSTER_HOME_DEFAULT, description=( "The location of DAGSTER_HOME in the Job container; this is where the" " ``dagster.yaml`` file will be mounted from the instance ConfigMap" " specified here. Defaults to /opt/dagster/dagster_home." ), ), "load_incluster_config": Field( bool, is_required=False, default_value=True, description="""Set this value if you are running the launcher within a k8s cluster. If ``True``, we assume the launcher is running within the target cluster and load config using ``kubernetes.config.load_incluster_config``. Otherwise, we will use the k8s config specified in ``kubeconfig_file`` (using ``kubernetes.config.load_kube_config``) or fall back to the default kubeconfig.""", ), "kubeconfig_file": Field( Noneable(str), is_required=False, default_value=None, description=( "The kubeconfig file from which to load config. Defaults to using the" " default kubeconfig." ), ), "fail_pod_on_run_failure": Field( bool, is_required=False, description=( "Whether the launched Kubernetes Jobs and Pods should fail if the Dagster" " run fails" ), ), "run_k8s_config": Field( Shape( { "container_config": Permissive(), "pod_template_spec_metadata": Permissive(), "pod_spec_config": Permissive(), "job_config": Permissive(), "job_metadata": Permissive(), "job_spec_config": Permissive(), } ), is_required=False, description="Raw Kubernetes configuration for launched runs.", ), "job_namespace": Field(StringSource, is_required=False, default_value="default"), "only_allow_user_defined_k8s_config_fields": Field( Shape( { "container_config": Field( Map(key_type=str, inner_type=bool), is_required=False ), "pod_spec_config": Field( Map(key_type=str, inner_type=bool), is_required=False ), "pod_template_spec_metadata": Field( Map(key_type=str, inner_type=bool), is_required=False ), "job_metadata": Field( Map(key_type=str, inner_type=bool), is_required=False ), "job_spec_config": Field( Map(key_type=str, 
inner_type=bool), is_required=False ), "namespace": Field(BoolSource, is_required=False), } ), is_required=False, description="Dictionary of fields that are allowed to be configured on a " "per-run or per-code-location basis - e.g. using tags on the run. " "Can be used to prevent user code from being able to set arbitrary kubernetes " "config on the pods launched by the run launcher.", ), "only_allow_user_defined_env_vars": Field( Array(str), is_required=False, description="List of environment variable names that are allowed to be set on " "a per-run or per-code-location basis - e.g. using tags on the run. ", ), }, ) @classmethod def config_type_job(cls): """Configuration intended to be set when creating a k8s job (e.g. in an executor that runs each op in its own k8s job, or a run launcher that create a k8s job for the run worker). Shares most of the schema with the container_context, but for back-compat reasons, 'namespace' is called 'job_namespace'. """ return merge_dicts( { "job_image": Field( Noneable(StringSource), is_required=False, description=( "Docker image to use for launched Jobs. If this field is empty, the image" " that was used to originally load the Dagster repository will be used." ' (Ex: "mycompany.com/dagster-k8s-image:latest").' ), ), }, DagsterK8sJobConfig.config_type_container(), ) @classmethod def config_type_container(cls): return { "image_pull_policy": Field( Noneable(StringSource), is_required=False, description="Image pull policy to set on launched Pods.", ), "image_pull_secrets": Field( Noneable(Array(Shape({"name": StringSource}))), is_required=False, description=( "Specifies that Kubernetes should get the credentials from " "the Secrets named in this list." 
), ), "service_account_name": Field( Noneable(StringSource), is_required=False, description="The name of the Kubernetes service account under which to run.", ), "env_config_maps": Field( Noneable(Array(StringSource)), is_required=False, description=( "A list of custom ConfigMapEnvSource names from which to draw " "environment variables (using ``envFrom``) for the Job. Default: ``[]``. See:" "https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#define-an-environment-variable-for-a-container" ), ), "env_secrets": Field( Noneable(Array(StringSource)), is_required=False, description=( "A list of custom Secret names from which to draw environment " "variables (using ``envFrom``) for the Job. Default: ``[]``. See:" "https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables" ), ), "env_vars": Field( Noneable(Array(str)), is_required=False, description=( "A list of environment variables to inject into the Job. Each can be " "of the form KEY=VALUE or just KEY (in which case the value will be pulled" " from " "the current process). Default: ``[]``. See: " "https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables" ), ), "volume_mounts": Field( Array( # Can supply either snake_case or camelCase, but in typeaheads based on the # schema we assume snake_case Permissive( { "name": StringSource, "mount_path": Field(StringSource, is_required=False), "mount_propagation": Field(StringSource, is_required=False), "read_only": Field(BoolSource, is_required=False), "sub_path": Field(StringSource, is_required=False), "sub_path_expr": Field(StringSource, is_required=False), } ) ), is_required=False, default_value=[], description=( "A list of volume mounts to include in the job's container. Default: ``[]``." 
" See: " "https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#volumemount-v1-core" ), ), "volumes": Field( Array( Permissive( { "name": str, } ) ), is_required=False, default_value=[], description=( "A list of volumes to include in the Job's Pod. Default: ``[]``. For the many " "possible volume source types that can be included, see: " "https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#volume-v1-core" ), ), "labels": Field( dict, is_required=False, description=( "Labels to apply to all created pods. See: " "https://kubernetes.io/docs/concepts/overview/working-with-objects/labels" ), ), "resources": Field( Noneable( { "limits": Field(dict, is_required=False), "requests": Field(dict, is_required=False), } ), is_required=False, description=( "Compute resource requirements for the container. See: " "https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" ), ), "scheduler_name": Field( Noneable(StringSource), is_required=False, description=( "Use a custom Kubernetes scheduler for launched Pods. See:" "https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/" ), ), "security_context": Field( dict, is_required=False, description=( "Security settings for the container. See:" "https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container" ), ), } @classmethod def config_type_container_context(cls): return merge_dicts( DagsterK8sJobConfig.config_type_container(), { "namespace": Field( Noneable(StringSource), is_required=False, description=( "The namespace into which to launch Kubernetes resources. Note that any " "other required resources (such as the service account) must be " "present in this namespace." 
), ), "run_k8s_config": Field( USER_DEFINED_K8S_JOB_CONFIG_SCHEMA, is_required=False, description="Raw Kubernetes configuration for launched runs.", ), "server_k8s_config": Field( Shape( { "container_config": Permissive(), "pod_spec_config": Permissive(), "pod_template_spec_metadata": Permissive(), "merge_behavior": Field( DagsterEnum.from_python_enum(K8sConfigMergeBehavior), is_required=False, ), "deployment_metadata": Permissive(), "service_metadata": Permissive(), } ), is_required=False, description="Raw Kubernetes configuration for launched code servers.", ), "env": Field( Array( Permissive( { "name": str, } ) ), is_required=False, default_value=[], ), }, ) @property def env(self) -> Sequence[Mapping[str, Optional[str]]]: parsed_env_vars = [parse_env_var(key) for key in (self.env_vars or [])] return [ {"name": parsed_env_var[0], "value": parsed_env_var[1]} for parsed_env_var in parsed_env_vars ] @property def env_from_sources(self) -> Sequence[Mapping[str, Any]]: """This constructs a list of env_from sources. Along with a default base environment config map which we always load, the ConfigMaps and Secrets specified via env_config_maps and env_secrets will be pulled into the job construction here. 
""" config_maps = [ {"config_map_ref": {"name": config_map}} for config_map in self.env_config_maps ] secrets = [{"secret_ref": {"name": secret}} for secret in self.env_secrets] return config_maps + secrets def to_dict(self): return self._asdict() def with_image(self, image): return self._replace(job_image=image) @staticmethod def from_dict(config: Mapping[str, Any]): return DagsterK8sJobConfig(**config) def construct_dagster_k8s_job( job_config: DagsterK8sJobConfig, args: Optional[Sequence[str]], job_name: str, user_defined_k8s_config: Optional[UserDefinedDagsterK8sConfig] = None, pod_name: Optional[str] = None, component: Optional[str] = None, labels: Optional[Mapping[str, str]] = None, env_vars: Optional[Sequence[Mapping[str, Any]]] = None, owner_references: Optional[Sequence[OwnerReference]] = None, ) -> kubernetes.client.V1Job: """Constructs a Kubernetes Job object. Args: job_config: Job configuration to use for constructing the Kubernetes Job object. args: CLI arguments to use with in this Job. job_name: The name of the Job. Note that this name must be <= 63 characters in length. user_defined_k8s_config: Additional k8s config in tags or Dagster config to apply to the job. pod_name: The name of the Pod. Note that this name must be <= 63 characters in length. Defaults to "<job_name>-pod". component: The name of the component, used to provide the Job label app.kubernetes.io/component. Defaults to None. labels: Additional labels to be attached to k8s jobs and pod templates. Long label values are may be truncated. env_vars: Environment config for the container in the pod template. Returns: kubernetes.client.V1Job: A Kubernetes Job object. 
""" check.inst_param(job_config, "job_config", DagsterK8sJobConfig) check.opt_sequence_param(args, "args", of_type=str) check.str_param(job_name, "job_name") user_defined_k8s_config = check.opt_inst_param( user_defined_k8s_config, "user_defined_k8s_config", UserDefinedDagsterK8sConfig, default=UserDefinedDagsterK8sConfig(), ) pod_name = check.opt_str_param(pod_name, "pod_name", default=job_name + "-pod") check.opt_str_param(component, "component") check.opt_mapping_param(labels, "labels", key_type=str, value_type=str) check.invariant( len(job_name) <= MAX_K8S_NAME_LEN, "job_name is %d in length; Kubernetes Jobs cannot be longer than %d characters." # noqa: UP031 % (len(job_name), MAX_K8S_NAME_LEN), ) check.invariant( len(pod_name) <= MAX_K8S_NAME_LEN, "job_name is %d in length; Kubernetes Pods cannot be longer than %d characters." # noqa: UP031 % (len(pod_name), MAX_K8S_NAME_LEN), ) k8s_common_labels = get_common_labels() if component: k8s_common_labels["app.kubernetes.io/component"] = component additional_labels = {k: sanitize_k8s_label(v) for k, v in (labels or {}).items()} dagster_labels = merge_dicts(k8s_common_labels, additional_labels) env: list[Mapping[str, Any]] = [] if env_vars: env.extend(env_vars) if job_config.dagster_home: env.append({"name": "DAGSTER_HOME", "value": job_config.dagster_home}) if job_config.postgres_password_secret: env.append( { "name": DAGSTER_PG_PASSWORD_ENV_VAR, "value_from": { "secret_key_ref": { "name": job_config.postgres_password_secret, "key": DAGSTER_PG_PASSWORD_SECRET_KEY, } }, } ) container_config = copy.deepcopy(dict(user_defined_k8s_config.container_config)) if args is not None: container_config["args"] = args user_defined_env_vars = container_config.pop("env", []) user_defined_env_from = container_config.pop("env_from", []) job_image = container_config.pop("image", job_config.job_image) image_pull_policy = container_config.pop("image_pull_policy", job_config.image_pull_policy) user_defined_k8s_volume_mounts = 
container_config.pop("volume_mounts", []) user_defined_resources = container_config.pop("resources", {}) container_name = container_config.pop("name", "dagster") volume_mounts = [*job_config.volume_mounts, *user_defined_k8s_volume_mounts] resources = user_defined_resources if user_defined_resources else job_config.resources security_context = container_config.pop("security_context", job_config.security_context) container_config = merge_dicts( container_config, { "name": container_name, "image": job_image, "image_pull_policy": image_pull_policy, "env": [*env, *job_config.env, *user_defined_env_vars], "env_from": [*job_config.env_from_sources, *user_defined_env_from], "volume_mounts": volume_mounts, "resources": resources, }, {"security_context": security_context} if security_context else {}, ) pod_spec_config = copy.deepcopy(dict(user_defined_k8s_config.pod_spec_config)) user_defined_volumes = pod_spec_config.pop("volumes", []) volumes = [*job_config.volumes, *user_defined_volumes] # If the user has defined custom labels, remove them from the pod_template_spec_metadata # key and merge them with the dagster labels pod_template_spec_metadata = copy.deepcopy( dict(user_defined_k8s_config.pod_template_spec_metadata) ) user_defined_pod_template_labels = pod_template_spec_metadata.pop("labels", {}) service_account_name = pod_spec_config.pop( "service_account_name", job_config.service_account_name ) scheduler_name = pod_spec_config.pop("scheduler_name", job_config.scheduler_name) automount_service_account_token = pod_spec_config.pop("automount_service_account_token", True) user_defined_containers = pod_spec_config.pop("containers", []) user_defined_image_pull_secrets = pod_spec_config.pop("image_pull_secrets", []) template = { "metadata": merge_dicts( pod_template_spec_metadata, { "name": pod_name, "labels": merge_dicts( dagster_labels, job_config.labels, user_defined_pod_template_labels ), }, ), "spec": merge_dicts( {"restart_policy": "Never"}, pod_spec_config, { 
"image_pull_secrets": [ *job_config.image_pull_secrets, *user_defined_image_pull_secrets, ], "service_account_name": service_account_name, "automount_service_account_token": automount_service_account_token, "containers": [container_config] + user_defined_containers, "volumes": volumes, }, {"scheduler_name": scheduler_name} if scheduler_name else {}, ), } job_spec_config = merge_dicts( DEFAULT_JOB_SPEC_CONFIG, user_defined_k8s_config.job_spec_config, {"template": template}, ) user_defined_job_metadata = copy.deepcopy(dict(user_defined_k8s_config.job_metadata)) user_defined_job_labels = user_defined_job_metadata.pop("labels", {}) owner_reference_dicts = ( [owner_reference.to_dict() for owner_reference in owner_references] if owner_references else [] ) if "owner_references" in user_defined_job_metadata: user_defined_job_metadata["owner_references"] = ( owner_reference_dicts + user_defined_job_metadata["owner_references"] ) job = k8s_model_from_dict( kubernetes.client.V1Job, merge_dicts( user_defined_k8s_config.job_config, { "api_version": "batch/v1", "kind": "Job", "metadata": merge_dicts( user_defined_job_metadata, { "name": job_name, "labels": merge_dicts( dagster_labels, user_defined_job_labels, job_config.labels ), }, {"owner_references": owner_reference_dicts} if owner_reference_dicts else {}, ), "spec": job_spec_config, }, ), ) return job def get_k8s_job_name(input_1, input_2=None): """Creates a unique (short!) identifier to name k8s objects based on run ID and step key(s). K8s Job names are limited to 63 characters, because they are used as labels. 
For more info, see: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/ """ check.str_param(input_1, "input_1") check.opt_str_param(input_2, "input_2") if not input_2: letters = string.ascii_lowercase input_2 = "".join(random.choice(letters) for i in range(20)) # Creates 32-bit signed int, so could be negative name_hash = non_secure_md5_hash_str((input_1 + input_2).encode("utf-8")) return name_hash
DagsterK8sJobConfig
python
getsentry__sentry
src/sentry/release_health/base.py
{ "start": 6180, "end": 15699 }
class ____(Service): """Abstraction layer for all release health related queries""" __all__ = ( "get_current_and_previous_crash_free_rates", "get_release_adoption", "check_has_health_data", "get_release_sessions_time_bounds", "check_releases_have_health_data", "sessions_query_config", "run_sessions_query", "get_release_health_data_overview", "get_crash_free_breakdown", "get_changed_project_release_model_adoptions", "get_oldest_health_data_for_releases", "get_project_releases_count", "get_project_release_stats", "get_project_sessions_count", "get_num_sessions_per_project", "get_project_releases_by_stability", ) def get_current_and_previous_crash_free_rates( self, project_ids: Sequence[ProjectId], current_start: datetime, current_end: datetime, previous_start: datetime, previous_end: datetime, rollup: int, org_id: OrganizationId | None = None, ) -> CurrentAndPreviousCrashFreeRates: """ Function that returns `currentCrashFreeRate` and the `previousCrashFreeRate` of projects based on the inputs provided Inputs: * project_ids * current_start: start interval of currentCrashFreeRate * current_end: end interval of currentCrashFreeRate * previous_start: start interval of previousCrashFreeRate * previous_end: end interval of previousCrashFreeRate * rollup Returns: A dictionary of project_id as key and as value the `currentCrashFreeRate` and the `previousCrashFreeRate` As an example: { 1: { "currentCrashFreeRate": 100, "previousCrashFreeRate": 66.66666666666667 }, 2: { "currentCrashFreeRate": 50.0, "previousCrashFreeRate": None }, ... } """ raise NotImplementedError() def get_release_adoption( self, project_releases: Sequence[ProjectRelease], environments: Sequence[EnvironmentName] | None = None, now: datetime | None = None, org_id: OrganizationId | None = None, ) -> ReleasesAdoption: """ Get the adoption of the last 24 hours (or a difference reference timestamp). :param project_releases: A list of releases to get adoption for. 
Our backends store session data per-project, so each release has to be scoped down to a project too. :param environments: Optional. A list of environments to filter by. :param now: Release adoption information will be provided from 24h ago until this timestamp. :param org_id: An organization ID to filter by. Note that all projects have to be within this organization, and this backend doesn't check for that. Omit if you're not sure. """ raise NotImplementedError() def sessions_query_config(self, organization: Any) -> SessionsQueryConfig: """Return the backend-dependent config for sessions_v2.QueryDefinition""" raise NotImplementedError() def run_sessions_query( self, org_id: int, query: QueryDefinition, span_op: str, ) -> SessionsQueryResult: """ Runs the `query` as defined by the sessions_v2 [`QueryDefinition`], and returns the resulting timeseries in sessions_v2 format. """ raise NotImplementedError() def get_release_sessions_time_bounds( self, project_id: ProjectId, release: ReleaseName, org_id: OrganizationId, environments: Iterable[str] | None = None, ) -> ReleaseSessionsTimeBounds: """ Get the sessions time bounds in terms of when the first session started and when the last session started according to a specific (project_id, org_id, release, environments) combination Inputs: * project_id * release * org_id: Organization Id * environments Return: Dictionary with two keys "sessions_lower_bound" and "sessions_upper_bound" that correspond to when the first session occurred and when the last session occurred respectively """ raise NotImplementedError() def check_has_health_data( self, projects_list: Collection[ProjectOrRelease], now: datetime | None = None, ) -> set[ProjectOrRelease]: """ Function that returns a set of all project_ids or (project, release) if they have health data within the last 90 days based on a list of projects or a list of project, release combinations provided as an arg. 
Inputs: * projects_list: Contains either a list of project ids or a list of tuple (project_id, release) """ raise NotImplementedError() def check_releases_have_health_data( self, organization_id: OrganizationId, project_ids: Sequence[ProjectId], release_versions: Sequence[ReleaseName], start: datetime, end: datetime, ) -> set[ReleaseName]: """ Returns a set of all release versions that have health data within a given period of time. """ raise NotImplementedError() def get_release_health_data_overview( self, project_releases: Sequence[ProjectRelease], environments: Sequence[EnvironmentName] | None = None, summary_stats_period: StatsPeriod | None = None, health_stats_period: StatsPeriod | None = None, stat: Literal["users", "sessions"] | None = None, now: datetime | None = None, ) -> Mapping[ProjectRelease, ReleaseHealthOverview]: """Checks quickly for which of the given project releases we have health data available. The argument is a tuple of `(project_id, release_name)` tuples. The return value is a set of all the project releases that have health data. """ raise NotImplementedError() def get_crash_free_breakdown( self, project_id: ProjectId, release: ReleaseName, start: datetime, environments: Sequence[EnvironmentName] | None = None, now: datetime | None = None, ) -> Sequence[CrashFreeBreakdown]: """Get stats about crash free sessions and stats for the last 1, 2, 7, 14 and 30 days""" raise NotImplementedError def get_changed_project_release_model_adoptions( self, project_ids: Iterable[int], now: datetime | None = None, ) -> Sequence[ProjectRelease]: """ Returns a sequence of tuples (ProjectId, ReleaseName) with the releases seen in the last 72 hours for the requested projects. """ raise NotImplementedError() def get_oldest_health_data_for_releases( self, project_releases: Sequence[ProjectRelease], now: datetime | None = None, ) -> Mapping[ProjectRelease, str]: """Returns the oldest health data we have observed in a release in 90 days. 
This is used for backfilling. """ raise NotImplementedError() def get_project_releases_count( self, organization_id: OrganizationId, project_ids: Sequence[ProjectId], scope: str, stats_period: str | None = None, environments: Sequence[EnvironmentName] | None = None, ) -> int: """ Fetches the total count of releases/project combinations """ raise NotImplementedError() def get_project_release_stats( self, project_id: ProjectId, release: ReleaseName, stat: OverviewStat, rollup: int, start: datetime, end: datetime, environments: Sequence[EnvironmentName] | None = None, ) -> ProjectReleaseUserStats | ProjectReleaseSessionStats: raise NotImplementedError() def get_project_sessions_count( self, project_id: ProjectId, rollup: int, # rollup in seconds start: datetime, end: datetime, environment_id: int | None = None, ) -> int: """ Returns the number of sessions in the specified period (optionally filtered by environment) """ raise NotImplementedError() def get_num_sessions_per_project( self, project_ids: Sequence[ProjectId], start: datetime | None, end: datetime | None, environment_ids: Sequence[int] | None = None, ) -> Sequence[ProjectWithCount]: """ Returns the number of sessions for each project specified. """ raise NotImplementedError() def get_project_releases_by_stability( self, project_ids: Sequence[ProjectId], offset: int | None, limit: int | None, scope: str, stats_period: str | None = None, environments: Sequence[str] | None = None, now: datetime | None = None, ) -> Sequence[ProjectRelease]: """Given some project IDs returns adoption rates that should be updated on the postgres tables. """ raise NotImplementedError()
ReleaseHealthBackend
python
pytest-dev__pytest
testing/code/test_source.py
{ "start": 12966, "end": 13690 }
class ____: def setup_class(self) -> None: self.source = """\ try: raise ValueError except Something: raise IndexError(1) else: raise KeyError() """ def test_body(self) -> None: source = getstatement(1, self.source) assert str(source) == " raise ValueError" def test_except_line(self) -> None: source = getstatement(2, self.source) assert str(source) == "except Something:" def test_except_body(self) -> None: source = getstatement(3, self.source) assert str(source) == " raise IndexError(1)" def test_else(self) -> None: source = getstatement(5, self.source) assert str(source) == " raise KeyError()"
TestTry
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 845008, "end": 846233 }
class ____(sgqlc.types.Type, HovercardContext): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ( "relevant_teams", "teams_resource_path", "teams_url", "total_team_count", ) relevant_teams = sgqlc.types.Field( sgqlc.types.non_null(TeamConnection), graphql_name="relevantTeams", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) teams_resource_path = sgqlc.types.Field( sgqlc.types.non_null(URI), graphql_name="teamsResourcePath" ) teams_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="teamsUrl") total_team_count = sgqlc.types.Field( sgqlc.types.non_null(Int), graphql_name="totalTeamCount" )
OrganizationTeamsHovercardContext
python
pypa__warehouse
warehouse/accounts/interfaces.py
{ "start": 7666, "end": 7861 }
class ____(Interface): def get_email_breach_count(email: str) -> int | None: """ Returns count of times the email appears in verified breaches. """
IEmailBreachedService
python
dagster-io__dagster
python_modules/libraries/dagster-dg-core/dagster_dg_core/utils/__init__.py
{ "start": 12869, "end": 13614 }
class ____: def format_help(self, context: click.Context, formatter: click.HelpFormatter): """Customizes the help to include hierarchical usage.""" from typer.rich_utils import rich_format_help if not isinstance(self, click.Command): raise ValueError("This mixin is only intended for use with click.Command instances.") # We use typer's rich_format_help to render our help output, despite the fact that we are # not using typer elsewhere in the app. Global options are separated from command-specific # options by setting the `rich_help_panel` attribute to "Global options" on our global options. rich_format_help(obj=self, ctx=context, markup_mode="rich")
DgClickHelpMixin
python
dagster-io__dagster
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
{ "start": 243976, "end": 245862 }
class ____(GeneratedAirbyteSource): class OAuth: @public def __init__(self, client_id: str, client_secret: str, refresh_token: str): self.credentials_title = "OAuth Credentials" self.client_id = check.str_param(client_id, "client_id") self.client_secret = check.str_param(client_secret, "client_secret") self.refresh_token = check.str_param(refresh_token, "refresh_token") class APIKey: @public def __init__(self, api_key: str): self.credentials_title = "API Key Credentials" self.api_key = check.str_param(api_key, "api_key") class PrivateAPP: @public def __init__(self, access_token: str): self.credentials_title = "Private App Credentials" self.access_token = check.str_param(access_token, "access_token") @public def __init__( self, name: str, start_date: str, credentials: Union[ "HubspotSource.OAuth", "HubspotSource.APIKey", "HubspotSource.PrivateAPP" ], ): """Airbyte Source for Hubspot. Documentation can be found at https://docs.airbyte.com/integrations/sources/hubspot Args: name (str): The name of the destination. start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. credentials (Union[HubspotSource.OAuth, HubspotSource.APIKey, HubspotSource.PrivateAPP]): Choose how to authenticate to HubSpot. """ self.start_date = check.str_param(start_date, "start_date") self.credentials = check.inst_param( credentials, "credentials", (HubspotSource.OAuth, HubspotSource.APIKey, HubspotSource.PrivateAPP), ) super().__init__("Hubspot", name)
HubspotSource
python
huggingface__transformers
tests/models/llava_next_video/test_modeling_llava_next_video.py
{ "start": 12612, "end": 19984 }
class ____(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf") image_file = hf_hub_download( repo_id="raushan-testing-hf/images_test", filename="llava_v1_5_radar.jpg", repo_type="dataset" ) video_file = hf_hub_download( repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset" ) self.image = Image.open(image_file) self.video = np.load(video_file) self.prompt_image = "USER: <image>\nWhat is shown in this image? ASSISTANT:" self.prompt_video = "USER: <video>\nWhy is this video funny? ASSISTANT:" def tearDown(self): cleanup(torch_device, gc_collect=True) @slow @require_bitsandbytes def test_small_model_integration_test(self): model = LlavaNextVideoForConditionalGeneration.from_pretrained( "llava-hf/LLaVA-NeXT-Video-7B-hf", quantization_config=BitsAndBytesConfig(load_in_4bit=True), cache_dir="./", ) inputs = self.processor(text=self.prompt_video, videos=self.video, return_tensors="pt") # verify single forward pass inputs = inputs.to(torch_device) with torch.no_grad(): output = model(**inputs) # verify generation output = model.generate(**inputs, do_sample=False, max_new_tokens=40) expected_decoded_text = Expectations( { ("cuda", None): "USER: \nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and somewhat comical situation of a young child reading a book while another child is attempting to read the same book. The child who is reading the book seems", ("xpu", None): "USER: \nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and somewhat comical situation of a young child reading a book while another child is attempting to read the same book. The child who is reading the book seems", ("rocm", (9, 5)): "USER: \nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and adorable behavior of the young child. 
The child is seen reading a book, but instead of turning the pages like one would typically do, they", } ).get_expectation() # fmt: off decoded_text = self.processor.decode(output[0], skip_special_tokens=True) self.assertEqual(decoded_text, expected_decoded_text) @slow @require_bitsandbytes def test_small_model_integration_test_batch(self): model = LlavaNextVideoForConditionalGeneration.from_pretrained( "llava-hf/LLaVA-NeXT-Video-7B-hf", quantization_config=BitsAndBytesConfig(load_in_4bit=True), cache_dir="./", ) inputs = self.processor( text=[self.prompt_video, self.prompt_video], videos=[self.video, self.video], return_tensors="pt", padding=True, ).to(torch_device) output = model.generate(**inputs, do_sample=False, max_new_tokens=20) decoded_text = self.processor.batch_decode(output, skip_special_tokens=True) expected_decoded_text = Expectations( { ("xpu", None): "USER: \nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and somewhat comical situation of a young child reading a", ("cuda", None): "USER: \nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and somewhat comical situation of a young child reading a", ("rocm", (9, 5)): "USER: \nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and adorable behavior of the young child. 
The", } ).get_expectation() # fmt: off EXPECTED_DECODED_TEXT = [expected_decoded_text, expected_decoded_text] self.assertEqual(decoded_text, EXPECTED_DECODED_TEXT) @slow @require_bitsandbytes def test_small_model_integration_test_batch_different_vision_types(self): model = LlavaNextVideoForConditionalGeneration.from_pretrained( "llava-hf/LLaVA-NeXT-Video-7B-hf", quantization_config=BitsAndBytesConfig(load_in_4bit=True), cache_dir="./", ) inputs = self.processor( text=[self.prompt_image, self.prompt_video], images=self.image, videos=self.video, return_tensors="pt", padding=True, ).to(torch_device) # check loss when labels are passed inputs["labels"] = inputs["input_ids"].clone() with torch.no_grad(): output = model(**inputs) self.assertTrue(output.loss is not None) # verify generation output = model.generate(**inputs, do_sample=False, max_new_tokens=50) EXPECTED_DECODED_TEXT = Expectations( { ("xpu", None): 'USER: \nWhat is shown in this image? ASSISTANT: The image appears to be a graphical representation of a machine learning model\'s performance on a task, likely related to natural language processing or text understanding. It shows a scatter plot with two axes, one labeled "BLIP-2"', ("rocm", (9, 5)): "USER: \nWhat is shown in this image? ASSISTANT: The image displays a chart that appears to be a comparison of different models or versions of a machine learning (ML) model, likely a neural network, based on their performance on a task or dataset. The chart is a scatter plot with axes labeled", ("cuda", None): 'USER: \nWhat is shown in this image? ASSISTANT: The image appears to be a graphical representation of a machine learning model\'s performance on a task, likely related to natural language processing or text understanding. 
It shows a scatter plot with two axes, one labeled "BLIP-2"', } ).get_expectation() # fmt: off decoded_text = self.processor.decode(output[0], skip_special_tokens=True) self.assertEqual(decoded_text, EXPECTED_DECODED_TEXT) @slow @require_bitsandbytes def test_small_model_integration_test_batch_matches_single(self): model = LlavaNextVideoForConditionalGeneration.from_pretrained( "llava-hf/LLaVA-NeXT-Video-7B-hf", quantization_config=BitsAndBytesConfig(load_in_4bit=True), cache_dir="./", ) inputs_batched = self.processor( text=[self.prompt_video, self.prompt_image], images=[self.image], videos=[self.video], return_tensors="pt", padding=True, ).to(torch_device) inputs_single = self.processor(text=self.prompt_video, videos=[self.video], return_tensors="pt").to( torch_device ) # verify generation output_batched = model.generate(**inputs_batched, do_sample=False, max_new_tokens=50) output_single = model.generate(**inputs_single, do_sample=False, max_new_tokens=50) self.assertEqual( self.processor.decode(output_batched[0], skip_special_tokens=True), self.processor.decode(output_single[0], skip_special_tokens=True), )
LlavaNextVideoForConditionalGenerationIntegrationTest
python
joke2k__faker
faker/providers/company/es_MX/__init__.py
{ "start": 45, "end": 11424 }
class ____(CompanyProvider): formats = ( "{{last_name}} {{company_suffix}}", "{{last_name}}-{{last_name}}", "{{company_prefix}} {{last_name}}-{{last_name}}", "{{company_prefix}} {{last_name}} y {{last_name}}", "{{company_prefix}} {{last_name}}, {{last_name}} y {{last_name}}", "{{last_name}}-{{last_name}} {{company_suffix}}", "{{last_name}}, {{last_name}} y {{last_name}}", "{{last_name}} y {{last_name}} {{company_suffix}}", ) catch_phrase_words = ( ( "habilidad", "acceso", "adaptador", "algoritmo", "alianza", "analista", "aplicación", "enfoque", "arquitectura", "archivo", "inteligencia artificial", "array", "actitud", "medición", "gestión presupuestaria", "capacidad", "desafío", "circuito", "colaboración", "complejidad", "concepto", "conglomeración", "contingencia", "núcleo", "fidelidad", "base de datos", "data-warehouse", "definición", "emulación", "codificar", "encriptar", "extranet", "firmware", "flexibilidad", "focus group", "previsión", "base de trabajo", "función", "funcionalidad", "interfaz gráfica", "groupware", "interfaz gráfico de usuario", "hardware", "soporte", "jerarquía", "conjunto", "implementación", "infraestructura", "iniciativa", "instalación", "conjunto de instrucciones", "interfaz", "intranet", "base del conocimiento", "red de area local", "aprovechar", "matrices", "metodologías", "middleware", "migración", "modelo", "moderador", "monitorizar", "arquitectura abierta", "sistema abierto", "orquestar", "paradigma", "paralelismo", "política", "portal", "estructura de precios", "proceso de mejora", "producto", "productividad", "proyecto", "proyección", "protocolo", "línea segura", "software", "solución", "estandarización", "estrategia", "estructura", "éxito", "superestructura", "soporte", "sinergia", "mediante", "marco de tiempo", "caja de herramientas", "utilización", "website", "fuerza de trabajo", ), ( "24 horas", "24/7", "3ra generación", "4ta generación", "5ta generación", "6ta generación", "analizada", "asimétrica", "asíncrona", "monitorizada 
por red", "bidireccional", "bifurcada", "generada por el cliente", "cliente-servidor", "coherente", "cohesiva", "compuesto", "sensible al contexto", "basado en el contexto", "basado en contenido", "dedicada", "generado por la demanda", "didáctica", "direccional", "discreta", "dinámica", "potenciada", "acompasada", "ejecutiva", "explícita", "tolerante a fallos", "innovadora", "amplio abanico", "global", "heurística", "alto nivel", "holística", "homogénea", "híbrida", "incremental", "intangible", "interactiva", "intermedia", "local", "logística", "maximizada", "metódica", "misión crítica", "móvil", "modular", "motivadora", "multimedia", "multiestado", "multitarea", "nacional", "basado en necesidades", "neutral", "nueva generación", "no-volátil", "orientado a objetos", "óptima", "optimizada", "radical", "tiempo real", "recíproca", "regional", "escalable", "secundaria", "orientada a soluciones", "estable", "estática", "sistemática", "sistémica", "tangible", "terciaria", "transicional", "uniforme", "valor añadido", "vía web", "defectos cero", "tolerancia cero", ), ( "adaptativo", "avanzado", "asimilado", "automatizado", "balanceado", "enfocado al negocio", "centralizado", "clonado", "compatible", "configurable", "multiplataforma", "enfocado al cliente", "personalizable", "descentralizado", "digitalizado", "distribuido", "diverso", "mejorado", "en toda la empresa", "ergonómico", "exclusivo", "expandido", "extendido", "cara a cara", "enfocado", "de primera línea", "totalmente configurable", "basado en funcionalidad", "fundamental", "horizontal", "implementado", "innovador", "integrado", "intuitivo", "inverso", "administrado", "mandatorio", "monitoreado", "multicanal", "multilateral", "multi-capas", "en red", "basado en objetos", "de arquitectura abierta", "open-source", "operativo", "optimizado", "opcional", "orgánico", "organizado", "perseverante", "persistente", "polarizado", "preventivo", "proactivo", "enfocado a ganancias", "programable", "progresivo", "llave 
pública", "enfocado a la calidad", "reactivo", "realineado", "recontextualizado", "reducido", "con ingeniería inversa", "de tamaño adecuado", "robusto", "seguro", "compartible", "sincronizado", "orientado a equipos", "total", "universal", "actualizable", "centrado en el usuario", "versátil", "virtual", "visionario", ), ) bsWords = ( ( "implementa", "utiliza", "integra", "optimiza", "evoluciona", "transforma", "abraza", "habilita", "orquesta", "reinventa", "agrega", "mejora", "incentiva", "modifica", "empodera", "monetiza", "fortalece", "facilita", "sinergiza", "crea marca", "crece", "sintetiza", "entrega", "mezcla", "incuba", "compromete", "maximiza", "visualiza", "innova", "escala", "libera", "maneja", "extiende", "revoluciona", "genera", "explota", "transiciona", "itera", "cultiva", "redefine", "recontextualiza", ), ( "sinergias", "paradigmas", "marcados", "socios", "infraestructuras", "plataformas", "iniciativas", "canales", "communidades", "ROI", "soluciones", "portales", "nichos", "tecnologías", "contenido", "cadena de producción", "convergencia", "relaciones", "arquitecturas", "interfaces", "comercio electrónico", "sistemas", "ancho de banda", "modelos", "entregables", "usuarios", "esquemas", "redes", "aplicaciones", "métricas", "funcionalidades", "experiencias", "servicios web", "metodologías", ), ( "valor agregado", "verticales", "proactivas", "robustas", "revolucionarias", "escalables", "de punta", "innovadoras", "intuitivas", "estratégicas", "e-business", "de misión crítica", "uno-a-uno", "24/7", "end-to-end", "globales", "B2B", "B2C", "granulares", "sin fricciones", "virtuales", "virales", "dinámicas", "24/365", "magnéticas", "listo para la web", "interactivas", "punto-com", "sexi", "en tiempo real", "eficientes", "front-end", "distribuidas", "extensibles", "llave en mano", "de clase mundial", "open-source", "plataforma cruzada", "de paquete", "empresariales", "integrado", "impacto total", "inalámbrica", "transparentes", "de siguiente generación", "lo 
último", "centrado al usuario", "visionarias", "personalizado", "ubicuas", "plug-and-play", "colaborativas", "holísticas", "ricas", ), ) company_preffixes = ( "Despacho", "Grupo", "Corporacin", "Club", "Industrias", "Laboratorios", "Proyectos", ) company_suffixes = ( "A.C.", "S.A.", "S.A. de C.V.", "S.C.", "S. R.L. de C.V.", "e Hijos", "y Asociados", ) def company_prefix(self) -> str: """ :example: 'Grupo' """ return self.random_element(self.company_preffixes)
Provider
python
scrapy__scrapy
tests/test_feedexport.py
{ "start": 21986, "end": 22418 }
class ____(BlockingFeedStorage): def __init__(self, uri, *args, feed_options=None): self.path = Path(file_uri_to_path(uri)) def _store_in_thread(self, file): dirname = self.path.parent if dirname and not dirname.exists(): dirname.mkdir(parents=True) with self.path.open("ab") as output_file: output_file.write(file.read()) file.close()
DummyBlockingFeedStorage
python
readthedocs__readthedocs.org
readthedocs/api/v3/views.py
{ "start": 21641, "end": 22302 }
class ____( APIv3Settings, GenericViewSet, ): # NOTE: this viewset is only useful for nested URLs required for notifications: # /api/v3/users/<username>/notifications/ # However, accessing to /api/v3/users/ or /api/v3/users/<username>/ will return 404. # We can implement these endpoints when we need them, tho. model = User serializer_class = UserSerializer queryset = User.objects.none() permission_classes = (IsAuthenticated,) # We are using the username as the lookup field, # by default, DRF does not allow dots and `/`, # but we allow usernames to have dots. lookup_value_regex = "[^/]+"
UsersViewSet
python
rapidsai__cudf
python/cudf/cudf/pandas/module_accelerator.py
{ "start": 2282, "end": 11089 }
class ____( importlib.abc.MetaPathFinder, importlib.abc.Loader ): _instance: ModuleAcceleratorBase | None = None mod_name: str fast_lib: str slow_lib: str # When walking the module tree and wrapping module attributes, # we often will come across the same object more than once. We # don't want to create separate wrappers for each # instance, so we keep a registry of all module attributes # that we can look up to see if we have already wrapped an # attribute before _wrapped_objs: dict[Any, Any] def __new__( cls, mod_name: str, fast_lib: str, slow_lib: str, ): """Build a custom module finder that will provide wrapped modules on demand. Parameters ---------- mod_name Import name to deliver modules under. fast_lib Name of package that provides "fast" implementation slow_lib Name of package that provides "slow" fallback implementation """ if ModuleAcceleratorBase._instance is not None: raise RuntimeError( "Only one instance of ModuleAcceleratorBase allowed" ) self = object.__new__(cls) self.mod_name = mod_name self.fast_lib = fast_lib self.slow_lib = slow_lib # When walking the module tree and wrapping module attributes, # we often will come across the same object more than once. We # don't want to create separate wrappers for each # instance, so we keep a registry of all module attributes # that we can look up to see if we have already wrapped an # attribute before self._wrapped_objs = {} self._wrapped_objs.update(get_final_type_map()) self._wrapped_objs.update(get_intermediate_type_map()) self._wrapped_objs.update(get_registered_functions()) ModuleAcceleratorBase._instance = self return self def __repr__(self) -> str: return ( f"{self.__class__.__name__}" f"(fast={self.fast_lib}, slow={self.slow_lib})" ) def find_spec( self, fullname: str, path, target=None ) -> importlib.machinery.ModuleSpec | None: """Provide ourselves as a module loader. 
Parameters ---------- fullname Name of module to be imported, if it starts with the name that we are using to wrap, we will deliver ourselves as a loader, otherwise defer to the standard Python loaders. Returns ------- A ModuleSpec with ourself as loader if we're interposing, otherwise None to pass off to the next loader. """ if fullname == self.mod_name or fullname.startswith( f"{self.mod_name}." ): return importlib.machinery.ModuleSpec( name=fullname, loader=self, # Note, this influences the repr of the module, so we may want # to change it if we ever want to control that. origin=None, loader_state=None, is_package=True, ) return None def create_module(self, spec) -> ModuleType | None: return None def exec_module(self, mod: ModuleType): # importlib calls this function with the global import lock held. self._populate_module(mod) @abstractmethod def disabled(self) -> ContextManager: pass def _postprocess_module( self, mod: ModuleType, slow_mod: ModuleType, fast_mod: ModuleType | None, ) -> ModuleType: """Ensure that the wrapped module satisfies required invariants. Parameters ---------- mod Wrapped module to postprocess slow_mod Slow version that we are mimicking fast_mod Fast module that provides accelerated implementations (may be None Returns ------- Checked and validated module Notes ----- The implementation of fast-slow proxies imposes certain requirements on the wrapped modules that it delivers. This function encodes those requirements and raises if the module does not satisfy them. This post-processing routine should be kept up to date with any requirements encoded by fast_slow_proxy.py """ mod.__dict__["_fsproxy_slow"] = slow_mod if fast_mod is not None: mod.__dict__["_fsproxy_fast"] = fast_mod return mod @abstractmethod def _populate_module(self, mod: ModuleType) -> ModuleType: """Populate given module with appropriate attributes. 
This traverses the attributes of the slow module corresponding to mod and mirrors those in the provided module in a wrapped mode that attempts to execute them using the fast module first. Parameters ---------- mod Module to populate Returns ------- ModuleType Populated module Notes ----- In addition to the attributes of the slow module, the returned module must have the following attributes: - '_fsproxy_slow': the corresponding slow module - '_fsproxy_fast': the corresponding fast module This is necessary for correct rewriting of UDFs when calling to the respective fast/slow libraries. The necessary invariants are checked and applied in :meth:`_postprocess_module`. """ pass def _wrap_attribute( self, slow_attr: Any, fast_attr: Any | _Unusable, name: str, ) -> Any: """ Return the wrapped version of an attribute. Parameters ---------- slow_attr : Any The attribute from the slow module fast_mod : Any (or None) The same attribute from the fast module, if it exists name Name of attribute Returns ------- Wrapped attribute """ wrapped_attr: Any # TODO: what else should we make sure not to get from the fast # library? 
if name in {"__all__", "__dir__", "__file__", "__doc__"}: wrapped_attr = slow_attr elif self.fast_lib == self.slow_lib: # no need to create a fast-slow wrapper wrapped_attr = slow_attr if any( [ slow_attr in get_registered_functions(), slow_attr in get_final_type_map(), slow_attr in get_intermediate_type_map(), ] ): # attribute already registered in self._wrapped_objs return self._wrapped_objs[slow_attr] if isinstance(slow_attr, ModuleType) and slow_attr.__name__.startswith( self.slow_lib ): # attribute is a submodule of the slow library, # replace the string "{slow_lib}" in the submodule's # name with "{self.mod_name}" # now, attempt to import the wrapped module, which will # recursively wrap all of its attributes: return importlib.import_module( rename_root_module( slow_attr.__name__, self.slow_lib, self.mod_name ) ) if slow_attr in self._wrapped_objs: if type(fast_attr) is _Unusable: # we don't want to replace a wrapped object that # has a usable fast object with a wrapped object # with a an unusable fast object. return self._wrapped_objs[slow_attr] if _is_function_or_method(slow_attr): wrapped_attr = _FunctionProxy(fast_attr, slow_attr) else: wrapped_attr = slow_attr return wrapped_attr @classmethod @abstractmethod def install( cls, destination_module: str, fast_lib: str, slow_lib: str ) -> Self | None: """ Install the loader in sys.meta_path. Parameters ---------- destination_module Name under which the importer will kick in fast_lib Name of fast module slow_lib Name of slow module we are trying to mimic Returns ------- Instance of the class (or None if the loader was not installed) Notes ----- This function is idempotent. If called with the same arguments a second time, it does not create a new loader, but instead returns the existing loader from ``sys.meta_path``. """ pass
ModuleAcceleratorBase
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/orm/strategies.py
{ "start": 44649, "end": 47711 }
class ____(_PostLoader): __slots__ = ("join_depth",) def __init__(self, parent, strategy_key): super().__init__(parent, strategy_key) self.join_depth = self.parent_property.join_depth def init_class_attribute(self, mapper): self.parent_property._get_strategy( (("lazy", "select"),) ).init_class_attribute(mapper) def create_row_processor( self, context, query_entity, path, loadopt, mapper, result, adapter, populators, ): if not context.compile_state.compile_options._enable_eagerloads: return ( effective_path, run_loader, execution_options, recursion_depth, ) = self._setup_for_recursion(context, path, loadopt, self.join_depth) if not run_loader: # this will not emit SQL and will only emit for a many-to-one # "use get" load. the "_RELATED" part means it may return # instance even if its expired, since this is a mutually-recursive # load operation. flags = attributes.PASSIVE_NO_FETCH_RELATED | PassiveFlag.NO_RAISE else: flags = attributes.PASSIVE_OFF | PassiveFlag.NO_RAISE loading._PostLoad.callable_for_path( context, effective_path, self.parent, self.parent_property, self._load_for_path, loadopt, flags, recursion_depth, execution_options, ) def _load_for_path( self, context, path, states, load_only, loadopt, flags, recursion_depth, execution_options, ): if recursion_depth: new_opt = Load(loadopt.path.entity) new_opt.context = ( loadopt, loadopt._recurse(), ) alternate_effective_path = path._truncate_recursive() extra_options = (new_opt,) else: alternate_effective_path = path extra_options = () key = self.key lazyloader = self.parent_property._get_strategy((("lazy", "select"),)) for state, overwrite in states: dict_ = state.dict if overwrite or key not in dict_: value = lazyloader._load_for_state( state, flags, extra_options=extra_options, alternate_effective_path=alternate_effective_path, execution_options=execution_options, ) if value not in ( ATTR_WAS_SET, LoaderCallableStatus.PASSIVE_NO_RESULT, ): state.get_impl(key).set_committed_value( state, dict_, value ) 
@log.class_logger @relationships.RelationshipProperty.strategy_for(lazy="subquery")
_ImmediateLoader
python
huggingface__transformers
tests/models/rembert/test_modeling_rembert.py
{ "start": 1347, "end": 13362 }
class ____: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, input_embedding_size=18, output_embedding_size=43, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.input_embedding_size = input_embedding_size self.output_embedding_size = output_embedding_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = 
ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = RemBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, input_embedding_size=self.input_embedding_size, output_embedding_size=self.output_embedding_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RemBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, 
encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = RemBertModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RemBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = RemBertForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) 
output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RemBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = RemBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def 
create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = RemBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = RemBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch
RemBertModelTester
python
google__jax
jax/_src/pallas/mosaic/sc_core.py
{ "start": 4313, "end": 4724 }
class ____(pallas_core.BlockMapping): indexed_by: int | None = None indexed_dim: int | None = None def get_sparse_core_info() -> tpu_info.SparseCoreInfo: """Returns the SparseCore information for the current device.""" return tpu_info.get_tpu_info().sparse_core or tpu_info.SparseCoreInfo( num_cores=0, num_subcores=0, num_lanes=0 ) @dataclasses.dataclass(frozen=True, kw_only=True)
BlockMapping
python
allegroai__clearml
clearml/backend_api/services/v2_13/tasks.py
{ "start": 322553, "end": 323688 }
class ____(Response): """ Response of tasks.make_private endpoint. :param updated: Number of tasks updated :type updated: int """ _service = "tasks" _action = "make_private" _version = "2.13" _schema = { "definitions": {}, "properties": { "updated": { "description": "Number of tasks updated", "type": ["integer", "null"], } }, "type": "object", } def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None: super(MakePrivateResponse, self).__init__(**kwargs) self.updated = updated @schema_property("updated") def updated(self) -> Optional[int]: return self._property_updated @updated.setter def updated(self, value: Optional[int]) -> None: if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value
MakePrivateResponse
python
pytest-dev__pytest-xdist
testing/test_workermanage.py
{ "start": 1319, "end": 5221 }
class ____: def test_popen_no_default_chdir(self, config: pytest.Config) -> None: gm = NodeManager(config, ["popen"]) assert gm.specs[0].chdir is None def test_default_chdir(self, config: pytest.Config) -> None: specs = ["ssh=noco", "socket=xyz"] for spec in NodeManager(config, specs).specs: assert spec.chdir == "pyexecnetcache" for spec in NodeManager(config, specs, defaultchdir="abc").specs: assert spec.chdir == "abc" def test_popen_makegateway_events( self, config: pytest.Config, hookrecorder: pytest.HookRecorder, workercontroller: None, ) -> None: hm = NodeManager(config, ["popen"] * 2) hm.setup_nodes(None) # type: ignore[arg-type] call = hookrecorder.popcall("pytest_xdist_setupnodes") assert len(call.specs) == 2 call = hookrecorder.popcall("pytest_xdist_newgateway") assert call.gateway.spec == execnet.XSpec("execmodel=main_thread_only//popen") assert call.gateway.id == "gw0" call = hookrecorder.popcall("pytest_xdist_newgateway") assert call.gateway.id == "gw1" assert len(hm.group) == 2 hm.teardown_nodes() assert not len(hm.group) def test_popens_rsync( self, config: pytest.Config, source: Path, dest: Path, workercontroller: None, ) -> None: hm = NodeManager(config, ["popen"] * 2) hm.setup_nodes(None) # type: ignore[arg-type] assert len(hm.group) == 2 for gw in hm.group: class pseudoexec: args = [] # type: ignore[var-annotated] def __init__(self, *args: object) -> None: self.args.extend(args) def waitclose(self) -> None: pass gw.remote_exec = pseudoexec # type: ignore[assignment] notifications = [] for gw in hm.group: hm.rsync(gw, source, notify=lambda *args: notifications.append(args)) assert not notifications hm.teardown_nodes() assert not len(hm.group) assert "sys.path.insert" in gw.remote_exec.args[0] # type: ignore[attr-defined] def test_rsync_popen_with_path( self, config: pytest.Config, source: Path, dest: Path, workercontroller: None ) -> None: hm = NodeManager(config, ["popen//chdir=%s" % dest] * 1) hm.setup_nodes(None) # type: ignore[arg-type] 
source.joinpath("dir1", "dir2").mkdir(parents=True) source.joinpath("dir1", "dir2", "hello").touch() notifications = [] for gw in hm.group: hm.rsync(gw, source, notify=lambda *args: notifications.append(args)) assert len(notifications) == 1 assert notifications[0] == ("rsyncrootready", hm.group["gw0"].spec, source) hm.teardown_nodes() dest = dest.joinpath(source.name) assert dest.joinpath("dir1").exists() assert dest.joinpath("dir1", "dir2").exists() assert dest.joinpath("dir1", "dir2", "hello").exists() def test_rsync_same_popen_twice( self, config: pytest.Config, source: Path, dest: Path, hookrecorder: pytest.HookRecorder, workercontroller: None, ) -> None: hm = NodeManager(config, ["popen//chdir=%s" % dest] * 2) hm.roots = [] hm.setup_nodes(None) # type: ignore[arg-type] source.joinpath("dir1", "dir2").mkdir(parents=True) source.joinpath("dir1", "dir2", "hello").touch() gw = hm.group[0] hm.rsync(gw, source) call = hookrecorder.popcall("pytest_xdist_rsyncstart") assert call.source == source assert len(call.gateways) == 1 assert call.gateways[0] in hm.group call = hookrecorder.popcall("pytest_xdist_rsyncfinish")
TestNodeManagerPopen
python
ray-project__ray
python/ray/data/_internal/logical/optimizers.py
{ "start": 1674, "end": 3390 }
class ____(Optimizer): """The optimizer for physical operators.""" @property def rules(self) -> List[Rule]: return [rule_cls() for rule_cls in get_physical_ruleset()] def get_plan_conversion_fns() -> List[Callable[[Plan], Plan]]: """Get the list of transformation functions to convert a logical plan to an optimized physical plan. This returns the 3 transformation steps: 1. Logical optimization 2. Planning (logical -> physical operators) 3. Physical optimization Returns: A list of transformation functions, each taking a Plan and returning a Plan. """ from ray.data._internal.planner import create_planner return [ LogicalOptimizer().optimize, # Logical optimization create_planner().plan, # Planning PhysicalOptimizer().optimize, # Physical optimization ] def get_execution_plan(logical_plan: LogicalPlan) -> PhysicalPlan: """Get the physical execution plan for the provided logical plan. This process has 3 steps: (1) logical optimization: optimize logical operators. (2) planning: convert logical to physical operators. (3) physical optimization: optimize physical operators. """ # 1. Get planning functions optimize_logical, plan, optimize_physical = get_plan_conversion_fns() # 2. Logical -> Logical (Optimized) optimized_logical_plan = optimize_logical(logical_plan) # 3. Rewire Logical -> Logical (Optimized) logical_plan._dag = optimized_logical_plan.dag # 4. Logical (Optimized) -> Physical physical_plan = plan(optimized_logical_plan) # 5. Physical (Optimized) -> Physical return optimize_physical(physical_plan)
PhysicalOptimizer
python
tensorflow__tensorflow
tensorflow/python/tpu/feature_column_test.py
{ "start": 6633, "end": 13349 }
class ____(test.TestCase): @test_util.deprecated_graph_mode_only def test_defaults(self): categorical_column_a = fc_lib.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc_lib.categorical_column_with_identity( key='bbb', num_buckets=3) embedding_dimension = 2 embedding_column_b, embedding_column_a = tpu_fc.shared_embedding_columns( [categorical_column_b, categorical_column_a], dimension=embedding_dimension) self.assertIs(categorical_column_a, embedding_column_a.categorical_column) self.assertIs(categorical_column_b, embedding_column_b.categorical_column) self.assertEqual(embedding_dimension, embedding_column_a.dimension) self.assertEqual(embedding_dimension, embedding_column_b.dimension) self.assertEqual('mean', embedding_column_a.combiner) self.assertEqual('mean', embedding_column_b.combiner) self.assertIsNotNone(embedding_column_a.initializer) self.assertIsNotNone(embedding_column_b.initializer) self.assertEqual('aaa_bbb_shared_embedding', embedding_column_a.shared_embedding_collection_name) self.assertEqual('aaa_bbb_shared_embedding', embedding_column_b.shared_embedding_collection_name) self.assertEqual('aaa_shared_embedding', embedding_column_a.name) self.assertEqual('bbb_shared_embedding', embedding_column_b.name) self.assertEqual('aaa_bbb_shared_embedding', embedding_column_a._var_scope_name) self.assertEqual('aaa_bbb_shared_embedding', embedding_column_b._var_scope_name) self.assertEqual((embedding_dimension,), embedding_column_a._variable_shape) self.assertEqual((embedding_dimension,), embedding_column_b._variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_a._parse_example_spec) self.assertEqual({ 'bbb': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_b._parse_example_spec) @test_util.deprecated_graph_mode_only def test_all_constructor_args(self): categorical_column_a = fc_lib.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = 
fc_lib.categorical_column_with_identity( key='bbb', num_buckets=3) embedding_dimension = 2 embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, combiner='my_combiner', initializer=lambda: 'my_initializer', shared_embedding_collection_name='var_scope_name') self.assertIs(categorical_column_a, embedding_column_a.categorical_column) self.assertIs(categorical_column_b, embedding_column_b.categorical_column) self.assertEqual(embedding_dimension, embedding_column_a.dimension) self.assertEqual(embedding_dimension, embedding_column_b.dimension) self.assertEqual('my_combiner', embedding_column_a.combiner) self.assertEqual('my_combiner', embedding_column_b.combiner) self.assertEqual('my_initializer', embedding_column_a.initializer()) self.assertEqual('my_initializer', embedding_column_b.initializer()) self.assertEqual('var_scope_name', embedding_column_a.shared_embedding_collection_name) self.assertEqual('var_scope_name', embedding_column_b.shared_embedding_collection_name) self.assertEqual('aaa_shared_embedding', embedding_column_a.name) self.assertEqual('bbb_shared_embedding', embedding_column_b.name) self.assertEqual('var_scope_name', embedding_column_a._var_scope_name) self.assertEqual('var_scope_name', embedding_column_b._var_scope_name) self.assertEqual((embedding_dimension,), embedding_column_a._variable_shape) self.assertEqual((embedding_dimension,), embedding_column_b._variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_a._parse_example_spec) self.assertEqual({ 'bbb': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_b._parse_example_spec) @test_util.deprecated_graph_mode_only def test_get_dense_tensor(self): # Inputs. vocabulary_size = 3 # -1 values are ignored. 
input_a = np.array([ [2, -1, -1], # example 0, ids [2] [0, 1, -1] ]) # example 1, ids [0, 1] input_b = np.array([ [0, -1, -1], # example 0, ids [0] [-1, -1, -1] ]) # example 1, ids [] input_features = {'aaa': input_a, 'bbb': input_b} # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups_a = ( # example 0: (7., 11.), # ids [2], embedding = [7, 11] # example 1: (2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] ) expected_lookups_b = ( # example 0: (1., 2.), # ids [0], embedding = [1, 2] # example 1: (0., 0.), # ids [], embedding = [0, 0] ) # Build columns. categorical_column_a = fc_lib.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc_lib.categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer) # Provide sparse input and get dense result. embedding_lookup_a = embedding_column_a._get_dense_tensor( fc._LazyBuilder(input_features)) embedding_lookup_b = embedding_column_b._get_dense_tensor( fc._LazyBuilder(input_features)) # Assert expected embedding variable and lookups. 
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertItemsEqual(('embedding_weights:0',), tuple([v.name for v in global_vars])) embedding_var = global_vars[0] with _initialized_session(): self.assertAllEqual(embedding_values, embedding_var) self.assertAllEqual(expected_lookups_a, embedding_lookup_a) self.assertAllEqual(expected_lookups_b, embedding_lookup_b) if __name__ == '__main__': test.main()
SharedEmbeddingColumnTest
python
Textualize__textual
tests/snapshot_tests/language_snippets.py
{ "start": 15671, "end": 19198 }
class ____ implements Shape { private double width; private double height; public Rectangle(double width, double height) { this.width = width; this.height = height; } @Override public double getArea() { return width * height; } } // Enums enum DaysOfWeek { MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY } public class Main { // Constants private static final double PI = 3.14159; // Methods public static int sum(int a, int b) { return a + b; } public static void main(String[] args) { // Variables String name = "John"; int age = 30; boolean isStudent = true; // Printing variables System.out.println("Hello, " + name + "! You are " + age + " years old."); // Conditional statements if (age >= 18 && isStudent) { System.out.println("You are an adult student."); } else if (age >= 18) { System.out.println("You are an adult."); } else { System.out.println("You are a minor."); } // Arrays int[] numbers = {1, 2, 3, 4, 5}; System.out.println("Numbers: " + Arrays.toString(numbers)); // Lists List<String> fruits = new ArrayList<>(); fruits.add("apple"); fruits.add("banana"); fruits.add("orange"); System.out.println("Fruits: " + fruits); // Loops for (int num : numbers) { System.out.println("Number: " + num); } // Hash maps Map<String, Integer> scores = new HashMap<>(); scores.put("Alice", 100); scores.put("Bob", 80); System.out.println("Alice's score: " + scores.get("Alice")); // Exception handling try { int result = 10 / 0; } catch (ArithmeticException e) { System.out.println("Error: " + e.getMessage()); } // Instantiating objects Rectangle rect = new Rectangle(10, 20); System.out.println("Rectangle area: " + rect.getArea()); // Enums DaysOfWeek today = DaysOfWeek.MONDAY; System.out.println("Today is " + today); // Calling methods int sum = sum(5, 10); System.out.println("Sum: " + sum); // Ternary operator String message = age >= 18 ? "You are an adult." 
: "You are a minor."; System.out.println(message); } } """ XML = """\ <?xml version="1.0" encoding="UTF-8"?> <!-- This is an example XML document --> <library> <book id="1" genre="fiction"> <title>The Great Gatsby</title> <author>F. Scott Fitzgerald</author> <published>1925</published> <description><![CDATA[This classic novel explores themes of wealth, society, and the American dream.]]></description> </book> <book id="2" genre="non-fiction"> <title>Sapiens: A Brief History of Humankind</title> <author>Yuval Noah Harari</author> <published>2011</published> <description><![CDATA[Explores the history and impact of Homo sapiens.]]></description> </book> <!-- Another book can be added here --> </library> """ SNIPPETS = { "python": PYTHON, "markdown": MARKDOWN, "yaml": YAML, "toml": TOML, "sql": SQL, "css": CSS, "html": HTML, "json": JSON, "regex": REGEX, "go": GO, "javascript": JAVASCRIPT, "bash": BASH, "rust": RUST, "java": JAVA, "xml": XML, }
Rectangle
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_iter.py
{ "start": 3511, "end": 3729 }
class ____: def __init__(self): self.i = 0 def __call__(self): i = self.i self.i = i + 1 if i > 100: raise IndexError # Emergency stop return i
CallableIterClass
python
coleifer__peewee
tests/sql.py
{ "start": 72000, "end": 74554 }
class ____(BaseTestCase): _data = [(1, 'one'), (2, 'two'), (3, 'three')] def test_values_list(self): vl = ValuesList(self._data) query = vl.select(SQL('*')) self.assertSQL(query, ( 'SELECT * FROM (VALUES (?, ?), (?, ?), (?, ?)) AS "t1"'), [1, 'one', 2, 'two', 3, 'three']) def test_values_list_named_columns(self): vl = ValuesList(self._data).columns('idx', 'name') query = (vl .select(vl.c.idx, vl.c.name) .order_by(vl.c.idx)) self.assertSQL(query, ( 'SELECT "t1"."idx", "t1"."name" ' 'FROM (VALUES (?, ?), (?, ?), (?, ?)) AS "t1"("idx", "name") ' 'ORDER BY "t1"."idx"'), [1, 'one', 2, 'two', 3, 'three']) def test_named_values_list(self): vl = ValuesList(self._data, ['idx', 'name']).alias('vl') query = (vl .select(vl.c.idx, vl.c.name) .order_by(vl.c.idx)) self.assertSQL(query, ( 'SELECT "vl"."idx", "vl"."name" ' 'FROM (VALUES (?, ?), (?, ?), (?, ?)) AS "vl"("idx", "name") ' 'ORDER BY "vl"."idx"'), [1, 'one', 2, 'two', 3, 'three']) def test_docs_examples(self): data = [(1, 'first'), (2, 'second')] vl = ValuesList(data, columns=('idx', 'name')) query = (vl .select(vl.c.idx, vl.c.name) .order_by(vl.c.idx)) self.assertSQL(query, ( 'SELECT "t1"."idx", "t1"."name" ' 'FROM (VALUES (?, ?), (?, ?)) AS "t1"("idx", "name") ' 'ORDER BY "t1"."idx"'), [1, 'first', 2, 'second']) vl = ValuesList([(1, 'first'), (2, 'second')]) vl = vl.columns('idx', 'name').alias('v') query = vl.select(vl.c.idx, vl.c.name) self.assertSQL(query, ( 'SELECT "v"."idx", "v"."name" ' 'FROM (VALUES (?, ?), (?, ?)) AS "v"("idx", "name")'), [1, 'first', 2, 'second']) def test_join_on_valueslist(self): vl = ValuesList([('huey',), ('zaizee',)], columns=['username']) query = (User .select(vl.c.username) .join(vl, on=(User.c.username == vl.c.username)) .order_by(vl.c.username.desc())) self.assertSQL(query, ( 'SELECT "t1"."username" FROM "users" AS "t2" ' 'INNER JOIN (VALUES (?), (?)) AS "t1"("username") ' 'ON ("t2"."username" = "t1"."username") ' 'ORDER BY "t1"."username" DESC'), ['huey', 'zaizee'])
TestValuesList
python
PrefectHQ__prefect
src/integrations/prefect-sqlalchemy/tests/test_database.py
{ "start": 2646, "end": 19488 }
class ____: async def test_connector_init(self): credentials_components = SqlAlchemyConnector( connection_info=ConnectionComponents( driver=SyncDriver.POSTGRESQL_PSYCOPG2, username="myusername", password="mypass", database="my.db", host="localhost", port=1234, ), ) connection_url = "postgresql+psycopg2://myusername:mypass@localhost:1234/my.db" credentials_url = SqlAlchemyConnector(connection_info=connection_url) assert credentials_components._rendered_url == credentials_url._rendered_url def test_connector_init_with_oracle_url(self): connection_url = "oracle+cx_oracle://myusername:mypass@localhost:1234/my.db" connector = SqlAlchemyConnector(connection_info=connection_url) assert isinstance(connector._rendered_url, URL) assert str(connector._rendered_url).startswith("oracle+cx_oracle://") def test_connector_init_fails_with_invalid_url(self): with pytest.raises(ValueError, match="Invalid URL"): SqlAlchemyConnector(connection_info="plskeepmydata") @pytest.mark.parametrize("method", ["fetch_all", "execute"]) def test_delay_start(self, caplog, method): with SqlAlchemyConnector( connection_info=ConnectionComponents( driver=SyncDriver.SQLITE_PYSQLITE, database=":memory:", ), ) as connector: assert connector._unique_results == {} assert isinstance(connector._exit_stack, ExitStack) connector.reset_connections() assert ( caplog.records[0].msg == "Reset opened connections and their results." 
) assert connector._engine is None assert connector._unique_results == {} assert isinstance(connector._exit_stack, ExitStack) getattr(connector, method)("SELECT 1") assert isinstance(connector._engine, Engine) if method == "execute": assert connector._unique_results == {} else: assert len(connector._unique_results) == 1 assert isinstance(connector._exit_stack, ExitStack) @pytest.fixture(params=[SyncDriver.SQLITE_PYSQLITE, AsyncDriver.SQLITE_AIOSQLITE]) async def connector_with_data(self, tmp_path, request): credentials = SqlAlchemyConnector( connection_info=ConnectionComponents( driver=request.param, database=str(tmp_path / "test.db"), ), fetch_size=2, ) create_result = await credentials.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" ) insert_result = await credentials.execute( "INSERT INTO customers (name, address) VALUES (:name, :address);", parameters={"name": "Marvin", "address": "Highway 42"}, ) many_result = await credentials.execute_many( "INSERT INTO customers (name, address) VALUES (:name, :address);", seq_of_parameters=[ {"name": "Ford", "address": "Highway 42"}, {"name": "Unknown", "address": "Space"}, {"name": "Me", "address": "Myway 88"}, ], ) assert isinstance(many_result, CursorResult) assert isinstance(insert_result, CursorResult) assert isinstance(create_result, CursorResult) yield credentials @pytest.fixture(params=[True, False]) async def managed_connector_with_data(self, connector_with_data, request): if request.param: # managed if connector_with_data._driver_is_async: async with connector_with_data: yield connector_with_data else: with connector_with_data: yield connector_with_data # need to reset manually because # the function is sync_compatible, but the test is an async function # so calling the close method in this async context results in: # 'SqlAlchemyConnector.reset_connections' was never awaited # but normally it's run in a sync function, which properly closes connector_with_data._reset_cursor_results() 
else: yield connector_with_data if connector_with_data._driver_is_async: await connector_with_data.aclose() else: connector_with_data._reset_cursor_results() connector_with_data._exit_stack.close() connector_with_data._engine = None assert connector_with_data._unique_results == {} assert connector_with_data._engine is None @pytest.mark.parametrize("begin", [True, False]) async def test_get_connection(self, begin, managed_connector_with_data): connection = managed_connector_with_data.get_connection(begin=begin) if begin: engine_type = ( AsyncEngine if managed_connector_with_data._driver_is_async else Engine ) if SQLALCHEMY_VERSION.startswith("1."): assert isinstance(connection, engine_type._trans_ctx) elif managed_connector_with_data._driver_is_async: async with connection as conn: assert isinstance(conn, engine_type._connection_cls) else: with connection as conn: assert isinstance(conn, engine_type._connection_cls) else: engine_type = ( AsyncConnection if managed_connector_with_data._driver_is_async else Connection ) assert isinstance(connection, engine_type) @pytest.mark.parametrize("begin", [True, False]) async def test_get_client(self, begin, managed_connector_with_data): connection = managed_connector_with_data.get_client( client_type="connection", begin=begin ) if begin: engine_type = ( AsyncEngine if managed_connector_with_data._driver_is_async else Engine ) if SQLALCHEMY_VERSION.startswith("1."): assert isinstance(connection, engine_type._trans_ctx) elif managed_connector_with_data._driver_is_async: async with connection as conn: assert isinstance(conn, engine_type._connection_cls) else: with connection as conn: assert isinstance(conn, engine_type._connection_cls) else: engine_type = ( AsyncConnection if managed_connector_with_data._driver_is_async else Connection ) assert isinstance(connection, engine_type) async def test_reset_connections_sync_async_error( self, managed_connector_with_data ): with pytest.raises(RuntimeError, match="synchronous 
connections"): if managed_connector_with_data._driver_is_async: await managed_connector_with_data.reset_connections() else: await managed_connector_with_data.reset_async_connections() async def test_fetch_one(self, managed_connector_with_data): results = await managed_connector_with_data.fetch_one("SELECT * FROM customers") assert results == ("Marvin", "Highway 42") results = await managed_connector_with_data.fetch_one("SELECT * FROM customers") assert results == ("Ford", "Highway 42") # test with parameters results = await managed_connector_with_data.fetch_one( "SELECT * FROM customers WHERE address = :address", parameters={"address": "Myway 88"}, ) assert results == ("Me", "Myway 88") assert len(managed_connector_with_data._unique_results) == 2 # now reset so fetch starts at the first value again if managed_connector_with_data._driver_is_async: await managed_connector_with_data.reset_async_connections() else: await managed_connector_with_data.reset_connections() assert len(managed_connector_with_data._unique_results) == 0 # ensure it's really reset results = await managed_connector_with_data.fetch_one("SELECT * FROM customers") assert results == ("Marvin", "Highway 42") assert len(managed_connector_with_data._unique_results) == 1 @pytest.mark.parametrize("size", [None, 1, 2]) async def test_fetch_many(self, managed_connector_with_data, size): results = await managed_connector_with_data.fetch_many( "SELECT * FROM customers", size=size ) expected = [("Marvin", "Highway 42"), ("Ford", "Highway 42")][ : (size or managed_connector_with_data.fetch_size) ] assert results == expected # test with parameters results = await managed_connector_with_data.fetch_many( "SELECT * FROM customers WHERE address = :address", parameters={"address": "Myway 88"}, ) assert results == [("Me", "Myway 88")] assert len(managed_connector_with_data._unique_results) == 2 # now reset so fetch starts at the first value again if managed_connector_with_data._driver_is_async: await 
managed_connector_with_data.reset_async_connections() else: await managed_connector_with_data.reset_connections() assert len(managed_connector_with_data._unique_results) == 0 # ensure it's really reset results = await managed_connector_with_data.fetch_many( "SELECT * FROM customers", size=3 ) assert results == [ ("Marvin", "Highway 42"), ("Ford", "Highway 42"), ("Unknown", "Space"), ] assert len(managed_connector_with_data._unique_results) == 1 async def test_fetch_all(self, managed_connector_with_data): # test with parameters results = await managed_connector_with_data.fetch_all( "SELECT * FROM customers WHERE address = :address", parameters={"address": "Highway 42"}, ) expected = [("Marvin", "Highway 42"), ("Ford", "Highway 42")] assert results == expected # there should be no more results results = await managed_connector_with_data.fetch_all( "SELECT * FROM customers WHERE address = :address", parameters={"address": "Highway 42"}, ) assert results == [] assert len(managed_connector_with_data._unique_results) == 1 # now reset so fetch one starts at the first value again if managed_connector_with_data._driver_is_async: await managed_connector_with_data.reset_async_connections() else: await managed_connector_with_data.reset_connections() assert len(managed_connector_with_data._unique_results) == 0 # ensure it's really reset results = await managed_connector_with_data.fetch_all( "SELECT * FROM customers WHERE address = :address", parameters={"address": "Highway 42"}, ) expected = [("Marvin", "Highway 42"), ("Ford", "Highway 42")] assert results == expected assert len(managed_connector_with_data._unique_results) == 1 def test_close(self, managed_connector_with_data): if managed_connector_with_data._driver_is_async: with pytest.raises(RuntimeError, match="Please use the"): managed_connector_with_data.close() else: managed_connector_with_data.close() # test calling it twice async def test_aclose(self, managed_connector_with_data): if not 
managed_connector_with_data._driver_is_async: with pytest.raises(RuntimeError, match="Please use the"): await managed_connector_with_data.aclose() else: await managed_connector_with_data.aclose() # test calling it twice async def test_enter(self, managed_connector_with_data): if managed_connector_with_data._driver_is_async: with pytest.raises(RuntimeError, match="cannot be run"): with managed_connector_with_data: pass async def test_aenter(self, managed_connector_with_data): if not managed_connector_with_data._driver_is_async: with pytest.raises(RuntimeError, match="cannot be run"): async with managed_connector_with_data: pass def test_sync_sqlite_in_flow(self, tmp_path): @flow def a_flow(): with SqlAlchemyConnector( connection_info=ConnectionComponents( driver=SyncDriver.SQLITE_PYSQLITE, database=str(tmp_path / "test.db"), ) ) as conn: conn.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" # noqa ) conn.execute( "INSERT INTO customers (name, address) VALUES (:name, :address);", parameters={"name": "Marvin", "address": "Highway 42"}, ) conn.execute_many( "INSERT INTO customers (name, address) VALUES (:name, :address);", seq_of_parameters=[ {"name": "Ford", "address": "Highway 42"}, {"name": "Unknown", "address": "Space"}, {"name": "Me", "address": "Myway 88"}, ], ) return conn.fetch_one("SELECT * FROM customers") assert a_flow() == ("Marvin", "Highway 42") def test_sync_compatible_reset_connections(self, tmp_path): conn = SqlAlchemyConnector( connection_info=ConnectionComponents( driver=SyncDriver.SQLITE_PYSQLITE, database=str(tmp_path / "test.db"), ) ) conn.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" # noqa ) conn.execute( "INSERT INTO customers (name, address) VALUES (:name, :address);", parameters={"name": "Marvin", "address": "Highway 42"}, ) conn.fetch_one("SELECT * FROM customers") assert len(conn._unique_results) == 1 conn.reset_connections() assert len(conn._unique_results) == 0 def 
test_flow_without_initialized_engine(self, tmp_path): @task def setup_table(block_name: str) -> None: with SqlAlchemyConnector.load(block_name) as connector: connector.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" # noqa ) connector.execute( "INSERT INTO customers (name, address) VALUES (:name, :address);", parameters={"name": "Marvin", "address": "Highway 42"}, ) connector.execute_many( "INSERT INTO customers (name, address) VALUES (:name, :address);", seq_of_parameters=[ {"name": "Ford", "address": "Highway 42"}, {"name": "Unknown", "address": "Highway 42"}, ], ) @task def fetch_data(block_name: str) -> list: all_rows = [] with SqlAlchemyConnector.load(block_name) as connector: while True: # Repeated fetch* calls using the same operation will # skip re-executing and instead return the next set of results new_rows = connector.fetch_many("SELECT * FROM customers", size=2) if len(new_rows) == 0: break all_rows.append(new_rows) return all_rows @flow def sqlalchemy_flow(block_name: str) -> list: SqlAlchemyConnector( connection_info=ConnectionComponents( driver=SyncDriver.SQLITE_PYSQLITE, database=str(tmp_path / "test.db"), ) ).save(block_name) setup_table(block_name) all_rows = fetch_data(block_name) return all_rows assert sqlalchemy_flow("connector") == [ [("Marvin", "Highway 42"), ("Ford", "Highway 42")], [("Unknown", "Highway 42")], ]
TestSqlAlchemyConnector