language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | numba__numba | numba/core/ir.py | {
"start": 27540,
"end": 27925
} | class ____(Stmt):
"""
Assign to a variable.
"""
def __init__(self, value, target, loc):
assert isinstance(value, AbstractRHS)
assert isinstance(target, Var)
assert isinstance(loc, Loc)
self.value = value
self.target = target
self.loc = loc
def __str__(self):
return '%s = %s' % (self.target, self.value)
| Assign |
python | PyCQA__pylint | doc/data/messages/s/subclassed-final-class/bad.py | {
"start": 148,
"end": 260
} | class ____(PlatypusData): # [subclassed-final-class]
"""Playtipus with fluorescent fur."""
| FluorescentPlaytipus |
python | apache__airflow | providers/mysql/src/airflow/providers/mysql/hooks/mysql.py | {
"start": 2010,
"end": 16335
} | class ____(DbApiHook):
"""
Interact with MySQL.
You can specify charset in the extra field of your connection
as ``{"charset": "utf8"}``. Also you can choose cursor as
``{"cursor": "SSCursor"}``. Refer to the MySQLdb.cursors for more details.
Note: For AWS IAM authentication, use iam in the extra connection parameters
and set it to true. Leave the password field empty. This will use the
"aws_default" connection to get the temporary token unless you override
in extras.
extras example: ``{"iam":true, "aws_conn_id":"my_aws_conn"}``
You can also add "local_infile" parameter to determine whether local_infile feature of MySQL client is
going to be enabled (it is disabled by default).
:param schema: The MySQL database schema to connect to.
:param connection: The :ref:`MySQL connection id <howto/connection:mysql>` used for MySQL credentials.
:param local_infile: Boolean flag determining if local_infile should be used
:param init_command: Initial command to issue to MySQL server upon connection
"""
conn_name_attr = "mysql_conn_id"
default_conn_name = "mysql_default"
conn_type = "mysql"
hook_name = "MySQL"
supports_autocommit = True
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **{**kwargs, **{"escape_word_format": "`{}`"}})
self.schema = kwargs.pop("schema", None)
self.local_infile = kwargs.pop("local_infile", False)
self.init_command = kwargs.pop("init_command", None)
def set_autocommit(self, conn: MySQLConnectionTypes, autocommit: bool) -> None:
"""
Set *autocommit*.
*mysqlclient* uses an *autocommit* method rather than an *autocommit*
property, so we need to override this to support it.
:param conn: connection to set autocommit setting
:param autocommit: autocommit setting
"""
if hasattr(conn.__class__, "autocommit") and isinstance(conn.__class__.autocommit, property):
conn.autocommit = autocommit
else:
conn.autocommit(autocommit) # type: ignore[operator]
def get_autocommit(self, conn: MySQLConnectionTypes) -> bool:
"""
Whether *autocommit* is active.
*mysqlclient* uses an *get_autocommit* method rather than an *autocommit*
property, so we need to override this to support it.
:param conn: connection to get autocommit setting from.
:return: connection autocommit setting
"""
if hasattr(conn.__class__, "autocommit") and isinstance(conn.__class__.autocommit, property):
return conn.autocommit
return conn.get_autocommit() # type: ignore[union-attr]
def _get_conn_config_mysql_client(self, conn: Connection) -> dict:
conn_config = {
"user": conn.login,
"passwd": conn.password or "",
"host": conn.host or "localhost",
"db": self.schema or conn.schema or "",
}
# check for authentication via AWS IAM
if conn.extra_dejson.get("iam", False):
conn_config["passwd"], conn.port = self.get_iam_token(conn)
conn_config["read_default_group"] = "enable-cleartext-plugin"
conn_config["port"] = int(conn.port) if conn.port else 3306
if conn.extra_dejson.get("charset", False):
conn_config["charset"] = conn.extra_dejson["charset"]
if str(conn_config.get("charset", "undef")).lower() in ("utf8", "utf-8"):
conn_config["use_unicode"] = True
if conn.extra_dejson.get("cursor", False):
try:
import MySQLdb.cursors
except ImportError:
raise RuntimeError(
"You do not have `mysqlclient` package installed. "
"Please install it with `pip install mysqlclient` and make sure you have system "
"mysql libraries installed, as well as well as `pkg-config` system package "
"installed in case you see compilation error during installation."
)
cursor_type = conn.extra_dejson.get("cursor", "").lower()
# Dictionary mapping cursor types to their respective classes
cursor_classes = {
"sscursor": MySQLdb.cursors.SSCursor,
"dictcursor": MySQLdb.cursors.DictCursor,
"ssdictcursor": MySQLdb.cursors.SSDictCursor,
}
# Set the cursor class in the connection configuration based on the cursor type
if cursor_type in cursor_classes:
conn_config["cursorclass"] = cursor_classes[cursor_type]
if conn.extra_dejson.get("ssl", False):
# SSL parameter for MySQL has to be a dictionary and in case
# of extra/dejson we can get string if extra is passed via
# URL parameters
dejson_ssl = conn.extra_dejson["ssl"]
if isinstance(dejson_ssl, str):
dejson_ssl = json.loads(dejson_ssl)
conn_config["ssl"] = dejson_ssl
if conn.extra_dejson.get("ssl_mode", False):
conn_config["ssl_mode"] = conn.extra_dejson["ssl_mode"]
if conn.extra_dejson.get("unix_socket"):
conn_config["unix_socket"] = conn.extra_dejson["unix_socket"]
if self.local_infile:
conn_config["local_infile"] = 1
if self.init_command:
conn_config["init_command"] = self.init_command
return conn_config
def _get_conn_config_mysql_connector_python(self, conn: Connection) -> dict:
conn_config = {
"user": conn.login,
"password": conn.password or "",
"host": conn.host or "localhost",
"database": self.schema or conn.schema or "",
"port": int(conn.port) if conn.port else 3306,
}
if self.local_infile:
conn_config["allow_local_infile"] = True
if self.init_command:
conn_config["init_command"] = self.init_command
# Ref: https://dev.mysql.com/doc/connector-python/en/connector-python-connectargs.html
for key, value in conn.extra_dejson.items():
if key.startswith("ssl_"):
conn_config[key] = value
return conn_config
def get_conn(self) -> MySQLConnectionTypes:
"""
Get connection to a MySQL database.
Establishes a connection to a mysql database
by extracting the connection configuration from the Airflow connection.
.. note:: By default it connects to the database via the mysqlclient library.
But you can also choose the mysql-connector-python library which lets you connect through ssl
without any further ssl parameters required.
:return: a mysql connection object
"""
conn = self.connection or self.get_connection(self.get_conn_id())
client_name = conn.extra_dejson.get("client", "mysqlclient")
if client_name == "mysqlclient":
try:
import MySQLdb
except ImportError:
raise RuntimeError(
"You do not have `mysqlclient` package installed. "
"Please install it with `pip install mysqlclient` and make sure you have system "
"mysql libraries installed, as well as well as `pkg-config` system package "
"installed in case you see compilation error during installation."
)
conn_config = self._get_conn_config_mysql_client(conn)
return MySQLdb.connect(**conn_config)
if client_name == "mysql-connector-python":
try:
import mysql.connector
except ModuleNotFoundError:
raise AirflowOptionalProviderFeatureException(
"The pip package 'mysql-connector-python' is not installed, therefore the connection "
"wasn't established. Please, consider using default driver or pip install the package "
"'mysql-connector-python'. Warning! It might cause dependency conflicts."
)
conn_config = self._get_conn_config_mysql_connector_python(conn)
return mysql.connector.connect(**conn_config)
raise ValueError("Unknown MySQL client name provided!")
def bulk_load(self, table: str, tmp_file: str) -> None:
"""Load a tab-delimited file into a database table."""
import re
conn = self.get_conn()
cur = conn.cursor()
if not re.fullmatch(r"^[a-zA-Z0-9_.]+$", table):
raise ValueError(f"Invalid table name: {table}")
cur.execute(
f"LOAD DATA LOCAL INFILE %s INTO TABLE `{table}`",
(tmp_file,),
)
conn.commit()
conn.close()
def bulk_dump(self, table: str, tmp_file: str) -> None:
"""Dump a database table into a tab-delimited file."""
import re
conn = self.get_conn()
cur = conn.cursor()
if not re.fullmatch(r"^[a-zA-Z0-9_.]+$", table):
raise ValueError(f"Invalid table name: {table}")
cur.execute(
f"SELECT * INTO OUTFILE %s FROM `{table}`",
(tmp_file,),
)
conn.commit()
conn.close()
@staticmethod
def _serialize_cell(cell: object, conn: Connection | None = None) -> Any:
"""
Convert argument to a literal.
The package MySQLdb converts an argument to a literal
when passing those separately to execute. Hence, this method does nothing.
:param cell: The cell to insert into the table
:param conn: The database connection
:return: The same cell
"""
return cell
def get_iam_token(self, conn: Connection) -> tuple[str, int]:
"""
Retrieve a temporary password to connect to MySQL.
Uses AWSHook to retrieve a temporary password to connect to MySQL
Port is required. If none is provided, default 3306 is used
"""
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
aws_conn_id = conn.extra_dejson.get("aws_conn_id", "aws_default")
aws_hook = AwsBaseHook(aws_conn_id, client_type="rds")
if conn.port is None:
port = 3306
else:
port = conn.port
client = aws_hook.get_conn()
token = client.generate_db_auth_token(conn.host, port, conn.login)
return token, port
def bulk_load_custom(
self, table: str, tmp_file: str, duplicate_key_handling: str = "IGNORE", extra_options: str = ""
) -> None:
"""
Load local data from a file into the database in a more configurable way.
.. warning:: According to the mysql docs using this function is a
`security risk <https://dev.mysql.com/doc/refman/8.0/en/load-data-local.html>`_.
If you want to use it anyway you can do so by setting a client-side + server-side option.
This depends on the mysql client library used.
:param table: The table were the file will be loaded into.
:param tmp_file: The file (name) that contains the data.
:param duplicate_key_handling: Specify what should happen to duplicate data.
You can choose either `IGNORE` or `REPLACE`.
.. seealso::
https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-duplicate-key-handling
:param extra_options: More sql options to specify exactly how to load the data.
.. seealso:: https://dev.mysql.com/doc/refman/8.0/en/load-data.html
"""
conn = self.get_conn()
cursor = conn.cursor()
cursor.execute(
f"LOAD DATA LOCAL INFILE %s %s INTO TABLE `{table}` %s",
(tmp_file, duplicate_key_handling, extra_options),
)
cursor.close()
conn.commit()
conn.close()
def get_openlineage_database_info(self, connection):
"""Return MySQL specific information for OpenLineage."""
from airflow.providers.openlineage.sqlparser import DatabaseInfo
return DatabaseInfo(
scheme=self.get_openlineage_database_dialect(connection),
authority=DbApiHook.get_openlineage_authority_part(connection, default_port=3306),
information_schema_columns=[
"table_schema",
"table_name",
"column_name",
"ordinal_position",
"column_type",
],
normalize_name_method=lambda name: name.upper(),
)
def get_openlineage_database_dialect(self, _):
"""Return database dialect."""
return "mysql"
def get_openlineage_default_schema(self):
"""MySQL has no concept of schema."""
return None
def get_uri(self) -> str:
"""Get URI for MySQL connection."""
conn = self.connection or self.get_connection(self.get_conn_id())
conn_schema = self.schema or conn.schema or ""
client_name = conn.extra_dejson.get("client", "mysqlclient")
# Determine URI prefix based on client
if client_name == "mysql-connector-python":
uri_prefix = "mysql+mysqlconnector://"
elif client_name == "pymysql":
uri_prefix = "mysql+pymysql://"
else: # default: mysqlclient
uri_prefix = "mysql://"
auth_part = ""
if conn.login:
auth_part = quote_plus(conn.login)
if conn.password:
auth_part = f"{auth_part}:{quote_plus(conn.password)}"
auth_part = f"{auth_part}@"
host_part = conn.host or "localhost"
if conn.port:
host_part = f"{host_part}:{conn.port}"
schema_part = f"/{quote_plus(conn_schema)}" if conn_schema else ""
uri = f"{uri_prefix}{auth_part}{host_part}{schema_part}"
# Add extra connection parameters
extra = conn.extra_dejson.copy()
if "client" in extra:
extra.pop("client")
query_params = {k: str(v) for k, v in extra.items() if v}
if query_params:
uri = f"{uri}?{urlencode(query_params)}"
return uri
| MySqlHook |
python | mwaskom__seaborn | tests/_core/test_moves.py | {
"start": 8432,
"end": 9723
} | class ____(MoveFixtures):
def test_basic(self, toy_df):
groupby = GroupBy(["color", "group"])
res = Stack()(toy_df, groupby, "x", {})
assert_array_equal(res["x"], [0, 0, 1])
assert_array_equal(res["y"], [1, 3, 3])
assert_array_equal(res["baseline"], [0, 1, 0])
def test_faceted(self, toy_df_facets):
groupby = GroupBy(["color", "group"])
res = Stack()(toy_df_facets, groupby, "x", {})
assert_array_equal(res["x"], [0, 0, 1, 0, 1, 2])
assert_array_equal(res["y"], [1, 3, 3, 1, 2, 3])
assert_array_equal(res["baseline"], [0, 1, 0, 0, 0, 0])
def test_misssing_data(self, toy_df):
df = pd.DataFrame({
"x": [0, 0, 0],
"y": [2, np.nan, 1],
"baseline": [0, 0, 0],
})
res = Stack()(df, None, "x", {})
assert_array_equal(res["y"], [2, np.nan, 3])
assert_array_equal(res["baseline"], [0, np.nan, 2])
def test_baseline_homogeneity_check(self, toy_df):
toy_df["baseline"] = [0, 1, 2]
groupby = GroupBy(["color", "group"])
move = Stack()
err = "Stack move cannot be used when baselines"
with pytest.raises(RuntimeError, match=err):
move(toy_df, groupby, "x", {})
| TestStack |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 20015,
"end": 20183
} | class ____(AtomicRule):
a: Expr
d: Expr
def eval(self) -> Expr:
return elliptic_f(self.variable, self.d/self.a)/sqrt(self.a)
@dataclass
| EllipticFRule |
python | pandas-dev__pandas | pandas/tests/reshape/merge/test_join.py | {
"start": 624,
"end": 40938
} | class ____:
# aggregate multiple columns
@pytest.fixture
def df(self):
df = DataFrame(
{
"key1": get_test_data(),
"key2": get_test_data(),
"data1": np.random.default_rng(2).standard_normal(50),
"data2": np.random.default_rng(2).standard_normal(50),
}
)
# exclude a couple keys for fun
df = df[df["key2"] > 1]
return df
@pytest.fixture
def df2(self):
return DataFrame(
{
"key1": get_test_data(n=10),
"key2": get_test_data(ngroups=4, n=10),
"value": np.random.default_rng(2).standard_normal(10),
}
)
@pytest.fixture
def target_source(self):
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
target = DataFrame(data, index=Index(["a", "b", "c", "d", "e"], dtype=object))
# Join on string value
source = DataFrame(
{"MergedA": data["A"], "MergedD": data["D"]}, index=data["C"]
)
return target, source
def test_left_outer_join(self, df, df2):
joined_key2 = merge(df, df2, on="key2")
_check_join(df, df2, joined_key2, ["key2"], how="left")
joined_both = merge(df, df2)
_check_join(df, df2, joined_both, ["key1", "key2"], how="left")
def test_right_outer_join(self, df, df2):
joined_key2 = merge(df, df2, on="key2", how="right")
_check_join(df, df2, joined_key2, ["key2"], how="right")
joined_both = merge(df, df2, how="right")
_check_join(df, df2, joined_both, ["key1", "key2"], how="right")
def test_full_outer_join(self, df, df2):
joined_key2 = merge(df, df2, on="key2", how="outer")
_check_join(df, df2, joined_key2, ["key2"], how="outer")
joined_both = merge(df, df2, how="outer")
_check_join(df, df2, joined_both, ["key1", "key2"], how="outer")
def test_inner_join(self, df, df2):
joined_key2 = merge(df, df2, on="key2", how="inner")
_check_join(df, df2, joined_key2, ["key2"], how="inner")
joined_both = merge(df, df2, how="inner")
_check_join(df, df2, joined_both, ["key1", "key2"], how="inner")
def test_handle_overlap(self, df, df2):
joined = merge(df, df2, on="key2", suffixes=(".foo", ".bar"))
assert "key1.foo" in joined
assert "key1.bar" in joined
def test_handle_overlap_arbitrary_key(self, df, df2):
joined = merge(
df,
df2,
left_on="key2",
right_on="key1",
suffixes=(".foo", ".bar"),
)
assert "key1.foo" in joined
assert "key2.bar" in joined
@pytest.mark.parametrize(
"infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
)
def test_join_on(self, target_source, infer_string):
target, source = target_source
merged = target.join(source, on="C")
tm.assert_series_equal(merged["MergedA"], target["A"], check_names=False)
tm.assert_series_equal(merged["MergedD"], target["D"], check_names=False)
# join with duplicates (fix regression from DataFrame/Matrix merge)
df = DataFrame({"key": ["a", "a", "b", "b", "c"]})
df2 = DataFrame({"value": [0, 1, 2]}, index=["a", "b", "c"])
joined = df.join(df2, on="key")
expected = DataFrame(
{"key": ["a", "a", "b", "b", "c"], "value": [0, 0, 1, 1, 2]}
)
tm.assert_frame_equal(joined, expected)
# Test when some are missing
df_a = DataFrame([[1], [2], [3]], index=["a", "b", "c"], columns=["one"])
df_b = DataFrame([["foo"], ["bar"]], index=[1, 2], columns=["two"])
df_c = DataFrame([[1], [2]], index=[1, 2], columns=["three"])
joined = df_a.join(df_b, on="one")
joined = joined.join(df_c, on="one")
assert np.isnan(joined["two"]["c"])
assert np.isnan(joined["three"]["c"])
# merge column not p resent
with pytest.raises(KeyError, match="^'E'$"):
target.join(source, on="E")
# overlap
msg = (
"You are trying to merge on float64 and object|str columns for key "
"'A'. If you wish to proceed you should use pd.concat"
)
with pytest.raises(ValueError, match=msg):
target.join(source, on="A")
def test_join_on_fails_with_different_right_index(self):
df = DataFrame(
{
"a": np.random.default_rng(2).choice(["m", "f"], size=3),
"b": np.random.default_rng(2).standard_normal(3),
}
)
df2 = DataFrame(
{
"a": np.random.default_rng(2).choice(["m", "f"], size=10),
"b": np.random.default_rng(2).standard_normal(10),
},
index=MultiIndex.from_product([range(5), ["A", "B"]]),
)
msg = r'len\(left_on\) must equal the number of levels in the index of "right"'
with pytest.raises(ValueError, match=msg):
merge(df, df2, left_on="a", right_index=True)
def test_join_on_fails_with_different_left_index(self):
df = DataFrame(
{
"a": np.random.default_rng(2).choice(["m", "f"], size=3),
"b": np.random.default_rng(2).standard_normal(3),
},
index=MultiIndex.from_arrays([range(3), list("abc")]),
)
df2 = DataFrame(
{
"a": np.random.default_rng(2).choice(["m", "f"], size=10),
"b": np.random.default_rng(2).standard_normal(10),
}
)
msg = r'len\(right_on\) must equal the number of levels in the index of "left"'
with pytest.raises(ValueError, match=msg):
merge(df, df2, right_on="b", left_index=True)
def test_join_on_fails_with_different_column_counts(self):
df = DataFrame(
{
"a": np.random.default_rng(2).choice(["m", "f"], size=3),
"b": np.random.default_rng(2).standard_normal(3),
}
)
df2 = DataFrame(
{
"a": np.random.default_rng(2).choice(["m", "f"], size=10),
"b": np.random.default_rng(2).standard_normal(10),
},
index=MultiIndex.from_product([range(5), ["A", "B"]]),
)
msg = r"len\(right_on\) must equal len\(left_on\)"
with pytest.raises(ValueError, match=msg):
merge(df, df2, right_on="a", left_on=["a", "b"])
@pytest.mark.parametrize("wrong_type", [2, "str", None, np.array([0, 1])])
def test_join_on_fails_with_wrong_object_type(self, wrong_type):
# GH12081 - original issue
# GH21220 - merging of Series and DataFrame is now allowed
# Edited test to remove the Series object from test parameters
df = DataFrame({"a": [1, 1]})
msg = (
"Can only merge Series or DataFrame objects, "
f"a {type(wrong_type)} was passed"
)
with pytest.raises(TypeError, match=msg):
merge(wrong_type, df, left_on="a", right_on="a")
with pytest.raises(TypeError, match=msg):
merge(df, wrong_type, left_on="a", right_on="a")
def test_join_on_pass_vector(self, target_source):
target, source = target_source
expected = target.join(source, on="C")
expected = expected.rename(columns={"C": "key_0"})
expected = expected[["key_0", "A", "B", "D", "MergedA", "MergedD"]]
join_col = target.pop("C")
result = target.join(source, on=join_col)
tm.assert_frame_equal(result, expected)
def test_join_with_len0(self, target_source):
# nothing to merge
target, source = target_source
merged = target.join(source.reindex([]), on="C")
for col in source:
assert col in merged
assert merged[col].isna().all()
merged2 = target.join(source.reindex([]), on="C", how="inner")
tm.assert_index_equal(merged2.columns, merged.columns)
assert len(merged2) == 0
def test_join_on_inner(self):
df = DataFrame({"key": ["a", "a", "d", "b", "b", "c"]})
df2 = DataFrame({"value": [0, 1]}, index=["a", "b"])
joined = df.join(df2, on="key", how="inner")
expected = df.join(df2, on="key")
expected = expected[expected["value"].notna()]
tm.assert_series_equal(joined["key"], expected["key"])
tm.assert_series_equal(joined["value"], expected["value"], check_dtype=False)
tm.assert_index_equal(joined.index, expected.index)
def test_join_on_singlekey_list(self):
df = DataFrame({"key": ["a", "a", "b", "b", "c"]})
df2 = DataFrame({"value": [0, 1, 2]}, index=["a", "b", "c"])
# corner cases
joined = df.join(df2, on=["key"])
expected = df.join(df2, on="key")
tm.assert_frame_equal(joined, expected)
def test_join_on_series(self, target_source):
target, source = target_source
result = target.join(source["MergedA"], on="C")
expected = target.join(source[["MergedA"]], on="C")
tm.assert_frame_equal(result, expected)
def test_join_on_series_buglet(self):
# GH #638
df = DataFrame({"a": [1, 1]})
ds = Series([2], index=[1], name="b")
result = df.join(ds, on="a")
expected = DataFrame({"a": [1, 1], "b": [2, 2]}, index=df.index)
tm.assert_frame_equal(result, expected)
def test_join_index_mixed(self, join_type):
# no overlapping blocks
df1 = DataFrame(index=np.arange(10))
df1["bool"] = True
df1["string"] = "foo"
df2 = DataFrame(index=np.arange(5, 15))
df2["int"] = 1
df2["float"] = 1.0
joined = df1.join(df2, how=join_type)
expected = _join_by_hand(df1, df2, how=join_type)
tm.assert_frame_equal(joined, expected)
joined = df2.join(df1, how=join_type)
expected = _join_by_hand(df2, df1, how=join_type)
tm.assert_frame_equal(joined, expected)
def test_join_index_mixed_overlap(self):
df1 = DataFrame(
{"A": 1.0, "B": 2, "C": "foo", "D": True},
index=np.arange(10),
columns=["A", "B", "C", "D"],
)
assert df1["B"].dtype == np.int64
assert df1["D"].dtype == np.bool_
df2 = DataFrame(
{"A": 1.0, "B": 2, "C": "foo", "D": True},
index=np.arange(0, 10, 2),
columns=["A", "B", "C", "D"],
)
# overlap
joined = df1.join(df2, lsuffix="_one", rsuffix="_two")
expected_columns = [
"A_one",
"B_one",
"C_one",
"D_one",
"A_two",
"B_two",
"C_two",
"D_two",
]
df1.columns = expected_columns[:4]
df2.columns = expected_columns[4:]
expected = _join_by_hand(df1, df2)
tm.assert_frame_equal(joined, expected)
def test_join_empty_bug(self):
# generated an exception in 0.4.3
x = DataFrame()
x.join(DataFrame([3], index=[0], columns=["A"]), how="outer")
def test_join_unconsolidated(self):
# GH #331
a = DataFrame(
np.random.default_rng(2).standard_normal((30, 2)), columns=["a", "b"]
)
c = Series(np.random.default_rng(2).standard_normal(30))
a["c"] = c
d = DataFrame(np.random.default_rng(2).standard_normal((30, 1)), columns=["q"])
# it works!
a.join(d)
d.join(a)
def test_join_multiindex(self):
index1 = MultiIndex.from_arrays(
[["a", "a", "a", "b", "b", "b"], [1, 2, 3, 1, 2, 3]],
names=["first", "second"],
)
index2 = MultiIndex.from_arrays(
[["b", "b", "b", "c", "c", "c"], [1, 2, 3, 1, 2, 3]],
names=["first", "second"],
)
df1 = DataFrame(
data=np.random.default_rng(2).standard_normal(6),
index=index1,
columns=["var X"],
)
df2 = DataFrame(
data=np.random.default_rng(2).standard_normal(6),
index=index2,
columns=["var Y"],
)
df1 = df1.sort_index(level=0)
df2 = df2.sort_index(level=0)
joined = df1.join(df2, how="outer")
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
tm.assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
df1 = df1.sort_index(level=1)
df2 = df2.sort_index(level=1)
joined = df1.join(df2, how="outer").sort_index(level=0)
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
tm.assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
def test_join_inner_multiindex(self, lexsorted_two_level_string_multiindex):
key1 = ["bar", "bar", "bar", "foo", "foo", "baz", "baz", "qux", "qux", "snap"]
key2 = [
"two",
"one",
"three",
"one",
"two",
"one",
"two",
"two",
"three",
"one",
]
data = np.random.default_rng(2).standard_normal(len(key1))
data = DataFrame({"key1": key1, "key2": key2, "data": data})
index = lexsorted_two_level_string_multiindex
to_join = DataFrame(
np.random.default_rng(2).standard_normal((10, 3)),
index=index,
columns=["j_one", "j_two", "j_three"],
)
joined = data.join(to_join, on=["key1", "key2"], how="inner")
expected = merge(
data,
to_join.reset_index(),
left_on=["key1", "key2"],
right_on=["first", "second"],
how="inner",
sort=False,
)
expected2 = merge(
to_join,
data,
right_on=["key1", "key2"],
left_index=True,
how="inner",
sort=False,
)
tm.assert_frame_equal(joined, expected2.reindex_like(joined))
expected2 = merge(
to_join,
data,
right_on=["key1", "key2"],
left_index=True,
how="inner",
sort=False,
)
expected = expected.drop(["first", "second"], axis=1)
expected.index = joined.index
assert joined.index.is_monotonic_increasing
tm.assert_frame_equal(joined, expected)
# _assert_same_contents(expected, expected2.loc[:, expected.columns])
def test_join_hierarchical_mixed_raises(self):
# GH 2024
# GH 40993: For raising, enforced in 2.0
df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "c"])
new_df = df.groupby(["a"]).agg({"b": ["mean", "sum"]})
other_df = DataFrame([(1, 2, 3), (7, 10, 6)], columns=["a", "b", "d"])
other_df.set_index("a", inplace=True)
# GH 9455, 12219
with pytest.raises(
pd.errors.MergeError, match="Not allowed to merge between different levels"
):
merge(new_df, other_df, left_index=True, right_index=True)
def test_join_float64_float32(self):
a = DataFrame(
np.random.default_rng(2).standard_normal((10, 2)),
columns=["a", "b"],
dtype=np.float64,
)
b = DataFrame(
np.random.default_rng(2).standard_normal((10, 1)),
columns=["c"],
dtype=np.float32,
)
joined = a.join(b)
assert joined.dtypes["a"] == "float64"
assert joined.dtypes["b"] == "float64"
assert joined.dtypes["c"] == "float32"
a = np.random.default_rng(2).integers(0, 5, 100).astype("int64")
b = np.random.default_rng(2).random(100).astype("float64")
c = np.random.default_rng(2).random(100).astype("float32")
df = DataFrame({"a": a, "b": b, "c": c})
xpdf = DataFrame({"a": a, "b": b, "c": c})
s = DataFrame(
np.random.default_rng(2).random(5).astype("float32"), columns=["md"]
)
rs = df.merge(s, left_on="a", right_index=True)
assert rs.dtypes["a"] == "int64"
assert rs.dtypes["b"] == "float64"
assert rs.dtypes["c"] == "float32"
assert rs.dtypes["md"] == "float32"
xp = xpdf.merge(s, left_on="a", right_index=True)
tm.assert_frame_equal(rs, xp)
def test_join_many_non_unique_index(self):
df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how="outer")
df_partially_merged = merge(df1, df2, on=["a", "b"], how="outer")
expected = merge(df_partially_merged, df3, on=["a", "b"], how="outer")
result = result.reset_index()
expected = expected[result.columns]
expected["a"] = expected.a.astype("int64")
expected["b"] = expected.b.astype("int64")
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
df3 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how="inner")
df_partially_merged = merge(df1, df2, on=["a", "b"], how="inner")
expected = merge(df_partially_merged, df3, on=["a", "b"], how="inner")
result = result.reset_index()
tm.assert_frame_equal(result, expected.loc[:, result.columns])
# GH 11519
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
"C": np.random.default_rng(2).standard_normal(8),
"D": np.random.default_rng(2).standard_normal(8),
}
)
s = Series(
np.repeat(np.arange(8), 2), index=np.repeat(np.arange(8), 2), name="TEST"
)
inner = df.join(s, how="inner")
outer = df.join(s, how="outer")
left = df.join(s, how="left")
right = df.join(s, how="right")
tm.assert_frame_equal(inner, outer)
tm.assert_frame_equal(inner, left)
tm.assert_frame_equal(inner, right)
@pytest.mark.parametrize(
"infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
)
def test_join_sort(self, infer_string):
with option_context("future.infer_string", infer_string):
left = DataFrame(
{"key": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]}
)
right = DataFrame({"value2": ["a", "b", "c"]}, index=["bar", "baz", "foo"])
joined = left.join(right, on="key", sort=True)
expected = DataFrame(
{
"key": ["bar", "baz", "foo", "foo"],
"value": [2, 3, 1, 4],
"value2": ["a", "b", "c", "c"],
},
index=[1, 2, 0, 3],
)
tm.assert_frame_equal(joined, expected)
# smoke test
joined = left.join(right, on="key", sort=False)
tm.assert_index_equal(joined.index, Index(range(4)), exact=True)
def test_join_mixed_non_unique_index(self):
# GH 12814, unorderable types in py3 with a non-unique index
df1 = DataFrame({"a": [1, 2, 3, 4]}, index=[1, 2, 3, "a"])
df2 = DataFrame({"b": [5, 6, 7, 8]}, index=[1, 3, 3, 4])
result = df1.join(df2)
expected = DataFrame(
{"a": [1, 2, 3, 3, 4], "b": [5, np.nan, 6, 7, np.nan]},
index=[1, 2, 3, 3, "a"],
)
tm.assert_frame_equal(result, expected)
df3 = DataFrame({"a": [1, 2, 3, 4]}, index=[1, 2, 2, "a"])
df4 = DataFrame({"b": [5, 6, 7, 8]}, index=[1, 2, 3, 4])
result = df3.join(df4)
expected = DataFrame(
{"a": [1, 2, 3, 4], "b": [5, 6, 6, np.nan]}, index=[1, 2, 2, "a"]
)
tm.assert_frame_equal(result, expected)
def test_join_non_unique_period_index(self):
# GH #16871
index = pd.period_range("2016-01-01", periods=16, freq="M")
df = DataFrame(list(range(len(index))), index=index, columns=["pnum"])
df2 = concat([df, df])
result = df.join(df2, how="inner", rsuffix="_df2")
expected = DataFrame(
np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),
columns=["pnum", "pnum_df2"],
index=df2.sort_index().index,
)
tm.assert_frame_equal(result, expected)
def test_mixed_type_join_with_suffix(self, using_infer_string):
# GH #916
df = DataFrame(
np.random.default_rng(2).standard_normal((20, 6)),
columns=["a", "b", "c", "d", "e", "f"],
)
df.insert(0, "id", 0)
df.insert(5, "dt", "foo")
grouped = df.groupby("id")
msg = re.escape("agg function failed [how->mean,dtype->")
if using_infer_string:
msg = "dtype 'str' does not support operation 'mean'"
with pytest.raises(TypeError, match=msg):
grouped.mean()
mn = grouped.mean(numeric_only=True)
cn = grouped.count()
# it works!
mn.join(cn, rsuffix="_right")
    def test_join_many(self):
        """Joining a list of column slices on the index reassembles the frame."""
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 6)), columns=list("abcdef")
        )
        df_list = [df[["a", "b"]], df[["c", "d"]], df[["e", "f"]]]
        joined = df_list[0].join(df_list[1:])
        tm.assert_frame_equal(joined, df)
        # Pieces with partially overlapping indexes.
        df_list = [df[["a", "b"]][:-2], df[["c", "d"]][2:], df[["e", "f"]][1:9]]

        def _check_diff_index(df_list, result, exp_index):
            # Expected: reindex each piece onto exp_index, then join normally.
            reindexed = [x.reindex(exp_index) for x in df_list]
            expected = reindexed[0].join(reindexed[1:])
            tm.assert_frame_equal(result, expected)

        # different join types
        joined = df_list[0].join(df_list[1:], how="outer")
        _check_diff_index(df_list, joined, df.index)
        joined = df_list[0].join(df_list[1:])
        _check_diff_index(df_list, joined, df_list[0].index)
        joined = df_list[0].join(df_list[1:], how="inner")
        _check_diff_index(df_list, joined, df.index[2:8])
        # Column-based join keys are not supported for multi-frame joins.
        msg = "Joining multiple DataFrames only supported for joining on index"
        with pytest.raises(ValueError, match=msg):
            df_list[0].join(df_list[1:], on="a")
    @pytest.mark.parametrize("how", ["left", "right", "inner", "outer"])
    def test_join_many_sort_unique(self, how, sort):
        """List-join with unique indexes honors ``sort`` for every ``how``."""
        # https://github.com/pandas-dev/pandas/pull/62954
        df = DataFrame({"a": [1, 2, 3]}, index=[1, 0, 2])
        df2 = DataFrame({"b": [4, 5, 6]}, index=[2, 0, 1])
        if how == "right":
            expected = DataFrame({"a": [3, 2, 1], "b": [4, 5, 6]}, index=[2, 0, 1])
        else:
            expected = DataFrame({"a": [1, 2, 3], "b": [6, 5, 4]}, index=[1, 0, 2])
        if how == "outer" or sort:
            # outer always sorts.
            expected = expected.sort_index()
        result = df.join([df2], how=how, sort=sort)
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize("how", ["left", "right", "inner", "outer"])
    def test_join_many_sort_nonunique(self, how, sort):
        """List-join with duplicate index labels honors ``sort`` per ``how``."""
        # https://github.com/pandas-dev/pandas/pull/62954
        df = DataFrame({"a": [1, 2, 3]}, index=[3, 0, 0])
        df2 = DataFrame({"b": [4, 5, 6]}, index=[2, 0, 1])
        if how == "inner":
            expected = DataFrame({"a": [2, 3], "b": [5, 5]}, index=[0, 0])
        elif how == "left":
            expected = DataFrame(
                {"a": [1, 2, 3], "b": [np.nan, 5.0, 5.0]}, index=[3, 0, 0]
            )
        elif how == "right":
            expected = DataFrame(
                {"a": [np.nan, 2.0, 3.0, np.nan], "b": [4, 5, 5, 6]}, index=[2, 0, 0, 1]
            )
        else:
            # outer: union of labels, duplicated 0 kept twice.
            expected = DataFrame(
                {
                    "a": [2.0, 3.0, np.nan, np.nan, 1.0],
                    "b": [5.0, 5.0, 6.0, 4.0, np.nan],
                },
                index=[0, 0, 1, 2, 3],
            )
        if sort:
            expected = expected.sort_index()
        result = df.join([df2], how=how, sort=sort)
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize("how", ["inner", "outer", "left", "right"])
    def test_join_many_datetime_unsorted(self, how):
        """List-join keeps an unsorted DatetimeIndex except for outer joins."""
        # https://github.com/pandas-dev/pandas/pull/62843
        index = Index([datetime(2024, 1, 2), datetime(2024, 1, 1)])
        df = DataFrame({"a": [1, 2]}, index=index)
        df2 = DataFrame({"b": [1, 2]}, index=index)
        result = df.join([df2], how=how)
        if how == "outer":
            # Outer always sorts the index.
            expected = DataFrame({"a": [2, 1], "b": [2, 1]}, index=[index[1], index[0]])
        else:
            expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=index)
        tm.assert_frame_equal(result, expected)
    def test_join_many_mixed(self):
        """Joining numeric and object column slices reassembles the frame."""
        df = DataFrame(
            np.random.default_rng(2).standard_normal((8, 4)),
            columns=["A", "B", "C", "D"],
        )
        df["key"] = ["foo", "bar"] * 4
        df1 = df.loc[:, ["A", "B"]]
        df2 = df.loc[:, ["C", "D"]]
        df3 = df.loc[:, ["key"]]
        result = df1.join([df2, df3])
        tm.assert_frame_equal(result, df)
    def test_join_dups(self):
        """Self-join with duplicate columns; overlap-causing merges must raise."""
        # joining dups
        df = concat(
            [
                DataFrame(
                    np.random.default_rng(2).standard_normal((10, 4)),
                    columns=["A", "A", "B", "B"],
                ),
                DataFrame(
                    np.random.default_rng(2).integers(0, 10, size=20).reshape(10, 2),
                    columns=["A", "C"],
                ),
            ],
            axis=1,
        )
        expected = concat([df, df], axis=1)
        result = df.join(df, rsuffix="_2")
        # Only the values matter here; normalize the suffixed column labels.
        result.columns = expected.columns
        tm.assert_frame_equal(result, expected)

        # GH 4975, invalid join on dups
        w = DataFrame(
            np.random.default_rng(2).standard_normal((4, 2)), columns=["x", "y"]
        )
        x = DataFrame(
            np.random.default_rng(2).standard_normal((4, 2)), columns=["x", "y"]
        )
        y = DataFrame(
            np.random.default_rng(2).standard_normal((4, 2)), columns=["x", "y"]
        )
        z = DataFrame(
            np.random.default_rng(2).standard_normal((4, 2)), columns=["x", "y"]
        )
        dta = x.merge(y, left_index=True, right_index=True).merge(
            z, left_index=True, right_index=True, how="outer"
        )
        # GH 40991: As of 2.0 causes duplicate columns
        with pytest.raises(
            pd.errors.MergeError,
            match="Passing 'suffixes' which cause duplicate columns",
        ):
            dta.merge(w, left_index=True, right_index=True)
    def test_join_multi_to_multi(self, join_type):
        """Join on a subset of MultiIndex levels matches the merge equivalent."""
        # GH 20475
        leftindex = MultiIndex.from_product(
            [list("abc"), list("xy"), [1, 2]], names=["abc", "xy", "num"]
        )
        left = DataFrame({"v1": range(12)}, index=leftindex)
        rightindex = MultiIndex.from_product(
            [list("abc"), list("xy")], names=["abc", "xy"]
        )
        right = DataFrame({"v2": [100 * i for i in range(1, 7)]}, index=rightindex)
        result = left.join(right, on=["abc", "xy"], how=join_type)
        # Reference result via an explicit merge on the same keys.
        expected = (
            left.reset_index()
            .merge(right.reset_index(), on=["abc", "xy"], how=join_type)
            .set_index(["abc", "xy", "num"])
        )
        tm.assert_frame_equal(expected, result)
        # The number of join keys must match the other frame's index levels.
        msg = r'len\(left_on\) must equal the number of levels in the index of "right"'
        with pytest.raises(ValueError, match=msg):
            left.join(right, on="xy", how=join_type)
        with pytest.raises(ValueError, match=msg):
            right.join(left, on=["abc", "xy"], how=join_type)
    def test_join_on_tz_aware_datetimeindex(self):
        """Join on a timezone-aware datetime column aligns without tz loss."""
        # GH 23931, 26335
        df1 = DataFrame(
            {
                "date": pd.date_range(
                    start="2018-01-01", periods=5, tz="America/Chicago"
                ),
                "vals": list("abcde"),
            }
        )
        df2 = DataFrame(
            {
                "date": pd.date_range(
                    start="2018-01-03", periods=5, tz="America/Chicago"
                ),
                "vals_2": list("tuvwx"),
            }
        )
        result = df1.join(df2.set_index("date"), on="date")
        expected = df1.copy()
        # The first two dates precede df2's range, hence NaN.
        expected["vals_2"] = Series([np.nan] * 2 + list("tuv"))
        tm.assert_frame_equal(result, expected)
    def test_join_datetime_string(self):
        """Join on a (datetime, string) key pair against a MultiIndex."""
        # GH 5647
        dfa = DataFrame(
            [
                ["2012-08-02", "L", 10],
                ["2012-08-02", "J", 15],
                ["2013-04-06", "L", 20],
                ["2013-04-06", "J", 25],
            ],
            columns=["x", "y", "a"],
        )
        # Pin to ns resolution so the key dtypes match exactly.
        dfa["x"] = pd.to_datetime(dfa["x"]).astype("M8[ns]")
        dfb = DataFrame(
            [["2012-08-02", "J", 1], ["2013-04-06", "L", 2]],
            columns=["x", "y", "z"],
            index=[2, 4],
        )
        dfb["x"] = pd.to_datetime(dfb["x"]).astype("M8[ns]")
        result = dfb.join(dfa.set_index(["x", "y"]), on=["x", "y"])
        expected = DataFrame(
            [
                [Timestamp("2012-08-02 00:00:00"), "J", 1, 15],
                [Timestamp("2013-04-06 00:00:00"), "L", 2, 20],
            ],
            index=[2, 4],
            columns=["x", "y", "z", "a"],
        )
        expected["x"] = expected["x"].astype("M8[ns]")
        tm.assert_frame_equal(result, expected)
    def test_join_with_categorical_index(self):
        """Join CategoricalIndexes whose categories are ordered differently."""
        # GH47812
        ix = ["a", "b"]
        id1 = pd.CategoricalIndex(ix, categories=ix)
        id2 = pd.CategoricalIndex(reversed(ix), categories=reversed(ix))
        df1 = DataFrame({"c1": ix}, index=id1)
        df2 = DataFrame({"c2": reversed(ix)}, index=id2)
        result = df1.join(df2)
        # The calling frame's category order wins in the result index.
        expected = DataFrame(
            {"c1": ["a", "b"], "c2": ["a", "b"]},
            index=pd.CategoricalIndex(["a", "b"], categories=["a", "b"]),
        )
        tm.assert_frame_equal(result, expected)
def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix="_y"):
    """Smoke-check a join ``result`` against its ``left``/``right`` inputs.

    Groups all three frames by ``join_col`` and verifies, per key, that the
    joined rows match the corresponding source group — or are entirely NA
    when the key is legitimately absent on one side for the given ``how``.
    """
    # some smoke tests
    for c in join_col:
        assert result[c].notna().all()
    left_grouped = left.groupby(join_col)
    right_grouped = right.groupby(join_col)
    for group_key, group in result.groupby(join_col):
        # Split the joined group back into its left/right contributions.
        l_joined = _restrict_to_columns(group, left.columns, lsuffix)
        r_joined = _restrict_to_columns(group, right.columns, rsuffix)
        try:
            lgroup = left_grouped.get_group(group_key)
        except KeyError as err:
            # A key missing on the left may only appear for right/outer joins.
            if how in ("left", "inner"):
                raise AssertionError(
                    f"key {group_key} should not have been in the join"
                ) from err
            _assert_all_na(l_joined, left.columns, join_col)
        else:
            _assert_same_contents(l_joined, lgroup)
        try:
            rgroup = right_grouped.get_group(group_key)
        except KeyError as err:
            # A key missing on the right may only appear for left/outer joins.
            if how in ("right", "inner"):
                raise AssertionError(
                    f"key {group_key} should not have been in the join"
                ) from err
            _assert_all_na(r_joined, right.columns, join_col)
        else:
            _assert_same_contents(r_joined, rgroup)
def _restrict_to_columns(group, columns, suffix):
found = [
c for c in group.columns if c in columns or c.replace(suffix, "") in columns
]
# filter
group = group.loc[:, found]
# get rid of suffixes, if any
group = group.rename(columns=lambda x: x.replace(suffix, ""))
# put in the right order...
group = group.loc[:, columns]
return group
def _assert_same_contents(join_chunk, source):
NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
rows = {tuple(row) for row in jvalues}
assert len(rows) == len(source)
assert all(tuple(row) in rows for row in svalues)
def _assert_all_na(join_chunk, source_columns, join_col):
for c in source_columns:
if c in join_col:
continue
assert join_chunk[c].isna().all()
def _join_by_hand(a, b, how="left"):
join_index = a.index.join(b.index, how=how)
a_re = a.reindex(join_index)
b_re = b.reindex(join_index)
result_columns = a.columns.append(b.columns)
for col, s in b_re.items():
a_re[col] = s
return a_re.reindex(columns=result_columns)
def test_join_inner_multiindex_deterministic_order():
# GH: 36910
left = DataFrame(
data={"e": 5},
index=MultiIndex.from_tuples([(1, 2, 4)], names=("a", "b", "d")),
)
right = DataFrame(
data={"f": 6}, index=MultiIndex.from_tuples([(2, 3)], names=("b", "c"))
)
result = left.join(right, how="inner")
expected = DataFrame(
{"e": [5], "f": [6]},
index=MultiIndex.from_tuples([(1, 2, 4, 3)], names=("a", "b", "d", "c")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    ("input_col", "output_cols"), [("b", ["a", "b"]), ("a", ["a_x", "a_y"])]
)
def test_join_cross(input_col, output_cols):
    """Cross join yields the cartesian product; suffixes apply only on overlap."""
    # GH#5401
    left = DataFrame({"a": [1, 3]})
    right = DataFrame({input_col: [3, 4]})
    result = left.join(right, how="cross", lsuffix="_x", rsuffix="_y")
    expected = DataFrame({output_cols[0]: [1, 1, 3, 3], output_cols[1]: [3, 4, 3, 4]})
    tm.assert_frame_equal(result, expected)
def test_join_multiindex_one_level(join_type):
# GH#36909
left = DataFrame(
data={"c": 3}, index=MultiIndex.from_tuples([(1, 2)], names=("a", "b"))
)
right = DataFrame(data={"d": 4}, index=MultiIndex.from_tuples([(2,)], names=("b",)))
result = left.join(right, how=join_type)
if join_type == "right":
expected = DataFrame(
{"c": [3], "d": [4]},
index=MultiIndex.from_tuples([(2, 1)], names=["b", "a"]),
)
else:
expected = DataFrame(
{"c": [3], "d": [4]},
index=MultiIndex.from_tuples([(1, 2)], names=["a", "b"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "categories, values",
    [
        (["Y", "X"], ["Y", "X", "X"]),
        ([2, 1], [2, 1, 1]),
        ([2.5, 1.5], [2.5, 1.5, 1.5]),
        (
            [Timestamp("2020-12-31"), Timestamp("2019-12-31")],
            [Timestamp("2020-12-31"), Timestamp("2019-12-31"), Timestamp("2019-12-31")],
        ),
    ],
)
def test_join_multiindex_not_alphabetical_categorical(categories, values):
    """Join on a MultiIndex with non-alphabetically ordered categoricals."""
    # GH#38502
    left = DataFrame(
        {
            "first": ["A", "A"],
            "second": Categorical(categories, categories=categories),
            "value": [1, 2],
        }
    ).set_index(["first", "second"])
    right = DataFrame(
        {
            "first": ["A", "A", "B"],
            "second": Categorical(values, categories=categories),
            "value": [3, 4, 5],
        }
    ).set_index(["first", "second"])
    result = left.join(right, lsuffix="_left", rsuffix="_right")
    # Only the two left keys survive; ("B", ...) has no left counterpart.
    expected = DataFrame(
        {
            "first": ["A", "A"],
            "second": Categorical(categories, categories=categories),
            "value_left": [1, 2],
            "value_right": [3, 4],
        }
    ).set_index(["first", "second"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "left_empty, how, exp",
    [
        (False, "left", "left"),
        (False, "right", "empty"),
        (False, "inner", "empty"),
        (False, "outer", "left"),
        (False, "cross", "empty"),
        (True, "left", "empty"),
        (True, "right", "right"),
        (True, "inner", "empty"),
        (True, "outer", "right"),
        (True, "cross", "empty"),
    ],
)
def test_join_empty(left_empty, how, exp):
    """Join where one side is empty: result mirrors the non-empty side or is empty."""
    left = DataFrame({"A": [2, 1], "B": [3, 4]}, dtype="int64").set_index("A")
    right = DataFrame({"A": [1], "C": [5]}, dtype="int64").set_index("A")
    # Empty out whichever side the parametrization requests.
    if left_empty:
        left = left.head(0)
    else:
        right = right.head(0)
    result = left.join(right, how=how)
    if exp == "left":
        expected = DataFrame({"A": [2, 1], "B": [3, 4], "C": [np.nan, np.nan]})
        expected = expected.set_index("A")
    elif exp == "right":
        expected = DataFrame({"B": [np.nan], "A": [1], "C": [5]})
        expected = expected.set_index("A")
    elif exp == "empty":
        expected = DataFrame(columns=["B", "C"], dtype="int64")
        # A cross join discards the index name entirely.
        if how != "cross":
            expected = expected.rename_axis("A")
    if how == "outer":
        expected = expected.sort_index()
    tm.assert_frame_equal(result, expected)
def test_join_empty_uncomparable_columns():
# GH 57048
df1 = DataFrame()
df2 = DataFrame(columns=["test"])
df3 = DataFrame(columns=["foo", ("bar", "baz")])
result = df1 + df2
expected = DataFrame(columns=["test"])
tm.assert_frame_equal(result, expected)
result = df2 + df3
expected = DataFrame(columns=[("bar", "baz"), "foo", "test"])
tm.assert_frame_equal(result, expected)
result = df1 + df3
expected = DataFrame(columns=[("bar", "baz"), "foo"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "how, values",
    [
        ("inner", [0, 1, 2]),
        ("outer", [0, 1, 2]),
        ("left", [0, 1, 2]),
        ("right", [0, 2, 1]),
    ],
)
def test_join_multiindex_categorical_output_index_dtype(how, values):
    """Join on a categorical MultiIndex keeps categorical dtype in the result."""
    # GH#50906
    df1 = DataFrame(
        {
            "a": Categorical([0, 1, 2]),
            "b": Categorical([0, 1, 2]),
            "c": [0, 1, 2],
        }
    ).set_index(["a", "b"])
    df2 = DataFrame(
        {
            "a": Categorical([0, 2, 1]),
            "b": Categorical([0, 2, 1]),
            "d": [0, 2, 1],
        }
    ).set_index(["a", "b"])
    # Row order follows the joining side, hence values varies per `how`.
    expected = DataFrame(
        {
            "a": Categorical(values),
            "b": Categorical(values),
            "c": values,
            "d": values,
        }
    ).set_index(["a", "b"])
    result = df1.join(df2, how=how)
    tm.assert_frame_equal(result, expected)
def test_join_multiindex_with_none_as_label():
    # GH 58721
    """Join MultiIndexes where a level name is None on one side."""
    df1 = DataFrame(
        {"A": [1]},
        index=MultiIndex.from_tuples([(3, 3)], names=["X", None]),
    )
    df2 = DataFrame(
        {"B": [2]},
        index=MultiIndex.from_tuples([(3, 3)], names=[None, "X"]),
    )
    # Joining in either direction keeps the calling frame's level names.
    result12 = df1.join(df2)
    tm.assert_frame_equal(
        result12,
        DataFrame(
            {"A": [1], "B": [2]},
            index=MultiIndex.from_tuples([(3, 3)], names=["X", None]),
        ),
    )
    result21 = df2.join(df1)
    tm.assert_frame_equal(
        result21,
        DataFrame(
            {"B": [2], "A": [1]},
            index=MultiIndex.from_tuples([(3, 3)], names=[None, "X"]),
        ),
    )
| TestJoin |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 80707,
"end": 83333
} | class ____(NonStrictDataModel):
"""
:param name: Name of the parameter. Should be unique
:type name: str
:param value: Value of the parameter
:type value: str
:param type: Type of the parameter. Optional
:type type: str
:param description: The parameter description. Optional
:type description: str
"""
_schema = {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. Should be unique",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(self, name=None, value=None, type=None, description=None, **kwargs):
super(ConfigurationItem, self).__init__(**kwargs)
self.name = name
self.value = value
self.type = type
self.description = description
@schema_property("name")
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("value")
def value(self):
return self._property_value
@value.setter
def value(self, value):
if value is None:
self._property_value = None
return
self.assert_isinstance(value, "value", six.string_types)
self._property_value = value
@schema_property("type")
def type(self):
return self._property_type
@type.setter
def type(self, value):
if value is None:
self._property_type = None
return
self.assert_isinstance(value, "type", six.string_types)
self._property_type = value
@schema_property("description")
def description(self):
return self._property_description
@description.setter
def description(self, value):
if value is None:
self._property_description = None
return
self.assert_isinstance(value, "description", six.string_types)
self._property_description = value
| ConfigurationItem |
python | pikepdf__pikepdf | src/pikepdf/canvas.py | {
"start": 30834,
"end": 33253
} | class ____:
"""Canvas for rendering PDFs with pikepdf.
All drawing is done on a pikepdf canvas using the ``.do`` property.
This interface manages the graphics state of the canvas.
A Canvas can be exported as a single page Pdf using ``.to_pdf``. This Pdf can
then be merged into other PDFs or written to a file.
"""
def __init__(self, *, page_size: tuple[int | float, int | float]):
"""Initialize a canvas."""
self.page_size = page_size
self._pdf = Pdf.new()
self._page = self._pdf.add_blank_page(page_size=page_size)
self._page.Resources = Dictionary(Font=Dictionary(), XObject=Dictionary())
self._cs = ContentStreamBuilder()
self._images: list[LoadedImage] = []
self._accessor = _CanvasAccessor(self._cs, self._images)
self.do.push()
def add_font(self, resource_name: Name, font: Font):
"""Add a font to the page."""
self._page.Resources.Font[resource_name] = font.register(self._pdf)
@property
def do(self) -> _CanvasAccessor:
"""Do operations on the current graphics state."""
return self._accessor
def _save_image(self, li: LoadedImage):
return self._pdf.make_stream(
li.image.tobytes(),
Width=li.image.width,
Height=li.image.height,
ColorSpace=(
Name.DeviceGray if li.image.mode in ("1", "L") else Name.DeviceRGB
),
Type=Name.XObject,
Subtype=Name.Image,
BitsPerComponent=1 if li.image.mode == '1' else 8,
)
def to_pdf(self) -> Pdf:
"""Render the canvas as a single page PDF."""
self.do.pop()
if self._accessor._stack_depth != 0:
log.warning(
"Graphics state stack is not empty when page saved - "
"rendering may be incorrect"
)
self._page.Contents = self._pdf.make_stream(self._cs.build())
for li in self._images:
self._page.Resources.XObject[li.name] = self._save_image(li)
bio = BytesIO()
self._pdf.save(bio)
bio.seek(0)
result = Pdf.open(bio)
# Reset the graphics state to before we saved the page
self.do.push()
return result
def _repr_mimebundle_(self, include=None, exclude=None):
return self.to_pdf()._repr_mimebundle_(include, exclude)
| Canvas |
python | Netflix__metaflow | metaflow/plugins/cards/card_datastore.py | {
"start": 585,
"end": 650
} | class ____:
DATA = "data.json"
CARD = "html"
| CardNameSuffix |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/concrete_function.py | {
"start": 39059,
"end": 43541
} | class ____(_TapeGradientFunctions):
"""Caches tape-friendly functions for higher-order gradients."""
# TODO(b/136189779): Cond/while under a tape may need similar logic. Consider
# generalizing if so.
def _forward_and_backward_functions(self, inference_args, input_tangents):
"""Forward and backward functions suitable for higher-order gradients.
Unlike in `_FirstOrderTapeGradientFunctions`, the backward function built by
this method accepts gradients for all of the outputs of the returned forward
function, including side outputs.
Args:
inference_args: A flat list of Tensors, arguments to the inference
function.
input_tangents: A flat list of Tensors, jvps associated with
`inference_args`.
Returns:
A tuple of (forward_function, backward_function):
forward_function: Takes the same inputs as the inference function, but
returns side outputs used by backward_function in addition to the
inference function's outputs.
backward_function: Takes side outputs from forward_function and
gradients with respect to all of its outputs, real and side. Returns
gradients with respect to the inputs.
"""
outputs = []
iteration_count = 0
# First we need to figure out how many side outputs from the forward pass
# will be required. We do this in a temporary graph to avoid actually
# running multiple copies of the backward pass (one per _GradientsHelper
# call).
#
# While computing gradients, the backward function captures Tensors from
# the forward function. We add these as side outputs of the original
# function. However, we then need to accept output gradients with respect
# to these side outputs for higher order gradients to work. Thus we loop
# until the number of outputs of the function stabilizes. Note that this
# is only required for tape gradients, where we need to declare in advance
# all of the forward op's outputs: symbolic gradients with tf.gradients
# instead rely on regenerating backward functions when higher-order
# gradients are requested.
while (len(outputs) < len(self._func_graph.outputs)
# It's possible for gradient generation to add new ops to the forward
# pass. If all of the new outputs are non-trainable, there's no
# reason to continue.
and any(backprop_util.IsTrainable(output)
for output in self._func_graph.outputs[len(outputs):])):
iteration_count += 1
if iteration_count >= 20 and iteration_count % 5 == 0:
new_op_with_trainable_output = None
num_new_trainable_outputs = 0
for output in self._func_graph.outputs[len(outputs):]:
if backprop_util.IsTrainable(output):
num_new_trainable_outputs += 1
new_op_with_trainable_output = output.op
logging.warning(
("Determining side outputs for the function '{}' is taking longer "
"than expected ({} iterations, typically this converges in 5 or "
"so). This could indicate that a gradient registration is adding "
"new ops to the forward pass every time gradients are generated. "
"{} new trainable output(s) were added this iteration, one from "
"the following op:\n {}\nThis may indicate a TensorFlow bug, or "
"an issue in a tf.custom_gradient.")
.format(
self._func_graph.name, iteration_count,
num_new_trainable_outputs, new_op_with_trainable_output))
outputs = list(self._func_graph.outputs)
self._build_functions_for_outputs(
outputs, inference_args, input_tangents)
(forward_function, forward_graph,
backward_function, output_indices, num_output_tangents) = (
self._build_functions_for_outputs(
outputs, inference_args, input_tangents))
if (len(self._func_graph.outputs) > len(outputs)
and any(backprop_util.IsTrainable(output)
for output in self._func_graph.outputs[len(outputs):])):
raise errors.InternalError(
"Unexpectedly added new outputs to the forward function when "
"building the backward function: "
f"{self._func_graph.outputs[len(outputs):]}.")
return (forward_function, forward_graph, backward_function, output_indices,
num_output_tangents)
| _HigherOrderTapeGradientFunctions |
python | PrefectHQ__prefect | tests/server/orchestration/test_core_policy.py | {
"start": 11552,
"end": 17471
} | class ____:
@pytest.mark.parametrize(
["expiration", "expected_status", "expected_name"],
[
(now("UTC") - timedelta(days=1), SetStateStatus.ACCEPT, "Running"),
(now("UTC") + timedelta(days=1), SetStateStatus.REJECT, "Cached"),
(None, SetStateStatus.REJECT, "Cached"),
],
ids=["past", "future", "null"],
)
async def test_set_and_retrieve_unexpired_cached_states(
self,
session,
initialize_orchestration,
expiration,
expected_status,
expected_name,
):
caching_policy = [CacheInsertion, CacheRetrieval]
# this first proposed state is added to the cache table
initial_state_type = states.StateType.RUNNING
proposed_state_type = states.StateType.COMPLETED
intended_transition = (initial_state_type, proposed_state_type)
ctx1 = await initialize_orchestration(
session,
"task",
*intended_transition,
initial_details={"cache_key": "cache-hit", "cache_expiration": expiration},
proposed_details={"cache_key": "cache-hit", "cache_expiration": expiration},
)
async with contextlib.AsyncExitStack() as stack:
for rule in caching_policy:
ctx1 = await stack.enter_async_context(rule(ctx1, *intended_transition))
await ctx1.validate_proposed_state()
assert ctx1.response_status == SetStateStatus.ACCEPT
initial_state_type = states.StateType.PENDING
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
ctx2 = await initialize_orchestration(
session,
"task",
*intended_transition,
initial_details={"cache_key": "cache-hit", "cache_expiration": expiration},
proposed_details={"cache_key": "cache-hit", "cache_expiration": expiration},
)
async with contextlib.AsyncExitStack() as stack:
for rule in caching_policy:
ctx2 = await stack.enter_async_context(rule(ctx2, *intended_transition))
await ctx2.validate_proposed_state()
assert ctx2.response_status == expected_status
assert ctx2.validated_state.name == expected_name
async def test_cache_insertion_requires_validated_state(
self,
session,
initialize_orchestration,
):
"""Regression test for the bug observed when exiting the CacheInsertion
rule after a database error that prevented committing the validated state"""
initial_state_type = states.StateType.RUNNING
proposed_state_type = states.StateType.COMPLETED
intended_transition = (initial_state_type, proposed_state_type)
expiration = now("UTC") - timedelta(days=1)
ctx1 = await initialize_orchestration(
session,
"task",
*intended_transition,
initial_details={"cache_key": "cache-hit", "cache_expiration": expiration},
proposed_details={"cache_key": "cache-hit", "cache_expiration": expiration},
)
with pytest.raises(ValueError, match="this better be mine"):
async with contextlib.AsyncExitStack() as stack:
ctx1 = await stack.enter_async_context(
CacheInsertion(ctx1, *intended_transition)
)
# Simulate an exception (for example, a database error) that happens
# within the context manager; when this happens, we've observed the
# CacheInsertion to raise:
#
# AttributeError: 'NoneType' object has no attribute 'state_details'
#
# because the `validated_state` passed to its after_transition
# handler is None
raise ValueError("this better be mine")
@pytest.mark.parametrize(
"proposed_state_type",
# Include all state types but COMPLETED; cast to sorted list for determinism
list(sorted(set(states.StateType) - set([states.StateType.COMPLETED]))),
ids=lambda statetype: statetype.name,
)
async def test_only_cache_completed_states(
self,
session,
initialize_orchestration,
proposed_state_type,
):
caching_policy = [CacheInsertion, CacheRetrieval]
# this first proposed state is added to the cache table
initial_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
ctx1 = await initialize_orchestration(
session,
"task",
*intended_transition,
initial_details={"cache_key": "cache-hit"},
proposed_details={"cache_key": "cache-hit"},
)
async with contextlib.AsyncExitStack() as stack:
for rule in caching_policy:
ctx1 = await stack.enter_async_context(rule(ctx1, *intended_transition))
await ctx1.validate_proposed_state()
assert ctx1.response_status == SetStateStatus.ACCEPT
initial_state_type = states.StateType.PENDING
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
ctx2 = await initialize_orchestration(
session,
"task",
*intended_transition,
initial_details={"cache_key": "cache-hit"},
proposed_details={"cache_key": "cache-hit"},
)
async with contextlib.AsyncExitStack() as stack:
for rule in caching_policy:
ctx2 = await stack.enter_async_context(rule(ctx2, *intended_transition))
await ctx2.validate_proposed_state()
assert ctx2.response_status == SetStateStatus.ACCEPT
| TestCachingBackendLogic |
python | pytorch__pytorch | test/distributed/test_c10d_gloo.py | {
"start": 85730,
"end": 86214
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, use_fc3=True):
x = self.relu(self.fc1(x)).float()
x = self.relu(self.fc2(x)).float()
if use_fc3:
x = self.fc3(x).float()
return F.softmax(x, dim=1)
| ReducerModule |
python | doocs__leetcode | solution/0900-0999/0940.Distinct Subsequences II/Solution3.py | {
"start": 0,
"end": 293
} | class ____:
def distinctSubseqII(self, s: str) -> int:
mod = 10**9 + 7
dp = [0] * 26
ans = 0
for c in s:
i = ord(c) - ord('a')
add = ans - dp[i] + 1
ans = (ans + add) % mod
dp[i] += add
return ans
| Solution |
python | numba__numba | numba/tests/test_dyn_func.py | {
"start": 574,
"end": 811
} | class ____(TestCase):
def test_issue_455(self):
inst = Issue455()
inst.create_f()
a = inst.call_f()
self.assertPreciseEqual(a, np.ones_like(a))
if __name__ == '__main__':
unittest.main()
| TestDynFunc |
python | django__django | tests/backends/test_ddl_references.py | {
"start": 9846,
"end": 13384
} | class ____(TransactionTestCase):
available_apps = []
def setUp(self):
compiler = Person.objects.all().query.get_compiler(connection.alias)
self.editor = connection.schema_editor()
self.expressions = Expressions(
table=Person._meta.db_table,
expressions=ExpressionList(
IndexExpression(F("first_name")),
IndexExpression(F("last_name").desc()),
IndexExpression(Upper("last_name")),
).resolve_expression(compiler.query),
compiler=compiler,
quote_value=self.editor.quote_value,
)
def test_references_table(self):
self.assertIs(self.expressions.references_table(Person._meta.db_table), True)
self.assertIs(self.expressions.references_table("other"), False)
def test_references_column(self):
table = Person._meta.db_table
self.assertIs(self.expressions.references_column(table, "first_name"), True)
self.assertIs(self.expressions.references_column(table, "last_name"), True)
self.assertIs(self.expressions.references_column(table, "other"), False)
def test_rename_table_references(self):
table = Person._meta.db_table
self.expressions.rename_table_references(table, "other")
self.assertIs(self.expressions.references_table(table), False)
self.assertIs(self.expressions.references_table("other"), True)
self.assertIn(
"%s.%s"
% (
self.editor.quote_name("other"),
self.editor.quote_name("first_name"),
),
str(self.expressions),
)
def test_rename_table_references_without_alias(self):
compiler = Query(Person, alias_cols=False).get_compiler(connection=connection)
table = Person._meta.db_table
expressions = Expressions(
table=table,
expressions=ExpressionList(
IndexExpression(Upper("last_name")),
IndexExpression(F("first_name")),
).resolve_expression(compiler.query),
compiler=compiler,
quote_value=self.editor.quote_value,
)
expressions.rename_table_references(table, "other")
self.assertIs(expressions.references_table(table), False)
self.assertIs(expressions.references_table("other"), True)
expected_str = "(UPPER(%s)), %s" % (
self.editor.quote_name("last_name"),
self.editor.quote_name("first_name"),
)
self.assertEqual(str(expressions), expected_str)
def test_rename_column_references(self):
table = Person._meta.db_table
self.expressions.rename_column_references(table, "first_name", "other")
self.assertIs(self.expressions.references_column(table, "other"), True)
self.assertIs(self.expressions.references_column(table, "first_name"), False)
self.assertIn(
"%s.%s" % (self.editor.quote_name(table), self.editor.quote_name("other")),
str(self.expressions),
)
def test_str(self):
table_name = self.editor.quote_name(Person._meta.db_table)
expected_str = "%s.%s, %s.%s DESC, (UPPER(%s.%s))" % (
table_name,
self.editor.quote_name("first_name"),
table_name,
self.editor.quote_name("last_name"),
table_name,
self.editor.quote_name("last_name"),
)
self.assertEqual(str(self.expressions), expected_str)
| ExpressionsTests |
python | django__django | tests/model_fields/models.py | {
"start": 4172,
"end": 4269
} | class ____(models.Model):
nbfield = models.BooleanField(null=True, blank=True)
| NullBooleanModel |
python | jazzband__django-simple-history | simple_history/tests/tests/utils.py | {
"start": 3554,
"end": 3662
} | class ____(Enum):
ADD = "add"
CHANGE = "change"
DELETE = "delete"
VIEW = "view"
| PermissionAction |
python | ray-project__ray | python/ray/_private/worker.py | {
"start": 9735,
"end": 10846
} | class ____(HasOptions, Generic[R, T0, T1, T2, T3, T4, T5, T6, T7, T8]):
def __init__(
self, function: Callable[[T0, T1, T2, T3, T4, T5, T6, T7, T8], R]
) -> None:
pass
def remote(
self,
__arg0: "Union[T0, ObjectRef[T0]]",
__arg1: "Union[T1, ObjectRef[T1]]",
__arg2: "Union[T2, ObjectRef[T2]]",
__arg3: "Union[T3, ObjectRef[T3]]",
__arg4: "Union[T4, ObjectRef[T4]]",
__arg5: "Union[T5, ObjectRef[T5]]",
__arg6: "Union[T6, ObjectRef[T6]]",
__arg7: "Union[T7, ObjectRef[T7]]",
__arg8: "Union[T8, ObjectRef[T8]]",
) -> "ObjectRef[R]":
...
def bind(
self,
__arg0: "Union[T0, DAGNode[T0]]",
__arg1: "Union[T1, DAGNode[T1]]",
__arg2: "Union[T2, DAGNode[T2]]",
__arg3: "Union[T3, DAGNode[T3]]",
__arg4: "Union[T4, DAGNode[T4]]",
__arg5: "Union[T5, DAGNode[T5]]",
__arg6: "Union[T6, DAGNode[T6]]",
__arg7: "Union[T7, DAGNode[T7]]",
__arg8: "Union[T8, DAGNode[T8]]",
) -> "DAGNode[R]":
...
| RemoteFunction8 |
python | pytorch__pytorch | test/test_testing.py | {
"start": 17200,
"end": 18058
} | class ____(TestCase):
@classmethod
def setUpClass(cls):
# store something on the test class to query during teardown
cls.stored_thing = "called with " + cls.__name__
@classmethod
def tearDownClass(cls):
# throw here so we know teardown was run
raise RuntimeError(cls.stored_thing)
def test_bar(self, device):
# make sure the test can access the stored thing
print(self.stored_thing)
instantiate_device_type_tests(TestFoo, globals(), only_for='{self.device_type}')
if __name__ == '__main__':
run_tests()
""")
expected_device_class_name = f"TestFoo{self.device_type.upper()}"
expected_error_text = f"RuntimeError: called with {expected_device_class_name}"
self.assertIn(expected_error_text, stderr)
instantiate_device_type_tests(TestTesting, globals())
| TestFoo |
python | ansible__ansible | test/units/mock/loader.py | {
"start": 969,
"end": 3483
} | class ____(DataLoader):
def __init__(self, file_mapping=None):
file_mapping = {} if file_mapping is None else file_mapping
assert isinstance(file_mapping, dict)
super(DictDataLoader, self).__init__()
self._file_mapping = file_mapping
self._build_known_directories()
self._vault_secrets = None
def load_from_file(self, file_name: str, cache='all', unsafe: bool = False, json_only: bool = False, trusted_as_template: bool = False) -> t.Any:
data = None
path = to_text(file_name)
if path in self._file_mapping:
abs_path = os.path.join(self.get_basedir(), path)
data = self.load(self._file_mapping[path], abs_path, json_only=json_only)
if trusted_as_template:
data = TrustedAsTemplate().tag(data)
return data
# TODO: the real _get_file_contents returns a bytestring, so we actually convert the
# unicode/text it's created with to utf-8
def _get_file_contents(self, file_name):
return to_bytes(self._file_mapping[file_name]), False
def path_exists(self, path):
path = to_text(path)
return path in self._file_mapping or path in self._known_directories
def is_file(self, path):
path = to_text(path)
return path in self._file_mapping
def is_directory(self, path):
path = to_text(path)
return path in self._known_directories
def list_directory(self, path):
ret = []
path = to_text(path)
for x in (list(self._file_mapping.keys()) + self._known_directories):
if x.startswith(path):
if os.path.dirname(x) == path:
ret.append(os.path.basename(x))
return ret
def is_executable(self, path):
# FIXME: figure out a way to make paths return true for this
return False
def _add_known_directory(self, directory):
if directory not in self._known_directories:
self._known_directories.append(directory)
def _build_known_directories(self):
self._known_directories = []
for path in self._file_mapping:
dirname = os.path.dirname(path)
while dirname not in ('/', ''):
self._add_known_directory(dirname)
dirname = os.path.dirname(dirname)
def get_basedir(self):
return os.getcwd()
def set_vault_secrets(self, vault_secrets):
self._vault_secrets = vault_secrets
| DictDataLoader |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_univariate.py | {
"start": 6823,
"end": 7730
} | class ____(Benchmark):
"""
Univariate Problem09 objective function.
This class defines the Univariate Problem09 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Problem09}}(x) = \\sin(x) + \\sin \\left(\\frac{2}{3} x \\right)
Bound constraints: :math:`x \\in [3.1, 20.4]`
.. figure:: figures/Problem09.png
:alt: Univariate Problem09 function
:align: center
**Univariate Problem09 function**
*Global optimum*: :math:`f(x)=-1.90596` for :math:`x = 17.039`
"""
def __init__(self, dimensions=1):
Benchmark.__init__(self, dimensions)
self._bounds = [(3.1, 20.4)]
self.global_optimum = 17.039
self.fglob = -1.90596
def fun(self, x, *args):
self.nfev += 1
x = x[0]
return sin(x) + sin(2.0 / 3.0 * x)
| Problem09 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramInference2.py | {
"start": 377,
"end": 608
} | class ____(Parent1[float]):
def method1(self, a, b):
reveal_type(self, expected_text="Self@Child1")
reveal_type(a, expected_text="float")
reveal_type(b, expected_text="list[float]")
return a
| Child1 |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 33471,
"end": 33668
} | class ____:
xlCellChangeApplied = 3 # from enum XlCellChangedState
xlCellChanged = 2 # from enum XlCellChangedState
xlCellNotChanged = 1 # from enum XlCellChangedState
| CellChangedState |
python | doocs__leetcode | lcci/16.01.Swap Numbers/Solution.py | {
"start": 0,
"end": 198
} | class ____:
def swapNumbers(self, numbers: List[int]) -> List[int]:
numbers[0] ^= numbers[1]
numbers[1] ^= numbers[0]
numbers[0] ^= numbers[1]
return numbers
| Solution |
python | pytest-dev__pytest-asyncio | docs/reference/markers/class_scoped_loop_strict_mode_example.py | {
"start": 73,
"end": 359
} | class ____:
loop: asyncio.AbstractEventLoop
async def test_remember_loop(self):
TestClassScopedLoop.loop = asyncio.get_running_loop()
async def test_this_runs_in_same_loop(self):
assert asyncio.get_running_loop() is TestClassScopedLoop.loop
| TestClassScopedLoop |
python | facelessuser__pymdown-extensions | pymdownx/progressbar.py | {
"start": 3691,
"end": 4066
} | class ____(AttrListTreeprocessor):
"""Used for AttrList compatibility."""
def run(self, elem):
"""Inline check for attributes at start of tail."""
if elem.tail:
m = self.INLINE_RE.match(elem.tail)
if m:
self.assign_attrs(elem, m.group(1))
elem.tail = elem.tail[m.end():]
| ProgressBarTreeProcessor |
python | doocs__leetcode | solution/3400-3499/3412.Find Mirror Score of a String/Solution.py | {
"start": 0,
"end": 340
} | class ____:
def calculateScore(self, s: str) -> int:
d = defaultdict(list)
ans = 0
for i, x in enumerate(s):
y = chr(ord("a") + ord("z") - ord(x))
if d[y]:
j = d[y].pop()
ans += i - j
else:
d[x].append(i)
return ans
| Solution |
python | ansible__ansible | test/units/plugins/lookup/test_password.py | {
"start": 15438,
"end": 16204
} | class ____(unittest.TestCase):
def setUp(self):
self.makedirs_safe = password.makedirs_safe
self.os_chmod = password.os.chmod
password.makedirs_safe = self.noop
password.os.chmod = self.noop
def noop(self, *args, **kwargs):
pass
def tearDown(self):
password.makedirs_safe = self.makedirs_safe
password.os.chmod = self.os_chmod
def test_content_written(self):
with patch.object(builtins, 'open', mock_open()) as m:
password._write_password_file(b'/this/is/a/test/caf\xc3\xa9', u'Testing Café')
m.assert_called_once_with(b'/this/is/a/test/caf\xc3\xa9', 'wb')
m().write.assert_called_once_with(u'Testing Café\n'.encode('utf-8'))
| TestWritePasswordFile |
python | scikit-learn__scikit-learn | sklearn/feature_selection/_univariate_selection.py | {
"start": 24522,
"end": 28332
} | class ____(_BaseFilter):
"""Select features according to the k highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable, default=f_classif
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See Also"). The default function only
works with classification tasks.
.. versionadded:: 0.18
k : int or "all", default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like of shape (n_features,)
Scores of features.
pvalues_ : array-like of shape (n_features,)
p-values of feature scores, None if `score_func` returned only scores.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest
scores.
SelectFpr : Select features based on a false positive rate test.
SelectFdr : Select features based on an estimated false discovery rate.
SelectFwe : Select features based on family-wise error rate.
GenericUnivariateSelect : Univariate feature selector with configurable
mode.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
This filter supports unsupervised feature selection that only requests `X` for
computing the scores.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> X, y = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> X_new = SelectKBest(chi2, k=20).fit_transform(X, y)
>>> X_new.shape
(1797, 20)
"""
_parameter_constraints: dict = {
**_BaseFilter._parameter_constraints,
"k": [StrOptions({"all"}), Interval(Integral, 0, None, closed="left")],
}
def __init__(self, score_func=f_classif, *, k=10):
super().__init__(score_func=score_func)
self.k = k
def _check_params(self, X, y):
if not isinstance(self.k, str) and self.k > X.shape[1]:
warnings.warn(
f"k={self.k} is greater than n_features={X.shape[1]}. "
"All the features will be returned."
)
def _get_support_mask(self):
check_is_fitted(self)
if self.k == "all":
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k :]] = 1
return mask
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.target_tags.required = False
return tags
| SelectKBest |
python | encode__django-rest-framework | rest_framework/authtoken/models.py | {
"start": 1553,
"end": 1944
} | class ____(Token):
"""
Proxy mapping pk to user pk for use in admin.
"""
@property
def pk(self):
return self.user_id
class Meta:
proxy = 'rest_framework.authtoken' in settings.INSTALLED_APPS
abstract = 'rest_framework.authtoken' not in settings.INSTALLED_APPS
verbose_name = _("Token")
verbose_name_plural = _("Tokens")
| TokenProxy |
python | encode__django-rest-framework | tests/test_validators.py | {
"start": 24530,
"end": 32082
} | class ____(TestCase):
def setUp(self):
self.instance = UniqueConstraintModel.objects.create(
race_name='example',
position=1,
global_id=1,
fancy_conditions=1
)
UniqueConstraintModel.objects.create(
race_name='example',
position=2,
global_id=2,
fancy_conditions=1
)
UniqueConstraintModel.objects.create(
race_name='other',
position=1,
global_id=3,
fancy_conditions=1
)
def test_repr(self):
serializer = UniqueConstraintSerializer()
# the order of validators isn't deterministic so delete
# fancy_conditions field that has two of them
del serializer.fields['fancy_conditions']
expected = dedent(r"""
UniqueConstraintSerializer\(\):
id = IntegerField\(label='ID', read_only=True\)
race_name = CharField\(max_length=100, required=True\)
position = IntegerField\(.*required=True\)
global_id = IntegerField\(.*validators=\[<UniqueValidator\(queryset=UniqueConstraintModel.objects.all\(\)\)>\]\)
class Meta:
validators = \[<UniqueTogetherValidator\(queryset=UniqueConstraintModel.objects.all\(\), fields=\('race_name', 'position'\), condition=<Q: \(AND: \('race_name', 'example'\)\)>\)>\]
""")
assert re.search(expected, repr(serializer)) is not None
def test_unique_together_condition(self):
"""
Fields used in UniqueConstraint's condition must be included
into queryset existence check
"""
UniqueConstraintModel.objects.create(
race_name='condition',
position=1,
global_id=10,
fancy_conditions=10,
)
serializer = UniqueConstraintSerializer(data={
'race_name': 'condition',
'position': 1,
'global_id': 11,
'fancy_conditions': 9,
})
assert serializer.is_valid()
serializer = UniqueConstraintSerializer(data={
'race_name': 'condition',
'position': 1,
'global_id': 11,
'fancy_conditions': 11,
})
assert not serializer.is_valid()
def test_unique_together_condition_fields_required(self):
"""
Fields used in UniqueConstraint's condition must be present in serializer
"""
serializer = UniqueConstraintSerializer(data={
'race_name': 'condition',
'position': 1,
'global_id': 11,
})
assert not serializer.is_valid()
assert serializer.errors == {'fancy_conditions': ['This field is required.']}
class NoFieldsSerializer(serializers.ModelSerializer):
class Meta:
model = UniqueConstraintModel
fields = ('race_name', 'position', 'global_id')
serializer = NoFieldsSerializer()
assert len(serializer.validators) == 1
def test_single_field_uniq_validators(self):
"""
UniqueConstraint with single field must be transformed into
field's UniqueValidator
"""
# Django 5 includes Max and Min values validators for IntegerField
extra_validators_qty = 2 if django_version[0] >= 5 else 0
serializer = UniqueConstraintSerializer()
assert len(serializer.validators) == 2
validators = serializer.fields['global_id'].validators
assert len(validators) == 1 + extra_validators_qty
assert validators[0].queryset == UniqueConstraintModel.objects
validators = serializer.fields['fancy_conditions'].validators
assert len(validators) == 2 + extra_validators_qty
ids_in_qs = {frozenset(v.queryset.values_list(flat=True)) for v in validators if hasattr(v, "queryset")}
assert ids_in_qs == {frozenset([1]), frozenset([3])}
def test_nullable_unique_constraint_fields_are_not_required(self):
serializer = UniqueConstraintNullableSerializer(data={'title': 'Bob'})
self.assertTrue(serializer.is_valid(), serializer.errors)
result = serializer.save()
self.assertIsInstance(result, UniqueConstraintNullableModel)
def test_unique_constraint_source(self):
class SourceUniqueConstraintSerializer(serializers.ModelSerializer):
raceName = serializers.CharField(source="race_name")
class Meta:
model = UniqueConstraintModel
fields = ("raceName", "position", "global_id", "fancy_conditions")
serializer = SourceUniqueConstraintSerializer(
data={
"raceName": "example",
"position": 5,
"global_id": 11,
"fancy_conditions": 11,
}
)
assert serializer.is_valid()
def test_uniq_constraint_condition_read_only_create(self):
class UniqueConstraintReadOnlyFieldModelSerializer(serializers.ModelSerializer):
class Meta:
model = UniqueConstraintReadOnlyFieldModel
read_only_fields = ("state",)
fields = ("position", "something", *read_only_fields)
serializer = UniqueConstraintReadOnlyFieldModelSerializer(
data={"position": 1, "something": 1}
)
assert serializer.is_valid()
def test_uniq_constraint_condition_read_only_partial(self):
class UniqueConstraintReadOnlyFieldModelSerializer(serializers.ModelSerializer):
class Meta:
model = UniqueConstraintReadOnlyFieldModel
read_only_fields = ("state",)
fields = ("position", "something", *read_only_fields)
instance = UniqueConstraintReadOnlyFieldModel.objects.create(position=1, something=1)
serializer = UniqueConstraintReadOnlyFieldModelSerializer(
instance=instance,
data={"position": 1, "something": 1},
partial=True
)
assert serializer.is_valid()
def test_unique_constraint_custom_message_code(self):
UniqueConstraintCustomMessageCodeModel.objects.create(username="Alice", company_id=1, role="member")
expected_code = "duplicate_username" if django_version[0] >= 5 else UniqueTogetherValidator.code
serializer = UniqueConstraintCustomMessageCodeSerializer(data={
"username": "Alice",
"company_id": 1,
"role": "admin",
})
assert not serializer.is_valid()
assert serializer.errors == {"non_field_errors": ["Username must be unique within a company."]}
assert serializer.errors["non_field_errors"][0].code == expected_code
def test_unique_constraint_default_message_code(self):
UniqueConstraintCustomMessageCodeModel.objects.create(username="Alice", company_id=1, role="member")
serializer = UniqueConstraintCustomMessageCodeSerializer(data={
"username": "John",
"company_id": 1,
"role": "member",
})
expected_message = UniqueTogetherValidator.message.format(field_names=', '.join(("company_id", "role")))
assert not serializer.is_valid()
assert serializer.errors == {"non_field_errors": [expected_message]}
assert serializer.errors["non_field_errors"][0].code == UniqueTogetherValidator.code
# Tests for `UniqueForDateValidator`
# ----------------------------------
| TestUniqueConstraintValidation |
python | getsentry__sentry | src/sentry/integrations/github_enterprise/webhook.py | {
"start": 1916,
"end": 1999
} | class ____(Exception):
"""Webhook payload not found"""
| MissingWebhookPayloadError |
python | apache__airflow | dev/breeze/tests/test_ui_commands.py | {
"start": 9379,
"end": 10427
} | class ____:
def test_remove_extra_translations(self, tmp_path):
from airflow_breeze.commands.ui_commands import remove_extra_translations
de_dir = tmp_path / "de"
de_dir.mkdir()
de_data = {"greeting": "Hallo", "extra": "Extra Key"}
(de_dir / "test.json").write_text(json.dumps(de_data))
import airflow_breeze.commands.ui_commands as ui_commands
original_locales_dir = ui_commands.LOCALES_DIR
ui_commands.LOCALES_DIR = tmp_path
try:
summary = LocaleSummary(
missing_keys={"de": []},
extra_keys={"de": ["extra"]},
)
remove_extra_translations("de", {"test.json": summary})
# Check that the extra key was removed
de_data_updated = json.loads((de_dir / "test.json").read_text())
assert "extra" not in de_data_updated
assert "greeting" in de_data_updated
finally:
ui_commands.LOCALES_DIR = original_locales_dir
| TestRemoveExtraTranslations |
python | getsentry__sentry | src/sentry/seer/endpoints/group_autofix_setup_check.py | {
"start": 3598,
"end": 6383
} | class ____(GroupAiEndpoint):
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.ML_AI
enforce_rate_limit = True
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {
RateLimitCategory.IP: RateLimit(limit=200, window=60, concurrent_limit=20),
RateLimitCategory.USER: RateLimit(limit=100, window=60, concurrent_limit=10),
RateLimitCategory.ORGANIZATION: RateLimit(
limit=1000, window=60, concurrent_limit=100
),
}
}
)
def get(self, request: Request, group: Group) -> Response:
"""
Checks if we are able to run Autofix on the given group.
"""
if not request.user.is_authenticated:
return Response(status=400)
org: Organization = request.organization
integration_check = None
# This check is to skip using the GitHub integration for Autofix in s4s.
# As we only use the github integration to get the code mappings, we can skip this check if the repos are hardcoded.
if not settings.SEER_AUTOFIX_FORCE_USE_REPOS:
integration_check = get_autofix_integration_setup_problems(
organization=org, project=group.project
)
write_integration_check = None
if request.query_params.get("check_write_access", False):
repos = get_repos_and_access(group.project, group.id)
write_access_ok = len(repos) > 0 and all(repo["ok"] for repo in repos)
write_integration_check = {
"ok": write_access_ok,
"repos": repos,
}
user_acknowledgement = get_seer_user_acknowledgement(
user_id=request.user.id, organization=org
)
org_acknowledgement = True
if not user_acknowledgement: # If the user has acknowledged, the org must have too.
org_acknowledgement = get_seer_org_acknowledgement(org)
has_autofix_quota: bool = quotas.backend.has_available_reserved_budget(
org_id=org.id, data_category=DataCategory.SEER_AUTOFIX
)
return Response(
{
"integration": {
"ok": integration_check is None,
"reason": integration_check,
},
"githubWriteIntegration": write_integration_check,
"setupAcknowledgement": {
"orgHasAcknowledged": org_acknowledgement,
"userHasAcknowledged": user_acknowledgement,
},
"billing": {
"hasAutofixQuota": has_autofix_quota,
},
}
)
| GroupAutofixSetupCheck |
python | google__jax | jax/_src/internal_test_util/test_harnesses.py | {
"start": 3243,
"end": 3402
} | class ____(NamedTuple):
"""Descriptor for a randomly generated argument.
See description of `Harness`.
"""
shape: tuple[int, ...]
dtype: DType
| RandArg |
python | scipy__scipy | scipy/integrate/_ivp/base.py | {
"start": 8770,
"end": 10004
} | class ____:
"""Base class for local interpolant over step made by an ODE solver.
It interpolates between `t_min` and `t_max` (see Attributes below).
Evaluation outside this interval is not forbidden, but the accuracy is not
guaranteed.
Attributes
----------
t_min, t_max : float
Time range of the interpolation.
"""
# generic type compatibility with scipy-stubs
__class_getitem__ = classmethod(GenericAlias)
def __init__(self, t_old, t):
self.t_old = t_old
self.t = t
self.t_min = min(t, t_old)
self.t_max = max(t, t_old)
def __call__(self, t):
"""Evaluate the interpolant.
Parameters
----------
t : float or array_like with shape (n_points,)
Points to evaluate the solution at.
Returns
-------
y : ndarray, shape (n,) or (n, n_points)
Computed values. Shape depends on whether `t` was a scalar or a
1-D array.
"""
t = np.asarray(t)
if t.ndim > 1:
raise ValueError("`t` must be a float or a 1-D array.")
return self._call_impl(t)
def _call_impl(self, t):
raise NotImplementedError
| DenseOutput |
python | ijl__orjson | test/test_numpy.py | {
"start": 35604,
"end": 35939
} | class ____:
def test_numpy_array_dimension_zero(self):
wrong_endianness = ">" if sys.byteorder == "little" else "<"
array = numpy.array([0, 1, 0.4, 5.7], dtype=f"{wrong_endianness}f8")
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(array, option=orjson.OPT_SERIALIZE_NUMPY)
| NumpyEndianness |
python | mwaskom__seaborn | seaborn/_statistics.py | {
"start": 18598,
"end": 20817
} | class ____:
def __init__(self, estimator, errorbar=None, **boot_kws):
"""
Data aggregator that produces a weighted estimate and error bar interval.
Parameters
----------
estimator : string
Function (or method name) that maps a vector to a scalar. Currently
supports only "mean".
errorbar : string or (string, number) tuple
Name of errorbar method or a tuple with a method name and a level parameter.
Currently the only supported method is "ci".
boot_kws
Additional keywords are passed to bootstrap when error_method is "ci".
"""
if estimator != "mean":
# Note that, while other weighted estimators may make sense (e.g. median),
# I'm not aware of an implementation in our dependencies. We can add one
# in seaborn later, if there is sufficient interest. For now, limit to mean.
raise ValueError(f"Weighted estimator must be 'mean', not {estimator!r}.")
self.estimator = estimator
method, level = _validate_errorbar_arg(errorbar)
if method is not None and method != "ci":
# As with the estimator, weighted 'sd' or 'pi' error bars may make sense.
# But we'll keep things simple for now and limit to (bootstrap) CI.
raise ValueError(f"Error bar method must be 'ci', not {method!r}.")
self.error_method = method
self.error_level = level
self.boot_kws = boot_kws
def __call__(self, data, var):
"""Aggregate over `var` column of `data` with estimate and error interval."""
vals = data[var]
weights = data["weight"]
estimate = np.average(vals, weights=weights)
if self.error_method == "ci" and len(data) > 1:
def error_func(x, w):
return np.average(x, weights=w)
boots = bootstrap(vals, weights, func=error_func, **self.boot_kws)
err_min, err_max = _percentile_interval(boots, self.error_level)
else:
err_min = err_max = np.nan
return pd.Series({var: estimate, f"{var}min": err_min, f"{var}max": err_max})
| WeightedAggregator |
python | ethereum__web3.py | tests/integration/go_ethereum/test_goethereum_http.py | {
"start": 3483,
"end": 3904
} | class ____(GoEthereumTxPoolModuleTest):
pass
# -- async -- #
@pytest_asyncio.fixture
async def async_w3(start_geth_process_and_yield_port):
port = start_geth_process_and_yield_port
_w3 = AsyncWeb3(
AsyncHTTPProvider(
f"http://127.0.0.1:{port}", request_kwargs={"timeout": ClientTimeout(10)}
)
)
yield _w3
await _w3.provider.disconnect()
| TestGoEthereumTxPoolModuleTest |
python | psf__requests | src/requests/exceptions.py | {
"start": 3709,
"end": 3785
} | class ____(RequestException):
"""Custom retries logic failed"""
| RetryError |
python | astropy__astropy | astropy/units/tests/test_format.py | {
"start": 12861,
"end": 13157
} | class ____(RoundtripBase):
format_ = u_format.FITS
@pytest.mark.parametrize(
"unit",
[u for u in u_format.FITS._units.values() if not isinstance(u, PrefixUnit)],
ids=str,
)
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
| TestRoundtripFITS |
python | tensorflow__tensorflow | tensorflow/python/ops/special_math_ops_test.py | {
"start": 8782,
"end": 10501
} | class ____(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_expint_boundary(self):
self.assertAllClose(-np.inf, special_math_ops.expint(0.))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.expint(np.nan))))
# Check that the domain of definition is [0, inf)
self.assertTrue(
np.all(
np.isnan(
self.evaluate(
special_math_ops.expint(
np.random.uniform(-20., -1., size=int(1e3)))))))
@parameterized.parameters(np.float32, np.float64)
def test_expint_small(self, dtype):
x = np.random.uniform(0., 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.expi(x), self.evaluate(special_math_ops.expint(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_expint_larger(self, dtype):
x = np.random.uniform(1., 50., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.expi(x), self.evaluate(special_math_ops.expint(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
def test_expint_gradient(self):
inputs = [np.random.uniform(1., 10., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.expint, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 5e-3)
@test_util.run_all_in_graph_and_eager_modes
| ExpintTest |
python | scrapy__scrapy | tests/test_utils_deprecate.py | {
"start": 8782,
"end": 9875
} | class ____:
def test_old_path_gets_fixed(self):
with warnings.catch_warnings(record=True) as w:
output = update_classpath("scrapy.contrib.debug.Debug")
assert output == "scrapy.extensions.debug.Debug"
assert len(w) == 1
assert "scrapy.contrib.debug.Debug" in str(w[0].message)
assert "scrapy.extensions.debug.Debug" in str(w[0].message)
def test_sorted_replacement(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ScrapyDeprecationWarning)
output = update_classpath("scrapy.contrib.pipeline.Pipeline")
assert output == "scrapy.pipelines.Pipeline"
def test_unmatched_path_stays_the_same(self):
with warnings.catch_warnings(record=True) as w:
output = update_classpath("scrapy.unmatched.Path")
assert output == "scrapy.unmatched.Path"
assert len(w) == 0
def test_returns_nonstring(self):
for notastring in [None, True, [1, 2, 3], object()]:
assert update_classpath(notastring) == notastring
| TestUpdateClassPath |
python | Textualize__textual | src/textual/dom.py | {
"start": 2617,
"end": 3326
} | class ____(Exception):
"""Exception raised if you supply a `id` attribute or class name in the wrong format."""
def check_identifiers(description: str, *names: str) -> None:
"""Validate identifier and raise an error if it fails.
Args:
description: Description of where identifier is used for error message.
*names: Identifiers to check.
"""
match = _re_identifier.fullmatch
for name in names:
if match(name) is None:
raise BadIdentifier(
f"{name!r} is an invalid {description}; "
"identifiers must contain only letters, numbers, underscores, or hyphens, and must not begin with a number."
)
| BadIdentifier |
python | jina-ai__jina | tests/integration/docarray_v2/test_streaming.py | {
"start": 405,
"end": 452
} | class ____(BaseDoc):
text: str
| OutputDocument |
python | numba__numba | numba/tests/test_unsafe_intrinsics.py | {
"start": 517,
"end": 2088
} | class ____(TestCase):
"""Tests for numba.unsafe.tuple
"""
def test_tuple_setitem(self):
@njit
def foo(tup, idxs, vals):
out_tup = tup
for i, v in zip(idxs, vals):
out_tup = tuple_setitem(out_tup, i, v)
return tup, out_tup
random.seed(123)
for _ in range(20):
# Random data
n = random.randint(1, 10)
tup = tuple([random.randint(0, n) for i in range(n)])
vals = tuple([random.randint(10, 20) for i in range(n)])
idxs = list(range(len(vals)))
random.shuffle(idxs)
idxs = tuple(idxs)
# Expect
expect_tup = tuple(tup)
expect_out = np.asarray(expect_tup)
expect_out[np.asarray(idxs)] = vals
# Got
got_tup, got_out = foo(tup, idxs, vals)
# Check
self.assertEqual(got_tup, expect_tup)
self.assertEqual(got_out, tuple(expect_out))
def test_slice_tuple(self):
@njit
def full_slice_array(a, n):
# Since numba slices can't be boxed at the moment
return a[build_full_slice_tuple(literally(n))]
for n in range(1, 3):
a = np.random.random(np.arange(n) + 1)
for i in range(1, n + 1):
np.testing.assert_array_equal(a, full_slice_array(a, i))
with self.assertRaises(TypingError):
# numpy would throw an IndexError here
full_slice_array(a, n + 1)
| TestTupleIntrinsic |
python | pypa__pip | src/pip/_vendor/rich/control.py | {
"start": 1537,
"end": 6484
} | class ____:
"""A renderable that inserts a control code (non printable but may move cursor).
Args:
*codes (str): Positional arguments are either a :class:`~rich.segment.ControlType` enum or a
tuple of ControlType and an integer parameter
"""
__slots__ = ["segment"]
def __init__(self, *codes: Union[ControlType, ControlCode]) -> None:
control_codes: List[ControlCode] = [
(code,) if isinstance(code, ControlType) else code for code in codes
]
_format_map = CONTROL_CODES_FORMAT
rendered_codes = "".join(
_format_map[code](*parameters) for code, *parameters in control_codes
)
self.segment = Segment(rendered_codes, None, control_codes)
@classmethod
def bell(cls) -> "Control":
"""Ring the 'bell'."""
return cls(ControlType.BELL)
@classmethod
def home(cls) -> "Control":
"""Move cursor to 'home' position."""
return cls(ControlType.HOME)
@classmethod
def move(cls, x: int = 0, y: int = 0) -> "Control":
"""Move cursor relative to current position.
Args:
x (int): X offset.
y (int): Y offset.
Returns:
~Control: Control object.
"""
def get_codes() -> Iterable[ControlCode]:
control = ControlType
if x:
yield (
control.CURSOR_FORWARD if x > 0 else control.CURSOR_BACKWARD,
abs(x),
)
if y:
yield (
control.CURSOR_DOWN if y > 0 else control.CURSOR_UP,
abs(y),
)
control = cls(*get_codes())
return control
@classmethod
def move_to_column(cls, x: int, y: int = 0) -> "Control":
"""Move to the given column, optionally add offset to row.
Returns:
x (int): absolute x (column)
y (int): optional y offset (row)
Returns:
~Control: Control object.
"""
return (
cls(
(ControlType.CURSOR_MOVE_TO_COLUMN, x),
(
ControlType.CURSOR_DOWN if y > 0 else ControlType.CURSOR_UP,
abs(y),
),
)
if y
else cls((ControlType.CURSOR_MOVE_TO_COLUMN, x))
)
@classmethod
def move_to(cls, x: int, y: int) -> "Control":
"""Move cursor to absolute position.
Args:
x (int): x offset (column)
y (int): y offset (row)
Returns:
~Control: Control object.
"""
return cls((ControlType.CURSOR_MOVE_TO, x, y))
@classmethod
def clear(cls) -> "Control":
"""Clear the screen."""
return cls(ControlType.CLEAR)
@classmethod
def show_cursor(cls, show: bool) -> "Control":
"""Show or hide the cursor."""
return cls(ControlType.SHOW_CURSOR if show else ControlType.HIDE_CURSOR)
@classmethod
def alt_screen(cls, enable: bool) -> "Control":
"""Enable or disable alt screen."""
if enable:
return cls(ControlType.ENABLE_ALT_SCREEN, ControlType.HOME)
else:
return cls(ControlType.DISABLE_ALT_SCREEN)
@classmethod
def title(cls, title: str) -> "Control":
"""Set the terminal window title
Args:
title (str): The new terminal window title
"""
return cls((ControlType.SET_WINDOW_TITLE, title))
def __str__(self) -> str:
return self.segment.text
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
if self.segment.text:
yield self.segment
def strip_control_codes(
text: str, _translate_table: Dict[int, None] = _CONTROL_STRIP_TRANSLATE
) -> str:
"""Remove control codes from text.
Args:
text (str): A string possibly contain control codes.
Returns:
str: String with control codes removed.
"""
return text.translate(_translate_table)
def escape_control_codes(
text: str,
_translate_table: Dict[int, str] = CONTROL_ESCAPE,
) -> str:
"""Replace control codes with their "escaped" equivalent in the given text.
(e.g. "\b" becomes "\\b")
Args:
text (str): A string possibly containing control codes.
Returns:
str: String with control codes replaced with their escaped version.
"""
return text.translate(_translate_table)
if __name__ == "__main__": # pragma: no cover
from pip._vendor.rich.console import Console
console = Console()
console.print("Look at the title of your terminal window ^")
# console.print(Control((ControlType.SET_WINDOW_TITLE, "Hello, world!")))
for i in range(10):
console.set_window_title("🚀 Loading" + "." * i)
time.sleep(0.5)
| Control |
python | pytorch__pytorch | torch/distributed/elastic/multiprocessing/api.py | {
"start": 14347,
"end": 15102
} | class ____:
"""
Results of a completed run of processes started with ``start_processes()``. Returned by ``PContext``.
Note the following:
1. All fields are mapped by local rank
2. ``return_values`` - only populated for functions (not the binaries).
3. ``stdouts`` - path to stdout.log (empty string if no redirect)
4. ``stderrs`` - path to stderr.log (empty string if no redirect)
"""
return_values: dict[int, Any] = field(default_factory=dict)
failures: dict[int, ProcessFailure] = field(default_factory=dict)
stdouts: dict[int, str] = field(default_factory=dict)
stderrs: dict[int, str] = field(default_factory=dict)
def is_failed(self) -> bool:
return len(self.failures) > 0
| RunProcsResult |
python | ansible__ansible | test/units/module_utils/basic/test_dict_converters.py | {
"start": 309,
"end": 861
} | class ____(unittest.TestCase):
def test_module_utils_basic_json_dict_converters(self):
from ansible.module_utils.basic import json_dict_unicode_to_bytes, json_dict_bytes_to_unicode
test_data = dict(
item1=u"Fóo",
item2=[u"Bár", u"Bam"],
item3=dict(sub1=u"Súb"),
item4=(u"föo", u"bär", u"©"),
item5=42,
)
res = json_dict_unicode_to_bytes(test_data)
res2 = json_dict_bytes_to_unicode(res)
self.assertEqual(test_data, res2)
| TestTextifyContainers |
python | walkccc__LeetCode | solutions/3559. Number of Ways to Assign Edge Weights II/3559.py | {
"start": 0,
"end": 1380
} | class ____:
def assignEdgeWeights(
self,
edges: list[list[int]],
queries: list[list[int]]
) -> list[int]:
MOD = 1_000_000_007
LOG = 17 # since 2^17 > 1e5
n = len(edges) + 1
ans = []
depth = [0] * (n + 1)
graph = [[] for _ in range(n + 1)]
parent = [[-1] * (n + 1) for _ in range(LOG)]
for u, v in edges:
graph[u].append(v)
graph[v].append(u)
def dfs(u: int, p: int) -> None:
parent[0][u] = p
for v in graph[u]:
if v != p:
depth[v] = depth[u] + 1
dfs(v, u)
dfs(1, -1)
for k in range(1, LOG):
for v in range(1, n + 1):
if parent[k - 1][v] != -1:
parent[k][v] = parent[k - 1][parent[k - 1][v]]
def lca(u: int, v: int) -> int:
if depth[u] < depth[v]:
u, v = v, u
for k in reversed(range(LOG)):
if parent[k][u] != -1 and depth[parent[k][u]] >= depth[v]:
u = parent[k][u]
if u == v:
return u
for k in reversed(range(LOG)):
if parent[k][u] != -1 and parent[k][u] != parent[k][v]:
u = parent[k][u]
v = parent[k][v]
return parent[0][u]
for u, v in queries:
if u == v:
ans.append(0)
else:
a = lca(u, v)
d = depth[u] + depth[v] - 2 * depth[a]
ans.append(pow(2, d - 1, MOD))
return ans
| Solution |
python | pytorch__pytorch | test/test_nn.py | {
"start": 362828,
"end": 364660
} | class ____(nn.Module):
def __init__(self, pool, unpool):
super().__init__()
self.pool = pool
self.unpool = unpool
def forward(self, input):
return self.unpool(*self.pool(input))
add_test(NewModuleTest(
constructor=lambda: UnpoolingNet(
nn.MaxPool1d(2, return_indices=True),
nn.MaxUnpool1d(2)),
input_size=(1, 1, 4),
fullname='MaxUnpool1d_net',
default_dtype=torch.double,))
add_test(NewModuleTest(
constructor=lambda: UnpoolingNet(
nn.MaxPool2d(2, return_indices=True),
nn.MaxUnpool2d(2)),
input_size=(1, 1, 2, 4),
fullname='MaxUnpool2d_net',
default_dtype=torch.double,))
add_test(NewModuleTest(
constructor=lambda: UnpoolingNet(
nn.MaxPool3d(2, return_indices=True),
nn.MaxUnpool3d(2)),
input_size=(1, 1, 2, 4, 6),
fullname='MaxUnpool3d_net',
check_gradgrad=False,
default_dtype=torch.double,))
add_test(NewModuleTest(
constructor=lambda: UnpoolingNet(
nn.MaxPool1d(2, return_indices=True),
nn.MaxUnpool1d(2)),
input_size=(1, 4),
reference_fn=single_batch_reference_fn,
fullname='MaxUnpool1d_net_no_batch_dim',
default_dtype=torch.double,))
add_test(NewModuleTest(
constructor=lambda: UnpoolingNet(
nn.MaxPool2d(2, return_indices=True),
nn.MaxUnpool2d(2)),
input_size=(1, 2, 4),
reference_fn=single_batch_reference_fn,
fullname='MaxUnpool2d_net_no_batch_dim',
default_dtype=torch.double,))
add_test(NewModuleTest(
constructor=lambda: UnpoolingNet(
nn.MaxPool3d(2, return_indices=True),
nn.MaxUnpool3d(2)),
input_size=(1, 2, 4, 6),
reference_fn=single_batch_reference_fn,
fullname='MaxUnpool3d_net_no_batch_dim',
check_gradgrad=False,
default_dtype=torch.double,))
| UnpoolingNet |
python | google__jax | jax/_src/export/shape_poly.py | {
"start": 50064,
"end": 52498
} | class ____(effects.Effect):
__str__ = lambda _: "ShapeAssertionEffect"
shape_assertion_effect = ShapeAssertionEffect()
effects.lowerable_effects.add_type(ShapeAssertionEffect)
effects.control_flow_allowed_effects.add_type(ShapeAssertionEffect)
effects.remat_allowed_effects.add_type(ShapeAssertionEffect)
effects.custom_derivatives_allowed_effects.add_type(ShapeAssertionEffect)
def shape_assertion(assert_what: typing.Array,
*error_message_inputs: typing.Array,
error_message: str) -> None:
"""Adds a shape assertion in the code.
Args:
assert_what: a boolean asserted to be true. Must be computed based only
on dimension expressions, so that it can be evaluated after shape
refinement.
error_message_inputs: integers expressions whose values can be referenced
in the `error_message`. Must be computed based only
on dimension expressions, so that they can be evaluated after shape
refinement.
error_message: an error message, possibly containing format specifiers
{0}, {1}, ..., referencing the values of the `error_message_inputs`.
The format specifiers are sometimes processed with Python's
`string::format` method, and sometimes with `llvm::formatv`.
"""
shape_assertion_p.bind(assert_what, *error_message_inputs,
error_message=error_message)
# A JAX primitive with no array arguments but with a dimension parameter
# that is a DimExpr. The value of the primitive is the value of the dimension,
# using int64 in x64 mode or int32 otherwise (core.dim_value_dtype())
dim_as_value_p = core.Primitive("dim_as_value")
dim_as_value_p.def_abstract_eval(lambda dim: core.dim_value_aval())
def dim_as_value_impl(dim: DimSize):
raise NotImplementedError(
"Evaluation rule for 'dim_as_value' is not implemented. "
"It seems that you are using shape polymorphism outside jax.export.")
dim_as_value_p.def_impl(dim_as_value_impl)
def _dim_as_value(dim: DimSize):
return dim_as_value_p.bind(dim=dim)
def _dim_as_value_lowering(ctx: mlir.LoweringRuleContext, *,
dim):
res, = mlir.eval_dynamic_shape(ctx, (dim,))
out_type = mlir.aval_to_ir_type(ctx.avals_out[0])
if out_type != res.type: # type: ignore
return [mlir.hlo.convert(out_type, res)]
else:
return [res]
mlir.register_lowering(dim_as_value_p, _dim_as_value_lowering)
| ShapeAssertionEffect |
python | sqlalchemy__sqlalchemy | test/orm/test_lazy_relations.py | {
"start": 29823,
"end": 37576
} | class ____(_fixtures.FixtureTest):
"""test lazyloader on non-existent attribute returns
expected attribute symbols, maintain expected state"""
run_inserts = None
def _unhashable_fixture(self, metadata, load_on_pending=False):
class MyHashType(sa.TypeDecorator):
impl = sa.String(100)
cache_ok = True
def process_bind_param(self, value, dialect):
return ";".join(
"%s=%s" % (k, v)
for k, v in sorted(value.items(), key=lambda key: key[0])
)
def process_result_value(self, value, dialect):
return dict(elem.split("=", 1) for elem in value.split(";"))
category = Table(
"category",
metadata,
Column("id", Integer, primary_key=True),
Column("data", MyHashType()),
)
article = Table(
"article",
metadata,
Column("id", Integer, primary_key=True),
Column("data", MyHashType()),
)
class Category(ComparableEntity):
pass
class Article(ComparableEntity):
pass
self.mapper_registry.map_imperatively(Category, category)
self.mapper_registry.map_imperatively(
Article,
article,
properties={
"category": relationship(
Category,
primaryjoin=orm.foreign(article.c.data) == category.c.data,
load_on_pending=load_on_pending,
)
},
)
metadata.create_all(testing.db)
sess = Session(testing.db, autoflush=False)
data = {"im": "unhashable"}
a1 = Article(id=1, data=data)
c1 = Category(id=1, data=data)
if load_on_pending:
sess.add(c1)
else:
sess.add_all([c1, a1])
sess.flush()
if load_on_pending:
sess.add(a1)
return Category, Article, sess, a1, c1
def _u_ad_fixture(self, populate_user, dont_use_get=False):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(Address, back_populates="user")
},
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(
User,
primaryjoin=(
and_(
users.c.id == addresses.c.user_id, users.c.id != 27
)
if dont_use_get
else None
),
back_populates="addresses",
)
},
)
sess = fixture_session()
a1 = Address(email_address="a1")
sess.add(a1)
if populate_user:
a1.user = User(name="ed")
sess.flush()
if populate_user:
sess.expire_all()
return User, Address, sess, a1
def test_no_use_get_params_missing(self):
User, Address, sess, a1 = self._u_ad_fixture(False, True)
def go():
eq_(a1.user, None)
# doesn't emit SQL
self.assert_sql_count(testing.db, go, 0)
@testing.provide_metadata
def test_no_use_get_params_not_hashable(self):
Category, Article, sess, a1, c1 = self._unhashable_fixture(
self.metadata
)
def go():
eq_(a1.category, c1)
self.assert_sql_count(testing.db, go, 1)
@testing.provide_metadata
def test_no_use_get_params_not_hashable_on_pending(self):
Category, Article, sess, a1, c1 = self._unhashable_fixture(
self.metadata, load_on_pending=True
)
def go():
eq_(a1.category, c1)
self.assert_sql_count(testing.db, go, 1)
def test_get_empty_passive_return_never_set(self):
User, Address, sess, a1 = self._u_ad_fixture(False)
eq_(
Address.user.impl.get(
attributes.instance_state(a1),
attributes.instance_dict(a1),
passive=attributes.PASSIVE_RETURN_NO_VALUE,
),
attributes.NO_VALUE,
)
assert "user_id" not in a1.__dict__
assert "user" not in a1.__dict__
def test_history_empty_passive_return_never_set(self):
User, Address, sess, a1 = self._u_ad_fixture(False)
eq_(
Address.user.impl.get_history(
attributes.instance_state(a1),
attributes.instance_dict(a1),
passive=attributes.PASSIVE_RETURN_NO_VALUE,
),
((), (), ()),
)
assert "user_id" not in a1.__dict__
assert "user" not in a1.__dict__
def test_get_empty_passive_no_initialize(self):
User, Address, sess, a1 = self._u_ad_fixture(False)
eq_(
Address.user.impl.get(
attributes.instance_state(a1),
attributes.instance_dict(a1),
passive=attributes.PASSIVE_NO_INITIALIZE,
),
attributes.PASSIVE_NO_RESULT,
)
assert "user_id" not in a1.__dict__
assert "user" not in a1.__dict__
def test_history_empty_passive_no_initialize(self):
User, Address, sess, a1 = self._u_ad_fixture(False)
eq_(
Address.user.impl.get_history(
attributes.instance_state(a1),
attributes.instance_dict(a1),
passive=attributes.PASSIVE_NO_INITIALIZE,
),
attributes.HISTORY_BLANK,
)
assert "user_id" not in a1.__dict__
assert "user" not in a1.__dict__
def test_get_populated_passive_no_initialize(self):
User, Address, sess, a1 = self._u_ad_fixture(True)
eq_(
Address.user.impl.get(
attributes.instance_state(a1),
attributes.instance_dict(a1),
passive=attributes.PASSIVE_NO_INITIALIZE,
),
attributes.PASSIVE_NO_RESULT,
)
assert "user_id" not in a1.__dict__
assert "user" not in a1.__dict__
def test_history_populated_passive_no_initialize(self):
User, Address, sess, a1 = self._u_ad_fixture(True)
eq_(
Address.user.impl.get_history(
attributes.instance_state(a1),
attributes.instance_dict(a1),
passive=attributes.PASSIVE_NO_INITIALIZE,
),
attributes.HISTORY_BLANK,
)
assert "user_id" not in a1.__dict__
assert "user" not in a1.__dict__
def test_get_populated_passive_return_never_set(self):
User, Address, sess, a1 = self._u_ad_fixture(True)
eq_(
Address.user.impl.get(
attributes.instance_state(a1),
attributes.instance_dict(a1),
passive=attributes.PASSIVE_RETURN_NO_VALUE,
),
User(name="ed"),
)
def test_history_populated_passive_return_never_set(self):
User, Address, sess, a1 = self._u_ad_fixture(True)
eq_(
Address.user.impl.get_history(
attributes.instance_state(a1),
attributes.instance_dict(a1),
passive=attributes.PASSIVE_RETURN_NO_VALUE,
),
((), [User(name="ed")], ()),
)
| GetterStateTest |
python | ApeWorX__ape | src/ape_ethereum/transactions.py | {
"start": 1591,
"end": 1888
} | class ____(Enum):
"""
Transaction enumerable type constants defined by
`EIP-2718 <https://eips.ethereum.org/EIPS/eip-2718>`__.
"""
STATIC = 0
ACCESS_LIST = 1 # EIP-2930
DYNAMIC = 2 # EIP-1559
SHARED_BLOB = 3 # EIP-4844
SET_CODE = 4 # EIP-7702
| TransactionType |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 58337,
"end": 58400
} | class ____(_TestNorm, _TestNormInt64Base):
pass
| TestNormInt64 |
python | docker__docker-py | docker/models/resource.py | {
"start": 1313,
"end": 2580
} | class ____:
"""
A base class for representing all objects of a particular type on the
server.
"""
#: The type of object this collection represents, set by subclasses
model = None
def __init__(self, client=None):
#: The client pointing at the server that this collection of objects
#: is on.
self.client = client
def __call__(self, *args, **kwargs):
raise TypeError(
f"'{self.__class__.__name__}' object is not callable. "
"You might be trying to use the old (pre-2.0) API - "
"use docker.APIClient if so."
)
def list(self):
raise NotImplementedError
def get(self, key):
raise NotImplementedError
def create(self, attrs=None):
raise NotImplementedError
def prepare_model(self, attrs):
"""
Create a model from a set of attributes.
"""
if isinstance(attrs, Model):
attrs.client = self.client
attrs.collection = self
return attrs
elif isinstance(attrs, dict):
return self.model(attrs=attrs, client=self.client, collection=self)
else:
raise Exception(f"Can't create {self.model.__name__} from {attrs}")
| Collection |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_rich_string11.py | {
"start": 315,
"end": 1053
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("rich_string11.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({"bold": 1})
italic = workbook.add_format({"italic": 1})
smiley = "\u263a"
worksheet.write("A1", "Foo", bold)
worksheet.write("A2", "Bar", italic)
worksheet.write_rich_string("A3", "a", bold, smiley, "defg")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | src/sentry/issues/attributes.py | {
"start": 1215,
"end": 11479
} | class ____:
id: int
project_id: int
status: int
substatus: int | None
first_seen: datetime
num_comments: int
priority: int | None
first_release_id: int | None
def _get_attribute_snapshot_producer():
return get_arroyo_producer(
"sentry.issues.attributes",
Topic.GROUP_ATTRIBUTES,
exclude_config_keys=["compression.type", "message.max.bytes"],
)
_attribute_snapshot_producer = SingletonProducer(
_get_attribute_snapshot_producer, max_futures=settings.SENTRY_GROUP_ATTRIBUTES_FUTURES_MAX_LIMIT
)
def _log_group_attributes_changed(
operation: Operation,
model_inducing_snapshot: str,
column_inducing_snapshot: str | None = None,
) -> None:
metrics.incr(
"group_attributes.changed",
tags={
"operation": operation.value,
"model": model_inducing_snapshot,
"column": column_inducing_snapshot,
},
)
def send_snapshot_values(
group_id: int | None, group: Group | None, group_deleted: bool = False
) -> None:
group_ids = None
if group_id:
group_ids = [group_id]
groups = None
if group:
groups = [group]
bulk_send_snapshot_values(group_ids, groups, group_deleted=group_deleted)
def bulk_send_snapshot_values(
group_ids: list[int] | None, groups: list[Group] | None, group_deleted: bool = False
) -> None:
if group_ids is None and groups is None:
raise ValueError("cannot send snapshot values when group_ids and groups are None")
group_list: list[Group | GroupValues] = [*(groups or [])]
if group_ids:
group_list.extend(_bulk_retrieve_group_values(group_ids))
snapshots = _bulk_retrieve_snapshot_values(group_list, group_deleted=group_deleted)
for snapshot in snapshots:
produce_snapshot_to_kafka(snapshot)
def produce_snapshot_to_kafka(snapshot: GroupAttributesSnapshot) -> None:
if settings.SENTRY_EVENTSTREAM != "sentry.eventstream.kafka.KafkaEventStream":
# If we're not running Kafka then we're just in dev. Skip producing to Kafka and just
# write to snuba directly
try:
resp = _snuba_pool.urlopen(
"POST",
"/tests/entities/group_attributes/insert",
body=json.dumps([snapshot]),
headers={},
)
if resp.status != 200:
raise snuba.SnubaError(
f"HTTP {resp.status} response from Snuba! {resp.data.decode('utf-8')}"
)
return None
except urllib3.exceptions.HTTPError as err:
raise snuba.SnubaError(err)
else:
payload = KafkaPayload(None, json.dumps(snapshot).encode("utf-8"), [])
_attribute_snapshot_producer.produce(
ArroyoTopic(get_topic_definition(Topic.GROUP_ATTRIBUTES)["real_topic_name"]), payload
)
def _bulk_retrieve_group_values(group_ids: list[int]) -> list[GroupValues]:
group_values_map = {
group["id"]: group
for group in Group.objects.filter(id__in=group_ids).values(
"id",
"project_id",
"status",
"substatus",
"first_seen",
"num_comments",
"priority",
"first_release",
)
}
assert len(group_values_map) == len(group_ids)
results = []
for group_id in group_ids:
group_values = group_values_map[group_id]
results.append(
GroupValues(
id=group_id,
project_id=group_values["project_id"],
status=group_values["status"],
substatus=group_values["substatus"],
first_seen=group_values["first_seen"],
num_comments=group_values["num_comments"] or 0,
priority=group_values["priority"],
first_release_id=(group_values["first_release"] or None),
)
)
return results
def _bulk_retrieve_snapshot_values(
group_values_list: Iterable[Group | GroupValues], group_deleted: bool = False
) -> list[GroupAttributesSnapshot]:
group_assignee_map = {
ga["group_id"]: ga
for ga in GroupAssignee.objects.filter(
group_id__in=[gv.id for gv in group_values_list]
).values("group_id", "user_id", "team_id")
}
group_owner_map = {}
for group_owner in (
GroupOwner.objects.annotate(
position=Window(Rank(), partition_by=[F("group_id"), F("type")], order_by="-date_added")
)
.filter(position=1, group_id__in=[g.id for g in group_values_list])
.values("group_id", "user_id", "team_id", "type")
):
group_owner_map[(group_owner["group_id"], group_owner["type"])] = group_owner
snapshots = []
for group_value in group_values_list:
assignee = group_assignee_map.get(group_value.id)
suspect_owner = group_owner_map.get((group_value.id, GroupOwnerType.SUSPECT_COMMIT.value))
ownership_owner = group_owner_map.get((group_value.id, GroupOwnerType.OWNERSHIP_RULE.value))
codeowners_owner = group_owner_map.get((group_value.id, GroupOwnerType.CODEOWNERS.value))
snapshot: GroupAttributesSnapshot = {
"group_deleted": group_deleted,
"project_id": group_value.project_id,
"group_id": group_value.id,
"status": group_value.status,
"substatus": group_value.substatus,
"priority": group_value.priority,
"first_release": group_value.first_release_id,
"first_seen": group_value.first_seen.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"num_comments": group_value.num_comments,
"timestamp": datetime.now().isoformat(),
"assignee_user_id": assignee["user_id"] if assignee else None,
"assignee_team_id": assignee["team_id"] if assignee else None,
"owner_suspect_commit_user_id": suspect_owner["user_id"] if suspect_owner else None,
"owner_ownership_rule_user_id": ownership_owner["user_id"] if ownership_owner else None,
"owner_ownership_rule_team_id": ownership_owner["team_id"] if ownership_owner else None,
"owner_codeowners_user_id": codeowners_owner["user_id"] if codeowners_owner else None,
"owner_codeowners_team_id": codeowners_owner["team_id"] if codeowners_owner else None,
}
snapshots.append(snapshot)
return snapshots
@receiver(
post_save, sender=Group, dispatch_uid="post_save_log_group_attributes_changed", weak=False
)
def post_save_log_group_attributes_changed(instance, sender, created, *args, **kwargs) -> None:
try:
if created:
_log_group_attributes_changed(Operation.CREATED, "group", None)
send_snapshot_values(None, instance, False)
else:
if process_update_fields(kwargs.get("update_fields", set())):
send_snapshot_values(None, instance, False)
except Exception:
logger.exception("failed to log group attributes after group post_save")
@receiver(post_update, sender=Group, dispatch_uid="post_update_group", weak=False)
def post_update_group(sender, updated_fields, model_ids, *args, **kwargs) -> None:
try:
updated_fields = process_update_fields(updated_fields)
if updated_fields:
bulk_send_snapshot_values(model_ids, None)
except Exception:
logger.exception("failed to log group attributes after group_owner updated")
def process_update_fields(updated_fields) -> set[str]:
if not updated_fields:
# we have no guarantees update_fields is used everywhere save() is called
# we'll need to assume any of the attributes are updated in that case
updated_fields = {"all"}
else:
VALID_FIELDS = {"status", "substatus", "num_comments", "priority", "first_release"}
updated_fields = VALID_FIELDS.intersection(updated_fields or ())
if updated_fields:
_log_group_attributes_changed(Operation.UPDATED, "group", "-".join(sorted(updated_fields)))
return updated_fields
@issue_deleted.connect(weak=False)
def on_issue_deleted_log_deleted(group, user, delete_type, **kwargs) -> None:
try:
_log_group_attributes_changed(Operation.DELETED, "group", "all")
send_snapshot_values(None, group, True)
except Exception:
logger.exception("failed to log group attributes after group delete")
@issue_assigned.connect(weak=False)
def on_issue_assigned_log_group_assignee_attributes_changed(project, group, user, **kwargs) -> None:
try:
_log_group_attributes_changed(Operation.UPDATED, "group_assignee", "all")
send_snapshot_values(None, group, False)
except Exception:
logger.exception("failed to log group attributes after group_assignee assignment")
@issue_unassigned.connect(weak=False)
def on_issue_unassigned_log_group_assignee_attributes_changed(
project, group, user, **kwargs
) -> None:
try:
_log_group_attributes_changed(Operation.DELETED, "group_assignee", "all")
send_snapshot_values(None, group, False)
except Exception:
logger.exception("failed to log group attributes after group_assignee unassignment")
@receiver(
post_save, sender=GroupOwner, dispatch_uid="post_save_log_group_owner_changed", weak=False
)
def post_save_log_group_owner_changed(
instance, sender, created, update_fields, *args, **kwargs
) -> None:
try:
_log_group_attributes_changed(
Operation.CREATED if created else Operation.UPDATED, "group_owner", "all"
)
send_snapshot_values(instance.group_id, None, False)
except Exception:
logger.exception("failed to log group attributes after group_owner updated")
@receiver(
post_delete, sender=GroupOwner, dispatch_uid="post_delete_log_group_owner_changed", weak=False
)
def post_delete_log_group_owner_changed(instance, sender, *args, **kwargs) -> None:
try:
_log_group_attributes_changed(Operation.DELETED, "group_owner", "all")
send_snapshot_values(instance.group_id, None, False)
except Exception:
logger.exception("failed to log group attributes after group_owner delete")
| GroupValues |
python | Netflix__metaflow | metaflow/plugins/secrets/secrets_decorator.py | {
"start": 456,
"end": 5000
} | class ____(StepDecorator):
"""
Specifies secrets to be retrieved and injected as environment variables prior to
the execution of a step.
Parameters
----------
sources : List[Union[str, Dict[str, Any]]], default: []
List of secret specs, defining how the secrets are to be retrieved
role : str, optional, default: None
Role to use for fetching secrets
allow_override : bool, optional, default: False
Toggle whether secrets can replace existing environment variables.
"""
name = "secrets"
defaults = {"sources": [], "role": None, "allow_override": False}
def task_pre_step(
self,
step_name,
task_datastore,
metadata,
run_id,
task_id,
flow,
graph,
retry_count,
max_user_code_retries,
ubf_context,
inputs,
):
if (
ubf_context
and ubf_context == UBF_TASK
and os.environ.get("METAFLOW_RUNTIME_ENVIRONMENT", "local") == "local"
):
# We will skip the secret injection for "locally" launched UBF_TASK (worker) tasks
# When we "locally" run @parallel tasks, the control task will create the worker tasks and the environment variables
# of the control task are inherited by the worker tasks. If we don't skip setting secrets in the worker task then the
# worker tasks will try to set the environment variables again which will cause a clash with the control task's env vars,
# causing the @secrets' `task_pre_step` to fail. In remote settings, (e.g. AWS Batch/Kubernetes), the worker task and
# control task are independently created so there is no chances of an env var clash.
return
# List of pairs (secret_spec, env_vars_from_this_spec)
all_secrets_env_vars = []
secret_specs = []
# Role (in terms of RBAC) to use when retrieving secrets.
# This is a general concept applicable to multiple backends
# E.g in AWS, this would be an IAM Role ARN.
#
# Config precedence (decreasing):
# - Source level: @secrets(source=[{"role": ...}])
# - Decorator level: @secrets(role=...)
# - Metaflow config key DEFAULT_SECRETS_ROLE
role = self.attributes["role"]
if role is None:
role = DEFAULT_SECRETS_ROLE
for secret_spec_str_or_dict in self.attributes["sources"]:
if isinstance(secret_spec_str_or_dict, str):
secret_specs.append(
SecretSpec.secret_spec_from_str(secret_spec_str_or_dict, role=role)
)
elif isinstance(secret_spec_str_or_dict, dict):
secret_specs.append(
SecretSpec.secret_spec_from_dict(secret_spec_str_or_dict, role=role)
)
else:
raise MetaflowException(
"@secrets sources items must be either a string or a dict"
)
for secret_spec in secret_specs:
secrets_backend_provider = get_secrets_backend_provider(
secret_spec.secrets_backend_type
)
try:
env_vars_for_secret = secrets_backend_provider.get_secret_as_dict(
secret_spec.secret_id,
options=secret_spec.options,
role=secret_spec.role,
)
except Exception as e:
raise MetaflowException(
"Failed to retrieve secret '%s': %s" % (secret_spec.secret_id, e)
)
try:
validate_env_vars(env_vars_for_secret)
except ValueError as e:
raise MetaflowException(
"Invalid env vars from secret %s: %s"
% (secret_spec.secret_id, str(e))
)
all_secrets_env_vars.append((secret_spec, env_vars_for_secret))
validate_env_vars_across_secrets(all_secrets_env_vars)
if not self.attributes["allow_override"]:
validate_env_vars_vs_existing_env(all_secrets_env_vars)
# By this point
# all_secrets_env_vars contains a list of dictionaries... env maps.
# - env maps must be disjoint from each other
# - env maps must be disjoint from existing current process os.environ
for secrets_env_vars in all_secrets_env_vars:
os.environ.update(secrets_env_vars[1].items())
| SecretsDecorator |
python | getsentry__sentry | tests/sentry/uptime/subscriptions/test_tasks.py | {
"start": 2891,
"end": 4774
} | class ____(ConfigPusherTestMixin, metaclass=abc.ABCMeta):
__test__ = Abstract(__module__, __qualname__)
status_translations = {
UptimeSubscription.Status.CREATING: "create",
UptimeSubscription.Status.UPDATING: "update",
UptimeSubscription.Status.DELETING: "delete",
}
@pytest.fixture(autouse=True)
def _setup_metrics(self):
with patch("sentry.uptime.subscriptions.tasks.metrics") as self.metrics:
yield
@abc.abstractproperty
def expected_status(self):
pass
@abc.abstractmethod
def task(self, uptime_subscription_id: int) -> None:
pass
def create_subscription(
self, status: UptimeSubscription.Status, subscription_id: str | None = None
):
return self.create_uptime_subscription(
status=status,
type="something",
subscription_id=subscription_id,
url="http://sentry.io",
interval_seconds=300,
timeout_ms=500,
region_slugs=["default"],
)
def test_no_subscription(self) -> None:
self.task(12345)
self.metrics.incr.assert_called_once_with(
"uptime.subscriptions.{}.subscription_does_not_exist".format(
self.status_translations[self.expected_status]
),
sample_rate=1.0,
)
def test_invalid_status(self) -> None:
sub = self.create_subscription(
UptimeSubscription.Status.ACTIVE, subscription_id=uuid.uuid4().hex
)
self.task(sub.id)
self.metrics.incr.assert_called_once_with(
"uptime.subscriptions.{}.incorrect_status".format(
self.status_translations[self.expected_status]
),
sample_rate=1.0,
)
self.assert_redis_config("default", sub, None, None)
| BaseUptimeSubscriptionTaskTest |
python | pyqtgraph__pyqtgraph | pyqtgraph/Qt/internals.py | {
"start": 1931,
"end": 8373
} | class ____:
# Note: This class is an internal implementation detail and is not part
# of the public API.
#
# QPainter has a C++ native API that takes an array of objects:
# drawPrimitives(const Primitive *array, int count, ...)
# where "Primitive" is one of QPointF, QLineF, QRectF, PixmapFragment
#
# PySide (with the exception of drawPixmapFragments) and older PyQt
# require a Python list of "Primitive" instances to be provided to
# the respective "drawPrimitives" method.
#
# This is inefficient because:
# 1) constructing the Python list involves calling wrapinstance multiple times.
# - this is mitigated here by reusing the instance pointers
# 2) The binding will anyway have to repack the instances into a contiguous array,
# in order to call the underlying C++ native API.
#
# Newer PyQt provides sip.array, which is more efficient.
#
# PySide's drawPixmapFragments() takes an instance to the first item of a
# C array of PixmapFragment(s) _and_ the length of the array.
# There is no overload that takes a Python list of PixmapFragment(s).
def __init__(self, Klass, nfields, *, use_array=None):
self._Klass = Klass
self._nfields = nfields
self._capa = -1
self.use_sip_array = False
self.use_ptr_to_array = False
if QT_LIB.startswith('PyQt'):
if use_array is None:
use_array = (
hasattr(sip, 'array') and
(
(0x60301 <= QtCore.PYQT_VERSION) or
(0x50f07 <= QtCore.PYQT_VERSION < 0x60000)
)
)
self.use_sip_array = use_array
elif QT_LIB.startswith('PySide'):
if use_array is None:
use_array = (
# here we are actually testing for PySide version rather
# than Qt version. But PySide version mostly matches
# Qt version anyway.
Klass is QtGui.QPainter.PixmapFragment
or QtVersionInfo >= (6, 4, 3)
)
self.use_ptr_to_array = use_array
self.resize(0)
def resize(self, size):
if self.use_sip_array:
# For reference, SIP_VERSION 6.7.8 first arrived
# in PyQt5_sip 12.11.2 and PyQt6_sip 13.4.2
if sip.SIP_VERSION >= 0x60708:
if size <= self._capa:
self._size = size
return
else:
# sip.array prior to SIP_VERSION 6.7.8 had a
# buggy slicing implementation.
# so trigger a reallocate for any different size
if size == self._capa:
return
self._siparray = sip.array(self._Klass, size)
else:
if size <= self._capa:
self._size = size
return
self._ndarray = np.empty((size, self._nfields), dtype=np.float64)
if self.use_ptr_to_array:
# defer creation
self._objs = None
else:
self._objs = self._wrap_instances(self._ndarray)
self._capa = size
self._size = size
def _wrap_instances(self, array):
return list(map(compat.wrapinstance,
itertools.count(array.ctypes.data, array.strides[0]),
itertools.repeat(self._Klass, array.shape[0])))
def __len__(self):
return self._size
def ndarray(self):
# ndarray views are cheap to recreate each time
if self.use_sip_array:
# sip.array prior to SIP_VERSION 6.7.8 had a buggy buffer protocol
# that set the wrong size.
# workaround it by going through a sip.voidptr
mv = sip.voidptr(self._siparray, self._capa*self._nfields*8)
# note that we perform the slicing by using only _size rows
nd = np.frombuffer(mv, dtype=np.float64, count=self._size*self._nfields)
return nd.reshape((-1, self._nfields))
else:
return self._ndarray[:self._size]
def instances(self):
# this returns an iterable container of Klass instances.
# for "use_ptr_to_array" mode, such a container may not
# be required at all, so its creation is deferred
if self.use_sip_array:
if self._size == self._capa:
# avoiding slicing when it's not necessary
# handles the case where sip.array had a buggy
# slicing implementation
return self._siparray
else:
# this is a view
return self._siparray[:self._size]
if self._objs is None:
self._objs = self._wrap_instances(self._ndarray)
if self._size == self._capa:
return self._objs
else:
# this is a shallow copy
return self._objs[:self._size]
def drawargs(self):
# returns arguments to apply to the respective drawPrimitives() functions
if self.use_ptr_to_array:
if self._capa > 0:
# wrap memory only if it is safe to do so
ptr = compat.wrapinstance(self._ndarray.ctypes.data, self._Klass)
else:
# shiboken translates None <--> nullptr
# alternatively, we could instantiate a dummy _Klass()
ptr = None
return ptr, self._size
else:
return self.instances(),
_qbytearray_leaks = None
def qbytearray_leaks() -> bool:
global _qbytearray_leaks
if _qbytearray_leaks is None:
# When PySide{2,6} is built without Py_LIMITED_API,
# it leaks memory when a memory view to a QByteArray
# object is taken.
# See https://github.com/pyqtgraph/pyqtgraph/issues/3265
# and PYSIDE-3031
# Note: official builds of PySide{2,6} by Qt are built with
# the limited api, and thus do not leak.
if QT_LIB.startswith("PySide"):
# probe whether QByteArray leaks
qba = QtCore.QByteArray()
ref0 = sys.getrefcount(qba)
memoryview(qba)
_qbytearray_leaks = sys.getrefcount(qba) > ref0
else:
_qbytearray_leaks = False
return _qbytearray_leaks
| PrimitiveArray |
python | numpy__numpy | numpy/ma/tests/test_core.py | {
"start": 212355,
"end": 224477
} | class ____:
# TODO: Test masked_object, masked_equal, ...
def test_masked_values(self):
res = masked_values(np.array([-32768.0]), np.int16(-32768))
assert_equal(res.mask, [True])
res = masked_values(np.inf, np.inf)
assert_equal(res.mask, True)
res = np.ma.masked_values(np.inf, -np.inf)
assert_equal(res.mask, False)
res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=True)
assert_(res.mask is np.ma.nomask)
res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=False)
assert_equal(res.mask, [False] * 4)
def test_masked_array():
a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
assert_equal(np.argwhere(a), [[1], [3]])
def test_masked_array_no_copy():
# check nomask array is updated in place
a = np.ma.array([1, 2, 3, 4])
_ = np.ma.masked_where(a == 3, a, copy=False)
assert_array_equal(a.mask, [False, False, True, False])
# check masked array is updated in place
a = np.ma.array([1, 2, 3, 4], mask=[1, 0, 0, 0])
_ = np.ma.masked_where(a == 3, a, copy=False)
assert_array_equal(a.mask, [True, False, True, False])
# check masked array with masked_invalid is updated in place
a = np.ma.array([np.inf, 1, 2, 3, 4])
_ = np.ma.masked_invalid(a, copy=False)
assert_array_equal(a.mask, [True, False, False, False, False])
def test_append_masked_array():
a = np.ma.masked_equal([1, 2, 3], value=2)
b = np.ma.masked_equal([4, 3, 2], value=2)
result = np.ma.append(a, b)
expected_data = [1, 2, 3, 4, 3, 2]
expected_mask = [False, True, False, False, False, True]
assert_array_equal(result.data, expected_data)
assert_array_equal(result.mask, expected_mask)
a = np.ma.masked_all((2, 2))
b = np.ma.ones((3, 1))
result = np.ma.append(a, b)
expected_data = [1] * 3
expected_mask = [True] * 4 + [False] * 3
assert_array_equal(result.data[-3], expected_data)
assert_array_equal(result.mask, expected_mask)
result = np.ma.append(a, b, axis=None)
assert_array_equal(result.data[-3], expected_data)
assert_array_equal(result.mask, expected_mask)
def test_append_masked_array_along_axis():
a = np.ma.masked_equal([1, 2, 3], value=2)
b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
# When `axis` is specified, `values` must have the correct shape.
assert_raises(ValueError, np.ma.append, a, b, axis=0)
result = np.ma.append(a[np.newaxis, :], b, axis=0)
expected = np.ma.arange(1, 10)
expected[[1, 6]] = np.ma.masked
expected = expected.reshape((3, 3))
assert_array_equal(result.data, expected.data)
assert_array_equal(result.mask, expected.mask)
def test_default_fill_value_complex():
# regression test for Python 3, where 'unicode' was not defined
assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j)
def test_string_dtype_fill_value_on_construction():
# Regression test for gh-29421: allow string fill_value on StringDType masked arrays
dt = np.dtypes.StringDType()
data = np.array(["A", "test", "variable", ""], dtype=dt)
mask = [True, False, True, True]
# Prior to the fix, this would TypeError; now it should succeed
arr = np.ma.MaskedArray(data, mask=mask, fill_value="FILL", dtype=dt)
assert isinstance(arr.fill_value, str)
assert arr.fill_value == "FILL"
filled = arr.filled()
# Masked positions should be replaced by 'FILL'
assert filled.tolist() == ["FILL", "test", "FILL", "FILL"]
def test_string_dtype_default_fill_value():
# Regression test for gh-29421: default fill_value for StringDType is 'N/A'
dt = np.dtypes.StringDType()
data = np.array(['x', 'y', 'z'], dtype=dt)
# no fill_value passed → uses default_fill_value internally
arr = np.ma.MaskedArray(data, mask=[True, False, True], dtype=dt)
# ensure it’s stored as a Python str and equals the expected default
assert isinstance(arr.fill_value, str)
assert arr.fill_value == 'N/A'
# masked slots should be replaced by that default
assert arr.filled().tolist() == ['N/A', 'y', 'N/A']
def test_string_dtype_fill_value_persists_through_slice():
# Regression test for gh-29421: .fill_value survives slicing/viewing
dt = np.dtypes.StringDType()
arr = np.ma.MaskedArray(
['a', 'b', 'c'],
mask=[True, False, True],
dtype=dt
)
arr.fill_value = 'Z'
# slice triggers __array_finalize__
sub = arr[1:]
# the slice should carry the same fill_value and behavior
assert isinstance(sub.fill_value, str)
assert sub.fill_value == 'Z'
assert sub.filled().tolist() == ['b', 'Z']
def test_setting_fill_value_attribute():
# Regression test for gh-29421: setting .fill_value post-construction works too
dt = np.dtypes.StringDType()
arr = np.ma.MaskedArray(
["x", "longstring", "mid"], mask=[False, True, False], dtype=dt
)
# Setting the attribute should not raise
arr.fill_value = "Z"
assert arr.fill_value == "Z"
# And filled() should use the new fill_value
assert arr.filled()[0] == "x"
assert arr.filled()[1] == "Z"
assert arr.filled()[2] == "mid"
def test_ufunc_with_output():
# check that giving an output argument always returns that output.
# Regression test for gh-8416.
x = array([1., 2., 3.], mask=[0, 0, 1])
y = np.add(x, 1., out=x)
assert_(y is x)
def test_ufunc_with_out_varied():
""" Test that masked arrays are immune to gh-10459 """
# the mask of the output should not affect the result, however it is passed
a = array([ 1, 2, 3], mask=[1, 0, 0])
b = array([10, 20, 30], mask=[1, 0, 0])
out = array([ 0, 0, 0], mask=[0, 0, 1])
expected = array([11, 22, 33], mask=[1, 0, 0])
out_pos = out.copy()
res_pos = np.add(a, b, out_pos)
out_kw = out.copy()
res_kw = np.add(a, b, out=out_kw)
out_tup = out.copy()
res_tup = np.add(a, b, out=(out_tup,))
assert_equal(res_kw.mask, expected.mask)
assert_equal(res_kw.data, expected.data)
assert_equal(res_tup.mask, expected.mask)
assert_equal(res_tup.data, expected.data)
assert_equal(res_pos.mask, expected.mask)
assert_equal(res_pos.data, expected.data)
def test_astype_mask_ordering():
descr = np.dtype([('v', int, 3), ('x', [('y', float)])])
x = array([
[([1, 2, 3], (1.0,)), ([1, 2, 3], (2.0,))],
[([1, 2, 3], (3.0,)), ([1, 2, 3], (4.0,))]], dtype=descr)
x[0]['v'][0] = np.ma.masked
x_a = x.astype(descr)
assert x_a.dtype.names == np.dtype(descr).names
assert x_a.mask.dtype.names == np.dtype(descr).names
assert_equal(x, x_a)
assert_(x is x.astype(x.dtype, copy=False))
assert_equal(type(x.astype(x.dtype, subok=False)), np.ndarray)
x_f = x.astype(x.dtype, order='F')
assert_(x_f.flags.f_contiguous)
assert_(x_f.mask.flags.f_contiguous)
# Also test the same indirectly, via np.array
x_a2 = np.array(x, dtype=descr, subok=True)
assert x_a2.dtype.names == np.dtype(descr).names
assert x_a2.mask.dtype.names == np.dtype(descr).names
assert_equal(x, x_a2)
assert_(x is np.array(x, dtype=descr, copy=None, subok=True))
x_f2 = np.array(x, dtype=x.dtype, order='F', subok=True)
assert_(x_f2.flags.f_contiguous)
assert_(x_f2.mask.flags.f_contiguous)
@pytest.mark.parametrize('dt1', num_dts, ids=num_ids)
@pytest.mark.parametrize('dt2', num_dts, ids=num_ids)
@pytest.mark.filterwarnings('ignore::numpy.exceptions.ComplexWarning')
def test_astype_basic(dt1, dt2):
# See gh-12070
src = np.ma.array(ones(3, dt1), fill_value=1)
dst = src.astype(dt2)
assert_(src.fill_value == 1)
assert_(src.dtype == dt1)
assert_(src.fill_value.dtype == dt1)
assert_(dst.fill_value == 1)
assert_(dst.dtype == dt2)
assert_(dst.fill_value.dtype == dt2)
assert_equal(src, dst)
def test_fieldless_void():
dt = np.dtype([]) # a void dtype with no fields
x = np.empty(4, dt)
# these arrays contain no values, so there's little to test - but this
# shouldn't crash
mx = np.ma.array(x)
assert_equal(mx.dtype, x.dtype)
assert_equal(mx.shape, x.shape)
mx = np.ma.array(x, mask=x)
assert_equal(mx.dtype, x.dtype)
assert_equal(mx.shape, x.shape)
def test_mask_shape_assignment_does_not_break_masked():
a = np.ma.masked
b = np.ma.array(1, mask=a.mask)
b.shape = (1,)
assert_equal(a.mask.shape, ())
@pytest.mark.skipif(sys.flags.optimize > 1,
reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") # noqa: E501
def test_doc_note():
def method(self):
"""This docstring
Has multiple lines
And notes
Notes
-----
original note
"""
pass
expected_doc = """This docstring
Has multiple lines
And notes
Notes
-----
note
original note"""
assert_equal(np.ma.core.doc_note(method.__doc__, "note"), expected_doc)
def test_gh_22556():
source = np.ma.array([0, [0, 1, 2]], dtype=object)
deepcopy = copy.deepcopy(source)
deepcopy[1].append('this should not appear in source')
assert len(source[1]) == 3
def test_gh_21022():
# testing for absence of reported error
source = np.ma.masked_array(data=[-1, -1], mask=True, dtype=np.float64)
axis = np.array(0)
result = np.prod(source, axis=axis, keepdims=False)
result = np.ma.masked_array(result,
mask=np.ones(result.shape, dtype=np.bool))
array = np.ma.masked_array(data=-1, mask=True, dtype=np.float64)
copy.deepcopy(array)
copy.deepcopy(result)
def test_deepcopy_2d_obj():
source = np.ma.array([[0, "dog"],
[1, 1],
[[1, 2], "cat"]],
mask=[[0, 1],
[0, 0],
[0, 0]],
dtype=object)
deepcopy = copy.deepcopy(source)
deepcopy[2, 0].extend(['this should not appear in source', 3])
assert len(source[2, 0]) == 2
assert len(deepcopy[2, 0]) == 4
assert_equal(deepcopy._mask, source._mask)
deepcopy._mask[0, 0] = 1
assert source._mask[0, 0] == 0
def test_deepcopy_0d_obj():
source = np.ma.array(0, mask=[0], dtype=object)
deepcopy = copy.deepcopy(source)
deepcopy[...] = 17
assert_equal(source, 0)
assert_equal(deepcopy, 17)
def test_uint_fill_value_and_filled():
# See also gh-27269
a = np.ma.MaskedArray([1, 1], [True, False], dtype="uint16")
# the fill value should likely not be 99999, but for now guarantee it:
assert a.fill_value == 999999
# However, it's type is uint:
assert a.fill_value.dtype.kind == "u"
# And this ensures things like filled work:
np.testing.assert_array_equal(
a.filled(), np.array([999999, 1]).astype("uint16"), strict=True)
@pytest.mark.parametrize(
('fn', 'signature'),
[
(np.ma.nonzero, "(a)"),
(np.ma.anomalies, "(a, axis=None, dtype=None)"),
(np.ma.cumsum, "(a, axis=None, dtype=None, out=None)"),
(np.ma.compress, "(condition, a, axis=None, out=None)"),
]
)
def test_frommethod_signature(fn, signature):
assert str(inspect.signature(fn)) == signature
@pytest.mark.parametrize(
('fn', 'signature'),
[
(
np.ma.empty,
(
"(shape, dtype=None, order='C', *, device=None, like=None, "
"fill_value=None, hardmask=False)"
),
),
(
np.ma.empty_like,
(
"(prototype, /, dtype=None, order='K', subok=True, shape=None, *, "
"device=None)"
),
),
(np.ma.squeeze, "(a, axis=None, *, fill_value=None, hardmask=False)"),
(
np.ma.identity,
"(n, dtype=None, *, like=None, fill_value=None, hardmask=False)",
),
]
)
def test_convert2ma_signature(fn, signature):
assert str(inspect.signature(fn)) == signature
assert fn.__module__ == 'numpy.ma.core'
| TestMaskedWhereAliases |
python | doocs__leetcode | solution/3300-3399/3361.Shift Distance Between Two Strings/Solution.py | {
"start": 0,
"end": 600
} | class ____:
def shiftDistance(
self, s: str, t: str, nextCost: List[int], previousCost: List[int]
) -> int:
m = 26
s1 = [0] * (m << 1 | 1)
s2 = [0] * (m << 1 | 1)
for i in range(m << 1):
s1[i + 1] = s1[i] + nextCost[i % m]
s2[i + 1] = s2[i] + previousCost[(i + 1) % m]
ans = 0
for a, b in zip(s, t):
x, y = ord(a) - ord("a"), ord(b) - ord("a")
c1 = s1[y + m if y < x else y] - s1[x]
c2 = s2[x + m if x < y else x] - s2[y]
ans += min(c1, c2)
return ans
| Solution |
python | ray-project__ray | python/ray/llm/_internal/batch/stages/http_request_stage.py | {
"start": 351,
"end": 6691
} | class ____(StatefulStageUDF):
RETRYABLE_STATUS_CODES = [429, 408, 504, 502, 503]
def __init__(
self,
data_column: str,
expected_input_keys: List[str],
url: str,
additional_header: Optional[Dict[str, Any]] = None,
qps: Optional[int] = None,
max_retries: int = 0,
base_retry_wait_time_in_s: float = 1.0,
session_factory: Optional[Callable[[], aiohttp.ClientSession]] = None,
):
"""
Initialize the HttpRequestUDF.
Args:
data_column: The data column name.
expected_input_keys: The expected input keys of the stage.
url: The URL to send the HTTP request to.
additional_header: The additional headers to send with the HTTP request.
qps: The maximum number of requests per second.
max_retries: The maximum number of retries per request in the event of failures. We retry with exponential backoff upto this specific maximum retries.
base_retry_wait_time_in_s: The base retry wait time during exponential backoff.
session_factory: Optional session factory to be used for initializing a client session.
"""
super().__init__(data_column, expected_input_keys)
self.url = url
self.additional_header = additional_header or {}
self.qps = qps
self.max_retries = max_retries
self.base_retry_wait_time_in_s = base_retry_wait_time_in_s
self.session_factory = session_factory or aiohttp.ClientSession
async def udf(self, batch: List[Dict[str, Any]]) -> AsyncIterator[Dict[str, Any]]:
"""
Send HTTP requests to the given URL.
Args:
batch: A list of rows to send.
Yields:
A generator of rows of the response of the HTTP request.
"""
# preprocess to get request body for the given batch
request_bodies = [None] * len(batch)
for row in batch:
# Normalize the row to a JSON body.
json_body = {}
for key, value in row["payload"].items():
if isinstance(value, np.ndarray):
json_body[key] = value.tolist()
else:
json_body[key] = value
request_bodies[row[self.IDX_IN_BATCH_COLUMN]] = json_body
async with self.session_factory() as session:
start_time = time.time()
request_count = 0
pending_requests = []
headers = {
"Content-Type": "application/json",
**self.additional_header,
}
# First send all requests based on QPS
for row in batch:
# Rate limit based on qps if specified
if self.qps is not None:
request_count += 1
expected_time = request_count / self.qps
elapsed = time.time() - start_time
if elapsed < expected_time:
await asyncio.sleep(expected_time - elapsed)
# self.IDX_IN_BATCH_COLUMN is the index of row in the batch
json_body = request_bodies[row[self.IDX_IN_BATCH_COLUMN]]
# Create request but don't await it yet
request = session.post(
self.url,
headers=headers,
json=json_body,
)
pending_requests.append((row[self.IDX_IN_BATCH_COLUMN], request))
# Now receive all responses
for idx_in_batch_column, request in pending_requests:
resp_json = None
last_exception = None
last_exception_traceback = None
for retry_count in range(self.max_retries + 1):
if retry_count > 0:
json_body = request_bodies[idx_in_batch_column]
request = session.post(
self.url,
headers=headers,
json=json_body,
)
try:
async with await request as response:
status_code = response.status
# check status and see if it's retry worthy
if status_code in self.RETRYABLE_STATUS_CODES:
last_exception = aiohttp.web_exceptions.HTTPException(
reason=response.reason
)
last_exception.status_code = status_code
wait_time = self.base_retry_wait_time_in_s * (
2**retry_count
)
await asyncio.sleep(wait_time)
continue
resp_json = await response.json()
if self.IDX_IN_BATCH_COLUMN in resp_json:
raise ValueError(
"The response of the HTTP request must not contain "
f"the column {self.IDX_IN_BATCH_COLUMN}."
)
break
except (
asyncio.TimeoutError,
aiohttp.ClientConnectionError,
ClientPayloadError,
) as e:
last_exception_traceback = traceback.format_exc()
last_exception = type(e).__name__
wait_time = self.base_retry_wait_time_in_s * (2**retry_count)
await asyncio.sleep(wait_time)
continue
if not resp_json:
raise RuntimeError(
f"Reached maximum retries of {self.max_retries} for input row {batch[idx_in_batch_column]}. Previous Exception: {last_exception}. Full Traceback: \n{last_exception_traceback}"
)
yield {
self.IDX_IN_BATCH_COLUMN: idx_in_batch_column,
"http_response": resp_json,
}
| HttpRequestUDF |
python | pytorch__pytorch | torch/utils/file_baton.py | {
"start": 67,
"end": 2140
} | class ____:
"""A primitive, file-based synchronization utility."""
def __init__(self, lock_file_path, wait_seconds=0.1, warn_after_seconds=None) -> None:
"""
Create a new :class:`FileBaton`.
Args:
lock_file_path: The path to the file used for locking.
wait_seconds: The seconds to periodically sleep (spin) when
calling ``wait()``.
warn_after_seconds: The seconds to wait before showing
lock file path to warn existing lock file.
"""
self.lock_file_path = lock_file_path
self.wait_seconds = wait_seconds
self.fd = None
self.warn_after_seconds = warn_after_seconds
def try_acquire(self) -> bool | None:
"""
Try to atomically create a file under exclusive access.
Returns:
True if the file could be created, else False.
"""
try:
# pyrefly: ignore [bad-assignment]
self.fd = os.open(self.lock_file_path, os.O_CREAT | os.O_EXCL)
return True
except FileExistsError:
return False
def wait(self) -> None:
"""
Periodically sleeps for a certain amount until the baton is released.
The amount of time slept depends on the ``wait_seconds`` parameter
passed to the constructor.
"""
has_warned = False
start_time = time.time()
while os.path.exists(self.lock_file_path):
time.sleep(self.wait_seconds)
if self.warn_after_seconds is not None:
if time.time() - start_time > self.warn_after_seconds and not has_warned:
warnings.warn(f'Waited on lock file "{self.lock_file_path}" for '
f'{self.warn_after_seconds} seconds.', stacklevel=2)
has_warned = True
def release(self) -> None:
"""Release the baton and removes its file."""
if self.fd is not None:
os.close(self.fd)
os.remove(self.lock_file_path)
| FileBaton |
python | apache__airflow | providers/databricks/src/airflow/providers/databricks/hooks/databricks.py | {
"start": 6863,
"end": 8856
} | class ____:
"""Utility class for the SQL statement state concept of Databricks statements."""
SQL_STATEMENT_LIFE_CYCLE_STATES = [
"PENDING",
"RUNNING",
"SUCCEEDED",
"FAILED",
"CANCELED",
"CLOSED",
]
def __init__(
self, state: str = "", error_code: str = "", error_message: str = "", *args, **kwargs
) -> None:
if state not in self.SQL_STATEMENT_LIFE_CYCLE_STATES:
raise AirflowException(
f"Unexpected SQL statement life cycle state: {state}: If the state has "
"been introduced recently, please check the Databricks user "
"guide for troubleshooting information"
)
self.state = state
self.error_code = error_code
self.error_message = error_message
@property
def is_terminal(self) -> bool:
"""True if the current state is a terminal state."""
return self.state in ("SUCCEEDED", "FAILED", "CANCELED", "CLOSED")
@property
def is_running(self) -> bool:
"""True if the current state is running."""
return self.state in ("PENDING", "RUNNING")
@property
def is_successful(self) -> bool:
"""True if the state is SUCCEEDED."""
return self.state == "SUCCEEDED"
def __eq__(self, other: object) -> bool:
if not isinstance(other, SQLStatementState):
return NotImplemented
return (
self.state == other.state
and self.error_code == other.error_code
and self.error_message == other.error_message
)
def __hash__(self):
return hash((self.state, self.error_code, self.error_message))
def __repr__(self) -> str:
return str(self.__dict__)
def to_json(self) -> str:
return json.dumps(self.__dict__)
@classmethod
def from_json(cls, data: str) -> SQLStatementState:
return SQLStatementState(**json.loads(data))
| SQLStatementState |
python | huggingface__transformers | src/transformers/pipelines/keypoint_matching.py | {
"start": 1076,
"end": 1981
} | class ____(TypedDict):
keypoint_image_0: Keypoint
keypoint_image_1: Keypoint
score: float
def validate_image_pairs(images: Any) -> Sequence[Sequence[ImagePair]]:
error_message = (
"Input images must be a one of the following :",
" - A pair of images.",
" - A list of pairs of images.",
)
def _is_valid_image(image):
"""images is a PIL Image or a string."""
return is_pil_image(image) or isinstance(image, str)
if isinstance(images, Sequence):
if len(images) == 2 and all((_is_valid_image(image)) for image in images):
return [images]
if all(
isinstance(image_pair, Sequence)
and len(image_pair) == 2
and all(_is_valid_image(image) for image in image_pair)
for image_pair in images
):
return images
raise ValueError(error_message)
| Match |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/input_manager.py | {
"start": 7485,
"end": 8868
} | class ____:
def __init__(
self,
config_schema: CoercableToConfigSchema = None,
description: Optional[str] = None,
version: Optional[str] = None,
input_config_schema: CoercableToConfigSchema = None,
required_resource_keys: Optional[AbstractSet[str]] = None,
):
self.config_schema = config_schema
self.description = check.opt_str_param(description, "description")
self.version = check.opt_str_param(version, "version")
self.input_config_schema = input_config_schema
self.required_resource_keys = required_resource_keys
def __call__(self, load_fn: InputLoadFn) -> InputManagerDefinition:
check.callable_param(load_fn, "load_fn")
def _resource_fn(_):
return InputManagerWrapper(load_fn)
input_manager_def = InputManagerDefinition(
resource_fn=_resource_fn,
config_schema=self.config_schema,
description=self.description,
version=self.version,
input_config_schema=self.input_config_schema,
required_resource_keys=self.required_resource_keys,
)
# `update_wrapper` typing cannot currently handle a Union of Callables correctly
update_wrapper(input_manager_def, wrapped=load_fn) # type: ignore
return input_manager_def
| _InputManagerDecoratorCallable |
python | getsentry__sentry | src/sentry/incidents/models/incident.py | {
"start": 10863,
"end": 12375
} | class ____(Model):
"""
An instance of an alert rule trigger (eg. each time the rule hits the trigger threshold, we create an incident trigger)
NOTE: dissimilar to an AlertRuleTrigger which represents the trigger threshold required to initialize an Incident
"""
__relocation_scope__ = RelocationScope.Global
objects: ClassVar[IncidentTriggerManager] = IncidentTriggerManager()
incident = FlexibleForeignKey("sentry.Incident", db_index=False)
alert_rule_trigger = FlexibleForeignKey("sentry.AlertRuleTrigger")
status = models.SmallIntegerField()
date_modified = models.DateTimeField(default=timezone.now, null=False)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "sentry"
db_table = "sentry_incidenttrigger"
unique_together = (("incident", "alert_rule_trigger"),)
indexes = (models.Index(fields=("alert_rule_trigger", "incident_id")),)
post_save.connect(IncidentManager.clear_active_incident_cache, sender=Incident)
post_save.connect(IncidentManager.clear_active_incident_project_cache, sender=IncidentProject)
post_delete.connect(IncidentManager.clear_active_incident_project_cache, sender=IncidentProject)
post_delete.connect(IncidentTriggerManager.clear_incident_cache, sender=Incident)
post_save.connect(IncidentTriggerManager.clear_incident_trigger_cache, sender=IncidentTrigger)
post_delete.connect(IncidentTriggerManager.clear_incident_trigger_cache, sender=IncidentTrigger)
| IncidentTrigger |
python | pypa__pipenv | pipenv/exceptions.py | {
"start": 10568,
"end": 11017
} | class ____(PipenvException):
def __init__(self, message):
extra = [
"{} {}".format(
click.style("The operation failed...", bold=True, fg="red"),
click.style(
"A dependency conflict was detected and could not be resolved.",
fg="red",
),
)
]
PipenvException.__init__(self, message, extra=extra)
| DependencyConflict |
python | allegroai__clearml | clearml/debugging/log.py | {
"start": 12358,
"end": 12473
} | class ____(logging.handlers.TimedRotatingFileHandler, ClearmlLoggerHandler):
pass
| ClearmlTimedRotatingFileHandler |
python | pypa__warehouse | warehouse/events/tags.py | {
"start": 53,
"end": 1262
} | class ____(str, enum.Enum):
"""Base class for Enum representing Event tags.
Tags can be broken into three colon-separated parts:
1. source type
2. subject type
3. action
For example, for event tag "project:role:add":
1. "project" is the source type
2. "role" is the subject type
3. "add" is the action
In some cases, the subject type can contain a colon:
For example, for event tag "project:release:file:remove":
1. "project" is the source type
2. "release:file" is the subject type
3. "remove" is the action
If omitted, subject type is implied to be the same as source type.
For example, for event tag "project:create":
1. "project" is the source type
2. "project" is also the subject type
3. "create" is the action
"""
source_type: str
subject_type: str
action: str
# Name = "source_type:subject_type:action"
def __new__(cls, value: str):
values = value.split(":")
obj = str.__new__(cls, value)
obj._value_ = value
obj.source_type = values[0]
obj.subject_type = ":".join(values[1:-1]) or value[0]
obj.action = values[-1]
return obj
| EventTagEnum |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/processors.py | {
"start": 18055,
"end": 18980
} | class ____(Processor):
"""
Insert text after the input.
:param text: This can be either plain text or formatted text
(or a callable that returns any of those).
:param style: style to be applied to this prompt/prefix.
"""
def __init__(self, text: AnyFormattedText, style: str = "") -> None:
self.text = text
self.style = style
def apply_transformation(self, ti: TransformationInput) -> Transformation:
# Insert fragments after the last line.
if ti.lineno == ti.document.line_count - 1:
# Get fragments.
fragments_after = to_formatted_text(self.text, self.style)
return Transformation(fragments=ti.fragments + fragments_after)
else:
return Transformation(fragments=ti.fragments)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.text!r}, style={self.style!r})"
| AfterInput |
python | coleifer__peewee | tests/prefetch_tests.py | {
"start": 365,
"end": 470
} | class ____(TestModel):
person = ForeignKeyField(Person, backref='notes')
content = TextField()
| Note |
python | keras-team__keras | keras/src/utils/text_dataset_utils_test.py | {
"start": 188,
"end": 14332
} | class ____(testing.TestCase):
def _prepare_directory(
self, num_classes=2, nested_dirs=False, count=16, length=20
):
# Get a unique temp directory
temp_dir = self.get_temp_dir()
# Generate paths to class subdirectories
paths = []
for class_index in range(num_classes):
class_directory = f"class_{class_index}"
if nested_dirs:
class_paths = [
class_directory,
os.path.join(class_directory, "subfolder_1"),
os.path.join(class_directory, "subfolder_2"),
os.path.join(
class_directory, "subfolder_1", "sub-subfolder"
),
]
else:
class_paths = [class_directory]
for path in class_paths:
os.mkdir(os.path.join(temp_dir, path))
paths += class_paths
for i in range(count):
path = paths[i % len(paths)]
filename = os.path.join(path, f"text_{i}.txt")
with open(os.path.join(temp_dir, filename), "w") as f:
text = "".join(
[random.choice(string.printable) for _ in range(length)]
)
f.write(text)
return temp_dir
@parameterized.named_parameters(
("tf", "tf"),
("grain", "grain"),
)
def test_text_dataset_from_directory_standalone(self, format):
# Test retrieving txt files without labels from a directory and its
# subdirs. Save a few extra files in the parent directory.
directory = self._prepare_directory(count=7, num_classes=2)
for i in range(3):
filename = f"text_{i}.txt"
with open(os.path.join(directory, filename), "w") as f:
text = "".join(
[random.choice(string.printable) for _ in range(20)]
)
f.write(text)
dataset = text_dataset_utils.text_dataset_from_directory(
directory,
batch_size=5,
label_mode=None,
max_length=10,
format=format,
)
batch = next(iter(dataset))
# We just return the texts, no labels
if format == "tf" or backend.backend() == "tensorflow":
self.assertEqual(list(batch.shape), [5])
self.assertDType(batch, "string")
else:
self.assertLen(batch, 5)
self.assertIsInstance(batch[0], str)
# Count samples
batch_count = 0
sample_count = 0
for batch in dataset:
batch_count += 1
sample_count += len(batch)
self.assertEqual(batch_count, 2)
self.assertEqual(sample_count, 10)
@parameterized.named_parameters(
("tf", "tf"),
("grain", "grain"),
)
def test_text_dataset_from_directory_binary(self, format=format):
directory = self._prepare_directory(num_classes=2)
dataset = text_dataset_utils.text_dataset_from_directory(
directory,
batch_size=8,
label_mode="int",
max_length=10,
format=format,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
if format == "tf" or backend.backend() == "tensorflow":
self.assertEqual(batch[0].shape, (8,))
self.assertDType(batch[0], "string")
self.assertEqual(len(batch[0].numpy()[0]), 10) # Test max_length
else:
self.assertLen(batch[0], 8)
self.assertIsInstance(batch[0][0], str)
self.assertLen(batch[0][0], 10) # Test max_length
self.assertEqual(list(batch[1].shape), [8])
self.assertDType(batch[1], "int32")
dataset = text_dataset_utils.text_dataset_from_directory(
directory,
batch_size=8,
label_mode="binary",
format=format,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
if format == "tf" or backend.backend() == "tensorflow":
self.assertEqual(list(batch[0].shape), [8])
self.assertEqual(batch[0].dtype.name, "string")
else:
self.assertLen(batch[0], 8)
self.assertIsInstance(batch[0][0], str)
self.assertEqual(list(batch[1].shape), [8, 1])
self.assertDType(batch[1], "float32")
dataset = text_dataset_utils.text_dataset_from_directory(
directory,
batch_size=8,
label_mode="categorical",
format=format,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
if format == "tf" or backend.backend() == "tensorflow":
self.assertEqual(list(batch[0].shape), [8])
self.assertEqual(batch[0].dtype.name, "string")
else:
self.assertLen(batch[0], 8)
self.assertIsInstance(batch[0][0], str)
self.assertEqual(list(batch[1].shape), [8, 2])
self.assertDType(batch[1], "float32")
@parameterized.named_parameters(
("tf", "tf"),
("grain", "grain"),
)
def test_sample_count(self, format):
directory = self._prepare_directory(num_classes=4, count=15)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode=None, format=format
)
sample_count = 0
for batch in dataset:
sample_count += len(batch)
self.assertEqual(sample_count, 15)
@parameterized.named_parameters(
("tf", "tf"),
("grain", "grain"),
)
def test_text_dataset_from_directory_multiclass(self, format):
directory = self._prepare_directory(num_classes=4, count=15)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode=None, format=format
)
batch = next(iter(dataset))
self.assertLen(batch, 8)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode=None, format=format
)
sample_count = 0
iterator = iter(dataset)
for batch in dataset:
sample_count += len(next(iterator))
self.assertEqual(sample_count, 15)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode="int", format=format
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
if format == "tf" or backend.backend() == "tensorflow":
self.assertEqual(list(batch[0].shape), [8])
self.assertEqual(batch[0].dtype.name, "string")
else:
self.assertLen(batch[0], 8)
self.assertIsInstance(batch[0][0], str)
self.assertEqual(list(batch[1].shape), [8])
self.assertDType(batch[1], "int32")
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode="categorical", format=format
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
if format == "tf" or backend.backend() == "tensorflow":
self.assertEqual(list(batch[0].shape), [8])
self.assertEqual(batch[0].dtype.name, "string")
else:
self.assertLen(batch[0], 8)
self.assertIsInstance(batch[0][0], str)
self.assertEqual(list(batch[1].shape), [8, 4])
self.assertDType(batch[1], "float32")
@parameterized.named_parameters(
("tf", "tf"),
("grain", "grain"),
)
def test_text_dataset_from_directory_validation_split(self, format):
directory = self._prepare_directory(num_classes=2, count=10)
dataset = text_dataset_utils.text_dataset_from_directory(
directory,
batch_size=10,
validation_split=0.2,
subset="training",
seed=1337,
format=format,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertLen(batch[0], 8)
dataset = text_dataset_utils.text_dataset_from_directory(
directory,
batch_size=10,
validation_split=0.2,
subset="validation",
seed=1337,
format=format,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertLen(batch[0], 2)
(
train_dataset,
val_dataset,
) = text_dataset_utils.text_dataset_from_directory(
directory,
batch_size=10,
validation_split=0.2,
subset="both",
seed=1337,
format=format,
)
batch = next(iter(train_dataset))
self.assertLen(batch, 2)
self.assertLen(batch[0], 8)
batch = next(iter(val_dataset))
self.assertLen(batch, 2)
self.assertLen(batch[0], 2)
@parameterized.named_parameters(
("tf", "tf"),
("grain", "grain"),
)
def test_text_dataset_from_directory_manual_labels(self, format):
directory = self._prepare_directory(num_classes=2, count=2)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, labels=[0, 1], shuffle=False, format=format
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertAllClose(batch[1], [0, 1])
@parameterized.named_parameters(
("tf", "tf"),
("grain", "grain"),
)
def test_text_dataset_from_directory_follow_links(self, format):
directory = self._prepare_directory(
num_classes=2, count=25, nested_dirs=True
)
dataset = text_dataset_utils.text_dataset_from_directory(
directory,
batch_size=8,
label_mode=None,
follow_links=True,
format=format,
)
sample_count = 0
for batch in dataset:
sample_count += len(batch)
self.assertEqual(sample_count, 25)
@parameterized.named_parameters(
("tf", "tf"),
("grain", "grain"),
)
def test_text_dataset_from_directory_no_files(self, format):
directory = self._prepare_directory(num_classes=2, count=0)
with self.assertRaisesRegex(ValueError, "No text files found"):
_ = text_dataset_utils.text_dataset_from_directory(
directory, format=format
)
@parameterized.named_parameters(
("tf", "tf"),
("grain", "grain"),
)
def test_text_dataset_from_directory_errors(self, format):
directory = self._prepare_directory(num_classes=3, count=5)
with self.assertRaisesRegex(ValueError, "`labels` argument should be"):
_ = text_dataset_utils.text_dataset_from_directory(
directory, labels="other", format=format
)
with self.assertRaisesRegex(
ValueError, "`label_mode` argument must be"
):
_ = text_dataset_utils.text_dataset_from_directory(
directory, label_mode="other", format=format
)
with self.assertRaisesRegex(
ValueError, 'only pass `class_names` if `labels="inferred"`'
):
_ = text_dataset_utils.text_dataset_from_directory(
directory,
labels=[0, 0, 1, 1, 1],
class_names=["class_0", "class_1", "class_2"],
format=format,
)
with self.assertRaisesRegex(
ValueError,
"Expected the lengths of `labels` to match the number of files",
):
_ = text_dataset_utils.text_dataset_from_directory(
directory, labels=[0, 0, 1, 1], format=format
)
with self.assertRaisesRegex(
ValueError, "`class_names` passed did not match"
):
_ = text_dataset_utils.text_dataset_from_directory(
directory, class_names=["class_0", "wrong_class"], format=format
)
with self.assertRaisesRegex(ValueError, "there must be exactly 2"):
_ = text_dataset_utils.text_dataset_from_directory(
directory, label_mode="binary", format=format
)
with self.assertRaisesRegex(
ValueError, "`validation_split` must be between 0 and 1"
):
_ = text_dataset_utils.text_dataset_from_directory(
directory, validation_split=2, format=format
)
with self.assertRaisesRegex(
ValueError,
'`subset` must be either "training", "validation" or "both"',
):
_ = text_dataset_utils.text_dataset_from_directory(
directory, validation_split=0.2, subset="other", format=format
)
with self.assertRaisesRegex(
ValueError, "`validation_split` must be set"
):
_ = text_dataset_utils.text_dataset_from_directory(
directory,
validation_split=0.0,
subset="training",
format=format,
)
with self.assertRaisesRegex(ValueError, "must provide a `seed`"):
_ = text_dataset_utils.text_dataset_from_directory(
directory,
validation_split=0.2,
subset="training",
format=format,
)
@parameterized.named_parameters(
("tf", "tf"),
("grain", "grain"),
)
def test_text_dataset_from_directory_not_batched(self, format):
directory = self._prepare_directory()
dataset = text_dataset_utils.text_dataset_from_directory(
directory,
batch_size=None,
label_mode=None,
follow_links=True,
format=format,
)
sample = next(iter(dataset))
if format == "tf":
self.assertEqual(len(sample.shape), 0)
else:
self.assertIsInstance(sample, str)
| TextDatasetFromDirectoryTest |
python | scipy__scipy | scipy/signal/tests/test_upfirdn.py | {
"start": 4879,
"end": 13011
} | class ____:
@skip_xp_backends(np_only=True, reason="enough to only test on numpy")
def test_valid_input(self, xp):
assert_raises(ValueError, upfirdn, [1], [1], 1, 0) # up or down < 1
assert_raises(ValueError, upfirdn, [], [1], 1, 1) # h.ndim != 1
assert_raises(ValueError, upfirdn, [[1]], [1], 1, 1)
@pytest.mark.parametrize('len_h', [1, 2, 3, 4, 5])
@pytest.mark.parametrize('len_x', [1, 2, 3, 4, 5])
def test_singleton(self, len_h, len_x, xp):
# gh-9844: lengths producing expected outputs
h = xp.zeros(len_h)
h = xpx.at(h)[len_h // 2].set(1.) # make h a delta
x = xp.ones(len_x)
y = upfirdn(h, x, 1, 1)
want = xpx.pad(x, (len_h // 2, (len_h - 1) // 2), 'constant', xp=xp)
xp_assert_close(y, want)
def test_shift_x(self, xp):
# gh-9844: shifted x can change values?
y = upfirdn(xp.asarray([1, 1]), xp.asarray([1.]), 1, 1)
xp_assert_close(
y, xp.asarray([1.0, 1.0], dtype=xp.float64) # was [0, 1] in the issue
)
y = upfirdn(xp.asarray([1, 1]), xp.asarray([0., 1.]), 1, 1)
xp_assert_close(y, xp.asarray([0.0, 1.0, 1.0], dtype=xp.float64))
# A bunch of lengths/factors chosen because they exposed differences
# between the "old way" and new way of computing length, and then
# got `expected` from MATLAB
@pytest.mark.parametrize('len_h, len_x, up, down, expected', [
(2, 2, 5, 2, [1, 0, 0, 0]),
(2, 3, 6, 3, [1, 0, 1, 0, 1]),
(2, 4, 4, 3, [1, 0, 0, 0, 1]),
(3, 2, 6, 2, [1, 0, 0, 1, 0]),
(4, 11, 3, 5, [1, 0, 0, 1, 0, 0, 1]),
])
def test_length_factors(self, len_h, len_x, up, down, expected, xp):
# gh-9844: weird factors
h = xp.zeros(len_h)
h = xpx.at(h)[0].set(1.)
x = xp.ones(len_x, dtype=xp.float64)
y = upfirdn(h, x, up, down)
expected = xp.asarray(expected, dtype=xp.float64)
xp_assert_close(y, expected)
@pytest.mark.parametrize(
'dtype', ["int64", "float32", "complex64", "float64", "complex128"]
)
@pytest.mark.parametrize('down, want_len', [ # lengths from MATLAB
(2, 5015),
(11, 912),
(79, 127),
])
def test_vs_convolve(self, down, want_len, dtype, xp):
# Check that up=1.0 gives same answer as convolve + slicing
random_state = np.random.RandomState(17)
size = 10000
np_dtype = getattr(np, dtype)
x = random_state.randn(size).astype(np_dtype)
if np_dtype in (np.complex64, np.complex128):
x += 1j * random_state.randn(size)
dtype = getattr(xp, dtype)
x = xp.asarray(x, dtype=dtype)
h = xp.asarray(firwin(31, 1. / down, window='hamming'))
yl = xp.asarray(
upfirdn_naive(_xp_copy_to_numpy(x), _xp_copy_to_numpy(h), 1, down)
)
y = upfirdn(h, x, up=1, down=down)
assert y.shape == (want_len,)
assert yl.shape[0] == y.shape[0]
xp_assert_close(yl, y, atol=1e-7, rtol=1e-7)
@pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES)
@pytest.mark.parametrize('h', (1., 1j))
@pytest.mark.parametrize('up, down', [(1, 1), (2, 2), (3, 2), (2, 3)])
def test_vs_naive_delta(self, x_dtype, h, up, down, xp):
UpFIRDnCase(up, down, h, x_dtype, xp=xp)()
@pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES)
@pytest.mark.parametrize('h_dtype', _UPFIRDN_TYPES)
@pytest.mark.parametrize('p_max, q_max',
list(product((10, 100), (10, 100))))
def test_vs_naive(self, x_dtype, h_dtype, p_max, q_max, xp):
tests = self._random_factors(p_max, q_max, h_dtype, x_dtype, xp=xp)
for test in tests:
test()
def _random_factors(self, p_max, q_max, h_dtype, x_dtype, *, xp):
n_rep = 3
longest_h = 25
random_state = np.random.RandomState(17)
tests = []
for _ in range(n_rep):
# Randomize the up/down factors somewhat
p_add = q_max if p_max > q_max else 1
q_add = p_max if q_max > p_max else 1
p = random_state.randint(p_max) + p_add
q = random_state.randint(q_max) + q_add
# Generate random FIR coefficients
len_h = random_state.randint(longest_h) + 1
h = np.atleast_1d(random_state.randint(len_h))
h = h.astype(h_dtype)
if h_dtype is complex:
h += 1j * random_state.randint(len_h)
tests.append(UpFIRDnCase(p, q, h, x_dtype, xp=xp))
return tests
@pytest.mark.parametrize('mode', _upfirdn_modes)
def test_extensions(self, mode, xp):
"""Test vs. manually computed results for modes not in numpy's pad."""
x = np.asarray([1, 2, 3, 1], dtype=np.float64)
npre, npost = 6, 6
y = _pad_test(x, npre=npre, npost=npost, mode=mode)
x = xp.asarray(x)
y = xp.asarray(y)
if mode == 'antisymmetric':
y_expected = xp.asarray(
[3.0, 1, -1, -3, -2, -1, 1, 2, 3, 1, -1, -3, -2, -1, 1, 2])
elif mode == 'antireflect':
y_expected = xp.asarray(
[1.0, 2, 3, 1, -1, 0, 1, 2, 3, 1, -1, 0, 1, 2, 3, 1])
elif mode == 'smooth':
y_expected = xp.asarray(
[-5.0, -4, -3, -2, -1, 0, 1, 2, 3, 1, -1, -3, -5, -7, -9, -11])
elif mode == "line":
lin_slope = (x[-1] - x[0]) / (x.shape[0] - 1)
left = x[0] + xp.arange(-npre, 0, 1, dtype=xp.float64) * lin_slope
right = x[-1] + xp.arange(1, npost + 1, dtype=xp.float64) * lin_slope
concat = array_namespace(left).concat
y_expected = concat((left, x, right))
else:
y_expected = np.pad(_xp_copy_to_numpy(x), (npre, npost), mode=mode)
y_expected = xp.asarray(y_expected)
y_expected = xp.asarray(y_expected, dtype=xp.float64)
xp_assert_close(y, y_expected)
@pytest.mark.parametrize(
'size, h_len, mode, dtype',
product(
[8],
[4, 5, 26], # include cases with h_len > 2*size
_upfirdn_modes,
["float32", "float64", "complex64", "complex128"],
)
)
def test_modes(self, size, h_len, mode, dtype, xp):
if is_cupy(xp) and mode != "constant":
pytest.skip(reason="only mode='constant' supported by CuPy")
dtype_np = getattr(np, dtype)
dtype_xp = getattr(xp, dtype)
random_state = np.random.RandomState(5)
x = random_state.randn(size).astype(dtype_np)
if dtype in ("complex64", "complex128"):
x += 1j * random_state.randn(size)
h = np.arange(1, 1 + h_len, dtype=x.real.dtype)
x = xp.asarray(x, dtype=dtype_xp)
h = xp.asarray(h)
y = upfirdn(h, x, up=1, down=1, mode=mode)
# expected result: pad the input, filter with zero padding, then crop
npad = h_len - 1
if mode in ['antisymmetric', 'antireflect', 'smooth', 'line']:
# use _pad_test test function for modes not supported by np.pad.
xpad = _pad_test(_xp_copy_to_numpy(x), npre=npad, npost=npad, mode=mode)
else:
xpad = np.pad(_xp_copy_to_numpy(x), npad, mode=mode)
xpad = xp.asarray(xpad)
ypad = upfirdn(h, xpad, up=1, down=1, mode='constant')
y_expected = ypad[npad:-npad]
atol = rtol = xp.finfo(dtype_xp).eps * 1e2
xp_assert_close(y, y_expected, atol=atol, rtol=rtol)
@make_xp_test_case(upfirdn)
def test_output_len_long_input(xp):
# Regression test for gh-17375. On Windows, a large enough input
# that should have been well within the capabilities of 64 bit integers
# would result in a 32 bit overflow because of a bug in Cython 0.29.32.
len_h = 1001
in_len = 10**8
up = 320
down = 441
out_len = _output_len(len_h, in_len, up, down)
# The expected value was computed "by hand" from the formula
# (((in_len - 1) * up + len_h) - 1) // down + 1
assert out_len == 72562360
| TestUpfirdn |
python | doocs__leetcode | solution/1800-1899/1874.Minimize Product Sum of Two Arrays/Solution.py | {
"start": 0,
"end": 198
} | class ____:
def minProductSum(self, nums1: List[int], nums2: List[int]) -> int:
nums1.sort()
nums2.sort(reverse=True)
return sum(x * y for x, y in zip(nums1, nums2))
| Solution |
python | django__django | django/utils/connection.py | {
"start": 888,
"end": 2554
} | class ____:
settings_name = None
exception_class = ConnectionDoesNotExist
thread_critical = False
def __init__(self, settings=None):
self._settings = settings
self._connections = Local(self.thread_critical)
@cached_property
def settings(self):
self._settings = self.configure_settings(self._settings)
return self._settings
def configure_settings(self, settings):
if settings is None:
settings = getattr(django_settings, self.settings_name)
return settings
def create_connection(self, alias):
raise NotImplementedError("Subclasses must implement create_connection().")
def __getitem__(self, alias):
try:
return getattr(self._connections, alias)
except AttributeError:
if alias not in self.settings:
raise self.exception_class(f"The connection '{alias}' doesn't exist.")
conn = self.create_connection(alias)
setattr(self._connections, alias, conn)
return conn
def __setitem__(self, key, value):
setattr(self._connections, key, value)
def __delitem__(self, key):
delattr(self._connections, key)
def __iter__(self):
return iter(self.settings)
def all(self, initialized_only=False):
return [
self[alias]
for alias in self
# If initialized_only is True, return only initialized connections.
if not initialized_only or hasattr(self._connections, alias)
]
def close_all(self):
for conn in self.all(initialized_only=True):
conn.close()
| BaseConnectionHandler |
python | matplotlib__matplotlib | lib/mpl_toolkits/mplot3d/art3d.py | {
"start": 39160,
"end": 58574
} | class ____(PolyCollection):
"""
A collection of 3D polygons.
.. note::
**Filling of 3D polygons**
There is no simple definition of the enclosed surface of a 3D polygon
unless the polygon is planar.
In practice, Matplotlib fills the 2D projection of the polygon. This
gives a correct filling appearance only for planar polygons. For all
other polygons, you'll find orientations in which the edges of the
polygon intersect in the projection. This will lead to an incorrect
visualization of the 3D area.
If you need filled areas, it is recommended to create them via
`~mpl_toolkits.mplot3d.axes3d.Axes3D.plot_trisurf`, which creates a
triangulation and thus generates consistent surfaces.
"""
def __init__(self, verts, *args, zsort='average', shade=False,
lightsource=None, axlim_clip=False, **kwargs):
"""
Parameters
----------
verts : list of (N, 3) array-like
The sequence of polygons [*verts0*, *verts1*, ...] where each
element *verts_i* defines the vertices of polygon *i* as a 2D
array-like of shape (N, 3).
zsort : {'average', 'min', 'max'}, default: 'average'
The calculation method for the z-order.
See `~.Poly3DCollection.set_zsort` for details.
shade : bool, default: False
Whether to shade *facecolors* and *edgecolors*. When activating
*shade*, *facecolors* and/or *edgecolors* must be provided.
.. versionadded:: 3.7
lightsource : `~matplotlib.colors.LightSource`, optional
The lightsource to use when *shade* is True.
.. versionadded:: 3.7
axlim_clip : bool, default: False
Whether to hide polygons with a vertex outside the view limits.
.. versionadded:: 3.10
*args, **kwargs
All other parameters are forwarded to `.PolyCollection`.
Notes
-----
Note that this class does a bit of magic with the _facecolors
and _edgecolors properties.
"""
if shade:
normals = _generate_normals(verts)
facecolors = kwargs.get('facecolors', None)
if facecolors is not None:
kwargs['facecolors'] = _shade_colors(
facecolors, normals, lightsource
)
edgecolors = kwargs.get('edgecolors', None)
if edgecolors is not None:
kwargs['edgecolors'] = _shade_colors(
edgecolors, normals, lightsource
)
if facecolors is None and edgecolors is None:
raise ValueError(
"You must provide facecolors, edgecolors, or both for "
"shade to work.")
super().__init__(verts, *args, **kwargs)
if isinstance(verts, np.ndarray):
if verts.ndim != 3:
raise ValueError('verts must be a list of (N, 3) array-like')
else:
if any(len(np.shape(vert)) != 2 for vert in verts):
raise ValueError('verts must be a list of (N, 3) array-like')
self.set_zsort(zsort)
self._codes3d = None
self._axlim_clip = axlim_clip
_zsort_functions = {
'average': np.average,
'min': np.min,
'max': np.max,
}
def set_zsort(self, zsort):
"""
Set the calculation method for the z-order.
Parameters
----------
zsort : {'average', 'min', 'max'}
The function applied on the z-coordinates of the vertices in the
viewer's coordinate system, to determine the z-order.
"""
self._zsortfunc = self._zsort_functions[zsort]
self._sort_zpos = None
self.stale = True
@_api.deprecated("3.10")
def get_vector(self, segments3d):
return self._get_vector(segments3d)
def _get_vector(self, segments3d):
"""
Optimize points for projection.
Parameters
----------
segments3d : NumPy array or list of NumPy arrays
List of vertices of the boundary of every segment. If all paths are
of equal length and this argument is a NumPy array, then it should
be of shape (num_faces, num_vertices, 3).
"""
if isinstance(segments3d, np.ndarray):
_api.check_shape((None, None, 3), segments3d=segments3d)
if isinstance(segments3d, np.ma.MaskedArray):
self._faces = segments3d.data
self._invalid_vertices = segments3d.mask.any(axis=-1)
else:
self._faces = segments3d
self._invalid_vertices = False
else:
# Turn the potentially ragged list into a numpy array for later speedups
# If it is ragged, set the unused vertices per face as invalid
num_faces = len(segments3d)
num_verts = np.fromiter(map(len, segments3d), dtype=np.intp)
max_verts = num_verts.max(initial=0)
segments = np.empty((num_faces, max_verts, 3))
for i, face in enumerate(segments3d):
segments[i, :len(face)] = face
self._faces = segments
self._invalid_vertices = np.arange(max_verts) >= num_verts[:, None]
def set_verts(self, verts, closed=True):
"""
Set 3D vertices.
Parameters
----------
verts : list of (N, 3) array-like
The sequence of polygons [*verts0*, *verts1*, ...] where each
element *verts_i* defines the vertices of polygon *i* as a 2D
array-like of shape (N, 3).
closed : bool, default: True
Whether the polygon should be closed by adding a CLOSEPOLY
connection at the end.
"""
self._get_vector(verts)
# 2D verts will be updated at draw time
super().set_verts([], False)
self._closed = closed
def set_verts_and_codes(self, verts, codes):
"""Set 3D vertices with path codes."""
# set vertices with closed=False to prevent PolyCollection from
# setting path codes
self.set_verts(verts, closed=False)
# and set our own codes instead.
self._codes3d = codes
def set_3d_properties(self, axlim_clip=False):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
self._sort_zpos = None
self.set_zsort('average')
self._facecolor3d = PolyCollection.get_facecolor(self)
self._edgecolor3d = PolyCollection.get_edgecolor(self)
self._alpha3d = PolyCollection.get_alpha(self)
self.stale = True
def set_sort_zpos(self, val):
"""Set the position to use for z-sorting."""
self._sort_zpos = val
self.stale = True
def do_3d_projection(self):
"""
Perform the 3D projection for this object.
"""
if self._A is not None:
# force update of color mapping because we re-order them
# below. If we do not do this here, the 2D draw will call
# this, but we will never port the color mapped values back
# to the 3D versions.
#
# We hold the 3D versions in a fixed order (the order the user
# passed in) and sort the 2D version by view depth.
self.update_scalarmappable()
if self._face_is_mapped:
self._facecolor3d = self._facecolors
if self._edge_is_mapped:
self._edgecolor3d = self._edgecolors
needs_masking = np.any(self._invalid_vertices)
num_faces = len(self._faces)
mask = self._invalid_vertices
# Some faces might contain masked vertices, so we want to ignore any
# errors that those might cause
with np.errstate(invalid='ignore', divide='ignore'):
pfaces = proj3d._proj_transform_vectors(self._faces, self.axes.M)
if self._axlim_clip:
viewlim_mask = _viewlim_mask(self._faces[..., 0], self._faces[..., 1],
self._faces[..., 2], self.axes)
if np.any(viewlim_mask):
needs_masking = True
mask = mask | viewlim_mask
pzs = pfaces[..., 2]
if needs_masking:
pzs = np.ma.MaskedArray(pzs, mask=mask)
# This extra fuss is to re-order face / edge colors
cface = self._facecolor3d
cedge = self._edgecolor3d
if len(cface) != num_faces:
cface = cface.repeat(num_faces, axis=0)
if len(cedge) != num_faces:
if len(cedge) == 0:
cedge = cface
else:
cedge = cedge.repeat(num_faces, axis=0)
if len(pzs) > 0:
face_z = self._zsortfunc(pzs, axis=-1)
else:
face_z = pzs
if needs_masking:
face_z = face_z.data
face_order = np.argsort(face_z, axis=-1)[::-1]
if len(pfaces) > 0:
faces_2d = pfaces[face_order, :, :2]
else:
faces_2d = pfaces
if self._codes3d is not None and len(self._codes3d) > 0:
if needs_masking:
segment_mask = ~mask[face_order, :]
faces_2d = [face[mask, :] for face, mask
in zip(faces_2d, segment_mask)]
codes = [self._codes3d[idx] for idx in face_order]
PolyCollection.set_verts_and_codes(self, faces_2d, codes)
else:
if needs_masking and len(faces_2d) > 0:
invalid_vertices_2d = np.broadcast_to(
mask[face_order, :, None],
faces_2d.shape)
faces_2d = np.ma.MaskedArray(
faces_2d, mask=invalid_vertices_2d)
PolyCollection.set_verts(self, faces_2d, self._closed)
if len(cface) > 0:
self._facecolors2d = cface[face_order]
else:
self._facecolors2d = cface
if len(self._edgecolor3d) == len(cface) and len(cedge) > 0:
self._edgecolors2d = cedge[face_order]
else:
self._edgecolors2d = self._edgecolor3d
# Return zorder value
if self._sort_zpos is not None:
zvec = np.array([[0], [0], [self._sort_zpos], [1]])
ztrans = proj3d._proj_transform_vec(zvec, self.axes.M)
return ztrans[2][0]
elif pzs.size > 0:
# FIXME: Some results still don't look quite right.
# In particular, examine contourf3d_demo2.py
# with az = -54 and elev = -45.
return np.min(pzs)
else:
return np.nan
def set_facecolor(self, colors):
# docstring inherited
super().set_facecolor(colors)
self._facecolor3d = PolyCollection.get_facecolor(self)
def set_edgecolor(self, colors):
# docstring inherited
super().set_edgecolor(colors)
self._edgecolor3d = PolyCollection.get_edgecolor(self)
def set_alpha(self, alpha):
# docstring inherited
artist.Artist.set_alpha(self, alpha)
try:
self._facecolor3d = mcolors.to_rgba_array(
self._facecolor3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
self._edgecolors = mcolors.to_rgba_array(
self._edgecolor3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
self.stale = True
def get_facecolor(self):
# docstring inherited
# self._facecolors2d is not initialized until do_3d_projection
if not hasattr(self, '_facecolors2d'):
self.axes.M = self.axes.get_proj()
self.do_3d_projection()
return np.asarray(self._facecolors2d)
def get_edgecolor(self):
# docstring inherited
# self._edgecolors2d is not initialized until do_3d_projection
if not hasattr(self, '_edgecolors2d'):
self.axes.M = self.axes.get_proj()
self.do_3d_projection()
return np.asarray(self._edgecolors2d)
def poly_collection_2d_to_3d(col, zs=0, zdir='z', axlim_clip=False):
"""
Convert a `.PolyCollection` into a `.Poly3DCollection` object.
Parameters
----------
col : `~matplotlib.collections.PolyCollection`
The collection to convert.
zs : float or array of floats
The location or locations to place the polygons in the collection along
the *zdir* axis. Default: 0.
zdir : {'x', 'y', 'z'}
The axis in which to place the patches. Default: 'z'.
See `.get_dir_vector` for a description of the values.
"""
segments_3d, codes = _paths_to_3d_segments_with_codes(
col.get_paths(), zs, zdir)
col.__class__ = Poly3DCollection
col.set_verts_and_codes(segments_3d, codes)
col.set_3d_properties()
col._axlim_clip = axlim_clip
def juggle_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that 2D *xs*, *ys* can be plotted in the plane
orthogonal to *zdir*. *zdir* is normally 'x', 'y' or 'z'. However, if
*zdir* starts with a '-' it is interpreted as a compensation for
`rotate_axes`.
"""
if zdir == 'x':
return zs, xs, ys
elif zdir == 'y':
return xs, zs, ys
elif zdir[0] == '-':
return rotate_axes(xs, ys, zs, zdir)
else:
return xs, ys, zs
def rotate_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that the axes are rotated with *zdir* along
the original z axis. Prepending the axis with a '-' does the
inverse transform, so *zdir* can be 'x', '-x', 'y', '-y', 'z' or '-z'.
"""
if zdir in ('x', '-y'):
return ys, zs, xs
elif zdir in ('-x', 'y'):
return zs, xs, ys
else:
return xs, ys, zs
def _zalpha(
colors,
zs,
min_alpha=0.3,
_data_scale=None,
):
"""Modify the alpha values of the color list according to z-depth."""
if len(colors) == 0 or len(zs) == 0:
return np.zeros((0, 4))
# Alpha values beyond the range 0-1 inclusive make no sense, so clip them
min_alpha = np.clip(min_alpha, 0, 1)
if _data_scale is None or _data_scale == 0:
# Don't scale the alpha values since we have no valid data scale for reference
sats = np.ones_like(zs)
else:
# Deeper points have an increasingly transparent appearance
sats = np.clip(1 - (zs - np.min(zs)) / _data_scale, min_alpha, 1)
rgba = np.broadcast_to(mcolors.to_rgba_array(colors), (len(zs), 4))
# Change the alpha values of the colors using the generated alpha multipliers
return np.column_stack([rgba[:, :3], rgba[:, 3] * sats])
def _all_points_on_plane(xs, ys, zs, atol=1e-8):
"""
Check if all points are on the same plane. Note that NaN values are
ignored.
Parameters
----------
xs, ys, zs : array-like
The x, y, and z coordinates of the points.
atol : float, default: 1e-8
The tolerance for the equality check.
"""
xs, ys, zs = np.asarray(xs), np.asarray(ys), np.asarray(zs)
points = np.column_stack([xs, ys, zs])
points = points[~np.isnan(points).any(axis=1)]
# Check for the case where we have less than 3 unique points
points = np.unique(points, axis=0)
if len(points) <= 3:
return True
# Calculate the vectors from the first point to all other points
vs = (points - points[0])[1:]
vs = vs / np.linalg.norm(vs, axis=1)[:, np.newaxis]
# Filter out parallel vectors
vs = np.unique(vs, axis=0)
if len(vs) <= 2:
return True
# Filter out parallel and antiparallel vectors to the first vector
cross_norms = np.linalg.norm(np.cross(vs[0], vs[1:]), axis=1)
zero_cross_norms = np.where(np.isclose(cross_norms, 0, atol=atol))[0] + 1
vs = np.delete(vs, zero_cross_norms, axis=0)
if len(vs) <= 2:
return True
# Calculate the normal vector from the first three points
n = np.cross(vs[0], vs[1])
n = n / np.linalg.norm(n)
# If the dot product of the normal vector and all other vectors is zero,
# all points are on the same plane
dots = np.dot(n, vs.transpose())
return np.allclose(dots, 0, atol=atol)
def _generate_normals(polygons):
"""
Compute the normals of a list of polygons, one normal per polygon.
Normals point towards the viewer for a face with its vertices in
counterclockwise order, following the right hand rule.
Uses three points equally spaced around the polygon. This method assumes
that the points are in a plane. Otherwise, more than one shade is required,
which is not supported.
Parameters
----------
polygons : list of (M_i, 3) array-like, or (..., M, 3) array-like
A sequence of polygons to compute normals for, which can have
varying numbers of vertices. If the polygons all have the same
number of vertices and array is passed, then the operation will
be vectorized.
Returns
-------
normals : (..., 3) array
A normal vector estimated for the polygon.
"""
if isinstance(polygons, np.ndarray):
# optimization: polygons all have the same number of points, so can
# vectorize
n = polygons.shape[-2]
i1, i2, i3 = 0, n//3, 2*n//3
v1 = polygons[..., i1, :] - polygons[..., i2, :]
v2 = polygons[..., i2, :] - polygons[..., i3, :]
else:
# The subtraction doesn't vectorize because polygons is jagged.
v1 = np.empty((len(polygons), 3))
v2 = np.empty((len(polygons), 3))
for poly_i, ps in enumerate(polygons):
n = len(ps)
ps = np.asarray(ps)
i1, i2, i3 = 0, n//3, 2*n//3
v1[poly_i, :] = ps[i1, :] - ps[i2, :]
v2[poly_i, :] = ps[i2, :] - ps[i3, :]
return np.cross(v1, v2)
def _shade_colors(color, normals, lightsource=None):
"""
Shade *color* using normal vectors given by *normals*,
assuming a *lightsource* (using default position if not given).
*color* can also be an array of the same length as *normals*.
"""
if lightsource is None:
# chosen for backwards-compatibility
lightsource = mcolors.LightSource(azdeg=225, altdeg=19.4712)
with np.errstate(invalid="ignore"):
shade = ((normals / np.linalg.norm(normals, axis=1, keepdims=True))
@ lightsource.direction)
mask = ~np.isnan(shade)
if mask.any():
# convert dot product to allowed shading fractions
in_norm = mcolors.Normalize(-1, 1)
out_norm = mcolors.Normalize(0.3, 1).inverse
def norm(x):
return out_norm(in_norm(x))
shade[~mask] = 0
color = mcolors.to_rgba_array(color)
# shape of color should be (M, 4) (where M is number of faces)
# shape of shade should be (M,)
# colors should have final shape of (M, 4)
alpha = color[:, 3]
colors = norm(shade)[:, np.newaxis] * color
colors[:, 3] = alpha
else:
colors = np.asanyarray(color).copy()
return colors
| Poly3DCollection |
python | wandb__wandb | wandb/automations/_filters/operators.py | {
"start": 3443,
"end": 3555
} | class ____(BaseVariadicLogicalOp):
exprs: TupleOf[Union[FilterExpr, Op]] = Field(default=(), alias="$and")
| And |
python | modin-project__modin | modin/core/storage_formats/pandas/parsers.py | {
"start": 13873,
"end": 14808
} | class ____(PandasParser):
@staticmethod
@doc(_doc_parse_func, parameters=_doc_parse_parameters_common2)
def parse(fname, common_read_kwargs, **kwargs):
return PandasParser.generic_parse(
fname,
callback=PandasFWFParser.read_callback,
**common_read_kwargs,
**kwargs,
)
@staticmethod
def read_callback(*args, **kwargs):
"""
Parse data on each partition.
Parameters
----------
*args : list
Positional arguments to be passed to the callback function.
**kwargs : dict
Keyword arguments to be passed to the callback function.
Returns
-------
pandas.DataFrame or pandas.io.parsers.TextFileReader
Function call result.
"""
return pandas.read_fwf(*args, **kwargs)
@doc(_doc_pandas_parser_class, data_type="excel files")
| PandasFWFParser |
python | numpy__numpy | numpy/random/tests/test_random.py | {
"start": 3808,
"end": 5767
} | class ____:
def _create_rng(self):
seed = 1234567890
prng = random.RandomState(seed)
state = prng.get_state()
return prng, state
def test_basic(self):
prng, state = self._create_rng()
old = prng.tomaxint(16)
prng.set_state(state)
new = prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
prng, state = self._create_rng()
old = prng.standard_normal(size=3)
prng.set_state(state)
new = prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
prng, state = self._create_rng()
prng.standard_normal()
state = prng.get_state()
old = prng.standard_normal(size=3)
prng.set_state(state)
new = prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
prng, state = self._create_rng()
old_state = state[:-2]
x1 = prng.standard_normal(size=16)
prng.set_state(old_state)
x2 = prng.standard_normal(size=16)
prng.set_state(state)
x3 = prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
prng, _ = self._create_rng()
prng.negative_binomial(0.5, 0.5)
def test_set_invalid_state(self):
# gh-25402
prng, _ = self._create_rng()
with pytest.raises(IndexError):
prng.set_state(())
| TestSetState |
python | getsentry__sentry | tests/sentry/api/serializers/test_fields.py | {
"start": 1620,
"end": 3910
} | class ____(TestCase):
def test_simple(self) -> None:
data = {"actor_field": f"user:{self.user.id}"}
serializer = DummySerializer(data=data, context={"organization": self.organization})
assert serializer.is_valid()
assert serializer.validated_data["actor_field"].is_user
assert serializer.validated_data["actor_field"].id == self.user.id
def test_legacy_user_fallback(self) -> None:
data = {"actor_field": f"{self.user.id}"}
serializer = DummySerializer(data=data, context={"organization": self.organization})
assert serializer.is_valid()
assert serializer.validated_data["actor_field"].is_user
assert serializer.validated_data["actor_field"].id == self.user.id
def test_team(self) -> None:
data = {"actor_field": f"team:{self.team.id}"}
serializer = DummySerializer(data=data, context={"organization": self.organization})
assert serializer.is_valid()
assert serializer.validated_data["actor_field"].actor_type == ActorType.TEAM
assert serializer.validated_data["actor_field"].id == self.team.id
def test_permissions(self) -> None:
other_org = self.create_organization()
serializer = DummySerializer(
data={"actor_field": f"user:{self.user.id}"}, context={"organization": other_org}
)
assert not serializer.is_valid()
assert serializer.errors["actor_field"] == [
ErrorDetail("User is not a member of this organization", "invalid")
]
serializer = DummySerializer(
data={"actor_field": f"team:{self.team.id}"}, context={"organization": other_org}
)
assert not serializer.is_valid()
assert serializer.errors["actor_field"] == [
ErrorDetail("Team is not a member of this organization", "invalid")
]
def test_validates(self) -> None:
data = {"actor_field": "foo:1"}
serializer = DummySerializer(data=data, context={"organization": self.organization})
assert not serializer.is_valid()
assert serializer.errors == {
"actor_field": [
"Could not parse actor. Format should be `type:id` where type is `team` or `user`."
]
}
| TestActorField |
python | lepture__authlib | authlib/integrations/base_client/errors.py | {
"start": 53,
"end": 117
} | class ____(AuthlibBaseError):
error = "oauth_error"
| OAuthError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-netsuite/source_netsuite/source.py | {
"start": 583,
"end": 7583
} | class ____(AbstractSource):
logger: logging.Logger = logging.getLogger("airbyte")
def auth(self, config: Mapping[str, Any]) -> OAuth1:
# the `realm` param should be in format of: 12345_SB1
realm = config["realm"].replace("-", "_").upper()
return OAuth1(
client_key=config["consumer_key"],
client_secret=config["consumer_secret"],
resource_owner_key=config["token_key"],
resource_owner_secret=config["token_secret"],
realm=realm,
signature_method="HMAC-SHA256",
)
def base_url(self, config: Mapping[str, Any]) -> str:
# the subdomain should be in format of: 12345-sb1
subdomain = config["realm"].replace("_", "-").lower()
return f"https://{subdomain}.suitetalk.api.netsuite.com"
def get_session(self, auth: OAuth1) -> requests.Session:
session = requests.Session()
session.auth = auth
return session
def check_connection(self, logger, config: Mapping[str, Any]) -> Tuple[bool, Any]:
auth = self.auth(config)
object_types = config.get("object_types")
base_url = self.base_url(config)
session = self.get_session(auth)
# if record types are specified make sure they are valid
if object_types:
# ensure there are no duplicate record types as this will break Airbyte
duplicates = [k for k, v in Counter(object_types).items() if v > 1]
if duplicates:
return False, f'Duplicate record type: {", ".join(duplicates)}'
# check connectivity to all provided `object_types`
for object in object_types:
try:
response = session.get(url=base_url + RECORD_PATH + object.lower(), params={"limit": 1})
response.raise_for_status()
return True, None
except requests.exceptions.HTTPError as e:
return False, e
else:
# if `object_types` are not provided, use `Contact` object
# there should be at least 1 contact available in every NetSuite account by default.
url = base_url + RECORD_PATH + "contact"
try:
response = session.get(url=url, params={"limit": 1})
response.raise_for_status()
return True, None
except requests.exceptions.HTTPError as e:
return False, e
def get_schemas(self, object_names: Union[List[str], str], session: requests.Session, metadata_url: str) -> Mapping[str, Any]:
"""
Handles multivariance of object_names type input and fetches the schema for each object type provided.
"""
try:
if isinstance(object_names, list):
schemas = {}
for object_name in object_names:
schemas.update(**self.fetch_schema(object_name, session, metadata_url))
return schemas
elif isinstance(object_names, str):
return self.fetch_schema(object_names, session, metadata_url)
else:
raise NotImplementedError(
f"Object Types has unknown structure, should be either `dict` or `str`, actual input: {object_names}"
)
except JSONDecodeError as e:
self.logger.error(f"Unexpected output while fetching the object schema. Full error: {e.__repr__()}")
def fetch_schema(self, object_name: str, session: requests.Session, metadata_url: str) -> Mapping[str, Any]:
"""
Calls the API for specific object type and returns schema as a dict.
"""
return {object_name.lower(): session.get(metadata_url + object_name, headers=SCHEMA_HEADERS).json()}
def generate_stream(
self,
session: requests.Session,
metadata_url: str,
schemas: dict,
object_name: str,
auth: OAuth1,
base_url: str,
start_datetime: str,
window_in_days: int,
max_retry: int = 3,
) -> Union[NetsuiteStream, IncrementalNetsuiteStream, CustomIncrementalNetsuiteStream]:
input_args = {
"auth": auth,
"object_name": object_name,
"base_url": base_url,
"start_datetime": start_datetime,
"window_in_days": window_in_days,
}
schema = schemas[object_name]
schema_props = schema.get("properties")
if schema_props:
if INCREMENTAL_CURSOR in schema_props.keys():
return IncrementalNetsuiteStream(**input_args)
elif CUSTOM_INCREMENTAL_CURSOR in schema_props.keys():
return CustomIncrementalNetsuiteStream(**input_args)
else:
# all other streams are full_refresh
return NetsuiteStream(**input_args)
else:
retry_attempt = 1
while retry_attempt <= max_retry:
self.logger.warn(f"Object `{object_name}` schema has missing `properties` key. Retry attempt: {retry_attempt}/{max_retry}")
# somethimes object metadata returns data with missing `properties` key,
# we should try to fetch metadata again to that object
schemas = self.get_schemas(object_name, session, metadata_url)
if schemas[object_name].get("properties"):
input_args.update(**{"session": session, "metadata_url": metadata_url, "schemas": schemas})
return self.generate_stream(**input_args)
retry_attempt += 1
self.logger.warn(f"Object `{object_name}` schema is not available. Skipping this stream.")
return None
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
auth = self.auth(config)
session = self.get_session(auth)
base_url = self.base_url(config)
metadata_url = base_url + META_PATH
object_names = config.get("object_types")
# retrieve all record types if `object_types` config field is not specified
if not object_names:
objects_metadata = session.get(metadata_url).json().get("items")
object_names = [object["name"] for object in objects_metadata]
input_args = {"session": session, "metadata_url": metadata_url}
schemas = self.get_schemas(object_names, **input_args)
input_args.update(
**{
"auth": auth,
"base_url": base_url,
"start_datetime": config["start_datetime"],
"window_in_days": config["window_in_days"],
"schemas": schemas,
}
)
# build streams
streams: list = []
for name in object_names:
stream = self.generate_stream(object_name=name.lower(), **input_args)
if stream:
streams.append(stream)
return streams
| SourceNetsuite |
python | pennersr__django-allauth | allauth/socialaccount/providers/snapchat/constants.py | {
"start": 27,
"end": 263
} | class ____:
EXTERNAL_ID = "https://auth.snapchat.com/oauth2/api/user.external_id"
DISPLAY_NAME = "https://auth.snapchat.com/oauth2/api/user.display_name"
BITMOJI = "https://auth.snapchat.com/oauth2/api/user.bitmoji.avatar"
| Scope |
python | openai__openai-python | src/openai/resources/fine_tuning/checkpoints/permissions.py | {
"start": 15281,
"end": 15738
} | class ____:
def __init__(self, permissions: Permissions) -> None:
self._permissions = permissions
self.create = _legacy_response.to_raw_response_wrapper(
permissions.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
permissions.retrieve,
)
self.delete = _legacy_response.to_raw_response_wrapper(
permissions.delete,
)
| PermissionsWithRawResponse |
python | numpy__numpy | numpy/matrixlib/tests/test_matrix_linalg.py | {
"start": 1236,
"end": 1298
} | class ____(SolveCases, MatrixTestCase):
pass
| TestSolveMatrix |
python | ApeWorX__ape | src/ape/managers/query.py | {
"start": 3763,
"end": 7648
} | class ____(ManagerAccessMixin):
"""
A singleton that manages query engines and performs queries.
Args:
query (``QueryType``): query to execute
Usage example::
biggest_block_size = chain.blocks.query("size").max()
"""
@cached_property
def engines(self) -> dict[str, QueryAPI]:
"""
A dict of all :class:`~ape.api.query.QueryAPI` instances across all
installed plugins.
Returns:
dict[str, :class:`~ape.api.query.QueryAPI`]
"""
engines: dict[str, QueryAPI] = {"__default__": DefaultQueryProvider()}
for plugin_name, engine_class in self.plugin_manager.query_engines:
engine_name = clean_plugin_name(plugin_name)
engines[engine_name] = engine_class() # type: ignore
return engines
def _suggest_engines(self, engine_selection):
return difflib.get_close_matches(engine_selection, list(self.engines), cutoff=0.6)
def query(
self,
query: QueryType,
engine_to_use: Optional[str] = None,
) -> Iterator[BaseInterfaceModel]:
"""
Args:
query (``QueryType``): The type of query to execute
engine_to_use (Optional[str]): Short-circuit selection logic using
a specific engine. Defaults is set by performance-based selection logic.
Raises:
:class:`~ape.exceptions.QueryEngineError`: When given an invalid or
inaccessible ``engine_to_use`` value.
Returns:
Iterator[``BaseInterfaceModel``]
"""
if engine_to_use:
if engine_to_use not in self.engines:
raise QueryEngineError(
f"Query engine `{engine_to_use}` not found. "
f"Did you mean {' or '.join(self._suggest_engines(engine_to_use))}?"
)
sel_engine = self.engines[engine_to_use]
est_time = sel_engine.estimate_query(query)
else:
# Get heuristics from all the query engines to perform this query
estimates = map(lambda qe: (qe, qe.estimate_query(query)), self.engines.values())
# Ignore query engines that can't perform this query
valid_estimates = filter(lambda qe: qe[1] is not None, estimates)
try:
# Find the "best" engine to perform the query
# NOTE: Sorted by fastest time heuristic
sel_engine, est_time = min(valid_estimates, key=lambda qe: qe[1]) # type: ignore
except ValueError as e:
raise QueryEngineError("No query engines are available.") from e
# Go fetch the result from the engine
sel_engine_name = getattr(type(sel_engine), "__name__", None)
query_type_name = getattr(type(query), "__name__", None)
if not sel_engine_name:
logger.error("Engine type unknown")
if not query_type_name:
logger.error("Query type unknown")
if sel_engine_name and query_type_name:
logger.debug(f"{sel_engine_name}: {query_type_name}({query})")
start_time = time.time_ns()
result = sel_engine.perform_query(query)
exec_time = (time.time_ns() - start_time) // 1000
if sel_engine_name and query_type_name:
logger.debug(
f"{sel_engine_name}: {query_type_name}"
f" executed in {exec_time} ms (expected: {est_time} ms)"
)
# Update any caches
for engine in self.engines.values():
if not isinstance(engine, sel_engine.__class__):
result, cache_data = tee(result)
try:
engine.update_cache(query, cache_data)
except QueryEngineError as err:
logger.error(str(err))
return result
| QueryManager |
python | encode__django-rest-framework | rest_framework/relations.py | {
"start": 8205,
"end": 9356
} | class ____(RelatedField):
default_error_messages = {
'required': _('This field is required.'),
'does_not_exist': _('Invalid pk "{pk_value}" - object does not exist.'),
'incorrect_type': _('Incorrect type. Expected pk value, received {data_type}.'),
}
def __init__(self, **kwargs):
self.pk_field = kwargs.pop('pk_field', None)
super().__init__(**kwargs)
def use_pk_only_optimization(self):
return True
def to_internal_value(self, data):
if self.pk_field is not None:
data = self.pk_field.to_internal_value(data)
queryset = self.get_queryset()
try:
if isinstance(data, bool):
raise TypeError
return queryset.get(pk=data)
except ObjectDoesNotExist:
self.fail('does_not_exist', pk_value=data)
except (TypeError, ValueError):
self.fail('incorrect_type', data_type=type(data).__name__)
def to_representation(self, value):
if self.pk_field is not None:
return self.pk_field.to_representation(value.pk)
return value.pk
| PrimaryKeyRelatedField |
python | psf__black | tests/data/cases/preview_long_strings__regression.py | {
"start": 4647,
"end": 5447
} | class ____:
def foo():
some_func_call(
(
"xx {xxxxxxxxxxx}/xxxxxxxxxxx.xxx xxxx.xxx && xxxxxx -x "
"xxxx, ('xxxxxxx xxxxxx xxxx, xxxx') xxxxxx_xxxxx xxxxxx xxxx; "
"xxxx.xxxx_xxxxxx(['xxxx.xxx'], xxxx.xxxxxxx().xxxxxxxxxx)\" "
),
None,
('xxxxxxxxxxx',),
),
xxxxxxx = { 'xx' : 'xxxx xxxxxxx xxxxxxxxx -x xxx -x /xxx/{0} -x xxx,xxx -xx {1} \
-xx {1} -xx xxx=xxx_xxxx,xxx_xx,xxx_xxx,xxx_xxxx,xxx_xx,xxx_xxx |\
xxxxxx -x xxxxxxxx -x xxxxxxxx -x',
'xx' : 'xxxx xxxxxxx xxxxxxxxx -x xxx -x /xxx/{0} -x xxx,xxx -xx {1} \
-xx {1} -xx xxx=xxx_xxxx_xxx_xxxx,xxx_xx_xxx_xxxx,xxx_xxxx_xxx_xxxx,\
xxx_xx_xxxx_xxxx,xxx_xxx_xxxx,xxx_xxx_xxxx xxxx=xxx | xxxxxx -x xxxxxxxx -x xxxxxxxx -x'
}
| A |
python | PrefectHQ__prefect | src/prefect/server/events/counting.py | {
"start": 995,
"end": 7147
} | class ____(AutoEnum):
week = AutoEnum.auto()
day = AutoEnum.auto()
hour = AutoEnum.auto()
minute = AutoEnum.auto()
second = AutoEnum.auto()
def as_timedelta(self, interval: float) -> Duration:
if self == self.week:
return Duration(days=7 * interval)
elif self == self.day:
return Duration(days=1 * interval)
elif self == self.hour:
return Duration(hours=1 * interval)
elif self == self.minute:
return Duration(minutes=1 * interval)
elif self == self.second:
return Duration(seconds=1 * interval)
else:
raise NotImplementedError()
def validate_buckets(
self,
start_datetime: datetime.datetime,
end_datetime: datetime.datetime,
interval: float,
) -> None:
MAX_ALLOWED_BUCKETS = 1000
delta = self.as_timedelta(interval)
start_in_utc = start_datetime.astimezone(ZoneInfo("UTC"))
end_in_utc = end_datetime.astimezone(ZoneInfo("UTC"))
if interval < 0.01:
raise InvalidEventCountParameters("The minimum interval is 0.01")
number_of_buckets = math.ceil((end_in_utc - start_in_utc) / delta)
if number_of_buckets > MAX_ALLOWED_BUCKETS:
raise InvalidEventCountParameters(
f"The given interval would create {number_of_buckets} buckets, "
"which is too many. Please increase the interval or reduce the "
f"time range to produce {MAX_ALLOWED_BUCKETS} buckets or fewer."
)
def get_interval_spans(
self,
start_datetime: datetime.datetime,
end_datetime: datetime.datetime,
interval: float,
) -> Generator[int | tuple[datetime.datetime, datetime.datetime], None, None]:
"""Divide the given range of dates into evenly-sized spans of interval units"""
self.validate_buckets(start_datetime, end_datetime, interval)
# Our universe began on PIVOT_DATETIME and all time after that is
# divided into `delta`-sized buckets. We want to find the bucket that
# contains `start_datetime` and then find the all of the buckets
# that come after it until the bucket that contains `end_datetime`.
delta = self.as_timedelta(interval)
start_in_utc = start_datetime.astimezone(ZoneInfo("UTC"))
end_in_utc = end_datetime.astimezone(ZoneInfo("UTC"))
if end_in_utc > now("UTC"):
end_in_utc = end_of_period(now("UTC"), self.value)
first_span_index = math.floor((start_in_utc - PIVOT_DATETIME) / delta)
yield first_span_index
span_start = PIVOT_DATETIME + delta * first_span_index
while span_start < end_in_utc:
next_span_start = span_start + delta
yield (span_start, next_span_start - timedelta(microseconds=1))
span_start = next_span_start
def database_value_expression(self, time_interval: float) -> sa.Cast[str]:
"""Returns the SQL expression to place an event in a time bucket"""
# The date_bin function can do the bucketing for us:
# https://www.postgresql.org/docs/14/functions-datetime.html#FUNCTIONS-DATETIME-BIN
db = provide_database_interface()
delta = self.as_timedelta(time_interval)
if db.dialect.name == "postgresql":
return sa.cast(
sa.func.floor(
sa.extract(
"epoch",
(
sa.func.date_bin(delta, db.Event.occurred, PIVOT_DATETIME)
- PIVOT_DATETIME
),
)
/ delta.total_seconds(),
),
sa.Text,
)
elif db.dialect.name == "sqlite":
# Convert pivot date and event date to strings formatted as seconds since the epoch
pivot_timestamp = sa.func.strftime(
"%s", PIVOT_DATETIME.strftime("%Y-%m-%d %H:%M:%S")
)
event_timestamp = sa.func.strftime("%s", db.Event.occurred)
seconds_since_pivot = event_timestamp - pivot_timestamp
# Calculate the bucket index by dividing by the interval in seconds and flooring the result
bucket_index = sa.func.floor(
sa.cast(seconds_since_pivot, sa.Integer) / delta.total_seconds()
)
return sa.cast(bucket_index, sa.Text)
else:
raise NotImplementedError(f"Dialect {db.dialect.name} is not supported.")
def database_label_expression(
self, db: PrefectDBInterface, time_interval: float
) -> sa.Function[str]:
"""Returns the SQL expression to label a time bucket"""
time_delta = self.as_timedelta(time_interval)
if db.dialect.name == "postgresql":
# The date_bin function can do the bucketing for us:
# https://www.postgresql.org/docs/14/functions-datetime.html#FUNCTIONS-DATETIME-BIN
return sa.func.to_char(
sa.func.date_bin(time_delta, db.Event.occurred, PIVOT_DATETIME),
'YYYY-MM-DD"T"HH24:MI:SSTZH:TZM',
)
elif db.dialect.name == "sqlite":
# We can't use date_bin in SQLite, so we have to do the bucketing manually
seconds_since_epoch = sa.func.strftime("%s", db.Event.occurred)
# Convert the total seconds of the timedelta to a constant in SQL
bucket_size = time_delta.total_seconds()
# Perform integer division and multiplication to find the bucket start epoch using SQL functions
bucket_start_epoch = sa.func.cast(
(sa.cast(seconds_since_epoch, sa.Integer) / bucket_size) * bucket_size,
sa.Integer,
)
bucket_datetime = sa.func.strftime(
"%Y-%m-%dT%H:%M:%SZ", sa.func.datetime(bucket_start_epoch, "unixepoch")
)
return bucket_datetime
else:
raise NotImplementedError(f"Dialect {db.dialect.name} is not supported.")
| TimeUnit |
python | PrefectHQ__prefect | src/prefect/server/schemas/core.py | {
"start": 32548,
"end": 32903
} | class ____(ORMBaseModel):
"""An ORM representation of saved search data. Represents a set of filter criteria."""
name: str = Field(default=..., description="The name of the saved search.")
filters: list[SavedSearchFilter] = Field(
default_factory=lambda: [],
description="The filter set for the saved search.",
)
| SavedSearch |
python | pypa__pipenv | pipenv/patched/pip/_internal/cli/index_command.py | {
"start": 1479,
"end": 4700
} | class ____(CommandContextMixIn):
"""
A class mixin for command classes needing _build_session().
"""
def __init__(self) -> None:
super().__init__()
self._session: Optional[PipSession] = None
@classmethod
def _get_index_urls(cls, options: Values) -> Optional[List[str]]:
"""Return a list of index urls from user-provided options."""
index_urls = []
if not getattr(options, "no_index", False):
url = getattr(options, "index_url", None)
if url:
index_urls.append(url)
urls = getattr(options, "extra_index_urls", None)
if urls:
index_urls.extend(urls)
# Return None rather than an empty list
return index_urls or None
def get_default_session(self, options: Values) -> "PipSession":
"""Get a default-managed session."""
if self._session is None:
self._session = self.enter_context(self._build_session(options))
# there's no type annotation on requests.Session, so it's
# automatically ContextManager[Any] and self._session becomes Any,
# then https://github.com/python/mypy/issues/7696 kicks in
assert self._session is not None
return self._session
def _build_session(
self,
options: Values,
retries: Optional[int] = None,
timeout: Optional[int] = None,
) -> "PipSession":
from pipenv.patched.pip._internal.network.session import PipSession
cache_dir = options.cache_dir
assert not cache_dir or os.path.isabs(cache_dir)
if "legacy-certs" not in options.deprecated_features_enabled:
ssl_context = _create_truststore_ssl_context()
else:
ssl_context = None
session = PipSession(
cache=os.path.join(cache_dir, "http-v2") if cache_dir else None,
retries=retries if retries is not None else options.retries,
trusted_hosts=options.trusted_hosts,
index_urls=self._get_index_urls(options),
ssl_context=ssl_context,
)
# Handle custom ca-bundles from the user
if options.cert:
session.verify = options.cert
# Handle SSL client certificate
if options.client_cert:
session.cert = options.client_cert
# Handle timeouts
if options.timeout or timeout:
session.timeout = timeout if timeout is not None else options.timeout
# Handle configured proxies
if options.proxy:
session.proxies = {
"http": options.proxy,
"https": options.proxy,
}
session.trust_env = False
session.pip_proxy = options.proxy
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.no_input
session.auth.keyring_provider = options.keyring_provider
return session
def _pip_self_version_check(session: "PipSession", options: Values) -> None:
from pipenv.patched.pip._internal.self_outdated_check import pip_self_version_check as check
check(session, options)
| SessionCommandMixin |
python | sqlalchemy__sqlalchemy | test/orm/test_naturalpks.py | {
"start": 21366,
"end": 22482
} | class ____(_fixtures.FixtureTest):
run_inserts = None
__sparse_driver_backend__ = True
def test_transient_exception(self):
"""An object that goes from a pk value to transient/pending
doesn't count as a "pk" switch.
"""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(User, users)
self.mapper_registry.map_imperatively(
Address, addresses, properties={"user": relationship(User)}
)
sess = fixture_session()
u1 = User(id=5, name="u1")
ad1 = Address(email_address="e1", user=u1)
sess.add_all([u1, ad1])
sess.flush()
make_transient(u1)
u1.id = None
u1.username = "u2"
sess.add(u1)
sess.flush()
eq_(ad1.user_id, 5)
sess.expire_all()
eq_(ad1.user_id, 5)
ne_(u1.id, 5)
ne_(u1.id, None)
eq_(sess.query(User).count(), 2)
| TransientExceptionTesst |
python | getsentry__sentry | src/sentry/conf/types/kafka_definition.py | {
"start": 253,
"end": 5261
} | class ____(Enum):
"""
These are the default topic names used by Sentry. They must match
the registered values in sentry-kafka-schemas.
"""
EVENTS = "events"
EVENTS_COMMIT_LOG = "snuba-commit-log"
TRANSACTIONS = "transactions"
TRANSACTIONS_COMMIT_LOG = "snuba-transactions-commit-log"
OUTCOMES = "outcomes"
OUTCOMES_DLQ = "outcomes-dlq"
OUTCOMES_BILLING = "outcomes-billing"
OUTCOMES_BILLING_DLQ = "outcomes-billing-dlq"
EVENTS_SUBSCRIPTIONS_RESULTS = "events-subscription-results"
TRANSACTIONS_SUBSCRIPTIONS_RESULTS = "transactions-subscription-results"
GENERIC_METRICS_SUBSCRIPTIONS_RESULTS = "generic-metrics-subscription-results"
METRICS_SUBSCRIPTIONS_RESULTS = "metrics-subscription-results"
INGEST_EVENTS = "ingest-events"
INGEST_EVENTS_DLQ = "ingest-events-dlq"
INGEST_EVENTS_BACKLOG = "ingest-events-backlog"
INGEST_FEEDBACK_EVENTS = "ingest-feedback-events"
INGEST_FEEDBACK_EVENTS_DLQ = "ingest-feedback-events-dlq"
INGEST_ATTACHMENTS = "ingest-attachments"
INGEST_ATTACHMENTS_DLQ = "ingest-attachments-dlq"
INGEST_TRANSACTIONS = "ingest-transactions"
INGEST_TRANSACTIONS_DLQ = "ingest-transactions-dlq"
INGEST_TRANSACTIONS_BACKLOG = "ingest-transactions-backlog"
INGEST_SPANS = "ingest-spans"
INGEST_SPANS_DLQ = "ingest-spans-dlq"
INGEST_METRICS = "ingest-metrics"
INGEST_METRICS_DLQ = "ingest-metrics-dlq"
SNUBA_METRICS = "snuba-metrics"
PROFILES = "profiles"
PROFILES_CALL_TREE = "profiles-call-tree"
PROFILE_CHUNKS = "snuba-profile-chunks"
PROCESSED_PROFILES = "processed-profiles"
INGEST_PERFORMANCE_METRICS = "ingest-performance-metrics"
INGEST_GENERIC_METRICS_DLQ = "ingest-generic-metrics-dlq"
SNUBA_GENERIC_METRICS = "snuba-generic-metrics"
INGEST_REPLAY_EVENTS = "ingest-replay-events"
INGEST_REPLAYS_RECORDINGS = "ingest-replay-recordings"
INGEST_OCCURRENCES = "ingest-occurrences"
INGEST_MONITORS = "ingest-monitors"
PREPROD_ARTIFACT_EVENTS = "preprod-artifact-events"
MONITORS_CLOCK_TICK = "monitors-clock-tick"
MONITORS_CLOCK_TASKS = "monitors-clock-tasks"
MONITORS_INCIDENT_OCCURRENCES = "monitors-incident-occurrences"
UPTIME_RESULTS = "uptime-results"
EVENTSTREAM_GENERIC = "generic-events"
GENERIC_EVENTS_COMMIT_LOG = "snuba-generic-events-commit-log"
GROUP_ATTRIBUTES = "group-attributes"
SHARED_RESOURCES_USAGE = "shared-resources-usage"
SNUBA_ITEMS = "snuba-items"
EAP_ITEMS_SUBSCRIPTIONS_RESULTS = "subscription-results-eap-items"
BUFFERED_SEGMENTS = "buffered-segments"
BUFFERED_SEGMENTS_DLQ = "buffered-segments-dlq"
# Taskworker topics
TASKWORKER = "taskworker"
TASKWORKER_DLQ = "taskworker-dlq"
TASKWORKER_BILLING = "taskworker-billing"
TASKWORKER_BILLING_DLQ = "taskworker-billing-dlq"
TASKWORKER_BUFFER = "taskworker-buffer"
TASKWORKER_BUFFER_DLQ = "taskworker-buffer-dlq"
TASKWORKER_CONTROL = "taskworker-control"
TASKWORKER_CONTROL_DLQ = "taskworker-control-dlq"
TASKWORKER_CONTROL_LIMITED = "taskworker-control-limited"
TASKWORKER_CONTROL_LIMITED_DLQ = "taskworker-control-limited-dlq"
TASKWORKER_CUTOVER = "taskworker-cutover"
TASKWORKER_EMAIL = "taskworker-email"
TASKWORKER_EMAIL_DLQ = "taskworker-email-dlq"
TASKWORKER_INGEST = "taskworker-ingest"
TASKWORKER_INGEST_DLQ = "taskworker-ingest-dlq"
TASKWORKER_INGEST_ERRORS = "taskworker-ingest-errors"
TASKWORKER_INGEST_ERRORS_DLQ = "taskworker-ingest-errors-dlq"
TASKWORKER_INGEST_ERRORS_POSTPROCESS = "taskworker-ingest-errors-postprocess"
TASKWORKER_INGEST_ERRORS_POSTPROCESS_DLQ = "taskworker-ingest-errors-postprocess-dlq"
TASKWORKER_INGEST_TRANSACTIONS = "taskworker-ingest-transactions"
TASKWORKER_INGEST_TRANSACTIONS_DLQ = "taskworker-ingest-transactions-dlq"
TASKWORKER_INGEST_ATTACHMENTS = "taskworker-ingest-attachments"
TASKWORKER_INGEST_ATTACHMENTS_DLQ = "taskworker-ingest-attachments-dlq"
TASKWORKER_INGEST_PROFILING = "taskworker-ingest-profiling"
TASKWORKER_INGEST_PROFILING_DLQ = "taskworker-ingest-profiling-dlq"
TASKWORKER_INTERNAL = "taskworker-internal"
TASKWORKER_INTERNAL_DLQ = "taskworker-internal-dlq"
TASKWORKER_LIMITED = "taskworker-limited"
TASKWORKER_LIMITED_DLQ = "taskworker-limited-dlq"
TASKWORKER_LONG = "taskworker-long"
TASKWORKER_LONG_DLQ = "taskworker-long-dlq"
TASKWORKER_PRODUCTS = "taskworker-products"
TASKWORKER_PRODUCTS_DLQ = "taskworker-products-dlq"
TASKWORKER_SENTRYAPP = "taskworker-sentryapp"
TASKWORKER_SENTRYAPP_DLQ = "taskworker-sentryapp-dlq"
TASKWORKER_SYMBOLICATION = "taskworker-symbolication"
TASKWORKER_SYMBOLICATION_DLQ = "taskworker-symbolication-dlq"
TASKWORKER_USAGE = "taskworker-usage"
TASKWORKER_USAGE_DLQ = "taskworker-usage-dlq"
TASKWORKER_WORKFLOWS_ENGINE = "taskworker-workflows-engine"
TASKWORKER_WORKFLOWS_ENGINE_DLQ = "taskworker-workflows-engine-dlq"
| Topic |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 39710,
"end": 39877
} | class ____(panel_grid_major_x, panel_grid_major_y):
"""
Major grid lines
Parameters
----------
theme_element : element_line
"""
| panel_grid_major |
python | sympy__sympy | sympy/core/function.py | {
"start": 8464,
"end": 11775
} | class ____(Basic, metaclass=FunctionClass):
"""
Base class for applied functions.
Explanation
===========
Instances of Application represent the result of applying an application of
any type to any object.
"""
is_Function = True
@cacheit
def __new__(cls, *args, **options):
from sympy.sets.fancysets import Naturals0
from sympy.sets.sets import FiniteSet
args = list(map(sympify, args))
evaluate = options.pop('evaluate', global_parameters.evaluate)
# WildFunction (and anything else like it) may have nargs defined
# and we throw that value away here
options.pop('nargs', None)
if options:
raise ValueError("Unknown options: %s" % options)
if evaluate:
evaluated = cls.eval(*args)
if evaluated is not None:
return evaluated
obj = super().__new__(cls, *args, **options)
# make nargs uniform here
sentinel = object()
objnargs = getattr(obj, "nargs", sentinel)
if objnargs is not sentinel:
# things passing through here:
# - functions subclassed from Function (e.g. myfunc(1).nargs)
# - functions like cos(1).nargs
# - AppliedUndef with given nargs like Function('f', nargs=1)(1).nargs
# Canonicalize nargs here
if is_sequence(objnargs):
nargs = tuple(ordered(set(objnargs)))
elif objnargs is not None:
nargs = (as_int(objnargs),)
else:
nargs = None
else:
# things passing through here:
# - WildFunction('f').nargs
# - AppliedUndef with no nargs like Function('f')(1).nargs
nargs = obj._nargs # note the underscore here
# convert to FiniteSet
obj.nargs = FiniteSet(*nargs) if nargs else Naturals0()
return obj
@classmethod
def eval(cls, *args):
"""
Returns a canonical form of cls applied to arguments args.
Explanation
===========
The ``eval()`` method is called when the class ``cls`` is about to be
instantiated and it should return either some simplified instance
(possible of some other class), or if the class ``cls`` should be
unmodified, return None.
Examples of ``eval()`` for the function "sign"
.. code-block:: python
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
if arg.is_zero: return S.Zero
if arg.is_positive: return S.One
if arg.is_negative: return S.NegativeOne
if isinstance(arg, Mul):
coeff, terms = arg.as_coeff_Mul(rational=True)
if coeff is not S.One:
return cls(coeff) * cls(terms)
"""
return
@property
def func(self):
return self.__class__
def _eval_subs(self, old, new):
if (old.is_Function and new.is_Function and
callable(old) and callable(new) and
old == self.func and len(self.args) in new.nargs):
return new(*[i._subs(old, new) for i in self.args])
| Application |
python | cython__cython | Cython/Compiler/CodeGeneration.py | {
"start": 72,
"end": 1068
} | class ____(VisitorTransform):
"""
Finds nodes in a pxd file that should generate code, and
returns them in a StatListNode.
The result is a tuple (StatListNode, ModuleScope), i.e.
everything that is needed from the pxd after it is processed.
A purer approach would be to separately compile the pxd code,
but the result would have to be slightly more sophisticated
than pure strings (functions + wanted interned strings +
wanted utility code + wanted cached objects) so for now this
approach is taken.
"""
def __call__(self, root):
self.funcs = []
self.visitchildren(root)
return (StatListNode(root.pos, stats=self.funcs), root.scope)
def visit_FuncDefNode(self, node):
self.funcs.append(node)
# Do not visit children, nested funcdefnodes will
# also be moved by this action...
return node
def visit_Node(self, node):
self.visitchildren(node)
return node
| ExtractPxdCode |
python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {
"start": 56666,
"end": 57139
} | class ____(ModelOutput):
r"""
sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, prediction_length, number_channels)`):
Sampled values from the chosen distribution.
"""
sequences: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for time series model's predictions outputs that contains the sampled values from the chosen
distribution.
"""
)
| SamplePatchTSMixerPredictionOutput |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.