language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/maximum-difference-by-remapping-a-digit.py | {
"start": 41,
"end": 604
} | class ____(object):
def minMaxDifference(self, num):
"""
:type num: int
:rtype: int
"""
def f(dst):
result = 0
base = 1
while base <= num:
base *= 10
base //= 10
src = -1
while base:
d = num//base%10
if src == -1 and d != dst:
src = d
result += base*(dst if d == src else d)
base //= 10
return result
return f(9)-f(0)
| Solution |
python | lazyprogrammer__machine_learning_examples | rl/epsilon_greedy_starter.py | {
"start": 505,
"end": 2214
} | class ____:
def __init__(self, p):
# p: the win rate
self.p = p
self.p_estimate = # TODO
self.N = # TODO
def pull(self):
# draw a 1 with probability p
return np.random.random() < self.p
def update(self, x):
self.N = # TODO
self.p_estimate = # TODO
def experiment():
bandits = [Bandit(p) for p in BANDIT_PROBABILITIES]
rewards = np.zeros(NUM_TRIALS)
num_times_explored = 0
num_times_exploited = 0
num_optimal = 0
optimal_j = np.argmax([b.p for b in bandits])
print("optimal j:", optimal_j)
for i in range(NUM_TRIALS):
# use epsilon-greedy to select the next bandit
if np.random.random() < EPS:
num_times_explored += 1
j = # TODO
else:
num_times_exploited += 1
j = # TODO
if j == optimal_j:
num_optimal += 1
# pull the arm for the bandit with the largest sample
x = bandits[j].pull()
# update rewards log
rewards[i] = x
# update the distribution for the bandit whose arm we just pulled
bandits[j].update(x)
# print mean estimates for each bandit
for b in bandits:
print("mean estimate:", b.p_estimate)
# print total reward
print("total reward earned:", rewards.sum())
print("overall win rate:", rewards.sum() / NUM_TRIALS)
print("num_times_explored:", num_times_explored)
print("num_times_exploited:", num_times_exploited)
print("num times selected optimal bandit:", num_optimal)
# plot the results
cumulative_rewards = np.cumsum(rewards)
win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1)
plt.plot(win_rates)
plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES))
plt.show()
if __name__ == "__main__":
experiment()
| Bandit |
python | huggingface__transformers | src/transformers/models/ibert/quant_modules.py | {
"start": 27043,
"end": 30074
} | class ____(Function):
"""
Function to perform fixed-point arithmetic that can match integer arithmetic on hardware.
Args:
pre_act (`torch.Tensor`):
Input tensor.
pre_act_scaling_factor (`torch.Tensor`):
Scaling factor of the input tensor *pre_act*.
bit_num (`int`):
Quantization bitwidth.
z_scaling_factor (`torch.Tensor`):
Scaling factor of the output tensor.
identity (`torch.Tensor`, *optional*):
Identity tensor, if exists.
identity_scaling_factor (`torch.Tensor`, *optional*):
Scaling factor of the identity tensor *identity*, if exists.
Returns:
`torch.Tensor`: Output tensor(*pre_act* if *identity* is not given, otherwise the addition of *pre_act* and
*identity*), whose scale is rescaled to *z_scaling_factor*.
"""
@staticmethod
def forward(
ctx,
pre_act,
pre_act_scaling_factor,
bit_num,
z_scaling_factor,
identity=None,
identity_scaling_factor=None,
):
if len(pre_act_scaling_factor.shape) == 3:
reshape = lambda x: x # noqa: E731
else:
reshape = lambda x: x.view(1, 1, -1) # noqa: E731
ctx.identity = identity
n = 2 ** (bit_num - 1) - 1
with torch.no_grad():
pre_act_scaling_factor = reshape(pre_act_scaling_factor)
if identity is not None:
identity_scaling_factor = reshape(identity_scaling_factor)
ctx.z_scaling_factor = z_scaling_factor
z_int = torch.round(pre_act / pre_act_scaling_factor)
_A = pre_act_scaling_factor.type(torch.double)
_B = (z_scaling_factor.type(torch.float)).type(torch.double)
new_scale = _A / _B
new_scale = reshape(new_scale)
m, e = batch_frexp(new_scale)
output = z_int.type(torch.double) * m.type(torch.double)
output = torch.round(output / (2.0**e))
if identity is not None:
# needs addition of identity activation
wx_int = torch.round(identity / identity_scaling_factor)
_A = identity_scaling_factor.type(torch.double)
_B = (z_scaling_factor.type(torch.float)).type(torch.double)
new_scale = _A / _B
new_scale = reshape(new_scale)
m1, e1 = batch_frexp(new_scale)
output1 = wx_int.type(torch.double) * m1.type(torch.double)
output1 = torch.round(output1 / (2.0**e1))
output = output1 + output
return torch.clamp(output.type(torch.float), -n - 1, n)
@staticmethod
def backward(ctx, grad_output):
identity_grad = None
if ctx.identity is not None:
identity_grad = grad_output.clone() / ctx.z_scaling_factor
return grad_output.clone() / ctx.z_scaling_factor, None, None, None, None, identity_grad, None
| FixedPointMul |
python | sqlalchemy__sqlalchemy | test/engine/test_processors.py | {
"start": 3994,
"end": 4263
} | class ____(_DateProcessorTest):
__requires__ = ("cextensions",)
@classmethod
def setup_test_class(cls):
from sqlalchemy.engine import _processors_cy
assert _processors_cy._is_compiled()
cls.module = _processors_cy
| CyDateProcessorTest |
python | pydata__xarray | xarray/tests/test_plot.py | {
"start": 3977,
"end": 4943
} | class ____:
@pytest.fixture(autouse=True)
def setup(self) -> Generator:
yield
# Remove all matplotlib figures
plt.close("all")
def pass_in_axis(self, plotmethod, subplot_kw=None) -> None:
_fig, axs = plt.subplots(ncols=2, subplot_kw=subplot_kw, squeeze=False)
ax = axs[0, 0]
plotmethod(ax=ax)
assert ax.has_data()
@pytest.mark.slow
def imshow_called(self, plotmethod) -> bool:
plotmethod()
images = plt.gca().findobj(mpl.image.AxesImage)
return len(images) > 0
def contourf_called(self, plotmethod) -> bool:
plotmethod()
# Compatible with mpl before (PathCollection) and after (QuadContourSet) 3.8
def matchfunc(x) -> bool:
return isinstance(
x, mpl.collections.PathCollection | mpl.contour.QuadContourSet
)
paths = plt.gca().findobj(matchfunc)
return len(paths) > 0
| PlotTestCase |
python | sympy__sympy | sympy/tensor/array/mutable_ndim_array.py | {
"start": 54,
"end": 277
} | class ____(NDimArray):
def as_immutable(self):
raise NotImplementedError("abstract method")
def as_mutable(self):
return self
def _sympy_(self):
return self.as_immutable()
| MutableNDimArray |
python | mlflow__mlflow | tests/pyfunc/test_spark.py | {
"start": 4919,
"end": 58676
} | class ____(NamedTuple):
model: Any
inference_data: Any
@pytest.fixture(scope="module")
def sklearn_model():
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
knn_model = KNeighborsClassifier()
knn_model.fit(X, y)
return ModelWithData(model=knn_model, inference_data=X)
def test_spark_udf(spark, model_path):
mlflow.pyfunc.save_model(
path=model_path,
loader_module=__name__,
code_paths=[os.path.dirname(tests.__file__)],
)
with mock.patch("mlflow.pyfunc.warn_dependency_requirement_mismatches") as mock_check_fn:
reloaded_pyfunc_model = mlflow.pyfunc.load_model(model_path)
mock_check_fn.assert_called_once()
pandas_df = pd.DataFrame(data=np.ones((10, 10)), columns=[str(i) for i in range(10)])
spark_df = spark.createDataFrame(pandas_df)
# Test all supported return types
type_map = {
"float": (FloatType(), np.number),
"int": (IntegerType(), np.int32),
"double": (DoubleType(), np.number),
"long": (LongType(), int),
"string": (StringType(), None),
"bool": (BooleanType(), bool),
"boolean": (BooleanType(), bool),
}
for tname, tdef in type_map.items():
spark_type, np_type = tdef
prediction_df = reloaded_pyfunc_model.predict(pandas_df)
for is_array in [True, False]:
t = ArrayType(spark_type) if is_array else spark_type
if tname == "string":
expected = prediction_df.applymap(str)
else:
expected = prediction_df.select_dtypes(np_type)
if tname == "float":
expected = expected.astype(np.float32)
if tname in {"bool", "boolean"}:
expected = expected.astype(bool)
expected = [list(row[1]) if is_array else row[1][0] for row in expected.iterrows()]
pyfunc_udf = spark_udf(spark, model_path, result_type=t, env_manager="local")
new_df = spark_df.withColumn("prediction", pyfunc_udf(*pandas_df.columns))
actual = list(new_df.select("prediction").toPandas()["prediction"])
assert expected == actual
if not is_array:
pyfunc_udf = spark_udf(spark, model_path, result_type=tname, env_manager="local")
new_df = spark_df.withColumn("prediction", pyfunc_udf(*pandas_df.columns))
actual = list(new_df.select("prediction").toPandas()["prediction"])
assert expected == actual
@pytest.mark.parametrize("sklearn_version", ["1.3.2", "1.4.2"])
@pytest.mark.parametrize("env_manager", ["virtualenv", "conda", "uv"])
def test_spark_udf_env_manager_can_restore_env(
spark, model_path, sklearn_version, env_manager, monkeypatch
):
class EnvRestoringTestModel(mlflow.pyfunc.PythonModel):
def __init__(self):
pass
def predict(self, context, model_input, params=None):
import sklearn
return model_input.apply(lambda row: sklearn.__version__, axis=1)
infer_spark_df = spark.createDataFrame(pd.DataFrame(data=[[1, 2]], columns=["a", "b"]))
mlflow.pyfunc.save_model(
path=model_path,
python_model=EnvRestoringTestModel(),
pip_requirements=[
f"pyspark=={pyspark.__version__}",
f"scikit-learn=={sklearn_version}",
# pytest is required to load the custom model from this file
f"pytest=={pytest.__version__}",
],
)
# tests/helper_functions.py
from tests.helper_functions import _get_mlflow_home
monkeypatch.setenv("MLFLOW_HOME", _get_mlflow_home())
python_udf = mlflow.pyfunc.spark_udf(
spark, model_path, env_manager=env_manager, result_type="string"
)
result = infer_spark_df.select(python_udf("a", "b").alias("result")).toPandas().result[0]
assert result == sklearn_version
@pytest.mark.parametrize(
("env_manager", "force_stdin_scoring_server"),
[("virtualenv", False), ("conda", False), ("uv", False), ("uv", True)],
)
def test_spark_udf_env_manager_predict_sklearn_model(
spark, sklearn_model, model_path, env_manager, force_stdin_scoring_server, monkeypatch
):
monkeypatch.setenv(
"MLFLOW_ENFORCE_STDIN_SCORING_SERVER_FOR_SPARK_UDF",
str(force_stdin_scoring_server),
)
model, inference_data = sklearn_model
mlflow.sklearn.save_model(model, model_path)
expected_pred_result = model.predict(inference_data)
infer_data = pd.DataFrame(inference_data, columns=["a", "b"])
infer_spark_df = spark.createDataFrame(infer_data)
pyfunc_udf = spark_udf(spark, model_path, env_manager=env_manager)
result = (
infer_spark_df.select(pyfunc_udf("a", "b").alias("predictions"))
.toPandas()
.predictions.to_numpy()
)
np.testing.assert_allclose(result, expected_pred_result, rtol=1e-5)
def test_spark_udf_with_single_arg(spark):
class TestModel(PythonModel):
def predict(self, context, model_input, params=None):
return [",".join(map(str, model_input.columns.tolist()))] * len(model_input)
with mlflow.start_run() as run:
mlflow.pyfunc.log_model(name="model", python_model=TestModel())
udf = mlflow.pyfunc.spark_udf(
spark, f"runs:/{run.info.run_id}/model", result_type=StringType()
)
data1 = spark.createDataFrame(pd.DataFrame({"a": [1], "b": [4]})).repartition(1)
result = data1.withColumn("res", udf("a")).select("res").toPandas()
assert result.res[0] == "0"
data2 = data1.select(struct("a", "b").alias("ab"))
result = data2.withColumn("res", udf("ab")).select("res").toPandas()
assert result.res[0] == "a,b"
def test_spark_udf_with_struct_return_type(spark):
class TestModel(PythonModel):
def predict(self, context, model_input, params=None):
input_len = len(model_input)
return {
"r1": [1] * input_len,
"r2": [1.5] * input_len,
"r3": [[1, 2]] * input_len,
"r4": [np.array([1.5, 2.5])] * input_len,
"r5": np.vstack([np.array([1.5, 2.5])] * input_len),
"r6": [True] * input_len,
"r7": ["abc"] * input_len,
}
with mlflow.start_run() as run:
mlflow.pyfunc.log_model(name="model", python_model=TestModel())
udf = mlflow.pyfunc.spark_udf(
spark,
f"runs:/{run.info.run_id}/model",
result_type=(
"r1 int, r2 float, r3 array<long>, r4 array<double>, "
"r5 array<double>, r6 boolean, r7 string"
),
)
data1 = spark.range(2).repartition(1)
result = (
data1.withColumn("res", udf("id"))
.select("res.r1", "res.r2", "res.r3", "res.r4", "res.r5", "res.r6", "res.r7")
.toPandas()
)
assert result.r1.tolist() == [1] * 2
np.testing.assert_almost_equal(result.r2.tolist(), [1.5] * 2)
assert result.r3.tolist() == [[1, 2]] * 2
np.testing.assert_almost_equal(
np.vstack(result.r4.tolist()), np.array([[1.5, 2.5], [1.5, 2.5]])
)
np.testing.assert_almost_equal(
np.vstack(result.r5.tolist()), np.array([[1.5, 2.5], [1.5, 2.5]])
)
assert result.r6.tolist() == [True] * 2
assert result.r7.tolist() == ["abc"] * 2
def test_spark_udf_colspec_struct_return_type_inference(spark):
class TestModel(PythonModel):
def predict(self, context, model_input):
input_len = len(model_input)
return {
"r1": [1] * input_len,
"r2": [1.5] * input_len,
"r3": [True] * input_len,
"r4": ["abc"] * input_len,
}
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="model",
python_model=TestModel(),
signature=ModelSignature(
inputs=Schema([ColSpec("long")]),
outputs=Schema(
[
ColSpec("integer", "r1"),
ColSpec("float", "r2"),
ColSpec("boolean", "r3"),
ColSpec("string", "r4"),
]
),
),
)
udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri)
data1 = spark.range(2).repartition(1)
result_spark_df = data1.withColumn("res", udf("id")).select(
"res.r1", "res.r2", "res.r3", "res.r4"
)
assert (
result_spark_df.schema.simpleString() == "struct<r1:int,r2:float,r3:boolean,r4:string>"
)
result = result_spark_df.toPandas()
expected_data = {
"r1": [1] * 2,
"r2": [1.5] * 2,
"r3": [True] * 2,
"r4": ["abc"] * 2,
}
expected_df = pd.DataFrame(expected_data)
assert_frame_equal(result, expected_df, check_dtype=False)
def test_spark_udf_tensorspec_struct_return_type_inference(spark):
class TestModel(PythonModel):
def predict(self, context, model_input):
input_len = len(model_input)
return {
"r1": [[1, 2]] * input_len,
"r2": [np.array([1.5, 2.5])] * input_len,
"r3": np.vstack([np.array([1.5, 2.5])] * input_len),
"r4": [np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])] * input_len,
"r5": np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]] * input_len),
}
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="model",
python_model=TestModel(),
signature=ModelSignature(
inputs=Schema([ColSpec("long")]),
outputs=Schema(
[
TensorSpec(np.dtype(np.int64), (2,), "r1"),
TensorSpec(np.dtype(np.float64), (2,), "r2"),
TensorSpec(np.dtype(np.float64), (2,), "r3"),
TensorSpec(np.dtype(np.float64), (2, 3), "r4"),
TensorSpec(np.dtype(np.float64), (2, 3), "r5"),
]
),
),
)
udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri)
data1 = spark.range(2).repartition(1)
result_spark_df = data1.withColumn("res", udf("id")).select(
"res.r1", "res.r2", "res.r3", "res.r4", "res.r5"
)
assert (
result_spark_df.schema.simpleString() == "struct<"
"r1:array<bigint>,"
"r2:array<double>,"
"r3:array<double>,"
"r4:array<array<double>>,"
"r5:array<array<double>>"
">"
)
result = result_spark_df.toPandas()
assert result["r1"].tolist() == [[1, 2]] * 2
np.testing.assert_almost_equal(
np.vstack(result["r2"].tolist()), np.array([[1.5, 2.5], [1.5, 2.5]])
)
np.testing.assert_almost_equal(
np.vstack(result["r3"].tolist()), np.array([[1.5, 2.5], [1.5, 2.5]])
)
np.testing.assert_almost_equal(list(result["r4"]), [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]] * 2)
np.testing.assert_almost_equal(list(result["r5"]), [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]] * 2)
def test_spark_udf_single_1d_array_return_type_inference(spark):
class TestModel(PythonModel):
def predict(self, context, model_input):
input_len = len(model_input)
return [[1, 2]] * input_len
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="model",
python_model=TestModel(),
signature=ModelSignature(
inputs=Schema([ColSpec("long")]),
outputs=Schema(
[
TensorSpec(np.dtype(np.int64), (2,)),
]
),
),
)
udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri)
data1 = spark.range(2).repartition(1)
result_spark_df = data1.select(udf("id").alias("res"))
assert result_spark_df.schema.simpleString() == "struct<res:array<bigint>>"
result = result_spark_df.toPandas()
assert result["res"].tolist() == [[1, 2]] * 2
def test_spark_udf_single_2d_array_return_type_inference(spark):
class TestModel(PythonModel):
def predict(self, context, model_input):
input_len = len(model_input)
return [np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])] * input_len
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="model",
python_model=TestModel(),
signature=ModelSignature(
inputs=Schema([ColSpec("long")]),
outputs=Schema(
[
TensorSpec(np.dtype(np.float64), (2, 3)),
]
),
),
)
udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri)
data1 = spark.range(2).repartition(1)
result_spark_df = data1.select(udf("id").alias("res"))
assert result_spark_df.schema.simpleString() == "struct<res:array<array<double>>>"
result = result_spark_df.toPandas()
np.testing.assert_almost_equal(
list(result["res"]), [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]] * 2
)
def test_spark_udf_single_long_return_type_inference(spark):
class TestModel(PythonModel):
def predict(self, context, model_input):
input_len = len(model_input)
return [12] * input_len
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="model",
python_model=TestModel(),
signature=ModelSignature(
inputs=Schema([ColSpec("long")]),
outputs=Schema(
[
ColSpec("long"),
]
),
),
)
udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri)
data1 = spark.range(2).repartition(1)
result_spark_df = data1.select(udf("id").alias("res"))
assert result_spark_df.schema.simpleString() == "struct<res:bigint>"
result = result_spark_df.toPandas()
assert result["res"].tolist() == [12] * 2
@pytest.mark.parametrize(
("type_str", "expected"),
[
# Good
("int", True),
("bigint", True),
("float", True),
("double", True),
("boolean", True),
("string", True),
("array<double>", True),
("array<array<double>>", True),
("a long, b boolean, c array<double>, d array<array<double>>", True),
("array<struct<a: int, b: boolean>>", True),
("array<struct<a: array<int>>>", True),
("array<array<array<float>>>", True),
("a array<array<array<int>>>", True),
("struct<x: struct<a: long, b: boolean>>", True),
("struct<x: array<struct<a: long, b: boolean>>>", True),
("struct<a: array<struct<a: int>>>", True),
# Bad
("timestamp", False),
("array<timestamp>", False),
("struct<a: int, b: timestamp>", False),
],
)
@pytest.mark.usefixtures("spark")
def test_check_spark_udf_return_type(type_str, expected):
assert _check_udf_return_type(_parse_spark_datatype(type_str)) == expected
def test_spark_udf_autofills_no_arguments(spark):
class TestModel(PythonModel):
def predict(self, context, model_input, params=None):
return [model_input.columns] * len(model_input)
signature = ModelSignature(
inputs=Schema([ColSpec("long", "a"), ColSpec("long", "b"), ColSpec("long", "c")]),
outputs=Schema([ColSpec("integer")]),
)
good_data = spark.createDataFrame(
pd.DataFrame(columns=["a", "b", "c", "d"], data={"a": [1], "b": [2], "c": [3], "d": [4]})
)
with mlflow.start_run() as run:
mlflow.pyfunc.log_model(name="model", python_model=TestModel(), signature=signature)
udf = mlflow.pyfunc.spark_udf(
spark,
f"runs:/{run.info.run_id}/model",
result_type=ArrayType(StringType()),
env_manager="local",
)
res = good_data.withColumn("res", udf()).select("res").toPandas()
assert res["res"][0] == ["a", "b", "c"]
with pytest.raises(
pyspark.sql.utils.PythonException,
match=r"Model input is missing required columns. Expected 3 required input columns",
):
res = good_data.withColumn("res", udf("b", "c")).select("res").toPandas()
# this dataframe won't work because it's missing column a
bad_data = spark.createDataFrame(
pd.DataFrame(
columns=["x", "b", "c", "d"], data={"x": [1], "b": [2], "c": [3], "d": [4]}
)
)
with pytest.raises(
AnalysisException,
match=(
# PySpark < 3.3
r"cannot resolve 'a' given input columns|"
# PySpark 3.3
r"Column 'a' does not exist|"
# PySpark 3.4
r"A column or function parameter with name `a` cannot be resolved|"
# PySpark 4.0
r"A column, variable, or function parameter with name `a` cannot be resolved"
),
):
bad_data.withColumn("res", udf())
nameless_signature = ModelSignature(
inputs=Schema([ColSpec("long"), ColSpec("long"), ColSpec("long")]),
outputs=Schema([ColSpec("integer")]),
)
with mlflow.start_run() as run:
mlflow.pyfunc.log_model(
name="model", python_model=TestModel(), signature=nameless_signature
)
udf = mlflow.pyfunc.spark_udf(
spark, f"runs:/{run.info.run_id}/model", result_type=ArrayType(StringType())
)
with pytest.raises(
MlflowException,
match=r"Cannot apply udf because no column names specified",
):
good_data.withColumn("res", udf())
with mlflow.start_run() as run:
# model without signature
mlflow.pyfunc.log_model(name="model", python_model=TestModel())
udf = mlflow.pyfunc.spark_udf(
spark, f"runs:/{run.info.run_id}/model", result_type=ArrayType(StringType())
)
with pytest.raises(MlflowException, match="Attempting to apply udf on zero columns"):
res = good_data.withColumn("res", udf()).select("res").toPandas()
named_signature_with_optional_input = ModelSignature(
inputs=Schema(
[
ColSpec("long", "a"),
ColSpec("long", "b"),
ColSpec("long", "c"),
ColSpec("long", "d", required=False),
]
),
outputs=Schema([ColSpec("integer")]),
)
with mlflow.start_run() as run:
mlflow.pyfunc.log_model(
name="model", python_model=TestModel(), signature=named_signature_with_optional_input
)
udf = mlflow.pyfunc.spark_udf(
spark, f"runs:/{run.info.run_id}/model", result_type=ArrayType(StringType())
)
with pytest.raises(
MlflowException,
match=r"Cannot apply UDF without column names specified when model "
r"signature contains optional columns",
):
good_data.withColumn("res", udf())
# Ensure optional inputs are not truncated
res = good_data.withColumn("res", udf(*good_data.columns)).select("res").toPandas()
assert res["res"][0] == ["a", "b", "c", "d"]
def test_spark_udf_autofills_column_names_with_schema(spark):
class TestModel(PythonModel):
def predict(self, context, model_input, params=None):
return [model_input.columns] * len(model_input)
signature = ModelSignature(
inputs=Schema([ColSpec("long", "a"), ColSpec("long", "b"), ColSpec("long", "c")]),
outputs=Schema([ColSpec("integer")]),
)
with mlflow.start_run() as run:
mlflow.pyfunc.log_model(name="model", python_model=TestModel(), signature=signature)
udf = mlflow.pyfunc.spark_udf(
spark,
f"runs:/{run.info.run_id}/model",
result_type=ArrayType(StringType()),
env_manager="local",
)
data = spark.createDataFrame(
pd.DataFrame(
columns=["a", "b", "c", "d"], data={"a": [1], "b": [2], "c": [3], "d": [4]}
)
)
res = data.withColumn("res2", udf("a", "b", "c")).select("res2").toPandas()
assert res["res2"][0] == ["a", "b", "c"]
res = data.withColumn("res4", udf("a", "b", "c", "d")).select("res4").toPandas()
assert res["res4"][0] == ["a", "b", "c"]
# Exception being thrown in udf process intermittently causes the SparkSession to crash
# which results in a `java.net.SocketException: Socket is closed` failure in subsequent
# tests if tests are conducted after this exception capture validation.
# Keep this at the end of this suite so that executor sockets don't get closed while
# processing is still being conducted.
with pytest.raises(pyspark.sql.utils.PythonException, match=r".+"):
data.withColumn("res1", udf("a", "b")).select("res1").toPandas()
def test_spark_udf_with_datetime_columns(spark):
class TestModel(PythonModel):
def predict(self, context, model_input, params=None):
return [model_input.columns] * len(model_input)
signature = ModelSignature(
inputs=Schema([ColSpec("datetime", "timestamp"), ColSpec("datetime", "date")]),
outputs=Schema([ColSpec("integer")]),
)
with mlflow.start_run() as run:
mlflow.pyfunc.log_model(name="model", python_model=TestModel(), signature=signature)
udf = mlflow.pyfunc.spark_udf(
spark,
f"runs:/{run.info.run_id}/model",
result_type=ArrayType(StringType()),
env_manager="local",
)
data = spark.range(10).selectExpr(
"current_timestamp() as timestamp", "current_date() as date"
)
res = data.withColumn("res", udf("timestamp", "date")).select("res")
res = res.toPandas()
assert res["res"][0] == ["timestamp", "date"]
def test_spark_udf_over_empty_partition(spark):
class TestModel(PythonModel):
def predict(self, context, model_input, params=None):
if len(model_input) == 0:
raise ValueError("Empty input is not allowed.")
else:
return model_input.a + model_input.b
signature = ModelSignature(
inputs=Schema([ColSpec("long", "a"), ColSpec("long", "b")]),
outputs=Schema([ColSpec("long")]),
)
# Create a spark dataframe with 2 partitions, one partition has one record and
# the other partition is empty.
spark_df = spark.createDataFrame(
pd.DataFrame(columns=["x", "y"], data={"x": [11], "y": [21]})
).repartition(2)
with mlflow.start_run() as run:
mlflow.pyfunc.log_model(name="model", python_model=TestModel(), signature=signature)
python_udf = mlflow.pyfunc.spark_udf(
spark, f"runs:/{run.info.run_id}/model", result_type=LongType()
)
res_df = spark_df.withColumn("res", python_udf("x", "y")).select("res").toPandas()
assert res_df.res[0] == 32
res_df2 = (
spark_df.withColumn("res", python_udf(struct(col("x").alias("a"), col("y").alias("b"))))
.select("res")
.toPandas()
)
assert res_df2.res[0] == 32
def test_model_cache(spark, model_path):
mlflow.pyfunc.save_model(
path=model_path,
loader_module=__name__,
code_paths=[os.path.dirname(tests.__file__)],
)
archive_path = SparkModelCache.add_local_model(spark, model_path)
assert archive_path != model_path
# Define the model class name as a string so that each Spark executor can reference it
# without attempting to resolve ConstantPyfuncWrapper, which is only available on the driver.
constant_model_name = ConstantPyfuncWrapper.__name__
def check_get_or_load_return_value(model_from_cache, model_path_from_cache):
assert model_path_from_cache != model_path
assert os.path.isdir(model_path_from_cache)
model2 = mlflow.pyfunc.load_model(model_path_from_cache)
for model in [model_from_cache, model2]:
assert isinstance(model, PyFuncModel)
# NB: Can not use instanceof test as remote does not know about ConstantPyfuncWrapper
# class.
assert type(model._model_impl).__name__ == constant_model_name
# Ensure we can use the model locally.
local_model, local_model_path = SparkModelCache.get_or_load(archive_path)
check_get_or_load_return_value(local_model, local_model_path)
# Request the model on all executors, and see how many times we got cache hits.
def get_model(_):
executor_model, executor_model_path = SparkModelCache.get_or_load(archive_path)
check_get_or_load_return_value(executor_model, executor_model_path)
return SparkModelCache._cache_hits
# This will run 30 distinct tasks, and we expect most to reuse an already-loaded model.
# Note that we can't necessarily expect an even split, or even that there were only
# exactly 2 python processes launched, due to Spark and its mysterious ways, but we do
# expect significant reuse.
results = spark.sparkContext.parallelize(range(100), 30).map(get_model).collect()
assert max(results) > 10
# Running again should see no newly-loaded models.
results2 = spark.sparkContext.parallelize(range(100), 30).map(get_model).collect()
assert min(results2) > 0
@pytest.mark.skipif(
not sys.platform.startswith("linux"),
reason="Only Linux system support setting parent process death signal via prctl lib.",
)
@pytest.mark.parametrize("env_manager", ["virtualenv", "conda"])
def test_spark_udf_embedded_model_server_killed_when_job_canceled(
spark, sklearn_model, model_path, env_manager
):
from mlflow.models.flavor_backend_registry import get_flavor_backend
from mlflow.pyfunc.scoring_server.client import ScoringServerClient
mlflow.sklearn.save_model(sklearn_model.model, model_path)
server_port = 51234
timeout = 60
@pandas_udf("int")
def udf_with_model_server(it: Iterator[pd.Series]) -> Iterator[pd.Series]:
from mlflow.models.flavor_backend_registry import get_flavor_backend
get_flavor_backend(
model_path, env_manager=env_manager, workers=1, install_mlflow=False
).serve(
model_uri=model_path,
port=server_port,
host="127.0.0.1",
timeout=timeout,
enable_mlserver=False,
synchronous=False,
)
time.sleep(120)
yield from it
def run_job():
# Start a spark job with only one UDF task,
# and the udf task starts a mlflow model server process.
spark.range(1).repartition(1).select(udf_with_model_server("id")).collect()
get_flavor_backend(model_path, env_manager=env_manager, install_mlflow=False).prepare_env(
model_uri=model_path
)
job_thread = threading.Thread(target=run_job)
job_thread.start()
client = ScoringServerClient("127.0.0.1", server_port)
client.wait_server_ready(timeout=20)
spark.sparkContext.cancelAllJobs()
job_thread.join()
time.sleep(10) # waiting server to exit and release the port.
# assert ping failed, i.e. the server process is killed successfully.
with pytest.raises(Exception, match=r".*"):
client.ping()
def test_spark_udf_datetime_with_model_schema(spark):
X, y = datasets.load_iris(as_frame=True, return_X_y=True)
X = X.assign(
timestamp=[datetime.datetime(2022, random.randint(1, 12), 1) for _ in range(len(X))]
)
month_extractor = FunctionTransformer(
lambda df: df.assign(month=df["timestamp"].map(lambda d: d.month)), validate=False
)
timestamp_remover = ColumnTransformer(
[("selector", "passthrough", X.columns.drop("timestamp"))], remainder="drop"
)
model = Pipeline(
[
("month_extractor", month_extractor),
("timestamp_remover", timestamp_remover),
("knn", KNeighborsClassifier()),
]
)
model.fit(X, y)
timestamp_dtype = {"timestamp": "datetime64[ns]"}
with mlflow.start_run():
signature = mlflow.models.infer_signature(X.astype(timestamp_dtype), y)
model_info = mlflow.sklearn.log_model(model, name="model", signature=signature)
inference_sample = X.sample(n=10, random_state=42)
infer_spark_df = spark.createDataFrame(inference_sample.astype(timestamp_dtype))
pyfunc_udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri, env_manager="conda")
result = infer_spark_df.select(pyfunc_udf(*X.columns).alias("predictions")).toPandas()
np.testing.assert_almost_equal(result.to_numpy().squeeze(), model.predict(inference_sample))
def test_spark_udf_string_datetime_with_model_schema(spark):
X, y = datasets.load_iris(as_frame=True, return_X_y=True)
X = X.assign(timestamp=[f"2022-{random.randint(1, 12):02d}-01" for _ in range(len(X))])
month_extractor = FunctionTransformer(
lambda df: df.assign(month=df["timestamp"].str.extract(r"^2022-0?(\d{1,2})-").astype(int)),
validate=False,
)
timestamp_remover = ColumnTransformer(
[("selector", "passthrough", X.columns.drop("timestamp"))], remainder="drop"
)
model = Pipeline(
[
("month_extractor", month_extractor),
("timestamp_remover", timestamp_remover),
("knn", KNeighborsClassifier()),
]
)
model.fit(X, y)
with mlflow.start_run():
signature = mlflow.models.infer_signature(X, y)
model_info = mlflow.sklearn.log_model(model, name="model", signature=signature)
inference_sample = X.sample(n=10, random_state=42)
infer_spark_df = spark.createDataFrame(inference_sample)
pyfunc_udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri, env_manager="conda")
result = infer_spark_df.select(pyfunc_udf(*X.columns).alias("predictions")).toPandas()
np.testing.assert_almost_equal(result.to_numpy().squeeze(), model.predict(inference_sample))
def test_spark_udf_with_col_spec_type_input(spark):
input_pdf = pd.DataFrame(
{
"c_bool": [True],
"c_int": [10],
"c_long": [20],
"c_float": [1.5],
"c_double": [2.5],
"c_str": ["abc"],
"c_binary": [b"xyz"],
"c_datetime": [pd.to_datetime("2018-01-01")],
}
)
class TestModel(PythonModel):
def predict(self, context, model_input, params=None):
assert model_input.to_dict() == input_pdf.to_dict()
return model_input[["c_int", "c_float"]]
signature = ModelSignature(
inputs=Schema(
[
ColSpec("boolean", "c_bool"),
ColSpec("integer", "c_int"),
ColSpec("long", "c_long"),
ColSpec("float", "c_float"),
ColSpec("double", "c_double"),
ColSpec("string", "c_str"),
ColSpec("binary", "c_binary"),
ColSpec("datetime", "c_datetime"),
]
),
)
spark_schema = (
"c_bool boolean, c_int int, c_long long, c_float float, c_double double, "
"c_str string, c_binary binary, c_datetime timestamp"
)
data = spark.createDataFrame(
data=input_pdf,
schema=spark_schema,
).repartition(1)
with mlflow.start_run() as run:
mlflow.pyfunc.log_model(name="model", python_model=TestModel(), signature=signature)
udf = mlflow.pyfunc.spark_udf(
spark,
f"runs:/{run.info.run_id}/model",
result_type="c_int int, c_float float",
env_manager="local",
)
res = data.withColumn("res", udf()).select("res.c_int", "res.c_float").toPandas()
assert res.c_int.tolist() == [10]
np.testing.assert_almost_equal(res.c_float.tolist(), [1.5])
def test_spark_udf_stdin_scoring_server(spark):
    """Score via the stdin-based scoring server path.

    With port connectivity mocked off, spark_udf must fall back to stdin/stdout
    communication with the virtualenv-managed model server and still produce
    the same predictions as calling the model locally.
    """
    X, y = datasets.load_iris(return_X_y=True, as_frame=True)
    # Subsample to keep the (process-spawning) virtualenv round trip fast.
    X = X[::5]
    y = y[::5]
    # Fit exactly once; the original called `.fit` a second time redundantly.
    model = LogisticRegression().fit(X, y)

    with mlflow.start_run():
        signature = mlflow.models.infer_signature(X, y)
        model_info = mlflow.sklearn.log_model(model, name="model", signature=signature)

    with mock.patch("mlflow.pyfunc.check_port_connectivity", return_value=False):
        udf = mlflow.pyfunc.spark_udf(
            spark,
            model_info.model_uri,
            env_manager="virtualenv",
        )
        df = spark.createDataFrame(X)
        result = df.select(udf(*X.columns)).toPandas()
        np.testing.assert_almost_equal(result.to_numpy().squeeze(), model.predict(X))
# TODO: Remove `skipif` once pyspark 3.4 is released
@pytest.mark.skipif(
    Version(pyspark.__version__) < Version("3.4.0"), reason="requires spark >= 3.4.0"
)
def test_spark_udf_array_of_structs(spark):
    """A pyfunc returning a list of tuples per row maps onto an
    ArrayType(StructType(...)) result_type (requires Spark >= 3.4)."""

    class TestModel(PythonModel):
        def predict(self, context, model_input, params=None):
            # One fixed single-struct array per input row, covering all scalar field types.
            return [[("str", 0, 1, 0.0, 0.1, True)]] * len(model_input)

    signature = ModelSignature(inputs=Schema([ColSpec("long", "a")]))
    good_data = spark.createDataFrame(pd.DataFrame({"a": [1, 2, 3]}))
    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(
            name="model",
            python_model=TestModel(),
            signature=signature,
        )
        udf = mlflow.pyfunc.spark_udf(
            spark,
            f"runs:/{run.info.run_id}/model",
            result_type=ArrayType(
                StructType(
                    [
                        StructField("str", StringType()),
                        StructField("int", IntegerType()),
                        StructField("long", LongType()),
                        StructField("float", FloatType()),
                        StructField("double", DoubleType()),
                        StructField("bool", BooleanType()),
                    ]
                )
            ),
        )
        res = good_data.withColumn("res", udf("a")).select("res").toPandas()
        assert res["res"][0] == [("str", 0, 1, 0.0, 0.1, True)]
def test_spark_udf_return_nullable_array_field(spark):
    """Array-typed result fields must be nullable: NaN inside an array becomes
    None, and a None/NaN *row* value becomes a NULL array."""

    class TestModel(PythonModel):
        def predict(self, context, model_input):
            # Last two rows are missing entirely (None / NaN) on purpose.
            values = [np.array([1.0, np.nan])] * (len(model_input) - 2) + [None, np.nan]
            return pd.DataFrame({"a": values})

    with mlflow.start_run():
        mlflow_info = mlflow.pyfunc.log_model(
            name="model",
            python_model=TestModel(),
        )
        udf = mlflow.pyfunc.spark_udf(
            spark,
            mlflow_info.model_uri,
            result_type="a array<double>",
        )
        # Single partition so the model sees all 3 rows in one batch.
        data1 = spark.range(3).repartition(1)
        result = data1.select(udf("id").alias("res")).select("res.a").toPandas()
        assert list(result["a"]) == [[1.0, None], None, None]
def test_spark_udf_with_params(spark):
    """Params supplied at spark_udf() creation time are forwarded to
    predict(..., params=...) for every batch, one value per declared type."""

    class TestModel(PythonModel):
        def predict(self, context, model_input, params=None):
            # Echo the received params back so the test can assert on them.
            return [[tuple(params.values())]] * len(model_input)

    test_params = {
        "str_param": "str_a",
        "int_param": np.int32(1),
        "bool_param": True,
        "double_param": 1.0,
        "float_param": np.float32(0.1),
        "long_param": 100,
    }
    signature = mlflow.models.infer_signature(["input"], params=test_params)
    spark_df = spark.createDataFrame(
        [
            ("input1",),
            ("input2",),
            ("input3",),
        ],
        ["input_col"],
    )
    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(
            name="model",
            python_model=TestModel(),
            signature=signature,
        )
        udf = mlflow.pyfunc.spark_udf(
            spark,
            f"runs:/{run.info.run_id}/model",
            # One struct field per param, matching the types in test_params.
            result_type=ArrayType(
                StructType(
                    [
                        StructField("str_param", StringType()),
                        StructField("int_param", IntegerType()),
                        StructField("bool_param", BooleanType()),
                        StructField("double_param", DoubleType()),
                        StructField("float_param", FloatType()),
                        StructField("long_param", LongType()),
                    ]
                )
            ),
            params=test_params,
        )
        res = spark_df.withColumn("res", udf("input_col")).select("res").toPandas()
        assert res["res"][0] == [tuple(test_params.values())]
def test_spark_udf_with_array_params(spark):
    """Array-valued params round-trip through spark_udf into predict() and map
    onto a StructType of ArrayType result columns."""

    class TestModel(PythonModel):
        def predict(self, context, model_input, params=None):
            # One result column per param, the array repeated for every input row.
            return pd.DataFrame({k: [v] * len(model_input) for k, v in params.items()})

    test_params = {
        "str_array": np.array(["str_a", "str_b"]),
        "int_array": np.array([np.int32(1), np.int32(2)]),
        "double_array": np.array([1.0, 2.0]),
        "bool_array": np.array([True, False]),
        "float_array": np.array([np.float32(1.0), np.float32(2.0)]),
        "long_array": np.array([1, 2]),
    }
    signature = mlflow.models.infer_signature(["input"], params=test_params)
    spark_df = spark.createDataFrame(
        [
            ("input1",),
            ("input2",),
            ("input3",),
        ],
        ["input_col"],
    )
    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(
            name="model",
            python_model=TestModel(),
            signature=signature,
        )
        udf = mlflow.pyfunc.spark_udf(
            spark,
            f"runs:/{run.info.run_id}/model",
            result_type=StructType(
                [
                    StructField("str_array", ArrayType(StringType())),
                    StructField("int_array", ArrayType(IntegerType())),
                    StructField("double_array", ArrayType(DoubleType())),
                    StructField("bool_array", ArrayType(BooleanType())),
                    StructField("float_array", ArrayType(FloatType())),
                    StructField("long_array", ArrayType(LongType())),
                ]
            ),
            params=test_params,
        )
        res = spark_df.withColumn("res", udf("input_col")).select("res").toPandas()
        assert res["res"].values[0] == tuple(v.tolist() for v in test_params.values())
def test_spark_udf_with_params_with_errors(spark):
    """Datetime-typed params are unsupported as spark_udf result types:
    TimestampType() must be rejected with an MlflowException."""

    # datetime is not supported
    class TestModel(PythonModel):
        def predict(self, context, model_input, params=None):
            # NOTE: predict is never reached in this test (spark_udf fails first),
            # but the original `params.values[0]` would have raised a TypeError
            # (dict.values is a method); index the materialized values instead.
            return [list(params.values())[0]] * len(model_input)

    test_params = {"datetime_param": np.datetime64("2023-06-26 00:00:00")}
    signature = mlflow.models.infer_signature(["input"], params=test_params)

    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(
            name="model",
            python_model=TestModel(),
            signature=signature,
        )
        with pytest.raises(MlflowException, match=r"Invalid 'spark_udf' result type"):
            mlflow.pyfunc.spark_udf(
                spark,
                f"runs:/{run.info.run_id}/model",
                result_type=TimestampType(),
                params=test_params,
            )
def test_spark_udf_compatible_with_mlflow_2_4_0(tmp_path, spark):
    """Models logged with mlflow 2.4.0 (pre params-support metadata format)
    must still load and score through spark_udf in the current version.

    # Code for logging the model in mlflow 2.4.0
    import mlflow

    class TestModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input):
            return ["string"] * len(model_input)

    signature = mlflow.models.infer_signature(["input"])
    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(
            "model",
            python_model=TestModel(),
            signature=signature,
        )
    """
    # Hand-write on disk the artifacts mlflow 2.4.0 would have produced.
    tmp_path.joinpath("MLmodel").write_text(
        """
artifact_path: model
flavors:
  python_function:
    cloudpickle_version: 2.2.1
    env:
      conda: conda.yaml
      virtualenv: python_env.yaml
    loader_module: mlflow.pyfunc.model
    python_model: python_model.pkl
    python_version: 3.8.16
mlflow_version: 2.4.0
model_uuid: 067c27bc09954838ad6d6bfc89c7eeed
run_id: 054cfd4d129849f88210568366fea24b
signature:
  inputs: '[{"type": "string"}]'
  outputs: null
utc_time_created: '2023-07-17 10:01:42.071952'
"""
    )
    tmp_path.joinpath("python_env.yaml").write_text(
        """
python: 3.8.16
build_dependencies:
- pip==23.1.2
- setuptools==56.0.0
- wheel==0.40.0
dependencies:
- -r requirements.txt
"""
    )
    tmp_path.joinpath("requirements.txt").write_text(
        """
mlflow==2.4.0
cloudpickle==2.2.1
"""
    )

    class TestModel(PythonModel):
        def predict(self, context, model_input):
            return ["string"] * len(model_input)

    python_model = TestModel()
    # Pickle the model object the same way log_model would have.
    with open(tmp_path / "python_model.pkl", "wb") as out:
        cloudpickle.dump(python_model, out)

    # Sanity check: we really are running a newer mlflow than the one "logged".
    assert Version(mlflow.__version__) > Version("2.4.0")

    model_uri = str(tmp_path)
    spark_df = spark.createDataFrame(
        [("input1",), ("input2",), ("input3",)],
        ["input_col"],
    )
    udf = mlflow.pyfunc.spark_udf(
        spark,
        model_uri,
        result_type=StringType(),
    )
    res = spark_df.withColumn("res", udf("input_col")).select("res").toPandas()
    assert res["res"][0] == ("string")
def test_spark_udf_with_model_serving(spark):
    """With port connectivity mocked off, spark_udf falls back to the stdin
    scoring-server path (conda env) and still forwards params to the model."""

    class TestModel(PythonModel):
        def predict(self, context, model_input, params=None):
            return ["string"] * len(model_input)

    test_params = {
        "str_param": "str_a",
    }
    signature = mlflow.models.infer_signature(["input"], params=test_params)
    spark_df = spark.createDataFrame(
        [
            ("input1",),
            ("input2",),
            ("input3",),
        ],
        ["input_col"],
    )
    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(
            name="model",
            python_model=TestModel(),
            signature=signature,
        )
    # Force the no-open-port code path inside the UDF worker.
    with mock.patch("mlflow.pyfunc.check_port_connectivity", return_value=False):
        udf = mlflow.pyfunc.spark_udf(
            spark,
            f"runs:/{run.info.run_id}/model",
            result_type=StringType(),
            params=test_params,
            env_manager="conda",
        )
        res = spark_df.withColumn("res", udf("input_col")).select("res").toPandas()
        assert res["res"][0] == ("string")
def test_spark_udf_set_extra_udf_env_vars(spark):
    """Environment variables passed via ``extra_env`` must be visible to the
    model running inside the UDF worker process."""

    class TestModel(PythonModel):
        def predict(self, context, model_input, params=None):
            # Surface the env var so the driver-side assertion can observe it.
            return [os.environ["TEST_ENV_VAR"]] * len(model_input)

    signature = mlflow.models.infer_signature(["input"])
    input_df = spark.createDataFrame(
        [("input1",), ("input2",), ("input3",)],
        ["input_col"],
    )
    with mlflow.start_run():
        logged_model = mlflow.pyfunc.log_model(
            name="model",
            python_model=TestModel(),
            signature=signature,
        )
        predict_udf = mlflow.pyfunc.spark_udf(
            spark,
            logged_model.model_uri,
            result_type=StringType(),
            env_manager="local",
            extra_env={"TEST_ENV_VAR": "test"},
        )
        out = input_df.withColumn("res", predict_udf("input_col")).select("res").toPandas()
        assert out["res"][0] == "test"
def test_modified_environ():
    """modified_environ sets the variable inside the context and removes it on exit."""
    key = "TEST_ENV_VAR"
    with modified_environ({key: "test"}):
        assert os.environ[key] == "test"
    # Variable must be gone once the context manager exits.
    assert os.environ.get(key) is None
def test_spark_df_schema_inference_for_map_type(spark):
    """Spark MapType columns are inferred as Object/Array specs; deeply nested
    maps (map-of-map) cannot be inferred and must raise."""
    data = [
        {
            "arr": ["a", "b"],
            "map1": {"a": 1, "b": 2},
            "map2": {"e": ["e", "e"]},
            "string": "c",
        }
    ]
    df = spark.createDataFrame(data)
    expected_schema = Schema(
        [
            ColSpec(Array(DataType.string), "arr"),
            # Maps come back as Objects keyed on the observed map keys.
            ColSpec(Object([Property("a", DataType.long), Property("b", DataType.long)]), "map1"),
            ColSpec(Object([Property("e", Array(DataType.string))]), "map2"),
            ColSpec(DataType.string, "string"),
        ]
    )
    inferred_schema = infer_signature(df).inputs
    assert inferred_schema == expected_schema

    # Map-of-map is too deep for inference: users must supply an explicit StructType.
    complex_df = spark.createDataFrame([{"map": {"nested_map": {"a": 1}}}])
    with pytest.raises(
        MlflowException, match=r"Please construct spark DataFrame with schema using StructType"
    ):
        _infer_schema(complex_df)
def test_spark_udf_structs_and_arrays(spark, tmp_path):
    """Struct, array, and array-of-struct columns survive the spark_udf input
    path; the model stringifies each row so the test can assert on the exact
    values (including numpy scalar reprs) the model received."""

    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input):
            # Join each row's values into one string to expose what arrived.
            return [str(" | ".join(map(str, row))) for _, row in model_input.iterrows()]

    df = spark.createDataFrame(
        [
            (
                "a",
                [0],
                {"bool": True},
                [{"double": 0.1}],
            ),
            (
                "b",
                [1, 2],
                {"bool": False},
                [{"double": 0.2}, {"double": 0.3}],
            ),
        ],
        schema=StructType(
            [
                StructField(
                    "str",
                    StringType(),
                ),
                StructField(
                    "arr",
                    ArrayType(IntegerType()),
                ),
                StructField(
                    "obj",
                    StructType(
                        [
                            StructField("bool", BooleanType()),
                        ]
                    ),
                ),
                StructField(
                    "obj_arr",
                    ArrayType(
                        StructType(
                            [
                                StructField("double", DoubleType()),
                            ]
                        )
                    ),
                ),
            ]
        ),
    )
    save_path = tmp_path / "1"
    mlflow.pyfunc.save_model(
        path=save_path,
        python_model=MyModel(),
        signature=mlflow.models.infer_signature(df),
    )
    udf = mlflow.pyfunc.spark_udf(spark=spark, model_uri=save_path, result_type="string")
    pdf = df.withColumn("output", udf("str", "arr", "obj", "obj_arr")).toPandas()
    # Expected strings encode the exact numpy reprs the model sees — do not edit.
    assert pdf["output"][0] == "a | [0] | {'bool': np.True_} | [{'double': np.float64(0.1)}]"
    assert pdf["output"][1] == (
        "b | [1 2] | {'bool': np.False_} | "
        "[{'double': np.float64(0.2)} {'double': np.float64(0.3)}]"
    )

    # More complex nested structures
    df = spark.createDataFrame(
        [
            ([{"arr": [{"bool": True}]}],),
            ([{"arr": [{"bool": False}]}],),
        ],
        schema=StructType(
            [
                StructField(
                    "test",
                    ArrayType(
                        StructType(
                            [
                                StructField(
                                    "arr",
                                    ArrayType(
                                        StructType(
                                            [
                                                StructField("bool", BooleanType()),
                                            ]
                                        )
                                    ),
                                ),
                            ]
                        )
                    ),
                ),
            ]
        ),
    )
    save_path = tmp_path / "2"
    mlflow.pyfunc.save_model(
        path=save_path,
        python_model=MyModel(),
        signature=mlflow.models.infer_signature(df),
    )
    udf = mlflow.pyfunc.spark_udf(spark=spark, model_uri=save_path, result_type="string")
    pdf = df.withColumn("output", udf("test")).toPandas()
    assert pdf["output"][0] == "[{'arr': array([{'bool': np.True_}], dtype=object)}]"
    assert pdf["output"][1] == "[{'arr': array([{'bool': np.False_}], dtype=object)}]"
def test_spark_udf_infer_return_type(spark, tmp_path):
    """When no result_type is given, spark_udf infers it from the model
    signature's output schema, including nested structs and arrays."""

    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input):
            # Identity model: output schema == input schema.
            return model_input

    schema = StructType(
        [
            StructField(
                "str",
                StringType(),
            ),
            StructField(
                "arr",
                ArrayType(IntegerType()),
            ),
            StructField(
                "obj",
                StructType(
                    [
                        StructField("bool", BooleanType()),
                        StructField("obj2", StructType([StructField("str", StringType())])),
                    ]
                ),
            ),
            StructField(
                "obj_arr",
                ArrayType(
                    StructType(
                        [
                            StructField("double", DoubleType()),
                        ]
                    )
                ),
            ),
        ]
    )
    df = spark.createDataFrame(
        [
            (
                "a",
                [0],
                {"bool": True, "obj2": {"str": "some_string"}},
                [{"double": 0.1}],
            ),
            (
                "b",
                [1],
                {"bool": False, "obj2": {"str": "another_string"}},
                [{"double": 0.2}, {"double": 0.3}],
            ),
        ],
        schema=schema,
    )
    signature = mlflow.models.infer_signature(df, df)
    mlflow.pyfunc.save_model(
        path=tmp_path,
        python_model=MyModel(),
        signature=signature,
    )
    # No result_type argument: the UDF must infer it from the signature.
    udf = mlflow.pyfunc.spark_udf(spark=spark, model_uri=tmp_path)
    df = df.withColumn("output", udf("str", "arr", "obj", "obj_arr"))
    assert df.schema["output"] == StructField("output", schema)
    pdf = df.toPandas()
    assert pdf["output"][0] == ("a", [0], (True, ("some_string",)), [(0.1,)])
    assert pdf["output"][1] == ("b", [1], (False, ("another_string",)), [(0.2,), (0.3,)])
def test_spark_udf_env_manager_with_invalid_pythonpath(
    spark, sklearn_model, model_path, tmp_path, monkeypatch
):
    """Virtualenv env restoration must tolerate PYTHONPATH entries that are
    unreadable or nonexistent (simulating a Databricks runtime environment)."""
    # create an unreadable file
    unreadable_file = tmp_path / "unreadable_file"
    unreadable_file.write_text("unreadable file content")
    unreadable_file.chmod(0o000)

    # Sanity check that the permission change actually took effect.
    with pytest.raises(PermissionError, match="Permission denied"):
        with unreadable_file.open():
            pass

    non_exist_file = tmp_path / "does_not_exist"
    origin_python_path = os.environ.get("PYTHONPATH", "")
    # Poison PYTHONPATH with both a missing and an unreadable entry.
    monkeypatch.setenv("PYTHONPATH", f"{origin_python_path}:{non_exist_file}:{unreadable_file}")

    model, inference_data = sklearn_model
    mlflow.sklearn.save_model(model, model_path)
    expected_pred_result = model.predict(inference_data)

    infer_data = pd.DataFrame(inference_data, columns=["a", "b"])
    infer_spark_df = spark.createDataFrame(infer_data)

    with mock.patch("mlflow.utils.databricks_utils.is_in_databricks_runtime", return_value=True):
        pyfunc_udf = spark_udf(spark, model_path, env_manager="virtualenv")
        result = (
            infer_spark_df.select(pyfunc_udf("a", "b").alias("predictions"))
            .toPandas()
            .predictions.to_numpy()
        )
    np.testing.assert_allclose(result, expected_pred_result, rtol=1e-5)
def test_build_model_env(spark, sklearn_model, model_path, tmp_path, monkeypatch):
    """build_model_env produces a relocatable archive whose embedded virtualenv
    pins the package versions the model was logged with."""
    import sklearn

    from mlflow.pyfunc.dbconnect_artifact_cache import extract_archive_to_dir

    # Pretend we are on a Databricks runtime so build_model_env takes the DBR path.
    monkeypatch.setenv("DATABRICKS_RUNTIME_VERSION", "15.4.1")
    spark.udf.register(
        "current_version",
        lambda: {"dbr_version": "15.4.1-scala2.12"},
        returnType="dbr_version string",
    )
    model, inference_data = sklearn_model
    with mlflow.start_run():
        model_info = mlflow.sklearn.log_model(
            model,
            name="model",
            pip_requirements=[
                f"scikit-learn=={sklearn.__version__}",
                # `build_model_env` doesn't support building env with dev version MLflow,
                # so add MLflow as a required dependency here.
                "mlflow",
            ],
        )
        model_uri = model_info.model_uri

    model_env_path = build_model_env(model_uri, tmp_path)
    # Strip the last 7 characters — presumably the ".tar.gz" suffix (TODO confirm).
    archive_name = Path(model_env_path).name[:-7]
    env_name = "-".join(archive_name.split("-")[:2])
    extract_dir = Path("/tmp") / archive_name
    try:
        extract_archive_to_dir(model_env_path, extract_dir)
        # Check the extracted python environment installs the expected sklearn package version.
        subprocess.check_call(
            [
                "bash",
                "-c",
                f"source /tmp/{archive_name}/virtualenv_envs/{env_name}/bin/activate && "
                f"python -c "
                f"\"import sklearn; assert sklearn.__version__ == '{sklearn.__version__}'\"",
            ]
        )
    finally:
        # Always clean up the extracted env, even if the subprocess check fails.
        shutil.rmtree(f"/tmp/{archive_name}", ignore_errors=True)
| ModelWithData |
python | pytorch__pytorch | torch/nn/modules/activation.py | {
"start": 27439,
"end": 29560
} | class ____(Module):
r"""Applies the soft shrinkage function element-wise.
.. math::
\text{SoftShrinkage}(x) =
\begin{cases}
x - \lambda, & \text{ if } x > \lambda \\
x + \lambda, & \text{ if } x < -\lambda \\
0, & \text{ otherwise }
\end{cases}
Args:
lambd: the :math:`\lambda` (must be no less than zero) value for the Softshrink formulation. Default: 0.5
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Softshrink.png
Examples::
>>> m = nn.Softshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["lambd"]
lambd: float
def __init__(self, lambd: float = 0.5) -> None:
super().__init__()
self.lambd = lambd
def forward(self, input: Tensor) -> Tensor:
"""
Run forward pass.
"""
return F.softshrink(input, self.lambd)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
return str(self.lambd)
def _check_arg_device(x: Optional[torch.Tensor]) -> bool:
if x is not None:
return x.device.type in [
"cpu",
"cuda",
torch.utils.backend_registration._privateuse1_backend_name,
]
return True
def _arg_requires_grad(x: Optional[torch.Tensor]) -> bool:
if x is not None:
return x.requires_grad
return False
def _is_make_fx_tracing():
if not torch.jit.is_scripting():
torch_dispatch_mode_stack = (
torch.utils._python_dispatch._get_current_dispatch_mode_stack()
)
# this can be triggered when dynamo inlining the module too.
return (
any(
type(x) is torch.fx.experimental.proxy_tensor.ProxyTorchDispatchMode
for x in torch_dispatch_mode_stack
)
or torch.compiler.is_exporting()
)
else:
return False
| Softshrink |
python | django__django | tests/migrations/test_migrations_squashed_complex/6_auto.py | {
"start": 35,
"end": 188
} | class ____(migrations.Migration):
dependencies = [("migrations", "5_auto")]
operations = [migrations.RunPython(migrations.RunPython.noop)]
| Migration |
python | pennersr__django-allauth | allauth/idp/oidc/forms.py | {
"start": 1827,
"end": 2627
} | class ____(forms.Form):
code = forms.CharField(
label=_("Code"),
required=True,
widget=forms.TextInput(
attrs={"placeholder": _("Code"), "autocomplete": "one-time-code"},
),
)
def __init__(self, *args, **kwargs):
self.code = kwargs.pop("code", None)
super().__init__(*args, **kwargs)
def clean_code(self):
code = self.cleaned_data.get("code")
if not ratelimit.consume(
context.request,
action="device_user_code",
config=app_settings.RATE_LIMITS,
limit_get=True,
):
raise get_account_adapter().validation_error("rate_limited")
self.device_code, self.client = device_codes.validate_user_code(code)
return code
| ConfirmCodeForm |
python | ipython__ipython | IPython/extensions/autoreload.py | {
"start": 14488,
"end": 14764
} | class ____:
def __init__(self, obj):
self.obj = obj
def __call__(self):
return self.obj
mod_attrs = [
"__name__",
"__doc__",
"__package__",
"__loader__",
"__spec__",
"__file__",
"__cached__",
"__builtins__",
]
| StrongRef |
python | getsentry__sentry-python | sentry_sdk/consts.py | {
"start": 511,
"end": 839
} | class ____(Enum):
"""
The type of an endpoint. This is an enum, rather than a constant, for historical reasons
(the old /store endpoint). The enum also preserve future compatibility, in case we ever
have a new endpoint.
"""
ENVELOPE = "envelope"
OTLP_TRACES = "integration/otlp/v1/traces"
| EndpointType |
python | psf__black | src/black/cache.py | {
"start": 523,
"end": 1409
} | class ____(NamedTuple):
st_mtime: float
st_size: int
hash: str
def get_cache_dir() -> Path:
"""Get the cache directory used by black.
Users can customize this directory on all systems using `BLACK_CACHE_DIR`
environment variable. By default, the cache directory is the user cache directory
under the black application.
This result is immediately set to a constant `black.cache.CACHE_DIR` as to avoid
repeated calls.
"""
# NOTE: Function mostly exists as a clean way to test getting the cache directory.
default_cache_dir = user_cache_dir("black")
cache_dir = Path(os.environ.get("BLACK_CACHE_DIR", default_cache_dir))
cache_dir = cache_dir / __version__
return cache_dir
CACHE_DIR = get_cache_dir()
def get_cache_file(mode: Mode) -> Path:
return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"
@dataclass
| FileData |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_report_stream.py | {
"start": 1048,
"end": 17282
} | class ____(TestReportStream):
stream_name: Optional[str] = None
report_file: str
report_file_with_records_further_start_date: Optional[str] = None
records_number: int
second_read_records_number: Optional[int] = None
state_file: str
state_file_after_migration: Optional[str] = None
state_file_after_migration_with_cursor_further_config_start_date: Optional[str] = None
state_file_legacy: Optional[str] = None
incremental_report_file: str
incremental_report_file_with_records_further_cursor: Optional[str] = None
first_read_state: dict
first_read_state_for_records_further_start_date: Optional[dict] = None
second_read_state: dict
second_read_state_for_records_before_start_date: Optional[dict] = None
second_read_state_for_records_further_start_date: Optional[dict] = None
transform_field: str = "AccountId"
account_id: str = "180535609"
cursor_field = "TimePeriod"
def setUp(self):
super().setUp()
if not self.stream_name:
self.skipTest("Skipping TestSuiteReportStream")
def mock_report_apis(self):
# make noop for no migrated streams to manifest (rest api).
...
@freeze_time("2024-05-06")
def test_return_records_from_given_csv_file(self):
self.mock_report_apis()
output = self.read_stream(self.stream_name, SyncMode.full_refresh, self._config, self.report_file)
assert len(output.records) == self.records_number
@freeze_time("2024-05-06")
def test_transform_records_from_given_csv_file(self):
self.mock_report_apis()
output = self.read_stream(self.stream_name, SyncMode.full_refresh, self._config, self.report_file)
assert len(output.records) == self.records_number
for record in output.records:
assert self.transform_field in record.record.data.keys()
@freeze_time("2024-05-06")
def test_incremental_read_returns_records(self):
self.mock_report_apis()
output = self.read_stream(self.stream_name, SyncMode.incremental, self._config, self.report_file)
assert len(output.records) == self.records_number
assert output.most_recent_state.stream_state.__dict__ == self.first_read_state
@freeze_time("2024-05-06")
def test_incremental_read_returns_records_further_config_start_date(self):
"""
We validate the state cursor is set to the value of the latest record read.
"""
if not self.report_file_with_records_further_start_date or not self.first_read_state_for_records_further_start_date:
assert False, "test_incremental_read_returns_records_further_config_start_date is not correctly set"
self.mock_report_apis()
output = self.read_stream(self.stream_name, SyncMode.incremental, self._config, self.report_file_with_records_further_start_date)
assert len(output.records) == self.records_number
assert output.most_recent_state.stream_state.__dict__ == self.first_read_state_for_records_further_start_date
@freeze_time(SECOND_READ_FREEZE_TIME)
def test_incremental_read_with_state_and_no_start_date_returns_records_once_after_migration(self):
"""
Test that incremental read with state and no start date in config returns records only once.
We observed that if the start date is not provided in the config, and we don't parse correctly the account_id
from the state, the incremental read returns records multiple times as we yield the default_time_periods
for no start date scenario.
"""
self.mock_report_apis()
state = self._state(self.state_file_legacy, self.stream_name)
config = deepcopy(self._config)
del config["reports_start_date"] # Simulate no start date in config
output = self.read_stream(
self.stream_name, SyncMode.incremental, config, self.incremental_report_file_with_records_further_cursor, state
)
if not self.second_read_records_number:
assert len(output.records) == self.records_number
else:
assert len(output.records) == self.second_read_records_number
@freeze_time("2024-05-06")
def test_incremental_read_with_state_returns_records_after_migration(self):
"""
For this test the records are all with TimePeriod behind the config start date and the state TimePeriod cursor.
"""
self.mock_report_apis()
state = self._state(self.state_file_after_migration, self.stream_name)
output = self.read_stream(self.stream_name, SyncMode.incremental, self._config, self.incremental_report_file, state)
if not self.second_read_records_number:
assert len(output.records) == self.records_number
else:
assert len(output.records) == self.second_read_records_number
actual_cursor = None
for state in output.most_recent_state.stream_state.states:
if state["partition"]["account_id"] == self.account_id:
actual_cursor = state["cursor"]
expected_state = self.second_read_state_for_records_before_start_date
expected_cursor = None
for state in expected_state["states"]:
if state["partition"]["account_id"] == self.account_id:
expected_cursor = state["cursor"]
if not expected_cursor or not actual_cursor:
assert False, f"Expected state is empty for account_id: {self.account_id}"
assert actual_cursor == expected_cursor
@freeze_time(SECOND_READ_FREEZE_TIME)
def test_incremental_read_with_state_returns_records_after_migration_with_records_further_state_cursor(self):
"""
For this test we get records with TimePeriod further the config start date and the state TimePeriod cursor.
The provide state is "taken" from a previous run; with stream manifest; so, is in the new state format, and
where the resultant cursor was further the config start date.
So we validate that the cursor in the output.most_recent_state is moved to the value of the latest record read.
The state format before migration IS NOT involved in this test.
"""
self.mock_report_apis()
provided_state = self._state(self.state_file_after_migration_with_cursor_further_config_start_date, self.stream_name)
output = self.read_stream(
self.stream_name, SyncMode.incremental, self._config, self.incremental_report_file_with_records_further_cursor, provided_state
)
if not self.second_read_records_number:
assert len(output.records) == self.records_number
else:
assert len(output.records) == self.second_read_records_number
actual_cursor = None
actual_partition = None
for state in output.most_recent_state.stream_state.states:
if state["partition"]["account_id"] == self.account_id:
actual_cursor = state["cursor"]
actual_partition = state["partition"]
expected_state = self.second_read_state_for_records_further_start_date
expected_cursor = None
expected_partition = None
for state in expected_state["states"]:
if state["partition"]["account_id"] == self.account_id:
expected_cursor = state["cursor"]
expected_partition = state["partition"]
if not expected_cursor or not actual_cursor:
assert False, f"Expected state is empty for account_id: {self.account_id}"
# here the cursor moved to expected that is the latest record read
assert actual_cursor == expected_cursor
# this is important as we are expecting the new state format
# to contain the parent slice as should be happening. In this case
# migration is not needed as the state is already in the new format
assert actual_partition == expected_partition
# Let's check in the logs what was the start_time and end_time values of the Job
job_completed_log = ""
for current_log in output.logs:
if "The following jobs for stream slice" in current_log.log.message:
job_completed_log = current_log.log.message
break
if not job_completed_log:
assert False, "Job completed log is empty"
# Regex patterns to match start_time and end_time values
start_time_pattern = re.compile(r"'start_time': '([^']+)'")
end_time_pattern = re.compile(r"'end_time': '([^']+)'")
# Extract values
start_time_match = start_time_pattern.search(job_completed_log)
end_time_match = end_time_pattern.search(job_completed_log)
job_start_time = start_time_match.group(1) if start_time_match else None
job_end_time = end_time_match.group(1) if end_time_match else None
last_successful_sync_cursor_value = provided_state[0].stream.stream_state.state[self.cursor_field]
assert job_start_time == last_successful_sync_cursor_value
if "hourly" in self.stream_name or (hasattr(self, "custom_report_aggregation") and self.custom_report_aggregation == "Hourly"):
assert job_end_time == f"{SECOND_READ_FREEZE_TIME}T00:00:00+00:00"
else:
assert job_end_time == SECOND_READ_FREEZE_TIME
@freeze_time(SECOND_READ_FREEZE_TIME)
def test_incremental_read_with_legacy_state_returns_records_after_migration_with_records_further_state_cursor(self):
"""
For this test, we get records with TimePeriod further the config start date and the state TimePeriod cursor.
The provided state is taken from a previous run; with python stream; so, is already in legacy format, and
where the resultant cursor was further the config start date.
So we validate that the cursor in the output.most_recent_state is moved to the value of the latest record read.
Also, the state is migrated to the new format, so we can validate that the partition is correctly set.
The state format before migration (legacy) IS involved in this test.
"""
self.mock_report_apis()
provided_state = self._state(self.state_file_legacy, self.stream_name)
output = self.read_stream(
self.stream_name, SyncMode.incremental, self._config, self.incremental_report_file_with_records_further_cursor, provided_state
)
if not self.second_read_records_number:
assert len(output.records) == self.records_number
else:
assert len(output.records) == self.second_read_records_number
actual_cursor = None
actual_partition = None
for state in output.most_recent_state.stream_state.states:
if state["partition"]["account_id"] == self.account_id:
actual_cursor = state["cursor"]
actual_partition = state["partition"]
expected_state = self.second_read_state_for_records_further_start_date
expected_cursor = None
expected_partition = None
for state in expected_state["states"]:
if state["partition"]["account_id"] == self.account_id:
expected_cursor = state["cursor"]
expected_partition = state["partition"]
if not expected_cursor or not actual_cursor:
assert False, f"Expected state is empty for account_id: {self.account_id}"
if not actual_partition or not expected_partition:
assert False, f"Expected state is empty for account_id: {self.account_id}"
# here the cursor moved to expect that is the latest record read
assert actual_cursor == expected_cursor
assert actual_partition == expected_partition
# Let's check in the logs what was the start_time and end_time values of the Job
job_completed_log = ""
for current_log in output.logs:
if "The following jobs for stream slice" in current_log.log.message:
job_completed_log = current_log.log.message
break
if not job_completed_log:
assert False, "Job completed log is empty"
# Regex patterns to match start_time and end_time values
start_time_pattern = re.compile(r"'start_time': '([^']+)'")
end_time_pattern = re.compile(r"'end_time': '([^']+)'")
# Extract values
start_time_match = start_time_pattern.search(job_completed_log)
end_time_match = end_time_pattern.search(job_completed_log)
job_start_time = start_time_match.group(1) if start_time_match else None
job_end_time = end_time_match.group(1) if end_time_match else None
last_successful_sync_cursor_value = vars(provided_state[0].stream.stream_state)[self.account_id][self.cursor_field]
assert job_start_time == last_successful_sync_cursor_value
if "hourly" in self.stream_name or (hasattr(self, "custom_report_aggregation") and self.custom_report_aggregation == "Hourly"):
assert job_end_time == f"{SECOND_READ_FREEZE_TIME}T00:00:00+00:00"
else:
assert job_end_time == SECOND_READ_FREEZE_TIME
@freeze_time("2024-05-06")
def test_no_config_start_date(self):
"""
If the field reports_start_date is blank, Airbyte will replicate all data from previous and current calendar years.
This test is to validate that the stream will return all records from the first day of the year 2023 (CustomDateRangeStart in mocked body).
"""
self.mock_report_apis()
# here we mock the report start date to be the first day of the year 2023
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AdPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "AdPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountId", "CampaignId", "AdGroupId", "AdId", "TimePeriod", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "CurrencyCode", "AdDistribution", "DeviceType", "Language", "Network", "DeviceOS", "TopVsOther", "BidMatchType", "DeliveredMatchType", "AccountName", "CampaignName", "CampaignType", "AdGroupName", "Impressions", "Clicks", "Ctr", "Spend", "CostPerConversion", "DestinationUrl", "Assists", "ReturnOnAdSpend", "CostPerAssist", "CustomParameters", "FinalAppUrl", "AdDescription", "AdDescription2", "ViewThroughConversions", "ViewThroughConversionsQualified", "AllCostPerConversion", "AllReturnOnAdSpend", "Conversions", "ConversionRate", "ConversionsQualified", "AverageCpc", "AveragePosition", "AverageCpm", "AllConversions", "AllConversionRate", "AllRevenue", "AllRevenuePerConversion", "Revenue", "RevenuePerConversion", "RevenuePerAssist"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2023}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
config = deepcopy(self._config)
del config["reports_start_date"]
output = self.read_stream(self.stream_name, SyncMode.incremental, config, self.report_file)
assert len(output.records) == self.records_number
first_read_state = deepcopy(self.first_read_state)
# this corresponds to the last read record as we don't have started_date in the config
# the self.first_read_state is set using the config start date, so it is not correct for this test
if "hourly" in self.stream_name or (hasattr(self, "custom_report_aggregation") and self.custom_report_aggregation == "Hourly"):
first_read_state["state"][self.cursor_field] = "2023-11-12T00:00:00+00:00"
first_read_state["states"][0]["cursor"][self.cursor_field] = "2023-11-12T00:00:00+00:00"
assert output.most_recent_state.stream_state.__dict__ == first_read_state
else:
assert output.most_recent_state.stream_state.__dict__ == first_read_state
| TestSuiteReportStream |
python | apache__airflow | providers/teradata/src/airflow/providers/teradata/triggers/teradata_compute_cluster.py | {
"start": 1218,
"end": 6944
} | class ____(BaseTrigger):
"""
Fetch the status of the suspend or resume operation for the specified compute cluster.
:param teradata_conn_id: The :ref:`Teradata connection id <howto/connection:teradata>`
reference to a specific Teradata database.
:param compute_profile_name: Name of the Compute Profile to manage.
:param compute_group_name: Name of compute group to which compute profile belongs.
:param opr_type: Compute cluster operation - SUSPEND/RESUME
:param poll_interval: polling period in minutes to check for the status
"""
def __init__(
self,
teradata_conn_id: str,
compute_profile_name: str,
compute_group_name: str | None = None,
operation_type: str | None = None,
poll_interval: float | None = None,
):
super().__init__()
self.teradata_conn_id = teradata_conn_id
self.compute_profile_name = compute_profile_name
self.compute_group_name = compute_group_name
self.operation_type = operation_type
self.poll_interval = poll_interval
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize TeradataComputeClusterSyncTrigger arguments and classpath."""
return (
"airflow.providers.teradata.triggers.teradata_compute_cluster.TeradataComputeClusterSyncTrigger",
{
"teradata_conn_id": self.teradata_conn_id,
"compute_profile_name": self.compute_profile_name,
"compute_group_name": self.compute_group_name,
"operation_type": self.operation_type,
"poll_interval": self.poll_interval,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Wait for Compute Cluster operation to complete."""
try:
while True:
status = await self.get_status()
if status is None or len(status) == 0:
raise AirflowException(Constants.CC_GRP_PRP_NON_EXISTS_MSG % "manage")
if (
self.operation_type == Constants.CC_SUSPEND_OPR
or self.operation_type == Constants.CC_CREATE_SUSPEND_OPR
):
if status == Constants.CC_SUSPEND_DB_STATUS:
break
elif (
self.operation_type == Constants.CC_RESUME_OPR
or self.operation_type == Constants.CC_CREATE_OPR
):
if status == Constants.CC_RESUME_DB_STATUS:
break
if self.poll_interval is not None:
self.poll_interval = float(self.poll_interval)
else:
self.poll_interval = float(Constants.CC_POLL_INTERVAL)
await asyncio.sleep(self.poll_interval)
if (
self.operation_type == Constants.CC_SUSPEND_OPR
or self.operation_type == Constants.CC_CREATE_SUSPEND_OPR
):
if status == Constants.CC_SUSPEND_DB_STATUS:
yield TriggerEvent(
{
"status": "success",
"message": Constants.CC_OPR_SUCCESS_STATUS_MSG
% (self.compute_profile_name, self.operation_type),
}
)
else:
yield TriggerEvent(
{
"status": "error",
"message": Constants.CC_OPR_TIMEOUT_ERROR
% (self.operation_type, self.compute_profile_name),
}
)
elif (
self.operation_type == Constants.CC_RESUME_OPR
or self.operation_type == Constants.CC_CREATE_OPR
):
if status == Constants.CC_RESUME_DB_STATUS:
yield TriggerEvent(
{
"status": "success",
"message": Constants.CC_OPR_SUCCESS_STATUS_MSG
% (self.compute_profile_name, self.operation_type),
}
)
else:
yield TriggerEvent(
{
"status": "error",
"message": Constants.CC_OPR_TIMEOUT_ERROR
% (self.operation_type, self.compute_profile_name),
}
)
else:
yield TriggerEvent({"status": "error", "message": "Invalid operation"})
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
except asyncio.CancelledError:
self.log.error(Constants.CC_OPR_TIMEOUT_ERROR, self.operation_type, self.compute_profile_name)
async def get_status(self) -> str:
"""Return compute cluster SUSPEND/RESUME operation status."""
sql = (
"SEL ComputeProfileState FROM DBC.ComputeProfilesVX WHERE UPPER(ComputeProfileName) = UPPER('"
+ self.compute_profile_name
+ "')"
)
if self.compute_group_name:
sql += " AND UPPER(ComputeGroupName) = UPPER('" + self.compute_group_name + "')"
hook = TeradataHook(teradata_conn_id=self.teradata_conn_id)
result_set = hook.run(sql, handler=fetch_one_handler)
status = ""
if isinstance(result_set, list) and isinstance(result_set[0], str):
status = str(result_set[0])
return status
| TeradataComputeClusterSyncTrigger |
python | scipy__scipy | scipy/special/tests/test_hypergeometric.py | {
"start": 118,
"end": 3855
} | class ____:
def test_negative_x(self):
a, b, x = np.meshgrid(
[-1, -0.5, 0, 0.5, 1],
[-1, -0.5, 0, 0.5, 1],
np.linspace(-100, -1, 10),
)
assert np.all(np.isnan(sc.hyperu(a, b, x)))
def test_special_cases(self):
assert sc.hyperu(0, 1, 1) == 1.0
@pytest.mark.parametrize('a', [0.5, 1, np.nan])
@pytest.mark.parametrize('b', [1, 2, np.nan])
@pytest.mark.parametrize('x', [0.25, 3, np.nan])
def test_nan_inputs(self, a, b, x):
assert np.isnan(sc.hyperu(a, b, x)) == np.any(np.isnan([a, b, x]))
@pytest.mark.parametrize(
'a,b,x,expected',
[(0.21581740448533887, 1.0, 1e-05, 3.6030558839391325),
(0.21581740448533887, 1.0, 0.00021544346900318823, 2.8783254988948976),
(0.21581740448533887, 1.0, 0.004641588833612777, 2.154928216691109),
(0.21581740448533887, 1.0, 0.1, 1.446546638718792),
(0.0030949064301273865, 1.0, 1e-05, 1.0356696454116199),
(0.0030949064301273865, 1.0, 0.00021544346900318823, 1.0261510362481985),
(0.0030949064301273865, 1.0, 0.004641588833612777, 1.0166326903402296),
(0.0030949064301273865, 1.0, 0.1, 1.0071174207698674),
(0.1509924314279033, 1.0, 1e-05, 2.806173846998948),
(0.1509924314279033, 1.0, 0.00021544346900318823, 2.3092158526816124),
(0.1509924314279033, 1.0, 0.004641588833612777, 1.812905980588048),
(0.1509924314279033, 1.0, 0.1, 1.3239738117634872),
(-0.010678995342969011, 1.0, 1e-05, 0.8775194903781114),
(-0.010678995342969011, 1.0, 0.00021544346900318823, 0.9101008998540128),
(-0.010678995342969011, 1.0, 0.004641588833612777, 0.9426854294058609),
(-0.010678995342969011, 1.0, 0.1, 0.9753065150174902),
(-0.06556622211831487, 1.0, 1e-05, 0.26435429752668904),
(-0.06556622211831487, 1.0, 0.00021544346900318823, 0.4574756033875781),
(-0.06556622211831487, 1.0, 0.004641588833612777, 0.6507121093358457),
(-0.06556622211831487, 1.0, 0.1, 0.8453129788602187),
(-0.21628242470175185, 1.0, 1e-05, -1.2318314201114489),
(-0.21628242470175185, 1.0, 0.00021544346900318823, -0.6704694233529538),
(-0.21628242470175185, 1.0, 0.004641588833612777, -0.10795098653682857),
(-0.21628242470175185, 1.0, 0.1, 0.4687227684115524)]
)
def test_gh_15650_mp(self, a, b, x, expected):
# See https://github.com/scipy/scipy/issues/15650
# b == 1, |a| < 0.25, 0 < x < 1
#
# This purpose of this test is to check the accuracy of results
# in the region that was impacted by gh-15650.
#
# Reference values computed with mpmath using the script:
#
# import itertools as it
# import numpy as np
#
# from mpmath import mp
#
# rng = np.random.default_rng(1234)
#
# cases = []
# for a, x in it.product(
# np.random.uniform(-0.25, 0.25, size=6),
# np.logspace(-5, -1, 4),
# ):
# with mp.workdps(100):
# cases.append((float(a), 1.0, float(x), float(mp.hyperu(a, 1.0, x))))
assert_allclose(sc.hyperu(a, b, x), expected, rtol=1e-13)
def test_gh_15650_sanity(self):
# The purpose of this test is to sanity check hyperu in the region that
# was impacted by gh-15650 by making sure there are no excessively large
# results, as were reported there.
a = np.linspace(-0.5, 0.5, 500)
x = np.linspace(1e-6, 1e-1, 500)
a, x = np.meshgrid(a, x)
results = sc.hyperu(a, 1.0, x)
assert np.all(np.abs(results) < 1e3)
| TestHyperu |
python | ansible__ansible | lib/ansible/errors/__init__.py | {
"start": 6259,
"end": 6346
} | class ____(AnsibleError):
"""Unable to get user input."""
| AnsiblePromptNoninteractive |
python | huggingface__transformers | src/transformers/models/depth_pro/modeling_depth_pro.py | {
"start": 21082,
"end": 22395
} | class ____(nn.Module):
def __init__(self, config: DepthProConfig):
super().__init__()
self.config = config
combined_feature_dims = config.scaled_images_feature_dims + config.intermediate_feature_dims
self.projections = nn.ModuleList()
for i, in_channels in enumerate(combined_feature_dims):
if i == len(combined_feature_dims) - 1 and in_channels == config.fusion_hidden_size:
# projection for last layer can be ignored if input and output channels already match
self.projections.append(nn.Identity())
else:
self.projections.append(
nn.Conv2d(
in_channels=in_channels,
out_channels=config.fusion_hidden_size,
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
)
def forward(self, features: list[torch.Tensor]) -> list[torch.Tensor]:
projected_features = []
for i, projection in enumerate(self.projections):
upsampled_feature = projection(features[i])
projected_features.append(upsampled_feature)
return projected_features
| DepthProFeatureProjection |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_reload_repository_location.py | {
"start": 2745,
"end": 3150
} | class ____(ReadonlyGraphQLContextTestMatrix):
def test_reload_workspace_permission_failure(self, graphql_context):
result = execute_dagster_graphql(graphql_context, RELOAD_WORKSPACE_QUERY)
assert result
assert result.data
assert result.data["reloadWorkspace"]
assert result.data["reloadWorkspace"]["__typename"] == "UnauthorizedError"
| TestReloadWorkspaceReadOnly |
python | django__django | tests/admin_views/models.py | {
"start": 19924,
"end": 20031
} | class ____(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
| Story |
python | doocs__leetcode | solution/3000-3099/3064.Guess the Number Using Bitwise Questions I/Solution.py | {
"start": 75,
"end": 195
} | class ____:
def findNumber(self) -> int:
return sum(1 << i for i in range(32) if commonSetBits(1 << i))
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/strategy_options.py | {
"start": 71898,
"end": 74585
} | class ____(_LoadElement):
"""Loader strategies against wildcard attributes
e.g.::
raiseload("*")
Load(User).lazyload("*")
defer("*")
load_only(User.name, User.email) # will create a defer('*')
joinedload(User.addresses).raiseload("*")
"""
__visit_name__ = "token_strategy_load_element"
inherit_cache = True
is_class_strategy = False
is_token_strategy = True
def _init_path(
self, path, attr, wildcard_key, attr_group, raiseerr, extra_criteria
):
# assert isinstance(attr, str) or attr is None
if attr is not None:
default_token = attr.endswith(_DEFAULT_TOKEN)
if attr.endswith(_WILDCARD_TOKEN) or default_token:
if wildcard_key:
attr = f"{wildcard_key}:{attr}"
path = path.token(attr)
return path
else:
raise sa_exc.ArgumentError(
"Strings are not accepted for attribute names in loader "
"options; please use class-bound attributes directly."
)
return path
def _prepare_for_compile_state(
self,
parent_loader,
compile_state,
mapper_entities,
reconciled_lead_entity,
raiseerr,
):
# _TokenStrategyLoad
current_path = compile_state.current_path
is_refresh = compile_state.compile_options._for_refresh_state
assert self.path.is_token
if is_refresh and not self.propagate_to_loaders:
return []
# omit setting attributes for a "defaultload" type of option
if not self.strategy and not self.local_opts:
return []
effective_path = self.path
if reconciled_lead_entity:
effective_path = PathRegistry.coerce(
(reconciled_lead_entity,) + effective_path.path[1:]
)
if current_path:
new_effective_path = self._adjust_effective_path_for_current_path(
effective_path, current_path
)
if new_effective_path is None:
return []
effective_path = new_effective_path
# for a wildcard token, expand out the path we set
# to encompass everything from the query entity on
# forward. not clear if this is necessary when current_path
# is set.
return [
("loader", natural_path)
for natural_path in (
cast(
_TokenRegistry, effective_path
)._generate_natural_for_superclasses()
)
]
| _TokenStrategyLoad |
python | pytorch__pytorch | torch/distributed/optim/post_localSGD_optimizer.py | {
"start": 134,
"end": 4498
} | class ____(torch.optim.Optimizer):
r"""
Wraps an arbitrary :class:`torch.optim.Optimizer` and runs `post-local SGD <https://arxiv.org/abs/1808.07217>`_,
This optimizer runs local optimizer at every step.
After the warm-up stage, it averages parameters periodically after the local optimizer is applied.
Args:
optim: The local optimizer.
averager: A model averager instance to run post-localSGD algorithm.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> import torch
>>> import torch.distributed as dist
>>> import torch.distributed.algorithms.model_averaging.averagers as averagers
>>> import torch.nn as nn
>>> from torch.distributed.optim import PostLocalSGDOptimizer
>>> from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import (
>>> PostLocalSGDState,
>>> post_localSGD_hook,
>>> )
>>>
>>> model = nn.parallel.DistributedDataParallel(
>>> module, device_ids=[rank], output_device=rank
>>> )
>>>
>>> # Register a post-localSGD communication hook.
>>> state = PostLocalSGDState(process_group=None, subgroup=None, start_localSGD_iter=100)
>>> model.register_comm_hook(state, post_localSGD_hook)
>>>
>>> # Create a post-localSGD optimizer that wraps a local optimizer.
>>> # Note that ``warmup_steps`` used in ``PostLocalSGDOptimizer`` must be the same as
>>> # ``start_localSGD_iter`` used in ``PostLocalSGDState``.
>>> local_optim = torch.optim.SGD(params=model.parameters(), lr=0.01)
>>> opt = PostLocalSGDOptimizer(
>>> optim=local_optim,
>>> averager=averagers.PeriodicModelAverager(period=4, warmup_steps=100)
>>> )
>>>
>>> # In the first 100 steps, DDP runs global gradient averaging at every step.
>>> # After 100 steps, DDP runs gradient averaging within each subgroup (intra-node by default),
>>> # and post-localSGD optimizer runs global model averaging every 4 steps after applying the local optimizer.
>>> for step in range(0, 200):
>>> opt.zero_grad()
>>> loss = loss_fn(output, labels)
>>> loss.backward()
>>> opt.step()
"""
def __init__(self, optim: torch.optim.Optimizer, averager: averagers.ModelAverager):
self.optim = optim
self.param_groups = self.optim.param_groups
self.averager = averager
@property
def state(self): # type: ignore[override]
return self.optim.state
def __repr__(self):
return self.optim.__repr__()
def state_dict(self):
r"""
This is the same as :class:`torch.optim.Optimizer` :meth:`state_dict`,
but adds an extra entry to record model averager's step to the checkpoint
to ensure reload does not cause unnecessary warm up again.
"""
optim_state_dict = self.optim.state_dict()
optim_state_dict["step"] = self.averager.step
return optim_state_dict
def load_state_dict(self, state_dict):
r"""
This is the same as :class:`torch.optim.Optimizer` :meth:`load_state_dict`,
but also restores model averager's step value to the one
saved in the provided ``state_dict``.
If there is no ``"step"`` entry in ``state_dict``,
it will raise a warning and initialize the model averager's step to 0.
"""
self.optim.load_state_dict(state_dict)
if "step" in state_dict:
self.averager.step = state_dict["step"]
else:
warnings.warn(
"Loaded state dict does not contain a step counter for an averager. "
"Setting step counter to 0.",
stacklevel=2,
)
self.averager.step = 0
def step(self): # type: ignore[override]
r"""
Performs a single optimization step (parameter update).
"""
self.optim.step()
self.averager.average_parameters(params=self.param_groups)
def zero_grad(self, set_to_none: bool = True): # type: ignore[override]
self.optim.zero_grad(set_to_none=set_to_none)
def add_param_group(self, param_group):
self.optim.add_param_group(param_group)
| PostLocalSGDOptimizer |
python | jazzband__django-waffle | waffle/testutils.py | {
"start": 1079,
"end": 2027
} | class ____(_overrider[bool]):
"""
override_switch is a contextmanager for easier testing of switches.
It accepts two parameters, name of the switch and it's state. Example
usage::
with override_switch('happy_mode', active=True):
...
If `Switch` already existed, it's value would be changed inside the context
block, then restored to the original value. If `Switch` did not exist
before entering the context, it is created, then removed at the end of the
block.
It can also act as a decorator::
@override_switch('happy_mode', active=True)
def test_happy_mode_enabled():
...
"""
cls = get_waffle_switch_model()
def update(self, active: bool) -> None:
obj = self.cls.objects.get(pk=self.obj.pk)
obj.active = active
obj.save()
obj.flush()
def get_value(self) -> bool:
return self.obj.active
| override_switch |
python | pytorch__pytorch | setup.py | {
"start": 50827,
"end": 63311
} | class ____(setuptools.command.sdist.sdist):
def run(self) -> None:
with concat_license_files():
super().run()
def get_cmake_cache_vars() -> defaultdict[str, CMakeValue]:
try:
return defaultdict(lambda: False, cmake.get_cmake_cache_variables())
except FileNotFoundError:
# CMakeCache.txt does not exist.
# Probably running "python setup.py clean" over a clean directory.
return defaultdict(lambda: False)
def configure_extension_build() -> tuple[
list[Extension], # ext_modules
dict[str, type[Command]], # cmdclass
list[str], # packages
dict[str, list[str]], # entry_points
list[str], # extra_install_requires
]:
r"""Configures extension build options according to system environment and user's choice.
Returns:
The input to parameters ext_modules, cmdclass, packages, and entry_points as required in setuptools.setup.
"""
cmake_cache_vars = get_cmake_cache_vars()
################################################################################
# Configure compile flags
################################################################################
library_dirs: list[str] = [str(TORCH_LIB_DIR)]
extra_install_requires: list[str] = []
if IS_WINDOWS:
# /NODEFAULTLIB makes sure we only link to DLL runtime
# and matches the flags set for protobuf and ONNX
extra_link_args: list[str] = ["/NODEFAULTLIB:LIBCMT.LIB"]
# /MD links against DLL runtime
# and matches the flags set for protobuf and ONNX
# /EHsc is about standard C++ exception handling
extra_compile_args: list[str] = ["/MD", "/FS", "/EHsc"]
else:
extra_link_args = []
extra_compile_args = [
"-Wall",
"-Wextra",
"-Wno-strict-overflow",
"-Wno-unused-parameter",
"-Wno-missing-field-initializers",
"-Wno-unknown-pragmas",
# Python 2.6 requires -fno-strict-aliasing, see
# http://legacy.python.org/dev/peps/pep-3123/
# We also depend on it in our code (even Python 3).
"-fno-strict-aliasing",
]
main_compile_args: list[str] = []
main_libraries: list[str] = ["torch_python"]
main_link_args: list[str] = []
main_sources: list[str] = ["torch/csrc/stub.c"]
if BUILD_LIBTORCH_WHL:
main_libraries = ["torch"]
main_sources = []
if build_type.is_debug():
if IS_WINDOWS:
extra_compile_args += ["/Z7"]
extra_link_args += ["/DEBUG:FULL"]
else:
extra_compile_args += ["-O0", "-g"]
extra_link_args += ["-O0", "-g"]
if build_type.is_rel_with_deb_info():
if IS_WINDOWS:
extra_compile_args += ["/Z7"]
extra_link_args += ["/DEBUG:FULL"]
else:
extra_compile_args += ["-g"]
extra_link_args += ["-g"]
# pypi cuda package that requires installation of cuda runtime, cudnn and cublas
# should be included in all wheels uploaded to pypi
pytorch_extra_install_requires = os.getenv("PYTORCH_EXTRA_INSTALL_REQUIREMENTS")
if pytorch_extra_install_requires:
report(f"pytorch_extra_install_requirements: {pytorch_extra_install_requires}")
extra_install_requires.extend(
map(str.strip, pytorch_extra_install_requires.split("|"))
)
# Cross-compile for M1
if IS_DARWIN:
macos_target_arch = os.getenv("CMAKE_OSX_ARCHITECTURES", "")
if macos_target_arch in ["arm64", "x86_64"]:
macos_sysroot_path = os.getenv("CMAKE_OSX_SYSROOT")
if macos_sysroot_path is None:
macos_sysroot_path = (
subprocess.check_output(
["xcrun", "--show-sdk-path", "--sdk", "macosx"]
)
.decode("utf-8")
.strip()
)
extra_compile_args += [
"-arch",
macos_target_arch,
"-isysroot",
macos_sysroot_path,
]
extra_link_args += ["-arch", macos_target_arch]
def make_relative_rpath_args(path: str) -> list[str]:
if IS_DARWIN:
return ["-Wl,-rpath,@loader_path/" + path]
elif IS_WINDOWS:
return []
else:
return ["-Wl,-rpath,$ORIGIN/" + path]
################################################################################
# Declare extensions and package
################################################################################
ext_modules: list[Extension] = []
# packages that we want to install into site-packages and include them in wheels
includes = ["torch", "torch.*", "torchgen", "torchgen.*"]
# exclude folders that they look like Python packages but are not wanted in wheels
excludes = ["tools", "tools.*", "caffe2", "caffe2.*"]
if cmake_cache_vars["BUILD_FUNCTORCH"]:
includes.extend(["functorch", "functorch.*"])
else:
excludes.extend(["functorch", "functorch.*"])
packages = find_packages(include=includes, exclude=excludes)
C = Extension(
"torch._C",
libraries=main_libraries,
sources=main_sources,
language="c",
extra_compile_args=[
*main_compile_args,
*extra_compile_args,
],
include_dirs=[],
library_dirs=library_dirs,
extra_link_args=[
*extra_link_args,
*main_link_args,
*make_relative_rpath_args("lib"),
],
)
ext_modules.append(C)
cmdclass = {
"bdist_wheel": bdist_wheel,
"build_ext": build_ext,
"clean": clean,
"sdist": sdist,
}
entry_points = {
"console_scripts": [
"torchrun = torch.distributed.run:main",
],
"torchrun.logs_specs": [
"default = torch.distributed.elastic.multiprocessing:DefaultLogsSpecs",
],
}
if cmake_cache_vars["USE_DISTRIBUTED"]:
# Only enable fr_trace command if distributed is enabled
entry_points["console_scripts"].append(
"torchfrtrace = torch.distributed.flight_recorder.fr_trace:main",
)
return ext_modules, cmdclass, packages, entry_points, extra_install_requires
# post run, warnings, printed at the end to make them more visible
build_update_message = """
It is no longer necessary to use the 'build' or 'rebuild' targets
To install:
$ python -m pip install --no-build-isolation -v .
To develop locally:
$ python -m pip install --no-build-isolation -v -e .
To force cmake to re-generate native build files (off by default):
$ CMAKE_FRESH=1 python -m pip install --no-build-isolation -v -e .
""".strip()
def print_box(msg: str) -> None:
msg = textwrap.dedent(msg).strip()
lines = ["", *msg.split("\n"), ""]
max_width = max(len(l) for l in lines)
print("+" + "-" * (max_width + 4) + "+", file=sys.stderr, flush=True)
for line in lines:
print(f"| {line:<{max_width}s} |", file=sys.stderr, flush=True)
print("+" + "-" * (max_width + 4) + "+", file=sys.stderr, flush=True)
def main() -> None:
if BUILD_LIBTORCH_WHL and BUILD_PYTHON_ONLY:
raise RuntimeError(
"Conflict: 'BUILD_LIBTORCH_WHL' and 'BUILD_PYTHON_ONLY' can't both be 1. "
"Set one to 0 and rerun."
)
install_requires = [
"filelock",
"typing-extensions>=4.10.0",
'setuptools ; python_version >= "3.12"',
"sympy>=1.13.3",
"networkx>=2.5.1",
"jinja2",
"fsspec>=0.8.5",
]
if BUILD_PYTHON_ONLY:
install_requires += [f"{LIBTORCH_PKG_NAME}=={TORCH_VERSION}"]
# Parse the command line and check the arguments before we proceed with
# building deps and setup. We need to set values so `--help` works.
dist = Distribution()
dist.script_name = os.path.basename(sys.argv[0])
dist.script_args = sys.argv[1:]
try:
dist.parse_command_line()
except setuptools.errors.BaseError as e:
print(e, file=sys.stderr)
sys.exit(1)
mirror_files_into_torchgen()
if RUN_BUILD_DEPS:
build_deps()
mirror_inductor_external_kernels()
(
ext_modules,
cmdclass,
packages,
entry_points,
extra_install_requires,
) = configure_extension_build()
install_requires += extra_install_requires
torch_package_data = [
"py.typed",
"bin/*",
"test/*",
"*.pyi",
"**/*.pyi",
"lib/*.pdb",
"lib/**/*.pdb",
"lib/*shm*",
"lib/torch_shm_manager",
"lib/*.h",
"lib/**/*.h",
"include/*.h",
"include/**/*.h",
"include/*.hpp",
"include/**/*.hpp",
"include/*.cuh",
"include/**/*.cuh",
"csrc/inductor/aoti_runtime/model.h",
"_inductor/codegen/*.h",
"_inductor/codegen/aoti_runtime/*.h",
"_inductor/codegen/aoti_runtime/*.cpp",
"_inductor/script.ld",
"_inductor/kernel/flex/templates/*.jinja",
"_inductor/kernel/templates/*.jinja",
"_export/serde/*.yaml",
"_export/serde/*.thrift",
"share/cmake/ATen/*.cmake",
"share/cmake/Caffe2/*.cmake",
"share/cmake/Caffe2/public/*.cmake",
"share/cmake/Caffe2/Modules_CUDA_fix/*.cmake",
"share/cmake/Caffe2/Modules_CUDA_fix/upstream/*.cmake",
"share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/*.cmake",
"share/cmake/Gloo/*.cmake",
"share/cmake/Tensorpipe/*.cmake",
"share/cmake/Torch/*.cmake",
"utils/benchmark/utils/*.cpp",
"utils/benchmark/utils/valgrind_wrapper/*.cpp",
"utils/benchmark/utils/valgrind_wrapper/*.h",
"utils/model_dump/skeleton.html",
"utils/model_dump/code.js",
"utils/model_dump/*.mjs",
"_dynamo/graph_break_registry.json",
"tools/dynamo/gb_id_mapping.py",
]
if not BUILD_LIBTORCH_WHL:
torch_package_data += [
"lib/libtorch_python.so",
"lib/libtorch_python.dylib",
"lib/libtorch_python.dll",
]
if not BUILD_PYTHON_ONLY:
torch_package_data += [
"lib/*.so*",
"lib/*.dylib*",
"lib/*.dll",
"lib/*.lib",
]
# XXX: Why not use wildcards ["lib/aotriton.images/*", "lib/aotriton.images/**/*"] here?
aotriton_image_path = TORCH_DIR / "lib" / "aotriton.images"
aks2_files = [
file.relative_to(TORCH_DIR).as_posix()
for file in aotriton_image_path.rglob("*")
if file.is_file()
]
torch_package_data += aks2_files
if get_cmake_cache_vars()["USE_TENSORPIPE"]:
torch_package_data += [
"include/tensorpipe/*.h",
"include/tensorpipe/**/*.h",
]
if get_cmake_cache_vars()["USE_KINETO"]:
torch_package_data += [
"include/kineto/*.h",
"include/kineto/**/*.h",
]
torchgen_package_data = [
"packaged/*",
"packaged/**/*",
]
package_data = {
"torch": torch_package_data,
}
# some win libraries are excluded
# these are statically linked
exclude_windows_libs = [
"lib/dnnl.lib",
"lib/kineto.lib",
"lib/libprotobuf-lite.lib",
"lib/libprotobuf.lib",
"lib/libprotoc.lib",
]
exclude_package_data = {
"torch": exclude_windows_libs,
}
if not BUILD_LIBTORCH_WHL:
package_data["torchgen"] = torchgen_package_data
exclude_package_data["torchgen"] = ["*.py[co]"]
else:
# no extensions in BUILD_LIBTORCH_WHL mode
ext_modules = []
setup(
name=TORCH_PACKAGE_NAME,
version=TORCH_VERSION,
ext_modules=ext_modules,
cmdclass=cmdclass,
packages=packages,
entry_points=entry_points,
install_requires=install_requires,
package_data=package_data,
exclude_package_data=exclude_package_data,
# Disable automatic inclusion of data files because we want to
# explicitly control with `package_data` above.
include_package_data=False,
)
if EMIT_BUILD_WARNING:
print_box(build_update_message)
if __name__ == "__main__":
main()
| sdist |
python | nedbat__coveragepy | tests/mixins.py | {
"start": 543,
"end": 1689
} | class ____:
"""A base class to connect to pytest in a test class hierarchy."""
@pytest.fixture(autouse=True)
def connect_to_pytest(
self,
request: pytest.FixtureRequest,
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Captures pytest facilities for use by other test helpers."""
# pylint: disable=attribute-defined-outside-init
self._pytest_request = request
self._monkeypatch = monkeypatch
self.setUp()
def setUp(self) -> None:
"""Per-test initialization. Override this as you wish."""
pass
def addCleanup(self, fn: Callable[..., None], *args: Any) -> None:
"""Like unittest's addCleanup: code to call when the test is done."""
self._pytest_request.addfinalizer(lambda: fn(*args))
def set_environ(self, name: str, value: str) -> None:
"""Set an environment variable `name` to be `value`."""
self._monkeypatch.setenv(name, value)
def del_environ(self, name: str) -> None:
"""Delete an environment variable, unless we set it."""
self._monkeypatch.delenv(name, raising=False)
| PytestBase |
python | joke2k__faker | faker/providers/bank/ro_RO/__init__.py | {
"start": 60,
"end": 883
} | class ____(BankProvider):
"""Implement bank provider for ``ro_RO`` locale."""
country_code = "RO"
bban_format = "????################"
swift_bank_codes = (
"NBOR",
"ABNA",
"BUCU",
"ARBL",
"MIND",
"BPOS",
"CARP",
"RNCB",
"BROM",
"BITR",
"BRDE",
"BRMA",
"BTRL",
"DAFB",
"MIRB",
"CECE",
"CITI",
"CRCO",
"FNNB",
"EGNA",
"BSEA",
"EXIM",
"UGBI",
"HVBL",
"INGB",
"BREL",
"CRDZ",
"BNRB",
"PIRB",
"PORL",
"MIRO",
"RZBL",
"RZBR",
"ROIN",
"WBAN",
"TRFD",
"TREZ",
"BACX",
"VBBU",
"DARO",
)
| Provider |
python | ansible__ansible | test/units/plugins/lookup/test_password.py | {
"start": 10738,
"end": 13159
} | class ____(unittest.TestCase):
def _assert_valid_chars(self, res, chars):
for res_char in res:
self.assertIn(res_char, chars)
def test_default(self):
res = password.random_password()
self.assertEqual(len(res), DEFAULT_LENGTH)
self.assertTrue(isinstance(res, str))
self._assert_valid_chars(res, DEFAULT_CANDIDATE_CHARS)
def test_zero_length(self):
res = password.random_password(length=0)
self.assertEqual(len(res), 0)
self.assertTrue(isinstance(res, str))
self._assert_valid_chars(res, u',')
def test_just_a_common(self):
res = password.random_password(length=1, chars=u',')
self.assertEqual(len(res), 1)
self.assertEqual(res, u',')
def test_free_will(self):
# A Rush and Spinal Tap reference twofer
res = password.random_password(length=11, chars=u'a')
self.assertEqual(len(res), 11)
self.assertEqual(res, 'aaaaaaaaaaa')
self._assert_valid_chars(res, u'a')
def test_unicode(self):
res = password.random_password(length=11, chars=u'くらとみ')
self._assert_valid_chars(res, u'くらとみ')
self.assertEqual(len(res), 11)
def test_seed(self):
pw1 = password.random_password(seed=1)
pw2 = password.random_password(seed=1)
pw3 = password.random_password(seed=2)
self.assertEqual(pw1, pw2)
self.assertNotEqual(pw1, pw3)
def test_gen_password(self):
for testcase in old_style_params_data:
params = testcase['params']
candidate_chars = testcase['candidate_chars']
params_chars_spec = password._gen_candidate_chars(params['chars'])
password_string = password.random_password(length=params['length'],
chars=params_chars_spec)
self.assertEqual(len(password_string),
params['length'],
msg='generated password=%s has length (%s) instead of expected length (%s)' %
(password_string, len(password_string), params['length']))
for char in password_string:
self.assertIn(char, candidate_chars,
msg='%s not found in %s from chars spect %s' %
(char, candidate_chars, params['chars']))
| TestRandomPassword |
python | Netflix__metaflow | metaflow/_vendor/packaging/version.py | {
"start": 4491,
"end": 16312
} | class ____(_BaseVersion):
"""This class abstracts handling of a project's versions.
A :class:`Version` instance is comparison aware and can be compared and
sorted using the standard Python interfaces.
>>> v1 = Version("1.0a5")
>>> v2 = Version("1.0")
>>> v1
<Version('1.0a5')>
>>> v2
<Version('1.0')>
>>> v1 < v2
True
>>> v1 == v2
False
>>> v1 > v2
False
>>> v1 >= v2
False
>>> v1 <= v2
True
"""
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
def __init__(self, version: str) -> None:
"""Initialize a Version object.
:param version:
The string representation of a version which will be parsed and normalized
before use.
:raises InvalidVersion:
If the ``version`` does not conform to PEP 440 in any way then this
exception will be raised.
"""
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion(f"Invalid version: '{version}'")
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=_parse_letter_version(
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
),
dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self) -> str:
"""A representation of the Version that shows all internal state.
>>> Version('1.0.0')
<Version('1.0.0')>
"""
return f"<Version('{self}')>"
def __str__(self) -> str:
"""A string representation of the version that can be rounded-tripped.
>>> str(Version("1.0a5"))
'1.0a5'
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(f".post{self.post}")
# Development release
if self.dev is not None:
parts.append(f".dev{self.dev}")
# Local version segment
if self.local is not None:
parts.append(f"+{self.local}")
return "".join(parts)
@property
def epoch(self) -> int:
"""The epoch of the version.
>>> Version("2.0.0").epoch
0
>>> Version("1!2.0.0").epoch
1
"""
_epoch: int = self._version.epoch
return _epoch
@property
def release(self) -> Tuple[int, ...]:
"""The components of the "release" segment of the version.
>>> Version("1.2.3").release
(1, 2, 3)
>>> Version("2.0.0").release
(2, 0, 0)
>>> Version("1!2.0.0.post0").release
(2, 0, 0)
Includes trailing zeroes but not the epoch or any pre-release / development /
post-release suffixes.
"""
_release: Tuple[int, ...] = self._version.release
return _release
@property
def pre(self) -> Optional[Tuple[str, int]]:
"""The pre-release segment of the version.
>>> print(Version("1.2.3").pre)
None
>>> Version("1.2.3a1").pre
('a', 1)
>>> Version("1.2.3b1").pre
('b', 1)
>>> Version("1.2.3rc1").pre
('rc', 1)
"""
_pre: Optional[Tuple[str, int]] = self._version.pre
return _pre
@property
def post(self) -> Optional[int]:
"""The post-release number of the version.
>>> print(Version("1.2.3").post)
None
>>> Version("1.2.3.post1").post
1
"""
return self._version.post[1] if self._version.post else None
@property
def dev(self) -> Optional[int]:
"""The development number of the version.
>>> print(Version("1.2.3").dev)
None
>>> Version("1.2.3.dev1").dev
1
"""
return self._version.dev[1] if self._version.dev else None
@property
def local(self) -> Optional[str]:
"""The local version segment of the version.
>>> print(Version("1.2.3").local)
None
>>> Version("1.2.3+abc").local
'abc'
"""
if self._version.local:
return ".".join(str(x) for x in self._version.local)
else:
return None
@property
def public(self) -> str:
"""The public portion of the version.
>>> Version("1.2.3").public
'1.2.3'
>>> Version("1.2.3+abc").public
'1.2.3'
>>> Version("1.2.3+abc.dev1").public
'1.2.3'
"""
return str(self).split("+", 1)[0]
@property
def base_version(self) -> str:
"""The "base version" of the version.
>>> Version("1.2.3").base_version
'1.2.3'
>>> Version("1.2.3+abc").base_version
'1.2.3'
>>> Version("1!1.2.3+abc.dev1").base_version
'1!1.2.3'
The "base version" is the public version of the project without any pre or post
release markers.
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
@property
def is_prerelease(self) -> bool:
"""Whether this version is a pre-release.
>>> Version("1.2.3").is_prerelease
False
>>> Version("1.2.3a1").is_prerelease
True
>>> Version("1.2.3b1").is_prerelease
True
>>> Version("1.2.3rc1").is_prerelease
True
>>> Version("1.2.3dev1").is_prerelease
True
"""
return self.dev is not None or self.pre is not None
@property
def is_postrelease(self) -> bool:
"""Whether this version is a post-release.
>>> Version("1.2.3").is_postrelease
False
>>> Version("1.2.3.post1").is_postrelease
True
"""
return self.post is not None
@property
def is_devrelease(self) -> bool:
"""Whether this version is a development release.
>>> Version("1.2.3").is_devrelease
False
>>> Version("1.2.3.dev1").is_devrelease
True
"""
return self.dev is not None
@property
def major(self) -> int:
"""The first item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").major
1
"""
return self.release[0] if len(self.release) >= 1 else 0
@property
def minor(self) -> int:
"""The second item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").minor
2
>>> Version("1").minor
0
"""
return self.release[1] if len(self.release) >= 2 else 0
@property
def micro(self) -> int:
"""The third item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").micro
3
>>> Version("1").micro
0
"""
return self.release[2] if len(self.release) >= 3 else 0
def _parse_letter_version(
letter: str, number: Union[str, bytes, SupportsInt]
) -> Optional[Tuple[str, int]]:
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
return None
_local_version_separators = re.compile(r"[\._-]")
def _parse_local_version(local: str) -> Optional[LocalType]:
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_separators.split(local)
)
return None
def _cmpkey(
epoch: int,
release: Tuple[int, ...],
pre: Optional[Tuple[str, int]],
post: Optional[Tuple[str, int]],
dev: Optional[Tuple[str, int]],
local: Optional[Tuple[SubLocalType]],
) -> CmpKey:
# When we compare a release version, we want to compare it with all of the
# trailing zeros removed. So we'll use a reverse the list, drop all the now
# leading zeros until we come to something non zero, then take the rest
# re-reverse it back into the correct order and make it a tuple and use
# that for our sorting key.
_release = tuple(
reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
_pre: PrePostDevType = NegativeInfinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
_pre = Infinity
else:
_pre = pre
# Versions without a post segment should sort before those with one.
if post is None:
_post: PrePostDevType = NegativeInfinity
else:
_post = post
# Versions without a development segment should sort after those with one.
if dev is None:
_dev: PrePostDevType = Infinity
else:
_dev = dev
if local is None:
# Versions without a local segment should sort before those with one.
_local: LocalType = NegativeInfinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
_local = tuple(
(i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
)
return epoch, _release, _pre, _post, _dev, _local
| Version |
python | modin-project__modin | modin/config/envvars.py | {
"start": 23033,
"end": 23177
} | class ____(EnvironmentVariable, type=bool):
"""Whether to Turn on experimental features."""
varname = "MODIN_EXPERIMENTAL"
| IsExperimental |
python | django-haystack__django-haystack | test_haystack/test_utils.py | {
"start": 2238,
"end": 14472
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.document_1 = "This is a test of the highlightable words detection. This is only a test. Were this an actual emergency, your text would have exploded in mid-air."
self.document_2 = (
"The content of words in no particular order causes nothing to occur."
)
self.document_3 = "%s %s" % (self.document_1, self.document_2)
def test_find_highlightable_words(self):
highlighter = Highlighter("this test")
highlighter.text_block = self.document_1
self.assertEqual(
highlighter.find_highlightable_words(),
{"this": [0, 53, 79], "test": [10, 68]},
)
# We don't stem for now.
highlighter = Highlighter("highlight tests")
highlighter.text_block = self.document_1
self.assertEqual(
highlighter.find_highlightable_words(), {"highlight": [22], "tests": []}
)
# Ignore negated bits.
highlighter = Highlighter("highlight -test")
highlighter.text_block = self.document_1
self.assertEqual(highlighter.find_highlightable_words(), {"highlight": [22]})
def test_find_window(self):
# The query doesn't matter for this method, so ignore it.
highlighter = Highlighter("")
highlighter.text_block = self.document_1
# No query.
self.assertEqual(highlighter.find_window({}), (0, 200))
# Nothing found.
self.assertEqual(
highlighter.find_window({"highlight": [], "tests": []}), (0, 200)
)
# Simple cases.
self.assertEqual(
highlighter.find_window({"highlight": [0], "tests": [100]}), (0, 200)
)
self.assertEqual(
highlighter.find_window({"highlight": [99], "tests": [199]}), (99, 299)
)
self.assertEqual(
highlighter.find_window({"highlight": [0], "tests": [201]}), (0, 200)
)
self.assertEqual(
highlighter.find_window({"highlight": [203], "tests": [120]}), (120, 320)
)
self.assertEqual(
highlighter.find_window({"highlight": [], "tests": [100]}), (100, 300)
)
self.assertEqual(
highlighter.find_window({"highlight": [0], "tests": [80], "moof": [120]}),
(0, 200),
)
# Simple cases, with an outlier far outside the window.
self.assertEqual(
highlighter.find_window({"highlight": [0], "tests": [100, 450]}), (0, 200)
)
self.assertEqual(
highlighter.find_window({"highlight": [100], "tests": [220, 450]}),
(100, 300),
)
self.assertEqual(
highlighter.find_window({"highlight": [100], "tests": [350, 450]}),
(350, 550),
)
self.assertEqual(
highlighter.find_window(
{"highlight": [100], "tests": [220], "moof": [450]}
),
(100, 300),
)
# Density checks.
self.assertEqual(
highlighter.find_window({"highlight": [0], "tests": [100, 180, 450]}),
(0, 200),
)
self.assertEqual(
highlighter.find_window(
{"highlight": [0, 40], "tests": [100, 200, 220, 450]}
),
(40, 240),
)
self.assertEqual(
highlighter.find_window(
{"highlight": [0, 40], "tests": [100, 200, 220], "moof": [450]}
),
(40, 240),
)
self.assertEqual(
highlighter.find_window(
{
"highlight": [0, 40],
"tests": [100, 200, 220],
"moof": [294, 299, 450],
}
),
(100, 300),
)
def test_render_html(self):
highlighter = Highlighter("this test")
highlighter.text_block = self.document_1
self.assertEqual(
highlighter.render_html({"this": [0, 53, 79], "test": [10, 68]}, 0, 200),
'<span class="highlighted">This</span> is a <span class="highlighted">test</span> of the highlightable words detection. <span class="highlighted">This</span> is only a <span class="highlighted">test</span>. Were <span class="highlighted">this</span> an actual emergency, your text would have exploded in mid-air.',
)
highlighter.text_block = self.document_2
self.assertEqual(
highlighter.render_html({"this": [0, 53, 79], "test": [10, 68]}, 0, 200),
"The content of words in no particular order causes nothing to occur.",
)
highlighter.text_block = self.document_3
self.assertEqual(
highlighter.render_html({"this": [0, 53, 79], "test": [10, 68]}, 0, 200),
'<span class="highlighted">This</span> is a <span class="highlighted">test</span> of the highlightable words detection. <span class="highlighted">This</span> is only a <span class="highlighted">test</span>. Were <span class="highlighted">this</span> an actual emergency, your text would have exploded in mid-air. The content of words in no particular order causes no...',
)
highlighter = Highlighter("content detection")
highlighter.text_block = self.document_3
self.assertEqual(
highlighter.render_html({"content": [151], "detection": [42]}, 42, 242),
'...<span class="highlighted">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-air. The <span class="highlighted">content</span> of words in no particular order causes nothing to occur.',
)
self.assertEqual(
highlighter.render_html({"content": [151], "detection": [42]}, 42, 200),
'...<span class="highlighted">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-air. The <span class="highlighted">content</span> of words in no particular order causes no...',
)
# One term found within another term.
highlighter = Highlighter("this is")
highlighter.text_block = self.document_1
self.assertEqual(
highlighter.render_html(
{"this": [0, 53, 79], "is": [2, 5, 55, 58, 81]}, 0, 200
),
'<span class="highlighted">This</span> <span class="highlighted">is</span> a test of the highlightable words detection. <span class="highlighted">This</span> <span class="highlighted">is</span> only a test. Were <span class="highlighted">this</span> an actual emergency, your text would have exploded in mid-air.',
)
# Regression for repetition in the regular expression.
highlighter = Highlighter("i++")
highlighter.text_block = "Foo is i++ in most cases."
self.assertEqual(
highlighter.render_html({"i++": [7]}, 0, 200),
'Foo is <span class="highlighted">i++</span> in most cases.',
)
highlighter = Highlighter("i**")
highlighter.text_block = "Foo is i** in most cases."
self.assertEqual(
highlighter.render_html({"i**": [7]}, 0, 200),
'Foo is <span class="highlighted">i**</span> in most cases.',
)
highlighter = Highlighter("i..")
highlighter.text_block = "Foo is i.. in most cases."
self.assertEqual(
highlighter.render_html({"i..": [7]}, 0, 200),
'Foo is <span class="highlighted">i..</span> in most cases.',
)
highlighter = Highlighter("i??")
highlighter.text_block = "Foo is i?? in most cases."
self.assertEqual(
highlighter.render_html({"i??": [7]}, 0, 200),
'Foo is <span class="highlighted">i??</span> in most cases.',
)
# Regression for highlighting already highlighted HTML terms.
highlighter = Highlighter("span")
highlighter.text_block = "A span in spam makes html in a can."
self.assertEqual(
highlighter.render_html({"span": [2]}, 0, 200),
'A <span class="highlighted">span</span> in spam makes html in a can.',
)
highlighter = Highlighter("highlight")
highlighter.text_block = "A span in spam makes highlighted html in a can."
self.assertEqual(
highlighter.render_html({"highlight": [21]}, 0, 200),
'A span in spam makes <span class="highlighted">highlight</span>ed html in a can.',
)
def test_highlight(self):
highlighter = Highlighter("this test")
self.assertEqual(
highlighter.highlight(self.document_1),
'<span class="highlighted">This</span> is a <span class="highlighted">test</span> of the highlightable words detection. <span class="highlighted">This</span> is only a <span class="highlighted">test</span>. Were <span class="highlighted">this</span> an actual emergency, your text would have exploded in mid-air.',
)
self.assertEqual(
highlighter.highlight(self.document_2),
"The content of words in no particular order causes nothing to occur.",
)
self.assertEqual(
highlighter.highlight(self.document_3),
'<span class="highlighted">This</span> is a <span class="highlighted">test</span> of the highlightable words detection. <span class="highlighted">This</span> is only a <span class="highlighted">test</span>. Were <span class="highlighted">this</span> an actual emergency, your text would have exploded in mid-air. The content of words in no particular order causes no...',
)
highlighter = Highlighter("this test", html_tag="div", css_class=None)
self.assertEqual(
highlighter.highlight(self.document_1),
"<div>This</div> is a <div>test</div> of the highlightable words detection. <div>This</div> is only a <div>test</div>. Were <div>this</div> an actual emergency, your text would have exploded in mid-air.",
)
self.assertEqual(
highlighter.highlight(self.document_2),
"The content of words in no particular order causes nothing to occur.",
)
self.assertEqual(
highlighter.highlight(self.document_3),
"<div>This</div> is a <div>test</div> of the highlightable words detection. <div>This</div> is only a <div>test</div>. Were <div>this</div> an actual emergency, your text would have exploded in mid-air. The content of words in no particular order causes no...",
)
highlighter = Highlighter("content detection")
self.assertEqual(
highlighter.highlight(self.document_1),
'...<span class="highlighted">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-air.',
)
self.assertEqual(
highlighter.highlight(self.document_2),
'...<span class="highlighted">content</span> of words in no particular order causes nothing to occur.',
)
self.assertEqual(
highlighter.highlight(self.document_3),
'...<span class="highlighted">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-air. The <span class="highlighted">content</span> of words in no particular order causes nothing to occur.',
)
highlighter = Highlighter("content detection", max_length=100)
self.assertEqual(
highlighter.highlight(self.document_1),
'...<span class="highlighted">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-...',
)
self.assertEqual(
highlighter.highlight(self.document_2),
'...<span class="highlighted">content</span> of words in no particular order causes nothing to occur.',
)
self.assertEqual(
highlighter.highlight(self.document_3),
'This is a test of the highlightable words <span class="highlighted">detection</span>. This is only a test. Were this an actual emerge...',
)
| HighlighterTestCase |
python | pypa__pip | src/pip/_vendor/packaging/_elffile.py | {
"start": 515,
"end": 569
} | class ____(enum.IntEnum):
Lsb = 1
Msb = 2
| EIData |
python | doocs__leetcode | solution/0000-0099/0071.Simplify Path/Solution.py | {
"start": 0,
"end": 335
} | class ____:
def simplifyPath(self, path: str) -> str:
stk = []
for s in path.split('/'):
if not s or s == '.':
continue
if s == '..':
if stk:
stk.pop()
else:
stk.append(s)
return '/' + '/'.join(stk)
| Solution |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/interpolate_test.py | {
"start": 104,
"end": 4250
} | class ____(op_bench.TorchBenchmarkBase):
def init(
self,
input_size,
output_size,
channels_last=False,
mode="linear",
dtype=torch.float,
):
input_image = torch.randint(
0,
256,
size=input_size,
dtype=dtype,
device="cpu",
requires_grad=self.auto_set(),
)
if channels_last:
if input_image.ndim == 4:
input_image = input_image.contiguous(memory_format=torch.channels_last)
elif input_image.ndim == 5:
input_image = input_image.contiguous(
memory_format=torch.channels_last_3d
)
else:
raise ValueError(
f"Can not set channels_last to the input of {input_image.ndim} dims"
)
align_corners = None if mode == "nearest" else False
if mode == "linear":
mode = {
3: "linear",
4: "bilinear",
5: "trilinear",
}[input_image.ndim]
self.inputs = {
"input_image": input_image,
"output_size": output_size,
"mode": mode,
"align_corners": align_corners,
}
self.set_module_name("interpolate")
def forward(self, input_image, output_size, mode, align_corners):
return torch.nn.functional.interpolate(
input_image, size=output_size, mode=mode, align_corners=align_corners
)
config_short = op_bench.config_list(
attr_names=["input_size", "output_size"],
attrs=[
[(1, 3, 60, 40), (24, 24)],
[(1, 3, 600, 400), (240, 240)],
[(1, 3, 320, 320), (256, 256)],
[(1, 1, 60, 40), (24, 24)],
[(1, 1, 600, 400), (240, 240)],
[(1, 1, 320, 320), (256, 256)],
],
cross_product_configs={
"channels_last": [True, False],
"mode": ["nearest", "linear", "bicubic"],
},
tags=["short"],
)
config_short += op_bench.config_list(
attr_names=["input_size", "output_size"],
attrs=[
[(1, 3, 60, 40), (24, 24)],
[(1, 3, 600, 400), (240, 240)],
[(1, 3, 320, 320), (256, 256)],
[(1, 1, 60, 40), (24, 24)],
[(1, 1, 600, 400), (240, 240)],
[(1, 1, 320, 320), (256, 256)],
],
cross_product_configs={
"channels_last": [True, False],
"mode": [
"nearest",
],
"dtype": [
torch.uint8,
],
},
tags=["short"],
)
config_long = op_bench.config_list(
attr_names=["input_size", "output_size"],
attrs=[
[(1, 3, 320, 320), (512, 512)],
[(1, 3, 500, 500), (256, 256)],
[(1, 3, 500, 500), (800, 800)],
[(1, 1, 320, 320), (512, 512)],
[(1, 1, 500, 500), (256, 256)],
[(1, 1, 500, 500), (800, 800)],
# vectorization test-case
[(2, 128, 64, 46), (128, 128)],
[(2, 128, 64, 46), (32, 24)],
],
cross_product_configs={
"channels_last": [True, False],
"mode": ["nearest", "linear", "bicubic"],
},
tags=["long"],
)
config_3d = op_bench.config_list(
# no channels_last for 3D tensors
attr_names=["input_size", "output_size"],
attrs=[
[(4, 512, 320), (256,)],
[(4, 512, 320), (512,)],
],
cross_product_configs={
"mode": ["nearest", "linear"],
},
tags=["long"],
)
config_5d = op_bench.config_list(
attr_names=["input_size", "output_size"],
attrs=[
[(1, 3, 16, 320, 320), (8, 256, 256)],
[(1, 3, 16, 320, 320), (32, 512, 512)],
# vectorization test-case
[(1, 16, 32, 64, 64), (16, 32, 32)],
[(1, 16, 32, 64, 64), (64, 128, 128)],
],
cross_product_configs={
"channels_last": [True, False],
"mode": ["nearest", "linear"],
},
tags=["long"],
)
for config in (config_short, config_long, config_3d, config_5d):
op_bench.generate_pt_test(config, InterpolateBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| InterpolateBenchmark |
python | huggingface__transformers | src/transformers/models/colqwen2/configuration_colqwen2.py | {
"start": 799,
"end": 3791
} | class ____(PreTrainedConfig):
r"""
Configuration class to store the configuration of a [`ColQ2en2ForRetrieval`]. It is used to instantiate an instance
of `ColQwen2ForRetrieval` according to the specified arguments, defining the model architecture following the methodology
from the "ColPali: Efficient Document Retrieval with Vision Language Models" paper.
Instantiating a configuration with the defaults will yield a similar configuration to the vision encoder used by the pre-trained
ColQwen2-v1.0 model, e.g. [vidore/colqwen2-v1.0-hf](https://huggingface.co/vidore/colqwen2-v1.0-hf).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vlm_config (`PreTrainedConfig`, *optional*):
Configuration of the VLM backbone model.
embedding_dim (`int`, *optional*, defaults to 128):
Dimension of the multi-vector embeddings produced by the model.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
from transformers.models.colqwen2 import ColQwen2Config, ColQwen2ForRetrieval
config = ColQwen2Config()
model = ColQwen2ForRetrieval(config)
```
"""
model_type = "colqwen2"
sub_configs: dict[str, Any] = {"vlm_config": PreTrainedConfig}
def __init__(
self,
vlm_config=None,
embedding_dim: int = 128,
initializer_range: float = 0.02,
**kwargs,
):
if vlm_config is None:
vlm_config = CONFIG_MAPPING["qwen2_vl"]()
logger.info(
"`vlm_config` is `None`. Initializing `vlm_config` with the `Qwen2VLConfig` with default values."
)
elif isinstance(vlm_config, dict):
vlm_config = deepcopy(vlm_config)
if "model_type" not in vlm_config:
raise KeyError(
"The `model_type` key is missing in the `vlm_config` dictionary. Please provide the model type."
)
vlm_config = CONFIG_MAPPING[vlm_config["model_type"]](**vlm_config)
elif not isinstance(vlm_config, PreTrainedConfig):
raise TypeError(
f"Invalid type for `vlm_config`. Expected `PreTrainedConfig`, `dict`, or `None`, but got {type(vlm_config)}."
)
if not hasattr(vlm_config, "vocab_size"):
vlm_config.vocab_size = vlm_config.get_text_config().vocab_size
self.vlm_config = vlm_config
self.embedding_dim = embedding_dim
self.initializer_range = initializer_range
super().__init__(**kwargs)
def get_text_config(self, *args, **kwargs) -> PreTrainedConfig:
return self.vlm_config.get_text_config(*args, **kwargs)
__all__ = ["ColQwen2Config"]
| ColQwen2Config |
python | getsentry__sentry | tests/sentry/middleware/integrations/parsers/test_plugin.py | {
"start": 624,
"end": 3909
} | class ____(TestCase):
factory = RequestFactory()
def get_response(self, request: HttpRequest) -> HttpResponse:
return HttpResponse(status=200, content="passthrough")
@responses.activate
def test_routing_webhooks_no_region(self) -> None:
routes = [
reverse("sentry-plugins-github-webhook", args=[self.organization.id]),
reverse("sentry-plugins-bitbucket-webhook", args=[self.organization.id]),
]
# No mapping
OrganizationMapping.objects.get(organization_id=self.organization.id).update(
region_name="eu"
)
for route in routes:
request = self.factory.post(route)
parser = PluginRequestParser(request=request, response_handler=self.get_response)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == 200
assert response.content == b"passthrough"
assert len(responses.calls) == 0
assert_no_webhook_payloads()
def test_routing_webhooks_with_region(self) -> None:
routes = [
reverse("sentry-plugins-github-webhook", args=[self.organization.id]),
reverse("sentry-plugins-bitbucket-webhook", args=[self.organization.id]),
]
OrganizationMapping.objects.get(organization_id=self.organization.id).update(
region_name="us"
)
for route in routes:
request = self.factory.post(route)
parser = PluginRequestParser(request=request, response_handler=self.get_response)
parser.get_response()
assert_webhook_payloads_for_mailbox(
request=request,
mailbox_name=f"plugins:{self.organization.id}",
region_names=["us"],
)
# Purge outboxes after checking each route
WebhookPayload.objects.all().delete()
def test_routing_for_missing_organization(self) -> None:
# Delete the mapping to simulate an org being deleted.
OrganizationMapping.objects.filter(organization_id=self.organization.id).delete()
routes = {
reverse("sentry-plugins-github-webhook", args=[self.organization.id]): True,
reverse("sentry-plugins-bitbucket-webhook", args=[self.organization.id]): True,
}
for route in routes:
request = self.factory.post(route)
parser = PluginRequestParser(request=request, response_handler=self.get_response)
response = parser.get_response()
assert response.status_code == 400
def test_invalid_webhooks(self) -> None:
routes = {
reverse("sentry-plugins-github-webhook", args=[self.organization.id]): True,
reverse("sentry-plugins-bitbucket-webhook", args=[self.organization.id]): True,
reverse("sentry-plugins-github-installation-webhook"): False,
"/api/0/organizations": False,
}
for route, should_operate in routes.items():
request = self.factory.post(route)
parser = PluginRequestParser(request=request, response_handler=self.get_response)
assert parser.should_operate() == should_operate
| PluginRequestParserTest |
python | cookiecutter__cookiecutter | cookiecutter/exceptions.py | {
"start": 2327,
"end": 2494
} | class ____(CookiecutterException):
"""
Exception for a empty directory name.
Raised when the directory name provided is empty.
"""
| EmptyDirNameException |
python | getsentry__sentry | src/sentry/models/team.py | {
"start": 3971,
"end": 7109
} | class ____(ReplicatedRegionModel):
"""
A team represents a group of individuals which maintain ownership of projects.
"""
__relocation_scope__ = RelocationScope.Organization
category = OutboxCategory.TEAM_UPDATE
organization = FlexibleForeignKey("sentry.Organization")
slug = SentrySlugField()
# Only currently used in SCIM, use slug elsewhere as this isn't updated in the app.
# TODO: deprecate name in team API responses or keep it up to date with slug
name = models.CharField(max_length=64)
status = BoundedPositiveIntegerField(
choices=(
(TeamStatus.ACTIVE, _("Active")),
(TeamStatus.PENDING_DELETION, _("Pending Deletion")),
(TeamStatus.DELETION_IN_PROGRESS, _("Deletion in Progress")),
),
default=TeamStatus.ACTIVE,
)
idp_provisioned = models.BooleanField(default=False, db_default=False)
date_added = models.DateTimeField(default=timezone.now, null=True)
objects: ClassVar[TeamManager] = TeamManager(cache_fields=("pk", "slug"))
class Meta:
app_label = "sentry"
db_table = "sentry_team"
unique_together = (("organization", "slug"),)
__repr__ = sane_repr("name", "slug")
def class_name(self) -> str:
return "Team"
def __str__(self) -> str:
return f"{self.name} ({self.slug})"
def handle_async_replication(self, shard_identifier: int) -> None:
from sentry.hybridcloud.services.replica import control_replica_service
from sentry.organizations.services.organization.serial import serialize_rpc_team
control_replica_service.upsert_replicated_team(team=serialize_rpc_team(self))
def save(self, *args, **kwargs):
if not self.slug:
lock = locks.get(f"slug:team:{self.organization_id}", duration=5, name="team_slug")
with TimedRetryPolicy(10)(lock.acquire):
slugify_instance(self, self.name, organization=self.organization)
if settings.SENTRY_USE_SNOWFLAKE:
snowflake_redis_key = "team_snowflake_key"
save_with_snowflake_id(
instance=self,
snowflake_redis_key=snowflake_redis_key,
save_callback=lambda: super(Team, self).save(*args, **kwargs),
)
else:
super().save(*args, **kwargs)
@property
def member_set(self):
""":returns a QuerySet of all Users that belong to this Team"""
return self.organization.member_set.filter(
organizationmemberteam__team=self,
organizationmemberteam__is_active=True,
user_id__isnull=False,
user_is_active=True,
).distinct()
def get_audit_log_data(self):
return {
"id": self.id,
"slug": self.slug,
"name": self.name,
"status": self.status,
}
def get_projects(self):
from sentry.models.project import Project
return Project.objects.get_for_team_ids([self.id])
def get_member_user_ids(self):
return self.member_set.values_list("user_id", flat=True)
| Team |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_s3.py | {
"start": 5083,
"end": 6398
} | class ____:
def setup_method(self):
self.get_bucket_tagging_operator = S3GetBucketTaggingOperator(
task_id="test-s3-get-bucket-tagging-operator",
bucket_name=BUCKET_NAME,
)
@mock_aws
@mock.patch.object(S3Hook, "get_bucket_tagging")
@mock.patch.object(S3Hook, "check_for_bucket")
def test_execute_if_bucket_exist(self, mock_check_for_bucket, get_bucket_tagging):
mock_check_for_bucket.return_value = True
# execute s3 get bucket tagging operator
self.get_bucket_tagging_operator.execute({})
mock_check_for_bucket.assert_called_once_with(BUCKET_NAME)
get_bucket_tagging.assert_called_once_with(BUCKET_NAME)
@mock_aws
@mock.patch.object(S3Hook, "get_bucket_tagging")
@mock.patch.object(S3Hook, "check_for_bucket")
def test_execute_if_not_bucket_exist(self, mock_check_for_bucket, get_bucket_tagging):
mock_check_for_bucket.return_value = False
# execute s3 get bucket tagging operator
self.get_bucket_tagging_operator.execute({})
mock_check_for_bucket.assert_called_once_with(BUCKET_NAME)
get_bucket_tagging.assert_not_called()
def test_template_fields(self):
validate_template_fields(self.get_bucket_tagging_operator)
| TestS3GetBucketTaggingOperator |
python | dask__distributed | distributed/tests/test_nanny.py | {
"start": 29783,
"end": 31242
} | class ____(Nanny):
def __init__(self, *args, in_instantiate, wait_instantiate, **kwargs):
super().__init__(*args, **kwargs)
self.in_instantiate = in_instantiate
self.wait_instantiate = wait_instantiate
async def instantiate(self):
self.in_instantiate.set()
self.wait_instantiate.wait()
return await super().instantiate()
def run_nanny(scheduler_addr, in_instantiate, wait_instantiate):
async def _():
worker = await SlowDistNanny(
scheduler_addr,
wait_instantiate=wait_instantiate,
in_instantiate=in_instantiate,
)
await worker.finished()
asyncio.run(_())
@pytest.mark.parametrize("restart", [True, False])
@gen_cluster(client=True, nthreads=[])
async def test_nanny_plugin_register_nanny_killed(c, s, restart):
in_instantiate = get_mp_context().Event()
wait_instantiate = get_mp_context().Event()
proc = get_mp_context().Process(
name="run_nanny",
target=run_nanny,
kwargs={
"in_instantiate": in_instantiate,
"wait_instantiate": wait_instantiate,
},
args=(s.address,),
)
proc.start()
try:
plugin = DummyNannyPlugin("foo", restart=restart)
await asyncio.to_thread(in_instantiate.wait)
register = asyncio.create_task(c.register_plugin(plugin))
finally:
proc.kill()
assert await register == {}
| SlowDistNanny |
python | Textualize__textual | docs/examples/styles/outline_vs_border.py | {
"start": 384,
"end": 690
} | class ____(App):
CSS_PATH = "outline_vs_border.tcss"
def compose(self):
yield Label(TEXT, classes="outline")
yield Label(TEXT, classes="border")
yield Label(TEXT, classes="outline border")
if __name__ == "__main__":
app = OutlineBorderApp()
app.run()
| OutlineBorderApp |
python | doocs__leetcode | solution/1400-1499/1477.Find Two Non-overlapping Sub-arrays Each With Target Sum/Solution.py | {
"start": 0,
"end": 471
} | class ____:
def minSumOfLengths(self, arr: List[int], target: int) -> int:
d = {0: 0}
s, n = 0, len(arr)
f = [inf] * (n + 1)
ans = inf
for i, v in enumerate(arr, 1):
s += v
f[i] = f[i - 1]
if s - target in d:
j = d[s - target]
f[i] = min(f[i], i - j)
ans = min(ans, f[j] + i - j)
d[s] = i
return -1 if ans > n else ans
| Solution |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/string_conversion.py | {
"start": 1399,
"end": 1472
} | class ____:
f: str = ""
def __str__(self):
return self.f
| B |
python | pytest-dev__pytest | src/_pytest/python.py | {
"start": 60020,
"end": 66875
} | class ____(PyobjMixin, nodes.Item):
"""Item responsible for setting up and executing a Python test function.
:param name:
The full function name, including any decorations like those
added by parametrization (``my_func[my_param]``).
:param parent:
The parent Node.
:param config:
The pytest Config object.
:param callspec:
If given, this function has been parametrized and the callspec contains
meta information about the parametrization.
:param callobj:
If given, the object which will be called when the Function is invoked,
otherwise the callobj will be obtained from ``parent`` using ``originalname``.
:param keywords:
Keywords bound to the function object for "-k" matching.
:param session:
The pytest Session object.
:param fixtureinfo:
Fixture information already resolved at this fixture node..
:param originalname:
The attribute name to use for accessing the underlying function object.
Defaults to ``name``. Set this if name is different from the original name,
for example when it contains decorations like those added by parametrization
(``my_func[my_param]``).
"""
# Disable since functions handle it themselves.
_ALLOW_MARKERS = False
def __init__(
self,
name: str,
parent,
config: Config | None = None,
callspec: CallSpec2 | None = None,
callobj=NOTSET,
keywords: Mapping[str, Any] | None = None,
session: Session | None = None,
fixtureinfo: FuncFixtureInfo | None = None,
originalname: str | None = None,
) -> None:
super().__init__(name, parent, config=config, session=session)
if callobj is not NOTSET:
self._obj = callobj
self._instance = getattr(callobj, "__self__", None)
#: Original function name, without any decorations (for example
#: parametrization adds a ``"[...]"`` suffix to function names), used to access
#: the underlying function object from ``parent`` (in case ``callobj`` is not given
#: explicitly).
#:
#: .. versionadded:: 3.0
self.originalname = originalname or name
# Note: when FunctionDefinition is introduced, we should change ``originalname``
# to a readonly property that returns FunctionDefinition.name.
self.own_markers.extend(get_unpacked_marks(self.obj))
if callspec:
self.callspec = callspec
self.own_markers.extend(callspec.marks)
# todo: this is a hell of a hack
# https://github.com/pytest-dev/pytest/issues/4569
# Note: the order of the updates is important here; indicates what
# takes priority (ctor argument over function attributes over markers).
# Take own_markers only; NodeKeywords handles parent traversal on its own.
self.keywords.update((mark.name, mark) for mark in self.own_markers)
self.keywords.update(self.obj.__dict__)
if keywords:
self.keywords.update(keywords)
if fixtureinfo is None:
fm = self.session._fixturemanager
fixtureinfo = fm.getfixtureinfo(self, self.obj, self.cls)
self._fixtureinfo: FuncFixtureInfo = fixtureinfo
self.fixturenames = fixtureinfo.names_closure
self._initrequest()
# todo: determine sound type limitations
@classmethod
def from_parent(cls, parent, **kw) -> Self:
"""The public constructor."""
return super().from_parent(parent=parent, **kw)
def _initrequest(self) -> None:
self.funcargs: dict[str, object] = {}
self._request = fixtures.TopRequest(self, _ispytest=True)
@property
def function(self):
"""Underlying python 'function' object."""
return getimfunc(self.obj)
@property
def instance(self):
try:
return self._instance
except AttributeError:
if isinstance(self.parent, Class):
# Each Function gets a fresh class instance.
self._instance = self._getinstance()
else:
self._instance = None
return self._instance
def _getinstance(self):
if isinstance(self.parent, Class):
# Each Function gets a fresh class instance.
return self.parent.newinstance()
else:
return None
def _getobj(self):
instance = self.instance
if instance is not None:
parent_obj = instance
else:
assert self.parent is not None
parent_obj = self.parent.obj # type: ignore[attr-defined]
return getattr(parent_obj, self.originalname)
@property
def _pyfuncitem(self):
"""(compatonly) for code expecting pytest-2.2 style request objects."""
return self
def runtest(self) -> None:
"""Execute the underlying test function."""
self.ihook.pytest_pyfunc_call(pyfuncitem=self)
def setup(self) -> None:
self._request._fillfixtures()
def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback:
if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False):
code = _pytest._code.Code.from_function(get_real_func(self.obj))
path, firstlineno = code.path, code.firstlineno
traceback = excinfo.traceback
ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
if ntraceback == traceback:
ntraceback = ntraceback.cut(path=path)
if ntraceback == traceback:
ntraceback = ntraceback.filter(filter_traceback)
if not ntraceback:
ntraceback = traceback
ntraceback = ntraceback.filter(excinfo)
# issue364: mark all but first and last frames to
# only show a single-line message for each frame.
if self.config.getoption("tbstyle", "auto") == "auto":
if len(ntraceback) > 2:
ntraceback = Traceback(
(
ntraceback[0],
*(t.with_repr_style("short") for t in ntraceback[1:-1]),
ntraceback[-1],
)
)
return ntraceback
return excinfo.traceback
# TODO: Type ignored -- breaks Liskov Substitution.
def repr_failure( # type: ignore[override]
self,
excinfo: ExceptionInfo[BaseException],
) -> str | TerminalRepr:
style = self.config.getoption("tbstyle", "auto")
if style == "auto":
style = "long"
return self._repr_failure_py(excinfo, style=style)
| Function |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_univariate.py | {
"start": 5865,
"end": 6823
} | class ____(Benchmark):
"""
Univariate Problem08 objective function.
This class defines the Univariate Problem08 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Problem08}}(x) = - \\sum_{k=1}^6 k \\cos[(k+1)x+k]
Bound constraints: :math:`x \\in [-10, 10]`
.. figure:: figures/Problem08.png
:alt: Univariate Problem08 function
:align: center
**Univariate Problem08 function**
*Global optimum*: :math:`f(x)=-14.508` for :math:`x = -7.083506`
"""
def __init__(self, dimensions=1):
Benchmark.__init__(self, dimensions)
self._bounds = [(-10, 10)]
self.global_optimum = -7.083506
self.fglob = -14.508
def fun(self, x, *args):
self.nfev += 1
x = x[0]
y = 0.0
for k in range(1, 6):
y += k * cos((k + 1) * x + k)
return -y
| Problem08 |
python | huggingface__transformers | src/transformers/models/lxmert/modeling_lxmert.py | {
"start": 18089,
"end": 18642
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
| LxmertOutput |
python | pennersr__django-allauth | allauth/socialaccount/admin.py | {
"start": 1022,
"end": 1366
} | class ____(admin.ModelAdmin):
search_fields = []
raw_id_fields = ("user",)
list_display = ("user", "uid", "provider")
list_filter = ("provider",)
def get_search_fields(self, request):
base_fields = get_adapter().get_user_search_fields()
return list(map(lambda a: "user__" + a, base_fields))
| SocialAccountAdmin |
python | pytorch__pytorch | test/dynamo/test_aot_compile.py | {
"start": 2882,
"end": 2984
} | class ____(torch.nn.Module):
def forward(self, x):
return super().forward(x)
| MultiModalMixin |
python | realpython__materials | python-type-checking/hearts.py | {
"start": 151,
"end": 976
} | class ____:
SUITS = "♠ ♡ ♢ ♣".split()
RANKS = "2 3 4 5 6 7 8 9 10 J Q K A".split()
def __init__(self, suit: str, rank: str) -> None:
self.suit = suit
self.rank = rank
@property
def value(self) -> int:
"""The value of a card is rank as a number"""
return self.RANKS.index(self.rank)
@property
def points(self) -> int:
"""Points this card is worth"""
if self.suit == "♠" and self.rank == "Q":
return 13
if self.suit == "♡":
return 1
return 0
def __eq__(self, other: Any) -> Any:
return self.suit == other.suit and self.rank == other.rank
def __lt__(self, other: Any) -> Any:
return self.value < other.value
def __repr__(self) -> str:
return f"{self.suit}{self.rank}"
| Card |
python | coleifer__peewee | tests/sql.py | {
"start": 484,
"end": 43074
} | class ____(BaseTestCase):
def test_select(self):
query = (User
.select(User.c.id, User.c.username)
.where(User.c.username == 'foo'))
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."username" '
'FROM "users" AS "t1" '
'WHERE ("t1"."username" = ?)'), ['foo'])
query = (User
.select(User.c['id'], User.c['username'])
.where(User.c['username'] == 'test'))
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."username" '
'FROM "users" AS "t1" '
'WHERE ("t1"."username" = ?)'), ['test'])
def test_select_extend(self):
query = User.select(User.c.id, User.c.username)
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1"'), [])
query = query.select(User.c.username, User.c.is_admin)
self.assertSQL(query, (
'SELECT "t1"."username", "t1"."is_admin" FROM "users" AS "t1"'),
[])
query = query.select_extend(User.c.is_active, User.c.id)
self.assertSQL(query, (
'SELECT "t1"."username", "t1"."is_admin", "t1"."is_active", '
'"t1"."id" FROM "users" AS "t1"'), [])
def test_selected_columns(self):
query = (User
.select(User.c.id, User.c.username, fn.COUNT(Tweet.c.id))
.join(Tweet, JOIN.LEFT_OUTER,
on=(User.c.id == Tweet.c.user_id)))
# NOTE: because of operator overloads for equality we have to test by
# asserting the attributes of the selected cols.
c_id, c_username, c_ct = query.selected_columns
self.assertEqual(c_id.name, 'id')
self.assertTrue(c_id.source is User)
self.assertEqual(c_username.name, 'username')
self.assertTrue(c_username.source is User)
self.assertTrue(isinstance(c_ct, Function))
self.assertEqual(c_ct.name, 'COUNT')
c_tid, = c_ct.arguments
self.assertEqual(c_tid.name, 'id')
self.assertTrue(c_tid.source is Tweet)
query.selected_columns = (User.c.username,)
c_username, = query.selected_columns
self.assertEqual(c_username.name, 'username')
self.assertTrue(c_username.source is User)
def test_select_explicit_columns(self):
query = (Person
.select()
.where(Person.dob < datetime.date(1980, 1, 1)))
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."name", "t1"."dob" '
'FROM "person" AS "t1" '
'WHERE ("t1"."dob" < ?)'), [datetime.date(1980, 1, 1)])
def test_select_in_list_of_values(self):
names_vals = [
['charlie', 'huey'],
('charlie', 'huey'),
set(('charlie', 'huey')),
frozenset(('charlie', 'huey'))]
for names in names_vals:
query = (Person
.select()
.where(Person.name.in_(names)))
sql, params = Context().sql(query).query()
self.assertEqual(sql, (
'SELECT "t1"."id", "t1"."name", "t1"."dob" '
'FROM "person" AS "t1" '
'WHERE ("t1"."name" IN (?, ?))'))
self.assertEqual(sorted(params), ['charlie', 'huey'])
query = (Person
.select()
.where(Person.id.in_(range(1, 10, 2))))
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."name", "t1"."dob" '
'FROM "person" AS "t1" '
'WHERE ("t1"."id" IN (?, ?, ?, ?, ?))'), [1, 3, 5, 7, 9])
def test_select_subselect_function(self):
# For functions whose only argument is a subquery, we do not need to
# include additional parentheses -- in fact, some databases will report
# a syntax error if we do.
exists = fn.EXISTS(Tweet
.select(Tweet.c.id)
.where(Tweet.c.user_id == User.c.id))
query = User.select(User.c.username, exists.alias('has_tweet'))
self.assertSQL(query, (
'SELECT "t1"."username", EXISTS('
'SELECT "t2"."id" FROM "tweets" AS "t2" '
'WHERE ("t2"."user_id" = "t1"."id")) AS "has_tweet" '
'FROM "users" AS "t1"'), [])
# If the function has more than one argument, we need to wrap the
# subquery in parentheses.
Stat = Table('stat', ['id', 'val'])
SA = Stat.alias('sa')
subq = SA.select(fn.SUM(SA.val).alias('val_sum'))
query = Stat.select(fn.COALESCE(subq, 0))
self.assertSQL(query, (
'SELECT COALESCE(('
'SELECT SUM("sa"."val") AS "val_sum" FROM "stat" AS "sa"'
'), ?) FROM "stat" AS "t1"'), [0])
def test_subquery_in_select_sql(self):
subq = User.select(User.c.id).where(User.c.username == 'huey')
query = Tweet.select(Tweet.c.content,
Tweet.c.user_id.in_(subq).alias('is_huey'))
self.assertSQL(query, (
'SELECT "t1"."content", ("t1"."user_id" IN ('
'SELECT "t2"."id" FROM "users" AS "t2" WHERE ("t2"."username" = ?)'
')) AS "is_huey" FROM "tweets" AS "t1"'), ['huey'])
# If we explicitly specify an alias, it will be included.
subq = subq.alias('sq')
query = Tweet.select(Tweet.c.content,
Tweet.c.user_id.in_(subq).alias('is_huey'))
self.assertSQL(query, (
'SELECT "t1"."content", ("t1"."user_id" IN ('
'SELECT "t2"."id" FROM "users" AS "t2" WHERE ("t2"."username" = ?)'
') AS "sq") AS "is_huey" FROM "tweets" AS "t1"'), ['huey'])
def test_subquery_in_select_expression_sql(self):
Point = Table('point', ('x', 'y'))
PA = Point.alias('pa')
subq = PA.select(fn.SUM(PA.y).alias('sa')).where(PA.x == Point.x)
query = (Point
.select(Point.x, Point.y, subq.alias('sy'))
.order_by(Point.x, Point.y))
self.assertSQL(query, (
'SELECT "t1"."x", "t1"."y", ('
'SELECT SUM("pa"."y") AS "sa" FROM "point" AS "pa" '
'WHERE ("pa"."x" = "t1"."x")) AS "sy" '
'FROM "point" AS "t1" '
'ORDER BY "t1"."x", "t1"."y"'), [])
def test_star(self):
query = User.select(User.__star__)
self.assertSQL(query, ('SELECT "t1".* FROM "users" AS "t1"'), [])
query = (Tweet
.select(Tweet.__star__, User.__star__)
.join(User, on=(Tweet.c.user_id == User.c.id)))
self.assertSQL(query, (
'SELECT "t1".*, "t2".* '
'FROM "tweets" AS "t1" '
'INNER JOIN "users" AS "t2" ON ("t1"."user_id" = "t2"."id")'), [])
query = (Tweet
.select(Tweet.__star__, User.c.id)
.join(User, on=(Tweet.c.user_id == User.c.id)))
self.assertSQL(query, (
'SELECT "t1".*, "t2"."id" '
'FROM "tweets" AS "t1" '
'INNER JOIN "users" AS "t2" ON ("t1"."user_id" = "t2"."id")'), [])
def test_from_clause(self):
query = (Note
.select(Note.content, Person.name)
.from_(Note, Person)
.where(Note.person_id == Person.id)
.order_by(Note.id))
self.assertSQL(query, (
'SELECT "t1"."content", "t2"."name" '
'FROM "note" AS "t1", "person" AS "t2" '
'WHERE ("t1"."person_id" = "t2"."id") '
'ORDER BY "t1"."id"'), [])
def test_from_query(self):
inner = Person.select(Person.name)
query = (Person
.select(Person.name)
.from_(inner.alias('i1')))
self.assertSQL(query, (
'SELECT "t1"."name" '
'FROM (SELECT "t1"."name" FROM "person" AS "t1") AS "i1"'), [])
PA = Person.alias('pa')
inner = PA.select(PA.name).alias('i1')
query = (Person
.select(inner.c.name)
.from_(inner)
.order_by(inner.c.name))
self.assertSQL(query, (
'SELECT "i1"."name" '
'FROM (SELECT "pa"."name" FROM "person" AS "pa") AS "i1" '
'ORDER BY "i1"."name"'), [])
def test_join_explicit_columns(self):
query = (Note
.select(Note.content)
.join(Person, on=(Note.person_id == Person.id))
.where(Person.name == 'charlie')
.order_by(Note.id.desc()))
self.assertSQL(query, (
'SELECT "t1"."content" '
'FROM "note" AS "t1" '
'INNER JOIN "person" AS "t2" ON ("t1"."person_id" = "t2"."id") '
'WHERE ("t2"."name" = ?) '
'ORDER BY "t1"."id" DESC'), ['charlie'])
def test_multi_join(self):
Like = Table('likes')
LikeUser = User.alias('lu')
query = (Like
.select(Tweet.c.content, User.c.username, LikeUser.c.username)
.join(Tweet, on=(Like.c.tweet_id == Tweet.c.id))
.join(User, on=(Tweet.c.user_id == User.c.id))
.join(LikeUser, on=(Like.c.user_id == LikeUser.c.id))
.where(LikeUser.c.username == 'charlie')
.order_by(Tweet.c.timestamp))
self.assertSQL(query, (
'SELECT "t1"."content", "t2"."username", "lu"."username" '
'FROM "likes" AS "t3" '
'INNER JOIN "tweets" AS "t1" ON ("t3"."tweet_id" = "t1"."id") '
'INNER JOIN "users" AS "t2" ON ("t1"."user_id" = "t2"."id") '
'INNER JOIN "users" AS "lu" ON ("t3"."user_id" = "lu"."id") '
'WHERE ("lu"."username" = ?) '
'ORDER BY "t1"."timestamp"'), ['charlie'])
def test_correlated_subquery(self):
Employee = Table('employee', ['id', 'name', 'salary', 'dept'])
EA = Employee.alias('e2')
query = (Employee
.select(Employee.id, Employee.name)
.where(Employee.salary > (EA
.select(fn.AVG(EA.salary))
.where(EA.dept == Employee.dept))))
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."name" '
'FROM "employee" AS "t1" '
'WHERE ("t1"."salary" > ('
'SELECT AVG("e2"."salary") '
'FROM "employee" AS "e2" '
'WHERE ("e2"."dept" = "t1"."dept")))'), [])
def test_multiple_where(self):
"""Ensure multiple calls to WHERE are AND-ed together."""
query = (Person
.select(Person.name)
.where(Person.dob < datetime.date(1980, 1, 1))
.where(Person.dob > datetime.date(1950, 1, 1)))
self.assertSQL(query, (
'SELECT "t1"."name" '
'FROM "person" AS "t1" '
'WHERE (("t1"."dob" < ?) AND ("t1"."dob" > ?))'),
[datetime.date(1980, 1, 1), datetime.date(1950, 1, 1)])
def test_orwhere(self):
query = (Person
.select(Person.name)
.orwhere(Person.dob > datetime.date(1980, 1, 1))
.orwhere(Person.dob < datetime.date(1950, 1, 1)))
self.assertSQL(query, (
'SELECT "t1"."name" '
'FROM "person" AS "t1" '
'WHERE (("t1"."dob" > ?) OR ("t1"."dob" < ?))'),
[datetime.date(1980, 1, 1), datetime.date(1950, 1, 1)])
def test_limit(self):
base = User.select(User.c.id)
self.assertSQL(base.limit(None), (
'SELECT "t1"."id" FROM "users" AS "t1"'), [])
self.assertSQL(base.limit(10), (
'SELECT "t1"."id" FROM "users" AS "t1" LIMIT ?'), [10])
self.assertSQL(base.limit(10).offset(3), (
'SELECT "t1"."id" FROM "users" AS "t1" '
'LIMIT ? OFFSET ?'), [10, 3])
self.assertSQL(base.limit(0), (
'SELECT "t1"."id" FROM "users" AS "t1" LIMIT ?'), [0])
self.assertSQL(base.offset(3), (
'SELECT "t1"."id" FROM "users" AS "t1" OFFSET ?'), [3],
limit_max=None)
# Some databases do not support offset without corresponding LIMIT:
self.assertSQL(base.offset(3), (
'SELECT "t1"."id" FROM "users" AS "t1" LIMIT ? OFFSET ?'), [-1, 3],
limit_max=-1)
self.assertSQL(base.limit(0).offset(3), (
'SELECT "t1"."id" FROM "users" AS "t1" LIMIT ? OFFSET ?'), [0, 3],
limit_max=-1)
def test_simple_join(self):
query = (User
.select(
User.c.id,
User.c.username,
fn.COUNT(Tweet.c.id).alias('ct'))
.join(Tweet, on=(Tweet.c.user_id == User.c.id))
.group_by(User.c.id, User.c.username))
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."username", COUNT("t2"."id") AS "ct" '
'FROM "users" AS "t1" '
'INNER JOIN "tweets" AS "t2" ON ("t2"."user_id" = "t1"."id") '
'GROUP BY "t1"."id", "t1"."username"'), [])
def test_subquery(self):
inner = (Tweet
.select(fn.COUNT(Tweet.c.id).alias('ct'))
.where(Tweet.c.user == User.c.id))
query = (User
.select(User.c.username, inner.alias('iq'))
.order_by(User.c.username))
self.assertSQL(query, (
'SELECT "t1"."username", '
'(SELECT COUNT("t2"."id") AS "ct" '
'FROM "tweets" AS "t2" '
'WHERE ("t2"."user" = "t1"."id")) AS "iq" '
'FROM "users" AS "t1" ORDER BY "t1"."username"'), [])
def test_subquery_in_expr(self):
Team = Table('team')
Challenge = Table('challenge')
subq = Team.select(fn.COUNT(Team.c.id) + 1)
query = (Challenge
.select((Challenge.c.points / subq).alias('score'))
.order_by(SQL('score')))
self.assertSQL(query, (
'SELECT ("t1"."points" / ('
'SELECT (COUNT("t2"."id") + ?) FROM "team" AS "t2")) AS "score" '
'FROM "challenge" AS "t1" ORDER BY score'), [1])
def test_user_defined_alias(self):
UA = User.alias('alt')
query = (User
.select(User.c.id, User.c.username, UA.c.nuggz)
.join(UA, on=(User.c.id == UA.c.id))
.order_by(UA.c.nuggz))
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."username", "alt"."nuggz" '
'FROM "users" AS "t1" '
'INNER JOIN "users" AS "alt" ON ("t1"."id" = "alt"."id") '
'ORDER BY "alt"."nuggz"'), [])
def test_simple_cte(self):
cte = User.select(User.c.id).cte('user_ids')
query = (User
.select(User.c.username)
.where(User.c.id.in_(cte))
.with_cte(cte))
self.assertSQL(query, (
'WITH "user_ids" AS (SELECT "t1"."id" FROM "users" AS "t1") '
'SELECT "t2"."username" FROM "users" AS "t2" '
'WHERE ("t2"."id" IN "user_ids")'), [])
def test_two_ctes(self):
c1 = User.select(User.c.id).cte('user_ids')
c2 = User.select(User.c.username).cte('user_names')
query = (User
.select(c1.c.id, c2.c.username)
.where((c1.c.id == User.c.id) &
(c2.c.username == User.c.username))
.with_cte(c1, c2))
self.assertSQL(query, (
'WITH "user_ids" AS (SELECT "t1"."id" FROM "users" AS "t1"), '
'"user_names" AS (SELECT "t1"."username" FROM "users" AS "t1") '
'SELECT "user_ids"."id", "user_names"."username" '
'FROM "users" AS "t2" '
'WHERE (("user_ids"."id" = "t2"."id") AND '
'("user_names"."username" = "t2"."username"))'), [])
def test_select_from_cte(self):
# Use the "select_from()" helper on the CTE object.
cte = User.select(User.c.username).cte('user_cte')
query = cte.select_from(cte.c.username).order_by(cte.c.username)
self.assertSQL(query, (
'WITH "user_cte" AS (SELECT "t1"."username" FROM "users" AS "t1") '
'SELECT "user_cte"."username" FROM "user_cte" '
'ORDER BY "user_cte"."username"'), [])
# Test selecting from multiple CTEs, which is done manually.
c1 = User.select(User.c.username).where(User.c.is_admin == 1).cte('c1')
c2 = User.select(User.c.username).where(User.c.is_staff == 1).cte('c2')
query = (Select((c1, c2), (c1.c.username, c2.c.username))
.with_cte(c1, c2))
self.assertSQL(query, (
'WITH "c1" AS ('
'SELECT "t1"."username" FROM "users" AS "t1" '
'WHERE ("t1"."is_admin" = ?)), '
'"c2" AS ('
'SELECT "t1"."username" FROM "users" AS "t1" '
'WHERE ("t1"."is_staff" = ?)) '
'SELECT "c1"."username", "c2"."username" FROM "c1", "c2"'), [1, 1])
def test_materialize_cte(self):
cases = (
(True, 'MATERIALIZED '),
(False, 'NOT MATERIALIZED '),
(None, ''))
for materialized, clause in cases:
cte = (User
.select(User.c.id)
.cte('user_ids', materialized=materialized))
query = cte.select_from(cte.c.id).where(cte.c.id < 10)
self.assertSQL(query, (
'WITH "user_ids" AS %s('
'SELECT "t1"."id" FROM "users" AS "t1") '
'SELECT "user_ids"."id" FROM "user_ids" '
'WHERE ("user_ids"."id" < ?)') % clause, [10])
def test_fibonacci_cte(self):
q1 = Select(columns=(
Value(1).alias('n'),
Value(0).alias('fib_n'),
Value(1).alias('next_fib_n'))).cte('fibonacci', recursive=True)
n = (q1.c.n + 1).alias('n')
rterm = Select(columns=(
n,
q1.c.next_fib_n,
q1.c.fib_n + q1.c.next_fib_n)).from_(q1).where(n < 10)
cases = (
(q1.union_all, 'UNION ALL'),
(q1.union, 'UNION'))
for method, clause in cases:
cte = method(rterm)
query = cte.select_from(cte.c.n, cte.c.fib_n)
self.assertSQL(query, (
'WITH RECURSIVE "fibonacci" AS ('
'SELECT ? AS "n", ? AS "fib_n", ? AS "next_fib_n" '
'%s '
'SELECT ("fibonacci"."n" + ?) AS "n", "fibonacci"."next_fib_n", '
'("fibonacci"."fib_n" + "fibonacci"."next_fib_n") '
'FROM "fibonacci" '
'WHERE ("n" < ?)) '
'SELECT "fibonacci"."n", "fibonacci"."fib_n" '
'FROM "fibonacci"' % clause), [1, 0, 1, 1, 10])
def test_cte_with_count(self):
cte = User.select(User.c.id).cte('user_ids')
query = (User
.select(User.c.username)
.join(cte, on=(User.c.id == cte.c.id))
.with_cte(cte))
count = Select([query], [fn.COUNT(SQL('1'))])
self.assertSQL(count, (
'SELECT COUNT(1) FROM ('
'WITH "user_ids" AS (SELECT "t1"."id" FROM "users" AS "t1") '
'SELECT "t2"."username" FROM "users" AS "t2" '
'INNER JOIN "user_ids" ON ("t2"."id" = "user_ids"."id")) '
'AS "t3"'), [])
def test_cte_subquery_in_expression(self):
Order = Table('order', ('id', 'description'))
Item = Table('item', ('id', 'order_id', 'description'))
cte = Order.select(fn.MAX(Order.id).alias('max_id')).cte('max_order')
qexpr = (Order
.select(Order.id)
.join(cte, on=(Order.id == cte.c.max_id))
.with_cte(cte))
query = (Item
.select(Item.id, Item.order_id, Item.description)
.where(Item.order_id.in_(qexpr)))
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."order_id", "t1"."description" '
'FROM "item" AS "t1" '
'WHERE ("t1"."order_id" IN ('
'WITH "max_order" AS ('
'SELECT MAX("t2"."id") AS "max_id" FROM "order" AS "t2") '
'SELECT "t3"."id" '
'FROM "order" AS "t3" '
'INNER JOIN "max_order" '
'ON ("t3"."id" = "max_order"."max_id")))'), [])
def test_multi_update_cte(self):
data = [(i, 'u%sx' % i) for i in range(1, 3)]
vl = ValuesList(data)
cte = vl.select().cte('uv', columns=('id', 'username'))
subq = cte.select(cte.c.username).where(cte.c.id == User.c.id)
query = (User
.update(username=subq)
.where(User.c.id.in_(cte.select(cte.c.id)))
.with_cte(cte))
self.assertSQL(query, (
'WITH "uv" ("id", "username") AS ('
'SELECT * FROM (VALUES (?, ?), (?, ?)) AS "t1") '
'UPDATE "users" SET "username" = ('
'SELECT "uv"."username" FROM "uv" '
'WHERE ("uv"."id" = "users"."id")) '
'WHERE ("users"."id" IN (SELECT "uv"."id" FROM "uv"))'),
[1, 'u1x', 2, 'u2x'])
def test_data_modifying_cte_delete(self):
Product = Table('products', ('id', 'name', 'timestamp'))
Archive = Table('archive', ('id', 'name', 'timestamp'))
query = (Product.delete()
.where(Product.timestamp < datetime.date(2022, 1, 1))
.returning(Product.id, Product.name, Product.timestamp))
cte = query.cte('moved_rows')
src = Select((cte,), (cte.c.id, cte.c.name, cte.c.timestamp))
iq = (Archive
.insert(src, (Archive.id, Archive.name, Archive.timestamp))
.with_cte(cte))
self.assertSQL(iq, (
'WITH "moved_rows" AS ('
'DELETE FROM "products" WHERE ("products"."timestamp" < ?) '
'RETURNING "products"."id", "products"."name", '
'"products"."timestamp") '
'INSERT INTO "archive" ("id", "name", "timestamp") '
'SELECT "moved_rows"."id", "moved_rows"."name", '
'"moved_rows"."timestamp" FROM "moved_rows"'),
[datetime.date(2022, 1, 1)])
Part = Table('parts', ('id', 'part', 'sub_part'))
base = (Part
.select(Part.sub_part, Part.part)
.where(Part.part == 'p')
.cte('included_parts', recursive=True,
columns=('sub_part', 'part')))
PA = Part.alias('p')
recursive = (PA
.select(PA.sub_part, PA.part)
.join(base, on=(PA.part == base.c.sub_part)))
cte = base.union_all(recursive)
sq = Select((cte,), (cte.c.part,))
query = (Part.delete()
.where(Part.part.in_(sq))
.with_cte(cte))
self.assertSQL(query, (
'WITH RECURSIVE "included_parts" ("sub_part", "part") AS ('
'SELECT "t1"."sub_part", "t1"."part" FROM "parts" AS "t1" '
'WHERE ("t1"."part" = ?) '
'UNION ALL '
'SELECT "p"."sub_part", "p"."part" '
'FROM "parts" AS "p" '
'INNER JOIN "included_parts" '
'ON ("p"."part" = "included_parts"."sub_part")) '
'DELETE FROM "parts" '
'WHERE ("parts"."part" IN ('
'SELECT "included_parts"."part" FROM "included_parts"))'), ['p'])
def test_data_modifying_cte_update(self):
Product = Table('products', ('id', 'name', 'price'))
Archive = Table('archive', ('id', 'name', 'price'))
query = (Product
.update(price=Product.price * 1.05)
.returning(Product.id, Product.name, Product.price))
cte = query.cte('t')
sq = cte.select_from(cte.c.id, cte.c.name, cte.c.price)
self.assertSQL(sq, (
'WITH "t" AS ('
'UPDATE "products" SET "price" = ("products"."price" * ?) '
'RETURNING "products"."id", "products"."name", "products"."price")'
' SELECT "t"."id", "t"."name", "t"."price" FROM "t"'), [1.05])
sq = Select((cte,), (cte.c.id, cte.c.price))
uq = (Archive
.update(price=sq.c.price)
.from_(sq)
.where(Archive.id == sq.c.id)
.with_cte(cte))
self.assertSQL(uq, (
'WITH "t" AS ('
'UPDATE "products" SET "price" = ("products"."price" * ?) '
'RETURNING "products"."id", "products"."name", "products"."price")'
' UPDATE "archive" SET "price" = "t1"."price"'
' FROM (SELECT "t"."id", "t"."price" FROM "t") AS "t1"'
' WHERE ("archive"."id" = "t1"."id")'), [1.05])
    def test_data_modifying_cte_insert(self):
        """An INSERT ... RETURNING query wrapped in a CTE.

        The CTE ("t") is first consumed by a plain SELECT, then used as the
        source for an INSERT-from-query into another table.
        """
        Product = Table('products', ('id', 'name', 'price'))
        Archive = Table('archive', ('id', 'name', 'price'))
        query = (Product
                 .insert({'name': 'p1', 'price': 10})
                 .returning(Product.id, Product.name, Product.price))
        cte = query.cte('t')
        sq = cte.select_from(cte.c.id, cte.c.name, cte.c.price)
        self.assertSQL(sq, (
            'WITH "t" AS ('
            'INSERT INTO "products" ("name", "price") VALUES (?, ?) '
            'RETURNING "products"."id", "products"."name", "products"."price")'
            ' SELECT "t"."id", "t"."name", "t"."price" FROM "t"'),
            ['p1', 10])
        # Re-use the data-modifying CTE as the source of an INSERT-from-query.
        sq = Select((cte,), (cte.c.id, cte.c.name, cte.c.price))
        iq = (Archive
              .insert(sq, (sq.c.id, sq.c.name, sq.c.price))
              .with_cte(cte))
        self.assertSQL(iq, (
            'WITH "t" AS ('
            'INSERT INTO "products" ("name", "price") VALUES (?, ?) '
            'RETURNING "products"."id", "products"."name", "products"."price")'
            ' INSERT INTO "archive" ("id", "name", "price")'
            ' SELECT "t"."id", "t"."name", "t"."price" FROM "t"'), ['p1', 10])
    def test_complex_select(self):
        """Two chained CTEs: the second CTE selects from the first, and the
        outer query filters using an IN-subquery against the second CTE.
        """
        Order = Table('orders', columns=(
            'region',
            'amount',
            'product',
            'quantity'))
        # First CTE: total sales per region.
        regional_sales = (Order
                          .select(
                              Order.region,
                              fn.SUM(Order.amount).alias('total_sales'))
                          .group_by(Order.region)
                          .cte('regional_sales'))
        # Second CTE: regions whose sales exceed 10% of the overall total;
        # note it queries the first CTE, both in FROM and in the subquery.
        top_regions = (regional_sales
                       .select(regional_sales.c.region)
                       .where(regional_sales.c.total_sales > (
                           regional_sales.select(
                               fn.SUM(regional_sales.c.total_sales) / 10)))
                       .cte('top_regions'))
        # Outer query restricts orders to the top regions via "<<" (IN).
        query = (Order
                 .select(
                     Order.region,
                     Order.product,
                     fn.SUM(Order.quantity).alias('product_units'),
                     fn.SUM(Order.amount).alias('product_sales'))
                 .where(
                     Order.region << top_regions.select(top_regions.c.region))
                 .group_by(Order.region, Order.product)
                 .with_cte(regional_sales, top_regions))
        self.assertSQL(query, (
            'WITH "regional_sales" AS ('
            'SELECT "t1"."region", SUM("t1"."amount") AS "total_sales" '
            'FROM "orders" AS "t1" '
            'GROUP BY "t1"."region"'
            '), '
            '"top_regions" AS ('
            'SELECT "regional_sales"."region" '
            'FROM "regional_sales" '
            'WHERE ("regional_sales"."total_sales" > '
            '(SELECT (SUM("regional_sales"."total_sales") / ?) '
            'FROM "regional_sales"))'
            ') '
            'SELECT "t2"."region", "t2"."product", '
            'SUM("t2"."quantity") AS "product_units", '
            'SUM("t2"."amount") AS "product_sales" '
            'FROM "orders" AS "t2" '
            'WHERE ('
            '"t2"."region" IN ('
            'SELECT "top_regions"."region" '
            'FROM "top_regions")'
            ') GROUP BY "t2"."region", "t2"."product"'), [10])
    def test_compound_select(self):
        """Chained "|" (UNION) of three selects; each sub-select keeps its own
        table alias and the unions are rendered flat (no parentheses).
        """
        lhs = User.select(User.c.id).where(User.c.username == 'charlie')
        rhs = User.select(User.c.username).where(User.c.admin == True)
        q2 = (lhs | rhs)
        UA = User.alias('U2')
        q3 = q2 | UA.select(UA.c.id).where(UA.c.superuser == False)
        self.assertSQL(q3, (
            'SELECT "t1"."id" '
            'FROM "users" AS "t1" '
            'WHERE ("t1"."username" = ?) '
            'UNION '
            'SELECT "t2"."username" '
            'FROM "users" AS "t2" '
            'WHERE ("t2"."admin" = ?) '
            'UNION '
            'SELECT "U2"."id" '
            'FROM "users" AS "U2" '
            'WHERE ("U2"."superuser" = ?)'), ['charlie', True, False])
    def test_compound_operations(self):
        """The .union() and .except_() compound-select methods.

        Checks both operand order (xcept is editors EXCEPT admin) and that
        Value() params are emitted alongside the boolean params.
        """
        admin = (User
                 .select(User.c.username, Value('admin').alias('role'))
                 .where(User.c.is_admin == True))
        editors = (User
                   .select(User.c.username, Value('editor').alias('role'))
                   .where(User.c.is_editor == True))
        union = admin.union(editors)
        self.assertSQL(union, (
            'SELECT "t1"."username", ? AS "role" '
            'FROM "users" AS "t1" '
            'WHERE ("t1"."is_admin" = ?) '
            'UNION '
            'SELECT "t2"."username", ? AS "role" '
            'FROM "users" AS "t2" '
            'WHERE ("t2"."is_editor" = ?)'), ['admin', 1, 'editor', 1])
        xcept = editors.except_(admin)
        self.assertSQL(xcept, (
            'SELECT "t1"."username", ? AS "role" '
            'FROM "users" AS "t1" '
            'WHERE ("t1"."is_editor" = ?) '
            'EXCEPT '
            'SELECT "t2"."username", ? AS "role" '
            'FROM "users" AS "t2" '
            'WHERE ("t2"."is_admin" = ?)'), ['editor', 1, 'admin', 1])
    def test_compound_parentheses_handling(self):
        """The compound_select_parentheses settings.

        Setting 1 ("always") parenthesizes and nests compound operands;
        setting 2 ("unnested") parenthesizes each simple query but flattens
        nested compounds. Queries with ORDER BY/LIMIT require the parens.
        """
        admin = (User
                 .select(User.c.username, Value('admin').alias('role'))
                 .where(User.c.is_admin == True)
                 .order_by(User.c.id.desc())
                 .limit(3))
        editors = (User
                   .select(User.c.username, Value('editor').alias('role'))
                   .where(User.c.is_editor == True)
                   .order_by(User.c.id.desc())
                   .limit(5))
        self.assertSQL((admin | editors), (
            '(SELECT "t1"."username", ? AS "role" FROM "users" AS "t1" '
            'WHERE ("t1"."is_admin" = ?) ORDER BY "t1"."id" DESC LIMIT ?) '
            'UNION '
            '(SELECT "t2"."username", ? AS "role" FROM "users" AS "t2" '
            'WHERE ("t2"."is_editor" = ?) ORDER BY "t2"."id" DESC LIMIT ?)'),
            ['admin', 1, 3, 'editor', 1, 5], compound_select_parentheses=True)
        Reg = Table('register', ('value',))
        lhs = Reg.select().where(Reg.value < 2)
        rhs = Reg.select().where(Reg.value > 7)
        compound = lhs | rhs
        # A two-way compound renders the same under both settings.
        for csq_setting in (1, 2):
            self.assertSQL(compound, (
                '(SELECT "t1"."value" FROM "register" AS "t1" '
                'WHERE ("t1"."value" < ?)) '
                'UNION '
                '(SELECT "t2"."value" FROM "register" AS "t2" '
                'WHERE ("t2"."value" > ?))'),
                [2, 7], compound_select_parentheses=csq_setting)
        rhs2 = Reg.select().where(Reg.value == 5)
        c2 = compound | rhs2
        # CSQ = always, we get nested parentheses.
        self.assertSQL(c2, (
            '((SELECT "t1"."value" FROM "register" AS "t1" '
            'WHERE ("t1"."value" < ?)) '
            'UNION '
            '(SELECT "t2"."value" FROM "register" AS "t2" '
            'WHERE ("t2"."value" > ?))) '
            'UNION '
            '(SELECT "t2"."value" FROM "register" AS "t2" '
            'WHERE ("t2"."value" = ?))'),
            [2, 7, 5], compound_select_parentheses=1)  # Always.
        # CSQ = unnested, no nesting but all individual queries have parens.
        self.assertSQL(c2, (
            '(SELECT "t1"."value" FROM "register" AS "t1" '
            'WHERE ("t1"."value" < ?)) '
            'UNION '
            '(SELECT "t2"."value" FROM "register" AS "t2" '
            'WHERE ("t2"."value" > ?)) '
            'UNION '
            '(SELECT "t2"."value" FROM "register" AS "t2" '
            'WHERE ("t2"."value" = ?))'),
            [2, 7, 5], compound_select_parentheses=2)  # Un-nested.
    def test_compound_select_order_limit(self):
        """ORDER BY / LIMIT applied to the compound as a whole refer to the
        shared column alias ("foo"), not to any one sub-select.
        """
        A = Table('a', ('col_a',))
        B = Table('b', ('col_b',))
        C = Table('c', ('col_c',))
        q1 = A.select(A.col_a.alias('foo'))
        q2 = B.select(B.col_b.alias('foo'))
        q3 = C.select(C.col_c.alias('foo'))
        qc = (q1 | q2 | q3)
        qc = qc.order_by(qc.c.foo.desc()).limit(3)
        self.assertSQL(qc, (
            'SELECT "t1"."col_a" AS "foo" FROM "a" AS "t1" UNION '
            'SELECT "t2"."col_b" AS "foo" FROM "b" AS "t2" UNION '
            'SELECT "t3"."col_c" AS "foo" FROM "c" AS "t3" '
            'ORDER BY "foo" DESC LIMIT ?'), [3])
        # With parentheses enabled, ORDER BY/LIMIT stay outside the parens.
        self.assertSQL(qc, (
            '((SELECT "t1"."col_a" AS "foo" FROM "a" AS "t1") UNION '
            '(SELECT "t2"."col_b" AS "foo" FROM "b" AS "t2")) UNION '
            '(SELECT "t3"."col_c" AS "foo" FROM "c" AS "t3") '
            'ORDER BY "foo" DESC LIMIT ?'),
            [3], compound_select_parentheses=1)
    def test_compound_select_as_subquery(self):
        """A compound select used as an inner FROM-subquery, with the outer
        query grouping and aggregating over the union's shared column.
        """
        A = Table('a', ('col_a',))
        B = Table('b', ('col_b',))
        q1 = A.select(A.col_a.alias('foo'))
        q2 = B.select(B.col_b.alias('foo'))
        union = q1 | q2
        # Create an outer query and do grouping.
        outer = (union
                 .select_from(union.c.foo, fn.COUNT(union.c.foo).alias('ct'))
                 .group_by(union.c.foo))
        self.assertSQL(outer, (
            'SELECT "t1"."foo", COUNT("t1"."foo") AS "ct" FROM ('
            'SELECT "t2"."col_a" AS "foo" FROM "a" AS "t2" UNION '
            'SELECT "t3"."col_b" AS "foo" FROM "b" AS "t3") AS "t1" '
            'GROUP BY "t1"."foo"'), [])
    def test_join_on_query(self):
        """Joining against an aliased sub-select; the join condition refers to
        the subquery through its explicit alias ("j1").
        """
        inner = User.select(User.c.id).alias('j1')
        query = (Tweet
                 .select(Tweet.c.content)
                 .join(inner, on=(Tweet.c.user_id == inner.c.id)))
        self.assertSQL(query, (
            'SELECT "t1"."content" FROM "tweets" AS "t1" '
            'INNER JOIN (SELECT "t2"."id" FROM "users" AS "t2") AS "j1" '
            'ON ("t1"."user_id" = "j1"."id")'), [])
    def test_join_on_misc(self):
        """An arbitrary (aliased function-call) expression as a join predicate
        is rendered verbatim in the ON clause, including its alias.
        """
        cond = fn.Magic(Person.id, Note.id).alias('magic')
        query = Person.select(Person.id).join(Note, on=cond)
        self.assertSQL(query, (
            'SELECT "t1"."id" FROM "person" AS "t1" '
            'INNER JOIN "note" AS "t2" '
            'ON Magic("t1"."id", "t2"."id") AS "magic"'), [])
    def test_all_clauses(self):
        """A query exercising every major clause at once: JOIN, WHERE,
        GROUP BY, HAVING (on an aggregate alias) and ORDER BY.
        """
        count = fn.COUNT(Tweet.c.id).alias('ct')
        query = (User
                 .select(User.c.username, count)
                 .join(Tweet, JOIN.LEFT_OUTER,
                       on=(User.c.id == Tweet.c.user_id))
                 .where(User.c.is_admin == 1)
                 .group_by(User.c.username)
                 .having(count > 10)
                 .order_by(count.desc()))
        self.assertSQL(query, (
            'SELECT "t1"."username", COUNT("t2"."id") AS "ct" '
            'FROM "users" AS "t1" '
            'LEFT OUTER JOIN "tweets" AS "t2" '
            'ON ("t1"."id" = "t2"."user_id") '
            'WHERE ("t1"."is_admin" = ?) '
            'GROUP BY "t1"."username" '
            'HAVING ("ct" > ?) '
            'ORDER BY "ct" DESC'), [1, 10])
    def test_order_by_collate(self):
        """ORDER BY with an explicit collation appended via asc(collation=...)."""
        query = (User
                 .select(User.c.username)
                 .order_by(User.c.username.asc(collation='binary')))
        self.assertSQL(query, (
            'SELECT "t1"."username" FROM "users" AS "t1" '
            'ORDER BY "t1"."username" ASC COLLATE binary'), [])
    def test_order_by_nulls(self):
        """NULLS FIRST/LAST ordering.

        With nulls_ordering=True the clause is emitted verbatim (note the
        lowercase 'first' passes through unchanged); with
        nulls_ordering=False it is emulated with a CASE WHEN ... IS NULL
        expression whose 0/1 params encode first-vs-last placement.
        """
        query = (User
                 .select(User.c.username)
                 .order_by(User.c.ts.desc(nulls='LAST')))
        self.assertSQL(query, (
            'SELECT "t1"."username" FROM "users" AS "t1" '
            'ORDER BY "t1"."ts" DESC NULLS LAST'), [], nulls_ordering=True)
        self.assertSQL(query, (
            'SELECT "t1"."username" FROM "users" AS "t1" '
            'ORDER BY CASE WHEN ("t1"."ts" IS NULL) THEN ? ELSE ? END, '
            '"t1"."ts" DESC'), [1, 0], nulls_ordering=False)
        query = (User
                 .select(User.c.username)
                 .order_by(User.c.ts.desc(nulls='first')))
        self.assertSQL(query, (
            'SELECT "t1"."username" FROM "users" AS "t1" '
            'ORDER BY "t1"."ts" DESC NULLS first'), [], nulls_ordering=True)
        self.assertSQL(query, (
            'SELECT "t1"."username" FROM "users" AS "t1" '
            'ORDER BY CASE WHEN ("t1"."ts" IS NULL) THEN ? ELSE ? END, '
            '"t1"."ts" DESC'), [0, 1], nulls_ordering=False)
    def test_in_value_representation(self):
        """IN with a list renders one "?" placeholder per element."""
        query = (User
                 .select(User.c.id)
                 .where(User.c.username.in_(['foo', 'bar', 'baz'])))
        self.assertSQL(query, (
            'SELECT "t1"."id" FROM "users" AS "t1" '
            'WHERE ("t1"."username" IN (?, ?, ?))'), ['foo', 'bar', 'baz'])
    def test_tuple_comparison(self):
        """Row-value (tuple) comparison: Tuple(...) == plain-tuple or Tuple()
        both render as ("col", "col") = (?, ?).
        """
        name_dob = Tuple(Person.name, Person.dob)
        query = (Person
                 .select(Person.id)
                 .where(name_dob == ('foo', '2017-01-01')))
        expected = ('SELECT "t1"."id" FROM "person" AS "t1" '
                    'WHERE (("t1"."name", "t1"."dob") = (?, ?))')
        self.assertSQL(query, expected, ['foo', '2017-01-01'])
        # Also works specifying rhs values as Tuple().
        query = (Person
                 .select(Person.id)
                 .where(name_dob == Tuple('foo', '2017-01-01')))
        self.assertSQL(query, expected, ['foo', '2017-01-01'])
    def test_tuple_comparison_subquery(self):
        """Row-value IN against a two-column subquery."""
        PA = Person.alias('pa')
        subquery = (PA
                    .select(PA.name, PA.id)
                    .where(PA.name != 'huey'))
        query = (Person
                 .select(Person.name)
                 .where(Tuple(Person.name, Person.id).in_(subquery)))
        self.assertSQL(query, (
            'SELECT "t1"."name" FROM "person" AS "t1" '
            'WHERE (("t1"."name", "t1"."id") IN ('
            'SELECT "pa"."name", "pa"."id" FROM "person" AS "pa" '
            'WHERE ("pa"."name" != ?)))'), ['huey'])
    def test_empty_in(self):
        """IN/NOT IN with an empty list degrades to constant predicates:
        (0 = 1) for IN (always false) and (1 = 1) for NOT IN (always true).
        """
        query = User.select(User.c.id).where(User.c.username.in_([]))
        self.assertSQL(query, (
            'SELECT "t1"."id" FROM "users" AS "t1" '
            'WHERE (0 = 1)'), [])
        query = User.select(User.c.id).where(User.c.username.not_in([]))
        self.assertSQL(query, (
            'SELECT "t1"."id" FROM "users" AS "t1" '
            'WHERE (1 = 1)'), [])
    def test_add_custom_op(self):
        """A user-defined operator built directly from Expression (here "%",
        modulo) composes with the rest of the query builder.
        """
        def mod(lhs, rhs):
            return Expression(lhs, '%', rhs)
        Stat = Table('stats')
        query = (Stat
                 .select(fn.COUNT(Stat.c.id))
                 .where(mod(Stat.c.index, 10) == 0))
        self.assertSQL(query, (
            'SELECT COUNT("t1"."id") FROM "stats" AS "t1" '
            'WHERE (("t1"."index" % ?) = ?)'), [10, 0])
    def test_where_convert_to_is_null(self):
        """Equality comparison against None is converted to IS NULL."""
        Note = Table('notes', ('id', 'content', 'user_id'))
        query = Note.select().where(Note.user_id == None)
        self.assertSQL(query, (
            'SELECT "t1"."id", "t1"."content", "t1"."user_id" '
            'FROM "notes" AS "t1" WHERE ("t1"."user_id" IS NULL)'), [])
def test_like_escape(self):
T = Table('tbl', ('key',))
def assertLike(expr, expected):
query = T.select().where(expr)
sql, params = __sql__(T.select().where(expr))
match_obj = re.search(r'\("t1"."key" (ILIKE[^\)]+)\)', sql)
if match_obj is None:
raise AssertionError('LIKE expression not found in query.')
like, = match_obj.groups()
self.assertEqual((like, params), expected)
cases = (
(T.key.contains('base'), ('ILIKE ?', ['%base%'])),
(T.key.contains('x_y'), ("ILIKE ? ESCAPE ?", ['%x\\_y%', '\\'])),
(T.key.contains('__y'), ("ILIKE ? ESCAPE ?", ['%\\_\\_y%', '\\'])),
(T.key.contains('%'), ("ILIKE ? ESCAPE ?", ['%\\%%', '\\'])),
(T.key.contains('_%'), ("ILIKE ? ESCAPE ?", ['%\\_\\%%', '\\'])),
(T.key.startswith('base'), ("ILIKE ?", ['base%'])),
(T.key.startswith('x_y'), ("ILIKE ? ESCAPE ?", ['x\\_y%', '\\'])),
(T.key.startswith('x%'), ("ILIKE ? ESCAPE ?", ['x\\%%', '\\'])),
(T.key.startswith('_%'), ("ILIKE ? ESCAPE ?", ['\\_\\%%', '\\'])),
(T.key.endswith('base'), ("ILIKE ?", ['%base'])),
(T.key.endswith('x_y'), ("ILIKE ? ESCAPE ?", ['%x\\_y', '\\'])),
(T.key.endswith('x%'), ("ILIKE ? ESCAPE ?", ['%x\\%', '\\'])),
(T.key.endswith('_%'), ("ILIKE ? ESCAPE ?", ['%\\_\\%', '\\'])),
)
for expr, expected in cases:
assertLike(expr, expected)
    def test_like_expr(self):
        """Case-sensitive .like() renders LIKE; .ilike() renders ILIKE."""
        query = User.select(User.c.id).where(User.c.username.like('%foo%'))
        self.assertSQL(query, (
            'SELECT "t1"."id" FROM "users" AS "t1" '
            'WHERE ("t1"."username" LIKE ?)'), ['%foo%'])
        query = User.select(User.c.id).where(User.c.username.ilike('%foo%'))
        self.assertSQL(query, (
            'SELECT "t1"."id" FROM "users" AS "t1" '
            'WHERE ("t1"."username" ILIKE ?)'), ['%foo%'])
    def test_field_ops(self):
        """Column operators: .regexp() renders REGEXP; .contains() renders
        ILIKE with the term wrapped in "%" wildcards.
        """
        query = User.select(User.c.id).where(User.c.username.regexp('[a-z]+'))
        self.assertSQL(query, (
            'SELECT "t1"."id" FROM "users" AS "t1" '
            'WHERE ("t1"."username" REGEXP ?)'), ['[a-z]+'])
        query = User.select(User.c.id).where(User.c.username.contains('abc'))
        self.assertSQL(query, (
            'SELECT "t1"."id" FROM "users" AS "t1" '
            'WHERE ("t1"."username" ILIKE ?)'), ['%abc%'])
| TestSelectQuery |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 27089,
"end": 27271
} | class ____(ProjectTranslationsMixin, CreateView):
success_message = _("Translation created")
template_name = "projects/project_translations_form.html"
| ProjectTranslationsCreate |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels37.py | {
"start": 315,
"end": 1624
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels37.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [48498944, 48508928]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {
"value": True,
"border": {"color": "red", "width": 1, "dash_type": "dash"},
"fill": {"color": "#00B050"},
},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | django/db/models/functions/json.py | {
"start": 236,
"end": 2499
} | class ____(Func):
function = "JSON_ARRAY"
output_field = JSONField()
def as_sql(self, compiler, connection, **extra_context):
if not connection.features.supports_json_field:
raise NotSupportedError(
"JSONFields are not supported on this database backend."
)
return super().as_sql(compiler, connection, **extra_context)
def as_native(self, compiler, connection, *, returning, **extra_context):
# PostgreSQL 16+ and Oracle remove SQL NULL values from the array by
# default. Adds the NULL ON NULL clause to keep NULL values in the
# array, mapping them to JSON null values, which matches the behavior
# of SQLite.
null_on_null = "NULL ON NULL" if len(self.get_source_expressions()) > 0 else ""
return self.as_sql(
compiler,
connection,
template=(
f"%(function)s(%(expressions)s {null_on_null} RETURNING {returning})"
),
**extra_context,
)
def as_postgresql(self, compiler, connection, **extra_context):
# Casting source expressions is only required using JSONB_BUILD_ARRAY
# or when using JSON_ARRAY on PostgreSQL 16+ with server-side bindings.
# This is done in all cases for consistency.
casted_obj = self.copy()
casted_obj.set_source_expressions(
[
(
# Conditional Cast to avoid unnecessary wrapping.
expression
if isinstance(expression, Cast)
else Cast(expression, expression.output_field)
)
for expression in casted_obj.get_source_expressions()
]
)
if connection.features.is_postgresql_16:
return casted_obj.as_native(
compiler, connection, returning="JSONB", **extra_context
)
return casted_obj.as_sql(
compiler,
connection,
function="JSONB_BUILD_ARRAY",
**extra_context,
)
def as_oracle(self, compiler, connection, **extra_context):
return self.as_native(compiler, connection, returning="CLOB", **extra_context)
| JSONArray |
python | astropy__astropy | astropy/visualization/wcsaxes/tests/test_frame.py | {
"start": 346,
"end": 1043
} | class ____(BaseFrame):
spine_names = "abcdef"
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
ymid = 0.5 * (ymin + ymax)
xmid1 = (xmin + xmax) / 4.0
xmid2 = (xmin + xmax) * 3.0 / 4.0
self["a"].data = np.array(([xmid1, ymin], [xmid2, ymin]))
self["b"].data = np.array(([xmid2, ymin], [xmax, ymid]))
self["c"].data = np.array(([xmax, ymid], [xmid2, ymax]))
self["d"].data = np.array(([xmid2, ymax], [xmid1, ymax]))
self["e"].data = np.array(([xmid1, ymax], [xmin, ymid]))
self["f"].data = np.array(([xmin, ymid], [xmid1, ymin]))
| HexagonalFrame |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 242624,
"end": 247766
} | class ____(SelectBase, ExecutableReturnsRows, Generative):
"""Wrap a :class:`_expression.TextClause` construct within a
:class:`_expression.SelectBase`
interface.
This allows the :class:`_expression.TextClause` object to gain a
``.c`` collection
and other FROM-like capabilities such as
:meth:`_expression.FromClause.alias`,
:meth:`_expression.SelectBase.cte`, etc.
The :class:`_expression.TextualSelect` construct is produced via the
:meth:`_expression.TextClause.columns`
method - see that method for details.
.. versionchanged:: 1.4 the :class:`_expression.TextualSelect`
class was renamed
from ``TextAsFrom``, to more correctly suit its role as a
SELECT-oriented object and not a FROM clause.
.. seealso::
:func:`_expression.text`
:meth:`_expression.TextClause.columns` - primary creation interface.
"""
__visit_name__ = "textual_select"
_label_style = LABEL_STYLE_NONE
_traverse_internals: _TraverseInternalsType = (
[
("element", InternalTraversal.dp_clauseelement),
("column_args", InternalTraversal.dp_clauseelement_list),
]
+ SupportsCloneAnnotations._clone_annotations_traverse_internals
+ HasCTE._has_ctes_traverse_internals
+ ExecutableStatement._executable_traverse_internals
)
_is_textual = True
is_text = True
is_select = True
def __init__(
self,
text: TextClause,
columns: List[_ColumnExpressionArgument[Any]],
positional: bool = False,
) -> None:
self._init(
text,
# convert for ORM attributes->columns, etc
[
coercions.expect(roles.LabeledColumnExprRole, c)
for c in columns
],
positional,
)
def _init(
self,
text: TextClause,
columns: List[NamedColumn[Any]],
positional: bool = False,
) -> None:
self.element = text
self.column_args = columns
self.positional = positional
@HasMemoized_ro_memoized_attribute
def selected_columns(
self,
) -> ColumnCollection[str, KeyedColumnElement[Any]]:
"""A :class:`_expression.ColumnCollection`
representing the columns that
this SELECT statement or similar construct returns in its result set,
not including :class:`_sql.TextClause` constructs.
This collection differs from the :attr:`_expression.FromClause.columns`
collection of a :class:`_expression.FromClause` in that the columns
within this collection cannot be directly nested inside another SELECT
statement; a subquery must be applied first which provides for the
necessary parenthesization required by SQL.
For a :class:`_expression.TextualSelect` construct, the collection
contains the :class:`_expression.ColumnElement` objects that were
passed to the constructor, typically via the
:meth:`_expression.TextClause.columns` method.
.. versionadded:: 1.4
"""
return ColumnCollection(
(c.key, c) for c in self.column_args
).as_readonly()
@util.ro_non_memoized_property
def _all_selected_columns(self) -> _SelectIterable:
return self.column_args
def set_label_style(self, style: SelectLabelStyle) -> TextualSelect:
return self
def _ensure_disambiguated_names(self) -> TextualSelect:
return self
@_generative
def bindparams(
self,
*binds: BindParameter[Any],
**bind_as_values: Any,
) -> Self:
self.element = self.element.bindparams(*binds, **bind_as_values)
return self
def _generate_fromclause_column_proxies(
self,
fromclause: FromClause,
columns: ColumnCollection[str, KeyedColumnElement[Any]],
primary_key: ColumnSet,
foreign_keys: Set[KeyedColumnElement[Any]],
*,
proxy_compound_columns: Optional[
Iterable[Sequence[ColumnElement[Any]]]
] = None,
) -> None:
if TYPE_CHECKING:
assert isinstance(fromclause, Subquery)
if proxy_compound_columns:
columns._populate_separate_keys(
c._make_proxy(
fromclause,
compound_select_cols=extra_cols,
primary_key=primary_key,
foreign_keys=foreign_keys,
)
for c, extra_cols in zip(
self.column_args, proxy_compound_columns
)
)
else:
columns._populate_separate_keys(
c._make_proxy(
fromclause,
primary_key=primary_key,
foreign_keys=foreign_keys,
)
for c in self.column_args
)
def _scalar_type(self) -> Union[TypeEngine[Any], Any]:
return self.column_args[0].type
TextAsFrom = TextualSelect
"""Backwards compatibility with the previous name"""
| TextualSelect |
python | facebookresearch__faiss | tests/test_residual_quantizer.py | {
"start": 6097,
"end": 11655
} | class ____(unittest.TestCase):
def test_training(self):
"""check that the error is in the same ballpark as PQ """
ds = datasets.SyntheticDataset(32, 3000, 1000, 0)
xt = ds.get_train()
xb = ds.get_database()
rq = faiss.ResidualQuantizer(ds.d, 4, 6)
rq.verbose
rq.verbose = True
#
rq.train_type = faiss.ResidualQuantizer.Train_default
rq.cp.verbose
# rq.cp.verbose = True
rq.train(xt)
err_rq = eval_codec(rq, xb)
pq = faiss.ProductQuantizer(ds.d, 4, 6)
pq.train(xt)
err_pq = eval_codec(pq, xb)
# in practice RQ is often better than PQ but it does not the case here, so just check
# that we are within some factor.
self.assertLess(err_rq, err_pq * 1.2)
def test_beam_size(self):
""" check that a larger beam gives a lower error """
ds = datasets.SyntheticDataset(32, 3000, 1000, 0)
xt = ds.get_train()
xb = ds.get_database()
rq0 = faiss.ResidualQuantizer(ds.d, 4, 6)
rq0.train_type = faiss.ResidualQuantizer.Train_default
rq0.max_beam_size = 2
rq0.train(xt)
err_rq0 = eval_codec(rq0, xb)
rq1 = faiss.ResidualQuantizer(ds.d, 4, 6)
rq1.train_type = faiss.ResidualQuantizer.Train_default
rq1.max_beam_size = 10
rq1.train(xt)
err_rq1 = eval_codec(rq1, xb)
self.assertLess(err_rq1, err_rq0)
def test_training_with_limited_mem(self):
""" make sure a different batch size gives the same result"""
ds = datasets.SyntheticDataset(32, 3000, 1000, 0)
xt = ds.get_train()
rq0 = faiss.ResidualQuantizer(ds.d, 4, 6)
rq0.train_type = faiss.ResidualQuantizer.Train_default
rq0.max_beam_size = 5
# rq0.verbose = True
rq0.train(xt)
cb0 = get_additive_quantizer_codebooks(rq0)
rq1 = faiss.ResidualQuantizer(ds.d, 4, 6)
rq1.train_type = faiss.ResidualQuantizer.Train_default
rq1.max_beam_size = 5
rq1.max_mem_distances
rq1.max_mem_distances = 3000 * ds.d * 4 * 3
# rq1.verbose = True
rq1.train(xt)
cb1 = get_additive_quantizer_codebooks(rq1)
for c0, c1 in zip(cb0, cb1):
self.assertTrue(np.all(c0 == c1))
def test_clipping(self):
""" verify that a clipped residual quantizer gives the same
code prefix + suffix as the full RQ """
ds = datasets.SyntheticDataset(32, 1000, 100, 0)
rq = faiss.ResidualQuantizer(ds.d, 5, 4)
rq.train_type = faiss.ResidualQuantizer.Train_default
rq.max_beam_size = 5
rq.train(ds.get_train())
rq.max_beam_size = 1 # is not he same for a large beam size
codes = rq.compute_codes(ds.get_database())
rq2 = faiss.ResidualQuantizer(ds.d, 2, 4)
rq2.initialize_from(rq)
self.assertEqual(rq2.M, 2)
# verify that the beginning of the codes are the same
codes2 = rq2.compute_codes(ds.get_database())
rq3 = faiss.ResidualQuantizer(ds.d, 3, 4)
rq3.initialize_from(rq, 2)
self.assertEqual(rq3.M, 3)
codes3 = rq3.compute_codes(ds.get_database() - rq2.decode(codes2))
# verify that prefixes are the same
for i in range(ds.nb):
br = faiss.BitstringReader(faiss.swig_ptr(codes[i]), rq.code_size)
br2 = faiss.BitstringReader(faiss.swig_ptr(codes2[i]), rq2.code_size)
self.assertEqual(br.read(rq2.tot_bits), br2.read(rq2.tot_bits))
br3 = faiss.BitstringReader(faiss.swig_ptr(codes3[i]), rq3.code_size)
self.assertEqual(br.read(rq3.tot_bits), br3.read(rq3.tot_bits))
###########################################################
# Test index, index factory sa_encode / sa_decode
###########################################################
def unpack_codes(rq, packed_codes):
nbits = faiss.vector_to_array(rq.nbits)
if np.all(nbits == 8):
return packed_codes.astype("uint32")
nbits = [int(x) for x in nbits]
nb = len(nbits)
n, code_size = packed_codes.shape
codes = np.zeros((n, nb), dtype="uint32")
for i in range(n):
br = faiss.BitstringReader(faiss.swig_ptr(packed_codes[i]), code_size)
for j, nbi in enumerate(nbits):
codes[i, j] = br.read(nbi)
return codes
def retrain_AQ_codebook(index, xt):
""" reference implementation of codebook retraining """
rq = index.rq
codes_packed = index.sa_encode(xt)
n, code_size = codes_packed.shape
x_decoded = index.sa_decode(codes_packed)
MSE = ((xt - x_decoded) ** 2).sum() / n
codes = unpack_codes(index.rq, codes_packed)
codebook_offsets = faiss.vector_to_array(rq.codebook_offsets)
# build sparse code matrix (represented as a dense matrix)
C = np.zeros((n, rq.total_codebook_size))
for i in range(n):
C[i][codes[i] + codebook_offsets[:-1]] = 1
# import pdb; pdb.set_trace()
# import scipy
# B, residuals, rank, singvals = np.linalg.lstsq(C, xt, rcond=None)
if True:
B, residuals, rank, singvals = np.linalg.lstsq(C, xt, rcond=None)
else:
import scipy.linalg
B, residuals, rank, singvals = scipy.linalg.lstsq(C, xt, )
MSE = ((C @ B - xt) ** 2).sum() / n
# replace codebook
# faiss.copy_array_to_vector(B.astype('float32').ravel(), index.rq.codebooks)
# update codebook tables
# index.rq.compute_codebook_tables()
return C, B
| TestResidualQuantizer |
python | huggingface__transformers | tests/models/udop/test_processing_udop.py | {
"start": 1233,
"end": 4346
} | class ____(ProcessorTesterMixin, unittest.TestCase):
tokenizer_class = UdopTokenizer
rust_tokenizer_class = UdopTokenizerFast
processor_class = UdopProcessor
maxDiff = None
@classmethod
def _setup_image_processor(cls):
image_processor_class = cls._get_component_class_from_processor("image_processor")
return image_processor_class(
do_resize=True,
size=224,
apply_ocr=True,
)
@classmethod
def _setup_tokenizer(cls):
tokenizer_class = cls._get_component_class_from_processor("tokenizer")
return tokenizer_class.from_pretrained("microsoft/udop-large")
@unittest.skip("UdopProcessor doesn't return pixel_values tensors")
def test_image_processor_defaults(self):
pass
def test_text_target(self):
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer")
processor = UdopProcessor(tokenizer=tokenizer, image_processor=image_processor)
text = "hello world"
expected_decoding = "hello world</s>"
encoding_processor = processor(text_target=text)
encoding_tokenizer = tokenizer(text_target=text)
self.assertListEqual(encoding_processor["input_ids"], [21820, 296, 1])
self.assertListEqual(encoding_processor["attention_mask"], [1, 1, 1])
self.assertDictEqual(dict(encoding_processor), dict(encoding_tokenizer))
self.assertEqual(tokenizer.decode(encoding_processor["input_ids"]), expected_decoding)
@slow
def test_overflowing_tokens(self):
# In the case of overflowing tokens, test that we still have 1-to-1 mapping between the images and input_ids (sequences that are too long are broken down into multiple sequences).
from datasets import load_dataset
# set up
datasets = load_dataset("nielsr/funsd")
processor = UdopProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False)
def preprocess_data(examples):
images = [image.convert("RGB") for image in examples["image"]]
words = list(examples["words"])
boxes = list(examples["bboxes"])
word_labels = list(examples["ner_tags"])
encoded_inputs = processor(
images,
words,
boxes=boxes,
word_labels=word_labels,
max_length=512,
padding="max_length",
truncation=True,
return_overflowing_tokens=True,
stride=50,
return_offsets_mapping=True,
return_tensors="pt",
)
return encoded_inputs
train_data = preprocess_data(datasets["train"])
self.assertEqual(len(train_data["pixel_values"]), len(train_data["input_ids"]))
@unittest.skip("We will not support batch input with and without images for UDOP!")
def test_processor_text_has_no_visual(self):
pass
# different use cases tests
@require_sentencepiece
@require_torch
@require_pytesseract
| UdopProcessorTest |
python | ray-project__ray | python/ray/dashboard/subprocesses/module.py | {
"start": 580,
"end": 1393
} | class ____:
"""
Configuration for a SubprocessModule.
Pickleable.
"""
cluster_id_hex: str
gcs_address: str
session_name: str
temp_dir: str
session_dir: str
# Logger configs. Will be set up in subprocess entrypoint `run_module`.
logging_level: str
logging_format: str
log_dir: str
# Name of the "base" log file. Its stem is appended with the Module.__name__.
# e.g. when logging_filename = "dashboard.log", and Module is JobHead,
# we will set up logger with name "dashboard_JobHead.log". This name will again be
# appended with .1 and .2 for rotation.
logging_filename: str
logging_rotate_bytes: int
logging_rotate_backup_count: int
# The directory where the socket file will be created.
socket_dir: str
| SubprocessModuleConfig |
python | spyder-ide__spyder | spyder/plugins/onlinehelp/plugin.py | {
"start": 733,
"end": 4255
} | class ____(SpyderDockablePlugin):
"""
Online Help Plugin.
"""
NAME = 'onlinehelp'
REQUIRES = [Plugins.Application]
TABIFY = [Plugins.VariableExplorer, Plugins.Help]
CONF_SECTION = NAME
CONF_FILE = False
WIDGET_CLASS = PydocBrowser
LOG_PATH = get_conf_path(NAME)
REQUIRE_WEB_WIDGETS = True
CAN_HANDLE_SEARCH_ACTIONS = True
# --- Signals
# ------------------------------------------------------------------------
sig_load_finished = Signal()
"""
This signal is emitted to indicate the help page has finished loading.
"""
# --- SpyderDockablePlugin API
# ------------------------------------------------------------------------
@staticmethod
def get_name():
return _('Online help')
@staticmethod
def get_description():
return _(
"Browse and search documentation for installed Python modules "
"interactively."
)
@classmethod
def get_icon(cls):
return cls.create_icon('online_help')
def on_close(self, cancelable=False):
self.save_history()
self.set_conf('zoom_factor',
self.get_widget().get_zoom_factor())
return True
def on_initialize(self):
widget = self.get_widget()
widget.load_history(self.load_history())
widget.sig_load_finished.connect(self.sig_load_finished)
@on_plugin_available(plugin=Plugins.Application)
def on_application_available(self):
# Setup Search actions
self._enable_search_action(ApplicationActions.FindText, True)
self._enable_search_action(ApplicationActions.FindNext, True)
self._enable_search_action(ApplicationActions.FindPrevious, True)
# Replace action is set disabled since the `FindReplace` widget created
# by the main widget has `enable_replace=False`
self._enable_search_action(ApplicationActions.ReplaceText, False)
def update_font(self):
self.get_widget().reload()
# --- Private API
# ------------------------------------------------------------------------
def _enable_search_action(self, action_name: str, enabled: bool) -> None:
"""Enable or disable search action for this plugin."""
application = self.get_plugin(Plugins.Application, error=False)
if application:
application.enable_search_action(action_name, enabled, self.NAME)
# --- Public API
# ------------------------------------------------------------------------
def load_history(self):
"""
Load history from a text file in the Spyder configuration directory.
"""
if osp.isfile(self.LOG_PATH):
with open(self.LOG_PATH, 'r') as fh:
lines = fh.read().split('\n')
history = [line.replace('\n', '') for line in lines]
else:
history = []
return history
def save_history(self):
"""
Save history to a text file in the Spyder configuration directory.
"""
data = "\n".join(self.get_widget().get_history())
with open(self.LOG_PATH, 'w') as fh:
fh.write(data)
def find(self) -> None:
find_widget = self.get_widget().find_widget
find_widget.show()
find_widget.search_text.setFocus()
def find_next(self) -> None:
self.get_widget().find_widget.find_next()
def find_previous(self) -> None:
self.get_widget().find_widget.find_previous()
| OnlineHelp |
python | pypa__pip | tests/lib/configuration_helpers.py | {
"start": 430,
"end": 1866
} | class ____:
def setup_method(self) -> None:
self.configuration = pip._internal.configuration.Configuration(
isolated=False,
)
def patch_configuration(self, variant: Kind, di: dict[str, Any]) -> None:
old = self.configuration._load_config_files
@functools.wraps(old)
def overridden() -> None:
# Manual Overload
self.configuration._config[variant].setdefault("fakefile", {})
self.configuration._config[variant]["fakefile"].update(di)
# Configuration._parsers has type:
# Dict[Kind, List[Tuple[str, RawConfigParser]]].
# As a testing convenience, pass a special value.
self.configuration._parsers[variant].append(
(None, None), # type: ignore[arg-type]
)
old()
# https://github.com/python/mypy/issues/2427
self.configuration._load_config_files = overridden # type: ignore[method-assign]
@contextlib.contextmanager
def tmpfile(self, contents: str) -> Iterator[str]:
# Create a temporary file
fd, path = tempfile.mkstemp(prefix="pip_", suffix="_config.ini", text=True)
os.close(fd)
contents = textwrap.dedent(contents).lstrip()
ensure_dir(os.path.dirname(path))
with open(path, "w") as f:
f.write(contents)
yield path
os.remove(path)
| ConfigurationMixin |
python | sqlalchemy__sqlalchemy | test/ext/test_associationproxy.py | {
"start": 94295,
"end": 94426
} | class ____(
ScalarRemoveScalarObjectNoCascade
):
create_on_none_assignment = True
| ScalarRemoveScalarObjectNoCascadeNoneAssign |
python | huggingface__transformers | tests/models/mamba/test_modeling_mamba.py | {
"start": 8491,
"end": 16565
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (MambaModel, MambaForCausalLM) if is_torch_available() else ()
has_attentions = False # Mamba does not support attentions
test_missing_keys = False
pipeline_model_mapping = (
{"feature-extraction": MambaModel, "text-generation": MambaForCausalLM} if is_torch_available() else {}
)
def setUp(self):
self.model_tester = MambaModelTester(self)
self.config_tester = ConfigTester(
self, config_class=MambaConfig, n_embd=37, common_properties=["hidden_size", "num_hidden_layers"]
)
def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
self.assertIsInstance(past_key_values, MambaCache)
conv_shape = (batch_size, config.intermediate_size, config.conv_kernel)
ssm_shape = (batch_size, config.intermediate_size, config.state_size)
self.assertTrue(config.num_hidden_layers, len(past_key_values.conv_states))
for idx in range(len(past_key_values.conv_states)):
self.assertEqual(past_key_values.conv_states[idx].shape, conv_shape)
self.assertEqual(past_key_values.ssm_states[idx].shape, ssm_shape)
def assertInterval(self, member, container, msg=None):
r"""
Simple utility function to check if a member is inside an interval.
"""
if isinstance(member, torch.Tensor):
max_value, min_value = member.max().item(), member.min().item()
elif isinstance(member, (list, tuple)):
max_value, min_value = max(member), min(member)
if not isinstance(container, list):
raise TypeError("container should be a list or tuple")
elif len(container) != 2:
raise ValueError("container should have 2 elements")
expected_min, expected_max = container
is_inside_interval = (min_value >= expected_min) and (max_value <= expected_max)
if not is_inside_interval:
standardMsg = f"{safe_repr(member)} not found in {safe_repr(container)}"
self.fail(self._formatMessage(msg, standardMsg))
def test_config(self):
self.config_tester.run_common_tests()
def test_mamba_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mamba_model(*config_and_inputs)
def test_mamba_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_causal_lm(*config_and_inputs)
def test_state_equivalency(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_state_equivalency(*config_and_inputs)
def test_mamba_cached_slow_forward_and_backwards(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mamba_cached_slow_forward_and_backwards(*config_and_inputs)
def test_mamba_lm_head_forward_and_backwards(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mamba_lm_head_forward_and_backwards(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model = MambaModel.from_pretrained("hf-internal-testing/mamba-130m")
self.assertIsNotNone(model)
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, MambaCache): # MODIFIED PART START
recursive_check(tuple_object.conv_states, dict_object.conv_states)
recursive_check(tuple_object.ssm_states, dict_object.ssm_states)
elif isinstance(tuple_object, (list, tuple)): # MODIFIED PART END
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, dict):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values()
):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(tuple_object, dict_object, atol=1e-5),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@unittest.skip("The `input_embeds` when fed don't produce the same results.")
def test_beam_sample_generate(self):
pass
def test_dtype_mismatch_handled_in_cache(self):
config, input_ids, *args = self.model_tester.prepare_config_and_inputs()
model = MambaModel(config)
model.to(torch_device).to(torch.float16)
model.eval()
# Create cache with float32 dtype
cache_params = MambaCache(config, max_batch_size=input_ids.size(0), dtype=torch.float32, device=torch_device)
# If code is correct, no error occurs and test passes
outputs = model(
input_ids,
cache_params=cache_params,
use_cache=True,
cache_position=torch.arange(0, config.conv_kernel, device=input_ids.device),
)
self.assertIsNotNone(outputs)
self.assertIsNotNone(outputs.last_hidden_state)
self.assertEqual(
outputs.last_hidden_state.shape,
(self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.hidden_size),
)
@unittest.skip("Mamba models do not support DDP.")
def test_multi_gpu_data_parallel_forward(self):
pass
@slow
@require_torch
| MambaModelTest |
python | pytorch__pytorch | test/lazy/test_extract_compiled_graph.py | {
"start": 517,
"end": 763
} | class ____(nn.Module):
"""
addcmul function takes a at::Scalar which results in a special TSData containing a Scalar rather than a Tensor.
"""
def forward(self, a, b, c):
return torch.addcmul(a, b, c, value=5)
| ModuleAddcmul |
python | cython__cython | tests/run/pure_py.py | {
"start": 11502,
"end": 13548
} | class ____(object):
def __init__(self, *args):
pass
def same_type_cast():
"""
>>> same_type_cast()
True
"""
f = EmptyClass()
return f is cython.cast(EmptyClass, f)
def multi_args_init_cast():
"""
>>> multi_args_init_cast()
True
"""
f = Foo(10, 20, 30)
return cython.cast(Foo, f) is f
def multi_args_init_declare():
"""
>>> multi_args_init_declare() is None
True
"""
f = cython.declare(Foo)
if cython.compiled:
f = None
return f
EmptyClassSyn = cython.typedef(EmptyClass)
def empty_declare():
"""
>>> empty_declare()
[]
"""
r0 = cython.declare(EmptyClass)
r1 = cython.declare(EmptyClassSyn)
r2 = cython.declare(MyStruct)
r3 = cython.declare(MyUnion)
r4 = cython.declare(MyStruct2)
r5 = cython.declare(cython.int[2])
if cython.compiled:
r0 = None
r1 = None
res = [
r0 is None,
r1 is None,
r2 is not None,
r3 is not None,
r4 is not None,
r5 is not None
]
r2.is_integral = True
assert r2.is_integral == True
r3.x = 12.3
assert r3.x == 12.3
#It generates a correct C code, but raises an exception when interpreted
if cython.compiled:
r4[0].is_integral = True
assert r4[0].is_integral == True
r5[0] = 42
assert r5[0] == 42
return [i for i, x in enumerate(res) if not x]
def same_declare():
"""
>>> same_declare()
True
"""
f = EmptyClass()
f2 = cython.declare(EmptyClass, f)
return f2 is f
def none_cast():
"""
>>> none_cast() is None
True
"""
f = None
return cython.cast(EmptyClass, f)
def none_declare():
"""
>>> none_declare() is None
True
"""
f = None
f2 = cython.declare(Foo, f)
return f2
def array_init_with_list():
"""
>>> array_init_with_list()
[10, 42]
"""
x = cython.declare(cython.int[20], list(range(20)))
x[12] = 42
return [x[10], x[12]]
| EmptyClass |
python | sympy__sympy | sympy/polys/rootoftools.py | {
"start": 1137,
"end": 4087
} | class ____:
"""A minimal dictionary that makes sure that the key is a
univariate PurePoly instance.
Examples
========
Only the following actions are guaranteed:
>>> from sympy.polys.rootoftools import _pure_key_dict
>>> from sympy import PurePoly
>>> from sympy.abc import x, y
1) creation
>>> P = _pure_key_dict()
2) assignment for a PurePoly or univariate polynomial
>>> P[x] = 1
>>> P[PurePoly(x - y, x)] = 2
3) retrieval based on PurePoly key comparison (use this
instead of the get method)
>>> P[y]
1
4) KeyError when trying to retrieve a nonexisting key
>>> P[y + 1]
Traceback (most recent call last):
...
KeyError: PurePoly(y + 1, y, domain='ZZ')
5) ability to query with ``in``
>>> x + 1 in P
False
NOTE: this is a *not* a dictionary. It is a very basic object
for internal use that makes sure to always address its cache
via PurePoly instances. It does not, for example, implement
``get`` or ``setdefault``.
"""
def __init__(self):
self._dict = {}
def __getitem__(self, k):
if not isinstance(k, PurePoly):
if not (isinstance(k, Expr) and len(k.free_symbols) == 1):
raise KeyError
k = PurePoly(k, expand=False)
return self._dict[k]
def __setitem__(self, k, v):
if not isinstance(k, PurePoly):
if not (isinstance(k, Expr) and len(k.free_symbols) == 1):
raise ValueError('expecting univariate expression')
k = PurePoly(k, expand=False)
self._dict[k] = v
def __contains__(self, k):
try:
self[k]
return True
except KeyError:
return False
_reals_cache = _pure_key_dict()
_complexes_cache = _pure_key_dict()
def _pure_factors(poly):
_, factors = poly.factor_list()
return [(PurePoly(f, expand=False), m) for f, m in factors]
def _imag_count_of_factor(f):
"""Return the number of imaginary roots for irreducible
univariate polynomial ``f``.
"""
terms = [(i, j) for (i,), j in f.terms()]
if any(i % 2 for i, j in terms):
return 0
# update signs
even = [(i, I**i*j) for i, j in terms]
even = Poly.from_dict(dict(even), Dummy('x'))
return int(even.count_roots(-oo, oo))
@public
def rootof(f, x, index=None, radicals=True, expand=True):
"""An indexed root of a univariate polynomial.
Returns either a :obj:`ComplexRootOf` object or an explicit
expression involving radicals.
Parameters
==========
f : Expr
Univariate polynomial.
x : Symbol, optional
Generator for ``f``.
index : int or Integer
radicals : bool
Return a radical expression if possible.
expand : bool
Expand ``f``.
"""
return CRootOf(f, x, index=index, radicals=radicals, expand=expand)
@public
| _pure_key_dict |
python | mlflow__mlflow | mlflow/genai/optimize/types.py | {
"start": 4785,
"end": 5324
} | class ____:
"""
Result of the :py:func:`mlflow.genai.optimize_prompts()` API.
Args:
optimized_prompts: The optimized prompts.
optimizer_name: The name of the optimizer.
initial_eval_score: The evaluation score before optimization (optional).
final_eval_score: The evaluation score after optimization (optional).
"""
optimized_prompts: list[PromptVersion]
optimizer_name: str
initial_eval_score: float | None = None
final_eval_score: float | None = None
| PromptOptimizationResult |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 179056,
"end": 179752
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"project_id",
"name",
"body",
"state",
"public",
"client_mutation_id",
)
project_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectId")
name = sgqlc.types.Field(String, graphql_name="name")
body = sgqlc.types.Field(String, graphql_name="body")
state = sgqlc.types.Field(ProjectState, graphql_name="state")
public = sgqlc.types.Field(Boolean, graphql_name="public")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| UpdateProjectInput |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-path-with-teleportations.py | {
"start": 68,
"end": 1360
} | class ____(object):
def minCost(self, grid, k):
"""
:type grid: List[List[int]]
:type k: int
:rtype: int
"""
m = len(grid)
n = len(grid[0])
dp = [[float("inf")]*n for _ in xrange(m)]
dp[-1][-1] = 0
mx = max(max(row) for row in grid)
prefix = [float("inf")]*(mx+1)
for i in xrange(k+1):
for r in reversed(xrange(m)):
for c in reversed(xrange(n)):
if r+1 < m:
if dp[r+1][c]+grid[r+1][c] < dp[r][c]:
dp[r][c] = dp[r+1][c]+grid[r+1][c]
if c+1 < n:
if dp[r][c+1]+grid[r][c+1] < dp[r][c]:
dp[r][c] = dp[r][c+1]+grid[r][c+1]
if prefix[grid[r][c]] < dp[r][c]:
dp[r][c] = prefix[grid[r][c]]
for r in xrange(m):
for c in xrange(n):
if dp[r][c] < prefix[grid[r][c]]:
prefix[grid[r][c]] = dp[r][c]
for i in xrange(mx):
if prefix[i] < prefix[i+1]:
prefix[i+1] = prefix[i]
return dp[0][0]
# Time: O(k * (m * n + r))
# Space: O(m * n + r)
# dp, prefix sum
| Solution |
python | django__django | django/urls/resolvers.py | {
"start": 10824,
"end": 13702
} | class ____(CheckURLMixin):
regex = LocaleRegexRouteDescriptor()
def __init__(self, route, name=None, is_endpoint=False):
self._route = route
self._regex, self.converters = _route_to_regex(str(route), is_endpoint)
self._regex_dict = {}
self._is_endpoint = is_endpoint
self.name = name
def match(self, path):
# Only use regex overhead if there are converters.
if self.converters:
if match := self.regex.search(path):
# RoutePattern doesn't allow non-named groups so args are
# ignored.
kwargs = match.groupdict()
for key, value in kwargs.items():
converter = self.converters[key]
try:
kwargs[key] = converter.to_python(value)
except ValueError:
return None
return path[match.end() :], (), kwargs
# If this is an endpoint, the path should be exactly the same as the
# route.
elif self._is_endpoint:
if self._route == path:
return "", (), {}
# If this isn't an endpoint, the path should start with the route.
elif path.startswith(self._route):
return path.removeprefix(self._route), (), {}
return None
def check(self):
warnings = [
*self._check_pattern_startswith_slash(),
*self._check_pattern_unmatched_angle_brackets(),
]
route = self._route
if "(?P<" in route or route.startswith("^") or route.endswith("$"):
warnings.append(
Warning(
"Your URL pattern {} has a route that contains '(?P<', begins "
"with a '^', or ends with a '$'. This was likely an oversight "
"when migrating to django.urls.path().".format(self.describe()),
id="2_0.W001",
)
)
return warnings
def _check_pattern_unmatched_angle_brackets(self):
warnings = []
msg = "Your URL pattern %s has an unmatched '%s' bracket."
brackets = re.findall(r"[<>]", str(self._route))
open_bracket_counter = 0
for bracket in brackets:
if bracket == "<":
open_bracket_counter += 1
elif bracket == ">":
open_bracket_counter -= 1
if open_bracket_counter < 0:
warnings.append(
Warning(msg % (self.describe(), ">"), id="urls.W010")
)
open_bracket_counter = 0
if open_bracket_counter > 0:
warnings.append(Warning(msg % (self.describe(), "<"), id="urls.W010"))
return warnings
def __str__(self):
return str(self._route)
| RoutePattern |
python | scipy__scipy | scipy/interpolate/tests/test_interpolate.py | {
"start": 71721,
"end": 76810
} | class ____:
def test_simple(self, xp):
x = xp.asarray([0, 1])
c = xp.asarray([[3]])
bp = BPoly(c, x)
xp_assert_close(bp(0.1), xp.asarray(3., dtype=xp.float64))
def test_simple2(self, xp):
x = xp.asarray([0, 1])
c = xp.asarray([[3], [1]])
bp = BPoly(c, x) # 3*(1-x) + 1*x
xp_assert_close(bp(0.1), xp.asarray(3*0.9 + 1.*0.1, dtype=xp.float64))
def test_simple3(self, xp):
x = xp.asarray([0, 1])
c = xp.asarray([[3], [1], [4]])
bp = BPoly(c, x) # 3 * (1-x)**2 + 2 * x (1-x) + 4 * x**2
xp_assert_close(
bp(0.2),
xp.asarray(3 * 0.8*0.8 + 1 * 2*0.2*0.8 + 4 * 0.2*0.2, dtype=xp.float64)
)
def test_simple4(self, xp):
x = xp.asarray([0, 1])
c = xp.asarray([[1], [1], [1], [2]])
bp = BPoly(c, x)
xp_assert_close(bp(0.3),
xp.asarray( 0.7**3 +
3 * 0.7**2 * 0.3 +
3 * 0.7 * 0.3**2 +
2 * 0.3**3, dtype=xp.float64)
)
def test_simple5(self, xp):
x = xp.asarray([0, 1])
c = xp.asarray([[1], [1], [8], [2], [1]])
bp = BPoly(c, x)
xp_assert_close(bp(0.3),
xp.asarray( 0.7**4 +
4 * 0.7**3 * 0.3 +
8 * 6 * 0.7**2 * 0.3**2 +
2 * 4 * 0.7 * 0.3**3 +
0.3**4, dtype=xp.float64)
)
def test_periodic(self, xp):
x = xp.asarray([0, 1, 3])
c = xp.asarray([[3, 0], [0, 0], [0, 2]])
# [3*(1-x)**2, 2*((x-1)/2)**2]
bp = BPoly(c, x, extrapolate='periodic')
xp_assert_close(bp(3.4), xp.asarray(3 * 0.6**2, dtype=xp.float64))
xp_assert_close(bp(-1.3), xp.asarray(2 * (0.7/2)**2, dtype=xp.float64))
xp_assert_close(bp(3.4, 1), xp.asarray(-6 * 0.6, dtype=xp.float64))
xp_assert_close(bp(-1.3, 1), xp.asarray(2 * (0.7/2), dtype=xp.float64))
def test_descending(self):
rng = np.random.RandomState(0)
power = 3
for m in [10, 20, 30]:
x = np.sort(rng.uniform(0, 10, m + 1))
ca = rng.uniform(-0.1, 0.1, size=(power + 1, m))
# We need only to flip coefficients to get it right!
cd = ca[::-1].copy()
pa = BPoly(ca, x, extrapolate=True)
pd = BPoly(cd[:, ::-1], x[::-1], extrapolate=True)
x_test = rng.uniform(-10, 20, 100)
xp_assert_close(pa(x_test), pd(x_test), rtol=1e-13)
xp_assert_close(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)
pa_d = pa.derivative()
pd_d = pd.derivative()
xp_assert_close(pa_d(x_test), pd_d(x_test), rtol=1e-13)
# Antiderivatives won't be equal because fixing continuity is
# done in the reverse order, but surely the differences should be
# equal.
pa_i = pa.antiderivative()
pd_i = pd.antiderivative()
for a, b in rng.uniform(-10, 20, (5, 2)):
int_a = pa.integrate(a, b)
int_d = pd.integrate(a, b)
xp_assert_close(int_a, int_d, rtol=1e-12)
xp_assert_close(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
rtol=1e-12)
def test_multi_shape(self):
rng = np.random.RandomState(1234)
c = rng.rand(6, 2, 1, 2, 3)
x = np.array([0, 0.5, 1])
p = BPoly(c, x)
assert p.x.shape == x.shape
assert p.c.shape == c.shape
assert p(0.3).shape == c.shape[2:]
assert p(rng.rand(5, 6)).shape == (5, 6) + c.shape[2:]
dp = p.derivative()
assert dp.c.shape == (5, 2, 1, 2, 3)
def test_interval_length(self, xp):
x = xp.asarray([0, 2])
c = xp.asarray([[3], [1], [4]])
bp = BPoly(c, x)
xval = 0.1
s = xval / 2 # s = (x - xa) / (xb - xa)
xp_assert_close(
bp(xval),
xp.asarray(3 * (1-s)*(1-s) + 1 * 2*s*(1-s) + 4 * s*s, dtype=xp.float64)
)
def test_two_intervals(self, xp):
x = xp.asarray([0, 1, 3])
c = xp.asarray([[3, 0], [0, 0], [0, 2]])
bp = BPoly(c, x) # [3*(1-x)**2, 2*((x-1)/2)**2]
xp_assert_close(bp(0.4), xp.asarray(3 * 0.6*0.6, dtype=xp.float64))
xp_assert_close(bp(1.7), xp.asarray(2 * (0.7/2)**2, dtype=xp.float64))
def test_extrapolate_attr(self):
x = [0, 2]
c = [[3], [1], [4]]
bp = BPoly(c, x)
for extrapolate in (True, False, None):
bp = BPoly(c, x, extrapolate=extrapolate)
bp_d = bp.derivative()
if extrapolate is False:
assert np.isnan(bp([-0.1, 2.1])).all()
assert np.isnan(bp_d([-0.1, 2.1])).all()
else:
assert not np.isnan(bp([-0.1, 2.1])).any()
assert not np.isnan(bp_d([-0.1, 2.1])).any()
@make_xp_test_case(BPoly)
| TestBPoly |
python | getsentry__sentry | src/sentry/net/http.py | {
"start": 6756,
"end": 7276
} | class ____(HTTPAdapter):
def __init__(self, *args, **kwargs):
timeout = kwargs.pop("timeout", None)
HTTPAdapter.__init__(self, *args, **kwargs)
if timeout is None:
timeout = 10.0
self.default_timeout = timeout
def send(self, *args, **kwargs):
if kwargs.get("timeout") is None:
kwargs["timeout"] = self.default_timeout
return HTTPAdapter.send(self, *args, **kwargs)
USER_AGENT = f"sentry/{SENTRY_VERSION} (https://sentry.io)"
| TimeoutAdapter |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/dataclass_transforms_one.py | {
"start": 388,
"end": 793
} | class ____(Base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str]
x: Mapped[Optional[int]] = mapped_column(default=None)
y: Mapped[Optional[int]] = mapped_column(kw_only=True)
tis = TestInitialSupport(data="some data", y=5)
assert_type(tis.data, str)
assert_type(tis.y, int | None)
tis.data = "some other data"
| TestInitialSupport |
python | google__jax | tests/pallas/tpu_pallas_test.py | {
"start": 137959,
"end": 138791
} | class ____(PallasBaseTest):
@parameterized.parameters(
(dict(foo='bar'),),
(dict(foo='afjafo'),),
(dict(problem_info=json.dumps(dict(tiling_info=dict(bm=128, bk=128)))),),
)
def test_metadata_is_preserved(self, metadata):
def kernel(x_ref, y_ref, out_ref):
out_ref[...] = x_ref[...] + y_ref[...]
x = jnp.arange(1024, dtype=jnp.float32).reshape((8, 128))
y = jnp.arange(1024, dtype=jnp.float32).reshape((8, 128))
@jax.jit
def f(x, y):
return self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
metadata=metadata,
)(x, y)
hlo = f.lower(x, y).compile().as_text()
self.assertIn(json.dumps(metadata), hlo)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| PallasKernelMetadataTest |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 9323,
"end": 9828
} | class ____(BaseModel):
enabled: bool = Field(..., description="")
status: Optional["ClusterStatusTelemetry"] = Field(default=None, description="")
config: Optional["ClusterConfigTelemetry"] = Field(default=None, description="")
peers: Optional[Dict[str, "PeerInfo"]] = Field(default=None, description="")
peer_metadata: Optional[Dict[str, "PeerMetadata"]] = Field(default=None, description="")
metadata: Optional[Dict[str, Any]] = Field(default=None, description="")
| ClusterTelemetry |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 62239,
"end": 62505
} | class ____(BaseModel):
time: Optional[float] = Field(default=None, description="Time spent to process this request")
status: Optional[str] = Field(default=None, description="")
result: Optional[bool] = Field(default=None, description="")
| InlineResponse2009 |
python | google__jax | tests/lazy_loader_test.py | {
"start": 732,
"end": 1351
} | class ____(absltest.TestCase):
def testLazyLoader(self):
self.assertEmpty([m for m in sys.modules if "lazy_test_submodule" in m])
self.assertEqual(["lazy_test_submodule"], l.__all__)
self.assertEqual(["lazy_test_submodule"], dir(l))
# The submodule should be imported only after it is accessed.
self.assertEmpty([m for m in sys.modules if "lazy_test_submodule" in m])
self.assertEqual(42, l.lazy_test_submodule.a_function())
self.assertLen([m for m in sys.modules if "lazy_test_submodule" in m], 1)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| LazyLoaderTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/pool/impl.py | {
"start": 14795,
"end": 16895
} | class ____(Pool):
"""A Pool of exactly one connection, used for all requests.
Reconnect-related functions such as ``recycle`` and connection
invalidation (which is also used to support auto-reconnect) are only
partially supported right now and may not yield good results.
The :class:`.StaticPool` class **is compatible** with asyncio and
:func:`_asyncio.create_async_engine`.
"""
@util.memoized_property
def connection(self) -> _ConnectionRecord:
return _ConnectionRecord(self)
def status(self) -> str:
return "StaticPool"
def dispose(self) -> None:
if (
"connection" in self.__dict__
and self.connection.dbapi_connection is not None
):
self.connection.close()
del self.__dict__["connection"]
def recreate(self) -> StaticPool:
self.logger.info("Pool recreating")
return self.__class__(
creator=self._creator,
recycle=self._recycle,
reset_on_return=self._reset_on_return,
pre_ping=self._pre_ping,
echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch,
dialect=self._dialect,
)
def _transfer_from(self, other_static_pool: StaticPool) -> None:
# used by the test suite to make a new engine / pool without
# losing the state of an existing SQLite :memory: connection
def creator(rec: ConnectionPoolEntry) -> DBAPIConnection:
conn = other_static_pool.connection.dbapi_connection
assert conn is not None
return conn
self._invoke_creator = creator
def _create_connection(self) -> ConnectionPoolEntry:
raise NotImplementedError()
def _do_return_conn(self, record: ConnectionPoolEntry) -> None:
pass
def _do_get(self) -> ConnectionPoolEntry:
rec = self.connection
if rec._is_hard_or_soft_invalidated():
del self.__dict__["connection"]
rec = self.connection
return rec
| StaticPool |
python | pypa__setuptools | setuptools/_distutils/errors.py | {
"start": 3013,
"end": 3092
} | class ____(DistutilsError):
"""Byte compile error."""
| DistutilsByteCompileError |
python | huggingface__transformers | src/transformers/models/luke/modeling_luke.py | {
"start": 78720,
"end": 83333
} | class ____(LukePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.luke = LukeModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
entity_ids: Optional[torch.LongTensor] = None,
entity_attention_mask: Optional[torch.FloatTensor] = None,
entity_token_type_ids: Optional[torch.LongTensor] = None,
entity_position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, LukeTokenClassifierOutput]:
r"""
entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
Indices of entity tokens in the entity vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:
- 1 for entity tokens that are **not masked**,
- 0 for entity tokens that are **masked**.
entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
Segment token indices to indicate first and second portions of the entity token inputs. Indices are
selected in `[0, 1]`:
- 0 corresponds to a *portion A* entity token,
- 1 corresponds to a *portion B* entity token.
entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.luke(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
sequence_output = outputs.last_hidden_state
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
# move labels to correct device
labels = labels.to(logits.device)
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
return tuple(
v
for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions]
if v is not None
)
return LukeTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
entity_hidden_states=outputs.entity_hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| LukeForTokenClassification |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-monday/components.py | {
"start": 5566,
"end": 17057
} | class ____(HttpRequester):
NEXT_PAGE_TOKEN_FIELD_NAME = "next_page_token"
schema_loader: InlineSchemaLoader
limit: Union[InterpolatedString, str, int] = None
nested_limit: Union[InterpolatedString, str, int] = None
def __post_init__(self, parameters: Mapping[str, Any]):
super(MondayGraphqlRequester, self).__post_init__(parameters)
self.limit = InterpolatedString.create(self.limit, parameters=parameters)
self.nested_limit = InterpolatedString.create(self.nested_limit, parameters=parameters)
self.name = parameters.get("name", "").lower()
self.stream_sync_mode = (
SyncMode.full_refresh if parameters.get("stream_sync_mode", "full_refresh") == "full_refresh" else SyncMode.incremental
)
def _ensure_type(self, t: Type, o: Any):
"""
Ensure given object `o` is of type `t`
"""
if not isinstance(o, t):
raise TypeError(f"{type(o)} {o} is not of type {t}")
def _get_schema_root_properties(self):
schema = self.schema_loader.get_json_schema()[self.name]["properties"]
# delete fields that will be created by extractor
delete_fields = ["updated_at_int", "created_at_int", "pulse_id"]
if self.name == "activity_logs":
delete_fields.append("board_id")
for field in delete_fields:
if field in schema:
schema.pop(field)
return schema
def _get_object_arguments(self, **object_arguments) -> str:
return ",".join(
[
f"{argument}:{value}" if argument != "fromt" else f'from:"{value}"'
for argument, value in object_arguments.items()
if value is not None
]
)
def _build_query(self, object_name: str, field_schema: dict, **object_arguments) -> str:
"""
Recursive function that builds a GraphQL query string by traversing given stream schema properties.
Attributes
object_name (str): the name of root object
field_schema (dict): configured catalog schema for current stream
object_arguments (dict): arguments such as limit, page, ids, ... etc to be passed for given object
"""
fields = []
for field, nested_schema in field_schema.items():
nested_fields = nested_schema.get("properties", nested_schema.get("items", {}).get("properties"))
if nested_fields:
# preconfigured_arguments = get properties from schema or any other source ...
# fields.append(self._build_query(field, nested_fields, **preconfigured_arguments))
fields.append(self._build_query(field, nested_fields))
else:
fields.append(field)
# when querying the boards stream (object_name == "boards"), filter by board_ids if they provided in the config
if object_name == "boards" and "board_ids" in self.config:
# if we are building a query for incremental syncs, board ids are already present under 'ids' key in object_arguments (as a result of fetching the activity_logs stream first)
# These ids are already an intersection of the board_ids provided in the config and the ones that must be fetched for the incremental sync and need not be overridden
if "ids" not in object_arguments:
object_arguments["ids"] = self.config.get("board_ids")
arguments = self._get_object_arguments(**object_arguments)
arguments = f"({arguments})" if arguments else ""
if object_name == "column_values":
fields.remove("display_value")
fields.extend(
["... on MirrorValue{display_value}", "... on BoardRelationValue{display_value}", "... on DependencyValue{display_value}"]
)
fields = ",".join(fields)
if object_name in ["items_page", "next_items_page"]:
query = f"{object_name}{arguments}{{cursor,items{{{fields}}}}}"
else:
query = f"{object_name}{arguments}{{{fields}}}"
return query
def _build_items_query(self, object_name: str, field_schema: dict, sub_page: Optional[int], **object_arguments) -> str:
"""
Special optimization needed for items stream. Starting October 3rd, 2022 items can only be reached through boards.
See https://developer.monday.com/api-reference/docs/items-queries#items-queries
Comparison of different APIs queries:
2023-07:
boards(limit: 1) { items(limit: 20) { field1, field2, ... }}
boards(limit: 1, page:2) { items(limit: 20, page:2) { field1, field2, ... }} boards and items paginations
2024_01:
boards(limit: 1) { items_page(limit: 20) {cursor, items{field1, field2, ...} }}
boards(limit: 1, page:2) { items_page(limit: 20) {cursor, items{field1, field2, ...} }} - boards pagination
next_items_page(limit: 20, cursor: "blaa") {cursor, items{field1, field2, ...} } - items pagination
"""
nested_limit = self.nested_limit.eval(self.config)
if sub_page:
query = self._build_query("next_items_page", field_schema, limit=nested_limit, cursor=f'"{sub_page}"')
else:
query = self._build_query("items_page", field_schema, limit=nested_limit)
# since items are a subresource of boards, when querying items, filter by board_ids if provided in the config
if "board_ids" in self.config and "ids" not in object_arguments:
object_arguments["ids"] = self.config.get("board_ids")
arguments = self._get_object_arguments(**object_arguments)
query = f"boards({arguments}){{{query}}}"
return query
def _build_items_incremental_query(self, object_name: str, field_schema: dict, stream_slice: dict, **object_arguments) -> str:
"""
Special optimization needed for items stream. Starting October 3rd, 2022 items can only be reached through boards.
See https://developer.monday.com/api-reference/docs/items-queries#items-queries
"""
nested_limit = self.nested_limit.eval(self.config)
object_arguments["limit"] = nested_limit
object_arguments["ids"] = stream_slice["ids"]
return self._build_query("items", field_schema, **object_arguments)
def _build_teams_query(self, object_name: str, field_schema: dict, **object_arguments) -> str:
"""
Special optimization needed for tests to pass successfully because of rate limits.
It makes a query cost less points, but it is never used in production
"""
teams_limit = self.config.get("teams_limit")
if teams_limit:
self._ensure_type(int, teams_limit)
arguments = self._get_object_arguments(**object_arguments)
query = f"{{id,name,picture_url,users(limit:{teams_limit}){{id}}}}"
if not arguments:
# when providing empty arguments in () API returns error
return f"{object_name}{query}"
return f"{object_name}({arguments}){query}"
return self._build_query(object_name=object_name, field_schema=field_schema, **object_arguments)
def _build_activity_query(self, object_name: str, field_schema: dict, sub_page: Optional[int], **object_arguments) -> str:
"""
Special optimization needed for items stream. Starting October 3rd, 2022 items can only be reached through boards.
See https://developer.monday.com/api-reference/docs/items-queries#items-queries
"""
nested_limit = self.nested_limit.eval(self.config)
created_at = (object_arguments.get("stream_slice", dict()) or dict()).get("start_time")
if "stream_slice" in object_arguments:
object_arguments.pop("stream_slice")
# 1 is default start time, so we can skip it to get all the data
if created_at == "1":
created_at = None
else:
created_at = datetime.fromtimestamp(int(created_at)).strftime("%Y-%m-%dT%H:%M:%SZ")
query = self._build_query(object_name, field_schema, limit=nested_limit, page=sub_page, fromt=created_at)
if "board_ids" in self.config and "ids" not in object_arguments:
object_arguments["ids"] = self.config.get("board_ids")
arguments = self._get_object_arguments(**object_arguments)
return f"boards({arguments}){{{query}}}"
def get_request_headers(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, Any]:
headers = super().get_request_headers(stream_state=stream_state, stream_slice=stream_slice, next_page_token=next_page_token)
headers["API-Version"] = "2024-10"
return headers
def get_request_body_json( # type: ignore
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Optional[Mapping[str, Any]]:
"""
Combines queries to a single GraphQL query.
"""
limit = self.limit.eval(self.config)
page = next_page_token and next_page_token[self.NEXT_PAGE_TOKEN_FIELD_NAME]
if self.name == "boards" and stream_slice:
if self.stream_sync_mode == SyncMode.full_refresh:
# incremental sync parameters are not needed for full refresh
stream_slice = {}
else:
stream_slice = {"ids": stream_slice.get("ids")}
query_builder = partial(self._build_query, **stream_slice)
elif self.name == "items":
# `items` stream use a separate pagination strategy where first level pages are across `boards` and sub-pages are across `items`
page, sub_page = page if page else (None, None)
if self.stream_sync_mode == SyncMode.full_refresh:
query_builder = partial(self._build_items_query, sub_page=sub_page)
else:
query_builder = partial(self._build_items_incremental_query, stream_slice=stream_slice)
elif self.name == "teams":
query_builder = self._build_teams_query
elif self.name == "activity_logs":
page, sub_page = page if page else (None, None)
query_builder = partial(self._build_activity_query, sub_page=sub_page, stream_slice=stream_slice)
else:
query_builder = self._build_query
query = query_builder(
object_name=self.name,
field_schema=self._get_schema_root_properties(),
limit=limit or None,
page=page,
)
return {"query": f"{{{query}}}"}
# We are using an LRU cache in should_retry() method which requires all incoming arguments (including self) to be hashable.
# Dataclasses by default are not hashable, so we need to define __hash__(). Alternatively, we can set @dataclass(frozen=True),
# but this has a cascading effect where all dataclass fields must also be set to frozen.
def __hash__(self):
return hash(tuple(self.__dict__))
| MondayGraphqlRequester |
python | great-expectations__great_expectations | tests/integration/cloud/rest_contracts/conftest.py | {
"start": 5156,
"end": 9573
} | class ____(pydantic.BaseModel):
"""Represents a Python API (Consumer) request and expected minimal response,
given a state in the Cloud backend (Provider).
The given state is something you know to be true about the Cloud backend data requested.
Args:
method: A string (e.g. "GET" or "POST") or attribute of the RequestMethods class representing a request method.
request_path: A pathlib.Path to the endpoint relative to the base url.
e.g.
```
path = pathlib.Path(
"/", "organizations", organization_id, "data-context-configuration"
)
```
upon_receiving: A string description of the type of request being made.
given: A string description of the state of the Cloud backend data requested.
response_status: The status code associated with the response. An integer between 100 and 599.
response_body: A dictionary or Pact Matcher object representing the response body.
request_body (Optional): A dictionary or Pact Matcher object representing the request body.
request_headers (Optional): A dictionary representing the request headers.
request_parmas (Optional): A dictionary representing the request parameters.
Returns:
ContractInteraction
""" # noqa: E501 # FIXME CoP
class Config:
arbitrary_types_allowed = True
method: Union[RequestMethods, pydantic.StrictStr]
request_path: pathlib.Path
upon_receiving: pydantic.StrictStr
given: pydantic.StrictStr
response_status: Annotated[int, pydantic.Field(strict=True, ge=100, lt=600)]
response_body: PactBody
request_body: Union[PactBody, None] = None
request_headers: Union[dict, None] = None
request_params: Union[dict, None] = None
@pytest.fixture
def run_rest_api_pact_test(
gx_cloud_session: Session,
pact_test: pact.Pact,
) -> Callable:
def _run_pact_test(
contract_interaction: ContractInteraction,
) -> None:
"""Runs a contract test and produces a Pact contract json file in directory:
- tests/integration/cloud/rest_contracts/pacts
Args:
contract_interaction: A ContractInteraction object which represents a Python API (Consumer) request
and expected minimal response, given a state in the Cloud backend (Provider).
Returns:
None
""" # noqa: E501 # FIXME CoP
request: dict[str, str | PactBody] = {
"method": contract_interaction.method,
"path": str(contract_interaction.request_path),
}
if contract_interaction.request_body is not None:
request["body"] = contract_interaction.request_body
if contract_interaction.request_params is not None:
request["query"] = contract_interaction.request_params
request["headers"] = dict(gx_cloud_session.headers)
if contract_interaction.request_headers is not None:
request["headers"].update(contract_interaction.request_headers) # type: ignore[union-attr] # FIXME CoP
gx_cloud_session.headers.update(contract_interaction.request_headers)
response: dict[str, int | PactBody] = {
"status": contract_interaction.response_status,
}
if contract_interaction.response_body is not None:
response["body"] = contract_interaction.response_body
(
pact_test.given(provider_state=contract_interaction.given)
.upon_receiving(scenario=contract_interaction.upon_receiving)
.with_request(**request)
.will_respond_with(**response)
)
request_url = f"http://{PACT_MOCK_HOST}:{PACT_MOCK_PORT}{contract_interaction.request_path}"
with pact_test:
# act
resp = gx_cloud_session.request(
method=contract_interaction.method,
url=request_url,
json=contract_interaction.request_body,
params=contract_interaction.request_params,
)
# assert
assert resp.status_code == contract_interaction.response_status
# TODO more unit test assertions would go here e.g. response body checks
return _run_pact_test
| ContractInteraction |
python | matplotlib__matplotlib | lib/matplotlib/animation.py | {
"start": 22654,
"end": 24220
} | class ____(FFMpegBase, FileMovieWriter):
"""
File-based ffmpeg writer.
Frames are written to temporary files on disk and then stitched together at the end.
This effectively works as a slideshow input to ffmpeg with the fps passed as
``-framerate``, so see also `their notes on frame rates`_ for further details.
.. _their notes on frame rates: https://trac.ffmpeg.org/wiki/Slideshow#Framerates
"""
supported_formats = ['png', 'jpeg', 'tiff', 'raw', 'rgba']
def _args(self):
# Returns the command line parameters for subprocess to use
# ffmpeg to create a movie using a collection of temp images
args = []
# For raw frames, we need to explicitly tell ffmpeg the metadata.
if self.frame_format in {'raw', 'rgba'}:
args += [
'-f', 'image2', '-vcodec', 'rawvideo',
'-video_size', '%dx%d' % self.frame_size,
'-pixel_format', 'rgba',
]
args += ['-framerate', str(self.fps), '-i', self._base_temp_name()]
if not self._tmpdir:
args += ['-frames:v', str(self._frame_counter)]
# Logging is quieted because subprocess.PIPE has limited buffer size.
# If you have a lot of frames in your animation and set logging to
# DEBUG, you will have a buffer overrun.
if _log.getEffectiveLevel() > logging.DEBUG:
args += ['-loglevel', 'error']
return [self.bin_path(), *args, *self.output_args]
# Base class for animated GIFs with ImageMagick
| FFMpegFileWriter |
python | great-expectations__great_expectations | great_expectations/execution_engine/sqlite_execution_engine.py | {
"start": 935,
"end": 1061
} | class ____(SqlAlchemyExecutionEngine):
"""SqlAlchemyExecutionEngine for SQLite databases."""
pass
| SqliteExecutionEngine |
python | huggingface__transformers | src/transformers/models/jetmoe/modular_jetmoe.py | {
"start": 15483,
"end": 17221
} | class ____(LlamaDecoderLayer):
def __init__(self, config: JetMoeConfig, layer_idx: Optional[int] = None):
super().__init__(config, layer_idx)
self.input_layernorm = JetMoeRMSNorm(config.hidden_size)
self.self_attention = JetMoeAttention(config, layer_idx)
self.post_attention_layernorm = JetMoeRMSNorm(config.hidden_size)
self.mlp = JetMoeMoE(config)
del self.self_attn
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _, _ = self.self_attention(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| JetMoeDecoderLayer |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 71613,
"end": 72172
} | class ____(lapack_opt_info, _ilp64_opt_info_mixin):
notfounderror = LapackILP64NotFoundError
lapack_order = ['openblas64_', 'openblas_ilp64', 'accelerate']
order_env_var_name = 'NPY_LAPACK_ILP64_ORDER'
def _calc_info(self, name):
print('lapack_ilp64_opt_info._calc_info(name=%s)' % (name))
info = get_info(name + '_lapack')
if self._check_info(info):
self.set_info(**info)
return True
else:
print('%s_lapack does not exist' % (name))
return False
| lapack_ilp64_opt_info |
python | eventlet__eventlet | eventlet/green/http/__init__.py | {
"start": 2837,
"end": 8738
} | class ____(IntEnum):
"""HTTP status codes and reason phrases
Status codes from the following RFCs are all observed:
* RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616
* RFC 6585: Additional HTTP Status Codes
* RFC 3229: Delta encoding in HTTP
* RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518
* RFC 5842: Binding Extensions to WebDAV
* RFC 7238: Permanent Redirect
* RFC 2295: Transparent Content Negotiation in HTTP
* RFC 2774: An HTTP Extension Framework
"""
def __new__(cls, value, phrase, description=''):
obj = int.__new__(cls, value)
obj._value_ = value
obj.phrase = phrase
obj.description = description
return obj
# informational
CONTINUE = 100, 'Continue', 'Request received, please continue'
SWITCHING_PROTOCOLS = (101, 'Switching Protocols',
'Switching to new protocol; obey Upgrade header')
PROCESSING = 102, 'Processing'
# success
OK = 200, 'OK', 'Request fulfilled, document follows'
CREATED = 201, 'Created', 'Document created, URL follows'
ACCEPTED = (202, 'Accepted',
'Request accepted, processing continues off-line')
NON_AUTHORITATIVE_INFORMATION = (203,
'Non-Authoritative Information', 'Request fulfilled from cache')
NO_CONTENT = 204, 'No Content', 'Request fulfilled, nothing follows'
RESET_CONTENT = 205, 'Reset Content', 'Clear input form for further input'
PARTIAL_CONTENT = 206, 'Partial Content', 'Partial content follows'
MULTI_STATUS = 207, 'Multi-Status'
ALREADY_REPORTED = 208, 'Already Reported'
IM_USED = 226, 'IM Used'
# redirection
MULTIPLE_CHOICES = (300, 'Multiple Choices',
'Object has several resources -- see URI list')
MOVED_PERMANENTLY = (301, 'Moved Permanently',
'Object moved permanently -- see URI list')
FOUND = 302, 'Found', 'Object moved temporarily -- see URI list'
SEE_OTHER = 303, 'See Other', 'Object moved -- see Method and URL list'
NOT_MODIFIED = (304, 'Not Modified',
'Document has not changed since given time')
USE_PROXY = (305, 'Use Proxy',
'You must use proxy specified in Location to access this resource')
TEMPORARY_REDIRECT = (307, 'Temporary Redirect',
'Object moved temporarily -- see URI list')
PERMANENT_REDIRECT = (308, 'Permanent Redirect',
'Object moved temporarily -- see URI list')
# client error
BAD_REQUEST = (400, 'Bad Request',
'Bad request syntax or unsupported method')
UNAUTHORIZED = (401, 'Unauthorized',
'No permission -- see authorization schemes')
PAYMENT_REQUIRED = (402, 'Payment Required',
'No payment -- see charging schemes')
FORBIDDEN = (403, 'Forbidden',
'Request forbidden -- authorization will not help')
NOT_FOUND = (404, 'Not Found',
'Nothing matches the given URI')
METHOD_NOT_ALLOWED = (405, 'Method Not Allowed',
'Specified method is invalid for this resource')
NOT_ACCEPTABLE = (406, 'Not Acceptable',
'URI not available in preferred format')
PROXY_AUTHENTICATION_REQUIRED = (407,
'Proxy Authentication Required',
'You must authenticate with this proxy before proceeding')
REQUEST_TIMEOUT = (408, 'Request Timeout',
'Request timed out; try again later')
CONFLICT = 409, 'Conflict', 'Request conflict'
GONE = (410, 'Gone',
'URI no longer exists and has been permanently removed')
LENGTH_REQUIRED = (411, 'Length Required',
'Client must specify Content-Length')
PRECONDITION_FAILED = (412, 'Precondition Failed',
'Precondition in headers is false')
REQUEST_ENTITY_TOO_LARGE = (413, 'Request Entity Too Large',
'Entity is too large')
REQUEST_URI_TOO_LONG = (414, 'Request-URI Too Long',
'URI is too long')
UNSUPPORTED_MEDIA_TYPE = (415, 'Unsupported Media Type',
'Entity body in unsupported format')
REQUESTED_RANGE_NOT_SATISFIABLE = (416,
'Requested Range Not Satisfiable',
'Cannot satisfy request range')
EXPECTATION_FAILED = (417, 'Expectation Failed',
'Expect condition could not be satisfied')
UNPROCESSABLE_ENTITY = 422, 'Unprocessable Entity'
LOCKED = 423, 'Locked'
FAILED_DEPENDENCY = 424, 'Failed Dependency'
UPGRADE_REQUIRED = 426, 'Upgrade Required'
PRECONDITION_REQUIRED = (428, 'Precondition Required',
'The origin server requires the request to be conditional')
TOO_MANY_REQUESTS = (429, 'Too Many Requests',
'The user has sent too many requests in '
'a given amount of time ("rate limiting")')
REQUEST_HEADER_FIELDS_TOO_LARGE = (431,
'Request Header Fields Too Large',
'The server is unwilling to process the request because its header '
'fields are too large')
# server errors
INTERNAL_SERVER_ERROR = (500, 'Internal Server Error',
'Server got itself in trouble')
NOT_IMPLEMENTED = (501, 'Not Implemented',
'Server does not support this operation')
BAD_GATEWAY = (502, 'Bad Gateway',
'Invalid responses from another server/proxy')
SERVICE_UNAVAILABLE = (503, 'Service Unavailable',
'The server cannot process the request due to a high load')
GATEWAY_TIMEOUT = (504, 'Gateway Timeout',
'The gateway server did not receive a timely response')
HTTP_VERSION_NOT_SUPPORTED = (505, 'HTTP Version Not Supported',
'Cannot fulfill request')
VARIANT_ALSO_NEGOTIATES = 506, 'Variant Also Negotiates'
INSUFFICIENT_STORAGE = 507, 'Insufficient Storage'
LOOP_DETECTED = 508, 'Loop Detected'
NOT_EXTENDED = 510, 'Not Extended'
NETWORK_AUTHENTICATION_REQUIRED = (511,
'Network Authentication Required',
'The client needs to authenticate to gain network access')
| HTTPStatus |
python | openai__openai-python | src/openai/types/responses/tool_choice_allowed.py | {
"start": 224,
"end": 1023
} | class ____(BaseModel):
mode: Literal["auto", "required"]
"""Constrains the tools available to the model to a pre-defined set.
`auto` allows the model to pick from among the allowed tools and generate a
message.
`required` requires the model to call one or more of the allowed tools.
"""
tools: List[Dict[str, object]]
"""A list of tool definitions that the model should be allowed to call.
For the Responses API, the list of tool definitions might look like:
```json
[
{ "type": "function", "name": "get_weather" },
{ "type": "mcp", "server_label": "deepwiki" },
{ "type": "image_generation" }
]
```
"""
type: Literal["allowed_tools"]
"""Allowed tool configuration type. Always `allowed_tools`."""
| ToolChoiceAllowed |
python | redis__redis-py | redis/exceptions.py | {
"start": 5390,
"end": 5602
} | class ____(RedisClusterException):
"""
Raised when a transaction or watch is triggered in a pipeline
and not all keys or all commands belong to the same slot.
"""
pass
| CrossSlotTransactionError |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/control_flow_ops_test.py | {
"start": 95208,
"end": 98098
} | class ____(PForTestCase):
def test_create_variable_once(self):
x = array_ops.ones(shape=(3, 2, 2), dtype=dtypes.float32)
y = array_ops.ones(shape=(2, 3), dtype=dtypes.float32)
a_var = []
def f(z):
if not a_var:
a_var.append(variables.Variable(lambda: y, name="a"))
return math_ops.matmul(z, a_var[0] / 16)
pfor_control_flow_ops.vectorized_map(f, x)
@test_util.run_v2_only
def test_create_variable_repeated(self):
x = array_ops.ones(shape=(3, 2, 2), dtype=dtypes.float32)
y = array_ops.ones(shape=(2, 3), dtype=dtypes.float32)
def f(z):
a_var = variables.Variable(lambda: y, name="a") / 4
return math_ops.matmul(z, a_var / 16)
# Note that this error is only raised under v2 behavior.
with self.assertRaisesRegex(
ValueError, "singleton tf.Variable.*on the first call"):
pfor_control_flow_ops.vectorized_map(f, x)
@test_util.run_all_in_graph_and_eager_modes
def test_variable_shape(self):
v = resource_variable_ops.ResourceVariable([1, 2])
def loop_fn(_):
return resource_variable_ops.variable_shape(v.handle)
self._test_loop_fn(loop_fn, 2)
@test_util.run_all_in_graph_and_eager_modes
def test_variable_input(self):
v = resource_variable_ops.ResourceVariable([1, 2])
self.evaluate(v.initializer)
def loop_fn(x):
return x + 1
result = pfor_control_flow_ops.vectorized_map(loop_fn, v)
expected_result = [2, 3]
self.assertAllEqual(result, expected_result)
@test_util.run_all_in_graph_and_eager_modes
def testStatelessCase(self):
def branch1(x):
return x
def branch2(x):
return x + 1
def branch3(x):
return x + 2
x = constant_op.constant(10)
elems = constant_op.constant([1, 0, 0, 0, 2, 1, 0, 2, 0, 1])
def loop_fn(z_i):
return cond_v2.indexed_case(
z_i, [lambda: branch1(x), lambda: branch2(x), lambda: branch3(x)])
result = pfor_control_flow_ops.vectorized_map(
loop_fn, elems, fallback_to_while_loop=False)
expected_result = [11, 10, 10, 10, 12, 11, 10, 12, 10, 11]
self.assertAllEqual(result, expected_result)
@test_util.run_all_in_graph_and_eager_modes
def testStatelessCaseUnstacked(self):
def branch1(x):
return x + 1
def branch2(x):
return x + 2
# Unstacked case input
case_input = constant_op.constant(1)
@def_function.function
def function(z_i):
return cond_v2.indexed_case(case_input,
[lambda: branch1(z_i), lambda: branch2(z_i)])
inputs = constant_op.constant([0, 1, 1, 0, 1, 0, 1, 0, 0])
result = pfor_control_flow_ops.vectorized_map(
function, inputs, fallback_to_while_loop=False)
expected_result = [2, 3, 3, 2, 3, 2, 3, 2, 2]
self.assertAllEqual(result, expected_result)
if __name__ == "__main__":
test.main()
| VariableTest |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/filters/base.py | {
"start": 6176,
"end": 6855
} | class ____(Filter):
"""
Turn any callable into a Filter. The callable is supposed to not take any
arguments.
This can be used as a decorator::
@Condition
def feature_is_active(): # `feature_is_active` becomes a Filter.
return True
:param func: Callable which takes no inputs and returns a boolean.
"""
def __init__(self, func: Callable[[], bool]) -> None:
super().__init__()
self.func = func
def __call__(self) -> bool:
return self.func()
def __repr__(self) -> str:
return f"Condition({self.func!r})"
# Often used as type annotation.
FilterOrBool = Union[Filter, bool]
| Condition |
python | viewflow__viewflow | tests/workflow/test_fields__flow.py | {
"start": 155,
"end": 567
} | class ____(TestCase):
def test_crud(self):
obj = TestFlowModel.objects.create(flow_class=TestFieldsFlow)
self.assertEqual(obj.flow_class, TestFieldsFlow)
obj = TestFlowModel.objects.get()
self.assertEqual(obj.flow_class, TestFieldsFlow)
obj = TestFlowModel.objects.filter(flow_class=TestFieldsFlow).first()
self.assertEqual(obj.flow_class, TestFieldsFlow)
| Test |
python | pypa__setuptools | setuptools/_vendor/importlib_metadata/__init__.py | {
"start": 1053,
"end": 1352
} | class ____(ModuleNotFoundError):
"""The package was not found."""
def __str__(self) -> str:
return f"No package metadata was found for {self.name}"
@property
def name(self) -> str: # type: ignore[override]
(name,) = self.args
return name
| PackageNotFoundError |
python | networkx__networkx | networkx/algorithms/flow/utils.py | {
"start": 1084,
"end": 1270
} | class ____:
"""Active and inactive nodes in a level."""
__slots__ = ("active", "inactive")
def __init__(self):
self.active = set()
self.inactive = set()
| Level |
python | getsentry__sentry | src/sentry/api/serializers/models/environment.py | {
"start": 234,
"end": 359
} | class ____(TypedDict):
id: str
name: str
isHidden: bool
@register(Environment)
| EnvironmentProjectSerializerResponse |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E23.py | {
"start": 2930,
"end": 3046
} | class ____[A: object="foo"[::-1], B: object =[[["foo", "bar"]]], C: object= bytes]():
pass
| PEP696GoodWithEmptyBases |
python | scipy__scipy | scipy/sparse/linalg/_special_sparse_arrays.py | {
"start": 19386,
"end": 25405
} | class ____(LinearOperator):
"""
Construct a Sakurai matrix in various formats and its eigenvalues.
Constructs the "Sakurai" matrix motivated by reference [1]_:
square real symmetric positive definite and 5-diagonal
with the main diagonal ``[5, 6, 6, ..., 6, 6, 5], the ``+1`` and ``-1``
diagonals filled with ``-4``, and the ``+2`` and ``-2`` diagonals
made of ``1``. Its eigenvalues are analytically known to be
``16. * np.power(np.cos(0.5 * k * np.pi / (n + 1)), 4)``.
The matrix gets ill-conditioned with its size growing.
It is useful for testing and benchmarking sparse eigenvalue solvers
especially those taking advantage of its banded 5-diagonal structure.
See the notes below for details.
Parameters
----------
n : int
The size of the matrix.
dtype : dtype
Numerical type of the array. Default is ``np.int8``.
Methods
-------
toarray()
Construct a dense array from Laplacian data
tosparse()
Construct a sparse array from Laplacian data
tobanded()
The Sakurai matrix in the format for banded symmetric matrices,
i.e., (3, n) ndarray with 3 upper diagonals
placing the main diagonal at the bottom.
eigenvalues
All eigenvalues of the Sakurai matrix ordered ascending.
Notes
-----
Reference [1]_ introduces a generalized eigenproblem for the matrix pair
`A` and `B` where `A` is the identity so we turn it into an eigenproblem
just for the matrix `B` that this function outputs in various formats
together with its eigenvalues.
.. versionadded:: 1.12.0
References
----------
.. [1] T. Sakurai, H. Tadano, Y. Inadomi, and U. Nagashima,
"A moment-based method for large-scale generalized
eigenvalue problems",
Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004).
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg._special_sparse_arrays import Sakurai
>>> from scipy.linalg import eig_banded
>>> n = 6
>>> sak = Sakurai(n)
Since all matrix entries are small integers, ``'int8'`` is
the default dtype for storing matrix representations.
>>> sak.toarray()
array([[ 5, -4, 1, 0, 0, 0],
[-4, 6, -4, 1, 0, 0],
[ 1, -4, 6, -4, 1, 0],
[ 0, 1, -4, 6, -4, 1],
[ 0, 0, 1, -4, 6, -4],
[ 0, 0, 0, 1, -4, 5]], dtype=int8)
>>> sak.tobanded()
array([[ 1, 1, 1, 1, 1, 1],
[-4, -4, -4, -4, -4, -4],
[ 5, 6, 6, 6, 6, 5]], dtype=int8)
>>> sak.tosparse()
<DIAgonal sparse array of dtype 'int8'
with 24 stored elements (5 diagonals) and shape (6, 6)>
>>> np.array_equal(sak.dot(np.eye(n)), sak.tosparse().toarray())
True
>>> sak.eigenvalues()
array([0.03922866, 0.56703972, 2.41789479, 5.97822974,
10.54287655, 14.45473055])
>>> sak.eigenvalues(2)
array([0.03922866, 0.56703972])
The banded form can be used in scipy functions for banded matrices, e.g.,
>>> e = eig_banded(sak.tobanded(), eigvals_only=True)
>>> np.allclose(sak.eigenvalues(), e, atol= n * n * n * np.finfo(float).eps)
True
"""
def __init__(self, n, dtype=np.int8):
self.n = n
self.dtype = dtype
shape = (n, n)
super().__init__(dtype, shape)
def eigenvalues(self, m=None):
"""Return the requested number of eigenvalues.
Parameters
----------
m : int, optional
The positive number of smallest eigenvalues to return.
If not provided, then all eigenvalues will be returned.
Returns
-------
eigenvalues : `np.float64` array
The requested `m` smallest or all eigenvalues, in ascending order.
"""
if m is None:
m = self.n
k = np.arange(self.n + 1 -m, self.n + 1)
return np.flip(16. * np.power(np.cos(0.5 * k * np.pi / (self.n + 1)), 4))
def tobanded(self):
"""
Construct the Sakurai matrix as a banded array.
"""
d0 = np.r_[5, 6 * np.ones(self.n - 2, dtype=self.dtype), 5]
d1 = -4 * np.ones(self.n, dtype=self.dtype)
d2 = np.ones(self.n, dtype=self.dtype)
return np.array([d2, d1, d0]).astype(self.dtype)
def tosparse(self):
"""
Construct the Sakurai matrix in a sparse format.
"""
from scipy.sparse import diags_array
d = self.tobanded()
# the banded format has the main diagonal at the bottom
# `diags_array` inherits dtype from banded
return diags_array([d[0], d[1], d[2], d[1], d[0]], offsets=[-2, -1, 0, 1, 2],
shape=(self.n, self.n), dtype=d.dtype)
def toarray(self):
return self.tosparse().toarray()
def _matvec(self, x):
"""
Construct matrix-free callable banded-matrix-vector multiplication by
the Sakurai matrix without constructing or storing the matrix itself
using the knowledge of its entries and the 5-diagonal format.
"""
x = x.reshape(self.n, -1)
result_dtype = np.promote_types(x.dtype, self.dtype)
sx = np.zeros_like(x, dtype=result_dtype)
sx[0, :] = 5 * x[0, :] - 4 * x[1, :] + x[2, :]
sx[-1, :] = 5 * x[-1, :] - 4 * x[-2, :] + x[-3, :]
sx[1: -1, :] = (6 * x[1: -1, :] - 4 * (x[:-2, :] + x[2:, :])
+ np.pad(x[:-3, :], ((1, 0), (0, 0)))
+ np.pad(x[3:, :], ((0, 1), (0, 0))))
return sx
def _matmat(self, x):
"""
Construct matrix-free callable matrix-matrix multiplication by
the Sakurai matrix without constructing or storing the matrix itself
by reusing the ``_matvec(x)`` that supports both 1D and 2D arrays ``x``.
"""
return self._matvec(x)
def _adjoint(self):
    # The matrix is real symmetric, so it equals its own adjoint.
    return self

def _transpose(self):
    # Symmetric matrix: the transpose is the operator itself.
    return self
| Sakurai |
python | celery__celery | celery/backends/base.py | {
"start": 47870,
"end": 48464
} | class ____(BaseBackend):
"""Dummy result backend."""
_cache = {} # need this attribute to reset cache in tests.
def store_result(self, *args, **kwargs):
pass
def ensure_chords_allowed(self):
raise NotImplementedError(E_CHORD_NO_BACKEND.strip())
def _is_disabled(self, *args, **kwargs):
raise NotImplementedError(E_NO_BACKEND.strip())
def as_uri(self, *args, **kwargs):
return 'disabled://'
get_state = get_status = get_result = get_traceback = _is_disabled
get_task_meta_for = wait_for = get_many = _is_disabled
| DisabledBackend |
python | django__django | django/contrib/gis/db/backends/mysql/schema.py | {
"start": 227,
"end": 3607
} | class ____(DatabaseSchemaEditor):
# SQL template for creating a spatial index on a geometry column.
sql_add_spatial_index = "CREATE SPATIAL INDEX %(index)s ON %(table)s(%(column)s)"

def quote_value(self, value):
    """Quote ``value`` for SQL, stringifying geometry adapters first."""
    if isinstance(value, self.connection.ops.Adapter):
        value = str(value)
    return super().quote_value(value)
def _field_indexes_sql(self, model, field):
    """Return the index-creation SQL statements for ``field``.

    Geometry columns that request an index and are NOT NULL get a
    SPATIAL INDEX, but only when the table's storage engine supports
    it; otherwise the index is skipped and an error is logged.
    """
    if isinstance(field, GeometryField) and field.spatial_index and not field.null:
        with self.connection.cursor() as cursor:
            # Introspect the live table: spatial-index support depends
            # on the storage engine (see the log message below).
            supports_spatial_index = (
                self.connection.introspection.supports_spatial_index(
                    cursor, model._meta.db_table
                )
            )
        sql = self._create_spatial_index_sql(model, field)
        if supports_spatial_index:
            return [sql]
        else:
            logger.error(
                f"Cannot create SPATIAL INDEX {sql}. Only MyISAM, Aria, and InnoDB "
                f"support them.",
            )
            return []
    return super()._field_indexes_sql(model, field)
def remove_field(self, model, field):
    """Drop ``field`` from ``model``, removing its spatial index first.

    Index removal failures are logged rather than raised, because the
    index may never have been created on engines that don't support
    spatial indexes.
    """
    if isinstance(field, GeometryField) and field.spatial_index and not field.null:
        sql = self._delete_spatial_index_sql(model, field)
        try:
            self.execute(sql)
        except OperationalError:
            # Best-effort: don't abort the column drop over a missing index.
            logger.error(
                "Couldn't remove spatial index: %s (may be expected "
                "if your storage engine doesn't support them).",
                sql,
            )
    super().remove_field(model, field)
def _alter_field(
    self,
    model,
    old_field,
    new_field,
    old_type,
    new_type,
    old_db_params,
    new_db_params,
    strict=False,
):
    """Alter ``old_field`` into ``new_field``, syncing the spatial index.

    After the regular alteration, a spatial index is created or dropped
    when the field transitions into or out of the indexable state
    (a GeometryField with ``spatial_index=True`` and ``null=False``).
    """
    super()._alter_field(
        model,
        old_field,
        new_field,
        old_type,
        new_type,
        old_db_params,
        new_db_params,
        strict=strict,
    )
    # A field qualifies for a spatial index only when it is a geometry
    # column that requests one and is NOT NULL.
    old_field_spatial_index = (
        isinstance(old_field, GeometryField)
        and old_field.spatial_index
        and not old_field.null
    )
    new_field_spatial_index = (
        isinstance(new_field, GeometryField)
        and new_field.spatial_index
        and not new_field.null
    )
    if not old_field_spatial_index and new_field_spatial_index:
        # Field became indexable: add the spatial index.
        self.execute(self._create_spatial_index_sql(model, new_field))
    elif old_field_spatial_index and not new_field_spatial_index:
        # Field is no longer indexable: drop the stale spatial index.
        self.execute(self._delete_spatial_index_sql(model, old_field))
def _create_spatial_index_name(self, model, field):
return "%s_%s_id" % (model._meta.db_table, field.column)
def _create_spatial_index_sql(self, model, field):
index_name = self._create_spatial_index_name(model, field)
qn = self.connection.ops.quote_name
return self.sql_add_spatial_index % {
"index": qn(index_name),
"table": qn(model._meta.db_table),
"column": qn(field.column),
}
def _delete_spatial_index_sql(self, model, field):
index_name = self._create_spatial_index_name(model, field)
return self._delete_index_sql(model, index_name)
| MySQLGISSchemaEditor |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-pgvector/destination_pgvector/common/sql/sql_processor.py | {
"start": 2039,
"end": 3707
} | class ____(BaseModel, abc.ABC):
"""Common configuration for SQL connections."""
schema_name: str
"""The name of the schema to write to."""
table_prefix: Optional[str] = ""
"""A prefix to add to created table names."""
@abc.abstractmethod
def get_sql_alchemy_url(self) -> SecretString:
"""Returns a SQL Alchemy URL."""
...
@abc.abstractmethod
def get_database_name(self) -> str:
"""Return the name of the database."""
...
def connect(self) -> None:
    """Attempt to connect, and raise `AirbyteConnectionError` if the connection fails."""
    engine = self.get_sql_engine()
    try:
        # Open and immediately close one connection as a health check.
        engine.connect().close()
    except Exception as ex:
        raise exc.AirbyteConnectionError(
            message="Could not connect to the database.",
            guidance="Check the connection settings and try again.",
        ) from ex
def get_sql_engine(self) -> Engine:
    """Return a new SQL engine to use."""
    # Route unqualified table names into the configured schema.
    translate_map = {None: self.schema_name}
    return create_engine(
        url=self.get_sql_alchemy_url(),
        echo=DEBUG_MODE,
        execution_options={"schema_translate_map": translate_map},
    )
def get_vendor_client(self) -> object:
    """Return the vendor-specific client object.

    This is used for vendor-specific operations.

    Raises `NotImplementedError` if a custom vendor client is not defined.
    """
    raise NotImplementedError(
        f"The type '{type(self).__name__}' does not define a custom client."
    )
| SqlConfig |
python | charliermarsh__ruff | crates/ruff_python_parser/resources/valid/statement/class.py | {
"start": 931,
"end": 982
} | class ____[**P = [int, str]](): ...
# Mixed types
| Test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.