language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/1300-1399/1304.Find N Unique Integers Sum up to Zero/Solution.py | {
"start": 0,
"end": 235
} | class ____:
def sumZero(self, n: int) -> List[int]:
ans = []
for i in range(n >> 1):
ans.append(i + 1)
ans.append(-(i + 1))
if n & 1:
ans.append(0)
return ans
| Solution |
python | astropy__astropy | astropy/coordinates/errors.py | {
"start": 510,
"end": 602
} | class ____(ValueError):
"""
Raised if units are missing or invalid.
"""
| UnitsError |
python | scipy__scipy | scipy/stats/tests/test_mstats_basic.py | {
"start": 55310,
"end": 55629
} | class ____:
def test_result_attributes(self):
x = [1, 3, 5, 7, 9]
y = [2, 4, 6, 8, 10]
res = mstats.kruskal(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
# TODO: for all ttest functions, add tests with masked array inputs
| TestKruskal |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed3.py | {
"start": 2839,
"end": 2912
} | class ____(ParentNonOpen7):
a: NotRequired[ReadOnly[str]]
| ChildNotClosed7 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/duplicateDeclaration1.py | {
"start": 112,
"end": 1995
} | class ____:
# This should generate an error.
def f(self):
return 0
# This should generate an error.
def f(self):
return 0
def f(self):
return 1
# This should generate an error.
def g(self):
return 0
g: int
@property
def h(self) -> int:
return 1
@h.setter
def h(self, val: int):
pass
# This should generate an error.
@property
def j(self) -> int:
return 1
def j(self) -> int:
return 3
@overload
def a() -> None: ...
@overload
def a(x: int) -> None: ...
# This should generate an error.
def a(x: int = 3):
pass
def a(x: int = 3):
pass
# This should generate an error.
def b():
pass
b: int = 3
def func1(cond: bool):
if cond:
def a() -> int:
return 3
# This should generate an error because its inferred return
# type differs from b above.
def b():
return 3
# This should generate an error because the parameter names don't match.
def c(a: int, b: str) -> None:
return None
# This should generate an error because the parameter is positional-only.
def d(a: int) -> None:
return None
def e(a: int, /) -> None:
return None
# This should generate an error because the parameter is not positional-only.
f: Callable[[int], None] = lambda a: None
g: Callable[[int], None] = lambda a: None
else:
def a() -> int:
return 2
def b():
return 2
def c(a: int, c: str) -> None:
return None
d: Callable[[int], None] = lambda a: None
e: Callable[[int], None] = lambda a: None
def f(a: int) -> None:
return None
def g(a: int, /) -> None:
return None
| C |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_enum_extension.py | {
"start": 536,
"end": 744
} | class ____(Enum):
red: int
green: int
blue: int
def __init__(self, red: int, green: int, blue: int) -> None:
self.red = red
self.green = green
self.blue = blue
| ColorEnum |
python | langchain-ai__langchain | libs/core/langchain_core/indexing/base.py | {
"start": 14597,
"end": 15556
} | class ____(TypedDict):
"""A generic response for upsert operations.
The upsert response will be used by abstractions that implement an upsert
operation for content that can be upserted by ID.
Upsert APIs that accept inputs with IDs and generate IDs internally
will return a response that includes the IDs that succeeded and the IDs
that failed.
If there are no failures, the failed list will be empty, and the order
of the IDs in the succeeded list will match the order of the input documents.
If there are failures, the response becomes ill defined, and a user of the API
cannot determine which generated ID corresponds to which input document.
It is recommended for users explicitly attach the IDs to the items being
indexed to avoid this issue.
"""
succeeded: list[str]
"""The IDs that were successfully indexed."""
failed: list[str]
"""The IDs that failed to index."""
| UpsertResponse |
python | sanic-org__sanic | sanic/middleware.py | {
"start": 251,
"end": 331
} | class ____(IntEnum):
REQUEST = auto()
RESPONSE = auto()
| MiddlewareLocation |
python | huggingface__transformers | tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py | {
"start": 7315,
"end": 7609
} | class ____(unittest.TestCase, BackboneTesterMixin):
all_model_classes = (VitPoseBackbone,) if is_torch_available() else ()
config_class = VitPoseBackboneConfig
has_attentions = False
def setUp(self):
self.model_tester = VitPoseBackboneModelTester(self)
| VitPoseBackboneTest |
python | ray-project__ray | rllib/utils/tests/test_tf_utils.py | {
"start": 2422,
"end": 9137
} | class ____:
def __init__(self):
# Almost the same as above, but now returns the placeholders and
# gradient.
with tf.Graph().as_default():
loss, init, x_data, y_data = make_linear_network()
sess = tf.Session()
variables = tf_utils.TensorFlowVariables(loss, sess)
optimizer = tf.train.GradientDescentOptimizer(0.9)
grads = optimizer.compute_gradients(loss)
train = optimizer.apply_gradients(grads)
self.values = [loss, variables, init, sess, grads, train, [x_data, y_data]]
sess.run(init)
def training_step(self, weights):
_, variables, _, sess, grads, _, placeholders = self.values
variables.set_weights(weights)
return sess.run(
[grad[0] for grad in grads],
feed_dict=dict(zip(placeholders, [[1] * 100, [2] * 100])),
)
def get_weights(self):
return self.values[1].get_weights()
def test_tensorflow_variables(ray_init_2_cpus):
sess = tf.Session()
loss, init, _, _ = make_linear_network()
sess.run(init)
variables = tf_utils.TensorFlowVariables(loss, sess)
weights = variables.get_weights()
for (name, val) in weights.items():
weights[name] += 1.0
variables.set_weights(weights)
assert weights == variables.get_weights()
loss2, init2, _, _ = make_linear_network("w", "b")
sess.run(init2)
variables2 = tf_utils.TensorFlowVariables(loss2, sess)
weights2 = variables2.get_weights()
for (name, val) in weights2.items():
weights2[name] += 2.0
variables2.set_weights(weights2)
assert weights2 == variables2.get_weights()
flat_weights = variables2.get_flat() + 2.0
variables2.set_flat(flat_weights)
assert_almost_equal(flat_weights, variables2.get_flat())
sess = tf.Session()
variables3 = tf_utils.TensorFlowVariables([loss2], sess=sess)
assert variables3.sess == sess
# Test that the variable names for the two different nets are not
# modified by TensorFlow to be unique (i.e., they should already
# be unique because of the variable prefix).
def test_variable_name_collision(ray_init_2_cpus):
net1 = NetActor()
net2 = NetActor()
# This is checking that the variable names of the two nets are the
# same, i.e., that the names in the weight dictionaries are the same.
net1.values[0].set_weights(net2.values[0].get_weights())
# Test that TensorFlowVariables can take in addition variables through
# input_variables arg and with no loss.
def test_additional_variables_no_loss(ray_init_2_cpus):
net = LossActor(use_loss=False)
assert len(net.values[0].variables.items()) == 1
assert len(net.values[0].placeholders.items()) == 1
net.values[0].set_weights(net.values[0].get_weights())
# Test that TensorFlowVariables can take in addition variables through
# input_variables arg and with a loss.
def test_additional_variables_with_loss(ray_init_2_cpus):
net = LossActor()
assert len(net.values[0].variables.items()) == 3
assert len(net.values[0].placeholders.items()) == 3
net.values[0].set_weights(net.values[0].get_weights())
# Test that different networks on the same worker are independent and
# we can get/set their weights without any interaction.
def test_networks_independent(ray_init_2_cpus):
# Note we use only one worker to ensure that all of the remote
# functions run on the same worker.
net1 = NetActor()
net2 = NetActor()
# Make sure the two networks have different weights. TODO(rkn): Note
# that equality comparisons of numpy arrays normally does not work.
# This only works because at the moment they have size 1.
weights1 = net1.get_weights()
weights2 = net2.get_weights()
assert weights1 != weights2
# Set the weights and get the weights, and make sure they are
# unchanged.
new_weights1 = net1.set_and_get_weights(weights1)
new_weights2 = net2.set_and_get_weights(weights2)
assert weights1 == new_weights1
assert weights2 == new_weights2
# Swap the weights.
new_weights1 = net2.set_and_get_weights(weights1)
new_weights2 = net1.set_and_get_weights(weights2)
assert weights1 == new_weights1
assert weights2 == new_weights2
# This test creates an additional network on the driver so that the
# tensorflow variables on the driver and the worker differ.
def test_network_driver_worker_independent(ray_init_2_cpus):
# Create a network on the driver locally.
sess1 = tf.Session()
loss1, init1, _, _ = make_linear_network()
tf_utils.TensorFlowVariables(loss1, sess1)
sess1.run(init1)
net2 = ray.remote(NetActor).remote()
weights2 = ray.get(net2.get_weights.remote())
new_weights2 = ray.get(net2.set_and_get_weights.remote(net2.get_weights.remote()))
assert weights2 == new_weights2
def test_variables_control_dependencies(ray_init_2_cpus):
# Creates a network and appends a momentum optimizer.
sess = tf.Session()
loss, init, _, _ = make_linear_network()
minimizer = tf.train.MomentumOptimizer(0.9, 0.9).minimize(loss)
net_vars = tf_utils.TensorFlowVariables(minimizer, sess)
sess.run(init)
# Tests if all variables are properly retrieved, 2 variables and 2
# momentum variables.
assert len(net_vars.variables.items()) == 4
def test_remote_training_step(ray_init_2_cpus):
net = ray.remote(TrainActor).remote()
ray.get(net.training_step.remote(net.get_weights.remote()))
def test_remote_training_loss(ray_init_2_cpus):
net = ray.remote(TrainActor).remote()
net_values = TrainActor().values
loss, variables, _, sess, grads, train, placeholders = net_values
before_acc = sess.run(
loss, feed_dict=dict(zip(placeholders, [[2] * 100, [4] * 100]))
)
for _ in range(3):
gradients_list = ray.get(
[net.training_step.remote(variables.get_weights()) for _ in range(2)]
)
mean_grads = [
sum(gradients[i] for gradients in gradients_list) / len(gradients_list)
for i in range(len(gradients_list[0]))
]
feed_dict = {grad[0]: mean_grad for (grad, mean_grad) in zip(grads, mean_grads)}
sess.run(train, feed_dict=feed_dict)
after_acc = sess.run(
loss, feed_dict=dict(zip(placeholders, [[2] * 100, [4] * 100]))
)
assert before_acc < after_acc
if __name__ == "__main__":
# TODO(can): No tensorflow for python 3.12
if sys.version_info >= (3, 12):
sys.exit(0)
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
| TrainActor |
python | spack__spack | lib/spack/spack/fetch_strategy.py | {
"start": 50708,
"end": 51375
} | class ____(URLFetchStrategy):
"""FetchStrategy that pulls from an S3 bucket."""
url_attr = "s3"
@_needs_stage
def fetch(self):
if not self.url.startswith("s3://"):
raise spack.error.FetchError(
f"{self.__class__.__name__} can only fetch from s3:// urls."
)
if self.archive_file:
tty.debug(f"Already downloaded {self.archive_file}")
return
self._fetch_urllib(self.url)
if not self.archive_file:
raise FailedDownloadError(
RuntimeError(f"Missing archive {self.archive_file} after fetching")
)
@fetcher
| S3FetchStrategy |
python | pytorch__pytorch | benchmarks/framework_overhead_benchmark/SimpleAddModule.py | {
"start": 180,
"end": 371
} | class ____(torch.nn.Module):
def __init__(self, add_op):
super().__init__()
self.add_op = add_op
def forward(self, x, y):
return self.add_op(x, y)
| SimpleAddModule |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 72835,
"end": 73372
} | class ____(_PrintableStructure):
_fields_ = [
("version", c_uint),
("isNvleEnabled", c_uint),
]
def __init__(self):
super(c_nvmlNvLinkInfo_v1_t, self).__init__(version=nvmlNvLinkInfo_v1)
NVML_NVLINK_FIRMWARE_UCODE_TYPE_MSE = 0x1
NVML_NVLINK_FIRMWARE_UCODE_TYPE_NETIR = 0x2
NVML_NVLINK_FIRMWARE_UCODE_TYPE_NETIR_UPHY = 0x3
NVML_NVLINK_FIRMWARE_UCODE_TYPE_NETIR_CLN = 0x4
NVML_NVLINK_FIRMWARE_UCODE_TYPE_NETIR_DLN = 0x5
NVML_NVLINK_FIRMWARE_VERSION_LENGTH = 100
| c_nvmlNvLinkInfo_v1_t |
python | joke2k__faker | tests/providers/test_ssn.py | {
"start": 22339,
"end": 23211
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("es_ES")
Faker.seed(0)
def test_vat_id(self):
for _ in range(100):
assert re.search(r"^ES\w\d{8}$|^ES\d{8}\w$|^ES\w\d{7}\w$", self.fake.vat_id())
def test_nie(self):
for _ in range(100):
assert is_nie(self.fake.nie())
def test_nif(self):
for _ in range(100):
assert is_nif(self.fake.nif())
def test_cif(self):
for _ in range(100):
assert is_cif(self.fake.cif())
def test_nuss(self):
for _ in range(50):
nuss = self.fake.nuss()
assert isinstance(nuss, str)
assert 12 == len(nuss)
for _ in range(50):
nuss = self.fake.nuss(company=True)
assert isinstance(nuss, str)
assert 11 == len(nuss)
| TestEsES |
python | fluentpython__example-code-2e | 15-more-types/cafeteria/covariant.py | {
"start": 282,
"end": 1266
} | class ____(Generic[T_co]): # <2>
def __init__(self, beverage: T_co) -> None:
self.beverage = beverage
def dispense(self) -> T_co:
return self.beverage
def install(dispenser: BeverageDispenser[Juice]) -> None: # <3>
"""Install a fruit juice dispenser."""
# end::BEVERAGE_TYPES[]
################################################ covariant dispenser
# tag::INSTALL_JUICE_DISPENSERS[]
juice_dispenser = BeverageDispenser(Juice())
install(juice_dispenser)
orange_juice_dispenser = BeverageDispenser(OrangeJuice())
install(orange_juice_dispenser)
# end::INSTALL_JUICE_DISPENSERS[]
################################################ more general dispenser
# tag::INSTALL_BEVERAGE_DISPENSER[]
beverage_dispenser = BeverageDispenser(Beverage())
install(beverage_dispenser)
## mypy: Argument 1 to "install" has
## incompatible type "BeverageDispenser[Beverage]"
## expected "BeverageDispenser[Juice]"
# end::INSTALL_BEVERAGE_DISPENSER[]
| BeverageDispenser |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_middleware.py | {
"start": 9126,
"end": 9655
} | class ____(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.middleware = NullCharactersMiddleware(None)
def test_request_with_null_chars(self):
request = self.factory.get("/?language=en\x00es&project_slug=myproject")
response = self.middleware(request)
self.assertContains(
response,
"There are NULL (0x00) characters in at least one of the parameters passed to the request.",
status_code=400,
)
| TestNullCharactersMiddleware |
python | redis__redis-py | redis/multidb/healthcheck.py | {
"start": 1247,
"end": 1904
} | class ____(HealthCheckPolicy):
def __init__(self, health_check_probes: int, health_check_delay: float):
if health_check_probes < 1:
raise ValueError("health_check_probes must be greater than 0")
self._health_check_probes = health_check_probes
self._health_check_delay = health_check_delay
@property
def health_check_probes(self) -> int:
return self._health_check_probes
@property
def health_check_delay(self) -> float:
return self._health_check_delay
@abstractmethod
def execute(self, health_checks: List[HealthCheck], database) -> bool:
pass
| AbstractHealthCheckPolicy |
python | apache__airflow | providers/common/sql/tests/unit/common/sql/sensors/test_sql.py | {
"start": 1189,
"end": 11014
} | class ____:
def setup_method(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, schedule=None, default_args=args)
@pytest.mark.db_test
def test_unsupported_conn_type(self):
op = SqlSensor(
task_id="sql_sensor_check",
conn_id="redis_default",
sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
dag=self.dag,
)
with pytest.raises(AirflowException):
op.execute({})
@pytest.mark.backend("mysql")
def test_sql_sensor_mysql(self):
op1 = SqlSensor(
task_id="sql_sensor_check_1",
conn_id="mysql_default",
sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
dag=self.dag,
)
op1.execute({})
op2 = SqlSensor(
task_id="sql_sensor_check_2",
conn_id="mysql_default",
sql="SELECT count(%s) FROM INFORMATION_SCHEMA.TABLES",
parameters=["table_name"],
dag=self.dag,
)
op2.execute({})
@pytest.mark.backend("postgres")
def test_sql_sensor_postgres(self):
op1 = SqlSensor(
task_id="sql_sensor_check_1",
conn_id="postgres_default",
sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
dag=self.dag,
)
op1.execute({})
op2 = SqlSensor(
task_id="sql_sensor_check_2",
conn_id="postgres_default",
sql="SELECT count(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = %s",
parameters=["information_schema"],
dag=self.dag,
)
op2.execute({})
@mock.patch("airflow.providers.common.sql.sensors.sql.BaseHook")
def test_sql_sensor_postgres_poke(self, mock_hook):
op = SqlSensor(
task_id="sql_sensor_check",
conn_id="postgres_default",
sql="SELECT 1",
)
mock_hook.get_connection.return_value.get_hook.return_value = mock.MagicMock(spec=DbApiHook)
mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records
mock_get_records.return_value = []
assert not op.poke({})
mock_get_records.return_value = [[None]]
assert not op.poke({})
mock_get_records.return_value = [["None"]]
assert op.poke({})
mock_get_records.return_value = [[0.0]]
assert not op.poke({})
mock_get_records.return_value = [[0]]
assert not op.poke({})
mock_get_records.return_value = [["0"]]
assert op.poke({})
mock_get_records.return_value = [["1"]]
assert op.poke({})
@mock.patch("airflow.providers.common.sql.sensors.sql.BaseHook")
def test_sql_sensor_postgres_poke_fail_on_empty(self, mock_hook):
op = SqlSensor(
task_id="sql_sensor_check",
conn_id="postgres_default",
sql="SELECT 1",
fail_on_empty=True,
)
mock_hook.get_connection.return_value.get_hook.return_value = mock.MagicMock(spec=DbApiHook)
mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records
mock_get_records.return_value = []
with pytest.raises(AirflowException):
op.poke({})
@mock.patch("airflow.providers.common.sql.sensors.sql.BaseHook")
def test_sql_sensor_postgres_poke_success(self, mock_hook):
op = SqlSensor(
task_id="sql_sensor_check", conn_id="postgres_default", sql="SELECT 1", success=lambda x: x in [1]
)
mock_hook.get_connection.return_value.get_hook.return_value = mock.MagicMock(spec=DbApiHook)
mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records
mock_get_records.return_value = []
assert not op.poke({})
mock_get_records.return_value = [[1]]
assert op.poke({})
mock_get_records.return_value = [["1"]]
assert not op.poke({})
@mock.patch("airflow.providers.common.sql.sensors.sql.BaseHook")
def test_sql_sensor_postgres_poke_failure(
self,
mock_hook,
):
op = SqlSensor(
task_id="sql_sensor_check",
conn_id="postgres_default",
sql="SELECT 1",
failure=lambda x: x in [1],
)
mock_hook.get_connection.return_value.get_hook.return_value = mock.MagicMock(spec=DbApiHook)
mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records
mock_get_records.return_value = []
assert not op.poke({})
mock_get_records.return_value = [[1]]
with pytest.raises(AirflowException):
op.poke({})
@mock.patch("airflow.providers.common.sql.sensors.sql.BaseHook")
def test_sql_sensor_postgres_poke_failure_success(
self,
mock_hook,
):
op = SqlSensor(
task_id="sql_sensor_check",
conn_id="postgres_default",
sql="SELECT 1",
failure=lambda x: x in [1],
success=lambda x: x in [2],
)
mock_hook.get_connection.return_value.get_hook.return_value = mock.MagicMock(spec=DbApiHook)
mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records
mock_get_records.return_value = []
assert not op.poke({})
mock_get_records.return_value = [[1]]
with pytest.raises(AirflowException):
op.poke({})
mock_get_records.return_value = [[2]]
assert op.poke({})
@mock.patch("airflow.providers.common.sql.sensors.sql.BaseHook")
def test_sql_sensor_postgres_poke_failure_success_same(self, mock_hook):
op = SqlSensor(
task_id="sql_sensor_check",
conn_id="postgres_default",
sql="SELECT 1",
failure=lambda x: x in [1],
success=lambda x: x in [1],
)
mock_hook.get_connection.return_value.get_hook.return_value = mock.MagicMock(spec=DbApiHook)
mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records
mock_get_records.return_value = []
assert not op.poke({})
mock_get_records.return_value = [[1]]
with pytest.raises(AirflowException):
op.poke({})
@mock.patch("airflow.providers.common.sql.sensors.sql.BaseHook")
def test_sql_sensor_postgres_poke_invalid_failure(self, mock_hook):
op = SqlSensor(
task_id="sql_sensor_check",
conn_id="postgres_default",
sql="SELECT 1",
failure=[1],
)
mock_hook.get_connection.return_value.get_hook.return_value = mock.MagicMock(spec=DbApiHook)
mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records
mock_get_records.return_value = [[1]]
with pytest.raises(AirflowException) as ctx:
op.poke({})
assert str(ctx.value) == "self.failure is present, but not callable -> [1]"
@mock.patch("airflow.providers.common.sql.sensors.sql.BaseHook")
def test_sql_sensor_postgres_poke_invalid_success(
self,
mock_hook,
):
op = SqlSensor(
task_id="sql_sensor_check",
conn_id="postgres_default",
sql="SELECT 1",
success=[1],
)
mock_hook.get_connection.return_value.get_hook.return_value = mock.MagicMock(spec=DbApiHook)
mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records
mock_get_records.return_value = [[1]]
with pytest.raises(AirflowException) as ctx:
op.poke({})
assert str(ctx.value) == "self.success is present, but not callable -> [1]"
@pytest.mark.backend("postgres")
def test_sql_sensor_postgres_with_selector(self):
op1 = SqlSensor(
task_id="sql_sensor_check_1",
conn_id="postgres_default",
sql="SELECT 0, 1",
dag=self.dag,
success=lambda x: x in [1],
failure=lambda x: x in [0],
selector=lambda x: x[1],
)
op1.execute({})
op2 = SqlSensor(
task_id="sql_sensor_check_2",
conn_id="postgres_default",
sql="SELECT 0, 1",
dag=self.dag,
success=lambda x: x in [1],
failure=lambda x: x in [0],
selector=lambda x: x[0],
)
with pytest.raises(AirflowException):
op2.poke({})
@pytest.mark.db_test
def test_sql_sensor_hook_params(self):
op = SqlSensor(
task_id="sql_sensor_hook_params",
conn_id="postgres_default",
sql="SELECT 1",
hook_params={
"log_sql": False,
},
)
hook = op._get_hook()
assert hook.log_sql == op.hook_params["log_sql"]
@mock.patch("airflow.providers.common.sql.sensors.sql.BaseHook")
def test_sql_sensor_templated_parameters(self, mock_base_hook):
op = SqlSensor(
task_id="sql_sensor_templated_parameters",
conn_id="snowflake_default",
sql="SELECT %(something)s",
parameters={"something": "{{ logical_date }}"},
)
op.render_template_fields(context={"logical_date": "1970-01-01"})
mock_base_hook.get_connection.return_value.get_hook.return_value = mock.MagicMock(spec=DbApiHook)
mock_get_records = mock_base_hook.get_connection.return_value.get_hook.return_value.get_records
op.execute(context=mock.MagicMock())
mock_get_records.assert_called_once_with("SELECT %(something)s", {"something": "1970-01-01"})
| TestSqlSensor |
python | dask__dask | dask/dataframe/dask_expr/_reductions.py | {
"start": 41502,
"end": 41615
} | class ____(NFirst):
reduction_chunk = staticmethod(_nlast)
reduction_aggregate = staticmethod(_nlast)
| NLast |
python | Pylons__pyramid | tests/test_traversal.py | {
"start": 43587,
"end": 44051
} | class ____(unittest.TestCase):
def _getTargetClass(self):
from pyramid.traversal import DefaultRootFactory
return DefaultRootFactory
def _makeOne(self, environ):
return self._getTargetClass()(environ)
def test_it(self):
class DummyRequest:
pass
root = self._makeOne(DummyRequest())
self.assertEqual(root.__parent__, None)
self.assertEqual(root.__name__, None)
| TestDefaultRootFactory |
python | huggingface__transformers | src/transformers/models/llama/modeling_llama.py | {
"start": 12919,
"end": 14701
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: LlamaConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = LlamaAttention(config=config, layer_idx=layer_idx)
self.mlp = LlamaMLP(config)
self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| LlamaDecoderLayer |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 85734,
"end": 86909
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"name",
"owner_id",
"description",
"visibility",
"template",
"homepage_url",
"has_wiki_enabled",
"has_issues_enabled",
"team_id",
"client_mutation_id",
)
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
owner_id = sgqlc.types.Field(ID, graphql_name="ownerId")
description = sgqlc.types.Field(String, graphql_name="description")
visibility = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryVisibility), graphql_name="visibility"
)
template = sgqlc.types.Field(Boolean, graphql_name="template")
homepage_url = sgqlc.types.Field(URI, graphql_name="homepageUrl")
has_wiki_enabled = sgqlc.types.Field(Boolean, graphql_name="hasWikiEnabled")
has_issues_enabled = sgqlc.types.Field(Boolean, graphql_name="hasIssuesEnabled")
team_id = sgqlc.types.Field(ID, graphql_name="teamId")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| CreateRepositoryInput |
python | neetcode-gh__leetcode | python/1905-count-sub-islands.py | {
"start": 0,
"end": 935
} | class ____:
def countSubIslands(self, grid1: List[List[int]], grid2: List[List[int]]) -> int:
ROWS, COLS = len(grid1), len(grid1[0])
visit = set()
def dfs(r, c):
if (
r < 0
or c < 0
or r == ROWS
or c == COLS
or grid2[r][c] == 0
or (r, c) in visit
):
return True
visit.add((r, c))
res = True
if grid1[r][c] == 0:
res = False
res = dfs(r - 1, c) and res
res = dfs(r + 1, c) and res
res = dfs(r, c - 1) and res
res = dfs(r, c + 1) and res
return res
count = 0
for r in range(ROWS):
for c in range(COLS):
if grid2[r][c] and (r, c) not in visit and dfs(r, c):
count += 1
return count
| Solution |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/stats.py | {
"start": 2042,
"end": 3921
} | class ____(abc.ABC):
"""
A StatsWriter abstract class. A StatsWriter takes in a category, key, scalar value, and step
and writes it out by some method.
"""
def on_add_stat(
self,
category: str,
key: str,
value: float,
aggregation: StatsAggregationMethod = StatsAggregationMethod.AVERAGE,
) -> None:
"""
Callback method for handling an individual stat value as reported to the StatsReporter add_stat
or set_stat methods.
:param category: Category of the statistics. Usually this is the behavior name.
:param key: The type of statistic, e.g. Environment/Reward.
:param value: The value of the statistic.
:param aggregation: The aggregation method for the statistic, default StatsAggregationMethod.AVERAGE.
"""
pass
@abc.abstractmethod
def write_stats(
self, category: str, values: Dict[str, StatsSummary], step: int
) -> None:
"""
Callback to record training information
:param category: Category of the statistics. Usually this is the behavior name.
:param values: Dictionary of statistics.
:param step: The current training step.
:return:
"""
pass
def add_property(
self, category: str, property_type: StatsPropertyType, value: Any
) -> None:
"""
Add a generic property to the StatsWriter. This could be e.g. a Dict of hyperparameters,
a max step count, a trainer type, etc. Note that not all StatsWriters need to be compatible
with all types of properties. For instance, a TB writer doesn't need a max step.
:param category: The category that the property belongs to.
:param property_type: The type of property.
:param value: The property itself.
"""
pass
| StatsWriter |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_format05.py | {
"start": 315,
"end": 1561
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_format05.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [46319488, 46335872]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"marker": {"type": "automatic"},
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | ray-project__ray | rllib/offline/dataset_reader.py | {
"start": 7195,
"end": 11719
} | class ____(InputReader):
"""Reader object that loads data from Ray Dataset.
Examples:
config = {
"input": "dataset",
"input_config": {
"format": "json",
# A single data file, a directory, or anything
# that ray.data.dataset recognizes.
"paths": "/tmp/sample_batches/",
# By default, parallelism=num_workers.
"parallelism": 3,
# Dataset allocates 0.5 CPU for each reader by default.
# Adjust this value based on the size of your offline dataset.
"num_cpus_per_read_task": 0.5,
}
}
"""
@PublicAPI
def __init__(self, ds: ray.data.Dataset, ioctx: Optional[IOContext] = None):
"""Initializes a DatasetReader instance.
Args:
ds: Ray dataset to sample from.
"""
self._ioctx = ioctx or IOContext()
self._default_policy = self.policy_map = None
self.preprocessor = None
self._dataset = ds
self.count = None if not self._dataset else self._dataset.count()
# do this to disable the ray data stdout logging
ray.data.DataContext.get_current().enable_progress_bars = False
# the number of steps to return per call to next()
self.batch_size = self._ioctx.config.get("train_batch_size", 1)
num_workers = self._ioctx.config.get("num_env_runners", 0)
seed = self._ioctx.config.get("seed", None)
if num_workers:
self.batch_size = max(math.ceil(self.batch_size / num_workers), 1)
# We allow the creation of a non-functioning None DatasetReader.
# It's useful for example for a non-rollout local worker.
if ds:
if self._ioctx.worker is not None:
self._policy_map = self._ioctx.worker.policy_map
self._default_policy = self._policy_map.get(DEFAULT_POLICY_ID)
self.preprocessor = (
self._ioctx.worker.preprocessors.get(DEFAULT_POLICY_ID)
if not self._ioctx.config.get("_disable_preprocessors", False)
else None
)
print(
f"DatasetReader {self._ioctx.worker_index} has {ds.count()}, samples."
)
def iterator():
while True:
ds = self._dataset.random_shuffle(seed=seed)
yield from ds.iter_rows()
self._iter = iterator()
else:
self._iter = None
@override(InputReader)
def next(self) -> SampleBatchType:
# next() should not get called on None DatasetReader.
assert self._iter is not None
ret = []
count = 0
while count < self.batch_size:
d = next(self._iter)
# Columns like obs are compressed when written by DatasetWriter.
d = from_json_data(d, self._ioctx.worker)
count += d.count
d = self._preprocess_if_needed(d)
d = postprocess_actions(d, self._ioctx)
d = self._postprocess_if_needed(d)
ret.append(d)
ret = concat_samples(ret)
return ret
def _preprocess_if_needed(self, batch: SampleBatchType) -> SampleBatchType:
# TODO: @kourosh, preprocessor is only supported for single agent case.
if self.preprocessor:
for key in (SampleBatch.CUR_OBS, SampleBatch.NEXT_OBS):
if key in batch:
batch[key] = np.stack(
[self.preprocessor.transform(s) for s in batch[key]]
)
return batch
def _postprocess_if_needed(self, batch: SampleBatchType) -> SampleBatchType:
if not self._ioctx.config.get("postprocess_inputs"):
return batch
if isinstance(batch, SampleBatch):
out = []
for sub_batch in batch.split_by_episode():
if self._default_policy is not None:
out.append(self._default_policy.postprocess_trajectory(sub_batch))
else:
out.append(sub_batch)
return concat_samples(out)
else:
# TODO(ekl) this is trickier since the alignments between agent
# trajectories in the episode are not available any more.
raise NotImplementedError(
"Postprocessing of multi-agent data not implemented yet."
)
| DatasetReader |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/metadata/metadata_value.py | {
"start": 21980,
"end": 22941
} | class ____(
IHaveNew,
MetadataValue[Optional[Union[Sequence[Any], Mapping[str, Any]]]],
):
"""Container class for JSON metadata entry data.
Args:
data (Union[Sequence[Any], Dict[str, Any]]): The JSON data.
"""
data: PublicAttr[Optional[Union[Sequence[Any], Mapping[str, Any]]]]
def __new__(cls, data: Optional[Union[Sequence[Any], Mapping[str, Any]]]):
try:
# check that the value is JSON serializable
seven.dumps(data)
except TypeError:
raise DagsterInvalidMetadata("Value is not JSON serializable.")
return super().__new__(cls, data=data)
@public
@property
def value(self) -> Optional[Union[Sequence[Any], Mapping[str, Any]]]:
"""Optional[Union[Sequence[Any], Dict[str, Any]]]: The wrapped JSON data."""
return self.data
@whitelist_for_serdes(storage_name="MarkdownMetadataEntryData")
@record(kw_only=False)
@public
| JsonMetadataValue |
python | getsentry__sentry | src/sentry/web/frontend/debug/debug_organization_invite_request.py | {
"start": 504,
"end": 1563
} | class ____(View):
def get(self, request: HttpRequest) -> HttpResponse:
org = Organization(id=1, slug="default", name="Default")
requester = User(name="Rick Swan", id=2, email="rick@gmail.com")
OrganizationMember(user_id=requester.id, organization=org, email="james@gmail.com")
pending_member = OrganizationMember(
email="new_member@gmail.com",
organization=org,
inviter_id=requester.id,
invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value,
)
recipient = User(name="James Bond", id=3, email="james@gmail.com")
recipient_member = OrganizationMember(
user_id=recipient.id, organization=org, email="james@gmail.com"
)
notification = InviteRequestNotification(pending_member, requester)
# hack to avoid a query
notification.role_based_recipient_strategy.set_member_in_cache(recipient_member)
return render_preview_email_for_notification(notification, recipient)
| DebugOrganizationInviteRequestEmailView |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 33348,
"end": 36641
} | class ____:
def test_repr(self):
if self.repr is not None:
self.assertEqual(repr(self.set), self.repr)
def check_repr_against_values(self):
text = repr(self.set)
self.assertTrue(text.startswith('{'))
self.assertTrue(text.endswith('}'))
result = text[1:-1].split(', ')
result.sort()
sorted_repr_values = [repr(value) for value in self.values]
sorted_repr_values.sort()
self.assertEqual(result, sorted_repr_values)
def test_length(self):
self.assertEqual(len(self.set), self.length)
def test_self_equality(self):
self.assertEqual(self.set, self.set)
def test_equivalent_equality(self):
self.assertEqual(self.set, self.dup)
def test_copy(self):
self.assertEqual(self.set.copy(), self.dup)
def test_self_union(self):
result = self.set | self.set
self.assertEqual(result, self.dup)
def test_empty_union(self):
result = self.set | empty_set
self.assertEqual(result, self.dup)
def test_union_empty(self):
result = empty_set | self.set
self.assertEqual(result, self.dup)
def test_self_intersection(self):
result = self.set & self.set
self.assertEqual(result, self.dup)
def test_empty_intersection(self):
result = self.set & empty_set
self.assertEqual(result, empty_set)
def test_intersection_empty(self):
result = empty_set & self.set
self.assertEqual(result, empty_set)
def test_self_isdisjoint(self):
result = self.set.isdisjoint(self.set)
self.assertEqual(result, not self.set)
def test_empty_isdisjoint(self):
result = self.set.isdisjoint(empty_set)
self.assertEqual(result, True)
def test_isdisjoint_empty(self):
result = empty_set.isdisjoint(self.set)
self.assertEqual(result, True)
def test_self_symmetric_difference(self):
result = self.set ^ self.set
self.assertEqual(result, empty_set)
def test_empty_symmetric_difference(self):
result = self.set ^ empty_set
self.assertEqual(result, self.set)
def test_self_difference(self):
result = self.set - self.set
self.assertEqual(result, empty_set)
def test_empty_difference(self):
result = self.set - empty_set
self.assertEqual(result, self.dup)
def test_empty_difference_rev(self):
result = empty_set - self.set
self.assertEqual(result, empty_set)
def test_iteration(self):
for v in self.set:
self.assertIn(v, self.values)
setiter = iter(self.set)
self.assertEqual(setiter.__length_hint__(), len(self.set))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.set, proto)
copy = pickle.loads(p)
self.assertEqual(self.set, copy,
"%s != %s" % (self.set, copy))
def test_issue_37219(self):
with self.assertRaises(TypeError):
set().difference(123)
with self.assertRaises(TypeError):
set().difference_update(123)
#------------------------------------------------------------------------------
| _TestBasicOps |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 866128,
"end": 866612
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of PublishSponsorsTier"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "sponsors_tier")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
sponsors_tier = sgqlc.types.Field("SponsorsTier", graphql_name="sponsorsTier")
"""The tier that was published."""
| PublishSponsorsTierPayload |
python | huggingface__transformers | src/transformers/models/bark/modeling_bark.py | {
"start": 10956,
"end": 12795
} | class ____(GradientCheckpointingLayer):
def __init__(self, config, is_causal=False, layer_idx=None):
super().__init__()
if is_causal:
# if causal, the layerNorm bias is optional to stick with Bark choice of leaving optional bias
# in AutoRegressive models (corresponding to the "Text" and the "Coarse" modules)
self.layernorm_1 = nn.LayerNorm(config.hidden_size, bias=config.bias)
self.layernorm_2 = nn.LayerNorm(config.hidden_size, bias=config.bias)
else:
self.layernorm_1 = nn.LayerNorm(config.hidden_size)
self.layernorm_2 = nn.LayerNorm(config.hidden_size)
self.attn = BARK_ATTENTION_CLASSES[config._attn_implementation](
config, is_causal=is_causal, layer_idx=layer_idx
)
self.mlp = BarkMLP(config)
def forward(
self,
hidden_states,
past_key_values=None,
attention_mask=None,
use_cache=False,
output_attentions=False,
cache_position=None,
):
intermediary_hidden_states = self.layernorm_1(hidden_states)
attn_outputs = self.attn(
intermediary_hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
cache_position=cache_position,
)
attn_output = attn_outputs[0] # output_attn: output, present_key_values, (attn_weights)
outputs = attn_outputs[1:]
intermediary_hidden_states = hidden_states + attn_output
intermediary_hidden_states = intermediary_hidden_states + self.mlp(
self.layernorm_2(intermediary_hidden_states)
)
return (intermediary_hidden_states,) + outputs
@auto_docstring
| BarkBlock |
python | google__jax | jax/experimental/roofline/roofline.py | {
"start": 1846,
"end": 2681
} | class ____:
shape: tuple[int, ...]
dtype: ValidRooflineDtype
@classmethod
def from_aval(cls, aval: core.AbstractValue) -> RooflineShape:
if not isinstance(aval, core.ShapedArray):
raise TypeError(f"Expected ShapedArray, got {type(aval)}.")
if not isinstance(aval.dtype, ValidRooflineDtype):
raise TypeError(
f"Expected numpy or prng.KeyTy dtype, got {type(aval.dtype)}."
)
return cls(shape=aval.shape, dtype=aval.dtype)
@property
def size(self) -> int:
return int(np.prod(self.shape))
@property
def bytes(self) -> int:
return int(self.size * self.dtype.itemsize)
@classmethod
def total_bytes(cls, avals: Sequence[core.AbstractValue]) -> int:
return sum(cls.from_aval(aval).bytes for aval in avals)
@dataclass(frozen=True, slots=True, kw_only=True)
| RooflineShape |
python | matplotlib__matplotlib | lib/matplotlib/backends/_backend_tk.py | {
"start": 44053,
"end": 44284
} | class ____(backend_tools.SaveFigureBase):
def trigger(self, *args):
NavigationToolbar2Tk.save_figure(
self._make_classic_style_pseudo_toolbar())
@backend_tools._register_tool_class(FigureCanvasTk)
| SaveFigureTk |
python | psf__requests | src/requests/adapters.py | {
"start": 3108,
"end": 4316
} | class ____:
"""The Base Transport Adapter"""
def __init__(self):
super().__init__()
def send(
self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
raise NotImplementedError
def close(self):
"""Cleans up adapter specific items."""
raise NotImplementedError
| BaseAdapter |
python | kamyu104__LeetCode-Solutions | Python/longest-common-subsequence.py | {
"start": 41,
"end": 659
} | class ____(object):
def longestCommonSubsequence(self, text1, text2):
"""
:type text1: str
:type text2: str
:rtype: int
"""
if len(text1) < len(text2):
return self.longestCommonSubsequence(text2, text1)
dp = [[0 for _ in xrange(len(text2)+1)] for _ in xrange(2)]
for i in xrange(1, len(text1)+1):
for j in xrange(1, len(text2)+1):
dp[i%2][j] = dp[(i-1)%2][j-1]+1 if text1[i-1] == text2[j-1] \
else max(dp[(i-1)%2][j], dp[i%2][j-1])
return dp[len(text1)%2][len(text2)]
| Solution |
python | tensorflow__tensorflow | tensorflow/python/compiler/xla/tests/pjrt_autoclustering_test.py | {
"start": 1240,
"end": 3060
} | class ____(test.TestCase):
def test_xla_compile_and_run_on_gpu_device(self):
if not test.is_gpu_available() or not test.is_built_with_gpu_support():
test.skipTest("Test only applicable on GPU")
@def_function.function
def arithmetic(x):
return 2 * x + 1
@def_function.function
def conditional(x):
# cond uses switch and merge, which are not supported by XLA based on
# https://docs.google.com/spreadsheets/d/1H8AIDdnlyyaWZOYN3WpBVNOmGOS_M8OyF7IA7kL3fjk/edit?resourcekey=0-I-mIp472YuK8FuBa5Zmzmg#gid=139369773
return cond.cond(math_ops.reduce_sum(x) < 5, lambda: x + x, lambda: x)
@def_function.function
def func(x, y):
return (arithmetic(x) + conditional(y) ** 2) / 2
i1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
# Simple case: all ops supported by XLA
with ops.device("/device:GPU:0"):
with context.collect_graphs(optimized=True) as graphs:
result = arithmetic(i1)
self.assertAllClose(result.numpy(), [[3.0, 5.0], [7.0, 9.0]], atol=1e-05)
graph_ops = [n.op for n in graphs[0].node]
self.assertContainsSubset(["_XlaCompile", "_XlaRun"], graph_ops)
# Complex case: includes ops not supported by XLA (switch and merge)
i2 = constant_op.constant([[5.0, 6.0], [7.0, 8.0]])
with ops.device("/device:GPU:0"):
with context.collect_graphs(optimized=True) as graphs:
result = func(i1, i2)
self.assertAllClose(result.numpy(), [[14.0, 20.5], [28, 36.5]], atol=1e-05)
graph_ops = [n.op for n in graphs[0].node]
self.assertContainsSubset(["_XlaCompile", "_XlaRun"], graph_ops)
# because of the cond, not all ops can be combined into a single _XlaCompile
self.assertGreater(graph_ops.count("_XlaCompile"), 1)
if __name__ == "__main__":
test.main()
| PjrtAutoclusteringTest |
python | ZoranPandovski__al-go-rithms | data_structures/Linked_list/Python/Swap_Nodes_Linkedlist.py | {
"start": 151,
"end": 878
} | class ____:
def swapPairs(self, head: Optional[ListNode]) -> Optional[ListNode]:
if head==None:
return None
if head.next==None:
return head
tmp1=head
tmp2=head.next
while tmp1:
if tmp1 !=None and tmp2 !=None:
print(tmp1.val)
print(tmp2.val)
tmpx_val=tmp1.val
tmpx=tmp1
tmp1.val=tmp2.val
tmp2.val=tmpx_val
tmp1=tmp2.next
if tmp2.next!=None:
tmp2=tmp2.next.next
else:
temp2=None
else:
break
return head
| Solution |
python | django__django | tests/pagination/models.py | {
"start": 31,
"end": 227
} | class ____(models.Model):
headline = models.CharField(max_length=100, default="Default headline")
pub_date = models.DateTimeField()
def __str__(self):
return self.headline
| Article |
python | getsentry__sentry | tests/sentry/seer/endpoints/test_seer_rpc.py | {
"start": 1376,
"end": 2357
} | class ____(APITestCase):
@staticmethod
def _get_path(method_name: str) -> str:
return reverse(
"sentry-api-0-seer-rpc-service",
kwargs={"method_name": method_name},
)
def auth_header(self, path: str, data: dict | str) -> str:
if isinstance(data, dict):
data = orjson.dumps(data).decode()
signature = generate_request_signature(path, data.encode())
return f"rpcsignature {signature}"
def test_invalid_endpoint(self) -> None:
path = self._get_path("not_a_method")
response = self.client.post(path)
assert response.status_code == 403
def test_404(self) -> None:
path = self._get_path("get_organization_slug")
data: dict[str, Any] = {"args": {"org_id": 1}, "meta": {}}
response = self.client.post(
path, data=data, HTTP_AUTHORIZATION=self.auth_header(path, data)
)
assert response.status_code == 404
| TestSeerRpc |
python | numba__numba | numba/tests/test_hashing.py | {
"start": 12557,
"end": 14826
} | class ____(BaseTest):
"""
Test hashing of tuples.
"""
def setUp(self):
if numpy_version >= (2, 0) and numpy_version <= (2, 1):
# Temporarily set promotions state to legacy,
# to ensure overflow logic works
self.initial_state = np._get_promotion_state()
np._set_promotion_state("legacy")
return super().setUp()
def tearDown(self) -> None:
if numpy_version >= (2, 0) and numpy_version <= (2, 1):
# Reset numpy promotion state to initial state
# since the setting is global
np._set_promotion_state(self.initial_state)
return super().tearDown()
def check_tuples(self, value_generator, split):
for values in value_generator:
tuples = [split(a) for a in values]
self.check_hash_values(tuples)
def test_homogeneous_tuples(self):
typ = np.uint64
def split2(i):
"""
Split i's bits into 2 integers.
"""
i = self.safe_construct(typ, i)
return (i & typ(0x5555555555555555),
i & typ(0xaaaaaaaaaaaaaaaa),
)
def split3(i):
"""
Split i's bits into 3 integers.
"""
i = self.safe_construct(typ, i)
return (i & typ(0x2492492492492492),
i & typ(0x4924924924924924),
i & typ(0x9249249249249249),
)
self.check_tuples(self.int_samples(), split2)
self.check_tuples(self.int_samples(), split3)
# Check exact. Sample values from:
# https://github.com/python/cpython/blob/b738237d6792acba85b1f6e6c8993a812c7fd815/Lib/test/test_tuple.py#L80-L93
# Untypable empty tuples are replaced with (7,).
self.check_hash_values([(7,), (0,), (0, 0), (0.5,),
(0.5, (7,), (-2, 3, (4, 6)))])
def test_heterogeneous_tuples(self):
modulo = 2**63
def split(i):
a = i & 0x5555555555555555
b = (i & 0xaaaaaaaa) ^ ((i >> 32) & 0xaaaaaaaa)
return np.int64(a), np.float64(b * 0.0001)
self.check_tuples(self.int_samples(), split)
| TestTupleHashing |
python | python-poetry__poetry | src/poetry/publishing/hash_manager.py | {
"start": 309,
"end": 1825
} | class ____:
def __init__(self) -> None:
self._sha2_hasher = hashlib.sha256()
self._md5_hasher = None
with suppress(ValueError):
# FIPS mode disables MD5
self._md5_hasher = hashlib.md5()
self._blake_hasher = None
with suppress(ValueError, TypeError):
# FIPS mode disables blake2
self._blake_hasher = hashlib.blake2b(digest_size=256 // 8)
def _md5_update(self, content: bytes) -> None:
if self._md5_hasher is not None:
self._md5_hasher.update(content)
def _md5_hexdigest(self) -> str | None:
if self._md5_hasher is not None:
return self._md5_hasher.hexdigest()
return None
def _blake_update(self, content: bytes) -> None:
if self._blake_hasher is not None:
self._blake_hasher.update(content)
def _blake_hexdigest(self) -> str | None:
if self._blake_hasher is not None:
return self._blake_hasher.hexdigest()
return None
def hash(self, file: Path) -> None:
with file.open("rb") as fp:
for content in iter(lambda: fp.read(io.DEFAULT_BUFFER_SIZE), b""):
self._md5_update(content)
self._sha2_hasher.update(content)
self._blake_update(content)
def hexdigest(self) -> Hexdigest:
return Hexdigest(
self._md5_hexdigest(),
self._sha2_hasher.hexdigest(),
self._blake_hexdigest(),
)
| HashManager |
python | Lightning-AI__lightning | tests/tests_pytorch/accelerators/test_xla.py | {
"start": 7646,
"end": 7817
} | class ____(nn.Module):
def __init__(self, layer):
super().__init__()
self.layer = layer
def forward(self, x):
return self.layer(x)
| SubModule |
python | pytorch__pytorch | test/quantization/core/test_top_level_apis.py | {
"start": 140,
"end": 2203
} | class ____(TestCase):
observers = [
"default_affine_fixed_qparams_observer",
"default_debug_observer",
"default_dynamic_quant_observer",
"default_placeholder_observer",
"default_fixed_qparams_range_0to1_observer",
"default_fixed_qparams_range_neg1to1_observer",
"default_float_qparams_observer",
"default_float_qparams_observer_4bit",
"default_histogram_observer",
"default_observer",
"default_per_channel_weight_observer",
"default_reuse_input_observer",
"default_symmetric_fixed_qparams_observer",
"default_weight_observer",
"per_channel_weight_observer_range_neg_127_to_127",
"weight_observer_range_neg_127_to_127",
]
fake_quants = [
"default_affine_fixed_qparams_fake_quant",
"default_dynamic_fake_quant",
"default_embedding_fake_quant",
"default_embedding_fake_quant_4bit",
"default_fake_quant",
"default_fixed_qparams_range_0to1_fake_quant",
"default_fixed_qparams_range_neg1to1_fake_quant",
"default_fused_act_fake_quant",
"default_fused_per_channel_wt_fake_quant",
"default_fused_wt_fake_quant",
"default_histogram_fake_quant",
"default_per_channel_weight_fake_quant",
"default_symmetric_fixed_qparams_fake_quant",
"default_weight_fake_quant",
"fused_per_channel_wt_fake_quant_range_neg_127_to_127",
"fused_wt_fake_quant_range_neg_127_to_127",
]
def _get_observer_ins(self, observer):
obs_func = getattr(torch.ao.quantization, observer)
return obs_func()
def test_observers(self) -> None:
t = torch.rand(1, 2, 3, 4)
for observer in self.observers:
obs = self._get_observer_ins(observer)
obs.forward(t)
def test_fake_quants(self) -> None:
t = torch.rand(1, 2, 3, 4)
for observer in self.fake_quants:
obs = self._get_observer_ins(observer)
obs.forward(t)
| TestDefaultObservers |
python | gevent__gevent | src/greentest/3.14/test_subprocess.py | {
"start": 80437,
"end": 149092
} | class ____(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead capture the exception that we want to see
# below for comparison.
desired_exception = e
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
# We mock the __del__ method for Popen in the next two tests
# because it does cleanup based on the pid returned by fork_exec
# along with issuing a resource warning if it still exists. Since
# we don't actually spawn a process in these tests we can forego
# the destructor. An alternative would be to set _child_created to
# False before the destructor is called but there is no easy way
# to do that
class PopenNoDestructor(subprocess.Popen):
def __del__(self):
pass
@mock.patch("subprocess._fork_exec")
def test_exception_errpipe_normal(self, fork_exec):
"""Test error passing done through errpipe_write in the good case"""
def proper_error(*args):
errpipe_write = args[13]
# Write the hex for the error code EISDIR: 'is a directory'
err_code = '{:x}'.format(errno.EISDIR).encode()
os.write(errpipe_write, b"OSError:" + err_code + b":")
return 0
fork_exec.side_effect = proper_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(IsADirectoryError):
self.PopenNoDestructor(["non_existent_command"])
@mock.patch("subprocess._fork_exec")
def test_exception_errpipe_bad_data(self, fork_exec):
"""Test error passing done through errpipe_write where its not
in the expected format"""
error_data = b"\xFF\x00\xDE\xAD"
def bad_error(*args):
errpipe_write = args[13]
# Anything can be in the pipe, no assumptions should
# be made about its encoding, so we'll write some
# arbitrary hex bytes to test it out
os.write(errpipe_write, error_data)
return 0
fork_exec.side_effect = bad_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(subprocess.SubprocessError) as e:
self.PopenNoDestructor(["non_existent_command"])
self.assertIn(repr(error_data), str(e.exception))
@unittest.skipIf(not os.path.exists('/proc/self/status'),
"need /proc/self/status")
def test_restore_signals(self):
# Blindly assume that cat exists on systems with /proc/self/status...
default_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=False)
for line in default_proc_status.splitlines():
if line.startswith(b'SigIgn'):
default_sig_ign_mask = line
break
else:
self.skipTest("SigIgn not found in /proc/self/status.")
restored_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=True)
for line in restored_proc_status.splitlines():
if line.startswith(b'SigIgn'):
restored_sig_ign_mask = line
break
self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
msg="restore_signals=True should've unblocked "
"SIGPIPE and friends.")
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
# EPERM error from it depending on the test execution environment, that
# still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c", "import os; print(os.getsid(0))"],
start_new_session=True)
except PermissionError as e:
if e.errno != errno.EPERM:
raise # EACCES?
else:
parent_sid = os.getsid(0)
child_sid = int(output)
self.assertNotEqual(parent_sid, child_sid)
@unittest.skipUnless(hasattr(os, 'setpgid') and hasattr(os, 'getpgid'),
'no setpgid or getpgid on platform')
def test_process_group_0(self):
# For code coverage of calling setpgid(). We don't care if we get an
# EPERM error from it depending on the test execution environment, that
# still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c", "import os; print(os.getpgid(0))"],
process_group=0)
except PermissionError as e:
if e.errno != errno.EPERM:
raise # EACCES?
else:
parent_pgid = os.getpgid(0)
child_pgid = int(output)
self.assertNotEqual(parent_pgid, child_pgid)
@unittest.skipUnless(hasattr(os, 'setreuid'), 'no setreuid on platform')
def test_user(self):
# For code coverage of the user parameter. We don't care if we get a
# permission error from it depending on the test execution environment,
# that still indicates that it was called.
uid = os.geteuid()
test_users = [65534 if uid != 65534 else 65533, uid]
name_uid = "nobody" if sys.platform != 'darwin' else "unknown"
if pwd is not None:
try:
pwd.getpwnam(name_uid)
test_users.append(name_uid)
except KeyError:
# unknown user name
name_uid = None
for user in test_users:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(user=user, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getuid())"],
user=user,
close_fds=close_fds)
except PermissionError as e: # (EACCES, EPERM)
if e.errno == errno.EACCES:
self.assertEqual(e.filename, sys.executable)
else:
self.assertIsNone(e.filename)
else:
if isinstance(user, str):
user_uid = pwd.getpwnam(user).pw_uid
else:
user_uid = user
child_user = int(output)
self.assertEqual(child_user, user_uid)
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=-1)
with self.assertRaises(OverflowError):
subprocess.check_call(ZERO_RETURN_CMD,
cwd=os.curdir, env=os.environ, user=2**64)
if pwd is None and name_uid is not None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=name_uid)
@unittest.skipIf(hasattr(os, 'setreuid'), 'setreuid() available on platform')
def test_user_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=65535)
@unittest.skipUnless(hasattr(os, 'setregid'), 'no setregid() on platform')
def test_group(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
if grp is not None:
group_list.append(name_group)
for group in group_list + [gid]:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(group=group, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getgid())"],
group=group,
close_fds=close_fds)
except PermissionError as e: # (EACCES, EPERM)
self.assertIsNone(e.filename)
else:
if isinstance(group, str):
group_gid = grp.getgrnam(group).gr_gid
else:
group_gid = group
child_group = int(output)
self.assertEqual(child_group, group_gid)
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=-1)
with self.assertRaises(OverflowError):
subprocess.check_call(ZERO_RETURN_CMD,
cwd=os.curdir, env=os.environ, group=2**64)
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=name_group)
@unittest.skipIf(hasattr(os, 'setregid'), 'setregid() available on platform')
def test_group_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=65535)
@unittest.skipUnless(hasattr(os, 'setgroups'), 'no setgroups() on platform')
def test_extra_groups(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
self._test_extra_groups_impl(gid=gid, group_list=group_list)
@unittest.skipUnless(hasattr(os, 'setgroups'), 'no setgroups() on platform')
def test_extra_groups_empty_list(self):
self._test_extra_groups_impl(gid=os.getegid(), group_list=[])
def _test_extra_groups_impl(self, *, gid, group_list):
name_group = _get_test_grp_name()
if grp is not None:
group_list.append(name_group)
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os, sys, json; json.dump(os.getgroups(), sys.stdout)"],
extra_groups=group_list)
except PermissionError as e:
self.assertIsNone(e.filename)
self.skipTest("setgroup() EPERM; this test may require root.")
else:
parent_groups = os.getgroups()
child_groups = json.loads(output)
if grp is not None:
desired_gids = [grp.getgrnam(g).gr_gid if isinstance(g, str) else g
for g in group_list]
else:
desired_gids = group_list
self.assertEqual(set(desired_gids), set(child_groups))
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD,
extra_groups=[name_group])
# No skip necessary, this test won't make it to a setgroup() call.
def test_extra_groups_invalid_gid_t_values(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[-1])
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD,
cwd=os.curdir, env=os.environ,
extra_groups=[2**64])
@unittest.skipIf(mswindows or not hasattr(os, 'umask'),
'POSIX umask() is not available.')
def test_umask(self):
tmpdir = None
try:
tmpdir = tempfile.mkdtemp()
name = os.path.join(tmpdir, "beans")
# We set an unusual umask in the child so as a unique mode
# for us to test the child's touched file for.
subprocess.check_call(
[sys.executable, "-c", f"open({name!r}, 'w').close()"],
umask=0o053)
# Ignore execute permissions entirely in our test,
# filesystems could be mounted to ignore or force that.
st_mode = os.stat(name).st_mode & 0o666
expected_mode = 0o624
self.assertEqual(expected_mode, st_mode,
msg=f'{oct(expected_mode)} != {oct(st_mode)}')
finally:
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
# We're relying on the repr() of the signal.Signals intenum to provide
# the word signal, the signal name and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
    def test_preexec(self):
        """preexec_fn runs in the child after fork() and before exec()."""
        # DISCLAIMER: Setting environment variables is *not* a good use
        # of a preexec_fn. This is merely a test.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stdout.write(os.getenv("FRUIT"))'],
                             stdout=subprocess.PIPE,
                             preexec_fn=lambda: os.putenv("FRUIT", "apple"))
        with p:
            self.assertEqual(p.stdout.read(), b"apple")
    def test_preexec_exception(self):
        """An exception raised inside preexec_fn is reported to the parent."""
        def raise_it():
            raise ValueError("What if two swallows carried a coconut?")
        try:
            p = subprocess.Popen([sys.executable, "-c", ""],
                                 preexec_fn=raise_it)
        except subprocess.SubprocessError as e:
            # The C _posixsubprocess implementation wraps the child's
            # exception in a generic SubprocessError.
            self.assertTrue(
                    subprocess._fork_exec,
                    "Expected a ValueError from the preexec_fn")
        except ValueError as e:
            # The pure Python implementation preserves the original message.
            self.assertIn("coconut", e.args[0])
        else:
            self.fail("Exception raised by preexec_fn did not make it "
                      "to the parent process.")
    class _TestExecuteChildPopen(subprocess.Popen):
        """Used to test behavior at the end of _execute_child."""
        def __init__(self, testcase, *args, **kwargs):
            # Keep a reference to the owning TestCase so the
            # _execute_child() override can make assertions.
            self._testcase = testcase
            subprocess.Popen.__init__(self, *args, **kwargs)
        def _execute_child(self, *args, **kwargs):
            """Run the real _execute_child(), then verify no std fd was freed early."""
            try:
                subprocess.Popen._execute_child(self, *args, **kwargs)
            finally:
                # Open a bunch of file descriptors and verify that
                # none of them are the same as the ones the Popen
                # instance is using for stdin/stdout/stderr.
                devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
                               for _ in range(8)]
                try:
                    for fd in devzero_fds:
                        self._testcase.assertNotIn(
                            fd, (self.stdin.fileno(), self.stdout.fileno(),
                                 self.stderr.fileno()),
                            msg="At least one fd was closed early.")
                finally:
                    for fd in devzero_fds:
                        os.close(fd)
    @unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
    def test_preexec_errpipe_does_not_double_close_pipes(self):
        """Issue16140: Don't double close pipes on preexec error."""
        def raise_it():
            # Force the errpipe_data error-reporting path in _execute_child().
            raise subprocess.SubprocessError(
                    "force the _execute_child() errpipe_data path.")
        with self.assertRaises(subprocess.SubprocessError):
            self._TestExecuteChildPopen(
                        self, ZERO_RETURN_CMD,
                        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE, preexec_fn=raise_it)
    def test_preexec_gc_module_failure(self):
        """Popen with a preexec_fn must leave the parent's gc state unchanged."""
        # This tests the code that disables garbage collection if the child
        # process will execute any Python.
        enabled = gc.isenabled()
        try:
            gc.disable()
            self.assertFalse(gc.isenabled())
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
            self.assertFalse(gc.isenabled(),
                             "Popen enabled gc when it shouldn't.")
            gc.enable()
            self.assertTrue(gc.isenabled())
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
            self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
        finally:
            if not enabled:
                gc.disable()
    @unittest.skipIf(
        sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
    def test_preexec_fork_failure(self):
        """A fork() failure with a preexec_fn must surface as BlockingIOError."""
        # The internal code did not preserve the previous exception when
        # re-enabling garbage collection
        try:
            from resource import getrlimit, setrlimit, RLIMIT_NPROC
        except ImportError as err:
            self.skipTest(err)  # RLIMIT_NPROC is specific to Linux and BSD
        limits = getrlimit(RLIMIT_NPROC)
        [_, hard] = limits
        # Forbid any new processes so the fork() inside Popen fails.
        setrlimit(RLIMIT_NPROC, (0, hard))
        self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
        try:
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
        except BlockingIOError:
            # Forking should raise EAGAIN, translated to BlockingIOError
            pass
        else:
            self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
    def test_args_string(self):
        """Popen accepts a plain string path as args on POSIX."""
        # args is a string
        fd, fname = tempfile.mkstemp()
        # reopen in text mode
        with open(fd, "w", errors="surrogateescape") as fobj:
            fobj.write("#!%s\n" % support.unix_shell)
            fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
                       sys.executable)
        os.chmod(fname, 0o700)
        p = subprocess.Popen(fname)
        p.wait()
        os.remove(fname)
        self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
    def test_call_string(self):
        """call() accepts a plain string path on POSIX."""
        # call() function with string argument on UNIX
        fd, fname = tempfile.mkstemp()
        # reopen in text mode
        with open(fd, "w", errors="surrogateescape") as fobj:
            fobj.write("#!%s\n" % support.unix_shell)
            fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
                       sys.executable)
        os.chmod(fname, 0o700)
        rc = subprocess.call(fname)
        os.remove(fname)
        self.assertEqual(rc, 47)
    def test_specific_shell(self):
        """With executable=, $0 in the shell child must be the chosen shell."""
        # Issue #9265: Incorrect name passed as arg[0].
        shells = []
        for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
            for name in ['bash', 'ksh']:
                sh = os.path.join(prefix, name)
                if os.path.isfile(sh):
                    shells.append(sh)
        if not shells:  # Will probably work for any shell but csh.
            self.skipTest("bash or ksh required for this test")
        sh = '/bin/sh'
        if os.path.isfile(sh) and not os.path.islink(sh):
            # Test will fail if /bin/sh is a symlink to csh.
            shells.append(sh)
        for sh in shells:
            p = subprocess.Popen("echo $0", executable=sh, shell=True,
                                 stdout=subprocess.PIPE)
            with p:
                self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
    def _kill_process(self, method, *args):
        """Spawn a sleeping child, invoke Popen.<method>(*args) on it, return it."""
        # Do not inherit file handles from the parent.
        # It should fix failures on some platforms.
        # Also set the SIGINT handler to the default to make sure it's not
        # being ignored (some tests rely on that.)
        old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
        try:
            p = subprocess.Popen([sys.executable, "-c", """if 1:
                                 import sys, time
                                 sys.stdout.write('x\\n')
                                 sys.stdout.flush()
                                 time.sleep(30)
                                 """],
                                 close_fds=True,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        finally:
            signal.signal(signal.SIGINT, old_handler)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        getattr(p, method)(*args)
        return p
    @unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
                     "Due to known OS bug (issue #16762)")
    def _kill_dead_process(self, method, *args):
        """Invoke Popen.<method>(*args) on a child that has already exited."""
        # Do not inherit file handles from the parent.
        # It should fix failures on some platforms.
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             """],
                             close_fds=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        # The process should end after this
        time.sleep(1)
        # This shouldn't raise even though the child is now dead
        getattr(p, method)(*args)
        p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
    def test_send_signal_dead(self):
        """Sending a signal to an already-dead child must not raise."""
        self._kill_dead_process('send_signal', signal.SIGINT)
    def test_kill_dead(self):
        """kill() on an already-dead child must not raise."""
        self._kill_dead_process('kill')
    def test_terminate_dead(self):
        """terminate() on an already-dead child must not raise."""
        self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
    def check_close_std_fds(self, fds):
        """Verify subprocess pipes work while the given std fds are closed."""
        # Issue #9905: test that subprocess pipes still work properly with
        # some standard fds closed
        stdin = 0
        saved_fds = self._save_fds(fds)
        for fd, saved, inheritable in saved_fds:
            if fd == 0:
                # fd 0 is being closed below; hand the child the saved copy.
                stdin = saved
                break
        try:
            for fd in fds:
                os.close(fd)
            out, err = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdout.write("apple");'
                              'sys.stdout.flush();'
                              'sys.stderr.write("orange")'],
                       stdin=stdin,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE).communicate()
            self.assertEqual(out, b'apple')
            self.assertEqual(err, b'orange')
        finally:
            self._restore_fds(saved_fds)
    def test_close_fd_0(self):
        """Pipes must still work with stdin (fd 0) closed."""
        self.check_close_std_fds([0])
    def test_close_fd_1(self):
        """Pipes must still work with stdout (fd 1) closed."""
        self.check_close_std_fds([1])
    def test_close_fd_2(self):
        """Pipes must still work with stderr (fd 2) closed."""
        self.check_close_std_fds([2])
    def test_close_fds_0_1(self):
        """Pipes must still work with fds 0 and 1 closed."""
        self.check_close_std_fds([0, 1])
    def test_close_fds_0_2(self):
        """Pipes must still work with fds 0 and 2 closed."""
        self.check_close_std_fds([0, 2])
    def test_close_fds_1_2(self):
        """Pipes must still work with fds 1 and 2 closed."""
        self.check_close_std_fds([1, 2])
    def test_close_fds_0_1_2(self):
        """Pipes must still work with all three standard fds closed."""
        # Issue #10806: test that subprocess pipes still work properly with
        # all standard fds closed.
        self.check_close_std_fds([0, 1, 2])
    def test_small_errpipe_write_fd(self):
        """Issue #15798: Popen should work when stdio fds are available."""
        new_stdin = os.dup(0)
        new_stdout = os.dup(1)
        try:
            # Free fds 0 and 1 so the internal error pipe lands on a low fd.
            os.close(0)
            os.close(1)
            # Side test: if errpipe_write fails to have its CLOEXEC
            # flag set this should cause the parent to think the exec
            # failed. Extremely unlikely: everyone supports CLOEXEC.
            subprocess.Popen([
                    sys.executable, "-c",
                    "print('AssertionError:0:CLOEXEC failure.')"]).wait()
        finally:
            # Restore original stdin and stdout
            os.dup2(new_stdin, 0)
            os.dup2(new_stdout, 1)
            os.close(new_stdin)
            os.close(new_stdout)
    def test_remapping_std_fds(self):
        """Popen must rearrange std fds in the child when given out of order."""
        # open up some temporary files
        temps = [tempfile.mkstemp() for i in range(3)]
        try:
            temp_fds = [fd for fd, fname in temps]
            # unlink the files -- we won't need to reopen them
            for fd, fname in temps:
                os.unlink(fname)
            # write some data to what will become stdin, and rewind
            os.write(temp_fds[1], b"STDIN")
            os.lseek(temp_fds[1], 0, 0)
            # move the standard file descriptors out of the way
            saved_fds = self._save_fds(range(3))
            try:
                # duplicate the file objects over the standard fd's
                for fd, temp_fd in enumerate(temp_fds):
                    os.dup2(temp_fd, fd)
                # now use those files in the "wrong" order, so that subprocess
                # has to rearrange them in the child
                p = subprocess.Popen([sys.executable, "-c",
                    'import sys; got = sys.stdin.read();'
                    'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                    stdin=temp_fds[1],
                    stdout=temp_fds[2],
                    stderr=temp_fds[0])
                p.wait()
            finally:
                self._restore_fds(saved_fds)
            for fd in temp_fds:
                os.lseek(fd, 0, 0)
            out = os.read(temp_fds[2], 1024)
            err = os.read(temp_fds[0], 1024).strip()
            self.assertEqual(out, b"got STDIN")
            self.assertEqual(err, b"err")
        finally:
            for fd in temp_fds:
                os.close(fd)
    def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
        """Verify the child works when std fds 0-2 are remapped to the given fds."""
        # open up some temporary files
        temps = [tempfile.mkstemp() for i in range(3)]
        temp_fds = [fd for fd, fname in temps]
        try:
            # unlink the files -- we won't need to reopen them
            for fd, fname in temps:
                os.unlink(fname)
            # save a copy of the standard file descriptors
            saved_fds = self._save_fds(range(3))
            try:
                # duplicate the temp files over the standard fd's 0, 1, 2
                for fd, temp_fd in enumerate(temp_fds):
                    os.dup2(temp_fd, fd)
                # write some data to what will become stdin, and rewind
                os.write(stdin_no, b"STDIN")
                os.lseek(stdin_no, 0, 0)
                # now use those files in the given order, so that subprocess
                # has to rearrange them in the child
                p = subprocess.Popen([sys.executable, "-c",
                    'import sys; got = sys.stdin.read();'
                    'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                    stdin=stdin_no,
                    stdout=stdout_no,
                    stderr=stderr_no)
                p.wait()
                for fd in temp_fds:
                    os.lseek(fd, 0, 0)
                out = os.read(stdout_no, 1024)
                err = os.read(stderr_no, 1024).strip()
            finally:
                self._restore_fds(saved_fds)
            self.assertEqual(out, b"got STDIN")
            self.assertEqual(err, b"err")
        finally:
            for fd in temp_fds:
                os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
    def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
        """Verify std fd remapping from from_fds to to_fds with one std fd closed."""
        saved_fds = self._save_fds(range(3))
        try:
            # Point each source fd at a scratch temp file.
            for from_fd in from_fds:
                with tempfile.TemporaryFile() as f:
                    os.dup2(f.fileno(), from_fd)
            # Close the one std fd not used as a source (#32844 scenario).
            fd_to_close = (set(range(3)) - set(from_fds)).pop()
            os.close(fd_to_close)
            arg_names = ['stdin', 'stdout', 'stderr']
            kwargs = {}
            for from_fd, to_fd in zip(from_fds, to_fds):
                kwargs[arg_names[to_fd]] = from_fd
            code = textwrap.dedent(r'''
                import os, sys
                skipped_fd = int(sys.argv[1])
                for fd in range(3):
                    if fd != skipped_fd:
                        os.write(fd, str(fd).encode('ascii'))
            ''')
            skipped_fd = (set(range(3)) - set(to_fds)).pop()
            rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
                                 **kwargs)
            self.assertEqual(rc, 0)
            # Each source file should now contain exactly its target fd number.
            for from_fd, to_fd in zip(from_fds, to_fds):
                os.lseek(from_fd, 0, os.SEEK_SET)
                read_bytes = os.read(from_fd, 1024)
                read_fds = list(map(int, read_bytes.decode('ascii')))
                msg = textwrap.dedent(f"""
                    When testing {from_fds} to {to_fds} redirection,
                    parent descriptor {from_fd} got redirected
                    to descriptor(s) {read_fds} instead of descriptor {to_fd}.
                    """)
                self.assertEqual([to_fd], read_fds, msg)
        finally:
            self._restore_fds(saved_fds)
    # Check that subprocess can remap std fds correctly even
    # if one of them is closed (#32844).
    def test_swap_std_fds_with_one_closed(self):
        """Exercise every 2-fd remapping with the remaining std fd closed."""
        for from_fds in itertools.combinations(range(3), 2):
            for to_fds in itertools.permutations(range(3), 2):
                self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
    def test_surrogates_error_message(self):
        """preexec_fn error messages with surrogates must not break reporting."""
        def prepare():
            raise ValueError("surrogate:\uDCff")
        try:
            subprocess.call(
                ZERO_RETURN_CMD,
                preexec_fn=prepare)
        except ValueError as err:
            # Pure Python implementations keeps the message
            self.assertIsNone(subprocess._fork_exec)
            self.assertEqual(str(err), "surrogate:\uDCff")
        except subprocess.SubprocessError as err:
            # _posixsubprocess uses a default message
            self.assertIsNotNone(subprocess._fork_exec)
            self.assertEqual(str(err), "Exception occurred in preexec_fn.")
        else:
            self.fail("Expected ValueError or subprocess.SubprocessError")
    def test_undecodable_env(self):
        """Env names/values with surrogate escapes round-trip to the child."""
        for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
            encoded_value = value.encode("ascii", "surrogateescape")
            # test str with surrogates
            script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
            env = os.environ.copy()
            env[key] = value
            # Use C locale to get ASCII for the locale encoding to force
            # surrogate-escaping of \xFF in the child process
            env['LC_ALL'] = 'C'
            decoded_value = value
            stdout = subprocess.check_output(
                [sys.executable, "-c", script],
                env=env)
            stdout = stdout.rstrip(b'\n\r')
            self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
            # test bytes
            key = key.encode("ascii", "surrogateescape")
            script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
            env = os.environ.copy()
            env[key] = encoded_value
            stdout = subprocess.check_output(
                [sys.executable, "-c", script],
                env=env)
            stdout = stdout.rstrip(b'\n\r')
            self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
    def test_bytes_program(self):
        """A bytes program path works with str and bytes PATH environments."""
        abs_program = os.fsencode(ZERO_RETURN_CMD[0])
        args = list(ZERO_RETURN_CMD[1:])
        path, program = os.path.split(ZERO_RETURN_CMD[0])
        program = os.fsencode(program)
        # absolute bytes path
        exitcode = subprocess.call([abs_program]+args)
        self.assertEqual(exitcode, 0)
        # absolute bytes path as a string
        cmd = b"'%s' %s" % (abs_program, " ".join(args).encode("utf-8"))
        exitcode = subprocess.call(cmd, shell=True)
        self.assertEqual(exitcode, 0)
        # bytes program, unicode PATH
        env = os.environ.copy()
        env["PATH"] = path
        exitcode = subprocess.call([program]+args, env=env)
        self.assertEqual(exitcode, 0)
        # bytes program, bytes PATH
        envb = os.environb.copy()
        envb[b"PATH"] = os.fsencode(path)
        exitcode = subprocess.call([program]+args, env=envb)
        self.assertEqual(exitcode, 0)
    def test_pipe_cloexec(self):
        """Popen pipe fds must not leak into a sibling child (close_fds=False)."""
        sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        p1 = subprocess.Popen([sys.executable, sleeper],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, close_fds=False)
        self.addCleanup(p1.communicate, b'')
        p2 = subprocess.Popen([sys.executable, fd_status],
                              stdout=subprocess.PIPE, close_fds=False)
        output, error = p2.communicate()
        result_fds = set(map(int, output.split(b',')))
        unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
                            p1.stderr.fileno()])
        self.assertFalse(result_fds & unwanted_fds,
                         "Expected no fds from %r to be open in child, "
                         "found %r" %
                              (unwanted_fds, result_fds & unwanted_fds))
    def test_pipe_cloexec_real_tools(self):
        """Data flows through a two-process pipeline (qcat | qgrep)."""
        qcat = support.findfile("qcat.py", subdir="subprocessdata")
        qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
        subdata = b'zxcvbn'
        data = subdata * 4 + b'\n'
        p1 = subprocess.Popen([sys.executable, qcat],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              close_fds=False)
        p2 = subprocess.Popen([sys.executable, qgrep, subdata],
                              stdin=p1.stdout, stdout=subprocess.PIPE,
                              close_fds=False)
        self.addCleanup(p1.wait)
        self.addCleanup(p2.wait)
        # Best-effort termination helpers: the process may already be gone.
        def kill_p1():
            try:
                p1.terminate()
            except ProcessLookupError:
                pass
        def kill_p2():
            try:
                p2.terminate()
            except ProcessLookupError:
                pass
        self.addCleanup(kill_p1)
        self.addCleanup(kill_p2)
        p1.stdin.write(data)
        p1.stdin.close()
        readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
        self.assertTrue(readfiles, "The child hung")
        self.assertEqual(p2.stdout.read(), data)
        p1.stdout.close()
        p2.stdout.close()
    def test_close_fds(self):
        """close_fds=True closes inherited fds; pass_fds keeps the chosen ones."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        fds = os.pipe()
        self.addCleanup(os.close, fds[0])
        self.addCleanup(os.close, fds[1])
        open_fds = set(fds)
        # add a bunch more fds
        for _ in range(9):
            fd = os.open(os.devnull, os.O_RDONLY)
            self.addCleanup(os.close, fd)
            open_fds.add(fd)
        for fd in open_fds:
            os.set_inheritable(fd, True)
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=False)
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertEqual(remaining_fds & open_fds, open_fds,
                         "Some fds were closed")
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True)
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertFalse(remaining_fds & open_fds,
                         "Some fds were left open")
        self.assertIn(1, remaining_fds, "Subprocess failed")
        # Keep some of the fd's we opened open in the subprocess.
        # This tests _posixsubprocess.c's proper handling of fds_to_keep.
        fds_to_keep = set(open_fds.pop() for _ in range(8))
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True,
                             pass_fds=fds_to_keep)
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
                         "Some fds not in pass_fds were left open")
        self.assertIn(1, remaining_fds, "Subprocess failed")
    @unittest.skipIf(sys.platform.startswith("freebsd") and
                     os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
                     "Requires fdescfs mounted on /dev/fd on FreeBSD")
    def test_close_fds_when_max_fd_is_lowered(self):
        """Confirm that issue21618 is fixed (may fail under valgrind)."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        # This launches the meat of the test in a child process to
        # avoid messing with the larger unittest processes maximum
        # number of file descriptors.
        # This process launches:
        # +--> Process that lowers its RLIMIT_NOFILE aftr setting up
        #   a bunch of high open fds above the new lower rlimit.
        #   Those are reported via stdout before launching a new
        #   process with close_fds=False to run the actual test:
        #   +--> The TEST: This one launches a fd_status.py
        #     subprocess with close_fds=True so we can find out if
        #     any of the fds above the lowered rlimit are still open.
        p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
        '''
        import os, resource, subprocess, sys, textwrap
        open_fds = set()
        # Add a bunch more fds to pass down.
        for _ in range(40):
            fd = os.open(os.devnull, os.O_RDONLY)
            open_fds.add(fd)
        # Leave a two pairs of low ones available for use by the
        # internal child error pipe and the stdout pipe.
        # We also leave 10 more open as some Python buildbots run into
        # "too many open files" errors during the test if we do not.
        for fd in sorted(open_fds)[:14]:
            os.close(fd)
            open_fds.remove(fd)
        for fd in open_fds:
            #self.addCleanup(os.close, fd)
            os.set_inheritable(fd, True)
        max_fd_open = max(open_fds)
        # Communicate the open_fds to the parent unittest.TestCase process.
        print(','.join(map(str, sorted(open_fds))))
        sys.stdout.flush()
        rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
        try:
            # 29 is lower than the highest fds we are leaving open.
            resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
            # Launch a new Python interpreter with our low fd rlim_cur that
            # inherits open fds above that limit.  It then uses subprocess
            # with close_fds=True to get a report of open fds in the child.
            # An explicit list of fds to check is passed to fd_status.py as
            # letting fd_status rely on its default logic would miss the
            # fds above rlim_cur as it normally only checks up to that limit.
            subprocess.Popen(
                [sys.executable, '-c',
                 textwrap.dedent("""
                     import subprocess, sys
                     subprocess.Popen([sys.executable, %r] +
                                      [str(x) for x in range({max_fd})],
                                      close_fds=True).wait()
                     """.format(max_fd=max_fd_open+1))],
                close_fds=False).wait()
        finally:
            resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
        ''' % fd_status)], stdout=subprocess.PIPE)
        # First output line: fds opened in the child; second line: fds the
        # grandchild still saw open -- the two sets must be disjoint.
        output, unused_stderr = p.communicate()
        output_lines = output.splitlines()
        self.assertEqual(len(output_lines), 2,
                         msg="expected exactly two lines of output:\n%r" % output)
        opened_fds = set(map(int, output_lines[0].strip().split(b',')))
        remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
        self.assertFalse(remaining_fds & opened_fds,
                         msg="Some fds were left open.")
    # Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
    # descriptor of a pipe closed in the parent process is valid in the
    # child process according to fstat(), but the mode of the file
    # descriptor is invalid, and read or write raise an error.
    @support.requires_mac_ver(10, 5)
    def test_pass_fds(self):
        """pass_fds keeps exactly the listed fds open under close_fds=True."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        open_fds = set()
        for x in range(5):
            fds = os.pipe()
            self.addCleanup(os.close, fds[0])
            self.addCleanup(os.close, fds[1])
            os.set_inheritable(fds[0], True)
            os.set_inheritable(fds[1], True)
            open_fds.update(fds)
        for fd in open_fds:
            p = subprocess.Popen([sys.executable, fd_status],
                                 stdout=subprocess.PIPE, close_fds=True,
                                 pass_fds=(fd, ))
            output, ignored = p.communicate()
            remaining_fds = set(map(int, output.split(b',')))
            to_be_closed = open_fds - {fd}
            self.assertIn(fd, remaining_fds, "fd to be passed not passed")
            self.assertFalse(remaining_fds & to_be_closed,
                             "fd to be closed passed")
        # pass_fds overrides close_fds with a warning.
        with self.assertWarns(RuntimeWarning) as context:
            self.assertFalse(subprocess.call(
                    ZERO_RETURN_CMD,
                    close_fds=False, pass_fds=(fd, )))
        self.assertIn('overriding close_fds', str(context.warning))
    def test_pass_fds_inheritable(self):
        """pass_fds makes fds inheritable in the child without changing the parent."""
        script = support.findfile("fd_status.py", subdir="subprocessdata")
        inheritable, non_inheritable = os.pipe()
        self.addCleanup(os.close, inheritable)
        self.addCleanup(os.close, non_inheritable)
        os.set_inheritable(inheritable, True)
        os.set_inheritable(non_inheritable, False)
        pass_fds = (inheritable, non_inheritable)
        args = [sys.executable, script]
        args += list(map(str, pass_fds))
        p = subprocess.Popen(args,
                             stdout=subprocess.PIPE, close_fds=True,
                             pass_fds=pass_fds)
        output, ignored = p.communicate()
        fds = set(map(int, output.split(b',')))
        # the inheritable file descriptor must be inherited, so its inheritable
        # flag must be set in the child process after fork() and before exec()
        self.assertEqual(fds, set(pass_fds), "output=%a" % output)
        # inheritable flag must not be changed in the parent process
        self.assertEqual(os.get_inheritable(inheritable), True)
        self.assertEqual(os.get_inheritable(non_inheritable), False)
    # bpo-32270: Ensure that descriptors specified in pass_fds
    # are inherited even if they are used in redirections.
    # Contributed by @izbyshev.
    def test_pass_fds_redirected(self):
        """Regression test for https://bugs.python.org/issue32270."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        pass_fds = []
        for _ in range(2):
            fd = os.open(os.devnull, os.O_RDWR)
            self.addCleanup(os.close, fd)
            pass_fds.append(fd)
        stdout_r, stdout_w = os.pipe()
        self.addCleanup(os.close, stdout_r)
        self.addCleanup(os.close, stdout_w)
        # stdout_w serves double duty: passed via pass_fds AND used as stdout.
        pass_fds.insert(1, stdout_w)
        with subprocess.Popen([sys.executable, fd_status],
                              stdin=pass_fds[0],
                              stdout=pass_fds[1],
                              stderr=pass_fds[2],
                              close_fds=True,
                              pass_fds=pass_fds):
            output = os.read(stdout_r, 1024)
        fds = {int(num) for num in output.split(b',')}
        self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}")
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stderr=inout, stdin=inout)
p.wait()
    def test_wait_when_sigchild_ignored(self):
        """wait() must work even when the parent ignores SIGCHLD."""
        # NOTE: sigchild_ignore.py may not be an effective test on all OSes.
        sigchild_ignore = support.findfile("sigchild_ignore.py",
                                           subdir="subprocessdata")
        p = subprocess.Popen([sys.executable, sigchild_ignore],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
                         " non-zero with this error:\n%s" %
                         stderr.decode('utf-8'))
    def test_select_unbuffered(self):
        """bufsize=0 pipes are truly unbuffered so select() sees partial data."""
        # Issue #11459: bufsize=0 should really set the pipes as
        # unbuffered (and therefore let select() work properly).
        select = import_helper.import_module("select")
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdout.write("apple")'],
                             stdout=subprocess.PIPE,
                             bufsize=0)
        f = p.stdout
        self.addCleanup(f.close)
        try:
            self.assertEqual(f.read(4), b"appl")
            self.assertIn(f, select.select([f], [], [], 0.0)[0])
        finally:
            p.wait()
    def test_zombie_fast_process_del(self):
        """A Popen deleted before exit must land in subprocess._active (POSIX)."""
        # Issue #12650: on Unix, if Popen.__del__() was called before the
        # process exited, it wouldn't be added to subprocess._active, and would
        # remain a zombie.
        # spawn a Popen, and delete its reference before it exits
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys, time;'
                              'time.sleep(0.2)'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        ident = id(p)
        pid = p.pid
        with warnings_helper.check_warnings(('', ResourceWarning)):
            p = None
        if mswindows:
            # subprocess._active is not used on Windows and is set to None.
            self.assertIsNone(subprocess._active)
        else:
            # check that p is in the active processes list
            self.assertIn(ident, [id(o) for o in subprocess._active])
    def test_leak_fast_process_del_killed(self):
        """A killed, deleted Popen must eventually be reaped from _active (POSIX)."""
        # Issue #12650: on Unix, if Popen.__del__() was called before the
        # process exited, and the process got killed by a signal, it would never
        # be removed from subprocess._active, which triggered a FD and memory
        # leak.
        # spawn a Popen, delete its reference and kill it
        p = subprocess.Popen([sys.executable, "-c",
                              'import time;'
                              'time.sleep(3)'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        ident = id(p)
        pid = p.pid
        with warnings_helper.check_warnings(('', ResourceWarning)):
            p = None
            support.gc_collect()  # For PyPy or other GCs.
        os.kill(pid, signal.SIGKILL)
        if mswindows:
            # subprocess._active is not used on Windows and is set to None.
            self.assertIsNone(subprocess._active)
        else:
            # check that p is in the active processes list
            self.assertIn(ident, [id(o) for o in subprocess._active])
        # let some time for the process to exit, and create a new Popen: this
        # should trigger the wait() of p
        time.sleep(0.2)
        with self.assertRaises(OSError):
            with subprocess.Popen(NONEXISTING_CMD,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE) as proc:
                pass
        # p should have been wait()ed on, and removed from the _active list
        self.assertRaises(OSError, os.waitpid, pid, 0)
        if mswindows:
            # subprocess._active is not used on Windows and is set to None.
            self.assertIsNone(subprocess._active)
        else:
            self.assertNotIn(ident, [id(o) for o in subprocess._active])
    def test_close_fds_after_preexec(self):
        """Fds duplicated by preexec_fn are still closed by close_fds=True."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        # this FD is used as dup2() target by preexec_fn, and should be closed
        # in the child process
        fd = os.dup(1)
        self.addCleanup(os.close, fd)
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True,
                             preexec_fn=lambda: os.dup2(1, fd))
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertNotIn(fd, remaining_fds)
    @support.cpython_only
    def test_fork_exec(self):
        """fork_exec() raises TypeError (not crash) for bad argument types."""
        # Issue #22290: fork_exec() must not crash on memory allocation failure
        # or other errors
        import _posixsubprocess
        gc_enabled = gc.isenabled()
        try:
            # Use a preexec function and enable the garbage collector
            # to force fork_exec() to re-enable the garbage collector
            # on error.
            func = lambda: None
            gc.enable()
            for args, exe_list, cwd, env_list in (
                (123, [b"exe"], None, [b"env"]),
                ([b"arg"], 123, None, [b"env"]),
                ([b"arg"], [b"exe"], 123, [b"env"]),
                ([b"arg"], [b"exe"], None, 123),
            ):
                with self.assertRaises(TypeError) as err:
                    _posixsubprocess.fork_exec(
                        args, exe_list,
                        True, (), cwd, env_list,
                        -1, -1, -1, -1,
                        1, 2, 3, 4,
                        True, True, 0,
                        False, [], 0, -1,
                        func, False)
                # Attempt to prevent
                # "TypeError: fork_exec() takes exactly N arguments (M given)"
                # from passing the test. More refactoring to have us start
                # with a valid *args list, confirm a good call with that works
                # before mutating it in various ways to ensure that bad calls
                # with individual arg type errors raise a typeerror would be
                # ideal. Saving that for a future PR...
                self.assertNotIn('takes exactly', str(err.exception))
        finally:
            if not gc_enabled:
                gc.disable()
    @support.cpython_only
    def test_fork_exec_sorted_fd_sanity_check(self):
        """fork_exec() rejects malformed fds_to_keep tuples with ValueError."""
        # Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
        import _posixsubprocess
        class BadInt:
            # Converts to int exactly once, then raises; exercises error
            # handling partway through fds_to_keep conversion.
            first = True
            def __init__(self, value):
                self.value = value
            def __int__(self):
                if self.first:
                    self.first = False
                    return self.value
                raise ValueError
        gc_enabled = gc.isenabled()
        try:
            gc.enable()
            for fds_to_keep in (
                (-1, 2, 3, 4, 5),  # Negative number.
                ('str', 4),  # Not an int.
                (18, 23, 42, 2**63),  # Out of range.
                (5, 4),  # Not sorted.
                (6, 7, 7, 8),  # Duplicate.
                (BadInt(1), BadInt(2)),
            ):
                with self.assertRaises(
                        ValueError,
                        msg='fds_to_keep={}'.format(fds_to_keep)) as c:
                    _posixsubprocess.fork_exec(
                        [b"false"], [b"false"],
                        True, fds_to_keep, None, [b"env"],
                        -1, -1, -1, -1,
                        1, 2, 3, 4,
                        True, True, 0,
                        None, None, None, -1,
                        None)
                self.assertIn('fds_to_keep', str(c.exception))
        finally:
            if not gc_enabled:
                gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
open(os.devnull, 'wb') as dev_null:
mock_proc_stdin.flush.side_effect = BrokenPipeError
# because _communicate registers a selector using proc.stdin...
mock_proc_stdin.fileno.return_value = dev_null.fileno()
# _communicate() should swallow BrokenPipeError from flush.
proc.communicate(b'stuff')
mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(_testcapi is not None
and hasattr(_testcapi, 'W_STOPCODE'),
'need _testcapi.W_STOPCODE')
def test_stopped(self):
"""Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
args = ZERO_RETURN_CMD
proc = subprocess.Popen(args)
# Wait until the real process completes to avoid zombie process
support.wait_process(proc.pid, exitcode=0)
status = _testcapi.W_STOPCODE(3)
with mock.patch('subprocess.os.waitpid', return_value=(proc.pid, status)):
returncode = proc.wait()
self.assertEqual(returncode, -3)
def test_send_signal_race(self):
# bpo-38630: send_signal() must poll the process exit status to reduce
# the risk of sending the signal to the wrong process.
proc = subprocess.Popen(ZERO_RETURN_CMD)
# wait until the process completes without using the Popen APIs.
support.wait_process(proc.pid, exitcode=0)
# returncode is still None but the process completed.
self.assertIsNone(proc.returncode)
with mock.patch("os.kill") as mock_kill:
proc.send_signal(signal.SIGTERM)
# send_signal() didn't call os.kill() since the process already
# completed.
mock_kill.assert_not_called()
# Don't check the returncode value: the test reads the exit status,
# so Popen failed to read it and uses a default returncode instead.
self.assertIsNotNone(proc.returncode)
def test_send_signal_race2(self):
# bpo-40550: the process might exist between the returncode check and
# the kill operation
p = subprocess.Popen([sys.executable, '-c', 'exit(1)'])
# wait for process to exit
while not p.returncode:
p.poll()
with mock.patch.object(p, 'poll', new=lambda: None):
p.returncode = None
p.send_signal(signal.SIGTERM)
p.kill()
def test_communicate_repeated_call_after_stdout_close(self):
proc = subprocess.Popen([sys.executable, '-c',
'import os, time; os.close(1), time.sleep(2)'],
stdout=subprocess.PIPE)
while True:
try:
proc.communicate(timeout=0.1)
return
except subprocess.TimeoutExpired:
pass
def test_preexec_at_exit(self):
code = f"""if 1:
import atexit
import subprocess
def dummy():
pass
class AtFinalization:
def __del__(self):
print("OK")
subprocess.Popen({ZERO_RETURN_CMD}, preexec_fn=dummy)
print("shouldn't be printed")
at_finalization = AtFinalization()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out.strip(), b"OK")
self.assertIn(b"preexec_fn not supported at interpreter shutdown", err)
@unittest.skipIf(not sysconfig.get_config_var("HAVE_VFORK"),
"vfork() not enabled by configure.")
@strace_helper.requires_strace()
@mock.patch("subprocess._USE_POSIX_SPAWN", new=False)
def test_vfork_used_when_expected(self):
# This is a performance regression test to ensure we default to using
# vfork() when possible.
# Technically this test could pass when posix_spawn is used as well
# because libc tends to implement that internally using vfork. But
# that'd just be testing a libc+kernel implementation detail.
# Are intersted in the system calls:
# clone,clone2,clone3,fork,vfork,exit,exit_group
# Unfortunately using `--trace` with that list to strace fails because
# not all are supported on all platforms (ex. clone2 is ia64 only...)
# So instead use `%process` which is recommended by strace, and contains
# the above.
true_binary = "/bin/true"
strace_args = ["--trace=%process"]
with self.subTest(name="default_is_vfork"):
vfork_result = strace_helper.strace_python(
f"""\
import subprocess
subprocess.check_call([{true_binary!r}])""",
strace_args
)
# Match both vfork() and clone(..., flags=...|CLONE_VFORK|...)
self.assertRegex(vfork_result.event_bytes, br"(?i)vfork")
# Do NOT check that fork() or other clones did not happen.
# If the OS denys the vfork it'll fallback to plain fork().
# Test that each individual thing that would disable the use of vfork
# actually disables it.
for sub_name, preamble, sp_kwarg, expect_permission_error in (
("preexec", "", "preexec_fn=lambda: None", False),
("setgid", "", f"group={os.getgid()}", True),
("setuid", "", f"user={os.getuid()}", True),
("setgroups", "", "extra_groups=[]", True),
):
with self.subTest(name=sub_name):
non_vfork_result = strace_helper.strace_python(
f"""\
import subprocess
{preamble}
try:
subprocess.check_call(
[{true_binary!r}], **dict({sp_kwarg}))
except PermissionError:
if not {expect_permission_error}:
raise""",
strace_args
)
# Ensure neither vfork() or clone(..., flags=...|CLONE_VFORK|...).
self.assertNotRegex(non_vfork_result.event_bytes, br"(?i)vfork")
@unittest.skipUnless(mswindows, "Windows specific tests")
| POSIXProcessTestCase |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 28453,
"end": 33168
} | class ____(GoogleCloudBaseOperator):
"""
Creates a DataScan resource.
:param project_id: Required. The ID of the Google Cloud project that the lake belongs to.
:param region: Required. The ID of the Google Cloud region that the lake belongs to.
:param body: Required. The Request body contains an instance of DataScan.
:param data_scan_id: Required. Data Quality scan identifier.
:param update_mask: Mask of fields to update.
:param api_version: The version of the api that will be requested for example 'v1'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:return: Dataplex data scan id
"""
template_fields = ("project_id", "data_scan_id", "body", "impersonation_chain")
template_fields_renderers = {"body": "json"}
def __init__(
self,
project_id: str,
region: str,
data_scan_id: str,
body: dict[str, Any] | DataScan,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
update_mask: dict | FieldMask | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.data_scan_id = data_scan_id
self.body = body
self.update_mask = update_mask
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
if self.update_mask is not None:
self._update_data_scan(hook)
else:
self.log.info("Creating Dataplex Data Quality scan %s", self.data_scan_id)
try:
operation = hook.create_data_scan(
project_id=self.project_id,
region=self.region,
data_scan_id=self.data_scan_id,
body=self.body,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Dataplex Data Quality scan %s created successfully!", self.data_scan_id)
except AlreadyExists:
self._update_data_scan(hook)
except GoogleAPICallError as e:
raise AirflowException(f"Error creating Data Quality scan {self.data_scan_id}", e)
return self.data_scan_id
def _update_data_scan(self, hook: DataplexHook):
self.log.info("Dataplex Data Quality scan already exists: %s", {self.data_scan_id})
operation = hook.update_data_scan(
project_id=self.project_id,
region=self.region,
data_scan_id=self.data_scan_id,
body=self.body,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Dataplex Data Quality scan %s updated successfully!", self.data_scan_id)
| DataplexCreateOrUpdateDataQualityScanOperator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/streams.py | {
"start": 40856,
"end": 42350
} | class ____(GithubStream):
"""
API docs: https://docs.github.com/en/rest/pulls/pulls?apiVersion=2022-11-28#list-commits-on-a-pull-request
"""
primary_key = "sha"
def __init__(self, parent: HttpStream, **kwargs):
super().__init__(**kwargs)
self.parent = parent
def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str:
return f"repos/{stream_slice['repository']}/pulls/{stream_slice['pull_number']}/commits"
def stream_slices(
self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
parent_stream_slices = self.parent.stream_slices(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state
)
for stream_slice in parent_stream_slices:
parent_records = self.parent.read_records(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state
)
for record in parent_records:
yield {"repository": record["repository"], "pull_number": record["number"]}
def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]:
record = super().transform(record=record, stream_slice=stream_slice)
record["pull_number"] = stream_slice["pull_number"]
return record
| PullRequestCommits |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 40130,
"end": 40705
} | class ____(Blockwise):
_parameters = ["frame", "state_data", "frac", "replace"]
operation = staticmethod(methods.sample)
@functools.cached_property
def _meta(self):
args = [self.operands[0]._meta] + [self.operands[1][0]] + self.operands[2:]
return self.operation(*args)
def _task(self, name: Key, index: int) -> Task:
args = [self._blockwise_arg(self.frame, index)] + [
self.state_data[index],
self.frac,
self.operand("replace"),
]
return Task(name, self.operation, *args)
| Sample |
python | getsentry__sentry-python | tests/integrations/grpc/grpc_test_service_pb2_grpc.py | {
"start": 4645,
"end": 7557
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def TestServe(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/grpc_test_server.gRPCTestService/TestServe',
grpc__test__service__pb2.gRPCTestMessage.SerializeToString,
grpc__test__service__pb2.gRPCTestMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def TestUnaryStream(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/grpc_test_server.gRPCTestService/TestUnaryStream',
grpc__test__service__pb2.gRPCTestMessage.SerializeToString,
grpc__test__service__pb2.gRPCTestMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def TestStreamStream(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_stream(request_iterator, target, '/grpc_test_server.gRPCTestService/TestStreamStream',
grpc__test__service__pb2.gRPCTestMessage.SerializeToString,
grpc__test__service__pb2.gRPCTestMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def TestStreamUnary(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_unary(request_iterator, target, '/grpc_test_server.gRPCTestService/TestStreamUnary',
grpc__test__service__pb2.gRPCTestMessage.SerializeToString,
grpc__test__service__pb2.gRPCTestMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| gRPCTestService |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 74726,
"end": 75416
} | class ____(Expr):
"""Returns a tuple of partition lengths"""
_parameters = ["frame"]
@functools.cached_property
def _meta(self):
return tuple()
def _divisions(self):
return (None, None)
def _simplify_down(self):
if isinstance(self.frame, Elemwise):
child = max(self.frame.dependencies(), key=lambda expr: expr.npartitions)
return Lengths(child)
def _layer(self):
name = f"part-{self._name}"
dsk = {
(name, i): (len, (self.frame._name, i))
for i in range(self.frame.npartitions)
}
dsk[(self._name, 0)] = (tuple, list(dsk.keys()))
return dsk
| Lengths |
python | huggingface__transformers | src/transformers/models/voxtral/modeling_voxtral.py | {
"start": 9614,
"end": 14455
} | class ____(VoxtralPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`VoxtralEncoderLayer`].
Args:
config: VoxtralEncoderConfig
"""
# Ignore copy
config: VoxtralEncoderConfig
main_input_name = "input_features"
input_modalities = "audio"
_no_split_modules = ["VoxtralEncoderLayer"]
_can_record_outputs = {
"attentions": VoxtralAttention,
"hidden_states": VoxtralEncoderLayer,
}
def __init__(self, config: VoxtralEncoderConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.num_mel_bins = config.num_mel_bins
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_source_positions
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.conv1 = nn.Conv1d(self.num_mel_bins, embed_dim, kernel_size=3, padding=1)
self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1)
self.embed_positions = nn.Embedding(self.max_source_positions, embed_dim)
self.embed_positions.requires_grad_(False)
self.layers = nn.ModuleList([VoxtralEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
# Ignore copy
self.avg_pooler = nn.AvgPool1d(2, stride=2)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def get_input_embeddings(self) -> nn.Module:
return self.conv1
def set_input_embeddings(self, value: nn.Module):
self.conv1 = value
@check_model_inputs()
def forward(
self,
input_features,
attention_mask=None,
**kwargs: Unpack[TransformersKwargs],
):
r"""
Args:
input_features (`torch.LongTensor` of shape `(batch_size, feature_size, sequence_length)`):
Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]` or a
`numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
`input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding
and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`]
attention_mask (`torch.Tensor`)`, *optional*):
Voxtral does not support masking of the `input_features`, this argument is preserved for compatibility,
but it is not used. By default the silence in the input log mel spectrogram are ignored.
"""
expected_seq_length = self.config.max_source_positions * self.conv1.stride[0] * self.conv2.stride[0]
if input_features.shape[-1] != expected_seq_length:
raise ValueError(
f"Voxtral expects the mel input features to be of length {expected_seq_length}, but found {input_features.shape[-1]}. Make sure to pad the input mel features to {expected_seq_length}."
)
input_features = input_features.to(dtype=self.conv1.weight.dtype, device=self.conv1.weight.device)
inputs_embeds = nn.functional.gelu(self.conv1(input_features))
inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds))
inputs_embeds = inputs_embeds.permute(0, 2, 1)
embed_pos = self.embed_positions.weight
hidden_states = (inputs_embeds + embed_pos).to(inputs_embeds.dtype)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
for idx, encoder_layer in enumerate(self.layers):
layer_outputs = encoder_layer(
hidden_states,
attention_mask=attention_mask,
)
hidden_states = layer_outputs[0]
hidden_states = self.layer_norm(hidden_states)
return BaseModelOutput(
last_hidden_state=hidden_states,
)
# Ignore copy
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers and the output length of the audio encoder
"""
input_lengths = (input_lengths - 1) // 2 + 1
output_lengths = (input_lengths - 2) // 2 + 1
return input_lengths, output_lengths
| VoxtralEncoder |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP004.py | {
"start": 952,
"end": 992
} | class ____(object):
...
object = A
| A |
python | kamyu104__LeetCode-Solutions | Python/race-car.py | {
"start": 64,
"end": 1202
} | class ____(object):
def racecar(self, target):
dp = [0] * (target+1)
for i in xrange(1, target+1):
# 2^(k-1) <= i < 2^k
k = i.bit_length()
# case 1. drive exactly i at best
# seq(i) = A^k
if i == 2**k-1:
dp[i] = k
continue
# case 2. drive cross i at 2^k-1, and turn back to i
# seq(i) = A^k -> R -> seq(2^k-1 - i)
dp[i] = k+1 + dp[2**k-1 - i]
# case 3. drive less then 2^k-1, and turn back some distance,
# and turn back again to make the direction is the same
# seq(i) = shortest(seq(i), A^(k-1) -> R -> A^j -> R ->
# seq(i - (2^(k-1)-1) + (2^j-1)),
# where 0 <= j < k-1)
# => dp[i] = min(dp[i], (k-1) + 1 + j + 1 +
# dp[i - (2**(k-1)-1) + (2**j-1)])
for j in xrange(k-1):
dp[i] = min(dp[i], k+j+1 + dp[i - 2**(k-1) + 2**j])
return dp[-1]
| Solution |
python | getsentry__sentry | src/sentry/issues/auto_source_code_config/task.py | {
"start": 1546,
"end": 9744
} | class ____(StrEnum):
UNEXPECTED_ERROR = "Unexpected error type while calling `get_trees_for_org()`."
LOCK_FAILED = "Failed to acquire lock"
EMPTY_TREES = "The trees are empty."
def process_event(
project_id: int, group_id: int, event_id: str
) -> tuple[list[CodeMapping], list[str]]:
"""
Process errors for customers with source code management installed and calculate code mappings
among other things.
This task is queued at most once per hour per project.
"""
project = Project.objects.get(id=project_id)
org = Organization.objects.get(id=project.organization_id)
set_tag("organization.slug", org.slug)
# When you look at the performance page the user is a default column
set_user({"username": org.slug})
set_tag("project.slug", project.slug)
extra = {
"organization.slug": org.slug,
"project_id": project_id,
"group_id": group_id,
"event_id": event_id,
}
event = fetch_event(project_id, event_id, group_id, extra)
if event is None:
return [], []
platform = event.platform
assert platform is not None
set_tag("platform", platform)
platform_config = PlatformConfig(platform)
if not platform_config.is_supported():
return [], []
frames_to_process = get_frames_to_process(event.data, platform)
if not frames_to_process:
return [], []
code_mappings: list[CodeMapping] = []
in_app_stack_trace_rules: list[str] = []
try:
installation = get_installation(org)
trees = get_trees_for_org(installation, org, extra)
trees_helper = CodeMappingTreesHelper(trees)
code_mappings = trees_helper.generate_code_mappings(frames_to_process, platform)
_, in_app_stack_trace_rules = create_configurations(
code_mappings, installation, project, platform_config
)
except (InstallationNotFoundError, InstallationCannotGetTreesError):
pass
return code_mappings, in_app_stack_trace_rules
def fetch_event(
project_id: int, event_id: str, group_id: int, extra: dict[str, Any]
) -> GroupEvent | Event | None:
event: GroupEvent | Event | None = None
failure_reason = None
try:
event = eventstore.backend.get_event_by_id(project_id, event_id, group_id)
if event is None:
failure_reason = "event_not_found"
except DeadlineExceeded:
failure_reason = "nodestore_deadline_exceeded"
except Exception:
logger.exception("Error fetching event.", extra=extra)
failure_reason = "event_fetching_exception"
if failure_reason:
metrics.incr(
key=f"{METRIC_PREFIX}.failure", tags={"reason": failure_reason}, sample_rate=1.0
)
return event
def process_error(error: ApiError, extra: dict[str, Any]) -> None:
"""Log known issues and report unknown ones"""
if error.json:
json_data: Any = error.json
msg = json_data.get("message")
else:
msg = error.text
extra["error"] = msg
if msg is None:
logger.warning("No message found in ApiError.", extra=extra)
return
elif msg == "Not Found":
logger.warning("The org has uninstalled the Sentry App.", extra=extra)
return
elif msg == "This installation has been suspended":
logger.warning("The org has suspended the Sentry App.", extra=extra)
return
elif msg == "Server Error":
logger.warning("Github failed to respond.", extra=extra)
return
elif msg.startswith("Although you appear to have the correct authorization credentials"):
# Although you appear to have the correct authorization credentials, the
# <github_org_here> organization has an IP allow list enabled, and
# <ip_address_here> is not permitted to access this resource.
logger.warning("The org has suspended the Sentry App. See code comment.", extra=extra)
return
elif msg.startswith("Due to U.S. trade controls law restrictions, this GitHub"):
logger.warning("Github has blocked this org. We will not continue.", extra=extra)
return
# Logging the warning and returning is better than re-raising the error
# Otherwise, API errors would not group them since the HTTPError in the stack
# has unique URLs, thus, separating the errors
logger.warning("Unhandled ApiError occurred. Multiple issues grouped.", extra=extra)
def get_trees_for_org(
installation: IntegrationInstallation, org: Organization, extra: dict[str, Any]
) -> dict[str, Any]:
trees: dict[str, Any] = {}
if not hasattr(installation, "get_trees_for_org"):
return trees
# Acquire the lock for a maximum of 10 minutes
lock = locks.get(key=f"get_trees_for_org:{org.slug}", duration=60 * 10, name="process_pending")
with SCMIntegrationInteractionEvent(
SCMIntegrationInteractionType.DERIVE_CODEMAPPINGS,
provider_key=installation.model.provider,
organization_id=org.id,
integration_id=installation.org_integration.integration_id,
).capture() as lifecycle:
try:
with lock.acquire():
trees = installation.get_trees_for_org()
if not trees:
lifecycle.record_halt(DeriveCodeMappingsErrorReason.EMPTY_TREES, extra=extra)
except ApiError as error:
process_error(error, extra)
lifecycle.record_halt(error, extra)
except UnableToAcquireLock as error:
lifecycle.record_halt(error, extra)
except Exception:
lifecycle.record_failure(DeriveCodeMappingsErrorReason.UNEXPECTED_ERROR, extra=extra)
return trees
def create_configurations(
code_mappings: list[CodeMapping],
installation: IntegrationInstallation,
project: Project,
platform_config: PlatformConfig,
) -> tuple[list[CodeMapping], list[str]]:
"""
Given a set of trees and frames to process, create code mappings & in-app stack trace rules.
Returns a tuple of code mappings and in-app stack trace rules even when running in dry-run mode.
"""
org_integration = installation.org_integration
if not org_integration:
raise InstallationNotFoundError
dry_run = platform_config.is_dry_run_platform(project.organization)
platform = platform_config.platform
tags: Mapping[str, str | bool] = {"platform": platform, "dry_run": dry_run}
with metrics.timer(f"{METRIC_PREFIX}.create_configurations.duration", tags=tags):
for code_mapping in code_mappings:
repository = create_repository(code_mapping.repo.name, org_integration, tags)
create_code_mapping(code_mapping, repository, project, org_integration, tags)
in_app_stack_trace_rules: list[str] = []
if platform_config.creates_in_app_stack_trace_rules():
in_app_stack_trace_rules = save_in_app_stack_trace_rules(
project, code_mappings, platform_config
)
# We return this to allow tests running in dry-run mode to assert
# what would have been created.
return code_mappings, in_app_stack_trace_rules
def create_code_mapping(
code_mapping: CodeMapping,
repository: Repository | None,
project: Project,
org_integration: RpcOrganizationIntegration,
tags: Mapping[str, str | bool],
) -> None:
created = False
if not tags["dry_run"] and repository is not None:
_, created = RepositoryProjectPathConfig.objects.get_or_create(
project=project,
stack_root=code_mapping.stacktrace_root,
defaults={
"repository": repository,
"organization_integration_id": org_integration.id,
"integration_id": org_integration.integration_id,
"organization_id": org_integration.organization_id,
"source_root": code_mapping.source_path,
"default_branch": code_mapping.repo.branch,
"automatically_generated": True,
},
)
if created or tags["dry_run"]:
metrics.incr(key=f"{METRIC_PREFIX}.code_mapping.created", tags=tags, sample_rate=1.0)
| DeriveCodeMappingsErrorReason |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 116073,
"end": 116639
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, api_key: str, company: str):
"""Airbyte Source for Hellobaton.
Args:
name (str): The name of the destination.
api_key (str): authentication key required to access the api endpoints
company (str): Company name that generates your base api url
"""
self.api_key = check.str_param(api_key, "api_key")
self.company = check.str_param(company, "company")
super().__init__("Hellobaton", name)
| HellobatonSource |
python | astropy__astropy | astropy/cosmology/_src/tests/flrw/test_w0wzcdm.py | {
"start": 942,
"end": 2386
} | class ____(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` wz on a Cosmology.
wz is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_wz(self, cosmo_cls, cosmo):
"""Test Parameter ``wz``."""
# on the class
wz = cosmo_cls.parameters["wz"]
assert isinstance(wz, Parameter)
assert "Derivative of the dark energy" in wz.__doc__
assert wz.unit is None
assert wz.default == 0.0
# on the instance
assert cosmo.wz is cosmo.__dict__["wz"]
assert cosmo.wz == self.cls_kwargs["wz"]
def test_init_wz(self, cosmo_cls, ba):
"""Test initialization for values of ``wz``."""
# test that it works with units
ba.arguments["wz"] = ba.arguments["wz"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wz == ba.arguments["wz"]
# also without units
ba.arguments["wz"] = ba.arguments["wz"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wz == ba.arguments["wz"]
# must be dimensionless
ba.arguments["wz"] = 10 * u.km
with pytest.raises(TypeError):
cosmo_cls(*ba.args, **ba.kwargs)
| ParameterwzTestMixin |
python | django__django | django/db/backends/oracle/creation.py | {
"start": 283,
"end": 21026
} | class ____(BaseDatabaseCreation):
@cached_property
def _maindb_connection(self):
"""
This is analogous to other backends' `_nodb_connection` property,
which allows access to an "administrative" connection which can
be used to manage the test databases.
For Oracle, the only connection that can be used for that purpose
is the main (non-test) connection.
"""
settings_dict = settings.DATABASES[self.connection.alias]
user = settings_dict.get("SAVED_USER") or settings_dict["USER"]
password = settings_dict.get("SAVED_PASSWORD") or settings_dict["PASSWORD"]
settings_dict = {**settings_dict, "USER": user, "PASSWORD": password}
DatabaseWrapper = type(self.connection)
return DatabaseWrapper(settings_dict, alias=self.connection.alias)
def _create_test_db(self, verbosity=1, autoclobber=False, keepdb=False):
parameters = self._get_test_db_params()
with self._maindb_connection.cursor() as cursor:
if self._test_database_create():
try:
self._execute_test_db_creation(
cursor, parameters, verbosity, keepdb
)
except Exception as e:
if "ORA-01543" not in str(e):
# All errors except "tablespace already exists" cancel
# tests
self.log("Got an error creating the test database: %s" % e)
sys.exit(2)
if not autoclobber:
confirm = input(
"It appears the test database, %s, already exists. "
"Type 'yes' to delete it, or 'no' to cancel: "
% parameters["user"]
)
if autoclobber or confirm == "yes":
if verbosity >= 1:
self.log(
"Destroying old test database for alias '%s'..."
% self.connection.alias
)
try:
self._execute_test_db_destruction(
cursor, parameters, verbosity
)
except DatabaseError as e:
if "ORA-29857" in str(e):
self._handle_objects_preventing_db_destruction(
cursor, parameters, verbosity, autoclobber
)
else:
# Ran into a database error that isn't about
# leftover objects in the tablespace.
self.log(
"Got an error destroying the old test database: %s"
% e
)
sys.exit(2)
except Exception as e:
self.log(
"Got an error destroying the old test database: %s" % e
)
sys.exit(2)
try:
self._execute_test_db_creation(
cursor, parameters, verbosity, keepdb
)
except Exception as e:
self.log(
"Got an error recreating the test database: %s" % e
)
sys.exit(2)
else:
self.log("Tests cancelled.")
sys.exit(1)
if self._test_user_create():
if verbosity >= 1:
self.log("Creating test user...")
try:
self._create_test_user(cursor, parameters, verbosity, keepdb)
except Exception as e:
if "ORA-01920" not in str(e):
# All errors except "user already exists" cancel tests
self.log("Got an error creating the test user: %s" % e)
sys.exit(2)
if not autoclobber:
confirm = input(
"It appears the test user, %s, already exists. Type "
"'yes' to delete it, or 'no' to cancel: "
% parameters["user"]
)
if autoclobber or confirm == "yes":
try:
if verbosity >= 1:
self.log("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
if verbosity >= 1:
self.log("Creating test user...")
self._create_test_user(
cursor, parameters, verbosity, keepdb
)
except Exception as e:
self.log("Got an error recreating the test user: %s" % e)
sys.exit(2)
else:
self.log("Tests cancelled.")
sys.exit(1)
# Done with main user -- test user and tablespaces created.
self._maindb_connection.close()
self._switch_to_test_user(parameters)
return self.connection.settings_dict["NAME"]
def _switch_to_test_user(self, parameters):
"""
Switch to the user that's used for creating the test database.
Oracle doesn't have the concept of separate databases under the same
user, so a separate user is used; see _create_test_db(). The main user
is also needed for cleanup when testing is completed, so save its
credentials in the SAVED_USER/SAVED_PASSWORD key in the settings dict.
"""
real_settings = settings.DATABASES[self.connection.alias]
real_settings["SAVED_USER"] = self.connection.settings_dict["SAVED_USER"] = (
self.connection.settings_dict["USER"]
)
real_settings["SAVED_PASSWORD"] = self.connection.settings_dict[
"SAVED_PASSWORD"
] = self.connection.settings_dict["PASSWORD"]
real_test_settings = real_settings["TEST"]
test_settings = self.connection.settings_dict["TEST"]
real_test_settings["USER"] = real_settings["USER"] = test_settings["USER"] = (
self.connection.settings_dict["USER"]
) = parameters["user"]
real_settings["PASSWORD"] = self.connection.settings_dict["PASSWORD"] = (
parameters["password"]
)
def set_as_test_mirror(self, primary_settings_dict):
"""
Set this database up to be used in testing as a mirror of a primary
database whose settings are given.
"""
self.connection.settings_dict["USER"] = primary_settings_dict["USER"]
self.connection.settings_dict["PASSWORD"] = primary_settings_dict["PASSWORD"]
def _handle_objects_preventing_db_destruction(
self, cursor, parameters, verbosity, autoclobber
):
# There are objects in the test tablespace which prevent dropping it
# The easy fix is to drop the test user -- but are we allowed to do so?
self.log(
"There are objects in the old test database which prevent its destruction."
"\nIf they belong to the test user, deleting the user will allow the test "
"database to be recreated.\n"
"Otherwise, you will need to find and remove each of these objects, "
"or use a different tablespace.\n"
)
if self._test_user_create():
if not autoclobber:
confirm = input("Type 'yes' to delete user %s: " % parameters["user"])
if autoclobber or confirm == "yes":
try:
if verbosity >= 1:
self.log("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
except Exception as e:
self.log("Got an error destroying the test user: %s" % e)
sys.exit(2)
try:
if verbosity >= 1:
self.log(
"Destroying old test database for alias '%s'..."
% self.connection.alias
)
self._execute_test_db_destruction(cursor, parameters, verbosity)
except Exception as e:
self.log("Got an error destroying the test database: %s" % e)
sys.exit(2)
else:
self.log("Tests cancelled -- test database cannot be recreated.")
sys.exit(1)
else:
self.log(
"Django is configured to use pre-existing test user '%s',"
" and will not attempt to delete it." % parameters["user"]
)
self.log("Tests cancelled -- test database cannot be recreated.")
sys.exit(1)
def _destroy_test_db(self, test_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists. Return the name of the test database created.
"""
if not self.connection.is_pool:
self.connection.settings_dict["USER"] = self.connection.settings_dict[
"SAVED_USER"
]
self.connection.settings_dict["PASSWORD"] = self.connection.settings_dict[
"SAVED_PASSWORD"
]
self.connection.close()
self.connection.close_pool()
parameters = self._get_test_db_params()
with self._maindb_connection.cursor() as cursor:
if self._test_user_create():
if verbosity >= 1:
self.log("Destroying test user...")
self._destroy_test_user(cursor, parameters, verbosity)
if self._test_database_create():
if verbosity >= 1:
self.log("Destroying test database tables...")
self._execute_test_db_destruction(cursor, parameters, verbosity)
self._maindb_connection.close()
self._maindb_connection.close_pool()
def _execute_test_db_creation(self, cursor, parameters, verbosity, keepdb=False):
if verbosity >= 2:
self.log("_create_test_db(): dbname = %s" % parameters["user"])
if self._test_database_oracle_managed_files():
statements = [
"""
CREATE TABLESPACE %(tblspace)s
DATAFILE SIZE %(size)s
AUTOEXTEND ON NEXT %(extsize)s MAXSIZE %(maxsize)s
""",
"""
CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE SIZE %(size_tmp)s
AUTOEXTEND ON NEXT %(extsize_tmp)s MAXSIZE %(maxsize_tmp)s
""",
]
else:
statements = [
"""
CREATE TABLESPACE %(tblspace)s
DATAFILE '%(datafile)s' SIZE %(size)s REUSE
AUTOEXTEND ON NEXT %(extsize)s MAXSIZE %(maxsize)s
""",
"""
CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE '%(datafile_tmp)s' SIZE %(size_tmp)s REUSE
AUTOEXTEND ON NEXT %(extsize_tmp)s MAXSIZE %(maxsize_tmp)s
""",
]
# Ignore "tablespace already exists" error when keepdb is on.
acceptable_ora_err = "ORA-01543" if keepdb else None
self._execute_allow_fail_statements(
cursor, statements, parameters, verbosity, acceptable_ora_err
)
def _create_test_user(self, cursor, parameters, verbosity, keepdb=False):
if verbosity >= 2:
self.log("_create_test_user(): username = %s" % parameters["user"])
statements = [
"""CREATE USER %(user)s
IDENTIFIED BY "%(password)s"
DEFAULT TABLESPACE %(tblspace)s
TEMPORARY TABLESPACE %(tblspace_temp)s
QUOTA UNLIMITED ON %(tblspace)s
""",
"""GRANT CREATE SESSION,
CREATE TABLE,
CREATE SEQUENCE,
CREATE PROCEDURE,
CREATE TRIGGER
TO %(user)s""",
]
# Ignore "user already exists" error when keepdb is on
acceptable_ora_err = "ORA-01920" if keepdb else None
success = self._execute_allow_fail_statements(
cursor, statements, parameters, verbosity, acceptable_ora_err
)
# If the password was randomly generated, change the user accordingly.
if not success and self._test_settings_get("PASSWORD") is None:
set_password = 'ALTER USER %(user)s IDENTIFIED BY "%(password)s"'
self._execute_statements(cursor, [set_password], parameters, verbosity)
# Most test suites can be run without "create view" and
# "create materialized view" privileges. But some need it.
for object_type in ("VIEW", "MATERIALIZED VIEW"):
extra = "GRANT CREATE %(object_type)s TO %(user)s"
parameters["object_type"] = object_type
success = self._execute_allow_fail_statements(
cursor, [extra], parameters, verbosity, "ORA-01031"
)
if not success and verbosity >= 2:
self.log(
"Failed to grant CREATE %s permission to test user. This may be ok."
% object_type
)
def _execute_test_db_destruction(self, cursor, parameters, verbosity):
if verbosity >= 2:
self.log("_execute_test_db_destruction(): dbname=%s" % parameters["user"])
statements = [
"DROP TABLESPACE %(tblspace)s "
"INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS",
"DROP TABLESPACE %(tblspace_temp)s "
"INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _destroy_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
self.log("_destroy_test_user(): user=%s" % parameters["user"])
self.log("Be patient. This can take some time...")
statements = [
"DROP USER %(user)s CASCADE",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(
self, cursor, statements, parameters, verbosity, allow_quiet_fail=False
):
for template in statements:
stmt = template % parameters
if verbosity >= 2:
print(stmt)
try:
cursor.execute(stmt)
except Exception as err:
if (not allow_quiet_fail) or verbosity >= 2:
self.log("Failed (%s)" % (err))
raise
def _execute_allow_fail_statements(
self, cursor, statements, parameters, verbosity, acceptable_ora_err
):
"""
Execute statements which are allowed to fail silently if the Oracle
error code given by `acceptable_ora_err` is raised. Return True if the
statements execute without an exception, or False otherwise.
"""
try:
# Statement can fail when acceptable_ora_err is not None
allow_quiet_fail = (
acceptable_ora_err is not None and len(acceptable_ora_err) > 0
)
self._execute_statements(
cursor,
statements,
parameters,
verbosity,
allow_quiet_fail=allow_quiet_fail,
)
return True
except DatabaseError as err:
description = str(err)
if acceptable_ora_err is None or acceptable_ora_err not in description:
raise
return False
def _get_test_db_params(self):
return {
"dbname": self._test_database_name(),
"user": self._test_database_user(),
"password": self._test_database_passwd(),
"tblspace": self._test_database_tblspace(),
"tblspace_temp": self._test_database_tblspace_tmp(),
"datafile": self._test_database_tblspace_datafile(),
"datafile_tmp": self._test_database_tblspace_tmp_datafile(),
"maxsize": self._test_database_tblspace_maxsize(),
"maxsize_tmp": self._test_database_tblspace_tmp_maxsize(),
"size": self._test_database_tblspace_size(),
"size_tmp": self._test_database_tblspace_tmp_size(),
"extsize": self._test_database_tblspace_extsize(),
"extsize_tmp": self._test_database_tblspace_tmp_extsize(),
}
def _test_settings_get(self, key, default=None, prefixed=None):
"""
Return a value from the test settings dict, or a given default, or a
prefixed entry from the main settings dict.
"""
settings_dict = self.connection.settings_dict
val = settings_dict["TEST"].get(key, default)
if val is None and prefixed:
val = TEST_DATABASE_PREFIX + settings_dict[prefixed]
return val
def _test_database_name(self):
return self._test_settings_get("NAME", prefixed="NAME")
def _test_database_create(self):
return self._test_settings_get("CREATE_DB", default=True)
def _test_user_create(self):
return self._test_settings_get("CREATE_USER", default=True)
def _test_database_user(self):
return self._test_settings_get("USER", prefixed="USER")
def _test_database_passwd(self):
password = self._test_settings_get("PASSWORD")
if password is None and self._test_user_create():
# Oracle passwords are limited to 30 chars and can't contain
# symbols.
password = get_random_string(30)
return password
def _test_database_tblspace(self):
return self._test_settings_get("TBLSPACE", prefixed="USER")
def _test_database_tblspace_tmp(self):
settings_dict = self.connection.settings_dict
return settings_dict["TEST"].get(
"TBLSPACE_TMP", TEST_DATABASE_PREFIX + settings_dict["USER"] + "_temp"
)
def _test_database_tblspace_datafile(self):
tblspace = "%s.dbf" % self._test_database_tblspace()
return self._test_settings_get("DATAFILE", default=tblspace)
def _test_database_tblspace_tmp_datafile(self):
tblspace = "%s.dbf" % self._test_database_tblspace_tmp()
return self._test_settings_get("DATAFILE_TMP", default=tblspace)
def _test_database_tblspace_maxsize(self):
return self._test_settings_get("DATAFILE_MAXSIZE", default="500M")
def _test_database_tblspace_tmp_maxsize(self):
return self._test_settings_get("DATAFILE_TMP_MAXSIZE", default="500M")
def _test_database_tblspace_size(self):
return self._test_settings_get("DATAFILE_SIZE", default="50M")
def _test_database_tblspace_tmp_size(self):
return self._test_settings_get("DATAFILE_TMP_SIZE", default="50M")
def _test_database_tblspace_extsize(self):
return self._test_settings_get("DATAFILE_EXTSIZE", default="25M")
def _test_database_tblspace_tmp_extsize(self):
return self._test_settings_get("DATAFILE_TMP_EXTSIZE", default="25M")
def _test_database_oracle_managed_files(self):
return self._test_settings_get("ORACLE_MANAGED_FILES", default=False)
def _get_test_db_name(self):
"""
Return the 'production' DB name to get the test DB creation machinery
to work. This isn't a great deal in this case because DB names as
handled by Django don't have real counterparts in Oracle.
"""
return self.connection.settings_dict["NAME"]
def test_db_signature(self):
settings_dict = self.connection.settings_dict
return (
settings_dict["HOST"],
settings_dict["PORT"],
settings_dict["ENGINE"],
settings_dict["NAME"],
self._test_database_user(),
)
| DatabaseCreation |
python | huggingface__transformers | src/transformers/models/led/modeling_led.py | {
"start": 57905,
"end": 59922
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see `past_key_values` input) to speed up sequential decoding.
encoder_global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: Optional[torch.FloatTensor] = None
end_logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
| LEDSeq2SeqQuestionAnsweringModelOutput |
python | getsentry__sentry-python | tests/integrations/starlette/test_starlette.py | {
"start": 37179,
"end": 42863
} | class ____:
"""Wraps any container and makes it non-iterable.
Used to test backwards compatibility with our old way of defining failed_request_status_codes, which allowed
passing in a list of (possibly non-iterable) containers. The Python standard library does not provide any built-in
non-iterable containers, so we have to define our own.
"""
def __init__(self, inner):
self.inner = inner
def __contains__(self, item):
return item in self.inner
parametrize_test_configurable_status_codes_deprecated = pytest.mark.parametrize(
"failed_request_status_codes,status_code,expected_error",
[
(None, 500, True),
(None, 400, False),
([500, 501], 500, True),
([500, 501], 401, False),
([range(400, 499)], 401, True),
([range(400, 499)], 500, False),
([range(400, 499), range(500, 599)], 300, False),
([range(400, 499), range(500, 599)], 403, True),
([range(400, 499), range(500, 599)], 503, True),
([range(400, 403), 500, 501], 401, True),
([range(400, 403), 500, 501], 405, False),
([range(400, 403), 500, 501], 501, True),
([range(400, 403), 500, 501], 503, False),
([], 500, False),
([NonIterableContainer(range(500, 600))], 500, True),
([NonIterableContainer(range(500, 600))], 404, False),
],
)
"""Test cases for configurable status codes (deprecated API).
Also used by the FastAPI tests.
"""
@parametrize_test_configurable_status_codes_deprecated
def test_configurable_status_codes_deprecated(
sentry_init,
capture_events,
failed_request_status_codes,
status_code,
expected_error,
):
with pytest.warns(DeprecationWarning):
starlette_integration = StarletteIntegration(
failed_request_status_codes=failed_request_status_codes
)
sentry_init(integrations=[starlette_integration])
events = capture_events()
async def _error(request):
raise HTTPException(status_code)
app = starlette.applications.Starlette(
routes=[
starlette.routing.Route("/error", _error, methods=["GET"]),
],
)
client = TestClient(app)
client.get("/error")
if expected_error:
assert len(events) == 1
else:
assert not events
@pytest.mark.skipif(
STARLETTE_VERSION < (0, 21),
reason="Requires Starlette >= 0.21, because earlier versions do not support HTTP 'HEAD' requests",
)
def test_transaction_http_method_default(sentry_init, capture_events):
"""
By default OPTIONS and HEAD requests do not create a transaction.
"""
sentry_init(
traces_sample_rate=1.0,
integrations=[
StarletteIntegration(),
],
)
events = capture_events()
starlette_app = starlette_app_factory()
client = TestClient(starlette_app)
client.get("/nomessage")
client.options("/nomessage")
client.head("/nomessage")
assert len(events) == 1
(event,) = events
assert event["request"]["method"] == "GET"
@pytest.mark.skipif(
STARLETTE_VERSION < (0, 21),
reason="Requires Starlette >= 0.21, because earlier versions do not support HTTP 'HEAD' requests",
)
def test_transaction_http_method_custom(sentry_init, capture_events):
sentry_init(
traces_sample_rate=1.0,
integrations=[
StarletteIntegration(
http_methods_to_capture=(
"OPTIONS",
"head",
), # capitalization does not matter
),
],
debug=True,
)
events = capture_events()
starlette_app = starlette_app_factory()
client = TestClient(starlette_app)
client.get("/nomessage")
client.options("/nomessage")
client.head("/nomessage")
assert len(events) == 2
(event1, event2) = events
assert event1["request"]["method"] == "OPTIONS"
assert event2["request"]["method"] == "HEAD"
@parametrize_test_configurable_status_codes
def test_configurable_status_codes(
sentry_init,
capture_events,
failed_request_status_codes,
status_code,
expected_error,
):
integration_kwargs = {}
if failed_request_status_codes is not None:
integration_kwargs["failed_request_status_codes"] = failed_request_status_codes
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
starlette_integration = StarletteIntegration(**integration_kwargs)
sentry_init(integrations=[starlette_integration])
events = capture_events()
async def _error(_):
raise HTTPException(status_code)
app = starlette.applications.Starlette(
routes=[
starlette.routing.Route("/error", _error, methods=["GET"]),
],
)
client = TestClient(app)
client.get("/error")
assert len(events) == int(expected_error)
@pytest.mark.asyncio
async def test_starletterequestextractor_malformed_json_error_handling(sentry_init):
scope = SCOPE.copy()
scope["headers"] = [
[b"content-type", b"application/json"],
]
starlette_request = starlette.requests.Request(scope)
malformed_json = "{invalid json"
malformed_messages = [
{"type": "http.request", "body": malformed_json.encode("utf-8")},
{"type": "http.disconnect"},
]
side_effect = [_mock_receive(msg) for msg in malformed_messages]
starlette_request._receive = mock.Mock(side_effect=side_effect)
extractor = StarletteRequestExtractor(starlette_request)
assert extractor.is_json()
result = await extractor.json()
assert result is None
| NonIterableContainer |
python | vyperlang__vyper | vyper/ast/parse.py | {
"start": 4779,
"end": 18806
} | class ____(python_ast.NodeTransformer):
_source_code: str
_pre_parser: PreParser
_parents: list[python_ast.AST]
def __init__(
self,
source_code: str,
pre_parser: PreParser,
source_id: int,
module_path: Optional[str] = None,
resolved_path: Optional[str] = None,
):
self._source_id = source_id
self._module_path = module_path
self._resolved_path = resolved_path
self._source_code = source_code
self._pre_parser = pre_parser
self._parents = []
self.counter: int = 0
@cached_property
def source_lines(self):
return self._source_code.splitlines(keepends=True)
@cached_property
def line_offsets(self):
ofst = 0
# ensure line_offsets has at least 1 entry for 0-line source
ret = {1: ofst}
for lineno, line in enumerate(self.source_lines):
ret[lineno + 1] = ofst
ofst += len(line)
return ret
def generic_visit(self, node):
"""
Adds location info to all python ast nodes and replaces python ast nodes
that are singletons with a copy so that the location info will be unique,
before annotating the nodes with information that simplifies Vyper node
generation.
"""
if isinstance(node, PYTHON_AST_SINGLETONS):
# for performance reasons, these AST nodes are represented as
# singletons in the C parser. however, since we want to add
# different source annotations for each operator, we create
# a copy here.
node = copy.copy(node)
# adapted from cpython Lib/ast.py. adds line/col info to ast,
# but unlike Lib/ast.py, adjusts *all* ast nodes, not just the
# one that python defines to have line/col info.
# https://github.com/python/cpython/blob/62729d79206014886f5d/Lib/ast.py#L228
for field in LINE_INFO_FIELDS:
if len(self._parents) > 0:
parent = self._parents[-1]
val = getattr(node, field, None)
if val is None:
# try to get the field from the parent
val = getattr(parent, field)
setattr(node, field, val)
else:
assert hasattr(node, field), node
# decorate every node with the original source code to allow
# pretty-printing errors
node.full_source_code = self._source_code
node.node_id = self.counter
self.counter += 1
node.ast_type = node.__class__.__name__
adjustments = self._pre_parser.adjustments
adj = adjustments.get((node.lineno, node.col_offset), 0)
node.col_offset += adj
adj = adjustments.get((node.end_lineno, node.end_col_offset), 0)
node.end_col_offset += adj
start_pos = self.line_offsets[node.lineno] + node.col_offset
end_pos = self.line_offsets[node.end_lineno] + node.end_col_offset
node.src = f"{start_pos}:{end_pos-start_pos}:{self._source_id}"
node.node_source_code = self._source_code[start_pos:end_pos]
# keep track of the current path thru the AST
self._parents.append(node)
try:
node = super().generic_visit(node)
finally:
self._parents.pop()
return node
def _visit_docstring(self, node):
"""
Move a node docstring from body to `doc_string` and annotate it as `DocStr`.
"""
self.generic_visit(node)
if node.body:
n = node.body[0]
if (
isinstance(n, python_ast.Expr)
and isinstance(n.value, python_ast.Constant)
and isinstance(n.value.value, str)
):
self.generic_visit(n.value)
n.value.ast_type = "DocStr"
del node.body[0]
node.doc_string = n.value
return node
def visit_Module(self, node):
node.lineno = 1
node.col_offset = 0
node.end_lineno = max(1, len(self.source_lines))
if len(self.source_lines) > 0:
node.end_col_offset = len(self.source_lines[-1])
else:
node.end_col_offset = 0
# TODO: is this the best place for these? maybe they can be on
# CompilerData instead.
node.path = self._module_path
node.resolved_path = self._resolved_path
node.source_sha256sum = sha256sum(self._source_code)
node.source_id = self._source_id
return self._visit_docstring(node)
def visit_FunctionDef(self, node):
return self._visit_docstring(node)
def visit_ClassDef(self, node):
"""
Convert the `ClassDef` node into a Vyper-specific node type.
Vyper uses `struct` and `interface` in place of `class`, however these
values must be substituted out to create parseable Python. The Python
node is annotated with the desired Vyper type via the `ast_type` member.
"""
self.generic_visit(node)
node.ast_type = self._pre_parser.keyword_translations[(node.lineno, node.col_offset)]
return node
def visit_For(self, node):
"""
Visit a For node, splicing in the loop variable annotation provided by
the pre-parser
"""
key = (node.lineno, node.col_offset)
annotation_tokens = self._pre_parser.for_loop_annotations.pop(key)
if not annotation_tokens:
# a common case for people migrating to 0.4.0, provide a more
# specific error message than "invalid type annotation"
raise SyntaxException(
"missing type annotation\n\n"
" (hint: did you mean something like "
f"`for {node.target.id}: uint256 in ...`?)",
self._source_code,
node.lineno,
node.col_offset,
)
# some kind of black magic. untokenize preserves the line and column
# offsets, giving us something like `\
# \
# \
# uint8`
# that's not a valid python Expr because it is indented.
# but it's good because the code is indented to exactly the same
# offset as it did in the original source!
# (to best understand this, print out annotation_str and
# self._source_code and compare them side-by-side).
#
# what we do here is add in a dummy target which we will remove
# in a bit, but for now lets us keep the line/col offset, and
# *also* gives us a valid AST. it doesn't matter what the dummy
# target name is, since it gets removed in a few lines.
annotation_str = tokenize.untokenize(annotation_tokens)
annotation_str = "dummy_target:" + annotation_str
try:
fake_node = python_ast.parse(annotation_str).body[0]
# do we need to fix location info here?
fake_node = _deepcopy_ast(fake_node)
except SyntaxError as e:
raise SyntaxException(
"invalid type annotation", self._source_code, node.lineno, node.col_offset
) from e
# block things like `for x: uint256 = 5 in ...`
if (value_node := fake_node.value) is not None:
raise SyntaxException(
"invalid type annotation",
self._source_code,
value_node.lineno,
value_node.col_offset,
)
# replace the dummy target name with the real target name.
fake_node.target = node.target
# replace the For node target with the new ann_assign
node.target = fake_node
return self.generic_visit(node)
def visit_Expr(self, node):
"""
Convert the `Yield` node into a Vyper-specific node type.
Vyper substitutes `yield` for non-pythonic statement such as `log`. Prior
to generating Vyper AST, we must annotate `Yield` nodes with their original
value.
Because `Yield` is an expression-statement, we also remove it from it's
enclosing `Expr` node.
"""
self.generic_visit(node)
if isinstance(node.value, python_ast.Yield):
# CMC 2024-03-03 consider unremoving this from the enclosing Expr
node = node.value
key = (node.lineno, node.col_offset)
node.ast_type = self._pre_parser.keyword_translations[key]
return node
def visit_Await(self, node):
start_pos = node.lineno, node.col_offset
self.generic_visit(node)
node.ast_type = self._pre_parser.keyword_translations[start_pos]
return node
def visit_Call(self, node):
# Convert structs declared as `Dict` node for vyper < 0.4.0 to kwargs
if len(node.args) == 1 and isinstance(node.args[0], python_ast.Dict):
msg = "Instantiating a struct using a dictionary is deprecated "
msg += "as of v0.4.0 and will be disallowed in a future release. "
msg += "Use kwargs instead e.g. Foo(a=1, b=2)"
# add full_source_code so that str(VyperException(msg, node)) works
node.full_source_code = self._source_code
vyper_warn(Deprecation(msg, node))
dict_ = node.args[0]
kw_list = []
assert len(dict_.keys) == len(dict_.values)
for key, value in zip(dict_.keys, dict_.values):
replacement_kw_node = python_ast.keyword(key.id, value)
# set locations
for attr in LINE_INFO_FIELDS:
setattr(replacement_kw_node, attr, getattr(key, attr))
kw_list.append(replacement_kw_node)
node.args = []
node.keywords = kw_list
self.generic_visit(node)
return node
def visit_Constant(self, node):
"""
Handle `Constant` when using Python >=3.8
In Python 3.8, `NameConstant`, `Num`, `Str`, and `Bytes` are deprecated
in favor of `Constant`. To maintain consistency across versions, `ast_type`
is modified to create the <=3.7 node classes.
"""
if not isinstance(node.value, bool) and isinstance(node.value, (int, float)):
return self.visit_Num(node)
self.generic_visit(node)
if node.value is None or isinstance(node.value, bool):
node.ast_type = "NameConstant"
elif isinstance(node.value, str):
key = (node.lineno, node.col_offset)
if key in self._pre_parser.hex_string_locations:
if len(node.value) % 2 != 0:
raise SyntaxException(
"Hex string must have an even number of characters",
self._source_code,
node.lineno,
node.col_offset,
)
node.ast_type = "HexBytes"
self._pre_parser.hex_string_locations.remove(key)
else:
node.ast_type = "Str"
elif isinstance(node.value, bytes):
node.ast_type = "Bytes"
elif isinstance(node.value, Ellipsis.__class__):
node.ast_type = "Ellipsis"
else:
raise SyntaxException(
"Invalid syntax (unsupported Python Constant AST node).",
self._source_code,
node.lineno,
node.col_offset,
)
return node
def visit_Num(self, node):
"""
Adjust numeric node class based on the value type.
Python uses `Num` to represent floats and integers. Integers may also
be given in binary, octal, decimal, or hexadecimal format. This method
modifies `ast_type` to separate `Num` into more granular Vyper node
classes.
"""
# modify vyper AST type according to the format of the literal value
self.generic_visit(node)
value = node.node_source_code
# deduce non base-10 types based on prefix
if value.lower()[:2] == "0x":
if len(value) % 2:
raise SyntaxException(
"Hex notation requires an even number of digits",
self._source_code,
node.lineno,
node.col_offset,
)
node.ast_type = "Hex"
node.value = value
elif value.lower()[:2] == "0b":
node.ast_type = "Bytes"
mod = (len(value) - 2) % 8
if mod:
raise SyntaxException(
f"Bit notation requires a multiple of 8 bits. {8-mod} bit(s) are missing.",
self._source_code,
node.lineno,
node.col_offset,
)
node.value = int(value, 2).to_bytes(len(value) // 8, "big")
elif isinstance(node.value, float):
node.ast_type = "Decimal"
node.value = Decimal(value)
elif isinstance(node.value, int):
node.ast_type = "Int"
else: # pragma: nocover
raise CompilerPanic(f"Unexpected type for Constant value: {type(node.value).__name__}")
return node
def visit_UnaryOp(self, node):
"""
Adjust operand value and discard unary operations, where possible.
This is done so that negative decimal literals are accurately represented.
"""
self.generic_visit(node)
is_sub = isinstance(node.op, python_ast.USub)
is_num = hasattr(node.operand, "value") and isinstance(node.operand.value, (int, Decimal))
if is_sub and is_num:
node.operand.value = 0 - node.operand.value
node.operand.col_offset = node.col_offset
node.operand.node_source_code = node.node_source_code
return node.operand
else:
return node
| AnnotatingVisitor |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/event_api.py | {
"start": 9352,
"end": 13851
} | class ____(
NamedTuple(
"_AssetRecordsFilter",
[
("asset_key", PublicAttr[AssetKey]),
("asset_partitions", PublicAttr[Optional[Sequence[str]]]),
("after_timestamp", PublicAttr[Optional[float]]),
("before_timestamp", PublicAttr[Optional[float]]),
("after_storage_id", PublicAttr[Optional[int]]),
("before_storage_id", PublicAttr[Optional[int]]),
("storage_ids", PublicAttr[Optional[Sequence[int]]]),
],
)
):
"""Defines a set of filter fields for fetching a set of asset event records.
Args:
asset_key (Optional[AssetKey]): Asset key for which to get asset event entries / records.
asset_partitions (Optional[List[str]]): Filter parameter such that only asset
events with a partition value matching one of the provided values are returned. Only
valid when the `asset_key` parameter is provided.
after_timestamp (Optional[float]): Filter parameter such that only event records for
events with timestamp greater than the provided value are returned.
before_timestamp (Optional[float]): Filter parameter such that only event records for
events with timestamp less than the provided value are returned.
after_storage_id (Optional[float]): Filter parameter such that only event records for
events with storage_id greater than the provided value are returned.
before_storage_id (Optional[float]): Filter parameter such that only event records for
events with storage_id less than the provided value are returned.
storage_ids (Optional[Sequence[int]]): Filter parameter such that only event records for
the given storage ids are returned.
tags (Optional[Mapping[str, Union[str, Sequence[str]]]]): Filter parameter such that only
events with the given event tags are returned
"""
def __new__(
cls,
asset_key: AssetKey,
asset_partitions: Optional[Sequence[str]] = None,
after_timestamp: Optional[float] = None,
before_timestamp: Optional[float] = None,
after_storage_id: Optional[int] = None,
before_storage_id: Optional[int] = None,
storage_ids: Optional[Sequence[int]] = None,
):
return super().__new__(
cls,
asset_key=check.inst_param(asset_key, "asset_key", AssetKey),
asset_partitions=check.opt_nullable_sequence_param(
asset_partitions, "asset_partitions", of_type=str
),
after_timestamp=check.opt_float_param(after_timestamp, "after_timestamp"),
before_timestamp=check.opt_float_param(before_timestamp, "before_timestamp"),
after_storage_id=check.opt_int_param(after_storage_id, "after_storage_id"),
before_storage_id=check.opt_int_param(before_storage_id, "before_storage_id"),
storage_ids=check.opt_nullable_sequence_param(storage_ids, "storage_ids", of_type=int),
)
def to_event_records_filter(
self, event_type: AssetEventType, cursor: Optional[str] = None, ascending: bool = False
) -> EventRecordsFilter:
before_cursor_storage_id, after_cursor_storage_id = EventRecordsFilter.get_cursor_params(
cursor, ascending
)
if self.before_storage_id and before_cursor_storage_id:
before_cursor = min(self.before_storage_id, before_cursor_storage_id)
else:
before_cursor = (
before_cursor_storage_id if before_cursor_storage_id else self.before_storage_id
)
if self.after_storage_id and after_cursor_storage_id:
after_cursor = max(self.after_storage_id, after_cursor_storage_id)
else:
after_cursor = (
after_cursor_storage_id if after_cursor_storage_id else self.after_storage_id
)
return EventRecordsFilter(
event_type=event_type,
asset_key=self.asset_key,
asset_partitions=self.asset_partitions,
after_cursor=after_cursor,
before_cursor=before_cursor,
after_timestamp=self.after_timestamp,
before_timestamp=self.before_timestamp,
storage_ids=self.storage_ids,
)
@property
def tags(self) -> Optional[Mapping[str, Union[str, Sequence[str]]]]:
return None
@whitelist_for_serdes
| AssetRecordsFilter |
python | huggingface__transformers | src/transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py | {
"start": 14289,
"end": 16715
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.num_experts = config.moe_num_experts
self.hidden_dim = config.hidden_size
self.intermediate_dim = config.moe_intermediate_size
self.use_bias = config.use_bias
self.act_fn = ACT2FN[config.hidden_act]
self.gate_up_proj = nn.Parameter(torch.zeros(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim))
self.down_proj = nn.Parameter(torch.zeros(self.num_experts, self.hidden_dim, self.intermediate_dim))
if self.use_bias:
self.gate_up_proj_bias = nn.Parameter(torch.zeros(self.num_experts, 2 * self.intermediate_dim))
self.down_proj_bias = nn.Parameter(torch.zeros(self.num_experts, self.hidden_dim))
else:
self.gate_up_proj_bias = None
self.down_proj_bias = None
def forward(
self, hidden_states: torch.Tensor, selected_experts: torch.Tensor, routing_weights: torch.Tensor
) -> torch.Tensor:
final_hidden_states = torch.zeros_like(hidden_states)
if selected_experts.numel() == 0:
return final_hidden_states
expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit:
expert_idx = int(expert_idx.item())
idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
current_state = hidden_states[None, top_x].reshape(-1, hidden_states.shape[-1])
gate_inputs = F.linear(
current_state,
self.gate_up_proj[expert_idx],
None if self.gate_up_proj_bias is None else self.gate_up_proj_bias[expert_idx],
)
gate, up = gate_inputs.chunk(2, dim=-1)
current_hidden_states = self.act_fn(gate) * up
current_hidden_states = F.linear(
current_hidden_states,
self.down_proj[expert_idx],
None if self.down_proj_bias is None else self.down_proj_bias[expert_idx],
)
current_hidden_states = current_hidden_states * routing_weights[top_x, idx, None]
final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
return final_hidden_states
| Ernie4_5_MoeExperts |
python | facebook__pyre-check | tools/pysa_integration_tests/tests/runner_lib_test.py | {
"start": 453,
"end": 9874
} | class ____(testslide.TestCase):
def test_parse_annotations(self) -> None:
self.assertEqual(
test_runner_lib.parse_test_annotations_from_source(
textwrap.dedent(
"""
def foo() -> None:
pass
"""
)
),
{},
)
self.assertEqual(
test_runner_lib.parse_test_annotations_from_source(
textwrap.dedent(
"""
@ExpectIssue(code=1000)
def foo() -> None:
pass
"""
)
),
{
"foo": FunctionTestAnnotations(
definition_line=3,
annotations=[
TestAnnotation(
expected=True,
code=1000,
)
],
)
},
)
self.assertEqual(
test_runner_lib.parse_test_annotations_from_source(
textwrap.dedent(
"""
@ExpectIssue(code=1000, line=1, task='T123', currently_found=False)
def foo() -> None:
pass
"""
)
),
{
"foo": FunctionTestAnnotations(
definition_line=3,
annotations=[
TestAnnotation(
expected=True,
code=1000,
line=1,
task="T123",
currently_found=False,
)
],
)
},
)
self.assertEqual(
test_runner_lib.parse_test_annotations_from_source(
textwrap.dedent(
"""
@ExpectIssue(code=1000, line=1)
@ExpectIssue(code=1000, line=2)
@ExpectNoIssue(code=1001)
def foo() -> None:
pass
"""
)
),
{
"foo": FunctionTestAnnotations(
definition_line=5,
annotations=[
TestAnnotation(
expected=True,
code=1000,
line=1,
),
TestAnnotation(
expected=True,
code=1000,
line=2,
),
TestAnnotation(
expected=False,
code=1001,
),
],
)
},
)
# Ignore unknown decorators
self.assertEqual(
test_runner_lib.parse_test_annotations_from_source(
textwrap.dedent(
"""
@ExpectIssue(code=1000)
@other_decorator()
def foo() -> None:
pass
"""
)
),
{
"foo": FunctionTestAnnotations(
definition_line=4,
annotations=[
TestAnnotation(
expected=True,
code=1000,
)
],
)
},
)
with self.assertRaises(test_runner_lib.TestConfigurationException):
test_runner_lib.parse_test_annotations_from_source(
textwrap.dedent(
"""
@ExpectIssue()
def foo() -> None:
pass
"""
)
)
with self.assertRaises(test_runner_lib.TestConfigurationException):
test_runner_lib.parse_test_annotations_from_source(
textwrap.dedent(
"""
@ExpectIssue(code='a')
def foo() -> None:
pass
"""
)
)
with self.assertRaises(test_runner_lib.TestConfigurationException):
test_runner_lib.parse_test_annotations_from_source(
textwrap.dedent(
"""
@ExpectIssue(code=1000, unknown=0)
def foo() -> None:
pass
"""
)
)
@staticmethod
def make_issue(code: int, line: int) -> test_runner_lib.Issue:
return {
"define": "foo",
"code": code,
"path": "foo.py",
"line": line,
"column": 0,
"stop_line": line,
"stop_column": 1,
"description": "dummy issue",
"name": "dummy name",
}
def assert_compare_to_test_annotations_success(
self, issues: List[test_runner_lib.Issue], annotations: List[TestAnnotation]
) -> None:
self.assertEqual(
test_runner_lib.compare_issues_to_test_annotations(
function="foo",
definition_line=0,
code=1000,
issues=issues,
annotations=annotations,
),
[],
)
def assert_compare_to_test_annotations_fail(
self, issues: List[test_runner_lib.Issue], annotations: List[TestAnnotation]
) -> None:
self.assertNotEqual(
test_runner_lib.compare_issues_to_test_annotations(
function="foo",
definition_line=0,
code=1000,
issues=issues,
annotations=annotations,
),
[],
)
def test_compare_to_test_annotations(self) -> None:
self.assert_compare_to_test_annotations_success(
issues=[],
annotations=[],
)
self.assert_compare_to_test_annotations_success(
issues=[RunnerLibTest.make_issue(code=1000, line=1)],
annotations=[TestAnnotation(expected=True, code=1000)],
)
# expecting no issue.
self.assert_compare_to_test_annotations_fail(
issues=[
RunnerLibTest.make_issue(code=1000, line=2),
],
annotations=[
TestAnnotation(expected=False, code=1000),
],
)
# mismatching lines
self.assert_compare_to_test_annotations_fail(
issues=[RunnerLibTest.make_issue(code=1000, line=2)],
annotations=[TestAnnotation(expected=True, code=1000, line=3)],
)
self.assert_compare_to_test_annotations_success(
issues=[
RunnerLibTest.make_issue(code=1000, line=1),
RunnerLibTest.make_issue(code=1000, line=2),
],
annotations=[
TestAnnotation(expected=True, code=1000, line=1),
TestAnnotation(expected=True, code=1000, line=2),
],
)
self.assert_compare_to_test_annotations_fail(
issues=[
RunnerLibTest.make_issue(code=1000, line=1),
RunnerLibTest.make_issue(code=1000, line=2),
],
annotations=[
TestAnnotation(expected=True, code=1000, line=1),
TestAnnotation(expected=True, code=1000, line=3),
],
)
# mismatching number of issues.
self.assert_compare_to_test_annotations_fail(
issues=[
RunnerLibTest.make_issue(code=1000, line=1),
RunnerLibTest.make_issue(code=1000, line=2),
],
annotations=[
TestAnnotation(expected=True, code=1000),
],
)
# mix of ExpectIssue and ExpectNoIssue
self.assert_compare_to_test_annotations_success(
issues=[
RunnerLibTest.make_issue(code=1000, line=2),
],
annotations=[
TestAnnotation(expected=False, code=1000, line=1),
TestAnnotation(expected=True, code=1000, line=2),
TestAnnotation(expected=False, code=1000, line=3),
],
)
# test with currently_found=True
self.assert_compare_to_test_annotations_success(
issues=[
RunnerLibTest.make_issue(code=1000, line=2),
],
annotations=[
TestAnnotation(expected=False, code=1000, line=2, currently_found=True),
],
)
# mix of currently_found=True
self.assert_compare_to_test_annotations_success(
issues=[
RunnerLibTest.make_issue(code=1000, line=2),
RunnerLibTest.make_issue(code=1000, line=3),
],
annotations=[
TestAnnotation(expected=False, code=1000, line=2, currently_found=True),
TestAnnotation(expected=True, code=1000, line=3),
],
)
| RunnerLibTest |
python | allegroai__clearml | clearml/backend_api/services/v2_9/models.py | {
"start": 25311,
"end": 27028
} | class ____(Request):
"""
Delete a model.
:param model: Model ID
:type model: str
:param force: Force. Required if there are tasks that use the model as an
execution model, or if the model's creating task is published.
:type force: bool
"""
_service = "models"
_action = "delete"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"force": {
"description": "Force. Required if there are tasks that use the model as an execution model, or if the model's creating task is published.\n ",
"type": "boolean",
},
"model": {"description": "Model ID", "type": "string"},
},
"required": ["model"],
"type": "object",
}
def __init__(self, model: str, force: Optional[bool] = None, **kwargs: Any) -> None:
super(DeleteRequest, self).__init__(**kwargs)
self.model = model
self.force = force
@schema_property("model")
def model(self) -> str:
return self._property_model
@model.setter
def model(self, value: str) -> None:
if value is None:
self._property_model = None
return
self.assert_isinstance(value, "model", six.string_types)
self._property_model = value
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
| DeleteRequest |
python | gevent__gevent | src/gevent/tests/test__socket_dns.py | {
"start": 26829,
"end": 27472
} | class ____(TestCase):
def test_inet(self):
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, socket.AF_INET)
def test_unspec(self):
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, socket.AF_UNSPEC)
def test_badvalue(self):
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, 255)
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, 255000)
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, -1)
@unittest.skipIf(RESOLVER_DNSPYTHON, "Raises the wrong errno")
def test_badtype(self):
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, 'x')
| TestFamily |
python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {
"start": 15014,
"end": 16162
} | class ____(nn.Module):
"""This module mixes the hidden feature dimension.
Args:
config (`PatchTSMixerConfig`):
Configuration.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.norm = PatchTSMixerNormLayer(config)
self.gated_attn = config.gated_attn
self.mlp = PatchTSMixerMLP(
in_features=config.d_model,
out_features=config.d_model,
config=config,
)
if config.gated_attn:
self.gating_block = PatchTSMixerGatedAttention(in_size=config.d_model, out_size=config.d_model)
def forward(self, hidden: torch.Tensor):
"""
Args:
hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`):
Input tensor to the layer.
Returns:
`torch.Tensor`: Transformed tensor.
"""
residual = hidden
hidden = self.norm(hidden)
hidden = self.mlp(hidden)
if self.gated_attn:
hidden = self.gating_block(hidden)
out = hidden + residual
return out
| FeatureMixerBlock |
python | django__django | tests/db_functions/migrations/0001_setup_extensions.py | {
"start": 189,
"end": 312
} | class ____(migrations.Migration):
# Required for the SHA database functions.
operations = [CryptoExtension()]
| Migration |
python | altair-viz__altair | altair/vegalite/v6/schema/mixins.py | {
"start": 68669,
"end": 86990
} | class ____:
"""A mixin class that defines config methods."""
@use_signature(core.Config)
def configure(self, *args, **kwargs) -> Self:
copy = self.copy(deep=False) # type: ignore[attr-defined]
copy.config = core.Config(*args, **kwargs)
return copy
@use_signature(core.RectConfig)
def configure_arc(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["arc"] = core.RectConfig(*args, **kwargs)
return copy
@use_signature(core.AreaConfig)
def configure_area(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["area"] = core.AreaConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axis(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axis"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisBand(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisBand"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisBottom(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisBottom"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisDiscrete(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisDiscrete"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisLeft(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisLeft"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisPoint(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisPoint"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisQuantitative(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisQuantitative"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisRight(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisRight"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisTemporal(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisTemporal"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisTop(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisTop"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisX(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisX"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisXBand(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisXBand"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisXDiscrete(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisXDiscrete"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisXPoint(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisXPoint"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisXQuantitative(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisXQuantitative"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisXTemporal(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisXTemporal"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisY(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisY"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisYBand(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisYBand"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisYDiscrete(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisYDiscrete"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisYPoint(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisYPoint"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisYQuantitative(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisYQuantitative"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.AxisConfig)
def configure_axisYTemporal(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["axisYTemporal"] = core.AxisConfig(*args, **kwargs)
return copy
@use_signature(core.BarConfig)
def configure_bar(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["bar"] = core.BarConfig(*args, **kwargs)
return copy
@use_signature(core.BoxPlotConfig)
def configure_boxplot(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["boxplot"] = core.BoxPlotConfig(*args, **kwargs)
return copy
@use_signature(core.MarkConfig)
def configure_circle(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["circle"] = core.MarkConfig(*args, **kwargs)
return copy
@use_signature(core.CompositionConfig)
def configure_concat(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["concat"] = core.CompositionConfig(*args, **kwargs)
return copy
@use_signature(core.ErrorBandConfig)
def configure_errorband(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["errorband"] = core.ErrorBandConfig(*args, **kwargs)
return copy
@use_signature(core.ErrorBarConfig)
def configure_errorbar(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["errorbar"] = core.ErrorBarConfig(*args, **kwargs)
return copy
@use_signature(core.CompositionConfig)
def configure_facet(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["facet"] = core.CompositionConfig(*args, **kwargs)
return copy
@use_signature(core.MarkConfig)
def configure_geoshape(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["geoshape"] = core.MarkConfig(*args, **kwargs)
return copy
@use_signature(core.HeaderConfig)
def configure_header(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["header"] = core.HeaderConfig(*args, **kwargs)
return copy
@use_signature(core.HeaderConfig)
def configure_headerColumn(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["headerColumn"] = core.HeaderConfig(*args, **kwargs)
return copy
@use_signature(core.HeaderConfig)
def configure_headerFacet(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["headerFacet"] = core.HeaderConfig(*args, **kwargs)
return copy
@use_signature(core.HeaderConfig)
def configure_headerRow(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["headerRow"] = core.HeaderConfig(*args, **kwargs)
return copy
@use_signature(core.RectConfig)
def configure_image(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["image"] = core.RectConfig(*args, **kwargs)
return copy
@use_signature(core.LegendConfig)
def configure_legend(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["legend"] = core.LegendConfig(*args, **kwargs)
return copy
@use_signature(core.LineConfig)
def configure_line(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["line"] = core.LineConfig(*args, **kwargs)
return copy
@use_signature(core.MarkConfig)
def configure_mark(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["mark"] = core.MarkConfig(*args, **kwargs)
return copy
@use_signature(core.MarkConfig)
def configure_point(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["point"] = core.MarkConfig(*args, **kwargs)
return copy
@use_signature(core.ProjectionConfig)
def configure_projection(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["projection"] = core.ProjectionConfig(*args, **kwargs)
return copy
@use_signature(core.RangeConfig)
def configure_range(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["range"] = core.RangeConfig(*args, **kwargs)
return copy
@use_signature(core.RectConfig)
def configure_rect(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["rect"] = core.RectConfig(*args, **kwargs)
return copy
@use_signature(core.MarkConfig)
def configure_rule(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["rule"] = core.MarkConfig(*args, **kwargs)
return copy
@use_signature(core.ScaleConfig)
def configure_scale(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["scale"] = core.ScaleConfig(*args, **kwargs)
return copy
@use_signature(core.SelectionConfig)
def configure_selection(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["selection"] = core.SelectionConfig(*args, **kwargs)
return copy
@use_signature(core.MarkConfig)
def configure_square(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["square"] = core.MarkConfig(*args, **kwargs)
return copy
@use_signature(core.MarkConfig)
def configure_text(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["text"] = core.MarkConfig(*args, **kwargs)
return copy
@use_signature(core.TickConfig)
def configure_tick(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["tick"] = core.TickConfig(*args, **kwargs)
return copy
@use_signature(core.TitleConfig)
def configure_title(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["title"] = core.TitleConfig(*args, **kwargs)
return copy
@use_signature(core.FormatConfig)
def configure_tooltipFormat(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["tooltipFormat"] = core.FormatConfig(*args, **kwargs)
return copy
@use_signature(core.LineConfig)
def configure_trail(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["trail"] = core.LineConfig(*args, **kwargs)
return copy
@use_signature(core.ViewConfig)
def configure_view(self, *args, **kwargs) -> Self:
copy = self.copy(deep=["config"]) # type: ignore[attr-defined]
if copy.config is Undefined:
copy.config = core.Config()
copy.config["view"] = core.ViewConfig(*args, **kwargs)
return copy
| ConfigMethodMixin |
python | bokeh__bokeh | src/bokeh/models/annotations/labels.py | {
"start": 2232,
"end": 3684
} | class ____(Annotation):
''' Base class for text annotation models such as labels and titles.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
text = TextLike(default="", help="""
A text or LaTeX notation to render.
""")
padding = Padding(default=0, help="""
Extra space between the text of a label and its bounding box (border).
.. note::
This property is experimental and may change at any point.
""")
border_radius = BorderRadius(default=0, help="""
Allows label's box to have rounded corners. For the best results, it
should be used in combination with ``padding``.
.. note::
This property is experimental and may change at any point.
""")
text_props = Include(ScalarTextProps, help="""
The {prop} values for the text.
""")
background_fill_props = Include(ScalarFillProps, prefix="background", help="""
The {prop} values for the text bounding box.
""")
background_hatch_props = Include(ScalarHatchProps, prefix="background", help="""
The {prop} values for the text bounding box.
""")
border_props = Include(ScalarLineProps, prefix="border", help="""
The {prop} values for the text bounding box.
""")
background_fill_color = Override(default=None)
border_line_color = Override(default=None)
| TextAnnotation |
python | walkccc__LeetCode | solutions/1655. Distribute Repeating Integers/1655.py | {
"start": 0,
"end": 1651
} | class ____:
def canDistribute(self, nums: list[int], quantity: list[int]) -> bool:
freqs = list(collections.Counter(nums).values())
# validDistribution[i][j] := True if it's possible to distribute the i-th
# freq into a subset of quantity represented by the bitmask j
validDistribution = self._getValidDistribution(freqs, quantity)
n = len(freqs)
m = len(quantity)
maxMask = 1 << m
# dp[i][j] := true if it's possible to distribute freqs[i..n), where j is
# the bitmask of the selected quantity
dp = [[False] * maxMask for _ in range(n + 1)]
dp[n][maxMask - 1] = True
for i in range(n - 1, -1, -1):
for mask in range(maxMask):
dp[i][mask] = dp[i + 1][mask]
availableMask = ~mask & (maxMask - 1)
submask = availableMask
while submask > 0:
if validDistribution[i][submask]:
dp[i][mask] = dp[i][mask] or dp[i + 1][mask | submask]
submask = (submask - 1) & availableMask
return dp[0][0]
def _getValidDistribution(self, freqs: list[int],
quantity: list[int]) -> list[list[bool]]:
maxMask = 1 << len(quantity)
validDistribution = [[False] * maxMask for _ in range(len(freqs))]
for i, freq in enumerate(freqs):
for mask in range(maxMask):
if freq >= self._getQuantitySum(quantity, mask):
validDistribution[i][mask] = True
return validDistribution
def _getQuantitySum(self, quantity: list[int], mask: int) -> int:
"""Returns the sum of the selected quantity represented by `mask`."""
return sum(q for i, q in enumerate(quantity) if mask >> i & 1)
| Solution |
python | pytorch__pytorch | test/dynamo/test_repros.py | {
"start": 18166,
"end": 18422
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(784, 5)
def forward(self, x, ignored=None, bn_training=False):
return self.linear(x.view(x.shape[0], -1))
| FakeMamlInner |
python | django-compressor__django-compressor | compressor/tests/test_filters.py | {
"start": 9320,
"end": 9619
} | class ____(TestCase):
def test_calmjs_filter(self):
content = """
var foo = "bar";"""
output = """var foo="bar";"""
self.assertEqual(output, CalmjsFilter(content).output())
@override_settings(
COMPRESS_ENABLED=True,
COMPRESS_URL="/static/",
)
| CalmjsTestCase |
python | astropy__astropy | astropy/cosmology/_src/parameter/core.py | {
"start": 467,
"end": 735
} | class ____(Enum):
"""Sentinel values for Parameter fields."""
MISSING = auto()
"""A sentinel value signifying a missing default."""
def __repr__(self) -> str:
return f"<{self.name}>"
MISSING = Sentinel.MISSING
@dataclass(frozen=True)
| Sentinel |
python | readthedocs__readthedocs.org | readthedocs/oauth/migrations/0009_add_missing_model_change_migrations.py | {
"start": 180,
"end": 1633
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("oauth", "0008_add-project-relation"),
]
operations = [
migrations.AlterField(
model_name="remoterepository",
name="clone_url",
field=models.URLField(
blank=True,
max_length=512,
validators=[
django.core.validators.URLValidator(
schemes=["http", "https", "ssh", "git", "svn"]
)
],
verbose_name="Repository clone URL",
),
),
migrations.AlterField(
model_name="remoterepository",
name="ssh_url",
field=models.URLField(
blank=True,
max_length=512,
validators=[django.core.validators.URLValidator(schemes=["ssh"])],
verbose_name="SSH URL",
),
),
migrations.AlterField(
model_name="remoterepository",
name="vcs",
field=models.CharField(
blank=True,
choices=[
("git", "Git"),
("svn", "Subversion"),
("hg", "Mercurial"),
("bzr", "Bazaar"),
],
max_length=200,
verbose_name="vcs",
),
),
]
| Migration |
python | doocs__leetcode | solution/1900-1999/1902.Depth of BST Given Insertion Order/Solution.py | {
"start": 0,
"end": 386
} | class ____:
def maxDepthBST(self, order: List[int]) -> int:
sd = SortedDict({0: 0, inf: 0, order[0]: 1})
ans = 1
for v in order[1:]:
lower = sd.bisect_left(v) - 1
higher = lower + 1
depth = 1 + max(sd.values()[lower], sd.values()[higher])
ans = max(ans, depth)
sd[v] = depth
return ans
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 69854,
"end": 70356
} | class ____(str, Enum):
"""
* `PERIODIC`: Schedules that periodically trigger runs, such as a cron scheduler.
* `ONE_TIME`: One time triggers that fire a single run. This occurs you triggered a single run on demand through the UI or the API.
* `RETRY`: Indicates a run that is triggered as a retry of a previously failed run. This occurs when you request to re-run the job in case of failures.
"""
periodic = "PERIODIC"
onetime = "ONE_TIME"
retry = "RETRY"
| TriggerType |
python | cython__cython | Cython/Compiler/MemoryView.py | {
"start": 15550,
"end": 31485
} | class ____(SliceIter):
def start_loops(self):
code = self.code
code.begin_block()
for i in range(self.ndim):
t = i, self.slice_result, i
code.putln("Py_ssize_t __pyx_temp_extent_%d = %s.shape[%d];" % t)
code.putln("Py_ssize_t __pyx_temp_stride_%d = %s.strides[%d];" % t)
code.putln("char *__pyx_temp_pointer_%d;" % i)
code.putln("Py_ssize_t __pyx_temp_idx_%d;" % i)
code.putln("__pyx_temp_pointer_0 = %s.data;" % self.slice_result)
for i in range(self.ndim):
if i > 0:
code.putln("__pyx_temp_pointer_%d = __pyx_temp_pointer_%d;" % (i, i - 1))
code.putln("for (__pyx_temp_idx_%d = 0; "
"__pyx_temp_idx_%d < __pyx_temp_extent_%d; "
"__pyx_temp_idx_%d++) {" % (i, i, i, i))
return "__pyx_temp_pointer_%d" % (self.ndim - 1)
def end_loops(self):
code = self.code
for i in range(self.ndim - 1, -1, -1):
code.putln("__pyx_temp_pointer_%d += __pyx_temp_stride_%d;" % (i, i))
code.putln("}")
code.end_block()
def copy_c_or_fortran_cname(memview):
if memview.is_c_contig:
c_or_f = 'c'
else:
c_or_f = 'f'
return "__pyx_memoryview_copy_slice_%s_%s" % (
memview.specialization_suffix(), c_or_f)
def get_copy_new_utility(pos, from_memview, to_memview):
if (from_memview.dtype != to_memview.dtype and
not (from_memview.dtype.is_cv_qualified and from_memview.dtype.cv_base_type == to_memview.dtype)):
error(pos, "dtypes must be the same!")
return
if len(from_memview.axes) != len(to_memview.axes):
error(pos, "number of dimensions must be same")
return
if not (to_memview.is_c_contig or to_memview.is_f_contig):
error(pos, "to_memview must be c or f contiguous.")
return
for (access, packing) in from_memview.axes:
if access != 'direct':
error(pos, "cannot handle 'full' or 'ptr' access at this time.")
return
if to_memview.is_c_contig:
mode = 'c'
contig_flag = memview_c_contiguous
else:
assert to_memview.is_f_contig
mode = 'fortran'
contig_flag = memview_f_contiguous
return load_memview_c_utility(
"CopyContentsUtility",
context=dict(
template_context,
mode=mode,
dtype_decl=to_memview.dtype.empty_declaration_code(),
contig_flag=contig_flag,
ndim=to_memview.ndim,
func_cname=copy_c_or_fortran_cname(to_memview),
dtype_is_object=int(to_memview.dtype.is_pyobject),
),
)
def get_axes_specs(env, axes):
'''
get_axes_specs(env, axes) -> list of (access, packing) specs for each axis.
access is one of 'full', 'ptr' or 'direct'
packing is one of 'contig', 'strided' or 'follow'
'''
cythonscope = env.context.cython_scope
cythonscope.load_cythonscope()
viewscope = cythonscope.viewscope
access_specs = tuple([viewscope.lookup(name)
for name in ('full', 'direct', 'ptr')])
packing_specs = tuple([viewscope.lookup(name)
for name in ('contig', 'strided', 'follow')])
is_f_contig, is_c_contig = False, False
default_access, default_packing = 'direct', 'strided'
cf_access, cf_packing = default_access, 'follow'
axes_specs = []
# analyse all axes.
for idx, axis in enumerate(axes):
if not axis.start.is_none:
raise CompileError(axis.start.pos, START_ERR)
if not axis.stop.is_none:
raise CompileError(axis.stop.pos, STOP_ERR)
if axis.step.is_none:
axes_specs.append((default_access, default_packing))
elif isinstance(axis.step, IntNode):
# the packing for the ::1 axis is contiguous,
# all others are cf_packing.
if axis.step.compile_time_value(env) != 1:
raise CompileError(axis.step.pos, STEP_ERR)
axes_specs.append((cf_access, 'cfcontig'))
elif isinstance(axis.step, (NameNode, AttributeNode)):
entry = _get_resolved_spec(env, axis.step)
if entry.name in view_constant_to_access_packing:
axes_specs.append(view_constant_to_access_packing[entry.name])
else:
raise CompileError(axis.step.pos, INVALID_ERR)
else:
raise CompileError(axis.step.pos, INVALID_ERR)
# First, find out if we have a ::1 somewhere
contig_dim = 0
is_contig = False
for idx, (access, packing) in enumerate(axes_specs):
if packing == 'cfcontig':
if is_contig:
raise CompileError(axis.step.pos, BOTH_CF_ERR)
contig_dim = idx
axes_specs[idx] = (access, 'contig')
is_contig = True
if is_contig:
# We have a ::1 somewhere, see if we're C or Fortran contiguous
if contig_dim == len(axes) - 1:
is_c_contig = True
else:
is_f_contig = True
if contig_dim and not axes_specs[contig_dim - 1][0] in ('full', 'ptr'):
raise CompileError(axes[contig_dim].pos,
"Fortran contiguous specifier must follow an indirect dimension")
if is_c_contig:
# Contiguous in the last dimension, find the last indirect dimension
contig_dim = -1
for idx, (access, packing) in enumerate(reversed(axes_specs)):
if access in ('ptr', 'full'):
contig_dim = len(axes) - idx - 1
# Replace 'strided' with 'follow' for any dimension following the last
# indirect dimension, the first dimension or the dimension following
# the ::1.
# int[::indirect, ::1, :, :]
# ^ ^
# int[::indirect, :, :, ::1]
# ^ ^
start = contig_dim + 1
stop = len(axes) - is_c_contig
for idx, (access, packing) in enumerate(axes_specs[start:stop]):
idx = contig_dim + 1 + idx
if access != 'direct':
raise CompileError(axes[idx].pos,
"Indirect dimension may not follow "
"Fortran contiguous dimension")
if packing == 'contig':
raise CompileError(axes[idx].pos,
"Dimension may not be contiguous")
axes_specs[idx] = (access, cf_packing)
if is_c_contig:
# For C contiguity, we need to fix the 'contig' dimension
# after the loop
a, p = axes_specs[-1]
axes_specs[-1] = a, 'contig'
validate_axes_specs([axis.start.pos for axis in axes],
axes_specs,
is_c_contig,
is_f_contig)
return axes_specs
def validate_axes(pos, axes):
if len(axes) >= Options.buffer_max_dims:
error(pos, "More dimensions than the maximum number"
" of buffer dimensions were used.")
return False
return True
def is_cf_contig(specs):
is_c_contig = is_f_contig = False
if len(specs) == 1 and specs == [('direct', 'contig')]:
is_c_contig = True
elif (specs[-1] == ('direct','contig') and
all(axis == ('direct','follow') for axis in specs[:-1])):
# c_contiguous: 'follow', 'follow', ..., 'follow', 'contig'
is_c_contig = True
elif (len(specs) > 1 and
specs[0] == ('direct','contig') and
all(axis == ('direct','follow') for axis in specs[1:])):
# f_contiguous: 'contig', 'follow', 'follow', ..., 'follow'
is_f_contig = True
return is_c_contig, is_f_contig
def get_mode(specs):
is_c_contig, is_f_contig = is_cf_contig(specs)
if is_c_contig:
return 'c'
elif is_f_contig:
return 'fortran'
for access, packing in specs:
if access in ('ptr', 'full'):
return 'full'
return 'strided'
view_constant_to_access_packing = {
'generic': ('full', 'strided'),
'strided': ('direct', 'strided'),
'indirect': ('ptr', 'strided'),
'generic_contiguous': ('full', 'contig'),
'contiguous': ('direct', 'contig'),
'indirect_contiguous': ('ptr', 'contig'),
}
def validate_axes_specs(positions, specs, is_c_contig, is_f_contig):
packing_specs = ('contig', 'strided', 'follow')
access_specs = ('direct', 'ptr', 'full')
# is_c_contig, is_f_contig = is_cf_contig(specs)
has_contig = has_follow = has_strided = has_generic_contig = False
last_indirect_dimension = -1
for idx, (access, packing) in enumerate(specs):
if access == 'ptr':
last_indirect_dimension = idx
for idx, (pos, (access, packing)) in enumerate(zip(positions, specs)):
if not (access in access_specs and
packing in packing_specs):
raise CompileError(pos, "Invalid axes specification.")
if packing == 'strided':
has_strided = True
elif packing == 'contig':
if has_contig:
raise CompileError(pos, "Only one direct contiguous "
"axis may be specified.")
valid_contig_dims = last_indirect_dimension + 1, len(specs) - 1
if idx not in valid_contig_dims and access != 'ptr':
if last_indirect_dimension + 1 != len(specs) - 1:
dims = "dimensions %d and %d" % valid_contig_dims
else:
dims = "dimension %d" % valid_contig_dims[0]
raise CompileError(pos, "Only %s may be contiguous and direct" % dims)
has_contig = access != 'ptr'
elif packing == 'follow':
if has_strided:
raise CompileError(pos, "A memoryview cannot have both follow and strided axis specifiers.")
if not (is_c_contig or is_f_contig):
raise CompileError(pos, "Invalid use of the follow specifier.")
if access in ('ptr', 'full'):
has_strided = False
def _get_resolved_spec(env, spec):
# spec must be a NameNode or an AttributeNode
if isinstance(spec, NameNode):
return _resolve_NameNode(env, spec)
elif isinstance(spec, AttributeNode):
return _resolve_AttributeNode(env, spec)
else:
raise CompileError(spec.pos, INVALID_ERR)
def _resolve_NameNode(env, node):
try:
resolved_name = env.lookup(node.name).name
except AttributeError:
raise CompileError(node.pos, INVALID_ERR)
viewscope = env.context.cython_scope.viewscope
entry = viewscope.lookup(resolved_name)
if entry is None:
raise CompileError(node.pos, NOT_CIMPORTED_ERR)
return entry
def _resolve_AttributeNode(env, node):
path = []
while isinstance(node, AttributeNode):
path.insert(0, node.attribute)
node = node.obj
if isinstance(node, NameNode):
path.insert(0, node.name)
else:
raise CompileError(node.pos, EXPR_ERR)
modnames = path[:-1]
# must be at least 1 module name, o/w not an AttributeNode.
assert modnames
scope = env
for modname in modnames:
mod = scope.lookup(modname)
if not mod or not mod.as_module:
raise CompileError(
node.pos, "undeclared name not builtin: %s" % modname)
scope = mod.as_module
entry = scope.lookup(path[-1])
if not entry:
raise CompileError(node.pos, "No such attribute '%s'" % path[-1])
return entry
#
### Utility loading
#
def load_memview_cy_utility(util_code_name, context=None, **kwargs):
return CythonUtilityCode.load(util_code_name, "MemoryView.pyx",
context=context, **kwargs)
def load_memview_c_utility(
util_code_name, util_code_filename="MemoryView_C.c",
*,
context=None, **kwargs):
if context is None:
return UtilityCode.load(util_code_name, util_code_filename, **kwargs)
else:
return TempitaUtilityCode.load(util_code_name, util_code_filename,
context=context, **kwargs)
def use_cython_array_utility_code(env):
if env.context.shared_utility_qualified_name:
return
cython_scope = env.context.cython_scope
cython_scope.load_cythonscope()
cython_scope.viewscope.lookup('array_cwrapper').used = True
template_context = {
'max_dims': Options.buffer_max_dims,
'memviewslice_name': Naming.memviewslice_cname,
'memslice_init': PyrexTypes.MemoryViewSliceType.default_value,
'THREAD_LOCKS_PREALLOCATED': 8,
}
def _get_memviewslice_declare_code():
memviewslice_declare_code = load_memview_c_utility(
"MemviewSliceStruct",
context=template_context,
requires=[])
return memviewslice_declare_code
atomic_utility = load_memview_c_utility(
"Atomics", util_code_filename="Synchronization.c", context=template_context)
memviewslice_index_helpers = load_memview_c_utility("MemviewSliceIndex")
def _get_typeinfo_to_format_code():
return load_memview_cy_utility(
"BufferFormatFromTypeInfo", requires=[Buffer._typeinfo_to_format_code])
def get_typeinfo_to_format_code(shared_utility_qualified_name):
if shared_utility_qualified_name:
return CythonSharedUtilityCode(
'BufferFormatFromTypeInfo.pxd',
shared_utility_qualified_name,
template_context=template_context,
requires=[])
else:
return _get_typeinfo_to_format_code()
is_contig_utility = load_memview_c_utility("MemviewSliceIsContig")
overlapping_utility = load_memview_c_utility("OverlappingSlices")
refcount_utility = load_memview_c_utility("MemviewRefcount")
slice_init_utility = load_memview_c_utility("MemviewSliceInit")
memviewslice_declare_code = load_memview_c_utility("MemviewSliceStruct", context=template_context)
copy_contents_new_utility = load_memview_c_utility("MemviewSliceCopy")
@Utils.cached_function
def _get_memoryview_utility_code():
memoryview_utility_code = load_memview_cy_utility(
"View.MemoryView",
context=template_context,
requires=[
Buffer.buffer_struct_declare_code,
Buffer.buffer_formats_declare_code,
memviewslice_declare_code,
refcount_utility,
atomic_utility,
is_contig_utility,
overlapping_utility,
copy_contents_new_utility,
],
)
return memoryview_utility_code
@Utils.cached_function
def _get_memoryview_shared_utility_code(shared_utility_qualified_name):
shared_utility_code = CythonSharedUtilityCode(
'MemoryView.pxd',
shared_utility_qualified_name,
template_context=template_context,
requires=[
Buffer.buffer_struct_declare_code,
Buffer.buffer_formats_declare_code,
memviewslice_declare_code,
refcount_utility,
atomic_utility,
],
)
return shared_utility_code
def get_view_utility_code(shared_utility_qualified_name):
if shared_utility_qualified_name:
return _get_memoryview_shared_utility_code(shared_utility_qualified_name)
else:
return _get_memoryview_utility_code()
view_utility_allowlist = ('array', 'memoryview', 'array_cwrapper',
'generic', 'strided', 'indirect', 'contiguous',
'indirect_contiguous')
| StridedSliceIter |
python | PyCQA__isort | tests/unit/test_exceptions.py | {
"start": 645,
"end": 937
} | class ____(TestISortError):
def setup_class(self):
self.instance: exceptions.IntroducedSyntaxErrors = exceptions.IntroducedSyntaxErrors(
"file_path"
)
def test_variables(self):
assert self.instance.file_path == "file_path"
| TestIntroducedSyntaxErrors |
python | SmileyChris__easy-thumbnails | demoproject/mainapp/forms.py | {
"start": 118,
"end": 310
} | class ____(forms.ModelForm):
class Meta:
model = TestImage
fields = ["title", "image"]
widgets = {
"image": ImageClearableFileInput,
}
| TestImageForm |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/build_systems/cmake.py | {
"start": 365,
"end": 1804
} | class ____(PackageBase):
"""Specialized class for packages built using CMake
For more information on the CMake build system, see:
https://cmake.org/cmake/help/latest/
"""
build_system_class = "CMakePackage"
default_buildsystem = "cmake"
build_system("cmake")
depends_on("cmake", type="build", when="build_system=cmake")
def flags_to_build_system_args(self, flags):
"""Translate compiler flags to CMake arguments."""
# Has to be dynamic attribute due to caching
cmake_flag_args = []
for lang, pre in (("C", "c"), ("CXX", "cxx"), ("Fortran", "f")):
lang_flags = " ".join(flags.get(f"{pre}flags", []) + flags.get("cppflags", []))
if lang_flags:
cmake_flag_args.append(f"-DCMAKE_{lang}_FLAGS={lang_flags}")
if flags["ldflags"]:
ldflags = " ".join(flags["ldflags"])
cmake_flag_args.append(f"-DCMAKE_EXE_LINKER_FLAGS={ldflags}")
cmake_flag_args.append(f"-DCMAKE_MODULE_LINKER_FLAGS={ldflags}")
cmake_flag_args.append(f"-DCMAKE_SHARED_LINKER_FLAGS={ldflags}")
if flags["ldlibs"]:
libs_flags = " ".join(flags["ldlibs"])
for lang in ("C", "CXX", "Fortran"):
cmake_flag_args.append(f"-DCMAKE_{lang}_STANDARD_LIBRARIES={libs_flags}")
setattr(self, "cmake_flag_args", cmake_flag_args)
@register_builder("cmake")
| CMakePackage |
python | getsentry__sentry-python | tests/integrations/gcp/test_gcp.py | {
"start": 1342,
"end": 17736
} | class ____(HttpTransport):
def capture_envelope(self, envelope):
envelope_item = envelope_processor(envelope)
print("\\nENVELOPE: {}\\n".format(envelope_item.decode(\"utf-8\")))
def init_sdk(timeout_warning=False, **extra_init_args):
sentry_sdk.init(
dsn="https://123abc@example.com/123",
transport=TestTransport,
integrations=[GcpIntegration(timeout_warning=timeout_warning)],
shutdown_timeout=10,
# excepthook -> dedupe -> event_processor client report gets added
# which we don't really care about for these tests
send_client_reports=False,
**extra_init_args
)
"""
@pytest.fixture
def run_cloud_function():
def inner(code, subprocess_kwargs=()):
envelope_items = []
return_value = None
# STEP : Create a zip of cloud function
subprocess_kwargs = dict(subprocess_kwargs)
with tempfile.TemporaryDirectory() as tmpdir:
main_py = os.path.join(tmpdir, "main.py")
with open(main_py, "w") as f:
f.write(code)
setup_cfg = os.path.join(tmpdir, "setup.cfg")
with open(setup_cfg, "w") as f:
f.write("[install]\nprefix=")
subprocess.check_call(
[sys.executable, "setup.py", "sdist", "-d", os.path.join(tmpdir, "..")],
**subprocess_kwargs,
)
subprocess.check_call(
"pip install ../*.tar.gz -t .",
cwd=tmpdir,
shell=True,
**subprocess_kwargs,
)
stream = os.popen("python {}/main.py".format(tmpdir))
stream_data = stream.read()
stream.close()
for line in stream_data.splitlines():
print("GCP:", line)
if line.startswith("ENVELOPE: "):
line = line[len("ENVELOPE: ") :]
envelope_items.append(json.loads(line))
elif line.startswith("RETURN VALUE: "):
line = line[len("RETURN VALUE: ") :]
return_value = json.loads(line)
else:
continue
stream.close()
return envelope_items, return_value
return inner
def test_handled_exception(run_cloud_function):
envelope_items, return_value = run_cloud_function(
dedent(
"""
functionhandler = None
event = {}
def cloud_function(functionhandler, event):
raise Exception("something went wrong")
"""
)
+ FUNCTIONS_PRELUDE
+ dedent(
"""
init_sdk(timeout_warning=False)
gcp_functions.worker_v1.FunctionHandler.invoke_user_function(functionhandler, event)
"""
)
)
assert envelope_items[0]["level"] == "error"
(exception,) = envelope_items[0]["exception"]["values"]
assert exception["type"] == "Exception"
assert exception["value"] == "something went wrong"
assert exception["mechanism"]["type"] == "gcp"
assert not exception["mechanism"]["handled"]
def test_unhandled_exception(run_cloud_function):
envelope_items, _ = run_cloud_function(
dedent(
"""
functionhandler = None
event = {}
def cloud_function(functionhandler, event):
x = 3/0
return "3"
"""
)
+ FUNCTIONS_PRELUDE
+ dedent(
"""
init_sdk(timeout_warning=False)
gcp_functions.worker_v1.FunctionHandler.invoke_user_function(functionhandler, event)
"""
)
)
assert envelope_items[0]["level"] == "error"
(exception,) = envelope_items[0]["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
assert exception["value"] == "division by zero"
assert exception["mechanism"]["type"] == "gcp"
assert not exception["mechanism"]["handled"]
def test_timeout_error(run_cloud_function):
envelope_items, _ = run_cloud_function(
dedent(
"""
functionhandler = None
event = {}
def cloud_function(functionhandler, event):
sentry_sdk.set_tag("cloud_function", "true")
time.sleep(10)
return "3"
"""
)
+ FUNCTIONS_PRELUDE
+ dedent(
"""
init_sdk(timeout_warning=True)
gcp_functions.worker_v1.FunctionHandler.invoke_user_function(functionhandler, event)
"""
)
)
assert envelope_items[0]["level"] == "error"
(exception,) = envelope_items[0]["exception"]["values"]
assert exception["type"] == "ServerlessTimeoutWarning"
assert (
exception["value"]
== "WARNING : Function is expected to get timed out. Configured timeout duration = 3 seconds."
)
assert exception["mechanism"]["type"] == "threading"
assert not exception["mechanism"]["handled"]
assert envelope_items[0]["tags"]["cloud_function"] == "true"
def test_performance_no_error(run_cloud_function):
envelope_items, _ = run_cloud_function(
dedent(
"""
functionhandler = None
event = {}
def cloud_function(functionhandler, event):
return "test_string"
"""
)
+ FUNCTIONS_PRELUDE
+ dedent(
"""
init_sdk(traces_sample_rate=1.0)
gcp_functions.worker_v1.FunctionHandler.invoke_user_function(functionhandler, event)
"""
)
)
assert envelope_items[0]["type"] == "transaction"
assert envelope_items[0]["contexts"]["trace"]["op"] == "function.gcp"
assert envelope_items[0]["transaction"].startswith("Google Cloud function")
assert envelope_items[0]["transaction_info"] == {"source": "component"}
assert envelope_items[0]["transaction"] in envelope_items[0]["request"]["url"]
def test_performance_error(run_cloud_function):
envelope_items, _ = run_cloud_function(
dedent(
"""
functionhandler = None
event = {}
def cloud_function(functionhandler, event):
raise Exception("something went wrong")
"""
)
+ FUNCTIONS_PRELUDE
+ dedent(
"""
init_sdk(traces_sample_rate=1.0)
gcp_functions.worker_v1.FunctionHandler.invoke_user_function(functionhandler, event)
"""
)
)
assert envelope_items[0]["level"] == "error"
(exception,) = envelope_items[0]["exception"]["values"]
assert exception["type"] == "Exception"
assert exception["value"] == "something went wrong"
assert exception["mechanism"]["type"] == "gcp"
assert not exception["mechanism"]["handled"]
assert envelope_items[1]["type"] == "transaction"
assert envelope_items[1]["contexts"]["trace"]["op"] == "function.gcp"
assert envelope_items[1]["transaction"].startswith("Google Cloud function")
assert envelope_items[1]["transaction"] in envelope_items[0]["request"]["url"]
def test_traces_sampler_gets_correct_values_in_sampling_context(
run_cloud_function,
DictionaryContaining, # noqa:N803
):
# TODO: There are some decent sized hacks below. For more context, see the
# long comment in the test of the same name in the AWS integration. The
# situations there and here aren't identical, but they're similar enough
# that solving one would probably solve both.
import inspect
_, return_value = run_cloud_function(
dedent(
"""
functionhandler = None
event = {
"type": "chase",
"chasers": ["Maisey", "Charlie"],
"num_squirrels": 2,
}
def cloud_function(functionhandler, event):
# this runs after the transaction has started, which means we
# can make assertions about traces_sampler
try:
traces_sampler.assert_any_call(
DictionaryContaining({
"gcp_env": DictionaryContaining({
"function_name": "chase_into_tree",
"function_region": "dogpark",
"function_project": "SquirrelChasing",
}),
"gcp_event": {
"type": "chase",
"chasers": ["Maisey", "Charlie"],
"num_squirrels": 2,
},
})
)
except AssertionError:
# catch the error and return it because the error itself will
# get swallowed by the SDK as an "internal exception"
return {"AssertionError raised": True,}
return {"AssertionError raised": False,}
"""
)
+ FUNCTIONS_PRELUDE
+ dedent(inspect.getsource(DictionaryContaining))
+ dedent(
"""
os.environ["FUNCTION_NAME"] = "chase_into_tree"
os.environ["FUNCTION_REGION"] = "dogpark"
os.environ["GCP_PROJECT"] = "SquirrelChasing"
def _safe_is_equal(x, y):
# copied from conftest.py - see docstring and comments there
try:
is_equal = x.__eq__(y)
except AttributeError:
is_equal = NotImplemented
if is_equal == NotImplemented:
return x == y
return is_equal
traces_sampler = Mock(return_value=True)
init_sdk(
traces_sampler=traces_sampler,
)
gcp_functions.worker_v1.FunctionHandler.invoke_user_function(functionhandler, event)
"""
)
)
assert return_value["AssertionError raised"] is False
def test_error_has_new_trace_context_performance_enabled(run_cloud_function):
"""
Check if an 'trace' context is added to errros and transactions when performance monitoring is enabled.
"""
envelope_items, _ = run_cloud_function(
dedent(
"""
functionhandler = None
event = {}
def cloud_function(functionhandler, event):
sentry_sdk.capture_message("hi")
x = 3/0
return "3"
"""
)
+ FUNCTIONS_PRELUDE
+ dedent(
"""
init_sdk(traces_sample_rate=1.0)
gcp_functions.worker_v1.FunctionHandler.invoke_user_function(functionhandler, event)
"""
)
)
(msg_event, error_event, transaction_event) = envelope_items
assert "trace" in msg_event["contexts"]
assert "trace_id" in msg_event["contexts"]["trace"]
assert "trace" in error_event["contexts"]
assert "trace_id" in error_event["contexts"]["trace"]
assert "trace" in transaction_event["contexts"]
assert "trace_id" in transaction_event["contexts"]["trace"]
assert (
msg_event["contexts"]["trace"]["trace_id"]
== error_event["contexts"]["trace"]["trace_id"]
== transaction_event["contexts"]["trace"]["trace_id"]
)
def test_error_has_new_trace_context_performance_disabled(run_cloud_function):
"""
Check if an 'trace' context is added to errros and transactions when performance monitoring is disabled.
"""
envelope_items, _ = run_cloud_function(
dedent(
"""
functionhandler = None
event = {}
def cloud_function(functionhandler, event):
sentry_sdk.capture_message("hi")
x = 3/0
return "3"
"""
)
+ FUNCTIONS_PRELUDE
+ dedent(
"""
init_sdk(traces_sample_rate=None), # this is the default, just added for clarity
gcp_functions.worker_v1.FunctionHandler.invoke_user_function(functionhandler, event)
"""
)
)
(msg_event, error_event) = envelope_items
assert "trace" in msg_event["contexts"]
assert "trace_id" in msg_event["contexts"]["trace"]
assert "trace" in error_event["contexts"]
assert "trace_id" in error_event["contexts"]["trace"]
assert (
msg_event["contexts"]["trace"]["trace_id"]
== error_event["contexts"]["trace"]["trace_id"]
)
def test_error_has_existing_trace_context_performance_enabled(run_cloud_function):
"""
Check if an 'trace' context is added to errros and transactions
from the incoming 'sentry-trace' header when performance monitoring is enabled.
"""
trace_id = "471a43a4192642f0b136d5159a501701"
parent_span_id = "6e8f22c393e68f19"
parent_sampled = 1
sentry_trace_header = "{}-{}-{}".format(trace_id, parent_span_id, parent_sampled)
envelope_items, _ = run_cloud_function(
dedent(
"""
functionhandler = None
from collections import namedtuple
GCPEvent = namedtuple("GCPEvent", ["headers"])
event = GCPEvent(headers={"sentry-trace": "%s"})
def cloud_function(functionhandler, event):
sentry_sdk.capture_message("hi")
x = 3/0
return "3"
"""
% sentry_trace_header
)
+ FUNCTIONS_PRELUDE
+ dedent(
"""
init_sdk(traces_sample_rate=1.0)
gcp_functions.worker_v1.FunctionHandler.invoke_user_function(functionhandler, event)
"""
)
)
(msg_event, error_event, transaction_event) = envelope_items
assert "trace" in msg_event["contexts"]
assert "trace_id" in msg_event["contexts"]["trace"]
assert "trace" in error_event["contexts"]
assert "trace_id" in error_event["contexts"]["trace"]
assert "trace" in transaction_event["contexts"]
assert "trace_id" in transaction_event["contexts"]["trace"]
assert (
msg_event["contexts"]["trace"]["trace_id"]
== error_event["contexts"]["trace"]["trace_id"]
== transaction_event["contexts"]["trace"]["trace_id"]
== "471a43a4192642f0b136d5159a501701"
)
def test_error_has_existing_trace_context_performance_disabled(run_cloud_function):
"""
Check if an 'trace' context is added to errros and transactions
from the incoming 'sentry-trace' header when performance monitoring is disabled.
"""
trace_id = "471a43a4192642f0b136d5159a501701"
parent_span_id = "6e8f22c393e68f19"
parent_sampled = 1
sentry_trace_header = "{}-{}-{}".format(trace_id, parent_span_id, parent_sampled)
envelope_items, _ = run_cloud_function(
dedent(
"""
functionhandler = None
from collections import namedtuple
GCPEvent = namedtuple("GCPEvent", ["headers"])
event = GCPEvent(headers={"sentry-trace": "%s"})
def cloud_function(functionhandler, event):
sentry_sdk.capture_message("hi")
x = 3/0
return "3"
"""
% sentry_trace_header
)
+ FUNCTIONS_PRELUDE
+ dedent(
"""
init_sdk(traces_sample_rate=None), # this is the default, just added for clarity
gcp_functions.worker_v1.FunctionHandler.invoke_user_function(functionhandler, event)
"""
)
)
(msg_event, error_event) = envelope_items
assert "trace" in msg_event["contexts"]
assert "trace_id" in msg_event["contexts"]["trace"]
assert "trace" in error_event["contexts"]
assert "trace_id" in error_event["contexts"]["trace"]
assert (
msg_event["contexts"]["trace"]["trace_id"]
== error_event["contexts"]["trace"]["trace_id"]
== "471a43a4192642f0b136d5159a501701"
)
def test_span_origin(run_cloud_function):
events, _ = run_cloud_function(
dedent(
"""
functionhandler = None
event = {}
def cloud_function(functionhandler, event):
return "test_string"
"""
)
+ FUNCTIONS_PRELUDE
+ dedent(
"""
init_sdk(traces_sample_rate=1.0)
gcp_functions.worker_v1.FunctionHandler.invoke_user_function(functionhandler, event)
"""
)
)
(event,) = events
assert event["contexts"]["trace"]["origin"] == "auto.function.gcp"
| TestTransport |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 17412,
"end": 17968
} | class ____(VariableResponse):
type: Literal["VariableResult"] = "VariableResult"
@classmethod
def from_variable_response(cls, variable_response: VariableResponse) -> VariableResult:
"""
Get VariableResult from VariableResponse.
VariableResponse is autogenerated from the API schema, so we need to convert it to VariableResult
for communication between the Supervisor and the task process.
"""
return cls(**variable_response.model_dump(exclude_defaults=True), type="VariableResult")
| VariableResult |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault2.py | {
"start": 1588,
"end": 1693
} | class ____[*Ts = Ts1]: ...
# This should generate an error because default must be unpacked tuple.
| ClassTs6 |
python | ray-project__ray | rllib/env/wrappers/atari_wrappers.py | {
"start": 8421,
"end": 9833
} | class ____(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == "NOOP"
def reset(self, **kwargs):
"""Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
# This environment now uses the pcg64 random number generator which
# does not have `randint` as an attribute only has `integers`.
try:
noops = self.unwrapped.np_random.integers(1, self.noop_max + 1)
# Also still support older versions.
except AttributeError:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
assert noops > 0
obs = None
for _ in range(noops):
obs, _, terminated, truncated, info = self.env.step(self.noop_action)
if terminated or truncated:
obs, info = self.env.reset(**kwargs)
return obs, info
def step(self, ac):
return self.env.step(ac)
@PublicAPI
| NoopResetEnv |
python | PrefectHQ__prefect | src/prefect/server/schemas/sorting.py | {
"start": 2101,
"end": 3221
} | class ____(AutoEnum):
"""Defines task run sorting options."""
ID_DESC = AutoEnum.auto()
EXPECTED_START_TIME_ASC = AutoEnum.auto()
EXPECTED_START_TIME_DESC = AutoEnum.auto()
NAME_ASC = AutoEnum.auto()
NAME_DESC = AutoEnum.auto()
NEXT_SCHEDULED_START_TIME_ASC = AutoEnum.auto()
END_TIME_DESC = AutoEnum.auto()
@db_injector
def as_sql_sort(self, db: "PrefectDBInterface") -> Iterable[sa.ColumnElement[Any]]:
"""Return an expression used to sort task runs"""
sort_mapping: dict[str, Iterable[sa.ColumnElement[Any]]] = {
"ID_DESC": [db.TaskRun.id.desc()],
"EXPECTED_START_TIME_ASC": [db.TaskRun.expected_start_time.asc()],
"EXPECTED_START_TIME_DESC": [db.TaskRun.expected_start_time.desc()],
"NAME_ASC": [db.TaskRun.name.asc()],
"NAME_DESC": [db.TaskRun.name.desc()],
"NEXT_SCHEDULED_START_TIME_ASC": [
db.TaskRun.next_scheduled_start_time.asc()
],
"END_TIME_DESC": [db.TaskRun.end_time.desc()],
}
return sort_mapping[self.value]
| TaskRunSort |
python | python-attrs__attrs | tests/test_funcs.py | {
"start": 19391,
"end": 24293
} | class ____:
"""
Tests for `evolve`.
"""
@given(slots=st.booleans(), frozen=st.booleans())
def test_empty(self, slots, frozen):
"""
Empty classes without changes get copied.
"""
@attr.s(slots=slots, frozen=frozen)
class C:
pass
i1 = C()
i2 = evolve(i1)
assert i1 is not i2
assert i1 == i2
@given(simple_classes())
def test_no_changes(self, C):
"""
No changes means a verbatim copy.
"""
i1 = C()
i2 = evolve(i1)
assert i1 is not i2
assert i1 == i2
@given(simple_classes(), st.data())
def test_change(self, C, data):
"""
Changes work.
"""
# Take the first attribute, and change it.
assume(fields(C)) # Skip classes with no attributes.
field_names = [a.name for a in fields(C)]
original = C()
chosen_names = data.draw(st.sets(st.sampled_from(field_names)))
# We pay special attention to private attributes, they should behave
# like in `__init__`.
change_dict = {
name.replace("_", ""): data.draw(st.integers())
for name in chosen_names
}
changed = evolve(original, **change_dict)
for name in chosen_names:
assert getattr(changed, name) == change_dict[name.replace("_", "")]
@given(simple_classes())
def test_unknown(self, C):
"""
Wanting to change an unknown attribute raises an
AttrsAttributeNotFoundError.
"""
# No generated class will have a four letter attribute.
with pytest.raises(TypeError) as e:
evolve(C(), aaaa=2)
if hasattr(C, "__attrs_init__"):
expected = (
"__attrs_init__() got an unexpected keyword argument 'aaaa'"
)
else:
expected = "__init__() got an unexpected keyword argument 'aaaa'"
assert e.value.args[0].endswith(expected)
def test_validator_failure(self):
"""
TypeError isn't swallowed when validation fails within evolve.
"""
@attr.s
class C:
a = attr.ib(validator=instance_of(int))
with pytest.raises(TypeError) as e:
evolve(C(a=1), a="some string")
m = e.value.args[0]
assert m.startswith("'a' must be <class 'int'>")
def test_private(self):
"""
evolve() acts as `__init__` with regards to private attributes.
"""
@attr.s
class C:
_a = attr.ib()
assert evolve(C(1), a=2)._a == 2
with pytest.raises(TypeError):
evolve(C(1), _a=2)
with pytest.raises(TypeError):
evolve(C(1), a=3, _a=2)
def test_non_init_attrs(self):
"""
evolve() handles `init=False` attributes.
"""
@attr.s
class C:
a = attr.ib()
b = attr.ib(init=False, default=0)
assert evolve(C(1), a=2).a == 2
def test_regression_attrs_classes(self):
"""
evolve() can evolve fields that are instances of attrs classes.
Regression test for #804
"""
@attr.s
class Cls1:
param1 = attr.ib()
@attr.s
class Cls2:
param2 = attr.ib()
obj2a = Cls2(param2="a")
obj2b = Cls2(param2="b")
obj1a = Cls1(param1=obj2a)
assert Cls1(param1=Cls2(param2="b")) == attr.evolve(
obj1a, param1=obj2b
)
def test_dicts(self):
"""
evolve() can replace an attrs class instance with a dict.
See #806
"""
@attr.s
class Cls1:
param1 = attr.ib()
@attr.s
class Cls2:
param2 = attr.ib()
obj2a = Cls2(param2="a")
obj2b = {"foo": 42, "param2": 42}
obj1a = Cls1(param1=obj2a)
assert Cls1({"foo": 42, "param2": 42}) == attr.evolve(
obj1a, param1=obj2b
)
def test_no_inst(self):
"""
Missing inst argument raises a TypeError like Python would.
"""
with pytest.raises(
TypeError, match=r"evolve\(\) takes 1 positional argument"
):
evolve(x=1)
def test_too_many_pos_args(self):
"""
More than one positional argument raises a TypeError like Python would.
"""
with pytest.raises(
TypeError,
match=r"evolve\(\) takes 1 positional argument, but 2 were given",
):
evolve(1, 2)
def test_can_change_inst(self):
"""
If the instance is passed by positional argument, a field named `inst`
can be changed.
"""
@attr.define
class C:
inst: int
assert C(42) == evolve(C(23), inst=42)
| TestEvolve |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol53.py | {
"start": 422,
"end": 472
} | class ____:
def m(self) -> Self: ...
| Impl_CoSelf |
python | weaviate__weaviate-python-client | weaviate/collections/classes/filters.py | {
"start": 13982,
"end": 15543
} | class ____(_FilterBase):
def __init__(self, target: Optional[_TargetRefs] = None) -> None:
self._target = target
self._property = "_id"
def contains_any(self, uuids: Sequence[UUID]) -> _Filters:
"""Filter for objects that has one of the given IDs."""
if len(uuids) == 0:
raise WeaviateInvalidInputError("Filter contains_any must have at least one value")
return _FilterValue(
target=self._target_path(),
value=[get_valid_uuid(val) for val in uuids],
operator=_Operator.CONTAINS_ANY,
)
def contains_none(self, uuids: Sequence[UUID]) -> _Filters:
"""Filter for objects that has none of the given IDs."""
if len(uuids) == 0:
raise WeaviateInvalidInputError("Filter contains_none must have at least one value")
return _FilterValue(
target=self._target_path(),
value=[get_valid_uuid(val) for val in uuids],
operator=_Operator.CONTAINS_NONE,
)
def equal(self, uuid: UUID) -> _Filters:
"""Filter for object that has the given ID."""
return _FilterValue(
target=self._target_path(),
value=get_valid_uuid(uuid),
operator=_Operator.EQUAL,
)
def not_equal(self, uuid: UUID) -> _Filters:
"""Filter our object that has the given ID."""
return _FilterValue(
target=self._target_path(),
value=get_valid_uuid(uuid),
operator=_Operator.NOT_EQUAL,
)
| _FilterById |
python | realpython__materials | python-mutable-immutable/point_typing.py | {
"start": 44,
"end": 208
} | class ____(NamedTuple):
x: float
y: float
def distance(self, other: "Point") -> float:
return math.dist((self.x, self.y), (other.x, other.y))
| Point |
python | google__pytype | pytype/overlays/special_builtins.py | {
"start": 6852,
"end": 8312
} | class ____(ObjectPredicate):
"""The base class for builtin predicates of the form f(obj, value).
Subclasses need to override the following:
_call_predicate(self, node, left, right): The implementation of the predicate.
"""
def _call_predicate(self, node, left, right):
raise NotImplementedError(self.__class__.__name__)
def run(self, node, args, result):
for right in abstract_utils.expand_type_parameter_instances(
args.posargs[1].bindings
):
one_result = []
for left in abstract_utils.expand_type_parameter_instances(
args.posargs[0].bindings
):
node, pyval = self._call_predicate(node, left, right)
one_result.append((left, node, pyval))
unsolvable_matches = any(
isinstance(left.data, abstract.Unsolvable) and pyval in (None, True)
for (left, _, pyval) in one_result
)
for left, result_node, pyval in one_result:
if (
unsolvable_matches
and not isinstance(left.data, abstract.Unsolvable)
and pyval is None
):
# If unsolvable (i.e., Any) satisfies the predicate, then we should
# ignore non-Any values. See test_splits2:AmbiguousIsInstanceTest for
# the reasoning.
pyval = False
result.AddBinding(
self.ctx.convert.bool_values[pyval],
source_set=(left, right),
where=result_node,
)
| BinaryPredicate |
python | scipy__scipy | scipy/stats/_binomtest.py | {
"start": 278,
"end": 13199
} | class ____:
"""
Result of `scipy.stats.binomtest`.
Attributes
----------
k : int
The number of successes (copied from `binomtest` input).
n : int
The number of trials (copied from `binomtest` input).
alternative : str
Indicates the alternative hypothesis specified in the input
to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
or ``'less'``.
statistic: float
The estimate of the proportion of successes.
pvalue : float
The p-value of the hypothesis test.
"""
def __init__(self, k, n, alternative, statistic, pvalue):
self.k = k
self.n = n
self.alternative = alternative
self.statistic = statistic
self.pvalue = pvalue
# add alias for backward compatibility
self.proportion_estimate = statistic
def __repr__(self):
s = ("BinomTestResult("
f"k={self.k}, "
f"n={self.n}, "
f"alternative={self.alternative!r}, "
f"statistic={self.statistic}, "
f"pvalue={self.pvalue})")
return s
def proportion_ci(self, confidence_level=0.95, method='exact'):
"""
Compute the confidence interval for ``statistic``.
Parameters
----------
confidence_level : float, optional
Confidence level for the computed confidence interval
of the estimated proportion. Default is 0.95.
method : {'exact', 'wilson', 'wilsoncc'}, optional
Selects the method used to compute the confidence interval
for the estimate of the proportion:
'exact' :
Use the Clopper-Pearson exact method [1]_.
'wilson' :
Wilson's method, without continuity correction ([2]_, [3]_).
'wilsoncc' :
Wilson's method, with continuity correction ([2]_, [3]_).
Default is ``'exact'``.
Returns
-------
ci : ``ConfidenceInterval`` object
The object has attributes ``low`` and ``high`` that hold the
lower and upper bounds of the confidence interval.
References
----------
.. [1] C. J. Clopper and E. S. Pearson, The use of confidence or
fiducial limits illustrated in the case of the binomial,
Biometrika, Vol. 26, No. 4, pp 404-413 (Dec. 1934).
.. [2] E. B. Wilson, Probable inference, the law of succession, and
statistical inference, J. Amer. Stat. Assoc., 22, pp 209-212
(1927).
.. [3] Robert G. Newcombe, Two-sided confidence intervals for the
single proportion: comparison of seven methods, Statistics
in Medicine, 17, pp 857-872 (1998).
Examples
--------
>>> from scipy.stats import binomtest
>>> result = binomtest(k=7, n=50, p=0.1)
>>> result.statistic
0.14
>>> result.proportion_ci()
ConfidenceInterval(low=0.05819170033997342, high=0.26739600249700846)
"""
if method not in ('exact', 'wilson', 'wilsoncc'):
raise ValueError(f"method ('{method}') must be one of 'exact', "
"'wilson' or 'wilsoncc'.")
if not (0 <= confidence_level <= 1):
raise ValueError(f'confidence_level ({confidence_level}) must be in '
'the interval [0, 1].')
if method == 'exact':
low, high = _binom_exact_conf_int(self.k, self.n,
confidence_level,
self.alternative)
else:
# method is 'wilson' or 'wilsoncc'
low, high = _binom_wilson_conf_int(self.k, self.n,
confidence_level,
self.alternative,
correction=method == 'wilsoncc')
return ConfidenceInterval(low=low, high=high)
def _findp(func):
try:
p = brentq(func, 0, 1)
except RuntimeError:
raise RuntimeError('numerical solver failed to converge when '
'computing the confidence limits') from None
except ValueError as exc:
raise ValueError('brentq raised a ValueError; report this to the '
'SciPy developers') from exc
return p
def _binom_exact_conf_int(k, n, confidence_level, alternative):
"""
Compute the estimate and confidence interval for the binomial test.
Returns proportion, prop_low, prop_high
"""
if alternative == 'two-sided':
alpha = (1 - confidence_level) / 2
if k == 0:
plow = 0.0
else:
plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
if k == n:
phigh = 1.0
else:
phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
elif alternative == 'less':
alpha = 1 - confidence_level
plow = 0.0
if k == n:
phigh = 1.0
else:
phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
elif alternative == 'greater':
alpha = 1 - confidence_level
if k == 0:
plow = 0.0
else:
plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
phigh = 1.0
return plow, phigh
def _binom_wilson_conf_int(k, n, confidence_level, alternative, correction):
# This function assumes that the arguments have already been validated.
# In particular, `alternative` must be one of 'two-sided', 'less' or
# 'greater'.
p = k / n
if alternative == 'two-sided':
z = ndtri(0.5 + 0.5*confidence_level)
else:
z = ndtri(confidence_level)
# For reference, the formulas implemented here are from
# Newcombe (1998) (ref. [3] in the proportion_ci docstring).
denom = 2*(n + z**2)
center = (2*n*p + z**2)/denom
q = 1 - p
if correction:
if alternative == 'less' or k == 0:
lo = 0.0
else:
dlo = (1 + z*sqrt(z**2 - 2 - 1/n + 4*p*(n*q + 1))) / denom
lo = center - dlo
if alternative == 'greater' or k == n:
hi = 1.0
else:
dhi = (1 + z*sqrt(z**2 + 2 - 1/n + 4*p*(n*q - 1))) / denom
hi = center + dhi
else:
delta = z/denom * sqrt(4*n*p*q + z**2)
if alternative == 'less' or k == 0:
lo = 0.0
else:
lo = center - delta
if alternative == 'greater' or k == n:
hi = 1.0
else:
hi = center + delta
return lo, hi
@xp_capabilities(np_only=True)
def binomtest(k, n, p=0.5, alternative='two-sided'):
"""
Perform a test that the probability of success is p.
The binomial test [1]_ is a test of the null hypothesis that the
probability of success in a Bernoulli experiment is `p`.
Details of the test can be found in many texts on statistics, such
as section 24.5 of [2]_.
Parameters
----------
k : int
The number of successes.
n : int
The number of trials.
p : float, optional
The hypothesized probability of success, i.e. the expected
proportion of successes. The value must be in the interval
``0 <= p <= 1``. The default value is ``p = 0.5``.
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is
'two-sided'.
Returns
-------
result : `~scipy.stats._result_classes.BinomTestResult` instance
The return value is an object with the following attributes:
k : int
The number of successes (copied from `binomtest` input).
n : int
The number of trials (copied from `binomtest` input).
alternative : str
Indicates the alternative hypothesis specified in the input
to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
or ``'less'``.
statistic : float
The estimate of the proportion of successes.
pvalue : float
The p-value of the hypothesis test.
The object has the following methods:
proportion_ci(confidence_level=0.95, method='exact') :
Compute the confidence interval for ``statistic``.
Notes
-----
.. versionadded:: 1.7.0
References
----------
.. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test
.. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition),
Prentice Hall, Upper Saddle River, New Jersey USA (2010)
Examples
--------
>>> from scipy.stats import binomtest
A car manufacturer claims that no more than 10% of their cars are unsafe.
15 cars are inspected for safety, 3 were found to be unsafe. Test the
manufacturer's claim:
>>> result = binomtest(3, n=15, p=0.1, alternative='greater')
>>> result.pvalue
0.18406106910639114
The null hypothesis cannot be rejected at the 5% level of significance
because the returned p-value is greater than the critical value of 5%.
The test statistic is equal to the estimated proportion, which is simply
``3/15``:
>>> result.statistic
0.2
We can use the `proportion_ci()` method of the result to compute the
confidence interval of the estimate:
>>> result.proportion_ci(confidence_level=0.95)
ConfidenceInterval(low=0.05684686759024681, high=1.0)
"""
k = _validate_int(k, 'k', minimum=0)
n = _validate_int(n, 'n', minimum=1)
if k > n:
raise ValueError(f'k ({k}) must not be greater than n ({n}).')
if not (0 <= p <= 1):
raise ValueError(f"p ({p}) must be in range [0,1]")
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError(f"alternative ('{alternative}') not recognized; \n"
"must be 'two-sided', 'less' or 'greater'")
if alternative == 'less':
pval = binom.cdf(k, n, p)
elif alternative == 'greater':
pval = binom.sf(k-1, n, p)
else:
# alternative is 'two-sided'
d = binom.pmf(k, n, p)
rerr = 1 + 1e-7
if k == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif k < p * n:
ix = _binary_search_for_binom_tst(lambda x1: -binom.pmf(x1, n, p),
-d*rerr, np.ceil(p * n), n)
# y is the number of terms between mode and n that are <= d*rerr.
# ix gave us the first term where a(ix) <= d*rerr < a(ix-1)
# if the first equality doesn't hold, y=n-ix. Otherwise, we
# need to include ix as well as the equality holds. Note that
# the equality will hold in very very rare situations due to rerr.
y = n - ix + int(d*rerr == binom.pmf(ix, n, p))
pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p)
else:
ix = _binary_search_for_binom_tst(lambda x1: binom.pmf(x1, n, p),
d*rerr, 0, np.floor(p * n))
# y is the number of terms between 0 and mode that are <= d*rerr.
# we need to add a 1 to account for the 0 index.
# For comparing this with old behavior, see
# tst_binary_srch_for_binom_tst method in test_morestats.
y = ix + 1
pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p)
pval = min(1.0, pval)
result = BinomTestResult(k=k, n=n, alternative=alternative,
statistic=k/n, pvalue=pval)
return result
def _binary_search_for_binom_tst(a, d, lo, hi):
"""
Conducts an implicit binary search on a function specified by `a`.
Meant to be used on the binomial PMF for the case of two-sided tests
to obtain the value on the other side of the mode where the tail
probability should be computed. The values on either side of
the mode are always in order, meaning binary search is applicable.
Parameters
----------
a : callable
The function over which to perform binary search. Its values
for inputs lo and hi should be in ascending order.
d : float
The value to search.
lo : int
The lower end of range to search.
hi : int
The higher end of the range to search.
Returns
-------
int
The index, i between lo and hi
such that a(i)<=d<a(i+1)
"""
while lo < hi:
mid = lo + (hi-lo)//2
midval = a(mid)
if midval < d:
lo = mid+1
elif midval > d:
hi = mid-1
else:
return mid
if a(lo) <= d:
return lo
else:
return lo-1
| BinomTestResult |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-iterable/source_iterable/components.py | {
"start": 255,
"end": 806
} | class ____(DpathExtractor):
common_fields = ("itblInternal", "_type", "createdAt", "email")
def extract_records(self, response: requests.Response) -> Iterable[Mapping[str, Any]]:
jsonl_records = super().extract_records(response=response)
for record_dict in jsonl_records:
record_dict_common_fields = {}
for field in self.common_fields:
record_dict_common_fields[field] = record_dict.pop(field, None)
yield {**record_dict_common_fields, "data": record_dict}
| EventsRecordExtractor |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_stats_v2.py | {
"start": 318,
"end": 35520
} | class ____(APITestCase, OutcomesSnubaTest):
endpoint = "sentry-api-0-organization-stats-v2"
_now = datetime.now(UTC).replace(hour=12, minute=27, second=28, microsecond=0)
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.org = self.organization
self.org.flags.allow_joinleave = False
self.org.save()
self.org2 = self.create_organization()
self.org3 = self.create_organization()
self.project = self.create_project(
name="bar", teams=[self.create_team(organization=self.org, members=[self.user])]
)
self.project2 = self.create_project(
name="foo", teams=[self.create_team(organization=self.org, members=[self.user])]
)
self.project3 = self.create_project(organization=self.org2)
self.user2 = self.create_user(is_superuser=False)
self.create_member(user=self.user2, organization=self.organization, role="member", teams=[])
self.create_member(user=self.user2, organization=self.org3, role="member", teams=[])
self.project4 = self.create_project(
name="users2sproj",
teams=[self.create_team(organization=self.org, members=[self.user2])],
)
self.store_outcomes(
{
"org_id": self.org.id,
"timestamp": self._now - timedelta(hours=1),
"project_id": self.project.id,
"outcome": Outcome.ACCEPTED,
"reason": "none",
"category": DataCategory.ERROR,
"quantity": 1,
},
5,
)
self.store_outcomes(
{
"org_id": self.org.id,
"timestamp": self._now - timedelta(hours=1),
"project_id": self.project.id,
"outcome": Outcome.ACCEPTED,
"reason": "none",
"category": DataCategory.DEFAULT, # test that this shows up under error
"quantity": 1,
}
)
self.store_outcomes(
{
"org_id": self.org.id,
"timestamp": self._now - timedelta(hours=1),
"project_id": self.project.id,
"outcome": Outcome.RATE_LIMITED,
"reason": "smart_rate_limit",
"category": DataCategory.ATTACHMENT,
"quantity": 1024,
}
)
self.store_outcomes(
{
"org_id": self.org.id,
"timestamp": self._now - timedelta(hours=1),
"project_id": self.project2.id,
"outcome": Outcome.RATE_LIMITED,
"reason": "smart_rate_limit",
"category": DataCategory.TRANSACTION,
"quantity": 1,
}
)
# Add profile_duration outcome data
self.store_outcomes(
{
"org_id": self.org.id,
"timestamp": self._now - timedelta(hours=1),
"project_id": self.project.id,
"outcome": Outcome.ACCEPTED,
"reason": "none",
"category": DataCategory.PROFILE_DURATION,
"quantity": 1000, # Duration in milliseconds
},
3,
)
def do_request(self, query, user=None, org=None, status_code=200):
self.login_as(user=user or self.user)
org_slug = (org or self.organization).slug
if status_code >= 400:
return self.get_error_response(org_slug, **query, status_code=status_code)
return self.get_success_response(org_slug, **query, status_code=status_code)
def test_empty_request(self) -> None:
response = self.do_request({}, status_code=400)
assert result_sorted(response.data) == {"detail": 'At least one "field" is required.'}
def test_inaccessible_project(self) -> None:
response = self.do_request({"project": [self.project3.id]}, status_code=403)
assert result_sorted(response.data) == {
"detail": "You do not have permission to perform this action."
}
def test_no_projects_available(self) -> None:
response = self.do_request(
{
"groupBy": ["project"],
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(quantity)"],
"category": ["error", "transaction"],
},
user=self.user2,
org=self.org3,
status_code=400,
)
assert result_sorted(response.data) == {
"detail": "No projects available",
}
def test_unknown_field(self) -> None:
response = self.do_request(
{
"field": ["summ(qarntenty)"],
"statsPeriod": "1d",
"interval": "1d",
},
status_code=400,
)
assert result_sorted(response.data) == {
"detail": 'Invalid field: "summ(qarntenty)"',
}
def test_no_end_param(self) -> None:
response = self.do_request(
{
"field": ["sum(quantity)"],
"interval": "1d",
"start": floor_to_utc_day(self._now).isoformat(),
},
status_code=400,
)
assert result_sorted(response.data) == {"detail": "start and end are both required"}
@freeze_time(_now)
def test_future_request(self) -> None:
response = self.do_request(
{
"field": ["sum(quantity)"],
"interval": "1h",
"category": ["error"],
"start": self._now.replace(hour=15, minute=30, second=0).isoformat(),
"end": self._now.replace(hour=16, minute=30, second=0).isoformat(),
},
status_code=200,
)
assert result_sorted(response.data) == {
"intervals": [
isoformat_z(self._now.replace(hour=12, minute=0, second=0)),
isoformat_z(self._now.replace(hour=13, minute=0, second=0)),
isoformat_z(self._now.replace(hour=14, minute=0, second=0)),
isoformat_z(self._now.replace(hour=15, minute=0, second=0)),
isoformat_z(self._now.replace(hour=16, minute=0, second=0)),
],
"groups": [
{
"by": {},
"series": {"sum(quantity)": [0, 0, 0, 0, 0]},
"totals": {"sum(quantity)": 0},
}
],
"start": isoformat_z(self._now.replace(hour=12, minute=0, second=0)),
"end": isoformat_z(self._now.replace(hour=17, minute=0, second=0)),
}
def test_unknown_category(self) -> None:
response = self.do_request(
{
"field": ["sum(quantity)"],
"statsPeriod": "1d",
"interval": "1d",
"category": "scoobydoo",
},
status_code=400,
)
assert result_sorted(response.data) == {
"detail": 'Invalid category: "scoobydoo"',
}
def test_unknown_outcome(self) -> None:
response = self.do_request(
{
"field": ["sum(quantity)"],
"statsPeriod": "1d",
"interval": "1d",
"category": "error",
"outcome": "scoobydoo",
},
status_code=400,
)
assert result_sorted(response.data) == {
"detail": 'Invalid outcome: "scoobydoo"',
}
def test_unknown_groupby(self) -> None:
response = self.do_request(
{
"field": ["sum(quantity)"],
"groupBy": ["category_"],
"statsPeriod": "1d",
"interval": "1d",
},
status_code=400,
)
assert result_sorted(response.data) == {"detail": 'Invalid groupBy: "category_"'}
def test_resolution_invalid(self) -> None:
self.do_request(
{
"statsPeriod": "1d",
"interval": "bad_interval",
},
org=self.org,
status_code=400,
)
@freeze_time(_now)
def test_attachment_filter_only(self) -> None:
response = self.do_request(
{
"project": [-1],
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(quantity)"],
"category": ["error", "attachment"],
},
status_code=400,
)
assert result_sorted(response.data) == {
"detail": "if filtering by attachment no other category may be present"
}
@freeze_time(_now)
def test_timeseries_interval(self) -> None:
response = self.do_request(
{
"project": [-1],
"category": ["error"],
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(quantity)"],
},
status_code=200,
)
assert result_sorted(response.data) == {
"intervals": [
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
isoformat_z(floor_to_utc_day(self._now)),
],
"groups": [
{"by": {}, "series": {"sum(quantity)": [0, 6]}, "totals": {"sum(quantity)": 6}}
],
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
}
response = self.do_request(
{
"project": [-1],
"statsPeriod": "1d",
"interval": "6h",
"field": ["sum(quantity)"],
"category": ["error"],
},
status_code=200,
)
assert result_sorted(response.data) == {
"intervals": [
isoformat_z((self._now - timedelta(days=1)).replace(hour=12, minute=0, second=0)),
isoformat_z((self._now - timedelta(days=1)).replace(hour=18, minute=0, second=0)),
isoformat_z(self._now.replace(hour=0, minute=0, second=0)),
isoformat_z(self._now.replace(hour=6, minute=0, second=0)),
isoformat_z(self._now.replace(hour=12, minute=0, second=0)),
],
"groups": [
{
"by": {},
"series": {"sum(quantity)": [0, 0, 0, 6, 0]},
"totals": {"sum(quantity)": 6},
}
],
"start": isoformat_z(
self._now.replace(hour=12, minute=0, second=0) - timedelta(days=1)
),
"end": isoformat_z(self._now.replace(hour=18, minute=0, second=0)),
}
@freeze_time(_now)
def test_user_org_total_all_accessible(self) -> None:
response = self.do_request(
{
"project": [-1],
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(quantity)"],
"category": ["error", "transaction"],
},
user=self.user2,
status_code=200,
)
assert result_sorted(response.data) == {
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
"intervals": [
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
isoformat_z(floor_to_utc_day(self._now)),
],
"groups": [
{"by": {}, "series": {"sum(quantity)": [0, 7]}, "totals": {"sum(quantity)": 7}}
],
}
@freeze_time(_now)
def test_user_no_proj_specific_access(self) -> None:
response = self.do_request(
{
"project": self.project.id,
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(quantity)"],
"category": ["error", "transaction"],
},
user=self.user2,
status_code=403,
)
response = self.do_request(
{
"project": [-1],
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(quantity)"],
"category": ["error", "transaction"],
"groupBy": ["project"],
},
user=self.user2,
status_code=200,
)
assert result_sorted(response.data) == {
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
"groups": [],
}
@freeze_time(_now)
def test_no_project_access(self) -> None:
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="member", teams=[])
response = self.do_request(
{
"project": [self.project.id],
"statsPeriod": "1d",
"interval": "1d",
"category": ["error", "transaction"],
"field": ["sum(quantity)"],
},
org=self.organization,
user=user,
status_code=403,
)
assert result_sorted(response.data) == {
"detail": "You do not have permission to perform this action."
}
response = self.do_request(
{
"project": [self.project.id],
"groupBy": ["project"],
"statsPeriod": "1d",
"interval": "1d",
"category": ["error", "transaction"],
"field": ["sum(quantity)"],
},
org=self.organization,
user=user,
status_code=403,
)
assert result_sorted(response.data) == {
"detail": "You do not have permission to perform this action."
}
@freeze_time(_now)
def test_open_membership_semantics(self) -> None:
self.org.flags.allow_joinleave = True
self.org.save()
response = self.do_request(
{
"project": [-1],
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(quantity)"],
"category": ["error", "transaction"],
"groupBy": ["project"],
},
user=self.user2,
status_code=200,
)
assert result_sorted(response.data) == {
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
"groups": [
{
"by": {"project": self.project.id},
"totals": {"sum(quantity)": 6},
},
{
"by": {"project": self.project2.id},
"totals": {"sum(quantity)": 1},
},
],
}
@freeze_time(_now)
def test_org_simple(self) -> None:
response = self.do_request(
{
"statsPeriod": "2d",
"interval": "1d",
"field": ["sum(quantity)"],
"groupBy": ["category", "outcome", "reason"],
},
org=self.org,
status_code=200,
)
assert result_sorted(response.data) == {
"groups": [
{
"by": {
"category": "attachment",
"outcome": "rate_limited",
"reason": "spike_protection",
},
"series": {"sum(quantity)": [0, 0, 1024]},
"totals": {"sum(quantity)": 1024},
},
{
"by": {"category": "error", "outcome": "accepted", "reason": "none"},
"series": {"sum(quantity)": [0, 0, 6]},
"totals": {"sum(quantity)": 6},
},
{
"by": {"category": "profile_duration", "outcome": "accepted", "reason": "none"},
"series": {"sum(quantity)": [0, 0, 3000]},
"totals": {"sum(quantity)": 3000},
},
{
"by": {
"category": "transaction",
"outcome": "rate_limited",
"reason": "spike_protection",
},
"series": {"sum(quantity)": [0, 0, 1]},
"totals": {"sum(quantity)": 1},
},
],
"intervals": [
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=2)),
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
isoformat_z(floor_to_utc_day(self._now)),
],
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=2)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
}
@freeze_time(_now)
def test_staff_org_individual_category(self) -> None:
staff_user = self.create_user(is_staff=True, is_superuser=True)
self.login_as(user=staff_user, superuser=True)
category_group_mapping = {
"attachment": {
"by": {
"outcome": "rate_limited",
"reason": "spike_protection",
},
"totals": {"sum(quantity)": 1024},
"series": {"sum(quantity)": [0, 0, 1024]},
},
"error": {
"by": {"outcome": "accepted", "reason": "none"},
"totals": {"sum(quantity)": 6},
"series": {"sum(quantity)": [0, 0, 6]},
},
"transaction": {
"by": {
"reason": "spike_protection",
"outcome": "rate_limited",
},
"totals": {"sum(quantity)": 1},
"series": {"sum(quantity)": [0, 0, 1]},
},
}
# Test each category individually
for category in ["attachment", "error", "transaction"]:
response = self.do_request(
{
"category": category,
"statsPeriod": "2d",
"interval": "1d",
"field": ["sum(quantity)"],
"groupBy": ["outcome", "reason"],
},
org=self.org,
status_code=200,
)
assert result_sorted(response.data) == {
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=2)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
"intervals": [
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=2)),
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
isoformat_z(floor_to_utc_day(self._now)),
],
"groups": [category_group_mapping[category]],
}
@freeze_time(_now)
def test_org_multiple_fields(self) -> None:
response = self.do_request(
{
"statsPeriod": "2d",
"interval": "1d",
"field": ["sum(quantity)", "sum(times_seen)"],
"groupBy": ["category", "outcome", "reason"],
},
org=self.org,
status_code=200,
)
assert result_sorted(response.data) == {
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=2)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
"intervals": [
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=2)),
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
isoformat_z(floor_to_utc_day(self._now)),
],
"groups": [
{
"by": {
"reason": "spike_protection",
"outcome": "rate_limited",
"category": "attachment",
},
"totals": {"sum(quantity)": 1024, "sum(times_seen)": 1},
"series": {"sum(quantity)": [0, 0, 1024], "sum(times_seen)": [0, 0, 1]},
},
{
"by": {"category": "error", "reason": "none", "outcome": "accepted"},
"totals": {"sum(quantity)": 6, "sum(times_seen)": 6},
"series": {"sum(quantity)": [0, 0, 6], "sum(times_seen)": [0, 0, 6]},
},
{
"by": {"category": "profile_duration", "reason": "none", "outcome": "accepted"},
"totals": {"sum(quantity)": 3000, "sum(times_seen)": 3},
"series": {"sum(quantity)": [0, 0, 3000], "sum(times_seen)": [0, 0, 3]},
},
{
"by": {
"category": "transaction",
"reason": "spike_protection",
"outcome": "rate_limited",
},
"totals": {"sum(quantity)": 1, "sum(times_seen)": 1},
"series": {"sum(quantity)": [0, 0, 1], "sum(times_seen)": [0, 0, 1]},
},
],
}
@freeze_time(_now)
def test_org_group_by_project(self) -> None:
response = self.do_request(
{
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(times_seen)"],
"groupBy": ["project"],
"category": ["error", "transaction"],
},
org=self.org,
status_code=200,
)
assert result_sorted(response.data) == {
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
"groups": [
{
"by": {"project": self.project.id},
"totals": {"sum(times_seen)": 6},
},
{
"by": {"project": self.project2.id},
"totals": {"sum(times_seen)": 1},
},
],
}
@freeze_time(_now)
def test_org_project_totals_per_project(self) -> None:
response_per_group = self.do_request(
{
"statsPeriod": "1d",
"interval": "1h",
"field": ["sum(times_seen)"],
"groupBy": ["project"],
"category": ["error", "transaction"],
},
org=self.org,
status_code=200,
)
response_total = self.do_request(
{
"statsPeriod": "1d",
"interval": "1h",
"field": ["sum(times_seen)"],
"category": ["error", "transaction"],
},
org=self.org,
status_code=200,
)
per_group_total = 0
for total in response_per_group.data["groups"]:
per_group_total += total["totals"]["sum(times_seen)"]
assert response_per_group.status_code == 200, response_per_group.content
assert response_total.status_code == 200, response_total.content
assert response_total.data["groups"][0]["totals"]["sum(times_seen)"] == per_group_total
@freeze_time(_now)
def test_project_filter(self) -> None:
response = self.do_request(
{
"project": self.project.id,
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(quantity)"],
"category": ["error", "transaction"],
},
org=self.org,
status_code=200,
)
assert result_sorted(response.data) == {
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
"intervals": [
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
isoformat_z(floor_to_utc_day(self._now)),
],
"groups": [
{"by": {}, "totals": {"sum(quantity)": 6}, "series": {"sum(quantity)": [0, 6]}}
],
}
@freeze_time(_now)
def test_staff_project_filter(self) -> None:
staff_user = self.create_user(is_staff=True, is_superuser=True)
self.login_as(user=staff_user, superuser=True)
shared_query_params = {
"field": "sum(quantity)",
"groupBy": ["outcome", "reason"],
"interval": "1d",
"statsPeriod": "1d",
}
shared_data = {
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
"intervals": [
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
isoformat_z(floor_to_utc_day(self._now)),
],
}
# Test error category
response = self.do_request(
{
**shared_query_params,
"category": "error",
"project": self.project.id,
},
org=self.org,
status_code=200,
)
assert result_sorted(response.data) == {
**shared_data,
"groups": [
{
"by": {"outcome": "accepted", "reason": "none"},
"totals": {"sum(quantity)": 6},
"series": {"sum(quantity)": [0, 6]},
},
],
}
# Test transaction category
response = self.do_request(
{
**shared_query_params,
"category": "transaction",
"project": self.project2.id,
},
org=self.org,
status_code=200,
)
assert result_sorted(response.data) == {
**shared_data,
"groups": [
{
"by": {"outcome": "rate_limited", "reason": "spike_protection"},
"totals": {"sum(quantity)": 1},
"series": {"sum(quantity)": [0, 1]},
}
],
}
@freeze_time(_now)
def test_reason_filter(self) -> None:
response = self.do_request(
{
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(times_seen)"],
"reason": ["spike_protection"],
"groupBy": ["category"],
},
org=self.org,
status_code=200,
)
assert result_sorted(response.data) == {
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
"intervals": [
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
isoformat_z(floor_to_utc_day(self._now)),
],
"groups": [
{
"by": {"category": "attachment"},
"totals": {"sum(times_seen)": 1},
"series": {"sum(times_seen)": [0, 1]},
},
{
"by": {"category": "transaction"},
"totals": {"sum(times_seen)": 1},
"series": {"sum(times_seen)": [0, 1]},
},
],
}
@freeze_time(_now)
def test_outcome_filter(self) -> None:
response = self.do_request(
{
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(quantity)"],
"outcome": "accepted",
"category": ["error", "transaction"],
},
org=self.org,
status_code=200,
)
assert result_sorted(response.data) == {
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
"intervals": [
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
isoformat_z(floor_to_utc_day(self._now)),
],
"groups": [
{"by": {}, "totals": {"sum(quantity)": 6}, "series": {"sum(quantity)": [0, 6]}}
],
}
@freeze_time(_now)
def test_category_filter(self) -> None:
response = self.do_request(
{
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(quantity)"],
"category": "error",
},
org=self.org,
status_code=200,
)
assert result_sorted(response.data) == {
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
"intervals": [
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
isoformat_z(floor_to_utc_day(self._now)),
],
"groups": [
{"by": {}, "totals": {"sum(quantity)": 6}, "series": {"sum(quantity)": [0, 6]}}
],
}
@freeze_time(_now)
def test_minute_interval_sum_quantity(self) -> None:
response = self.do_request(
{
"statsPeriod": "1h",
"interval": "15m",
"field": ["sum(quantity)"],
"category": "error",
},
org=self.org,
status_code=200,
)
assert result_sorted(response.data) == {
"start": isoformat_z(self._now.replace(hour=11, minute=15, second=0)),
"end": isoformat_z(self._now.replace(hour=12, minute=30, second=0)),
"intervals": [
isoformat_z(self._now.replace(hour=11, minute=15, second=0)),
isoformat_z(self._now.replace(hour=11, minute=30, second=0)),
isoformat_z(self._now.replace(hour=11, minute=45, second=0)),
isoformat_z(self._now.replace(hour=12, minute=00, second=0)),
isoformat_z(self._now.replace(hour=12, minute=15, second=0)),
],
"groups": [
{
"by": {},
"totals": {"sum(quantity)": 6},
"series": {"sum(quantity)": [6, 0, 0, 0, 0]},
}
],
}
@freeze_time(_now)
def test_minute_interval_sum_times_seen(self) -> None:
response = self.do_request(
{
"statsPeriod": "1h",
"interval": "15m",
"field": ["sum(times_seen)"],
"category": "error",
}
)
assert response.status_code == 200, response.content
assert result_sorted(response.data) == {
"start": isoformat_z(self._now.replace(hour=11, minute=15, second=0)),
"end": isoformat_z(self._now.replace(hour=12, minute=30, second=0)),
"intervals": [
isoformat_z(self._now.replace(hour=11, minute=15, second=0)),
isoformat_z(self._now.replace(hour=11, minute=30, second=0)),
isoformat_z(self._now.replace(hour=11, minute=45, second=0)),
isoformat_z(self._now.replace(hour=12, minute=00, second=0)),
isoformat_z(self._now.replace(hour=12, minute=15, second=0)),
],
"groups": [
{
"by": {},
"totals": {"sum(times_seen)": 6},
"series": {"sum(times_seen)": [6, 0, 0, 0, 0]},
}
],
}
@freeze_time(_now)
def test_profile_duration_filter(self) -> None:
"""Test that profile_duration data is correctly filtered and returned"""
response = self.do_request(
{
"project": [-1],
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(quantity)"],
"category": ["profile_duration"],
},
status_code=200,
)
assert result_sorted(response.data) == {
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
"intervals": [
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
isoformat_z(floor_to_utc_day(self._now)),
],
"groups": [
{
"by": {},
"series": {"sum(quantity)": [0, 3000]}, # 3 outcomes * 1000ms = 3000
"totals": {"sum(quantity)": 3000},
}
],
}
@freeze_time(_now)
def test_profile_duration_groupby(self) -> None:
"""Test that profile_duration data is correctly grouped"""
response = self.do_request(
{
"project": [-1],
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(quantity)"],
"groupBy": ["category"],
"category": ["profile_duration"],
},
status_code=200,
)
assert result_sorted(response.data) == {
"start": isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
"end": isoformat_z(floor_to_utc_day(self._now) + timedelta(days=1)),
"intervals": [
isoformat_z(floor_to_utc_day(self._now) - timedelta(days=1)),
isoformat_z(floor_to_utc_day(self._now)),
],
"groups": [
{
"by": {"category": "profile_duration"},
"series": {"sum(quantity)": [0, 3000]},
"totals": {"sum(quantity)": 3000},
}
],
}
def result_sorted(result):
"""sort the groups of the results array by the `by` object, ensuring a stable order"""
def stable_dict(d):
return tuple(sorted(d.items(), key=lambda t: t[0]))
if "groups" in result:
result["groups"].sort(key=lambda group: stable_dict(group["by"]))
return result
# TEST invalid parameter
| OrganizationStatsTestV2 |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 412534,
"end": 416767
} | class ____:
unif = stats.uniform(0, 1)
ih1 = stats.irwinhall(1)
ih10 = stats.irwinhall(10)
def test_stats_ih10(self):
# from Wolfram Alpha "mean variance skew kurtosis UniformSumDistribution[10]"
# W|A uses Pearson's definition of kurtosis so subtract 3
# should be exact integer division converted to fp64, without any further ops
assert_array_max_ulp(self.ih10.stats('mvsk'), (5, 10/12, 0, -3/25))
def test_moments_ih10(self):
# from Wolfram Alpha "values moments UniformSumDistribution[10]"
# algo should use integer division converted to fp64, without any further ops
# so these should be precise to the ulpm if not exact
vals = [5, 155 / 6, 275 / 2, 752, 12650 / 3,
677465 / 28, 567325 / 4,
15266213 / 18, 10333565 / 2]
moments = [self.ih10.moment(n+1) for n in range(len(vals))]
assert_array_max_ulp(moments, vals)
# also from Wolfram Alpha "50th moment UniformSumDistribution[10]"
m50 = self.ih10.moment(50)
m50_exact = 17453002755350010529309685557285098151740985685/4862
assert_array_max_ulp(m50, m50_exact)
def test_pdf_ih1_unif(self):
# IH(1) PDF is by definition U(0,1)
# we should be too, but differences in floating point eval order happen
# it's unclear if we can get down to the single ulp for doubles unless
# quads are used we're within 6-10 ulps otherwise (across sf/cdf/pdf)
# which is pretty good
pts = np.linspace(0, 1, 100)
pdf_unif = self.unif.pdf(pts)
pdf_ih1 = self.ih1.pdf(pts)
assert_array_max_ulp(pdf_ih1, pdf_unif, maxulp=10)
def test_pdf_ih2_triangle(self):
# IH(2) PDF is a triangle
ih2 = stats.irwinhall(2)
npts = 101
pts = np.linspace(0, 2, npts)
expected = np.linspace(0, 2, npts)
expected[(npts + 1) // 2:] = 2 - expected[(npts + 1) // 2:]
pdf_ih2 = ih2.pdf(pts)
assert_array_max_ulp(pdf_ih2, expected, maxulp=10)
def test_cdf_ih1_unif(self):
# CDF of IH(1) should be identical to uniform
pts = np.linspace(0, 1, 100)
cdf_unif = self.unif.cdf(pts)
cdf_ih1 = self.ih1.cdf(pts)
assert_array_max_ulp(cdf_ih1, cdf_unif, maxulp=10)
def test_cdf(self):
# CDF of IH is symmetric so CDF should be 0.5 at n/2
n = np.arange(1, 10)
ih = stats.irwinhall(n)
ih_cdf = ih.cdf(n / 2)
exact = np.repeat(1/2, len(n))
# should be identically 1/2 but fp order of eval differences happen
assert_array_max_ulp(ih_cdf, exact, maxulp=10)
def test_cdf_ih10_exact(self):
# from Wolfram Alpha "values CDF[UniformSumDistribution[10], x] x=0 to x=10"
# symmetric about n/2, i.e., cdf[n-x] = 1-cdf[x] = sf[x]
vals = [0, 1 / 3628800, 169 / 604800, 24427 / 1814400,
252023 / 1814400, 1 / 2, 1562377 / 1814400,
1789973 / 1814400, 604631 / 604800,
3628799 / 3628800, 1]
# essentially a test of bspline evaluation
# this and the other ones are mostly to detect regressions
assert_array_max_ulp(self.ih10.cdf(np.arange(11)), vals, maxulp=10)
assert_array_max_ulp(self.ih10.cdf(1/10), 1/36288000000000000, maxulp=10)
ref = 36287999999999999/36288000000000000
assert_array_max_ulp(self.ih10.cdf(99/10), ref, maxulp=10)
def test_pdf_ih10_exact(self):
# from Wolfram Alpha "values PDF[UniformSumDistribution[10], x] x=0 to x=10"
# symmetric about n/2 = 5
vals = [0, 1 / 362880, 251 / 181440, 913 / 22680, 44117 / 181440]
vals += [15619 / 36288] + vals[::-1]
assert_array_max_ulp(self.ih10.pdf(np.arange(11)), vals, maxulp=10)
def test_sf_ih10_exact(self):
assert_allclose(self.ih10.sf(np.arange(11)), 1 - self.ih10.cdf(np.arange(11)))
# from Wolfram Alpha "SurvivalFunction[UniformSumDistribution[10],x] at x=1/10"
# and symmetry about n/2 = 5
# W|A returns 1 for CDF @ x=9.9
ref = 36287999999999999/36288000000000000
assert_array_max_ulp(self.ih10.sf(1/10), ref, maxulp=10)
| TestIrwinHall |
python | davidhalter__parso | parso/python/errors.py | {
"start": 19614,
"end": 19959
} | class ____(SyntaxRule):
message = "'break' outside loop"
def is_issue(self, leaf):
in_loop = False
for block in self._normalizer.context.blocks:
if block.type in ('for_stmt', 'while_stmt'):
in_loop = True
return not in_loop
@ErrorFinder.register_rule(value='continue')
| _BreakOutsideLoop |
python | pytorch__pytorch | benchmarks/dynamo/runner.py | {
"start": 20008,
"end": 20609
} | class ____:
def __init__(
self, suites, devices, dtypes, compilers, flag_compilers, mode, output_dir
):
self.suites = suites
self.devices = devices
self.dtypes = dtypes
self.compilers = compilers
self.flag_compilers = flag_compilers
self.output_dir = output_dir
self.mode = mode
def has_header(self, output_filename):
header_present = False
with open(output_filename) as f:
line = f.readline()
if "dev" in line:
header_present = True
return header_present
| Parser |
python | pyparsing__pyparsing | pyparsing/core.py | {
"start": 99466,
"end": 99749
} | class ____(ParserElement):
"""Abstract :class:`ParserElement` subclass, for defining atomic
matching patterns.
"""
def __init__(self) -> None:
super().__init__(savelist=False)
def _generateDefaultName(self) -> str:
return type(self).__name__
| Token |
python | pydata__xarray | xarray/core/dtypes.py | {
"start": 556,
"end": 8323
} | class ____:
def __lt__(self, other):
return True
def __eq__(self, other):
return isinstance(other, type(self))
# Equivalence to np.inf (-np.inf) for object-type
INF = AlwaysGreaterThan()
NINF = AlwaysLessThan()
# Pairs of types that, if both found, should be promoted to object dtype
# instead of following NumPy's own type-promotion rules. These type promotion
# rules match pandas instead. For reference, see the NumPy type hierarchy:
# https://numpy.org/doc/stable/reference/arrays.scalars.html
PROMOTE_TO_OBJECT: tuple[tuple[type[np.generic], type[np.generic]], ...] = (
(np.number, np.character), # numpy promotes to character
(np.bool_, np.character), # numpy promotes to character
(np.bytes_, np.str_), # numpy promotes to unicode
)
def maybe_promote(dtype: np.dtype) -> tuple[np.dtype, Any]:
"""Simpler equivalent of pandas.core.common._maybe_promote
Parameters
----------
dtype : np.dtype
Returns
-------
dtype : Promoted dtype that can hold missing values.
fill_value : Valid missing value for the promoted dtype.
"""
# N.B. these casting rules should match pandas
dtype_: np.typing.DTypeLike
fill_value: Any
if HAS_STRING_DTYPE and np.issubdtype(dtype, np.dtypes.StringDType()):
# for now, we always promote string dtypes to object for consistency with existing behavior
# TODO: refactor this once we have a better way to handle numpy vlen-string dtypes
dtype_ = object
fill_value = np.nan
elif isdtype(dtype, "real floating"):
dtype_ = dtype
fill_value = np.nan
elif np.issubdtype(dtype, np.timedelta64):
# See https://github.com/numpy/numpy/issues/10685
# np.timedelta64 is a subclass of np.integer
# Check np.timedelta64 before np.integer
fill_value = np.timedelta64("NaT")
dtype_ = dtype
elif isdtype(dtype, "integral"):
dtype_ = np.float32 if dtype.itemsize <= 2 else np.float64
fill_value = np.nan
elif isdtype(dtype, "complex floating"):
dtype_ = dtype
fill_value = np.nan + np.nan * 1j
elif np.issubdtype(dtype, np.datetime64):
dtype_ = dtype
fill_value = np.datetime64("NaT")
else:
dtype_ = object
fill_value = np.nan
dtype_out = np.dtype(dtype_)
fill_value = dtype_out.type(fill_value)
return dtype_out, fill_value
NAT_TYPES = {np.datetime64("NaT").dtype, np.timedelta64("NaT").dtype}
def get_fill_value(dtype):
"""Return an appropriate fill value for this dtype.
Parameters
----------
dtype : np.dtype
Returns
-------
fill_value : Missing value corresponding to this dtype.
"""
_, fill_value = maybe_promote(dtype)
return fill_value
def get_pos_infinity(dtype, max_for_int=False):
"""Return an appropriate positive infinity for this dtype.
Parameters
----------
dtype : np.dtype
max_for_int : bool
Return np.iinfo(dtype).max instead of np.inf
Returns
-------
fill_value : positive infinity value corresponding to this dtype.
"""
if isdtype(dtype, "real floating"):
return np.inf
if isdtype(dtype, "integral"):
if max_for_int:
return np.iinfo(dtype).max
else:
return np.inf
if isdtype(dtype, "complex floating"):
return np.inf + 1j * np.inf
if isdtype(dtype, "bool"):
return True
return np.array(INF, dtype=object)
def get_neg_infinity(dtype, min_for_int=False):
"""Return an appropriate positive infinity for this dtype.
Parameters
----------
dtype : np.dtype
min_for_int : bool
Return np.iinfo(dtype).min instead of -np.inf
Returns
-------
fill_value : positive infinity value corresponding to this dtype.
"""
if isdtype(dtype, "real floating"):
return -np.inf
if isdtype(dtype, "integral"):
if min_for_int:
return np.iinfo(dtype).min
else:
return -np.inf
if isdtype(dtype, "complex floating"):
return -np.inf - 1j * np.inf
if isdtype(dtype, "bool"):
return False
return np.array(NINF, dtype=object)
def is_datetime_like(dtype) -> bool:
"""Check if a dtype is a subclass of the numpy datetime types"""
return _is_numpy_subdtype(dtype, (np.datetime64, np.timedelta64))
def is_object(dtype) -> bool:
"""Check if a dtype is object"""
return _is_numpy_subdtype(dtype, object)
def is_string(dtype) -> bool:
"""Check if a dtype is a string dtype"""
return _is_numpy_subdtype(dtype, (np.str_, np.character))
def _is_numpy_subdtype(dtype, kind) -> bool:
if not isinstance(dtype, np.dtype):
return False
kinds = kind if isinstance(kind, tuple) else (kind,)
return any(np.issubdtype(dtype, kind) for kind in kinds)
def isdtype(dtype, kind: str | tuple[str, ...], xp=None) -> bool:
"""Compatibility wrapper for isdtype() from the array API standard.
Unlike xp.isdtype(), kind must be a string.
"""
# TODO(shoyer): remove this wrapper when Xarray requires
# numpy>=2 and pandas extensions arrays are implemented in
# Xarray via the array API
if not isinstance(kind, str) and not (
isinstance(kind, tuple) and all(isinstance(k, str) for k in kind) # type: ignore[redundant-expr]
):
raise TypeError(f"kind must be a string or a tuple of strings: {kind!r}")
if isinstance(dtype, np.dtype):
return npcompat.isdtype(dtype, kind)
elif pd.api.types.is_extension_array_dtype(dtype): # noqa: TID251
# we never want to match pandas extension array dtypes
return False
else:
if xp is None:
xp = np
return xp.isdtype(dtype, kind)
def preprocess_types(t):
if isinstance(t, str | bytes):
return type(t)
elif isinstance(dtype := getattr(t, "dtype", t), np.dtype) and (
np.issubdtype(dtype, np.str_) or np.issubdtype(dtype, np.bytes_)
):
# drop the length from numpy's fixed-width string dtypes, it is better to
# recalculate
# TODO(keewis): remove once the minimum version of `numpy.result_type` does this
# for us
return dtype.type
else:
return t
def result_type(
*arrays_and_dtypes: np.typing.ArrayLike | np.typing.DTypeLike | None,
xp=None,
) -> np.dtype:
"""Like np.result_type, but with type promotion rules matching pandas.
Examples of changed behavior:
number + string -> object (not string)
bytes + unicode -> object (not unicode)
Parameters
----------
*arrays_and_dtypes : list of arrays and dtypes
The dtype is extracted from both numpy and dask arrays.
Returns
-------
numpy.dtype for the result.
"""
# TODO (keewis): replace `array_api_compat.result_type` with `xp.result_type` once we
# can require a version of the Array API that supports passing scalars to it.
from xarray.core.duck_array_ops import get_array_namespace
if xp is None:
xp = get_array_namespace(arrays_and_dtypes)
types = {
array_api_compat.result_type(preprocess_types(t), xp=xp)
for t in arrays_and_dtypes
}
if any(isinstance(t, np.dtype) for t in types):
# only check if there's numpy dtypes – the array API does not
# define the types we're checking for
for left, right in PROMOTE_TO_OBJECT:
if any(np.issubdtype(t, left) for t in types) and any(
np.issubdtype(t, right) for t in types
):
return np.dtype(object)
return array_api_compat.result_type(
*map(preprocess_types, arrays_and_dtypes), xp=xp
)
| AlwaysLessThan |
python | ray-project__ray | python/ray/train/v2/tests/util.py | {
"start": 4402,
"end": 7193
} | class ____(ObjectRefWrapper):
"""Mock object that returns the object passed in without going through ray.put."""
def __init__(self, obj):
self._obj = obj
def get(self):
return self._obj
_RUN_ID = "mock_run_id"
def create_mock_train_run(
status: RunStatus = RunStatus.RUNNING,
controller_actor_id: Optional[str] = None,
end_time_ns: Optional[int] = None,
id: Optional[str] = None,
status_detail: Optional[str] = None,
):
return TrainRun(
schema_version=0,
id=id or _RUN_ID,
name="test_run",
job_id=uuid.uuid4().hex,
controller_actor_id=controller_actor_id or uuid.uuid4().hex,
status=status,
status_detail=status_detail,
start_time_ns=time.time_ns(),
end_time_ns=end_time_ns,
controller_log_file_path="/tmp/ray/session_xxx/logs/train/ray-train-app-controller.log",
)
def create_mock_train_run_attempt(
attempt_id: str = "mock_attempt_id",
status: RunAttemptStatus = RunAttemptStatus.RUNNING,
end_time_ns: Optional[int] = None,
run_id: Optional[str] = None,
worker_status: Optional[ActorStatus] = ActorStatus.ALIVE,
status_detail: Optional[str] = None,
):
worker = TrainWorker(
world_rank=0,
local_rank=0,
node_rank=0,
actor_id=uuid.uuid4().hex,
node_id=uuid.uuid4().hex,
node_ip="127.0.0.1",
pid=1234,
gpu_ids=[0],
status=worker_status,
resources=TrainResources(resources={"CPU": 1}),
log_file_path="/tmp/ray/session_xxx/logs/train/ray-train-app-worker.log",
)
return TrainRunAttempt(
schema_version=0,
attempt_id=attempt_id,
run_id=run_id or _RUN_ID,
status=status,
status_detail=status_detail,
start_time_ns=time.time_ns(),
resources=[TrainResources(resources={"CPU": 1})],
workers=[worker],
end_time_ns=end_time_ns,
)
def create_dummy_run_context(**kwargs: dict) -> TrainRunContext:
"""Create a standardized TrainRunContext for testing.
Args:
**kwargs: Optional overrides for the default configuration.
Returns:
TrainRunContext: A standardized TrainRunContext instance for testing.
"""
from ray.train import BackendConfig, DataConfig
from ray.train.v2._internal.execution.context import TrainRunContext
from ray.train.v2.api.config import RunConfig, ScalingConfig
config = dict(
run_config=RunConfig(name="test"),
train_loop_config={},
scaling_config=ScalingConfig(num_workers=1),
backend_config=BackendConfig(),
datasets={},
dataset_config=DataConfig(),
)
config.update(kwargs)
return TrainRunContext(**config)
| DummyObjectRefWrapper |
python | doocs__leetcode | solution/3000-3099/3023.Find Pattern in Infinite Stream I/Solution.py | {
"start": 105,
"end": 797
} | class ____:
def findPattern(
self, stream: Optional["InfiniteStream"], pattern: List[int]
) -> int:
a = b = 0
m = len(pattern)
half = m >> 1
mask1 = (1 << half) - 1
mask2 = (1 << (m - half)) - 1
for i in range(half):
a |= pattern[i] << (half - 1 - i)
for i in range(half, m):
b |= pattern[i] << (m - 1 - i)
x = y = 0
for i in count(1):
v = stream.next()
y = y << 1 | v
v = y >> (m - half) & 1
y &= mask2
x = x << 1 | v
x &= mask1
if i >= m and a == x and b == y:
return i - m
| Solution |
python | coleifer__peewee | tests/base_models.py | {
"start": 911,
"end": 1047
} | class ____(TestModel):
user = ForeignKeyField(User, backref='tweets')
content = TextField()
timestamp = TimestampField()
| Tweet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.