language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | dagster-io__dagster | python_modules/dagster/dagster/_utils/aiodataloader.py | {
"start": 1007,
"end": 1277
} | class ____(Generic[KeyT, ReturnT]):
def __init__(
self,
batch_load_fn: Callable[[Iterable[KeyT]], Coroutine[Any, Any, Iterable[ReturnT]]],
max_batch_size: Optional[int] = None,
):
self.max_batch_size = max_batch_size
| _BaseDataLoader |
python | pydantic__pydantic | tests/mypy/outputs/mypy-plugin_ini/decorator_implicit_classmethod.py | {
"start": 181,
"end": 1099
} | class ____(BaseModel):
a: int
@field_validator('a')
def f_val(cls, value: int) -> int:
reveal_type(cls)
# MYPY: note: Revealed type is "type[tests.mypy.modules.decorator_implicit_classmethod.Model]"
return value
@model_validator(mode='before')
def m_val_before(cls, values: dict[str, object]) -> dict[str, object]:
reveal_type(cls)
# MYPY: note: Revealed type is "type[tests.mypy.modules.decorator_implicit_classmethod.Model]"
return values
@model_validator(mode='after')
def m_val_after(self) -> 'Model':
reveal_type(self)
# MYPY: note: Revealed type is "tests.mypy.modules.decorator_implicit_classmethod.Model"
return self
@model_serializer
def m_ser(self) -> dict[str, object]:
reveal_type(self)
# MYPY: note: Revealed type is "tests.mypy.modules.decorator_implicit_classmethod.Model"
return self.model_dump()
| Model |
python | numba__numba | numba/tests/test_npdatetime.py | {
"start": 8983,
"end": 21434
} | class ____(TestCase):
jitargs = dict(forceobj=True)
def jit(self, pyfunc):
return jit(**self.jitargs)(pyfunc)
def test_add(self):
f = self.jit(add_usecase)
def check(a, b, expected):
self.assertPreciseEqual(f(a, b), expected)
self.assertPreciseEqual(f(b, a), expected)
check(TD(1), TD(2), TD(3))
check(TD(1, 's'), TD(2, 's'), TD(3, 's'))
# Implicit unit promotion
check(TD(1, 's'), TD(2, 'us'), TD(1000002, 'us'))
check(TD(1, 'W'), TD(2, 'D'), TD(9, 'D'))
# NaTs
check(TD('NaT'), TD(1), TD('NaT'))
check(TD('NaT', 's'), TD(1, 'D'), TD('NaT', 's'))
check(TD('NaT', 's'), TD(1, 'ms'), TD('NaT', 'ms'))
# Cannot add days and months
with self.assertRaises((TypeError, TypingError)):
f(TD(1, 'M'), TD(1, 'D'))
def test_sub(self):
f = self.jit(sub_usecase)
def check(a, b, expected):
self.assertPreciseEqual(f(a, b), expected)
self.assertPreciseEqual(f(b, a), -expected)
check(TD(3), TD(2), TD(1))
check(TD(3, 's'), TD(2, 's'), TD(1, 's'))
# Implicit unit promotion
check(TD(3, 's'), TD(2, 'us'), TD(2999998, 'us'))
check(TD(1, 'W'), TD(2, 'D'), TD(5, 'D'))
# NaTs
check(TD('NaT'), TD(1), TD('NaT'))
check(TD('NaT', 's'), TD(1, 'D'), TD('NaT', 's'))
check(TD('NaT', 's'), TD(1, 'ms'), TD('NaT', 'ms'))
# Cannot sub days to months
with self.assertRaises((TypeError, TypingError)):
f(TD(1, 'M'), TD(1, 'D'))
def test_mul(self):
f = self.jit(mul_usecase)
def check(a, b, expected):
self.assertPreciseEqual(f(a, b), expected)
self.assertPreciseEqual(f(b, a), expected)
# non-int64 int * timedelta64
check(TD(3), np.uint32(2), TD(6))
# int * timedelta64
check(TD(3), 2, TD(6))
check(TD(3, 'ps'), 2, TD(6, 'ps'))
check(TD('NaT', 'ps'), 2, TD('NaT', 'ps'))
# float * timedelta64
check(TD(7), 1.5, TD(10))
check(TD(-7), 1.5, TD(-10))
check(TD(7, 'ps'), -1.5, TD(-10, 'ps'))
check(TD(-7), -1.5, TD(10))
check(TD('NaT', 'ps'), -1.5, TD('NaT', 'ps'))
check(TD(7, 'ps'), float('nan'), TD('NaT', 'ps'))
# wraparound on overflow
check(TD(2**62, 'ps'), 16, TD(0, 'ps'))
def test_div(self):
div = self.jit(div_usecase)
floordiv = self.jit(floordiv_usecase)
def check(a, b, expected):
self.assertPreciseEqual(div(a, b), expected)
self.assertPreciseEqual(floordiv(a, b), expected)
# timedelta64 / non-int64 int
check(TD(-3, 'ps'), np.uint32(2), TD(-1, 'ps'))
# timedelta64 / int
check(TD(3), 2, TD(1))
check(TD(-3, 'ps'), 2, TD(-1, 'ps'))
check(TD('NaT', 'ps'), 2, TD('NaT', 'ps'))
check(TD(3, 'ps'), 0, TD('NaT', 'ps'))
check(TD('NaT', 'ps'), 0, TD('NaT', 'ps'))
# timedelta64 / float
check(TD(7), 0.5, TD(14))
check(TD(-7, 'ps'), 1.5, TD(-4, 'ps'))
check(TD('NaT', 'ps'), 2.5, TD('NaT', 'ps'))
check(TD(3, 'ps'), 0.0, TD('NaT', 'ps'))
check(TD('NaT', 'ps'), 0.0, TD('NaT', 'ps'))
check(TD(3, 'ps'), float('nan'), TD('NaT', 'ps'))
check(TD('NaT', 'ps'), float('nan'), TD('NaT', 'ps'))
def test_homogeneous_div(self):
div = self.jit(div_usecase)
def check(a, b, expected):
self.assertPreciseEqual(div(a, b), expected)
# timedelta64 / timedelta64
check(TD(7), TD(3), 7. / 3.)
check(TD(7, 'us'), TD(3, 'ms'), 7. / 3000.)
check(TD(7, 'ms'), TD(3, 'us'), 7000. / 3.)
check(TD(7), TD(0), float('+inf'))
check(TD(-7), TD(0), float('-inf'))
check(TD(0), TD(0), float('nan'))
# NaTs
check(TD('nat'), TD(3), float('nan'))
check(TD(3), TD('nat'), float('nan'))
check(TD('nat'), TD(0), float('nan'))
# Cannot div months with days
with self.assertRaises((TypeError, TypingError)):
div(TD(1, 'M'), TD(1, 'D'))
def test_eq_ne(self):
eq = self.jit(eq_usecase)
ne = self.jit(ne_usecase)
def check(a, b, expected):
expected_val = expected
not_expected_val = not expected
# all NaT comparisons are False, including NaT==NaT,
# conversely != is True
if np.isnat(a) or np.isnat(a):
expected_val = False
not_expected_val = True
self.assertPreciseEqual(eq(a, b), expected_val)
self.assertPreciseEqual(eq(b, a), expected_val)
self.assertPreciseEqual(ne(a, b), not_expected_val)
self.assertPreciseEqual(ne(b, a), not_expected_val)
check(TD(1), TD(2), False)
check(TD(1), TD(1), True)
check(TD(1, 's'), TD(2, 's'), False)
check(TD(1, 's'), TD(1, 's'), True)
check(TD(2000, 's'), TD(2, 's'), False)
check(TD(2000, 'ms'), TD(2, 's'), True)
check(TD(1, 'Y'), TD(12, 'M'), True)
# NaTs
check(TD('Nat'), TD('Nat'), True)
check(TD('Nat', 'ms'), TD('Nat', 's'), True)
check(TD('Nat'), TD(1), False)
# Incompatible units => timedeltas compare unequal
if numpy_version < (1, 25):
check(TD(1, 'Y'), TD(365, 'D'), False)
check(TD(1, 'Y'), TD(366, 'D'), False)
# ... except when both are NaT!
check(TD('NaT', 'W'), TD('NaT', 'D'), True)
else:
# incompatible units raise
# The exception is different depending on Python mode
with self.assertRaises((TypeError, TypingError)):
eq(TD(1, 'Y'), TD(365, 'D'))
with self.assertRaises((TypeError, TypingError)):
ne(TD(1, 'Y'), TD(365, 'D'))
def test_lt_ge(self):
lt = self.jit(lt_usecase)
ge = self.jit(ge_usecase)
def check(a, b, expected):
expected_val = expected
not_expected_val = not expected
# since np 1.16 all NaT magnitude comparisons including equality
# are False (as NaT == NaT is now False)
if np.isnat(a) or np.isnat(a):
expected_val = False
not_expected_val = False
self.assertPreciseEqual(lt(a, b), expected_val)
self.assertPreciseEqual(ge(a, b), not_expected_val)
check(TD(1), TD(2), True)
check(TD(1), TD(1), False)
check(TD(2), TD(1), False)
check(TD(1, 's'), TD(2, 's'), True)
check(TD(1, 's'), TD(1, 's'), False)
check(TD(2, 's'), TD(1, 's'), False)
check(TD(1, 'm'), TD(61, 's'), True)
check(TD(1, 'm'), TD(60, 's'), False)
# NaTs
check(TD('Nat'), TD('Nat'), False)
check(TD('Nat', 'ms'), TD('Nat', 's'), False)
check(TD('Nat'), TD(-(2**63)+1), True)
# Incompatible units => exception raised
with self.assertRaises((TypeError, TypingError)):
lt(TD(1, 'Y'), TD(365, 'D'))
with self.assertRaises((TypeError, TypingError)):
ge(TD(1, 'Y'), TD(365, 'D'))
# ... even when both are NaT
with self.assertRaises((TypeError, TypingError)):
lt(TD('NaT', 'Y'), TD('NaT', 'D'))
with self.assertRaises((TypeError, TypingError)):
ge(TD('NaT', 'Y'), TD('NaT', 'D'))
def test_le_gt(self):
le = self.jit(le_usecase)
gt = self.jit(gt_usecase)
def check(a, b, expected):
expected_val = expected
not_expected_val = not expected
# since np 1.16 all NaT magnitude comparisons including equality
# are False (as NaT == NaT is now False)
if np.isnat(a) or np.isnat(a):
expected_val = False
not_expected_val = False
self.assertPreciseEqual(le(a, b), expected_val)
self.assertPreciseEqual(gt(a, b), not_expected_val)
check(TD(1), TD(2), True)
check(TD(1), TD(1), True)
check(TD(2), TD(1), False)
check(TD(1, 's'), TD(2, 's'), True)
check(TD(1, 's'), TD(1, 's'), True)
check(TD(2, 's'), TD(1, 's'), False)
check(TD(1, 'm'), TD(61, 's'), True)
check(TD(1, 'm'), TD(60, 's'), True)
check(TD(1, 'm'), TD(59, 's'), False)
# NaTs
check(TD('Nat'), TD('Nat'), True)
check(TD('Nat', 'ms'), TD('Nat', 's'), True)
check(TD('Nat'), TD(-(2**63)+1), True)
# Incompatible units => exception raised
with self.assertRaises((TypeError, TypingError)):
le(TD(1, 'Y'), TD(365, 'D'))
with self.assertRaises((TypeError, TypingError)):
gt(TD(1, 'Y'), TD(365, 'D'))
# ... even when both are NaT
with self.assertRaises((TypeError, TypingError)):
le(TD('NaT', 'Y'), TD('NaT', 'D'))
with self.assertRaises((TypeError, TypingError)):
gt(TD('NaT', 'Y'), TD('NaT', 'D'))
def test_pos(self):
pos = self.jit(pos_usecase)
def check(a):
self.assertPreciseEqual(pos(a), +a)
check(TD(3))
check(TD(-4))
check(TD(3, 'ms'))
check(TD(-4, 'ms'))
check(TD('NaT'))
check(TD('NaT', 'ms'))
def test_neg(self):
neg = self.jit(neg_usecase)
def check(a):
self.assertPreciseEqual(neg(a), -a)
check(TD(3))
check(TD(-4))
check(TD(3, 'ms'))
check(TD(-4, 'ms'))
check(TD('NaT'))
check(TD('NaT', 'ms'))
def test_abs(self):
f = self.jit(abs_usecase)
def check(a):
self.assertPreciseEqual(f(a), abs(a))
check(TD(3))
check(TD(-4))
check(TD(3, 'ms'))
check(TD(-4, 'ms'))
check(TD('NaT'))
check(TD('NaT', 'ms'))
def test_hash(self):
f = self.jit(hash_usecase)
def check(a):
if numpy_version >= (2, 2):
# Generic timedeltas (those without a unit)
# are no longer hashable beyond NumPy 2.2
# Non-generic timedeltas will have dtype name
# as timedelta64[<unit>]
if a.dtype.name == 'timedelta64':
return
# If the function is not being compiled in objmode
# then the hash should be equal to the hash of the
# integer representation of the timedelta
if self.jitargs.get('nopython', False):
self.assertPreciseEqual(f(a), a.astype(int))
else:
self.assertPreciseEqual(f(a), hash(a))
else:
self.assertPreciseEqual(f(a), hash(a))
TD_CASES = ((3,), (-4,), (3, 'ms'), (-4, 'ms'), (27, 'D'),
(2, 'D'), (2, 'W'), (2, 'Y'), (3, 'W'),
(365, 'D'), (10000, 'D'), (-10000, 'D'),
('NaT',), ('NaT', 'ms'), ('NaT', 'D'), (-1,))
DT_CASES = (('2014',), ('2016',), ('2000',), ('2014-02',),
('2014-03',), ('2014-04',), ('2016-02',), ('2000-12-31',),
('2014-01-16',), ('2014-01-05',), ('2014-01-07',),
('2014-01-06',), ('2014-02-02',), ('2014-02-27',),
('2014-02-16',), ('2014-03-01',), ('2000-01-01T01:02:03.002Z',),
('2000-01-01T01:02:03Z',), ('NaT',))
for case, typ in zip(TD_CASES + DT_CASES,
(TD,) * len(TD_CASES) + (DT,) * len(TD_CASES)):
check(typ(*case))
if numpy_version >= (2, 2):
with self.assertRaises(ValueError) as raises:
f(TD(3))
self.assertIn("Can't hash generic timedelta64", str(raises.exception))
def _test_min_max(self, usecase):
f = self.jit(usecase)
def check(a, b):
self.assertPreciseEqual(f(a, b), usecase(a, b))
for cases in (
(TD(0), TD(1), TD(2), TD('NaT')),
(TD(0, 's'), TD(1, 's'), TD(2, 's'), TD('NaT', 's')),
):
for a, b in itertools.product(cases, cases):
check(a, b)
def test_min(self):
self._test_min_max(min_usecase)
def test_max(self):
self._test_min_max(max_usecase)
| TestTimedeltaArithmetic |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/dataproc.py | {
"start": 52843,
"end": 87303
} | class ____(GoogleBaseAsyncHook):
"""
Asynchronous interaction with Google Cloud Dataproc APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
sync_hook_class = DataprocHook
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(gcp_conn_id=gcp_conn_id, impersonation_chain=impersonation_chain, **kwargs)
self._cached_client: JobControllerAsyncClient | None = None
async def get_cluster_client(self, region: str | None = None) -> ClusterControllerAsyncClient:
"""Create a ClusterControllerAsyncClient."""
client_options = None
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-dataproc.googleapis.com:443")
sync_hook = await self.get_sync_hook()
return ClusterControllerAsyncClient(
credentials=sync_hook.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
async def get_template_client(self, region: str | None = None) -> WorkflowTemplateServiceAsyncClient:
"""Create a WorkflowTemplateServiceAsyncClient."""
client_options = None
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-dataproc.googleapis.com:443")
sync_hook = await self.get_sync_hook()
return WorkflowTemplateServiceAsyncClient(
credentials=sync_hook.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
async def get_job_client(self, region: str | None = None) -> JobControllerAsyncClient:
"""Create a JobControllerAsyncClient."""
if self._cached_client is None:
client_options = None
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-dataproc.googleapis.com:443")
sync_hook = await self.get_sync_hook()
self._cached_client = JobControllerAsyncClient(
credentials=sync_hook.get_credentials(),
client_info=CLIENT_INFO,
client_options=client_options,
)
return self._cached_client
async def get_batch_client(self, region: str | None = None) -> BatchControllerAsyncClient:
"""Create a BatchControllerAsyncClient."""
client_options = None
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-dataproc.googleapis.com:443")
sync_hook = await self.get_sync_hook()
return BatchControllerAsyncClient(
credentials=sync_hook.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
async def get_operations_client(self, region: str) -> OperationsClient:
"""Create a OperationsClient."""
template_client = await self.get_template_client(region=region)
return template_client.transport.operations_client
@GoogleBaseHook.fallback_to_default_project_id
async def get_cluster(
self,
region: str,
cluster_name: str,
project_id: str,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Cluster:
"""
Get a cluster.
:param region: Cloud Dataproc region in which to handle the request.
:param cluster_name: Name of the cluster to get.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = await self.get_cluster_client(region=region)
result = await client.get_cluster(
request={"project_id": project_id, "region": region, "cluster_name": cluster_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
async def create_cluster(
self,
region: str,
project_id: str,
cluster_name: str,
cluster_config: dict | Cluster | None = None,
virtual_cluster_config: dict | None = None,
labels: dict[str, str] | None = None,
request_id: str | None = None,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""
Create a cluster in a project.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region in which to handle the request.
:param cluster_name: Name of the cluster to create.
:param labels: Labels that will be assigned to created cluster.
:param cluster_config: The cluster config to create. If a dict is
provided, it must be of the same form as the protobuf message
:class:`~google.cloud.dataproc_v1.types.ClusterConfig`.
:param virtual_cluster_config: The virtual cluster config, used when
creating a Dataproc cluster that does not directly control the
underlying compute resources, for example, when creating a
Dataproc-on-GKE cluster with
:class:`~google.cloud.dataproc_v1.types.VirtualClusterConfig`.
:param request_id: A unique id used to identify the request. If the
server receives two *CreateClusterRequest* requests with the same
ID, the second request will be ignored, and an operation created
for the first one and stored in the backend is returned.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
# Dataproc labels must conform to the following regex:
# [a-z]([-a-z0-9]*[a-z0-9])? (current airflow version string follows
# semantic versioning spec: x.y.z).
labels = labels or {}
labels.update({"airflow-version": "v" + airflow_version.replace(".", "-").replace("+", "-")})
cluster = {
"project_id": project_id,
"cluster_name": cluster_name,
}
if virtual_cluster_config is not None:
cluster["virtual_cluster_config"] = virtual_cluster_config # type: ignore
if cluster_config is not None:
cluster["config"] = cluster_config # type: ignore
cluster["labels"] = labels # type: ignore
client = await self.get_cluster_client(region=region)
result = await client.create_cluster(
request={
"project_id": project_id,
"region": region,
"cluster": cluster,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
async def delete_cluster(
self,
region: str,
cluster_name: str,
project_id: str,
cluster_uuid: str | None = None,
request_id: str | None = None,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""
Delete a cluster in a project.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region in which to handle the request.
:param cluster_name: Name of the cluster to delete.
:param cluster_uuid: If specified, the RPC should fail if cluster with
the UUID does not exist.
:param request_id: A unique id used to identify the request. If the
server receives two *DeleteClusterRequest* requests with the same
ID, the second request will be ignored, and an operation created
for the first one and stored in the backend is returned.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = await self.get_cluster_client(region=region)
result = await client.delete_cluster(
request={
"project_id": project_id,
"region": region,
"cluster_name": cluster_name,
"cluster_uuid": cluster_uuid,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
async def diagnose_cluster(
self,
region: str,
cluster_name: str,
project_id: str,
tarball_gcs_dir: str | None = None,
diagnosis_interval: dict | Interval | None = None,
jobs: MutableSequence[str] | None = None,
yarn_application_ids: MutableSequence[str] | None = None,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""
Get cluster diagnostic information.
After the operation completes, the response contains the Cloud Storage URI of the diagnostic output report containing a summary of collected diagnostics.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region in which to handle the request.
:param cluster_name: Name of the cluster.
:param tarball_gcs_dir: The output Cloud Storage directory for the diagnostic tarball. If not specified, a task-specific directory in the cluster's staging bucket will be used.
:param diagnosis_interval: Time interval in which diagnosis should be carried out on the cluster.
:param jobs: Specifies a list of jobs on which diagnosis is to be performed. Format: `projects/{project}/regions/{region}/jobs/{job}`
:param yarn_application_ids: Specifies a list of yarn applications on which diagnosis is to be performed.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = await self.get_cluster_client(region=region)
result = await client.diagnose_cluster(
request={
"project_id": project_id,
"region": region,
"cluster_name": cluster_name,
"tarball_gcs_dir": tarball_gcs_dir,
"diagnosis_interval": diagnosis_interval,
"jobs": jobs,
"yarn_application_ids": yarn_application_ids,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
async def list_clusters(
self,
region: str,
filter_: str,
project_id: str,
page_size: int | None = None,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
List all regions/{region}/clusters in a project.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param filter_: To constrain the clusters to. Case-sensitive.
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed
per-resource, this parameter does not affect the return value. If
page streaming is performed per-page, this determines the maximum
number of resources in a page.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = await self.get_cluster_client(region=region)
result = await client.list_clusters(
request={"project_id": project_id, "region": region, "filter": filter_, "page_size": page_size},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
async def update_cluster(
self,
cluster_name: str,
cluster: dict | Cluster,
update_mask: dict | FieldMask,
project_id: str,
region: str,
graceful_decommission_timeout: dict | Duration | None = None,
request_id: str | None = None,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""
Update a cluster in a project.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param cluster_name: The cluster name.
:param cluster: Changes to the cluster. If a dict is provided, it must
be of the same form as the protobuf message
:class:`~google.cloud.dataproc_v1.types.Cluster`.
:param update_mask: Specifies the path, relative to ``Cluster``, of the
field to update. For example, to change the number of workers in a
cluster to 5, this would be specified as
``config.worker_config.num_instances``, and the ``PATCH`` request
body would specify the new value:
.. code-block:: python
{"config": {"workerConfig": {"numInstances": "5"}}}
Similarly, to change the number of preemptible workers in a cluster
to 5, this would be ``config.secondary_worker_config.num_instances``
and the ``PATCH`` request body would be:
.. code-block:: python
{"config": {"secondaryWorkerConfig": {"numInstances": "5"}}}
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1.types.FieldMask`.
:param graceful_decommission_timeout: Timeout for graceful YARN
decommissioning. Graceful decommissioning allows removing nodes from
the cluster without interrupting jobs in progress. Timeout specifies
how long to wait for jobs in progress to finish before forcefully
removing nodes (and potentially interrupting jobs). Default timeout
is 0 (for forceful decommission), and the maximum allowed timeout is
one day.
Only supported on Dataproc image versions 1.2 and higher.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1.types.Duration`.
:param request_id: A unique id used to identify the request. If the
server receives two *UpdateClusterRequest* requests with the same
ID, the second request will be ignored, and an operation created
for the first one and stored in the backend is returned.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
client = await self.get_cluster_client(region=region)
operation = await client.update_cluster(
request={
"project_id": project_id,
"region": region,
"cluster_name": cluster_name,
"cluster": cluster,
"update_mask": update_mask,
"graceful_decommission_timeout": graceful_decommission_timeout,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
async def create_workflow_template(
self,
template: dict | WorkflowTemplate,
project_id: str,
region: str,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> WorkflowTemplate:
"""
Create a new workflow template.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param template: The Dataproc workflow template to create. If a dict is
provided, it must be of the same form as the protobuf message
WorkflowTemplate.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = await self.get_template_client(region)
parent = f"projects/{project_id}/regions/{region}"
return await client.create_workflow_template(
request={"parent": parent, "template": template}, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
async def instantiate_workflow_template(
self,
template_name: str,
project_id: str,
region: str,
version: int | None = None,
request_id: str | None = None,
parameters: dict[str, str] | None = None,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""
Instantiate a template and begins execution.
:param template_name: Name of template to instantiate.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param version: Version of workflow template to instantiate. If
specified, the workflow will be instantiated only if the current
version of the workflow template has the supplied version. This
option cannot be used to instantiate a previous version of workflow
template.
:param request_id: A tag that prevents multiple concurrent workflow
instances with the same tag from running. This mitigates risk of
concurrent instances started due to retries.
:param parameters: Map from parameter names to values that should be
used for those parameters. Values may not exceed 100 characters.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = await self.get_template_client(region)
name = f"projects/{project_id}/regions/{region}/workflowTemplates/{template_name}"
operation = await client.instantiate_workflow_template(
request={"name": name, "version": version, "request_id": request_id, "parameters": parameters},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
async def instantiate_inline_workflow_template(
self,
template: dict | WorkflowTemplate,
project_id: str,
region: str,
request_id: str | None = None,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""
Instantiate a template and begin execution.
:param template: The workflow template to instantiate. If a dict is
provided, it must be of the same form as the protobuf message
WorkflowTemplate.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param request_id: A tag that prevents multiple concurrent workflow
instances with the same tag from running. This mitigates risk of
concurrent instances started due to retries.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = await self.get_template_client(region)
parent = f"projects/{project_id}/regions/{region}"
operation = await client.instantiate_inline_workflow_template(
request={"parent": parent, "template": template, "request_id": request_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
async def get_operation(self, region, operation_name):
operations_client = await self.get_operations_client(region)
return await operations_client.get_operation(name=operation_name)
@GoogleBaseHook.fallback_to_default_project_id
async def get_job(
self,
job_id: str,
project_id: str,
region: str,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Job:
"""
Get the resource representation for a job in a project.
:param job_id: Dataproc job ID.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = await self.get_job_client(region=region)
job = await client.get_job(
request={"project_id": project_id, "region": region, "job_id": job_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return job
@GoogleBaseHook.fallback_to_default_project_id
async def submit_job(
self,
job: dict | Job,
project_id: str,
region: str,
request_id: str | None = None,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Job:
"""
Submit a job to a cluster.
:param job: The job resource. If a dict is provided, it must be of the
same form as the protobuf message Job.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param request_id: A tag that prevents multiple concurrent workflow
instances with the same tag from running. This mitigates risk of
concurrent instances started due to retries.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = await self.get_job_client(region=region)
return await client.submit_job(
request={"project_id": project_id, "region": region, "job": job, "request_id": request_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
async def cancel_job(
self,
job_id: str,
project_id: str,
region: str | None = None,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Job:
"""
Start a job cancellation request.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param job_id: The job ID.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = await self.get_job_client(region=region)
job = await client.cancel_job(
request={"project_id": project_id, "region": region, "job_id": job_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return job
@GoogleBaseHook.fallback_to_default_project_id
async def create_batch(
self,
region: str,
project_id: str,
batch: dict | Batch,
batch_id: str | None = None,
request_id: str | None = None,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""
Create a batch workload.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param batch: The batch to create.
:param batch_id: The ID to use for the batch, which will become the
final component of the batch's resource name. This value must be of
4-63 characters. Valid characters are ``[a-z][0-9]-``.
:param request_id: A unique id used to identify the request. If the
server receives two *CreateBatchRequest* requests with the same
ID, the second request will be ignored, and an operation created
for the first one and stored in the backend is returned.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = await self.get_batch_client(region)
parent = f"projects/{project_id}/regions/{region}"
result = await client.create_batch(
request={
"parent": parent,
"batch": batch,
"batch_id": batch_id,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
async def delete_batch(
self,
batch_id: str,
region: str,
project_id: str,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete the batch workload resource.
:param batch_id: The batch ID.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = await self.get_batch_client(region)
name = f"projects/{project_id}/locations/{region}/batches/{batch_id}"
await client.delete_batch(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
async def get_batch(
self,
batch_id: str,
region: str,
project_id: str,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Batch:
"""
Get the batch workload resource representation.
:param batch_id: The batch ID.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = await self.get_batch_client(region)
name = f"projects/{project_id}/locations/{region}/batches/{batch_id}"
result = await client.get_batch(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
async def list_batches(
self,
region: str,
project_id: str,
page_size: int | None = None,
page_token: str | None = None,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
filter: str | None = None,
order_by: str | None = None,
):
"""
List batch workloads.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param page_size: The maximum number of batches to return in each
response. The service may return fewer than this value. The default
page size is 20; the maximum page size is 1000.
:param page_token: A page token received from a previous ``ListBatches``
call. Provide this token to retrieve the subsequent page.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param filter: Result filters as specified in ListBatchesRequest
:param order_by: How to order results as specified in ListBatchesRequest
"""
client = await self.get_batch_client(region)
parent = f"projects/{project_id}/regions/{region}"
result = await client.list_batches(
request={
"parent": parent,
"page_size": page_size,
"page_token": page_token,
"filter": filter,
"order_by": order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
| DataprocAsyncHook |
python | getsentry__sentry | src/sentry/users/services/user_option/model.py | {
"start": 315,
"end": 500
} | class ____(RpcModel):
id: int = -1
user_id: int = -1
value: Any = None
key: str = ""
project_id: int | None = None
organization_id: int | None = None
| RpcUserOption |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-iterable/source_iterable/streams.py | {
"start": 17985,
"end": 18102
} | class ____(IterableExportEventsStreamAdjustableRange):
data_field = "inboxMessageImpression"
| InboxMessageImpression |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_batch.py | {
"start": 7699,
"end": 10084
} | class ____(GoogleCloudBaseOperator):
"""
List Cloud Batch jobs.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param filter: The filter based on which to list the jobs. If left empty, all the jobs are listed.
:param limit: The number of jobs to list. If left empty,
all the jobs matching the filter will be returned.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"region",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
gcp_conn_id: str = "google_cloud_default",
filter: str | None = None,
limit: int | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.filter = filter
self.limit = limit
if limit is not None and limit < 0:
raise AirflowException("The limit for the list jobs request should be greater or equal to zero")
def execute(self, context: Context):
hook: CloudBatchHook = CloudBatchHook(self.gcp_conn_id, self.impersonation_chain)
jobs_list = hook.list_jobs(
region=self.region, project_id=self.project_id, filter=self.filter, limit=self.limit
)
return [Job.to_dict(job) for job in jobs_list]
| CloudBatchListJobsOperator |
python | kevin1024__vcrpy | vcr/errors.py | {
"start": 0,
"end": 1849
} | class ____(Exception):
def __init__(self, *args, **kwargs):
self.cassette = kwargs["cassette"]
self.failed_request = kwargs["failed_request"]
message = self._get_message(kwargs["cassette"], kwargs["failed_request"])
super().__init__(message)
@staticmethod
def _get_message(cassette, failed_request):
"""Get the final message related to the exception"""
# Get the similar requests in the cassette that
# have match the most with the request.
best_matches = cassette.find_requests_with_most_matches(failed_request)
if best_matches:
# Build a comprehensible message to put in the exception.
best_matches_msg = (
f"Found {len(best_matches)} similar requests "
f"with {len(best_matches[0][2])} different matcher(s) :\n"
)
for idx, best_match in enumerate(best_matches, start=1):
request, succeeded_matchers, failed_matchers_assertion_msgs = best_match
best_matches_msg += (
f"\n{idx} - ({request!r}).\n"
f"Matchers succeeded : {succeeded_matchers}\n"
"Matchers failed :\n"
)
for failed_matcher, assertion_msg in failed_matchers_assertion_msgs:
best_matches_msg += f"{failed_matcher} - assertion failure :\n{assertion_msg}\n"
else:
best_matches_msg = "No similar requests, that have not been played, found."
return (
f"Can't overwrite existing cassette ({cassette._path!r}) in "
f"your current record mode ({cassette.record_mode!r}).\n"
f"No match for the request ({failed_request!r}) was found.\n"
f"{best_matches_msg}"
)
| CannotOverwriteExistingCassetteException |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/tests/cloud/test_jobs.py | {
"start": 17421,
"end": 24020
} | class ____:
async def test_run_steps_override_error(self, dbt_cloud_credentials):
with pytest.raises(ValueError, match="Do not set `steps_override"):
await retry_dbt_cloud_job_run_subset_and_wait_for_completion(
dbt_cloud_credentials=dbt_cloud_credentials,
trigger_job_run_options=TriggerJobRunOptions(steps_override=["step"]),
run_id=12,
)
@pytest.mark.parametrize(
"trigger_job_run_options",
[TriggerJobRunOptions(timeout_seconds_override=42), None],
)
@pytest.mark.parametrize(
"exe_command",
["run", "run-operation"],
)
async def test_retry_run(
self,
trigger_job_run_options,
exe_command,
dbt_cloud_credentials,
):
with respx.mock(using="httpx", assert_all_called=False) as respx_mock:
respx_mock.route(host="127.0.0.1").pass_through()
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/",
headers=HEADERS,
).mock(
return_value=Response(
200,
json={
"data": {
"id": 10000,
"generate_docs": False,
"generate_sources": False,
}
},
)
)
# mock get_dbt_cloud_run_info
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/",
headers=HEADERS,
).mock(
return_value=Response(
200,
json={
"data": {
"id": 10000,
"status": 20, # failed status
"run_steps": [
{
"id": 432100123,
"run_id": 10000,
"account_id": 123456789,
"index": 1,
"name": "Clone Git Repository",
"status_humanized": "Success",
},
{
"id": 432100124,
"run_id": 10000,
"account_id": 123456789,
"index": 2,
"name": "Create Profile from Connection Snowflake ",
"status_humanized": "Success",
},
{
"id": 432100125,
"run_id": 10000,
"account_id": 123456789,
"index": 3,
"name": "Invoke dbt with `dbt deps`",
"status_humanized": "Success",
},
{
"run_id": 10000,
"account_id": 123456789,
"index": 4,
"name": f"Invoke dbt with `dbt {exe_command}`",
"status_humanized": "Error",
},
],
"job_id": "1",
}
},
)
)
# mock list_dbt_cloud_run_artifacts
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/",
headers=HEADERS,
).mock(return_value=Response(200, json={"data": ["run_results.json"]}))
# mock get_dbt_cloud_run_artifact
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/run_results.json", # noqa
headers=HEADERS,
).mock(
return_value=Response(
200,
json={
"metadata": {"env": {"DBT_CLOUD_JOB_ID": "1"}},
"results": [
{
"status": "fail",
"message": "FAIL 1",
"failures": None,
"unique_id": "model.jaffle_shop.stg_customers",
},
],
},
)
)
respx_mock.post(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/",
headers=HEADERS,
).mock(
return_value=Response(
200, json={"data": {"id": 10000, "project_id": 12345}}
)
)
with pytest.raises(DbtCloudJobRunFailed, match="Triggered job run with"):
await retry_dbt_cloud_job_run_subset_and_wait_for_completion(
dbt_cloud_credentials=dbt_cloud_credentials,
run_id=10000,
trigger_job_run_options=trigger_job_run_options,
)
@pytest.fixture
def real_dbt_cloud_job_id():
job_id = os.environ.get("DBT_CLOUD_JOB_ID")
if not job_id:
pytest.skip("DBT_CLOUD_JOB_ID not set")
return job_id
@pytest.fixture
def real_dbt_cloud_api_key():
api_key = os.environ.get("DBT_CLOUD_API_KEY")
if not api_key:
pytest.skip("DBT_CLOUD_API_KEY not set")
return api_key
@pytest.fixture
def real_dbt_cloud_account_id():
account_id = os.environ.get("DBT_CLOUD_ACCOUNT_ID")
if not account_id:
pytest.skip("DBT_CLOUD_ACCOUNT_ID not set")
return account_id
@pytest.mark.integration
async def test_run_real_dbt_cloud_job(
real_dbt_cloud_job_id, real_dbt_cloud_api_key, real_dbt_cloud_account_id
):
result = await trigger_dbt_cloud_job_run_and_wait_for_completion(
dbt_cloud_credentials=DbtCloudCredentials(
api_key=real_dbt_cloud_api_key, account_id=real_dbt_cloud_account_id
),
job_id=real_dbt_cloud_job_id,
poll_frequency_seconds=1,
)
assert result.get("status") == 10
| TestRetryDbtCloudRunJobSubsetAndWaitForCompletion |
python | apache__airflow | providers/apache/flink/tests/unit/apache/flink/sensors/test_flink_kubernetes.py | {
"start": 38775,
"end": 54014
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(conn_id="kubernetes_default", conn_type="kubernetes", extra=json.dumps({}))
)
create_connection_without_db(
Connection(
conn_id="kubernetes_default",
conn_type="kubernetes",
extra=json.dumps({}),
)
)
create_connection_without_db(
Connection(
conn_id="kubernetes_with_namespace",
conn_type="kubernetes",
extra=json.dumps({"extra__kubernetes__namespace": "mock_namespace"}),
)
)
args = {"owner": "airflow", "start_date": timezone.datetime(2020, 2, 1)}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_READY_CLUSTER,
)
def test_cluster_ready_state(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example", dag=self.dag, task_id="test_task_id"
)
assert sensor.poke(None)
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="flink.apache.org",
name="flink-stream-example",
namespace="default",
plural="flinkdeployments",
version="v1beta1",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_ERROR_CLUSTER,
)
def test_cluster_error_state(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example", dag=self.dag, task_id="test_task_id"
)
with pytest.raises(AirflowException):
sensor.poke(None)
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="flink.apache.org",
name="flink-stream-example",
namespace="default",
plural="flinkdeployments",
version="v1beta1",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_NO_STATE_CLUSTER,
)
def test_new_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example", dag=self.dag, task_id="test_task_id"
)
assert not sensor.poke(None)
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="flink.apache.org",
name="flink-stream-example",
namespace="default",
plural="flinkdeployments",
version="v1beta1",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_DEPLOYED_NOT_READY_CLUSTER,
)
def test_deployed_not_ready_cluster(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example", dag=self.dag, task_id="test_task_id"
)
assert not sensor.poke(None)
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="flink.apache.org",
name="flink-stream-example",
namespace="default",
plural="flinkdeployments",
version="v1beta1",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_DEPLOYING_CLUSTER,
)
def test_deploying_cluster(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example", dag=self.dag, task_id="test_task_id"
)
assert not sensor.poke(None)
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="flink.apache.org",
name="flink-stream-example",
namespace="default",
plural="flinkdeployments",
version="v1beta1",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_MISSING_CLUSTER,
)
def test_missing_cluster(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example", dag=self.dag, task_id="test_task_id"
)
with pytest.raises(AirflowException):
sensor.poke(None)
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="flink.apache.org",
name="flink-stream-example",
namespace="default",
plural="flinkdeployments",
version="v1beta1",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_READY_CLUSTER,
)
def test_namespace_from_sensor(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example",
dag=self.dag,
kubernetes_conn_id="kubernetes_with_namespace",
namespace="sensor_namespace",
task_id="test_task_id",
)
sensor.poke(None)
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="flink.apache.org",
name="flink-stream-example",
namespace="sensor_namespace",
plural="flinkdeployments",
version="v1beta1",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_READY_CLUSTER,
)
def test_api_group_and_version_from_sensor(self, mock_get_namespaced_crd, mock_kubernetes_hook):
api_group = "flink.apache.org"
api_version = "v1beta1"
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example",
dag=self.dag,
kubernetes_conn_id="kubernetes_with_namespace",
task_id="test_task_id",
api_group=api_group,
api_version=api_version,
)
sensor.poke(None)
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group=api_group,
name="flink-stream-example",
namespace="mock_namespace",
plural="flinkdeployments",
version=api_version,
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_READY_CLUSTER,
)
def test_namespace_from_connection(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example",
dag=self.dag,
kubernetes_conn_id="kubernetes_with_namespace",
task_id="test_task_id",
)
sensor.poke(None)
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="flink.apache.org",
name="flink-stream-example",
namespace="mock_namespace",
plural="flinkdeployments",
version="v1beta1",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_ERROR_CLUSTER,
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_pod_logs",
return_value=TEST_POD_LOGS,
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_namespaced_pod_list",
return_value=TASK_MANAGER_POD_LIST,
)
def test_driver_logging_failure(
self, mock_namespaced_pod_list, mock_pod_logs, mock_namespaced_crd, mock_kube_conn, caplog
):
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example",
attach_log=True,
dag=self.dag,
task_id="test_task_id",
)
with pytest.raises(AirflowException):
sensor.poke(None)
mock_namespaced_pod_list.assert_called_once_with(
namespace="default", watch=False, label_selector="component=taskmanager,app=flink-stream-example"
)
mock_pod_logs.assert_called_once_with("basic-example-taskmanager-1-1", namespace="default")
assert TEST_POD_LOG_RESULT in caplog.messages
mock_namespaced_crd.assert_called_once_with(
group="flink.apache.org",
version="v1beta1",
namespace="default",
plural="flinkdeployments",
name="flink-stream-example",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_READY_CLUSTER,
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_pod_logs",
return_value=TEST_POD_LOGS,
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_namespaced_pod_list",
return_value=TASK_MANAGER_POD_LIST,
)
def test_driver_logging_completed(
self, mock_namespaced_pod_list, mock_pod_logs, mock_namespaced_crd, mock_kube_conn, caplog
):
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example",
attach_log=True,
dag=self.dag,
task_id="test_task_id",
)
sensor.poke(None)
mock_namespaced_pod_list.assert_called_once_with(
namespace="default", watch=False, label_selector="component=taskmanager,app=flink-stream-example"
)
mock_pod_logs.assert_called_once_with("basic-example-taskmanager-1-1", namespace="default")
assert TEST_POD_LOG_RESULT in caplog.messages
mock_namespaced_crd.assert_called_once_with(
group="flink.apache.org",
version="v1beta1",
namespace="default",
plural="flinkdeployments",
name="flink-stream-example",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_READY_CLUSTER,
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_pod_logs",
return_value=TEST_POD_LOGS,
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_namespaced_pod_list",
return_value=TASK_MANAGER_POD_LIST,
)
def test_logging_taskmanager_from_taskmanager_namespace_when_namespace_is_set(
self, mock_namespaced_pod_list, mock_pod_logs, mock_namespaced_crd, mock_kube_conn
):
namespace = "different-namespace123456"
namespae_name = "test123"
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example",
namespace=namespace,
taskmanager_pods_namespace=namespae_name,
attach_log=True,
dag=self.dag,
task_id="test_task_id",
)
sensor.poke(context=None)
mock_namespaced_pod_list.assert_called_once_with(
namespace=namespae_name,
watch=False,
label_selector="component=taskmanager,app=flink-stream-example",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_READY_CLUSTER,
)
@patch("logging.Logger.info")
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_pod_logs",
return_value=TEST_POD_LOGS,
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_namespaced_pod_list",
return_value=TASK_MANAGER_POD_LIST,
)
def test_logging_taskmanager_from_non_default_namespace(
self, mock_namespaced_pod_list, mock_pod_logs, mock_namespaced_crd, mock_kube_conn, caplog
):
namespae_name = "test123"
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example",
namespace=namespae_name,
attach_log=True,
dag=self.dag,
task_id="test_task_id",
)
sensor.poke(context=None)
mock_namespaced_pod_list.assert_called_once_with(
namespace=namespae_name,
watch=False,
label_selector="component=taskmanager,app=flink-stream-example",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_READY_CLUSTER,
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_pod_logs",
side_effect=ApiException("Test api exception"),
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_namespaced_pod_list",
return_value=TASK_MANAGER_POD_LIST,
)
def test_driver_logging_error(
self, mock_namespaced_pod_list, mock_pod_logs, mock_namespaced_crd, mock_kube_conn, caplog
):
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example",
attach_log=True,
dag=self.dag,
task_id="test_task_id",
)
sensor.poke(None)
assert (ANY, logging.WARNING, ANY) in caplog.record_tuples, "Expected something logged at warning"
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_ERROR_CLUSTER,
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_pod_logs",
side_effect=ApiException("Test api exception"),
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_namespaced_pod_list",
return_value=TASK_MANAGER_POD_LIST,
)
def test_driver_logging_error_missing_state(
self, mock_namespaced_pod_list, mock_pod_logs, mock_namespaced_crd, mock_kube_conn, caplog
):
sensor = FlinkKubernetesSensor(
application_name="flink-stream-example",
attach_log=True,
dag=self.dag,
task_id="test_task_id",
)
with pytest.raises(AirflowException):
sensor.poke(None)
assert (ANY, logging.WARNING, ANY) in caplog.record_tuples, "Expected something logged at warning"
| TestFlinkKubernetesSensor |
python | PyCQA__pylint | tests/functional/u/used/used_before_assignment_py311.py | {
"start": 247,
"end": 541
} | class ____(Enum):
"""A lovely enum."""
VAL1 = 1
VAL2 = 2
def do_thing(val: MyEnum) -> None:
"""Do a thing."""
if val is MyEnum.VAL1:
note = 'got 1'
elif val is MyEnum.VAL2:
note = 'got 2'
else:
assert_never(val)
print('Note:', note)
| MyEnum |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/triggers/bedrock.py | {
"start": 5423,
"end": 7332
} | class ____(AwsBaseWaiterTrigger):
"""
Trigger when a Bedrock ingestion job reaches the COMPLETE state.
:param knowledge_base_id: The unique identifier of the knowledge base for which to get information.
:param data_source_id: The unique identifier of the data source in the ingestion job.
:param ingestion_job_id: The unique identifier of the ingestion job.
:param waiter_delay: The amount of time in seconds to wait between attempts. (default: 60)
:param waiter_max_attempts: The maximum number of attempts to be made. (default: 10)
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
*,
knowledge_base_id: str,
data_source_id: str,
ingestion_job_id: str,
waiter_delay: int = 60,
waiter_max_attempts: int = 10,
aws_conn_id: str | None = None,
) -> None:
super().__init__(
serialized_fields={
"knowledge_base_id": knowledge_base_id,
"data_source_id": data_source_id,
"ingestion_job_id": ingestion_job_id,
},
waiter_name="ingestion_job_complete",
waiter_args={
"knowledgeBaseId": knowledge_base_id,
"dataSourceId": data_source_id,
"ingestionJobId": ingestion_job_id,
},
failure_message="Bedrock ingestion job creation failed.",
status_message="Status of Bedrock ingestion job is",
status_queries=["status"],
return_key="ingestion_job_id",
return_value=ingestion_job_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return BedrockAgentHook(aws_conn_id=self.aws_conn_id)
| BedrockIngestionJobTrigger |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1480339,
"end": 1480566
} | class ____(TopLevelSpec):
"""TopLevelRepeatSpec schema wrapper."""
_schema = {"$ref": "#/definitions/TopLevelRepeatSpec"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| TopLevelRepeatSpec |
python | tensorflow__tensorflow | tensorflow/python/compiler/mlir/mlir_test.py | {
"start": 1214,
"end": 4233
} | class ____(test.TestCase):
def testImport(self):
"""Tests the basic flow of `tf.mlir.experimental.convert_graph_def`."""
mlir_module = mlir.convert_graph_def('')
# An empty graph should contain at least an empty main function.
self.assertIn('func @main', mlir_module)
def testInvalidPbtxt(self):
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Could not parse input proto'):
mlir.convert_graph_def('some invalid proto')
def testGraphDefToTf(self):
"""Tests the basic flow of `tf.mlir.experimental.convert_graph_def`
with tf-standard-pipeline converting all the way to the TF dialect.
"""
tensor_shape = (10, 10)
@def_function.function(
input_signature=(
tensor_spec.TensorSpec(shape=tensor_shape, dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=tensor_shape, dtype=dtypes.float32),
))
def add_func(lhs, rhs):
return math_ops.add(lhs, rhs)
tf_graph_def = add_func.get_concrete_function().graph.as_graph_def()
mlir_tf = import_graphdef(
tf_graph_def,
"tf-standard-pipeline",
False,
input_names=["lhs", "rhs"],
input_data_types=["DT_FLOAT", "DT_FLOAT"],
input_data_shapes=["10,10", "10,10"],
output_names=["Add"])
# Check whether the mlir-function signature has the mentioned
# inputs and outputs.
self.assertRegex(
mlir_tf,
r"func @main\(%arg0: tensor<10x10xf32>, %arg1: tensor<10x10xf32>")
self.assertRegex(mlir_tf, r'inputs = "lhs,rhs"')
self.assertRegex(mlir_tf, r'outputs = "Add"')
# Same check with scalar input (empty input shape).
mlir_tf = import_graphdef(
tf_graph_def,
"tf-standard-pipeline",
False,
input_names=["lhs", "rhs"],
input_data_types=["DT_FLOAT", "DT_FLOAT"],
input_data_shapes=["", ""],
output_names=["Add"])
self.assertRegex(mlir_tf,
r"func @main\(%arg0: tensor<f32>, %arg1: tensor<f32>")
# Test invalid test cases where no. of input names is invalid/wrong.
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Length of input node array and data type doesn't match"):
import_graphdef(
tf_graph_def,
"tf-standard-pipeline",
False,
input_names=["lhs"],
input_data_types=["DT_FLOAT", "DT_FLOAT"],
input_data_shapes=["10,10", "10,10"],
output_names=["Add"])
# Test invalid test cases where the input shapes argument is wrong.
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Dimensions must be equal"):
import_graphdef(
tf_graph_def,
"tf-standard-pipeline",
False,
input_names=["lhs", "rhs"],
input_data_types=["DT_FLOAT", "DT_FLOAT"],
input_data_shapes=["10,11", "10,10"],
output_names=["Add"])
| MLIRGraphDefImportTest |
python | spack__spack | lib/spack/spack/test/cmd/uninstall.py | {
"start": 7303,
"end": 15146
} | class ____:
"""Tests an installation with two environments e1 and e2, which each have
shared package installations:
e1 has diamond-link-left -> diamond-link-bottom
e2 has diamond-link-right -> diamond-link-bottom
"""
env = SpackCommand("env")
add = SpackCommand("add")
concretize = SpackCommand("concretize")
find = SpackCommand("find")
@pytest.fixture(scope="function")
def environment_setup(self, mock_packages, mutable_database, install_mockery):
TestUninstallFromEnv.env("create", "e1")
e1 = spack.environment.read("e1")
with e1:
TestUninstallFromEnv.add("diamond-link-left")
TestUninstallFromEnv.add("diamond-link-bottom")
TestUninstallFromEnv.concretize()
install("--fake")
TestUninstallFromEnv.env("create", "e2")
e2 = spack.environment.read("e2")
with e2:
TestUninstallFromEnv.add("diamond-link-right")
TestUninstallFromEnv.add("diamond-link-bottom")
TestUninstallFromEnv.concretize()
install("--fake")
yield "environment_setup"
TestUninstallFromEnv.env("rm", "e1", "-y")
TestUninstallFromEnv.env("rm", "e2", "-y")
def test_basic_env_sanity(self, environment_setup):
for env_name in ["e1", "e2"]:
e = spack.environment.read(env_name)
with e:
for _, concretized_spec in e.concretized_specs():
assert concretized_spec.installed
def test_uninstall_force_dependency_shared_between_envs(self, environment_setup):
"""If you "spack uninstall -f --dependents diamond-link-bottom" from
e1, then all packages should be uninstalled (but not removed) from
both e1 and e2.
"""
e1 = spack.environment.read("e1")
with e1:
uninstall("-f", "-y", "--dependents", "diamond-link-bottom")
# The specs should still be in the environment, since
# --remove was not specified
assert set(root.name for (root, _) in e1.concretized_specs()) == set(
["diamond-link-left", "diamond-link-bottom"]
)
for _, concretized_spec in e1.concretized_specs():
assert not concretized_spec.installed
# Everything in e2 depended on diamond-link-bottom, so should also
# have been uninstalled. The roots should be unchanged though.
e2 = spack.environment.read("e2")
with e2:
assert set(root.name for (root, _) in e2.concretized_specs()) == set(
["diamond-link-right", "diamond-link-bottom"]
)
for _, concretized_spec in e2.concretized_specs():
assert not concretized_spec.installed
def test_uninstall_remove_dependency_shared_between_envs(self, environment_setup):
"""If you "spack uninstall --dependents --remove diamond-link-bottom" from
e1, then all packages are removed from e1 (it is now empty);
diamond-link-left is also uninstalled (since only e1 needs it) but
diamond-link-bottom is not uninstalled (since e2 needs it).
"""
e1 = spack.environment.read("e1")
with e1:
dtdiamondleft = next(
concrete
for (_, concrete) in e1.concretized_specs()
if concrete.name == "diamond-link-left"
)
output = uninstall("-y", "--dependents", "--remove", "diamond-link-bottom")
assert "The following specs will be removed but not uninstalled" in output
assert not list(e1.roots())
assert not dtdiamondleft.installed
# Since -f was not specified, all specs in e2 should still be installed
# (and e2 should be unchanged)
e2 = spack.environment.read("e2")
with e2:
assert set(root.name for (root, _) in e2.concretized_specs()) == set(
["diamond-link-right", "diamond-link-bottom"]
)
for _, concretized_spec in e2.concretized_specs():
assert concretized_spec.installed
def test_uninstall_dependency_shared_between_envs_fail(self, environment_setup):
"""If you "spack uninstall --dependents diamond-link-bottom" from
e1 (without --remove or -f), then this should fail (this is needed by
e2).
"""
e1 = spack.environment.read("e1")
with e1:
output = uninstall("-y", "--dependents", "diamond-link-bottom", fail_on_error=False)
assert "There are still dependents." in output
assert "use `spack env remove`" in output
# The environment should be unchanged and nothing should have been
# uninstalled
assert set(root.name for (root, _) in e1.concretized_specs()) == set(
["diamond-link-left", "diamond-link-bottom"]
)
for _, concretized_spec in e1.concretized_specs():
assert concretized_spec.installed
def test_uninstall_force_and_remove_dependency_shared_between_envs(self, environment_setup):
"""If you "spack uninstall -f --dependents --remove diamond-link-bottom" from
e1, then all packages should be uninstalled and removed from e1.
All packages will also be uninstalled from e2, but the roots will
remain unchanged.
"""
e1 = spack.environment.read("e1")
with e1:
dtdiamondleft = next(
concrete
for (_, concrete) in e1.concretized_specs()
if concrete.name == "diamond-link-left"
)
uninstall("-f", "-y", "--dependents", "--remove", "diamond-link-bottom")
assert not list(e1.roots())
assert not dtdiamondleft.installed
e2 = spack.environment.read("e2")
with e2:
assert set(root.name for (root, _) in e2.concretized_specs()) == set(
["diamond-link-right", "diamond-link-bottom"]
)
for _, concretized_spec in e2.concretized_specs():
assert not concretized_spec.installed
def test_uninstall_keep_dependents_dependency_shared_between_envs(self, environment_setup):
"""If you "spack uninstall -f --remove diamond-link-bottom" from
e1, then diamond-link-bottom should be uninstalled, which leaves
"dangling" references in both environments, since
diamond-link-left and diamond-link-right both need it.
"""
e1 = spack.environment.read("e1")
with e1:
dtdiamondleft = next(
concrete
for (_, concrete) in e1.concretized_specs()
if concrete.name == "diamond-link-left"
)
uninstall("-f", "-y", "--remove", "diamond-link-bottom")
# diamond-link-bottom was removed from the list of roots (note that
# it would still be installed since diamond-link-left depends on it)
assert set(x.name for x in e1.roots()) == set(["diamond-link-left"])
assert dtdiamondleft.installed
e2 = spack.environment.read("e2")
with e2:
assert set(root.name for (root, _) in e2.concretized_specs()) == set(
["diamond-link-right", "diamond-link-bottom"]
)
dtdiamondright = next(
concrete
for (_, concrete) in e2.concretized_specs()
if concrete.name == "diamond-link-right"
)
assert dtdiamondright.installed
dtdiamondbottom = next(
concrete
for (_, concrete) in e2.concretized_specs()
if concrete.name == "diamond-link-bottom"
)
assert not dtdiamondbottom.installed
| TestUninstallFromEnv |
python | milvus-io__pymilvus | pymilvus/client/abstract.py | {
"start": 6159,
"end": 7480
} | class ____:
def __init__(self, raw: Any):
self._raw = raw
self.name = None
self.description = None
self.type = None
self.params = {}
self.input_field_names = []
self.input_field_ids = []
self.output_field_names = []
self.output_field_ids = []
self.id = 0
self.__pack(self._raw)
def __pack(self, raw: Any):
self.name = raw.name
self.description = raw.description
self.id = raw.id
self.type = FunctionType(raw.type)
self.params = {}
for param in raw.params:
self.params[param.key] = param.value
self.input_field_names = raw.input_field_names
self.input_field_ids = raw.input_field_ids
self.output_field_names = raw.output_field_names
self.output_field_ids = raw.output_field_ids
def dict(self):
return {
"name": self.name,
"id": self.id,
"description": self.description,
"type": self.type,
"params": self.params,
"input_field_names": self.input_field_names,
"input_field_ids": self.input_field_ids,
"output_field_names": self.output_field_names,
"output_field_ids": self.output_field_ids,
}
| FunctionSchema |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_missouri_zip.py | {
"start": 747,
"end": 1751
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_missouri_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_missouri_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidMissouriZip |
python | ray-project__ray | python/ray/dashboard/modules/job/tests/test_cli.py | {
"start": 17533,
"end": 18048
} | class ____:
def test_address(self, mock_sdk_client):
_job_cli_group_test_address(mock_sdk_client, "delete", "fake_job_id")
def test_delete(self, mock_sdk_client):
runner = CliRunner()
mock_client_instance = mock_sdk_client.return_value
with set_env_var("RAY_ADDRESS", "env_addr"):
result = runner.invoke(job_cli_group, ["delete", "job_id"])
check_exit_code(result, 0)
mock_client_instance.delete_job.assert_called_with("job_id")
| TestDelete |
python | dagster-io__dagster | python_modules/dagster/dagster/components/testing/utils.py | {
"start": 4265,
"end": 14297
} | class ____:
"""A sandbox for testing components.
This sandbox provides a number of utilities for scaffolding, modifying, and loading components
from a temporary defs folder. This makes it easy to test components in isolation.
"""
project_root: Path
defs_folder_path: Path
project_name: str
@contextmanager
def build_component_tree(self) -> Iterator[ComponentTree]:
"""Builds a ComponentTree from this sandbox's defs folder."""
with alter_sys_path(to_add=[str(self.project_root / "src")], to_remove=[]):
module_path = f"{self.project_name}.defs"
try:
tree = ComponentTree(
defs_module=importlib.import_module(module_path),
project_root=self.project_root,
)
yield tree
finally:
modules_to_remove = [name for name in sys.modules if name.startswith(module_path)]
for name in modules_to_remove:
del sys.modules[name]
@contextmanager
def build_all_defs(self) -> Iterator[Definitions]:
"""Builds a Definitions object corresponding to all components in the sandbox.
This is equivalent to what would be loaded from a real Dagster project with this
sandbox's defs folder.
"""
with self.build_component_tree() as tree:
yield tree.build_defs()
@public
@contextmanager
def load_component_and_build_defs(
self, defs_path: Path
) -> Iterator[tuple[Component, Definitions]]:
"""Loads a Component object at the given path and builds the corresponding Definitions.
Args:
defs_path: The path to the component to load.
Returns:
A tuple of the Component and Definitions objects.
Example:
.. code-block:: python
with scaffold_defs_sandbox() as sandbox:
defs_path = sandbox.scaffold_component(component_cls=MyComponent)
with sandbox.load_component_and_build_defs(defs_path=defs_path) as (
component,
defs,
):
assert isinstance(component, MyComponent)
assert defs.get_asset_def("my_asset").key == AssetKey("my_asset")
"""
with self.build_component_tree() as tree:
component = tree.load_component(defs_path)
defs = tree.build_defs(defs_path)
yield component, defs
@public
def scaffold_component(
self,
component_cls: Any,
defs_path: Optional[Union[Path, str]] = None,
scaffold_params: Optional[dict[str, Any]] = None,
scaffold_format: ScaffoldFormatOptions = "yaml",
defs_yaml_contents: Optional[dict[str, Any]] = None,
) -> Path:
"""Scaffolds a component into the defs folder.
Args:
component_cls: The component class to scaffold.
defs_path: The path to the component. (defaults to a random name)
scaffold_params: The parameters to pass to the scaffolder.
scaffold_format: The format to use for scaffolding.
defs_yaml_contents: The body of the component to update the defs.yaml file with.
Returns:
The path to the component.
Example:
.. code-block:: python
with scaffold_defs_sandbox() as sandbox:
defs_path = sandbox.scaffold_component(component_cls=MyComponent)
assert (defs_path / "defs.yaml").exists()
"""
defs_path = self.defs_folder_path / (defs_path or random_importable_name())
typename = get_original_module_name(component_cls)
scaffold_object(
path=defs_path,
typename=typename,
json_params=json.dumps(scaffold_params) if scaffold_params else None,
scaffold_format=scaffold_format,
project_root=self.project_root,
)
if defs_yaml_contents:
(defs_path / "defs.yaml").write_text(yaml.safe_dump(defs_yaml_contents))
return defs_path
@contextmanager
def swap_defs_file(self, defs_path: Path, defs_yaml_contents: Optional[dict[str, Any]]):
check.invariant(
defs_path.suffix == ".yaml",
"Attributes are only supported for yaml components",
)
check.invariant(defs_path.exists(), "defs.yaml must exist")
# no need to override there is no component body
if defs_yaml_contents is None:
yield
return
temp_dir = Path(tempfile.mkdtemp())
temp_path = temp_dir / defs_path.name
try:
shutil.copy2(defs_path, temp_path)
defs_path.write_text(yaml.safe_dump(defs_yaml_contents))
yield
finally:
if temp_path.exists():
defs_path.unlink(missing_ok=True)
shutil.copy2(temp_path, defs_path)
shutil.rmtree(temp_dir)
@contextmanager
def activate_venv_for_project(self) -> Iterator[None]:
"""Activates the project's venv and ensures the src directory is in sys.path."""
injected_path = str(self.project_root / "src")
try:
sys.path.insert(1, injected_path)
venv_path = (self.project_root / ".venv").absolute()
with environ(
{
"VIRTUAL_ENV": str(venv_path),
"PATH": os.pathsep.join(
[
str(venv_path / ("Scripts" if sys.platform == "win32" else "bin")),
os.getenv("PATH", ""),
]
),
}
):
yield
finally:
sys.path.remove(injected_path)
@public
@contextmanager
def create_defs_folder_sandbox(
*,
project_name: Optional[str] = None,
) -> Iterator[DefsFolderSandbox]:
"""Create a lightweight sandbox to scaffold and instantiate components. Useful
for those authoring custom components.
This function creates a temporary project that mimics the ``defs`` folder portion
of a real Dagster project. It then yields a :py:class:`DefsFolderSandbox` object which can be used to
scaffold and load components.
:py:class:`DefsFolderSandbox` has a few properties useful for different types of tests:
* ``defs_folder_path``: The absolute path to the ``defs`` folder. The user can inspect and load files from scaffolded components, e.g. ``(defs_folder_path / "my_component" / "defs.yaml").exists()``
* ``project_name``: If not provided, a random name is generated.
Once the sandbox is created, you can load all definitions using the ``load`` method on :py:class:`DefsFolderSandbox`, or with the ``load_component_at_path`` method.
This sandbox does not provide complete environmental isolation, but does provide some isolation guarantees
to do its best to isolate the test from and restore the environment after the test.
* A file structure like this is created: ``<<temp folder>> / src / <<project_name>> / defs``
* ``<<temp folder>> / src`` is placed in ``sys.path`` during the loading process
* Any modules loaded during the process that descend from defs module are evicted from ``sys.modules`` on cleanup.
Args:
project_name: Optional name for the project (default: random name).
Returns:
Iterator[DefsFolderSandbox]: A context manager that yields a DefsFolderSandbox
Example:
.. code-block:: python
with create_defs_folder_sandbox() as sandbox:
defs_path = sandbox.scaffold_component(component_cls=MyComponent)
assert (defs_path / "defs.yaml").exists()
assert (defs_path / "my_component_config_file.yaml").exists() # produced by MyComponentScaffolder
with create_defs_folder_sandbox() as sandbox:
defs_path = sandbox.scaffold_component(
component_cls=MyComponent,
defs_yaml_contents={"type": "MyComponent", "attributes": {"asset_key": "my_asset"}},
)
with sandbox.load_component_and_build_defs(defs_path=defs_path) as (component, defs):
assert isinstance(component, MyComponent)
assert defs.get_asset_def("my_asset").key == AssetKey("my_asset")
"""
project_name = project_name or random_importable_name()
with tempfile.TemporaryDirectory() as project_root_str:
project_root = Path(project_root_str)
defs_folder_path = project_root / "src" / project_name / "defs"
defs_folder_path.mkdir(parents=True, exist_ok=True)
dg_toml_path = project_root / "dg.toml"
dg_toml_path.write_text(
textwrap.dedent(f"""
directory_type = "project"
[project]
root_module = "{project_name}"
registry_modules = ["dagster_dbt"]
""")
)
yield DefsFolderSandbox(
project_root=project_root,
defs_folder_path=defs_folder_path,
project_name=project_name,
)
def copy_code_to_file(fn: Callable, file_path: Path) -> None:
"""Takes a function and writes the body of the function to a file.
Args:
fn: The function to write to the file.
file_path: The path to the file to write the function to.
Example:
.. code-block:: python
def code_to_copy() -> None:
import dagster as dg
def execute_fn(context) -> dg.MaterializeResult:
return dg.MaterializeResult()
copy_code_to_file(code_to_copy, sandbox.defs_folder_path / "execute.py")
"""
source_code_text = inspect.getsource(fn)
source_code_text = "\n".join(source_code_text.split("\n")[1:])
dedented_source_code_text = textwrap.dedent(source_code_text)
file_path.write_text(dedented_source_code_text)
| DefsFolderSandbox |
python | numpy__numpy | numpy/_core/tests/test_scalar_ctors.py | {
"start": 2336,
"end": 2807
} | class ____:
def test_intp(self):
# Ticket #99
assert_equal(1024, np.intp(1024))
def test_uint64_from_negative(self):
with pytest.raises(OverflowError):
np.uint64(-2)
int_types = [np.byte, np.short, np.intc, np.long, np.longlong]
uint_types = [np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong]
float_types = [np.half, np.single, np.double, np.longdouble]
cfloat_types = [np.csingle, np.cdouble, np.clongdouble]
| TestFromInt |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/translate.py | {
"start": 6082,
"end": 12018
} | class ____(GoogleCloudBaseOperator):
"""
Translate text content of moderate amount, for larger volumes of text please use the TranslateTextBatchOperator.
Wraps the Google cloud Translate Text (Advanced) functionality.
See https://cloud.google.com/translate/docs/advanced/translating-text-v3
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:TranslateTextOperator`.
:param project_id: Optional. The ID of the Google Cloud project that the
service belongs to.
If not provided default project_id is used.
:param location: optional. The ID of the Google Cloud location that the
service belongs to. if not specified, 'global' is used.
Non-global location is required for requests using AutoML models or custom glossaries.
:param contents: Required. The sequence of content strings to be translated.
Limited to 1024 items with 30_000 codepoints total recommended.
:param mime_type: Optional. The format of the source text, If left blank,
the MIME type defaults to "text/html".
:param source_language_code: Optional. The ISO-639 language code of the
input text if known. If not specified, attempted to recognize automatically.
:param target_language_code: Required. The ISO-639 language code to use
for translation of the input text.
:param model: Optional. The ``model`` type requested for this translation.
If not provided, the default Google model (NMT) will be used.
The format depends on model type:
- AutoML Translation models:
``projects/{project-number-or-id}/locations/{location-id}/models/{model-id}``
- General (built-in) models:
``projects/{project-number-or-id}/locations/{location-id}/models/general/nmt``
- Translation LLM models:
``projects/{project-number-or-id}/locations/{location-id}/models/general/translation-llm``
For global (non-region) requests, use 'global' ``location-id``.
:param glossary_config: Optional. Glossary to be applied.
:param transliteration_config: Optional. Transliteration to be applied.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"contents",
"target_language_code",
"mime_type",
"source_language_code",
"model",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
contents: Sequence[str],
source_language_code: str | None = None,
target_language_code: str,
mime_type: str | None = None,
location: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
model: str | None = None,
transliteration_config: TransliterationConfig | None = None,
glossary_config: TranslateTextGlossaryConfig | None = None,
labels: str | None = None,
timeout: float | _MethodDefault = DEFAULT,
retry: Retry | _MethodDefault | None = DEFAULT,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.contents = contents
self.source_language_code = source_language_code
self.target_language_code = target_language_code
self.mime_type = mime_type
self.location = location
self.labels = labels
self.model = model
self.transliteration_config = transliteration_config
self.glossary_config = glossary_config
self.metadate = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = TranslateHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Starting the text translation run")
translation_result = hook.translate_text(
contents=self.contents,
source_language_code=self.source_language_code,
target_language_code=self.target_language_code,
mime_type=self.mime_type,
location=self.location,
labels=self.labels,
model=self.model,
transliteration_config=self.transliteration_config,
glossary_config=self.glossary_config,
timeout=self.timeout,
retry=self.retry,
metadata=self.metadate,
)
self.log.info("Text translation run complete")
return translation_result
except GoogleAPICallError as e:
self.log.error("An error occurred executing translate_text method: \n%s", e)
raise AirflowException(e)
| TranslateTextOperator |
python | django__django | tests/admin_views/test_nav_sidebar.py | {
"start": 313,
"end": 814
} | class ____(admin.AdminSite):
enable_nav_sidebar = False
site_with_sidebar = AdminSiteWithSidebar(name="test_with_sidebar")
site_without_sidebar = AdminSiteWithoutSidebar(name="test_without_sidebar")
site_with_sidebar.register(User)
site_with_sidebar.register(Héllo)
urlpatterns = [
path("test_sidebar/admin/", site_with_sidebar.urls),
path("test_wihout_sidebar/admin/", site_without_sidebar.urls),
]
@override_settings(ROOT_URLCONF="admin_views.test_nav_sidebar")
| AdminSiteWithoutSidebar |
python | django__django | tests/check_framework/template_test_apps/different_tags_app/apps.py | {
"start": 36,
"end": 147
} | class ____(AppConfig):
name = "check_framework.template_test_apps.different_tags_app"
| DifferentTagsAppAppConfig |
python | walkccc__LeetCode | solutions/420. Strong Password Checker/420.py | {
"start": 0,
"end": 1638
} | class ____:
def strongPasswordChecker(self, password: str) -> int:
n = len(password)
missing = self._getMissing(password)
# the number of replacements to deal with 3 repeating characters
replaces = 0
# the number of sequences that can be substituted with 1 deletions,
# (3k)-seqs
oneSeq = 0
# the number of sequences that can be substituted with 2 deletions,
# (3k + 1)-seqs
twoSeq = 0
i = 2
while i < n:
if password[i] == password[i - 1] and password[i - 1] == password[i - 2]:
length = 2 # the length of the repeating password
while i < n and password[i] == password[i - 1]:
length += 1
i += 1
replaces += length // 3 # 'aaaaaaa' -> 'aaxaaxa'
if length % 3 == 0:
oneSeq += 1
if length % 3 == 1:
twoSeq += 1
else:
i += 1
if n < 6:
return max(6 - n, missing)
if n <= 20:
return max(replaces, missing)
deletes = n - 20
# Each replacement in (3k)-seqs can be substituted with 1 deletions.
replaces -= min(oneSeq, deletes)
# Each replacement in (3k + 1)-seqs can be substituted with 2 deletions.
replaces -= min(max(deletes - oneSeq, 0), twoSeq * 2) // 2
# Each replacement in other seqs can be substituted with 3 deletions.
replaces -= max(deletes - oneSeq - twoSeq * 2, 0) // 3
return deletes + max(replaces, missing)
def _getMissing(self, password: str) -> int:
return (3
- any(c.isupper() for c in password)
- any(c.islower() for c in password)
- any(c.isdigit() for c in password))
| Solution |
python | catalyst-team__catalyst | catalyst/core/callback.py | {
"start": 4254,
"end": 4382
} | class ____(IMetricCallback):
"""Criterion callback interface, abstraction over criterion step."""
pass
| ICriterionCallback |
python | numba__numba | numba/core/errors.py | {
"start": 19472,
"end": 20025
} | class ____(TypingError):
def __init__(self, value, attr, loc=None):
module = getattr(value, 'pymod', None)
if module is not None and module == np:
# unsupported numpy feature.
msg = ("Use of unsupported NumPy function 'numpy.%s' "
"or unsupported use of the function.") % attr
else:
msg = "Unknown attribute '{attr}' of type {type}"
msg = msg.format(type=value, attr=attr)
super(UntypedAttributeError, self).__init__(msg, loc=loc)
| UntypedAttributeError |
python | pypa__pip | src/pip/_vendor/rich/console.py | {
"start": 9274,
"end": 10224
} | class ____:
"""Context manager to capture the result of printing to the console.
See :meth:`~rich.console.Console.capture` for how to use.
Args:
console (Console): A console instance to capture output.
"""
def __init__(self, console: "Console") -> None:
self._console = console
self._result: Optional[str] = None
def __enter__(self) -> "Capture":
self._console.begin_capture()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self._result = self._console.end_capture()
def get(self) -> str:
"""Get the result of the capture."""
if self._result is None:
raise CaptureError(
"Capture result is not available until context manager exits."
)
return self._result
| Capture |
python | getsentry__sentry | src/sentry/replays/usecases/ingest/__init__.py | {
"start": 2029,
"end": 2108
} | class ____(msgspec.Struct, gc=False, tag_field="type", tag=4):
pass
| MetaEvent |
python | pydantic__pydantic | tests/test_validate_call.py | {
"start": 38025,
"end": 38495
} | class ____(BaseModel):
z: int
M = M0
def test_uses_local_ns():
class M1(BaseModel):
y: int
M = M1 # noqa: F841
def foo():
class M2(BaseModel):
z: int
M = M2 # noqa: F841
@validate_call(validate_return=True)
def bar(m: 'M') -> 'M':
return m
assert bar({'z': 1}) == M2(z=1)
foo()
# The class needs to be defined at the module level
# For 'DeferBuildClass' to resolve:
| M0 |
python | bokeh__bokeh | src/bokeh/core/property/json.py | {
"start": 1276,
"end": 2588
} | class ____(String):
""" Accept JSON string values.
The value is transmitted and received by BokehJS as a *string*
containing JSON content. i.e., you must use ``JSON.parse`` to unpack
the value into a JavaScript hash.
Args:
default (string, optional) :
A default value for attributes created from this property to
have.
help (str or None, optional) :
A documentation string for this property. (default: None)
"""
def validate(self, value: Any, detail: bool = True) -> None:
super().validate(value, detail)
try:
import json
json.loads(value)
except ValueError:
msg = "" if not detail else f"expected JSON text, got {value!r}"
raise ValueError(msg)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| JSON |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_comprehend.py | {
"start": 986,
"end": 6467
} | class ____:
@pytest.mark.parametrize(
("test_hook", "service_name"),
[pytest.param(ComprehendHook(), "comprehend", id="comprehend")],
)
def test_comprehend_hook(self, test_hook, service_name):
comprehend_hook = ComprehendHook()
assert comprehend_hook.conn is not None
@mock.patch.object(ComprehendHook, "conn")
def test_validate_document_classifier_training_status_for_success(self, mock_conn):
mock_conn.describe_document_classifier.return_value = {
"DocumentClassifierProperties": {
"DocumentClassifierArn": "arn:aws:comprehend:us-east-1:123456789012:document-classifier/docs-clasf/version/v1",
"LanguageCode": "en",
"Status": "TRAINED",
"Message": "",
"SubmitTime": "2024-06-08T21:41:36.867Z",
"EndTime": "2024-06-08T21:52:02.249Z",
"TrainingStartTime": "2024-06-08T21:44:39.596Z",
"TrainingEndTime": "2024-06-08T21:51:00.021Z",
"InputDataConfig": {
"DataFormat": "COMPREHEND_CSV",
"S3Uri": "s3://test/native-doc.csv",
"DocumentType": "SEMI_STRUCTURED_DOCUMENT",
"Documents": {"S3Uri": "s3://test/input-docs/"},
"DocumentReaderConfig": {
"DocumentReadAction": "TEXTRACT_DETECT_DOCUMENT_TEXT",
"DocumentReadMode": "SERVICE_DEFAULT",
},
},
"OutputDataConfig": {
"S3Uri": "s3://test/training_output/183167903796-CLR-b56fd4cf0b5bcc11c9409dfb431cd585/output/output.tar.gz"
},
"ClassifierMetadata": {
"NumberOfLabels": 2,
"NumberOfTrainedDocuments": 16,
"NumberOfTestDocuments": 2,
"EvaluationMetrics": {
"Accuracy": 1,
"Precision": 1,
"Recall": 1,
"F1Score": 1,
"MicroPrecision": 1,
"MicroRecall": 1,
"MicroF1Score": 1,
"HammingLoss": 0,
},
},
"DataAccessRoleArn": "arn:aws:iam::123456789012:role/ComprehendExecutionRole",
"Mode": "MULTI_CLASS",
"VersionName": "v1",
}
}
classifier_arn = "arn:aws:comprehend:us-east-1:123456789012:document-classifier/docs-clasf/version/v1"
ComprehendHook().validate_document_classifier_training_status(document_classifier_arn=classifier_arn)
@mock.patch.object(ComprehendHook, "conn")
def test_validate_document_classifier_training_status_for_warning(self, mock_conn):
mock_conn.describe_document_classifier.return_value = {
"DocumentClassifierProperties": {
"DocumentClassifierArn": "arn:aws:comprehend:us-east-1:123456789012:document-classifier/docs-clasf/version/v1",
"LanguageCode": "en",
"Status": "TRAINED_WITH_WARNING",
"Message": "Unable to parse some documents. See details in the output S3 location. These documents have been skipped.",
"SubmitTime": "2024-06-08T21:41:36.867Z",
"EndTime": "2024-06-08T21:52:02.249Z",
"TrainingStartTime": "2024-06-08T21:44:39.596Z",
"TrainingEndTime": "2024-06-08T21:51:00.021Z",
"InputDataConfig": {
"DataFormat": "COMPREHEND_CSV",
"S3Uri": "s3://test/native-doc.csv",
"DocumentType": "SEMI_STRUCTURED_DOCUMENT",
"Documents": {"S3Uri": "s3://test/input-docs/"},
"DocumentReaderConfig": {
"DocumentReadAction": "TEXTRACT_DETECT_DOCUMENT_TEXT",
"DocumentReadMode": "SERVICE_DEFAULT",
},
},
"OutputDataConfig": {
"S3Uri": "s3://test/training_output/183167903796-CLR-b56fd4cf0b5bcc11c9409dfb431cd585/output/output.tar.gz"
},
"ClassifierMetadata": {
"NumberOfLabels": 2,
"NumberOfTrainedDocuments": 16,
"NumberOfTestDocuments": 2,
"EvaluationMetrics": {
"Accuracy": 1,
"Precision": 1,
"Recall": 1,
"F1Score": 1,
"MicroPrecision": 1,
"MicroRecall": 1,
"MicroF1Score": 1,
"HammingLoss": 0,
},
},
"DataAccessRoleArn": "arn:aws:iam::123456789012:role/ComprehendExecutionRole",
"Mode": "MULTI_CLASS",
"VersionName": "v1",
}
}
classifier_arn = "arn:aws:comprehend:us-east-1:123456789012:document-classifier/docs-clasf/version/v1"
with pytest.raises(
AirflowException, match="Warnings in AWS Comprehend document classifier training."
):
ComprehendHook().validate_document_classifier_training_status(
document_classifier_arn=classifier_arn, fail_on_warnings=True
)
| TestComprehendHook |
python | ray-project__ray | rllib/utils/spaces/space_utils.py | {
"start": 195,
"end": 17983
} | class ____(np.ndarray):
"""A ndarray-wrapper the usage of which indicates that there a batch dim exists.
This is such that our `batch()` utility can distinguish between having to
stack n individual batch items (each one w/o any batch dim) vs having to
concatenate n already batched items (each one possibly with a different batch
dim, but definitely with some batch dim).
TODO (sven): Maybe replace this by a list-override instead.
"""
def __new__(cls, input_array):
# Use __new__ to create a new instance of our subclass.
obj = np.asarray(input_array).view(cls)
return obj
@DeveloperAPI
def get_original_space(space: gym.Space) -> gym.Space:
"""Returns the original space of a space, if any.
This function recursively traverses the given space and returns the original space
at the very end of the chain.
Args:
space: The space to get the original space for.
Returns:
The original space or the given space itself if no original space is found.
"""
if hasattr(space, "original_space"):
return get_original_space(space.original_space)
else:
return space
@DeveloperAPI
def is_composite_space(space: gym.Space) -> bool:
"""Returns true, if the space is composite.
Note, we follow here the glossary of `gymnasium` by which any spoace
that holds other spaces is defined as being 'composite'.
Args:
space: The space to be checked for being composed of other spaces.
Returns:
True, if the space is composed of other spaces, otherwise False.
"""
if type(space) in [
gym.spaces.Dict,
gym.spaces.Graph,
gym.spaces.Sequence,
gym.spaces.Tuple,
]:
return True
else:
return False
@DeveloperAPI
def flatten_space(space: gym.Space) -> List[gym.Space]:
"""Flattens a gym.Space into its primitive components.
Primitive components are any non Tuple/Dict spaces.
Args:
space: The gym.Space to flatten. This may be any
supported type (including nested Tuples and Dicts).
Returns:
List[gym.Space]: The flattened list of primitive Spaces. This list
does not contain Tuples or Dicts anymore.
"""
def _helper_flatten(space_, return_list):
from ray.rllib.utils.spaces.flexdict import FlexDict
if isinstance(space_, gym.spaces.Tuple):
for s in space_:
_helper_flatten(s, return_list)
elif isinstance(space_, (gym.spaces.Dict, FlexDict)):
for k in sorted(space_.spaces):
_helper_flatten(space_[k], return_list)
else:
return_list.append(space_)
ret = []
_helper_flatten(space, ret)
return ret
@DeveloperAPI
def get_base_struct_from_space(space):
"""Returns a Tuple/Dict Space as native (equally structured) py tuple/dict.
Args:
space: The Space to get the python struct for.
Returns:
Union[dict,tuple,gym.Space]: The struct equivalent to the given Space.
Note that the returned struct still contains all original
"primitive" Spaces (e.g. Box, Discrete).
.. testcode::
:skipif: True
get_base_struct_from_space(Dict({
"a": Box(),
"b": Tuple([Discrete(2), Discrete(3)])
}))
.. testoutput::
dict(a=Box(), b=tuple(Discrete(2), Discrete(3)))
"""
def _helper_struct(space_):
if isinstance(space_, gym.spaces.Tuple):
return tuple(_helper_struct(s) for s in space_)
elif isinstance(space_, gym.spaces.Dict):
return {k: _helper_struct(space_[k]) for k in space_.spaces}
else:
return space_
return _helper_struct(space)
@DeveloperAPI
def get_dummy_batch_for_space(
space: gym.Space,
batch_size: int = 32,
*,
fill_value: Union[float, int, str] = 0.0,
time_size: Optional[int] = None,
time_major: bool = False,
one_hot_discrete: bool = False,
) -> np.ndarray:
"""Returns batched dummy data (using `batch_size`) for the given `space`.
Note: The returned batch will not pass a `space.contains(batch)` test
as an additional batch dimension has to be added at axis 0, unless `batch_size` is
set to 0.
Args:
space: The space to get a dummy batch for.
batch_size: The required batch size (B). Note that this can also
be 0 (only if `time_size` is None!), which will result in a
non-batched sample for the given space (no batch dim).
fill_value: The value to fill the batch with
or "random" for random values.
time_size: If not None, add an optional time axis
of `time_size` size to the returned batch. This time axis might either
be inserted at axis=1 (default) or axis=0, if `time_major` is True.
time_major: If True AND `time_size` is not None, return batch
as shape [T x B x ...], otherwise as [B x T x ...]. If `time_size`
if None, ignore this setting and return [B x ...].
one_hot_discrete: If True, will return one-hot vectors (instead of
int-values) for those sub-components of a (possibly complex) `space`
that are Discrete or MultiDiscrete. Note that in case `fill_value` is 0.0,
this will result in zero-hot vectors (where all slots have a value of 0.0).
Returns:
The dummy batch of size `bqtch_size` matching the given space.
"""
# Complex spaces. Perform recursive calls of this function.
if isinstance(space, (gym.spaces.Dict, gym.spaces.Tuple, dict, tuple)):
base_struct = space
if isinstance(space, (gym.spaces.Dict, gym.spaces.Tuple)):
base_struct = get_base_struct_from_space(space)
return tree.map_structure(
lambda s: get_dummy_batch_for_space(
space=s,
batch_size=batch_size,
fill_value=fill_value,
time_size=time_size,
time_major=time_major,
one_hot_discrete=one_hot_discrete,
),
base_struct,
)
if one_hot_discrete:
if isinstance(space, gym.spaces.Discrete):
space = gym.spaces.Box(0.0, 1.0, (space.n,), np.float32)
elif isinstance(space, gym.spaces.MultiDiscrete):
space = gym.spaces.Box(0.0, 1.0, (np.sum(space.nvec),), np.float32)
# Primitive spaces: Box, Discrete, MultiDiscrete.
# Random values: Use gym's sample() method.
if fill_value == "random":
if time_size is not None:
assert batch_size > 0 and time_size > 0
if time_major:
return np.array(
[
[space.sample() for _ in range(batch_size)]
for t in range(time_size)
],
dtype=space.dtype,
)
else:
return np.array(
[
[space.sample() for t in range(time_size)]
for _ in range(batch_size)
],
dtype=space.dtype,
)
else:
return np.array(
[space.sample() for _ in range(batch_size)]
if batch_size > 0
else space.sample(),
dtype=space.dtype,
)
# Fill value given: Use np.full.
else:
if time_size is not None:
assert batch_size > 0 and time_size > 0
if time_major:
shape = [time_size, batch_size]
else:
shape = [batch_size, time_size]
else:
shape = [batch_size] if batch_size > 0 else []
return np.full(
shape + list(space.shape), fill_value=fill_value, dtype=space.dtype
)
@DeveloperAPI
def flatten_to_single_ndarray(input_):
"""Returns a single np.ndarray given a list/tuple of np.ndarrays.
Args:
input_ (Union[List[np.ndarray], np.ndarray]): The list of ndarrays or
a single ndarray.
Returns:
np.ndarray: The result after concatenating all single arrays in input_.
.. testcode::
:skipif: True
flatten_to_single_ndarray([
np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
np.array([7, 8, 9]),
])
.. testoutput::
np.array([
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0
])
"""
# Concatenate complex inputs.
if isinstance(input_, (list, tuple, dict)):
expanded = []
for in_ in tree.flatten(input_):
expanded.append(np.reshape(in_, [-1]))
input_ = np.concatenate(expanded, axis=0).flatten()
return input_
@DeveloperAPI
def batch(
list_of_structs: List[Any],
*,
individual_items_already_have_batch_dim: Union[bool, str] = False,
):
"""Converts input from a list of (nested) structs to a (nested) struct of batches.
Input: List of structs (each of these structs representing a single batch item).
[
{"a": 1, "b": (4, 7.0)}, <- batch item 1
{"a": 2, "b": (5, 8.0)}, <- batch item 2
{"a": 3, "b": (6, 9.0)}, <- batch item 3
]
Output: Struct of different batches (each batch has size=3 b/c there were 3 items
in the original list):
{
"a": np.array([1, 2, 3]),
"b": (np.array([4, 5, 6]), np.array([7.0, 8.0, 9.0]))
}
Args:
list_of_structs: The list of (possibly nested) structs. Each item
in this list represents a single batch item.
individual_items_already_have_batch_dim: True, if the individual items in
`list_of_structs` already have a batch dim. In this case, we will
concatenate (instead of stack) at the end. In the example above, this would
look like this: Input: [{"a": [1], "b": ([4], [7.0])}, ...] -> Output: same
as in above example.
If the special value "auto" is used,
Returns:
The struct of component batches. Each leaf item in this struct represents the
batch for a single component (in case struct is tuple/dict). If the input is a
simple list of primitive items, e.g. a list of floats, a np.array of floats
will be returned.
"""
if not list_of_structs:
raise ValueError("Input `list_of_structs` does not contain any items.")
# TODO (sven): Maybe replace this by a list-override (usage of which indicated
# this method that concatenate should be used (not stack)).
if individual_items_already_have_batch_dim == "auto":
flat = tree.flatten(list_of_structs[0])
individual_items_already_have_batch_dim = isinstance(flat[0], BatchedNdArray)
np_func = np.concatenate if individual_items_already_have_batch_dim else np.stack
ret = tree.map_structure(
lambda *s: np.ascontiguousarray(np_func(s, axis=0)), *list_of_structs
)
return ret
@DeveloperAPI
def unbatch(batches_struct):
"""Converts input from (nested) struct of batches to batch of structs.
Input: Struct of different batches (each batch has size=3):
{
"a": np.array([1, 2, 3]),
"b": (np.array([4, 5, 6]), np.array([7.0, 8.0, 9.0]))
}
Output: Batch (list) of structs (each of these structs representing a
single action):
[
{"a": 1, "b": (4, 7.0)}, <- action 1
{"a": 2, "b": (5, 8.0)}, <- action 2
{"a": 3, "b": (6, 9.0)}, <- action 3
]
Args:
batches_struct: The struct of component batches. Each leaf item
in this struct represents the batch for a single component
(in case struct is tuple/dict).
Alternatively, `batches_struct` may also simply be a batch of
primitives (non tuple/dict).
Returns:
The list of individual structs. Each item in the returned list represents a
single (maybe complex) batch item.
"""
flat_batches = tree.flatten(batches_struct)
out = []
for batch_pos in range(len(flat_batches[0])):
out.append(
tree.unflatten_as(
batches_struct,
[flat_batches[i][batch_pos] for i in range(len(flat_batches))],
)
)
return out
@DeveloperAPI
def clip_action(action, action_space):
"""Clips all components in `action` according to the given Space.
Only applies to Box components within the action space.
Args:
action: The action to be clipped. This could be any complex
action, e.g. a dict or tuple.
action_space: The action space struct,
e.g. `{"a": Distrete(2)}` for a space: Dict({"a": Discrete(2)}).
Returns:
Any: The input action, but clipped by value according to the space's
bounds.
"""
def map_(a, s):
if isinstance(s, gym.spaces.Box):
a = np.clip(a, s.low, s.high)
return a
return tree.map_structure(map_, action, action_space)
@DeveloperAPI
def unsquash_action(action, action_space_struct):
"""Unsquashes all components in `action` according to the given Space.
Inverse of `normalize_action()`. Useful for mapping policy action
outputs (normalized between -1.0 and 1.0) to an env's action space.
Unsquashing results in cont. action component values between the
given Space's bounds (`low` and `high`). This only applies to Box
components within the action space, whose dtype is float32 or float64.
Args:
action: The action to be unsquashed. This could be any complex
action, e.g. a dict or tuple.
action_space_struct: The action space struct,
e.g. `{"a": Box()}` for a space: Dict({"a": Box()}).
Returns:
Any: The input action, but unsquashed, according to the space's
bounds. An unsquashed action is ready to be sent to the
environment (`BaseEnv.send_actions([unsquashed actions])`).
"""
def map_(a, s):
if (
isinstance(s, gym.spaces.Box)
and np.all(s.bounded_below)
and np.all(s.bounded_above)
):
if s.dtype == np.float32 or s.dtype == np.float64:
# Assuming values are roughly between -1.0 and 1.0 ->
# unsquash them to the given bounds.
a = s.low + (a + 1.0) * (s.high - s.low) / 2.0
# Clip to given bounds, just in case the squashed values were
# outside [-1.0, 1.0].
a = np.clip(a, s.low, s.high)
elif np.issubdtype(s.dtype, np.integer):
# For Categorical and MultiCategorical actions, shift the selection
# into the proper range.
a = s.low + a
return a
return tree.map_structure(map_, action, action_space_struct)
@DeveloperAPI
def normalize_action(action, action_space_struct):
"""Normalizes all (Box) components in `action` to be in [-1.0, 1.0].
Inverse of `unsquash_action()`. Useful for mapping an env's action
(arbitrary bounded values) to a [-1.0, 1.0] interval.
This only applies to Box components within the action space, whose
dtype is float32 or float64.
Args:
action: The action to be normalized. This could be any complex
action, e.g. a dict or tuple.
action_space_struct: The action space struct,
e.g. `{"a": Box()}` for a space: Dict({"a": Box()}).
Returns:
Any: The input action, but normalized, according to the space's
bounds.
"""
def map_(a, s):
if isinstance(s, gym.spaces.Box) and (
s.dtype == np.float32 or s.dtype == np.float64
):
# Normalize values to be exactly between -1.0 and 1.0.
a = ((a - s.low) * 2.0) / (s.high - s.low) - 1.0
return a
return tree.map_structure(map_, action, action_space_struct)
@DeveloperAPI
def convert_element_to_space_type(element: Any, sampled_element: Any) -> Any:
"""Convert all the components of the element to match the space dtypes.
Args:
element: The element to be converted.
sampled_element: An element sampled from a space to be matched
to.
Returns:
The input element, but with all its components converted to match
the space dtypes.
"""
def map_(elem, s):
if isinstance(s, np.ndarray):
if not isinstance(elem, np.ndarray):
assert isinstance(
elem, (float, int)
), f"ERROR: `elem` ({elem}) must be np.array, float or int!"
if s.shape == ():
elem = np.array(elem, dtype=s.dtype)
else:
raise ValueError(
"Element should be of type np.ndarray but is instead of \
type {}".format(
type(elem)
)
)
elif s.dtype != elem.dtype:
elem = elem.astype(s.dtype)
# Gymnasium now uses np.int_64 as the dtype of a Discrete action space
elif isinstance(s, int) or isinstance(s, np.int_):
if isinstance(elem, float) and elem.is_integer():
elem = int(elem)
# Note: This does not check if the float element is actually an integer
if isinstance(elem, np.float_):
elem = np.int64(elem)
return elem
return tree.map_structure(map_, element, sampled_element, check_types=False)
| BatchedNdArray |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_django/DJ001.py | {
"start": 137,
"end": 543
} | class ____(models.Model):
charfield = models.CharField(max_length=255, null=True)
textfield = models.TextField(max_length=255, null=True)
slugfield = models.SlugField(max_length=255, null=True)
emailfield = models.EmailField(max_length=255, null=True)
filepathfield = models.FilePathField(max_length=255, null=True)
urlfield = models.URLField(max_length=255, null=True)
| IncorrectModel |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_resource_claim_status.py | {
"start": 383,
"end": 7596
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'allocation': 'V1beta2AllocationResult',
'devices': 'list[V1beta2AllocatedDeviceStatus]',
'reserved_for': 'list[V1beta2ResourceClaimConsumerReference]'
}
attribute_map = {
'allocation': 'allocation',
'devices': 'devices',
'reserved_for': 'reservedFor'
}
def __init__(self, allocation=None, devices=None, reserved_for=None, local_vars_configuration=None): # noqa: E501
"""V1beta2ResourceClaimStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._allocation = None
self._devices = None
self._reserved_for = None
self.discriminator = None
if allocation is not None:
self.allocation = allocation
if devices is not None:
self.devices = devices
if reserved_for is not None:
self.reserved_for = reserved_for
@property
def allocation(self):
"""Gets the allocation of this V1beta2ResourceClaimStatus. # noqa: E501
:return: The allocation of this V1beta2ResourceClaimStatus. # noqa: E501
:rtype: V1beta2AllocationResult
"""
return self._allocation
@allocation.setter
def allocation(self, allocation):
"""Sets the allocation of this V1beta2ResourceClaimStatus.
:param allocation: The allocation of this V1beta2ResourceClaimStatus. # noqa: E501
:type: V1beta2AllocationResult
"""
self._allocation = allocation
@property
def devices(self):
"""Gets the devices of this V1beta2ResourceClaimStatus. # noqa: E501
Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers. # noqa: E501
:return: The devices of this V1beta2ResourceClaimStatus. # noqa: E501
:rtype: list[V1beta2AllocatedDeviceStatus]
"""
return self._devices
@devices.setter
def devices(self, devices):
"""Sets the devices of this V1beta2ResourceClaimStatus.
Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers. # noqa: E501
:param devices: The devices of this V1beta2ResourceClaimStatus. # noqa: E501
:type: list[V1beta2AllocatedDeviceStatus]
"""
self._devices = devices
@property
def reserved_for(self):
"""Gets the reserved_for of this V1beta2ResourceClaimStatus. # noqa: E501
ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 256 such reservations. This may get increased in the future, but not reduced. # noqa: E501
:return: The reserved_for of this V1beta2ResourceClaimStatus. # noqa: E501
:rtype: list[V1beta2ResourceClaimConsumerReference]
"""
return self._reserved_for
@reserved_for.setter
def reserved_for(self, reserved_for):
"""Sets the reserved_for of this V1beta2ResourceClaimStatus.
ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 256 such reservations. This may get increased in the future, but not reduced. # noqa: E501
:param reserved_for: The reserved_for of this V1beta2ResourceClaimStatus. # noqa: E501
:type: list[V1beta2ResourceClaimConsumerReference]
"""
self._reserved_for = reserved_for
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2ResourceClaimStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2ResourceClaimStatus):
return True
return self.to_dict() != other.to_dict()
| V1beta2ResourceClaimStatus |
python | pypa__pip | tests/lib/__init__.py | {
"start": 42822,
"end": 43414
} | class ____:
def pip(self, *args: str | pathlib.Path) -> InMemoryPipResult:
orig_stdout = sys.stdout
stdout = StringIO()
sys.stdout = stdout
try:
returncode = pip_entry_point([os.fspath(a) for a in args])
except SystemExit as e:
if isinstance(e.code, int):
returncode = e.code
elif e.code:
returncode = 1
else:
returncode = 0
finally:
sys.stdout = orig_stdout
return InMemoryPipResult(returncode, stdout.getvalue())
| InMemoryPip |
python | spack__spack | lib/spack/spack/modules/tcl.py | {
"start": 2370,
"end": 2615
} | class ____(BaseModuleFileWriter):
"""Writer class for tcl module files."""
default_template = "modules/modulefile.tcl"
modulerc_header = ["#%Module4.7"]
hide_cmd_format = "module-hide --soft --hidden-loaded %s"
| TclModulefileWriter |
python | dask__dask | dask/dataframe/tseries/resample.py | {
"start": 6542,
"end": 6604
} | class ____(ResampleReduction):
how = "median"
| ResampleMedian |
python | walkccc__LeetCode | solutions/2646. Minimize the Total Price of the Trips/2646.py | {
"start": 0,
"end": 1513
} | class ____:
def minimumTotalPrice(self, n: int, edges: list[list[int]], price: list[int],
trips: list[list[int]]) -> int:
graph = [[] for _ in range(n)]
for u, v in edges:
graph[u].append(v)
graph[v].append(u)
# count[i] := the number of times i is traversed
count = [0] * n
def dfsCount(u: int, prev: int, end: int, path: list[int]) -> None:
path.append(u)
if u == end:
for i in path:
count[i] += 1
return
for v in graph[u]:
if v != prev:
dfsCount(v, u, end, path)
path.pop()
for start, end in trips:
dfsCount(start, -1, end, [])
@functools.lru_cache(None)
def dfs(u: int, prev: int, parentHalved: bool) -> int:
"""
Returns the minimum price sum for the i-th node, where its parent is
halved parent or not halved not.
"""
sumWithFullNode = price[u] * count[u] + sum(dfs(v, u, False)
for v in graph[u]
if v != prev)
if parentHalved: # Can't halve this node if its parent was halved.
return sumWithFullNode
sumWithHalvedNode = (price[u] // 2) * count[u] + sum(dfs(v, u, True)
for v in graph[u]
if v != prev)
return min(sumWithFullNode, sumWithHalvedNode)
return dfs(0, -1, False)
| Solution |
python | pallets__werkzeug | src/werkzeug/wrappers/response.py | {
"start": 1044,
"end": 31166
} | class ____(_SansIOResponse):
"""Represents an outgoing WSGI HTTP response with body, status, and
headers. Has properties and methods for using the functionality
defined by various HTTP specs.
The response body is flexible to support different use cases. The
simple form is passing bytes, or a string which will be encoded as
UTF-8. Passing an iterable of bytes or strings makes this a
streaming response. A generator is particularly useful for building
a CSV file in memory or using SSE (Server Sent Events). A file-like
object is also iterable, although the
:func:`~werkzeug.utils.send_file` helper should be used in that
case.
The response object is itself a WSGI application callable. When
called (:meth:`__call__`) with ``environ`` and ``start_response``,
it will pass its status and headers to ``start_response`` then
return its body as an iterable.
.. code-block:: python
from werkzeug.wrappers.response import Response
def index():
return Response("Hello, World!")
def application(environ, start_response):
path = environ.get("PATH_INFO") or "/"
if path == "/":
response = index()
else:
response = Response("Not Found", status=404)
return response(environ, start_response)
:param response: The data for the body of the response. A string or
bytes, or tuple or list of strings or bytes, for a fixed-length
response, or any other iterable of strings or bytes for a
streaming response. Defaults to an empty body.
:param status: The status code for the response. Either an int, in
which case the default status message is added, or a string in
the form ``{code} {message}``, like ``404 Not Found``. Defaults
to 200.
:param headers: A :class:`~werkzeug.datastructures.Headers` object,
or a list of ``(key, value)`` tuples that will be converted to a
``Headers`` object.
:param mimetype: The mime type (content type without charset or
other parameters) of the response. If the value starts with
``text/`` (or matches some other special cases), the charset
will be added to create the ``content_type``.
:param content_type: The full content type of the response.
Overrides building the value from ``mimetype``.
:param direct_passthrough: Pass the response body directly through
as the WSGI iterable. This can be used when the body is a binary
file or other iterator of bytes, to skip some unnecessary
checks. Use :func:`~werkzeug.utils.send_file` instead of setting
this manually.
.. versionchanged:: 2.1
Old ``BaseResponse`` and mixin classes were removed.
.. versionchanged:: 2.0
Combine ``BaseResponse`` and mixins into a single ``Response``
class.
.. versionchanged:: 0.5
The ``direct_passthrough`` parameter was added.
"""
#: if set to `False` accessing properties on the response object will
#: not try to consume the response iterator and convert it into a list.
#:
#: .. versionadded:: 0.6.2
#:
#: That attribute was previously called `implicit_seqence_conversion`.
#: (Notice the typo). If you did use this feature, you have to adapt
#: your code to the name change.
implicit_sequence_conversion = True
#: If a redirect ``Location`` header is a relative URL, make it an
#: absolute URL, including scheme and domain.
#:
#: .. versionchanged:: 2.1
#: This is disabled by default, so responses will send relative
#: redirects.
#:
#: .. versionadded:: 0.8
autocorrect_location_header = False
#: Should this response object automatically set the content-length
#: header if possible? This is true by default.
#:
#: .. versionadded:: 0.8
automatically_set_content_length = True
#: The response body to send as the WSGI iterable. A list of strings
#: or bytes represents a fixed-length response, any other iterable
#: is a streaming response. Strings are encoded to bytes as UTF-8.
#:
#: Do not set to a plain string or bytes, that will cause sending
#: the response to be very inefficient as it will iterate one byte
#: at a time.
response: t.Iterable[str] | t.Iterable[bytes]
def __init__(
self,
response: t.Iterable[bytes] | bytes | t.Iterable[str] | str | None = None,
status: int | str | HTTPStatus | None = None,
headers: t.Mapping[str, str | t.Iterable[str]]
| t.Iterable[tuple[str, str]]
| None = None,
mimetype: str | None = None,
content_type: str | None = None,
direct_passthrough: bool = False,
) -> None:
super().__init__(
status=status,
headers=headers,
mimetype=mimetype,
content_type=content_type,
)
#: Pass the response body directly through as the WSGI iterable.
#: This can be used when the body is a binary file or other
#: iterator of bytes, to skip some unnecessary checks. Use
#: :func:`~werkzeug.utils.send_file` instead of setting this
#: manually.
self.direct_passthrough = direct_passthrough
self._on_close: list[t.Callable[[], t.Any]] = []
# we set the response after the headers so that if a class changes
# the charset attribute, the data is set in the correct charset.
if response is None:
self.response = []
elif isinstance(response, (str, bytes, bytearray)):
self.set_data(response)
else:
self.response = response
def call_on_close(self, func: t.Callable[[], t.Any]) -> t.Callable[[], t.Any]:
"""Adds a function to the internal list of functions that should
be called as part of closing down the response. Since 0.7 this
function also returns the function that was passed so that this
can be used as a decorator.
.. versionadded:: 0.6
"""
self._on_close.append(func)
return func
def __repr__(self) -> str:
if self.is_sequence:
body_info = f"{sum(map(len, self.iter_encoded()))} bytes"
else:
body_info = "streamed" if self.is_streamed else "likely-streamed"
return f"<{type(self).__name__} {body_info} [{self.status}]>"
    @classmethod
    def force_type(
        cls, response: Response, environ: WSGIEnvironment | None = None
    ) -> Response:
        """Enforce that the WSGI response is a response object of the current
        type. Werkzeug will use the :class:`Response` internally in many
        situations like the exceptions. If you call :meth:`get_response` on an
        exception you will get back a regular :class:`Response` object, even
        if you are using a custom subclass.
        This method can enforce a given response type, and it will also
        convert arbitrary WSGI callables into response objects if an environ
        is provided::
            # convert a Werkzeug response object into an instance of the
            # MyResponseClass subclass.
            response = MyResponseClass.force_type(response)
            # convert any WSGI application into a response object
            response = MyResponseClass.force_type(response, environ)
        This is especially useful if you want to post-process responses in
        the main dispatcher and use functionality provided by your subclass.
        Keep in mind that this will modify response objects in place if
        possible!
        :param response: a response object or wsgi application.
        :param environ: a WSGI environment object.
        :return: a response object.
        """
        if not isinstance(response, Response):
            # Only a WSGI callable can be converted, and running it needs
            # an environ to execute against.
            if environ is None:
                raise TypeError(
                    "cannot convert WSGI application into response"
                    " objects without an environ"
                )
            from ..test import run_wsgi_app
            response = Response(*run_wsgi_app(response, environ))
        # Re-brand the instance in place so its existing state is kept.
        response.__class__ = cls
        return response
    @classmethod
    def from_app(
        cls, app: WSGIApplication, environ: WSGIEnvironment, buffered: bool = False
    ) -> Response:
        """Create a new response object from an application output. This
        works best if you pass it an application that returns a generator all
        the time. Sometimes applications may use the `write()` callable
        returned by the `start_response` function. This tries to resolve such
        edge cases automatically. But if you don't get the expected output
        you should set `buffered` to `True` which enforces buffering.
        :param app: the WSGI application to execute.
        :param environ: the WSGI environment to execute against.
        :param buffered: set to `True` to enforce buffering.
        :return: a response object.
        """
        from ..test import run_wsgi_app
        # Execute the app and build a response from whatever pieces
        # run_wsgi_app returns.
        return cls(*run_wsgi_app(app, environ, buffered))
@t.overload
def get_data(self, as_text: t.Literal[False] = False) -> bytes: ...
@t.overload
def get_data(self, as_text: t.Literal[True]) -> str: ...
def get_data(self, as_text: bool = False) -> bytes | str:
"""The string representation of the response body. Whenever you call
this property the response iterable is encoded and flattened. This
can lead to unwanted behavior if you stream big data.
This behavior can be disabled by setting
:attr:`implicit_sequence_conversion` to `False`.
If `as_text` is set to `True` the return value will be a decoded
string.
.. versionadded:: 0.9
"""
self._ensure_sequence()
rv = b"".join(self.iter_encoded())
if as_text:
return rv.decode()
return rv
def set_data(self, value: bytes | str) -> None:
"""Sets a new string as response. The value must be a string or
bytes. If a string is set it's encoded to the charset of the
response (utf-8 by default).
.. versionadded:: 0.9
"""
if isinstance(value, str):
value = value.encode()
self.response = [value]
if self.automatically_set_content_length:
self.headers["Content-Length"] = str(len(value))
data = property(
get_data,
set_data,
doc="A descriptor that calls :meth:`get_data` and :meth:`set_data`.",
)
def calculate_content_length(self) -> int | None:
"""Returns the content length if available or `None` otherwise."""
try:
self._ensure_sequence()
except RuntimeError:
return None
return sum(len(x) for x in self.iter_encoded())
    def _ensure_sequence(self, mutable: bool = False) -> None:
        """This method can be called by methods that need a sequence. If
        `mutable` is true, it will also ensure that the response sequence
        is a standard Python list.
        :raises RuntimeError: if the body cannot be buffered because the
            response is in direct passthrough mode or implicit conversion
            was disabled.
        .. versionadded:: 0.6
        """
        if self.is_sequence:
            # if we need a mutable object, we ensure it's a list.
            if mutable and not isinstance(self.response, list):
                self.response = list(self.response)  # type: ignore
            return
        if self.direct_passthrough:
            raise RuntimeError(
                "Attempted implicit sequence conversion but the"
                " response object is in direct passthrough mode."
            )
        if not self.implicit_sequence_conversion:
            raise RuntimeError(
                "The response object required the iterable to be a"
                " sequence, but the implicit conversion was disabled."
                " Call make_sequence() yourself."
            )
        self.make_sequence()
def make_sequence(self) -> None:
"""Converts the response iterator in a list. By default this happens
automatically if required. If `implicit_sequence_conversion` is
disabled, this method is not automatically called and some properties
might raise exceptions. This also encodes all the items.
.. versionadded:: 0.6
"""
if not self.is_sequence:
# if we consume an iterable we have to ensure that the close
# method of the iterable is called if available when we tear
# down the response
close = getattr(self.response, "close", None)
self.response = list(self.iter_encoded())
if close is not None:
self.call_on_close(close)
    def iter_encoded(self) -> t.Iterator[bytes]:
        """Iter the response encoded with the encoding of the response.
        If the response object is invoked as WSGI application the return
        value of this method is used as application iterator unless
        :attr:`direct_passthrough` was activated.
        :return: an iterator of ``bytes`` chunks.
        """
        # Encode in a separate function so that self.response is fetched
        # early. This allows us to wrap the response with the return
        # value from get_app_iter or iter_encoded.
        # NOTE: delegates to the module-level ``_iter_encoded`` helper.
        return _iter_encoded(self.response)
@property
def is_streamed(self) -> bool:
"""If the response is streamed (the response is not an iterable with
a length information) this property is `True`. In this case streamed
means that there is no information about the number of iterations.
This is usually `True` if a generator is passed to the response object.
This is useful for checking before applying some sort of post
filtering that should not take place for streamed responses.
"""
try:
len(self.response) # type: ignore
except (TypeError, AttributeError):
return True
return False
@property
def is_sequence(self) -> bool:
"""If the iterator is buffered, this property will be `True`. A
response object will consider an iterator to be buffered if the
response attribute is a list or tuple.
.. versionadded:: 0.6
"""
return isinstance(self.response, (tuple, list))
def close(self) -> None:
"""Close the wrapped response if possible. You can also use the object
in a with statement which will automatically close it.
.. versionadded:: 0.9
Can now be used in a with statement.
"""
if hasattr(self.response, "close"):
self.response.close()
for func in self._on_close:
func()
    def __enter__(self) -> Response:
        """Enter a ``with`` block; the response is closed on exit."""
        return self
    def __exit__(self, exc_type, exc_value, tb):  # type: ignore
        """Close the response when leaving a ``with`` block; returns
        ``None`` so exceptions propagate."""
        self.close()
def freeze(self) -> None:
"""Make the response object ready to be pickled. Does the
following:
* Buffer the response into a list, ignoring
:attr:`implicity_sequence_conversion` and
:attr:`direct_passthrough`.
* Set the ``Content-Length`` header.
* Generate an ``ETag`` header if one is not already set.
.. versionchanged:: 2.1
Removed the ``no_etag`` parameter.
.. versionchanged:: 2.0
An ``ETag`` header is always added.
.. versionchanged:: 0.6
The ``Content-Length`` header is set.
"""
# Always freeze the encoded response body, ignore
# implicit_sequence_conversion and direct_passthrough.
self.response = list(self.iter_encoded())
self.headers["Content-Length"] = str(sum(map(len, self.response)))
self.add_etag()
    def get_wsgi_headers(self, environ: WSGIEnvironment) -> Headers:
        """This is automatically called right before the response is started
        and returns headers modified for the given environment. It returns a
        copy of the headers from the response with some modifications applied
        if necessary.
        For example the location header (if present) is joined with the root
        URL of the environment. Also the content length is automatically set
        to zero here for certain status codes.
        .. versionchanged:: 0.6
            Previously that function was called `fix_headers` and modified
            the response object in place. Also since 0.6, IRIs in location
            and content-location headers are handled properly.
            Also starting with 0.6, Werkzeug will attempt to set the content
            length if it is able to figure it out on its own. This is the
            case if all the strings in the response iterable are already
            encoded and the iterable is buffered.
        :param environ: the WSGI environment of the request.
        :return: returns a new :class:`~werkzeug.datastructures.Headers`
            object.
        """
        # Work on a copy; the response's own headers are never mutated here.
        headers = Headers(self.headers)
        location: str | None = None
        content_location: str | None = None
        content_length: str | int | None = None
        status = self.status_code
        # iterate over the headers to find all values in one go. Because
        # get_wsgi_headers is used each response that gives us a tiny
        # speedup.
        for key, value in headers:
            ikey = key.lower()
            if ikey == "location":
                location = value
            elif ikey == "content-location":
                content_location = value
            elif ikey == "content-length":
                content_length = value
        if location is not None:
            location = iri_to_uri(location)
            if self.autocorrect_location_header:
                # Make the location header an absolute URL.
                current_url = get_current_url(environ, strip_querystring=True)
                current_url = iri_to_uri(current_url)
                location = urljoin(current_url, location)
            headers["Location"] = location
        # make sure the content location is a URL
        if content_location is not None:
            headers["Content-Location"] = iri_to_uri(content_location)
        if 100 <= status < 200 or status == 204:
            # Per section 3.3.2 of RFC 7230, "a server MUST NOT send a
            # Content-Length header field in any response with a status
            # code of 1xx (Informational) or 204 (No Content)."
            headers.remove("Content-Length")
        elif status == 304:
            # 304 responses must not carry entity headers.
            remove_entity_headers(headers)
        # if we can determine the content length automatically, we
        # should try to do that. But only if this does not involve
        # flattening the iterator or encoding of strings in the
        # response. We however should not do that if we have a 304
        # response.
        if (
            self.automatically_set_content_length
            and self.is_sequence
            and content_length is None
            and status not in (204, 304)
            and not (100 <= status < 200)
        ):
            content_length = sum(len(x) for x in self.iter_encoded())
            headers["Content-Length"] = str(content_length)
        return headers
    def get_app_iter(self, environ: WSGIEnvironment) -> t.Iterable[bytes]:
        """Returns the application iterator for the given environ. Depending
        on the request method and the current status code the return value
        might be an empty response rather than the one from the response.
        If the request method is `HEAD` or the status code is in a range
        where the HTTP specification requires an empty response, an empty
        iterable is returned.
        .. versionadded:: 0.6
        :param environ: the WSGI environment of the request.
        :return: a response iterable.
        """
        status = self.status_code
        if (
            environ["REQUEST_METHOD"] == "HEAD"
            or 100 <= status < 200
            or status in (204, 304)
        ):
            iterable: t.Iterable[bytes] = ()
        elif self.direct_passthrough:
            # Passthrough hands the raw iterable to the server without the
            # ClosingIterator wrapper below.
            return self.response  # type: ignore
        else:
            iterable = self.iter_encoded()
        return ClosingIterator(iterable, self.close)
    def get_wsgi_response(
        self, environ: WSGIEnvironment
    ) -> tuple[t.Iterable[bytes], str, list[tuple[str, str]]]:
        """Returns the final WSGI response as tuple. The first item in
        the tuple is the application iterator, the second the status and
        the third the list of headers. The response returned is created
        specially for the given environment. For example if the request
        method in the WSGI environment is ``'HEAD'`` the response will
        be empty and only the headers and status code will be present.
        .. versionadded:: 0.6
        :param environ: the WSGI environment of the request.
        :return: an ``(app_iter, status, headers)`` tuple.
        """
        # get_wsgi_headers may adjust e.g. Content-Length for this environ.
        headers = self.get_wsgi_headers(environ)
        app_iter = self.get_app_iter(environ)
        return app_iter, self.status, headers.to_wsgi_list()
    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        """Process this response as WSGI application.
        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        :return: an application iterator
        """
        app_iter, status, headers = self.get_wsgi_response(environ)
        # WSGI protocol: announce status/headers, then return the body iter.
        start_response(status, headers)
        return app_iter
# JSON
#: A module or other object that has ``dumps`` and ``loads``
#: functions that match the API of the built-in :mod:`json` module.
json_module = json
    @property
    def json(self) -> t.Any | None:
        """The parsed JSON data if :attr:`mimetype` indicates JSON
        (:mimetype:`application/json`, see :attr:`is_json`).
        Calls :meth:`get_json` with default arguments.
        """
        # Shorthand for get_json(force=False, silent=False).
        return self.get_json()
@t.overload
def get_json(self, force: bool = ..., silent: t.Literal[False] = ...) -> t.Any: ...
@t.overload
def get_json(self, force: bool = ..., silent: bool = ...) -> t.Any | None: ...
def get_json(self, force: bool = False, silent: bool = False) -> t.Any | None:
"""Parse :attr:`data` as JSON. Useful during testing.
If the mimetype does not indicate JSON
(:mimetype:`application/json`, see :attr:`is_json`), this
returns ``None``.
Unlike :meth:`Request.get_json`, the result is not cached.
:param force: Ignore the mimetype and always try to parse JSON.
:param silent: Silence parsing errors and return ``None``
instead.
"""
if not (force or self.is_json):
return None
data = self.get_data()
try:
return self.json_module.loads(data)
except ValueError:
if not silent:
raise
return None
    # Stream
    @cached_property
    def stream(self) -> ResponseStream:
        """The response iterable as write-only stream."""
        # cached_property: the same wrapper instance is reused per response.
        return ResponseStream(self)
    def _wrap_range_response(self, start: int, length: int) -> None:
        """Wrap existing Response in case of Range Request context."""
        # Only wrap when a partial-content status was actually set.
        if self.status_code == 206:
            self.response = _RangeWrapper(self.response, start, length)  # type: ignore
    def _is_range_request_processable(self, environ: WSGIEnvironment) -> bool:
        """Return ``True`` if `Range` header is present and if underlying
        resource is considered unchanged when compared with `If-Range` header.
        """
        # Either there is no If-Range precondition, or the resource is
        # unchanged relative to it; a Range header must also be present.
        return (
            "HTTP_IF_RANGE" not in environ
            or not is_resource_modified(
                environ,
                self.headers.get("etag"),
                None,
                self.headers.get("last-modified"),
                ignore_if_range=False,
            )
        ) and "HTTP_RANGE" in environ
    def _process_range_request(
        self,
        environ: WSGIEnvironment,
        complete_length: int | None,
        accept_ranges: bool | str,
    ) -> bool:
        """Handle Range Request related headers (RFC7233). If `Accept-Ranges`
        header is valid, and Range Request is processable, we set the headers
        as described by the RFC, and wrap the underlying response in a
        RangeWrapper.
        Returns ``True`` if Range Request can be fulfilled, ``False`` otherwise.
        :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
            if `Range` header could not be parsed or satisfied.
        .. versionchanged:: 2.0
            Returns ``False`` if the length is 0.
        """
        from ..exceptions import RequestedRangeNotSatisfiable
        if (
            not accept_ranges
            or complete_length is None
            or complete_length == 0
            or not self._is_range_request_processable(environ)
        ):
            return False
        if accept_ranges is True:
            # True means "ranges allowed" with the default byte unit.
            accept_ranges = "bytes"
        parsed_range = parse_range_header(environ.get("HTTP_RANGE"))
        if parsed_range is None:
            raise RequestedRangeNotSatisfiable(complete_length)
        range_tuple = parsed_range.range_for_length(complete_length)
        content_range_header = parsed_range.to_content_range_header(complete_length)
        if range_tuple is None or content_range_header is None:
            raise RequestedRangeNotSatisfiable(complete_length)
        content_length = range_tuple[1] - range_tuple[0]
        self.headers["Content-Length"] = str(content_length)
        self.headers["Accept-Ranges"] = accept_ranges
        self.content_range = content_range_header
        # Switch to 206 Partial Content and serve only the requested slice.
        self.status_code = 206
        self._wrap_range_response(range_tuple[0], content_length)
        return True
    def make_conditional(
        self,
        request_or_environ: WSGIEnvironment | Request,
        accept_ranges: bool | str = False,
        complete_length: int | None = None,
    ) -> Response:
        """Make the response conditional to the request. This method works
        best if an etag was defined for the response already. The `add_etag`
        method can be used to do that. If called without etag just the date
        header is set.
        This does nothing if the request method in the request or environ is
        anything but GET or HEAD.
        For optimal performance when handling range requests, it's recommended
        that your response data object implements `seekable`, `seek` and `tell`
        methods as described by :py:class:`io.IOBase`. Objects returned by
        :meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods.
        It does not remove the body of the response because that's something
        the :meth:`__call__` function does for us automatically.
        Returns self so that you can do ``return resp.make_conditional(req)``
        but modifies the object in-place.
        :param request_or_environ: a request object or WSGI environment to be
                                   used to make the response conditional
                                   against.
        :param accept_ranges: This parameter dictates the value of
                              `Accept-Ranges` header. If ``False`` (default),
                              the header is not set. If ``True``, it will be set
                              to ``"bytes"``. If it's a string, it will use this
                              value.
        :param complete_length: Will be used only in valid Range Requests.
                                It will set `Content-Range` complete length
                                value and compute `Content-Length` real value.
                                This parameter is mandatory for successful
                                Range Requests completion.
        :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
            if `Range` header could not be parsed or satisfied.
        .. versionchanged:: 2.0
            Range processing is skipped if length is 0 instead of
            raising a 416 Range Not Satisfiable error.
        """
        environ = _get_environ(request_or_environ)
        if environ["REQUEST_METHOD"] in ("GET", "HEAD"):
            # if the date is not in the headers, add it now. We however
            # will not override an already existing header. Unfortunately
            # this header will be overridden by many WSGI servers including
            # wsgiref.
            if "date" not in self.headers:
                self.headers["Date"] = http_date()
            is206 = self._process_range_request(environ, complete_length, accept_ranges)
            if not is206 and not is_resource_modified(
                environ,
                self.headers.get("etag"),
                None,
                self.headers.get("last-modified"),
            ):
                # If-Match present but resource unchanged -> precondition
                # failed (412); otherwise a plain 304 Not Modified.
                if parse_etags(environ.get("HTTP_IF_MATCH")):
                    self.status_code = 412
                else:
                    self.status_code = 304
            if (
                self.automatically_set_content_length
                and "content-length" not in self.headers
            ):
                length = self.calculate_content_length()
                if length is not None:
                    self.headers["Content-Length"] = str(length)
        return self
    def add_etag(self, overwrite: bool = False, weak: bool = False) -> None:
        """Add an etag for the current response if there is none yet.
        :param overwrite: replace an existing ``ETag`` header.
        :param weak: mark the generated etag as weak.
        .. versionchanged:: 2.0
            SHA-1 is used to generate the value. MD5 may not be
            available in some environments.
        """
        if overwrite or "etag" not in self.headers:
            self.set_etag(generate_etag(self.get_data()), weak)
| Response |
python | doocs__leetcode | solution/3100-3199/3183.The Number of Ways to Make the Sum/Solution.py | {
"start": 0,
"end": 419
} | class ____:
def numberOfWays(self, n: int) -> int:
mod = 10**9 + 7
coins = [1, 2, 6]
f = [0] * (n + 1)
f[0] = 1
for x in coins:
for j in range(x, n + 1):
f[j] = (f[j] + f[j - x]) % mod
ans = f[n]
if n >= 4:
ans = (ans + f[n - 4]) % mod
if n >= 8:
ans = (ans + f[n - 8]) % mod
return ans
| Solution |
python | pypa__warehouse | dev/flake8/checkers.py | {
"start": 857,
"end": 6513
} | class ____(ast.NodeVisitor):
    def __init__(self, filename: str) -> None:
        """Collect ``(line, col, message)`` lint errors for *filename*."""
        self.errors: list[tuple[int, int, str]] = []
        self.filename = filename
    def check_for_backref(self, node) -> None:
        """Record a WH002 error for any ``relationship(..., backref=...)``
        call on the right-hand side of the assignment *node*."""
        def _check_keywords(keywords: list[ast.keyword]) -> None:
            for kw in keywords:
                if kw.arg == "backref":
                    self.errors.append((kw.lineno, kw.col_offset, WH002_msg))
        # Nodes can be either Attribute or Name, and depending on the type
        # of node, the value.func can be either an attr or an id.
        # TODO: This is aching for a better way to do this.
        if isinstance(node.value, ast.Call):
            if (
                isinstance(node.value.func, ast.Attribute)
                and node.value.func.attr == "relationship"
                and isinstance(node.value.keywords, list)
            ):
                _check_keywords(node.value.keywords)
            elif (
                isinstance(node.value.func, ast.Name)
                and node.value.func.id == "relationship"
                and isinstance(node.value.keywords, list)
            ):
                _check_keywords(node.value.keywords)
    def template_exists(self, template_name: str) -> bool:
        """Return ``True`` if *template_name* resolves to a file on disk.
        Supports ``package:resource`` references as well as names looked up
        relative to the Jinja2 search paths (with a hard-coded fallback).
        """
        repo_root = Path(__file__).parent.parent.parent
        # If the template name is a full package path, check if it exists
        # in the package's templates directory.
        if ":" in template_name:
            pkg, resource = template_name.split(":", 1)
            pkg_path = repo_root.joinpath(*pkg.split("."))
            resource_path = pkg_path / resource
            return resource_path.is_file()
        settings = {}
        # TODO: Replace with actual configuration retrieval if it makes sense
        # Get Jinja2 search paths from warehouse config
        # settings = configure().get_settings()
        search_paths = settings.get("jinja2.searchpath", [])
        # If not set, fallback to default templates path
        if not search_paths:
            search_paths = [
                str(repo_root / "warehouse" / "templates"),
                str(repo_root / "warehouse" / "admin" / "templates"),
            ]
        for path in search_paths:
            if Path(path, template_name).is_file():
                return True
        return False
    def visit_Name(self, node: ast.Name) -> None:  # noqa: N802
        """Flag bare references to ``urlparse`` (WH001)."""
        if node.id == "urlparse":
            self.errors.append((node.lineno, node.col_offset, WH001_msg))
        self.generic_visit(node)
def visit_Attribute(self, node: ast.Attribute) -> None: # noqa: N802
if (
node.attr == "urlparse"
and isinstance(node.value, ast.Attribute)
and node.value.value.id == "urllib"
):
self.errors.append((node.lineno, node.col_offset, WH001_msg))
self.generic_visit(node)
    def visit_Assign(self, node: ast.Assign) -> None:  # noqa: N802
        # Plain assignments may bind a relationship() with a backref.
        self.check_for_backref(node)
        self.generic_visit(node)
    def visit_AnnAssign(self, node: ast.AnnAssign) -> None:  # noqa: N802
        # Annotated assignments get the same backref check.
        self.check_for_backref(node)
        self.generic_visit(node)
def is_metrics_method_call(self, node: ast.Call) -> bool:
"""Check if this is a call to a metrics method."""
if not isinstance(node.func, ast.Attribute):
return False
# Check for metrics.<method>()
if isinstance(node.func.value, ast.Name) and node.func.value.id == "metrics":
return True
# Check for request.metrics.<method>() or any_obj.metrics.<method>()
if (
isinstance(node.func.value, ast.Attribute)
and node.func.value.attr == "metrics"
):
return True
return False
    def check_metrics_tags(self, node: ast.Call) -> None:
        """Check that tags parameter in metrics calls is a list (WH004)."""
        if not self.is_metrics_method_call(node):
            return
        # Check keyword arguments for tags=
        for kw in node.keywords:
            if kw.arg == "tags":
                # tags should be None, a variable (Name), or a List
                # Flag if it's a literal non-list type (string, tuple, dict, set, etc.)
                if isinstance(kw.value, (ast.Constant, ast.Tuple, ast.Dict, ast.Set)):
                    # Allow None
                    if isinstance(kw.value, ast.Constant) and kw.value.value is None:
                        continue
                    self.errors.append(
                        (kw.value.lineno, kw.value.col_offset, WH004_msg)
                    )
    def visit_Call(self, node: ast.Call) -> None:  # noqa: N802
        """Apply the metrics ``tags=`` check to every call expression."""
        self.check_metrics_tags(node)
        self.generic_visit(node)
    def visit_FunctionDef(self, node: ast.FunctionDef) -> None:  # noqa: N802
        """For ``@view_config(renderer=...)`` decorators, verify the named
        template file exists (WH003); built-in renderers are exempt."""
        for decorator in node.decorator_list:
            if (
                isinstance(decorator, ast.Call)
                and getattr(decorator.func, "id", None) == "view_config"
            ):
                for kw in decorator.keywords:
                    if (
                        kw.arg == "renderer"
                        and isinstance(kw.value, ast.Constant)
                        # TODO: Is there a "string-that-looks-like-a-filename"?
                        and kw.value.value not in ["json", "xmlrpc", "string"]
                    ):
                        if not self.template_exists(kw.value.value):
                            self.errors.append(
                                (kw.value.lineno, kw.value.col_offset, WH003_msg)
                            )
        self.generic_visit(node)
| WarehouseVisitor |
python | facebook__pyre-check | client/tests/daemon_socket_test.py | {
"start": 467,
"end": 5144
} | class ____(testslide.TestCase):
    def test_get_md5_short(self) -> None:
        """Hashing is deterministic, distinguishes projects, and is
        truncated to a fixed length."""
        # Test different servers are differentiable
        project_root = Path("project_root")
        relative_local_root_a = Path("my/project")
        relative_local_root_b = Path("my/otherproject")
        md5_hash_a = get_md5_short(
            (str(project_root) + "//" + str(relative_local_root_a))
        )
        md5_hash_a_recomputed = get_md5_short(
            (str(project_root) + "//" + str(relative_local_root_a))
        )
        md5_hash_b = get_md5_short(
            (str(project_root) + "//" + str(relative_local_root_b))
        )
        self.assertTrue(md5_hash_a == md5_hash_a_recomputed)
        self.assertFalse(md5_hash_a == md5_hash_b)
        # Test socket name length
        project_root = Path("project_root" * 100)
        relative_local_root = Path("my/project")
        md5_hash = get_md5_short((str(project_root) + "//" + str(relative_local_root)))
        self.assertTrue(len(md5_hash) == HASH_LENGTH)
    def test_get_project_identifier(self) -> None:
        """The identifier is the root, plus ``//relative`` when local."""
        project_root = Path("project_root")
        self.assertEqual(
            get_project_identifier(project_root, relative_local_root=None),
            str(project_root),
        )
        self.assertEqual(
            get_project_identifier(project_root, relative_local_root="relative"),
            str(project_root) + "//relative",
        )
    def _assert_socket_path(
        self,
        socket_root: Path,
        project_root: Path,
        relative_local_root: Optional[str],
        flavor: PyreFlavor = PyreFlavor.CLASSIC,
        suffix: str = "",
    ) -> None:
        """Helper: expected socket file is ``pyre_server_<md5><suffix>.sock``."""
        md5_hash = get_md5_short(
            get_project_identifier(project_root, relative_local_root)
        )
        self.assertEqual(
            _get_socket_path_in_root(
                socket_root,
                get_project_identifier(project_root, relative_local_root),
                flavor,
            ),
            socket_root / f"pyre_server_{md5_hash}{suffix}.sock",
        )
    def test_get_socket_path(self) -> None:
        """Socket paths derive from project root, optional local root, and
        the flavor-specific suffix."""
        socket_root = Path("socket_root")
        # With local directory
        self._assert_socket_path(
            socket_root=socket_root,
            project_root=Path("project_root"),
            relative_local_root="my/project",
        )
        # No local directory
        self._assert_socket_path(
            socket_root=socket_root,
            project_root=Path("project_root"),
            relative_local_root=None,
        )
        # No local directory
        self._assert_socket_path(
            socket_root=socket_root,
            project_root=Path("project_root"),
            relative_local_root=None,
            flavor=PyreFlavor.SHADOW,
            suffix="__shadow",
        )
    def test_find_socket_files(self) -> None:
        """find_socket_files discovers every created socket, across
        projects and flavors."""
        with tempfile.TemporaryDirectory(dir="/tmp") as socket_root:
            socket_root_path = Path(socket_root)
            socket_a = _get_socket_path_in_root(
                socket_root_path,
                project_identifier="a",
                flavor=PyreFlavor.CLASSIC,
            )
            socket_a.touch()
            self.assertEqual(
                set(find_socket_files(socket_root_path)),
                {socket_a},
            )
            socket_b = _get_socket_path_in_root(
                socket_root_path,
                project_identifier="b//relative_to_b",
                flavor=PyreFlavor.CLASSIC,
            )
            socket_b.touch()
            self.assertEqual(
                set(find_socket_files(socket_root_path)),
                {socket_a, socket_b},
            )
            socket_c = _get_socket_path_in_root(
                socket_root_path,
                project_identifier="c",
                flavor=PyreFlavor.SHADOW,
            )
            socket_c.touch()
            self.assertEqual(
                set(find_socket_files(socket_root_path)),
                {socket_a, socket_b, socket_c},
            )
    def test_no_flavor_leads_to_too_long_name(self) -> None:
        # This isn't really a unit test of functionality per se; it is a
        # sanity check to make sure that PyreFlavor never leads to
        # socket name too long to be instantiated.
        for flavor in PyreFlavor:
            path = _get_socket_path_in_root(
                socket_root=Path("/dummy/socket/root"),
                project_identifier="dummy_project_identifier",
                flavor=flavor,
            )
            self.assertTrue(
                len(str(path)) < 100,
                msg=f"Path {path} is too long for a socket path",
            )
| SocketTest |
python | plotly__plotly.py | plotly/graph_objs/waterfall/_decreasing.py | {
"start": 233,
"end": 2458
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "waterfall"
_path_str = "waterfall.decreasing"
_valid_props = {"marker"}
    @property
    def marker(self):
        """
        The 'marker' property is an instance of Marker
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.waterfall.decreasing.Marker`
          - A dict of string/value properties that will be passed
            to the Marker constructor
        Returns
        -------
        plotly.graph_objs.waterfall.decreasing.Marker
        """
        return self["marker"]
    @marker.setter
    def marker(self, val):
        # Assignment goes through the base hierarchy's item protocol.
        self["marker"] = val
    @property
    def _prop_descriptions(self):
        # Human-readable property summary used in generated docstrings.
        return """\
        marker
            :class:`plotly.graph_objects.waterfall.decreasing.Marke
            r` instance or dict with compatible properties
        """
    def __init__(self, arg=None, marker=None, **kwargs):
        """
        Construct a new Decreasing object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.waterfall.Decreasing`
        marker
            :class:`plotly.graph_objects.waterfall.decreasing.Marke
            r` instance or dict with compatible properties
        Returns
        -------
        Decreasing
        """
        super().__init__("decreasing")
        # NOTE(review): _parent appears to be passed internally when the
        # object is attached to an existing parent; construction stops here.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so the caller's dict is not mutated.
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.waterfall.Decreasing
constructor must be a dict or
an instance of :class:`plotly.graph_objs.waterfall.Decreasing`""")
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        self._set_property("marker", arg, marker)
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
| Decreasing |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_mutation.py | {
"start": 383,
"end": 5548
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'apply_configuration': 'V1beta1ApplyConfiguration',
'json_patch': 'V1beta1JSONPatch',
'patch_type': 'str'
}
attribute_map = {
'apply_configuration': 'applyConfiguration',
'json_patch': 'jsonPatch',
'patch_type': 'patchType'
}
    def __init__(self, apply_configuration=None, json_patch=None, patch_type=None, local_vars_configuration=None):  # noqa: E501
        """V1beta1Mutation - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._apply_configuration = None
        self._json_patch = None
        self._patch_type = None
        self.discriminator = None
        if apply_configuration is not None:
            self.apply_configuration = apply_configuration
        if json_patch is not None:
            self.json_patch = json_patch
        # patch_type is required; the property setter validates non-None.
        self.patch_type = patch_type
    @property
    def apply_configuration(self):
        """Gets the apply_configuration of this V1beta1Mutation.  # noqa: E501
        :return: The apply_configuration of this V1beta1Mutation.  # noqa: E501
        :rtype: V1beta1ApplyConfiguration
        """
        return self._apply_configuration
    @apply_configuration.setter
    def apply_configuration(self, apply_configuration):
        """Sets the apply_configuration of this V1beta1Mutation.
        :param apply_configuration: The apply_configuration of this V1beta1Mutation.  # noqa: E501
        :type: V1beta1ApplyConfiguration
        """
        self._apply_configuration = apply_configuration
    @property
    def json_patch(self):
        """Gets the json_patch of this V1beta1Mutation.  # noqa: E501
        :return: The json_patch of this V1beta1Mutation.  # noqa: E501
        :rtype: V1beta1JSONPatch
        """
        return self._json_patch
    @json_patch.setter
    def json_patch(self, json_patch):
        """Sets the json_patch of this V1beta1Mutation.
        :param json_patch: The json_patch of this V1beta1Mutation.  # noqa: E501
        :type: V1beta1JSONPatch
        """
        self._json_patch = json_patch
    @property
    def patch_type(self):
        """Gets the patch_type of this V1beta1Mutation.  # noqa: E501
        patchType indicates the patch strategy used. Allowed values are \"ApplyConfiguration\" and \"JSONPatch\". Required.  # noqa: E501
        :return: The patch_type of this V1beta1Mutation.  # noqa: E501
        :rtype: str
        """
        return self._patch_type
    @patch_type.setter
    def patch_type(self, patch_type):
        """Sets the patch_type of this V1beta1Mutation.
        patchType indicates the patch strategy used. Allowed values are \"ApplyConfiguration\" and \"JSONPatch\". Required.  # noqa: E501
        :param patch_type: The patch_type of this V1beta1Mutation.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and patch_type is None:  # noqa: E501
            raise ValueError("Invalid value for `patch_type`, must not be `None`")  # noqa: E501
        self._patch_type = patch_type
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models (anything exposing to_dict),
        # including models held inside lists and dict values.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality compares serialized dict forms, not object identity.
        if not isinstance(other, V1beta1Mutation):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1beta1Mutation):
            return True
        return self.to_dict() != other.to_dict()
| V1beta1Mutation |
python | PrefectHQ__prefect | src/prefect/runner/storage.py | {
"start": 28592,
"end": 33647
} | class ____:
"""
Sets the working directory in the local filesystem.
Parameters:
Path: Local file path to set the working directory for the flow
Examples:
Sets the working directory for the local path to the flow:
```python
from prefect.runner.storage import Localstorage
storage = LocalStorage(
path="/path/to/local/flow_directory",
)
```
"""
def __init__(
self,
path: str,
pull_interval: Optional[int] = None,
):
self._path = Path(path).resolve()
self._logger = get_logger("runner.storage.local-storage")
self._storage_base_path = Path.cwd()
self._pull_interval = pull_interval
@property
def destination(self) -> Path:
return self._path
def set_base_path(self, path: Path) -> None:
self._storage_base_path = path
@property
def pull_interval(self) -> Optional[int]:
return self._pull_interval
async def pull_code(self) -> None:
# Local storage assumes the code already exists on the local filesystem
# and does not need to be pulled from a remote location
pass
def to_pull_step(self) -> dict[str, Any]:
"""
Returns a dictionary representation of the storage object that can be
used as a deployment pull step.
"""
step = {
"prefect.deployments.steps.set_working_directory": {
"directory": str(self.destination)
}
}
return step
def __eq__(self, __value: Any) -> bool:
if isinstance(__value, LocalStorage):
return self._path == __value._path
return False
def __repr__(self) -> str:
return f"LocalStorage(path={self._path!r})"
def create_storage_from_source(
source: str, pull_interval: Optional[int] = 60
) -> RunnerStorage:
"""
Creates a storage object from a URL.
Args:
url: The URL to create a storage object from. Supports git and `fsspec`
URLs.
pull_interval: The interval at which to pull contents from remote storage to
local storage
Returns:
RunnerStorage: A runner storage compatible object
"""
logger = get_logger("runner.storage")
parsed_source = urlparse(source)
if parsed_source.scheme == "git" or parsed_source.path.endswith(".git"):
return GitRepository(url=source, pull_interval=pull_interval)
elif parsed_source.scheme in ("file", "local"):
source_path = source.split("://", 1)[-1]
return LocalStorage(path=source_path, pull_interval=pull_interval)
elif parsed_source.scheme in fsspec.available_protocols():
return RemoteStorage(url=source, pull_interval=pull_interval)
else:
logger.debug("No valid fsspec protocol found for URL, assuming local storage.")
return LocalStorage(path=source, pull_interval=pull_interval)
def _format_token_from_credentials(
netloc: str,
credentials: dict[str, Any] | GitCredentials,
block: Block | None = None,
) -> str:
"""
Formats the credentials block for the git provider.
If the block implements _GitCredentialsFormatter protocol, delegates to it.
Otherwise, uses generic formatting for plain dict credentials.
Args:
netloc: The network location (hostname) of the git repository
credentials: Dictionary containing credential information
block: Optional Block object that may implement the formatter protocol
"""
if block is not None and isinstance(block, _GitCredentialsFormatter):
# Reconstruct full URL for context (scheme doesn't matter for formatting)
url = f"https://{netloc}"
return block.format_git_credentials(url)
username = credentials.get("username") if credentials else None
password = credentials.get("password") if credentials else None
token = credentials.get("token") if credentials else None
access_token = credentials.get("access_token") if credentials else None
user_provided_token: str | Secret[str] | None = access_token or token or password
if isinstance(user_provided_token, Secret):
user_provided_token = user_provided_token.get()
if not user_provided_token:
raise ValueError(
"Please provide a `token` or `password` in your Credentials block to clone"
" a repo."
)
if username:
return f"{username}:{user_provided_token}"
# Fallback for plain dict credentials without a block
return user_provided_token
def _strip_auth_from_url(url: str) -> str:
parsed = urlparse(url)
# Construct a new netloc without the auth info
netloc = parsed.hostname
if parsed.port and netloc:
netloc += f":{parsed.port}"
# Build the sanitized URL
return urlunparse(
(
parsed.scheme,
netloc,
parsed.path,
parsed.params,
parsed.query,
parsed.fragment,
)
)
| LocalStorage |
python | matplotlib__matplotlib | lib/matplotlib/_mathtext.py | {
"start": 27374,
"end": 27849
} | class ____(DejaVuFonts):
"""
A font handling class for the DejaVu Serif fonts
If a glyph is not found it will fallback to Stix Serif
"""
_fontmap = {
'rm': 'DejaVu Serif',
'it': 'DejaVu Serif:italic',
'bf': 'DejaVu Serif:weight=bold',
'bfit': 'DejaVu Serif:italic:bold',
'sf': 'DejaVu Sans',
'tt': 'DejaVu Sans Mono',
'ex': 'DejaVu Serif Display',
0: 'DejaVu Serif',
}
| DejaVuSerifFonts |
python | realpython__materials | hashtable/05_separate_chaining/hashtable.py | {
"start": 137,
"end": 3385
} | class ____:
@classmethod
def from_dict(cls, dictionary, capacity=None):
hash_table = cls(capacity or len(dictionary))
for key, value in dictionary.items():
hash_table[key] = value
return hash_table
def __init__(self, capacity=8, load_factor_threshold=0.6):
if capacity < 1:
raise ValueError("Capacity must be a positive number")
if not (0 < load_factor_threshold <= 1):
raise ValueError("Load factor must be a number between (0, 1]")
self._buckets = [deque() for _ in range(capacity)]
self._load_factor_threshold = load_factor_threshold
def __len__(self):
return len(self.pairs)
def __iter__(self):
yield from self.keys
def __delitem__(self, key):
match self._find(key):
case bucket, index, _:
del bucket[index]
case _:
raise KeyError(key)
def __setitem__(self, key, value):
if self.load_factor >= self._load_factor_threshold:
self._resize_and_rehash()
match self._find(key):
case deque() as bucket, index, (key, _):
bucket[index] = Pair(key, value)
case bucket:
bucket.append(Pair(key, value))
def __getitem__(self, key):
match self._find(key):
case _, _, pair:
return pair.value
case _:
raise KeyError(key)
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def __eq__(self, other):
if self is other:
return True
if type(self) is not type(other):
return False
return set(self.pairs) == set(other.pairs)
def __str__(self):
pairs = []
for key, value in self.pairs:
pairs.append(f"{key!r}: {value!r}")
return "{" + ", ".join(pairs) + "}"
def __repr__(self):
cls = self.__class__.__name__
return f"{cls}.from_dict({str(self)})"
def copy(self):
return HashTable.from_dict(dict(self.pairs), self.capacity)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
@property
def pairs(self):
return {pair for bucket in self._buckets for pair in bucket}
@property
def values(self):
return [pair.value for pair in self.pairs]
@property
def keys(self):
return {pair.key for pair in self.pairs}
@property
def capacity(self):
return len(self._buckets)
@property
def load_factor(self):
return len(self) / self.capacity
def _index(self, key):
return hash(key) % self.capacity
def _resize_and_rehash(self):
copy = HashTable(capacity=self.capacity * 2)
for key, value in self.pairs:
copy[key] = value
self._buckets = copy._buckets
def _find(self, key):
bucket = self._buckets[self._index(key)]
for index, pair in enumerate(bucket):
if pair.key == key:
return bucket, index, pair
return bucket
| HashTable |
python | pypa__warehouse | tests/unit/integration/vulnerabilities/osv/test_views.py | {
"start": 186,
"end": 5349
} | class ____:
def test_report_vulnerabilities(self, pyramid_request, metrics, monkeypatch):
pyramid_request.headers = {
"VULN-PUBLIC-KEY-IDENTIFIER": "vuln_pub_key_id",
"VULN-PUBLIC-KEY-SIGNATURE": "vuln_pub_key_sig",
}
pyramid_request.body = """[{
"project": "vuln_project",
"versions": [
"v1",
"v2"
],
"id": "vuln_id",
"link": "vulns.com/vuln_id",
"aliases": [
"vuln_alias"
]
}]"""
pyramid_request.json_body = [
{
"project": "vuln_project",
"versions": ["v1", "v2"],
"id": "vuln_id",
"link": "vulns.com/vuln_id",
"aliases": ["vuln_alias"],
}
]
pyramid_request.find_service = lambda *a, **k: metrics
http = pyramid_request.http = pretend.stub()
verify = pretend.call_recorder(lambda **k: True)
verifier = pretend.stub(verify=verify)
verifier_cls = pretend.call_recorder(lambda **k: verifier)
monkeypatch.setattr(osv, "VulnerabilityReportVerifier", verifier_cls)
delay = pretend.call_recorder(lambda **k: None)
task = pretend.call_recorder(lambda a: pretend.stub(delay=delay))
pyramid_request.task = task
response = views.report_vulnerabilities(pyramid_request)
assert response.status_code == 204
assert verifier_cls.calls == [pretend.call(session=http, metrics=metrics)]
assert verify.calls == [
pretend.call(
payload="""[{
"project": "vuln_project",
"versions": [
"v1",
"v2"
],
"id": "vuln_id",
"link": "vulns.com/vuln_id",
"aliases": [
"vuln_alias"
]
}]""",
key_id="vuln_pub_key_id",
signature="vuln_pub_key_sig",
)
]
assert task.calls == [pretend.call(views.analyze_vulnerability_task)]
assert delay.calls == [
pretend.call(
vulnerability_report={
"project": "vuln_project",
"versions": ["v1", "v2"],
"id": "vuln_id",
"link": "vulns.com/vuln_id",
"aliases": ["vuln_alias"],
},
origin="osv",
)
]
def test_report_vulnerabilities_verify_fail(self, monkeypatch, pyramid_request):
pyramid_request.headers = {
"VULN-PUBLIC-KEY-IDENTIFIER": "vuln_pub_key_id",
"VULN-PUBLIC-KEY-SIGNATURE": "vuln_pub_key_sig",
}
pyramid_request.body = """[{
"project": "vuln_project",
"versions": [
"v1",
"v2"
],
"id": "vuln_id",
"link": "vulns.com/vuln_id",
"aliases": [
"vuln_alias"
]
}]"""
pyramid_request.http = pretend.stub()
verify = pretend.call_recorder(lambda **k: False)
verifier = pretend.stub(verify=verify)
verifier_cls = pretend.call_recorder(lambda **k: verifier)
monkeypatch.setattr(osv, "VulnerabilityReportVerifier", verifier_cls)
response = views.report_vulnerabilities(pyramid_request)
assert response.status_int == 400
def test_report_vulnerabilities_verify_invalid_json(self, metrics, monkeypatch):
verify = pretend.call_recorder(lambda **k: True)
verifier = pretend.stub(verify=verify)
verifier_cls = pretend.call_recorder(lambda **k: verifier)
monkeypatch.setattr(osv, "VulnerabilityReportVerifier", verifier_cls)
# We need to raise on a property access, can't do that with a stub.
class Request:
headers = {
"VULN-PUBLIC-KEY-IDENTIFIER": "vuln_pub_key_id",
"VULN-PUBLIC-KEY-SIGNATURE": "vuln_pub_key_sig",
}
body = "["
@property
def json_body(self):
return json.loads(self.body)
def find_service(self, *a, **k):
return metrics
response = pretend.stub(status_int=200)
http = pretend.stub()
request = Request()
response = views.report_vulnerabilities(request)
assert response.status_int == 400
assert metrics.increment.calls == [
pretend.call(
"warehouse.vulnerabilties.error.payload.json_error", tags=["origin:osv"]
)
]
def test_report_vulnerabilities_verify_invalid_vuln(
self, monkeypatch, pyramid_request
):
pyramid_request.headers = {
"VULN-PUBLIC-KEY-IDENTIFIER": "vuln_pub_key_id",
"VULN-PUBLIC-KEY-SIGNATURE": "vuln_pub_key_sig",
}
pyramid_request.body = "{}" # not a list
pyramid_request.json_body = {}
pyramid_request.http = pretend.stub()
verify = pretend.call_recorder(lambda **k: True)
verifier = pretend.stub(verify=verify)
verifier_cls = pretend.call_recorder(lambda **k: verifier)
monkeypatch.setattr(osv, "VulnerabilityReportVerifier", verifier_cls)
response = views.report_vulnerabilities(pyramid_request)
assert response.status_int == 400
| TestReportVulnerabilities |
python | joke2k__faker | tests/providers/test_phone_number.py | {
"start": 12621,
"end": 13206
} | class ____:
"""Test es_CO phone number provider methods"""
def test_phone_number(self, faker, num_samples):
pattern: Pattern = re.compile(
r"((\+?57|\(\+57\))?60\d)?\d{7}|"
r"((\+?57 |\(\+57\) )?60\d )?\d{3} \d{2} \d{2}|"
r"(\+?57|\(\+57\))?3[012]\d{8}|"
r"(\+?57 |\(\+57\) )?3[012]\d \d{3} \d{2} \d{2}|"
r"01800\d{7}|"
r"01 800\d \d{3} \d{3}"
)
for _ in range(num_samples):
phone_number = faker.phone_number()
assert pattern.fullmatch(phone_number)
| TestEsCo |
python | pytorch__pytorch | benchmarks/instruction_counts/execution/runner.py | {
"start": 3122,
"end": 10365
} | class ____:
def __init__(
self,
work_items: tuple[WorkOrder, ...],
core_pool: Optional[CorePool] = None,
cadence: float = 1.0,
) -> None:
self._work_items: tuple[WorkOrder, ...] = work_items
self._core_pool: CorePool = core_pool or CorePool(0, CPU_COUNT - 4)
self._cadence: float = cadence
# Working state.
self._work_queue: list[WorkOrder] = list(work_items)
self._active_jobs: list[InProgress] = []
self._results: dict[WorkOrder, WorkerOutput] = {}
# Debug information for ETA and error messages.
self._start_time: float = -1
self._durations: dict[WorkOrder, float] = {}
self._currently_processed: Optional[WorkOrder] = None
if len(work_items) != len(set(work_items)):
raise ValueError("Duplicate work items.")
def run(self) -> dict[WorkOrder, WorkerOutput]:
try:
return self._run()
except KeyboardInterrupt:
print("\n\nKeyboardInterrupt (ctrl-c) detected. Shutting down children.")
self._force_shutdown(verbose=False)
raise
except subprocess.TimeoutExpired:
print("\n\nJob timed out. Shutting down children.")
self._force_shutdown(verbose=True)
raise
except WorkerFailed as e:
print("Shutting down all outstanding jobs before re-raising.")
self._force_shutdown(verbose=True)
print(f"Cmd: {e.cmd}")
if e.wrapped_trace:
print(e.wrapped_trace)
else:
print("Unknown failure. (Worker did not report exception contents.)")
raise
except BaseException:
print("\n\nUnknown exception. Shutting down jobs before re-raising.")
self._force_shutdown(verbose=True)
raise
def _run(self) -> dict[WorkOrder, WorkerOutput]:
self._start_time = time.time()
self._canary_import()
while self._work_queue or self._active_jobs:
t0 = time.time()
self._update_active_jobs()
self._enqueue_new_jobs()
self._print_progress()
time.sleep(max(self._cadence - (time.time() - t0), 0.0))
print(f"\nTotal time: {time.time() - self._start_time:.0f} seconds")
return self._results.copy()
def _update_active_jobs(self) -> None:
active_jobs: list[InProgress] = []
for job in self._active_jobs:
self._currently_processed = job.work_order
if not job.check_finished():
active_jobs.append(job)
continue
result: Union[WorkerOutput, WorkerFailure] = job.result
if isinstance(result, WorkerOutput):
self._results[job.work_order] = result
assert job.cpu_list is not None
self._core_pool.release(job.cpu_list)
self._durations[job.work_order] = job.duration
else:
assert isinstance(result, WorkerFailure)
raise WorkerFailed(cmd=job.proc.cmd, wrapped_trace=result.failure_trace)
self._currently_processed = None
self._active_jobs.clear()
self._active_jobs.extend(active_jobs)
def _enqueue_new_jobs(self) -> None:
work_queue: list[WorkOrder] = []
for i, work_order in enumerate(self._work_queue):
self._currently_processed = work_order
cpu_list = self._core_pool.reserve(work_order.timer_args.num_threads)
if cpu_list is None:
work_queue.append(work_order)
else:
self._active_jobs.append(InProgress(work_order, cpu_list))
# Stagger creation. This helps with contention.
time.sleep(0.5)
self._currently_processed = None
self._work_queue.clear()
self._work_queue.extend(work_queue)
def _print_progress(self) -> None:
fraction = f"{len(self._results)} / {len(self._work_items)}"
elapsed = f"{time.time() - self._start_time:.0f} seconds"
if len(self._results) < 5:
eta = "Unknown"
else:
remaining = len(self._work_items) - len(self._results)
iters_remaining = math.ceil(remaining / self._core_pool._num_cores)
mean_time = sum(self._durations.values()) / len(self._durations)
eta_minutes = math.ceil(iters_remaining * mean_time / 60)
eta = f"~{eta_minutes:.0f} minute{'s' if eta_minutes > 1 else ''}"
print(f"\r{fraction} ({elapsed}), ETA: {eta}", end="")
def _force_shutdown(self, verbose: bool = False) -> None:
"""Try to interrupt jobs, and kill if need be.
We would prefer to softly terminate jobs so that they have a chance to
clean up before shutting down.
"""
for job in self._active_jobs:
job.proc.interrupt()
if verbose and self._currently_processed is not None:
print(
textwrap.dedent(
f"""
Failed when processing the following Job:
Label: {self._currently_processed.label}
AutoLabels: {self._currently_processed.autolabels}
Source cmd: {self._currently_processed.source_cmd}
"""
).strip()
+ "\n"
)
if self._active_jobs:
time.sleep(0.5)
remaining_jobs = [j for j in self._active_jobs if j.proc.poll() is None]
if remaining_jobs:
print(
f"SIGINT sent to {len(self._active_jobs)} jobs, "
f"{len(remaining_jobs)} have not yet exited.\n"
"Entering short cleanup loop, after which stragglers will "
"be forcibly terminated."
)
for _ in range(5):
time.sleep(2.0)
remaining_jobs = [j for j in remaining_jobs if j.proc.poll() is None]
if remaining_jobs:
print(f"{len(remaining_jobs)} still remain.")
else:
print("All remaining jobs have gracefully terminated.")
return
print(f"{len(remaining_jobs)} jobs refused to exit. Forcibly terminating.")
for j in remaining_jobs:
j.proc.terminate()
def _canary_import(self) -> None:
"""Make sure we can import torch before launching a slew of workers."""
source_cmds: set[str] = set()
for w in self._work_items:
if w.source_cmd is not None:
source_cmds.add(f"{w.source_cmd} && ")
for source_cmd in source_cmds or {""}:
cmd = f'{source_cmd}{PYTHON_CMD} -c "import torch"'
proc = subprocess.run(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding="utf-8",
executable=SHELL,
)
if proc.returncode:
raise ImportError(
f"Failed to import torch in subprocess: {cmd}\n{proc.stdout}"
)
| Runner |
python | pypa__pipenv | pipenv/vendor/tomlkit/items.py | {
"start": 9918,
"end": 11216
} | class ____(Key):
"""A single key"""
def __init__(
self,
k: str,
t: KeyType | None = None,
sep: str | None = None,
original: str | None = None,
) -> None:
if t is None:
if not k or any(
c not in string.ascii_letters + string.digits + "-" + "_" for c in k
):
t = KeyType.Basic
else:
t = KeyType.Bare
self.t = t
if sep is None:
sep = " = "
self.sep = sep
self.key = k
if original is None:
key_str = escape_string(k) if t == KeyType.Basic else k
original = f"{t.value}{key_str}{t.value}"
self._original = original
self._keys = [self]
self._dotted = False
@property
def delimiter(self) -> str:
"""The delimiter: double quote/single quote/none"""
return self.t.value
def is_bare(self) -> bool:
"""Check if the key is bare"""
return self.t == KeyType.Bare
def __hash__(self) -> int:
return hash(self.key)
def __eq__(self, other: Any) -> bool:
if isinstance(other, Key):
return isinstance(other, SingleKey) and self.key == other.key
return self.key == other
| SingleKey |
python | pandas-dev__pandas | pandas/tests/series/methods/test_isin.py | {
"start": 214,
"end": 9236
} | class ____:
def test_isin(self):
s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])
result = s.isin(["A", "C"])
expected = Series([True, False, True, False, False, False, True, True])
tm.assert_series_equal(result, expected)
# GH#16012
# This specific issue has to have a series over 1e6 in len, but the
# comparison array (in_list) must be large enough so that numpy doesn't
# do a manual masking trick that will avoid this issue altogether
s = Series(list("abcdefghijk" * 10**5))
# If numpy doesn't do the manual comparison/mask, these
# unorderable mixed types are what cause the exception in numpy
in_list = [-1, "a", "b", "G", "Y", "Z", "E", "K", "E", "S", "I", "R", "R"] * 6
assert s.isin(in_list).sum() == 200000
def test_isin_with_string_scalar(self):
# GH#4763
s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])
msg = (
r"only list-like objects are allowed to be passed to isin\(\), "
r"you passed a `str`"
)
with pytest.raises(TypeError, match=msg):
s.isin("a")
s = Series(["aaa", "b", "c"])
with pytest.raises(TypeError, match=msg):
s.isin("aaa")
def test_isin_datetimelike_mismatched_reso(self):
expected = Series([True, True, False, False, False])
ser = Series(date_range("jan-01-2013", "jan-05-2013"))
# fails on dtype conversion in the first place
day_values = np.asarray(ser[0:2].values).astype("datetime64[D]")
result = ser.isin(day_values)
tm.assert_series_equal(result, expected)
dta = ser[:2]._values.astype("M8[s]")
result = ser.isin(dta)
tm.assert_series_equal(result, expected)
def test_isin_datetimelike_mismatched_reso_list(self):
expected = Series([True, True, False, False, False])
ser = Series(date_range("jan-01-2013", "jan-05-2013"))
dta = ser[:2]._values.astype("M8[s]")
result = ser.isin(list(dta))
tm.assert_series_equal(result, expected)
def test_isin_with_i8(self):
# GH#5021
expected = Series([True, True, False, False, False])
expected2 = Series([False, True, False, False, False])
# datetime64[ns]
s = Series(date_range("jan-01-2013", "jan-05-2013"))
result = s.isin(s[0:2])
tm.assert_series_equal(result, expected)
result = s.isin(s[0:2].values)
tm.assert_series_equal(result, expected)
result = s.isin([s[1]])
tm.assert_series_equal(result, expected2)
result = s.isin([np.datetime64(s[1])])
tm.assert_series_equal(result, expected2)
result = s.isin(set(s[0:2]))
tm.assert_series_equal(result, expected)
# timedelta64[ns]
s = Series(pd.to_timedelta(range(5), unit="D"))
result = s.isin(s[0:2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
def test_isin_empty(self, empty):
# see GH#16991
s = Series(["a", "b"])
expected = Series([False, False])
result = s.isin(empty)
tm.assert_series_equal(expected, result)
def test_isin_read_only(self):
# https://github.com/pandas-dev/pandas/issues/37174
arr = np.array([1, 2, 3])
arr.setflags(write=False)
s = Series([1, 2, 3])
result = s.isin(arr)
expected = Series([True, True, True])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [object, None])
def test_isin_dt64_values_vs_ints(self, dtype):
# GH#36621 dont cast integers to datetimes for isin
dti = date_range("2013-01-01", "2013-01-05")
ser = Series(dti)
comps = np.asarray([1356998400000000000], dtype=dtype)
res = dti.isin(comps)
expected = np.array([False] * len(dti), dtype=bool)
tm.assert_numpy_array_equal(res, expected)
res = ser.isin(comps)
tm.assert_series_equal(res, Series(expected))
res = pd.core.algorithms.isin(ser, comps)
tm.assert_numpy_array_equal(res, expected)
def test_isin_tzawareness_mismatch(self):
dti = date_range("2013-01-01", "2013-01-05")
ser = Series(dti)
other = dti.tz_localize("UTC")
res = dti.isin(other)
expected = np.array([False] * len(dti), dtype=bool)
tm.assert_numpy_array_equal(res, expected)
res = ser.isin(other)
tm.assert_series_equal(res, Series(expected))
res = pd.core.algorithms.isin(ser, other)
tm.assert_numpy_array_equal(res, expected)
def test_isin_period_freq_mismatch(self):
dti = date_range("2013-01-01", "2013-01-05")
pi = dti.to_period("M")
ser = Series(pi)
# We construct another PeriodIndex with the same i8 values
# but different dtype
dtype = dti.to_period("Y").dtype
other = PeriodArray._simple_new(pi.asi8, dtype=dtype)
res = pi.isin(other)
expected = np.array([False] * len(pi), dtype=bool)
tm.assert_numpy_array_equal(res, expected)
res = ser.isin(other)
tm.assert_series_equal(res, Series(expected))
res = pd.core.algorithms.isin(ser, other)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("values", [[-9.0, 0.0], [-9, 0]])
def test_isin_float_in_int_series(self, values):
# GH#19356 GH#21804
ser = Series(values)
result = ser.isin([-9, -0.5])
expected = Series([True, False])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["boolean", "Int64", "Float64"])
@pytest.mark.parametrize(
"data,values,expected",
[
([0, 1, 0], [1], [False, True, False]),
([0, 1, 0], [1, pd.NA], [False, True, False]),
([0, pd.NA, 0], [1, 0], [True, False, True]),
([0, 1, pd.NA], [1, pd.NA], [False, True, True]),
([0, 1, pd.NA], [1, np.nan], [False, True, False]),
([0, pd.NA, pd.NA], [np.nan, pd.NaT, None], [False, False, False]),
],
)
def test_isin_masked_types(self, dtype, data, values, expected):
# GH#42405
ser = Series(data, dtype=dtype)
result = ser.isin(values)
expected = Series(expected, dtype="boolean")
tm.assert_series_equal(result, expected)
def test_isin_large_series_mixed_dtypes_and_nan(monkeypatch):
# https://github.com/pandas-dev/pandas/issues/37094
# combination of object dtype for the values
# and > _MINIMUM_COMP_ARR_LEN elements
min_isin_comp = 5
ser = Series([1, 2, np.nan] * min_isin_comp)
with monkeypatch.context() as m:
m.setattr(algorithms, "_MINIMUM_COMP_ARR_LEN", min_isin_comp)
result = ser.isin({"foo", "bar"})
expected = Series([False] * 3 * min_isin_comp)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype, data, values, expected",
[
("boolean", [pd.NA, False, True], [False, pd.NA], [True, True, False]),
("Int64", [pd.NA, 2, 1], [1, pd.NA], [True, False, True]),
("boolean", [pd.NA, False, True], [pd.NA, True, "a", 20], [True, False, True]),
("boolean", [pd.NA, False, True], [], [False, False, False]),
("Float64", [20.0, 30.0, pd.NA], [pd.NA], [False, False, True]),
],
)
def test_isin_large_series_and_pdNA(dtype, data, values, expected, monkeypatch):
# https://github.com/pandas-dev/pandas/issues/60678
# combination of large series (> _MINIMUM_COMP_ARR_LEN elements) and
# values contains pdNA
min_isin_comp = 2
ser = Series(data, dtype=dtype)
expected = Series(expected, dtype="boolean")
with monkeypatch.context() as m:
m.setattr(algorithms, "_MINIMUM_COMP_ARR_LEN", min_isin_comp)
result = ser.isin(values)
tm.assert_series_equal(result, expected)
def test_isin_complex_numbers():
# GH 17927
array = [0, 1j, 1j, 1, 1 + 1j, 1 + 2j, 1 + 1j]
result = Series(array).isin([1j, 1 + 1j, 1 + 2j])
expected = Series([False, True, True, False, True, True, True], dtype=bool)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,is_in",
[([1, [2]], [1]), (["simple str", [{"values": 3}]], ["simple str"])],
)
def test_isin_filtering_with_mixed_object_types(data, is_in):
# GH 20883
ser = Series(data)
result = ser.isin(is_in)
expected = Series([True, False])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("data", [[1, 2, 3], [1.0, 2.0, 3.0]])
@pytest.mark.parametrize("isin", [[1, 2], [1.0, 2.0]])
def test_isin_filtering_on_iterable(data, isin):
# GH 50234
ser = Series(data)
result = ser.isin(i for i in isin)
expected_result = Series([True, True, False])
tm.assert_series_equal(result, expected_result)
| TestSeriesIsIn |
python | ray-project__ray | release/ray_release/tests/test_state_machine.py | {
"start": 2674,
"end": 2762
} | class ____:
def unblock_job(self, *args, **kwargs):
return {}
| MockBuildkiteJob |
python | rapidsai__cudf | python/cudf/cudf_pandas_tests/test_array_function.py | {
"start": 540,
"end": 638
} | class ____:
def __array_function__(self, func, types, args, kwargs):
return "slow"
| Slow2 |
python | joke2k__faker | faker/providers/misc/en_PH/__init__.py | {
"start": 42,
"end": 4397
} | class ____(MiscProvider):
"""
Provider for miscellaneous data for en_PH locale
This class also houses all other provider methods that would have otherwise been weird to place in another provider.
"""
gemstone_names = (
"Agate",
"Amber",
"Amethyst",
"Aquamarine",
"Citrine",
"Diamond",
"Emerald",
"Garnet",
"Jade",
"Jasper",
"Lapis Lazuli",
"Moonstone",
"Onyx",
"Opal",
"Peridot",
"Ruby",
"Sapphire",
"Sardonyx",
"Sunstone",
"Topaz",
"Turquoise",
"Zircon",
)
mountain_names = (
"Apo",
"Arayat",
"Atok",
"Banahaw",
"Bulusan",
"Caraballo",
"Cordillera",
"Cresta",
"Halcon",
"Hibok-Hibok",
"Iriga",
"Kanlaon",
"Makiling",
"Malinao",
"Mariveles",
"Matumtum",
"Mayon",
"Palali",
"Palanan",
"Pao",
"Pinatubo",
"Samat",
"Sicaba",
"Sierra Madre",
"Tabayoc",
)
plant_names = (
"Acacia",
"Agoho",
"Akle",
"Anahaw",
"Anonas",
"Anubing",
"Aranga",
"Asparagus",
"Atis",
"Avocado",
"Azalea",
"Azucena",
"Bagtikan",
"Bakawan",
"Balete",
"Balimbing",
"Banaba",
"Banuyo",
"Banyan",
"Baticulin",
"Batino",
"Bauhinia",
"Bouganvilla",
"Caballero",
"Cabbage",
"Calantas",
"Calumpang",
"Camachile",
"Camia",
"Campanilla",
"Carissa",
"Carrot",
"Catmon",
"Cattleya",
"Cauliflower",
"Celery",
"Champaca",
"Chico",
"Coconut",
"Cucumber",
"Cypress",
"Dao",
"Dapdap",
"Dita",
"Duhat",
"Dungon",
"Gladiola",
"Gloriosa",
"Granada",
"Guijo",
"Gumamela",
"Intsia",
"Ipil",
"Jacaranda",
"Jasmine",
"Kaimito",
"Kalachuchi",
"Kalamansi",
"Kamagong",
"Kamias",
"Lanzones",
"Lawaan",
"Lily",
"Lumbayao",
"Mabolo",
"Macapuno",
"Macopa",
"Magnolia",
"Mahogany",
"Malugay",
"Mayapis",
"Melon",
"Milflower",
"Molave",
"Mushroom",
"Mustard",
"Narra",
"Nipa",
"Oleander",
"Oliva",
"Orchid",
"Palm",
"Pandan",
"Pepper",
"Piña",
"Raddish",
"Rosas",
"Sampaguita",
"Sampaloc",
"Santan",
"Santol",
"Sineguelas",
"Squash",
"Supa",
"Talisay",
"Tamarind",
"Tanguile",
"Tindalo",
"Tulip",
"Yakal",
"Zinia",
)
space_object_names = (
"Andromeda",
"Antares",
"Aquarius",
"Aries",
"Asteroid",
"Cancer",
"Canopus",
"Capricorn",
"Comet",
"Constellation",
"Earth",
"Galaxy",
"Gemini",
"Hercules",
"Hydra",
"Juno",
"Jupiter",
"Leo",
"Libra",
"Mars",
"Mercury",
"Milky Way",
"Neptune",
"Orion",
"Pisces",
"Planet",
"Pluto",
"Polaris",
"Sagittarius",
"Saturn",
"Scorpio",
"Taurus",
"Uranus",
"Venus",
"Virgo",
"Zodiac",
)
random_object_names = gemstone_names + mountain_names + plant_names + space_object_names
def gemstone_name(self) -> str:
return self.random_element(self.gemstone_names)
def mountain_name(self) -> str:
return self.random_element(self.mountain_names)
def plant_name(self) -> str:
return self.random_element(self.plant_names)
def space_object_name(self) -> str:
return self.random_element(self.space_object_names)
def random_object_name(self) -> str:
return self.random_element(self.random_object_names)
| Provider |
python | kamyu104__LeetCode-Solutions | Python/maximize-number-of-subsequences-in-a-string.py | {
"start": 48,
"end": 513
} | class ____(object):
def maximumSubsequenceCount(self, text, pattern):
"""
:type text: str
:type pattern: str
:rtype: int
"""
result = cnt1 = cnt2 = 0
for c in text:
if c == pattern[1]:
result += cnt1
cnt2 += 1
if c == pattern[0]:
cnt1 += 1
return result + max(cnt1, cnt2) # add pattern[1] at back or pattern[0] at front
| Solution |
python | huggingface__transformers | tests/models/fastspeech2_conformer/test_modeling_fastspeech2_conformer.py | {
"start": 24928,
"end": 34184
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (FastSpeech2ConformerWithHifiGan,) if is_torch_available() else ()
test_resize_embeddings = False
is_encoder_decoder = True
def setUp(self):
self.model_tester = FastSpeech2ConformerWithHifiGanTester(self)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
return inputs_dict
def test_duration_energy_pitch_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.model_config.return_dict = True
seq_len = self.model_tester.seq_length
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
# duration
self.assertListEqual(list(outputs.duration_outputs.shape), [self.model_tester.batch_size, seq_len])
# energy
self.assertListEqual(list(outputs.energy_outputs.shape), [self.model_tester.batch_size, seq_len, 1])
# pitch
self.assertListEqual(list(outputs.pitch_outputs.shape), [self.model_tester.batch_size, seq_len, 1])
def test_hidden_states_output(self):
def _check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
for idx, hidden_states in enumerate([outputs.encoder_hidden_states, outputs.decoder_hidden_states]):
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertIsInstance(hidden_states, (list, tuple))
expected_batch_size, expected_seq_length, expected_hidden_size = hidden_states[0].shape
self.assertEqual(expected_batch_size, self.model_tester.batch_size)
# Only test encoder seq_length since decoder seq_length is variable based on inputs
if idx == 0:
self.assertEqual(expected_seq_length, self.model_tester.seq_length)
self.assertEqual(expected_hidden_size, self.model_tester.hidden_size)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
inputs_dict["output_hidden_states"] = True
_check_hidden_states_output(inputs_dict, config, FastSpeech2ConformerWithHifiGan)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.model_config.output_hidden_states = True
_check_hidden_states_output(inputs_dict, config, FastSpeech2ConformerWithHifiGan)
def test_save_load_strict(self):
config, _ = self.model_tester.prepare_config_and_inputs()
model = FastSpeech2ConformerWithHifiGan(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
_, info = FastSpeech2ConformerWithHifiGan.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], set())
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
model = FastSpeech2ConformerWithHifiGan(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = [
"input_ids",
"attention_mask",
"spectrogram_labels",
"duration_labels",
"pitch_labels",
"energy_labels",
"speaker_ids",
"lang_ids",
"speaker_embedding",
"return_dict",
"output_attentions",
"output_hidden_states",
]
self.assertListEqual(arg_names, expected_arg_names)
# Override as FastSpeech2Conformer does not output cross attentions
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.model_config.output_hidden_states = True
config.model_config.output_attentions = True
model = FastSpeech2ConformerWithHifiGan(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, FastSpeech2ConformerModel)
outputs = model(**inputs)
output = outputs[0]
encoder_hidden_states = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
decoder_hidden_states = outputs.decoder_hidden_states[0]
decoder_hidden_states.retain_grad()
encoder_attentions = outputs.encoder_attentions[0]
encoder_attentions.retain_grad()
decoder_attentions = outputs.decoder_attentions[0]
decoder_attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(decoder_hidden_states.grad)
self.assertIsNotNone(encoder_attentions.grad)
self.assertIsNotNone(decoder_attentions.grad)
def test_attention_outputs(self):
"""
Custom `test_attention_outputs` since FastSpeech2Conformer does not output cross attentions, has variable
decoder attention shape, and uniquely outputs energy, pitch, and durations.
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.model_config.return_dict = True
seq_len = self.model_tester.seq_length
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.model_config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(len(outputs.encoder_attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.model_config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
encoder_attentions = outputs.encoder_attentions
self.assertEqual(len(encoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(encoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
out_len = len(outputs)
correct_outlen = 8
self.assertEqual(out_len, correct_outlen)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 2
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
@slow
def test_model_from_pretrained(self):
model = FastSpeech2ConformerModel.from_pretrained("espnet/fastspeech2_conformer")
self.assertIsNotNone(model)
@unittest.skip(reason="FastSpeech2Conformer does not accept inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="FastSpeech2Conformer has no input embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(
"FastSpeech2Conformer predicts durations in linear domain during inference"
"Even small differences on hidden states lead to different durations, due to `torch.round`"
)
def test_batching_equivalence(self):
pass
@require_torch
@require_g2p_en
@slow
| FastSpeech2ConformerWithHifiGanTest |
python | walkccc__LeetCode | solutions/2812. Find the Safest Path in a Grid/2812.py | {
"start": 0,
"end": 1632
} | class ____:
def maximumSafenessFactor(self, grid: list[list[int]]) -> int:
self.DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
n = len(grid)
distToThief = self._getDistToThief(grid)
def hasValidPath(safeness: int) -> bool:
if distToThief[0][0] < safeness:
return False
q = collections.deque([(0, 0)])
seen = {(0, 0)}
while q:
i, j = q.popleft()
if distToThief[i][j] < safeness:
continue
if i == n - 1 and j == n - 1:
return True
for dx, dy in self.DIRS:
x = i + dx
y = j + dy
if x < 0 or x == n or y < 0 or y == n:
continue
if (x, y) in seen:
continue
q.append((x, y))
seen.add((x, y))
return False
return bisect.bisect_left(range(n * 2), True,
key=lambda m: not hasValidPath(m)) - 1
def _getDistToThief(self, grid: list[list[int]]) -> list[list[int]]:
n = len(grid)
distToThief = [[0] * n for _ in range(n)]
q = collections.deque()
seen = set()
for i in range(n):
for j in range(n):
if grid[i][j] == 1:
q.append((i, j))
seen.add((i, j))
dist = 0
while q:
for _ in range(len(q)):
i, j = q.popleft()
distToThief[i][j] = dist
for dx, dy in self.DIRS:
x = i + dx
y = j + dy
if x < 0 or x == n or y < 0 or y == n:
continue
if (x, y) in seen:
continue
q.append((x, y))
seen.add((x, y))
dist += 1
return distToThief
| Solution |
python | doocs__leetcode | solution/2400-2499/2433.Find The Original Array of Prefix Xor/Solution.py | {
"start": 0,
"end": 127
} | class ____:
def findArray(self, pref: List[int]) -> List[int]:
return [a ^ b for a, b in pairwise([0] + pref)]
| Solution |
python | gevent__gevent | src/gevent/testing/support.py | {
"start": 1627,
"end": 1829
} | class ____(object):
# A descriptor-like object that will
# only be used if the actual stdlib module
# doesn't have the value.
def __init__(self, value):
self.value = value
| _Default |
python | django__django | tests/admin_utils/models.py | {
"start": 872,
"end": 940
} | class ____(Article):
class Meta:
proxy = True
| ArticleProxy |
python | huggingface__transformers | src/transformers/models/t5gemma/modular_t5gemma.py | {
"start": 15005,
"end": 15383
} | class ____(Gemma2MLP):
def __init__(self, config):
super().__init__(config)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, x):
hidden_states = self.act_fn(self.gate_proj(x)) * self.up_proj(x)
hidden_states = self.dropout(hidden_states)
down_proj = self.down_proj(hidden_states)
return down_proj
| T5GemmaMLP |
python | sanic-org__sanic | sanic/cli/arguments.py | {
"start": 7049,
"end": 8298
} | class ____(Group):
name = "Development"
def attach(self):
self.container.add_argument(
"--debug",
dest="debug",
action="store_true",
help="Run the server in debug mode",
)
self.container.add_argument(
"-r",
"--reload",
"--auto-reload",
dest="auto_reload",
action="store_true",
help=(
"Watch source directory for file changes and reload on changes"
),
)
self.container.add_argument(
"-R",
"--reload-dir",
dest="path",
action="append",
help="Extra directories to watch and reload on changes",
)
self.container.add_argument(
"-d",
"--dev",
dest="dev",
action="store_true",
help=("debug + auto reload"),
)
self.container.add_argument(
"--auto-tls",
dest="auto_tls",
action="store_true",
help=(
"Create a temporary TLS certificate for local development "
"(requires mkcert or trustme)"
),
)
| DevelopmentGroup |
python | huggingface__transformers | src/transformers/models/align/modeling_align.py | {
"start": 25925,
"end": 26815
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.self = AlignTextSelfAttention(config)
self.output = AlignTextSelfOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
**kwargs,
) -> tuple[torch.Tensor]:
self_outputs = self.self(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
**kwargs,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->AlignText
| AlignTextAttention |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 91580,
"end": 93998
} | class ____:
def get_httpserver_options(self):
# Use a small chunk size so flow control is relevant even though
# all the data arrives at once.
return dict(chunk_size=10, decompress_request=True)
def get_http_client(self):
# simple_httpclient only: curl doesn't support body_producer.
return SimpleAsyncHTTPClient()
# Test all the slightly different code paths for fixed, chunked, etc bodies.
def test_flow_control_fixed_body(self: typing.Any):
response = self.fetch("/", body="abcdefghijklmnopqrstuvwxyz", method="POST")
response.rethrow()
self.assertEqual(
json_decode(response.body),
dict(
methods=[
"prepare",
"data_received",
"data_received",
"data_received",
"post",
]
),
)
def test_flow_control_chunked_body(self: typing.Any):
chunks = [b"abcd", b"efgh", b"ijkl"]
@gen.coroutine
def body_producer(write):
for i in chunks:
yield write(i)
response = self.fetch("/", body_producer=body_producer, method="POST")
response.rethrow()
self.assertEqual(
json_decode(response.body),
dict(
methods=[
"prepare",
"data_received",
"data_received",
"data_received",
"post",
]
),
)
def test_flow_control_compressed_body(self: typing.Any):
bytesio = BytesIO()
gzip_file = gzip.GzipFile(mode="w", fileobj=bytesio)
gzip_file.write(b"abcdefghijklmnopqrstuvwxyz")
gzip_file.close()
compressed_body = bytesio.getvalue()
response = self.fetch(
"/",
body=compressed_body,
method="POST",
headers={"Content-Encoding": "gzip"},
)
response.rethrow()
self.assertEqual(
json_decode(response.body),
dict(
methods=[
"prepare",
"data_received",
"data_received",
"data_received",
"post",
]
),
)
| BaseStreamingRequestFlowControlTest |
python | pypa__setuptools | setuptools/_vendor/zipp/__init__.py | {
"start": 3690,
"end": 5999
} | class ____(InitializedState, SanitizedNames, zipfile.ZipFile):
"""
A ZipFile subclass that ensures that implied directories
are always included in the namelist.
>>> list(CompleteDirs._implied_dirs(['foo/bar.txt', 'foo/bar/baz.txt']))
['foo/', 'foo/bar/']
>>> list(CompleteDirs._implied_dirs(['foo/bar.txt', 'foo/bar/baz.txt', 'foo/bar/']))
['foo/']
"""
@staticmethod
def _implied_dirs(names):
parents = itertools.chain.from_iterable(map(_parents, names))
as_dirs = (p + posixpath.sep for p in parents)
return _dedupe(_difference(as_dirs, names))
def namelist(self):
names = super().namelist()
return names + list(self._implied_dirs(names))
def _name_set(self):
return set(self.namelist())
def resolve_dir(self, name):
"""
If the name represents a directory, return that name
as a directory (with the trailing slash).
"""
names = self._name_set()
dirname = name + '/'
dir_match = name not in names and dirname in names
return dirname if dir_match else name
def getinfo(self, name):
"""
Supplement getinfo for implied dirs.
"""
try:
return super().getinfo(name)
except KeyError:
if not name.endswith('/') or name not in self._name_set():
raise
return zipfile.ZipInfo(filename=name)
@classmethod
def make(cls, source):
"""
Given a source (filename or zipfile), return an
appropriate CompleteDirs subclass.
"""
if isinstance(source, CompleteDirs):
return source
if not isinstance(source, zipfile.ZipFile):
return cls(source)
# Only allow for FastLookup when supplied zipfile is read-only
if 'r' not in source.mode:
cls = CompleteDirs
source.__class__ = cls
return source
@classmethod
def inject(cls, zf: zipfile.ZipFile) -> zipfile.ZipFile:
"""
Given a writable zip file zf, inject directory entries for
any directories implied by the presence of children.
"""
for name in cls._implied_dirs(zf.namelist()):
zf.writestr(name, b"")
return zf
| CompleteDirs |
python | pypa__warehouse | tests/common/db/packaging.py | {
"start": 4702,
"end": 5057
} | class ____(WarehouseFactory):
class Meta:
model = JournalEntry
name = factory.Faker("word")
version = factory.Sequence(lambda n: str(n) + ".0")
submitted_date = factory.Faker(
"date_time_between_dates", datetime_start=datetime.datetime(2008, 1, 1)
)
submitted_by = factory.SubFactory(UserFactory)
| JournalEntryFactory |
python | django__django | tests/messages_tests/urls.py | {
"start": 1847,
"end": 2020
} | class ____(SuccessMessageMixin, FormView):
form_class = ContactForm
success_url = show
success_message = "%(name)s was created successfully"
| ContactFormViewWithMsg |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 94753,
"end": 95114
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("project_id", "client_mutation_id")
project_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectId")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| DeleteProjectInput |
python | plotly__plotly.py | plotly/graph_objs/heatmap/colorbar/_tickformatstop.py | {
"start": 233,
"end": 8514
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "heatmap.colorbar"
_path_str = "heatmap.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.heatmap.colorb
ar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.heatmap.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmap.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickformatstop |
python | scipy__scipy | scipy/special/tests/test_legendre.py | {
"start": 12690,
"end": 21309
} | class ____:
@pytest.mark.parametrize("shape", [(1000,), (4, 9), (3, 5, 7)])
@pytest.mark.parametrize("branch_cut", [2, 3])
@pytest.mark.parametrize("z_min, z_max", [(-10 - 10j, 10 + 10j),
(-1, 1), (-10j, 10j)])
@pytest.mark.parametrize("norm", [True, False])
def test_specific(self, shape, branch_cut, z_min, z_max, norm):
rng = np.random.default_rng(1234)
z = rng.uniform(z_min.real, z_max.real, shape) + \
1j * rng.uniform(z_min.imag, z_max.imag, shape)
p, p_jac = assoc_legendre_p_all(4, 4,
z, branch_cut=branch_cut, norm=norm, diff_n=1)
np.testing.assert_allclose(p[0, 0],
assoc_legendre_p_0_0(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[0, 1], 0)
np.testing.assert_allclose(p[0, 2], 0)
np.testing.assert_allclose(p[0, 3], 0)
np.testing.assert_allclose(p[0, 4], 0)
np.testing.assert_allclose(p[0, -4], 0)
np.testing.assert_allclose(p[0, -3], 0)
np.testing.assert_allclose(p[0, -2], 0)
np.testing.assert_allclose(p[0, -1], 0)
np.testing.assert_allclose(p[1, 0],
assoc_legendre_p_1_0(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[1, 1],
assoc_legendre_p_1_1(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[1, 2], 0)
np.testing.assert_allclose(p[1, 3], 0)
np.testing.assert_allclose(p[1, 4], 0)
np.testing.assert_allclose(p[1, -4], 0)
np.testing.assert_allclose(p[1, -3], 0)
np.testing.assert_allclose(p[1, -2], 0)
np.testing.assert_allclose(p[1, -1],
assoc_legendre_p_1_m1(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[2, 0],
assoc_legendre_p_2_0(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[2, 1],
assoc_legendre_p_2_1(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[2, 2],
assoc_legendre_p_2_2(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[2, 3], 0)
np.testing.assert_allclose(p[2, 4], 0)
np.testing.assert_allclose(p[2, -4], 0)
np.testing.assert_allclose(p[2, -3], 0)
np.testing.assert_allclose(p[2, -2],
assoc_legendre_p_2_m2(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[2, -1],
assoc_legendre_p_2_m1(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[3, 0],
assoc_legendre_p_3_0(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[3, 1],
assoc_legendre_p_3_1(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[3, 2],
assoc_legendre_p_3_2(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[3, 3],
assoc_legendre_p_3_3(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[3, 4], 0)
np.testing.assert_allclose(p[3, -4], 0)
np.testing.assert_allclose(p[3, -3],
assoc_legendre_p_3_m3(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[3, -2],
assoc_legendre_p_3_m2(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[3, -1],
assoc_legendre_p_3_m1(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[4, 0],
assoc_legendre_p_4_0(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[4, 1],
assoc_legendre_p_4_1(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[4, 2],
assoc_legendre_p_4_2(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[4, 3],
assoc_legendre_p_4_3(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[4, 4],
assoc_legendre_p_4_4(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[4, -4],
assoc_legendre_p_4_m4(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[4, -3],
assoc_legendre_p_4_m3(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[4, -2],
assoc_legendre_p_4_m2(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p[4, -1],
assoc_legendre_p_4_m1(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[0, 0],
assoc_legendre_p_0_0_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[0, 1], 0)
np.testing.assert_allclose(p_jac[0, 2], 0)
np.testing.assert_allclose(p_jac[0, 3], 0)
np.testing.assert_allclose(p_jac[0, 4], 0)
np.testing.assert_allclose(p_jac[0, -4], 0)
np.testing.assert_allclose(p_jac[0, -3], 0)
np.testing.assert_allclose(p_jac[0, -2], 0)
np.testing.assert_allclose(p_jac[0, -1], 0)
np.testing.assert_allclose(p_jac[1, 0],
assoc_legendre_p_1_0_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[1, 1],
assoc_legendre_p_1_1_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[1, 2], 0)
np.testing.assert_allclose(p_jac[1, 3], 0)
np.testing.assert_allclose(p_jac[1, 4], 0)
np.testing.assert_allclose(p_jac[1, -4], 0)
np.testing.assert_allclose(p_jac[1, -3], 0)
np.testing.assert_allclose(p_jac[1, -2], 0)
np.testing.assert_allclose(p_jac[1, -1],
assoc_legendre_p_1_m1_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[2, 0],
assoc_legendre_p_2_0_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[2, 1],
assoc_legendre_p_2_1_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[2, 2],
assoc_legendre_p_2_2_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[2, 3], 0)
np.testing.assert_allclose(p_jac[2, 4], 0)
np.testing.assert_allclose(p_jac[2, -4], 0)
np.testing.assert_allclose(p_jac[2, -3], 0)
np.testing.assert_allclose(p_jac[2, -2],
assoc_legendre_p_2_m2_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[2, -1],
assoc_legendre_p_2_m1_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[3, 0],
assoc_legendre_p_3_0_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[3, 1],
assoc_legendre_p_3_1_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[3, 2],
assoc_legendre_p_3_2_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[3, 3],
assoc_legendre_p_3_3_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[3, 4], 0)
np.testing.assert_allclose(p_jac[3, -4], 0)
np.testing.assert_allclose(p_jac[3, -3],
assoc_legendre_p_3_m3_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[3, -2],
assoc_legendre_p_3_m2_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[3, -1],
assoc_legendre_p_3_m1_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[4, 0],
assoc_legendre_p_4_0_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[4, 1],
assoc_legendre_p_4_1_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[4, 2],
assoc_legendre_p_4_2_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[4, 3],
assoc_legendre_p_4_3_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[4, 4],
assoc_legendre_p_4_4_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[4, -4],
assoc_legendre_p_4_m4_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[4, -3],
assoc_legendre_p_4_m3_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[4, -2],
assoc_legendre_p_4_m2_jac(z, branch_cut=branch_cut, norm=norm))
np.testing.assert_allclose(p_jac[4, -1],
assoc_legendre_p_4_m1_jac(z, branch_cut=branch_cut, norm=norm))
| TestMultiAssocLegendreP |
python | realpython__materials | python-mcp-client/source-code-final/mcp_client/handlers.py | {
"start": 122,
"end": 3597
} | class ____:
"""Handle OpenAI API interaction and MCP tool execution."""
def __init__(self, client_session: ClientSession):
self.client_session = client_session
if not (api_key := os.getenv("OPENAI_API_KEY")):
raise RuntimeError(
"Error: OPENAI_API_KEY environment variable not set",
)
self.openai = OpenAI(api_key=api_key)
async def process_query(self, query: str) -> str:
"""Process a query using OpenAI and available MCP tools."""
# Get initial Model's response and decision on tool calls
messages = [{"role": "user", "content": query}]
initial_response = self.openai.chat.completions.create(
model=MODEL,
max_tokens=MAX_TOKENS,
messages=messages,
tools=await self._get_tools(),
)
current_message = initial_response.choices[0].message
result_parts = []
if current_message.content:
result_parts.append(current_message.content)
# Handle tool usage if present
if tool_calls := current_message.tool_calls:
messages.append(
{
"role": "assistant",
"content": current_message.content or "",
"tool_calls": tool_calls,
}
)
# Execute tools
for tool_call in tool_calls:
tool_result = await self._execute_tool(tool_call)
result_parts.append(tool_result["log"])
messages.append(tool_result["message"])
# Get final Model's response after tool execution
final_response = self.openai.chat.completions.create(
model=MODEL,
max_tokens=MAX_TOKENS,
messages=messages,
)
if content := final_response.choices[0].message.content:
result_parts.append(content)
return "Assistant: " + "\n".join(result_parts)
async def _get_tools(self) -> list:
"""Get MCP tools formatted for OpenAI."""
response = await self.client_session.list_tools()
return [
{
"type": "function",
"function": {
"name": tool.name,
"description": tool.description or "No description",
"parameters": getattr(
tool,
"inputSchema",
{"type": "object", "properties": {}},
),
},
}
for tool in response.tools
]
async def _execute_tool(self, tool_call) -> dict:
"""Execute an MCP tool call and return formatted result."""
tool_name = tool_call.function.name
tool_args = json.loads(tool_call.function.arguments or "{}")
try:
result = await self.client_session.call_tool(
tool_name,
tool_args,
)
content = result.content[0].text if result.content else ""
log = f"[Used {tool_name}({tool_args})]"
except Exception as e:
content = f"Error: {e}"
log = f"[{content}]"
return {
"log": log,
"message": {
"role": "tool",
"tool_call_id": tool_call.id,
"content": content,
},
}
| OpenAIQueryHandler |
python | great-expectations__great_expectations | great_expectations/data_context/store/validation_definition_store.py | {
"start": 677,
"end": 3791
} | class ____(Store):
_key_class = StringKey
def __init__(
self,
store_backend: dict | None = None,
runtime_environment: dict | None = None,
store_name: str = "no_store_name",
) -> None:
store_backend_class = self._determine_store_backend_class(store_backend)
if store_backend and issubclass(store_backend_class, TupleStoreBackend):
store_backend["filepath_suffix"] = store_backend.get("filepath_suffix", ".json")
super().__init__(
store_backend=store_backend,
runtime_environment=runtime_environment,
store_name=store_name,
)
def get_key(self, name: str, id: str | None = None) -> GXCloudIdentifier | StringKey:
"""Given a name and optional ID, build the correct key for use in the ValidationDefinitionStore.""" # noqa: E501 # FIXME CoP
if self.cloud_mode:
return GXCloudIdentifier(
resource_type=GXCloudRESTResource.VALIDATION_DEFINITION,
id=id,
resource_name=name,
)
return StringKey(key=name)
@override
@staticmethod
def gx_cloud_response_json_to_object_dict(response_json: dict) -> dict:
response_data = response_json["data"]
validation_data: dict
if isinstance(response_data, list):
if len(response_data) != 1:
if len(response_data) == 0:
msg = f"Cannot parse empty data from GX Cloud payload: {response_json}"
else:
msg = f"Cannot parse multiple items from GX Cloud payload: {response_json}"
raise ValueError(msg)
validation_data = response_data[0]
else:
validation_data = response_data
return validation_data
@override
@staticmethod
def _convert_raw_json_to_object_dict(data: dict) -> dict:
return data
@override
def serialize(self, value):
# In order to enable the custom json_encoders in ValidationDefinition, we need to set `models_as_dict` off # noqa: E501 # FIXME CoP
# Ref: https://docs.pydantic.dev/1.10/usage/exporting_models/#serialising-self-reference-or-other-models
output = value.json(models_as_dict=False, indent=2, sort_keys=True)
if self.cloud_mode:
output_dict = json.loads(output)
output_dict.pop("id", None)
return output_dict
else:
return output
@override
def deserialize(self, value):
from great_expectations.core.validation_definition import ValidationDefinition
if self.cloud_mode:
return ValidationDefinition.parse_obj(value)
return ValidationDefinition.parse_raw(value)
@override
def _add(self, key: DataContextKey, value: ValidationDefinition, **kwargs):
if not self.cloud_mode:
# this logic should move to the store backend, but is implemented here for now
value.id = str(uuid.uuid4())
return super()._add(key=key, value=value, **kwargs)
| ValidationDefinitionStore |
python | Pylons__pyramid | tests/test_session.py | {
"start": 12576,
"end": 17963
} | class ____(SharedCookieSessionTests, unittest.TestCase):
def _makeOne(self, request, **kw):
from pyramid.session import SignedCookieSessionFactory
kw.setdefault('secret', 'secret')
return SignedCookieSessionFactory(**kw)(request)
def _serialize(self, value, salt=b'pyramid.session.', hashalg='sha512'):
import base64
import hashlib
import hmac
import json
digestmod = lambda: hashlib.new(hashalg)
cstruct = json.dumps(value).encode('utf-8')
sig = hmac.new(salt + b'secret', cstruct, digestmod).digest()
return base64.urlsafe_b64encode(sig + cstruct).rstrip(b'=')
def test_reissue_not_triggered(self):
import time
request = testing.DummyRequest()
cookieval = self._serialize((time.time(), 0, {'state': 1}))
request.cookies['session'] = cookieval
session = self._makeOne(request, reissue_time=1)
self.assertEqual(session['state'], 1)
self.assertFalse(session._dirty)
def test_reissue_never(self):
request = testing.DummyRequest()
cookieval = self._serialize((0, 0, {'state': 1}))
request.cookies['session'] = cookieval
session = self._makeOne(request, reissue_time=None, timeout=None)
self.assertEqual(session['state'], 1)
self.assertFalse(session._dirty)
def test_reissue_str_triggered(self):
import time
request = testing.DummyRequest()
cookieval = self._serialize((time.time() - 2, 0, {'state': 1}))
request.cookies['session'] = cookieval
session = self._makeOne(request, reissue_time='0')
self.assertEqual(session['state'], 1)
self.assertTrue(session._dirty)
def test_reissue_invalid(self):
request = testing.DummyRequest()
self.assertRaises(
ValueError, self._makeOne, request, reissue_time='invalid value'
)
def test_cookie_max_age_invalid(self):
request = testing.DummyRequest()
self.assertRaises(
ValueError, self._makeOne, request, max_age='invalid value'
)
def test_custom_salt(self):
import time
request = testing.DummyRequest()
cookieval = self._serialize((time.time(), 0, {'state': 1}), salt=b'f.')
request.cookies['session'] = cookieval
session = self._makeOne(request, salt=b'f.')
self.assertEqual(session['state'], 1)
def test_salt_mismatch(self):
import time
request = testing.DummyRequest()
cookieval = self._serialize((time.time(), 0, {'state': 1}), salt=b'f.')
request.cookies['session'] = cookieval
session = self._makeOne(request, salt=b'g.')
self.assertEqual(session, {})
def test_custom_hashalg(self):
import time
request = testing.DummyRequest()
cookieval = self._serialize(
(time.time(), 0, {'state': 1}), hashalg='sha1'
)
request.cookies['session'] = cookieval
session = self._makeOne(request, hashalg='sha1')
self.assertEqual(session['state'], 1)
def test_hashalg_mismatch(self):
import time
request = testing.DummyRequest()
cookieval = self._serialize(
(time.time(), 0, {'state': 1}), hashalg='sha1'
)
request.cookies['session'] = cookieval
session = self._makeOne(request, hashalg='sha256')
self.assertEqual(session, {})
def test_secret_mismatch(self):
import time
request = testing.DummyRequest()
cookieval = self._serialize((time.time(), 0, {'state': 1}))
request.cookies['session'] = cookieval
session = self._makeOne(request, secret='evilsecret')
self.assertEqual(session, {})
def test_custom_serializer(self):
import base64
from hashlib import sha512
import hmac
import time
request = testing.DummyRequest()
serializer = DummySerializer()
cstruct = serializer.dumps((time.time(), 0, {'state': 1}))
sig = hmac.new(b'pyramid.session.secret', cstruct, sha512).digest()
cookieval = base64.urlsafe_b64encode(sig + cstruct).rstrip(b'=')
request.cookies['session'] = cookieval
session = self._makeOne(request, serializer=serializer)
self.assertEqual(session['state'], 1)
def test_invalid_data_size(self):
import base64
from hashlib import sha512
request = testing.DummyRequest()
num_bytes = sha512().digest_size - 1
cookieval = base64.b64encode(b' ' * num_bytes)
request.cookies['session'] = cookieval
session = self._makeOne(request)
self.assertEqual(session, {})
def test_very_long_key(self):
verylongkey = b'a' * 1024
import webob
request = testing.DummyRequest()
session = self._makeOne(request, secret=verylongkey)
session['a'] = 1
callbacks = request.response_callbacks
self.assertEqual(len(callbacks), 1)
response = webob.Response()
try:
result = callbacks[0](request, response)
except TypeError: # pragma: no cover
self.fail('HMAC failed to initialize due to key length.')
self.assertEqual(result, None)
self.assertTrue('Set-Cookie' in dict(response.headerlist))
| TestSignedCookieSession |
python | PyCQA__pylint | tests/functional/u/unused/unused_private_member.py | {
"start": 9245,
"end": 9453
} | class ____:
"""Regression test for issue 5569"""
@classmethod
def b(cls) -> None:
cls.__a = '' # [unused-private-member]
def a(self):
return type(self).__a
| TypeSelfCallInMethod |
python | numpy__numpy | benchmarks/benchmarks/bench_core.py | {
"start": 52,
"end": 2635
} | class ____(Benchmark):
def setup(self):
self.l100 = range(100)
self.l50 = range(50)
self.float_l1000 = [float(i) for i in range(1000)]
self.float64_l1000 = [np.float64(i) for i in range(1000)]
self.int_l1000 = list(range(1000))
self.l = [np.arange(1000), np.arange(1000)]
self.l_view = [memoryview(a) for a in self.l]
self.l10x10 = np.ones((10, 10))
self.float64_dtype = np.dtype(np.float64)
self.arr = np.arange(10000).reshape(100, 100)
def time_array_1(self):
np.array(1)
def time_array_empty(self):
np.array([])
def time_array_l1(self):
np.array([1])
def time_array_l100(self):
np.array(self.l100)
def time_array_float_l1000(self):
np.array(self.float_l1000)
def time_array_float_l1000_dtype(self):
np.array(self.float_l1000, dtype=self.float64_dtype)
def time_array_float64_l1000(self):
np.array(self.float64_l1000)
def time_array_int_l1000(self):
np.array(self.int_l1000)
def time_array_l(self):
np.array(self.l)
def time_array_l_view(self):
np.array(self.l_view)
def time_can_cast(self):
np.can_cast(self.l10x10, self.float64_dtype)
def time_tobytes_noncontiguous(self):
self.arr.T.tobytes()
def time_can_cast_same_kind(self):
np.can_cast(self.l10x10, self.float64_dtype, casting="same_kind")
def time_vstack_l(self):
np.vstack(self.l)
def time_hstack_l(self):
np.hstack(self.l)
def time_dstack_l(self):
np.dstack(self.l)
def time_arange_100(self):
np.arange(100)
def time_zeros_100(self):
np.zeros(100)
def time_ones_100(self):
np.ones(100)
def time_empty_100(self):
np.empty(100)
def time_empty_like(self):
np.empty_like(self.l10x10)
def time_eye_100(self):
np.eye(100)
def time_identity_100(self):
np.identity(100)
def time_eye_3000(self):
np.eye(3000)
def time_identity_3000(self):
np.identity(3000)
def time_diag_l100(self):
np.diag(self.l100)
def time_diagflat_l100(self):
np.diagflat(self.l100)
def time_diagflat_l50_l50(self):
np.diagflat([self.l50, self.l50])
def time_triu_l10x10(self):
np.triu(self.l10x10)
def time_tril_l10x10(self):
np.tril(self.l10x10)
def time_triu_indices_500(self):
np.triu_indices(500)
def time_tril_indices_500(self):
np.tril_indices(500)
| Core |
python | sqlalchemy__sqlalchemy | test/sql/test_metadata.py | {
"start": 207014,
"end": 209642
} | class ____(fixtures.TestBase):
@contextmanager
def _fixture(self):
from sqlalchemy.engine.default import DefaultDialect
class CopyDialectOptionsTestDialect(DefaultDialect):
construct_arguments = [
(Table, {"some_table_arg": None}),
(Column, {"some_column_arg": None}),
(Index, {"some_index_arg": None}),
(PrimaryKeyConstraint, {"some_pk_arg": None}),
(UniqueConstraint, {"some_uq_arg": None}),
]
def load(dialect_name):
if dialect_name == "copydialectoptionstest":
return CopyDialectOptionsTestDialect
else:
raise exc.NoSuchModuleError("no dialect %r" % dialect_name)
with mock.patch("sqlalchemy.dialects.registry.load", load):
yield
@classmethod
def check_dialect_options_(cls, t):
eq_(
t.dialect_kwargs["copydialectoptionstest_some_table_arg"],
"a1",
)
eq_(
t.c.foo.dialect_kwargs["copydialectoptionstest_some_column_arg"],
"a2",
)
eq_(
t.primary_key.dialect_kwargs["copydialectoptionstest_some_pk_arg"],
"a3",
)
eq_(
list(t.indexes)[0].dialect_kwargs[
"copydialectoptionstest_some_index_arg"
],
"a4",
)
eq_(
list(c for c in t.constraints if isinstance(c, UniqueConstraint))[
0
].dialect_kwargs["copydialectoptionstest_some_uq_arg"],
"a5",
)
def test_dialect_options_are_copied(self):
with self._fixture():
t1 = Table(
"t",
MetaData(),
Column(
"foo",
Integer,
copydialectoptionstest_some_column_arg="a2",
),
Column("bar", Integer),
PrimaryKeyConstraint(
"foo", copydialectoptionstest_some_pk_arg="a3"
),
UniqueConstraint(
"bar", copydialectoptionstest_some_uq_arg="a5"
),
copydialectoptionstest_some_table_arg="a1",
)
Index(
"idx",
t1.c.foo,
copydialectoptionstest_some_index_arg="a4",
)
self.check_dialect_options_(t1)
m2 = MetaData()
t2 = t1.to_metadata(m2) # make a copy
self.check_dialect_options_(t2)
| CopyDialectOptionsTest |
python | readthedocs__readthedocs.org | readthedocs/api/v3/serializers.py | {
"start": 36368,
"end": 37514
} | class ____(serializers.ModelSerializer):
project = serializers.SlugRelatedField(slug_field="slug", read_only=True)
_links = EnvironmentVariableLinksSerializer(source="*", read_only=True)
class Meta:
model = EnvironmentVariable
fields = [
"pk",
"created",
"modified",
"name",
"value",
"public",
"project",
"_links",
]
extra_kwargs = {
"value": {"write_only": True},
}
def create(self, validated_data):
validate_environment_variable_size(
project=validated_data["project"],
new_env_value=validated_data["value"],
error_class=serializers.ValidationError,
)
return super().create(validated_data)
def update(self, instance, validated_data):
validate_environment_variable_size(
project=instance.project,
new_env_value=validated_data["value"],
error_class=serializers.ValidationError,
)
return super().update(instance, validated_data)
| EnvironmentVariableSerializer |
python | RaRe-Technologies__gensim | gensim/test/test_fasttext.py | {
"start": 51693,
"end": 52000
} | class ____(unittest.TestCase):
def test_compatibility_true(self):
m = FT_gensim.load(datapath('compatible-hash-true.model'))
self.assertTrue(m.wv.compatible_hash)
def test_hash_native(self):
m = load_native()
self.assertTrue(m.wv.compatible_hash)
| HashCompatibilityTest |
python | pydantic__pydantic | pydantic/_internal/_decorators_v1.py | {
"start": 4282,
"end": 6185
} | class ____(Protocol):
"""V2 validator with mode='after'."""
def __call__(
self, __fields_tuple: RootValidatorFieldsTuple, __info: core_schema.ValidationInfo
) -> RootValidatorFieldsTuple: ...
def make_v1_generic_root_validator(
validator: V1RootValidatorFunction, pre: bool
) -> V2CoreBeforeRootValidator | V2CoreAfterRootValidator:
"""Wrap a V1 style root validator for V2 compatibility.
Args:
validator: The V1 style field validator.
pre: Whether the validator is a pre validator.
Returns:
A wrapped V2 style validator.
"""
if pre is True:
# mode='before' for pydantic-core
def _wrapper1(values: RootValidatorValues, _: core_schema.ValidationInfo) -> RootValidatorValues:
return validator(values)
return _wrapper1
# mode='after' for pydantic-core
def _wrapper2(fields_tuple: RootValidatorFieldsTuple, _: core_schema.ValidationInfo) -> RootValidatorFieldsTuple:
if len(fields_tuple) == 2:
# dataclass, this is easy
values, init_vars = fields_tuple
values = validator(values)
return values, init_vars
else:
# ugly hack: to match v1 behaviour, we merge values and model_extra, then split them up based on fields
# afterwards
model_dict, model_extra, fields_set = fields_tuple
if model_extra:
fields = set(model_dict.keys())
model_dict.update(model_extra)
model_dict_new = validator(model_dict)
for k in list(model_dict_new.keys()):
if k not in fields:
model_extra[k] = model_dict_new.pop(k)
else:
model_dict_new = validator(model_dict)
return model_dict_new, model_extra, fields_set
return _wrapper2
| V2CoreAfterRootValidator |
python | matplotlib__matplotlib | lib/matplotlib/backend_bases.py | {
"start": 124995,
"end": 132356
} | class ____:
"""
Base class for all tool containers, e.g. toolbars.
Attributes
----------
toolmanager : `.ToolManager`
The tools with which this `ToolContainerBase` wants to communicate.
"""
_icon_extension = '.png'
"""
Toolcontainer button icon image format extension
**String**: Image extension
"""
def __init__(self, toolmanager):
self.toolmanager = toolmanager
toolmanager.toolmanager_connect(
'tool_message_event',
lambda event: self.set_message(event.message))
toolmanager.toolmanager_connect(
'tool_removed_event',
lambda event: self.remove_toolitem(event.tool.name))
def _tool_toggled_cbk(self, event):
"""
Capture the 'tool_trigger_[name]'
This only gets used for toggled tools.
"""
self.toggle_toolitem(event.tool.name, event.tool.toggled)
def add_tool(self, tool, group, position=-1):
"""
Add a tool to this container.
Parameters
----------
tool : tool_like
The tool to add, see `.ToolManager.get_tool`.
group : str
The name of the group to add this tool to.
position : int, default: -1
The position within the group to place this tool.
"""
tool = self.toolmanager.get_tool(tool)
image = self._get_image_filename(tool)
toggle = getattr(tool, 'toggled', None) is not None
self.add_toolitem(tool.name, group, position,
image, tool.description, toggle)
if toggle:
self.toolmanager.toolmanager_connect('tool_trigger_%s' % tool.name,
self._tool_toggled_cbk)
# If initially toggled
if tool.toggled:
self.toggle_toolitem(tool.name, True)
def _get_image_filename(self, tool):
"""Resolve a tool icon's filename."""
if not tool.image:
return None
if os.path.isabs(tool.image):
filename = tool.image
else:
if "image" in getattr(tool, "__dict__", {}):
raise ValueError("If 'tool.image' is an instance variable, "
"it must be an absolute path")
for cls in type(tool).__mro__:
if "image" in vars(cls):
try:
src = inspect.getfile(cls)
break
except (OSError, TypeError):
raise ValueError("Failed to locate source file "
"where 'tool.image' is defined") from None
else:
raise ValueError("Failed to find parent class defining 'tool.image'")
filename = str(pathlib.Path(src).parent / tool.image)
for filename in [filename, filename + self._icon_extension]:
if os.path.isfile(filename):
return os.path.abspath(filename)
for fname in [ # Fallback; once deprecation elapses.
tool.image,
tool.image + self._icon_extension,
cbook._get_data_path("images", tool.image),
cbook._get_data_path("images", tool.image + self._icon_extension),
]:
if os.path.isfile(fname):
_api.warn_deprecated(
"3.9", message=f"Loading icon {tool.image!r} from the current "
"directory or from Matplotlib's image directory. This behavior "
"is deprecated since %(since)s and will be removed in %(removal)s; "
"Tool.image should be set to a path relative to the Tool's source "
"file, or to an absolute path.")
return os.path.abspath(fname)
def trigger_tool(self, name):
"""
Trigger the tool.
Parameters
----------
name : str
Name (id) of the tool triggered from within the container.
"""
self.toolmanager.trigger_tool(name, sender=self)
def add_toolitem(self, name, group, position, image, description, toggle):
"""
A hook to add a toolitem to the container.
This hook must be implemented in each backend and contains the
backend-specific code to add an element to the toolbar.
.. warning::
This is part of the backend implementation and should
not be called by end-users. They should instead call
`.ToolContainerBase.add_tool`.
The callback associated with the button click event
must be *exactly* ``self.trigger_tool(name)``.
Parameters
----------
name : str
Name of the tool to add, this gets used as the tool's ID and as the
default label of the buttons.
group : str
Name of the group that this tool belongs to.
position : int
Position of the tool within its group, if -1 it goes at the end.
image : str
Filename of the image for the button or `None`.
description : str
Description of the tool, used for the tooltips.
toggle : bool
* `True` : The button is a toggle (change the pressed/unpressed
state between consecutive clicks).
* `False` : The button is a normal button (returns to unpressed
state after release).
"""
raise NotImplementedError
def toggle_toolitem(self, name, toggled):
"""
A hook to toggle a toolitem without firing an event.
This hook must be implemented in each backend and contains the
backend-specific code to silently toggle a toolbar element.
.. warning::
This is part of the backend implementation and should
not be called by end-users. They should instead call
`.ToolManager.trigger_tool` or `.ToolContainerBase.trigger_tool`
(which are equivalent).
Parameters
----------
name : str
Id of the tool to toggle.
toggled : bool
Whether to set this tool as toggled or not.
"""
raise NotImplementedError
def remove_toolitem(self, name):
"""
A hook to remove a toolitem from the container.
This hook must be implemented in each backend and contains the
backend-specific code to remove an element from the toolbar; it is
called when `.ToolManager` emits a ``tool_removed_event``.
Because some tools are present only on the `.ToolManager` but not on
the `ToolContainerBase`, this method must be a no-op when called on a tool
absent from the container.
.. warning::
This is part of the backend implementation and should
not be called by end-users. They should instead call
`.ToolManager.remove_tool`.
Parameters
----------
name : str
Name of the tool to remove.
"""
raise NotImplementedError
def set_message(self, s):
"""
Display a message on the toolbar.
Parameters
----------
s : str
Message text.
"""
raise NotImplementedError
| ToolContainerBase |
python | getsentry__sentry | tests/relay_integration/lang/javascript/test_plugin.py | {
"start": 2084,
"end": 103049
} | class ____(RelayStoreHelper):
@pytest.fixture(autouse=True)
def initialize(self, default_projectkey, default_project, set_sentry_option, live_server):
self.project = default_project
self.projectkey = default_projectkey
self.organization = self.project.organization
self.min_ago = before_now(minutes=1).isoformat()
# We disable scraping per-test when necessary.
self.project.update_option("sentry:scrape_javascript", True)
with set_sentry_option("system.url-prefix", live_server.url):
# Run test case
yield
@requires_symbolicator
@pytest.mark.symbolicator
def test_adds_contexts_without_device(self) -> None:
data = {
"timestamp": self.min_ago,
"message": "hello",
"platform": "javascript",
"request": {
"url": "http://example.com",
"headers": [
[
"User-Agent",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/28.0.1500.72 Safari/537.36",
]
],
},
}
event = self.post_and_retrieve_event(data)
contexts = event.interfaces["contexts"].to_json()
assert contexts.get("os") == {
"os": "Windows 8",
"name": "Windows",
"version": "8",
"type": "os",
}
assert contexts.get("device") is None
@requires_symbolicator
@pytest.mark.symbolicator
def test_adds_contexts_with_device(self) -> None:
data = {
"timestamp": self.min_ago,
"message": "hello",
"platform": "javascript",
"request": {
"url": "http://example.com",
"headers": [
[
"User-Agent",
"Mozilla/5.0 (Linux; U; Android 4.3; en-us; SCH-R530U Build/JSS15J) AppleWebKit/534.30 ("
"KHTML, like Gecko) Version/4.0 Mobile Safari/534.30 USCC-R530U",
]
],
},
}
event = self.post_and_retrieve_event(data)
contexts = event.interfaces["contexts"].to_json()
assert contexts.get("os") == {
"os": "Android 4.3",
"name": "Android",
"type": "os",
"version": "4.3",
}
browser_context = contexts.get("browser")
# The `user_agent` field was added retroactively so the browser context assertion needs to be forwards and backwards compatible
assert browser_context.pop("user_agent", None) in (
None,
"Mozilla/5.0 (Linux; U; Android 4.3; en-us; SCH-R530U Build/JSS15J) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30 USCC-R530U",
)
assert browser_context == {
"browser": "Android 4.3",
"name": "Android",
"type": "browser",
"version": "4.3",
}
assert contexts.get("device") == {
"family": "Samsung SCH-R530U",
"type": "device",
"model": "SCH-R530U",
"name": "Galaxy S3",
"brand": "Samsung",
}
@requires_symbolicator
@pytest.mark.symbolicator
def test_adds_contexts_with_ps4_device(self) -> None:
data = {
"timestamp": self.min_ago,
"message": "hello",
"platform": "javascript",
"request": {
"url": "http://example.com",
"headers": [
[
"User-Agent",
"Mozilla/5.0 (PlayStation 4 3.55) AppleWebKit/537.78 (KHTML, like Gecko)",
]
],
},
}
event = self.post_and_retrieve_event(data)
contexts = event.interfaces["contexts"].to_json()
assert contexts.get("os") is None
assert contexts.get("browser") is None
assert contexts.get("device") == {
"family": "PlayStation 4",
"type": "device",
"model": "PlayStation 4",
"brand": "Sony",
}
@requires_symbolicator
@pytest.mark.symbolicator
def test_error_message_translations(self) -> None:
data = {
"timestamp": self.min_ago,
"message": "hello",
"platform": "javascript",
"logentry": {
"formatted": "ReferenceError: Impossible de d\xe9finir une propri\xe9t\xe9 \xab foo \xbb : objet non "
"extensible"
},
"exception": {
"values": [
{"type": "Error", "value": "P\u0159\xedli\u0161 mnoho soubor\u016f"},
{
"type": "Error",
"value": "foo: wyst\u0105pi\u0142 nieoczekiwany b\u0142\u0105d podczas pr\xf3by uzyskania "
"informacji o metadanych",
},
]
},
}
event = self.post_and_retrieve_event(data)
message = event.interfaces["logentry"]
assert (
message.formatted
== "ReferenceError: Cannot define property 'foo': object is not extensible"
)
exception = event.interfaces["exception"]
assert exception.values[0].value == "Too many files"
assert (
exception.values[1].value
== "foo: an unexpected failure occurred while trying to obtain metadata information"
)
@requires_symbolicator
@pytest.mark.symbolicator
def test_nonhandled_frames_inapp_normalization(self) -> None:
data = {
"timestamp": self.min_ago,
"message": "hello",
"platform": "node",
"exception": {
"values": [
{
"type": "Error",
"stacktrace": {
"frames": [
{
"abs_path": "native",
"lineno": 1,
"colno": 1,
"in_app": True,
},
{
"abs_path": "[native code]",
"lineno": 1,
"colno": 1,
"in_app": True,
},
{
"abs_path": "app://dist/bundle/file.min.js",
"lineno": 1,
"colno": 1,
"in_app": True,
},
]
},
}
]
},
}
event = self.post_and_retrieve_event(data)
exception = event.interfaces["exception"]
frame_list = exception.values[0].stacktrace.frames
assert not frame_list[0].in_app # should be overwritten due to `native` abs_path
assert not frame_list[1].in_app # should be overwritten due to `[native code]` abs_path
assert frame_list[2].in_app # should not be touched and retain `in_app: true`
raw_frame_list = exception.values[0].raw_stacktrace.frames
# none of the raw frames should be altered
assert raw_frame_list[0].in_app
assert raw_frame_list[1].in_app
assert raw_frame_list[2].in_app
def _create_source_files_and_sourcemaps(self, release):
for file in ["file.min.js", "file1.js", "file2.js", "file.sourcemap.js"]:
with open(get_fixture_path(file), "rb") as f:
f1 = File.objects.create(
name=file,
type="release.file",
headers={},
)
f1.putfile(f)
ReleaseFile.objects.create(
name=f"http://example.com/{f1.name}",
release_id=release.id,
organization_id=self.project.organization_id,
file=f1,
)
@requires_symbolicator
@pytest.mark.symbolicator
def test_sourcemap_source_expansion(self) -> None:
self.project.update_option("sentry:scrape_javascript", False)
release = Release.objects.create(
organization_id=self.project.organization_id, version="abc"
)
release.add_project(self.project)
self._create_source_files_and_sourcemaps(release)
data = {
"timestamp": self.min_ago,
"message": "hello",
"platform": "javascript",
"release": "abc",
"exception": {
"values": [
{
"type": "Error",
"stacktrace": {
"frames": [
{
"abs_path": "http://example.com/file.min.js",
"filename": "file.min.js",
"lineno": 1,
"colno": 39,
},
# NOTE: Intentionally source is not retrieved from this HTML file
{
"function": 'function: "HTMLDocument.<anonymous>"',
"abs_path": "http//example.com/index.html",
"filename": "index.html",
"lineno": 283,
"colno": 17,
"in_app": False,
},
# NOTE: a mixed stack trace with a native frame:
{
"instruction_addr": "0xd10349",
},
]
},
}
]
},
}
event = self.post_and_retrieve_event(data)
assert event.data["errors"] == [
{
"type": "js_no_source",
"symbolicator_type": "missing_source",
"url": "http//example.com/index.html",
}
]
scraping_attempts = sorted(
event.data["scraping_attempts"], key=lambda attempt: attempt["url"]
)
assert scraping_attempts == [
{"status": "not_attempted", "url": "http://example.com/file.min.js"},
{"status": "not_attempted", "url": "http://example.com/file.sourcemap.js"},
{"status": "not_attempted", "url": "http://example.com/file1.js"},
]
exception = event.interfaces["exception"]
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert frame.data["resolved_with"] == "release-old"
assert frame.data["symbolicated"]
assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
expected = "\treturn a + b; // fôo"
assert frame.context_line == expected
assert frame.post_context == ["}"]
raw_frame_list = exception.values[0].raw_stacktrace.frames
raw_frame = raw_frame_list[0]
assert not raw_frame.pre_context
assert (
raw_frame.context_line
== 'function add(a,b){"use strict";return a+b}function multiply(a,b){"use strict";return a*b}function '
'divide(a,b){"use strict";try{return multip {snip}'
)
assert raw_frame.post_context == ["//@ sourceMappingURL=file.sourcemap.js"]
assert raw_frame.lineno == 1
# Since we couldn't expand source for the 2nd frame, both
# its raw and original form should be identical, apart from `data.symbolicated`
assert not get_path(frame_list[1], "data", "symbolicated", default=False)
assert raw_frame_list[1].abs_path == frame_list[1].abs_path
assert raw_frame_list[1].filename == frame_list[1].filename
assert raw_frame_list[1].function == frame_list[1].function
assert raw_frame_list[1].in_app == frame_list[1].in_app
assert raw_frame_list[1].lineno == frame_list[1].lineno
assert raw_frame_list[1].colno == frame_list[1].colno
# The second non-js frame should be untouched
assert raw_frame_list[2] == frame_list[2]
@requires_symbolicator
@pytest.mark.symbolicator
def test_sourcemap_webpack(self) -> None:
self.project.update_option("sentry:scrape_javascript", False)
release = Release.objects.create(
organization_id=self.project.organization_id, version="abc"
)
release.add_project(self.project)
for file in [
"webpack1.min.js",
"webpack2.min.js",
"webpack1.min.js.map",
"webpack2.min.js.map",
]:
with open(get_fixture_path(file), "rb") as f:
f1 = File.objects.create(
name=file,
type="release.file",
headers={},
)
f1.putfile(f)
ReleaseFile.objects.create(
name=f"http://example.com/{f1.name}",
release_id=release.id,
organization_id=self.project.organization_id,
file=f1,
)
data = {
"timestamp": self.min_ago,
"message": "hello",
"platform": "javascript",
"release": "abc",
"exception": {
"values": [
{
"type": "Error",
"stacktrace": {
"frames": [
{
"abs_path": "http://example.com/webpack1.min.js",
"filename": "webpack1.min.js",
"lineno": 1,
"colno": 183,
"function": "i",
},
{
"abs_path": "http://example.com/webpack2.min.js",
"filename": "webpack2.min.js",
"lineno": 1,
"colno": 183,
"function": "i",
},
]
},
}
]
},
}
event = self.post_and_retrieve_event(data)
exception = event.interfaces["exception"]
frame_list = exception.values[0].stacktrace.frames
scraping_attempts = sorted(
event.data["scraping_attempts"], key=lambda attempt: attempt["url"]
)
assert scraping_attempts == [
{"url": "http://example.com/webpack1.min.js", "status": "not_attempted"},
{"url": "http://example.com/webpack1.min.js.map", "status": "not_attempted"},
{"url": "http://example.com/webpack2.min.js", "status": "not_attempted"},
{"url": "http://example.com/webpack2.min.js.map", "status": "not_attempted"},
]
# The first frame should be in_app.
first_frame = frame_list[0]
assert first_frame.in_app
assert first_frame.function == "test"
assert first_frame.pre_context == [
" cb(data);",
" }",
"",
" function test() {",
" var data = {failed: true, value: 42};",
]
assert first_frame.context_line == " invoke(data);"
assert first_frame.post_context == [
" }",
"",
" return test;",
"})();",
]
# The second frame should be identical to the first, except not in_app.
second_frame = frame_list[1]
assert not second_frame.in_app
assert second_frame.function == first_frame.function
assert second_frame.context_line == first_frame.context_line
assert second_frame.pre_context == first_frame.pre_context
assert second_frame.post_context == first_frame.post_context
@pytest.mark.skip(reason="flaky: #94543")
@requires_symbolicator
@pytest.mark.symbolicator
def test_sourcemap_embedded_source_expansion(self) -> None:
self.project.update_option("sentry:scrape_javascript", False)
release = Release.objects.create(
organization_id=self.project.organization_id, version="abc"
)
release.add_project(self.project)
for file in ["embedded.js", "embedded.js.map"]:
with open(get_fixture_path(file), "rb") as f:
f1 = File.objects.create(
name=file,
type="release.file",
headers={},
)
f1.putfile(f)
ReleaseFile.objects.create(
name=f"http://example.com/{f1.name}",
release_id=release.id,
organization_id=self.project.organization_id,
file=f1,
)
data = {
"timestamp": self.min_ago,
"message": "hello",
"platform": "javascript",
"release": "abc",
"exception": {
"values": [
{
"type": "Error",
"stacktrace": {
"frames": [
{
"abs_path": "http://example.com/embedded.js",
"filename": "file.min.js",
"lineno": 1,
"colno": 39,
},
# NOTE: Intentionally source is not retrieved from this HTML file
{
"function": 'function: "HTMLDocument.<anonymous>"',
"abs_path": "http//example.com/index.html",
"filename": "index.html",
"lineno": 283,
"colno": 17,
"in_app": False,
},
]
},
}
]
},
}
event = self.post_and_retrieve_event(data)
assert event.data["errors"] == [
{
"type": "js_no_source",
"symbolicator_type": "missing_source",
"url": "http//example.com/index.html",
}
]
scraping_attempts = sorted(
event.data["scraping_attempts"], key=lambda attempt: attempt["url"]
)
assert scraping_attempts == [
{"status": "not_attempted", "url": "http://example.com/embedded.js"},
{"status": "not_attempted", "url": "http://example.com/embedded.js.map"},
]
exception = event.interfaces["exception"]
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert frame.data["resolved_with"] == "release-old"
assert frame.data["symbolicated"]
assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
expected = "\treturn a + b; // fôo"
assert frame.context_line == expected
assert frame.post_context == ["}"]
    @requires_symbolicator
    @pytest.mark.symbolicator
    def test_sourcemap_nofiles_source_expansion(self) -> None:
        """Symbolicate using a source map that carries no separate source
        files (sources embedded in the map itself).

        The minified artifact is registered under a ``~/`` (host-agnostic)
        URL and the map under an ``app:///`` URL, exercising both naming
        schemes; the single frame must still expand to original source.
        """
        project = self.project
        release = Release.objects.create(organization_id=project.organization_id, version="abc")
        release.add_project(project)
        # Minified source, registered with the host-agnostic "~/" prefix.
        with open(get_fixture_path("nofiles.js"), "rb") as f:
            f_minified = File.objects.create(
                name="nofiles.js", type="release.file", headers={"Content-Type": "application/json"}
            )
            f_minified.putfile(f)
        ReleaseFile.objects.create(
            name=f"~/{f_minified.name}",
            release_id=release.id,
            organization_id=project.organization_id,
            file=f_minified,
        )
        # Source map, registered under an app:/// URL.
        with open(get_fixture_path("nofiles.js.map"), "rb") as f:
            f_sourcemap = File.objects.create(
                name="nofiles.js.map",
                type="release.file",
                headers={"Content-Type": "application/json"},
            )
            f_sourcemap.putfile(f)
        ReleaseFile.objects.create(
            name=f"app:///{f_sourcemap.name}",
            release_id=release.id,
            organization_id=project.organization_id,
            file=f_sourcemap,
        )
        data = {
            "timestamp": self.min_ago,
            "message": "hello",
            "platform": "javascript",
            "release": "abc",
            "exception": {
                "values": [
                    {
                        "type": "Error",
                        "stacktrace": {
                            "frames": [{"abs_path": "app:///nofiles.js", "lineno": 1, "colno": 39}]
                        },
                    }
                ]
            },
        }
        event = self.post_and_retrieve_event(data)
        assert "errors" not in event.data
        scraping_attempts = sorted(
            event.data["scraping_attempts"], key=lambda attempt: attempt["url"]
        )
        assert scraping_attempts == [
            {"url": "app:///nofiles.js", "status": "not_attempted"},
            {"url": "app:///nofiles.js.map", "status": "not_attempted"},
        ]
        exception = event.interfaces["exception"]
        frame_list = exception.values[0].stacktrace.frames
        assert len(frame_list) == 1
        frame = frame_list[0]
        assert frame.data["resolved_with"] == "release-old"
        assert frame.data["symbolicated"]
        assert frame.abs_path == "app:///nofiles.js"
        assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
        assert frame.context_line == "\treturn a + b; // fôo"
        assert frame.post_context == ["}"]
    @requires_symbolicator
    @pytest.mark.symbolicator
    def test_indexed_sourcemap_source_expansion(self) -> None:
        """Symbolicate two frames through an *indexed* source map (a map of
        maps with section offsets), with scraping disabled.

        Verifies both the expanded (symbolicated) frames and the raw
        (minified) stacktrace that symbolication preserves alongside them.
        """
        # Resolution must come from the uploaded release files only.
        self.project.update_option("sentry:scrape_javascript", False)
        release = Release.objects.create(
            organization_id=self.project.organization_id, version="abc"
        )
        release.add_project(self.project)
        # Minified bundle, its two original sources, and the indexed map.
        for file in ["indexed.min.js", "file1.js", "file2.js", "indexed.sourcemap.js"]:
            with open(get_fixture_path(file), "rb") as f:
                f1 = File.objects.create(
                    name=file,
                    type="release.file",
                    headers={},
                )
                f1.putfile(f)
            ReleaseFile.objects.create(
                name=f"http://example.com/{f1.name}",
                release_id=release.id,
                organization_id=self.project.organization_id,
                file=f1,
            )
        data = {
            "timestamp": self.min_ago,
            "message": "hello",
            "platform": "javascript",
            "release": "abc",
            "exception": {
                "values": [
                    {
                        "type": "Error",
                        "stacktrace": {
                            "frames": [
                                {
                                    "abs_path": "http://example.com/indexed.min.js",
                                    "filename": "indexed.min.js",
                                    "lineno": 1,
                                    "colno": 39,
                                },
                                {
                                    "abs_path": "http://example.com/indexed.min.js",
                                    "filename": "indexed.min.js",
                                    "lineno": 2,
                                    "colno": 44,
                                },
                            ]
                        },
                    }
                ]
            },
        }
        event = self.post_and_retrieve_event(data)
        assert "errors" not in event.data
        scraping_attempts = sorted(
            event.data["scraping_attempts"], key=lambda attempt: attempt["url"]
        )
        assert scraping_attempts == [
            {"status": "not_attempted", "url": "http://example.com/file1.js"},
            {"status": "not_attempted", "url": "http://example.com/file2.js"},
            {"status": "not_attempted", "url": "http://example.com/indexed.min.js"},
            {"status": "not_attempted", "url": "http://example.com/indexed.sourcemap.js"},
        ]
        exception = event.interfaces["exception"]
        frame_list = exception.values[0].stacktrace.frames
        # Frame 0: maps into file1.js via the first map section.
        frame = frame_list[0]
        assert frame.data["resolved_with"] == "release-old"
        assert frame.data["symbolicated"]
        assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
        expected = "\treturn a + b; // fôo"
        assert frame.context_line == expected
        assert frame.post_context == ["}"]
        # Raw (pre-expansion) stacktrace is preserved alongside.
        raw_frame_list = exception.values[0].raw_stacktrace.frames
        raw_frame = raw_frame_list[0]
        assert not raw_frame.pre_context
        assert raw_frame.context_line == 'function add(a,b){"use strict";return a+b}'
        assert raw_frame.post_context == [
            'function multiply(a,b){"use strict";return a*b}function divide(a,b){"use strict";try{return multiply('
            "add(a,b),a,b)/c}catch(e){Raven.captureE {snip}",
            "//# sourceMappingURL=indexed.sourcemap.js",
        ]
        assert raw_frame.lineno == 1
        # Frame 1: maps into file2.js via the second map section.
        frame = frame_list[1]
        assert frame.data["resolved_with"] == "release-old"
        assert frame.data["symbolicated"]
        assert frame.pre_context == ["function multiply(a, b) {", '\t"use strict";']
        assert frame.context_line == "\treturn a * b;"
        assert frame.post_context == [
            "}",
            "function divide(a, b) {",
            '\t"use strict";',
            "\ttry {",
            "\t\treturn multiply(add(a, b), a, b) / c;",
        ]
        raw_frame = raw_frame_list[1]
        assert raw_frame.pre_context == ['function add(a,b){"use strict";return a+b}']
        assert (
            raw_frame.context_line
            == 'function multiply(a,b){"use strict";return a*b}function divide(a,b){"use strict";try{return multiply('
            "add(a,b),a,b)/c}catch(e){Raven.captureE {snip}"
        )
        assert raw_frame.post_context == ["//# sourceMappingURL=indexed.sourcemap.js"]
        assert raw_frame.lineno == 2
    @requires_symbolicator
    @pytest.mark.symbolicator
    def test_expansion_via_debug(self) -> None:
        """Expand two minified frames via release files, verifying that a
        full-URL artifact takes priority over a ``~/`` (host-agnostic) alias.

        A decoy empty file is registered under ``~/file2.js``; if the
        processor preferred it over ``http://example.com/file2.js``, the
        second frame would lose its source context and the assertions below
        would fail.
        """
        project = self.project
        release = Release.objects.create(organization_id=project.organization_id, version="abc")
        release.add_project(project)
        # file.min.js
        # ------------
        with open(get_fixture_path("file.min.js"), "rb") as f:
            f_minified = File.objects.create(
                name="file.min.js",
                type="release.file",
                headers={"Content-Type": "application/json"},
            )
            f_minified.putfile(f)
        # Intentionally omit hostname - use alternate artifact path lookup instead
        # /file1.js vs http://example.com/file1.js
        ReleaseFile.objects.create(
            name=f"~/{f_minified.name}?foo=bar",
            release_id=release.id,
            organization_id=project.organization_id,
            file=f_minified,
        )
        # file1.js
        # ---------
        with open(get_fixture_path("file1.js"), "rb") as f:
            f1 = File.objects.create(
                name="file1.js", type="release.file", headers={"Content-Type": "application/json"}
            )
            f1.putfile(f)
        ReleaseFile.objects.create(
            name=f"http://example.com/{f1.name}",
            release_id=release.id,
            organization_id=project.organization_id,
            file=f1,
        )
        # file2.js
        # ----------
        with open(get_fixture_path("file2.js"), "rb") as f:
            f2 = File.objects.create(
                name="file2.js", type="release.file", headers={"Content-Type": "application/json"}
            )
            f2.putfile(f)
        ReleaseFile.objects.create(
            name=f"http://example.com/{f2.name}",
            release_id=release.id,
            organization_id=project.organization_id,
            file=f2,
        )
        # To verify that the full url has priority over the relative url,
        # we will also add a second ReleaseFile alias for file2.js (f3) w/o
        # hostname that points to an empty file. If the processor chooses
        # this empty file over the correct file2.js, it will not locate
        # context for the 2nd frame.
        with open(get_fixture_path("empty.js"), "rb") as f:
            f2_empty = File.objects.create(
                name="empty.js", type="release.file", headers={"Content-Type": "application/json"}
            )
            f2_empty.putfile(f)
        ReleaseFile.objects.create(
            name=f"~/{f2.name}",  # intentionally using f2.name ("file2.js")
            release_id=release.id,
            organization_id=project.organization_id,
            file=f2_empty,
        )
        # sourcemap
        # ----------
        with open(get_fixture_path("file.sourcemap.js"), "rb") as f:
            f_sourcemap = File.objects.create(
                name="file.sourcemap.js",
                type="release.file",
                headers={"Content-Type": "application/json"},
            )
            f_sourcemap.putfile(f)
        ReleaseFile.objects.create(
            name=f"http://example.com/{f_sourcemap.name}",
            release_id=release.id,
            organization_id=project.organization_id,
            file=f_sourcemap,
        )
        data = {
            "timestamp": self.min_ago,
            "message": "hello",
            "platform": "javascript",
            "release": "abc",
            "exception": {
                "values": [
                    {
                        "type": "Error",
                        "stacktrace": {
                            "frames": [
                                {
                                    "abs_path": "http://example.com/file.min.js?foo=bar",
                                    "filename": "file.min.js",
                                    "lineno": 1,
                                    "colno": 39,
                                },
                                {
                                    "abs_path": "http://example.com/file.min.js?foo=bar",
                                    "filename": "file.min.js",
                                    "lineno": 1,
                                    "colno": 79,
                                },
                            ]
                        },
                    }
                ]
            },
        }
        event = self.post_and_retrieve_event(data)
        assert "errors" not in event.data
        exception = event.interfaces["exception"]
        frame_list = exception.values[0].stacktrace.frames
        # Frame 0 expands into file1.js source.
        frame = frame_list[0]
        assert frame.data["resolved_with"] == "release-old"
        assert frame.data["symbolicated"]
        assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
        assert frame.context_line == "\treturn a + b; // fôo"
        assert frame.post_context == ["}"]
        # Frame 1 expands into file2.js source — proves the full URL won
        # over the empty "~/file2.js" decoy.
        frame = frame_list[1]
        assert frame.data["resolved_with"] == "release-old"
        assert frame.data["symbolicated"]
        assert frame.pre_context == ["function multiply(a, b) {", '\t"use strict";']
        assert frame.context_line == "\treturn a * b;"
        assert frame.post_context == [
            "}",
            "function divide(a, b) {",
            '\t"use strict";',
            "\ttry {",
            "\t\treturn multiply(add(a, b), a, b) / c;",
        ]
    @requires_symbolicator
    @pytest.mark.symbolicator
    def test_expansion_via_distribution_release_artifacts(self) -> None:
        """Same scenario as ``test_expansion_via_debug`` but with every
        release file bound to a distribution ("foo") and the event carrying
        the matching ``dist`` field, verifying dist-scoped artifact lookup.
        """
        project = self.project
        release = Release.objects.create(organization_id=project.organization_id, version="abc")
        release.add_project(project)
        # All artifacts below are registered under this dist.
        dist = release.add_dist("foo")
        # file.min.js
        # ------------
        with open(get_fixture_path("file.min.js"), "rb") as f:
            f_minified = File.objects.create(
                name="file.min.js",
                type="release.file",
                headers={"Content-Type": "application/json"},
            )
            f_minified.putfile(f)
        # Intentionally omit hostname - use alternate artifact path lookup instead
        # /file1.js vs http://example.com/file1.js
        ReleaseFile.objects.create(
            name=f"~/{f_minified.name}?foo=bar",
            release_id=release.id,
            dist_id=dist.id,
            organization_id=project.organization_id,
            file=f_minified,
        )
        # file1.js
        # ---------
        with open(get_fixture_path("file1.js"), "rb") as f:
            f1 = File.objects.create(
                name="file1.js",
                type="release.file",
                headers={"Content-Type": "application/json"},
            )
            f1.putfile(f)
        ReleaseFile.objects.create(
            name=f"http://example.com/{f1.name}",
            release_id=release.id,
            dist_id=dist.id,
            organization_id=project.organization_id,
            file=f1,
        )
        # file2.js
        # ----------
        with open(get_fixture_path("file2.js"), "rb") as f:
            f2 = File.objects.create(
                name="file2.js",
                type="release.file",
                headers={"Content-Type": "application/json"},
            )
            f2.putfile(f)
        ReleaseFile.objects.create(
            name=f"http://example.com/{f2.name}",
            release_id=release.id,
            dist_id=dist.id,
            organization_id=project.organization_id,
            file=f2,
        )
        # To verify that the full url has priority over the relative url,
        # we will also add a second ReleaseFile alias for file2.js (f3) w/o
        # hostname that points to an empty file. If the processor chooses
        # this empty file over the correct file2.js, it will not locate
        # context for the 2nd frame.
        with open(get_fixture_path("empty.js"), "rb") as f:
            f2_empty = File.objects.create(
                name="empty.js",
                type="release.file",
                headers={"Content-Type": "application/json"},
            )
            f2_empty.putfile(f)
        ReleaseFile.objects.create(
            name=f"~/{f2.name}",  # intentionally using f2.name ("file2.js")
            release_id=release.id,
            dist_id=dist.id,
            organization_id=project.organization_id,
            file=f2_empty,
        )
        # sourcemap
        # ----------
        with open(get_fixture_path("file.sourcemap.js"), "rb") as f:
            f_sourcemap = File.objects.create(
                name="file.sourcemap.js",
                type="release.file",
                headers={"Content-Type": "application/json"},
            )
            f_sourcemap.putfile(f)
        ReleaseFile.objects.create(
            name=f"http://example.com/{f_sourcemap.name}",
            release_id=release.id,
            dist_id=dist.id,
            organization_id=project.organization_id,
            file=f_sourcemap,
        )
        data = {
            "timestamp": self.min_ago,
            "message": "hello",
            "platform": "javascript",
            "release": "abc",
            "dist": "foo",
            "exception": {
                "values": [
                    {
                        "type": "Error",
                        "stacktrace": {
                            "frames": [
                                {
                                    "abs_path": "http://example.com/file.min.js?foo=bar",
                                    "filename": "file.min.js",
                                    "lineno": 1,
                                    "colno": 39,
                                },
                                {
                                    "abs_path": "http://example.com/file.min.js?foo=bar",
                                    "filename": "file.min.js",
                                    "lineno": 1,
                                    "colno": 79,
                                },
                            ]
                        },
                    }
                ]
            },
        }
        event = self.post_and_retrieve_event(data)
        assert "errors" not in event.data
        exception = event.interfaces["exception"]
        frame_list = exception.values[0].stacktrace.frames
        frame = frame_list[0]
        assert frame.data["resolved_with"] == "release-old"
        assert frame.data["symbolicated"]
        assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
        assert frame.context_line == "\treturn a + b; // fôo"
        assert frame.post_context == ["}"]
        frame = frame_list[1]
        assert frame.data["resolved_with"] == "release-old"
        assert frame.data["symbolicated"]
        assert frame.pre_context == ["function multiply(a, b) {", '\t"use strict";']
        assert frame.context_line == "\treturn a * b;"
        assert frame.post_context == [
            "}",
            "function divide(a, b) {",
            '\t"use strict";',
            "\ttry {",
            "\t\treturn multiply(add(a, b), a, b) / c;",
        ]
    def _test_expansion_via_release_archive(self, link_sourcemaps: bool) -> None:
        """Shared driver: expand two minified frames using artifacts packed
        into a single release bundle (zip + manifest).

        :param link_sourcemaps: when True, ``file.min.js`` keeps its inline
            ``//@ sourceMappingURL`` comment; when False the comment is
            stripped and the map is advertised via a ``Sourcemap`` header in
            the manifest entry instead.
        """
        project = self.project
        release = Release.objects.create(organization_id=project.organization_id, version="abc")
        release.add_project(project)
        # Bundle manifest: maps archive paths to artifact URLs and types.
        manifest = {
            "org": self.organization.slug,
            "release": release.version,
            "files": {
                "files/_/_/file.min.js": {
                    "url": "http://example.com/file.min.js",
                    "type": "minified_source",
                },
                "files/_/_/file1.js": {
                    "url": "http://example.com/file1.js",
                    "type": "source",
                },
                "files/_/_/file2.js": {
                    "url": "http://example.com/file2.js",
                    "type": "source",
                },
                "files/_/_/file.sourcemap.js": {
                    "url": "http://example.com/file.sourcemap.js",
                    "type": "source_map",
                },
            },
        }
        # b"SYSB" is a leading magic prefix before the zip payload.
        compressed = BytesIO(b"SYSB")
        with zipfile.ZipFile(compressed, "a") as zip_file:
            for rel_path, entry in manifest["files"].items():
                name = os.path.basename(rel_path)
                content = load_fixture(name)
                if name == "file.min.js" and not link_sourcemaps:
                    # Remove link to source map, add to header instead
                    content = content.replace(b"//@ sourceMappingURL=file.sourcemap.js", b"")
                    entry["headers"] = {"Sourcemap": "file.sourcemap.js"}
                zip_file.writestr(rel_path, content)
            zip_file.writestr("manifest.json", json.dumps(manifest))
        compressed.seek(0)
        file = File.objects.create(name="doesnt_matter", type="release.bundle")
        file.putfile(compressed)
        update_artifact_index(release, None, file)
        data = {
            "timestamp": self.min_ago,
            "message": "hello",
            "platform": "javascript",
            "release": "abc",
            "exception": {
                "values": [
                    {
                        "type": "Error",
                        "stacktrace": {
                            "frames": [
                                {
                                    "abs_path": "http://example.com/file.min.js",
                                    "filename": "file.min.js",
                                    "lineno": 1,
                                    "colno": 39,
                                },
                                {
                                    "abs_path": "http://example.com/file.min.js",
                                    "filename": "file.min.js",
                                    "lineno": 1,
                                    "colno": 79,
                                },
                            ]
                        },
                    }
                ]
            },
        }
        event = self.post_and_retrieve_event(data)
        assert "errors" not in event.data
        exception = event.interfaces["exception"]
        frame_list = exception.values[0].stacktrace.frames
        frame = frame_list[0]
        assert frame.data["resolved_with"] == "release-old"
        assert frame.data["symbolicated"]
        assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
        assert frame.context_line == "\treturn a + b; // fôo"
        assert frame.post_context == ["}"]
        frame = frame_list[1]
        assert frame.data["resolved_with"] == "release-old"
        assert frame.data["symbolicated"]
        assert frame.pre_context == ["function multiply(a, b) {", '\t"use strict";']
        assert frame.context_line == "\treturn a * b;"
        assert frame.post_context == [
            "}",
            "function divide(a, b) {",
            '\t"use strict";',
            "\ttry {",
            "\t\treturn multiply(add(a, b), a, b) / c;",
        ]
    @requires_symbolicator
    @pytest.mark.symbolicator
    def test_expansion_via_release_archive(self) -> None:
        """Release-bundle expansion with the inline sourceMappingURL comment kept."""
        self._test_expansion_via_release_archive(link_sourcemaps=True)
    @requires_symbolicator
    @pytest.mark.symbolicator
    def test_expansion_via_release_archive_no_sourcemap_link(self) -> None:
        """Release-bundle expansion with the map advertised via a Sourcemap header only."""
        self._test_expansion_via_release_archive(link_sourcemaps=False)
    @requires_symbolicator
    @pytest.mark.symbolicator
    def test_node_processing(self) -> None:
        """Symbolicate a Node.js (webpack-bundled) stacktrace via ``~/``
        release files.

        Frames whose minified position has a sourcemap token are remapped to
        ``webpack:///…`` paths with adjusted line numbers; frames at unmapped
        positions keep their original ``app:///`` path, line and column.
        """
        project = self.project
        release = Release.objects.create(
            organization_id=project.organization_id, version="nodeabc123"
        )
        release.add_project(project)
        with open(get_fixture_path("dist.bundle.js"), "rb") as f:
            f_minified = File.objects.create(
                name="dist.bundle.js",
                type="release.file",
                headers={"Content-Type": "application/javascript"},
            )
            f_minified.putfile(f)
        ReleaseFile.objects.create(
            name=f"~/{f_minified.name}",
            release_id=release.id,
            organization_id=project.organization_id,
            file=f_minified,
        )
        with open(get_fixture_path("dist.bundle.js.map"), "rb") as f:
            f_sourcemap = File.objects.create(
                name="dist.bundle.js.map",
                type="release.file",
                headers={"Content-Type": "application/javascript"},
            )
            f_sourcemap.putfile(f)
        ReleaseFile.objects.create(
            name=f"~/{f_sourcemap.name}",
            release_id=release.id,
            organization_id=project.organization_id,
            file=f_sourcemap,
        )
        data = {
            "timestamp": self.min_ago,
            "message": "hello",
            "platform": "node",
            "release": "nodeabc123",
            "exception": {
                "values": [
                    {
                        "type": "Error",
                        "stacktrace": {
                            "frames": [
                                {
                                    "filename": "app:///dist.bundle.js",
                                    "function": "bar",
                                    "lineno": 9,
                                    "colno": 2321,
                                },
                                {
                                    "filename": "app:///dist.bundle.js",
                                    "function": "foo",
                                    "lineno": 3,
                                    "colno": 2308,
                                },
                                {
                                    "filename": "app:///dist.bundle.js",
                                    "function": "App",
                                    "lineno": 3,
                                    "colno": 1011,
                                },
                                {
                                    "filename": "app:///dist.bundle.js",
                                    "function": "Object.<anonymous>",
                                    "lineno": 1,
                                    "colno": 1014,
                                },
                                {
                                    "filename": "app:///dist.bundle.js",
                                    "function": "__webpack_require__",
                                    "lineno": 20,
                                    "colno": 30,
                                },
                                {
                                    "filename": "app:///dist.bundle.js",
                                    "function": "<unknown>",
                                    "lineno": 18,
                                    "colno": 63,
                                },
                            ]
                        },
                    }
                ]
            },
        }
        event = self.post_and_retrieve_event(data)
        exception = event.interfaces["exception"]
        frame_list = exception.values[0].stacktrace.frames
        assert len(frame_list) == 6

        def assert_abs_path(abs_path):
            # This makes the test assertion forward compatible with percent-encoded URLs
            # See https://github.com/getsentry/symbolicator/pull/1137
            assert abs_path in (
                "webpack:///webpack/bootstrap d9a5a31d9276b73873d3",
                "webpack:///webpack/bootstrap%20d9a5a31d9276b73873d3",
            )

        assert_abs_path(frame_list[0].abs_path)
        assert frame_list[0].function == "bar"
        assert frame_list[0].lineno == 8
        assert_abs_path(frame_list[1].abs_path)
        assert frame_list[1].function == "foo"
        assert frame_list[1].lineno == 2
        assert_abs_path(frame_list[2].abs_path)
        assert frame_list[2].function == "App"
        assert frame_list[2].lineno == 2
        # 1:1014 in the minified source file is _unmapped_.
        # There are no tokens in the sourcemap for line 1.
        assert frame_list[3].abs_path == "app:///dist.bundle.js"
        assert frame_list[3].function == "Object.<anonymous>"
        assert frame_list[3].lineno == 1
        assert frame_list[3].colno == 1014
        assert_abs_path(frame_list[4].abs_path)
        assert frame_list[4].function == "__webpack_require__"
        assert frame_list[4].lineno == 19
        # 18:63 in the minified source file is _unmapped_.
        # There are no tokens in the sourcemap for line 18.
        assert frame_list[5].abs_path == "app:///dist.bundle.js"
        assert frame_list[5].function == "<unknown>"
        assert frame_list[5].lineno == 18
        assert frame_list[5].colno == 63
    @responses.activate
    def test_no_fetch_from_http(self) -> None:
        """For the ``node`` platform, sources must never be scraped over
        HTTP: even though the mocked responses below would serve the
        minified file and map, no frame may be processed and no
        ``raw_stacktrace`` may be produced.
        """
        # Mocked endpoints that MUST NOT be hit by the processor.
        responses.add(
            responses.GET,
            "http://example.com/node_app.min.js",
            body=load_fixture("node_app.min.js"),
            content_type="application/javascript; charset=utf-8",
        )
        responses.add(
            responses.GET,
            "http://example.com/node_app.min.js.map",
            body=load_fixture("node_app.min.js.map"),
            content_type="application/javascript; charset=utf-8",
        )
        # Let internal Snuba test traffic through the responses mock.
        responses.add_passthru(
            settings.SENTRY_SNUBA + "/tests/entities/generic_metrics_counters/insert",
        )
        data = {
            "timestamp": self.min_ago,
            "message": "hello",
            "platform": "node",
            "exception": {
                "values": [
                    {
                        "type": "Error",
                        "stacktrace": {
                            "frames": [
                                {
                                    "abs_path": "node_bootstrap.js",
                                    "filename": "node_bootstrap.js",
                                    "lineno": 1,
                                    "colno": 38,
                                },
                                {
                                    "abs_path": "timers.js",
                                    "filename": "timers.js",
                                    "lineno": 1,
                                    "colno": 39,
                                },
                                {
                                    "abs_path": "webpack:///internal",
                                    "filename": "internal",
                                    "lineno": 1,
                                    "colno": 43,
                                },
                                {
                                    "abs_path": "webpack:///~/some_dep/file.js",
                                    "filename": "file.js",
                                    "lineno": 1,
                                    "colno": 41,
                                },
                                {
                                    "abs_path": "webpack:///./node_modules/file.js",
                                    "filename": "file.js",
                                    "lineno": 1,
                                    "colno": 42,
                                },
                                {
                                    "abs_path": "http://example.com/node_app.min.js",
                                    "filename": "node_app.min.js",
                                    "lineno": 1,
                                    "colno": 40,
                                },
                            ]
                        },
                    }
                ]
            },
        }
        event = self.post_and_retrieve_event(data)
        exception = event.interfaces["exception"]
        frame_list = exception.values[0].stacktrace.frames
        # This one should not process, so this one should be none.
        assert exception.values[0].raw_stacktrace is None
        # None of the in app should update
        for x in range(6):
            assert not frame_list[x].in_app
    @responses.activate
    def test_html_file_with_query_param_ending_with_js_extension(self) -> None:
        """An HTML page whose URL merely *ends* in ``.js`` via a query
        parameter must not be treated as an invalid JS source: the event
        should process without errors.
        """
        responses.add(
            responses.GET,
            "http://example.com/file.html",
            body=(
                "<!doctype html><html><head></head><body><script>/*legit case*/</script></body></html>"
            ),
        )
        # Let internal Snuba test traffic through the responses mock.
        responses.add_passthru(
            settings.SENTRY_SNUBA + "/tests/entities/generic_metrics_counters/insert",
        )
        data = {
            "timestamp": self.min_ago,
            "message": "hello",
            "platform": "javascript",
            "exception": {
                "values": [
                    {
                        "type": "Error",
                        "stacktrace": {
                            "frames": [
                                {
                                    "abs_path": "http://example.com/file.html?sw=iddqd1337.js",
                                    "filename": "file.html",
                                    "lineno": 1,
                                    "colno": 1,
                                },
                            ]
                        },
                    }
                ]
            },
        }
        event = self.post_and_retrieve_event(data)
        assert "errors" not in event.data
    @requires_symbolicator
    @pytest.mark.symbolicator
    def test_expansion_with_debug_id(self) -> None:
        """Expand minified frames via debug-id-matched artifact bundles,
        with a legacy release bundle of the same files present.

        Frames covered by the ``debug_meta`` image must resolve with
        ``resolved_with == "debug-id"``; the plain ``file1.js`` frame (no
        debug-id coverage) still gets source context but without
        symbolication metadata.
        """
        project = self.project
        release = Release.objects.create(organization_id=project.organization_id, version="abc")
        release.add_project(project)
        debug_id = "c941d872-af1f-4f0c-a7ff-ad3d295fe153"
        # Assemble the artifact bundle zip (b"SYSB" magic prefix + archive).
        compressed = BytesIO(b"SYSB")
        with zipfile.ZipFile(compressed, "a") as zip_file:
            zip_file.writestr("files/_/_/file.min.js", load_fixture("file.min.js"))
            zip_file.writestr("files/_/_/file1.js", load_fixture("file1.js"))
            zip_file.writestr("files/_/_/file2.js", load_fixture("file2.js"))
            zip_file.writestr("files/_/_/empty.js", load_fixture("empty.js"))
            zip_file.writestr(
                "files/_/_/file.wc.sourcemap.js", load_fixture("file.wc.sourcemap.js")
            )
            zip_file.writestr(
                "manifest.json",
                json.dumps(
                    {
                        "org": self.organization.slug,
                        "release": release.version,
                        "files": {
                            "files/_/_/file.min.js": {
                                "url": "~/file.min.js",
                                "type": "minified_source",
                                "headers": {
                                    "content-type": "application/json",
                                    "debug-id": debug_id,
                                    "sourcemap": "file.sourcemap.js",
                                },
                            },
                            "files/_/_/file1.js": {
                                "url": "~/file1.js",
                                "type": "source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "files/_/_/file2.js": {
                                "url": "~/file2.js",
                                "type": "source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "files/_/_/empty.js": {
                                "url": "~/empty.js",
                                "type": "source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "files/_/_/file.wc.sourcemap.js": {
                                "url": "~/file.wc.sourcemap.js",
                                "type": "source_map",
                                "headers": {
                                    "content-type": "application/json",
                                    "debug-id": debug_id,
                                },
                            },
                        },
                    }
                ),
            )
        compressed.seek(0)
        file = File.objects.create(name="bundle.zip", type="artifact.bundle")
        file.putfile(compressed)
        # We want to also store the release files for this bundle, to check if they work together.
        compressed.seek(0)
        file_for_release = File.objects.create(name="bundle.zip", type="release.bundle")
        file_for_release.putfile(compressed)
        update_artifact_index(release, None, file_for_release)
        # Register the bundle with the project and link both the minified
        # source and the source map to the debug id.
        artifact_bundle = ArtifactBundle.objects.create(
            organization_id=self.organization.id, bundle_id=uuid4(), file=file, artifact_count=5
        )
        ProjectArtifactBundle.objects.create(
            organization_id=self.organization.id,
            project_id=self.project.id,
            artifact_bundle=artifact_bundle,
        )
        DebugIdArtifactBundle.objects.create(
            organization_id=self.organization.id,
            debug_id=debug_id,
            artifact_bundle=artifact_bundle,
            source_file_type=SourceFileType.MINIFIED_SOURCE.value,
        )
        DebugIdArtifactBundle.objects.create(
            organization_id=self.organization.id,
            debug_id=debug_id,
            artifact_bundle=artifact_bundle,
            source_file_type=SourceFileType.SOURCE_MAP.value,
        )
        data = {
            "timestamp": self.min_ago,
            "message": "hello",
            "platform": "javascript",
            "release": "abc",
            "exception": {
                "values": [
                    {
                        "type": "Error",
                        "stacktrace": {
                            "frames": [
                                {
                                    "abs_path": "http://example.com/file.min.js",
                                    "filename": "file.min.js",
                                    "lineno": 1,
                                    "colno": 39,
                                },
                                {
                                    "abs_path": "http://example.com/file.min.js",
                                    "filename": "file.min.js",
                                    "lineno": 1,
                                    "colno": 79,
                                },
                                # We want also to test the source without minification.
                                {
                                    "abs_path": "http://example.com/file1.js",
                                    "filename": "file1.js",
                                    "lineno": 3,
                                    "colno": 12,
                                },
                            ]
                        },
                    }
                ]
            },
        }
        event = self.post_and_retrieve_event(data)
        assert "errors" not in event.data
        exception = event.interfaces["exception"]
        frame_list = exception.values[0].stacktrace.frames
        frame = frame_list[0]
        assert frame.data["resolved_with"] == "debug-id"
        assert frame.data["symbolicated"]
        assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
        assert frame.context_line == "\treturn a + b; // fôo"
        assert frame.post_context == ["}"]
        frame = frame_list[1]
        assert frame.data["resolved_with"] == "debug-id"
        assert frame.data["symbolicated"]
        assert frame.pre_context == ["function multiply(a, b) {", '\t"use strict";']
        assert frame.context_line == "\treturn a * b;"
        assert frame.post_context == [
            "}",
            "function divide(a, b) {",
            '\t"use strict";',
            "\ttry {",
            "\t\treturn multiply(add(a, b), a, b) / c;",
        ]
        # Unminified frame: source context only, no symbolication metadata.
        frame = frame_list[2]
        assert "resolved_with" not in frame.data
        assert not frame.data.get("symbolicated", False)
        assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
        assert frame.context_line == "\treturn a + b; // fôo"
        assert frame.post_context == ["}"]
    @requires_symbolicator
    @pytest.mark.symbolicator
    def test_expansion_with_debug_id_and_sourcemap_without_sources_content(self) -> None:
        """Debug-id resolution where the source map has no embedded
        ``sourcesContent``: the original sources shipped in the same bundle
        must be used to fill in frame context.
        """
        debug_id = "c941d872-af1f-4f0c-a7ff-ad3d295fe153"
        # Assemble the artifact bundle zip (b"SYSB" magic prefix + archive).
        compressed = BytesIO(b"SYSB")
        with zipfile.ZipFile(compressed, "a") as zip_file:
            zip_file.writestr("files/_/_/file.min.js", load_fixture("file.min.js"))
            zip_file.writestr("files/_/_/file1.js", load_fixture("file1.js"))
            zip_file.writestr("files/_/_/file2.js", load_fixture("file2.js"))
            zip_file.writestr("files/_/_/empty.js", load_fixture("empty.js"))
            zip_file.writestr("files/_/_/file.sourcemap.js", load_fixture("file.sourcemap.js"))
            zip_file.writestr(
                "manifest.json",
                json.dumps(
                    {
                        "files": {
                            "files/_/_/file.min.js": {
                                "url": "~/file.min.js",
                                "type": "minified_source",
                                "headers": {
                                    "content-type": "application/json",
                                    "debug-id": debug_id,
                                    "sourcemap": "file.sourcemap.js",
                                },
                            },
                            "files/_/_/file1.js": {
                                "url": "~/file1.js",
                                "type": "source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "files/_/_/file2.js": {
                                "url": "~/file2.js",
                                "type": "source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "files/_/_/empty.js": {
                                "url": "~/empty.js",
                                "type": "source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "files/_/_/file.sourcemap.js": {
                                "url": "~/file.sourcemap.js",
                                "type": "source_map",
                                "headers": {
                                    "content-type": "application/json",
                                    "debug-id": debug_id,
                                },
                            },
                        }
                    }
                ),
            )
        compressed.seek(0)
        file = File.objects.create(name="bundle.zip", type="artifact.bundle")
        file.putfile(compressed)
        # Register the bundle and link both file types to the debug id.
        artifact_bundle = ArtifactBundle.objects.create(
            organization_id=self.organization.id, bundle_id=uuid4(), file=file, artifact_count=5
        )
        ProjectArtifactBundle.objects.create(
            organization_id=self.organization.id,
            project_id=self.project.id,
            artifact_bundle=artifact_bundle,
        )
        DebugIdArtifactBundle.objects.create(
            organization_id=self.organization.id,
            debug_id=debug_id,
            artifact_bundle=artifact_bundle,
            source_file_type=SourceFileType.MINIFIED_SOURCE.value,
        )
        DebugIdArtifactBundle.objects.create(
            organization_id=self.organization.id,
            debug_id=debug_id,
            artifact_bundle=artifact_bundle,
            source_file_type=SourceFileType.SOURCE_MAP.value,
        )
        data = {
            "timestamp": self.min_ago,
            "message": "hello",
            "platform": "javascript",
            "release": "abc",
            "exception": {
                "values": [
                    {
                        "type": "Error",
                        "stacktrace": {
                            "frames": [
                                {
                                    "abs_path": "http://example.com/file.min.js",
                                    "filename": "file.min.js",
                                    "lineno": 1,
                                    "colno": 39,
                                },
                                {
                                    "abs_path": "http://example.com/file.min.js",
                                    "filename": "file.min.js",
                                    "lineno": 1,
                                    "colno": 79,
                                },
                            ]
                        },
                    }
                ]
            },
        }
        event = self.post_and_retrieve_event(data)
        assert "errors" not in event.data
        exception = event.interfaces["exception"]
        frame_list = exception.values[0].stacktrace.frames
        frame = frame_list[0]
        assert frame.data["resolved_with"] == "debug-id"
        assert frame.data["symbolicated"]
        assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
        assert frame.context_line == "\treturn a + b; // fôo"
        assert frame.post_context == ["}"]
        frame = frame_list[1]
        assert frame.data["resolved_with"] == "debug-id"
        assert frame.data["symbolicated"]
        assert frame.pre_context == ["function multiply(a, b) {", '\t"use strict";']
        assert frame.context_line == "\treturn a * b;"
        assert frame.post_context == [
            "}",
            "function divide(a, b) {",
            '\t"use strict";',
            "\ttry {",
            "\t\treturn multiply(add(a, b), a, b) / c;",
        ]
    @requires_symbolicator
    @pytest.mark.symbolicator
    def test_expansion_with_debug_id_and_malformed_sourcemap(self) -> None:
        """Debug-id resolution pointing at an unparseable source map must
        produce exactly one ``js_invalid_source`` /
        ``malformed_sourcemap`` processing error.
        """
        debug_id = "c941d872-af1f-4f0c-a7ff-ad3d295fe153"
        # Assemble the artifact bundle zip (b"SYSB" magic prefix + archive)
        # whose map fixture is intentionally broken.
        compressed = BytesIO(b"SYSB")
        with zipfile.ZipFile(compressed, "a") as zip_file:
            zip_file.writestr("files/_/_/file.min.js", load_fixture("file.min.js"))
            zip_file.writestr("files/_/_/file1.js", load_fixture("file1.js"))
            zip_file.writestr("files/_/_/file2.js", load_fixture("file2.js"))
            zip_file.writestr("files/_/_/empty.js", load_fixture("empty.js"))
            zip_file.writestr(
                "files/_/_/file.malformed.sourcemap.js", load_fixture("file.malformed.sourcemap.js")
            )
            zip_file.writestr(
                "manifest.json",
                json.dumps(
                    {
                        "files": {
                            "files/_/_/file.min.js": {
                                "url": "~/file.min.js",
                                "type": "minified_source",
                                "headers": {
                                    "content-type": "application/json",
                                    "debug-id": debug_id,
                                    "sourcemap": "file.malformed.sourcemap.js",
                                },
                            },
                            "files/_/_/file1.js": {
                                "url": "~/file1.js",
                                "type": "source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "files/_/_/file2.js": {
                                "url": "~/file2.js",
                                "type": "source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "files/_/_/empty.js": {
                                "url": "~/empty.js",
                                "type": "source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "files/_/_/file.malformed.sourcemap.js": {
                                "url": "~/file.malformed.sourcemap.js",
                                "type": "source_map",
                                "headers": {
                                    "content-type": "application/json",
                                    "debug-id": debug_id,
                                },
                            },
                        }
                    }
                ),
            )
        compressed.seek(0)
        file = File.objects.create(name="bundle.zip", type="artifact.bundle")
        file.putfile(compressed)
        # Register the bundle and link both file types to the debug id.
        artifact_bundle = ArtifactBundle.objects.create(
            organization_id=self.organization.id, bundle_id=uuid4(), file=file, artifact_count=5
        )
        ProjectArtifactBundle.objects.create(
            organization_id=self.organization.id,
            project_id=self.project.id,
            artifact_bundle=artifact_bundle,
        )
        DebugIdArtifactBundle.objects.create(
            organization_id=self.organization.id,
            debug_id=debug_id,
            artifact_bundle=artifact_bundle,
            source_file_type=SourceFileType.MINIFIED_SOURCE.value,
        )
        DebugIdArtifactBundle.objects.create(
            organization_id=self.organization.id,
            debug_id=debug_id,
            artifact_bundle=artifact_bundle,
            source_file_type=SourceFileType.SOURCE_MAP.value,
        )
        data = {
            "timestamp": self.min_ago,
            "message": "hello",
            "platform": "javascript",
            "release": "abc",
            "exception": {
                "values": [
                    {
                        "type": "Error",
                        "stacktrace": {
                            "frames": [
                                {
                                    "abs_path": "http://example.com/file.min.js",
                                    "filename": "file.min.js",
                                    "lineno": 1,
                                    "colno": 39,
                                },
                                {
                                    "abs_path": "http://example.com/file.min.js",
                                    "filename": "file.min.js",
                                    "lineno": 1,
                                    "colno": 79,
                                },
                            ]
                        },
                    }
                ]
            },
        }
        event = self.post_and_retrieve_event(data)
        # The broken map yields a single malformed-sourcemap error.
        assert len(event.data["errors"]) == 1
        assert event.data["errors"][0] == {
            "type": "js_invalid_source",
            "symbolicator_type": "malformed_sourcemap",
            "url": "http://example.com/file.malformed.sourcemap.js",
        }
@requires_symbolicator
@pytest.mark.symbolicator
def test_expansion_with_debug_id_not_found(self) -> None:
project = self.project
release = Release.objects.create(organization_id=project.organization_id, version="abc")
release.add_project(project)
manifest = {
"org": self.organization.slug,
"release": release.version,
"files": {
"files/_/_/file.min.js": {
"url": "http://example.com/file.min.js",
"type": "minified_source",
},
"files/_/_/file1.js": {
"url": "http://example.com/file1.js",
"type": "source",
},
"files/_/_/file2.js": {
"url": "http://example.com/file2.js",
"type": "source",
},
"files/_/_/file.sourcemap.js": {
"url": "http://example.com/file.sourcemap.js",
"type": "source_map",
},
},
}
compressed = BytesIO(b"SYSB")
with zipfile.ZipFile(compressed, "a") as zip_file:
for rel_path, entry in manifest["files"].items():
name = os.path.basename(rel_path)
content = load_fixture(name)
if name == "file.min.js":
# Remove link to source map, add to header instead
content = content.replace(b"//@ sourceMappingURL=file.sourcemap.js", b"")
entry["headers"] = {"Sourcemap": "file.sourcemap.js"}
zip_file.writestr(rel_path, content)
zip_file.writestr("manifest.json", json.dumps(manifest))
compressed.seek(0)
file = File.objects.create(name="release_bundle.zip", type="release.bundle")
file.putfile(compressed)
update_artifact_index(release, None, file)
debug_id = "c941d872-af1f-4f0c-a7ff-ad3d295fe153"
data = {
"timestamp": self.min_ago,
"message": "hello",
"platform": "javascript",
"release": "abc",
"exception": {
"values": [
{
"type": "Error",
"stacktrace": {
"frames": [
{
"abs_path": "http://example.com/file.min.js",
"filename": "file.min.js",
"lineno": 1,
"colno": 39,
},
{
"abs_path": "http://example.com/file.min.js",
"filename": "file.min.js",
"lineno": 1,
"colno": 79,
},
# We want also to test the source without minification.
{
"abs_path": "http://example.com/file1.js",
"filename": "file1.js",
"lineno": 3,
"colno": 12,
},
]
},
}
]
},
"debug_meta": {
"images": [
{
"type": "sourcemap",
"debug_id": debug_id,
"code_file": "http://example.com/file.min.js",
}
]
},
}
event = self.post_and_retrieve_event(data)
exception = event.interfaces["exception"]
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
assert frame.context_line == "\treturn a + b; // fôo"
assert frame.post_context == ["}"]
frame = frame_list[1]
assert frame.pre_context == ["function multiply(a, b) {", '\t"use strict";']
assert frame.context_line == "\treturn a * b;"
assert frame.post_context == [
"}",
"function divide(a, b) {",
'\t"use strict";',
"\ttry {",
"\t\treturn multiply(add(a, b), a, b) / c;",
]
frame = frame_list[2]
assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
assert frame.context_line == "\treturn a + b; // fôo"
assert frame.post_context == ["}"]
@requires_symbolicator
@pytest.mark.symbolicator
def test_expansion_with_release_dist_pair_x(self) -> None:
project = self.project
release = Release.objects.create(organization_id=project.organization_id, version="abc")
release.add_project(project)
dist = release.add_dist("android")
# We want to also add debug_id information inside the manifest but not in the stack trace to replicate a
# real edge case that we can incur in.
debug_id = "c941d872-af1f-4f0c-a7ff-ad3d295fe153"
compressed = BytesIO(b"SYSB")
with zipfile.ZipFile(compressed, "a") as zip_file:
zip_file.writestr("files/_/_/file.min.js", load_fixture("file.min.js"))
zip_file.writestr("files/_/_/file1.js", load_fixture("file1.js"))
zip_file.writestr("files/_/_/file2.js", load_fixture("file2.js"))
zip_file.writestr("files/_/_/empty.js", load_fixture("empty.js"))
zip_file.writestr(
"files/_/_/file.wc.sourcemap.js", load_fixture("file.wc.sourcemap.js")
)
zip_file.writestr(
"manifest.json",
json.dumps(
{
"files": {
"files/_/_/file.min.js": {
"url": "~/file.min.js",
"type": "minified_source",
"headers": {
"content-type": "application/json",
"sourcemap": "file.wc.sourcemap.js",
"debug-id": debug_id,
},
},
"files/_/_/file1.js": {
"url": "~/file1.js",
"type": "source",
"headers": {
"content-type": "application/json",
},
},
"files/_/_/file2.js": {
"url": "~/file2.js",
"type": "source",
"headers": {
"content-type": "application/json",
},
},
"files/_/_/empty.js": {
"url": "~/empty.js",
"type": "source",
"headers": {
"content-type": "application/json",
},
},
"files/_/_/file.wc.sourcemap.js": {
"url": "~/file.wc.sourcemap.js",
"type": "source_map",
"headers": {
"content-type": "application/json",
"debug-id": debug_id,
},
},
},
}
),
)
compressed.seek(0)
file = File.objects.create(name="bundle.zip", type="artifact.bundle")
file.putfile(compressed)
artifact_bundle = ArtifactBundle.objects.create(
organization_id=self.organization.id, bundle_id=uuid4(), file=file, artifact_count=5
)
ProjectArtifactBundle.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
artifact_bundle=artifact_bundle,
)
ReleaseArtifactBundle.objects.create(
organization_id=self.organization.id,
release_name=release.version,
dist_name=dist.name,
artifact_bundle=artifact_bundle,
)
data = {
"timestamp": self.min_ago,
"message": "hello",
"platform": "javascript",
"release": release.version,
"dist": dist.name,
"exception": {
"values": [
{
"type": "Error",
"stacktrace": {
"frames": [
{
"abs_path": "http://example.com/file.min.js",
"filename": "file.min.js",
"lineno": 1,
"colno": 39,
},
{
"abs_path": "http://example.com/file.min.js",
"filename": "file.min.js",
"lineno": 1,
"colno": 79,
},
# We want also to test the source without minification.
{
"abs_path": "http://example.com/file1.js",
"filename": "file1.js",
"lineno": 3,
"colno": 12,
},
]
},
}
]
},
}
event = self.post_and_retrieve_event(data)
assert "errors" not in event.data
exception = event.interfaces["exception"]
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert frame.data["resolved_with"] == "release"
assert frame.data["symbolicated"]
assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
assert frame.context_line == "\treturn a + b; // fôo"
assert frame.post_context == ["}"]
frame = frame_list[1]
assert frame.data["resolved_with"] == "release"
assert frame.data["symbolicated"]
assert frame.pre_context == ["function multiply(a, b) {", '\t"use strict";']
assert frame.context_line == "\treturn a * b;"
assert frame.post_context == [
"}",
"function divide(a, b) {",
'\t"use strict";',
"\ttry {",
"\t\treturn multiply(add(a, b), a, b) / c;",
]
frame = frame_list[2]
assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
assert frame.context_line == "\treturn a + b; // fôo"
assert frame.post_context == ["}"]
@requires_symbolicator
@pytest.mark.symbolicator
def test_expansion_with_release_dist_pair_and_sourcemap_without_sources_content(self) -> None:
project = self.project
release = Release.objects.create(organization_id=project.organization_id, version="abc")
release.add_project(project)
dist = release.add_dist("android")
compressed = BytesIO(b"SYSB")
with zipfile.ZipFile(compressed, "a") as zip_file:
zip_file.writestr("files/_/_/file.min.js", load_fixture("file.min.js"))
zip_file.writestr("files/_/_/file1.js", load_fixture("file1.js"))
zip_file.writestr("files/_/_/file2.js", load_fixture("file2.js"))
zip_file.writestr("files/_/_/empty.js", load_fixture("empty.js"))
zip_file.writestr("files/_/_/file.sourcemap.js", load_fixture("file.sourcemap.js"))
zip_file.writestr(
"manifest.json",
json.dumps(
{
"files": {
"files/_/_/file.min.js": {
"url": "~/file.min.js",
"type": "minified_source",
"headers": {
"content-type": "application/json",
"sourcemap": "file.sourcemap.js",
},
},
"files/_/_/file1.js": {
"url": "~/file1.js",
"type": "source",
"headers": {
"content-type": "application/json",
},
},
"files/_/_/file2.js": {
"url": "~/file2.js",
"type": "source",
"headers": {
"content-type": "application/json",
},
},
"files/_/_/empty.js": {
"url": "~/empty.js",
"type": "source",
"headers": {
"content-type": "application/json",
},
},
"files/_/_/file.sourcemap.js": {
"url": "~/file.sourcemap.js",
"type": "source_map",
"headers": {
"content-type": "application/json",
},
},
}
}
),
)
compressed.seek(0)
file = File.objects.create(name="bundle.zip", type="artifact.bundle")
file.putfile(compressed)
artifact_bundle = ArtifactBundle.objects.create(
organization_id=self.organization.id, bundle_id=uuid4(), file=file, artifact_count=5
)
ProjectArtifactBundle.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
artifact_bundle=artifact_bundle,
)
ReleaseArtifactBundle.objects.create(
organization_id=self.organization.id,
release_name=release.version,
dist_name=dist.name,
artifact_bundle=artifact_bundle,
)
data = {
"timestamp": self.min_ago,
"message": "hello",
"platform": "javascript",
"release": release.version,
"dist": dist.name,
"exception": {
"values": [
{
"type": "Error",
"stacktrace": {
"frames": [
{
"abs_path": "http://example.com/file.min.js",
"filename": "file.min.js",
"lineno": 1,
"colno": 39,
},
{
"abs_path": "http://example.com/file.min.js",
"filename": "file.min.js",
"lineno": 1,
"colno": 79,
},
]
},
}
]
},
}
event = self.post_and_retrieve_event(data)
assert "errors" not in event.data
exception = event.interfaces["exception"]
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert frame.data["resolved_with"] == "release"
assert frame.data["symbolicated"]
assert frame.pre_context == ["function add(a, b) {", '\t"use strict";']
assert frame.context_line == "\treturn a + b; // fôo"
assert frame.post_context == ["}"]
frame = frame_list[1]
assert frame.data["resolved_with"] == "release"
assert frame.data["symbolicated"]
assert frame.pre_context == ["function multiply(a, b) {", '\t"use strict";']
assert frame.context_line == "\treturn a * b;"
assert frame.post_context == [
"}",
"function divide(a, b) {",
'\t"use strict";',
"\ttry {",
"\t\treturn multiply(add(a, b), a, b) / c;",
]
@requires_symbolicator
@pytest.mark.symbolicator
def test_expansion_with_release_and_malformed_sourcemap(self) -> None:
project = self.project
release = Release.objects.create(organization_id=project.organization_id, version="abc")
release.add_project(project)
compressed = BytesIO(b"SYSB")
with zipfile.ZipFile(compressed, "a") as zip_file:
zip_file.writestr("files/_/_/file.min.js", load_fixture("file.min.js"))
zip_file.writestr("files/_/_/file1.js", load_fixture("file1.js"))
zip_file.writestr("files/_/_/file2.js", load_fixture("file2.js"))
zip_file.writestr("files/_/_/empty.js", load_fixture("empty.js"))
zip_file.writestr(
"files/_/_/file.malformed.sourcemap.js", load_fixture("file.malformed.sourcemap.js")
)
zip_file.writestr(
"manifest.json",
json.dumps(
{
"files": {
"files/_/_/file.min.js": {
"url": "~/file.min.js",
"type": "minified_source",
"headers": {
"content-type": "application/json",
"sourcemap": "file.malformed.sourcemap.js",
},
},
"files/_/_/file1.js": {
"url": "~/file1.js",
"type": "source",
"headers": {
"content-type": "application/json",
},
},
"files/_/_/file2.js": {
"url": "~/file2.js",
"type": "source",
"headers": {
"content-type": "application/json",
},
},
"files/_/_/empty.js": {
"url": "~/empty.js",
"type": "source",
"headers": {
"content-type": "application/json",
},
},
"files/_/_/file.malformed.sourcemap.js": {
"url": "~/file.malformed.sourcemap.js",
"type": "source_map",
"headers": {
"content-type": "application/json",
},
},
}
}
),
)
compressed.seek(0)
file = File.objects.create(name="bundle.zip", type="artifact.bundle")
file.putfile(compressed)
artifact_bundle = ArtifactBundle.objects.create(
organization_id=self.organization.id, bundle_id=uuid4(), file=file, artifact_count=5
)
ProjectArtifactBundle.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
artifact_bundle=artifact_bundle,
)
ReleaseArtifactBundle.objects.create(
organization_id=self.organization.id,
release_name=release.version,
artifact_bundle=artifact_bundle,
)
data = {
"timestamp": self.min_ago,
"message": "hello",
"platform": "javascript",
"release": release.version,
"exception": {
"values": [
{
"type": "Error",
"stacktrace": {
"frames": [
{
"abs_path": "http://example.com/file.min.js",
"filename": "file.min.js",
"lineno": 1,
"colno": 39,
},
{
"abs_path": "http://example.com/file.min.js",
"filename": "file.min.js",
"lineno": 1,
"colno": 79,
},
]
},
}
]
},
}
event = self.post_and_retrieve_event(data)
assert len(event.data["errors"]) == 1
assert event.data["errors"][0] == {
"type": "js_invalid_source",
"symbolicator_type": "malformed_sourcemap",
"url": "http://example.com/file.malformed.sourcemap.js",
}
def generate_symbolicated_in_app_event_data(self, frames):
return {
"timestamp": self.min_ago,
"message": "hello",
"platform": "javascript",
"release": "abc",
"exception": {
"values": [
{
"type": "Error",
"stacktrace": {
"frames": frames,
},
}
]
},
}
@requires_symbolicator
@pytest.mark.symbolicator
def test_symbolicated_in_app_after_symbolication(self) -> None:
self.project.update_option("sentry:scrape_javascript", False)
release = Release.objects.create(
organization_id=self.project.organization_id, version="abc"
)
release.add_project(self.project)
self._create_source_files_and_sourcemaps(release)
data = self.generate_symbolicated_in_app_event_data(
[
{
"abs_path": "http://example.com/file.min.js",
"filename": "file.min.js",
"lineno": 1,
"colno": 39,
"in_app": True,
},
{
"abs_path": "http://example.com/file.min.js",
"filename": "file.min.js",
"lineno": 1,
"colno": 183,
"in_app": True,
},
]
)
event = self.post_and_retrieve_event(data)
exception = event.interfaces["exception"]
frame_list = exception.values[0].stacktrace.frames
# Verify both frames are symbolicated and in_app
assert frame_list[0].data["symbolicated"] is True
assert frame_list[0].in_app is True
assert frame_list[1].data["symbolicated"] is True
assert frame_list[1].in_app is True
# Verify symbolicated_in_app is True since all in_app frames are symbolicated
assert event.data["symbolicated_in_app"] is True
@requires_symbolicator
@pytest.mark.symbolicator
def test_symbolicated_in_app_false_with_unsymbolicated_frame(self) -> None:
self.project.update_option("sentry:scrape_javascript", False)
release = Release.objects.create(
organization_id=self.project.organization_id, version="abc"
)
release.add_project(self.project)
# Create source files and sourcemaps for file.min.js
for file in ["file.min.js", "file1.js", "file2.js", "file.sourcemap.js"]:
with open(get_fixture_path(file), "rb") as f:
f1 = File.objects.create(
name=file,
type="release.file",
headers={},
)
f1.putfile(f)
ReleaseFile.objects.create(
name=f"http://example.com/{f1.name}",
release_id=release.id,
organization_id=self.project.organization_id,
file=f1,
)
data = self.generate_symbolicated_in_app_event_data(
[
{
"abs_path": "http://example.com/file.min.js",
"filename": "file.min.js",
"lineno": 1,
"colno": 39,
"in_app": True,
},
{
"abs_path": "http://example.com/webpack2.min.js",
"filename": "webpack2.min.js",
"lineno": 1,
"colno": 183,
"in_app": True,
},
]
)
event = self.post_and_retrieve_event(data)
exception = event.interfaces["exception"]
frame_list = exception.values[0].stacktrace.frames
# First frame should be symbolicated and in_app
assert frame_list[0].data["symbolicated"] is True
assert frame_list[0].in_app is True
# Second frame should not be symbolicated but is in_app
assert frame_list[1].data["symbolicated"] is False
assert frame_list[1].in_app is True
# Verify symbolicated_in_app is False since not all in_app frames are symbolicated
assert event.data["symbolicated_in_app"] is False
@requires_symbolicator
@pytest.mark.symbolicator
def test_symbolicated_in_app_none_with_no_in_app_frames(self) -> None:
self.project.update_option("sentry:scrape_javascript", False)
release = Release.objects.create(
organization_id=self.project.organization_id, version="abc"
)
release.add_project(self.project)
data = self.generate_symbolicated_in_app_event_data(
[
{
"abs_path": "http://example.com/webpack2.min.js",
"filename": "webpack2.min.js",
"lineno": 1,
"colno": 39,
"in_app": False,
},
{
"abs_path": "http://example.com/webpack2.min.js",
"filename": "webpack2.min.js",
"lineno": 1,
"colno": 183,
"in_app": False,
},
]
)
event = self.post_and_retrieve_event(data)
exception = event.interfaces["exception"]
frame_list = exception.values[0].stacktrace.frames
# Verify both frames are not symbolicated and not in_app
assert frame_list[0].data["symbolicated"] is False
assert frame_list[0].in_app is False
assert frame_list[1].data["symbolicated"] is False
assert frame_list[1].in_app is False
# Verify symbolicated_in_app is None since there are no in_app frames
# Using get() since the key might not exist when the value would be None
assert event.data.get("symbolicated_in_app") is None
| TestJavascriptIntegration |
python | ray-project__ray | python/ray/util/collective/const.py | {
"start": 530,
"end": 865
} | class ____(Enum):
"""ray.util.collective environment variables."""
NCCL_USE_MULTISTREAM = auto(), lambda v: (v or "True") == "True"
@property
def val(self):
"""Return the output of the lambda against the system's env value."""
_, default_fn = self.value
return default_fn(os.getenv(self.name))
| ENV |
python | pennersr__django-allauth | allauth/headless/socialaccount/views.py | {
"start": 1009,
"end": 1911
} | class ____(APIView):
input_class = SignupInput
def handle(self, request, *args, **kwargs):
self.sociallogin = flows.signup.get_pending_signup(self.request)
if not self.sociallogin:
return ConflictResponse(request)
if not get_socialaccount_adapter().is_open_for_signup(
request, self.sociallogin
):
return ForbiddenResponse(request)
return super().handle(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return SocialLoginResponse(request, self.sociallogin)
def post(self, request, *args, **kwargs):
response = flows.signup.signup_by_form(
self.request, self.sociallogin, self.input
)
return AuthenticationResponse.from_response(request, response)
def get_input_kwargs(self):
return {"sociallogin": self.sociallogin}
| ProviderSignupView |
python | huggingface__transformers | src/transformers/models/roformer/tokenization_roformer.py | {
"start": 10140,
"end": 20534
} | class ____(PreTrainedTokenizer):
r"""
Construct a RoFormer tokenizer. Based on [Rust Jieba](https://pypi.org/project/rjieba/).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
do_basic_tokenize (`bool`, *optional*, defaults to `True`):
Whether or not to do basic tokenization before WordPiece.
never_split (`Iterable`, *optional*):
Collection of tokens which will never be split during tokenization. Only has an effect when
`do_basic_tokenize=True`
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this
[issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
Example:
```python
>>> from transformers import RoFormerTokenizer
>>> tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
>>> tokenizer.tokenize("今天天气非常好。")
['今', '天', '天', '气', '非常', '好', '。']
```"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(
self,
vocab_file,
do_lower_case=True,
do_basic_tokenize=True,
never_split=None,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
tokenize_chinese_chars=True,
strip_accents=None,
**kwargs,
):
if not os.path.isfile(vocab_file):
raise ValueError(
f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
" model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
)
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(
do_lower_case=do_lower_case,
never_split=never_split,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
try:
import rjieba
except ImportError:
raise ImportError(
"You need to install rjieba to use RoFormerTokenizer. "
"See https://pypi.org/project/rjieba/ for installation."
)
self.jieba = rjieba
super().__init__(
do_lower_case=do_lower_case,
do_basic_tokenize=do_basic_tokenize,
never_split=never_split,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
**kwargs,
)
@property
def do_lower_case(self):
return self.basic_tokenizer.do_lower_case
@property
def vocab_size(self):
return len(self.vocab)
def __getstate__(self):
state = self.__dict__.copy()
state["jieba"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
import rjieba
self.jieba = rjieba
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text, use_jieba=True):
split_tokens = []
if use_jieba:
for wholword in self.jieba.cut(text, False):
if wholword in self.vocab:
split_tokens.append(wholword)
else:
# use bert tokenizer to _tokenize
char_list = self._tokenize(wholword, use_jieba=False)
split_tokens.extend(char_list)
else:
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
# If the token is part of the never_split set
if token in self.basic_tokenizer.never_split:
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = " ".join(tokens).replace(" ##", "").strip()
return out_string
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoFormer sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
else:
vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!"
)
index = token_index
writer.write(token + "\n")
index += 1
return (vocab_file,)
__all__ = ["RoFormerTokenizer"]
| RoFormerTokenizer |
python | bokeh__bokeh | src/bokeh/application/handlers/notebook.py | {
"start": 2099,
"end": 5228
} | class ____(CodeHandler):
''' A Handler that uses code in a Jupyter notebook for modifying Bokeh
Documents.
'''
_logger_text = "%s: call to %s() ignored when running notebooks with the 'bokeh' command."
_origin = "Notebook"
def __init__(self, *, filename: PathLike, argv: list[str] = [], package: ModuleType | None = None) -> None:
'''
Keywords:
filename (str) : a path to a Jupyter notebook (".ipynb") file
'''
nbformat = import_required('nbformat', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.')
nbconvert = import_required('nbconvert', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.')
class StripMagicsProcessor(nbconvert.preprocessors.Preprocessor):
"""
Preprocessor to convert notebooks to Python source while stripping
out all magics (i.e IPython specific syntax).
"""
_magic_pattern = re.compile(r'^\s*(?P<magic>%%\w\w+)($|(\s+))')
def strip_magics(self, source: str) -> str:
"""
Given the source of a cell, filter out all cell and line magics.
"""
filtered: list[str] = []
for line in source.splitlines():
match = self._magic_pattern.match(line)
if match is None:
filtered.append(line)
else:
msg = 'Stripping out IPython magic {magic} in code cell {cell}'
message = msg.format(cell=self._cell_counter, magic=match.group('magic'))
log.warning(message)
return '\n'.join(filtered)
def preprocess_cell(self, cell, resources, index):
if cell['cell_type'] == 'code':
self._cell_counter += 1
cell['source'] = self.strip_magics(cell['source'])
return cell, resources
def __call__(self, nb, resources):
self._cell_counter = 0
return self.preprocess(nb,resources)
preprocessors = [StripMagicsProcessor()]
with open(filename, encoding="utf-8") as f:
nb = nbformat.read(f, nbformat.NO_CONVERT)
exporter = nbconvert.PythonExporter()
for preprocessor in preprocessors:
exporter.register_preprocessor(preprocessor)
source, _ = exporter.from_notebook_node(nb)
source = source.replace('get_ipython().run_line_magic', '')
source = source.replace('get_ipython().magic', '')
super().__init__(source=source, filename=filename, argv=argv, package=package)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| NotebookHandler |
python | optuna__optuna | optuna/artifacts/exceptions.py | {
"start": 44,
"end": 334
} | class ____(OptunaError):
"""Exception raised when an artifact is not found.
It is typically raised while calling
:meth:`~optuna.artifacts._protocol.ArtifactStore.open_reader` or
:meth:`~optuna.artifacts._protocol.ArtifactStore.remove` methods.
"""
...
| ArtifactNotFound |
python | cherrypy__cherrypy | cherrypy/process/servers.py | {
"start": 3477,
"end": 3555
} | class ____:
"""Timeout constants."""
occupied = 5
free = 1
| Timeouts |
python | milvus-io__pymilvus | tests/test_async_grpc_handler.py | {
"start": 243,
"end": 19592
} | class ____:
"""Test cases for AsyncGrpcHandler class"""
@pytest.mark.asyncio
async def test_load_partitions_refresh_attribute(self) -> None:
"""
Test that load_partitions correctly accesses request.refresh instead of request.is_refresh.
This test verifies the fix for issue #2796.
"""
# Setup mock channel and stub
mock_channel = AsyncMock()
mock_channel.channel_ready = AsyncMock()
mock_channel.close = AsyncMock()
mock_channel._unary_unary_interceptors = []
# Create handler with mocked channel
handler = AsyncGrpcHandler(channel=mock_channel)
handler._is_channel_ready = True
# Mock the async stub
mock_stub = AsyncMock()
handler._async_stub = mock_stub
# Create a mock response for LoadPartitions
mock_response = MagicMock()
mock_status = MagicMock()
mock_status.code = 0
mock_status.error_code = 0
mock_status.reason = ""
mock_response.status = mock_status
mock_stub.LoadPartitions = AsyncMock(return_value=mock_response)
# Mock wait_for_loading_partitions to avoid actual waiting
handler.wait_for_loading_partitions = AsyncMock()
# Mock Prepare.load_partitions to return a request with refresh attribute
with patch('pymilvus.client.async_grpc_handler.Prepare') as mock_prepare, \
patch('pymilvus.client.async_grpc_handler.check_pass_param'), \
patch('pymilvus.client.async_grpc_handler.check_status'), \
patch('pymilvus.client.async_grpc_handler._api_level_md', return_value={}):
# Create mock request with refresh attribute (not is_refresh)
mock_request = MagicMock()
mock_request.refresh = True # This is the correct attribute name
mock_prepare.load_partitions.return_value = mock_request
# Call load_partitions
await handler.load_partitions(
collection_name="test_collection",
partition_names=["partition1", "partition2"],
replica_number=1,
timeout=30,
refresh=True
)
# Verify that Prepare.load_partitions was called correctly
mock_prepare.load_partitions.assert_called_once_with(
collection_name="test_collection",
partition_names=["partition1", "partition2"],
replica_number=1,
refresh=True
)
# Verify that wait_for_loading_partitions was called with is_refresh parameter
# correctly set from request.refresh (not request.is_refresh)
handler.wait_for_loading_partitions.assert_called_once_with(
collection_name="test_collection",
partition_names=["partition1", "partition2"],
is_refresh=True, # Should be the value from request.refresh
timeout=30,
refresh=True
)
@pytest.mark.asyncio
async def test_load_partitions_without_refresh(self) -> None:
"""Test load_partitions when refresh parameter is not provided"""
# Setup mock channel and stub
mock_channel = AsyncMock()
mock_channel.channel_ready = AsyncMock()
mock_channel.close = AsyncMock()
mock_channel._unary_unary_interceptors = []
# Create handler with mocked channel
handler = AsyncGrpcHandler(channel=mock_channel)
handler._is_channel_ready = True
# Mock the async stub
mock_stub = AsyncMock()
handler._async_stub = mock_stub
# Create a mock response for LoadPartitions
mock_response = MagicMock()
mock_status = MagicMock()
mock_status.code = 0
mock_status.error_code = 0
mock_status.reason = ""
mock_response.status = mock_status
mock_stub.LoadPartitions = AsyncMock(return_value=mock_response)
# Mock wait_for_loading_partitions
handler.wait_for_loading_partitions = AsyncMock()
# Mock Prepare.load_partitions
with patch('pymilvus.client.async_grpc_handler.Prepare') as mock_prepare, \
patch('pymilvus.client.async_grpc_handler.check_pass_param'), \
patch('pymilvus.client.async_grpc_handler.check_status'), \
patch('pymilvus.client.async_grpc_handler._api_level_md', return_value={}):
# Create mock request with default refresh value
mock_request = MagicMock()
mock_request.refresh = False # Default value when not specified
mock_prepare.load_partitions.return_value = mock_request
# Call load_partitions without refresh parameter
await handler.load_partitions(
collection_name="test_collection",
partition_names=["partition1"],
timeout=30
)
# Verify that wait_for_loading_partitions was called with is_refresh=False
handler.wait_for_loading_partitions.assert_called_once_with(
collection_name="test_collection",
partition_names=["partition1"],
is_refresh=False, # Should be False when not specified
timeout=30
)
@pytest.mark.asyncio
async def test_wait_for_loading_partitions(self) -> None:
"""Test wait_for_loading_partitions method"""
# Setup mock channel
mock_channel = AsyncMock()
mock_channel._unary_unary_interceptors = []
# Create handler with mocked channel
handler = AsyncGrpcHandler(channel=mock_channel)
handler._is_channel_ready = True
# Mock get_loading_progress to return 100 immediately
handler.get_loading_progress = AsyncMock(return_value=100)
# Call wait_for_loading_partitions
await handler.wait_for_loading_partitions(
collection_name="test_collection",
partition_names=["partition1", "partition2"],
is_refresh=True,
timeout=30
)
# Verify that get_loading_progress was called
handler.get_loading_progress.assert_called_once_with(
"test_collection",
["partition1", "partition2"],
timeout=30,
is_refresh=True
)
@pytest.mark.asyncio
async def test_wait_for_loading_partitions_timeout(self) -> None:
"""Test that wait_for_loading_partitions raises exception on timeout"""
# Setup mock channel
mock_channel = AsyncMock()
mock_channel._unary_unary_interceptors = []
# Create handler with mocked channel
handler = AsyncGrpcHandler(channel=mock_channel)
handler._is_channel_ready = True
# Mock get_loading_progress to always return less than 100
handler.get_loading_progress = AsyncMock(return_value=50)
# Call wait_for_loading_partitions with very short timeout
with pytest.raises(MilvusException) as exc_info:
await handler.wait_for_loading_partitions(
collection_name="test_collection",
partition_names=["partition1"],
is_refresh=False,
timeout=0.001 # Very short timeout to trigger timeout error
)
assert "wait for loading partition timeout" in str(exc_info.value)
@pytest.mark.asyncio
async def test_load_partitions_with_resource_groups(self) -> None:
"""Test load_partitions with additional parameters like resource_groups"""
# Setup mock channel and stub
mock_channel = AsyncMock()
mock_channel.channel_ready = AsyncMock()
mock_channel.close = AsyncMock()
mock_channel._unary_unary_interceptors = []
# Create handler with mocked channel
handler = AsyncGrpcHandler(channel=mock_channel)
handler._is_channel_ready = True
# Mock the async stub
mock_stub = AsyncMock()
handler._async_stub = mock_stub
# Create a mock response for LoadPartitions
mock_response = MagicMock()
mock_status = MagicMock()
mock_status.code = 0
mock_status.error_code = 0
mock_status.reason = ""
mock_response.status = mock_status
mock_stub.LoadPartitions = AsyncMock(return_value=mock_response)
# Mock wait_for_loading_partitions
handler.wait_for_loading_partitions = AsyncMock()
# Mock Prepare.load_partitions
with patch('pymilvus.client.async_grpc_handler.Prepare') as mock_prepare, \
patch('pymilvus.client.async_grpc_handler.check_pass_param'), \
patch('pymilvus.client.async_grpc_handler.check_status'), \
patch('pymilvus.client.async_grpc_handler._api_level_md', return_value={}):
# Create mock request
mock_request = MagicMock()
mock_request.refresh = False
mock_prepare.load_partitions.return_value = mock_request
# Call load_partitions with resource_groups
await handler.load_partitions(
collection_name="test_collection",
partition_names=["partition1"],
replica_number=2,
resource_groups=["rg1", "rg2"],
timeout=30
)
# Verify that Prepare.load_partitions was called with resource_groups
mock_prepare.load_partitions.assert_called_once_with(
collection_name="test_collection",
partition_names=["partition1"],
replica_number=2,
resource_groups=["rg1", "rg2"]
)
@pytest.mark.asyncio
async def test_create_index_with_nested_field(self) -> None:
"""
Test that create_index works with nested field names (e.g., "chunks[text_vector]").
This test verifies the fix for issue where AsyncMilvusClient.create_index
failed for nested fields in Array of Struct.
"""
# Setup mock channel and stub
mock_channel = AsyncMock()
mock_channel.channel_ready = AsyncMock()
mock_channel._unary_unary_interceptors = []
handler = AsyncGrpcHandler(channel=mock_channel)
handler._is_channel_ready = True
mock_stub = AsyncMock()
handler._async_stub = mock_stub
# Mock wait_for_creating_index to return success
handler.wait_for_creating_index = AsyncMock(return_value=(True, ""))
handler.alloc_timestamp = AsyncMock(return_value=12345)
# Mock CreateIndex response
mock_create_response = MagicMock()
mock_status = MagicMock()
mock_status.code = 0
mock_status.reason = ""
mock_create_response.status = mock_status
mock_stub.CreateIndex = AsyncMock(return_value=mock_create_response)
with patch('pymilvus.client.async_grpc_handler.Prepare') as mock_prepare, \
patch('pymilvus.client.async_grpc_handler.check_pass_param'), \
patch('pymilvus.client.async_grpc_handler.check_status'), \
patch('pymilvus.client.async_grpc_handler._api_level_md', return_value={}):
# Create mock index request
mock_index_request = MagicMock()
mock_prepare.create_index_request.return_value = mock_index_request
# Call create_index with a nested field name (Array of Struct field path)
nested_field_name = "chunks[text_vector]"
index_params = {
"metric_type": "MAX_SIM_COSINE",
"index_type": "HNSW",
"params": {"M": 16, "efConstruction": 200}
}
await handler.create_index(
collection_name="test_collection",
field_name=nested_field_name,
params=index_params,
index_name="test_index"
)
# Verify that Prepare.create_index_request was called with the nested field name
mock_prepare.create_index_request.assert_called_once_with(
"test_collection",
nested_field_name,
index_params,
index_name="test_index"
)
# Verify that CreateIndex was called on the stub
# The key point is that no MilvusException was raised before this call
# (which would have happened with the old client-side validation)
mock_stub.CreateIndex.assert_called_once()
# Verify wait_for_creating_index was called
handler.wait_for_creating_index.assert_called_once()
@pytest.mark.asyncio
async def test_search_with_embedding_list(self) -> None:
"""
Test that search works with EmbeddingList input data.
This test verifies the fix for issue where AsyncMilvusClient.search
failed when using EmbeddingList for array-of-vector searches.
"""
# Setup mock channel and stub
mock_channel = AsyncMock()
mock_channel.channel_ready = AsyncMock()
mock_channel._unary_unary_interceptors = []
handler = AsyncGrpcHandler(channel=mock_channel)
handler._is_channel_ready = True
mock_stub = AsyncMock()
handler._async_stub = mock_stub
# Mock Search response with proper SearchResultData structure
mock_search_result_data = schema_pb2.SearchResultData(
num_queries=2,
top_k=0,
scores=[],
ids=schema_pb2.IDs(int_id=schema_pb2.LongArray(data=[])),
topks=[],
primary_field_name="id"
)
mock_search_response = MagicMock()
mock_status = MagicMock()
mock_status.code = 0
mock_status.reason = ""
mock_search_response.status = mock_status
mock_search_response.results = mock_search_result_data
mock_search_response.session_ts = 0
mock_stub.Search = AsyncMock(return_value=mock_search_response)
# Create EmbeddingList data
from pymilvus.client.embedding_list import EmbeddingList
emb_list1 = EmbeddingList()
emb_list1.add([0.1, 0.2, 0.3, 0.4, 0.5])
emb_list2 = EmbeddingList()
emb_list2.add([0.5, 0.4, 0.3, 0.2, 0.1])
data = [emb_list1, emb_list2]
with patch('pymilvus.client.async_grpc_handler.Prepare') as mock_prepare, \
patch('pymilvus.client.async_grpc_handler.check_pass_param'), \
patch('pymilvus.client.async_grpc_handler.check_status'), \
patch('pymilvus.client.async_grpc_handler._api_level_md', return_value={}):
# Mock search_requests_with_expr to return a request
mock_request = MagicMock()
mock_prepare.search_requests_with_expr.return_value = mock_request
await handler.search(
collection_name="test_collection",
data=data,
anns_field="vector",
param={"metric_type": "COSINE"},
limit=10
)
# Verify that Prepare.search_requests_with_expr was called
mock_prepare.search_requests_with_expr.assert_called_once()
call_args = mock_prepare.search_requests_with_expr.call_args
# Verify that is_embedding_list was passed as True in kwargs
assert call_args.kwargs.get("is_embedding_list") is True
# Verify data was converted (not EmbeddingList objects anymore)
passed_data = call_args[0][1] # data is the second positional argument
assert isinstance(passed_data, list)
assert not isinstance(passed_data[0], EmbeddingList)
# The data should be converted to flat arrays
assert isinstance(passed_data[0], (list, np.ndarray))
# Verify Search was called
mock_stub.Search.assert_called_once()
@pytest.mark.asyncio
async def test_hybrid_search_with_embedding_list(self) -> None:
"""
Test that hybrid_search works with EmbeddingList input data.
"""
# Setup mock channel and stub
mock_channel = AsyncMock()
mock_channel.channel_ready = AsyncMock()
mock_channel._unary_unary_interceptors = []
handler = AsyncGrpcHandler(channel=mock_channel)
handler._is_channel_ready = True
mock_stub = AsyncMock()
handler._async_stub = mock_stub
# Mock HybridSearch response with proper SearchResultData structure
mock_hybrid_result_data = schema_pb2.SearchResultData(
num_queries=1,
top_k=0,
scores=[],
ids=schema_pb2.IDs(int_id=schema_pb2.LongArray(data=[])),
topks=[],
primary_field_name="id"
)
mock_hybrid_response = MagicMock()
mock_status = MagicMock()
mock_status.code = 0
mock_status.reason = ""
mock_hybrid_response.status = mock_status
mock_hybrid_response.results = mock_hybrid_result_data
mock_stub.HybridSearch = AsyncMock(return_value=mock_hybrid_response)
# Create AnnSearchRequest with EmbeddingList
from pymilvus.client.embedding_list import EmbeddingList
from pymilvus.client.abstract import AnnSearchRequest
import numpy as np
emb_list = EmbeddingList()
emb_list.add([0.1, 0.2, 0.3])
req = AnnSearchRequest(
data=[emb_list],
anns_field="vector",
param={"metric_type": "COSINE"},
limit=10
)
with patch('pymilvus.client.async_grpc_handler.Prepare') as mock_prepare, \
patch('pymilvus.client.async_grpc_handler.check_pass_param'), \
patch('pymilvus.client.async_grpc_handler.check_status'), \
patch('pymilvus.client.async_grpc_handler._api_level_md', return_value={}):
# Mock search_requests_with_expr and hybrid_search_request_with_ranker
mock_search_request = MagicMock()
mock_hybrid_request = MagicMock()
mock_prepare.search_requests_with_expr.return_value = mock_search_request
mock_prepare.hybrid_search_request_with_ranker.return_value = mock_hybrid_request
# Mock rerank (BaseRanker)
mock_ranker = MagicMock()
await handler.hybrid_search(
collection_name="test_collection",
reqs=[req],
rerank=mock_ranker,
limit=10
)
# Verify that search_requests_with_expr was called with converted data
mock_prepare.search_requests_with_expr.assert_called_once()
call_args = mock_prepare.search_requests_with_expr.call_args
# Verify is_embedding_list flag was set
assert call_args.kwargs.get("is_embedding_list") is True
# Verify data was converted
passed_data = call_args[0][1]
assert isinstance(passed_data, list)
assert not isinstance(passed_data[0], EmbeddingList)
# Verify HybridSearch was called
mock_stub.HybridSearch.assert_called_once()
| TestAsyncGrpcHandler |
python | pypa__warehouse | warehouse/packaging/interfaces.py | {
"start": 2414,
"end": 2537
} | class ____(Exception):
"""Base exception for project name unavailability errors."""
pass
| ProjectNameUnavailableError |
python | huggingface__transformers | src/transformers/models/sam3_tracker/modeling_sam3_tracker.py | {
"start": 5335,
"end": 5891
} | class ____(PreTrainedModel):
config_class = Sam3TrackerConfig
base_model_prefix = "sam3_tracker"
main_input_name = "pixel_values"
input_modalities = ("image",)
_supports_sdpa = True
_supports_flash_attn_2 = True
_supports_attention_backend = True
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Sam3TrackerModel):
if module.no_memory_embedding is not None:
init.zeros_(module.no_memory_embedding)
| Sam3TrackerPreTrainedModel |
python | apache__airflow | airflow-core/src/airflow/ti_deps/deps/mapped_task_expanded.py | {
"start": 880,
"end": 1412
} | class ____(BaseTIDep):
"""Checks that a mapped task has been expanded before its TaskInstance can run."""
NAME = "Task has been mapped"
IGNORABLE = False
IS_TASK_DEP = False
def _get_dep_statuses(self, ti, session, dep_context):
if dep_context.ignore_unmapped_tasks:
return
if ti.map_index == -1:
yield self._failing_status(reason="The task has yet to be mapped!")
return
yield self._passing_status(reason="The task has been mapped")
| MappedTaskIsExpanded |
python | apache__airflow | providers/segment/src/airflow/providers/segment/operators/segment_track_event.py | {
"start": 1100,
"end": 2708
} | class ____(BaseOperator):
"""
Send Track Event to Segment for a specified user_id and event.
:param user_id: The ID for this user in your database. (templated)
:param event: The name of the event you're tracking. (templated)
:param properties: A dictionary of properties for the event. (templated)
:param segment_conn_id: The connection ID to use when connecting to Segment.
:param segment_debug_mode: Determines whether Segment should run in debug mode.
Defaults to False
"""
template_fields: Sequence[str] = ("user_id", "event", "properties")
ui_color = "#ffd700"
def __init__(
self,
*,
user_id: str,
event: str,
properties: dict | None = None,
segment_conn_id: str = "segment_default",
segment_debug_mode: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.user_id = user_id
self.event = event
properties = properties or {}
self.properties = properties
self.segment_debug_mode = segment_debug_mode
self.segment_conn_id = segment_conn_id
def execute(self, context: Context) -> None:
hook = SegmentHook(segment_conn_id=self.segment_conn_id, segment_debug_mode=self.segment_debug_mode)
self.log.info(
"Sending track event (%s) for user id: %s with properties: %s",
self.event,
self.user_id,
self.properties,
)
hook.track(user_id=self.user_id, event=self.event, properties=self.properties) # type: ignore
| SegmentTrackEventOperator |
python | dask__dask | dask/config.py | {
"start": 10722,
"end": 24734
} | class ____:
"""Temporarily set configuration values within a context manager
Parameters
----------
arg : mapping or None, optional
A mapping of configuration key-value pairs to set.
**kwargs :
Additional key-value pairs to set. If ``arg`` is provided, values set
in ``arg`` will be applied before those in ``kwargs``.
Double-underscores (``__``) in keyword arguments will be replaced with
``.``, allowing nested values to be easily set.
Examples
--------
>>> import dask
Set ``'foo.bar'`` in a context, by providing a mapping.
>>> with dask.config.set({'foo.bar': 123}):
... pass
Set ``'foo.bar'`` in a context, by providing a keyword argument.
>>> with dask.config.set(foo__bar=123):
... pass
Set ``'foo.bar'`` globally.
>>> dask.config.set(foo__bar=123) # doctest: +SKIP
See Also
--------
dask.config.get
"""
config: dict
# [(op, path, value), ...]
_record: list[tuple[Literal["insert", "replace"], tuple[str, ...], Any]]
def __init__(
self,
arg: Mapping | None = None,
config: dict | None = None,
lock: threading.Lock = config_lock,
**kwargs,
):
if config is None: # Keep Sphinx autofunction documentation clean
config = global_config
with lock:
self.config = config
self._record = []
if arg is not None:
for key, value in arg.items():
key = check_deprecations(key)
self._assign(key.split("."), value, config)
if kwargs:
for key, value in kwargs.items():
key = key.replace("__", ".")
key = check_deprecations(key)
self._assign(key.split("."), value, config)
def __enter__(self):
return self.config
def __exit__(self, type, value, traceback):
for op, path, value in reversed(self._record):
d = self.config
if op == "replace":
for key in path[:-1]:
d = d.setdefault(key, {})
d[path[-1]] = value
else: # insert
for key in path[:-1]:
try:
d = d[key]
except KeyError:
break
else:
d.pop(path[-1], None)
def _assign(
self,
keys: Sequence[str],
value: Any,
d: dict,
path: tuple[str, ...] = (),
record: bool = True,
) -> None:
"""Assign value into a nested configuration dictionary
Parameters
----------
keys : Sequence[str]
The nested path of keys to assign the value.
value : object
d : dict
The part of the nested dictionary into which we want to assign the
value
path : tuple[str], optional
The path history up to this point.
record : bool, optional
Whether this operation needs to be recorded to allow for rollback.
"""
key = canonical_name(keys[0], d)
path = path + (key,)
if len(keys) == 1:
if record:
if key in d:
self._record.append(("replace", path, d[key]))
else:
self._record.append(("insert", path, None))
d[key] = value
else:
if key not in d:
if record:
self._record.append(("insert", path, None))
d[key] = {}
# No need to record subsequent operations after an insert
record = False
self._assign(keys[1:], value, d[key], path, record=record)
def collect(paths: list[str] = paths, env: Mapping[str, str] | None = None) -> dict:
"""
Collect configuration from paths and environment variables
Parameters
----------
paths : list[str]
A list of paths to search for yaml config files
env : Mapping[str, str]
The system environment variables
Returns
-------
config: dict
See Also
--------
dask.config.refresh: collect configuration and update into primary config
"""
if env is None:
env = os.environ
configs = [*collect_yaml(paths=paths), collect_env(env=env)]
return merge(*configs)
def refresh(
config: dict | None = None, defaults: list[Mapping] = defaults, **kwargs
) -> None:
"""
Update configuration by re-reading yaml files and env variables
This mutates the global dask.config.config, or the config parameter if
passed in.
This goes through the following stages:
1. Clearing out all old configuration
2. Updating from the stored defaults from downstream libraries
(see update_defaults)
3. Updating from yaml files and environment variables
4. Automatically renaming deprecated keys (with a warning)
Note that some functionality only checks configuration once at startup and
may not change behavior, even if configuration changes. It is recommended
to restart your python process if convenient to ensure that new
configuration changes take place.
See Also
--------
dask.config.collect: for parameters
dask.config.update_defaults
"""
if config is None: # Keep Sphinx autofunction documentation clean
config = global_config
config.clear()
for d in defaults:
update(config, d, priority="old")
update(config, collect(**kwargs))
rename(deprecations, config)
def get(
key: str,
default: Any = no_default,
config: dict | None = None,
override_with: Any = None,
) -> Any:
"""
Get elements from global config
If ``override_with`` is not None this value will be passed straight back.
Useful for getting kwarg defaults from Dask config.
Use '.' for nested access
Examples
--------
>>> from dask import config
>>> config.get('foo') # doctest: +SKIP
{'x': 1, 'y': 2}
>>> config.get('foo.x') # doctest: +SKIP
1
>>> config.get('foo.x.y', default=123) # doctest: +SKIP
123
>>> config.get('foo.y', override_with=None) # doctest: +SKIP
2
>>> config.get('foo.y', override_with=3) # doctest: +SKIP
3
See Also
--------
dask.config.set
"""
if override_with is not None:
return override_with
if config is None: # Keep Sphinx autofunction documentation clean
config = global_config
keys = key.split(".")
result = config
for k in keys:
k = canonical_name(k, result)
try:
result = result[k]
except (TypeError, IndexError, KeyError):
if default is no_default:
raise
return default
return result
def pop(key: str, default: Any = no_default, config: dict = config) -> Any:
"""Like ``get``, but remove the element if found
See Also
--------
dask.config.get
dask.config.set
"""
keys = key.split(".")
result = config
for i, k in enumerate(keys):
k = canonical_name(k, result)
try:
if i == len(keys) - 1:
return result.pop(k)
else:
result = result[k]
except (TypeError, IndexError, KeyError):
if default is no_default:
raise
return default
def update_defaults(
new: Mapping, config: dict = config, defaults: list[Mapping] = defaults
) -> None:
"""Add a new set of defaults to the configuration
It does two things:
1. Add the defaults to a global collection to be used by refresh later
2. Updates the global config with the new configuration.
Old values are prioritized over new ones, unless the current value
is the old default, in which case it's updated to the new default.
"""
current_defaults = merge(*defaults)
defaults.append(new)
update(config, new, priority="new-defaults", defaults=current_defaults)
def expand_environment_variables(config: Any) -> Any:
"""Expand environment variables in a nested config dictionary
This function will recursively search through any nested dictionaries
and/or lists.
Parameters
----------
config : dict, iterable, or str
Input object to search for environment variables
Returns
-------
config : same type as input
Examples
--------
>>> expand_environment_variables({'x': [1, 2, '$USER']}) # doctest: +SKIP
{'x': [1, 2, 'my-username']}
"""
if isinstance(config, Mapping):
return {k: expand_environment_variables(v) for k, v in config.items()}
elif isinstance(config, str):
return os.path.expandvars(config)
elif isinstance(config, (list, tuple, builtins.set)):
return type(config)(expand_environment_variables(v) for v in config)
else:
return config
#: Mapping of {deprecated key: new key} for renamed keys, or {deprecated key: None} for
#: removed keys. All deprecated keys must use '-' instead of '_'.
#: This is used in three places:
#: 1. In refresh(), which calls rename() to rename and warn upon loading
#: from ~/.config/dask.yaml, DASK_ env variables, etc.
#: 2. in distributed/config.py and equivalent modules, where we perform additional
#: distributed-specific renames for the yaml/env config and enrich this dict
#: 3. from individual calls to dask.config.set(), which internally invoke
# check_deprecations()
deprecations: dict[str, str | None] = {
"fuse-ave-width": "optimization.fuse.ave-width",
"fuse-max-height": "optimization.fuse.max-height",
"fuse-max-width": "optimization.fuse.max-width",
"fuse-rename-keys": "optimization.fuse.rename-keys",
"fuse-max-depth-new-edges": "optimization.fuse.max-depth-new-edges",
# See https://github.com/dask/distributed/pull/4916
"ucx.cuda-copy": "distributed.ucx.cuda_copy",
"ucx.tcp": "distributed.ucx.tcp",
"ucx.nvlink": "distributed.ucx.nvlink",
"ucx.infiniband": "distributed.ucx.infiniband",
"ucx.rdmacm": "distributed.ucx.rdmacm",
"ucx.net-devices": "distributed.ucx.net-devices",
"ucx.reuse-endpoints": "distributed.ucx.reuse-endpoints",
"rmm.pool-size": "distributed.rmm.pool-size",
"shuffle": "dataframe.shuffle.algorithm",
"array.rechunk-threshold": "array.rechunk.threshold",
"dataframe.shuffle.algorithm": "dataframe.shuffle.method",
"dataframe.shuffle-compression": "dataframe.shuffle.compression",
"admin.traceback.shorten.what": "admin.traceback.shorten", # changed in 2023.9.0
"array.shuffle.chunksize-tolerance": "array.chunk-size-tolerance",
}
def rename(
deprecations: Mapping[str, str | None] = deprecations, config: dict = config
) -> None:
"""Rename old keys to new keys
This helps migrate older configuration versions over time
See Also
--------
check_deprecations
"""
for key in deprecations:
try:
value = pop(key, config=config)
except (TypeError, IndexError, KeyError):
continue
key = canonical_name(key, config=config)
new = check_deprecations(key, deprecations)
if new:
set({new: value}, config=config)
def check_deprecations(
key: str, deprecations: Mapping[str, str | None] = deprecations
) -> str:
"""Check if the provided value has been renamed or removed
Parameters
----------
key : str
The configuration key to check
deprecations : Dict[str, str]
The mapping of aliases
Examples
--------
>>> deprecations = {"old_key": "new_key", "invalid": None}
>>> check_deprecations("old_key", deprecations=deprecations) # doctest: +SKIP
FutureWarning: Dask configuration key 'old_key' has been deprecated; please use "new_key" instead
>>> check_deprecations("invalid", deprecations=deprecations)
Traceback (most recent call last):
...
ValueError: Dask configuration key 'invalid' has been removed
>>> check_deprecations("another_key", deprecations=deprecations)
'another_key'
Returns
-------
new: str
The proper key, whether the original (if no deprecation) or the aliased
value
See Also
--------
rename
"""
old = key.replace("_", "-")
if old in deprecations:
new = deprecations[old]
if new:
warnings.warn(
f"Dask configuration key {key!r} has been deprecated; "
f"please use {new!r} instead",
FutureWarning,
)
return new
else:
raise ValueError(f"Dask configuration key {key!r} has been removed")
else:
return key
def serialize(data: Any) -> str:
"""Serialize config data into a string.
Typically used to pass config via the ``DASK_INTERNAL_INHERIT_CONFIG`` environment variable.
Parameters
----------
data: json-serializable object
The data to serialize
Returns
-------
serialized_data: str
The serialized data as a string
"""
return base64.urlsafe_b64encode(json.dumps(data).encode()).decode()
def deserialize(data: str) -> Any:
"""De-serialize config data into the original object.
Typically when receiving config via the ``DASK_INTERNAL_INHERIT_CONFIG`` environment variable.
Parameters
----------
data: str
String serialized by :func:`dask.config.serialize`
Returns
-------
deserialized_data: obj
The de-serialized data
"""
return json.loads(base64.urlsafe_b64decode(data.encode()).decode())
def _initialize() -> None:
fn = os.path.join(os.path.dirname(__file__), "dask.yaml")
with open(fn) as f:
_defaults = yaml.safe_load(f)
update_defaults(_defaults)
refresh()
_initialize()
| set |
python | tensorflow__tensorflow | tensorflow/python/keras/optimizer_v1.py | {
"start": 10384,
"end": 12940
} | class ____(Optimizer):
"""Adagrad optimizer.
Adagrad is an optimizer with parameter-specific learning rates,
which are adapted relative to how frequently a parameter gets
updated during training. The more updates a parameter receives,
the smaller the updates.
It is recommended to leave the parameters of this optimizer
at their default values.
# Arguments
lr: float >= 0. Initial learning rate.
epsilon: float >= 0. If `None`, defaults to `backend.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
# References
- [Adaptive Subgradient Methods for Online Learning and Stochastic
Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
"""
def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):
super(Adagrad, self).__init__(**kwargs)
with backend.name_scope(self.__class__.__name__):
self.lr = backend.variable(lr, name='lr')
self.decay = backend.variable(decay, name='decay')
self.iterations = backend.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = backend.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def _create_all_weights(self, params):
shapes = [backend.int_shape(p) for p in params]
accumulators = [backend.zeros(shape) for shape in shapes]
self.weights = accumulators
return accumulators
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
accumulators = self._create_all_weights(params)
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (
1. /
(1. +
self.decay * math_ops.cast(self.iterations,
backend.dtype(self.decay))))
for p, g, a in zip(params, grads, accumulators):
new_a = a + math_ops.square(g) # update accumulator
self.updates.append(state_ops.assign(a, new_a))
new_p = p - lr * g / (backend.sqrt(new_a) + self.epsilon)
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(backend.get_value(self.lr)),
'decay': float(backend.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adagrad, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| Adagrad |
python | ray-project__ray | python/ray/air/_internal/device_manager/npu.py | {
"start": 617,
"end": 3478
} | class ____(TorchDeviceManager):
"""Ascend NPU device manager"""
@staticmethod
def register_custom_torch_dist_backend():
if NPU_TORCH_PACKAGE_AVAILABLE:
import torch_npu # noqa: F401, F811
def is_available(self) -> bool:
if not NPU_TORCH_PACKAGE_AVAILABLE:
return False
return torch.npu.is_available()
def get_devices(self) -> List[torch.device]:
"""Gets the correct torch device list configured for this process.
Returns a list of torch NPU devices allocated for the current worker.
If no NPUs are assigned, then it returns a list with a single CPU device.
"""
if NPU_TORCH_PACKAGE_AVAILABLE and torch.npu.is_available():
npu_ids = [
str(id)
for id in ray.get_runtime_context().get_accelerator_ids()[
ray_constants.NPU
]
]
device_ids = []
if len(npu_ids) > 0:
npu_visible_str = os.environ.get(ASCEND_RT_VISIBLE_DEVICES_ENV_VAR, "")
if npu_visible_str and npu_visible_str != "NoDevFiles":
npu_visible_list = npu_visible_str.split(",")
else:
npu_visible_list = []
for npu_id in npu_ids:
try:
device_ids.append(npu_visible_list.index(npu_id))
except IndexError:
raise RuntimeError(
"ASCEND_RT_VISIBLE_DEVICES set incorrectly. "
f"Got {npu_visible_str}, expected to include {npu_id}. "
"Did you override the `ASCEND_RT_VISIBLE_DEVICES` "
"environment variable?"
)
else:
# If called on the driver or outside of Ray Train, return the
# 0th device.
device_ids.append(0)
devices = [torch.device(f"npu:{device_id}") for device_id in device_ids]
else:
raise RuntimeError(
"Using NPUTorchDeviceManager but torch npu is not available."
)
return devices
def set_device(self, device: Union[torch.device, int]):
torch.npu.set_device(device)
def supports_stream(self) -> bool:
"""Validate if the device type support to create a stream"""
return True
def create_stream(self, device):
"""Create a stream on NPU device"""
return torch.npu.Stream(device)
def get_stream_context(self, stream):
"""Get a torch.stream context on NPU device"""
return torch.npu.stream(stream)
def get_current_stream(self):
"""Get current stream for NPU device"""
return torch.npu.current_stream()
| NPUTorchDeviceManager |
python | huggingface__transformers | src/transformers/models/pegasus_x/modeling_pegasus_x.py | {
"start": 1680,
"end": 2845
} | class ____:
"""Wrapper for dimension info."""
batch_size: int # batch size
seq_len: int # token length
block_size: int # block size
num_heads: int # num heads
hidden_dim: int # hidden dim
dim_per_head: int # dim per head
num_blocks: int # num blocks
global_len: int # global length
padded_seq_len: int # padded token seq length
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """Shift input ids one token to the right.

    Prepends ``decoder_start_token_id``, drops the final token of each row,
    and replaces any label-masking value (-100) in the shifted result with
    ``pad_token_id``.
    """
    shifted = torch.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id

    if pad_token_id is None:
        raise ValueError("self.model.config.pad_token_id has to be defined.")
    # -100 is the ignore-index used in labels; it must not survive as an id.
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted
# Copied from transformers.models.bart.modeling_bart.BartScaledWordEmbedding with Bart->PegasusX
| DimensionInfo |
python | lepture__authlib | authlib/jose/rfc7518/jws_algs.py | {
"start": 1065,
"end": 1993
class ____(JWSAlgorithm):
    """HMAC-based JWS signing algorithms.

    Supported algorithm names:

    - HS256: HMAC using SHA-256
    - HS384: HMAC using SHA-384
    - HS512: HMAC using SHA-512
    """

    # Kept as class attributes so ``getattr(self, f"SHA{n}")`` can resolve
    # the hash constructor from the numeric suffix.
    SHA256 = hashlib.sha256
    SHA384 = hashlib.sha384
    SHA512 = hashlib.sha512

    def __init__(self, sha_type):
        self.name = f"HS{sha_type}"
        self.description = f"HMAC using SHA-{sha_type}"
        # Map e.g. 256 -> self.SHA256 -> hashlib.sha256.
        self.hash_alg = getattr(self, f"SHA{sha_type}")

    def prepare_key(self, raw_data):
        """Wrap raw key material as an OctKey."""
        return OctKey.import_key(raw_data)

    def sign(self, msg, key):
        """Return the HMAC digest of *msg* under *key*."""
        # stdlib hmac is faster than the cryptography-package equivalent.
        secret = key.get_op_key("sign")
        return hmac.new(secret, msg, self.hash_alg).digest()

    def verify(self, msg, sig, key):
        """Constant-time check that *sig* matches the digest of *msg*."""
        secret = key.get_op_key("verify")
        expected = hmac.new(secret, msg, self.hash_alg).digest()
        return hmac.compare_digest(sig, expected)
| HMACAlgorithm |
python | scipy__scipy | scipy/linalg/tests/test_basic.py | {
"start": 86944,
"end": 90335
} | class ____:
def test_basic1(self):
c = np.array([1, 2, 3, 5])
b = np.array([1, -1, 1, 0])
x = solve_circulant(c, b)
y = solve(circulant(c), b)
assert_allclose(x, y)
def test_basic2(self):
# b is a 2-d matrix.
c = np.array([1, 2, -3, -5])
b = np.arange(12).reshape(4, 3)
x = solve_circulant(c, b)
y = solve(circulant(c), b)
assert_allclose(x, y)
def test_basic3(self):
    # b is a 3-d matrix.
    c = np.array([1, 2, -3, -5])
    b = np.arange(24).reshape(4, 3, 2)
    x = solve_circulant(c, b)
    # Dense reference: flatten b's trailing axes, solve, then restore the
    # shape — mirroring how solve_circulant broadcasts over extra dimensions.
    y = solve(circulant(c), b.reshape(4, -1)).reshape(b.shape)
    assert_allclose(x, y)
def test_complex(self):
# Complex b and c
c = np.array([1+2j, -3, 4j, 5])
b = np.arange(8).reshape(4, 2) + 0.5j
x = solve_circulant(c, b)
y = solve(circulant(c), b)
assert_allclose(x, y)
def test_random_b_and_c(self):
    # Random b and c
    # Seeded RNG keeps the test deterministic across runs.
    rng = np.random.RandomState(54321)
    c = rng.standard_normal(50)
    b = rng.standard_normal(50)
    x = solve_circulant(c, b)
    y = solve(circulant(c), b)
    assert_allclose(x, y)
def test_singular(self):
    # c gives a singular circulant matrix.
    c = np.array([1, 1, 0, 0])
    b = np.array([1, 2, 3, 4])
    # singular='lstsq' requests a least-squares fallback instead of raising.
    x = solve_circulant(c, b, singular='lstsq')
    y, res, rnk, s = lstsq(circulant(c), b)
    assert_allclose(x, y)
    # NOTE(review): this re-uses the solutions (x, y) as the (c, b) inputs;
    # presumably circulant(x) is also (near-)singular so the default
    # singular handling raises LinAlgError — confirm that is the intent.
    assert_raises(LinAlgError, solve_circulant, x, y)
def test_axis_args(self):
    # Test use of caxis, baxis and outaxis.
    # c has shape (2, 1, 4)
    c = np.array([[[-1, 2.5, 3, 3.5]], [[1, 6, 6, 6.5]]])
    # b has shape (3, 4)
    b = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [1, -1, 0, 0]])
    x = solve_circulant(c, b, baxis=1)
    assert_equal(x.shape, (4, 2, 3))
    # Build the expected result one circulant system at a time.
    expected = np.empty_like(x)
    expected[:, 0, :] = solve(circulant(c[0].ravel()), b.T)
    expected[:, 1, :] = solve(circulant(c[1].ravel()), b.T)
    assert_allclose(x, expected)
    # outaxis=-1 moves the solution axis to the end of the result.
    x = solve_circulant(c, b, baxis=1, outaxis=-1)
    assert_equal(x.shape, (2, 3, 4))
    assert_allclose(np.moveaxis(x, -1, 0), expected)
    # np.swapaxes(c, 1, 2) has shape (2, 4, 1); b.T has shape (4, 3).
    x = solve_circulant(np.swapaxes(c, 1, 2), b.T, caxis=1)
    assert_equal(x.shape, (4, 2, 3))
    assert_allclose(x, expected)
def test_native_list_arguments(self):
# Same as test_basic1 using python's native list.
c = [1, 2, 3, 5]
b = [1, -1, 1, 0]
x = solve_circulant(c, b)
y = solve(circulant(c), b)
assert_allclose(x, y)
@pytest.mark.parametrize('dt_c', [int, float, np.float32, complex, np.complex64])
@pytest.mark.parametrize('dt_b', [int, float, np.float32, complex, np.complex64])
def test_empty(self, dt_c, dt_b):
    # Empty inputs should yield an empty solution whose dtype matches what
    # non-empty inputs of the same dtypes would produce.
    c = np.array([], dtype=dt_c)
    b = np.array([], dtype=dt_b)
    x = solve_circulant(c, b)
    assert x.shape == (0,)
    assert x.dtype == solve_circulant(np.arange(3, dtype=dt_c),
                                      np.ones(3, dtype=dt_b)).dtype
    # A 2-d empty b keeps its 2-d (0, 0) shape.
    b = np.empty((0, 0), dtype=dt_b)
    x1 = solve_circulant(c, b)
    assert x1.shape == (0, 0)
    assert x1.dtype == x.dtype
| TestSolveCirculant |
python | google__jax | jax/_src/export/shape_poly.py | {
"start": 9152,
"end": 13828
} | class ____:
  """Represents a multiplication of factors.

  The representation is a sequence of _DimFactor factors along with their
  integer exponents (>= 1). The empty sequence represents the constant 1.
  """
  __slots__ = ["_factors", "_hash", "_size"]

  def __init__(self, sorted_factors: SortedFactors):
    self._factors = sorted_factors
    self._hash = None  # computed lazily on first __hash__ call
    # Syntactic size of the term; used by _syntactic_cmp for ordering.
    self._size = sum((1 + f_exp * f._size) for f, f_exp in self._factors)

  def __hash__(self):
    # Lazily compute and cache the hash of the factor sequence.
    if self._hash is None:
      self._hash = hash(tuple(self._factors))
    return self._hash

  def __str__(self):
    # Render e.g. n^2*m; an exponent of 1 is left implicit.
    return "*".join(f"{fact}^{exponent}" if exponent != 1 else str(fact)
                    for fact, exponent in sorted(self._factors))
  __repr__ = __str__

  @staticmethod
  def from_var(v: str) -> _DimTerm:
    # A single variable with exponent 1.
    return _DimTerm(((_DimFactor.from_var(v), 1),))

  @staticmethod
  def from_factor(f: _DimFactor, f_exp: int):
    return _DimTerm(((f, f_exp),))

  @staticmethod
  def from_operation(operation: str, *operands: DimSize,
                     scope: SymbolicScope) -> _DimTerm:
    return _DimTerm(((_DimFactor.from_operation(operation, *operands,
                                                scope=scope), 1),))

  def to_var(self) -> str | None:
    """Extract the variable name from a term.
    Return None if the term is not a single variable."""
    a = self.to_factor()
    return a.to_var() if a is not None else None

  def to_factor(self) -> _DimFactor | None:
    """Extract the single factor from a term.
    Return None if the term is not a single factor."""
    if len(self._factors) > 1: return None
    (f, f_exp), = self._factors
    if f_exp != 1: return None
    return f

  def get_vars(self) -> set[str]:
    # All the vars that appear in the term.
    acc = set()
    for (f, _) in self._factors:
      acc.update(f.get_vars())
    return acc

  @property
  def is_constant(self):
    # The empty factor sequence represents the constant 1.
    return not self._factors

  def _syntactic_cmp(self, other: _DimTerm) -> int:
    """Returns -1 if self < other, 0 if self == other, 1 if self > other.
    The comparison is done lexicographically (syntactic), to be used for sorting.
    The result is not related to the semantic value.
    """
    # Larger (syntactically bigger) terms compare greater; ties fall through
    # to a factor-by-factor lexicographic comparison.
    if c := cmp_comparable(self._size, other._size): return c
    def cmp_factor(s_f: tuple[_DimFactor, int], o_f: tuple[_DimFactor, int]) -> int:
      if c := s_f[0]._syntactic_cmp(o_f[0]): return c
      # Consider the terms with exponents to be expanded as multiplications.
      # Then a higher exponent for a "large" factor should lead to a "larger" term.
      return cmp_comparable(s_f[1], o_f[1])
    return cmp_sequence(self._factors, other._factors, cmp_factor)

  def __lt__(self, other: _DimTerm):
    """Lexicographic comparison"""
    return self._syntactic_cmp(other) < 0

  def __le__(self, other: _DimTerm):
    """Lexicographic comparison"""
    return self._syntactic_cmp(other) <= 0

  def __gt__(self, other: _DimTerm):
    """Lexicographic comparison"""
    return self._syntactic_cmp(other) > 0

  def __ge__(self, other: _DimTerm):
    """Lexicographic comparison"""
    return self._syntactic_cmp(other) >= 0

  def __eq__(self, other) -> bool:
    if not isinstance(other, _DimTerm): return False
    return self._syntactic_cmp(other) == 0

  def __ne__(self, other) -> bool:
    return not (self == other)

  def mul(self, other: _DimTerm) -> _DimTerm:
    """
    Returns the product with another term. Example: (n^2*m) * n == n^3 * m.
    """
    return _DimTerm(_DimExpr._linear_combination_sorted_pairs(self._factors, 0, 1,
                                                              other._factors, 0, 1))

  def divide(self, divisor: _DimTerm) -> _DimTerm:
    """
    Divides by another term. Raises a InconclusiveDimensionOperation
    if the result is not a term.
    For example, (n^3 * m) // n == n^2*m, but n // m fails.
    """
    new_factors = _DimExpr._linear_combination_sorted_pairs(self._factors, 0, 1,
                                                            divisor._factors, 0, -1)
    # A non-positive exponent would mean the divisor is not a syntactic
    # factor of this term, so exact division is impossible.
    for _, f_exp in new_factors:
      if f_exp <= 0:
        raise InconclusiveDimensionOperation(f"Cannot divide {self} by {divisor}.")
    return _DimTerm(new_factors)

  def evaluate(self, env: DimVarEnv, scope: SymbolicScope):
    # Evaluate each factor under `env`, expand exponents as repeated
    # multiplication, and multiply everything together. The empty term
    # evaluates to the dimension constant 1.
    prod = lambda xs: functools.reduce(_evaluate_multiply, xs) if xs else core.dim_constant(1)
    def pow_opt(v, p: int):
      return v if p == 1 else prod([v] * p)
    return prod([pow_opt(f.evaluate(env, scope), exp) for f, exp in self._factors])

  def __deepcopy__(self, memo):
    return _DimTerm(copy.deepcopy(self._factors, memo))


# The constant 1, as a term.
_DimTerm_one = _DimTerm(())
| _DimTerm |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.